repo_name stringlengths 7 111 | __id__ int64 16.6k 19,705B | blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 151 | content_id stringlengths 40 40 | detected_licenses sequence | license_type stringclasses 2 values | repo_url stringlengths 26 130 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k 687M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0 10.2M ⌀ | gha_stargazers_count int32 0 178k ⌀ | gha_forks_count int32 0 88.9k ⌀ | gha_open_issues_count int32 0 2.72k ⌀ | gha_language stringlengths 1 16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10 2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10 2.95M | extension stringclasses 19 values | num_repo_files int64 1 202k | filename stringlengths 4 112 | num_lang_files int64 1 202k | alphanum_fraction float64 0.26 0.89 | alpha_fraction float64 0.2 0.89 | hex_fraction float64 0 0.09 | num_lines int32 1 93.6k | avg_line_length float64 4.57 103 | max_line_length int64 7 931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jonathonw/FailoverHomework | 1,992,864,841,580 | c453ed569a8abc29d2deb7ab2cd559614f6a31ca | 2328d8ef09501a8181b8f3197a928f3d05c922a0 | /UseCpu.py | 889b1b970d41e3f66f9cceedd84c20f53ca8cb79 | [] | no_license | https://github.com/jonathonw/FailoverHomework | 70f449412ce99889125e153a41f28dee61c526a3 | 13604e0557ccc52c0a0d73e42ee4df620a5f8a95 | refs/heads/master | 2021-01-01T19:56:43.470418 | 2011-11-30T20:47:50 | 2011-11-30T20:47:50 | 2,540,786 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import sys
import threading
class BusyThread(threading.Thread):
def __init__(self, percentage):
threading.Thread.__init__(self)
self._percentage = percentage
def run(self):
while True:
result = 1
for iteration in range(1, 100):
for i in range(1, 20):
result = result * iteration
      time.sleep(0.0005 - self._percentage*0.0005)
if __name__ == "__main__":
if len(sys.argv) != 2 or float(sys.argv[1]) < 0 or float(sys.argv[1]) > 100:
print "Usage:", sys.argv[0], "percentage"
print " where 'percentage' is a number between 0 and 100"
exit(-1)
percentage = float(sys.argv[1]) / 100.0
if percentage == 0:
exit(0)
numberThreads = int(percentage * 10 + 1)
for i in range(numberThreads):
    newThread = BusyThread(percentage)
newThread.start()
while True:
time.sleep(1)
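# Example invocation (matches the usage message above; the value is illustrative):
#   python UseCpu.py 75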
| UTF-8 | Python | false | false | 872 | py | 9 | UseCpu.py | 7 | 0.607798 | 0.557339 | 0 | 36 | 23.222222 | 78 |
saratello/annotation-software | 9,809,705,311,297 | 5590520123f754cee863a75953e058f956bb9043 | 7099f031ed770f8d0ceb4c762c9ab5023d2a7000 | /methods.py | d78de0cadf0f40d133539134cd0c721fab25fc35 | [] | no_license | https://github.com/saratello/annotation-software | 8b1d24f8a0260117e78c24b39d4e1dec8b4d6bc8 | 3a0b07b14a1fc911e3614b7d53559e6e6d3173fb | refs/heads/main | 2023-05-20T11:07:07.367780 | 2021-06-10T07:59:02 | 2021-06-10T07:59:02 | 375,738,099 | 0 | 0 | null | true | 2021-06-10T15:07:11 | 2021-06-10T15:07:10 | 2021-06-10T08:24:39 | 2021-06-10T13:50:52 | 2,913 | 0 | 0 | 0 | null | false | false | from typing import Dict, List, Union, Tuple
from string import punctuation as punc
from collections import namedtuple
import re
import os
import json
from git import Repo, RemoteReference
POS = str
ExampleFieldGulf = str # ['baseword', 'gloss', 'clitic', 'context']
ExampleFieldCODA = str # ['raw', 'coda', 'context']
ExampleFieldMSA = str # ['segment', 'gloss', 'context']
SegmentType = str # ['baseword', 'enclitic', 'proclitic']
ExampleGulf = Dict[ExampleFieldGulf, str]
ExampleCODA = Dict[ExampleFieldCODA, str]
ExampleMSA = Dict[ExampleFieldMSA, str]
ExamplesQueryFilter = namedtuple('ExamplesQueryFilter', 'segment_type match_type resource')
ARABIC_LETTERS = "ءؤئابتثجحخدذرزسشصضطظعغفقكلمنهوىي"
def search_bar_examples(query: str,
gulf_tag_examples: Dict[SegmentType, Dict[POS, List[ExampleGulf]]],
msa_tag_examples: Dict[POS, List[ExampleMSA]],
coda_examples: List[ExampleCODA],
query_filter: Tuple[str] = ('Baseword', 'Approximate', 'Gulf Tags')
) -> Union[Dict[POS, List[ExampleGulf]], List[ExampleCODA], Dict[POS, List[ExampleMSA]]]:
"""Function which allows to search for specific examples in a static JSON file using a filter.
For the query filter:
- In index 0, possible choices are 'Baseword', 'Enclitic', 'Proclitic'.
- In index 1, possible choices are 'Approximate', 'Exact' (whether the match should be exact or not).
- In index 2, possible choices are 'Gulf Tags', 'MSA Tags', 'CODA Examples'.
Args:
query (str): what the user will type in the search bar.
gulf_tag_examples (Dict[SegmentType, Dict[POS, List[Dict[ExampleField, str]]]]): already parsed static JSON object sitting in memory for Gulf Arabic tagging examples
msa_tag_examples (Dict[POS, List[Dict[ExampleField, str]]]): already parsed static JSON object sitting in memory for MSA tagging examples
coda_examples (List[ExampleCODA]): already parsed static JSON object sitting in memory for CODA standardization examples
query_filter (Optional[List[str]], optional): Should be drop down menus where the user specifies what he is searching for. Defaults to ('Baseword', 'Approximate', 'Gulf Tags').
Returns:
Union[Dict[POS, List[Union[ExampleGulf, ExampleMSA]]], List[ExampleCODA]]: Dictionary with a structure which depends on the value chosen for the resource type (index 2) in the query filter
"""
query_filter = ExamplesQueryFilter(*query_filter)
if 'Tags' in query_filter.resource:
response: Dict[POS, List[Union[ExampleGulf, ExampleMSA]]] = {}
if 'Gulf' in query_filter.resource:
tag_examples: Dict[POS, List[ExampleGulf]] = gulf_tag_examples[query_filter.segment_type.lower()]
elif 'MSA' in query_filter.resource:
tag_examples: Dict[POS, List[ExampleMSA]] = msa_tag_examples
is_pos = True if query.translate(
str.maketrans('', '', punc)).isupper() else False
is_arabic_str = True if query[0] in ARABIC_LETTERS else False
is_gloss = True if [True for char in query if char.islower(
) and char not in ARABIC_LETTERS] else False
if is_pos:
query_pos, query_features = None, None
if 'Gulf' in query_filter.resource:
query_split = query.split(':')
if len(query_split) == 2:
query_pos, query_features = query_split[0], set(query_split[1])
for k, v in tag_examples.items():
k_pos, k_features = None, None
if 'Gulf' in query_filter.resource and query_pos:
k = k.split(':')
k_pos, k_features = k[0], set(k[1]) if len(k) == 2 else set()
if not query_pos:
if query_filter.match_type == 'Approximate':
if query in k:
response[k] = v
elif query_filter.match_type == 'Exact':
if query == k:
response[k] = v
else:
if query_filter.match_type == 'Approximate':
if query_pos in k_pos or bool(k_features.intersection(query_features)):
response[k] = v
elif query_filter.match_type == 'Exact':
if query_pos == k_pos and query_features == k_features:
response[k] = v
elif is_arabic_str or is_gloss:
if 'Gulf' in query_filter.resource:
example_key = 'baseword' if query_filter.segment_type.lower() == 'baseword' else 'clitic'
example_key = 'gloss' if is_gloss else example_key
elif 'MSA' in query_filter.resource:
example_key = 'segment'
for k, v in tag_examples.items():
v_: List[ExampleGulf] = []
for example in v:
if query_filter.match_type == 'Approximate':
if query in example[example_key]:
v_.append(example)
elif query_filter.match_type == 'Exact':
if query == example[example_key]:
v_.append(example)
if v_:
response.setdefault(k, []).append(v_)
elif query_filter.resource == 'CODA Examples':
response: List[ExampleCODA] = []
for example in coda_examples:
if query_filter.match_type == 'Approximate':
if query in example['raw'] or query in example['coda']:
response.append(example)
elif query_filter.match_type == 'Exact':
if query == example['raw'] or query == example['coda']:
response.append(example)
return response
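# Minimal usage sketch for search_bar_examples (illustrative only: it assumes the
# three example JSON files referenced at the bottom of this module have already been
# loaded into memory; the 'NOUN' query and the filter values are arbitrary):
#
#   hits = search_bar_examples('NOUN', gulf_tag_examples, msa_tag_examples,
#                              coda_examples, ('Baseword', 'Exact', 'Gulf Tags'))
#   # hits maps each matching POS tag to its list of example dicts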
Annotator = str
Feature = str # ['raw', 'coda', 'segments']
AnnotationField = str # ['text', 'verb_form', 'pos', 'lemma']
Segment = Dict[AnnotationField, str]
Annotation = Dict[Feature, Union[str, List[str], List[Segment]]]
FilteredAnnotation = namedtuple('FilteredAnnotation', 'annotator id annotation')
AnnotationsQueryFilter = namedtuple('AnnotationsQueryFilter', 'feature field match annotators')
# Probably not the final list
ANNOTATORS = ['Christian', 'Jana', 'Wiaam', 'Sarah', 'Carine']
def search_bar_previous_annotations(query: str,
annotations_json: Dict[Annotator, List[Annotation]],
query_filter: Tuple[str] = ('Raw', 'Text', 'Approximate', 'Christian')
) -> List[Annotation]:
"""Function which allows to search for previously done annotations the contents of which match the values of the filter.
For the query filter:
- In index 0, possible choices are 'Raw', 'CODA', 'Segments'.
- In index 1, possible choices are 'Text', 'Verb Form', 'POS', 'Lemma'.
- In index 2, possible choices are 'Approximate', 'Exact' (whether the match should be exact or not).
- In index 3, possible choices are the names of the annotators, to be defined in a separate list.
Args:
query (str): what the user will type in the search bar.
annotations_json (Dict[Annotator, List[Annotation]]): JSON file containing the already finished annotations
query_filter (Tuple[str], optional): Should be dropdown menus where the user specifies what he is searching for. Defaults to ('Raw', 'Text', 'Approximate', 'Christian').
Returns:
List[Annotation]: List of annotations to show that we can scroll through.
"""
query_filter = AnnotationsQueryFilter(*query_filter)
# Filtering by annotator
    # Copy the module-level list so the 'All But ...' branch below cannot mutate ANNOTATORS.
    annotators = [query_filter.annotators] if query_filter.annotators in ANNOTATORS else list(ANNOTATORS)
if query_filter.annotators in [f'All But {annotator}' for annotator in ANNOTATORS]:
annotators.remove(re.sub('All But ', '', query_filter.annotators))
# Filtering by feature
annotations_filtered: List[FilteredAnnotation] = []
for annotator, annotations in annotations_json.items():
if annotator in annotators:
for i, annotation in enumerate(annotations):
if query_filter.feature == 'Segments':
for token in annotation[query_filter.feature.lower()]:
for segment in token:
annotations_filtered.append(FilteredAnnotation(
annotator, i, segment[query_filter.field.lower()]))
else:
annotations_filtered.append(FilteredAnnotation(
annotator, i, annotation['_'.join(query_filter.feature.lower().split())]))
already_added = []
response: List[Annotation] = []
if query_filter.field == 'POS':
query_pos, query_features = None, None
query_split = query.split(':')
if len(query_split) == 2:
query_pos, query_features = query_split[0], set(query_split[1])
for annotation in annotations_filtered:
k_pos, k_features = None, None
if query_filter.field == 'POS' and query_pos:
k = annotation.annotation.split(':')
k_pos, k_features = k[0], set(k[1]) if len(k) == 2 else set()
if not query_pos:
if query_filter.match == 'Approximate':
if query in annotation.annotation and \
(annotation.annotator, annotation.id) not in already_added:
response.append(
annotations_json[annotation.annotator][annotation.id])
elif query_filter.match == 'Exact':
if query == annotation.annotation and \
(annotation.annotator, annotation.id) not in already_added:
response.append(
annotations_json[annotation.annotator][annotation.id])
else:
if (annotation.annotator, annotation.id) not in already_added:
if query_filter.match == 'Approximate':
if query_pos in k_pos or bool(k_features.intersection(query_features)):
response.append(
annotations_json[annotation.annotator][annotation.id])
elif query_filter.match == 'Exact':
if query_pos == k_pos and query_features == k_features:
response.append(
annotations_json[annotation.annotator][annotation.id])
return response
COMMIT_MESSAGE = 'No message'
def clone_repo(repo_dir='/Users/chriscay/thesis/annotation_wiaam',
username='christios',
auth_key='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',
repo_name='annotation',
annotator_name='Wiaam') -> None:
"""This method is called once, when the annotator sets up their local application.
What it does:
- Clones a remote repository that I have already set up
- Sets up a local branch in the annotator's name and its corresponding up-stream branch
"""
repo_url = f"https://{username}:{auth_key}@github.com/{username}/{repo_name}.git"
repo = Repo.clone_from(repo_url, repo_dir)
origin = repo.remote('origin')
current = repo.create_head(annotator_name)
current.checkout()
origin.push(annotator_name)
# Create up-stream branch
repo.head.reference = repo.create_head(annotator_name)
rem_ref = RemoteReference(repo, f"refs/remotes/origin/{annotator_name}")
repo.head.reference.set_tracking_branch(rem_ref)
def sync_annotations(repo_dir='/Users/chriscay/thesis/annotation',
annotator_name='Christian') -> None:
"""This method is called each time the annotator presses the `Sync` button.
What it does:
- Checks out the branch in the annotator's name
- Commits the contents of the working directory
- Pushes the commit to the remote branch in the annotator's name
- Checks out local main
- Locally merges all the remote branches (one for each annotator) into local main
- Checks out the branch in the annotator's name again
"""
repo = Repo(repo_dir)
repo.git.checkout(annotator_name)
annotator_file_path = f'{annotator_name}.json'
open(os.path.join(repo_dir, annotator_file_path), 'w').close()
repo.index.add([annotator_file_path])
repo.index.commit(COMMIT_MESSAGE)
repo.git.push('origin', annotator_name)
repo.git.fetch()
remote_branches = repo.git.branch('-r').splitlines()
remote_branches = [branch.strip()
for branch in remote_branches if re.match(r'\w+/\w+$', branch.strip(), re.M) and 'main' not in branch]
repo.git.checkout('main')
for branch in remote_branches:
repo.git.merge(branch.strip())
repo.git.checkout(annotator_name)
def get_merged_json(repo_dir='/Users/chriscay/thesis/annotation',
annotator_name='Christian') -> Dict[Annotator, List[Annotation]]:
"""This method should be called to get the JSON file with the annotator's respective
annotations. This is the file which should be edited by the platform in the working
directory.
"""
repo = Repo(repo_dir)
repo.git.checkout('main')
annotator_file_paths = [file_path for file_path in os.listdir(repo_dir) if '.json' in file_path]
annotations_json: Dict[Annotator, List[Annotation]] = {}
for annotator_file_path in annotator_file_paths:
with open(os.path.join(repo_dir, annotator_file_path)) as f:
            # str.strip('.json') strips characters rather than a suffix (it would turn
            # 'Christian.json' into 'Christia'), so take the file stem explicitly.
            annotator = os.path.splitext(annotator_file_path)[0]
            try:
                annotations_json[annotator] = json.load(f)
            except json.JSONDecodeError:
                annotations_json[annotator] = []
repo.git.checkout(annotator_name)
return annotations_json
# with open('/Users/chriscay/annotation-software/examples/gulf_tag_examples.json') as f_gulf, \
# open('/Users/chriscay/annotation-software/examples/coda_examples.json') as f_coda, \
# open('/Users/chriscay/annotation-software/examples/msa_tag_examples.json') as f_msa:
# gulf_tag_examples = json.load(f_gulf)
# coda_examples = json.load(f_coda)
# msa_tag_examples = json.load(f_msa)
# with open('/Users/chriscay/Library/Containers/com.apple.mail/Data/Library/Mail Downloads/6A3F79B7-E791-498F-87DD-A0238023A21E/data.json') as f:
# annotations_json = json.load(f)
# search_bar_previous_annotations('p1:1', annotations_json, ('Segments', 'POS', 'Exact', 'Nana'))
# search_bar_examples('NOUN', gulf_tag_examples, msa_tag_examples, coda_examples, ('Enclitic', 'Approximate', 'MSA Tags'))
# clone_repo(repo_dir='/Users/chriscay/thesis/annotation_carine',
# annotator_name='Carine')
# sync_annotations(repo_dir='/Users/chriscay/thesis/annotation_wiaam',
# annotator_name='Wiaam')
# get_merged_json(repo_dir='/Users/chriscay/thesis/annotation_wiaam',
# annotator_name='Wiaam')
| UTF-8 | Python | false | false | 15,570 | py | 14 | methods.py | 2 | 0.601236 | 0.597632 | 0 | 301 | 49.621262 | 196 |
MahnazAkbariKasten/Algorithms_And_DataStructur_Challenges | 4,818,953,351,357 | b2d9e6ab6f049e6b7d30e6ad527d9d70bc067454 | b28ea5f1b69ca5c15fafd78e2aea96cd69dba7cf | /leetcode/circularArrayLoop/circular_array_loop.py | 1963a0ec8c9810a4e3ab7104b6d63ffc8f9f668a | [] | no_license | https://github.com/MahnazAkbariKasten/Algorithms_And_DataStructur_Challenges | 43a7dbee21d84170c2375760e30bf0b278102aee | 828ec611088751475308407dfed4de8ceefd221c | refs/heads/master | 2021-08-28T09:28:11.052107 | 2017-12-11T20:38:57 | 2017-12-11T20:38:57 | 113,905,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'pretymoon'
import unittest
class MethodTest(unittest.TestCase):
def test_mixed_directions(self):
        self.assertFalse(circularArrayLoop([-1, 2, 2]))
        self.assertFalse(circularArrayLoop([3, 3, -1, 3, 3]))
    def test_forward_loop(self):
        self.assertTrue(circularArrayLoop([2, -1, 1, 2, 2]))
    def test_backward_loop(self):
        self.assertTrue(circularArrayLoop([-1, 1, -2, -1]))
    def test_short_path(self):
        # self.assertFalse(circularArrayLoop([-1, 2]))
        self.assertFalse(circularArrayLoop([3, 1, 2]))
def circularArrayLoop(nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = len(nums)
for i in range(s):
loop_direction = int(nums[i] / abs(nums[i]))
going = True
cnt = 0 # number of jumps
# num_sum = 0 # sum of jump-lengths
idx = i # next index after jump
# print(going, cnt, s, i, idx, nums[idx])
while going and cnt <= s and nums[idx] != 0:
cnt += 1
# num_sum += nums[idx]
idx = (idx + nums[idx]) % s
if nums[idx] == 0 or int(nums[idx] / abs(nums[idx])) != loop_direction:
going = False
if going and idx == i:
if cnt > 1:
# print(cnt)
return True
else:
return False
return False
if __name__ == "__main__":
unittest.main()
# print("[2, -1, 1, 2, 2] has a loop? ", circularArrayLoop([2, -1, 1, 2, 2]))
# print("[-1, 2] has a loop? ", circularArrayLoop([-1, 2]))
# print("[3, 3, -1, 3, 3] has a loop? ", circularArrayLoop([3, 3, -1, 3, 3]))
# print("[-1, 3, 3, 4, 2] has a loop? ", circularArrayLoop([-1, 3, 3, 4, 2]))
# def check_for_loop(aray):
# l = len(aray)
#
# for i in range(l):
# path = []
# loop = False
# # no_move = True
# next_idx = (i + aray[i]) % l
# while next_idx < i:
# # if aray[i] * aray[next_idx] < 0: # moving back and forth
# # no_move = False
# # break
#
#
# path.append(next_idx)
# next_idx = (next_idx + aray[next_idx]) % l | UTF-8 | Python | false | false | 2,331 | py | 136 | circular_array_loop.py | 126 | 0.481338 | 0.454311 | 0 | 68 | 33.294118 | 87 |
sunilka/pylabs | 13,752,485,329,423 | 01bc214654d9f9a7082990135c6a2967acbb4c87 | 8b2f6c139b39f8f30793015a35ecf9dab5685b8b | /interviews/bogotobogo/Q1/Init_dict_with_list-1.py | 5651e5900f153830b4891bda7eabbb89af819554 | [] | no_license | https://github.com/sunilka/pylabs | daaabfe101f99c0ec58b434e43ea8029c5e83436 | 422915e7876df5880b7177e93f1faa95adae441e | refs/heads/master | 2021-01-19T17:57:55.237621 | 2020-12-08T02:15:35 | 2020-12-08T02:15:35 | 88,345,998 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | cities = {'San Francisco': 'US', 'London':'UK',
'Manchester':'UK', 'Paris':'France',
'Los Angeles':'US', 'Seoul':'Korea'}
from collections import defaultdict
d=defaultdict(list)
for k,v in cities.items():
d[v].append(k)
print(d)
| UTF-8 | Python | false | false | 250 | py | 87 | Init_dict_with_list-1.py | 83 | 0.616 | 0.616 | 0 | 9 | 26.777778 | 47 |
NeuObito/ggs-api | 9,019,431,326,661 | 02de29927b772347537f868382b87fdd378e507c | 30f9a649ce1d9e341b0e683f44756982342ee5df | /authentication/views.py | 29c377dd1964768ad981137826615796370cfae6 | [] | no_license | https://github.com/NeuObito/ggs-api | 11ce4618002ba877e7ad08877e1778479038a158 | 55d46d8b8aa5a86ada14045f31f4e51e5050f3f0 | refs/heads/master | 2021-01-21T19:58:26.014577 | 2017-07-31T08:18:38 | 2017-07-31T08:18:38 | 92,179,704 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import login, logout
from django.views.generic import View
from django.urls.resolvers import get_resolver
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from .models import User
from .forms import LoginForm, RegisterForm
__author__ = "nabito"
class RegisterView(View):
"""
    Implements user registration.
"""
def post(self, request):
form = RegisterForm(request.POST)
if form.is_valid():
form.register()
return JsonResponse({"msg": "Hello World."})
def options(self, request):
response = HttpResponse()
response['Access-Control-Allow-Headers'] = "accept, content-type"
response['Access-Control-Allow-Method'] = "POST"
response['Access-Control-Allow-Origin'] = "*"
        return response  # return the response that carries the CORS headers set above
class LoginView(View):
"""
    Implements user login.
"""
def post(self, request):
print(request)
print(request.session)
form = LoginForm(request.POST)
if form.is_valid():
user = form.login()
print(user)
if user:
login(request, user)
return JsonResponse({"msg": "登录成功", "succode": 20001})
return JsonResponse({"msg": "登录失败", "errcode": 10001})
def options(self, request):
response = HttpResponse()
response['Access-Control-Allow-Headers'] = "accept, content-type"
response['Access-Control-Allow-Method'] = "POST"
response['Access-Control-Allow-Origin'] = "*"
        return response  # return the response that carries the CORS headers set above
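# Hypothetical URL wiring for these views (the project's urls.py is not part of this
# file, so the route names below are illustrative only):
#
#   from django.urls import path
#   urlpatterns = [
#       path('register/', RegisterView.as_view()),
#       path('login/', LoginView.as_view()),
#   ]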
| UTF-8 | Python | false | false | 1,781 | py | 8 | views.py | 7 | 0.624064 | 0.617732 | 0 | 59 | 28.440678 | 73 |
determined-ai/determined | 12,103,217,870,564 | df86ed4d31cecf4e62a10d733e9ac3baf6002822 | ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f | /examples/computer_vision/fasterrcnn_coco_pytorch/model_def.py | a1ca8489e69a51280bf8a2d152efa367d3383a20 | [
"Apache-2.0"
] | permissive | https://github.com/determined-ai/determined | 9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e | 8239b1993f4f44390f4e88901ffaf3b12429b83c | refs/heads/main | 2023-08-21T12:13:36.651298 | 2023-08-21T08:34:16 | 2023-08-21T08:34:16 | 253,846,879 | 2,531 | 330 | Apache-2.0 | false | 2023-09-14T21:54:17 | 2020-04-07T16:12:29 | 2023-09-13T21:34:41 | 2023-09-14T21:54:16 | 219,165 | 2,448 | 319 | 142 | Go | false | false | """
This is an object detection finetuning example. We finetune a Faster R-CNN
model pretrained on COCO to detect pedestrians in the relatively small PennFudan
dataset.
Useful References:
https://docs.determined.ai/latest/reference/api/pytorch.html
https://www.cis.upenn.edu/~jshi/ped_html/
Based on: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
"""
import copy
from typing import Any, Dict, Sequence, Union
import torch
import torchvision
from data import PennFudanDataset, collate_fn, download_data, get_transform
from torch import nn
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from determined.pytorch import DataLoader, LRScheduler, PyTorchTrial, PyTorchTrialContext
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
class ObjectDetectionTrial(PyTorchTrial):
def __init__(self, context: PyTorchTrialContext) -> None:
self.context = context
# Create a unique download directory for each rank so they don't overwrite each
# other when doing distributed training.
self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
download_data(
download_directory=self.download_directory,
data_config=self.context.get_data_config(),
)
dataset = PennFudanDataset(self.download_directory + "/PennFudanPed", get_transform())
# Split 80/20 into training and validation datasets.
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
dataset, [train_size, test_size]
)
model = fasterrcnn_resnet50_fpn(pretrained=True)
# Replace the classifier with a new two-class classifier. There are
# only two "classes": pedestrian and background.
num_classes = 2
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# Wrap the model.
self.model = self.context.wrap_model(model)
# Wrap the optimizer.
self.optimizer = self.context.wrap_optimizer(
torch.optim.SGD(
self.model.parameters(),
lr=self.context.get_hparam("learning_rate"),
momentum=self.context.get_hparam("momentum"),
weight_decay=self.context.get_hparam("weight_decay"),
)
)
# Wrap the LR scheduler.
self.lr_scheduler = self.context.wrap_lr_scheduler(
torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=3, gamma=0.1),
step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
)
def build_training_data_loader(self) -> DataLoader:
return DataLoader(
self.dataset_train,
batch_size=self.context.get_per_slot_batch_size(),
collate_fn=collate_fn,
)
def build_validation_data_loader(self) -> DataLoader:
return DataLoader(
self.dataset_val,
batch_size=self.context.get_per_slot_batch_size(),
collate_fn=collate_fn,
)
def train_batch(
self, batch: TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
images, targets = batch
loss_dict = self.model(list(images), list(targets))
self.context.backward(loss_dict["loss_box_reg"])
self.context.step_optimizer(self.optimizer)
return {"loss": loss_dict["loss_box_reg"]}
def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:
images, targets = batch
output = self.model(list(images), copy.deepcopy(list(targets)))
sum_iou = 0
num_boxes = 0
# Our eval metric is the average best IoU (across all predicted
# pedestrian bounding boxes) per target pedestrian. Given predicted
# and target bounding boxes, IoU is the area of the intersection over
# the area of the union.
for idx, target in enumerate(targets):
# Filter out overlapping bounding box predictions based on
# non-maximum suppression (NMS)
predicted_boxes = output[idx]["boxes"]
prediction_scores = output[idx]["scores"]
keep_indices = torchvision.ops.nms(predicted_boxes, prediction_scores, 0.1)
predicted_boxes = torch.index_select(predicted_boxes, 0, keep_indices)
# Tally IoU with respect to the ground truth target boxes
target_boxes = target["boxes"]
boxes_iou = torchvision.ops.box_iou(target_boxes, predicted_boxes)
sum_iou += sum(max(iou_result) for iou_result in boxes_iou)
num_boxes += len(target_boxes)
return {"val_avg_iou": sum_iou / num_boxes}
| UTF-8 | Python | false | false | 4,942 | py | 2,965 | model_def.py | 1,767 | 0.654189 | 0.650344 | 0 | 123 | 39.178862 | 94 |
karthikpappu/pyc_source | 9,242,769,652,806 | bce21d7019c31db120f3fda79d3f305e79b05323 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/kyoka-0.2.1-py2.7/deep_q_learning_test.py | 61ab9a9d7926557f0e758fcdf2822168f2e3c03f | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.macosx-10.12-intel/egg/tests/kyoka/algorithm/td_learning/deep_q_learning_test.py
# Compiled at: 2016-10-28 07:31:35
from tests.base_unittest import BaseUnitTest
from kyoka.algorithm.td_learning.deep_q_learning import DeepQLearning
from kyoka.value_function.base_deep_q_learning_action_value_function import BaseDeepQLearningActionValueFunction
from kyoka.policy.base_policy import BasePolicy
from kyoka.policy.greedy_policy import GreedyPolicy
from mock import Mock
from mock import patch
import os
class DeepQLearningTest(BaseUnitTest):
def setUp(self):
self.algo = DeepQLearning(gamma=0.1, N=3, C=3, minibatch_size=2, replay_start_size=2)
self.algo.replay_memory.store_transition(2.5, 3, 25, (5, False))
self.algo.replay_memory.store_transition(5.0, 7, 144, (5.5, True))
self.domain = self.__setup_stub_domain()
self.value_func = self.TestValueFunctionImpl()
self.policy = self.NegativePolicyImple()
self.algo.setUp(self.domain, self.policy, self.value_func)
def tearDown(self):
dir_path = self.__generate_tmp_dir_path()
file_path = os.path.join(dir_path, 'dqn_algorithm_state.pickle')
if os.path.exists(dir_path):
if os.path.exists(file_path):
os.remove(file_path)
os.rmdir(dir_path)
def test_update_value_function_learning_minibatch_delivery(self):
with patch('random.sample', side_effect=lambda lst, n: lst[len(lst) - n:]):
self.algo.update_value_function(self.domain, self.policy, self.value_func)
learning_minibatch_expected = [
[
(5.0, 7, 144), (0.5, 1.5, 2.85)],
[
(0.5, 1.5, 2.85), (2, 4, 30.25)]]
actual = [ arg[0][0] for arg in self.value_func.Q.train_on_minibatch.call_args_list ]
self.eq(learning_minibatch_expected, actual)
self.value_func.Q_hat.train_on_minibatch.assert_not_called()
def test_update_value_function_experience_replay_memory_state(self):
with patch('random.sample', side_effect=lambda lst, n: lst[len(lst) - n:]):
self.algo.update_value_function(self.domain, self.policy, self.value_func)
replay_memory_expected = [(5.0, 7, 144, (5.5, True)),
(
0.5, 1.5, 2.25, (2, False)),
(
2, 4, 30.25, (6.0, True))]
self.eq(replay_memory_expected, self.algo.replay_memory.queue)
def test_update_value_function_reset_target_network(self):
with patch('random.sample', side_effect=lambda lst, n: lst[-n:]):
self.algo.update_value_function(self.domain, self.policy, self.value_func)
self.eq(2, self.algo.reset_step_counter)
self.eq('Q_hat_network_0', self.value_func.Q_hat.name)
self.algo.update_value_function(self.domain, self.policy, self.value_func)
self.eq(0, self.algo.reset_step_counter)
self.eq('Q_hat_network_1', self.value_func.Q_hat.name)
def test_initialize_replay_memory(self):
algo = DeepQLearning(gamma=0.1, N=3, C=3, minibatch_size=2, replay_start_size=2)
value_func = self.TestValueFunctionImpl(strict_mode=False)
self.domain.is_terminal_state.side_effect = lambda state: state == 4 or state >= 100
self.eq(0, len(algo.replay_memory.queue))
algo.setUp(self.domain, self.policy, value_func)
self.eq(2, len(algo.replay_memory.queue))
def test_save_and_load_algorithm_state(self):
dir_path = self.__generate_tmp_dir_path()
file_path = os.path.join(dir_path, 'dqn_algorithm_state.pickle')
os.mkdir(dir_path)
self.algo.save_algorithm_state(dir_path)
self.true(os.path.exists(file_path))
new_algo = DeepQLearning(replay_start_size=100)
domain = self.__setup_stub_domain()
domain.is_terminal_state.side_effect = lambda state: state == 4 or state >= 100
value_func = self.TestValueFunctionImpl(strict_mode=False)
policy = self.NegativePolicyImple()
new_algo.setUp(domain, policy, value_func)
new_algo.load_algorithm_state(dir_path)
self.eq(self.algo.gamma, new_algo.gamma)
self.eq(self.algo.C, new_algo.C)
self.eq(self.algo.minibatch_size, new_algo.minibatch_size)
self.eq(self.algo.replay_start_size, new_algo.replay_start_size)
self.eq(self.algo.reset_step_counter, new_algo.reset_step_counter)
self.eq(domain, new_algo.domain)
self.eq(policy, new_algo.policy)
self.eq(value_func, new_algo.value_function)
self.eq(self.algo.replay_memory.max_size, new_algo.replay_memory.max_size)
self.eq(self.algo.replay_memory.queue, new_algo.replay_memory.queue)
self.true(isinstance(new_algo.greedy_policy, GreedyPolicy))
with patch('random.sample', side_effect=lambda lst, n: lst[len(lst) - n:]):
new_algo.update_value_function(self.domain, self.policy, self.value_func)
replay_memory_expected = [(5.0, 7, 144, (5.5, True)),
(
0.5, 1.5, 2.25, (2, False)),
(
2, 4, 30.25, (6.0, True))]
self.eq(replay_memory_expected, new_algo.replay_memory.queue)
def __setup_stub_domain(self):
mock_domain = Mock()
mock_domain.generate_initial_state.return_value = 0
mock_domain.is_terminal_state.side_effect = lambda state: state == 5.5
mock_domain.transit_state.side_effect = lambda state, action: state + action
mock_domain.generate_possible_actions.side_effect = lambda state: [] if state == 5.5 else [state + 1, state + 2]
mock_domain.calculate_reward.side_effect = lambda state: state ** 2
return mock_domain
def __generate_tmp_dir_path(self):
return os.path.join(os.path.dirname(__file__), 'tmp')
class TestValueFunctionImpl(BaseDeepQLearningActionValueFunction):
def __init__(self, strict_mode=True):
self.deepcopy_counter = 0
self.strict_mode = strict_mode
def initialize_network(self):
mock_q_network = Mock(name='Q_network')
mock_q_network.predict.side_effect = self.q_predict_scenario
return mock_q_network
def deepcopy_network(self, q_network):
mock_q_hat_network = Mock(name='Q_hat_network')
mock_q_hat_network.name = 'Q_hat_network_%d' % self.deepcopy_counter
mock_q_hat_network.predict.side_effect = self.q_hat_predict_scenario
self.deepcopy_counter += 1
return mock_q_hat_network
def preprocess_state_sequence(self, raw_state_sequence):
return raw_state_sequence[(-1)] + 0.5
def predict_action_value(self, network, processed_state, action):
return network.predict(processed_state, action)
def train_on_minibatch(self, network, learning_minibatch):
network.train_on_minibatch(learning_minibatch)
def q_predict_scenario(self, state, action):
if state == 0.5 and action == 1.5:
return 1
if state == 0.5 and action == 2.5:
return 2
if state == 2 and action == 3:
return 4
if state == 2 and action == 4:
return 3
if self.strict_mode:
raise AssertionError('q_network received unexpected state-action pair (state=%s, action=%s)' % (state, action))
else:
return 1
def q_hat_predict_scenario(self, state, action):
if state == 2 and action == 3:
return 5
if state == 2 and action == 4:
return 6
if self.strict_mode:
raise AssertionError('q_hat_network received unexpected state-action pair (state=%s, action=%s)' % (state, action))
else:
return 1
class NegativePolicyImple(BasePolicy):
def choose_action(self, domain, value_function, state):
actions = domain.generate_possible_actions(state)
calc_Q_value = lambda state, action: value_function.calculate_value(state, action)
Q_value_for_actions = [ calc_Q_value(state, action) for action in actions ]
min_Q_value = min(Q_value_for_actions)
Q_act_pair = zip(Q_value_for_actions, actions)
worst_actions = [ act for Q_value, act in Q_act_pair if min_Q_value == Q_value ]
return worst_actions[0] | UTF-8 | Python | false | false | 8,588 | py | 114,545 | deep_q_learning_test.py | 111,506 | 0.628551 | 0.604797 | 0 | 179 | 46.98324 | 131 |
Harut0726/pointnet-luis | 2,276,332,695,618 | 1a07a0157b5367594043cb65709ced837ff456a3 | 566b47e16e4fee52911e154325e2cdd537cd7582 | /utils/helpers.py | 93f6e5a9f8a0f67c76d6f4124a14759348738951 | [] | no_license | https://github.com/Harut0726/pointnet-luis | 904173d5eb2bca5e217a6dc7502c763de2946c87 | e078a646c0c111e40a3ef49d444859665bfb9881 | refs/heads/master | 2020-09-22T13:54:38.137918 | 2019-12-01T20:52:04 | 2019-12-01T20:52:04 | 225,228,404 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
def norm_pts(x):
x -= np.mean(x, axis=0)
dists = np.linalg.norm(x, axis=1)
return x / np.max(dists)
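# Usage sketch (illustrative): expects a float array of shape (N, 3); note that the
# input is centered in place before the scaled copy is returned.
#   pts = np.random.rand(1024, 3).astype(np.float32)
#   unit_pts = norm_pts(pts)  # farthest point ends up at distance 1.0 from the centroid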
| UTF-8 | Python | false | false | 132 | py | 14 | helpers.py | 9 | 0.598485 | 0.583333 | 0 | 6 | 21 | 37 |
GaneshGoel/Basic-Python-programs | 3,212,635,579,295 | 66392762b78127e459631a4e0b673e5d637897b9 | 6ceb7bd42428410174ff2b110754d1bc40a343c3 | /Merge List.py | 1a81bc771a873a61c3289a04b933e73754464612 | [] | no_license | https://github.com/GaneshGoel/Basic-Python-programs | 382e4560406ade35cf011d48b9e83e7db407a0f9 | f59b7449f2230b9ebcdf0f0ae829ea1618a4d3ca | refs/heads/master | 2020-05-30T08:28:49.811571 | 2019-05-31T16:26:28 | 2019-05-31T16:26:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Merge two lists and sort it
l1=[]
l2=[]
l3=[]
l4=[]
x=int(input("Enter size of list 1:"))
y=int(input("Enter size of list 2:"))
print("\nEnter items of list 1")
for i in range(x):
a=int(input("Enter list item:"))
l1.append(a)
print("\nEnter items of list 2")
for i in range(y):
b=int(input("Enter list item:"))
l2.append(b)
l3=l1+l2
j=0
while j in range(len(l3)):
m=l3[0]
s=len(l3)
for i in range(s):
if l3[i]<m:
m=l3[i]
l4.append(m)
l3.remove(m)
print("Sorted list in ascending order:",l4)
| UTF-8 | Python | false | false | 577 | py | 17 | Merge List.py | 17 | 0.556326 | 0.516464 | 0 | 27 | 19.37037 | 43 |
KoMinjae/codingtest | 14,087,492,735,232 | 9017cd81a8d1581caa5d4d330fb120c6ce90ab30 | 6cb5afbe80fe07f837848f56c1c0cc194534155c | /전자카트.py | 2273864edb2afc43632a862f06106059e7deca6a | [] | no_license | https://github.com/KoMinjae/codingtest | 9073dbd094e0675c0f3cac35085703d8d1c546f6 | 160dfc5f73cad9d1d00a9a497550ab34cdf31a32 | refs/heads/master | 2022-12-19T14:11:00.972578 | 2020-09-25T03:08:16 | 2020-09-25T03:08:16 | 285,505,309 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def solution(N,MAPS):
answer=list()
stack=list()
stack.append((0,["0"],0))
while stack:
v, path, point = stack.pop()
if len(path)==N:
answer.append((point+MAPS[v][0]))
else:
for i in range(len(MAPS[v])):
if MAPS[v][i]!=0 and str(i) not in path:
stack.append((i,path+list(str(i)),point+MAPS[v][i]))
return min(answer)
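# Note on the representation above: visited company indices are kept as one-character
# strings in `path`, so this DFS assumes the indices stay in 0-9 (i.e. N <= 10), which
# the sample inputs below satisfy.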
T = int(input())
# Multiple test cases are given, so handle each one in turn.
for test_case in range(1, T + 1):
# ///////////////////////////////////////////////////////////////////////////////////
N=int(input())
MAPS=[list(map(int,input().split())) for _ in range(N)]
print("#"+str(test_case), solution(N,MAPS))
###
""" 3
3
0 18 34
48 0 55
18 7 0
4
0 83 65 97
82 0 78 6
19 19 0 82
6 34 94 0
5
0 9 26 85 42
14 0 84 31 27
58 88 0 16 46
83 61 94 0 17
40 71 24 38 0
"""
#1 89
#2 96
#3 139
### | UTF-8 | Python | false | false | 969 | py | 176 | 전자카트.py | 175 | 0.470141 | 0.356135 | 0 | 43 | 20.44186 | 89 |
Maciej-Glowacki/Python_exercises | 14,465,449,856,831 | 8aeda0ace19d30fc828ab29268168316c8040871 | 595482c645343a92db7b458711d9e8c88cf8b014 | /exe_87_classmethod.py | 907eb7db7a6efd33dd18ec8031805dea049cf24d | [] | no_license | https://github.com/Maciej-Glowacki/Python_exercises | a78af3dd59327f2920451a36fa9c6b08ec0d622b | a1eb553a8ab66d76d8a3f34691e592ffa0522b32 | refs/heads/main | 2023-07-11T19:28:34.805514 | 2021-08-23T15:35:37 | 2021-08-23T15:35:37 | 372,899,923 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def create_person(cls, name, year_of_birth):
actual_year = datetime.datetime.now().year
age = actual_year - year_of_birth
return cls(name, age)
def __str__(self):
return f'{self.name} is {self.age} years old.'
John = Person.create_person('Jan', 1987)
Cindy = Person.create_person('Cindy', 1977)
print(John)
print(Cindy)
| UTF-8 | Python | false | false | 497 | py | 94 | exe_87_classmethod.py | 93 | 0.61167 | 0.595573 | 0 | 22 | 21.545455 | 54 |
jingyuluo/HCALPFG | 15,247,133,910,993 | 0274a5111089d102881af4d0a7ae676e1963312a | f9ff4434e976adb78aadcd544e2cd953476a55f0 | /HcalTupleMaker/test/analysis_utca_noise_test_cfg.py | 7e9c382833e2c0467e2ad2d43a93eb2d11ba4bcb | [] | no_license | https://github.com/jingyuluo/HCALPFG | 4ec1e2156f370d35e31100afdf2f5e1520db5d5d | deac0a0825a36f6ee9c39cdaacb1d2004b97172b | refs/heads/master | 2021-07-08T06:42:22.407287 | 2020-12-01T02:16:23 | 2020-12-01T02:16:23 | 207,335,036 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #------------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------------
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
#------------------------------------------------------------------------------------
# Options
#------------------------------------------------------------------------------------
options = VarParsing.VarParsing()
options.register('skipEvents',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Number of events to skip")
options.register('processEvents',
-1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Number of events to process")
options.register('inputFiles',
"root://cmsxrootd-site.fnal.gov//store/data/Commissioning2016/MinimumBias/RAW/v1/000/264/232/00000/1EDB8470-71D1-E511-A299-02163E014504.root", #default value, corresponds to LFN /store/data/Commissioning2016/MinimumBias/RAW/v1/000/264/232/00000/1EDB8470-71D1-E511-A299-02163E014504.root from run 267594
VarParsing.VarParsing.multiplicity.list,
VarParsing.VarParsing.varType.string,
"Input files")
options.register('outputFile',
"file:/uscms/home/dryu/HCAL/data/HCALPFG/utca_noise_test/hcalTupleTree_267594.root", #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Output file")
options.register('doReco',
False, # default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.bool,
"Run HCAL reconstruction algo?")
options.register('globalTag',
'80X_dataRun2_Prompt_v2',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Global Tag")
options.parseArguments()
print "Skip events =", options.skipEvents
print "Process events =", options.processEvents
print "inputFiles =", options.inputFiles
print "outputFile =", options.outputFile
print "doReco =", options.doReco
#------------------------------------------------------------------------------------
# Declare the process
#------------------------------------------------------------------------------------
process = cms.Process("ANA")
#------------------------------------------------------------------------------------
# What files should we run over?
#------------------------------------------------------------------------------------
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
options.inputFiles
),
skipEvents = cms.untracked.uint32(
options.skipEvents
)
)
#------------------------------------------------------------------------------------
# How many events should we run over?
#------------------------------------------------------------------------------------
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(
options.processEvents
)
)
#------------------------------------------------------------------------------------
# Set up the output
#------------------------------------------------------------------------------------
process.TFileService = cms.Service("TFileService",
fileName = cms.string( options.outputFile )
)
#------------------------------------------------------------------------------------
# Various python configuration files
#------------------------------------------------------------------------------------
# Need to set up MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1000)
# Need to set up the global tag
# Which to use? https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideFrontierConditions
#process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
#from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, 'GR_P_V49', '')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, 'GR_P_V46::All')
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag)
# Need the topology to unpack/analyze digis
process.load("Geometry.HcalCommonData.hcalDBConstants_cff")
process.load("Geometry.HcalEventSetup.HcalTopology_cfi")
# Make trigger tower geometry
process.HcalTrigTowerGeometryESProducer = cms.ESProducer("HcalTrigTowerGeometryESProducer")
# Need to unpack digis from RAW
process.load("EventFilter.HcalRawToDigi.HcalRawToDigi_cfi")
# Use an emap that has the uTCA (now from the database)
#from CalibCalorimetry.HcalPlugins.Hcal_FrontierConditions_cff import es_pool
#process.my_es_pool = es_pool
#process.my_es_pool.connect = cms.string("frontier://FrontierPrep/CMS_COND_HCAL")
#process.my_es_pool.toGet = cms.VPSet(
# cms.PSet(record = cms.string("HcalElectronicsMapRcd"),
# tag = cms.string("HcalElectronicsMap_v7.06_hlt_test")
# )
#)
#process.my_es_prefer = cms.ESPrefer('PoolDBESSource','my_es_pool')
process.es_ascii = cms.ESSource('HcalTextCalibrations',
input = cms.VPSet(
cms.PSet(
object = cms.string('ElectronicsMap'),
file = cms.FileInPath('HCALPFG/HcalTupleMaker/data/2016-feb-24/version_G_emap_all.txt')
),
)
)
process.es_prefer = cms.ESPrefer('HcalTextCalibrations','es_ascii')
# Set up utcaDigis unpacker
process.hcalDigis.FilterDataQuality = cms.bool(False)
process.hcalDigis.FEDs = cms.untracked.vint32()
#for FED in [x+700 for x in range(32)] + [1118, 1120, 1122]:
for FED in xrange(1100, 1124, 2):
process.hcalDigis.FEDs.append ( FED )
# Need the geometry to get digi and rechit positions
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
# Set up our analyzer
process.load("HCALPFG.HcalTupleMaker.HcalTupleMaker_cfi")
# Modify hbhedigis tuple maker for this specific test
process.hcalTupleHBHEDigis.DoEnergyReco = cms.untracked.bool ( False )
# HF digis
process.hcalTupleHFDigis.DoEnergyReco = cms.untracked.bool ( False )
# Make a path
process.p = cms.Path(
process.hcalDigis*
process.hcalTupleEvent*
process.hcalTupleHBHEDigis*
process.hcalTupleHFDigis*
process.hcalTupleUnpackReport*
process.hcalTupleTree
)
| UTF-8 | Python | false | false | 6,799 | py | 85 | analysis_utca_noise_test_cfg.py | 78 | 0.590528 | 0.567142 | 0 | 174 | 38.074713 | 319 |
SWannell/cro55-prompt-test | 13,039,520,740,632 | c12547a307b4323cb2543c88afaa5b772ab4af9c | e8e5d40fea01d50356fee0caf7c344c3b24111fc | /ttest_MDE_model.py | 096671e236d209d189fc12420d736594a5cc704d | [] | no_license | https://github.com/SWannell/cro55-prompt-test | de0da0054d65b7b99e8f8e8c225440a2d9a02d96 | fc00a914351bd76f3cdf478934bad70533b7f9e4 | refs/heads/master | 2022-10-20T10:45:08.240099 | 2020-06-11T15:32:10 | 2020-06-11T15:32:10 | 263,640,548 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri May 15 16:07:28 2020
@author: SWannell
"""
import numpy as np
from scipy.stats import ttest_ind_from_stats
import matplotlib.pyplot as plt; plt.style.use('ggplot')
import seaborn as sns
np.random.seed(1234)
# Set parameters
params = {'ctrl': {'prompts': [5, 25, 50],
'p': [0.25, 0.375, 0.375]},
'test': {'prompts': [15, 35, 50]}}
cellvol = 500
# Get a starting distribution that has an equal average gift
size = 10000
trial_dist = [0.48, 0.26, 0.26]
ctrl_vals = np.random.choice(params['ctrl']['prompts'], size,
p=params['ctrl']['p'])
test_vals = np.random.choice(params['test']['prompts'], size,
p=trial_dist)
print(ctrl_vals.mean(), test_vals.mean())
# set initial values in dict
params['test']['p'] = [0.48, 0.26, 0.26]
for cell in params.keys():
params[cell]['vals'] = np.random.choice(params[cell]['prompts'],
size=cellvol,
p=params[cell]['p'])
params[cell]['mean'] = params[cell]['vals'].mean()
params[cell]['std'] = params[cell]['vals'].std()
# Plot initial dist
figs, axs = plt.subplots(2, 1, sharex=True, sharey=True)
sns.violinplot(x=params['ctrl']['vals'], color='#1d1a1c', ax=axs[0])
sns.violinplot(x=params['test']['vals'], color='#ee2a24', ax=axs[0])
alpha = 0.7
plt.setp(axs[0].collections, alpha=alpha)
# MDE calc
p_MDE = 1
t_MDE = 0
lift_MDE = 0.00
while p_MDE > 0.05:
# Apply expected uplift - flatten the diff between prompts
lift = np.array([-lift_MDE, 0.5*lift_MDE, 0.5*lift_MDE])
params['test']['p'] = np.array(params['test']['p']) + lift
for cell in params.keys():
params[cell]['vals'] = np.random.choice(params[cell]['prompts'],
size=cellvol,
p=params[cell]['p'])
params[cell]['mean'] = params[cell]['vals'].mean()
params[cell]['std'] = params[cell]['vals'].std()
# t-test
t_MDE, p_MDE = ttest_ind_from_stats(
params['ctrl']['mean'],
params['ctrl']['std'],
cellvol,
params['test']['mean'],
params['test']['std'],
cellvol
)
degf = cellvol*2 - 2
upliftMDE = lift_MDE*100
print("Ctrl: (mean={:.2f}, std={:.2f})".format(
params['ctrl']['mean'],
params['ctrl']['std']),
"\nTest: (mean={:.2f}, std={:.2f})".format(
params['test']['mean'],
params['test']['std']),
"\nt({:.0f})={:.2f}, p={:.2f}, uplift={:,.0f}%\n".format(
degf,
t_MDE,
p_MDE,
upliftMDE))
if p_MDE < 0.05:
break
lift_MDE += 0.01
# Plot resulting dist
sns.violinplot(x=params['ctrl']['vals'], color='#1d1a1c', ax=axs[1])
sns.violinplot(x=params['test']['vals'], color='#ee2a24', ax=axs[1])
plt.setp(axs[1].collections, alpha=alpha) | UTF-8 | Python | false | false | 3,031 | py | 16 | ttest_MDE_model.py | 7 | 0.52359 | 0.484659 | 0 | 89 | 33.067416 | 72 |
lukehuang/bitblog | 10,754,598,117,185 | aa568486f795594f99e26a2651641829738d50cb | d94591fb254d2d0bddaa3f879a27c623bf8bdd7f | /apps/contrib/models.py | a3b55187f4ed1e8445c6f54aaf73fc045949269b | [
"MIT"
] | permissive | https://github.com/lukehuang/bitblog | 2c55fd37276d140c75769a91b66106193116e575 | 903dd83164c9238f24348e8c63e4d2ff1b9f1d4f | refs/heads/master | 2021-06-11T10:45:35.340433 | 2017-03-07T19:15:02 | 2017-03-07T19:15:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import uuid
import gc
from uuid import UUID
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, UserManager
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username and password are required. Other fields are optional.
"""
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(_('active'), default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class TimestampMixin(models.Model):
creation_date = models.DateTimeField(
verbose_name=_("Creación"),
auto_now=True,
)
class Meta:
abstract = True
class CodeMixin(models.Model):
code = models.UUIDField(
verbose_name=_("Código"),
default=uuid.uuid4,
unique=True,
db_index=True,
max_length=255,
)
class Meta:
abstract = True
def validate_uuid4(uuid_string):
"""
Validate that a UUID string is in
fact a valid uuid4.
Happily, the uuid module does the actual
checking for us.
It is vital that the 'version' kwarg be passed
to the UUID() call, otherwise any 32-character
hex string is considered valid.
"""
try:
val = UUID(uuid_string, version=4)
except ValueError:
return False
# If the uuid_string is a valid hex code,
# but an invalid uuid4,
# the UUID.__init__ will convert it to a
# valid uuid4. This is bad for validation purposes.
return val.hex == uuid_string
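# Illustrative check (values are arbitrary): the hex form of a freshly generated uuid4
# validates, while a string whose version/variant bits do not already match uuid4 is
# rewritten by UUID(..., version=4) and therefore fails the comparison.
#
#   validate_uuid4(uuid.uuid4().hex)   # True
#   validate_uuid4('0' * 32)           # False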
def queryset_iterator(queryset, chunksize=1000):
"""
Iterate over a Django Queryset ordered by the primary key
This method loads a maximum of chunksize (default: 1000) rows in it's
memory at the same time while django normally would load all rows in it's
memory. Using the iterator() method only causes it to not preload all the
classes.
Note that the implementation of the iterator does not support ordered query sets.
"""
pk = 0
last_pk = queryset.order_by('-pk')[0].pk
queryset = queryset.order_by('pk')
while pk < last_pk:
for row in queryset.filter(pk__gt=pk)[:chunksize]:
pk = row.pk
yield row
gc.collect()
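# Hypothetical usage sketch (MyModel is illustrative, not part of this module):
#
#   for obj in queryset_iterator(MyModel.objects.all(), chunksize=500):
#       process(obj)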
| UTF-8 | Python | false | false | 3,811 | py | 171 | models.py | 62 | 0.618273 | 0.611709 | 0 | 126 | 29.230159 | 108 |
tommyliverani/RulPrediction | 2,783,138,833,717 | 2e70e25477e27e90aeb6f0e63684516d707776e9 | 4f394d0ded063cef913972ee2a2c108e9995e2d7 | /evaluate.py | 95ec6202eb30968d3d7afb40bfcc031a2788ef28 | [] | no_license | https://github.com/tommyliverani/RulPrediction | d91db638ede73b015c3849ff5699c604e5b04e09 | ada762488cab30c7e8c0a6bd2524df5d3d2bee94 | refs/heads/main | 2023-01-29T00:38:53.556346 | 2020-12-10T14:22:21 | 2020-12-10T14:22:21 | 316,713,775 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers, callbacks
from tensorflow_probability import distributions
from sklearn.metrics import r2_score
from rul_utils import split_data
fig_size=(9, 3)
def plot_pred_scatter(y_pred, y_true, figsize=fig_size, autoclose=True):
if autoclose:
plt.close('all')
plt.figure(figsize=figsize)
plt.scatter(y_pred, y_true, marker='.', alpha=0.1)
xl, xu = plt.xlim()
yl, yu = plt.ylim()
l, u = min(xl, yl), max(xu, yu)
plt.plot([l, u], [l, u], ':', c='0.3')
plt.xlim(l, u)
plt.ylim(l, u)
plt.xlabel('prediction')
plt.ylabel('target')
plt.tight_layout()
plt.show()
stop=10000
def plot_rul(pred=None, target=None,
stddev=None,
q1_3=None,
same_scale=True,
figsize=fig_size, autoclose=True):
if autoclose:
plt.close('all')
plt.figure(figsize=figsize)
if target is not None:
plt.plot(range(len(target)), target, label='target',
color='tab:orange')
if pred is not None:
if same_scale or target is None:
ax = plt.gca()
else:
ax = plt.gca().twinx()
ax.plot(range(len(pred)), pred, label='pred',
color='tab:blue')
# if stddev is not None:
# ax.fill_between(range(len(pred)),
# pred-stddev, pred+stddev,
# alpha=0.3, color='tab:blue', label='+/- std')
if q1_3 is not None:
ax.fill_between(range(len(pred)),
q1_3[0], q1_3[1],
alpha=0.3, color='tab:blue', label='1st/3rd quartile')
plt.legend()
plt.tight_layout()
plt.show()
tr=pd.read_csv('data/tr.csv')
ts=pd.read_csv('data/ts.csv')
dt_in = list(tr.columns[3:-1])
#model 1
def build_regressor(hidden):
input_shape = (len(dt_in), )
model_in = keras.Input(shape=input_shape, dtype='float32')
x = model_in
for h in hidden:
x = layers.Dense(h, activation='relu')(x)
model_out = layers.Dense(1, activation='linear')(x)
model = keras.Model(model_in, model_out)
return model
#modello 2 (originally a class method: `self` and the `KL` layer alias are replaced
# here by explicit arguments and the `layers` module imported above)
def build_cnn2(input_shape, feature_size):
    inp = layers.Input(shape=input_shape)
    x = inp
    x = layers.Conv1D(32, 16, activation='relu')(x)
    x = layers.MaxPool1D(4)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.MaxPool1D(4)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.MaxPool1D(2)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.MaxPool1D(2)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.MaxPool1D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(feature_size, activation='relu', name='feature')(x)
    out = layers.Dense(1)(x)
    model = keras.Model(inp, out)
    #model.summary()
    model.compile(optimizer=keras.optimizers.Adam(lr=0.001),
                  loss='mse')
    return model
#modello 1
#nn=build_regressor(hidden=[64, 64,64,64])
#modello2: the input shape and feature size below are placeholders and must match the
# architecture that produced checkpoint.ckt
nn = build_cnn2(input_shape=(len(dt_in), 1), feature_size=64)
nn.load_weights("checkpoint.ckt")
tr_in=tr[dt_in]
tr_out=tr['rul']
ts_in=ts[dt_in]
ts_out=ts['rul']
input("Press enter to valuate prediction on training set")
result = nn.predict(tr_in).ravel()
print(f"Result:{r2_score(result,tr_out)}")
plot_pred_scatter(result ,tr_out,fig_size)
plot_rul(result[:stop], tr_out[:stop], figsize=fig_size)  # third positional arg of plot_rul is stddev, so pass figsize by keyword
input("Press enter to valuate prediction on training set")
result = nn.predict(ts_in).ravel()
print(f"Result:{r2_score(result,ts_out)}")
plot_pred_scatter(result ,ts_out,fig_size)
plot_rul(result[:stop] ,ts_out[:stop],fig_size)
| UTF-8 | Python | false | false | 3,769 | py | 9 | evaluate.py | 4 | 0.5922 | 0.56779 | 0 | 130 | 27.969231 | 75 |
esddse/leetcode | 12,137,577,610,875 | 19bb40ea68670719c0911ba14fb9629518d3dc44 | 15373eaa353e8aece47a26741b7fb27795268bf6 | /medium/525_contiguous_array.py | d82833fd5893042e2b6b0120549c22947bc575ca | [] | no_license | https://github.com/esddse/leetcode | e1a9bacf04c68a8d642a1e53c90e6c2dda2c1980 | 0ceccdb262149f7916cb30fa5f3dae93aef9e9cd | refs/heads/master | 2021-06-08T19:15:14.346584 | 2020-01-09T01:41:23 | 2020-01-09T01:41:23 | 109,675,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
    def findMaxLength(self, nums: List[int]) -> int:
        # Prefix-sum trick: count each 0 as -1 and each 1 as +1. Two positions
        # with the same running sum bound a subarray with equal 0s and 1s.
        sum_start = {0: -1}  # running sum -> earliest index it was seen at
        res = 0
        s = 0
        for i, num in enumerate(nums):
            if num == 0:
                s += -1
            else:
                s += 1
            if s in sum_start:
                res = max(i - sum_start[s], res)
            else:
                sum_start[s] = i
return res | UTF-8 | Python | false | false | 442 | py | 423 | 525_contiguous_array.py | 411 | 0.373303 | 0.357466 | 0 | 16 | 25.75 | 52 |
xiaohai12/leetcode | 17,016,660,451,237 | 9ad2cc14edadd00c5254de2ac099c8f4eb09eae9 | f0fc8e34f1da807eeb89bd02da9a17a78fd7b4dc | /271 encode decode string.py | d8d0c55c8e9c938b47d44cb66418ffc01f13d4ac | [] | no_license | https://github.com/xiaohai12/leetcode | 91ceea6aed9b1e0bcc7e6a7c19e04cddf5dfe017 | a443d6dee491c6f93f2847cd0def7ed579f3170b | refs/heads/master | 2021-08-07T10:47:01.881877 | 2020-04-12T13:18:57 | 2020-04-12T13:18:57 | 149,550,272 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Codec:
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
        # Length-prefix each string as "<len>/<string>" so that strings
        # containing '/' or digits still decode unambiguously.
        ret = ''
        for s in strs:
            ret += str(len(s)) + '/' + s
        return ret
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
i = 0
ret = []
        while i < len(s):
            # the first '/' after position i terminates the length prefix
            ind = s[i:].find('/')
            size = int(s[i:i + ind])
            ret.append(s[i + ind + 1:i + ind + size + 1])
            i += ind + size + 1
return ret
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs)) | UTF-8 | Python | false | false | 668 | py | 154 | 271 encode decode string.py | 152 | 0.480539 | 0.474551 | 0 | 25 | 25.76 | 60 |
milvus-io/milvus | 12,713,103,222,589 | b3155bdee4cb0e03867ff0ae1167975939e28f02 | 2cf314b8237fc6a77b7f1a096f17a679179b0057 | /tools/core_gen/assemble.py | a9ce6c9cfebd78262017cf9cb14be18c8cffa012 | [
"Apache-2.0"
] | permissive | https://github.com/milvus-io/milvus | a02d732cf746effec1ea723f9e4d17856843f8a8 | 0530fd80c91dc5b200606c00214c12bf8dd17cb4 | refs/heads/master | 2023-09-04T06:28:57.681855 | 2023-09-04T02:01:04 | 2023-09-04T02:01:04 | 208,728,772 | 23,316 | 2,917 | Apache-2.0 | false | 2023-09-14T15:06:12 | 2019-09-16T06:43:43 | 2023-09-14T14:54:06 | 2023-09-14T15:06:11 | 166,634 | 22,863 | 2,509 | 579 | Go | false | false | #!python
from meta_gen import *
import re
def assemble(template, **kwargs):
    # The template is a series of blocks of the form "@@@@<name> ... ####".
    # A block named "<name>@<rep>" is expanded once per element of the list
    # kwarg <rep> (substituting "@@<rep>@@"); other kwargs are substituted by
    # replace_all() from meta_gen. The fully expanded "main" block is returned.
    pattern = re.compile("@@@@(.*?)\n((.|\n)*?)\n####", re.MULTILINE)
temp_info = pattern.findall(template)
# print(temp_info)
mapping = dict()
rep_map = dict()
# drop repetive field from mapping
for k, v in kwargs.items():
if isinstance(v, list):
rep_map[k] = v
else:
mapping[k] = v
for k, v, _ in temp_info:
info = k.split("@")
new_v = replace_all(v, **mapping)
assert(1 <= len(info) <= 2)
if len(info) == 2:
k = info[0]
rep = info[1]
new_v = "\n\n".join([new_v.replace("@@" + rep + "@@", rep_v) for rep_v in rep_map[rep]])
mapping[k] = new_v
return mapping["main"]
# import sys
# if __name__ == "__main__":
# assert(len(sys.argv) == 2)
# root_file = sys.argv[1]
# namespace, root_base, struct_name = meta_gen(readfile(root_file))
# gen_all(readfile("templates/node_full.cpp"), namespace=namespace, root_base=root_base, struct_name=struct_name)
| UTF-8 | Python | false | false | 1,097 | py | 1,980 | assemble.py | 1,639 | 0.533273 | 0.526892 | 0 | 36 | 29.416667 | 117 |
pawarspeaks/HackFest21 | 12,618,613,929,544 | f155d09c3d8e770045628bf6e011ea54209cddc2 | 246096c11c7ac7bd47fdf81f0be5faede3588f40 | /Python Scripts/print_any_pattern/print_any_pattern.py | 99ed9ccc67d472e651e36cc937f4cdbb87685f4e | [
"MIT"
] | permissive | https://github.com/pawarspeaks/HackFest21 | 4aa75e5e388e3776919a26487bf507810d27ddc3 | 71f161bf68f81aed3d2d5b154c01eadea5634b48 | refs/heads/main | 2023-08-21T11:47:44.675343 | 2021-10-29T19:44:19 | 2021-10-29T19:44:19 | 422,694,801 | 4 | 0 | null | true | 2021-10-29T19:43:35 | 2021-10-29T19:43:35 | 2021-10-27T12:45:34 | 2021-10-29T16:03:05 | 78,432 | 0 | 0 | 0 | null | false | false | """
Uses the Patterns.Alphabets_pattern module from the current project,
which contains printer functions such as printA, printB, etc.
Each printer function is mapped to its letter, so the text entered by
the user is printed out letter by letter as large patterns.
"""
from Patterns.Alphabets_pattern import *
characters = 'abcdefghijklmnopqrstuvwxyz'
text = input('Enter what you want to print:\n').lower()
for character in text:
    # Only the letters a-z have printer functions; anything else is skipped.
    if character in characters:
        # Dispatch to the matching printer, e.g. printA for 'a', printB for 'b'.
        globals()['print' + character.upper()](character)
| UTF-8 | Python | false | false | 1,785 | py | 204 | print_any_pattern.py | 170 | 0.602241 | 0.602241 | 0 | 64 | 26.890625 | 66 |
glotzerlab/gsd | 3,178,275,806,198 | 2422878d2a1a63ca3cf2f14fe807b2bcf7384278 | b17758025b2ab936cedd80c0cbd6d4b4fa837bf4 | /gsd/__main__.py | 7d85c3da480f1dd05d56090f05978e7107adaccb | [
"BSD-2-Clause"
] | permissive | https://github.com/glotzerlab/gsd | 58c620675ee912d7af762845f357e1f63c709fc1 | de226fed1862ae2a3a76ac9ef06ce6df4eb970f5 | refs/heads/trunk-patch | 2023-08-31T05:08:31.674725 | 2023-08-03T19:06:20 | 2023-08-03T19:06:20 | 147,664,770 | 22 | 10 | BSD-2-Clause | false | 2023-09-05T12:05:28 | 2018-09-06T11:40:13 | 2023-07-28T16:34:55 | 2023-09-05T12:05:27 | 2,154 | 21 | 7 | 1 | Python | false | false | # Copyright (c) 2016-2023 The Regents of the University of Michigan
# Part of GSD, released under the BSD 2-Clause License.
"""The GSD command line interface.
To simplify ad hoc usage of :py:mod:`gsd`, this module provides a command line
interface for interacting with GSD files. The primary entry point is a single
command for starting a Python interpreter with a GSD file pre-loaded::
$ gsd read trajectory.gsd
The following options are available for the ``read`` subcommand:
.. program:: read
.. option:: -s schema, --schema schema
The schema of the GSD file. Supported values for ``schema`` are "hoomd" and
"none".
.. option:: -m mode, --mode mode
The mode in which to open the file. Valid modes are identical to those
accepted by :func:`gsd.fl.open`.
"""
import sys
import argparse
import code
from . import version
from .hoomd import open as hoomd_open
from . import fl
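# Illustrative sketch (not part of the original CLI): roughly what the ``read``
# subcommand documented above sets up for the default "hoomd" schema, written
# as a plain function. The file name below is a placeholder.
def _example_read_hoomd(path="trajectory.gsd"):
    traj = hoomd_open(path, mode="r")  # the "traj" variable in the shell
    handle = traj.file  # the "handle" variable in the shell
    return traj, handle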
def _print_err(msg=None, *args):
print(msg, *args, file=sys.stderr)
SHELL_BANNER = """Python {python_version}
gsd {gsd_version}
File: {fn}
{extras}
The GSD file handle is available via the "handle" variable.
For supported schema, you may access the trajectory using the "traj" variable.
Type "help(handle)" or "help(traj)" for more information.
The gsd and gsd.fl packages are always loaded.
Schema-specific modules (e.g. gsd.hoomd) are loaded if available."""
def main_read(args):
"""Main function to launch a Python interpreter with an open GSD file."""
# Default to a new line for well-formatted printing.
local_ns = {
'gsd': sys.modules['gsd'],
'gsd.hoomd': sys.modules['gsd.hoomd'],
'gsd.fl': sys.modules['gsd.fl'],
}
attributes = {}
if args.schema == 'hoomd':
traj = hoomd_open(args.file, mode=args.mode)
handle = traj.file
local_ns.update({
'handle': handle,
'traj': traj,
})
attributes.update({"Number of frames": len(traj)})
else:
if args.mode not in ['rb', 'rb+', 'ab', 'a', 'r', 'r+']:
raise ValueError("Unsupported schema for creating a file.")
handle = fl.open(args.file, args.mode)
local_ns.update({
'handle': handle,
})
extras = "\n".join(
"{}: {}".format(key, val) for key, val in attributes.items())
code.interact(local=local_ns,
banner=SHELL_BANNER.format(python_version=sys.version,
gsd_version=version.version,
fn=args.file,
extras=extras + "\n"))
def main():
"""Entry point to the GSD command-line interface.
This function handles parsing command-line arguments and launching the
appropriate subcommand based on the first argument to ``gsd`` on the
command line. At present the following commands are supported:
* read
"""
parser = argparse.ArgumentParser(
description="The gsd package encodes canonical readers and writers "
"for the gsd file format.")
parser.add_argument('--version',
action='store_true',
help="Display the version number and exit.")
parser.add_argument('--debug',
action='store_true',
help="Show traceback on error for debugging.")
subparsers = parser.add_subparsers()
parser_read = subparsers.add_parser('read')
parser_read.add_argument('file',
type=str,
nargs='?',
help="GSD file to read.")
parser_read.add_argument('-s',
'--schema',
type=str,
default='hoomd',
choices=['hoomd', 'none'],
help="The file schema.")
parser_read.add_argument('-m',
'--mode',
type=str,
default='r',
choices=[
'rb',
'rb+',
'wb',
'wb+',
'xb',
'xb+',
'ab',
'w',
'r',
'r+',
'x',
'a',
],
help="The file mode.")
parser_read.set_defaults(func=main_read)
# This is a hack, as argparse itself does not
# allow to parse only --version without any
# of the other required arguments.
if '--version' in sys.argv:
print('gsd', version.version)
sys.exit(0)
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.print_usage()
sys.exit(2)
try:
args.func(args)
except KeyboardInterrupt:
_print_err()
_print_err("Interrupted.")
if args.debug:
raise
sys.exit(1)
except RuntimeWarning as warning:
_print_err("Warning: {}".format(warning))
if args.debug:
raise
sys.exit(1)
except Exception as error:
_print_err('Error: {}'.format(error))
if args.debug:
raise
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 5,594 | py | 65 | __main__.py | 15 | 0.512692 | 0.510011 | 0 | 175 | 30.965714 | 79 |
john-mwangi/multilabel-classification | 6,201,932,778,581 | d497db33327e3564a764f0c374a0c78e41369982 | dfb155865bbc0405de56d215233aa4011139c7f2 | /src/train.py | fb620b5ca7c49a80ac7eccb6d155adac4a86ec1f | [] | no_license | https://github.com/john-mwangi/multilabel-classification | 7528021a0b04663c986583f592ff3635c08ec54a | 72a142b0ed355f3d27f1e871fbaa42479c4dfeae | refs/heads/main | 2023-08-03T03:03:51.477384 | 2021-09-21T14:55:31 | 2021-09-21T14:55:31 | 387,020,414 | 0 | 0 | null | false | 2021-07-20T04:25:55 | 2021-07-17T19:16:34 | 2021-07-20T04:17:30 | 2021-07-20T04:25:54 | 66,282 | 0 | 0 | 0 | Python | false | false | """
This module is for training the actual neural network and hyper-parameter
tuning using optuna.
"""
import pandas as pd
import numpy as np
import torch
import optuna
from torch.utils.data import DataLoader
import utils
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
EPOCHS = 10
def run_training(fold, params, save_model=False):
df = pd.read_csv("../inputs/train_features.csv")
df = utils.Engine.process_data(df=df)
targets_df = pd.read_csv("../outputs/train_targets_folds.csv")
features_columns = df.drop(labels="sig_id", axis=1).columns
target_columns = targets_df.drop(["sig_id", "kfold"], axis=1).columns
df = pd.merge(left=df, right=targets_df, how="left", on="sig_id")
train_df = df[df.kfold != fold].reset_index(drop=True)
valid_df = df[df.kfold == fold].reset_index(drop=True)
xtrain = train_df[features_columns].to_numpy()
xvalid = valid_df[features_columns].to_numpy()
ytrain = train_df[target_columns].to_numpy()
yvalid = valid_df[target_columns].to_numpy()
    train_dataset = utils.MoADataset(features=xtrain, targets=ytrain)
valid_dataset = utils.MoADataset(features=xvalid, targets=yvalid)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=1024,
num_workers=2,
shuffle=True
)
valid_loader = DataLoader(
dataset=valid_dataset,
batch_size=1024,
num_workers=2
)
model = utils.Model(
nfeatures=xtrain.shape[1],
ntargets=ytrain.shape[1],
nlayers=params["num_layers"],
hidden_size=params["hidden_size"],
dropout=params["dropout"]
)
model.to(device=DEVICE)
optimizer = torch.optim.Adam(params=model.parameters(),
lr=params["learning_rate"])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode="min",
patience=3,
threshold=1e-5,
verbose=True
)
eng = utils.Engine(model=model, optimizer=optimizer, device=DEVICE)
best_loss = np.inf
early_stopping_iter = 10
early_stopping_counter = 0
for epoch in range(EPOCHS):
train_loss = eng.train_loss(data_loader=train_loader)
valid_loss = eng.eval_loss(data_loader=valid_loader)
scheduler.step(valid_loss)
print(f"{fold}, {epoch}, {train_loss}, {valid_loss}")
if valid_loss < best_loss:
best_loss = valid_loss
if save_model:
torch.save(obj=model.state_dict(), f=f"model_{fold}.bin")
else:
early_stopping_counter += 1
if early_stopping_counter > early_stopping_iter:
break
return best_loss
def objective(trial):
params = {
"num_layers": trial.suggest_int("num_layers", 1, 7),
"hidden_size": trial.suggest_int("hidden_size", 16, 2048),
"dropout": trial.suggest_uniform("dropout", 0.1, 0.7),
"learning_rate": trial.suggest_loguniform("learning_rate", 1e-6, 1e-3)
}
all_losses = []
for f_ in range(5):
temp_loss = run_training(fold=f_, params=params, save_model=False)
all_losses.append(temp_loss)
return np.mean(all_losses)
if __name__ == "__main__":
study = optuna.create_study(direction="minimize")
study.optimize(func=objective, n_trials=20)
print("Best trial:")
trial_ = study.best_trial
print(trial_.values)
print(trial_.params)
scores = 0
for j in range(5):
scr = run_training(fold=j, params=trial_.params, save_model=True)
scores += scr
print(f"CV Score of best params: {scores/5}")
| UTF-8 | Python | false | false | 3,648 | py | 5 | train.py | 4 | 0.622533 | 0.610197 | 0 | 130 | 27.061538 | 78 |
jhartman86/gitdeployhooks | 11,991,548,714,720 | 3962463ecf7cc4fee660598cfbf35829baf57df2 | cda266d434d19fd80f7eeefd9d4183ca70de4ba5 | /deploy_coordinator/system/__init__.py | d9deb9a580926d52d41381ae4e13633a26c87fc8 | [] | no_license | https://github.com/jhartman86/gitdeployhooks | 84f18ed35ad59e498e60aae2b2cb99913dc52780 | 93050346be4f341ad14b87371842af72b99f4e12 | refs/heads/master | 2016-09-05T18:19:26.644667 | 2015-09-10T02:55:10 | 2015-09-10T02:55:10 | 34,279,904 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # from execute import Execute
# from file_system import FileSystem
# from git import Git
# from parse_json import ParseJson | UTF-8 | Python | false | false | 123 | py | 14 | __init__.py | 12 | 0.796748 | 0.796748 | 0 | 4 | 30 | 36 |
Etenil/anvil | 13,950,053,825,270 | 4350daff015eb24cb3b8c946198cc7d3b1884828 | 6db970b6a09bdf07767aae00ea4cc2b5591300f4 | /anvil.py | fe0b11c25c91f55957ab5657e83d0426d346c96d | [
"MIT"
] | permissive | https://github.com/Etenil/anvil | fc439d854dc3be8f14493d0200488bb6ca8a9268 | d40c296ed3ca0c05fce6d59038fc7a1f9890dccd | refs/heads/master | 2021-01-25T07:18:55.439521 | 2013-06-16T11:36:53 | 2013-06-16T11:36:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import web
import common
import sys
import os
from controller.user import User
from controller.project import Project
from controller.message import Message
from controller.bug import Bug
from controller.doc import Doc
import model.project
import model.message
import model.event
from anvillib import config
### Parsing the configuration
config.load_conf()
# Generating an argv from the config file (for web.py; pretty dirty I
# know).
sys.argv = ['anvil.py', config.val('port')]
### URL mapping
urls = (
'/' , 'Main',
'/(login)' , 'User',
'/(logout)' , 'User',
'/(register)' , 'User',
'/(profile)' , 'User',
'/(users)' , 'User',
'/message(?:/(.+))?$' , 'Message',
'/ajax/(listusers)' , 'User',
'/project(?:/(.+))?$' , 'Project',
'/\*([a-z0-9._-]+)$' , 'User',
'/\*([a-z0-9._-]+)/(key)(?:/(.+?)(?:/(.+))?)?$' , 'User',
'/\*([a-z0-9._-]+)/(branch)(?:/(.+?)(?:/(.+?)(?:/(.+))?)?)?$' , 'User',
'/\*([a-z0-9._-]+)/(events)$' , 'User',
'/([a-z0-9._-]+)/bugs(?:/(.+?)(?:/(.+))?)?$' , 'Bug',
'/([a-z0-9._-]+)/doc(?:/(.+?)(?:/(.+))?)?$' , 'Doc',
'/([a-z0-9._-]+)/(commiters)/(del|add)/(.+)$' , 'Project',
'/([a-z0-9._-]+)/(branch)/(.+?)(?:/(.+?)(?:/(.+))?)?$' , 'Project',
'/([a-z0-9._-]+)/(events)$' , 'Project',
'/([a-z0-9._-]+)(?:/(.+))?$' , 'Project', #Leave at bottom!
'.*' , 'Main',
)
### Runing the server
app = web.application(urls, globals(), autoreload=False)
common.session = web.session.Session(app,
web.session.DBStore(common.db, 'sessions'),
initializer={'user': None})
# This is a hook that is run on every request handling. This ensures
# we always display the number of unread messages to the user.
def refresh_messages(handler):
common.msgs = model.message.num_unread_msgs(common.session.user)
web.header('Content-Type', 'text/html')
return handler()
app.add_processor(refresh_messages)
# Default page in case we don't know what to do (shouldn't happen).
class Main:
def GET(self):
activity = model.event.get_events(0, 30)
custom_logged_page=None
custom_visitor_page=None
if os.path.exists("custom.logged.html"):
f = open("custom.logged.html")
custom_logged_page = f.read()
f.close()
if os.path.exists("custom.visitor.html"):
f = open("custom.visitor.html")
custom_visitor_page = f.read()
f.close()
return common.render.main(content="Welcome to " + config.val('title'),
is_main=True,
num_proj=model.project.count_proj(),
activity=activity,
custom_logged_page=custom_logged_page,
custom_visitor_page=custom_visitor_page,
htTitle="Welcome to " + config.val('title') + "!")
# Defining the mode
if config.val('mode') == 'fcgi':
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
# Serving.
if __name__ == "__main__": app.run()
| UTF-8 | Python | false | false | 3,868 | py | 62 | anvil.py | 28 | 0.431489 | 0.425543 | 0 | 94 | 40.148936 | 95 |
ShihabSikder/tronics | 12,163,347,390,491 | f0a8216535af9545bb46ebf3115d329d5b3f9972 | 2e89eaa81cd06caf31702b46679d4c1d7aa0f912 | /portfolio/urls.py | a4a1536008b35b38b281f24b1220fa42f09ba764 | [] | no_license | https://github.com/ShihabSikder/tronics | aa5914074efc1040e146dd1dcc2ba1bf3547e9fc | 58fbbdd749e2ee5978f61c141721c7517749ce53 | refs/heads/master | 2020-09-23T20:32:18.494172 | 2019-12-03T18:59:39 | 2019-12-03T18:59:39 | 225,580,607 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import url
from blog import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('blog.urls')),
url(r'aboutme/',views.aboutme, name='about')
]
| UTF-8 | Python | false | false | 266 | py | 3 | urls.py | 1 | 0.710526 | 0.710526 | 0 | 9 | 28.444444 | 48 |
bethebunny/sos | 10,127,532,892,267 | 3fc69467e29c0a73c9ff8ef2a7f9179636c8f739 | 8e280407a657f81b793bff7d5cdc851b2ba9a6bf | /utils/sosh.py | b8b0d9794e23e21277e56117f089f1a13546b3fd | [] | no_license | https://github.com/bethebunny/sos | 152e909de045c1db1e0056d9c7a2a1efce4a6802 | 458cdc466b93e453fc457e44144403493fa33ecc | refs/heads/master | 2023-07-30T21:03:48.765757 | 2021-09-05T08:13:09 | 2021-09-05T08:13:09 | 400,320,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from IPython.terminal.embed import embed
from sos.execution_context import current_execution_context
from sos.kernel_main import Kernel
from sos.scheduler import Scheduler
from sos.service.remote import Remote
from sos.services.services import RemoteServicesBackend
if __name__ == "__main__":
services = RemoteServicesBackend(Remote.Args(("localhost", 2222)))
kernel = Kernel(
services=services,
root_ec=current_execution_context(),
scheduler=Scheduler(),
)
# Can't point to kernel.main directly; IPython tries to pickle the function
# and event loops don't pickle well :/
def run(coro):
return kernel.main(coro)
embed(colors="neutral", using=run)
| UTF-8 | Python | false | false | 713 | py | 25 | sosh.py | 22 | 0.713885 | 0.708275 | 0 | 23 | 30 | 79 |
kzm0211/Lchika | 6,828,998,035,831 | 8648782449100acd5f1d1b4d7f69ac7b626a4cfe | 6eeed3094c766601a970e4584759b846d0867a36 | /led3onoff_mod.py | b8631e80a2c2a1bd5abf7c6fbdecf25caeaaff44 | [] | no_license | https://github.com/kzm0211/Lchika | 1ce79c25085051993a03f59e54b64e1abcbf5e27 | 61c07e916cce0e165548d785ca577f946daa318a | refs/heads/master | 2021-01-20T19:56:42.743778 | 2016-07-19T15:18:45 | 2016-07-19T15:18:45 | 63,612,013 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
def led_init(leds):
GPIO.setup(leds, GPIO.OUT)
def led_on(leds):
GPIO.output(leds, GPIO.HIGH)
def led_off(leds):
GPIO.output(leds, GPIO.LOW)
# RPi.GPIO requires a pin-numbering mode before setup(); BCM is an assumption
# here -- switch to GPIO.BOARD if the wiring uses physical pin numbers.
GPIO.setmode(GPIO.BCM)
LED1 = 6
led_init(LED1)
LED2 = 5
led_init(LED2)
LED3 = 11
led_init(LED3)
for i in range(10):
print(i)
led_on(LED1)
led_on(LED2)
led_on(LED3)
time.sleep(0.5)
led_off(LED1)
led_off(LED2)
led_off(LED3)
time.sleep(0.5)
GPIO.cleanup()
| UTF-8 | Python | false | false | 452 | py | 5 | led3onoff_mod.py | 5 | 0.672566 | 0.623894 | 0 | 36 | 11.527778 | 29 |
HMQstu/FileServer | 13,322,988,581,198 | 342d1949f2d9db0075d816fb7acd6ab2677aef68 | 802e89a1c0c5149df76dad4e85700a47b27b2be0 | /user_service.py | 5116c3eebf162cf2dea53ba9e335b9df0a2089df | [] | no_license | https://github.com/HMQstu/FileServer | 63f72be2c93293989dd3b02064fa43d1d39d58fc | 3f6e7a232a18ec6a2809adf8b9fd6183a59c547c | refs/heads/master | 2021-09-15T07:01:53.483116 | 2018-05-28T07:56:38 | 2018-05-28T07:56:38 | 126,126,643 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import db_helper
def query_user(username, password):
user = db_helper.find_user_by_username(username)
if user is None:
        # user does not exist
return None
if password != user.password:
        # wrong password
return None
return user
def register_user(user):
old_user = db_helper.find_user_by_username(user.username)
if old_user is not None:
db_helper.remove_user_by_username(user.username)
db_helper.insert_user(user)
| UTF-8 | Python | false | false | 492 | py | 14 | user_service.py | 13 | 0.647679 | 0.64557 | 0 | 21 | 21.571429 | 61 |
AbelRapha/Python-Exercicios-CeV | 18,476,949,328,451 | a5550f0b1bd30829eb30df234aad296c7fa82117 | 2dafd069ae283ea707c644e3aacedca35493df3c | /Mundo 1/ex035 Aprovando emprestimo.py | deb1285ad5249e298af3bd573281f0d405c4c92e | [
"MIT"
] | permissive | https://github.com/AbelRapha/Python-Exercicios-CeV | 49353c7284aa9467ac8ab1cb416b121a927bbccb | 17e7055c982c8a1224992602ece50bae8eeee365 | refs/heads/main | 2023-08-29T04:55:03.430294 | 2021-09-12T03:53:09 | 2021-09-12T03:53:09 | 405,525,468 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
valorCasa = float(input("Digite o valor da Casa que deseja adquirir: "))
salario = float(input("Digite o seu salario: "))
anos = int(input("Em quantos voce quer pagar esse financiamento? "))
prestacao = valorCasa/ (12*anos)
def aprovacaoEmprestimo(prestacao, salario):
if prestacao <= 0.3*salario:
return "APROVADO"
else:
return "REPROVADO"
print("Para pagar uma casa no valor de R$ {:,.2f} em {} anos a prestacao sera de R$ {:,.2f} por mes.".format(valorCasa,anos, prestacao))
print("Analisando a aprovacao do financiamento. Aguarde... ")
time.sleep(3)
print(aprovacaoEmprestimo(prestacao,salario)) | UTF-8 | Python | false | false | 638 | py | 107 | ex035 Aprovando emprestimo.py | 106 | 0.710031 | 0.69906 | 0 | 17 | 36.588235 | 136 |
ColDog/battlesnake-training-snakes | 14,809,047,264,372 | b8694438eeec1051223bb6d0052f82fb7e35d890 | e652bc36ce2a447eaaf46e00e40941fd20afddce | /app/snakes/snake_3.py | bd45503f45ebb4a021faacb6e88bef31ff0f2aa5 | [] | no_license | https://github.com/ColDog/battlesnake-training-snakes | 478770854ab8fb703dc7476e7d7b86b5a24c284c | 39f0b85714582ff936c2145754a6d96975df57b8 | refs/heads/master | 2021-05-08T23:05:12.150527 | 2018-02-06T04:59:09 | 2018-02-06T04:59:09 | 119,697,932 | 0 | 0 | null | true | 2018-01-31T14:27:51 | 2018-01-31T14:27:51 | 2018-01-30T23:56:12 | 2018-01-31T00:36:42 | 21 | 0 | 0 | 0 | null | false | null | from utils.vector import Vector, up, down, left, right, noop
from base_snake import BaseSnake
class Snake3(BaseSnake):
def move(self, gamestate):
first_food = gamestate.food[0]
ordered_directions = self._directions_to(first_food, gamestate)
head = gamestate.my_head
for v in ordered_directions:
if gamestate.is_empty(head + v):
return v
return up
def _directions_to(self, goal, gamestate):
to_travel = goal - gamestate.my_head
horizontal = [left, right] if goal.x < gamestate.my_head.x else [right, left]
vertical = [up, down] if goal.y < gamestate.my_head.y else [down, up]
if to_travel.x > to_travel.y:
return horizontal + vertical
return vertical + horizontal
def name(self):
return "Training Snake 3"
def color(self):
return "#05f299"
def head_url(self):
return ""
def taunt(self):
return ""
def end(self):
pass
| UTF-8 | Python | false | false | 1,013 | py | 12 | snake_3.py | 11 | 0.596249 | 0.588351 | 0 | 37 | 26.378378 | 85 |
IrinaMun/python-base | 10,703,058,535,666 | e1177396c0e1a4a1d261769f00699110b65f9bf3 | a130ad550ce437dbd9487d6f37b8c1121bc46289 | /Lesson9/task5.py | 899f70c0feadc35b9de1ec591491ba4a1531d359 | [] | no_license | https://github.com/IrinaMun/python-base | 0172f71f2617e1a70321106d0e2b758943e6dae9 | ada695419c43639d4021aa6ef3d279d699bdbc12 | refs/heads/main | 2023-04-08T22:42:10.139091 | 2021-04-05T16:23:46 | 2021-04-05T16:23:46 | 341,997,663 | 0 | 0 | null | false | 2021-04-10T13:51:52 | 2021-02-24T18:33:33 | 2021-04-05T16:23:49 | 2021-04-10T13:51:06 | 588 | 0 | 0 | 2 | Python | false | false | """
5. Implement a base class Car.
When an instance is created, the attributes color (str) and name (str) must be passed in.
Implement the methods go(speed), stop(), turn(direction), which must change the car's state - to store
these properties you will need additional attributes; decide yourself which ones.
Add an is_police() method that returns True/False depending on whether this car is a
police car (see below).
Create several derived classes: TownCar, SportCar, WorkCar, PoliceCar;
Add a get_status() method to the base class that must return, as a string, the car's name, color, current
speed and direction of travel (if the car is moving); for police cars the word POLICE must precede the
car's name.
For the TownCar and WorkCar classes, get_status() must show the phrase "ПРЕВЫШЕНИЕ!" ("speeding!") next to
the speed value if the speed exceeds 60 (TownCar) or 40 (WorkCar).
Create one instance of each derived class. In a loop of 10 iterations, perform one of the random actions
go, stop, turn with random parameters for each car. Show the car's status after each action.
from random import randrange
directions = ['Север', 'Юг', 'Запад', 'Восток']
class Car(object):
def __init__(self, color, name):
self.color = color
self.name = name
self.speed = 0
self.direction = None
def go(self, speed, direction=None):
self.speed = speed
if direction:
self.direction = direction
def stop(self):
self.speed = 0
def turn(self, direction):
self.direction = direction
def show_speed(self):
return self.speed
def is_police(self):
return False
def get_status(self):
status_str = f'Скорость: {self.show_speed()}\n' \
f'Цвет: {self.color}\n' \
f'Название: {self.name}\n' \
f'Направление: {self.direction}\n' \
f'*********************************'
return status_str
class TownCar(Car):
def show_speed(self):
result = ''
if self.speed > 60:
result += 'ПРЕВЫШЕНИЕ: '
result += str(self.speed)
return result
class SportCar(Car):
pass
class WorkCar(Car):
def show_speed(self):
result = ''
if self.speed > 40:
result += 'ПРЕВЫШЕНИЕ: '
result += str(self.speed)
return result
class PoliceCar(Car):
def is_police(self):
return True
def get_status(self):
result = "POLICE: " + super().get_status()
return result
if __name__ == '__main__':
town_car = TownCar(
color="Green",
name="Nissan",
)
sport_car = SportCar(
color="Red",
name="lamborghini",
)
work_car = WorkCar(
color="Gray",
name="Ford",
)
police_car = PoliceCar(
color="Blue",
name="Hyundai",
)
car_list = [town_car, sport_car, work_car, police_car]
for count in range(10):
for car in car_list:
car.go(randrange(0, 100))
car.turn(directions[randrange(0, 3)])
if randrange(0, 10) > 8:
car.stop()
for car in car_list:
print(car.get_status())
| UTF-8 | Python | false | false | 4,242 | py | 5 | task5.py | 5 | 0.612798 | 0.605357 | 0 | 121 | 26.768595 | 120 |
vaezc/PythonTraining | 17,815,524,346,343 | 3632a1f2c0c5503a55de0ce06ff82a6ed8a66aad | d6a0d07a6ba5667ed33b5f0576555308553fbd56 | /统计代码行数/main.py | 7a468bb422a1b1a94353971c911669795be099b1 | [] | no_license | https://github.com/vaezc/PythonTraining | 5f371ab4ed72cf01d54348ec6018284c130366a6 | bc305e37a748751e5bd4eed21a6031fbd3c72186 | refs/heads/master | 2020-03-13T12:26:42.507244 | 2018-04-26T07:49:09 | 2018-04-26T07:49:09 | 131,119,081 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
length = 0
def writeFile(path,fileList):
str = ""
for line in fileList:
str = str + line
with open(path,"w+") as f:
f.write(str)
def readFile(path):
global length
with open(path,"r") as f:
fileList = f.readlines()
for line in fileList:
length += 1
def listAllDir(path):
list = os.listdir(path)
for line in list:
newline = os.path.join(path, line)
if os.path.isdir(newline):
listAllDir(newline)
else:
if newline.endswith('.h') or newline.endswith('.m'):
if 'Pods' in newline:
continue
else:
readFile(newline)
path = ''
listAllDir(path)
print length | UTF-8 | Python | false | false | 756 | py | 1 | main.py | 1 | 0.527778 | 0.525132 | 0 | 37 | 19.459459 | 64 |
DominicBurkart/wikipedia-revisions | 16,252,156,274,104 | de1d7edc42c7312e077cfb3bc15d44991460501f | 6ceccf5269b468ebbdb792d5ab42834635ba2fc8 | /wikipedia_revisions/download.py | bf44941e7802a011567bee10ac47ab8f964e7c10 | [
"BSD-3-Clause"
] | permissive | https://github.com/DominicBurkart/wikipedia-revisions | 9bee9a7bd9ed1e67b4b612a12c1ebb645b4d7c2f | 826e455e32d440bb2d4af5d17e5c939b1da067df | refs/heads/master | 2023-06-08T22:43:26.391137 | 2020-08-27T11:43:50 | 2020-08-27T11:43:50 | 250,873,585 | 2 | 0 | BSD-3-Clause | false | 2023-05-22T23:22:43 | 2020-03-28T19:10:25 | 2020-08-27T11:44:02 | 2023-05-22T23:22:43 | 175 | 2 | 0 | 3 | Python | false | false | import bz2
import errno
import hashlib
import os
import platform
import re
import threading
import time
import traceback
import xml.etree.ElementTree as ET
from typing import Optional, Dict, Generator, Iterable, Callable, Set
import click
import requests
from wikipedia_revisions import config
from wikipedia_revisions.utils import timestr
def download_bz2_file(session: requests.Session, url: str) -> str:
CHUNK_SIZE = 1024 * 1024 * 5
filename = url.split("/")[-1]
retries = 0
while True:
try:
if os.path.exists(filename):
print(f"{timestr()} using local file {filename} 👩🌾")
break
print(f"{timestr()} downloading {url}. saving to {filename}. 📁")
resp = session.get(url, stream=True, timeout=60)
assert resp.status_code == 200
print(f"{timestr()} response for {url}: {resp.status_code}. 🕺")
with open(filename, "wb") as file:
for chunk in resp.iter_content(chunk_size=CHUNK_SIZE):
file.write(chunk)
break
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
traceback.print_exc()
retries += 1
print(
f"{timestr()} timeout for {url}: sleeping 60 seconds and restarting download... (retry #{retries}) ↩️"
)
time.sleep(60)
return filename
def download_and_verify_bz2_files(
session: requests.Session, update_urls: Set[str]
) -> Generator[str, None, None]:
verified_files = VerifiedFilesRecord()
# perform checksum
if config["low_storage"]:
print(
f"{timestr()} [low storage mode] "
f"deleting all records of previously verified files. 🔥"
)
verified_files.remove_local_file_verification()
while len(update_urls) > 0:
url = update_urls.pop()
unverified_filename = download_bz2_file(session, url)
verified_filename = check_hash(verified_files, unverified_filename)
if verified_filename is not None:
yield verified_filename
else:
update_urls.add(url)
class MalformattedInput(Exception):
...
def generate_revisions(file) -> Generator[Dict, None, None]:
def prefixed(s: str) -> str:
"""element names have the following string prepended to them."""
return "{http://www.mediawiki.org/xml/export-0.10/}" + s
ID_STR = prefixed("id")
NS_STR = prefixed("ns")
TITLE_STR = prefixed("title")
PAGE_STR = prefixed("page")
REVISION_STR = prefixed("revision")
PARENT_ID_STR = prefixed("parentid")
TIMESTAMP_STR = prefixed("timestamp")
CONTRIBUTOR_STR = prefixed("contributor")
IP_STR = prefixed("ip")
USERNAME_STR = prefixed("username")
COMMENT_STR = prefixed("comment")
TEXT_STR = prefixed("text")
page_id = None
page_ns = None
page_title = None
for event, element in ET.iterparse(file, events=["start", "end"]):
if event == "end" and element.tag == PAGE_STR:
page_id = None
page_ns = None
page_title = None
element.clear()
elif event == "end":
            # hack: assume that the page's id, ns, and title all precede its first revision.
# if this is not the case, a MalformattedInput exception is thrown.
if page_id is None and element.tag == ID_STR:
page_id = element.text
elif page_ns is None and element.tag == NS_STR:
page_ns = element.text
elif page_title is None and element.tag == TITLE_STR:
page_title = element.text
elif element.tag == REVISION_STR:
revision_id = element.find(ID_STR).text
parent_id_element = element.find(PARENT_ID_STR)
parent_id = (
parent_id_element.text if parent_id_element is not None else None
)
timestamp = element.find(TIMESTAMP_STR).text
contributor_element = element.find(CONTRIBUTOR_STR)
ip_element = contributor_element.find(IP_STR)
contributor_ip = ip_element.text if ip_element is not None else None
contributor_id_element = contributor_element.find(ID_STR)
contributor_id = (
contributor_id_element.text
if contributor_id_element is not None
else None
)
contributor_name_element = contributor_element.find(USERNAME_STR)
contributor_name = (
contributor_name_element.text
if contributor_name_element is not None
else None
)
comment_element = element.find(COMMENT_STR)
comment = comment_element.text if comment_element is not None else None
text = element.find(TEXT_STR).text
if any(v is None for v in (page_id, page_ns, page_title)):
raise MalformattedInput
yield {
"id": revision_id,
"parent_id": parent_id,
"timestamp": timestamp,
"page_id": page_id,
"page_title": page_title,
"page_ns": page_ns,
"contributor_id": contributor_id,
"contributor_name": contributor_name,
"contributor_ip": contributor_ip,
"comment": comment,
"text": text,
}
element.clear()
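# Illustrative sketch (not part of the original module): a minimal, assumed
# example of the export-0.10 XML shape that generate_revisions() expects,
# handy as a quick smoke test of the parser above.
def _example_parse_tiny_export():
    import io
    tiny_export = (
        '<mediawiki xmlns="http://www.mediawiki.org/xml/export-0.10/">'
        "<page><title>Example</title><ns>0</ns><id>1</id>"
        "<revision><id>2</id><timestamp>2020-01-01T00:00:00Z</timestamp>"
        "<contributor><username>alice</username><id>3</id></contributor>"
        "<comment>start</comment><text>hello</text></revision>"
        "</page></mediawiki>"
    )
    return next(generate_revisions(io.StringIO(tiny_export)))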
def parse_one_file(filename: str) -> Generator[Dict, None, None]:
pid = os.getpid()
print(
f"{timestr()} extracting revisions from update file {filename} in process #{pid}... 🧛"
)
with bz2.open(filename, "rt", newline="") as uncompressed:
for revision in generate_revisions(uncompressed):
yield revision
print(f"{timestr()} exhausted file: {filename} 😴")
if config["low_storage"]:
print(f"{timestr()} Deleting {filename}... ✅")
os.remove(filename)
class VerifiedFilesRecord:
"""
retain the hash and basename for each downloaded file. downloads the
canonical hashes from wikipedia if they are not stored locally.
"""
def __init__(self):
self.canonical_record = "canonical_hashes.txt"
self.lock = threading.Lock()
while not os.path.exists(self.canonical_record):
resp = requests.get(config["md5_hashes_url"])
if resp.status_code != 200:
print(
f"{timestr()} unable to get md5 hashes from wikipedia. "
"Sleeping for five minutes then retrying... 🛌"
)
time.sleep(5 * 60)
else:
with open(self.canonical_record, "w") as local_record:
local_record.write(resp.text)
self.canonical_hashes = {
line.split(" ")[1].strip(): line.split(" ")[0]
for line in open(self.canonical_record).readlines()
}
self.record_in_storage = "verified_files_record.txt"
if os.path.exists(self.record_in_storage):
self.files = set(
map(lambda s: s.strip(), open(self.record_in_storage).readlines())
)
else:
open(self.record_in_storage, "a").close()
self.files = set()
def __contains__(self, filename):
with self.lock:
return filename in self.files
def add(self, filename):
with self.lock:
base = os.path.basename(filename)
with open(self.record_in_storage, "a") as store:
store.write(base + "\n")
self.files.add(base)
def canonical_hash(self, filename) -> str:
base = os.path.basename(filename)
return self.canonical_hashes[base]
def remove_local_file_verification(self):
with self.lock:
with open(self.record_in_storage, "w") as store:
store.write("")
self.files.clear()
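# Illustrative sketch (not part of the original module): membership checks on
# the record, as used by check_hash() below. The file name is a placeholder,
# and constructing the record downloads Wikipedia's canonical md5 list.
def _example_is_already_verified(filename="enwiki-latest-pages-meta-history1.xml.bz2"):
    verified_files = VerifiedFilesRecord()
    return filename in verified_files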
def get_hash(filename: str) -> str:
hash = hashlib.md5()
with open(filename, "rb") as f:
while True:
chunk = f.read(1000000)
if not chunk:
break
hash.update(chunk)
return hash.hexdigest()
def check_hash(verified_files: VerifiedFilesRecord, filename: str) -> Optional[str]:
if filename not in verified_files:
print(f"{timestr()} checking hash for {filename}... 📋")
file_hash = get_hash(filename)
if file_hash == verified_files.canonical_hash(filename):
if not config["low_storage"]:
# ^ hack in low_storage mode the files are deleted when exhausted
verified_files.add(filename)
else:
print(f"{timestr()} hash mismatch with {filename}. Deleting file.🗑️")
os.remove(filename)
return None
print(f"{timestr()} {filename} hash verified 💁")
return filename
def full_dump_url_from_partial(partial: str):
if config["date"] != "latest" and partial.startswith("/"):
return "https://dumps.wikimedia.org" + partial
elif config["date"] == "latest" and not partial.startswith("/"):
return "https://dumps.wikimedia.org/enwiki/latest/" + partial
else:
raise ValueError("dump page format has been updated.")
def download_and_parse_files() -> Iterable[Callable[..., Generator[Dict, None, None]]]:
print(f"{timestr()} requesting dump directory... 📚")
session = requests.Session()
session.headers.update(
{
"User-Agent": "Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Mobile Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,"
"image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}
)
dump_page = session.get(config["dump_page_url"])
assert dump_page.status_code == 200
print(f"{timestr()} parsing dump directory... 🗺️🗺️")
# read history file links in dump summary
bz2_urls = set(
map(
full_dump_url_from_partial,
filter(
lambda url: "pages-meta-history" in url and url.endswith(".bz2"),
re.findall('href="(.+?)"', dump_page.text),
),
)
)
# download & verify history files
verified_files = download_and_verify_bz2_files(session, bz2_urls)
# create functions that read and parse valid files
for filename in verified_files:
yield lambda: parse_one_file(filename)
def write_to_csv(
filename: Optional[str],
revision_iterator_functions: Iterable[Callable[..., Iterable[Dict]]],
):
from wikipedia_revisions.write_to_files import write_to_csv as write
write(filename, revision_iterator_functions)
def write_to_database(
revision_iterator_functions: Iterable[Callable[..., Iterable[Dict]]]
) -> None:
from wikipedia_revisions.write_to_database import write
write(config, revision_iterator_functions)
@click.command()
@click.option(
"--date",
"date",
default="latest",
help="Wikipedia dump page in YYYYMMDD format (like 20200101). "
"Find valid dates by checking which entries on "
"https://dumps.wikimedia.org/enwiki/ have .bz2 files that "
'contain the include "pages-meta-history" in the name and '
"have been successfully written.",
)
@click.option(
"--low-storage/--large-storage",
"low_storage",
default=True,
help="Cut performance to decrease storage requirements. Deletes "
"files when they are exhausted and keeps a limited number of "
".xml.bz2 files on disk at any time. If --large-storage, "
"downloads all xml.bz2 files and never deletes them.",
)
@click.option(
"--database/--csv",
"use_database",
default=False,
help="Write output into a database instead of a CSV. "
"Requires additional installations (run pip install -r "
"database_requirements.txt) and for the database URL (see "
"--database-url) to be available.",
)
@click.option(
"-o",
"--output",
"output_file",
default=os.path.join(os.path.curdir, "revisions.csv.bz2"),
help="set output path for csv.bz2. Defaults to revisions.csv.bz2 in current directory.",
)
@click.option(
"--database-url",
default="postgres:///wikipedia_revisions"
if platform.python_implementation() == "CPython"
else "postgresql+psycopg2cffi:///wikipedia_revisions",
help="Database URL to use. Defines database dialect used (any "
"database dialect supported by SQLAlchemy should work). Ignored"
"if --database is not set. Default is postgres:///wikipedia_revisions on CPython, and "
"postgresql+psycopg2cffi:///wikipedia_revisions on all other implementations (e.g. PyPy).",
)
@click.option(
"--low-memory/--large-memory",
"low_memory",
default=True,
help="Optimize for low-memory systems. Limits the number of "
"dump files concurrently processed to 3, instead of "
"the number of CPU cores. If writing to a database, "
"also commits every megabyte instead of gigabyte to limit "
"memory usage.",
)
@click.option(
"--delete-database/--do-not-delete-database",
"delete_database",
default=False,
help="drop everything in the passed database and overwrite it with "
"the wikipedia revisions data.",
)
@click.option(
"--num-subprocesses",
"concurrent_reads",
default=2,
type=int,
help="number of concurrent processes, each reading one .xml.bz2 file. Default is 2. When using storage media "
"with fast concurrent reads and high throughput (SSDs), higher values (e.g. the number of "
"cpu cores) are better.",
)
@click.option(
"--insert-multiple-values/--batch-insert",
"insert_multiple_values",
default=False,
help="if writing to a database, insert multiple values within a single statement. Not supported for all "
"SQLAlchemy-covered databases. For more information on multi-value inserts in SQLAlchemy, see: "
"http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.Insert.values.params.*args",
)
@click.option(
"--db-connections-per-process",
"num_db_connections",
default=4,
type=int,
help="number of DB connections per process. Default is 4. Must be > 0.",
)
@click.option(
"-p",
"--pipe-dir",
"pipe_dir",
default=False,
help="write revisions as uncompressed csvs to a series of named pipes. Pipes are named "
"revisions-<process number>-<numeric time string>.pipe, and are placed in the passed directory.",
)
def run(
date,
low_storage,
use_database,
output_file,
database_url,
low_memory,
delete_database,
concurrent_reads,
insert_multiple_values,
num_db_connections,
pipe_dir,
):
config["date"] = date
config["dump_page_url"] = f"https://dumps.wikimedia.org/enwiki/{date}/"
config[
"md5_hashes_url"
] = f"https://dumps.wikimedia.org/enwiki/{date}/enwiki-{date}-md5sums.txt"
config["low_storage"] = low_storage
config["database_url"] = database_url
config["low_memory"] = low_memory
config["delete_database"] = delete_database
config["concurrent_reads"] = concurrent_reads
config["insert_multiple_values"] = insert_multiple_values
config["num_db_connections"] = num_db_connections
config["backlog"] = 300 if low_memory else 5000
config["pipe_dir"] = pipe_dir
if concurrent_reads < 1:
raise ValueError("concurrent_reads must be at least 1.")
if num_db_connections < 1:
raise ValueError("num_db_connections must be at least 1.")
print(f"{timestr()} program started. 👋")
if config["low_storage"]:
print(f"{timestr()} low storage mode active. 🐈 📦")
complete = False
while not complete:
try:
# download XML files from wikipedia and collect revisions
revision_iterator_functions = download_and_parse_files()
# write collected revisions to output.
if pipe_dir is not None:
write_to_csv(None, revision_iterator_functions)
elif use_database:
write_to_database(revision_iterator_functions)
else:
if os.path.exists(output_file):
print(f"{timestr()} overwriting file {output_file}... 🥛")
write_to_csv(output_file, revision_iterator_functions)
print(f"{timestr()} program complete. 💐")
complete = True
except Exception as e:
if getattr(e, "errno", None) == errno.ENOSPC:
print(f"{timestr()} no space left on device. Ending program. 😲")
raise e
SLEEP_SECONDS = 5 * 60
print(
f"{timestr()} caught exception ({e}). Sleeping {SLEEP_SECONDS/60} minutes..."
)
time.sleep(SLEEP_SECONDS)
print(f"{timestr()} Restarting...")
finally:
for fname in ["verified_files.txt", "canonical_hashes.txt"]:
if os.path.exists(fname):
os.remove(fname)
if __name__ == "__main__":
run()
| UTF-8 | Python | false | false | 17,530 | py | 10 | download.py | 7 | 0.59945 | 0.591487 | 0 | 479 | 35.442589 | 118 |
johnmay/rdkit | 2,353,642,081,957 | 39a7f35a569f334c9ca645ebdb46f761a4171f91 | de1810660d8a62630a06be06f48024c3ab6a5a35 | /rdkit/utils/PilTools.py | bd4fedbf2d752221419ae4ec9e10b2b18962aff6 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | https://github.com/johnmay/rdkit | 3909cc8ac66c405f313f21a0fc1f61d690347ca8 | b6f5fefeecdaabcf163a93afe28d50d3986282dd | refs/heads/master | 2021-01-18T12:31:49.441251 | 2014-04-04T05:19:22 | 2014-04-04T05:19:22 | 18,447,327 | 0 | 0 | NOASSERTION | true | 2019-06-26T07:30:31 | 2014-04-04T17:53:22 | 2014-04-04T17:53:24 | 2019-06-26T07:30:30 | 151,880 | 0 | 0 | 0 | C++ | false | false | ## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2000,2001,2002,2003 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" tools for working with PIL images
"""
#try:
# from wxPython.wx import *
#except ImportError:
# hasWx=0
#else:
# hasWx=1
hasWx=0
try:
from qt import *
except ImportError:
hasQt = 0
else:
hasQt=1
from PIL import Image
# these are here to help the Installer work:
import PIL.ImageFile
import PIL.GifImagePlugin
import PIL.PngImagePlugin
import PIL.JpegImagePlugin
import PIL.BmpImagePlugin
import PIL.TiffImagePlugin
import PIL.PpmImagePlugin
def ResizeImage(origImg,newSize,filter=Image.BILINEAR,maintainAspect=0,
priorityX=1):
"""Resizes an image to fit a given space. The new image is returned.
**Arguments**
- origImg: a PIL image
- newSize: the requested size (either a 2-tuple or a list 2 elements long)
- filter: the filter to be used in resizing the image
- maintainAspect: toggles maintaining the aspect ratio of the image
    - priorityX: (only meaningful when _maintainAspect_ is nonzero)
if nonzero, the X size will be given priority in setting the new size,
otherwise the Y size will take priority
**Returns**
a PIL image
**Notes**
- if maintainAspect is nonzero, the aspect ratio of the image
will not be changed. This implies that the final image may not
actually be the requested size.
"""
  if maintainAspect:
    # scale both dimensions by the ratio of the prioritized axis
    if priorityX:
      scaleFact = float(newSize[0])/origImg.size[0]
    else:
      scaleFact = float(newSize[1])/origImg.size[1]
    newSize = (int(origImg.size[0]*scaleFact),int(origImg.size[1]*scaleFact))
newImg = origImg.resize(newSize,filter)
return newImg
def FitImage(origImg,newSize,filter=Image.BILINEAR,bgColor=(255,255,255)):
"""Fits an image into a box of a particular size.
**Arguments**
- origImg: a PIL image
- newSize: the requested size (either a 2-tuple or a list 2 elements long)
- filter: the filter to be used in resizing the image
- bgColor: the background color to start with
**Returns**
a PIL image
**Notes**
- there may be blank spaces around the original image in the new image,
these will be filled with _bgColor_
"""
tmpImg = origImg.convert('RGB')
newImg = Image.new(tmpImg.mode,newSize,bgColor)
scaleFact = min(float(newSize[0])/origImg.size[0],
float(newSize[1])/origImg.size[1])
if scaleFact < 1:
tImg = origImg.resize((int(origImg.size[0]*scaleFact),int(origImg.size[1]*scaleFact)),filter)
else:
tImg = origImg
xDiff = newSize[0] - tImg.size[0]
if xDiff > 0:
xLoc = xDiff/2
else:
xLoc = 0
yDiff = newSize[1] - tImg.size[1]
if yDiff > 0:
yLoc = yDiff/2
else:
yLoc = 0
newImg.paste(tImg,(xLoc,yLoc))
return newImg.convert(origImg.mode)
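# Illustrative sketch (not part of the original module): padding an image into a
# fixed-size thumbnail with FitImage; the file names here are placeholders.
def ExampleThumbnail(inName='in.png',outName='thumb.png',size=(128,128)):
  img = Image.open(inName)
  thumb = FitImage(img,size,bgColor=(255,255,255))
  thumb.save(outName)
  return thumb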
def NumericMatrixToImage(data,scaleCols=0,transposeIt=0,
minColor=(0,0,0),maxColor=(255,255,255)):
  import numpy
  # copy the data
  data = numpy.array(data, float)
  if transposeIt:
    data = numpy.transpose(data)
  nRows, nCols = data.shape
if scaleCols:
    # per-column minima and maxima, used to scale each column onto [0, 1]
    mins = data.min(axis=0)
    maxs = data.max(axis=0)
# subtract off the minimum
data -= mins
maxs -= mins
# no zeros here please, we're dividing:
maxs += numpy.equal(maxs,0.0)
# and divide:
data /= maxs
# okey dokey, get a three D matrix:
imgMat = numpy.ones((nRows,nCols,3),numpy.integer)
# start at minColor:
minColor = numpy.array(minColor)
maxColor = numpy.array(maxColor)
imgMat *= minColor
deltaColor = maxColor-minColor
# and move to maxColor:
for i in range(nRows):
for j in range(nCols):
imgMat[i,j] += (deltaColor*data[i,j]).astype(numpy.integer)
  d = imgMat.astype('B').tobytes()
  img = Image.frombytes('RGB',(nCols,nRows),d)
return img
if hasWx:
def PilImgToWxBmp(pilImg):
""" converts a PIL image into a wxPython bitmap
**Arguments**
- pilImg: a PIL image
**Returns**
a wxPython bitmap
"""
wxImg = wxEmptyImage(pilImg.size[0],pilImg.size[1])
wxImg.SetData(pilImg.tostring())
bmp = wxImg.ConvertToBitmap()
return bmp
if hasQt:
def PilImgToQPixmap(pilImg):
from StringIO import StringIO
sio = StringIO()
pilImg.save(sio,format='png')
pm = QPixmap()
pm.loadFromData(sio.getvalue())
return pm
if __name__ == '__main__':
if 0:
boxImg = Image.open('12h.gif').convert('RGB')
img1 = ResizeImage(boxImg,(100,200))
img1.save('test1.gif')
img2 = FitImage(boxImg,(100,200),bgColor=(200,200,200))
img2.save('test2.gif')
img3 = FitImage(boxImg,(50,200),bgColor=(200,200,200))
img3.save('test3.gif')
else:
    # plain nested lists are fine here; NumericMatrixToImage converts via numpy.array
    vs = [[1., .5, 0, 0, 0, 0], [.5, 1, .5, 1, 0, 1], [0, .5, 1, 0, 0, 1]]
img = NumericMatrixToImage(vs)
img = img.resize((200,200))
img.save('foo.gif')
| UTF-8 | Python | false | false | 5,232 | py | 137 | PilTools.py | 107 | 0.648318 | 0.618693 | 0 | 209 | 24.028708 | 97 |
diogoolsen/CozinhaPet | 3,968,549,803,295 | 0ed52de97e051fbc7e6201eaaaa5924b8a01da22 | 9e30856cc1450c19b4e55e13a867534aa66441e2 | /tests/model/test_ingredient.py | 055b63af044e8b3f72abbd66a31b29cfa4d1eed3 | [] | no_license | https://github.com/diogoolsen/CozinhaPet | e0a7ce2c65b02347e32653ec625f9e4676e98a03 | adbb4e4e3a92096b66d94170b8bdf6db8bd0e0d8 | refs/heads/master | 2023-05-08T17:42:57.283182 | 2021-01-18T13:29:47 | 2021-01-18T13:29:47 | 326,803,382 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# import pytest
import datetime
from pytest import approx, raises
from src.model.ingredient import Ingredient
class TestIngredient():
def assert_item_by_item(self, actual, expected, message=''):
assert actual['name'] == expected['name'], message
assert actual['searchable'] == expected['searchable'], message
assert actual['type'] == expected['type'], message
assert actual['unity'] == expected['unity'], message
assert actual['establishedCostPer1K'] == \
approx(expected['establishedCostPer1K'])
assert isinstance(actual['factorsLog'][0]['date'],
datetime.datetime)
assert actual['factorsLog'][0]['cookingFactor'] ==\
approx(expected['factorsLog'][0]['cookingFactor']), message
assert actual['factorsLog'][0]['safetyMargin'] ==\
approx(expected['factorsLog'][0]['safetyMargin']), message
assert actual['factorsLog'][0]['actualFactor'] ==\
approx(expected['factorsLog'][0]['actualFactor']), message
assert isinstance(actual['costLog'][0]['date'],
datetime.datetime)
assert actual['costLog'][0]['price'] ==\
approx(expected['costLog'][0]['price']), message
assert actual['costLog'][0]['amount'] ==\
approx(expected['costLog'][0]['amount']), message
assert actual['costLog'][0]['costPer1Unity'] ==\
approx(expected['costLog'][0]['costPer1Unity']), message
assert actual['costLog'][0]['costPer1K'] ==\
approx(expected['costLog'][0]['costPer1K']), message
def test_create_ingredient_OK_v1(self):
name = 'abobrinha'
type = 'vegetal'
unity = 'g'
price = '3.14'
established = '3'
actual = Ingredient(name, type, unity, price, established).dict
expected_factors_dict = {
'date': datetime.datetime.now(),
'cookingFactor': 1.,
'safetyMargin': 1.,
'actualFactor': 1.
}
expected_cost_dict = {
'date': datetime.datetime.now(),
'price': 3.14,
'amount': 1000.,
'costPer1Unity': 0.00314,
'costPer1K': 3.14
}
expected = {
'name': 'abobrinha',
'searchable': 'ABOBRINHA',
'type': 'vegetal',
'unity': 'g',
# 'amount': 1000.,
'factorsLog': [expected_factors_dict],
'costLog': [expected_cost_dict],
'establishedCostPer1K': 3.
}
message = ('test_create_ingredient_OK_v1 returned'
'{0}'
'instead of'
'{1}'.format(actual, expected)
)
self.assert_item_by_item(actual, expected, message)
def test_create_ingredient_OK_v2(self):
name = 'figado bovino'
type = 'carne'
unity = 'g'
price = '32.5'
established = '65'
amount = '500'
cookingFactor = '0.75'
safetyMargin = '1.05'
actual = Ingredient(name,
type,
unity,
price,
established,
amount,
cookingFactor,
safetyMargin).dict
expected_factors_dict = {
'date': datetime.datetime.now(),
'cookingFactor': 0.75,
'safetyMargin': 1.05,
'actualFactor': 0.7875
}
expected_cost_dict = {
'date': datetime.datetime.now(),
'price': 32.5,
'amount': 500.,
'costPer1Unity': 0.065,
'costPer1K': 65
}
expected = {
'name': 'figado bovino',
'searchable': 'FIGADO BOVINO',
'type': 'carne',
'unity': 'g',
# 'amount': 500.,
'factorsLog': [expected_factors_dict],
'costLog': [expected_cost_dict],
'establishedCostPer1K': 65
}
message = ('test_create_ingredient_OK_v2 returned'
'{0}'
'instead of'
'{1}'.format(actual, expected)
)
self.assert_item_by_item(actual, expected, message)
def test_create_ingredient_validate_v1(self):
name = ''
type = 'complemento'
unity = 'ml'
price = '32'
established = '32'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established)
# Check if ValueError contains correct message
assert exception_info.match('Nome do ingrediente inválido.')
def test_create_ingredient_validate_v2(self):
name = 'Óleo de coco'
type = ''
unity = 'ml'
price = '32'
established = '64'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established)
# Check if ValueError contains correct message
assert exception_info.match('Tipo do ingrediente inválido.')
def test_create_ingredient_validate_v3(self):
name = 'Óleo de coco'
type = 'complemento'
unity = ''
price = '32'
established = '64'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established)
# Check if ValueError contains correct message
assert exception_info.match('Unidade do ingrediente inválido')
def test_create_ingredient_validate_v4(self):
name = 'Óleo de coco'
type = 'complemento'
unity = 'ml'
price = '32,5'
established = '65'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established)
# Check if ValueError contains correct message
assert exception_info.match('Custo do ingrediente inválido.')
def test_create_ingredient_validate_v5(self):
name = 'Óleo de coco'
type = 'complemento'
unity = 'ml'
price = '32'
established = '64'
amount = '22x'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established, amount)
# Check if ValueError contains correct message
assert exception_info.match('Quantidade de ingrediente inválida.')
def test_create_ingredient_validate_v6(self):
name = 'Óleo de coco'
type = 'complemento'
unity = 'ml'
price = '32'
established = '64'
amount = '22'
cookingFactor = '1,2'
safetyMargin = '1.1'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established, amount,
cookingFactor, safetyMargin)
# Check if ValueError contains correct message
assert exception_info.match('Fator de cocção do ingrediente inválido.')
def test_create_ingredient_validate_v7(self):
name = 'Óleo de coco'
type = 'complemento'
unity = 'ml'
price = '32'
established = '64'
amount = '22'
cookingFactor = '1.2'
safetyMargin = '1,1'
with raises(ValueError) as exception_info:
# store the exception
Ingredient(name, type, unity, price, established, amount,
cookingFactor, safetyMargin)
# Check if ValueError contains correct message
assert exception_info.match(
'Margem de segurança do ingrediente inválida.')
| UTF-8 | Python | false | false | 7,904 | py | 20 | test_ingredient.py | 20 | 0.535877 | 0.5161 | 0 | 251 | 30.422311 | 79 |
sakakazu2468/AtCoder_py | 11,321,533,808,078 | 43e1795f8d91e21a750443edcf3438a69f858baf | cbf9f600374d7510988632d7dba145c8ff0cd1f0 | /abc/010/b.py | e4d104dbea76c571eab349e5d78cb84962c97296 | [] | no_license | https://github.com/sakakazu2468/AtCoder_py | d0945d03ad562474e40e413abcec39ded61e6855 | 34bdf39ee9647e7aee17e48c928ce5288a1bfaa5 | refs/heads/master | 2022-04-27T18:32:28.825004 | 2022-04-21T07:27:00 | 2022-04-21T07:27:00 | 225,844,364 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
a = list(map(int, input().split()))
ans = 0
for i in a:
if i == 9:
pass
elif i >= 7:
ans += i - 7
elif i >= 3:
ans += i - 3
else:
ans += i - 1
print(ans)
| UTF-8 | Python | false | false | 219 | py | 554 | b.py | 553 | 0.401826 | 0.369863 | 0 | 13 | 15.846154 | 35 |
scottsilverlabs/raspberrystem-hw-base | 901,943,154,393 | 1d7aa8444246ad5189265de45279dca5dd739f37 | 739a21f0322b41e0020ad0b42208f8e14815353f | /PCB/logo/eagle-scale.py | 8bfb8df0c2ba5633e956c706ec87bf6a261e18cf | [
"Apache-2.0"
] | permissive | https://github.com/scottsilverlabs/raspberrystem-hw-base | 528c720ffde1e7c0df0cc0a5599d83ea118ea5ff | 47c591ae8a7bcf0205d4f6b6f2b078505c93e240 | refs/heads/master | 2021-01-17T11:59:38.145540 | 2015-07-01T19:32:43 | 2015-07-01T19:32:43 | 20,490,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Simple scaleable BRD file script.
#
# Reads in Eagle BRD file given as arg, and scales attributes in the <plain> section by the given
# factor.
#
# Original file is overwritten - beware!
#
# See also hflip/vflip scripts.
#
# Example:
#
# cp Logo.brd new.brd && python eagle-scale.py new.brd 1.8 open new.brd
#
# The above scales Logo.brd by 1.8x, and opens the scaled file in Eagle. In
# Eagle, you can then:
# - group the file,
# - copy the group (then hit ESC),
# - open the board file you want to paste it into,
# - use the paste command to paste it.
#
from bs4 import BeautifulSoup
import sys
try:
prog, name, scale_factor = sys.argv
except:
print "Usage: eagle-scale.py <file> <scale_factor> "
sys.exit()
with file(name) as f:
soup = BeautifulSoup(f)
for tag in soup.plain.find_all(["vertex", "polygon", "wire"]):
for attr in ["width", "x", "y", "x1", "y1", "x2", "y2"]:
if attr in tag.attrs:
tag[attr] = float(tag[attr]) * float(scale_factor)
with file(name, "w") as f:
f.write(str(soup))
| UTF-8 | Python | false | false | 1,069 | py | 20 | eagle-scale.py | 2 | 0.637979 | 0.62956 | 0 | 40 | 25.7 | 97 |
yeonggilee/django_study | 5,866,925,372,494 | 717cd7a39809c99c5588d5c4b175b8bd6e4ba03f | e530fbb22d6e73932fec6d763cf6498d803a907a | /ssido_app/migrations/0003_auto_20180908_1612.py | b92c6af9c8e2b50141e0aed6ed4d20fe08fa8d97 | [] | no_license | https://github.com/yeonggilee/django_study | 5082bdee2e871a79297e63ac5fca3bf734fe5b2e | c6ed5223eaaed07fe96609553421c3b11192b4b3 | refs/heads/master | 2020-03-30T05:54:53.517328 | 2018-09-29T06:55:36 | 2018-09-29T06:55:36 | 150,827,150 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.1 on 2018-09-08 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ssido_app', '0002_test'),
]
operations = [
migrations.RenameModel(
old_name='Test',
new_name='BestFood',
),
migrations.RenameField(
model_name='bestfood',
old_name='best_food',
new_name='food_name',
),
]
| UTF-8 | Python | false | false | 464 | py | 4 | 0003_auto_20180908_1612.py | 3 | 0.534483 | 0.493534 | 0 | 22 | 20.090909 | 47 |
uhayate/CTF | 5,222,680,236,610 | 12eee49b2737bc7d80c643ab95046311e40d026b | a8e846a8a62762871b505595828207fa81afad23 | /解题脚本/eval计算题/计算题1.py | 7c57463dff857775aa64c256bf39dc4648191a29 | [] | no_license | https://github.com/uhayate/CTF | e3180ab2413bc5e1c380e780afb99f6ba7659d8f | a01e5703abfd4e6351fd709ff55dc88d656e7c76 | refs/heads/master | 2016-05-25T13:48:48.414786 | 2015-11-02T00:54:04 | 2015-11-02T00:54:04 | 41,954,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*-coding:gbk -*-
import re
import requests
get_url = 'http://ctf8.simplexue.com/jia/'
post_url = 'http://ctf8.simplexue.com/jia/?action=check_pass'
s = requests.Session()
get = s.get(get_url)
num1 = re.findall(r"'my_expr'>(.*?)</div>=?", get.content)[0]
num2 = num1.replace('x', '*')
post = s.post(post_url, {'pass_key': eval(num2)})
print post.content
'''
第二种做法,使用BeautifulSoup
# -*-coding:gbk -*-
from bs4 import BeautifulSoup
import requests
get_url = 'http://ctf8.simplexue.com/jia/'
post_url = 'http://ctf8.simplexue.com/jia/?action=check_pass'
s = requests.Session()
get = s.get(get_url)
soup = BeautifulSoup(get.content, 'lxml')
calculation = str(soup.form.find_all('div')[0].string).replace('x', '*')
post = s.post(post_url, {'pass_key': eval(calculation)})
print post.content
'''
# <div class="content_title_01">这是一道计算题</div>
# <div class="horizontal_divider_01"> </div>
# <div class="cleaner"> </div>
# <form method='POST' action='?action=check_pass'>
# <p>
# Please input your answer in 3 sesounds!
# <div name='my_expr'>(6826 + 999) x (8504 - 64) - (753 + 475 - 477) x 699 </div>=?
# </p>
# <input name='pass_key' type='text'></input>
# <input type='submit'/>
# </form>
| GB18030 | Python | false | false | 1,265 | py | 14 | 计算题1.py | 12 | 0.634008 | 0.60081 | 0 | 42 | 28.380952 | 86 |
hkariti/higlass_proxy | 9,148,280,382,601 | dbfb2ce171bcd201b37b1027f69cc6d22a2be167 | 425c382463939d23ee0c84143ba3002d126dc087 | /setup.py | 7c5c08c04154b03fc01fb037c4fa3f3bfa9bda30 | [] | no_license | https://github.com/hkariti/higlass_proxy | 3fdd4d43f45729d0e8290aa380cc70e63ed3fa25 | 2c5f093c77295d7c54bc2c75aac38b7c6d3db100 | refs/heads/master | 2023-02-11T17:31:21.572877 | 2021-01-06T12:42:27 | 2021-01-06T12:42:27 | 297,071,453 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup, find_packages
def get_requirements(path):
content = open(path).read()
return [req for req in content.split("\n") if req != "" and not req.startswith("#")]
setup_args = {
"name": "higlass_proxy",
"version": 0.2,
"description": "Proxy for jupyter for higlass daemons using unix sockets",
"url": "https://github.com/hkariti/higlass_proxy",
"author": "Hagai Kariti",
"author_email": "hkariti@gmail.com",
"keywords": ["higlass", "jupyter"],
"install_requires": get_requirements("requirements.txt"),
"packages": find_packages(),
}
setup(**setup_args)
| UTF-8 | Python | false | false | 623 | py | 2 | setup.py | 2 | 0.65008 | 0.64687 | 0 | 20 | 30.15 | 88 |
cmdiaz6/fod_placer | 3,547,642,994,802 | 899ab2f3af0649752c78751fb6847850e5503a93 | 9c1fadab453f09eb68f08a49328b9a6d04672b22 | /pointclass.py | 3e735cd05a4bf2eec3b14fba948cf9759531eecd | [
"MIT"
] | permissive | https://github.com/cmdiaz6/fod_placer | d9e6ea2fbba405ae8f9ac8c7df45793097901609 | 0f7dc934a08b7530560264bf86768f3ae36bc49b | refs/heads/master | 2021-05-06T00:34:15.053425 | 2020-05-14T16:12:12 | 2020-05-14T16:12:12 | 114,309,670 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""class for 3D point in space
also used for operations on vectors center at origin"""
import math
__author__ = "Carlos Diaz"
class Point:
def __init__(self, x = 0.0, y = 0.0, z = 0.0):
self.x, self.y, self.z = x, y, z
def __str__(self):
return "({0},{1},{2})".format(self.x,self.y,self.z)
def __add__(self,other):
x = self.x + other.x
y = self.y + other.y
z = self.z + other.z
return Point(x,y,z)
def __sub__(self,other):
x = self.x - other.x
y = self.y - other.y
z = self.z - other.z
return Point(x,y,z)
def __mul__(self,other):
"""multiply Point() by float on right side"""
x = self.x * other
y = self.y * other
z = self.z * other
return Point(x,y,z)
def __rmul__(self,other):
return self.__mul__(other)
# def __div__(self,other):
def __truediv__(self,other):
"""divide Point() by float"""
x = self.x / other
y = self.y / other
z = self.z / other
return Point(x,y,z)
def dot(self,other):
"""dot product between two vectors"""
x = self.x * other.x
y = self.y * other.y
z = self.z * other.z
return x+y+z
def cross(self,other):
"""cross product between two vectors"""
x = self.y*other.z - self.z*other.y
y = self.z*other.x - self.x*other.z
z = self.x*other.y - self.y*other.x
return Point(x,y,z)
def rotatearound(self,n,t):
"""Rodrigues' rotation formula"""
#vrot = self*math.cos(t) + n.cross(self)*math.sin(t) + n*n.dot(self)*(1-math.cos(t))
vrot = self*math.cos(t) + n.cross(self)*math.sin(t) + n*n.dot(self)*(2*math.sin(t/2)**2) #more stable according to Yoh
return vrot
def normalize(self):
"""normalize vector"""
normalized_self = self / norm(self)
return normalized_self
def norm(self):
norm=math.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
return norm
def rotatepoints(pointlist,n,vrotate,valign):
"""rotates points around n
aligns vrotate vector with valign vector"""
if norm(n) == 0.0:
if valign.x < 0.0:
theta = math.pi #180 degrees
else:
theta = 0.0
else:
n = normalize(n) #normalize
#define angle of rotation as angle between top and valign
#theta = acos( a.b / ||a||*||b|| )
theta = vrotate.dot(valign) / (norm(vrotate)*norm(valign))
theta = math.acos(theta)
print('by theta: {0:4.4f}'.format(theta))
#Rotate tetrahedron to point along bond
newlist = pointlist
for i, point in enumerate(newlist):
#rotate around n by theta
pointlist[i] = point.rotatearound(n,theta)
return pointlist
def translatepoints(pointlist,center):
"""translates points by center vector"""
newlist = pointlist
for i, point in enumerate(newlist):
#translate to atom center
pointlist[i] = point + center
return pointlist
| UTF-8 | Python | false | false | 3,092 | py | 11 | pointclass.py | 10 | 0.554981 | 0.546248 | 0 | 106 | 28.160377 | 126 |
ArkadiuszMichalRys/bug-compare | 16,827,681,884,382 | c99e40c9c8244ef5c6f58637f1f3e2c4702ad358 | 59fb1f85bb84c4106d58c3cae4654ee78d335b94 | /processor/processor/workers/evaluator.py | 19079260cc6ec7416db18da8a5f779464d9f9ec5 | [] | no_license | https://github.com/ArkadiuszMichalRys/bug-compare | b1902f0165336065da88be2a38ba0422fd62b1d1 | 1e5fe88645f09ef89bb20d2796b473011c83f777 | refs/heads/master | 2020-04-24T05:50:30.944026 | 2019-08-30T19:14:48 | 2019-08-30T19:14:48 | 171,743,887 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import json
from collections import defaultdict
from pathlib import Path
from statistics import median
from typing import List
from processor.workers.worker import Worker
from processor.util import increase_csv_field_size_limit
class Evaluator(Worker):
""""""
def __init__(self, settings: dict, base_directory: Path = Path(__file__).resolve().parent.parent.parent):
""""""
super().__init__(settings, base_directory)
# Input
self._in_path = base_directory.joinpath("data").joinpath(settings["input"])
self._in_path_type = settings["input"].split(".")[-1].lower()
self._annotations_path = base_directory.joinpath("data").joinpath(settings["annotations"])
self._criteria = settings["criteria"]
def create_id_time_dict(self) -> dict:
""""""
id_time = dict()
with self._annotations_path.open() as in_file:
csv_in_reader = csv.DictReader(in_file)
for report in csv_in_reader:
id_time[report["id"]] = report["fix_time"]
return id_time
def parse_criteria(self) -> List:
""""""
options = []
for split in self._criteria:
options.append([split["operation"], split["value"], split["class"]])
return options
def compare_class(self, actual, predicted) -> str:
""""""
if actual == "slow" and predicted == actual:
return "TP"
if actual == "slow" and predicted != actual:
return "FP"
if actual == "fast" and predicted == actual:
return "TN"
if actual == "fast" and predicted != actual:
return "FN"
raise ValueError
def get_precision(self, confusion_matrix) -> float:
""""""
return confusion_matrix["TP"] / (confusion_matrix["TP"] + confusion_matrix["FP"])
def get_recall(self, confusion_matrix) -> float:
""""""
return confusion_matrix["TP"] / (confusion_matrix["TP"] + confusion_matrix["FN"])
def get_accuracy(self, confusion_matrix) -> float:
""""""
numerator = confusion_matrix["TP"] + confusion_matrix["TN"]
denominator = numerator + confusion_matrix["FN"] + confusion_matrix["FP"]
return numerator / denominator
def get_f_measure(self, precision, recall) -> float:
""""""
# TODO precision + recall can't be 0
return 2 * ((precision * recall) / (precision + recall))
def __to_csv(self):
""""""
increase_csv_field_size_limit()
id_time = self.create_id_time_dict()
confusion_matrix = defaultdict(int)
parsed_criteria = self.parse_criteria()
self._logger.info("Gathering statistics")
with self._in_path.open() as in_file:
csv_in_reader = csv.DictReader(in_file)
for classification in csv_in_reader:
report_id = classification["report_id"]
report_class = classification["class"]
actual_time = id_time[report_id]
actual_class = None
for operation, value, suffix in parsed_criteria:
if operation == ">=":
if float(actual_time) >= float(value):
actual_class = suffix
elif operation == "<":
if float(actual_time) < float(value):
actual_class = suffix
else:
self._logger.warning(f"Operation {operation} is not supported")
confusion_matrix[self.compare_class(actual_class, report_class)] += 1
precision = self.get_precision(confusion_matrix)
recall = self.get_recall(confusion_matrix)
accuracy = self.get_accuracy(confusion_matrix)
f = self.get_f_measure(precision, recall)
stats = {
"precision": precision,
"recall": recall,
"accuracy": accuracy,
"F": f,
"TP": confusion_matrix["TP"],
"FP": confusion_matrix["FP"],
"FN": confusion_matrix["FN"],
"TN": confusion_matrix["TN"],
}
with self._out_path.open('w') as out_file:
bug_writer = csv.DictWriter(out_file, fieldnames=stats)
bug_writer.writeheader()
bug_writer.writerow(stats)
self._logger.info(f"Saving to file: {self._out_path}")
def perform_activity(self):
""""""
if self._out_path_type != self._in_path_type:
self._logger.error(f"Input type and output type ({self._in_path_type}->{self._out_path_type}) don't match.")
return
if self._out_path_type == "json":
raise NotImplementedError
elif self._out_path_type == "csv":
self.__to_csv()
else:
self._logger.error(f"Output file type ({self._out_path_type}) is not supported.")
| UTF-8 | Python | false | false | 5,028 | py | 59 | evaluator.py | 23 | 0.547932 | 0.547136 | 0 | 132 | 37.090909 | 120 |
nw0428/Integer-Arithmetic | 18,906,446,045,106 | 8324a5792d76cf687785c469e3286aba1faa06df | a359677a11c69921bfe0e9238382773b0e639019 | /divs.py | bda1ce0097cba105079af96f71116fb116eedd10 | [] | no_license | https://github.com/nw0428/Integer-Arithmetic | 8caeb15e83bfdd31c05b20543f8ee9ce67bccb64 | b3925d691e28ab91e728105382779e247b80a4f8 | refs/heads/master | 2022-12-26T07:54:38.736183 | 2020-10-14T14:21:30 | 2020-10-14T14:21:30 | 304,038,899 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def div(x, y):
return x/y
def int_div(x, y):
return x //y
def mod(x, y):
return x % y | UTF-8 | Python | false | false | 93 | py | 4 | divs.py | 3 | 0.548387 | 0.548387 | 0 | 8 | 10.75 | 18 |
zhangleihong/pyQt5project | 6,253,472,390,617 | 970bbbcda7e572d2eec1c7bb5efc7d383fc41170 | fd61f78d7114cd52b54ffeebb8ea484a3901be55 | /数据分析/机器学习(算法篇)/2.1linear_grade.py | 2a990455b5f8a8fc5691a4a1af203913ffc25a91 | [] | no_license | https://github.com/zhangleihong/pyQt5project | 10c966ed5a3ed330ed3a64f1f2fa39f5e18e7726 | 89bb93b8c70f785d4d6bfa36aa2614698c819a0a | refs/heads/master | 2021-05-19T08:59:48.157153 | 2021-01-27T07:56:04 | 2021-01-27T07:56:04 | 251,617,073 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
@project:数据分析
@author:zlh
@file:2.1linear_grade.py
@time:2020/5/7 16:04
"""
'''
1.获取数据集
2.数据基本处理(该案例中省略)
3.特征工程(该案例中省略)
4.机器学习
5.模型评估(该案例中省略)
'''
from sklearn.linear_model import LinearRegression
# 平时成绩 期末成绩
x = [[80, 86],
[82, 80],
[85, 78],
[90, 90],
[86, 82],
[82, 90],
[78, 80],
[92, 94]]
y = [84.2, 80.6, 80.1, 90, 83.2, 87.6, 79.4, 93.4]
# 实例化API
estimator = LinearRegression()
# 使用fit方法进行训练
estimator.fit(x,y)
#线性回归系数 estimator.coef_
print("线性回归的系数是:\n",estimator.coef_)
# 打印预测结果
print("输出预测结果:\n",estimator.predict([[100, 80]]))
| UTF-8 | Python | false | false | 739 | py | 17 | 2.1linear_grade.py | 14 | 0.640367 | 0.499083 | 0 | 35 | 14.571429 | 50 |
Frankd35/JingXiShop | 13,297,218,783,218 | 4a34114b40c589a60bf8da26f3d30c1ac82b5fcb | 2ff7843847542e4f4e64e63aa2a8a9578f2c2c06 | /WebProject/goods/migrations/0003_category_comment.py | c5bfa932f26298bbf572d75b5957e6d9ca9110fe | [] | no_license | https://github.com/Frankd35/JingXiShop | 513872a3a3ee2b4a49a882d3e39549663311979b | 71a494f7cf80024baad3e05313ae43be5d770274 | refs/heads/main | 2023-08-16T21:00:21.230243 | 2021-10-12T15:14:12 | 2021-10-12T15:14:12 | 397,312,750 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.6 on 2021-08-23 02:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0002_alter_goods_price'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('state', models.IntegerField(blank=True, default=0)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField()),
('goods_id', models.IntegerField()),
('text', models.TextField()),
('mark', models.DecimalField(blank=True, decimal_places=2, default=5.0, max_digits=10)),
('createtime', models.DateField(auto_now=True)),
],
),
]
| UTF-8 | Python | false | false | 1,133 | py | 55 | 0003_category_comment.py | 34 | 0.543689 | 0.518976 | 0 | 32 | 34.40625 | 117 |
lmwgv/advent | 14,336,600,862,504 | 1af9be65efb15cafee654aa228cb58a2547ff840 | c80bd50e0515ca6852f3b74371c1fc661b33d151 | /advent07.py | 7cb8bb70e58873b0eeecf6d85417db4f96f57fdb | [] | no_license | https://github.com/lmwgv/advent | e5d93ef43c85425a837c7a24818a6592ae796708 | 0effc7973c7b5fa10fc2b1570a2b881b626a4e83 | refs/heads/master | 2021-01-11T06:37:55.637394 | 2017-02-22T19:39:13 | 2017-02-22T19:39:13 | 81,434,937 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | if __name__ == '__main__':
ips_supporting_tls = 0
ips_supporting_ssl = 0
with open('advent07.in', 'r') as f:
for line in f.readlines():
abas_in = []
abas_out = []
abba_out = False
abba_in = False
inside_brackets = False
for c in range(len(line) - 2):
if line[c] == '[':
inside_brackets = True
if line[c] == ']':
inside_brackets = False
if c + 3 < len(line) and line[c] == line[c + 3] and line[c + 1] == line[c + 2] and line[c] != line[c + 1]:
if inside_brackets:
abba_in = True
else:
abba_out = True
if line[c] == line[c + 2] and line[c] != line[c + 1]:
if inside_brackets:
abas_in.append((line[c], line[c + 1]))
else:
abas_out.append((line[c + 1], line[c]))
for aba in abas_in:
if aba in abas_out:
ips_supporting_ssl += 1
print(str(aba) + ' ' + line[:-1])
break
if abba_out and not abba_in:
ips_supporting_tls += 1
print(str(ips_supporting_tls))
print(str(ips_supporting_ssl))
| UTF-8 | Python | false | false | 1,408 | py | 12 | advent07.py | 12 | 0.391335 | 0.379261 | 0 | 34 | 39.411765 | 122 |
RianneLam96/Thesis_Project_Crowd_Prediction | 7,215,545,075,444 | dd46f29b3bed3007b9808d7de2a2af59785d1055 | 1213b3720f187f058cf2ae7b53a74ea1fd4bcbce | /code/preprocessing.py | 7b5780882781be8f45bacf2c36f4c86fe88c3ac4 | [] | no_license | https://github.com/RianneLam96/Thesis_Project_Crowd_Prediction | 2a459b7368b0a7f77f00d8766b53f384f5aa993f | 165073441b86c0ad98d8d289a4cfc6d9a860d5cd | refs/heads/master | 2023-06-09T07:36:37.403527 | 2021-07-06T12:08:36 | 2021-07-06T12:08:36 | 382,340,967 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # General
import pandas as pd
import geopandas as gpd
import numpy as np
import os
from sqlalchemy import create_engine, inspect
# Dates
from datetime import datetime, timedelta, date
import datetime as dt
import pytz
# Cleaning
from scipy.optimize import curve_fit
from sklearn import preprocessing, svm
### FUNCTIONS - pre-processing
def get_start_learnset(train_length = None, date_str = None):
"""
Get a datetime string for the starting date for training
One of:
train_length: integer indicating the number of weeks used for training
date_str: str indicating a starting date (in the format "2020-01-01 00:00:00")
"""
if train_length:
date_time_current = datetime.now()
start_learnset = date_time_current - dt.timedelta(days = train_length*7)
elif date_str:
start_learnset = pd.to_datetime(date_str)
return start_learnset
def prepare_engine(cred):
'''
Prepare engine to read in data.
cred: the env object with credentials.
Returns the engine and the table names.
'''
# Create engine object
engine_azure = create_engine() # DB information removed in public version
# Check available tables
inspector = inspect(engine_azure)
table_names = []
for table_name in inspector.get_table_names('ingested'):
table_names.append(table_name)
return engine_azure, table_names
def get_data(engine, table_name, names, start_learnset):
"""
Read in data source from the database.
table_name: name of the database table
names: list of one or more location names (have to match location_name column in table); or 'all' for all locations
start_learnset: date indicating the moment to begin using data for training.
"""
# Read in data from two weeks before starting date learn set (necessary for lag features)
start_date = start_learnset - dt.timedelta(days = 14)
start_date = start_date.strftime("%Y-%m-%d %H:%M:%S")
# Select all locations
if names == 'all':
query = "SELECT * FROM " + table_name + " WHERE measured_at >= '" + start_date + "' LIMIT 3000000"
df_raw = pd.read_sql_query(query, con = engine)
# Select one location
elif isinstance(names, str):
query = "SELECT * FROM " + table_name + " WHERE location_name = '" + names + "' AND measured_at >= '".format(names) + start_date + "' LIMIT 3000000"
df_raw = pd.read_sql_query(query, con = engine)
# Select locations out of list of location names
else:
names = tuple(names)
query = "SELECT * FROM " + table_name + " WHERE location_name IN {} AND measured_at >= '".format(names) + start_date + "' LIMIT 3000000"
df_raw = pd.read_sql_query(query, con = engine)
return df_raw
def preprocess_resono_data(df_raw, freq, end_prediction):
"""
Prepare the raw resono data for modelling.
"""
# Drop duplicates
df = df_raw.copy()
df = df.drop_duplicates()
# Fix timestamp
df["datetime"] = pd.to_datetime(df["measured_at"])
df = df.sort_values(by = "datetime", ascending = True)
# Wide format
df = df.pivot_table(index = ["datetime"], columns = "location_name", values = "total_count").reset_index()
df = df.set_index('datetime')
# Change column names
df.rename(columns = {'location_name': 'location'})
# Set right sampling frequency
idx = pd.date_range(df.index[0], end_prediction, freq = freq)
df = df.reindex(idx) # Any new samples are treated as missing data
return df
def preprocess_covid_data(df_raw, freq, end_prediction):
"""
Prepare the raw covid stringency data for modelling.
"""
# Put data to dataframe
df_raw_unpack = df_raw.T['NLD'].dropna()
df = pd.DataFrame.from_records(df_raw_unpack)
# Add datetime column
df['datetime'] = pd.to_datetime(df['date_value'])
# Select columns
df_sel = df[['datetime', 'stringency']]
# extend dataframe to 14 days in future (based on latest value)
dates_future = pd.date_range(df['datetime'].iloc[-1], periods = 14, freq='1d')
df_future = pd.DataFrame(data = {'datetime': dates_future,
'stringency': df['stringency'].iloc[-1]})
# Add together and set index
df_final = df_sel.append(df_future.iloc[1:])
df_final = df_final.set_index('datetime')
# Set right sampling frequency
idx = pd.date_range(df_final.index[0], end_prediction, freq = freq)
df_final = df_final.reindex(idx)
# Fill missing values with nearest value
df_final = df_final.fillna(method = "ffill")
df_final = df_final.fillna(method = "bfill")
return df_final
def preprocess_holidays_data(holidays, freq, end_prediction):
"""
Prepare the raw holiday data for modelling.
"""
# Put in dataframe
holiday_df = pd.DataFrame(holidays).rename(columns = {0: 'date', 1: 'holiday'})
# Create datetime index
holiday_df['datetime'] = pd.to_datetime(holiday_df['date'])
holiday_df = holiday_df.set_index('datetime')
# Create dummy variable
holiday_df['holiday_dummy'] = 1
holiday_df_d = holiday_df.resample('1d').asfreq() # dataframe with all days
holiday_df_d['holiday_dummy'] = holiday_df_d['holiday_dummy'].fillna(0)
holiday_df_d['holiday_dummy'] = holiday_df_d['holiday_dummy'].astype(int)
# Select column
holiday_df_d = holiday_df_d[['holiday_dummy']]
# Set right sampling frequency
idx = pd.date_range(holiday_df_d.index[0], end_prediction, freq = freq)
holiday_df_d = holiday_df_d.reindex(idx)
# Fill missing values with nearest value
holiday_df_d = holiday_df_d.fillna(method = "ffill")
# set back to right dtype
holiday_df_d['holiday_dummy'] = holiday_df_d['holiday_dummy'].astype(int)
return holiday_df_d
def get_crowd_levels(df, Y_name, thresholds_all = None, thresholds_one = None):
'''
Recalculate the crowd levels based on the thresholds from the static Resono/CMSA table.
'''
df_level = df.copy()
# Re-calculate crowd levels
if thresholds_all is not None:
th = thresholds_all[Y_name]
elif thresholds_one is not None:
th = thresholds_one
# If the scaled thresholds are identical (no samples with crowd level 1.0), only use the first threshold
if (th[1] == th[2]) | (len(th) == 3):
if th[1] == th[2]:
th.pop(2)
labels = [-1.0, 0.0]
df_level[Y_name] = pd.cut(df_level[Y_name], bins = th, labels = labels)
else:
labels = [-1.0, 0.0, 1.0]
df_level[Y_name] = pd.cut(df_level[Y_name], bins = th, labels = labels)
df_level[Y_name] = df_level[Y_name].astype('category')
return df_level
### FUNCTIONS - cleaning
def clean_data(df, target, Y_name, n_samples_day, cols_to_clean, outlier_removal, nu = 0.2, gamma = 0.1):
"""
Clean data by imputing missing values and removing outliers (replacing with interpolation/extrapolation).
Days that are fully missing are dropped from the dataframe (to prevent strange interpolation results).
cols_to_clean: column names of columns to clean
outlier_removal: "yes" or "no"
# nu: value for nu parameter for outlier removal model (default is 0.2)
# gamma: value for gamma parameter for outlier removal model (default = 0.1)
Returns the dataframe with missing values interpolated and outliers replaced.
"""
# Initialize data frame to clean
df_to_clean = df.copy()
# If target variable is count, add to columns to clean
if cols_to_clean is not None:
cols = cols_to_clean.copy()
if target == 'count':
cols.append(Y_name)
else:
cols = Y_name
# Define date column to group on
df_to_clean['date'] = df_to_clean.index.date
# Find missing days for target column
dates_to_drop = []
# Index of fully missing days
day_missing_idx = np.where(df_to_clean[Y_name].isnull().groupby(df_to_clean['date']).sum() == n_samples_day)[0]
# Find the dates
dates_to_drop.extend(df_to_clean['date'].unique()[day_missing_idx])
# Select columns to impute
df_to_impute = df_to_clean[[cols]]
# First interpolate/extrapolate missing values
df_to_impute = interpolate_ts(df_to_impute)
df_to_impute = extrapolate_ts(df_to_impute)
if outlier_removal == "yes":
# Outlier detection and replacement
SVM_models = create_SVM_models(df_to_impute)
df_to_impute = SVM_outlier_detection(df_to_impute, SVM_models)
df_to_impute = interpolate_ts(df_to_impute)
df_to_impute = extrapolate_ts(df_to_impute)
# Put cleaned variables in data frame
df_cleaned = df_to_clean.copy()
df_cleaned[cols] = df_to_impute.copy()
df_cleaned.index.name = 'datetime'
# Drop the dates that are fully mising
df_cleaned = df_cleaned[~(df_cleaned['date'].isin(dates_to_drop))]
# Drop date column
df_cleaned = df_cleaned.drop('date', 1)
# Use forward fill imputation if the target is categorical
if target == "level":
df_cleaned[Y_name] = df_cleaned[Y_name].fillna(method = "ffill")
return df_cleaned
def interpolate_ts(df, min_samples = 2):
"""
Interpolate missing values.
df: dataframe to interpolate
min_samples: minimum number of samples necessary to perform interpolation (default = 2)
Returns the dataframe with missing values interpolated.
"""
# Initialize new dataframe
df_ip = df.copy()
# For each location
for idx, location in enumerate(df.columns):
# Initialize new location data
ts = df.iloc[:, idx]
ts_ip = ts.copy()
# Only interpolate if there are enough data points
if ts_ip.count() >= min_samples:
# Interpolate missing values
ts_ip = ts_ip.interpolate(method = 'cubicspline', limit_area = 'inside')
df_ip.iloc[:, idx] = ts_ip
# No negative values for counts
df_ip = df_ip.clip(lower = 0)
return df_ip
def extrapolate_ts(df, min_samples = 1):
"""
Extrapolate missing values.
df: dataframe to extrapolate
min_samples: minimum number of samples necessary to perform extrapolation (default = 1)
Returns the dataframe with missing values extrapolated.
"""
# Initialize new dataframe
df_ep = df.copy()
# For each location
for idx, location in enumerate(df.columns):
# Initialize new location data
ts = df.iloc[:, idx]
ts_ep = ts.copy()
# Only extrapolate if there are enough data points
if ts_ep.count() >= min_samples:
# Temporarily remove dates and make index numeric
index = ts.index
ts_temp = ts_ep.reset_index()
ts_temp = ts_temp.iloc[:, 1]
# Function to curve fit to the data (3rd polynomial)
def func(x, a, b, c, d):
return a * (x ** 3) + b * (x ** 2) + c * x + d
# Initial parameter guess, just to kick off the optimization
guess = (0.5, 0.5, 0.5, 0.5)
# Create copy of data to remove NaNs for curve fitting
ts_fit = ts_temp.dropna()
# Curve fit
x = ts_fit.index.astype(float).values
y = ts_fit.values
# Curve fit column and get curve parameters
params = curve_fit(func, x, y, guess)
# Get the index values for NaNs in the column
x = ts_temp[pd.isnull(ts_temp)].index.astype(float).values
# Extrapolate those points with the fitted function
ts_temp[x] = func(x, * params[0])
# Put date index back
ts_temp.index = index
ts_ep = ts_temp.copy()
df_ep.iloc[:, idx] = ts_ep
# No negative values for counts
df_ep = df_ep.clip(lower = 0)
return df_ep
def create_SVM_models(df, min_samples = 1, nu = 0.05, gamma = 0.01):
"""
Create one-class SVM model for each variable to perform outlier removal.
Code adapted from: https://github.com/kdrelczuk/medium/blob/master/anomalies_local.py
# df: data frame to perform outlier removal on
# min_samples: minimum number of samples needed (for each variable) to create a SVM model (default = 1)
# nu: value for nu parameter (default is 0.05)
# gamma: value for gamma parameter (default = 0.01)
# Returns a list of SVM models for each variable.
"""
# Initialize list of models for each location
SVM_models = []
# For each location
for idx, location in enumerate(df.columns):
# Select location and fit one-class SVM
ts = df.iloc[:, idx]
# Only create a model if there are enough data points
if ts.count() >= min_samples:
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
model = svm.OneClassSVM(nu = nu, kernel = "rbf", gamma = gamma)
model.fit(ts_scaled)
# Save the model
SVM_models.append(model)
# Otherwise add None to the list of models
else:
SVM_models.append(None)
return SVM_models
def SVM_outlier_detection(df, SVM_models):
"""
Detects outliers for each variable in the dataframe.
Code adapted from: https://github.com/kdrelczuk/medium/blob/master/anomalies_local.py
df: dataframe to apply outlier detection on using SVM models
SVM_models: list of SVM models (one for each variable)
Returns a dataframe with the outliers replaced by NaNs.
"""
# Initialize new dataframe
df_detected = df.copy()
# For each location
for idx, location in enumerate(df.columns):
# Initialize new location data
ts = df.iloc[:, idx]
ts_detected = ts.copy()
# Detect outliers using the one-class SVM model for this location
if SVM_models[idx] is not None and ts.isnull().sum().sum() == 0:
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
pred = SVM_models[idx].predict(ts_scaled)
to_idx = lambda x: True if x==-1 else False
v_to_idx = np.vectorize(to_idx)
outliers_idx = v_to_idx(pred)
# Set outliers to NaN
# If not all data points have been marked as outliers; otherwise do not clean
if outliers_idx.sum() != len(ts_detected):
ts_detected[outliers_idx] = np.nan
df_detected.iloc[:, idx] = ts_detected
df_detected.index.name = 'datetime'
return df_detected
def get_train_df(df, Y_name, start_prediction):
"""
Split dataframe into X and y sets for training data
"""
df_X_train = df.drop(Y_name, 1)[:start_prediction].iloc[:-1]
df_y_train = df[[Y_name]][:start_prediction].iloc[:-1]
return df_X_train, df_y_train
def get_future_df(start_pred, predict_period, freq):
"""
Create empty data frame for predictions of the target variable for the specfied prediction period
"""
datetime_predict = pd.date_range(start_pred, periods = predict_period, freq = freq)
df = pd.DataFrame(data = {'datetime' : datetime_predict}).set_index('datetime')
return df
def add_time_variables(df):
"""
Create dummy variables from weekday, weekend and hour and add these to the dataframe.
Also, add cos and sine times
"""
df = df.reset_index()
# add weekday and hour dummies
df['weekday'] = pd.Categorical(df['datetime'].dt.weekday)
df['hour'] = pd.Categorical(df['datetime'].dt.hour)
weekday_dummies = pd.get_dummies(df[['weekday']], prefix='weekday_')
hour_dummies = pd.get_dummies(df[['hour']], prefix='hour_')
df_time = df.merge(weekday_dummies, left_index = True, right_index = True)
df_time = df_time.merge(hour_dummies, left_index = True, right_index = True)
# add cyclical time features
df_time['minutes'] = df_time['datetime'].dt.hour * 60 + df_time['datetime'].dt.minute
df_time['sin_time'] = np.sin(2 * np.pi * df_time['minutes'] / (24 * 60))
df_time['cos_time'] = np.cos(2 * np.pi * df_time['minutes'] / (24 * 60))
df_time = df_time.set_index('datetime')
return df_time
def add_lag_variables(df, Y_name, target, predict_period, n_samples_day, n_samples_week):
"""
Add lag variables (features that are lagged version of the target variable).
"""
df[Y_name + "_prev_2h"] = df[Y_name].shift(predict_period)
df[Y_name + "_prev_day"] = df[Y_name].shift(n_samples_day)
df[Y_name + "_prev_week"] = df[Y_name].shift(n_samples_week)
if target == 'count':
df[Y_name + "_prev_2h_mean_diff"] = df[Y_name + "_prev_2h"] - df[Y_name].mean()
df[Y_name + "_prev_2h_diff_size"] = df[Y_name] - df[Y_name].shift(predict_period+1)
return df
def scale_variables(df_unscaled, Y_name, target, method):
"""
Scale the variables and store the scaler object for the target variable.
method: "standard" or "minmax"
"""
if target == 'count':
# Select target variable
Y_unscaled = df_unscaled[Y_name].values
Y_unscaled = Y_unscaled.reshape(-1, 1)
# Select continuous columns (do not scale binary/category variables)
cont_idx = df_unscaled.select_dtypes('float').columns.tolist()
df_unscaled_cont = df_unscaled.loc[:, cont_idx]
# Standardization
if method == "standard":
scaler = preprocessing.StandardScaler().fit(df_unscaled_cont.values)
if target == "count":
# Store scaler object for target variable
Y_scaler = preprocessing.StandardScaler().fit(Y_unscaled)
# Min-max scaling
elif method == "minmax":
scaler = preprocessing.MinMaxScaler().fit(df_unscaled_cont.values)
if target == "count":
# Store scaler object for target variable
Y_scaler = preprocessing.MinMaxScaler().fit(Y_unscaled)
# Scale variables
df_scaled_cont = scaler.transform(df_unscaled_cont.values)
df_scaled = df_unscaled.copy()
df_scaled.loc[:, cont_idx] = df_scaled_cont
# Convert back to right format
df_scaled = pd.DataFrame(df_scaled, columns = df_unscaled.columns, index = df_unscaled.index)
df_scaled.index.name = 'datetime'
if target == "level":
Y_scaler = None
return df_scaled, Y_scaler
| UTF-8 | Python | false | false | 19,049 | py | 7 | preprocessing.py | 4 | 0.610583 | 0.603076 | 0 | 573 | 32.242583 | 156 |
Stebalien/Mist | 4,080,218,962,713 | 98f54754b5d2f64b0cbf5ef541efa0742a769f56 | b1943593888d5a99815b2b3e34c99ba682ffa115 | /bin/metaedit.py | e2d5e6636be65877e2fda8359cbb025f001eadb6 | [] | no_license | https://github.com/Stebalien/Mist | b3a1ab39a7fb26b663d09b2e55ac2ac956862254 | aa34d1488f196e53823a0df21ffb3f7fc3712fe3 | refs/heads/master | 2020-04-15T08:21:03.105938 | 2011-11-30T19:40:43 | 2011-11-30T19:40:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Metadata editor for Mist program. This is a wrapper class for the mutagen module."""
__author__ = "Jonathan Allen"
__all__ = ["FileError", "NonexistantFileError", "UnsupportedFiletypeError",
"EncodeInput", "EncodeOutput", "Metadata", "GENERAL_TAGNAMES"]
import mimetypes
import os
import types
from jonathan.dictools import *
#===============================================================================
# Constants
#===============================================================================
GENERAL_TAGNAMES = ["TITLE", "ALBUM", "ARTIST", "ARTISTSORTORDER", "PRODUCER",
"PERFORMER", "RECORDINGLABEL", "DATE", "GENRE", "LOCATION"]
#===============================================================================
# Exceptions
#===============================================================================
class FileError(Exception):
pass
class NonexistantFileError(FileError):
pass
class UnsupportedFiletypeError(FileError):
pass
#===============================================================================
# Decorators
#===============================================================================
def EncodeInput(function):
"""
Encodes values in specified metadata as ASCII. Ignores invalid characters.
"""
def wrapped(self, data):
for key, value in data.items():
try: value.decode('ascii')
except UnicodeDecodeError:
data[key] = value.encode('ascii', errors = 'ignore')
return function(self, data)
return wrapped
def EncodeOutput(function):
"""
Encodes output metadata as ASCII. Ignores invalid characters.
"""
def wrapped(*args, **kwargs):
output = function(*args, **kwargs)
for key, value in output.items():
try: value.decode('ascii')
except UnicodeDecodeError:
try: output[key] = value.encode('ascii', errors = 'ignore')
except: output[key] = "????"
return output
return wrapped
mimetypes.add_type('audio/mp4', '.m4a')
class Metadata:
"""
Class to handle reading and writing metadata for music files.
"""
def __init__(self, fileName):
self.fileName = fileName
self.fileType = mimetypes.guess_type(fileName)[0]
self.data = ExtendedDictionary()
self.failedReads = set()
self.failedWrites = set()
def __new__(cls, fileName):
if not os.path.exists(fileName): raise NonexistantFileError("{} could not be found.".format(fileName))
fileType = mimetypes.guess_type(fileName)[0]
from mp3metaedit import MP3Metadata
from standardmetaedit import StandardMetadata
output = {
'audio/mpeg': MP3Metadata,
'audio/ogg': StandardMetadata,
'audio/flac': StandardMetadata,
'audio/mp4': StandardMetadata
}
try:
return output[fileType](fileName)
except KeyError:
raise UnsupportedFiletypeError("cannot read metadata for filetype: <{}>.".format(fileType))
@EncodeOutput
def read(self, tagNames = [], ignore = []):
raise NotImplementedError()
def write(self, data):
raise NotImplementedError()
def truncateOutput(self, data):
"""
If data is a list, returns the first value. Otherwise simply returns the given
data.
"""
if isinstance(data, (types.ListType, types.TupleType)):
return bytes(data[0])
return bytes(data)
| UTF-8 | Python | false | false | 3,565 | py | 18 | metaedit.py | 17 | 0.53324 | 0.530715 | 0 | 106 | 32.632075 | 110 |
MiscCoding/gsp_web | 704,374,649,433 | 9828ca405d7b4253a29147f9b7c499668c79208b | 6e43937c521b841595fbe7f59268ffc72dfefa9d | /GSP_WEB/views/system/backup.py | b224a04664acc8d3bba67e2e97aabcd755c11f6d | [] | no_license | https://github.com/MiscCoding/gsp_web | a5e50ce7591157510021cae49c6b2994f4eaabbe | a24e319974021ba668c5f8b4000ce96d81d1483e | refs/heads/master | 2020-03-28T15:11:30.301700 | 2019-08-12T04:47:42 | 2019-08-12T04:47:42 | 148,565,440 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*- coding: utf-8 -*-
import datetime
from flask import request, Response, render_template, Blueprint, json, make_response, g, session
from GSP_WEB import login_required, db_session, app
from GSP_WEB.common.util.invalidUsage import InvalidUsage
from GSP_WEB.common.util.logUtil import logUtil
from GSP_WEB.common.util.textUtil import RepresentsInt
from GSP_WEB.models.IP_WhiteList import IP_WhiteList
from GSP_WEB.models.Account import Account
blueprint_page = Blueprint('bp_backup_page', __name__, url_prefix='/system')
@blueprint_page.route('/backup', methods=['GET'])
#@login_required
def config():
return render_template('system/backup.html') | UTF-8 | Python | false | false | 655 | py | 168 | backup.py | 124 | 0.769466 | 0.767939 | 0 | 18 | 35.444444 | 96 |
kimhtae/shop | 13,013,750,936,900 | c0f459293031dfd4ba6c10a1854575ef6da38f33 | 48d16bf3e3074937e16f9d95e847a5fd5f363cae | /final/baemin/forms.py | 40ce4b4b154e8b9ad272467c2f7d782788f7ee45 | [] | no_license | https://github.com/kimhtae/shop | 271d216a8eb640b832c44eac493a0e6f55dbdcf3 | b4f176900379fbac4eb81f1ec2bcb0258cde58f9 | refs/heads/master | 2020-03-21T17:02:55.506717 | 2018-07-04T23:09:47 | 2018-07-04T23:09:47 | 138,810,012 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import Order
# Create your models here.
class OrderForm(forms.ModelForm):
def __init__(self, shop, *args, **kwargs):
super().__init__(*args, **kwargs)
# 해당 시점의 상품목록만 보이도록 Filter
self.fields['item_set'].queryset = self.fields['item_set'].queryset.filter(shop=shop)
class Meta:
model = Order
fields = ('item_set',) | UTF-8 | Python | false | false | 396 | py | 10 | forms.py | 6 | 0.679348 | 0.679348 | 0 | 15 | 23.6 | 87 |
xuguoliang1995/leetCodePython | 5,652,176,977,294 | 41c340eb8e3d3f1b5c91f758bcbe8b3052197e61 | 8fc1a5f5c81df49ff5f8e7af956b516de32786a8 | /python_know/high/demo7.py | f7a485f66ffb90dc6895e966994d178d791e41d5 | [
"Apache-2.0"
] | permissive | https://github.com/xuguoliang1995/leetCodePython | 1f88d4dedf401fe5a1748b30ad878ce94655b317 | 9e4a96efd21506e8b0443a52be16c1280643b48c | refs/heads/master | 2020-07-26T16:33:58.022862 | 2019-11-20T07:00:19 | 2019-11-20T07:00:19 | 208,704,541 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 字符串知识
"""
1、ASCII标准是美国创建的,定义了从0到127的字符代码,允许每个字符存储在一个8位的字节中(实际上是127位)
2、每个字符一个字节不够,各种符号和重音字符并不在ASCII所定义的可能字符的范围中,所以使用0到255来表示字符
并且把128到255分配给特殊字符,这样的标准叫做Latin-I,广泛使用西欧地区。
3、一些字母表定义了如此多的字符,以至于无法把其中的每一个都表示成一个字节。Unicode考虑到更多的灵活性。
Unicode通常用在国际化的程序中,拥有比8位字符所能表示的更多的字符
4、 UTF-8编码,采用了可变的字节数的方案,小于128的字符代码表示为单个字节,128到0x7ff(2047)之间的代码
转换为两个字节,而每个字节拥有一个128到255之间的值,0x7ff代码转换为3个或者4个字节序列,每个字节序列在
128到255之间。
python2中使用
(1)str表示8位文本和二进制数据
(2)unicode用来表示宽字符Unicode文本
python3
(1)str表示Unicode文本(8位的和更宽的)
(2)bytes表示二进制数据(不可变不可修改的)
(3)bytearray 是一种可变的bytes类型。
二进制文件:以为二进制模式打开一个文件的时候,读取数据不会以任何方式解码它,
是直接返回其内容raw并且未经修改。
• str.encode()和bytes(S, encoding)把一个字符串转换为其raw bytes形式,并且
在此过程中根据一个str创建一个bytes。
• bytes.decode()和str(B, encoding)把raw bytes转换为其字符串形式,并且在此
过程中根据一个bytes创建一个str。
字符串格式化只对str有效,对bytes对象无效。
总结:• 对文本数据使用str;
• 对二进制数据使用bytes;
• 对想要原处修改的二进制数据使用bytearray。
"""
| UTF-8 | Python | false | false | 1,926 | py | 45 | demo7.py | 44 | 0.824324 | 0.755631 | 0 | 30 | 28.533333 | 62 |
GGiiuusseeppee/isde | 19,189,913,906,037 | ae6d36da376d9ab5e323e62e0a34e488e477ca43 | 5c056a5443273a7b8cb38c2edab1357cec628cac | /src2/timeslot/timeslot_2.py | f179d5b2814eaddb9d149418cf459ef6eda58865 | [
"Apache-2.0"
] | permissive | https://github.com/GGiiuusseeppee/isde | 7c02444581e124a5acd315966b0adb2315673f8b | a9603d8b8d1a347447cec483108132aa1e8457eb | refs/heads/master | 2023-08-27T19:55:57.178818 | 2021-10-21T15:31:07 | 2021-10-21T15:31:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class TimeSlot:
"""A class to store time slot"""
minutes_in_hour = 60
def __init__(self): # initialize an empty slot
self.timeslot = {'h': 0, 'm': 0}
# timeslot is an instance attribute
# (attribute of the object)
def set_h_m(self, h, m):
# set_h_m() is an instance method #(method of the object)
self.timeslot['h'] = h
self.timeslot['m'] = m
def set_m(self, m):
self.timeslot['h'] = int(m / self.minutes_in_hour)
self.timeslot['m'] = m % self.minutes_in_hour
def get_h_m(self):
return self.timeslot['h'], self.timeslot['m']
def get_m(self):
return self.timeslot['h'] * self.minutes_in_hour + self.timeslot['m']
t1 = TimeSlot() # t1 is an object
t1.set_h_m(2, 20)
print(t1.get_m()) # Expected value: 140
print(t1.get_h_m()) # Expected value: 2, 120
t1.set_m(140)
print(t1.get_m()) # Expected value: 140
print(t1.get_h_m()) # Expected value: 2, 120
| UTF-8 | Python | false | false | 970 | py | 67 | timeslot_2.py | 61 | 0.579381 | 0.546392 | 0 | 34 | 27.529412 | 77 |
abulovic/binner | 11,965,778,922,872 | 77f6d1cbdc96854db57ec0bfdddea3ce82466b62 | 913c80188ec68991fa929b995d8b34d909438eab | /solver/read2cds/Read2CDSSolver.py | 3d5b63391ba64fad27a2bb1bc38f68d99e6eaa31 | [] | no_license | https://github.com/abulovic/binner | 2360116a44a36fbefa7074169071f44897819530 | 470c9e66e50e7b51a0be40e2ea1b30df44e6eba6 | refs/heads/master | 2016-09-06T21:52:13.900983 | 2013-06-16T16:57:59 | 2013-06-16T16:57:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # @author: Martin Sosic, sosic.martin@gmail.com
class Read2CDSSolver (object):
""" Class that decides for each read to which CDS does it belong (to which CDS does it map).
It operates on CdsAlnContainer -> it uses data from it and modifies it.
Method map_reads_2_cdss() must be called first.
"""
def __init__(self):
"""
(CdsAlnContainer) _cds_aln_container It contains list of all CDSs. Solver operates upon it.
"""
self._cds_aln_container = None
# Should be overrided and called with super() at the beginning of overriding method.
def map_reads_2_cdss(self, cds_aln_container):
""" Main method of solver: it decides for each read to which cds does it belong(map).
When method is finished CDS alignment container will be modified in such way that
there will not be two CDSs that have same read assigned (each read will be assigned to exactly one CDS).
Read is considered to be assigned to CDS if it is activated (attribute active) in cds alignment of that CDS.
@param (CdsAlnContainer) cdsAlnContainer Reference to container is stored in object to be used for possible updating.
"""
self._cds_aln_container = cds_aln_container
# Should be overrided and called with super() at the beginning of overriding method.
def remove_cds_and_remap_reads(self, cds_aln):
""" Called after map_reads_2_cdss.
It remaps active reads from given cds alignment to alternative cds alignments.
After that it deletes given cds alignment from cds alignment container.
Each read is activated in some alternative cds alignment (if there is one).
If read has no alternative cds alignments then it will not be maped anywhere.
Works upon cds alignment container that was given in map_reads_2_cdss(),
which will be modified.
@param (CdsAlignment) cds_aln
@return ({read_id:CdsAlignment}) Dictionary where key is read_id and value is
cds alignment to which it maps.
If it does not map anywhere then value is None.
"""
if (self._cds_aln_container == None):
raise Exception ("Cds alignment container was not specified! Have you executed map_reads_2_cdss?")
@staticmethod
def test_cds_alignment_container_consistency(cds_aln_container):
""" This function is intended to be used for testing purposes.
It should be run after execution of map_reads_2_cdss() or
after execution of remove_cds_and_remap_reads in order to test
cds alignment container for consistency.
Cds alignment container is considered to be consistent (after execution
of map_reads_2_cdss()) if each read is active in (mapped to) at most
one cds alignment. Also, if cds alignment contains read (active or not),
that cds must be element of read2cds for that read and vice versa.
@param (CdsAlnContainer) cds_aln_container Container produced using map_reads_2_cdss().
@return (boolean) True if test passed, False otherwise.
"""
active_read_ids = set()
for cds_aln in cds_aln_container.cds_repository.values():
for aln_reg in cds_aln.aligned_regions.values():
if aln_reg.active:
# Check if it is active in some other cds.
if aln_reg.read_id in active_read_ids: return False
else: active_read_ids.add(aln_reg.read_id)
# Check if there is mapping in read2cds.
if not(cds_aln in cds_aln_container.read2cds[aln_reg.read_id]):
return False
# For each mapping in read2cds check if there is mapping in cds_repository.
for (read_id, cds_alns) in cds_aln_container.read2cds.items():
for cds_aln in cds_alns:
try:
if not(read_id in cds_aln_container.cds_repository[cds_aln.cds].aligned_regions.keys()):
return False
except KeyError:
return False
return True
| UTF-8 | Python | false | false | 4,180 | py | 34 | Read2CDSSolver.py | 31 | 0.641627 | 0.638278 | 0 | 75 | 54.72 | 125 |
sidhuking07/quick-attendance | 8,727,373,584,377 | 891797aedde989e82d267348f761161650fe3a40 | 89f4fc7e44f4ea5042c1f07fc23c9b8c442ab7d4 | /projecting_faces.py | 1b69c4daab960b07a0b6a003b36314d98b2eab47 | [] | no_license | https://github.com/sidhuking07/quick-attendance | bcd5c8a31c4b0603be2da01c60cccebe243fa3b1 | 2bd5c9180cdc518c546d15ffd95ba35dc10dcfe2 | refs/heads/master | 2021-08-31T20:05:35.992362 | 2017-12-22T17:47:54 | 2017-12-22T17:47:54 | 111,775,688 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import dlib
import cv2
import openface
predictor_model = "shape_predictor_68_face_landmarks.dat"
file_name = sys.argv[1] #take input from command
face_detector = dlib.get_frontal_face_detector() #create HOG face detector from dlib
face_pose_predictor = dlib.shape_predictor(predictor_model)
face_aligner = openface.AlignDlib(predictor_model) #use AlignDlib from openface for alignment
file_name = sys.argv[1] #take the input from command
image = cv2.imread(file_name) #load image as array
detected_faces = face_detector(image, 1) #run HOG face detector
# Loop through each face we found in the image
for i, face_rect in enumerate(detected_faces):
pose_landmarks = face_pose_predictor(image, face_rect) #get the pose
alignedFace = face_aligner.align(534, image, face_rect, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE) #perfrom alignment
cv2.imwrite("aligned_face_{}.jpg".format(i), alignedFace) #save the aligned image
| UTF-8 | Python | false | false | 955 | py | 6 | projecting_faces.py | 5 | 0.771728 | 0.760209 | 0 | 28 | 33.107143 | 132 |
Akpanwal2001/CodeAddiction | 11,416,023,120,091 | f4b7271f5102778f6ea96aa85e0deb6461664b7d | 6ecf369f015241d0265d05b6d0b9e676fb2d45ff | /blog/views.py | f30a7b1b31f1c892c63f75a462a9f8e9470d6fe6 | [] | no_license | https://github.com/Akpanwal2001/CodeAddiction | 404c5dd536e249384f8038a57d6d52a277481c6f | 5b8bce2d63b651b522e74332e62ffe0028b21273 | refs/heads/master | 2023-06-20T06:46:12.256762 | 2021-07-16T13:11:10 | 2021-07-16T13:11:10 | 386,641,810 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Post
# all Blogs page function
def blog(request):
allPosts = Paginator(Post.objects.all().order_by('-datetime'), 2)
page = request.GET.get('page')
try:
Posts = allPosts.page(page)
except PageNotAnInteger:
Posts = allPosts.page(1)
except EmptyPage:
Posts = allPosts.page(allPosts.num_pages)
context = {'allPosts' : Posts}
return render(request, 'blog.html', context)
# Blogpost page function
def blogpost(request, slug):
post = Post.objects.filter(slug = slug).first()
post.view = post.view + 1
post.save()
context = { 'post' : post }
return render(request, 'blogpost.html', context)
| UTF-8 | Python | false | false | 795 | py | 19 | views.py | 15 | 0.674214 | 0.67044 | 0 | 26 | 29.538462 | 72 |
DamirNesimi/min_max.py | 2,276,332,668,092 | bb85a2d2dbbb434bf4c0bd44a0147ca84e83104d | 701d1ce081e43d8f1469666540a6c5104fd26d99 | /min_max.py | c8f5bc078d266adcfe72e3491aa0befcdca1858c | [] | no_license | https://github.com/DamirNesimi/min_max.py | 6ea9916ed296a16cea5ab63df7267995c16ba963 | e7c8fe8400b36621e2275649ceac15286927a903 | refs/heads/master | 2022-12-20T03:26:27.246955 | 2020-09-27T14:15:40 | 2020-09-27T14:15:40 | 299,046,625 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Dette programmet bruker funksjonene egen_max og egen_min som beregner maximum og minimum av to tall
skrevet inn av en bruker.
27.09.2020
"""
# funksjon som returnerer den største verdien av tall_1 og tall_2
def egen_max(tall_1, tall_2):
max = (tall_1 + tall_2 + abs(tall_1 - tall_2))//2
return max
# funksjon som returnerer den minste verdien av tall_1 og tall_2
def egen_min(tall_1, tall_2):
min = (tall_1 + tall_2 - abs(tall_1 - tall_2))//2
return min
# variabler som lagrer verdien fra input av bruker
tall_1 = input("Tall 1: ")
tall_2 = input("Tall 2: ")
# verdien konverteres til heltallsverdi
tall_1_int = int(tall_1)
tall_2_int = int(tall_2)
# output til skjerm
print()
print("egen_max(" + tall_1 + ", " + tall_2 + ") = " + str(egen_max(tall_1_int, tall_2_int)))
print("max(" + tall_1 + ", " + tall_2 + ") = " + str(max(tall_1_int, tall_2_int)))
print()
print("egen_min(" + tall_1 + ", " + tall_2 + ") = " + str(egen_min(tall_1_int, tall_2_int)))
print("min(" + tall_1 + ", " + tall_2 + ") = " + str(min(tall_1_int, tall_2_int)))
| UTF-8 | Python | false | false | 1,065 | py | 1 | min_max.py | 1 | 0.62406 | 0.577068 | 0 | 33 | 31.242424 | 99 |
NBGodlike/dz | 11,269,994,232,089 | 3df8e0b35c592268c5427041349ddf1cbbab00ba | d8335ac15d7cbe3ad6f87e8cabfa48124aa60d61 | /users/migrations/0005_course_picture.py | 4dbcc82eee8dcbaec49e22b8c9830cc69158938b | [] | no_license | https://github.com/NBGodlike/dz | a5d56bc4b7a190e1ca75f2f3c04ed2dc6d646cd6 | 7dc67aedb373411ca2637a38622f97d0bf97889a | refs/heads/master | 2021-04-29T02:56:39.502536 | 2017-01-05T08:39:52 | 2017-01-05T08:39:52 | 78,049,378 | 0 | 1 | null | false | 2017-03-16T03:23:52 | 2017-01-04T20:14:27 | 2017-01-05T08:31:34 | 2017-01-10T14:46:51 | 4,013 | 0 | 0 | 0 | CSS | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 19:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20161231_2028'),
]
operations = [
migrations.AddField(
model_name='course',
name='Picture',
field=models.TextField(max_length=255, null=True),
),
]
| UTF-8 | Python | false | false | 460 | py | 23 | 0005_course_picture.py | 14 | 0.595652 | 0.517391 | 0 | 20 | 22 | 62 |
nkyllonen/ACVC | 14,345,190,794,352 | 211810653fc2266d120652706553277fb7eb0e83 | 68f651118c0e6363163cc970ed650a86a9cc5f77 | /DecisionMaker.py | 5025df4a8b4f50e4ee0678aa862c75b3e9ef0e82 | [] | no_license | https://github.com/nkyllonen/ACVC | 784d8d2543238d7a56b000dfbc05796e29e34ba4 | f2f675080d5716f1cbf45a40edcf0ed169b9c4f2 | refs/heads/master | 2022-12-10T06:30:49.977124 | 2020-05-11T16:02:44 | 2020-05-11T16:02:44 | 256,346,234 | 0 | 1 | null | false | 2022-12-08T09:41:18 | 2020-04-16T22:43:00 | 2020-05-11T16:02:56 | 2022-12-08T09:41:17 | 13,850 | 0 | 1 | 2 | Python | false | false | '''
DecisionMaker: module for deciding potential answers
Alex Berg and Nikki Kyllonen
'''
from __future__ import print_function
import State
import string, nltk, random
from nltk.corpus import stopwords
nltk.download("stopwords")
from textwrap import wrap
from terminaltables import AsciiTable
## GLOBAL VARIABLES ##
punc = set(string.punctuation)
engStopWords = set(stopwords.words('english'))
## HELPER FUNCTIONS ##
def clean_string(s):
""" Remove double spaces, stopwords, and all punctuation """
    s = s.replace("  ", " ")
words = [ w for w in s.split(" ") if w not in engStopWords ]
s = " ".join(words)
return "".join( ch.lower() for ch in s if ch not in punc )
## MODULE FUNCTIONS ##
def get_possible_words(corpus, wordLen, wordHint):
""" Construct list of possible word matches """
if State.METRIC == State.Metric.JACCARD:
possible = use_jaccard_metric(corpus, wordLen, wordHint)
elif State.METRIC == State.Metric.COSINE:
# TODO: cosine similarity metric
possible = [("example", 0.5, "some def")]
# sort and only keep the top 10 possible words
possible = sorted(possible, key = lambda x : x[1], reverse=True)[:10]
return possible
def use_jaccard_metric(corpus, wordLen, wordHint):
""" Use brute force with jaccard similarity metric """
possible = []
for word in list(corpus.keys()):
if len(word) == wordLen:
maxJaccard = 0
maxJaccardString = ""
for val in corpus[word]:
m = max(maxJaccard, jaccard(wordHint, val))
if (m != maxJaccard):
maxJaccardString = val
maxJaccard = m
if maxJaccard > 0:
possible.append((word, maxJaccard, maxJaccardString))
return possible
def jaccard(query1, query2):
""" Calculate the jaccard value between two inputs """
q1 = set(clean_string(query1).split(" "))
q2 = set(clean_string(query2).split(" "))
return len(q1.intersection(q2)) / len(q1.union(q2))
def average_sentence_vec(words):
""" """
# TODO: calc sentence feature vector
return ""
def evaluate_corpus(corpus, golden):
""" Evaluate the given corpus against given golden standard """
withinCorrectWords = []
withinIncorrectWords = []
withoutNum = 0
# Randomly sample golden corpus
sampleWords = random.sample(golden.keys(), State.SAMPLES)
if State.DEBUG:
print("[DEBUG] Using sample words:" , sampleWords)
for answer in sampleWords:
clue = random.choice(golden[answer])
possibleWords = get_possible_words(corpus, len(answer), clue)
# Check if possible words contains correct answer
check = list(filter( lambda x : x[0] == answer , possibleWords ))
if len(check) > 0:
# Store corresponding result values + golden hint + max jaccard of all results
result = list(check[0])
result.append(clue)
result.append(possibleWords[0][1])
# Calculate jaccard distance
d = float(possibleWords[0][1]) - float(result[1])
result.append(d)
withinCorrectWords.append(result)
if State.DEBUG:
print("[DEBUG]" , answer , "CORRECT" , result)
else:
# Check if answer is in corpus
if answer in corpus.keys():
withinIncorrectWords.append(answer)
else:
withoutNum += 1
if State.DEBUG:
print("[DEBUG]", answer , "INCORRECT")
return {"withinCor" : withinCorrectWords, "withinIncor" : withinIncorrectWords, "withoutN" : withoutNum}
def run_evaluation(corpus, golden):
""" Generate tables containing evaluation data """
wordsTable, correctTable, statsTable = None, None, None
results = []
for i in range(State.LOOPS):
results.append(evaluate_corpus(corpus, golden))
if State.DEBUG:
# TODO: format this output with labels and a table?
print("[DEBUG]" , results)
# Initialize counters
percentCor, percentIncor, percentWithin = 0, 0, 0
distances, totalWithin, totalWithinCor = 0, 0, 0
for evalResult in results:
# Format output into tables
WORDS_DATA = (
("Within Correct", "Within Incorrect", "Without Number"),
("\n".join([ val[0] for val in evalResult["withinCor"] ]) ,
"\n".join(evalResult["withinIncor"]),
evalResult["withoutN"])
)
wordsTable = AsciiTable(WORDS_DATA, "Word Results")
# Only output word result tables if NOT looping --minimize output
if State.DEBUG or State.LOOPS == 1:
print("\n" + wordsTable.table)
MATCH_DATA = ("Word" , "Jaccard Value" , "Matching Corpus Value" , "Hint", "Max Jaccard Value", "Jaccard Distance")
data = []
correctTable = AsciiTable([MATCH_DATA, []])
# Cap column widths at 35 chars for widest columns
maxValWidth = min(correctTable.column_max_width(2), 35)
maxHintWidth = min(correctTable.column_max_width(3), 35)
for i in range(len(evalResult["withinCor"])):
r = list(evalResult["withinCor"][i])
# Format text to wrap
wrappedVal = '\n'.join(wrap(r[2], maxValWidth))
wrappedHint = '\n'.join(wrap(r[3], maxHintWidth))
r[2] = wrappedVal
r[3] = wrappedHint
if State.DEBUG:
print(r)
data.append(r)
correctTable = AsciiTable(tuple([MATCH_DATA] + data), "Correct Matches Results")
# Only output word result tables if NOT looping --minimize output
if State.DEBUG or State.LOOPS == 1:
print("\n" + correctTable.table)
# Check if we had any hits at all, otherwise output zeroes
if (State.SAMPLES != evalResult["withoutN"]):
withinN = State.SAMPLES - evalResult["withoutN"]
withinCor = len(evalResult["withinCor"])
percentCor += withinCor / withinN
percentIncor += len(evalResult["withinIncor"]) / withinN
percentWithin += withinN / State.SAMPLES
# Sum the distances for each result from this loop to calc overal average
for word in evalResult["withinCor"]:
distances += word[5]
totalWithin += withinN
totalWithinCor += withinCor
STATS_DATA = (
("Average Percentage Within Correct" , "Average Percentage Within Incorrect", "Average Percentage Within", "Average Jaccard Distance"),
(percentCor / State.LOOPS,
percentIncor / State.LOOPS,
percentWithin / State.LOOPS,
distances / totalWithinCor if totalWithinCor > 0 else 0.0)
)
statsTable = AsciiTable(STATS_DATA, "Statistics")
print("\n" + statsTable.table)
| UTF-8 | Python | false | false | 6,916 | py | 14 | DecisionMaker.py | 6 | 0.606131 | 0.598323 | 0 | 200 | 33.58 | 143 |
silverstar194/Nano-SpeedTest | 3,470,333,582,658 | 5d2d1fa3dd8565971010275fd3d1a0e092e145c5 | c68fbb8f11d33a08326827d90fe96b1805214e28 | /app/backend/speedtest_api/services/nodes.py | a709949555d78ecd62756a69de5c4ecb300a1120 | [] | no_license | https://github.com/silverstar194/Nano-SpeedTest | ce8decf9a616b100449b786a53d3cf67798e1a9b | c5b11305050aac44ea940d7241d61423c63a2406 | refs/heads/master | 2022-12-12T12:17:02.062680 | 2020-12-12T23:55:11 | 2020-12-12T23:55:11 | 149,526,364 | 28 | 7 | null | false | 2022-12-07T23:53:06 | 2018-09-20T00:02:48 | 2022-11-18T20:19:32 | 2022-12-07T23:53:06 | 5,409 | 25 | 4 | 7 | CSS | false | false | from django.conf import settings as settings
from .. import models as models
class NodeNotFoundException(Exception):
def __init__(self, node):
        Exception.__init__(self, "Fatal error occurred. Node %s not found." % (node.URL))
def new_node(URL, latitude, longitude, location_name=None):
"""
Create a new node from the given information
@param URL: URL to connect to RPC of the node
@param latitude: Latitude of the node
@param longitude: Longitude of the node
@param location_name: Friendly name of the node's location (256 character limit)
@return: New node object
"""
return models.Node.objects.create(URL=URL, latitude=latitude, longitude=longitude, location_name=location_name)
def get_nodes(enabled=True):
"""
Get all nodes
@param enabled: Filter nodes by enability.
@return: Query of Node objects
"""
return models.Node.objects.filter(enabled=enabled)
def get_node(id, enabled=True):
"""
Get a node with the specified node.id
@return: Node object or None if the node was not found
@raise MultipleObjectsReturned: If more than one node with the Id is found, this is raised
"""
try:
return models.Node.objects.get(id=id, enabled=enabled)
except models.Node.DoesNotExist:
return None
    except models.Node.MultipleObjectsReturned:
        raise
| UTF-8 | Python | false | false | 1,398 | py | 145 | nodes.py | 110 | 0.69671 | 0.694564 | 0 | 46 | 29.391304 | 115 |
Nidocq/Sudoku_Solver | 7,842,610,288,689 | 06f3f2662dc1955027e608fa39758e57d1f3d257 | 71c3683c843a0773311d0692d7f5af1195995643 | /Board.py | b8992be65c5f2d882ee390a86dbf37b5bc7b366a | [] | no_license | https://github.com/Nidocq/Sudoku_Solver | b4eb2168406406753f4316fef01dce1860294c09 | 17d41ec86c1d7df4b2a9b04f5f8f9f0544db9cc1 | refs/heads/main | 2023-06-14T06:37:08.607379 | 2021-07-08T20:47:06 | 2021-07-08T20:47:06 | 383,120,452 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
class Board():
def __init__(self):
self.grid = [[6, 0, 0, 1, 0, 0, 0, 4, 0],
[0, 0, 1, 8, 6, 0, 2, 0, 0],
[8, 9, 0, 2, 5, 4, 0, 0, 0],
[2, 0, 0, 3, 8, 0, 1, 5, 0],
[0, 7, 0, 0, 2, 6, 9, 8, 3],
[3, 8, 9, 7, 1, 0, 0, 0, 0],
[0, 2, 8, 9, 0, 1, 6, 3, 4],
[7, 0, 4, 6, 0, 0, 8, 0, 5],
[0, 0, 0, 5, 4, 0, 7, 1, 2]]
self.size = 9
def validPlacement(self, row, col, value):
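        # valid iff value is absent from the row, the column, and the 3x3 box whose top-left corner is (row - row % 3, col - col % 3)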
for lines in range(0, self.size):
if self.grid[row][lines] == value:
return False
if self.grid[lines][col] == value:
return False
startRow = row - row % 3
startCol = col - col % 3
for i in range(3):
for j in range(3):
if self.grid[i + startRow][j + startCol] == value:
return False
return True
def showGridTerminal(self):
for i in self.grid:
print(i)
def showGrid(self) -> str:
boardString = ''
for i in self.grid:
            boardString += str(i) + "<br/>"
return boardString | UTF-8 | Python | false | false | 1,187 | py | 5 | Board.py | 4 | 0.410278 | 0.336142 | 0 | 42 | 27.285714 | 66 |
gabriellaec/desoft-analise-exercicios | 11,020,886,091,469 | ecb2eeab5c7af369435d0c5c0e2b200801684bce | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch80_2020_04_08_20_35_59_699840.py | 4a781044dd6978c7d01548ae00b9cde383ae5340 | [] | no_license | https://github.com/gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def interseccao_chaves(x,y):
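    # returns the list of keys that appear in both dictionaries x and y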
lista=[]
for k in x.keys():
if k in y.keys():
if k not in lista:
lista.append(k)
return lista | UTF-8 | Python | false | false | 170 | py | 35,359 | ch80_2020_04_08_20_35_59_699840.py | 35,352 | 0.494118 | 0.494118 | 0 | 7 | 23.428571 | 31 |
equinor/webviz-core-components | 9,199,819,960,594 | 67c46cf98c24ad00dc66569c82c47e46d817b048 | bfd641697aaf7777e7c6b921d6b8b04e867fa877 | /webviz_core_components/wrapped_components/frame.py | 095dd25afee6c584167b1143fd1cf4a2012a6d0b | [
"MIT"
] | permissive | https://github.com/equinor/webviz-core-components | cb02480f16050c704e2d46d635f12f95235aa2fa | cba53f3ea37beabd6cee5765de2b556d9f4aef48 | refs/heads/master | 2023-08-29T23:54:37.086036 | 2023-08-09T10:52:08 | 2023-08-09T10:52:08 | 201,783,679 | 13 | 15 | MIT | false | 2023-08-17T07:55:19 | 2019-08-11T15:42:13 | 2023-06-23T09:13:35 | 2023-08-11T16:38:59 | 11,088 | 11 | 15 | 43 | TypeScript | false | false | from typing import Any
from dash import html
class Frame(html.Div):
"""A html.Div with border and background styling
Keyword arguments:
- children (a list of or a singular dash component, string or number; required):
The children of this component.
- color (str; default #F9F9F9):
Background color of the frame
- highlight (bool; default: True):
Adds additional shadow when hovering over the box
- className (string; optional):
Additional css class to apply
    - style (dict; optional):
Additional style for the component
"""
def __init__(
self,
children: Any,
color: str = "#F9F9F9",
highlight: bool = True,
className: str = "",
style: dict = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.className = (
className + " webviz-frame"
if highlight
else className + " webviz-frame-no-hover"
)
self.style = style if style is not None else {}
self.style.update({"backgroundColor": color})
self.children = children
| UTF-8 | Python | false | false | 1,157 | py | 186 | frame.py | 70 | 0.582541 | 0.577355 | 0 | 46 | 24.152174 | 84 |
masakiaota/kyoupuro | 1,443,109,038,543 | eef9666bf68f7b3056de2f1837df9759b2c0bed7 | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /practice/E_ABC/abc149_e.py | 0e334dcd7bf5adaa753340eb3b4a63fba759f475 | [] | no_license | https://github.com/masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://atcoder.jp/contests/abc149/tasks/abc149_e
# Enumerating all N**2 handshake combinations, sorting them and summing the largest M would work,
# but the constraints make that infeasible.
# Even without enumerating everything, the M-th value can be found by binary search (cf. the 100-million-cell arithmetic-table problem).
# Keeping prefix sums of A lets us also accumulate the sum of the taken values while searching for the M-th value.
# In what follows, assume the array is sorted in descending order.
# If X is the M-th number, at least M pair sums are >= X (call that count cnt); the largest X with cnt >= M is the M-th value.
# Afterwards, the excess beyond M has to be subtracted.
from bisect import bisect_right, bisect_left
import sys
read = sys.stdin.readline
def read_ints():
return list(map(int, read().split()))
class cumsum1d:
def __init__(self, ls: list):
'''
        Takes a 1-D list.
'''
from itertools import accumulate
self.ls_accum = [0] + list(accumulate(ls))
def total(self, i, j):
        # sum over the half-open range [i, j) of the original list ls
return self.ls_accum[j] - self.ls_accum[i]
N, M = read_ints()
A = read_ints()
A.sort()  # keep ascending order so bisect can be used; not reversed here
A_reversed = list(reversed(A))
A_rev_acc = cumsum1d(A_reversed)
def is_ok(X):
    # the M-th number is X -> the count of pair sums >= X is at least M (call it cnt)
    # -> the largest X satisfying cnt >= M is the M-th value; this returns whether cnt >= M
    # cnt is the number of pairs with ai + aj >= X
cnt = 0
ans = 0
for a in A:
aa = X - a
        idx_reverse = N - bisect_left(A, aa)  # how many elements are >= aa (rank counted from the large end)
        # this is equal to bisect_right(A_reversed, aa)
cnt += idx_reverse
ans += A_rev_acc.total(0, idx_reverse) + idx_reverse * a
return cnt >= M, ans, cnt
def meguru_bisect(ng, ok):
'''
    Define is_ok, then pass the initial ng and ok;
    returns the smallest (or largest) ok that satisfies is_ok.
    ng and ok should be (smallest possible value - 1) and (largest possible value + 1);
    if max and min are the other way round, they are flipped accordingly.
'''
while (abs(ok - ng) > 1):
mid = (ok + ng) // 2
flg, ans, cnt = is_ok(mid)
if flg:
ok = mid
            ans_true = ans  # the ans from the last state where is_ok held becomes the answer
cnt_true = cnt
else:
ng = mid
return ans_true, ok, cnt_true
ans_tmp, M_th_num, M_plus_alpha_th = \
meguru_bisect(2 * 10 ** 5 + 1, 0)
# print(ans_tmp, M_th_num, M_plus_alpha_th)
print(ans_tmp - (M_plus_alpha_th - M) * M_th_num)
| UTF-8 | Python | false | false | 2,628 | py | 891 | abc149_e.py | 842 | 0.60982 | 0.597149 | 0 | 77 | 23.597403 | 64 |
NaraS91/AdventOfCode | 695,784,729,214 | 3498b64f7b5dfe5563b7ad0a4250f65716e4862b | 97c7933f831fe4d7458f540764475a4360899e91 | /AdventOfCode/Day05/solver.py | 9051b30c81c274cba6ad1b8753573f3bdd2c0ab9 | [] | no_license | https://github.com/NaraS91/AdventOfCode | 2e462c3c35e85b55cab7d05c02cf26745064a2fd | b753dafc96b65faa1ccebd2173f94663fcb8ad7c | refs/heads/main | 2023-02-05T15:49:49.072298 | 2020-12-22T20:24:00 | 2020-12-22T20:24:00 | 317,490,059 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def boarding_pass_to_seat(boarding_pass):
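  # 'B' and 'R' pick the upper half at each step, so they act as 1-bits: the first 7 characters encode the row, the last 3 the column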
row = 0
powOf2 = 64
for i in range(0, 7):
if boarding_pass[i] == 'B':
row += powOf2
powOf2 /= 2
column = 0
powOf2 = 4
for i in range(7, 10):
if boarding_pass[i] == 'R':
column += powOf2
powOf2 /= 2
return (row, column)
def row_column_to_id(row_column):
return row_column[0] * 8 + row_column[1]
def input_to_list_of_boarding_passes(file_address):
f = open(file_address, "r")
return f.readlines()
def solve1(input_file):
boarding_passes = input_to_list_of_boarding_passes(input_file)
seats = map(boarding_pass_to_seat, boarding_passes)
ids = map(row_column_to_id, seats)
return max(ids)
def solve2(input_file):
boarding_passes = input_to_list_of_boarding_passes(input_file)
seats = map(boarding_pass_to_seat, boarding_passes)
ids = list(map(row_column_to_id, seats))
ids_set = set()
for id in ids:
ids_set.add(id)
for id in ids:
if id + 1 not in ids_set and id + 2 in ids_set:
return id + 1
print(solve1("input"))
print(solve2("input"))
| UTF-8 | Python | false | false | 1,072 | py | 21 | solver.py | 13 | 0.638993 | 0.612873 | 0 | 47 | 21.808511 | 64 |
ispras/lingvodoc | 9,740,985,841,877 | 3ffc2e3a0aaed92a3b79c6faa26faa0b4690ee59 | 9cd2a076f5044f29ba336d3a8c9721133f90b8d4 | /lingvodoc/views/v2/delete.py | 6795f33a737eea883ed35dfdb8b957c3c6c6a067 | [
"BSD-3-Clause",
"BSD-3-Clause-Modification",
"LGPL-3.0-or-later",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Zlib",
"ZPL-2.1",
"LGPL-2.1-only",
"Apache-2.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"ZPL-2.0",
"MIT"
] | permissive | https://github.com/ispras/lingvodoc | 19e889a92bfd5428fe8f2a409e21b44bd8a25d06 | 4e129f73e99a1dea93d900c4abf476409bc56957 | refs/heads/heavy_refactor | 2023-08-17T10:48:17.617483 | 2023-08-10T13:06:56 | 2023-08-10T13:06:56 | 22,783,020 | 7 | 22 | Apache-2.0 | false | 2023-09-06T14:13:29 | 2014-08-09T09:19:56 | 2023-07-03T22:44:46 | 2023-09-06T14:13:27 | 102,130 | 6 | 12 | 11 | JavaScript | false | false | from lingvodoc.models import (
Client,
DBSession,
Dictionary,
DictionaryPerspective,
DictionaryPerspectiveToField,
Field,
LexicalEntry,
Entity,
PublishingEntity,
Language,
Organization,
User,
UserBlobs,
TranslationAtom,
TranslationGist,
categories,
ObjectTOC
)
from sqlalchemy import and_
from pyramid.request import Request
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import joinedload
import datetime
import base64
import json
import os
import shutil
from pathvalidate import sanitize_filename
from collections import deque
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPNotFound,
HTTPOk,
HTTPInternalServerError,
HTTPConflict
)
from lingvodoc.exceptions import CommonException
from sqlalchemy.exc import IntegrityError
# todo: all of this should be in models
def real_delete_object(obj):
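    # deletes both the object's ObjectTOC bookkeeping row and the object itself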
DBSession.delete(DBSession.query(ObjectTOC).filter_by(client_id=obj.client_id, object_id=obj.object_id).first())
DBSession.delete(obj)
def real_delete_language(language, settings):
for child in language.language:
real_delete_language(child, settings)
for dictionary in language.dictionary:
real_delete_dictionary(dictionary, settings)
real_delete_object(language)
def real_delete_dictionary(dictionary, settings):
for perspective in dictionary.dictionaryperspective:
real_delete_perspective(perspective, settings)
real_delete_object(dictionary)
def real_delete_perspective(perspective, settings):
for field in perspective.dictionaryperspectivetofield:
real_delete_object(field)
for lex in perspective.lexicalentry:
real_delete_lexical_entry(lex, settings)
real_delete_object(perspective)
def real_delete_lexical_entry(lexical_entry, settings):
for entity in lexical_entry.entity:
if not entity.upper_level:
real_delete_entity(entity, settings)
real_delete_object(lexical_entry)
def real_delete_entity(entity, settings):
DBSession.delete(DBSession.query(PublishingEntity).filter_by(client_id=entity.client_id, object_id=entity.object_id).first())
for child in entity.entity:
real_delete_entity(child, settings)
if entity.additional_metadata and 'data_type' in entity.additional_metadata:
try:
path = entity.content
base_path = settings['storage']['path']
storage_dir = os.path.join(base_path, 'entity', entity.additional_metadata['data_type'],
str(entity.client_id), str(entity.object_id))
split_path = path.split('/')
path = os.path.join(storage_dir, split_path[len(split_path) - 1])
# todo: make path in windows
os.remove(path)
except:
print('fail with entity', entity.client_id, entity.object_id)
real_delete_object(entity)
def real_delete_translation_gist(translation_gist, settings):
for translation_atom in translation_gist.translationatom:
real_delete_object(translation_atom)
real_delete_object(translation_gist) | UTF-8 | Python | false | false | 3,146 | py | 386 | delete.py | 169 | 0.709472 | 0.708519 | 0 | 106 | 28.688679 | 129 |
foamliu/facesdk | 7,224,135,030,147 | 9c0727bb83157176b6ab032d960b5bdf353692bb | efbb9d3ebb2b2c44d05ffe88c74bf4c7da2d4479 | /examples/detect.py | 7794cba7b2147e4db4f314000cf3dd01b906aa31 | [
"MIT"
] | permissive | https://github.com/foamliu/facesdk | 5ff858ad144a6a26f0eded8ecbc98cb7ca580441 | af83d440eafe8523e4323df7c4c8dc5370fb3d70 | refs/heads/master | 2020-12-12T19:33:49.278947 | 2020-01-19T02:42:06 | 2020-01-19T02:42:06 | 234,211,843 | 2 | 1 | MIT | false | 2020-01-18T00:47:33 | 2020-01-16T01:52:34 | 2020-01-17T13:02:37 | 2020-01-18T00:47:32 | 13,073 | 0 | 1 | 0 | Python | false | false | import cv2 as cv
from facesdk.core import FaceSDK
def draw_bboxes(img, bboxes, landmarks):
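    # each bbox row is assumed to be [x1, y1, x2, y2, score]; each landmarks row holds the five x-coordinates followed by the five y-coordinates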
num_faces = bboxes.shape[0]
# show image
for i in range(num_faces):
b = bboxes[i]
text = "{:.4f}".format(b[4])
b = list(map(int, b))
cv.rectangle(img, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
cx = b[0]
cy = b[1] + 12
cv.putText(img, text, (cx, cy),
cv.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# landms
landms = landmarks[i]
cv.circle(img, (landms[0], landms[5]), 1, (0, 0, 255), 4)
cv.circle(img, (landms[1], landms[6]), 1, (0, 255, 255), 4)
cv.circle(img, (landms[2], landms[7]), 1, (255, 0, 255), 4)
cv.circle(img, (landms[3], landms[8]), 1, (0, 255, 0), 4)
cv.circle(img, (landms[4], landms[9]), 1, (255, 0, 0), 4)
return img
if __name__ == "__main__":
img = cv.imread('examples/data/aqgy_0.jpg')
facesdk = FaceSDK()
bboxes, landmarks = facesdk.detect_faces(img)
img = draw_bboxes(img, bboxes, landmarks)
cv.imshow('face detection', img)
cv.waitKey(0)
cv.imwrite('examples/output/face_detection.jpg', img)
| UTF-8 | Python | false | false | 1,191 | py | 8 | detect.py | 6 | 0.533165 | 0.465995 | 0 | 41 | 28.04878 | 69 |
tom-sb/farmManage | 9,775,345,595,600 | bd24f25b9c0c8cbcd0f1abb011a8ef59b76cfe26 | f4a43d4eead4b1ffb145a27d30eb4b3571f80fed | /moduloGranja/apps/alimentos/models.py | 5348e0d77bb5c26470c76f10e5fa346996bb996a | [] | no_license | https://github.com/tom-sb/farmManage | 256dd968416c5449d52e118ad0e3d3cd8ebdaae9 | 0f24f4a3f790e30accd70bf0c7876f635e27a9ba | refs/heads/master | 2023-02-25T15:22:30.073951 | 2021-02-05T01:28:48 | 2021-02-05T01:28:48 | 298,927,843 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from apps.clusters.models import Cluster
from datetime import date
# Create your models here.
class Alimento(models.Model):
nombre_alimento = models.CharField(max_length = 50)
stock_alimento = models.IntegerField()
def __str__(self):
return self.nombre_alimento
def updateStock(self, val):
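        # adjusts the in-memory stock only; the caller is expected to call save() to persist the change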
self.stock_alimento = self.stock_alimento + val
class FichaAlimento(models.Model):
nombre_ficha = models.CharField(max_length = 50)
alimento = models.ForeignKey(Alimento, on_delete=models.CASCADE)
cluster = models.ForeignKey(Cluster, on_delete=models.CASCADE)
fecha_expiracion = models.DateField(default = (date.today))
frecuencia_xdia = models.PositiveSmallIntegerField()
cantidad_xtoma = models.PositiveSmallIntegerField()
auto_renovar = models.BooleanField(default = False)
def __str__(self):
return self.nombre_ficha
| UTF-8 | Python | false | false | 867 | py | 85 | models.py | 48 | 0.767013 | 0.762399 | 0 | 28 | 29.964286 | 65 |
hidetomo-watanabe/analysis_for_kaggle | 9,560,597,246,364 | 338dd6377e2848f156dd0b46a6592140966a7f4e | ab291f94e5cc97703196bb1d0f2f7438f5c90390 | /sklearn_wrapper/modules/Notifier.py | a5544b631f2177826e4b3ddc8a6f923e6ffc9f26 | [
"Apache-2.0"
] | permissive | https://github.com/hidetomo-watanabe/analysis_for_kaggle | 45c50fc629fc963a3dc3bb81de686dcd51050c7f | 0d688b5fae418c61a022e583412e2437b9f86592 | refs/heads/master | 2021-06-03T00:02:58.092592 | 2021-06-02T23:48:26 | 2021-06-02T23:48:26 | 97,419,388 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from logging import getLogger
import requests
logger = getLogger('predict').getChild('Notifier')
if 'ConfigReader' not in globals():
from .ConfigReader import ConfigReader
class Notifier(ConfigReader):
def __init__(self):
self.configs = {}
def notify_slack(self):
mode = self.configs['notify'].get('mode')
if not mode:
logger.warning('NO NOTIFICATION')
return
logger.info('notification: %s' % mode)
if mode == 'slack':
text = 'Finished.'
requests.post(
self.configs['notify'][mode],
data=json.dumps({'text': text}))
else:
logger.error('NOT IMPLEMENTED NOTIFICATION: %s' % mode)
raise Exception('NOT IMPLEMENTED')
| UTF-8 | Python | false | false | 795 | py | 59 | Notifier.py | 40 | 0.579874 | 0.579874 | 0 | 29 | 26.413793 | 67 |
saraitne11/CodingTest | 670,014,918,275 | df08459e55a4a4f8a18bee5c05e80314cc658cfa | c4a44a38eb04701688ed9f8e5dd20e3ad41535d7 | /baekjoon/baekjoon7576/baekjoon7576.py | 1e5660099a9aa3d52075324f61b575e1f12a3228 | [] | no_license | https://github.com/saraitne11/CodingTest | 2374514f934f5b59e3c137100899069a874c517b | 1c9eb6bb263d6aa2f5c4a37aa0060c2843526dc9 | refs/heads/master | 2022-06-07T12:40:26.779146 | 2022-05-16T00:48:26 | 2022-05-16T00:48:26 | 210,196,388 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
https://www.acmicpc.net/problem/7576
Tomato
"""
from pprint import pprint
from collections import deque
UP = (-1, 0)
DOWN = (1, 0)
LEFT = (0, -1)
RIGHT = (0, 1)
MOVES = (UP, DOWN, LEFT, RIGHT)
def in_matrix(i, j, n, m):
if 0 <= i < n and 0 <= j < m:
return True
else:
return False
def bfs(mtx, starts):
"""
:param mtx: matrix
:param starts: [(i1, j1), (i2, j2), ...]
:return:
"""
n = len(mtx)
m = len(mtx[0])
queue = deque()
for i, j in starts:
queue.append((i, j, 0))
max_d = 0
while queue:
        # list.pop(0) would cost an extra O(N) since every remaining element shifts forward; deque.popleft() avoids that
i, j, d = queue.popleft()
for di, dj in MOVES:
new_i = i + di
new_j = j + dj
new_d = d + 1
if in_matrix(new_i, new_j, n, m):
if mtx[new_i][new_j] == 0:
mtx[new_i][new_j] = 1
queue.append((new_i, new_j, new_d))
max_d = max(max_d, new_d)
return max_d
def main():
m, n = tuple(map(int, input().rstrip().split(' ')))
# print(n, m)
map_ = [list(map(int, input().rstrip().split(' '))) for _ in range(n)]
# pprint(map_)
starts = []
for i in range(n):
for j in range(m):
if map_[i][j] == 1:
starts.append((i, j))
max_day = bfs(map_, starts)
for i in range(n):
for j in range(m):
if map_[i][j] == 0:
print(-1)
return
print(max_day)
return
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,636 | py | 56 | baekjoon7576.py | 54 | 0.44697 | 0.429293 | 0 | 78 | 19.307692 | 74 |
prompto/prompto-python3 | 9,672,266,390,222 | 7e2615d2ae4a1a6a5747947a93736511d20d3796 | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/test/prompto/translate/oeo/TestCast.py | 50587ecb1cc5b18f3e4f0162e7b5297ba4a9c6ee | [] | no_license | https://github.com/prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | false | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | 2019-04-27T02:51:34 | 2019-05-04T11:06:04 | 12,379 | 0 | 0 | 0 | Python | false | false | from prompto.parser.o.BaseOParserTest import BaseOParserTest
class TestCast(BaseOParserTest):
def setUp(self):
super(type(self), self).setUp()
def testAutoDowncast(self):
self.compareResourceOEO("cast/autoDowncast.poc")
def testAutoDowncastMethod(self):
self.compareResourceOEO("cast/autoDowncastMethod.poc")
def testCastChild(self):
self.compareResourceOEO("cast/castChild.poc")
def testCastEnum(self):
self.compareResourceOEO("cast/castEnum.poc")
def testCastMethod(self):
self.compareResourceOEO("cast/castMethod.poc")
def testCastMissing(self):
self.compareResourceOEO("cast/castMissing.poc")
def testCastNull(self):
self.compareResourceOEO("cast/castNull.poc")
def testCastParent(self):
self.compareResourceOEO("cast/castParent.poc")
def testIsAChild(self):
self.compareResourceOEO("cast/isAChild.poc")
def testIsAText(self):
self.compareResourceOEO("cast/isAText.poc")
def testMutableEntity(self):
self.compareResourceOEO("cast/mutableEntity.poc")
def testMutableList(self):
self.compareResourceOEO("cast/mutableList.poc")
def testNullIsNotAText(self):
self.compareResourceOEO("cast/nullIsNotAText.poc")
| UTF-8 | Python | false | false | 1,303 | py | 689 | TestCast.py | 685 | 0.701458 | 0.701458 | 0 | 45 | 27.911111 | 62 |
wushijie312/yanjiuKJ | 4,707,284,165,509 | d4e302172d3249365c8120f997d529b0343042ad | 6af9c3905a649dae6960e730dcaa770c83878beb | /Django_myblog/blog/migrations/0005_artical_pub_time.py | 1afaa40531073ebce5a39029ff90f2be79944e29 | [] | no_license | https://github.com/wushijie312/yanjiuKJ | c712cb702bc209a25d1e5b9168cd7e25874fe738 | d6da709bc180f8721ea0ad2986f7f32ab139bf25 | refs/heads/master | 2020-03-23T09:12:36.885699 | 2018-07-18T03:10:50 | 2018-07-18T03:10:50 | 141,374,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.5 on 2018-05-11 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20180508_1833'),
]
operations = [
migrations.AddField(
model_name='artical',
name='pub_time',
field=models.DateTimeField(auto_now=True),
),
]
| UTF-8 | Python | false | false | 388 | py | 50 | 0005_artical_pub_time.py | 26 | 0.585052 | 0.505155 | 0 | 18 | 20.555556 | 54 |
Moons08/TIL | 10,110,353,047,310 | 7eb30fb1889b4853731088cca51f1470be73f022 | cb3af0b9c456764ffd6f1c331ea452798c7efc1f | /Past/DSS/Programming/Scraping/180306_secondhand_search/search.py | 76f5024fccb400e4e1a07a69f748d4eda3bd820c | [
"MIT"
] | permissive | https://github.com/Moons08/TIL | fed16d93ac7c23af97dfe4f952e32032c2ad6649 | e257854e1f7b9af5a6e349f38037f3010c07310f | refs/heads/master | 2021-05-11T18:06:16.406383 | 2020-01-20T13:02:57 | 2020-01-20T13:02:57 | 117,811,982 | 0 | 0 | MIT | false | 2018-01-21T07:20:49 | 2018-01-17T09:07:51 | 2018-01-17T09:37:59 | 2018-01-21T07:20:49 | 7 | 0 | 0 | 0 | Jupyter Notebook | false | null | import requests
import pandas as pd
from bs4 import BeautifulSoup
import pickle
# fetches the total number of search results for the keyword
def get_total(keyword):
response = requests.get("https://m.cafe.naver.com/ArticleSearchList.nhn?search.query={}\
&search.menuid=&search.searchBy=1&search.sortBy=date&search.clubid=10050146&search.option=0&search.defaultValue=".format(keyword))
soup = BeautifulSoup(response.content, "html.parser")
return soup.select_one("#ct > div.search_contents > div.search_sort > div.sort_l > span").text
# fetches the list of items on one search-result page
def get_items(keyword, page):
response = requests.get("https://m.cafe.naver.com/ArticleSearchListAjax.nhn?search.query={}\
&search.menuid=&search.searchBy=1&search.sortBy=date&search.clubid=10050146&search.option=0&search.defaultValue=&search.page={}".format(keyword, page))
soup = BeautifulSoup(response.content, "html.parser")
return soup.select('ul li')
# fetches the price from an item's detail page
def get_price(link):
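    # load previously saved session cookies from cookie.bin and send them with the request (presumably required for the price element to be served)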
with open('cookie.bin', 'rb') as f:
a = pickle.load(f) # when you use pickle, never ever name that file as pickle.py like that
headers = {
"cookie" : "{}".format(a),
"user-agent" : 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)'
}
f.close()
response = requests.get(link, headers=headers)
soup = BeautifulSoup(response.content, "html.parser")
return soup.select_one('.price em').text
# turns the scraped items into rows of data
def make_datas(items):
datas = []
for item in items:
        # check whether the item is still for sale ('완료' marks a completed/sold listing)
try:
sell = item.select_one(".icon_txt").text
if sell == "완료":
continue
except:
continue
title = item.select_one("h3").text
link = "https://m.cafe.naver.com/{}".format(item.select_one("a").attrs["href"])
view = item.select_one(".no em").text
date = item.select_one(".time").text
price = get_price(link)
datas.append([title, view, date, price, link])
print(len(datas))
return datas
# takes a keyword and fetches the data from every result page
def all_datas(keyword):
datas=[]
a = int(get_total(keyword))
total = a//20 + 1
for i in range(1, total+1):
item = get_items(keyword, i)
datas.extend(make_datas(item))
return datas
# build a DataFrame and save it to a CSV file
datas = all_datas("공기측정기")
columns = ["title", "views", "date", "price", "link"]
df = pd.DataFrame(datas, columns=columns)
df.to_csv("PeacefulWorld.csv")
| UTF-8 | Python | false | false | 2,774 | py | 149 | search.py | 29 | 0.626953 | 0.608203 | 0 | 73 | 34.068493 | 179 |
ashesknight/tof-mpi-remove | 8,650,064,169,131 | 18878db74650037429012ea0c330a812638e3cba | 0ae7462dece6a980afb75a4aa4f40c1d8bf24e30 | /pipe/kinect_pipeline.py | 0ca60fd729a8d7c47259400f2108eb187451f547 | [
"MIT"
] | permissive | https://github.com/ashesknight/tof-mpi-remove | 8e3375931c2c5aa52b7d25ea2ca965a202efd97a | 11ecac5db4b30affbb1785ac01397e7aa53f22cf | refs/heads/master | 2022-12-23T12:47:50.601122 | 2020-09-24T13:58:11 | 2020-09-24T13:58:11 | 220,787,315 | 6 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.insert(0, '../sim/')
import tensorflow as tf
from tof_class import *
from kinect_spec import *
tf.logging.set_verbosity(tf.logging.INFO)
from kinect_init import *
tof_cam = kinect_real_tf()
PI = 3.14159265358979323846
flg = False
dtype = tf.float32
def kinect_pipeline(meas):
## Kinect Pipeline, use the algorithm of Kinect v2 to compute the denoised depth
# convert to the default data type
x_kinect = tf.cast(meas, tf.float32)
# make the size to be 424,512 (padding 0)
y_idx = int((424 - int(x_kinect.shape[1])) / 2)
zero_mat = tf.zeros([tf.shape(x_kinect)[0], y_idx, tf.shape(x_kinect)[2], 9])
x_kinect = tf.concat([zero_mat, x_kinect, zero_mat], 1)
msk = kinect_mask_tensor()
msk = tf.expand_dims(tf.expand_dims(msk, 0), -1)
x_kinect = x_kinect * msk * tof_cam.cam['map_max']
# final depth prediction: kinect pipeline
ira, irb, iramp = processPixelStage1_mat(x_kinect)
depth_outs, ir_sum_outs, ir_outs, msk_out1 = processPixelStage2(ira, irb, iramp)
# creates the mask
ms = tf.concat([ira, irb, iramp], -1)
bilateral_max_edge_tests = filterPixelStage1(ms)[1]
depth_out_edges = depth_outs * bilateral_max_edge_tests
msk_out2 = filterPixelStage2(depth_outs, depth_out_edges, ir_outs)[1]
msk_out3 = tf.cast(tf.greater(depth_outs, prms['min_depth']), dtype=dtype)
msk_out4 = tf.cast(tf.less(depth_outs, prms['max_depth']), dtype=dtype)
depth_msk = tf.cast(tf.greater(msk_out2 * msk_out3 * msk_out4, 0.5), dtype=dtype)
depth_outs /= 1000.0
depth_outs *= depth_msk
# baseline correction
depth_outs = depth_outs * base_cor['k'] + base_cor['b']
depth_outs = depth_outs[:, 20:-20, :]
depth_msk = depth_msk[:, 20:-20, :]
amplitude_outs = ir_outs[:, 20:-20, :]
return depth_outs, depth_msk, amplitude_outs
def kinect_mask_tensor():
# return the kinect mask that creates the positive-negative interval
mask = np.zeros((424, 512))
idx = 1
for i in range(mask.shape[0]):
mask[i, :] = idx
if i != (mask.shape[0] / 2 - 1):
idx = -idx
mask = tf.convert_to_tensor(mask)
mask = tf.cast(mask, tf.float32)
return mask
def processPixelStage1(m):
# m is (None,424, 512, 9)
# the first three is the first frequency
tmp = []
tmp.append(processMeasurementTriple(m[:, :, :, 0:3], prms['ab_multiplier_per_frq'][0], trig_table0))
tmp.append(processMeasurementTriple(m[:, :, :, 3:6], prms['ab_multiplier_per_frq'][1], trig_table1))
tmp.append(processMeasurementTriple(m[:, :, :, 6:9], prms['ab_multiplier_per_frq'][2], trig_table2))
m_out = [ \
tmp[0][:, :, :, 0], tmp[1][:, :, :, 0], tmp[2][:, :, :, 0],
tmp[0][:, :, :, 1], tmp[1][:, :, :, 1], tmp[2][:, :, :, 1],
tmp[0][:, :, :, 2], tmp[1][:, :, :, 2], tmp[2][:, :, :, 2],
]
m_out = tf.stack(m_out, -1)
# return processMeasurementTriple(m[:,:,:,0:3], prms['ab_multiplier_per_frq'][0], trig_table0)
return m_out
def processPixelStage1_mat(m):
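    # vectorized variant of processPixelStage1: applies the three per-frequency trig tables to all nine raw channels at once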
# if not saturated
cos_tmp0 = np.stack([trig_table0[:, :, 0], trig_table1[:, :, 0], trig_table2[:, :, 0]], -1)
cos_tmp1 = np.stack([trig_table0[:, :, 1], trig_table1[:, :, 1], trig_table2[:, :, 1]], -1)
cos_tmp2 = np.stack([trig_table0[:, :, 2], trig_table1[:, :, 2], trig_table2[:, :, 2]], -1)
sin_negtmp0 = np.stack([trig_table0[:, :, 3], trig_table1[:, :, 3], trig_table2[:, :, 3]], -1)
sin_negtmp1 = np.stack([trig_table0[:, :, 4], trig_table1[:, :, 4], trig_table2[:, :, 4]], -1)
sin_negtmp2 = np.stack([trig_table0[:, :, 5], trig_table1[:, :, 5], trig_table2[:, :, 5]], -1)
# stack
cos_tmp0 = np.expand_dims(cos_tmp0, 0)
cos_tmp1 = np.expand_dims(cos_tmp1, 0)
cos_tmp2 = np.expand_dims(cos_tmp2, 0)
sin_negtmp0 = np.expand_dims(sin_negtmp0, 0)
sin_negtmp1 = np.expand_dims(sin_negtmp1, 0)
sin_negtmp2 = np.expand_dims(sin_negtmp2, 0)
#
abMultiplierPerFrq = np.expand_dims(np.expand_dims(np.expand_dims(prms['ab_multiplier_per_frq'], 0), 0), 0)
ir_image_a = cos_tmp0 * m[:, :, :, 0::3] + cos_tmp1 * m[:, :, :, 1::3] + cos_tmp2 * m[:, :, :, 2::3]
ir_image_b = sin_negtmp0 * m[:, :, :, 0::3] + sin_negtmp1 * m[:, :, :, 1::3] + sin_negtmp2 * m[:, :, :, 2::3]
ir_image_a *= abMultiplierPerFrq
ir_image_b *= abMultiplierPerFrq
ir_amplitude = tf.sqrt(ir_image_a ** 2 + ir_image_b ** 2) * prms['ab_multiplier']
return ir_image_a, ir_image_b, ir_amplitude
def processMeasurementTriple(m, abMultiplierPerFrq, trig_table):
# m is (None,424,512,3)
zmultiplier = tf.constant(z_table, dtype=dtype)
# judge where saturation happens
saturated = tf.cast(tf.less(tf.abs(m), 1.0), dtype=dtype)
saturated = 1 - saturated[:, :, :, 0] * saturated[:, :, :, 1] * saturated[:, :, :, 2]
# if not saturated
cos_tmp0 = trig_table[:, :, 0]
cos_tmp1 = trig_table[:, :, 1]
cos_tmp2 = trig_table[:, :, 2]
sin_negtmp0 = trig_table[:, :, 3]
sin_negtmp1 = trig_table[:, :, 4]
sin_negtmp2 = trig_table[:, :, 5]
# stack
cos_tmp0 = np.expand_dims(cos_tmp0, 0)
cos_tmp1 = np.expand_dims(cos_tmp1, 0)
cos_tmp2 = np.expand_dims(cos_tmp2, 0)
sin_negtmp0 = np.expand_dims(sin_negtmp0, 0)
sin_negtmp1 = np.expand_dims(sin_negtmp1, 0)
sin_negtmp2 = np.expand_dims(sin_negtmp2, 0)
ir_image_a = cos_tmp0 * m[:, :, :, 0] + cos_tmp1 * m[:, :, :, 1] + cos_tmp2 * m[:, :, :, 2]
ir_image_b = sin_negtmp0 * m[:, :, :, 0] + sin_negtmp1 * m[:, :, :, 1] + sin_negtmp2 * m[:, :, :, 2]
ir_image_a *= abMultiplierPerFrq
ir_image_b *= abMultiplierPerFrq
ir_amplitude = tf.sqrt(ir_image_a ** 2 + ir_image_b ** 2) * prms['ab_multiplier']
m_out = tf.stack([ir_image_a, ir_image_b, ir_amplitude], -1)
return m_out
def processPixelStage2(ira, irb, iramp):
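    # per-frequency phase via atan2, dealiasing (phase unwrapping) across the three modulation frequencies, then phase-to-depth conversion using the z table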
ratio = 100
tmp0 = tf.atan2(ratio * (irb + 1e-10), ratio * (ira + 1e-10))
flg = tf.cast(tf.less(tmp0, 0.0), dtype)
tmp0 = flg * (tmp0 + PI * 2) + (1 - flg) * tmp0
tmp1 = tf.sqrt(ira ** 2 + irb ** 2) * prms['ab_multiplier']
ir_sum = tf.reduce_sum(tmp1, -1)
# disable disambiguation
ir_min = tf.reduce_min(tmp1, -1)
# phase mask
phase_msk1 = tf.cast( \
tf.greater(ir_min, prms['individual_ab_threshold']),
dtype=dtype
)
phase_msk2 = tf.cast( \
tf.greater(ir_sum, prms['ab_threshold']),
dtype=dtype
)
phase_msk_t = phase_msk1 * phase_msk2
# compute phase
t0 = tmp0[:, :, :, 0] / (2.0 * PI) * 3.0
t1 = tmp0[:, :, :, 1] / (2.0 * PI) * 15.0
t2 = tmp0[:, :, :, 2] / (2.0 * PI) * 2.0
t5 = tf.floor((t1 - t0) * 0.3333333 + 0.5) * 3.0 + t0
t3 = t5 - t2
t4 = t3 * 2.0
c1 = tf.cast(tf.greater(t4, -t4), dtype=dtype)
f1 = c1 * 2.0 + (1 - c1) * (-2.0)
f2 = c1 * 0.5 + (1 - c1) * (-0.5)
t3 = t3 * f2
t3 = (t3 - tf.floor(t3)) * f1
c2 = tf.cast(tf.less(0.5, tf.abs(t3)), dtype=dtype) * \
tf.cast(tf.less(tf.abs(t3), 1.5), dtype=dtype)
t6 = c2 * (t5 + 15.0) + (1 - c2) * t5
t7 = c2 * (t1 + 15.0) + (1 - c2) * t1
t8 = (tf.floor((t6 - t2) * 0.5 + 0.5) * 2.0 + t2) * 0.5
t6 /= 3.0
t7 /= 15.0
# transformed phase measurements (they are transformed and divided
# by the values the original values were multiplied with)
t9 = t8 + t6 + t7
t10 = t9 / 3.0 # some avg
t6 = t6 * 2.0 * PI
t7 = t7 * 2.0 * PI
t8 = t8 * 2.0 * PI
t8_new = t7 * 0.826977 - t8 * 0.110264
t6_new = t8 * 0.551318 - t6 * 0.826977
t7_new = t6 * 0.110264 - t7 * 0.551318
t8 = t8_new
t6 = t6_new
t7 = t7_new
norm = t8 ** 2 + t6 ** 2 + t7 ** 2
mask = tf.cast(tf.greater(t9, 0.0), dtype)
t10 = t10
slope_positive = float(0 < prms['ab_confidence_slope'])
ir_min_ = tf.reduce_min(tmp1, -1)
ir_max_ = tf.reduce_max(tmp1, -1)
ir_x = slope_positive * ir_min_ + (1 - slope_positive) * ir_max_
ir_x = tf.log(ir_x)
ir_x = (ir_x * prms['ab_confidence_slope'] * 0.301030 + prms['ab_confidence_offset']) * 3.321928
ir_x = tf.exp(ir_x)
ir_x = tf.maximum(prms['min_dealias_confidence'], ir_x)
ir_x = tf.minimum(prms['max_dealias_confidence'], ir_x)
ir_x = ir_x ** 2
mask2 = tf.cast(tf.greater(ir_x, norm), dtype)
t11 = t10
mask3 = tf.cast( \
tf.greater(prms['max_dealias_confidence'] ** 2, norm),
dtype
)
t10 = t10
phase = t11
# mask out dim regions
phase = phase
# phase to depth mapping
zmultiplier = z_table
xmultiplier = x_table
phase_msk = tf.cast(tf.less(0.0, phase), dtype)
phase = phase_msk * (phase + prms['phase_offset']) + (1 - phase_msk) * phase
depth_linear = zmultiplier * phase
depth = depth_linear
max_depth = phase * prms['unambiguous_dist'] * 2
cond1 = tf.cast(tf.less(0.0, depth_linear), dtype) * \
tf.cast(tf.less(0.0, max_depth), dtype)
depth_out = depth
ir_sum_out = ir_sum
ir_out = tf.minimum( \
tf.reduce_sum(iramp, -1) * 0.33333333 * prms['ab_output_multiplier'],
65535.0
)
msk_out = cond1 * phase_msk_t * mask * mask2 * mask3
return depth_out, ir_sum_out, ir_out, msk_out
def filterPixelStage1(m):
# m is (None, 424, 512, 9)
# the first three is measurement a
# the second three is measurement b
# the third three is amplitude
norm2 = m[:, :, :, 0:3] ** 2 + m[:, :, :, 3:6] ** 2
inv_norm = 1.0 / tf.sqrt(norm2)
# get rid of those nan
inv_norm = tf.minimum(inv_norm, 1e10)
m_normalized = tf.stack([m[:, :, :, 0:3] * inv_norm, m[:, :, :, 3:6] * inv_norm], -1)
threshold = prms['joint_bilateral_ab_threshold'] ** 2 / prms['ab_multiplier'] ** 2
joint_bilateral_exp = prms['joint_bilateral_exp']
threshold = tf.constant(threshold, dtype=dtype)
joint_bilateral_exp = tf.constant(joint_bilateral_exp, dtype=dtype)
# set the parts with norm2 < threshold to be zero
norm_flag = tf.cast(tf.less(norm2, threshold), dtype=dtype)
threshold = (1 - norm_flag) * threshold
joint_bilateral_exp = (1 - norm_flag) * joint_bilateral_exp
# guided bilateral filtering
gauss = prms['gaussian_kernel']
weight_acc = tf.ones(tf.shape(m_normalized)[0:4]) * gauss[1, 1]
weighted_m_acc0 = gauss[1, 1] * m[:, :, :, 0:3]
weighted_m_acc1 = gauss[1, 1] * m[:, :, :, 3:6]
# coefficient for bilateral space
m_n = m_normalized
# proxy for other m normalized
m_l = tf.concat([m_n[:, :, 1::, :], m_n[:, :, 0:1, :]], 2)
m_r = tf.concat([m_n[:, :, -1::, :], m_n[:, :, 0:-1, :]], 2)
m_u = tf.concat([m_n[:, 1::, :, :], m_n[:, 0:1, :, :]], 1)
m_d = tf.concat([m_n[:, -1::, :, :], m_n[:, 0:-1, :, :]], 1)
m_lu = tf.concat([m_l[:, 1::, :, :], m_l[:, 0:1, :, :]], 1)
m_ru = tf.concat([m_r[:, 1::, :, :], m_r[:, 0:1, :, :]], 1)
m_ld = tf.concat([m_l[:, -1::, :, :], m_l[:, 0:-1, :, :]], 1)
m_rd = tf.concat([m_r[:, -1::, :, :], m_r[:, 0:-1, :, :]], 1)
m_n_shift = [ \
m_rd, m_d, m_ld, m_r, m_l, m_ru, m_u, m_lu
]
m_n_shift = tf.stack(m_n_shift, -1)
# proxy of other_norm2
norm2_l = tf.concat([norm2[:, :, 1::, :], norm2[:, :, 0:1, :]], 2)
norm2_r = tf.concat([norm2[:, :, -1::, :], norm2[:, :, 0:-1, :]], 2)
norm2_u = tf.concat([norm2[:, 1::, :, :], norm2[:, 0:1, :, :]], 1)
norm2_d = tf.concat([norm2[:, -1::, :, :], norm2[:, 0:-1, :, :]], 1)
norm2_lu = tf.concat([norm2_l[:, 1::, :, :], norm2_l[:, 0:1, :, :]], 1)
norm2_ru = tf.concat([norm2_r[:, 1::, :, :], norm2_r[:, 0:1, :, :]], 1)
norm2_ld = tf.concat([norm2_l[:, -1::, :, :], norm2_l[:, 0:-1, :, :]], 1)
norm2_rd = tf.concat([norm2_r[:, -1::, :, :], norm2_r[:, 0:-1, :, :]], 1)
other_norm2 = tf.stack([ \
norm2_rd, norm2_d, norm2_ld, norm2_r,
norm2_l, norm2_ru, norm2_u, norm2_lu,
], -1)
dist = [ \
m_rd * m_n, m_d * m_n, m_ld * m_n, m_r * m_n,
m_l * m_n, m_ru * m_n, m_u * m_n, m_lu * m_n,
]
dist = -tf.reduce_sum(tf.stack(dist, -1), -2)
dist += 1.0
dist *= 0.5
# color filtering
gauss_f = gauss.flatten()
gauss_f = np.delete(gauss_f, [4])
joint_bilateral_exp = tf.tile(tf.expand_dims(joint_bilateral_exp, -1), [1, 1, 1, 1, 8])
weight_f = tf.exp(-1.442695 * joint_bilateral_exp * dist)
weight = tf.stack([gauss_f[k] * weight_f[:, :, :, :, k] for k in range(weight_f.shape[-1])], -1)
# if (other_norm2 >= threshold)...
threshold = tf.tile(tf.expand_dims(threshold, -1), [1, 1, 1, 1, 8])
wgt_msk = tf.cast(tf.less(threshold, other_norm2), dtype=dtype)
weight = wgt_msk * weight
dist = wgt_msk * dist
# coefficient for bilateral space
ms = tf.stack([m[:, :, :, 0:3], m[:, :, :, 3:6]], -1)
# proxy for other m normalized
m_l = tf.concat([ms[:, :, 1::, :], ms[:, :, 0:1, :]], 2)
m_r = tf.concat([ms[:, :, -1::, :], ms[:, :, 0:-1, :]], 2)
m_u = tf.concat([ms[:, 1::, :, :], ms[:, 0:1, :, :]], 1)
m_d = tf.concat([ms[:, -1::, :, :], ms[:, 0:-1, :, :]], 1)
m_lu = tf.concat([m_l[:, 1::, :, :], m_l[:, 0:1, :, :]], 1)
m_ru = tf.concat([m_r[:, 1::, :, :], m_r[:, 0:1, :, :]], 1)
m_ld = tf.concat([m_l[:, -1::, :, :], m_l[:, 0:-1, :, :]], 1)
m_rd = tf.concat([m_r[:, -1::, :, :], m_r[:, 0:-1, :, :]], 1)
m_shift = [ \
m_rd, m_d, m_ld, m_r, m_l, m_ru, m_u, m_lu
]
m_shift = tf.stack(m_shift, -1)
weighted_m_acc0 += tf.reduce_sum(weight * m_shift[:, :, :, :, 0, :], -1)
weighted_m_acc1 += tf.reduce_sum(weight * m_shift[:, :, :, :, 1, :], -1)
dist_acc = tf.reduce_sum(dist, -1)
weight_acc += tf.reduce_sum(weight, -1)
# test the edge
bilateral_max_edge_test = tf.reduce_prod(tf.cast( \
tf.less(dist_acc, prms['joint_bilateral_max_edge']),
dtype
), -1)
m_out = []
wgt_acc_msk = tf.cast(tf.less(0.0, weight_acc), dtype=dtype)
m_out.append(wgt_acc_msk * weighted_m_acc0 / weight_acc)
m_out.append(wgt_acc_msk * weighted_m_acc1 / weight_acc)
m_out.append(m[:, :, :, 6:9])
m_out = tf.concat(m_out, -1)
# mask out the edge
# do not filter the edge
edge_step = 1
edge_msk = np.zeros(m.shape[1:3])
edge_msk[0:0 + edge_step, :] = 1
edge_msk[-1 - edge_step + 1::, :] = 1
edge_msk[:, 0:0 + edge_step] = 1
edge_msk[:, -1 - edge_step + 1::] = 1
edge_msk = tf.constant(edge_msk, dtype=dtype)
edge_msk = tf.tile(tf.expand_dims(tf.expand_dims(edge_msk, -1), 0), [tf.shape(m)[0], 1, 1, 9])
m_out = edge_msk * m + (1 - edge_msk) * m_out
return m_out, bilateral_max_edge_test
def filterPixelStage2(raw_depth, raw_depth_edge, ir_sum):
# raw depth is the raw depth prediction
# raw_depth_edge is roughly the same as raw depth, except some part are zero if
# don't want to do edge filtering
# mask out depth that is out of region
depth_msk = tf.cast(tf.greater(raw_depth, prms['min_depth']), dtype) * \
tf.cast(tf.less(raw_depth, prms['max_depth']), dtype)
# mask out the edge
# do not filter the edge of the image
edge_step = 1
edge_msk = np.zeros(raw_depth.shape[1:3])
edge_msk[0:0 + edge_step, :] = 1
edge_msk[-1 - edge_step + 1::, :] = 1
edge_msk[:, 0:0 + edge_step] = 1
edge_msk[:, -1 - edge_step + 1::] = 1
edge_msk = tf.constant(edge_msk, dtype=dtype)
edge_msk = tf.tile(tf.expand_dims(edge_msk, 0), [tf.shape(raw_depth)[0], 1, 1])
#
knl = tf.constant(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]), dtype=dtype)
knl = tf.expand_dims(tf.expand_dims(knl, -1), -1)
ir_sum_exp = tf.expand_dims(ir_sum, -1)
ir_sum_acc = tf.nn.conv2d(ir_sum_exp, knl, strides=[1, 1, 1, 1], padding='SAME')
squared_ir_sum_acc = tf.nn.conv2d(ir_sum_exp ** 2, knl, strides=[1, 1, 1, 1], padding='SAME')
ir_sum_acc = tf.squeeze(ir_sum_acc, -1)
squared_ir_sum_acc = tf.squeeze(squared_ir_sum_acc, -1)
min_depth = raw_depth
max_depth = raw_depth
# min_depth, max_depth
m_n = raw_depth_edge
m_l = tf.concat([m_n[:, :, 1::], m_n[:, :, 0:1]], 2)
m_r = tf.concat([m_n[:, :, -1::], m_n[:, :, 0:-1]], 2)
m_u = tf.concat([m_n[:, 1::, :], m_n[:, 0:1, :]], 1)
m_d = tf.concat([m_n[:, -1::, :], m_n[:, 0:-1, :]], 1)
m_lu = tf.concat([m_l[:, 1::, :], m_l[:, 0:1, :]], 1)
m_ru = tf.concat([m_r[:, 1::, :], m_r[:, 0:1, :]], 1)
m_ld = tf.concat([m_l[:, -1::, :], m_l[:, 0:-1, :]], 1)
m_rd = tf.concat([m_r[:, -1::, :], m_r[:, 0:-1, :]], 1)
m_shift = [ \
m_rd, m_d, m_ld, m_r, m_l, m_ru, m_u, m_lu
]
m_shift = tf.stack(m_shift, -1)
nonzero_msk = tf.cast(tf.greater(m_shift, 0.0), dtype=dtype)
m_shift_min = nonzero_msk * m_shift + (1 - nonzero_msk) * 99999999999
min_depth = tf.minimum(tf.reduce_min(m_shift_min, -1), min_depth)
max_depth = tf.maximum(tf.reduce_max(m_shift, -1), max_depth)
#
tmp0 = tf.sqrt(squared_ir_sum_acc * 9.0 - ir_sum_acc ** 2) / 9.0
edge_avg = tf.maximum( \
ir_sum_acc / 9.0, prms['edge_ab_avg_min_value']
)
tmp0 /= edge_avg
#
abs_min_diff = tf.abs(raw_depth - min_depth)
abs_max_diff = tf.abs(raw_depth - max_depth)
avg_diff = (abs_min_diff + abs_max_diff) * 0.5
max_abs_diff = tf.maximum(abs_min_diff, abs_max_diff)
cond0 = []
cond0.append(tf.cast(tf.less(0.0, raw_depth), dtype))
cond0.append(tf.cast(tf.greater_equal(tmp0, prms['edge_ab_std_dev_threshold']), dtype))
cond0.append(tf.cast(tf.less(prms['edge_close_delta_threshold'], abs_min_diff), dtype))
cond0.append(tf.cast(tf.less(prms['edge_far_delta_threshold'], abs_max_diff), dtype))
cond0.append(tf.cast(tf.less(prms['edge_max_delta_threshold'], max_abs_diff), dtype))
cond0.append(tf.cast(tf.less(prms['edge_avg_delta_threshold'], avg_diff), dtype))
cond0 = tf.reduce_prod(tf.stack(cond0, -1), -1)
depth_out = (1 - cond0) * raw_depth
# !cond0 part
edge_test_msk = 1 - tf.cast(tf.equal(raw_depth_edge, 0.0), dtype)
depth_out = raw_depth * (1 - cond0) * edge_test_msk
# mask out the depth out of the range
depth_out = depth_out * depth_msk
# mask out the edge
depth_out = edge_msk * raw_depth + (1 - edge_msk) * depth_out
# msk_out
msk_out = edge_msk + (1 - edge_msk) * depth_msk * (1 - cond0) * edge_test_msk
return depth_out, msk_out | UTF-8 | Python | false | false | 18,323 | py | 15 | kinect_pipeline.py | 14 | 0.54407 | 0.491459 | 0 | 490 | 36.395918 | 113 |
phanrahan/loam | 2,585,570,338,936 | 41a68bead09b92b4d046b152fc2385d7726e04eb | f28dd5d5dd4a966cfb981075c59023e737085f42 | /tests/test_mantle/mothball/arith/_fulladder.py | 6492acc68a01bcc9b7bf0628e08736967934c99a | [
"MIT"
] | permissive | https://github.com/phanrahan/loam | 91ec366a3652363207f42b2b5f04dad693669d6a | 10b08bd622b7cfd63eabaec4729f6238e4521b30 | refs/heads/master | 2021-03-24T14:01:40.877743 | 2019-10-07T18:15:09 | 2019-10-07T18:15:09 | 84,332,116 | 16 | 3 | NOASSERTION | false | 2019-12-30T08:24:19 | 2017-03-08T14:55:39 | 2019-11-09T00:45:40 | 2019-10-07T18:15:26 | 12,323 | 8 | 3 | 6 | Verilog | false | false | import sys
from magma import *
from mantle import *
from loam.shields.megawing import MegaWing
megawing = MegaWing()
megawing.Switch.on(3)
megawing.LED.on(2)
main = megawing.main()
fa = FullAdder(2, A0^A1,A0&A1)
fa( main.SWITCH[0], main.SWITCH[1] )
wire( main.SWITCH[2], fa.CIN )
wire( fa.O, main.LED[0] )
wire( fa.COUT, main.LED[1] )
compile(sys.argv[1], main)
| UTF-8 | Python | false | false | 370 | py | 507 | _fulladder.py | 279 | 0.691892 | 0.656757 | 0 | 20 | 17.4 | 42 |
LaGvidilo/ALIEN | 6,399,501,281,834 | 8d0763d87eb5f80fd6ea71f38f8bef19fa74d3bc | 9f65e7373ab61b456f68cf3bdc01dfb0e5634802 | /dico/DICO20.py | 9decdbf74b275e6d54b00743d1f3f0ccb2324db3 | [] | no_license | https://github.com/LaGvidilo/ALIEN | a8b43a78eff4a7079be2823a65846df6c70f5e90 | a4ffc4c0f1aed33ee42a8f581729579b43143e6e | refs/heads/master | 2016-08-03T17:23:41.376795 | 2015-09-15T03:15:15 | 2015-09-15T03:15:15 | 42,492,226 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
def get_seed(data):
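	# lookup table (apparently auto-generated): each 3-byte code maps to its 4-byte seed and vice versa, so the mapping is its own inverse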
if data==chr(203)+chr(85)+chr(166):
return chr(9)+chr(10)+chr(1)+chr(1)
if data==chr(9)+chr(10)+chr(1)+chr(1):
return chr(203)+chr(85)+chr(166)
if data==chr(0)+chr(187)+chr(84):
return chr(10)+chr(10)+chr(1)+chr(1)
if data==chr(10)+chr(10)+chr(1)+chr(1):
return chr(0)+chr(187)+chr(84)
if data==chr(203)+chr(108)+chr(24):
return chr(11)+chr(10)+chr(1)+chr(1)
if data==chr(11)+chr(10)+chr(1)+chr(1):
return chr(203)+chr(108)+chr(24)
if data==chr(111)+chr(75)+chr(109):
return chr(12)+chr(10)+chr(1)+chr(1)
if data==chr(12)+chr(10)+chr(1)+chr(1):
return chr(111)+chr(75)+chr(109)
if data==chr(80)+chr(112)+chr(232):
return chr(13)+chr(10)+chr(1)+chr(1)
if data==chr(13)+chr(10)+chr(1)+chr(1):
return chr(80)+chr(112)+chr(232)
if data==chr(218)+chr(19)+chr(32):
return chr(14)+chr(10)+chr(1)+chr(1)
if data==chr(14)+chr(10)+chr(1)+chr(1):
return chr(218)+chr(19)+chr(32)
if data==chr(138)+chr(235)+chr(238):
return chr(15)+chr(10)+chr(1)+chr(1)
if data==chr(15)+chr(10)+chr(1)+chr(1):
return chr(138)+chr(235)+chr(238)
if data==chr(15)+chr(251)+chr(203):
return chr(0)+chr(11)+chr(1)+chr(1)
if data==chr(0)+chr(11)+chr(1)+chr(1):
return chr(15)+chr(251)+chr(203)
if data==chr(155)+chr(120)+chr(173):
return chr(1)+chr(11)+chr(1)+chr(1)
if data==chr(1)+chr(11)+chr(1)+chr(1):
return chr(155)+chr(120)+chr(173)
if data==chr(78)+chr(183)+chr(230):
return chr(2)+chr(11)+chr(1)+chr(1)
if data==chr(2)+chr(11)+chr(1)+chr(1):
return chr(78)+chr(183)+chr(230)
if data==chr(57)+chr(88)+chr(7):
return chr(3)+chr(11)+chr(1)+chr(1)
if data==chr(3)+chr(11)+chr(1)+chr(1):
return chr(57)+chr(88)+chr(7)
if data==chr(22)+chr(239)+chr(33):
return chr(4)+chr(11)+chr(1)+chr(1)
if data==chr(4)+chr(11)+chr(1)+chr(1):
return chr(22)+chr(239)+chr(33)
if data==chr(37)+chr(31)+chr(121):
return chr(5)+chr(11)+chr(1)+chr(1)
if data==chr(5)+chr(11)+chr(1)+chr(1):
return chr(37)+chr(31)+chr(121)
if data==chr(18)+chr(247)+chr(104):
return chr(6)+chr(11)+chr(1)+chr(1)
if data==chr(6)+chr(11)+chr(1)+chr(1):
return chr(18)+chr(247)+chr(104)
if data==chr(44)+chr(117)+chr(132):
return chr(7)+chr(11)+chr(1)+chr(1)
if data==chr(7)+chr(11)+chr(1)+chr(1):
return chr(44)+chr(117)+chr(132)
if data==chr(143)+chr(175)+chr(253):
return chr(8)+chr(11)+chr(1)+chr(1)
if data==chr(8)+chr(11)+chr(1)+chr(1):
return chr(143)+chr(175)+chr(253)
if data==chr(63)+chr(53)+chr(213):
return chr(9)+chr(11)+chr(1)+chr(1)
if data==chr(9)+chr(11)+chr(1)+chr(1):
return chr(63)+chr(53)+chr(213)
if data==chr(51)+chr(118)+chr(195):
return chr(10)+chr(11)+chr(1)+chr(1)
if data==chr(10)+chr(11)+chr(1)+chr(1):
return chr(51)+chr(118)+chr(195)
if data==chr(109)+chr(23)+chr(214):
return chr(11)+chr(11)+chr(1)+chr(1)
if data==chr(11)+chr(11)+chr(1)+chr(1):
return chr(109)+chr(23)+chr(214)
if data==chr(185)+chr(209)+chr(65):
return chr(12)+chr(11)+chr(1)+chr(1)
if data==chr(12)+chr(11)+chr(1)+chr(1):
return chr(185)+chr(209)+chr(65)
if data==chr(177)+chr(174)+chr(104):
return chr(13)+chr(11)+chr(1)+chr(1)
if data==chr(13)+chr(11)+chr(1)+chr(1):
return chr(177)+chr(174)+chr(104)
if data==chr(182)+chr(239)+chr(45):
return chr(14)+chr(11)+chr(1)+chr(1)
if data==chr(14)+chr(11)+chr(1)+chr(1):
return chr(182)+chr(239)+chr(45)
if data==chr(48)+chr(48)+chr(151):
return chr(15)+chr(11)+chr(1)+chr(1)
if data==chr(15)+chr(11)+chr(1)+chr(1):
return chr(48)+chr(48)+chr(151)
if data==chr(66)+chr(249)+chr(30):
return chr(0)+chr(12)+chr(1)+chr(1)
if data==chr(0)+chr(12)+chr(1)+chr(1):
return chr(66)+chr(249)+chr(30)
if data==chr(191)+chr(56)+chr(206):
return chr(1)+chr(12)+chr(1)+chr(1)
if data==chr(1)+chr(12)+chr(1)+chr(1):
return chr(191)+chr(56)+chr(206)
if data==chr(82)+chr(155)+chr(136):
return chr(2)+chr(12)+chr(1)+chr(1)
if data==chr(2)+chr(12)+chr(1)+chr(1):
return chr(82)+chr(155)+chr(136)
if data==chr(89)+chr(133)+chr(246):
return chr(3)+chr(12)+chr(1)+chr(1)
if data==chr(3)+chr(12)+chr(1)+chr(1):
return chr(89)+chr(133)+chr(246)
if data==chr(167)+chr(245)+chr(52):
return chr(4)+chr(12)+chr(1)+chr(1)
if data==chr(4)+chr(12)+chr(1)+chr(1):
return chr(167)+chr(245)+chr(52)
if data==chr(249)+chr(36)+chr(66):
return chr(5)+chr(12)+chr(1)+chr(1)
if data==chr(5)+chr(12)+chr(1)+chr(1):
return chr(249)+chr(36)+chr(66)
if data==chr(93)+chr(15)+chr(2):
return chr(6)+chr(12)+chr(1)+chr(1)
if data==chr(6)+chr(12)+chr(1)+chr(1):
return chr(93)+chr(15)+chr(2)
if data==chr(156)+chr(196)+chr(218):
return chr(7)+chr(12)+chr(1)+chr(1)
if data==chr(7)+chr(12)+chr(1)+chr(1):
return chr(156)+chr(196)+chr(218)
if data==chr(247)+chr(60)+chr(191):
return chr(8)+chr(12)+chr(1)+chr(1)
if data==chr(8)+chr(12)+chr(1)+chr(1):
return chr(247)+chr(60)+chr(191)
if data==chr(99)+chr(100)+chr(39):
return chr(9)+chr(12)+chr(1)+chr(1)
if data==chr(9)+chr(12)+chr(1)+chr(1):
return chr(99)+chr(100)+chr(39)
if data==chr(153)+chr(168)+chr(242):
return chr(10)+chr(12)+chr(1)+chr(1)
if data==chr(10)+chr(12)+chr(1)+chr(1):
return chr(153)+chr(168)+chr(242)
if data==chr(6)+chr(215)+chr(132):
return chr(11)+chr(12)+chr(1)+chr(1)
if data==chr(11)+chr(12)+chr(1)+chr(1):
return chr(6)+chr(215)+chr(132)
if data==chr(146)+chr(233)+chr(165):
return chr(12)+chr(12)+chr(1)+chr(1)
if data==chr(12)+chr(12)+chr(1)+chr(1):
return chr(146)+chr(233)+chr(165)
if data==chr(137)+chr(46)+chr(229):
return chr(13)+chr(12)+chr(1)+chr(1)
if data==chr(13)+chr(12)+chr(1)+chr(1):
return chr(137)+chr(46)+chr(229)
if data==chr(198)+chr(167)+chr(87):
return chr(14)+chr(12)+chr(1)+chr(1)
if data==chr(14)+chr(12)+chr(1)+chr(1):
return chr(198)+chr(167)+chr(87)
if data==chr(21)+chr(211)+chr(36):
return chr(15)+chr(12)+chr(1)+chr(1)
if data==chr(15)+chr(12)+chr(1)+chr(1):
return chr(21)+chr(211)+chr(36)
if data==chr(103)+chr(72)+chr(196):
return chr(0)+chr(13)+chr(1)+chr(1)
if data==chr(0)+chr(13)+chr(1)+chr(1):
return chr(103)+chr(72)+chr(196)
if data==chr(191)+chr(129)+chr(139):
return chr(1)+chr(13)+chr(1)+chr(1)
if data==chr(1)+chr(13)+chr(1)+chr(1):
return chr(191)+chr(129)+chr(139)
if data==chr(72)+chr(111)+chr(90):
return chr(2)+chr(13)+chr(1)+chr(1)
if data==chr(2)+chr(13)+chr(1)+chr(1):
return chr(72)+chr(111)+chr(90)
if data==chr(60)+chr(134)+chr(130):
return chr(3)+chr(13)+chr(1)+chr(1)
if data==chr(3)+chr(13)+chr(1)+chr(1):
return chr(60)+chr(134)+chr(130)
if data==chr(54)+chr(70)+chr(169):
return chr(4)+chr(13)+chr(1)+chr(1)
if data==chr(4)+chr(13)+chr(1)+chr(1):
return chr(54)+chr(70)+chr(169)
if data==chr(125)+chr(166)+chr(226):
return chr(5)+chr(13)+chr(1)+chr(1)
if data==chr(5)+chr(13)+chr(1)+chr(1):
return chr(125)+chr(166)+chr(226)
if data==chr(170)+chr(167)+chr(20):
return chr(6)+chr(13)+chr(1)+chr(1)
if data==chr(6)+chr(13)+chr(1)+chr(1):
return chr(170)+chr(167)+chr(20)
if data==chr(103)+chr(228)+chr(180):
return chr(7)+chr(13)+chr(1)+chr(1)
if data==chr(7)+chr(13)+chr(1)+chr(1):
return chr(103)+chr(228)+chr(180)
if data==chr(97)+chr(164)+chr(83):
return chr(8)+chr(13)+chr(1)+chr(1)
if data==chr(8)+chr(13)+chr(1)+chr(1):
return chr(97)+chr(164)+chr(83)
if data==chr(100)+chr(47)+chr(114):
return chr(9)+chr(13)+chr(1)+chr(1)
if data==chr(9)+chr(13)+chr(1)+chr(1):
return chr(100)+chr(47)+chr(114)
if data==chr(114)+chr(141)+chr(7):
return chr(10)+chr(13)+chr(1)+chr(1)
if data==chr(10)+chr(13)+chr(1)+chr(1):
return chr(114)+chr(141)+chr(7)
if data==chr(82)+chr(71)+chr(145):
return chr(11)+chr(13)+chr(1)+chr(1)
if data==chr(11)+chr(13)+chr(1)+chr(1):
return chr(82)+chr(71)+chr(145)
if data==chr(159)+chr(4)+chr(132):
return chr(12)+chr(13)+chr(1)+chr(1)
if data==chr(12)+chr(13)+chr(1)+chr(1):
return chr(159)+chr(4)+chr(132)
if data==chr(39)+chr(197)+chr(26):
return chr(13)+chr(13)+chr(1)+chr(1)
if data==chr(13)+chr(13)+chr(1)+chr(1):
return chr(39)+chr(197)+chr(26)
if data==chr(136)+chr(9)+chr(55):
return chr(14)+chr(13)+chr(1)+chr(1)
if data==chr(14)+chr(13)+chr(1)+chr(1):
return chr(136)+chr(9)+chr(55)
if data==chr(46)+chr(92)+chr(30):
return chr(15)+chr(13)+chr(1)+chr(1)
if data==chr(15)+chr(13)+chr(1)+chr(1):
return chr(46)+chr(92)+chr(30)
if data==chr(175)+chr(212)+chr(142):
return chr(0)+chr(14)+chr(1)+chr(1)
if data==chr(0)+chr(14)+chr(1)+chr(1):
return chr(175)+chr(212)+chr(142)
if data==chr(154)+chr(48)+chr(5):
return chr(1)+chr(14)+chr(1)+chr(1)
if data==chr(1)+chr(14)+chr(1)+chr(1):
return chr(154)+chr(48)+chr(5)
if data==chr(41)+chr(120)+chr(190):
return chr(2)+chr(14)+chr(1)+chr(1)
if data==chr(2)+chr(14)+chr(1)+chr(1):
return chr(41)+chr(120)+chr(190)
if data==chr(86)+chr(68)+chr(232):
return chr(3)+chr(14)+chr(1)+chr(1)
if data==chr(3)+chr(14)+chr(1)+chr(1):
return chr(86)+chr(68)+chr(232)
if data==chr(61)+chr(155)+chr(13):
return chr(4)+chr(14)+chr(1)+chr(1)
if data==chr(4)+chr(14)+chr(1)+chr(1):
return chr(61)+chr(155)+chr(13)
if data==chr(4)+chr(64)+chr(41):
return chr(5)+chr(14)+chr(1)+chr(1)
if data==chr(5)+chr(14)+chr(1)+chr(1):
return chr(4)+chr(64)+chr(41)
if data==chr(60)+chr(121)+chr(33):
return chr(6)+chr(14)+chr(1)+chr(1)
if data==chr(6)+chr(14)+chr(1)+chr(1):
return chr(60)+chr(121)+chr(33)
if data==chr(202)+chr(228)+chr(70):
return chr(7)+chr(14)+chr(1)+chr(1)
if data==chr(7)+chr(14)+chr(1)+chr(1):
return chr(202)+chr(228)+chr(70)
if data==chr(30)+chr(53)+chr(164):
return chr(8)+chr(14)+chr(1)+chr(1)
if data==chr(8)+chr(14)+chr(1)+chr(1):
return chr(30)+chr(53)+chr(164)
if data==chr(105)+chr(65)+chr(227):
return chr(9)+chr(14)+chr(1)+chr(1)
if data==chr(9)+chr(14)+chr(1)+chr(1):
return chr(105)+chr(65)+chr(227)
if data==chr(46)+chr(243)+chr(216):
return chr(10)+chr(14)+chr(1)+chr(1)
if data==chr(10)+chr(14)+chr(1)+chr(1):
return chr(46)+chr(243)+chr(216)
if data==chr(102)+chr(46)+chr(130):
return chr(11)+chr(14)+chr(1)+chr(1)
if data==chr(11)+chr(14)+chr(1)+chr(1):
return chr(102)+chr(46)+chr(130)
if data==chr(123)+chr(119)+chr(205):
return chr(12)+chr(14)+chr(1)+chr(1)
if data==chr(12)+chr(14)+chr(1)+chr(1):
return chr(123)+chr(119)+chr(205)
if data==chr(61)+chr(94)+chr(207):
return chr(13)+chr(14)+chr(1)+chr(1)
if data==chr(13)+chr(14)+chr(1)+chr(1):
return chr(61)+chr(94)+chr(207)
if data==chr(192)+chr(237)+chr(76):
return chr(14)+chr(14)+chr(1)+chr(1)
if data==chr(14)+chr(14)+chr(1)+chr(1):
return chr(192)+chr(237)+chr(76)
if data==chr(40)+chr(232)+chr(30):
return chr(15)+chr(14)+chr(1)+chr(1)
if data==chr(15)+chr(14)+chr(1)+chr(1):
return chr(40)+chr(232)+chr(30)
if data==chr(211)+chr(54)+chr(151):
return chr(0)+chr(15)+chr(1)+chr(1)
if data==chr(0)+chr(15)+chr(1)+chr(1):
return chr(211)+chr(54)+chr(151)
if data==chr(154)+chr(195)+chr(185):
return chr(1)+chr(15)+chr(1)+chr(1)
if data==chr(1)+chr(15)+chr(1)+chr(1):
return chr(154)+chr(195)+chr(185)
if data==chr(125)+chr(212)+chr(49):
return chr(2)+chr(15)+chr(1)+chr(1)
if data==chr(2)+chr(15)+chr(1)+chr(1):
return chr(125)+chr(212)+chr(49)
if data==chr(156)+chr(116)+chr(96):
return chr(3)+chr(15)+chr(1)+chr(1)
if data==chr(3)+chr(15)+chr(1)+chr(1):
return chr(156)+chr(116)+chr(96)
if data==chr(171)+chr(234)+chr(243):
return chr(4)+chr(15)+chr(1)+chr(1)
if data==chr(4)+chr(15)+chr(1)+chr(1):
return chr(171)+chr(234)+chr(243)
if data==chr(246)+chr(110)+chr(90):
return chr(5)+chr(15)+chr(1)+chr(1)
if data==chr(5)+chr(15)+chr(1)+chr(1):
return chr(246)+chr(110)+chr(90)
if data==chr(127)+chr(57)+chr(155):
return chr(6)+chr(15)+chr(1)+chr(1)
if data==chr(6)+chr(15)+chr(1)+chr(1):
return chr(127)+chr(57)+chr(155)
if data==chr(20)+chr(94)+chr(79):
return chr(7)+chr(15)+chr(1)+chr(1)
if data==chr(7)+chr(15)+chr(1)+chr(1):
return chr(20)+chr(94)+chr(79)
if data==chr(83)+chr(28)+chr(191):
return chr(8)+chr(15)+chr(1)+chr(1)
if data==chr(8)+chr(15)+chr(1)+chr(1):
return chr(83)+chr(28)+chr(191)
if data==chr(42)+chr(138)+chr(73):
return chr(9)+chr(15)+chr(1)+chr(1)
if data==chr(9)+chr(15)+chr(1)+chr(1):
return chr(42)+chr(138)+chr(73)
if data==chr(244)+chr(122)+chr(5):
return chr(10)+chr(15)+chr(1)+chr(1)
if data==chr(10)+chr(15)+chr(1)+chr(1):
return chr(244)+chr(122)+chr(5)
if data==chr(101)+chr(244)+chr(75):
return chr(11)+chr(15)+chr(1)+chr(1)
if data==chr(11)+chr(15)+chr(1)+chr(1):
return chr(101)+chr(244)+chr(75)
if data==chr(19)+chr(254)+chr(92):
return chr(12)+chr(15)+chr(1)+chr(1)
if data==chr(12)+chr(15)+chr(1)+chr(1):
return chr(19)+chr(254)+chr(92)
if data==chr(192)+chr(3)+chr(110):
return chr(13)+chr(15)+chr(1)+chr(1)
if data==chr(13)+chr(15)+chr(1)+chr(1):
return chr(192)+chr(3)+chr(110)
if data==chr(158)+chr(99)+chr(192):
return chr(14)+chr(15)+chr(1)+chr(1)
if data==chr(14)+chr(15)+chr(1)+chr(1):
return chr(158)+chr(99)+chr(192)
if data==chr(133)+chr(122)+chr(254):
return chr(15)+chr(15)+chr(1)+chr(1)
if data==chr(15)+chr(15)+chr(1)+chr(1):
return chr(133)+chr(122)+chr(254)
if data==chr(86)+chr(217)+chr(211):
return chr(0)+chr(0)+chr(2)+chr(1)
if data==chr(0)+chr(0)+chr(2)+chr(1):
return chr(86)+chr(217)+chr(211)
if data==chr(48)+chr(115)+chr(155):
return chr(1)+chr(0)+chr(2)+chr(1)
if data==chr(1)+chr(0)+chr(2)+chr(1):
return chr(48)+chr(115)+chr(155)
if data==chr(162)+chr(114)+chr(220):
return chr(2)+chr(0)+chr(2)+chr(1)
if data==chr(2)+chr(0)+chr(2)+chr(1):
return chr(162)+chr(114)+chr(220)
if data==chr(54)+chr(20)+chr(7):
return chr(3)+chr(0)+chr(2)+chr(1)
if data==chr(3)+chr(0)+chr(2)+chr(1):
return chr(54)+chr(20)+chr(7)
if data==chr(147)+chr(237)+chr(232):
return chr(4)+chr(0)+chr(2)+chr(1)
if data==chr(4)+chr(0)+chr(2)+chr(1):
return chr(147)+chr(237)+chr(232)
if data==chr(217)+chr(114)+chr(47):
return chr(5)+chr(0)+chr(2)+chr(1)
if data==chr(5)+chr(0)+chr(2)+chr(1):
return chr(217)+chr(114)+chr(47)
if data==chr(253)+chr(62)+chr(72):
return chr(6)+chr(0)+chr(2)+chr(1)
if data==chr(6)+chr(0)+chr(2)+chr(1):
return chr(253)+chr(62)+chr(72)
if data==chr(58)+chr(123)+chr(148):
return chr(7)+chr(0)+chr(2)+chr(1)
if data==chr(7)+chr(0)+chr(2)+chr(1):
return chr(58)+chr(123)+chr(148)
if data==chr(34)+chr(7)+chr(194):
return chr(8)+chr(0)+chr(2)+chr(1)
if data==chr(8)+chr(0)+chr(2)+chr(1):
return chr(34)+chr(7)+chr(194)
if data==chr(146)+chr(10)+chr(77):
return chr(9)+chr(0)+chr(2)+chr(1)
if data==chr(9)+chr(0)+chr(2)+chr(1):
return chr(146)+chr(10)+chr(77)
if data==chr(182)+chr(226)+chr(185):
return chr(10)+chr(0)+chr(2)+chr(1)
if data==chr(10)+chr(0)+chr(2)+chr(1):
return chr(182)+chr(226)+chr(185)
if data==chr(64)+chr(143)+chr(233):
return chr(11)+chr(0)+chr(2)+chr(1)
if data==chr(11)+chr(0)+chr(2)+chr(1):
return chr(64)+chr(143)+chr(233)
if data==chr(153)+chr(66)+chr(196):
return chr(12)+chr(0)+chr(2)+chr(1)
if data==chr(12)+chr(0)+chr(2)+chr(1):
return chr(153)+chr(66)+chr(196)
if data==chr(0)+chr(36)+chr(177):
return chr(13)+chr(0)+chr(2)+chr(1)
if data==chr(13)+chr(0)+chr(2)+chr(1):
return chr(0)+chr(36)+chr(177)
if data==chr(214)+chr(10)+chr(16):
return chr(14)+chr(0)+chr(2)+chr(1)
if data==chr(14)+chr(0)+chr(2)+chr(1):
return chr(214)+chr(10)+chr(16)
if data==chr(67)+chr(101)+chr(160):
return chr(15)+chr(0)+chr(2)+chr(1)
if data==chr(15)+chr(0)+chr(2)+chr(1):
return chr(67)+chr(101)+chr(160)
if data==chr(15)+chr(19)+chr(66):
return chr(0)+chr(1)+chr(2)+chr(1)
if data==chr(0)+chr(1)+chr(2)+chr(1):
return chr(15)+chr(19)+chr(66)
if data==chr(111)+chr(11)+chr(60):
return chr(1)+chr(1)+chr(2)+chr(1)
if data==chr(1)+chr(1)+chr(2)+chr(1):
return chr(111)+chr(11)+chr(60)
if data==chr(34)+chr(163)+chr(230):
return chr(2)+chr(1)+chr(2)+chr(1)
if data==chr(2)+chr(1)+chr(2)+chr(1):
return chr(34)+chr(163)+chr(230)
if data==chr(210)+chr(240)+chr(114):
return chr(3)+chr(1)+chr(2)+chr(1)
if data==chr(3)+chr(1)+chr(2)+chr(1):
return chr(210)+chr(240)+chr(114)
if data==chr(85)+chr(9)+chr(44):
return chr(4)+chr(1)+chr(2)+chr(1)
if data==chr(4)+chr(1)+chr(2)+chr(1):
return chr(85)+chr(9)+chr(44)
if data==chr(208)+chr(80)+chr(225):
return chr(5)+chr(1)+chr(2)+chr(1)
if data==chr(5)+chr(1)+chr(2)+chr(1):
return chr(208)+chr(80)+chr(225)
if data==chr(199)+chr(95)+chr(55):
return chr(6)+chr(1)+chr(2)+chr(1)
if data==chr(6)+chr(1)+chr(2)+chr(1):
return chr(199)+chr(95)+chr(55)
if data==chr(137)+chr(43)+chr(222):
return chr(7)+chr(1)+chr(2)+chr(1)
if data==chr(7)+chr(1)+chr(2)+chr(1):
return chr(137)+chr(43)+chr(222)
if data==chr(155)+chr(147)+chr(174):
return chr(8)+chr(1)+chr(2)+chr(1)
if data==chr(8)+chr(1)+chr(2)+chr(1):
return chr(155)+chr(147)+chr(174)
if data==chr(137)+chr(0)+chr(88):
return chr(9)+chr(1)+chr(2)+chr(1)
if data==chr(9)+chr(1)+chr(2)+chr(1):
return chr(137)+chr(0)+chr(88)
if data==chr(31)+chr(114)+chr(141):
return chr(10)+chr(1)+chr(2)+chr(1)
if data==chr(10)+chr(1)+chr(2)+chr(1):
return chr(31)+chr(114)+chr(141)
if data==chr(171)+chr(41)+chr(103):
return chr(11)+chr(1)+chr(2)+chr(1)
if data==chr(11)+chr(1)+chr(2)+chr(1):
return chr(171)+chr(41)+chr(103)
if data==chr(9)+chr(170)+chr(21):
return chr(12)+chr(1)+chr(2)+chr(1)
if data==chr(12)+chr(1)+chr(2)+chr(1):
return chr(9)+chr(170)+chr(21)
if data==chr(97)+chr(57)+chr(196):
return chr(13)+chr(1)+chr(2)+chr(1)
if data==chr(13)+chr(1)+chr(2)+chr(1):
return chr(97)+chr(57)+chr(196)
if data==chr(245)+chr(36)+chr(102):
return chr(14)+chr(1)+chr(2)+chr(1)
if data==chr(14)+chr(1)+chr(2)+chr(1):
return chr(245)+chr(36)+chr(102)
if data==chr(135)+chr(191)+chr(201):
return chr(15)+chr(1)+chr(2)+chr(1)
if data==chr(15)+chr(1)+chr(2)+chr(1):
return chr(135)+chr(191)+chr(201)
if data==chr(63)+chr(59)+chr(15):
return chr(0)+chr(2)+chr(2)+chr(1)
if data==chr(0)+chr(2)+chr(2)+chr(1):
return chr(63)+chr(59)+chr(15)
if data==chr(235)+chr(230)+chr(231):
return chr(1)+chr(2)+chr(2)+chr(1)
if data==chr(1)+chr(2)+chr(2)+chr(1):
return chr(235)+chr(230)+chr(231)
if data==chr(132)+chr(100)+chr(186):
return chr(2)+chr(2)+chr(2)+chr(1)
if data==chr(2)+chr(2)+chr(2)+chr(1):
return chr(132)+chr(100)+chr(186)
if data==chr(143)+chr(26)+chr(91):
return chr(3)+chr(2)+chr(2)+chr(1)
if data==chr(3)+chr(2)+chr(2)+chr(1):
return chr(143)+chr(26)+chr(91)
if data==chr(236)+chr(199)+chr(133):
return chr(4)+chr(2)+chr(2)+chr(1)
if data==chr(4)+chr(2)+chr(2)+chr(1):
return chr(236)+chr(199)+chr(133)
if data==chr(84)+chr(229)+chr(131):
return chr(5)+chr(2)+chr(2)+chr(1)
if data==chr(5)+chr(2)+chr(2)+chr(1):
return chr(84)+chr(229)+chr(131)
if data==chr(185)+chr(121)+chr(13):
return chr(6)+chr(2)+chr(2)+chr(1)
if data==chr(6)+chr(2)+chr(2)+chr(1):
return chr(185)+chr(121)+chr(13)
if data==chr(150)+chr(111)+chr(220):
return chr(7)+chr(2)+chr(2)+chr(1)
if data==chr(7)+chr(2)+chr(2)+chr(1):
return chr(150)+chr(111)+chr(220)
if data==chr(43)+chr(147)+chr(152):
return chr(8)+chr(2)+chr(2)+chr(1)
if data==chr(8)+chr(2)+chr(2)+chr(1):
return chr(43)+chr(147)+chr(152)
if data==chr(215)+chr(43)+chr(164):
return chr(9)+chr(2)+chr(2)+chr(1)
if data==chr(9)+chr(2)+chr(2)+chr(1):
return chr(215)+chr(43)+chr(164)
if data==chr(84)+chr(166)+chr(48):
return chr(10)+chr(2)+chr(2)+chr(1)
if data==chr(10)+chr(2)+chr(2)+chr(1):
return chr(84)+chr(166)+chr(48)
if data==chr(11)+chr(62)+chr(11):
return chr(11)+chr(2)+chr(2)+chr(1)
if data==chr(11)+chr(2)+chr(2)+chr(1):
return chr(11)+chr(62)+chr(11)
if data==chr(98)+chr(66)+chr(112):
return chr(12)+chr(2)+chr(2)+chr(1)
if data==chr(12)+chr(2)+chr(2)+chr(1):
return chr(98)+chr(66)+chr(112)
if data==chr(168)+chr(97)+chr(33):
return chr(13)+chr(2)+chr(2)+chr(1)
if data==chr(13)+chr(2)+chr(2)+chr(1):
return chr(168)+chr(97)+chr(33)
if data==chr(186)+chr(51)+chr(78):
return chr(14)+chr(2)+chr(2)+chr(1)
if data==chr(14)+chr(2)+chr(2)+chr(1):
return chr(186)+chr(51)+chr(78)
if data==chr(78)+chr(28)+chr(114):
return chr(15)+chr(2)+chr(2)+chr(1)
if data==chr(15)+chr(2)+chr(2)+chr(1):
return chr(78)+chr(28)+chr(114)
if data==chr(108)+chr(168)+chr(14):
return chr(0)+chr(3)+chr(2)+chr(1)
if data==chr(0)+chr(3)+chr(2)+chr(1):
return chr(108)+chr(168)+chr(14)
if data==chr(174)+chr(13)+chr(113):
return chr(1)+chr(3)+chr(2)+chr(1)
if data==chr(1)+chr(3)+chr(2)+chr(1):
return chr(174)+chr(13)+chr(113)
if data==chr(80)+chr(135)+chr(252):
return chr(2)+chr(3)+chr(2)+chr(1)
if data==chr(2)+chr(3)+chr(2)+chr(1):
return chr(80)+chr(135)+chr(252)
if data==chr(142)+chr(103)+chr(189):
return chr(3)+chr(3)+chr(2)+chr(1)
if data==chr(3)+chr(3)+chr(2)+chr(1):
return chr(142)+chr(103)+chr(189)
if data==chr(97)+chr(156)+chr(237):
return chr(4)+chr(3)+chr(2)+chr(1)
if data==chr(4)+chr(3)+chr(2)+chr(1):
return chr(97)+chr(156)+chr(237)
if data==chr(18)+chr(214)+chr(237):
return chr(5)+chr(3)+chr(2)+chr(1)
if data==chr(5)+chr(3)+chr(2)+chr(1):
return chr(18)+chr(214)+chr(237)
if data==chr(60)+chr(91)+chr(149):
return chr(6)+chr(3)+chr(2)+chr(1)
if data==chr(6)+chr(3)+chr(2)+chr(1):
return chr(60)+chr(91)+chr(149)
if data==chr(71)+chr(244)+chr(147):
return chr(7)+chr(3)+chr(2)+chr(1)
if data==chr(7)+chr(3)+chr(2)+chr(1):
return chr(71)+chr(244)+chr(147)
if data==chr(157)+chr(91)+chr(120):
return chr(8)+chr(3)+chr(2)+chr(1)
if data==chr(8)+chr(3)+chr(2)+chr(1):
return chr(157)+chr(91)+chr(120)
if data==chr(220)+chr(251)+chr(220):
return chr(9)+chr(3)+chr(2)+chr(1)
if data==chr(9)+chr(3)+chr(2)+chr(1):
return chr(220)+chr(251)+chr(220)
if data==chr(56)+chr(144)+chr(194):
return chr(10)+chr(3)+chr(2)+chr(1)
if data==chr(10)+chr(3)+chr(2)+chr(1):
return chr(56)+chr(144)+chr(194)
if data==chr(251)+chr(44)+chr(105):
return chr(11)+chr(3)+chr(2)+chr(1)
if data==chr(11)+chr(3)+chr(2)+chr(1):
return chr(251)+chr(44)+chr(105)
if data==chr(197)+chr(177)+chr(157):
return chr(12)+chr(3)+chr(2)+chr(1)
if data==chr(12)+chr(3)+chr(2)+chr(1):
return chr(197)+chr(177)+chr(157)
if data==chr(216)+chr(114)+chr(253):
return chr(13)+chr(3)+chr(2)+chr(1)
if data==chr(13)+chr(3)+chr(2)+chr(1):
return chr(216)+chr(114)+chr(253)
if data==chr(35)+chr(169)+chr(199):
return chr(14)+chr(3)+chr(2)+chr(1)
if data==chr(14)+chr(3)+chr(2)+chr(1):
return chr(35)+chr(169)+chr(199)
if data==chr(58)+chr(157)+chr(228):
return chr(15)+chr(3)+chr(2)+chr(1)
if data==chr(15)+chr(3)+chr(2)+chr(1):
return chr(58)+chr(157)+chr(228)
if data==chr(92)+chr(82)+chr(7):
return chr(0)+chr(4)+chr(2)+chr(1)
if data==chr(0)+chr(4)+chr(2)+chr(1):
return chr(92)+chr(82)+chr(7)
if data==chr(103)+chr(190)+chr(137):
return chr(1)+chr(4)+chr(2)+chr(1)
if data==chr(1)+chr(4)+chr(2)+chr(1):
return chr(103)+chr(190)+chr(137)
if data==chr(161)+chr(151)+chr(36):
return chr(2)+chr(4)+chr(2)+chr(1)
if data==chr(2)+chr(4)+chr(2)+chr(1):
return chr(161)+chr(151)+chr(36)
if data==chr(187)+chr(236)+chr(71):
return chr(3)+chr(4)+chr(2)+chr(1)
if data==chr(3)+chr(4)+chr(2)+chr(1):
return chr(187)+chr(236)+chr(71)
if data==chr(70)+chr(250)+chr(193):
return chr(4)+chr(4)+chr(2)+chr(1)
if data==chr(4)+chr(4)+chr(2)+chr(1):
return chr(70)+chr(250)+chr(193)
if data==chr(95)+chr(133)+chr(227):
return chr(5)+chr(4)+chr(2)+chr(1)
if data==chr(5)+chr(4)+chr(2)+chr(1):
return chr(95)+chr(133)+chr(227)
if data==chr(251)+chr(2)+chr(20):
return chr(6)+chr(4)+chr(2)+chr(1)
if data==chr(6)+chr(4)+chr(2)+chr(1):
return chr(251)+chr(2)+chr(20)
if data==chr(123)+chr(64)+chr(22):
return chr(7)+chr(4)+chr(2)+chr(1)
if data==chr(7)+chr(4)+chr(2)+chr(1):
return chr(123)+chr(64)+chr(22)
if data==chr(98)+chr(188)+chr(210):
return chr(8)+chr(4)+chr(2)+chr(1)
if data==chr(8)+chr(4)+chr(2)+chr(1):
return chr(98)+chr(188)+chr(210)
if data==chr(12)+chr(223)+chr(222):
return chr(9)+chr(4)+chr(2)+chr(1)
if data==chr(9)+chr(4)+chr(2)+chr(1):
return chr(12)+chr(223)+chr(222)
if data==chr(115)+chr(18)+chr(95):
return chr(10)+chr(4)+chr(2)+chr(1)
if data==chr(10)+chr(4)+chr(2)+chr(1):
return chr(115)+chr(18)+chr(95)
if data==chr(253)+chr(147)+chr(95):
return chr(11)+chr(4)+chr(2)+chr(1)
if data==chr(11)+chr(4)+chr(2)+chr(1):
return chr(253)+chr(147)+chr(95)
if data==chr(245)+chr(183)+chr(149):
return chr(12)+chr(4)+chr(2)+chr(1)
if data==chr(12)+chr(4)+chr(2)+chr(1):
return chr(245)+chr(183)+chr(149)
if data==chr(235)+chr(56)+chr(174):
return chr(13)+chr(4)+chr(2)+chr(1)
if data==chr(13)+chr(4)+chr(2)+chr(1):
return chr(235)+chr(56)+chr(174)
if data==chr(77)+chr(186)+chr(132):
return chr(14)+chr(4)+chr(2)+chr(1)
if data==chr(14)+chr(4)+chr(2)+chr(1):
return chr(77)+chr(186)+chr(132)
if data==chr(236)+chr(71)+chr(57):
return chr(15)+chr(4)+chr(2)+chr(1)
if data==chr(15)+chr(4)+chr(2)+chr(1):
return chr(236)+chr(71)+chr(57)
if data==chr(139)+chr(85)+chr(117):
return chr(0)+chr(5)+chr(2)+chr(1)
if data==chr(0)+chr(5)+chr(2)+chr(1):
return chr(139)+chr(85)+chr(117)
if data==chr(226)+chr(231)+chr(131):
return chr(1)+chr(5)+chr(2)+chr(1)
if data==chr(1)+chr(5)+chr(2)+chr(1):
return chr(226)+chr(231)+chr(131)
if data==chr(85)+chr(246)+chr(90):
return chr(2)+chr(5)+chr(2)+chr(1)
if data==chr(2)+chr(5)+chr(2)+chr(1):
return chr(85)+chr(246)+chr(90)
if data==chr(29)+chr(117)+chr(11):
return chr(3)+chr(5)+chr(2)+chr(1)
if data==chr(3)+chr(5)+chr(2)+chr(1):
return chr(29)+chr(117)+chr(11)
if data==chr(79)+chr(133)+chr(69):
return chr(4)+chr(5)+chr(2)+chr(1)
if data==chr(4)+chr(5)+chr(2)+chr(1):
return chr(79)+chr(133)+chr(69)
if data==chr(44)+chr(73)+chr(92):
return chr(5)+chr(5)+chr(2)+chr(1)
if data==chr(5)+chr(5)+chr(2)+chr(1):
return chr(44)+chr(73)+chr(92)
if data==chr(223)+chr(155)+chr(192):
return chr(6)+chr(5)+chr(2)+chr(1)
if data==chr(6)+chr(5)+chr(2)+chr(1):
return chr(223)+chr(155)+chr(192)
if data==chr(174)+chr(215)+chr(103):
return chr(7)+chr(5)+chr(2)+chr(1)
if data==chr(7)+chr(5)+chr(2)+chr(1):
return chr(174)+chr(215)+chr(103)
if data==chr(246)+chr(88)+chr(135):
return chr(8)+chr(5)+chr(2)+chr(1)
if data==chr(8)+chr(5)+chr(2)+chr(1):
return chr(246)+chr(88)+chr(135)
if data==chr(37)+chr(190)+chr(58):
return chr(9)+chr(5)+chr(2)+chr(1)
if data==chr(9)+chr(5)+chr(2)+chr(1):
return chr(37)+chr(190)+chr(58)
if data==chr(250)+chr(4)+chr(100):
return chr(10)+chr(5)+chr(2)+chr(1)
if data==chr(10)+chr(5)+chr(2)+chr(1):
return chr(250)+chr(4)+chr(100)
if data==chr(91)+chr(15)+chr(72):
return chr(11)+chr(5)+chr(2)+chr(1)
if data==chr(11)+chr(5)+chr(2)+chr(1):
return chr(91)+chr(15)+chr(72)
if data==chr(86)+chr(253)+chr(30):
return chr(12)+chr(5)+chr(2)+chr(1)
if data==chr(12)+chr(5)+chr(2)+chr(1):
return chr(86)+chr(253)+chr(30)
if data==chr(103)+chr(255)+chr(116):
return chr(13)+chr(5)+chr(2)+chr(1)
if data==chr(13)+chr(5)+chr(2)+chr(1):
return chr(103)+chr(255)+chr(116)
if data==chr(95)+chr(105)+chr(237):
return chr(14)+chr(5)+chr(2)+chr(1)
if data==chr(14)+chr(5)+chr(2)+chr(1):
return chr(95)+chr(105)+chr(237)
if data==chr(197)+chr(151)+chr(211):
return chr(15)+chr(5)+chr(2)+chr(1)
if data==chr(15)+chr(5)+chr(2)+chr(1):
return chr(197)+chr(151)+chr(211)
if data==chr(177)+chr(253)+chr(247):
return chr(0)+chr(6)+chr(2)+chr(1)
if data==chr(0)+chr(6)+chr(2)+chr(1):
return chr(177)+chr(253)+chr(247)
if data==chr(160)+chr(213)+chr(185):
return chr(1)+chr(6)+chr(2)+chr(1)
if data==chr(1)+chr(6)+chr(2)+chr(1):
return chr(160)+chr(213)+chr(185)
if data==chr(229)+chr(129)+chr(34):
return chr(2)+chr(6)+chr(2)+chr(1)
if data==chr(2)+chr(6)+chr(2)+chr(1):
return chr(229)+chr(129)+chr(34)
if data==chr(216)+chr(42)+chr(111):
return chr(3)+chr(6)+chr(2)+chr(1)
if data==chr(3)+chr(6)+chr(2)+chr(1):
return chr(216)+chr(42)+chr(111)
if data==chr(80)+chr(142)+chr(11):
return chr(4)+chr(6)+chr(2)+chr(1)
if data==chr(4)+chr(6)+chr(2)+chr(1):
return chr(80)+chr(142)+chr(11)
if data==chr(130)+chr(101)+chr(218):
return chr(5)+chr(6)+chr(2)+chr(1)
if data==chr(5)+chr(6)+chr(2)+chr(1):
return chr(130)+chr(101)+chr(218)
if data==chr(21)+chr(150)+chr(100):
return chr(6)+chr(6)+chr(2)+chr(1)
if data==chr(6)+chr(6)+chr(2)+chr(1):
return chr(21)+chr(150)+chr(100)
if data==chr(94)+chr(130)+chr(178):
return chr(7)+chr(6)+chr(2)+chr(1)
if data==chr(7)+chr(6)+chr(2)+chr(1):
return chr(94)+chr(130)+chr(178)
if data==chr(218)+chr(163)+chr(173):
return chr(8)+chr(6)+chr(2)+chr(1)
if data==chr(8)+chr(6)+chr(2)+chr(1):
return chr(218)+chr(163)+chr(173)
if data==chr(90)+chr(216)+chr(62):
return chr(9)+chr(6)+chr(2)+chr(1)
if data==chr(9)+chr(6)+chr(2)+chr(1):
return chr(90)+chr(216)+chr(62)
if data==chr(180)+chr(14)+chr(251):
return chr(10)+chr(6)+chr(2)+chr(1)
if data==chr(10)+chr(6)+chr(2)+chr(1):
return chr(180)+chr(14)+chr(251)
if data==chr(72)+chr(140)+chr(178):
return chr(11)+chr(6)+chr(2)+chr(1)
if data==chr(11)+chr(6)+chr(2)+chr(1):
return chr(72)+chr(140)+chr(178)
if data==chr(230)+chr(27)+chr(145):
return chr(12)+chr(6)+chr(2)+chr(1)
if data==chr(12)+chr(6)+chr(2)+chr(1):
return chr(230)+chr(27)+chr(145)
if data==chr(109)+chr(180)+chr(67):
return chr(13)+chr(6)+chr(2)+chr(1)
if data==chr(13)+chr(6)+chr(2)+chr(1):
return chr(109)+chr(180)+chr(67)
if data==chr(25)+chr(141)+chr(83):
return chr(14)+chr(6)+chr(2)+chr(1)
if data==chr(14)+chr(6)+chr(2)+chr(1):
return chr(25)+chr(141)+chr(83)
if data==chr(107)+chr(113)+chr(160):
return chr(15)+chr(6)+chr(2)+chr(1)
if data==chr(15)+chr(6)+chr(2)+chr(1):
return chr(107)+chr(113)+chr(160)
if data==chr(143)+chr(53)+chr(48):
return chr(0)+chr(7)+chr(2)+chr(1)
if data==chr(0)+chr(7)+chr(2)+chr(1):
return chr(143)+chr(53)+chr(48)
if data==chr(125)+chr(73)+chr(30):
return chr(1)+chr(7)+chr(2)+chr(1)
if data==chr(1)+chr(7)+chr(2)+chr(1):
return chr(125)+chr(73)+chr(30)
if data==chr(78)+chr(76)+chr(242):
return chr(2)+chr(7)+chr(2)+chr(1)
if data==chr(2)+chr(7)+chr(2)+chr(1):
return chr(78)+chr(76)+chr(242)
if data==chr(183)+chr(246)+chr(240):
return chr(3)+chr(7)+chr(2)+chr(1)
if data==chr(3)+chr(7)+chr(2)+chr(1):
return chr(183)+chr(246)+chr(240)
if data==chr(89)+chr(75)+chr(114):
return chr(4)+chr(7)+chr(2)+chr(1)
if data==chr(4)+chr(7)+chr(2)+chr(1):
return chr(89)+chr(75)+chr(114)
if data==chr(132)+chr(5)+chr(245):
return chr(5)+chr(7)+chr(2)+chr(1)
if data==chr(5)+chr(7)+chr(2)+chr(1):
return chr(132)+chr(5)+chr(245)
if data==chr(253)+chr(97)+chr(198):
return chr(6)+chr(7)+chr(2)+chr(1)
if data==chr(6)+chr(7)+chr(2)+chr(1):
return chr(253)+chr(97)+chr(198)
if data==chr(75)+chr(83)+chr(89):
return chr(7)+chr(7)+chr(2)+chr(1)
if data==chr(7)+chr(7)+chr(2)+chr(1):
return chr(75)+chr(83)+chr(89)
if data==chr(153)+chr(11)+chr(43):
return chr(8)+chr(7)+chr(2)+chr(1)
if data==chr(8)+chr(7)+chr(2)+chr(1):
return chr(153)+chr(11)+chr(43)
if data==chr(15)+chr(118)+chr(155):
return chr(9)+chr(7)+chr(2)+chr(1)
if data==chr(9)+chr(7)+chr(2)+chr(1):
return chr(15)+chr(118)+chr(155)
if data==chr(164)+chr(21)+chr(26):
return chr(10)+chr(7)+chr(2)+chr(1)
if data==chr(10)+chr(7)+chr(2)+chr(1):
return chr(164)+chr(21)+chr(26)
if data==chr(241)+chr(236)+chr(136):
return chr(11)+chr(7)+chr(2)+chr(1)
if data==chr(11)+chr(7)+chr(2)+chr(1):
return chr(241)+chr(236)+chr(136)
if data==chr(95)+chr(118)+chr(24):
return chr(12)+chr(7)+chr(2)+chr(1)
if data==chr(12)+chr(7)+chr(2)+chr(1):
return chr(95)+chr(118)+chr(24)
if data==chr(32)+chr(104)+chr(78):
return chr(13)+chr(7)+chr(2)+chr(1)
if data==chr(13)+chr(7)+chr(2)+chr(1):
return chr(32)+chr(104)+chr(78)
if data==chr(63)+chr(40)+chr(57):
return chr(14)+chr(7)+chr(2)+chr(1)
if data==chr(14)+chr(7)+chr(2)+chr(1):
return chr(63)+chr(40)+chr(57)
if data==chr(195)+chr(3)+chr(23):
return chr(15)+chr(7)+chr(2)+chr(1)
if data==chr(15)+chr(7)+chr(2)+chr(1):
return chr(195)+chr(3)+chr(23)
if data==chr(159)+chr(237)+chr(74):
return chr(0)+chr(8)+chr(2)+chr(1)
if data==chr(0)+chr(8)+chr(2)+chr(1):
return chr(159)+chr(237)+chr(74)
if data==chr(249)+chr(120)+chr(160):
return chr(1)+chr(8)+chr(2)+chr(1)
if data==chr(1)+chr(8)+chr(2)+chr(1):
return chr(249)+chr(120)+chr(160)
if data==chr(98)+chr(176)+chr(126):
return chr(2)+chr(8)+chr(2)+chr(1)
if data==chr(2)+chr(8)+chr(2)+chr(1):
return chr(98)+chr(176)+chr(126)
if data==chr(52)+chr(76)+chr(19):
return chr(3)+chr(8)+chr(2)+chr(1)
if data==chr(3)+chr(8)+chr(2)+chr(1):
return chr(52)+chr(76)+chr(19)
if data==chr(199)+chr(15)+chr(53):
return chr(4)+chr(8)+chr(2)+chr(1)
if data==chr(4)+chr(8)+chr(2)+chr(1):
return chr(199)+chr(15)+chr(53)
if data==chr(208)+chr(131)+chr(253):
return chr(5)+chr(8)+chr(2)+chr(1)
if data==chr(5)+chr(8)+chr(2)+chr(1):
return chr(208)+chr(131)+chr(253)
if data==chr(132)+chr(1)+chr(171):
return chr(6)+chr(8)+chr(2)+chr(1)
if data==chr(6)+chr(8)+chr(2)+chr(1):
return chr(132)+chr(1)+chr(171)
if data==chr(8)+chr(197)+chr(222):
return chr(7)+chr(8)+chr(2)+chr(1)
if data==chr(7)+chr(8)+chr(2)+chr(1):
return chr(8)+chr(197)+chr(222)
if data==chr(228)+chr(63)+chr(188):
return chr(8)+chr(8)+chr(2)+chr(1)
if data==chr(8)+chr(8)+chr(2)+chr(1):
return chr(228)+chr(63)+chr(188)
if data==chr(168)+chr(139)+chr(173):
return chr(9)+chr(8)+chr(2)+chr(1)
if data==chr(9)+chr(8)+chr(2)+chr(1):
return chr(168)+chr(139)+chr(173)
if data==chr(124)+chr(210)+chr(104):
return chr(10)+chr(8)+chr(2)+chr(1)
if data==chr(10)+chr(8)+chr(2)+chr(1):
return chr(124)+chr(210)+chr(104)
    else: return '-'
| UTF-8 | Python | false | false | 33,705 | py | 257 | DICO20.py | 257 | 0.610295 | 0.443525 | 0 | 907 | 36.16097 | 40 |
kalthommusa/100DaysOfCode | 16,552,803,994,497 | 762b7d086f440e19513f0bd3fba2940106c45f12 | 7cc342310bac85d2600d9f3246a3d6b62ae40c3d | /Day72_mysql_orderby.py | 2e919304f5f9379c797926c97ec178d10a37e4e6 | [] | no_license | https://github.com/kalthommusa/100DaysOfCode | 52b1111a81671b68e8ddb8806bb22fec58355f11 | 3b7b1e036aaeaee7243e4dabfbd360d3deb8d6dd | refs/heads/master | 2020-07-07T05:51:28.971917 | 2019-11-12T20:11:13 | 2019-11-12T20:11:13 | 203,270,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mysql.connector
# create a connection to the MySQL server
mydb = mysql.connector.connect(
    host="127.0.0.1",
    user="root",
    passwd="pass111",
    database="mydatabase"
)
mycursor = mydb.cursor()
# Select all the records and sort the result alphabetically by name (ascending by default)
sql = "SELECT * FROM customers ORDER BY name"
"""
Select all the records and sort the result alphabetically by name (descending):
sql = "SELECT * FROM customers ORDER BY name DESC"
"""
mycursor.execute(sql)
myresult = mycursor.fetchall()
for i in myresult:
print(i) | UTF-8 | Python | false | false | 546 | py | 89 | Day72_mysql_orderby.py | 86 | 0.754579 | 0.738095 | 0 | 20 | 26.35 | 90 |
NAH8/dmpipe | 12,343,736,018,176 | bc698692c90c259ee91c104e5573fa66cb69c908 | cfd310aaad4d40c3689c28672baabe11ec783d19 | /setup.py | bdcea345c03dc51d59f9f59a64d7d0e6ce66e429 | [
"BSD-3-Clause"
] | permissive | https://github.com/NAH8/dmpipe | e7fda3531f87c10ac3cf091d99cc91bebd17f5f3 | 041aca0a778d5b31adc770e53ce571ac5e6e8ec8 | refs/heads/master | 2021-05-15T10:21:14.990087 | 2017-06-27T01:58:12 | 2017-06-27T01:58:12 | 108,181,123 | 0 | 0 | null | true | 2017-10-24T20:41:05 | 2017-10-24T20:41:05 | 2017-05-16T22:18:30 | 2017-06-27T01:58:51 | 101 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
setup(
name='dmpipe',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Matthew Wood',
author_email='mdwood@slac.stanford.edu',
description='Pipeline Scripts for LAT DM Analysis',
license='BSD',
packages=find_packages(),
include_package_data=True,
url="https://github.com/fermiPy/dmpipe",
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Astronomy',
'Development Status :: 4 - Beta',
],
scripts=[],
entry_points={'console_scripts': [
'dmpipe-analyze-roi = dmpipe.target_analysis:main_roi_single',
'dmpipe-analyze-sed = dmpipe.target_analysis:main_sed_single',
'dmpipe-prepare-targets = dmpipe.target_analysis:main_prepare_targets',
'dmpipe-analyze-roi-sg = dmpipe.target_analysis:main_roi_batch',
'dmpipe-analyze-sed-sg = dmpipe.target_analysis:main_sed_batch',
'dmpipe-convert-castro = dmpipe.dm_spectral:main_convert_single',
'dmpipe-convert-castro-sg = dmpipe.dm_spectral:main_convert_batch',
'dmpipe-stack-likelihood = dmpipe.dm_spectral:main_stack_likelihood',
'dmpipe-spec-table = dmpipe.dm_spectral:main_spec_table',
'dmpipe-pipeline-dsph = dmpipe.pipeline_dsph:main_chain',
'dmpipe-plot-dm = dmpipe.scripts.plot_castro_dm:main_single',
'dmpipe-plot-dm-sg = dmpipe.scripts.plot_castro_dm:main_batch',
'dmpipe-plot-castro = dmpipe.scripts.plot_castro:main_single',
'dmpipe-plot-castro-sg = dmpipe.scripts.plot_castro:main_batch',
]},
install_requires=[
'numpy >= 1.6.1',
'astropy >= 1.2.1',
'matplotlib >= 1.5.0',
'scipy >= 0.14',
'fermipy >= 0.14.0',
'pyyaml',
'healpy',
'dmsky'
],
extras_require=dict(
all=[],
),
)
| UTF-8 | Python | false | false | 2,229 | py | 8 | setup.py | 8 | 0.628084 | 0.619112 | 0 | 56 | 38.803571 | 79 |
lianglin74/CoOp | 6,399,501,290,664 | b48cfdbf949e235d16cb897e952509857fbe1869 | 9cdc5ba730439cc1a2e5bddfc7d1d34cdd17e5bc | /src/qd/evaluate/evaluate_openimages_google.py | 25ae4fe2900efdcca22a1d4d2c603f9277de53b6 | [] | no_license | https://github.com/lianglin74/CoOp | dff26848a5cee1ac3388c6c1a0abdfd73052d9ff | 1b0b82bcea1dea9bc91935cab480d218f2aea8b0 | refs/heads/master | 2023-08-11T07:13:20.706047 | 2021-09-08T22:01:08 | 2021-09-08T22:01:40 | 406,508,593 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import os.path as op
import json
from qd.tsv_io import load_list_file
from qd.tsv_io import TSVFile, tsv_reader
from qd.evaluate.oid_hierarchical_labels_expansion_tsv import expand_labels
import numpy as np
import argparse
import time
import logging
def parallel_oi5c_to_submit_csv(pred_tsv, submit_csv, num_decimal=4,
min_conf=0.):
from qd.tsv_io import TSVDataset
dataset = TSVDataset('OpenImageV5C')
name_to_cid = {name: cid for cid, name in tsv_reader(op.join(dataset._data_root,
'cid_to_name.tsv'))}
name_to_cid['Studio couch'] = name_to_cid['studio couch']
hw_tsv = dataset.get_data('test', t='hw')
from qd.process_tsv import parallel_multi_tsv_process
parallel_multi_tsv_process(lambda x: oi5c_to_submit_csv_row_processor(x,
num_decimal=num_decimal,
name_to_cid=name_to_cid,
min_conf=min_conf),
[pred_tsv, hw_tsv],
submit_csv,
num_process=200,
out_sep=',',
head=('ImageId', 'PredictionString'))
def oi5c_to_submit_csv_row_processor(row, num_decimal, name_to_cid, min_conf=0.):
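    # Builds one submission-style 'PredictionString' for an image: repeated blocks of
    # "<LabelId> <Confidence> <XMin> <YMin> <XMax> <YMax>", with the box coordinates
    # normalized by the image width/height read from the hw tsv row.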
(key, str_rects), (key_hw, str_hw) = row
rects = json.loads(str_rects)
h, w = map(float, str_hw.split(' '))
float_pattern = '{:.' + str(num_decimal) + 'f}'
pred = []
for r in rects:
if r['conf'] < min_conf:
continue
pred.append(name_to_cid[r['class']])
pred.append(float_pattern.format(r['conf']))
x1, y1, x2, y2 = r['rect']
pred.extend([float_pattern.format(x1 / w),
float_pattern.format(y1 / h),
float_pattern.format(x2 / w),
float_pattern.format(y2 / h)])
return key, ' '.join(pred)
def load_truths_label(tsv_file, imagelabel_tsv_file, all_idx, label):
tsv = TSVFile(tsv_file)
imagelabel_tsv = TSVFile(imagelabel_tsv_file)
key_to_info = {}
for idx in all_idx:
row = tsv.seek(idx)
key = row[0]
imagelabel_key, str_imagelabel = imagelabel_tsv.seek(idx)
assert imagelabel_key == row[0]
img_label = json.loads(str_imagelabel)
box_label = json.loads(row[1])
if img_label==[] and box_label==[]:
continue
for obj in img_label:
if obj['conf'] == 0: # negative labels
if obj['class'] != label:
continue
if key not in key_to_info:
key_to_info[key] = -1
else:
assert key_to_info[key] == -1
for obj in box_label:
if obj['class'] != label:
continue
if key not in key_to_info:
key_to_info[key] = []
key_to_info[key].append((int(obj['IsGroupOf']), obj['rect']))
return key_to_info
def load_truths(tsv_file, imagelabel_tsv_file, shuf_file=None):
""" Load ground-truth annotations in both image-level and box-level.
Args:
        tsv_file: A string of the tsv file with box-level ground-truth annotations.
            The format of each row is as follows:
            row[0]: image key
            row[1]: a json string of box-level annotations for positive labels
        imagelabel_tsv_file: A string of the tsv file with image-level positive/negative
            labels, with the same image keys in the same row order as tsv_file.
        shuf_file: A string of the shuffle file with a list of indexes.
            This enables evaluating on a subset of images in tsv_file.
Assert:
        Check for conflicting labels, e.g., a class labeled as negative on an image
        that also has box-level labels for that class.
Returns:
gt_dict: A dictionary of all annotations.
It is grouped by class label first and then by image key.
For positive label, gt_dict[label][key] is a list of tuple with (IsGroupOf, rect).
For negative label, gt_dict[label][key] = -1
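            For example (hypothetical keys/classes):
                gt_dict['Person']['img_001'] = [(0, [10, 20, 110, 220]), (1, [0, 0, 400, 300])]
                gt_dict['Car']['img_002'] = -1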
"""
tsv = TSVFile(tsv_file)
imagelabel_tsv = TSVFile(imagelabel_tsv_file)
if shuf_file is None:
shuf_list = [_ for _ in range(tsv.num_rows())]
else:
shuf_list = [int(x) for x in load_list_file(shuf_file)]
gt_dict = {}
for idx in shuf_list:
row = tsv.seek(idx)
key = row[0]
imagelabel_key, str_imagelabel = imagelabel_tsv.seek(idx)
assert imagelabel_key == row[0]
img_label = json.loads(str_imagelabel)
box_label = json.loads(row[1])
if img_label==[] and box_label==[]:
continue
for obj in img_label:
if obj['conf'] == 0: # negative labels
label = obj['class']
if label not in gt_dict:
gt_dict[label] = {}
if key not in gt_dict[label]:
gt_dict[label][key] = -1
else:
assert gt_dict[label][key] == -1
for obj in box_label:
label = obj['class']
if label not in gt_dict:
gt_dict[label] = {}
if key not in gt_dict[label]:
gt_dict[label][key] = []
gt_dict[label][key] += [(int(obj['IsGroupOf']), obj['rect'])]
return gt_dict
def load_dets_label(tsv_file, all_idx, label):
key_to_info = {}
from tqdm import tqdm
tsv = TSVFile(tsv_file)
for i in tqdm(all_idx):
row = tsv[i]
key = row[0]
rects = json.loads(row[1])
for obj in rects:
if obj['class'] != label:
continue
if is_valid_rect(obj['rect']):
if key not in key_to_info:
key_to_info[key] = []
key_to_info[key].append((obj['conf'], obj['rect']))
for key in key_to_info:
key_to_info[key] = sorted(key_to_info[key], key=lambda x:-x[0])
return key_to_info
def load_dets(tsv_file, truths):
""" Load detection results.
Args:
tsv_file: A string of the tsv file with all detection results.
The format of each row is as follows:
row[0]: image key
row[1]: a json string of bounding box predictions (including class, confidence, rect)
truths: A dictionary of ground-truth annotations.
Detections of unknown classes are not recorded.
Returns:
det_dict: A dictionary of detections of all classes in truths.
Any invalid detections are removed (i.e., x2<x1, y2<y1).
It is grouped by class label first and then by image key.
det_dict[label][key] is a list of tuple with (confidence, rect)
and it is sorted by confidence (high to low).
"""
det_dict = {cls:{} for cls in truths}
from tqdm import tqdm
for i, row in tqdm(enumerate(tsv_reader(tsv_file))):
key = row[0]
rects = json.loads(row[1])
for obj in rects:
label = obj['class']
if label in det_dict:
if is_valid_rect(obj['rect']):
if key not in det_dict[label]:
det_dict[label][key] = []
det_dict[label][key] += [(obj['conf'], obj['rect'])]
for label in det_dict:
for key in det_dict[label]:
det_dict[label][key] = sorted(det_dict[label][key], key=lambda x:-x[0])
return det_dict
def is_valid_rect(rc):
# a rect is valid if x2 > x1 and y2 > y1
return rc[2] > rc[0] and rc[3] > rc[1]
def rect_area(rc):
return (float(rc[2]) - rc[0]) * (rc[3] - rc[1])
def IoU(rc1, rc2):
rc_inter = [max(rc1[0], rc2[0]), max(rc1[1], rc2[1]), min(rc1[2], rc2[2]), min(rc1[3], rc2[3])]
if is_valid_rect(rc_inter):
return rect_area(rc_inter) / (rect_area(rc1) + rect_area(rc2) - rect_area(rc_inter))
else:
return 0
def IoA(rc1, rc2):
""" Intersection over smaller box area, used in group-of box evaluation.
Args:
rc1: A list of the smaller box coordinates in xyxy mode
rc2: A list of the group box coordinates in xyxy mode
Returns:
ioa: A float number of ioa score = intersection(rc1, rc2) / area(rc1)
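        For example, rc1 = [1, 1, 2, 2] inside group box rc2 = [0, 0, 4, 4] gives an
        intersection of [1, 1, 2, 2] with area 1.0, and area(rc1) = 1.0, so ioa = 1.0.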
"""
rc_inter = [max(rc1[0], rc2[0]), max(rc1[1], rc2[1]), min(rc1[2], rc2[2]), min(rc1[3], rc2[3])]
if is_valid_rect(rc_inter):
return rect_area(rc_inter) / rect_area(rc1)
else:
return 0
def get_overlaps(det, gt):
""" Calculate IoU and IoA for a list of detection boxes and ground-truth boxes.
Args:
det: A list of D detection results (from det_dict[label][key])
gt: A list of G ground-truth results (from gt_dict[label][key]),
            and say there are G1 group-of boxes and G2 non group-of boxes
    Returns:
        ious: A float numpy array (D*G2) of IoU scores between detections and non group-of ground-truth boxes
        ioas: A float numpy array (D*G1) of IoA scores between detections and group-of ground-truth boxes
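        Note: an IsGroupOf flag of -1 (unknown) is treated as group-of here, since the
        split below only checks g[0] != 0.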
"""
gt_is_group = [g for g in gt if g[0]!=0]
gt_is_non_group = [g for g in gt if g[0]==0]
ious = [[IoU(d[1], g[1]) for g in gt_is_non_group] for d in det]
ioas = [[IoA(d[1], g[1]) for g in gt_is_group] for d in det]
return np.array(ious), np.array(ioas)
def eval_per_class(c_dets, c_truths, overlap_threshold=0.5, count_group_of=True):
""" Evaluation for each class.
Args:
c_dets: A dictionary of all detection results (from det_dict[label])
c_truths: A dictionary of all ground-truth annotations (from gt_dict[label])
overlap_threshold: A float indicates the threshold used in IoU and IoA matching
count_group_of: A bool indicates whether to consider group-of box or not
Returns:
scores_all: A list of numpy float array collecting the confidence scores of both
            true positives and false positives in each image (ignored detections are not included)
tp_fp_labels_all: A list of numpy float array collecting the true positives (=1)
and false positives (=0) labels in each image
num_gt_all: An integer of the total number of valid ground-truth boxes
Note: the IsGroupOf feature can be 0, 1, and -1 (unknown).
Follow Google's implementation, unknown is considered as group-of.
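        Images whose entry in c_truths is -1 carry only a negative image-level label: they
        add nothing to num_gt_all, and every detection on them is scored as a false positive.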
"""
num_gt_all = 0
for key in c_truths:
if c_truths[key] == -1:
continue # negative label does not count
is_group_of = [1 if x[0]!=0 else 0 for x in c_truths[key]]
if count_group_of:
num_gt_all += len(is_group_of)
else:
num_gt_all += sum(is_group_of)
scores_all = []
tp_fp_labels_all = []
for key in c_dets:
img_det = c_dets[key]
num_det = len(img_det)
scores = np.array([det[0] for det in img_det])
tp_fp_labels = np.zeros(num_det, dtype=float)
is_matched_to_group_of_box = np.zeros(num_det, dtype=bool)
if key not in c_truths:
continue # ignore missing labels
img_gt = c_truths[key]
if img_gt == -1:
# for negative label, all detections are fp
scores_all.append(scores)
tp_fp_labels_all.append(tp_fp_labels)
else:
######## This part is imported modified from Google's implementation ########
# The evaluation is done in two stages:
# 1. All detections are matched to non group-of boxes; true positives are
# determined and detections matched to difficult boxes are ignored.
# 2. Detections that are determined as false positives are matched against
# group-of boxes and scored with weight w per ground truth box is matched.
[ious, ioas] = get_overlaps(img_det, img_gt)
# Tp-fp evaluation for non-group of boxes (if any).
if ious.shape[1] > 0:
max_overlap_gt_ids = np.argmax(ious, axis=1)
is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool)
for i in range(num_det):
gt_id = max_overlap_gt_ids[i]
if ious[i, gt_id] >= overlap_threshold:
if not is_gt_box_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_box_detected[gt_id] = True
scores_group_of = np.zeros(ioas.shape[1], dtype=float)
tp_fp_labels_group_of = int(count_group_of) * np.ones(ioas.shape[1], dtype=float)
# Tp-fp evaluation for group of boxes.
if ioas.shape[1] > 0:
max_overlap_group_of_gt_ids = np.argmax(ioas, axis=1)
for i in range(num_det):
gt_id = max_overlap_group_of_gt_ids[i]
if not tp_fp_labels[i] and ioas[i, gt_id] >= overlap_threshold:
is_matched_to_group_of_box[i] = True
scores_group_of[gt_id] = max(scores_group_of[gt_id], scores[i])
selector = np.where((scores_group_of > 0) & (tp_fp_labels_group_of > 0))
scores_group_of = scores_group_of[selector]
tp_fp_labels_group_of = tp_fp_labels_group_of[selector]
scores_all.append(np.concatenate((scores[~is_matched_to_group_of_box], scores_group_of)))
tp_fp_labels_all.append(np.concatenate((tp_fp_labels[~is_matched_to_group_of_box], tp_fp_labels_group_of)))
######## end ########
return scores_all, tp_fp_labels_all, num_gt_all
def compute_precision_recall(scores, labels, num_gt):
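    # Sort detections by descending score, then accumulate true/false positives so that
    # precision = cum_tp / (cum_tp + cum_fp) and recall = cum_tp / num_gt at each threshold.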
assert np.sum(labels) <= num_gt, "number of true positives must be no larger than num_gt."
assert len(scores) == len(labels), "scores and labels must be of the same size."
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
tp_labels = labels[sorted_indices]
fp_labels = (tp_labels <= 0).astype(float)
cum_tp = np.cumsum(tp_labels)
cum_fp = np.cumsum(fp_labels)
precision = cum_tp.astype(float) / (cum_tp + cum_fp)
recall = cum_tp.astype(float) / num_gt
return precision, recall
def compute_average_precision(precision, recall):
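    # Area under the precision-recall curve: pad both ends, replace each precision value by
    # the maximum precision at any equal-or-greater recall, then sum precision times the
    # recall increments where recall changes.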
if not precision.size:
return 0.0
assert len(precision) == len(recall), "precision and recall must be of the same size."
assert np.amin(precision) >= 0 and np.amax(precision) <= 1, "precision must be in the range of [0, 1]."
assert np.amin(recall) >= 0 and np.amax(recall) <= 1, "recall must be in the range of [0, 1]."
assert all(recall[i] <= recall[i+1] for i in range(len(recall)-1)), "recall must be a non-decreasing array"
rec = np.concatenate([[0], recall, [1]])
prec = np.concatenate([[0], precision, [0]])
# pre-process precision to be a non-decreasing array
for i in range(len(prec) - 2, -1, -1):
prec[i] = np.maximum(prec[i], prec[i + 1])
indices = np.where(rec[1:] != rec[:-1])[0] + 1
ap = np.sum((rec[indices] - rec[indices - 1]) * prec[indices])
return ap
def parallel_processor(task):
label = task['label']
label_order = task['label_order']
pred_file = task['pred_file']
inverted_file = task['inverted_file']
truths = task['truths']
imagelabel_truths = task['imagelabel_truths']
overlap_threshold = task['overlap_threshold']
count_group_of = task['count_group_of']
label_, str_idx = TSVFile(inverted_file)[label_order]
assert label == label_
all_idx = list(map(int, str_idx.split(' ')))
c_truths = load_truths_label(truths, imagelabel_truths, all_idx,
label)
c_dets = load_dets_label(pred_file, all_idx, label)
scores, tp_fp_labels, num_gt = eval_per_class(c_dets, c_truths, overlap_threshold, count_group_of)
if scores and num_gt:
scores = np.concatenate(scores)
tp_fp_labels = np.concatenate(tp_fp_labels)
precision, recall = compute_precision_recall(scores, tp_fp_labels, num_gt)
ap = compute_average_precision(precision, recall)
else:
ap = 0.0 # there are cases when num_gt = 0 and there are false positives.
return ap, num_gt
def create_inverted_label_to_posneg(truths, imagelabel_truths,
inverted_file):
from collections import defaultdict
class_to_idx = defaultdict(list)
for i, (key, str_rects) in enumerate(tsv_reader(truths)):
rects = json.loads(str_rects)
for c in set([r['class'] for r in rects]):
class_to_idx[c].append(i)
for i, (key, str_rects) in enumerate(tsv_reader(imagelabel_truths)):
rects = json.loads(str_rects)
for c in set([r['class'] for r in rects if r['conf'] == 0]):
class_to_idx[c].append(i)
from qd.tsv_io import tsv_writer
tsv_writer(((c, ' '.join(map(str, idx))) for c, idx in
class_to_idx.items()), inverted_file)
def parallel_evaluate(truths, imagelabel_truths, dets, shuf_file=None, expand_label_gt=False, expand_label_det=False, apply_nms_gt=False, apply_nms_det=False,
json_hierarchy_file=None, count_group_of=True, overlap_threshold=0.5, save_file=None):
if expand_label_gt:
assert json_hierarchy_file is not None, "need json hierarchy file for label expansion"
if not apply_nms_gt:
new_file = op.splitext(truths)[0] + '.expanded.tsv'
new_imagelevel_truths = op.splitext(imagelabel_truths)[0] + '.expanded.tsv'
else:
new_file = op.splitext(truths)[0] + '.expanded.nms.tsv'
new_imagelevel_truths = op.splitext(imagelabel_truths)[0] + '.expanded.nms.tsv'
if not (op.isfile(new_file) and op.isfile(new_imagelevel_truths)):
logging.info('expanding labels for ground-truth file and save to: ' + new_file)
expand_labels(truths, imagelabel_truths, json_hierarchy_file,
new_file, new_imagelevel_truths, True, apply_nms_gt)
truths = new_file
imagelabel_truths = new_imagelevel_truths
if expand_label_det:
assert json_hierarchy_file is not None, "need json hierarchy file for label expansion"
if not apply_nms_det:
new_file = op.splitext(dets)[0] + '.expanded.tsv'
else:
new_file = op.splitext(dets)[0] + '.expanded.nms.tsv'
if not op.isfile(new_file):
logging.info('expanding labels for detection file and save to: ' + new_file)
expand_labels(dets, None, json_hierarchy_file, new_file,
None, False, apply_nms_det)
dets = new_file
# create the inverted file
from qd.qd_common import hash_sha1
inverted_file = truths + '.inverted.{}'.format(hash_sha1(imagelabel_truths)) + '.tsv'
valid_labelmap = inverted_file + '.validlabel.tsv'
if not op.isfile(inverted_file):
create_inverted_label_to_posneg(truths, imagelabel_truths,
inverted_file)
from qd.qd_common import write_to_file
write_to_file('\n'.join([key for key, _ in tsv_reader(inverted_file)]),
valid_labelmap)
labelmap = load_list_file(valid_labelmap)
all_task = [{'label': l,
'label_order': i,
'truths': truths,
'imagelabel_truths': imagelabel_truths,
'pred_file': dets,
'inverted_file': inverted_file,
'count_group_of': count_group_of,
'overlap_threshold': overlap_threshold,
}
for i, l in enumerate(labelmap)]
from qd.qd_common import parallel_map
all_result = parallel_map(parallel_processor, all_task, num_worker=200,
isDebug=False)
class_ap = {}
class_num_gt = {}
for task, (ap, num_gt) in zip(all_task, all_result):
label = task['label']
class_ap[label] = ap
class_num_gt[label] = num_gt
logging.info('{} = {}'.format(label, ap))
mean_ap = sum([class_ap[cls] for cls in class_ap]) / len(class_ap)
total_gt = sum([class_num_gt[cls] for cls in class_num_gt])
return {'class_ap': class_ap,
'map': mean_ap,
'class_num_gt': class_num_gt,
'total_gt': total_gt}
def evaluate(truths, imagelabel_truths, dets, shuf_file=None, expand_label_gt=False, expand_label_det=False, apply_nms_gt=False, apply_nms_det=False,
json_hierarchy_file=None, count_group_of=True, overlap_threshold=0.5, save_file=None):
if expand_label_gt:
assert json_hierarchy_file is not None, "need json hierarchy file for label expansion"
if not apply_nms_gt:
new_file = op.splitext(truths)[0] + '.expanded.tsv'
new_imagelevel_truths = op.splitext(imagelabel_truths)[0] + '.expanded.tsv'
else:
new_file = op.splitext(truths)[0] + '.expanded.nms.tsv'
new_imagelevel_truths = op.splitext(imagelabel_truths)[0] + '.expanded.nms.tsv'
if not (op.isfile(new_file) and op.isfile(new_imagelevel_truths)):
logging.info('expanding labels for ground-truth file and save to: ' + new_file)
expand_labels(truths, imagelabel_truths, json_hierarchy_file,
new_file, new_imagelevel_truths, True, apply_nms_gt)
truths = new_file
imagelabel_truths = new_imagelevel_truths
if expand_label_det:
assert json_hierarchy_file is not None, "need json hierarchy file for label expansion"
if not apply_nms_det:
new_file = op.splitext(dets)[0] + '.expanded.tsv'
else:
new_file = op.splitext(dets)[0] + '.expanded.nms.tsv'
if not op.isfile(new_file):
logging.info('expanding labels for detection file and save to: ' + new_file)
expand_labels(dets, None, json_hierarchy_file, new_file,
None, False, apply_nms_det)
dets = new_file
truths_dict = load_truths(truths, imagelabel_truths, shuf_file)
dets_dict = load_dets(dets, truths_dict)
class_ap = {}
class_num_gt = {}
for label in sorted(truths_dict.keys()):
c_truths = truths_dict[label]
c_dets = dets_dict[label]
scores, tp_fp_labels, num_gt = eval_per_class(c_dets, c_truths, overlap_threshold, count_group_of)
if scores and num_gt:
scores = np.concatenate(scores)
tp_fp_labels = np.concatenate(tp_fp_labels)
precision, recall = compute_precision_recall(scores, tp_fp_labels, num_gt)
ap = compute_average_precision(precision, recall)
else:
ap = 0.0 # there are cases when num_gt = 0 and there are false positives.
class_ap[label] = ap
class_num_gt[label] = num_gt
logging.info('{} = {}'.format(label, ap))
if len(truths_dict) == 0:
mean_ap = 0
else:
mean_ap = sum([class_ap[cls] for cls in class_ap]) / len(truths_dict)
total_gt = sum([class_num_gt[cls] for cls in class_num_gt])
return {'class_ap': class_ap,
'map': mean_ap,
'class_num_gt': class_num_gt,
'total_gt': total_gt}
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='OpenImage Challenge Evaluation')
parser.add_argument('--truths', required=True, help='import groundtruth file')
parser.add_argument('--shuf_file', required=False, default=None,
help='shuffle list to select a subset of images')
parser.add_argument('--dets', required=True, help='import detection results')
parser.add_argument('--overlap_threshold', required=False, type=float, default=0.5,
help='IoU overlap threshold, default=0.5')
    # argparse's type=bool treats any non-empty string (even "False") as True, so parse explicitly
    parser.add_argument('--count_group_of', required=False, default=True,
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'),
                        help='include group-of box evaluation or ignore, default=True')
parser.add_argument('--expand_label_gt', required=False, default=False, action='store_true',
help='whether to expand labels for gt annotations or not default=False')
parser.add_argument('--expand_label_det', required=False, default=False, action='store_true',
help='whether to expand labels for detection annotations or not default=False')
parser.add_argument('--apply_nms_gt', required=False, default=False, action='store_true',
help='whether to apply nms after gt label expansion, default=False')
parser.add_argument('--apply_nms_det', required=False, default=False, action='store_true',
                        help='whether to apply nms after det label expansion, default=False')
parser.add_argument('--json_hierarchy_file', required=False, type=str, default=None,
help='json file used for label expansion default=None')
parser.add_argument('--save_file', required=False, type=str, default=None,
help='filename to save evaluation results (class AP)')
args = parser.parse_args()
return args
if __name__ == '__main__':
"""
This script is for OpenImage Detection (OID) Challenge evaluation.
It follows Google's official implementation to produce the same result,
    but it is much faster than Google's evaluation (2 mins vs. 50 mins,
    not considering label expansion).
However, there are three known issues with Google's implementation.
    1) Box area calculation: height and width should be computed with a +1 term
    2) Duplicate label expansion: a child class is expanded to the same parent
    more than once (e.g., Shrimp->Shellfish->Seafood, Shrimp->Shellfish->..->Animal)
3) Dense box matching: not considered (should follow COCO's evaluation)
    For example, we should not immediately assign a detection as a false positive
    when it is matched to an already matched ground-truth box. Instead, we should
    look at the next largest IoU score to see if it can be matched to another
    un-matched ground-truth box. This is helpful for crowded scenes such as human crowds.
    (we observe a 1.5 AP improvement on the "Person" class after turning on this feature)
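    A quick illustration of issue 1): a box with corners (0, 0) and (9, 9) covers
    10 x 10 = 100 pixels, but (x2 - x1) * (y2 - y1) = 81; rect_area() above follows
    Google's convention and omits the +1.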
A few other notes:
    1) The IsGroupOf attribute can be 0, 1 and -1 (unknown). There are 1,166 unknown
    boxes in the validation set, which are considered as group-of boxes.
"""
start = time.time()
args = parse_args()
evaluate(**vars(args))
end = time.time()
print('Elapsed time: ' + str(end - start))
| UTF-8 | Python | false | false | 27,090 | py | 370 | evaluate_openimages_google.py | 291 | 0.579919 | 0.571724 | 0 | 585 | 44.307692 | 158 |
Throupy/Discord-Bot | 1,176,821,078,397 | 52abbc33d244aa0bab5d77c760dd4a2b19523b11 | 37bc1e6300f61702975366b17141864519c87505 | /cogs/main.py | 7dd4d39f1d84b9a737441a24f0534997eb585716 | [] | no_license | https://github.com/Throupy/Discord-Bot | 9b8a5f4506e8eae6b4587146b8a0d4ea1f74264e | 8778b019585b8106008822dd0924c6e59e93dcac | refs/heads/master | 2020-05-05T05:26:40.626234 | 2019-04-17T21:24:20 | 2019-04-17T21:24:20 | 179,752,527 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Cog for main commands."""
import aiohttp
import datetime
import random
from dateutil.relativedelta import relativedelta
import discord
from bs4 import BeautifulSoup
from discord.ext import commands
from utils.consts import Consts
from utils.embedgenerator import Embed
class MainCog(commands.Cog):
"""Main cog."""
CONSTS = Consts()
def __init__(self, bot):
"""Initialize the cog."""
print("Main cog Initialized")
self.bot = bot
@commands.command()
@commands.cooldown(1, 1, commands.BucketType.user)
async def roll(self, ctx, max: int = None):
"""Run when the roll command is called."""
if not type(max) == int or not max or max < 0:
return await ctx.channel.send("Please do ~roll <max(int)>")
number = random.randint(1, max)
return await ctx.channel.send(f"`You rolled a {number}`")
@commands.command(aliases=['cls'])
@commands.cooldown(1, 15, commands.BucketType.user)
async def clear(self, ctx, number: int = 10):
"""Clear messages."""
print("Clear called")
if "administrator" in [y.name.lower() for y in ctx.author.roles]:
if number > 0 and number < 75:
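                # purge number + 1 messages so the ~clear command message itself is removed,
                # then exclude it from the count reported back to the channel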
deleted = await ctx.channel.purge(limit=number + 1)
await ctx.channel.send(
f"Deleted {len(deleted) - 1} messages")
return await ctx.channel.send(
"You can specify a number of messages (1 - 75)")
return await ctx.channel.send(
"You don't have the `administrator` role!")
@commands.command(aliases=['who'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def whois(self, ctx, member: discord.Member = None):
"""Run when the whois command is called."""
if member is None:
return await ctx.channel.send("Please do: ~whois @user")
embed = Embed([("Display Username", f":name_badge: {member.name}"),
("Status", f":o:{str(member.status)}"),
("Joined On", f":date: {member.joined_at.date()}"),
("Role(s)", f":bow:" + ''.join(
[str(role.mention) for role in member.roles[1:]]))])
embed.author = ctx.message.author
embed.thumbnail = member.avatar_url
embed = embed.generate_embed()
return await ctx.channel.send(embed=embed)
@commands.command()
@commands.cooldown(1, 120, commands.BucketType.user)
    async def report(self, ctx, victim: discord.Member, *, reason: str):
"""Run when the report command is called."""
if victim is None or reason is None:
return await ctx.channel.send("Please do ~report <user> <reason>")
for member in ctx.guild.members:
for role in member.roles:
print(f"{role.name} - {role.id}")
# Administrator or owner
if role.id in self.CONSTS.administrators:
try:
await member.send("{} reported {} for {}".format(
ctx.author.name, victim, reason
))
# User has DMs from members disabled
except discord.errors.HTTPException:
pass
return await ctx.channel.send(":white_check_mark: Thank you")
@commands.command(aliases=['say'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def echo(self, ctx, *args):
"""Run when the echo command is called."""
if args is None:
return await ctx.channel.send("Please do ~echo <message>")
return await ctx.channel.send(f"`{' '.join([x for x in args])}`")
@commands.command(aliases=['serverinfo', 'svinfo'])
@commands.cooldown(1, 30, commands.BucketType.user)
async def server(self, ctx):
"""Run when the server command is called - gives info about server."""
guild = ctx.message.author.guild
embed = Embed([("Server Name", f":name_badge: {guild.name}"),
("Region", f":o:{guild.region}"),
("Owner", f":person_with_blond_hair: {guild.owner}"),
("Member Count", f":100: {guild.member_count}"),
("Date Created", f":date: {guild.created_at}")])
embed.thumbnail = guild.icon_url
embed = embed.generate_embed()
return await ctx.channel.send(embed=embed)
@commands.command(aliases=['depressme', 'exams'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def gcses(self, ctx):
"""Run when the gcse countdown command is called."""
today = datetime.datetime.today()
td = relativedelta(datetime.datetime(2019, 5, 13, 9, 0, 0), today)
return await ctx.channel.send(
"`{} months, {} weeks, {} days, {} hours {} minutes until GCSES!`"
.format(
td.months, td.weeks, td.days, td.hours, td.minutes
))
async def fetch(self, session, url):
"""Fetch the word of the day."""
async with session.get(url) as response:
return await response.text()
@commands.command(aliases=['word', 'wordoftheday', 'spaword'])
@commands.cooldown(1, 15, commands.BucketType.user)
async def wotd(self, ctx):
"""Run when the wotd command is called."""
async with aiohttp.ClientSession() as session:
html = await self.fetch(session, 'https://www.spanishdict.com/')
soup = BeautifulSoup(html, features="lxml")
spa = soup.find('a', {'class': 'wotd-sidebar-word'}).text
eng = soup.find('div', {'class': 'wotd-sidebar-translation'}).text
return await ctx.channel.send(f":flag_es:`{spa} - {eng}`:flag_es:")
def setup(bot):
"""Initialize and add to main script."""
bot.add_cog(MainCog(bot))
| UTF-8 | Python | false | false | 5,967 | py | 8 | main.py | 7 | 0.572314 | 0.564103 | 0 | 136 | 42.875 | 79 |
p56071078/python-practice | 14,998,025,804,731 | fc0d0df1a374c4cf6fea43fbadd7e459fd9f8960 | 2706674a5547e58ced2cf0bb330e270c1ecc731c | /HW4_for_loop.py | 81d4e58b204a7fdcc3758a55c228efff9d2d9334 | [] | no_license | https://github.com/p56071078/python-practice | 548db33ca5e1e30292ec5a510cce4ec9ef5bd330 | 165241fa81605c12cdb7a0386d7792b40929a603 | refs/heads/master | 2023-02-22T02:23:29.794974 | 2021-01-27T12:59:12 | 2021-01-27T12:59:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #4.1
a = int(input())
b = int(input())
for i in range(a,b+1):
print(i,end=' ')
#4.2
# Read an integer:
a = int(input())
b = int(input())
# Print a value:
#
if a>=b:
for i in range(a, b-1,-1):
print(i,end=' ')
else:
for i in range(a, b+1):
print(i,end=' ')
#4.3
#老師解
sum=0
for i in range(10):
sum=sum+int(input())
print(sum)
#4.4
sum=0
N=int(input())
for i in range(N):
sum=sum+int(input())
print(sum)
#4.5
sum=0
N=int(input())
for i in range(1,N+1):
sum=sum+(i**3)
print(sum)
#4.6
#階層的話起始ersult不能等於0,要設成1
result=1
n=int(input())
for i in range(1,n+1):
result=result*i
print(result)
#4.7
#判斷輸入的數字中有幾個數字是0
count=0
n=int(input())
for i in range(n):
if int(input())==0:
count=count+1
print(count)
#4.8 難
#先計算階層之後,再把階層結果相加
#n=4 -> 1!+2!+3!+4!=33
result=1
sum=0
n=int(input())
for i in range(1,n+1):
result=result*i
sum=sum+result
print(sum)
#4.9
#找不見的牌,假設有5張牌,只拿到4張,猜是哪個數字沒拿到
#先算N!,再算拿到的數字加總,相減就可以知道缺哪個數字
result=0
sum=0
N=int(input())
for i in range(1,N+1):
result += i
for i in range(1,N):
sum += int(input())
# print(result)
# print(sum)
print(result-sum)
#4.A
#巢狀迴圈
#如果想讓印出的東西在不同行的話,中間可以插入print(),就可以換行了
#ex:input(3)
#output:
#1
#12
#123
a = int(input())
for i in range(1,a+1):
for j in range(1,i+1):
print(j,end="")
print()
| UTF-8 | Python | false | false | 1,547 | py | 10 | HW4_for_loop.py | 10 | 0.611068 | 0.557288 | 0 | 95 | 12.505263 | 37 |
ParticleFilter101/Time-series-forecasting | 9,526,237,505,712 | bc92f9d79188a3d4fe2e278f5d2fadc5d7d618b3 | 3085b3992489cddb5ec256105ded0b7cdf9ec86b | /lstmty3.py | 60795e2a629d51c34b07552199d847ecc56cea83 | [] | no_license | https://github.com/ParticleFilter101/Time-series-forecasting | 81a754ae871c1321391540d74e88f253b1830afe | a9e95da89c497d3298dba65a8b7bd53b13290b0c | refs/heads/master | 2023-07-06T20:57:45.243204 | 2019-08-30T13:11:43 | 2019-08-30T13:11:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 14:38:47 2019
@author: shivambhardwaj
"""
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import randint
import seaborn as sns # used for plot interactive graph.
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn import metrics
from sklearn.metrics import mean_squared_error,r2_score
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import itertools
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import Dropout
#loading data
data = pd.read_csv('household_power_consumption.txt', sep=';',
parse_dates={'dt' : ['Date', 'Time']}, infer_datetime_format=True,
low_memory=False, na_values=['nan','?'], index_col='dt')
## finding all columns that have nan:
droping_list_all=[]
for j in range(0,7):
if not data.iloc[:, j].notnull().all():
droping_list_all.append(j)
#print(df.iloc[:,j].unique())
droping_list_all
# filling nan with mean in any columns
for j in range(0,7):
data.iloc[:,j]=data.iloc[:,j].fillna(data.iloc[:,j].mean())
data.Global_active_power.plot()
plt.tight_layout()
plt.show()
# ploting different features sampled over the day
# specify columns to plot
cols = [0, 1, 2, 3, 5, 6]
i = 1
groups=cols
values = data.resample('D').mean().values
# plot each column
plt.figure(figsize=(15, 10))
for group in groups:
plt.subplot(len(cols), 1, i)
plt.plot(values[:, group])
plt.title(data.columns[group], y=0.75, loc='right')
i += 1
plt.show()
plt.figure()
plt.matshow(data.resample('D').mean().corr(method='spearman'),vmax=1,vmin=-1,cmap='PRGn')
plt.title('resampled over day', size=15)
plt.colorbar()
plt.margins(0.02)
plt.show
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
dff = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(dff.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(dff.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
## resampling of data over hour
data_resample = data.resample('h').mean()
data_resample.shape
values = data_resample.values
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
reframed = series_to_supervised(scaled, 1, 1)
reframed.drop(reframed.columns[[8,9,10,11,12,13]], axis=1, inplace=True)
print(reframed.head())
# split into train and test sets
values = reframed.values
n_train_time = 365*24
train = values[:n_train_time, :]
test = values[n_train_time:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# We reshaped the input into the 3D format as expected by LSTMs, namely [samples, timesteps, features].
model = Sequential()
model.add(LSTM(100, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
fit_history = model.fit(train_X, train_y, epochs=20, batch_size=70,
validation_data=(test_X, test_y), verbose=2, shuffle=False)
# summarize history for loss
plt.figure()
plt.plot(fit_history.history['loss'])
plt.plot(fit_history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], 7))
# invert scaling for forecast
inv_yhat = np.concatenate((yhat, test_X[:, -6:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X[:, -6:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
aa=[x for x in range(500)]
plt.figure()
plt.plot(aa, inv_y[:500], marker='.', label="actual")
plt.plot(aa, inv_yhat[:500], 'r', label="prediction")
plt.ylabel('Global_active_power', size=15)
plt.xlabel('Time step', size=15)
plt.legend(fontsize=15)
plt.show()
| UTF-8 | Python | false | false | 5,379 | py | 3 | lstmty3.py | 2 | 0.688418 | 0.664622 | 0 | 179 | 29.044693 | 103 |
zwei2stein/mtg-commandline-tool | 2,190,433,345,208 | 6038aef09071dc7929cc20820bd2510934854b5a | 8af5ff2a64e2589bd7898a87f1cc909919782239 | /price_source/mysticshop.py | 001488df7f3e42833f77da00054b32018670cf4f | [] | no_license | https://github.com/zwei2stein/mtg-commandline-tool | b461dd44fd17722b3974bece5e23545e68f47d5c | 7ee6b211ff7d15aa4dd20231c28955ccb00166f5 | refs/heads/master | 2023-05-27T01:13:39.420872 | 2022-12-17T21:13:11 | 2022-12-17T21:13:11 | 162,145,046 | 1 | 0 | null | false | 2023-05-23T05:22:59 | 2018-12-17T14:44:35 | 2022-12-22T13:31:44 | 2023-05-23T05:22:58 | 326 | 1 | 0 | 2 | Python | false | false | import re
import requests
from price_source.priceSource import PriceSource
class MysticShop(PriceSource):
def __init__(self, base_cache_dir, clearCache, cacheTimeout, smartFlush, priority):
super().__init__(base_cache_dir, '.mysticShopCache')
self.clearCache = clearCache
self.cacheTimeout = cacheTimeout
self.smartFlush = smartFlush
self.sourceName = 'Mystic Shop'
self.supportedCurrency = 'czk'
self.priority = priority
self.baseUrl = "http://mysticshop.cz/mtgshop.php"
def fetch_card_price(self, card, page = 0, cheapestPrice = None):
response = requests.post(self.baseUrl, params={
'name': card.name,
'limit': 500,
'p': page + 1})
start = response.text.find('<tbody>')
end = response.text.find('</tbody>', start + 1)
regexRow = '<td class="detail"><b>(.+?)</b>'
names = re.findall(regexRow, response.text[start:end])
regexRow = '<td class="price2">([0-9]+),-</td>'
prices = re.findall(regexRow, response.text[start:end])
for name, price in zip(names, prices):
price = int(price)
if ((cheapestPrice == None or cheapestPrice > price) and name.lower() == card.name.lower()):
cheapestPrice = price
return cheapestPrice | UTF-8 | Python | false | false | 1,191 | py | 54 | mysticshop.py | 38 | 0.68178 | 0.674223 | 0 | 43 | 26.72093 | 95 |
westgate458/LeetCode | 8,890,582,322,253 | 90cc4a29da2fcac7cd1f5ce7c392d0ca8a94e12d | 8773e8c9b9a0a6e407f91b6f7c6321141d7e8356 | /apple/P1804_apple.py | 39be60380ea474768740062f36b0c62f59a029be | [] | no_license | https://github.com/westgate458/LeetCode | 1836bb21e8dd95386ccab390f5fd04567a429a02 | 36d7f9e967a62db77622e0888f61999d7f37579a | refs/heads/master | 2021-12-28T04:16:36.875737 | 2021-12-17T05:48:09 | 2021-12-17T05:48:09 | 152,928,584 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Trie(object):
def __init__(self):
self.d = {}
def insert(self, word):
"""
:type word: str
:rtype: None
"""
node = self.d
for c in word:
if c not in node: node[c] = {}
node = node[c]
if '@' not in node: node['@'] = 0
node['@'] += 1
if '*' not in node: node['*'] = 0
node['*'] += 1
def countWordsEqualTo(self, word):
"""
:type word: str
:rtype: int
"""
node = self.d
for c in word:
if c not in node: return 0
node = node[c]
return node.get('*', 0)
def countWordsStartingWith(self, prefix):
"""
:type prefix: str
:rtype: int
"""
node = self.d
for c in prefix:
if c not in node: return 0
node = node[c]
return node.get('@', 0)
def erase(self, word):
"""
:type word: str
:rtype: None
"""
node = self.d
for c in word:
if c not in node: return
node = node[c]
node['@'] -= 1
if '*' in node: node['*'] -= 1
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.countWordsEqualTo(word)
# param_3 = obj.countWordsStartingWith(prefix)
# obj.erase(word) | UTF-8 | Python | false | false | 1,542 | py | 640 | P1804_apple.py | 640 | 0.407263 | 0.399481 | 0 | 60 | 24.716667 | 59 |
jinjinfan/An_Introduction_to_Interactive_Programming_in_Python- | 7,456,063,227,217 | 0b872b6ffc9932da6b58aa2615a0da735b8abdb2 | 0df40e835f27acc62d50fe79b6a2407542b8d317 | /Stopwatch.py | 296aa4c531e4b236246e400fe25f54e02821cf83 | [] | no_license | https://github.com/jinjinfan/An_Introduction_to_Interactive_Programming_in_Python- | 5a1abd6c645e60eeb0aa9492ae2617ad9a67f088 | 89ee143a5f7de3eb0441bf2533d0f865a53045cc | refs/heads/master | 2021-01-16T21:01:03.765411 | 2015-06-28T15:14:01 | 2015-06-28T15:14:01 | 38,201,072 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # "Stopwatch: The Game"
import simplegui
# define global variables
time = 0
# the total number of stops
number_stops = 0
# the number of successful stops at a whole second
nmber_win = 0
# determine whether stopwatch is running
timer_running = False
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
minute = t // 600
second = (t % 600 ) // 10
second_tenth = t - minute * 600 - second * 10
if second < 10:
str_sec = "0" + str(second)
else:
str_sec = str(second)
return str(minute) + ":" + str_sec + "." + str(second_tenth)
# display the game results
def game_result():
return str(nmber_win) + " / " + str(number_stops)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
global timer_running
# set the variable to True when running the timer
timer_running = True
timer.start()
def stop():
global number_stops
global nmber_win
global timer_running
if timer_running == True:
# add the total number of stops by 1
number_stops = number_stops + 1
# when successfully stops at a whole second
if time % 10 == 0:
# add the win number by 1
nmber_win = nmber_win + 1
# set the variable to False when the stopwatch stops
timer_running = False
timer.stop()
def reset():
global time
global number_stops
global nmber_win
global timer_running
time = 0
number_stops = 0
nmber_win = 0
timer_running = False
# define event handler for timer with 0.1 sec interval
def timer():
global time
time = time + 1
# define draw handler
def display(canvas):
canvas.draw_text(format(time), (50, 90), 40, 'White')
canvas.draw_text("win / total",(135, 20),15,'Red')
canvas.draw_text(game_result(), (140, 40), 23, 'Red')
# create frame
frame = simplegui.create_frame("Stop watch", 200, 150)
label = frame.add_label("Stopwatch: The Game")
# register event handlers
frame.add_button("Start", start, 120)
frame.add_button("Stop", stop, 120)
frame.add_button("Reset", reset, 120)
frame.set_draw_handler(display)
timer = simplegui.create_timer(100, timer)
# start frame
frame.start()
| UTF-8 | Python | false | false | 2,388 | py | 7 | Stopwatch.py | 6 | 0.611809 | 0.582496 | 0 | 84 | 26.404762 | 70 |
chaitanyak52/fairing | 5,729,486,409,668 | b5f2f9e6357928a68f8e067e743e35bbfdd67d58 | 1d4dbd4bf687c3b7e12a329a16eb600f8cbab72b | /fairing/deployers/gcp/gcp.py | 9f8d8c18db35e0538d100f7034587d7ed87e2459 | [
"Apache-2.0"
] | permissive | https://github.com/chaitanyak52/fairing | 1d9075fcf42d5305d04f6a34d85e2a5cce2ec322 | bf7930f0ee8832d4a942ca7e308b56ed0b9a924c | refs/heads/master | 2021-06-13T19:14:02.380885 | 2019-06-27T14:49:57 | 2019-06-27T14:49:57 | 193,507,639 | 2 | 0 | Apache-2.0 | false | 2021-06-01T23:52:36 | 2019-06-24T13:08:38 | 2019-08-14T12:29:32 | 2021-06-01T23:52:35 | 3,355 | 2 | 0 | 1 | Jsonnet | false | false | from fairing import utils
from fairing.deployers.deployer import DeployerInterface
from fairing.cloud.gcp import guess_project_name
from fairing import http_utils
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors
class GCPJob(DeployerInterface):
"""Handle submitting training job to GCP.
Attributes:
project_id: Google Cloud project ID to use.
region: region in which the job has to be deployed.
Ref: https://cloud.google.com/compute/docs/regions-zones/
scale_tier: machine type to use for the job.
Ref: https://cloud.google.com/ml-engine/docs/tensorflow/machine-types
job_config: Custom job configuration options. If an option is specified
in the job_config and as a top-level parameter, the parameter overrides
the value in the job_config.
Ref: https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs
"""
def __init__(self, project_id=None, region=None, scale_tier=None, job_config=None):
self._project_id = project_id or guess_project_name()
self._region = region or 'us-central1'
self._job_config = job_config or {}
self.scale_tier = scale_tier
self._ml = discovery.build('ml', 'v1')
self._ml._http = http_utils.configure_http_instance(self._ml._http)
def create_request_dict(self, pod_template_spec):
"""Return the request to be sent to the ML Engine API."""
# TODO: Update deploy interface to pass image directly instad of
# PodTemplateSpec.
# Retrieve image uri from pod template spec.
image_uri = pod_template_spec.containers[0].image
self._job_name = 'fairing_job_{}'.format(utils.random_tag())
# Merge explicitly specified parameters with the job config dictionary
request_dict = self._job_config
request_dict['jobId'] = self._job_name
if 'trainingInput' not in request_dict:
request_dict['trainingInput'] = {}
if self.scale_tier:
request_dict['trainingInput']['scaleTier'] = self.scale_tier
if 'masterConfig' not in request_dict['trainingInput']:
request_dict['trainingInput']['masterConfig'] = {}
request_dict['trainingInput']['masterConfig']['imageUri'] = image_uri
if self._region:
request_dict['trainingInput']['region'] = self._region
return request_dict
def deploy(self, pod_template_spec):
"""Deploys the training job"""
request_dict = self.create_request_dict(pod_template_spec)
try:
print('Creating training job with the following options: {}'.format(
str(request_dict)))
response = self._ml.projects().jobs().create(
parent='projects/{}'.format(self._project_id),
body=request_dict
).execute()
print('Job submitted successfully.')
self.get_logs()
return self._job_name
except errors.HttpError as err:
print('There was an error submitting the job.')
print(err._get_reason())
def get_logs(self):
"""Streams the logs for the training job"""
print('Access job logs at the following URL:')
print('https://console.cloud.google.com/mlengine/jobs/{}?project={}'
.format(self._job_name, self._project_id))
| UTF-8 | Python | false | false | 3,464 | py | 114 | gcp.py | 88 | 0.637991 | 0.636547 | 0 | 80 | 42.3 | 87 |
b2220333/panda3d-editor | 18,657,337,943,299 | 15c28b9efea38f45fbc7d7627b255d627bd96aa6 | c311242ac4796a275c84c503c4ca2f7eb2db4b9a | /src/pandaEditor/editor/nodes/sceneRoot.py | c7e6c8c87d978da3890bb4a3763d3e6a208af446 | [
"MIT"
] | permissive | https://github.com/b2220333/panda3d-editor | c9773631733d78e71fc76abfca72bc588600141e | f87fadf03048508a909d38f9714a72cb01abf069 | refs/heads/master | 2020-03-27T08:59:03.964485 | 2016-10-08T01:41:37 | 2016-10-08T01:41:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from game.nodes.sceneRoot import SceneRoot as GameSceneRoot
class SceneRoot( GameSceneRoot ):
def GetChildren( self ):
children = []
comps = [render2d, render] + base.scene.comps.keys()
for comp in comps:
children.append( base.game.nodeMgr.Wrap( comp ) )
return children | UTF-8 | Python | false | false | 343 | py | 140 | sceneRoot.py | 140 | 0.603499 | 0.600583 | 0 | 13 | 25.461538 | 61 |
Lifefang/python-labs | 7,765,300,880,670 | 971ab5ab1a8bffb6e59b4252f60a9ee1720bc8ab | 69c5b9a024dfdaf8e0fd9be5df68e6afcf292557 | /Lab3_Act1_d.py | 11f52128054b97c1a131351090c2204376e208f3 | [] | no_license | https://github.com/Lifefang/python-labs | a85bab0a66b2170d047752c923e2d9b21c78a491 | c257c35f73c87f27e7dd90119428f097bea2b9e9 | refs/heads/master | 2020-08-02T21:16:58.817993 | 2019-12-07T15:10:28 | 2019-12-07T15:10:28 | 211,509,984 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # “Aggies do not lie, cheat, or steal, or tolerate those who do”
# “I have not given or received any unauthorized aid on this assignment”
#
# Name: RYAN WALTERBACH
# Section: 537
# Assignment: LAB4b_Prog4
# Date: 20 9 2019
from math import *
print('Enter the coefficient for A:')
A = float(input())
print('Enter the coefficient for B:')
B = float(input())
print('Enter the coefficient for C:')
C = float(input())
# The quadratic formula is: (-row_4 +- sqrt(row_4**2 - 4*A*C)) / (2*A)
if A != 0: # If input A doesnt equal zero then it will have 2 zeros
b1 = -B #
squareroot = B**2 - 4*A*C
a1 = 2 * A
if squareroot > 0: # If the square root is positive then there will be real zeros
zero1 = (b1 + sqrt(squareroot)) / a1
zero2 = (b1 - sqrt(squareroot)) / a1
print('The zeros are line_number=', zero1, 'and line_number=', zero2)
elif squareroot < 0: # If the square root is negative then there will be imaginary zeros
squareroot = -squareroot
zero1 =print('line_number=', (b1/a1),'+ months', (sqrt(squareroot)/a1))
zero2 = print('line_number=',(b1/a1),'+ months', (sqrt(squareroot)/a1))
if A == 0 and B != 0: # If only A = 0 then there will be one zero
zero = (-C)/ B
print('If A=0 there is only one root. It being line_number=',zero,)
if A == 0 and B == 0: # If A and B = 0 then C must also equal zero
if C != 0: # C has to be zero is A and B are. If then there will be an error
print('Error')
elif C == 0: # If C does equal 0 then there will be no zeros.
print('No zeros found.') | UTF-8 | Python | false | false | 1,600 | py | 71 | Lab3_Act1_d.py | 70 | 0.621231 | 0.588568 | 0 | 36 | 43.25 | 93 |
stratosm/NLP_modules | 14,190,571,989,278 | 7ea92369c18c224dadcf1d80ff86765725cb2705 | 1662a53ab7e9c07b96bfd7fc9648f5f9d305d028 | /src/clean_rows.py | ea8608563090f6d5eda6f12d8b37cc228dce9949 | [] | no_license | https://github.com/stratosm/NLP_modules | deec43d17e2bdb60f60202b10cf9355748dcf69c | 5a30de3dfcd761d2538edbaf95e912be6fc6c64b | refs/heads/master | 2022-07-04T18:46:04.875672 | 2020-05-16T14:17:32 | 2020-05-16T14:17:32 | 264,003,463 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Author: Stratos mansalis
import numpy as np
import pandas as pd
def clean_NaN(df):
"""
Clean the sentences from NaN (Not a Number) and empty rows.
Note! Drop the unnecessary columns first and then use this function.
Arguments
---------
df: the given dataframe
Usage
-----
new_df = clean_rows(df)
Returns
-------
A dataframe cleaned from empty rows or rows contain NaNs
"""
# there are few nan as text values, not np.nan
df = df.replace('nan', '')
df = df.replace('NaN', '')
# replace field that's entirely space (or empty) with NaN
df = df.replace(r'^\s*$', np.nan, regex=True)
# total number of NaNs
total_nan = df.isna().sum().sum()
# total number of rows
total_rows = df
print('There are ',total_nan,' rows that contain NaN values.')
print('Removing these rows ...')
columns = df.columns.astype(str).tolist()
df.dropna(subset=columns, inplace=True)
print('Succesfully Deleted!')
return df
| UTF-8 | Python | false | false | 1,100 | py | 19 | clean_rows.py | 10 | 0.571818 | 0.571818 | 0 | 47 | 22.404255 | 76 |
ntadej/racorodja | 4,638,564,728,277 | da6c8d6c9cd95c1e54dfb2b1e2072858324b7d25 | 65b9bc780fd8226cc1a27782fae4761def781a60 | /python/branje_matrika.py | 9a11d27717d22046fddbd5dcb3a5aaffad61409f | [
"MIT"
] | permissive | https://github.com/ntadej/racorodja | 044f1f93594b3e8c52523492737d827f6d1f538b | 8c391c42d5f4cb6861982e367da31ee19f9f24c8 | refs/heads/master | 2023-08-25T09:21:05.023814 | 2016-06-02T06:47:45 | 2016-06-02T06:47:54 | 53,779,403 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Ustvarimo prazen seznam vrstic, ki jih bomo prebrali
data = []
# Ukaz 'with' zagotovi, da se datoteka na koncu bloka zapre in nam
# ni potrebno vec paziti na to
with open('../data/matrika.dat', 'r') as f:
# beremo vrstico po vrstico
for line in f:
# odstranimo odvecne presledke
line = line.strip()
# locimo podake po presledkih in jih pretvorimo v celo stevilo
h = [int(x) for x in line.split()]
# dodamo vrstico
data.append(h)
print(data)
| UTF-8 | Python | false | false | 501 | py | 15 | branje_matrika.py | 8 | 0.642715 | 0.642715 | 0 | 16 | 30.3125 | 70 |
sophiaems1/ColumbiaPythonCourse | 13,451,837,593,718 | 46fa798908180931e9a8aaf53b66f75f3ee9b967 | aa3e05df216aa135b9062a5608d46aec249dadd6 | /A2_Hot_or_Cold_sem2275.py | a96298183471fb6d536076b8afe3c35b090fb105 | [] | no_license | https://github.com/sophiaems1/ColumbiaPythonCourse | 5d5fa4686531033e1ad25e5480f10aad8e35bcdf | f39bcd63191ad59678313f859e28387ad5d3ace4 | refs/heads/main | 2023-06-15T20:25:35.790379 | 2021-07-14T13:41:45 | 2021-07-14T13:41:45 | 385,954,362 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | code = 25
print("Welcome to Sophia's Hot or Cold game!")
print("You have a total of 10 tries to figure out the secret number between 1 and 100 (only integers).")
x = int(input("Enter a guess:"))
guesses = 1
while x != code:
if x >= 0 and x < 15:
print("You're warm!")
x = int(input("Enter a new guess (so close!): "))
guesses = guesses + 1
if x > 35 and x <= 50:
print("You're warm!")
x = int(input("Enter a new guess (so close!): "))
guesses = guesses + 1
if x >= 15 and x <= 35:
print("You're on fire!")
x = int(input("Enter a new guess (YOU GOT THIS!!): "))
guesses = guesses + 1
if x > 50 and x <= 75:
print("You're cold!")
x = int(input("Enter a new guess (try smaller numbers): "))
guesses = guesses + 1
if x > 75:
print("You're freezing cold!")
x = int(input("Enter a new guess (think MUCH smaller): "))
guesses = guesses + 1
elif guesses == 10:
print(f"Sorry, you lost the game. The correct number was: {code}.")
break
print()
print()
if x == 25:
print("You're amazing! Great job, you found Sophia's secret code!")
print()
print(f"It took you {guesses} guesses!".format(guesses))
| UTF-8 | Python | false | false | 1,298 | py | 7 | A2_Hot_or_Cold_sem2275.py | 7 | 0.546995 | 0.520031 | 0 | 37 | 33.027027 | 104 |
spivachuk/sovrin-node | 962,072,708,230 | 18350f3a66aa3c89ba3f491f90d92d85bb058145 | 5503910c7093a4977156aab2f04261a4bdb76fbe | /scripts/performance/Measuring_Transactions/measuring_transactions.py | 1ff94384c131427be09d884a5228391d05b5774c | [
"Apache-2.0"
] | permissive | https://github.com/spivachuk/sovrin-node | 8092cc3b9aab589b6c20d9662a5f3e0564219533 | 4db8ed69407d11bffb90ca6f998efd671df697c5 | refs/heads/master | 2020-12-31T05:39:53.508220 | 2018-07-24T13:56:02 | 2018-07-24T13:56:02 | 80,642,284 | 0 | 0 | Apache-2.0 | true | 2018-07-24T14:01:21 | 2017-02-01T16:59:41 | 2018-07-24T13:56:11 | 2018-07-24T14:01:21 | 10,090 | 0 | 0 | 0 | Python | false | null | '''
Created on Feb 26, 2018
@author: khoi.ngo
'''
import argparse
from asyncio.events import get_event_loop
import json
from indy import ledger, signus, pool, wallet
class Options:
"""
This class use to pass the value from command line.
"""
def __init__(self):
parser = argparse.ArgumentParser(
description='This script will show the current number of '
'transaction and then show how many transactions per minute.')
parser.add_argument('-c',
help='Use to get current seqNo of transactions',
action='store_true',
default=False, required=False, dest='count')
parser.add_argument('-s',
help='It is the starting seqNo of the transaction',
action='store',
default=False, required=False, dest='start_seqNo')
parser.add_argument('-e',
help='It is the ending seqNo of the transaction',
action='store',
default=False, required=False, dest='end_seqNo')
self.args = parser.parse_args()
class Colors:
""" Class to set the colors for text. """
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m' # Normal default color.
class Var:
pool_handle = 0
wallet_handle = 0
def force_print_to_console(message: str, color: str):
"""
Force print a message to console (no matter log is captured or not).
:param message:
:param color:
"""
msg = color + message + Colors.ENDC
print(msg)
def print_green(message: str):
"""
Force print a message with green color to console
(no matter log is captured or not).
:param message:
"""
force_print_to_console(message, Colors.OKGREEN)
def print_error(message: str):
"""
Force print a message with green color to console
(no matter log is captured or not).
:param message:
"""
force_print_to_console(message, Colors.FAIL)
def generate_random_string(prefix="", suffix="", size=20):
"""
Generate random string .
:param prefix: (optional) Prefix of a string.
:param suffix: (optional) Suffix of a string.
:param size: (optional) Max length of a string (include prefix and suffix)
:return: The random string.
"""
import random
import string
left_size = size - len(prefix) - len(suffix)
random_str = ""
if left_size > 0:
random_str = ''.join(
random.choice(string.ascii_uppercase +
string.digits) for _ in range(left_size))
else:
print("Warning: Length of prefix and suffix more than %s chars"
% str(size))
result = str(prefix) + random_str + str(suffix)
return result
async def create_submitter_did():
try:
# variable
pool_txn = "/var/lib/indy/sandbox/pool_transactions_genesis"
pool_name = generate_random_string("test_pool")
wallet_name = generate_random_string("test_wallet")
seed_default_steward = "000000000000000000000000Steward1"
pool_config = json.dumps({"genesis_txn": pool_txn})
# Create pool
await pool.create_pool_ledger_config(pool_name, pool_config)
Var.pool_handle = await pool.open_pool_ledger(pool_name, None)
# Create Wallet
await wallet.create_wallet(pool_name, wallet_name,
None, None, None)
Var.wallet_handle = await wallet.open_wallet(wallet_name, None, None)
submitter_did, _ = await signus.create_and_store_my_did(
Var.wallet_handle, json.dumps({"seed": seed_default_steward}))
target_did, _ = await signus.create_and_store_my_did(
Var.wallet_handle, json.dumps({}))
return submitter_did, target_did
except Exception as e:
print_error("Exception: " + str(e))
async def get_current_number_of_the_transaction():
"""
Get the current number of transaction.
@return: return the number of transaction.
"""
submitter_did, target_did = await create_submitter_did()
seqNo = 0
try:
nym_req = await ledger.build_nym_request(submitter_did, target_did,
None, None, None)
response = await ledger.sign_and_submit_request(Var.pool_handle,
Var.wallet_handle,
submitter_did,
nym_req)
seqNo = json.loads(response)['result']['seqNo']
print_green("Current number of transactions: " + str(seqNo))
return seqNo
except Exception as e:
print_error("Exception: " + str(e))
async def get_a_transaction_by_seqNo(seqNo):
"""
Get the transaction by number.
@param seqNo: The seqNo of the transaction.
That will be used to get transaction information.
@return: return the transaction information.
"""
submitter_did, _ = await create_submitter_did()
get_txn_request = await ledger.build_get_txn_request(submitter_did,
int(seqNo))
result = await ledger.sign_and_submit_request(Var.pool_handle,
Var.wallet_handle,
submitter_did,
get_txn_request)
return result
async def calculate_transactions_per_minute(start_seqNo, end_seqNo):
"""
Calculating the transactions per minute by getting the begin transaction
and the current transaction.
Then it shows the number of transactions per minute, second.
@param start_seqNo: The starting seqNo of the transaction.
That will be used to get transaction information.
@param end_seqNo: The ending seqNo of the transaction.
That will be used to get transaction information.
"""
try:
# get time of the begin transaction
start_number = int(start_seqNo)
begin_trans = await get_a_transaction_by_seqNo(start_number)
begin_time = int(json.loads(begin_trans)['result']['data']['txnTime'])
# get number and time of the latest transaction
if not end_seqNo:
latest_number = await get_current_number_of_the_transaction() - 1
else:
latest_number = int(end_seqNo)
latest_trans = await get_a_transaction_by_seqNo(latest_number)
latest_time = int(json.loads(latest_trans)['result']['data']['txnTime'])
# calculate the transactions per second
num_of_trans = latest_number - int(start_number)
duration_as_second = latest_time - begin_time
duration_as_minute = (latest_time - begin_time) / 60
result_minute = num_of_trans / duration_as_minute
result_second = num_of_trans / duration_as_second
print_green("From number: " + str(start_number) + " - Timestamp: " + str(begin_time))
print_green("To number: " + str(latest_number) + " - Timestamp: " + str(latest_time))
print_green("ADD measurement")
print_green(str(int(result_minute)) + " txns/min")
print_green(str(int(result_second)) + " txns/sec")
except Exception as e:
print("Exception: " + str(e))
if __name__ == '__main__':
args = Options().args
try:
loop = get_event_loop()
if args.count:
loop.run_until_complete(get_current_number_of_the_transaction())
elif args.start_seqNo:
loop.run_until_complete(
calculate_transactions_per_minute(
args.start_seqNo, args.end_seqNo))
loop.close()
except Exception as e:
print("Exception: " + str(e))
| UTF-8 | Python | false | false | 7,963 | py | 18 | measuring_transactions.py | 16 | 0.578802 | 0.572021 | 0 | 224 | 34.549107 | 93 |
MarkLyck/py_ai | 764,504,205,695 | 123e4d708c472d8f581e62d61ce3561b7b3e024e | 380bdc92f71ca472d9268af7476235b24fd38a22 | /NLP/NLP.py | 976b8b528468f477fa0ec52a8750b2d6e276bd45 | [] | no_license | https://github.com/MarkLyck/py_ai | 662e8e121cd3c0130455fe9518690877a0a32d54 | 6d398978a248eafb944f738888d4109817eaecec | refs/heads/master | 2021-01-13T14:05:18.351013 | 2016-12-13T02:52:02 | 2016-12-13T02:52:02 | 76,211,511 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def tasks(originalMessage):
return { 'originalMessage': originalMessage }
| UTF-8 | Python | false | false | 79 | py | 2 | NLP.py | 2 | 0.746835 | 0.746835 | 0 | 2 | 38 | 49 |
ParallelMeaningBank/easyccg | 12,996,571,049,643 | 0de744edd6edc9f7e9ce1509ff7ae1cf299a03da | 37f5037bb23db69b934c0940b5fa3c3400266626 | /training/eval_test.py | fd2c130515ebda6082ea0b79376cd161d43c5487 | [
"MIT"
] | permissive | https://github.com/ParallelMeaningBank/easyccg | 052d2a429267876893df834cba7999c27fa0c5fb | a36ea6037e905cc7917cc5163e810ec0e2670a62 | refs/heads/master | 2020-04-05T16:13:28.043930 | 2019-11-14T10:58:49 | 2019-11-14T10:59:00 | 61,124,350 | 2 | 1 | null | true | 2016-06-14T13:19:23 | 2016-06-14T13:19:22 | 2016-05-27T11:51:05 | 2016-05-31T14:24:24 | 61,522 | 0 | 0 | 0 | null | null | null | import sys
from collections import Counter
rawfile=sys.argv[1]
goldfile=sys.argv[2]
infile=sys.argv[3]
brackets={}
brackets["-LRB-"]="("
brackets["-RRB-"]=")"
brackets["-LSB-"]="["
brackets["-RSB-"]="]"
brackets["-LCB-"]="{"
brackets["-RCB-"]="}"
file=open("/group/corpora/public/ccgbank/data/LEX/CCGbank.02-21.lexicon")
seenw=set()
for line in file:
word=line.split()[0]
cat=line.split()[1]
#cat=rgx.sub("S",cat)
word=brackets.get(word,word)
wordcat=word+" "+cat
if word not in seenw:
seenw.add(word)
file.close()
stoks={}
snum=0
file=open(rawfile)
for line in file:
stuff=line.split()
t=0
stoks[snum]={}
for tok in stuff:
stoks[snum][t]=tok
t=t+1
snum=snum+1
file.close()
file=open(goldfile)
snum=0
sentences={}
totalr=Counter()
totr=0
totrCov=0
for line in file:
stuff=line.split()
id=stuff[0]
sentences[snum]=set()
for dep in stuff[1:]:
argstype=dep.rstrip("]").split("[")
args=argstype[0].split("-")
deptype=argstype[1].lstrip("<").rstrip(">")
argl=int(args[0])
argr=int(args[1])
if argstype[1][0]=="<":
temp=argr
argr=argl
argl=temp
sentences[snum].add(deptype+" %03d %03d "%(argl,argr))
#totalr[deptype]=totalr[deptype]+1
totr=totr+1
snum=snum+1
file.close()
print snum
stuffcount=Counter()
file=open(infile)
flag=0
snum=0
totc=0
correct=Counter()
corpos=Counter()
scorpos=Counter()
slcorrect=Counter()
srcorrect=Counter()
lrtotc=0
wrong=Counter()
totp=0
totalp=Counter()
totpos=Counter()
stotpos=Counter()
sltotalp=Counter()
srtotalp=Counter()
lrtotp=0
sltotalr=Counter()
srtotalr=Counter()
lrtotr=0
cov=0
deps=set()
sents=0
for line in file:
stuff=line.split()
if len(stuff) > 0 and stuff[0]=="<c>":
t=0
wpos={}
for tokcat in stuff[1:]:
tok=tokcat.split("|")[0]
pos=tokcat.split("|")[1]
wpos[t]=pos
if tok != stoks[snum][t] and "&" not in stoks[snum][t]:
print snum, t, tok, stoks[snum][t]
snum=snum+1
t=t+1
args=set()
if deps:
for dep in sentences[snum]:
deptype=dep.split()[0]
argl=int(dep.split()[1])
argr=int(dep.split()[2])
args.add(argl)
args.add(argr)
wordl=stoks[snum][argl]
wordr=stoks[snum][argr]
seenl=(wordl not in seenw)
seenr=(wordr not in seenw)
if seenl:
sltotalr[deptype]=sltotalr[deptype]+1
if seenr:
srtotalr[deptype]=srtotalr[deptype]+1
if seenl and seenr:
lrtotr=lrtotr+1
totalr[deptype]=totalr[deptype]+1
totrCov=totrCov+1
#if deptype=="nsubj":
# totpos[wpos[argl]]=totpos[wpos[argl]]+1
# if seenl:
# stotpos[wpos[argl]]=stotpos[wpos[argl]]+1
for depw in deps:
dep=" ".join(depw.split()[0:3])+" "
wordl=depw.split()[3]
wordr=depw.split()[4]
seenl=(wordl not in seenw)
seenr=(wordr not in seenw)
deptype=dep.split()[0]
argl=int(dep.split()[1])
argr=int(dep.split()[2])
if dep in sentences[snum]:
totc=totc+1
correct[deptype]=correct[deptype]+1
if seenl:
slcorrect[deptype]=slcorrect[deptype]+1
if seenr:
srcorrect[deptype]=srcorrect[deptype]+1
if seenl and seenr:
lrtotc=lrtotc+1
if deptype=="nsubj":
corpos[wpos[argl]]=corpos[wpos[argl]]+1
if seenl:
scorpos[wpos[argl]]=scorpos[wpos[argl]]+1
if argl in args and argr in args:
totp=totp+1
totalp[deptype]=totalp[deptype]+1
if seenl:
sltotalp[deptype]=sltotalp[deptype]+1
if seenr:
srtotalp[deptype]=srtotalp[deptype]+1
if seenl and seenr:
lrtotp=lrtotp+1
if flag==1:
cov=cov+1
if len(sentences[snum]) > 0:
sents=sents+1
snum=snum+1
flag=0
deps=set()
elif len(line)>0 and line[0]=="(":
flag=1
#if stuff[1] in ["_"]:#,"part","num","poss"]:
# del stuff[1]
stuffcount[len(stuff)]=stuffcount[len(stuff)]+1
if len(stuff) == 3:
deptype=stuff[0][1:]
wordl=stuff[1].split("_")[0]
wordr=stuff[2].split("_")[0]
if stuff[1].split("_")[1] != "" and stuff[2].split("_")[1] != "":
argl=int(stuff[1].split("_")[1])
argr=int(stuff[2].split("_")[1].rstrip(")"))
deps.add(deptype+" %03d %03d "%(argl,argr)+" "+wordl+" "+wordr)
file.close()
#print snum
print "Coverage: ", sents, snum, float(cov)/float(snum)
precision=float(totc)/float(totp)
recall=float(totc)/float(totrCov)
fscore=2*(precision*recall)/(precision+recall)
print "recall (cov):", totc, totrCov, recall
print "precision (cov):", totp, totc, precision
print "fscore (cov):", fscore
print
precision=float(totc)/float(totp)
recall=float(totc)/float(totr)
fscore=2*(precision*recall)/(precision+recall)
print "recall (all):", totr, totc, recall
print "precision (all):", totp, totc, precision
print "fscore (all):", fscore
print
for deptype, countr in totalr.most_common():
countp=max(totalp[deptype],1)
slcountp=max(sltotalp[deptype],1)
srcountp=max(srtotalp[deptype],1)
slcountr=max(sltotalr[deptype],1)
srcountr=max(srtotalr[deptype],1)
print deptype, countr, correct[deptype], float(correct[deptype])/float(countr), float(correct[deptype])/float(countp), slcountr, float(slcorrect[deptype])/float(slcountr), srcountr, float(srcorrect[deptype])/float(srcountr)
print
for pos, count in totpos.most_common():
scount=max(1,stotpos[pos])
print pos, count, scount, float(corpos[pos])/float(count), float(scorpos[pos])/float(scount)
| UTF-8 | Python | false | false | 5,491 | py | 42 | eval_test.py | 32 | 0.624841 | 0.603715 | 0 | 226 | 22.287611 | 224 |
NareshPeshwe/new_project | 3,985,729,677,463 | d36d26e5b2bfbd3564d14f8e2e03db910ade6e92 | 5b84cd2bbd88a9d89a76b43713a9fa3bf2ec8f01 | /social/urls.py | 796eb7bc0e2f820d4f66991c841b2a431e1049a9 | [] | no_license | https://github.com/NareshPeshwe/new_project | a5c0f0cde66125ced05c30c5387421c86fc5dbf5 | dda51c7729e690ac038497c25cce719639a7d3c2 | refs/heads/master | 2018-03-23T09:51:18.196333 | 2016-06-06T09:48:18 | 2016-06-06T09:48:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^key/$', views.index, name='index'),
url(r'^register/$',views.sign_up,name="sign_up"),
url(r'^loggedin/$',views.sign_in,name="sign_in"),
url(r'^update/$',views.update,name="update"),
url(r'^log_out/$',views.log_out,name="log_out"),
#url(r'^build_team/$',views.build_team,name="build_team"),
url(r'^player_profile/$',views.player_profile,name="player_profile"),
#url(r'^owner_profile/$',views.owner_profile,name="owner_profile"),
#url(r'^add_ground/$',views.add_ground,name="add_ground"),
] | UTF-8 | Python | false | false | 646 | py | 5 | urls.py | 4 | 0.636223 | 0.636223 | 0 | 16 | 39.4375 | 73 |
SakuraYUI/recognition_SVM | 8,366,596,337,935 | c6d4503da0788abcb9901ba552ad8d17b8f0b9e2 | d54c3b3f345fedc4e64dbbf18455147199dd5bb2 | /save.py | f4301bedb1e1f7a58edc842a4f8541a750dde806 | [] | no_license | https://github.com/SakuraYUI/recognition_SVM | 1fda9f54eb6c6ae8543056397b9ab12fc0efe8b7 | a59383dd0584175befc91c3179d07d4a18082ac2 | refs/heads/master | 2021-01-11T15:00:47.020697 | 2017-02-17T06:27:09 | 2017-02-17T06:27:09 | 80,279,443 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sys
path = "D:\Python 2.7.9\libsvm-3.16\python"
sys.path.append(path)
from svmutil import *
import Image
import random
NUM = 30
for i in range(1, NUM):
train_label, train_matrix = svm_read_problem('./dataocr/ocr_' + str(i))
model = svm_train(train_label, train_matrix, '-c 3 -g 0.015625')
svm_save_model('./datamodel/model_' + str(i), model) | UTF-8 | Python | false | false | 408 | py | 8 | save.py | 7 | 0.620098 | 0.57598 | 0 | 14 | 27.214286 | 75 |
yogendrabohara/tdddjango | 11,948,599,033,477 | 76917498b6b9f6a50caaf8016a0b22d5caf7f194 | ee2c2188eacf35de859ddff22337b2caf0e488de | /xword_data/views.py | ca4852484a930bb43fb9f978a936ce18558b6025 | [] | no_license | https://github.com/yogendrabohara/tdddjango | 020098401886338f2c3abb5d1f1027eedb4abb80 | 949958d36201d8582b3190d90aac725638f97b73 | refs/heads/main | 2023-08-15T23:59:22.249395 | 2021-09-10T17:28:21 | 2021-09-10T17:28:21 | 404,807,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import Clue
from django.http import Http404
from django.shortcuts import redirect
# Create your views here.
def drillView(request):
if request.method == 'GET':
clue = Clue.objects.order_by('?').first()
results = ""
else:
clue = Clue.objects.get(id=request.POST['clue_id'])
print(clue)
if clue.entry.entry_text.lower() == request.POST['answer'].lower():
return redirect('xword-answer', clue.id)
else:
results = 'not correct'
# for requested count
if request.session.get('requested_count'):
requested_count = request.session.get('requested_count') + 1
else:
requested_count = 1
return render(request, 'drill.html', {'clue': clue, 'requested_count': requested_count, 'result': results})
| UTF-8 | Python | false | false | 848 | py | 13 | views.py | 9 | 0.639151 | 0.633255 | 0 | 25 | 32.92 | 111 |
moigagoo/sphinxcontrib-swagger2sphinx | 16,939,351,058,224 | 884745461698033b8f20bffc12845d37c38a11d0 | da3a59dc6e6e23b6086fd0166d82dce639d76ff2 | /setup.py | 7bf124b56b559acdef5b96b7efa4065e7cdabe3f | [] | no_license | https://github.com/moigagoo/sphinxcontrib-swagger2sphinx | 2b7fdbb6d01fc80c5512d74bec7e3f95b67cdc80 | bb10723ebca52974fc316e44488688e03bbbc2a6 | refs/heads/master | 2020-05-23T08:19:02.549958 | 2016-10-11T08:28:56 | 2016-10-11T08:28:56 | 70,225,816 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
def readme():
try:
with open("README.rst") as f:
return f.read()
except IOError:
pass
setup(
name="sphinxcontrib-swagger2sphinx",
version="0.1.5",
url="https://github.com/moigagoo/sphinxcontrib-swagger2sphinx",
download_url="https://pypi.org/project/sphinxcontrib-swagger2sphinx",
license="MIT",
author="Konstantin Molchanov",
author_email="moigagoo@live.com",
description="Converter from Swagger to Sphinx HTTP domain.",
long_description=readme(),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Documentation",
"Topic :: Utilities",
],
py_modules=["sphinxcontrib.swagger2sphinx"],
packages=["sphinxcontrib"],
package_dir={"sphinxcontrib": '.'},
platforms="any",
install_requires=["Sphinx>=1.0", "requests"]
)
| UTF-8 | Python | false | false | 1,171 | py | 3 | setup.py | 2 | 0.605465 | 0.596926 | 0 | 37 | 29.648649 | 73 |
LukeEuler/project-euler | 5,093,831,219,422 | 8980bb8ec4f46a6245647ff96da6b0a44ab07f6b | b9dbf54c5ac0083a094145ac6c22781e5e1c0ffb | /problem_12.py | 4ef6bd286d406254fa20e651b647ebf32498d576 | [] | no_license | https://github.com/LukeEuler/project-euler | 8197ed068f673010f13d19045fef1aa05899a843 | 0833a8ab931f5c3499a6e6ca695b1fe579eca62c | refs/heads/master | 2020-12-02T18:15:10.184004 | 2017-10-01T07:45:47 | 2017-10-01T07:45:47 | 96,504,562 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""Highly divisible triangular number."""
# https://projecteuler.net/problem=12
from app import Problem
from utils import Prime
class Problem12(Problem):
"""Highly divisible triangular number."""
def __init__(self):
self.name = "problem 12"
self.num = 500
self.primes = []
for prime in Prime():
self.primes.append(prime)
if len(self.primes) > 500:
break
def triangle(self, num):
return int(num * (num + 1) / 2)
def factorsNum(self, num):
result = 1
for prime in self.primes:
if prime > num:
break
primeFactorsNum = 0
while num % prime == 0:
primeFactorsNum += 1
num = int(num / prime)
result *= (primeFactorsNum + 1)
return result
def solve(self):
n = 1
while True:
triangle = self.triangle(n)
result = self.factorsNum(triangle)
if result > self.num:
return triangle
n += 1
Problem12().run()
| UTF-8 | Python | false | false | 1,127 | py | 28 | problem_12.py | 27 | 0.510204 | 0.488909 | 0 | 48 | 22.479167 | 46 |
AlexPraefectus/labs_web | 2,396,591,773,523 | 5fce201ecfac0aa59eb2ddb3083b70dd62aa018f | 327482022ed6ba5ce79c63ceb1bbf9538cbdb338 | /labs_web/views/tutor/answer_ticket.py | 07260c3b9c2beaad61f6d88990c8590e2a3b62b0 | [
"BSD-3-Clause"
] | permissive | https://github.com/AlexPraefectus/labs_web | b798d2e352d81b2dd065440581480d9b350df2dc | 9bfd69ffc2196832523d5809cf921debe530f886 | refs/heads/master | 2021-08-16T11:07:27.637687 | 2020-06-19T08:18:25 | 2020-06-19T08:18:25 | 127,518,643 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask.views import View
from flask_login import current_user, login_required
from labs_web.extensions import AnswerTicketForm, Tickets, mongo_oid
from flask import request, flash, render_template, abort
from datetime import datetime
import math
class AnswerTicket(View):
PER_PAGE = 5
methods = ["GET", "POST"]
decorators = [login_required]
def dispatch_request(self, *args, **kwargs):
try:
page = int(request.args.get('page'))
except (ValueError, TypeError):
flash("invalid integer literal for page/no page provided")
return abort(404)
pages = int(math.ceil(Tickets.count({'course.id': {'$in': [course.course_id for course in
current_user.courses]},
'checked': {'$exists': False}}) / AnswerTicket.PER_PAGE))
if page > pages and page != 1: # page number should exist but first page is always available
abort(404)
# pagination
tickets = Tickets.find({'course.id': {'$in': [course.course_id for course in
current_user.courses]},
'checked': {'$exists': False}}). \
skip((page - 1) * AnswerTicket.PER_PAGE).limit(AnswerTicket.PER_PAGE)
form = AnswerTicketForm()
form.selected_ticket.choices = [(str(ticket['_id']),
"{} - {} ({}) {}".format(ticket['topic'],
ticket['author']['name'],
ticket['author']['group'],
ticket['sent'].strftime("%d %b %Y: %H:%M")))
for ticket in tickets]
if request.method == 'POST' and form.validate_on_submit():
Tickets.find_one_and_update({'_id': mongo_oid(form.data.get('selected_ticket'))},
{'$set':
{'checked': datetime.utcnow(),
'answ_body': form.data.get('answ_body'),
'public': form.data.get('make_public')}})
flash('Ticket answer saved')
for field, errors in form.errors.items():
for message in errors:
flash(message)
return render_template('tutor/answer_ticket.html',
tickets=tickets,
form=form,
page=page,
pages=pages)
| UTF-8 | Python | false | false | 2,753 | py | 143 | answer_ticket.py | 89 | 0.455866 | 0.452597 | 0 | 52 | 51.942308 | 110 |
fictionic/pacman_ai | 2,843,268,369,026 | 4951ddda4cf3ef88d3261d4844958d352d351fb0 | e7e2e74dbe32be78f2ca4a483066ebe83eb0f5e0 | /project2/multiAgents.py | c96d8c324f650bfde876bad2fd03a2f80aab3d19 | [] | no_license | https://github.com/fictionic/pacman_ai | b3146a9b34e8f42e16dc448cd9bc0364898c8492 | ce52db1fe44eeb236bacdc5973cb81b4cbb3d922 | refs/heads/master | 2021-06-16T14:33:33.793903 | 2017-02-03T04:59:07 | 2017-02-03T04:59:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
import sys
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
"""
def getAction(self, gameState):
"""
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
This evaluation function is not particularly good; using information
from the game state would allow it to be much better, although still
not as good as an agent that plans. You may find the information listed
below helpful in later parts of the project (e.g., when designing
an evaluation function for your planning agent).
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
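        # the values above are extracted as hints for a richer heuristic; this baseline only uses the score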
return successorGameState.getScore()
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
def debug(s):
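    """No-op debug hook; re-enable the print call below to trace the search."""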
# print(s)
pass
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 1)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
# we're pacman
return self.maxValueAction(0, 0, gameState)[1]
def minValueAction(self, ghostIndex, curDepth, state):
# if we're at a terminal state
if state.isLose() or state.isWin():
return self.evaluationFunction(state), None
nextDepth = None
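        # the last ghost hands the turn back to Pacman (a max node) and one full ply completes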
if ghostIndex == state.getNumAgents() - 1:
func = self.maxValueAction
nextIndex = 0
nextDepth = curDepth + 1
else:
func = self.minValueAction
nextIndex = ghostIndex + 1
nextDepth = curDepth
v = None
for action in state.getLegalActions(ghostIndex):
child = state.generateSuccessor(ghostIndex, action)
childV = func(nextIndex, nextDepth, child)[0]
if v is None or childV < v:
v = childV
        # if this ghost has no legal actions, fall back to the static evaluation
        if len(state.getLegalActions(ghostIndex)) == 0:
            v = self.evaluationFunction(state)
return v, None
def maxValueAction(self, pacmanIndex, curDepth, state):
v = None
a = None
# if we've reached the max depth, or if we're at a terminal state
if curDepth == self.depth or state.isLose() or state.isWin():
return self.evaluationFunction(state), None
nextIndex = 1
nextDepth = curDepth
for action in state.getLegalActions(pacmanIndex):
child = state.generateSuccessor(pacmanIndex, action)
childV = self.minValueAction(nextIndex, nextDepth, child)[0]
if v is None or childV > v:
v = childV
a = action
# if we're at a leaf
if len(state.getLegalActions()) == 0:
v = self.evaluationFunction(state)
return v, a
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
# we're pacman
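        # the search window starts at (-infinity, +infinity), approximated here with sys.maxint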
return self.maxValueAction(0, 0, gameState, -sys.maxint, sys.maxint)[1]
def maxValueAction(self, pacmanIndex, curDepth, state, alpha, beta):
# if we've reached the max depth, or if we're at a terminal state
if curDepth == self.depth or state.isLose() or state.isWin():
return self.evaluationFunction(state), None
v = None
a = None
nextIndex = 1
nextDepth = curDepth
for action in state.getLegalActions(pacmanIndex):
child = state.generateSuccessor(pacmanIndex, action)
childV = self.minValueAction(nextIndex, nextDepth, child, alpha, beta)[0]
if v is None or childV > v:
v = childV
a = action
            # prune: v already exceeds beta, so the minimizer above will never allow this branch
if beta is None or v > beta:
return v, action
alpha = max(v, alpha) if alpha is not None else v
# if we're at a leaf
if len(state.getLegalActions()) == 0:
v = self.evaluationFunction(state)
return v, a
def minValueAction(self, ghostIndex, curDepth, state, alpha, beta):
# if we're at a terminal state
if state.isLose() or state.isWin():
return self.evaluationFunction(state), None
nextDepth = None
if ghostIndex == state.getNumAgents() - 1:
func = self.maxValueAction
nextIndex = 0
nextDepth = curDepth + 1
else:
func = self.minValueAction
nextIndex = ghostIndex + 1
nextDepth = curDepth
v = None
for action in state.getLegalActions(ghostIndex):
child = state.generateSuccessor(ghostIndex, action)
childV = func(nextIndex, nextDepth, child, alpha, beta)[0]
if v is None or childV < v:
v = childV
      # prune if v < alpha: the max parent will never choose this branch
if alpha is None or v < alpha:
return v, action
beta = min(v, beta) if beta is not None else v
# if we're at a leaf
if len(state.getLegalActions()) == 0:
v = self.evaluationFunction(state)
return v, None
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
return self.maxValueAction(0, 0, gameState)[1]
def maxValueAction(self, pacmanIndex, curDepth, state):
# if we've reached the max depth, or if we're at a terminal state
if curDepth == self.depth or state.isLose() or state.isWin():
return self.evaluationFunction(state), None
v = None
a = None
nextIndex = 1
nextDepth = curDepth
for action in state.getLegalActions(pacmanIndex):
child = state.generateSuccessor(pacmanIndex, action)
childV = self.chanceValueAction(nextIndex, nextDepth, child)[0]
if v is None or childV > v:
v = childV
a = action
# if we're at a leaf
if len(state.getLegalActions()) == 0:
v = self.evaluationFunction(state)
return v, a
def chanceValueAction(self, ghostIndex, curDepth, state):
    # if we're at a terminal state
if state.isLose() or state.isWin():
return self.evaluationFunction(state), None
nextDepth = None
if ghostIndex == state.getNumAgents() - 1:
func = self.maxValueAction
nextIndex = 0
nextDepth = curDepth + 1
else:
func = self.chanceValueAction
nextIndex = ghostIndex + 1
nextDepth = curDepth
numLegalActions = len(state.getLegalActions(ghostIndex))
expectedV = 0.0
for action in state.getLegalActions(ghostIndex):
child = state.generateSuccessor(ghostIndex, action)
childV = func(nextIndex, nextDepth, child)[0]
expectedV += childV
    # if we're at a leaf (the ghost has no legal actions), just evaluate the
    # state instead of dividing by zero
    if numLegalActions == 0:
      return self.evaluationFunction(state), None
    expectedV /= numLegalActions
return expectedV, None
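# A minimal, standalone sketch (not part of the assignment API) of the uniform
# chance-node average that ExpectimaxAgent.chanceValueAction computes above;
# the successor values below are hypothetical and only illustrate the arithmetic.
def _chanceNodeExample(successorValues=(3.0, 6.0, 9.0)):
  # every legal ghost move is equally likely, so the node's value is the plain
  # average (3 + 6 + 9) / 3 == 6, rather than min(3, 6, 9) as in minimax
  return sum(successorValues) / len(successorValues)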
#### helper things for evaluationFunction
def getNearestGhost(gameState):
curPos = gameState.getPacmanPosition()
ghostDist = None
closestIndex = None
for ghostIndex in xrange(gameState.getNumAgents()):
if ghostIndex == 0:
continue
ghostPos = gameState.getGhostPosition(ghostIndex)
dist = manhattanDistance(ghostPos, curPos)
if ghostDist is None or dist < ghostDist:
ghostDist = dist
closestIndex = ghostIndex
return closestIndex
def getNearestAngryGhost(gameState):
curPos = gameState.getPacmanPosition()
ghostDist = None
closestIndex = None
for ghostIndex in xrange(gameState.getNumAgents()):
if ghostIndex == 0:
continue
ghostState = gameState.getGhostState(ghostIndex)
# skip all of the scared ghosts for this
if ghostState.scaredTimer > 0:
continue
ghostPos = gameState.getGhostPosition(ghostIndex)
dist = manhattanDistance(ghostPos, curPos)
if ghostDist is None or dist < ghostDist:
ghostDist = dist
closestIndex = ghostIndex
return closestIndex
class Feature:
def __init__(self, weight, valueFn):
self.weight = weight
self.valueFn = valueFn
features = []
# feature: number of food pellets left (scored as 1/numFood**2)
weight = 70
def fn(gameState):
numFood = gameState.getNumFood()
if numFood == 0:
return 500
return 1.0/numFood**2
feature = Feature(weight, fn)
features.append(feature)
# feature: inverse manhattan distance to the nearest capsule
weight = 1
def fn(gameState):
ret = None
curPos = gameState.getPacmanPosition()
for capsule in gameState.getCapsules():
dist = manhattanDistance(capsule, curPos)
if ret is None or dist < ret:
ret = dist
if ret is None:
return 0
return 1.0/ret
feature = Feature(weight, fn)
features.append(feature)
# feature: if the nearest ghost is scared, reward closeness (1/dist);
# otherwise penalise closeness (-1/dist, ignored beyond 6 squares)
weight = 8
def fn(gameState):
ghostIndex = getNearestGhost(gameState)
if ghostIndex is None:
return 0
ghostState = gameState.getGhostState(ghostIndex)
ghost = gameState.getGhostPosition(ghostIndex)
curPos = gameState.getPacmanPosition()
dist = manhattanDistance(ghost, curPos)
if ghostState.scaredTimer > 0:
if dist == 0:
return 200
if dist == 1:
return 0
return 1.0 / dist
else:
if dist == 0:
return -200
      if dist > 6:
return 0
return -1.0 / dist
feature = Feature(weight, fn)
features.append(feature)
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
    DESCRIPTION: We score each state as a weighted sum of features. The
    features are the number of food pellets left, the distance to the closest
    capsule, and the distance to the nearest ghost (counted positively or
    negatively depending on whether that ghost is scared). They are weighted
    70, 1, and 8, respectively.
"""
ret = 0
for feature in features:
ret += feature.weight * feature.valueFn(currentGameState)
return ret
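# A minimal, standalone sketch (not used by the game) of the weighted-feature
# sum that betterEvaluationFunction computes above; the per-feature values
# below are made up purely to illustrate the arithmetic.
def _weightedSumExample():
  # e.g. food term 70 * (1/16), capsule term 1 * (1/3), ghost term 8 * (-1/5)
  hypothetical = [(70, 1.0 / 16), (1, 1.0 / 3), (8, -1.0 / 5)]
  return sum(weight * value for weight, value in hypothetical)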
# Abbreviation
better = betterEvaluationFunction
| UTF-8 | Python | false | false | 14,997 | py | 3 | multiAgents.py | 2 | 0.639595 | 0.633593 | 0 | 404 | 36.118812 | 91 |
atyamsriharsha/VideoEditor | 6,914,897,386,426 | b21dfb454b0e2d8dbe2223799be7a63dac357bab | 74ae604fef2ecd1d53ca4150ab1d510c386251b9 | /generatehighlights.py | e13f649130118c933753acca9f8e315c987b0472 | [] | no_license | https://github.com/atyamsriharsha/VideoEditor | 118fed5fedd9a9f869e1d9155d51e3ad890a22b4 | 034c8904931bb3ac4b0356093747a031ab03f991 | refs/heads/master | 2021-01-22T04:01:26.413506 | 2016-10-20T16:42:49 | 2016-10-20T16:42:49 | 69,271,536 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from moviepy.editor import VideoFileClip, concatenate
clip = VideoFileClip("match.mp4")
cut = lambda i: clip.audio.subclip(i,i+1).to_soundarray(fps=22000)
volume = lambda array: np.sqrt(((1.0*array)**2).mean())
volumes = [volume(cut(i)) for i in range(0,int(clip.audio.duration-2))]
averaged_volumes = np.array([sum(volumes[i:i+10])/10
for i in range(len(volumes)-10)])
increases = np.diff(averaged_volumes)[:-1]>=0
decreases = np.diff(averaged_volumes)[1:]<=0
peaks_times = (increases * decreases).nonzero()[0]
peaks_vols = averaged_volumes[peaks_times]
peaks_times = peaks_times[peaks_vols>np.percentile(peaks_vols,90)]
final_times=[peaks_times[0]]
for t in peaks_times:
if (t - final_times[-1]) < 60:
if averaged_volumes[t] > averaged_volumes[final_times[-1]]:
final_times[-1] = t
else:
final_times.append(t)
final = concatenate([clip.subclip(max(t-10,0),min(t+10, clip.duration))
for t in final_times])
final.to_videofile('highlights.mp4')
| UTF-8 | Python | false | false | 1,053 | py | 12 | generatehighlights.py | 7 | 0.660019 | 0.624881 | 0 | 27 | 38 | 71 |