Dataset schema (one row per source file; ⌀ marks a nullable column):

column | dtype | min | max
---|---|---|---
repo_name | stringlengths | 7 | 111
__id__ | int64 | 16.6k | 19,705B
blob_id | stringlengths | 40 | 40
directory_id | stringlengths | 40 | 40
path | stringlengths | 5 | 151
content_id | stringlengths | 40 | 40
detected_licenses | list | - | -
license_type | stringclasses | 2 values | -
repo_url | stringlengths | 26 | 130
snapshot_id | stringlengths | 40 | 40
revision_id | stringlengths | 40 | 40
branch_name | stringlengths | 4 | 42
visit_date | timestamp[ns] | - | -
revision_date | timestamp[ns] | - | -
committer_date | timestamp[ns] | - | -
github_id | int64 ⌀ | 14.6k | 687M
star_events_count | int64 | 0 | 209k
fork_events_count | int64 | 0 | 110k
gha_license_id | stringclasses | 12 values | -
gha_fork | bool | 2 classes | -
gha_event_created_at | timestamp[ns] | - | -
gha_created_at | timestamp[ns] | - | -
gha_updated_at | timestamp[ns] | - | -
gha_pushed_at | timestamp[ns] | - | -
gha_size | int64 ⌀ | 0 | 10.2M
gha_stargazers_count | int32 ⌀ | 0 | 178k
gha_forks_count | int32 ⌀ | 0 | 88.9k
gha_open_issues_count | int32 ⌀ | 0 | 2.72k
gha_language | stringlengths ⌀ | 1 | 16
gha_archived | bool | 1 class | -
gha_disabled | bool | 1 class | -
content | stringlengths | 10 | 2.95M
src_encoding | stringclasses | 5 values | -
language | stringclasses | 1 value | -
is_vendor | bool | 2 classes | -
is_generated | bool | 2 classes | -
length_bytes | int64 | 10 | 2.95M
extension | stringclasses | 19 values | -
num_repo_files | int64 | 1 | 202k
filename | stringlengths | 4 | 112
num_lang_files | int64 | 1 | 202k
alphanum_fraction | float64 | 0.26 | 0.89
alpha_fraction | float64 | 0.2 | 0.89
hex_fraction | float64 | 0 | 0.09
num_lines | int32 | 1 | 93.6k
avg_line_length | float64 | 4.57 | 103
max_line_length | int64 | 7 | 931
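The rows below follow this schema. As a sketch, they could be loaded and inspected with the Hugging Face datasets library; the dataset name "this_dataset" is a placeholder, not taken from this dump, and only the column names come from the schema above.

# Hypothetical loading snippet for a dataset with this schema.
from datasets import load_dataset

rows = load_dataset("this_dataset", split="train", streaming=True)
for row in rows.take(3):
    print(row["repo_name"], row["path"], row["license_type"], row["num_lines"])
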
bachya/py17track | 11,501,922,444,998 | a3e7328aaf33b01ad0dc2c15cc6ffc579c2b49ad | 7fe4bc96f8c19a6d1e1d47513ff200a862ccd26b | /py17track/profile.py | b117dcd37e5fc8deac1c1476de7d2edc9879ecd2 | [
"MIT"
]
| permissive | https://github.com/bachya/py17track | c150978fd0a98ffebc3a198dc34e1293f816d10b | b31246c81f87de052d138313c206446a18b53782 | refs/heads/dev | 2022-07-11T19:30:02.168527 | 2022-07-01T01:43:19 | 2022-07-01T01:43:19 | 133,867,522 | 32 | 12 | MIT | false | 2022-07-01T01:43:20 | 2018-05-17T20:55:14 | 2022-04-02T18:45:06 | 2022-07-01T01:43:20 | 131 | 20 | 4 | 0 | Python | false | false |
"""Define interaction with a user profile."""
import json
import logging
from typing import Callable, Coroutine, List, Optional, Union
from .errors import InvalidTrackingNumberError, RequestError
from .package import PACKAGE_STATUS_MAP, Package
_LOGGER: logging.Logger = logging.getLogger(__name__)
API_URL_BUYER: str = "https://buyer.17track.net/orderapi/call"
API_URL_USER: str = "https://user.17track.net/userapi/call"
class Profile:
"""Define a 17track.net profile manager."""
def __init__(self, request: Callable[..., Coroutine]) -> None:
"""Initialize."""
self._request: Callable[..., Coroutine] = request
self.account_id: Optional[str] = None
async def login(self, email: str, password: str) -> bool:
"""Login to the profile."""
login_resp: dict = await self._request(
"post",
API_URL_USER,
json={
"version": "1.0",
"method": "Signin",
"param": {"Email": email, "Password": password, "CaptchaCode": ""},
"sourcetype": 0,
},
)
_LOGGER.debug("Login response: %s", login_resp)
if login_resp.get("Code") != 0:
return False
self.account_id = login_resp["Json"]["gid"]
return True
async def packages(
self,
package_state: Union[int, str] = "",
show_archived: bool = False,
tz: str = "UTC",
) -> list:
"""Get the list of packages associated with the account."""
packages_resp: dict = await self._request(
"post",
API_URL_BUYER,
json={
"version": "1.0",
"method": "GetTrackInfoList",
"param": {
"IsArchived": show_archived,
"Item": "",
"Page": 1,
"PerPage": 40,
"PackageState": package_state,
"Sequence": "0",
},
"sourcetype": 0,
},
)
_LOGGER.debug("Packages response: %s", packages_resp)
packages: List[Package] = []
for package in packages_resp.get("Json", []):
event: dict = {}
last_event_raw: str = package.get("FLastEvent")
if last_event_raw:
event = json.loads(last_event_raw)
kwargs: dict = {
"id": package.get("FTrackInfoId"),
"destination_country": package.get("FSecondCountry", 0),
"friendly_name": package.get("FRemark"),
"info_text": event.get("z"),
"location": " ".join([event.get("c", ""), event.get("d", "")]).strip(),
"timestamp": event.get("a"),
"tz": tz,
"origin_country": package.get("FFirstCountry", 0),
"package_type": package.get("FTrackStateType", 0),
"status": package.get("FPackageState", 0),
}
packages.append(Package(package["FTrackNo"], **kwargs))
return packages
async def summary(self, show_archived: bool = False) -> dict:
"""Get a quick summary of how many packages are in an account."""
summary_resp: dict = await self._request(
"post",
API_URL_BUYER,
json={
"version": "1.0",
"method": "GetIndexData",
"param": {"IsArchived": show_archived},
"sourcetype": 0,
},
)
_LOGGER.debug("Summary response: %s", summary_resp)
results: dict = {}
for kind in summary_resp.get("Json", {}).get("eitem", []):
key = PACKAGE_STATUS_MAP.get(kind["e"], "Unknown")
value = kind["ec"]
results[key] = value if key not in results else results[key] + value
return results
async def add_package(
self, tracking_number: str, friendly_name: Optional[str] = None
):
"""Add a package by tracking number to the tracking list."""
add_resp: dict = await self._request(
"post",
API_URL_BUYER,
json={
"version": "1.0",
"method": "AddTrackNo",
"param": {"TrackNos": [tracking_number]},
},
)
_LOGGER.debug("Add package response: %s", add_resp)
code = add_resp.get("Code")
if code != 0:
raise RequestError(f"Non-zero status code in response: {code}")
if not friendly_name:
return
packages = await self.packages()
try:
new_package = next(
p for p in packages if p.tracking_number == tracking_number
)
except StopIteration:
raise InvalidTrackingNumberError(
f"Recently added package not found by tracking number: {tracking_number}"
)
_LOGGER.debug("Found internal ID of recently added package: %s", new_package.id)
await self.set_friendly_name(new_package.id, friendly_name)
async def set_friendly_name(self, internal_id: str, friendly_name: str):
"""Set a friendly name to an already added tracking number.
internal_id is not the tracking number, it's the ID of an existing package.
"""
remark_resp: dict = await self._request(
"post",
API_URL_BUYER,
json={
"version": "1.0",
"method": "SetTrackRemark",
"param": {"TrackInfoId": internal_id, "Remark": friendly_name},
},
)
_LOGGER.debug("Set friendly name response: %s", remark_resp)
code = remark_resp.get("Code")
if code != 0:
raise RequestError(f"Non-zero status code in response: {code}")
| UTF-8 | Python | false | false | 5,871 | py | 24 | profile.py | 9 | 0.514393 | 0.509283 | 0 | 172 | 33.133721 | 89 |
mahfudza/change-account-ip | 9,998,683,873,121 | e2750e6af9b4f143b1195fabc49c8a865288e488 | 419fa8483f06f171e304a610f942473f08f000bb | /make_dict.py | bbbf217a52dcba9c4f97eba61897877b534ee89a | []
| no_license | https://github.com/mahfudza/change-account-ip | 4413a20b3a0fffe26dc76d5217478f9d6e833b6d | 87faf5a19d54c16e3159882c61c28d9079c51144 | refs/heads/master | 2020-09-03T12:06:04.170066 | 2019-11-04T09:01:02 | 2019-11-04T09:01:02 | 218,936,130 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import subprocess
import os
print("Please input user-domain file:")
rawdata=input()
print("Please input record type eg A, NS, MX")
record_type=input()
f=open(rawdata,"r")
user_domain=f.read().splitlines()
uniq_user=subprocess.check_output("cat "+rawdata+"| awk -F' ' '{print $1}' | sort | uniq", shell=True).decode("utf-8")
uniq_user=str(uniq_user).splitlines()
#to make user=>domain dictionary
mydict={}
for i in uniq_user:
for y in user_domain:
y=y.strip()
user=y.split()[0]
domain=y.split()[1]
if i==user:
if i in mydict:
mydict[i].append(domain)
else:
mydict[i]=[domain]
'''
check ns used
gather all user that has domain pointing to specified ns
'''
for i in mydict:
for domain in mydict[i]:
dig_domain=subprocess.check_output("dig "+domain+" "+record_type+" >>temp", shell=True).decode("utf-8")
niaga_user=set()
check=subprocess.check_output("cat temp | grep -w NS | grep 'niagahoster.com' |awk '{print $1}'| sort | uniq | sed s'/.$//'", shell=True).decode("utf-8").splitlines()
for i in check:
user=subprocess.check_output("grep -w "+i+" user-domain1 | awk '{print $1}' | sort | uniq", shell=True).decode("utf-8")
niaga_user.add(user.strip())
niaga_user.remove("")
for i in niaga_user:
subprocess.check_call("/usr/local/cpanel/bin/set_zone_ttl --user $user --newttl 600 --force", shell=True)
#get ip
#grep -w IP /var/cpanel/users/* | awk -F: '{print $2}' | cut -d= -f2 | sort -n -t'.' -k4 |uniq
ip_list=['127.0.0.1', '153.92.11.2', '153.92.11.5', '153.92.11.6', '153.92.11.7',
'153.92.11.8', '153.92.11.10', '153.92.11.11', '153.92.11.12', '153.92.11.13',
'153.92.11.14', '153.92.11.15', '153.92.11.16', '153.92.11.17', '153.92.11.18',
'153.92.11.19', '153.92.11.20', '153.92.11.21', '153.92.11.22', '153.92.11.23',
'153.92.11.24', '153.92.11.25', '153.92.11.26', '153.92.11.27', '153.92.11.28',
'153.92.11.29', '153.92.11.30', '153.92.11.31', '153.92.11.32', '153.92.11.33',
'153.92.11.34', '153.92.11.35', '153.92.11.36', '153.92.11.37', '153.92.11.38',
'153.92.11.39', '153.92.11.40', '153.92.11.41', '153.92.11.42', '153.92.11.43',
'153.92.11.44', '153.92.11.45', '153.92.11.46', '153.92.11.47', '153.92.11.48',
'153.92.11.49', '153.92.11.50']
'''
ip_list=['127.0.0.1', '153.92.11.2', '153.92.11.5', '153.92.11.6', '153.92.11.7',
'153.92.11.8', '153.92.11.10', '153.92.11.11', '153.92.11.12', '153.92.11.13',
'153.92.11.14', '153.92.11.15', '153.92.11.16', '153.92.11.17', '153.92.11.18',
'153.92.11.19', '153.92.11.20', '153.92.11.21', '153.92.11.22', '153.92.11.23',
'153.92.11.24', '153.92.11.25']
'''
user_ip={}
y=0
for i in niaga_user:
if y>=len(ip_list)-1:
y=0
else:
y+=1
    #user_ip[i]=ip_list[y]
    change_ip = subprocess.check_call("/usr/local/cpanel/bin/setsiteip -u " + i + " " + ip_list[y], shell=True)
#print(niaga_user)
#print(user_ip)
#print("jumlah user: "+str(len(niaga_user)))
#print("jumlah ip: "+str(len(ip_list)))
#finally, the user-domain and dns-record files are stored in a single archive file named IP_IPADDRESS_DATE
| UTF-8 | Python | false | false | 3,176 | py | 1 | make_dict.py | 1 | 0.588476 | 0.391373 | 0 | 91 | 33.868132 | 166 |
elvirion/AMC | 1,589,137,908,172 | 613d1c9f676d2fdb7faf4d3ca2084e42a4e5ad54 | 10c6cdfea30308f4d73643f05bcb9291de1eb690 | /config.py | 511140ddd04b510f154da20a221dfbdcea130489 | []
| no_license | https://github.com/elvirion/AMC | 7a3264de9e9544f19e10eae651de611efc8790e8 | db2846e2cfec247b4c1ac15b41342c6f7c61dec7 | HEAD | 2019-06-28T22:21:15.602766 | 2017-01-23T08:51:45 | 2017-01-23T08:51:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import fractions
import logging
import os
import random
from logging import handlers
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SQLALCHEMY_ECHO = False
CSRF_ENABLED = True
DEBUG = False
TESTING = False
LOGGING_LEVEL = 'INFO'
BASEDIR = basedir
BCRYPT_ROUNDS = 12
# one month
TOKEN_EXPIRATION = 3600 * 24 * 30
OBSCURE_ID_MODULUS = 2 ** 20 - -1
# has to be coprime to OBSCURE_ID_MODULUS
OBSCURE_ID_KEY = 542174
# find a coprime by running this function
def find_coprime(self, modulus=None):
modulus = modulus or self.OBSCURE_ID_MODULUS
# every number has a coprime so this loop will always terminate.
while True:
other = random.randrange(modulus)
if fractions.gcd(modulus, other) == 1:
break
return other
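    # Note: fractions.gcd was removed in Python 3.9; math.gcd is the
    # equivalent call on modern interpreters.
    # Hypothetical usage sketch: OBSCURE_ID_KEY = Config().find_coprime()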
@staticmethod
def add_loghandler(logger, loglevel, logfile):
logger.setLevel(getattr(logging, loglevel, 'DEBUG'))
log_handler = handlers.RotatingFileHandler(logfile,
maxBytes=5 * 1024 * 1024,
backupCount=2)
log_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
))
logger.addHandler(log_handler)
@classmethod
def init_loggers(cls, app=None):
sqla_logger = logging.getLogger('sqlalchemy.engine')
cls.add_loghandler(sqla_logger,
cls.LOGGING_LEVEL,
os.path.join(cls.BASEDIR, 'sqla.log'))
if app:
cls.add_loghandler(app.logger,
cls.LOGGING_LEVEL,
os.path.join(cls.BASEDIR, 'app.log'))
class DevelopmentConfig(Config):
DEBUG = True
SECRET_KEY = 'seekrit'
HASHID_SALT = 'SaAaAalTy'
BCRYPT_ROUNDS = 4
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URI')
class TestingConfig(DevelopmentConfig):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI')
class ProductionConfig(Config):
LOGGING_LEVEL = 'WARNING'
SECRET_KEY = os.environ.get('SECRET_KEY')
OBSCURE_ID_KEY = os.environ.get('OBSCURE_ID_KEY')
HASHID_SALT = os.environ.get('HASHID_SALT')
SQLALCHEMY_DATABASE_URI = os.environ.get('PROD_DATABASE_URI')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| UTF-8 | Python | false | false | 2,600 | py | 50 | config.py | 40 | 0.601923 | 0.589615 | 0 | 86 | 29.232558 | 76 |
asterinwl/2021-K-Digital-Training_selfstudy | 15,109,694,961,310 | dc8901e71ae8f9093ba64cf01bd72439c220b615 | 97c4d3d0509897c8a3591cd340dc2b77c4eeba77 | /5.20/05_For/for_ex1.py | 300185b16c8417c5047d5700fa8fb6185c852315 | []
| no_license | https://github.com/asterinwl/2021-K-Digital-Training_selfstudy | a056f3e7fa92c6b51914171d5171f05fd13fd9ec | cd9c136ddba5df4be6ae2ceb447ee6bb8d5d53bc | refs/heads/master | 2023-08-29T02:18:13.234499 | 2021-10-09T15:21:55 | 2021-10-09T15:21:55 | 373,746,421 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# compute the sum from 1 to 100
total=0
for i in range(101) :
total=total+i
print(total) # print must be dedented all the way out of the loop; if it stays indented, it prints on every iteration.
| UTF-8 | Python | false | false | 219 | py | 199 | for_ex1.py | 130 | 0.585034 | 0.530612 | 0 | 5 | 27 | 73 |
cmsplt/PLTOffline | 3,461,743,651,129 | 35daee48a5b26aa94895de08eb12b229a6da2098 | a246840aff57f864b89fc1e845802dc248215241 | /scripts/plotOccupancy.py | 57d54811caf278333109ed4976688585b56c3f95 | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | https://github.com/cmsplt/PLTOffline | 197e174f8221399aa5931c69ce3a46c7e79ee19b | 160e4976ab854cc255bde06f0ce014fbc224c23d | refs/heads/master | 2023-08-08T04:40:37.817282 | 2023-08-01T15:03:13 | 2023-08-01T15:03:13 | 13,862,531 | 2 | 25 | null | false | 2023-08-17T07:22:06 | 2013-10-25T14:14:08 | 2022-08-03T14:45:16 | 2023-08-17T07:22:05 | 125,930 | 5 | 16 | 0 | C++ | false | false |
#!/usr/bin/env python3
# pip install uproot # [https://github.com/scikit-hep/uproot4]
import os, sys, re, uproot, pandas, matplotlib.pyplot
def testFile(filePath='histo_occupancy.root'):
if not os.path.isfile(filePath):
os.system(f'wget https://adelannoy.com/CMS/PLT/tmp/Fill4984-Slink20160603.153652/histo_occupancy.root -O {filePath}') # ~750 kB
def plotOccupancyROC(rootFile, key=f'Occupancy Ch02 ROC0;1'):
# [https://uproot.readthedocs.io/en/latest/basic.html]
# rootFile.keys()
# (rootFile[key] == rootFile[f'{key[:-1]}2']) and (rootFile[key] == rootFile[f'{key[:-1]}3'])
# rootFile[key].to_numpy()[1].astype(int).tolist() == [*range(53)]
# rootFile[key].to_numpy()[2].astype(int).tolist() == [*range(81)]
df = pandas.DataFrame(rootFile[key].to_numpy()[0]).T
matplotlib.pyplot.pcolormesh(df, cmap='inferno')
matplotlib.pyplot.title(key[:-2])
matplotlib.pyplot.ylabel('row')
matplotlib.pyplot.xlabel('column')
matplotlib.pyplot.colorbar()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(f'{key[:-2].replace(" ","")}.png', dpi=300)
matplotlib.pyplot.close('all')
def main():
# testFile()
def usage(): sys.exit('usage:\n./plotOccupancy.py /path/to/histo_occupancy/root/file')
try: filePath = str(sys.argv[1])
except: usage()
if not os.path.isfile(filePath): usage()
with uproot.open(filePath) as rootFile: # [https://uproot.readthedocs.io/en/latest/uproot.reading.open.html]
errors = pandas.Series(rootFile['Errors;1'].values()).rename(dict(zip([*range(6)],['timeOut','eventNum','nearFull','trailer','tbm','unknown'])))
        for key in [ k for k in rootFile.keys() if re.match(r'Occupancy Ch[\d]{2} ROC[\d];1', k) ]:
print(f'plotting {key[:-2]}...')
plotOccupancyROC(rootFile, key)
return errors
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,882 | py | 1,054 | plotOccupancy.py | 127 | 0.650372 | 0.624336 | 0 | 40 | 46.05 | 152 |
mkiterian/nouns-pubsub | 6,760,278,573,335 | d78d934a7f126daddd15e80f148aae6b22efc822 | 70f292381b1686df39ba8d1d7f8b033485f676ad | /app.py | a27ecce08cd23e059b0255e977f8cc0ba676bb1f | []
| no_license | https://github.com/mkiterian/nouns-pubsub | a4ae7f90024e1d87ae41837b6a9275e592bc654c | 0e800ce58cc1ba28777111dd95d309f6da59a775 | refs/heads/master | 2022-12-13T21:15:23.476749 | 2019-11-24T22:58:24 | 2019-11-24T22:58:24 | 224,189,252 | 0 | 0 | null | false | 2022-12-08T06:58:59 | 2019-11-26T12:38:28 | 2019-11-26T12:38:59 | 2022-12-08T06:58:59 | 5 | 0 | 0 | 2 | Python | false | false |
import json
from kafka import KafkaProducer
from get_nouns import get_all_icons, get_icon_groups
if __name__ == '__main__':
items = ['fish', 'dog', 'cat', 'bird']
icons = get_all_icons(items)
if icons:
producer = KafkaProducer(bootstrap_servers='localhost:9092')
grouped_icons = get_icon_groups(icons)
for key in grouped_icons:
for icon in grouped_icons[key]:
key_bytes = bytes(icon['id'], encoding='utf-8')
value_bytes = bytes(json.dumps(icon), encoding='utf-8')
producer.send(key, key=key_bytes, value=value_bytes)
producer.flush()
print('Successfully produced!!')
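        # Sketch (not in the original script): the produced messages could be
        # read back with kafka-python's consumer; 'fish' is one of the item
        # keys assumed above. Left commented out because the loop blocks.
        # from kafka import KafkaConsumer
        # consumer = KafkaConsumer('fish', bootstrap_servers='localhost:9092')
        # for message in consumer:
        #     print(message.key, json.loads(message.value))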
| UTF-8 | Python | false | false | 715 | py | 2 | app.py | 2 | 0.574825 | 0.566434 | 0 | 19 | 35.736842 | 71 |
perryyo/compose | 8,624,294,338,869 | 01d21ddcd8f7cabf90732d41ecb28434766fd420 | decb5b3e2568c777dc9c8dd919d61f72235f7cde | /compose/__init__.py | cedb7cf040d26467c13a775514678ff742181510 | [
"Apache-2.0"
]
| permissive | https://github.com/perryyo/compose | b9e7b053b292f84e6c1322307c7ce8239796f72e | 0907f5d2b29c197482eb78a7f1fa4bf4f3af2f60 | refs/heads/master | 2020-04-08T01:54:29.949820 | 2019-02-11T07:13:55 | 2019-02-11T07:13:55 | 158,913,474 | 0 | 0 | Apache-2.0 | true | 2018-11-24T07:37:18 | 2018-11-24T07:37:18 | 2017-08-31T13:26:19 | 2017-08-31T07:18:12 | 9,628 | 0 | 0 | 0 | null | false | null |
from __future__ import absolute_import
from __future__ import unicode_literals
__version__ = '1.16.0-dev'
| UTF-8 | Python | false | false | 107 | py | 6 | __init__.py | 5 | 0.700935 | 0.663551 | 0 | 4 | 25.75 | 39 |
ykmc/contest | 14,293,651,172,511 | c58313c772eb4e0ec7de4fcb1969b1e748513c6a | b2472967910be9c12576f0f97d33bca0576a8667 | /atcoder-old/2018/0325_abc092/b.py | 9eee718f131578cddf387f7cfbdacc8c95304d5b | []
| no_license | https://github.com/ykmc/contest | 85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4 | 69a73da70f7f987eb3e85da503ea6da0744544bd | refs/heads/master | 2020-09-01T22:56:10.444803 | 2020-07-14T11:36:43 | 2020-07-14T11:36:43 | 217,307,953 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Python3 (3.4.3)
import sys
input = sys.stdin.readline
# -------------------------------------------------------------
# function
# -------------------------------------------------------------
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
N = int(input())
D,X = map(int,input().split())
A = [int(input()) for _ in range(N)]
from math import ceil
ans = 0
for i in range(N):
ans += ceil(D/A[i])
print(ans+X)
| UTF-8 | Python | false | false | 507 | py | 892 | b.py | 264 | 0.299803 | 0.289941 | 0 | 22 | 22.090909 | 63 |
bwenke1/bootcamp_utils | 11,596,411,743,459 | 3c5ae6014bd244c01d4702dcd7fe2156f5d7cd59 | 40867e1b36808c68e8c06f14bb2d72ded0e6dd6c | /plot_example.py | 0465ff340fe391abfd7612d4158a6a93e533586f | []
| no_license | https://github.com/bwenke1/bootcamp_utils | 2363075db245c5a30ebf6b7ad3abf70d082b3a0a | b98819ba0939f24ba1f21594dc00d3ed27cc0502 | refs/heads/master | 2021-01-21T22:01:30.018810 | 2017-06-22T20:02:39 | 2017-06-22T20:02:39 | 95,146,241 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import scipy as sp
import scipy.stats  # "import scipy" alone does not make sp.stats available
import matplotlib.pyplot as plt
#Load data
xa_high = np.loadtxt('data/xa_high_food.csv', comments='#')
xa_low = np.loadtxt('data/xa_low_food.csv', comments='#')
#Define ECDF function
def ecdf(data):
x = np.sort(data)
y = np.arange(1, len(data)+1) / len(data)
return x, y
x, y = ecdf(xa_high)
#Make smooth x-values
x2 = np.linspace(1600, 2500, 400)
#Compute theoretical Normal distribution
fig, ax = plt.subplots(1,1)
cdf_theor = sp.stats.norm.cdf(x2, loc=np.mean(xa_high), scale=np.std(xa_high))
ax.set_xlabel('egg cross sectional area (sq. µm)')
ax.set_ylabel('CDF')
ax.plot(x2, cdf_theor, color='gray')
#Plot real data
#fig, ax = plt.subplots(1,1)
_ = ax.plot(x, y, marker='.', linestyle='none', color='gray', label='high food')
#Make a legend
plt.legend()
plt.show()
| UTF-8 | Python | false | false | 830 | py | 3 | plot_example.py | 3 | 0.6731 | 0.648975 | 0 | 35 | 22.685714 | 80 |
DerrickOdhiambo/Personal-Blog | 4,131,758,555,030 | d76750e04e9c6a3db4bdc0f929e8ae293f5a6cd6 | 3067d6fbabcb4d50fde9608d0241ee28c7cae4b2 | /blog/request.py | 74e4726439beee63108af04bf119a1e15f45a711 | [
"MIT"
]
| permissive | https://github.com/DerrickOdhiambo/Personal-Blog | d39b9c9073b7f136e6a88307cda4bcc7787eed1f | 5470e07125669c6f45301acdc30f3f74dc9ed742 | refs/heads/master | 2022-12-23T05:42:21.815004 | 2020-09-30T14:29:57 | 2020-09-30T14:29:57 | 298,553,521 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib.request, json
from .models import Quotes
base_url = None
def configure_request(app):
global base_url
base_url = app.config['QUOTES_API_BASE_URL']
def get_quotes():
count=0
quotes=[]
while count!=4:
with urllib.request.urlopen(base_url) as url:
get_quotes_data = url.read()
get_quotes_response = json.loads(get_quotes_data)
if get_quotes_response:
quote_text=get_quotes_response['quote']
quote_author=get_quotes_response['author']
quote_object = Quotes(quote_text,quote_author)
quotes.append(quote_object)
count=count+1
return quotes
| UTF-8 | Python | false | false | 614 | py | 18 | request.py | 12 | 0.684039 | 0.679153 | 0 | 26 | 22.576923 | 55 |
Symorph/coder-ability | 4,518,305,629,989 | 74c3f3a0afb486f77240b5b7334bed8636673fbf | 227ee8c929c494a42df1a12a68f8346258b4111c | /taskd/taskd/config.py | 43bb60e9fc43704494e379360338b0f3b3041c6a | []
| no_license | https://github.com/Symorph/coder-ability | 13e5b08ed015b23250bb73a042094cabe4bd205c | 26c6c23641dec1315fa3ec6c57ceb0b697bc43d1 | refs/heads/master | 2016-08-03T18:10:16.601920 | 2015-02-27T04:59:04 | 2015-02-27T04:59:04 | 30,775,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Configuration for worker daemon
"""
import logging
import os
import sys
import yaml
from taskd import TASKD_HOME
_config = None # singleton instance
logger = logging.getLogger()
DEFAULT_CONFIG_PATH = os.path.join(TASKD_HOME, "conf", "config.yml")
def _load(filename):
global _config
try:
logger.debug("Loading environment config...")
with open(filename) as stream:
_config = yaml.load(stream)
from pprint import pprint
pprint(_config)
return _config
except IOError as e:
print e
sys.exit(1)
except:
print "Unexpected error:", sys.exc_info()[0]
sys.exit(1)
def get_config(config_path=DEFAULT_CONFIG_PATH):
return _load(config_path)
| UTF-8 | Python | false | false | 759 | py | 75 | config.py | 42 | 0.631094 | 0.627141 | 0 | 34 | 21.294118 | 68 |
Menturan/aoc2018 | 19,636,590,486,602 | bb5f36b5d8ed45fddc9fccbafa018350083eda52 | 8c48e579770d811c2b6303c31226ad4f1a2f8520 | /dec_03.py | eccbae2d2a673018364abb7f1f6980a24d10023c | []
| no_license | https://github.com/Menturan/aoc2018 | ffdf8f1598aea060cc0d5dc1151b543fb90aab54 | cb22319774503359526975e009f875fcbf570b20 | refs/heads/master | 2020-04-09T03:05:29.380903 | 2018-12-05T21:23:04 | 2018-12-05T21:23:04 | 159,968,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from puzzle_util.christmas_tree import tree
from puzzle_util.read_file import read_file_to_list_of_strings
from puzzle_util.test import test
def convert_to_dict_and_calculate(claim: str) -> dict:
# #1 @ 1,3: 4x4
splitted = claim.split(' ') # [#1, @, 1,3:, 4x4]
xy = splitted[2].replace(':', '').split(',') # [1, 3]
size = splitted[3].split('x') # [4, 4]
return {'x': int(xy[0]), 'y': int(xy[1]), 'wide': int(size[0]), 'tall': int(size[1]),
'x_end': int(xy[0]) + int(size[0]), 'y_end': int(xy[1]) + int(size[1])}
def part1(claims: list) -> int:
inches_overlapping = 0
for claim in set(claims):
compare_claims = set(claims.copy())
compare_claims.remove(claim)
claim = convert_to_dict_and_calculate(claim)
print('Claim: %s' % claim)
for compare_claim in compare_claims:
compare_claim = convert_to_dict_and_calculate(compare_claim)
print('Compare claim: %s' % compare_claim)
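            # Two axis-aligned rectangles overlap iff each one starts before
            # the other ends on both axes: a.x < b.x_end and a.x_end > b.x,
            # and the same pair of comparisons for y.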
if (claim['x'] < compare_claim['x_end'] and claim['x_end'] > compare_claim['x'] and
                    claim['y'] < compare_claim['y_end'] and claim['y_end'] > compare_claim['y']):
x = max(claim['x'], compare_claim['x'])
y = max(claim['y'], compare_claim['y'])
tall = min(claim['y_end'], compare_claim['y_end']) - y
wide = min(claim['x_end'], compare_claim['x_end']) - x
inches_overlapping += wide * tall
return inches_overlapping
def part1_test():
test_input = ['#1 @ 1,3: 4x4',
'#2 @ 3,1: 4x4',
'#3 @ 5,5: 2x2']
"""
........
...2222.
...2222.
.11XX22.
.11XX22.
.111133.
.111133.
........
"""
test(part1(test_input), 4)
def part2(input) -> int:
pass
def part2_test():
test(0, 0)
print(tree)
print('### Part 1 ###')
part1_test()
print('Resultat: ' + str(part1(read_file_to_list_of_strings('dec_03.txt'))))
print('### Part 2 ###')
part2_test()
print('Resultat: ' + str(part2(read_file_to_list_of_strings('dec_03.txt'))))
| UTF-8 | Python | false | false | 2,099 | py | 9 | dec_03.py | 9 | 0.532635 | 0.491663 | 0 | 66 | 30.80303 | 97 |
iSaran/robamine | 18,674,517,818,710 | cc589a14185a794210680b0b831be93274dd156b | 8b0fd40319279992b25d851d99d488d58bc03e4a | /robamine/utils/cv_tools.py | 52bf82203556dd4c9d5422f2a48633c94d375816 | []
| no_license | https://github.com/iSaran/robamine | b61998bded85ac2722becd3334c888a83a4d0c49 | d9649b7451a14ce18156e0addfb5f1b6b4032af8 | refs/heads/master | 2023-05-30T11:05:37.585351 | 2020-02-27T16:38:28 | 2020-02-27T16:38:28 | 178,202,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import numpy as np
import cv2
import open3d
import math
'''
Computer Vision Utils
============
'''
def depth_to_point_cloud(depth, camera_intrinsics):
"""
    Converts a depth map to a point cloud.
:param depth: depth image
:param camera_intrinsics: focal length and center point
:return: nx3 numpy array
"""
fx = camera_intrinsics[0]
fy = camera_intrinsics[1]
cx = camera_intrinsics[2]
cy = camera_intrinsics[3]
point_cloud = []
h, w = depth.shape
for x in range(0, w):
for y in range(0, h):
if depth[y][x] != 0:
Z = -depth[y][x] # z is negative because Z axis is inward
X = (x - cx) * Z / fx
Y = (y - cy) * Z / fy
point_cloud.append([X, Y, Z])
return np.asarray(point_cloud)
def depth2pcd(depth, fovy):
height, width = depth.shape
# Camera intrinsics
f = 0.5 * height / math.tan(fovy * math.pi / 360)
cx = width / 2
cy = height / 2
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
valid = (depth > 0)
z = np.where(valid, -depth, 0)
x = np.where(valid, z * (c - cx) / f, 0)
y = np.where(valid, z * (r - cy) / f, 0)
pcd = np.dstack((x, y, z))
    return pcd.reshape(-1, 3)
def transform_point_cloud(point_cloud, affine_transformation):
"""
Apply an affine transformation to the point cloud
:param point_cloud: input point cloud
:param affine_transformation: 4x4 matrix that describes the affine transformation [R|t]
:return:
"""
# Convert cartesian to homogeneous coordinates
ones = np.ones((point_cloud.shape[0], 1), dtype=np.float32)
point_cloud = np.concatenate((point_cloud, ones), axis=1)
# Transform cloud
for i in range(point_cloud.shape[0]):
point_cloud[i] = np.matmul(affine_transformation, point_cloud[i])
# point_cloud = np.matmul(affine_transformation, point_cloud.T)
# point_cloud = point_cloud.T
# Convert homogeneous to cartesian
w = point_cloud[:, 3]
point_cloud /= w[:, np.newaxis]
return point_cloud[:, 0:3]
def gl2cv(depth, z_near, z_far):
"""
Converts the depth from OpenGl to OpenCv
:param depth: the depth in OpenGl format
:param z_near: near clipping plane
:param z_far: far clipping plane
:return: a depth image
"""
h, w = depth.shape
linear_depth = np.zeros((h, w), dtype=np.float32)
# for x in range(0, w):
# for y in range(0, h):
# if depth[y][x] != 1:
# linear_depth[y][x] = 2 * z_far * z_near / (z_far + z_near - (z_far - z_near) * (2 * depth[y][x] - 1))
#
# linear_depth = np.flip(linear_depth, axis=1)
# return np.flip(linear_depth, axis=0)
valid = np.where(depth!=1.0)
linear_depth[valid] = 2 * z_far * z_near / (z_far + z_near - (z_far - z_near) * (2 * depth[valid] - 1))
linear_depth = np.flip(linear_depth, axis=1)
return np.flip(linear_depth, axis=0)
def rgb2bgr(rgb):
"""
Converts a rgb image to bgr
(Vertical flipping of the image)
:param rgb: the image in bgr format
"""
h, w, c = rgb.shape
bgr = np.zeros((h, w, c), dtype=np.uint8)
r = rgb[:, :, 0]
g = rgb[:, :, 1]
b = rgb[:, :, 2]
bgr[:, :, 0] = b
bgr[:, :, 1] = g
bgr[:, :, 2] = r
return np.flip(bgr, axis=0)
def plot_height_map(heightmap):
width, height = heightmap.shape
cv_height = np.zeros((height, width), dtype=np.float32)
min_height = np.min(heightmap)
max_height = np.max(heightmap)
for i in range(0, width):
for j in range(0, height):
cv_height[i][j] = (heightmap[i][j] - min_height) / (max_height - min_height)
cv2.imshow("height_map", cv_height)
cv2.waitKey()
def generate_height_map(point_cloud, shape=(100, 100), grid_step=0.0025, plot=False, rotations=0):
"""
see kiatos19
:param point_cloud: point cloud aligned with the target object
:param plot: if True, plot the generated height map
:param shape: the shape of the height map
:param grid_step: the side of each cell in the generated height map
:return: the height map
"""
width = shape[0]
height = shape[1]
height_grid = np.zeros((height, width), dtype=np.float32)
for i in range(point_cloud.shape[0]):
x = point_cloud[i][0]
y = point_cloud[i][1]
z = point_cloud[i][2]
idx_x = int(np.floor(x / grid_step)) + int(width / 2)
idx_y = int(np.floor(y / grid_step)) + int(height / 2)
if 0 < idx_x < width - 1 and 0 < idx_y < height - 1:
if height_grid[idx_y][idx_x] < z:
height_grid[idx_y][idx_x] = z
if rotations > 0:
step_angle = 360 / rotations
center = (width / 2, height / 2)
heightmaps = []
for i in range(rotations):
angle = i * step_angle
m = cv2.getRotationMatrix2D(center, angle, scale=1)
heightmaps.append(cv2.warpAffine(height_grid, m, (height, width)))
if plot:
plot_height_map(heightmaps[i])
return heightmaps
else:
if plot:
plot_height_map(height_grid)
return height_grid
def extract_features(height_map, dim, max_height, normalize=True, rotation_angle=0, plot=False):
"""
Extract features from height map(see kiatos19)
:param height_map: height map aligned with the target
:param bbox: target's dimensions
:return: N-d feature vector
"""
h, w = height_map.shape
bbox = np.asarray([dim[0], dim[1]])
if plot:
cv_height = np.zeros((h, w), dtype=np.float32)
min_height = np.min(height_map)
max_height = np.max(height_map)
for i in range(0, w):
for j in range(0, h):
cv_height[i][j] = ((height_map[i][j] - min_height) / (max_height - min_height))
rgb = cv2.cvtColor(cv_height, cv2.COLOR_GRAY2RGB)
cells = []
cx = int(w/2)
cy = int(h/2)
# Target features
m_per_pixel = 480 #ToDo:
side = m_per_pixel * bbox
cx1 = cx - int(side[0])
cx2 = cx + int(side[0])
cy1 = cy - int(side[1])
cy2 = cy + int(side[1])
# cells.append([(cx1, cy1), (cx, cy)])
# cells.append([(cx, cy1), (cx2, cy)])
# cells.append([(cx1, cy), (cx, cy2)])
# cells.append([(cx, cy), (cx2, cy2)])
m = cv2.getRotationMatrix2D((cx, cy), rotation_angle, scale=1)
(cx, cy) = np.matmul(m, np.array([cx, cy, 1])).astype(int)
c1 = np.matmul(m, np.array([cx1, cy1, 1])).astype(int)
c2 = np.matmul(m, np.array([cx2, cy1, 1])).astype(int)
c3 = np.matmul(m, np.array([cx2, cy2, 1])).astype(int)
c4 = np.matmul(m, np.array([cx1, cy2, 1])).astype(int)
cx1 = min(c1[0], c2[0], c3[0], c4[0])
cy1 = min(c1[1], c2[1], c3[1], c4[1])
cx2 = max(c1[0], c2[0], c3[0], c4[0])
cy2 = max(c1[1], c2[1], c3[1], c4[1])
#cells.append([(cx1, cy1), (cx, cy)])
#cells.append([(cx, cy1), (cx2, cy)])
#cells.append([(cx1, cy), (cx, cy2)])
#cells.append([(cx, cy), (cx2, cy2)])
# Features around target
# 1. Define the up left corners for each 32x32 region around the target
up_left_corners = []
# up_left_corners.append((int(cx - 16), int(cy - side[1] - 32))) # f_up
# up_left_corners.append((int(cx + side[0]), int(cy - 16))) # f_right
# up_left_corners.append((int(cx - 16), int(cy + side[1]))) # f_down
# up_left_corners.append((int(cx - side[0] - 32), int(cy - 16))) # f_left
# up_left_corners.append((int(cx - 32), int(cy - side[1] - 32))) # f_up
# up_left_corners.append((int(cx + side[0]), int(cy - 32))) # f_right
# up_left_corners.append((int(cx - 32), int(cy + side[1]))) # f_down
# up_left_corners.append((int(cx - side[0] - 32), int(cy - 32))) # f_left
#
# x_limit = [16, 8, 16, 8]
# y_limit = [8, 16, 8, 16]
#
# for i in range(len(up_left_corners)):
# corner = up_left_corners[i]
# for x in range(x_limit[i]):
# for y in range(y_limit[i]):
# c = (corner[0] + x * 4, corner[1] + y * 4)
# cells.append([c, (c[0]+4, c[1]+4)])
corner = (int(cx - 32), int(cy - 32))
for x in range(16):
for y in range(16):
c = (corner[0] + x * 4, corner[1] + y * 4)
cells.append([c, (c[0]+4, c[1]+4)])
features = []
for i in range(len(cells)):
cell = cells[i]
x1 = cell[0][0]
x2 = cell[1][0]
y1 = cell[0][1]
y2 = cell[1][1]
if i < 4:
avg_height = np.sum(height_map[y1:y2, x1:x2]) / (side[0] * side[1])
else:
avg_height = np.sum(height_map[y1:y2, x1:x2]) / 16
features.append(avg_height)
if plot:
rgb = draw_cell(cell, rgb)
if plot:
cv2.imshow('rgb', rgb)
cv2.waitKey()
if normalize:
for i in range(len(features)):
features[i] /= max_height
return features
def draw_cell(cell, rgb):
p1 = cell[0]
p2 = (cell[1][0], cell[0][1])
p3 = cell[1]
p4 = (cell[0][0], cell[1][1])
cv2.line(rgb, p1, p2, (0, 255, 0), thickness=1)
cv2.line(rgb, p2, p3, (0, 255, 0), thickness=1)
cv2.line(rgb, p3, p4, (0, 255, 0), thickness=1)
cv2.line(rgb, p4, p1, (0, 255, 0), thickness=1)
return rgb
def plot_point_cloud(point_cloud):
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(point_cloud)
frame = open3d.create_mesh_coordinate_frame(size=0.1)
open3d.draw_geometries([pcd, frame])
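# Usage sketch (not part of the original module): chaining the helpers above
# on a synthetic depth buffer. The fovy and clip-plane values are illustrative
# assumptions, not project settings.
if __name__ == '__main__':
    fake_gl_depth = np.random.uniform(0.5, 0.99, (120, 160)).astype(np.float32)
    depth = gl2cv(fake_gl_depth, z_near=0.1, z_far=10.0)      # OpenGL -> metric depth
    cloud = depth2pcd(depth, fovy=45)                         # back-project to 3D points
    height_map = generate_height_map(cloud)                   # rasterize to a height grid
    features = extract_features(height_map, dim=[0.05, 0.05], max_height=1.0)
    print(len(features), 'features extracted')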
| UTF-8 | Python | false | false | 9,645 | py | 124 | cv_tools.py | 64 | 0.554069 | 0.516641 | 0 | 315 | 29.619048 | 119 |
RagnarGothur/bamazon | 8,899,172,282,854 | 2b23269358db41eb52b7ea5072f3e42ae64410c8 | 9ecb5182ed9044e0714107fe1c1a7e43a4fb5520 | /core/urls.py | 7a34eddedea365b861015856074450c5c16547c4 | []
| no_license | https://github.com/RagnarGothur/bamazon | daaa49346e3ac7fa882c185f22947faae12cb250 | 9bfc7762b8a68426cb024e2777fbc0a0f66288a8 | refs/heads/master | 2022-10-09T21:09:03.470877 | 2019-06-27T18:35:13 | 2019-06-27T18:35:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path('', views.Main.as_view(), name='main'),
path('books/', views.Books.as_view(), name='books'),
path('books/<int:book_id>', views.ShowBook.as_view(), name='show_book'),
path('authors', views.Authors.as_view(), name='authors'),
path('authors/<int:author_id>', views.ShowAuthor.as_view(), name='show_author'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| UTF-8 | Python | false | false | 550 | py | 11 | urls.py | 4 | 0.689091 | 0.689091 | 0 | 15 | 35.666667 | 84 |
todie42/linux_env_setup | 4,380,866,669,287 | 416747faa789f6844f069775334cdc28a1a2fa57 | 97a01ed80d890472e24eb0ecce32d500dcad3231 | /LinuxSA/support/generate_base64_encode.py | 31f8cde897cee461be5609364340f030589c965e | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/todie42/linux_env_setup | e92112ae628f0031d4ed04e71204f6c9182ab113 | 536c01aa6e191438c41f2fa8c15c4adf6f9eaec0 | refs/heads/master | 2021-12-11T03:03:11.524629 | 2021-12-08T21:07:23 | 2021-12-08T21:07:23 | 151,981,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Battelle Energy Alliance, LLC
import subprocess
import getpass
import argparse
import sys
import re
import textwrap
from argparse import RawTextHelpFormatter
import os
import base64
### Arguments ######################################################################################################################
parser = argparse.ArgumentParser(
description='Encode or decode message in base64',
epilog=textwrap.dedent('''
Examples:
%(prog)s -e encodethis
%(prog)s -d =base64string
%(prog)s
'''),
formatter_class=RawTextHelpFormatter
)
group = parser.add_mutually_exclusive_group()
group.add_argument('-e', '--encode', action='store_true', help="Encode message")
group.add_argument('-d', '--decode', action='store_true', help="Decode message")
parser.add_argument('-m', '--message', help="message to be encoded or decoded")
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.encode and not args.message:
to_code = getpass.getpass('Please enter string to encode:').rstrip()
coded = base64.b64encode(to_code)
print(coded),
elif args.encode:
coded = base64.b64encode(args.message)
print(coded),
elif args.decode and not args.message:
parser.print_help()
sys.exit(1)
elif args.decode:
decoded = base64.b64decode(args.message)
print(decoded),
| UTF-8 | Python | false | false | 1,486 | py | 125 | generate_base64_encode.py | 24 | 0.608345 | 0.591521 | 0 | 50 | 27.6 | 132 |
balarsen/pymc_learning | 2,877,628,124,209 | 2bdd6ac007d49c851ff997912528dce56b0c54d4 | 1767600b696c05964b650bf4947b39f888524e08 | /Learning/LatentDirichletAllocation.py | 8201e39330738c4dd31e94c87b451fcdcaf7bb19 | []
| no_license | https://github.com/balarsen/pymc_learning | 4864a23836f834834e3a104c7a183f737eff5460 | 24add9e0bf7e938d01a403d020770c6bdf01822c | refs/heads/master | 2022-05-13T04:50:00.765940 | 2022-05-02T20:51:55 | 2022-05-02T20:51:55 | 54,926,619 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# http://stats.stackexchange.com/questions/104771/latent-dirichlet-allocation-in-pymc
#
import spacepy.plot as spp # for the styles
import numpy as np
import pymc as pm
K = 2 # number of topics
V = 4 # number of words
D = 3 # number of documents
data = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]])
alpha = np.ones(K)
beta = np.ones(V)
theta = pm.Container([pm.CompletedDirichlet("theta_%s" % i, pm.Dirichlet("ptheta_%s" % i, theta=alpha)) for i in range(D)])
phi = pm.Container([pm.CompletedDirichlet("phi_%s" % k, pm.Dirichlet("pphi_%s" % k, theta=beta)) for k in range(K)])
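# Generative story encoded here: theta_d ~ Dirichlet(alpha) is document d's
# topic mixture and phi_k ~ Dirichlet(beta) is topic k's word distribution;
# each word position then draws a topic z ~ Categorical(theta_d) and a word
# w ~ Categorical(phi_z).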
Wd = [len(doc) for doc in data]
z = pm.Container([pm.Categorical('z_%i' % d,
p = theta[d],
size=Wd[d],
value=np.random.randint(K, size=Wd[d]))
for d in range(D)])
# cannot use p=phi[z[d][i]] here since phi is an ordinary list while z[d][i] is stochastic
w = pm.Container([pm.Categorical("w_%i_%i" % (d,i),
p = pm.Lambda('phi_z_%i_%i' % (d,i),
lambda z=z[d][i], phi=phi: phi[z]),
value=data[d][i],
observed=True)
for d in range(D) for i in range(Wd[d])])
model = pm.Model([theta, phi, z, w])
mcmc = pm.MCMC(model)
mcmc.sample(10000)
# pm.Matplot.plot(mcmc)
| UTF-8 | Python | false | false | 1,337 | py | 145 | LatentDirichletAllocation.py | 10 | 0.554974 | 0.535527 | 0 | 38 | 34.184211 | 123 |
ndb1995/comp805 | 13,580,686,604,136 | 580073e7136e5ed76fbf01d365745be277e7ff23 | 099aab7f5cb42975b7fbcf111c9f21dd9198052f | /labs/week4/test_lab4.py | 4dba8c593dd77eeb8bed77819afb9ad8816b44f7 | []
| no_license | https://github.com/ndb1995/comp805 | 8e41f1493a09528934ccfe6b004dbd1bae248106 | 90541b41f80722cd07aa4e16141b5214c8457b7a | refs/heads/master | 2021-05-09T20:15:36.448229 | 2018-03-20T21:30:48 | 2018-03-20T21:30:48 | 118,682,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
print("\nFirst Function: Switch Case\n")
print (lab3.switch_case(['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']))
print("\nSecond Function: Only Even\n")
print (lab3.only_even(['orange', 5, 'pear', 2, 'kiwi', 7, '10']))
print("\nThird Function: Greatest Difference\n")
print (lab3.greatest_difference([1,20,15,5,10,17,34,45,13,65]))
print("\nFourth Function: Make Title\n")
print (lab3.make_title([1,20,15,5,10,17,34,45,13,65]))
print("\nFifth Function: Test Title\n")
print (lab3.test_title(['Orange', 'Apple', 'pear', 'Strawberry', 'kiwi', 'apple', 'banana']))
print("\nSixth Function: Create Word\n")
print (lab3.create_word(['Orange', 'Apple', 'pear', 'Strawberry', 'kiwi', 'apple', 'banana']))
print("\nSeventh Function: Three Times Number\n")
print (lab3.three_times_nums([1,20,15,5,10,17,34,45,13,65]))
print("\nEigth Function: Keep Lowercase\n")
print (lab3.keep_lowercase(['ORANGE', 'APPLE', 'pear', 'STRAWBERRY', 'kiwi', 'apple', 'banana']))
print("\nNinth Function: Multiplication Total Of\n")
print (lab3.multiplication_total_of([1,20,15,5,10,17,34,45,13,65]))
print("\nTenth Function: Square Nums\n")
print (lab3.square_nums([1,20,15,5,10,17,34,45,13,65]))
print("\nEleventh Function: Less Than 5\n")
print (lab3.lessthan_5([1,20,15,5,10,17,34,45,13,65]))
"""
import unittest
class Lab4Test(unittest.TestCase):
def test_only_even(self):
"""
tests only even function from lab4
"""
func = lab4.only_even
case1 = [1,2,3,4,5]
expected1 = [2,4]
self.assertEqual(func(case1), expected1)
if __name__ == '__main__':
try:
import lab4
print ("lab4.py module found, testing")
unittest.main()
except ImportError:
print ("Missing lab4.py module")
| UTF-8 | Python | false | false | 1,781 | py | 22 | test_lab4.py | 14 | 0.639528 | 0.559236 | 0 | 53 | 32.603774 | 97 |
sauleddy/HomePortal | 6,777,458,398,609 | 50e2bb1e6fc5b93297aea9118817b87016c12f10 | 53bfd08966578e9321fd22e0eaf957fed4566f66 | /resource_help/ResHelp/ResHelpBase.py | 8a19eb3ee96786f0cc27966faf68a4195bbffa6d | [
"MIT"
]
| permissive | https://github.com/sauleddy/HomePortal | f1cd638de1f0c5074717e97b05c8cb496370f627 | 5fff01024da7d0f1ed2990edea0de2ac1d3f07ab | refs/heads/master | 2021-03-16T05:27:57.799266 | 2017-06-02T00:32:27 | 2017-06-02T00:32:27 | 91,551,883 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
' this is a base class for ResHelp '
import logging
from abc import ABCMeta, abstractmethod
__author__ = 'Ed Hsu'
class ResHelpBase(metaclass=ABCMeta):
@abstractmethod
def __init__(self, image_help, storage_help):
self.myImageHelp = image_help
self.myStorageHelp = storage_help
@abstractmethod
def resize_images(self, length, quality, src_dir, target_dir):
pass
@abstractmethod
def upload_images(self, src_dir, target_dir):
pass
if __name__ == '__main__':
pass
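# Sketch (not in the original file): a concrete subclass must implement every
# abstract method before it can be instantiated. The class name and behavior
# below are hypothetical placeholders.
class NoOpResHelp(ResHelpBase):
    def __init__(self, image_help, storage_help):
        super().__init__(image_help, storage_help)

    def resize_images(self, length, quality, src_dir, target_dir):
        logging.info('resize %s -> %s', src_dir, target_dir)

    def upload_images(self, src_dir, target_dir):
        logging.info('upload %s -> %s', src_dir, target_dir)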
| UTF-8 | Python | false | false | 577 | py | 86 | ResHelpBase.py | 82 | 0.641248 | 0.637782 | 0 | 28 | 19.607143 | 66 |
EmmaK0822/Python | 18,957,985,663,637 | 64ad7b7a41ac9b62a022407d27d94e5c37be6d8d | d6e5159aed7d665ef98c9e6b7caaf2d12942b8b2 | /PyPoll/main.py | c2352a36eb0253a05b4f876f2e359ec936af7b21 | [
"MIT"
]
| permissive | https://github.com/EmmaK0822/Python | bb33e01728fb280f979116c9a4c133f40858cf04 | 10865a96319b0dd685b00eb8c810cde68dfbdaef | refs/heads/master | 2020-03-15T19:21:12.527808 | 2018-07-23T01:46:50 | 2018-07-23T01:46:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import csv
filepath1 = '.\\raw_data\\election_data_1.csv'
filepath2 = '.\\raw_data\\election_data_2.csv'
ID = []
Candidate = []
with open(filepath1, 'r', encoding = 'utf-8', newline = '') as data1:
#print(data1)
csvreader1 = csv.DictReader(data1)
for row in csvreader1:
ID.append(row['Voter ID'])
Candidate.append(row['Candidate'])
#ID = [row['Voter ID'] for row in csvreader1]
#print(ID[0:5])
# Why it doesn't work? -> ID = [row['Voter ID'] for row in csvreader1]
# Why it doesn't work? -> Candidate = [row['Candidate'] for row in csvreader1]
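# Answer: csv.DictReader is a one-pass iterator; the for-loop above already
# consumed it, so a later comprehension over csvreader1 sees no rows and
# yields [].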
with open(filepath2, 'r', encoding = 'utf-8', newline = '') as data2:
#print(data2)
csvreader2 = csv.DictReader(data2)
for row in csvreader2:
ID.append(row['Voter ID'])
Candidate.append(row['Candidate'])
#print(ID[0:5])
#print(ID[-5:])
set_Candidate = set(Candidate)
list_Candidate = list(set(Candidate))
# print(list_Candidate)
dictionary_Candidate = {}
list_count = []
for i in range(len(list_Candidate)):
dictionary_Candidate.update({list_Candidate[i]:Candidate.count(list_Candidate[i])})
list_count.append(Candidate.count(list_Candidate[i]))
#print(dictionary_Candidate)
#print(list_count)
# Make sure no duplicate element in Voter ID
if len(ID) == len(set(ID)):
print("Election Results")
print("----------------")
print("Total Votes: " + str(len(Candidate)))
print("----------------")
candidate_list = list(set(Candidate))
for i in candidate_list:
percent = round(Candidate.count(i)/len(Candidate)*100, 1)
print(i + " : " + str(percent) + "%" + " (" + str(Candidate.count(i)) + ")")
print("----------------")
for Candidate in list_Candidate:
if dictionary_Candidate[Candidate] == max(list_count):
print("winner: " + str(Candidate))
print("----------------")
else:
print("Verify duplicates in Voter ID")
print("ID list value: " + str(len(ID)))
print("ID set value: " + str(len(set(ID))))
'''
# Outcome 2
Election Results
----------------
Total Votes: 4324001
----------------
Li : 11.4% (492940)
Khan : 51.3% (2218231)
Cordin : 0.6% (24090)
Torres : 8.2% (353320)
O'Tooley : 2.4% (105630)
Correy : 16.3% (704200)
Vestal : 8.9% (385440)
Seth : 0.9% (40150)
----------------
winner: Khan
----------------
# Why it doesn't work?
Election Results
----------------
Total Votes: 3521001
----------------
Correy : 20.0% (704200)
Li : 14.0% (492940)
O'Tooley : 3.0% (105630)
Khan : 63.0% (2218231)
----------------
Winner: Khan
----------------
'''
| UTF-8 | Python | false | false | 2,564 | py | 8 | main.py | 6 | 0.582293 | 0.524961 | 0 | 93 | 26.569892 | 87 |
neardws/fog-computing-based-collision-warning-system | 4,861,903,013,061 | 4ca79c61d96b21e9140f9dfa9efd194f5fe899d0 | 11376deaca40b881de1ad04bf92160a22e57d6b2 | /train_hmm/training_by_hmmlearn_with_mp.py | 3071e8948ac9c2632b49c28b91d3159e56e9c113 | []
| no_license | https://github.com/neardws/fog-computing-based-collision-warning-system | dd591186ddc5e713b60f500c98b6931f2e5104b3 | d878e664763382f173b3b38bcdbfde82ba9bfa8f | refs/heads/master | 2022-11-05T17:21:44.838776 | 2022-10-21T00:29:55 | 2022-10-21T00:29:55 | 185,387,044 | 8 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Multiprocessing
'''
from hmmlearn import hmm
import numpy as np
import multiprocessing as mp
import linecache
import pickle
TRAIN_DATA = r'E:\NearXu\hmm_train_data\train_'
MODEL_PATH = r'E:\NearXu\model\model_'
"""
hmmlearn provides three hidden Markov model variants:
GaussianHMM: continuous observation states following a Gaussian distribution
GMMHMM: continuous observation states following a mixture-of-Gaussians distribution
MultinomialHMM: discrete observation states
"""
train_file = TRAIN_DATA + '0' + '.txt'
chunk_lines = 1000
be_big = 10000
def read_distributed(*lines):
print('mp started')
the_x = np.array([])
the_x_len = np.array([])
for line in lines:
status = line.split()
len_traj = len(status)
if len_traj >= 10:
x_status = np.array([])
x_status = np.hstack((x_status, status))
the_x = np.append(the_x,x_status)
the_x_len = np.append(the_x_len, len_traj)
return the_x, the_x_len
def main():
x = np.array([])
x_len = np.array([])
line_cache = linecache.getlines(train_file)
count = len(line_cache)
number = int(count / chunk_lines)
print(count)
print(number)
pool = mp.Pool(processes=10)
jobs = []
for i in range(10):
jobs.append(pool.apply_async(read_distributed, line_cache[i * chunk_lines : i * chunk_lines + chunk_lines]))
# jobs.append(pool.apply_async(read_distributed, line_cache[number * chunk_lines : count]))
for job in jobs:
x = np.append(x, job.get()[0])
x_len = np.append(x_len, job.get()[1])
print(x)
print(len(x))
print(x_len)
pool.close()
x = x[:, np.newaxis]
x = x.astype(np.float64)
x = x * be_big
print(x)
print(len(x))
print(x_len)
number_of_status = 100
print('¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥')
print('Start Training')
model = hmm.GaussianHMM(n_components=number_of_status, n_iter=10, tol=0.001, covariance_type='diag', verbose=True)
model.fit(x, x_len)
# print(model.score(x,x_len))
print('**************************************')
print(model.transmat_)
model_name = MODEL_PATH +'.pkl'
with open(model_name, 'wb') as model_file:
pickle.dump(model, model_file)
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 2,405 | py | 44 | training_by_hmmlearn_with_mp.py | 37 | 0.559101 | 0.546067 | 0 | 81 | 25.493827 | 118 |
xxxxHolic/sklearn_comment | 1,649,267,446,591 | 940fd19d9df8e1ca271e601ab28e5df47aaedd09 | da82804a4e408a23b899de416551a49cf865197b | /Linear_Model_base/sklearn_utils_validation__shape_repr.py | 9c452ed026a6ded6861f147145d1557cca4abdb9 | []
| no_license | https://github.com/xxxxHolic/sklearn_comment | 7de5ba8c5f143ba13ef97e2c58f126bc0da606d5 | da82804a4e408a23b899de416551a49cf865197b | refs/heads/master | 2020-04-04T14:01:18.298280 | 2018-11-03T13:14:29 | 2018-11-03T13:14:29 | 155,983,881 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 21:12:03 2018
@author: USER
"""
"""
scikit-learn/sklearn/utils/validation.py
Utilities for input validation
"""
# validation.py performs input-data validation and smooths over differences
# between operating systems and Python 2/3 versions.
# Function _shape_repr:
# used when reporting a data shape, so the printed message does not change
# with platform or version; Python 2's 'long' type adds an 'L' suffix, hence
# the hand-built representation.
# Dependent functions and libraries: none
def _shape_repr(shape):
"""
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
    #------------------------------------------------------
    # "x for x in shape" syntax is a generator expression:
    # for x in shape:
    #     yield x
    # example: [x for x in [1,2,3]] == [1,2,3]
    # so '%d' % e for e in shape renders each number in shape
    # as a string
    # example: ['%d' % x for x in [1,2]] == ['1','2']
    #------------------------------------------------------
    # join syntax:
    # sep.join(sequence)
    # sequence: the elements to concatenate
    # sep: the separator string
    # ", ".join('%d' % e for e in shape) joins the elements of shape with ", "
    # e.g. shape = [1, 2] -> '1, 2' (the parentheses are added below)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
        # if len(shape) == 1, e.g. shape = (1,),
        # joined = ", ".join("%d" % e for e in shape)
        # evaluates to '1', and the appended ',' gives '1,'
        # '(%s)' % joined = '(%s)' % '1,' = '(1,)'
return "(%s)" % joined
| UTF-8 | Python | false | false | 2,553 | py | 23 | sklearn_utils_validation__shape_repr.py | 22 | 0.512899 | 0.487101 | 0 | 76 | 28.092105 | 79 |
MARCELNAKA/Python-Is-Easy | 13,649,406,094,564 | c64c4680e147b6aca8a96734bb8709ae12be8b0c | 25d094e5cbf45069f3f1198fb353a7de17ccf34f | /class_3_if.py | 4cf87457b0fd0c3420801e3fb4045d529169f862 | []
| no_license | https://github.com/MARCELNAKA/Python-Is-Easy | 8ce3aac2f58a4bab2307b14de92c9097b2fc0fda | 7866c068c9a5ca3b56a9bdf0a2d1ced76fe46c69 | refs/heads/master | 2020-04-22T07:29:56.736353 | 2019-03-28T01:32:01 | 2019-03-28T01:32:01 | 170,218,339 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#homework
a = "5"
b = 6
c = 9
x = int(a)
y = int(b)
z = int(c)
result= False
def check(x,y,z):
    if x==y and x==z:
        result = True
    elif x==y and y==z:
        result = True
    else:
        result = False
    # print inside the function so the computed result is shown
    print (result)

check(x, y, z)
| UTF-8 | Python | false | false | 267 | py | 6 | class_3_if.py | 6 | 0.438202 | 0.426966 | 0 | 21 | 10.714286 | 23 |
SNHajar765/PSO_FYP | 1,537,598,341,020 | 2af15c633feee78e9396b0351742116d34deada8 | 4ead4802b26107c4780bf375c813cf48ed1421dc | /run/tabs/tab_1_PSO.py | a7c38ede512b95ec1ad343961e236760fd749fa5 | []
| no_license | https://github.com/SNHajar765/PSO_FYP | 344e60063e52d48c55e8549b729aec1c55ab7fb5 | db0d0839961508968f5f41ff25314acc70776ee4 | refs/heads/master | 2020-08-26T16:45:02.870210 | 2019-10-26T14:22:24 | 2019-10-26T14:22:24 | 217,077,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import pandas as pd
import random
import numpy as np
from oct2py import Oct2Py
from oct2py import octave
octave.addpath("C:/Users/QA/Desktop")
W = 0.5 #inertia
c1 = 0.8 #cognitive weight
c2 = 0.9 #social weight
n_iterations = 10
target_error = 1e-4
n_particles = 30
min_range = 0
max_range = 0
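# Canonical PSO velocity update implemented in move_particles() below:
#   v_new = W*v + c1*r1*(pbest - x) + c2*r2*(gbest - x)
# W damps the previous velocity, c1 weights the pull toward the particle's
# own best position, c2 the pull toward the swarm's best; r1 and r2 are
# uniform random numbers drawn per update.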
pso_layout = html.Div([
html.H1('PSO Implementation'),
html.H3('Enter Minimum Value'),
dbc.Input(id='min_val', type = 'number', style={'width': '20%'}),
html.P(id='min-place'),
# html.H3('Enter Minimum Value'),
# dbc.Input(id='min_val', type = 'number', style={'width': '20%'}),
# html.P(id='min-place')
html.H3('Enter Maximum Value'),
dbc.Input(id='max_val', type = 'number', style={'width': '20%'}),
#use this as alternative for output min-val
html.P(id='max-place'),
html.Button('Submit',id='button_a', n_clicks= 1),
html.Div(id='output-disp')
])
########################## PSO PROGRAM ########################################################################################################
def calc(click,mini,maxi):
global min_range, max_range
if click > 1:
min_range = float(mini)
max_range = float(maxi)
class Particle():
def __init__(self):
#randomizing particle position within permissible range of search spave
self.position = np.array([random.uniform(min_range,max_range), random.uniform(min_range,max_range), random.uniform(min_range,max_range)])
#assigning current position to personal best position
self.pbest_position = self.position
#'inf' is used to compare solution in algorithms for best solution
self.pbest_value = float('inf')
#initializing velocity
self.velocity = np.array([0,0,0])
def __return_str__(self):
return self.position
#clamp range of particle movement
def clamp(self):
rules = [ self.position > min_range, self.position < max_range]
return rules
#how each position move to next destination
def move(self):
            # moving current position, but only while every coordinate
            # stays inside the permissible search range
            if all(np.all(rule) for rule in self.clamp()):
                self.position = self.position + self.velocity
class Space():
def __init__(self,target, target_error,n_particles):
self.target = target
self.target_error = target_error
self.n_particles = n_particles
#store data of particles
self.particles = []
self.gbest_value = float('inf')
self.gbest_position = np.array([random.random()*max_range, random.random()*max_range])
def print_particles(self):
list_x = []
list_y = []
list_z = []
for particle in self.particles:
plotData = particle.__return_str__()
X = plotData[0]
Y = plotData[1]
Z = plotData[2]
list_x.append(X)
list_y.append(Y)
list_z.append(Z)
df = pd.DataFrame()
df['X'] = list_x
df['Y'] = list_y
df['Z'] = list_z
export = df.to_json(r'C:\Users\QA\Desktop\PSO_Py\run\tabs\Export_DataFrame.json')
def fitness(self, particle):
#pass particle to octave script
return octave.javelin([particle.position[0],particle.position[1],particle.position[2]])
def set_pbest(self):
for particle in self.particles:
#assign fitness to fitness_cadidate
fitness_cadidate = self.fitness(particle)
#if personal value is more than fitness candidate,
if (particle.pbest_value > fitness_cadidate):
#update personal best value
particle.pbest_value = fitness_cadidate
#update personal best position
particle.pbest_position = particle.position
def set_gbest(self):
for particle in self.particles:
            #assigning best of the best to best fitness
            best_fitness_candidate = self.fitness(particle)
            if (self.gbest_value > best_fitness_candidate):
                self.gbest_value = best_fitness_candidate
self.gbest_position = particle.position
def move_particles(self):
for particle in self.particles:
global W
new_velocity = (W*particle.velocity) + (c1*random.random()) * (particle.pbest_position - particle.position) + \
(random.random()*c2) * (self.gbest_position - particle.position)
particle.velocity = new_velocity
particle.move()
#particle.clamp()
search_space = Space(1,target_error,n_particles)
#search_space = Space(1,target_error,n_particles)
particles_vector = [Particle() for _ in range(search_space.n_particles)]
search_space.particles = particles_vector
search_space.print_particles()
iteration = 0
while (iteration < n_iterations):
search_space.set_pbest()
search_space.set_gbest()
if(abs(search_space.gbest_value - search_space.target) <= search_space.target_error):
break
search_space.move_particles()
iteration = iteration + 1
def return_solution():
best_value = search_space.gbest_position
best_solution = octave.javelin(best_value)
return (best_value, abs(best_solution))
return u'''Best Solution : {} '''.format(return_solution())
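
# Illustration: a minimal, standalone check of the velocity update used in
# move_particles() above, with hypothetical numbers (it needs neither Dash
# nor the Octave fitness function).
if __name__ == '__main__':
    demo_pos = np.array([2.0, 2.0, 2.0])
    demo_vel = np.array([0.0, 0.0, 0.0])
    demo_pbest = np.array([1.0, 2.0, 3.0])
    demo_gbest = np.array([0.0, 0.0, 0.0])
    # v' = W*v + c1*r1*(pbest - x) + c2*r2*(gbest - x)
    demo_vel = (W * demo_vel) + (c1 * random.random()) * (demo_pbest - demo_pos) \
               + (random.random() * c2) * (demo_gbest - demo_pos)
    print("demo velocity update:", demo_vel)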
| UTF-8 | Python | false | false | 6,396 | py | 4 | tab_1_PSO.py | 3 | 0.520169 | 0.512977 | 0 | 158 | 38.455696 | 153 |
leesohyeon/Python_ac | 19,439,022,008,777 | e0a97fdff1573b119b4035da1e5e812c19b0c48a | 53888075a5fae164872e82f841751f291cba6c4a | /1112/exceptex/ExceptTest02.py | c8bbf9ae56651f7e0c8493a40f6019fd4c5a37fa | []
| no_license | https://github.com/leesohyeon/Python_ac | e27e9bf394dbf41c0815120d9e6601bc7e45c6b6 | 1f71f6a21c958b7d82d0fb8019c385100b4370c8 | refs/heads/master | 2020-05-02T04:59:46.384439 | 2019-03-26T10:09:57 | 2019-03-26T10:09:57 | 177,758,112 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Problem)
Using a loop, divide 3 by each element of the numbers list.
Using the element 0 as the divisor will raise an error,
so handle the exception with try-except!
'''
numbers=[0.2,2.5,0,10]
for i in numbers:
try:
print(3/i)
except ZeroDivisionError:
print("연산할 수 없음") | UTF-8 | Python | false | false | 383 | py | 118 | ExceptTest02.py | 111 | 0.563707 | 0.521236 | 0 | 14 | 17.571429 | 44 |
adrian13579/cool-compiler-2021 | 17,102,559,774,961 | b19ac7d2fc21be73bcf7689c1ebbb791a4343592 | 45d4349b4af21869693849b25a35b7d385ac6114 | /src/compiler/visitors/semantics/tools/type.py | 5d82509ddb5a3077bf15d88e0c262c37a5ab3f77 | [
"MIT"
]
| permissive | https://github.com/adrian13579/cool-compiler-2021 | 07bb7190f41f51e5c2441872a0d0fc345be0e786 | 2ccf55b3eb8c0a60abc70938257634c70a1d87d4 | refs/heads/master | 2023-02-28T16:13:14.512175 | 2022-03-14T02:41:43 | 2022-03-14T02:41:43 | 340,008,534 | 0 | 1 | MIT | true | 2021-03-01T02:00:10 | 2021-02-18T10:07:14 | 2021-02-28T08:52:13 | 2021-03-01T02:00:10 | 2,145 | 0 | 1 | 0 | Cool | false | false | from visitors.semantics.tools.errors import *
from typing import List, Set, Tuple
from collections import OrderedDict
class Attribute:
def __init__(self, name, typex):
self.name = name
self.type = typex
def __str__(self):
return f"[attrib] {self.name} : {self.type.name};"
def __repr__(self):
return str(self)
class Method:
def __init__(self, name, param_names, params_types, return_type):
self.name = name
self.param_names = param_names
self.param_types = params_types
self.return_type = return_type
def __str__(self):
params = ", ".join(
f"{n}:{t.name}" for n, t in zip(self.param_names, self.param_types)
)
return f"[method] {self.name}({params}): {self.return_type.name};"
def __eq__(self, other):
return (
other.name == self.name
and other.return_type == self.return_type
and other.param_types == self.param_types
)
class Type:
def __init__(self, name: str):
self.name = name
self.attributes = []
self.methods = []
self.parent = None
self.index = -1
def set_parent(self, parent):
if self.parent is not None:
raise SemanticError(
f"Type '{self.name}' already has parent type '{self.parent.name}'. Type '{parent.name}' cannot be set as parent."
)
if parent.name in {"String", "Int", "Bool"}:
raise SemanticError(
f"Cannot set '{self.name}' parent, '{parent.name}' type cannot be inherited."
)
self.parent = parent
def define_attribute(self, name: str, typex):
try:
self.get_attribute(name)
except SemanticError:
attribute = Attribute(name, typex)
self.attributes.append(attribute)
return attribute
else:
raise SemanticError(
f'Attribute "{name}" is already defined in "{self.name}".'
)
def get_attribute(self, name: str, first=None):
if not first:
first = self.name
elif first == self.name:
raise AttributeError(f'Attribute "{name}" is not defined in {self.name}.')
try:
return next(attr for attr in self.attributes if attr.name == name)
except StopIteration:
if self.parent is None:
raise AttributeError(
f'Attribute "{name}" is not defined in {self.name}.'
)
try:
return self.parent.get_attribute(name, first=first)
except SemanticError:
raise AttributeError(
f'Attribute "{name}" is not defined in {self.name}.'
)
def get_method(self, name: str, local: bool = False, first=None):
if not first:
first = self.name
elif first == self.name:
raise AttributeError(
f'Method "{name}" is not defined in class {self.name}.'
)
try:
return next(method for method in self.methods if method.name == name)
except StopIteration:
if self.parent is None:
raise AttributeError(
f'Method "{name}" is not defined in class {self.name}.'
)
try:
return self.parent.get_method(name, first=first)
except AttributeError:
raise AttributeError(
f'Method "{name}" is not defined in class {self.name}.'
)
def define_method(
self, name: str, param_names: list, param_types: list, return_type
):
if name in (method.name for method in self.methods):
raise SemanticError(f"Method '{name}' already defined in '{self.name}'")
try:
parent_method = self.get_method(name)
except SemanticError:
parent_method = None
if parent_method:
error_list = []
return_type.swap_self_type(self)
return_clone = return_type.clone()
parent_method.return_type.swap_self_type(self)
if not conforms(return_type, parent_method.return_type):
error_list.append(
f" -> Same return type: Redefined method has '{return_clone.name}' as return type instead of '{parent_method.return_type.name}'."
)
if len(param_types) != len(parent_method.param_types):
error_list.append(
f" -> Same amount of params: Redefined method has {len(param_types)} params instead of {len(parent_method.param_types)}."
)
else:
count = 0
err = []
for param_type, parent_param_type in zip(
param_types, parent_method.param_types
):
param_clone = param_type.clone()
if not conforms(param_type, parent_param_type):
err.append(
f" -Param number {count} has {param_clone.name} as type instead of {parent_param_type.name}"
)
count += 1
if err:
s = f" -> Same param types:\n" + "\n".join(
child for child in err
)
error_list.append(s)
return_type.swap_self_type(self, back=True)
parent_method.return_type.swap_self_type(self, back=True)
if error_list:
err = (
f"Redifined method '{name}' in class '{self.name}' does not have:\n"
+ "\n".join(child for child in error_list)
)
raise SemanticError(err)
method = Method(name, param_names, param_types, return_type)
self.methods.append(method)
return method
def all_attributes(self, clean=True, first=None):
if not first:
first = self.name
elif first == self.name:
            return OrderedDict().values() if clean else OrderedDict()
plain = (
OrderedDict()
if self.parent is None
else self.parent.all_attributes(clean=False, first=first)
)
for attr in self.attributes:
plain[attr.name] = (attr, self)
return plain.values() if clean else plain
def all_methods(self, clean=True, first=None):
if not first:
first = self.name
elif first == self.name:
            return OrderedDict().values() if clean else OrderedDict()
plain = (
OrderedDict()
if self.parent is None
else self.parent.all_methods(clean=False, first=first)
)
for method in self.methods:
plain[method.name] = (method, self)
return plain.values() if clean else plain
def conforms_to(self, other, first=None):
if not first:
first = self.name
elif self.name == first:
return False
return (
other.bypass()
or self == other
or self.parent
and self.parent.conforms_to(other, first)
)
def bypass(self):
return False
def least_common_ancestor(self, other):
this: Type = self
if isinstance(this, ErrorType) or isinstance(other, ErrorType):
return ErrorType()
while this.index < other.index:
other = other.parent
while other.index < this.index:
this = this.parent
if not (this and other):
return None
while this.name != other.name:
this = this.parent
other = other.parent
        if this is None:
return None
return this
def __str__(self):
output = f"type {self.name}"
parent = "" if self.parent is None else f" : {self.parent.name}"
output += parent
output += " {"
output += "\n\t" if self.attributes or self.methods else ""
output += "\n\t".join(str(x) for x in self.attributes)
output += "\n\t" if self.attributes else ""
output += "\n\t".join(str(x) for x in self.methods)
output += "\n" if self.methods else ""
output += "}\n"
return output
def __repr__(self):
return str(self)
class TypeBag:
def __init__(self, type_set, heads=[]) -> None:
self.type_set: set = (
type_set if isinstance(type_set, set) else from_dict_to_set(type_set)
)
self.heads: List[Type] = heads
if len(self.type_set) == 1:
self.heads = list(self.type_set)
self.condition_list = []
self.conform_list = []
@property
def error_type(self) -> bool:
return len(self.heads) == 0
@property
def name(self) -> str:
return self.generate_name()
def set_conditions(self, condition_list, conform_list) -> None:
if self.error_type:
return
self.condition_list = condition_list
self.conform_list = conform_list
self.__update_type_set_from_conforms()
def __update_type_set_from_conforms(self) -> None:
intersect_set = set()
for conform_set in self.conform_list:
intersect_set = intersect_set.union(conform_set)
self.type_set = self.type_set.intersection(intersect_set)
self.update_heads()
def update_heads(self) -> None:
if self.error_type:
return
# new_heads = []
# visited = set()
# for head in self.heads:
# if head in self.type_set:
# new_heads.append(head)
# continue
pos_new_head = []
lower_index = 2 ** 32
for typex in self.type_set:
# if typex in visited:
# continue
if typex.index < lower_index:
pos_new_head = [typex]
lower_index = typex.index
elif typex.index == lower_index:
pos_new_head.append(typex)
# new_heads += pos_new_head
self.heads = pos_new_head # new_heads
def swap_self_type(self, swap_type, back=False):
if self.error_type:
return self
if not back:
remove_type = SelfType()
add_type = swap_type
else:
remove_type = swap_type
add_type = SelfType()
try:
self.type_set.remove(remove_type)
self.type_set.add(add_type)
except KeyError:
return self
self.update_heads()
return self
def add_self_type(self, add_type) -> bool:
if self.error_type:
return False
if SelfType() in self.type_set and not add_type in self.type_set:
self.type_set.add(add_type)
return True
return False
def remove_self_type(self, remove_type) -> None:
if self.error_type:
return
try:
self.type_set.remove(remove_type)
except KeyError:
pass
self.type_set.add(SelfType())
self.update_heads()
def generate_name(self) -> str:
if self.error_type:
return "<error-type>"
if len(self.type_set) == 1:
name = self.heads[0].name
return name
s = "{"
s += ", ".join(
typex.name for typex in sorted(self.type_set, key=lambda t: t.index)
)
s += "}"
return s
def clone(self):
clone = TypeBag(self.type_set.copy(), self.heads.copy())
clone.condition_list = self.condition_list.copy()
clone.conform_list = self.conform_list.copy()
return clone
def update(self, other):
self.name = other.name
self.condition_list = other.condition_list
self.conform_list = other.conform_list
self.type_set = other.type_set
self.heads = other.heads
def __str__(self):
return self.name
def __repr__(self):
return str(self)
class SelfType(Type):
def __init__(self):
self.name = "SELF_TYPE"
self.index = 2 ** 31
def conforms_to(self, other):
if isinstance(other, SelfType):
return True
return False
raise InternalError("SELF_TYPE is yet to be assigned, cannot conform.")
def bypass(self):
return False
raise InternalError("SELF_TYPE is yet to be assigned, cannot bypass.")
def __hash__(self) -> int:
return hash(self.name)
def __eq__(self, o: object) -> bool:
return isinstance(o, SelfType)
def __str__(self):
return self.name
def __repr__(self):
return str(self)
class ErrorType(TypeBag):
def __init__(self):
self.name = "<error-type>"
self.index = 2 ** 32
self.type_set = frozenset()
self.heads = frozenset()
def conforms_to(self, other):
return True
def bypass(self):
return True
def swap_self_type(self, swap_type, back=False):
return self
def set_conditions(self, *params):
return
def generate_name(self):
return "<error-type>"
def clone(self):
return self
def conforms(bag1: TypeBag, bag2: TypeBag) -> bool:
if isinstance(bag1, ErrorType) or isinstance(bag2, ErrorType):
raise InternalError("Use of deprecated ErrorType in conforms")
if bag1.error_type or bag2.error_type:
return True
ordered_set = order_set_by_index(bag2.type_set)
condition_list = []
conform_list = []
for condition in ordered_set:
conform = conform_to_condition(bag1.type_set, condition)
for i in range(len(condition_list)):
conform_i = conform_list[i]
if len(conform_i) == len(conform) and len(
conform.intersection(conform_i)
) == len(conform):
condition_list[i].add(condition)
break
else:
condition_list.append({condition})
conform_list.append(conform)
bag1.set_conditions(condition_list, conform_list)
return len(bag1.type_set) >= 1
def try_conform(bag1: TypeBag, bag2: TypeBag) -> TypeBag:
clone1 = bag1.clone()
if not conforms(bag1, bag2):
return clone1
return bag2
def join(bag1: TypeBag, bag2: TypeBag) -> TypeBag:
if isinstance(bag1, ErrorType) or isinstance(bag2, ErrorType):
raise InternalError("Use of deprecated ErrorType in Join")
if bag1.error_type:
return bag2
if bag2.error_type:
return bag1
ancestor_set = set()
head_list = []
ordered_set1: Set[Type] = order_set_by_index(bag1.type_set)
ordered_set2: Set[Type] = order_set_by_index(bag2.type_set)
ordered_set1, ordered_set2 = (
(ordered_set1, ordered_set2)
if len(ordered_set1) < len(ordered_set2)
else (ordered_set2, ordered_set1)
)
for type1 in ordered_set1:
same_branch = False
previous_ancestor = None
previous_type = None
for type2 in ordered_set2:
if same_branch and type2.conforms_to(previous_type):
previous_type = type2
continue
common_ancestor = type1.least_common_ancestor(type2)
previous_type = type2
if not previous_ancestor:
smart_add(ancestor_set, head_list, common_ancestor)
previous_ancestor = common_ancestor
else:
if previous_ancestor == common_ancestor:
same_branch = True
else:
same_branch = False
smart_add(ancestor_set, head_list, common_ancestor)
previous_ancestor = common_ancestor
join_result = TypeBag(ancestor_set, head_list)
return join_result
def join_list(type_list):
join_result = type_list[0]
for i in range(1, len(type_list)):
type_i = type_list[i]
join_result = join(join_result, type_i)
return join_result
def equal(bag1: TypeBag, bag2: TypeBag):
if isinstance(bag1, ErrorType) or isinstance(bag2, ErrorType):
raise InternalError("Use of deprecated type ErrorType")
if bag1.error_type or bag2.error_type:
return True
set1 = bag1.type_set
set2 = bag2.type_set
return len(set1) == len(set2) and len(set1.intersection(set2)) == len(set2)
def smart_add(type_set: set, head_list: list, typex: Type):
if isinstance(typex, TypeBag):
return auto_add(type_set, head_list, typex)
type_set.add(typex)
there_is = False
for i in range(len(head_list)):
head = head_list[i]
ancestor = typex.least_common_ancestor(head)
if ancestor in type_set:
there_is = True
if ancestor == typex:
head_list[i] = typex
break
if not there_is:
head_list.append(typex)
return type_set
def auto_add(type_set: set, head_list: List[TypeBag], bag: TypeBag):
type_set = type_set.union(bag.type_set)
aux = set(bag.heads)
for i in range(len(head_list)):
head_i = head_list[i]
for head_j in bag.heads:
ancestor = head_i.least_common_ancestor(head_j)
if ancestor in type_set:
head_list[i] = ancestor
aux.remove(head_j)
break
head_list += [typex for typex in aux]
return type_set
def conform_to_condition(type_set, parent) -> set:
set_result = set()
for typex in type_set:
if typex.conforms_to(parent):
set_result.add(typex)
return set_result
def order_set_by_index(type_set):
return sorted(list(type_set), key=lambda x: x.index)
def set_intersection(parent, type_set) -> set:
set_result = set()
for typex in type_set:
if typex.conforms_to(parent):
set_result.add(typex)
return set_result
def from_dict_to_set(types: dict):
type_set = set()
for typex in types:
type_set.add(types[typex])
return type_set
def unify(a: TypeBag, b: TypeBag) -> Tuple[TypeBag, bool]:
if a.error_type:
return b, False
if b.error_type:
return a, False
intersection = set()
if len(a.type_set) == 1 and len(b.type_set) == 1:
type_a = list(a.type_set)[0]
type_b = list(b.type_set)[0]
if type_b.conforms_to(type_a):
return a, False
return TypeBag(set()), False
for type1 in a.type_set:
for type2 in b.type_set:
if type2 == type1:
intersection.add(type1)
changed = (not equals_set(a.type_set, intersection)) or (
not equals_set(b.type_set, intersection)
)
a.type_set = intersection
a.update_heads()
b.type_set = intersection
b.update_heads()
return a, changed
# def unify(a: TypeBag, b: TypeBag) -> Tuple[TypeBag, bool]:
# if a.error_type:
# return b, False
# if b.error_type:
# return a, False
# if len(a.type_set) == 1 and len(b.type_set) == 1:
# type_a = list(a.type_set)[0]
# type_b = list(b.type_set)[0]
# if type_b.conforms_to(type_a):
# return a, False
# return TypeBag(set()), False
# a_clone = a.clone()
# change = conforms(a_clone, b)
# if not change:
# return a, change
# a.type_set = a_clone.type_set
# a.update_heads()
# return a, change
def equals_set(a: set, b: set) -> bool:
return a.issubset(b) and b.issubset(a)
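
# Illustration: a minimal sketch of how Type/TypeBag/join interact on a toy
# hierarchy. The hand-assigned indices are assumptions for this demo; in the
# real pipeline they are filled in by the type-collection passes, and the
# package imports above must resolve for the module to run.
if __name__ == "__main__":
    _object = Type("Object")
    _object.index = 0
    _a = Type("A")
    _a.set_parent(_object)
    _a.index = 1
    _b = Type("B")
    _b.set_parent(_object)
    _b.index = 2
    # joining {A} and {B} walks up to their least common ancestor
    print(join(TypeBag({_a}), TypeBag({_b})).name)  # -> Object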
| UTF-8 | Python | false | false | 19,719 | py | 93 | type.py | 29 | 0.549014 | 0.544247 | 0 | 647 | 29.477589 | 152 |
daira/zcash | 16,990,890,626,966 | bed75de4e0f8e430e4cb5c887236653b86acd425 | f99fd592f3e69444138d9277632ccf34a1ffc36d | /qa/rpc-tests/mergetoaddress_ua_sapling.py | 1a67b59fb52ef7a75a046e1bea3c73c50936b5e1 | [
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-free-unknown"
]
| permissive | https://github.com/daira/zcash | a3c95a78b17e1710cae25044f016b195ed890ce6 | 26ea365f512417625cd081e2483b68c05a8676ee | refs/heads/master | 2023-06-23T02:58:44.050086 | 2023-05-26T13:48:37 | 2023-05-26T13:48:37 | 55,192,531 | 0 | 0 | MIT | true | 2020-10-23T15:34:26 | 2016-04-01T00:38:45 | 2016-04-01T00:38:49 | 2020-10-23T15:34:25 | 70,453 | 0 | 0 | 0 | C++ | false | false | #!/usr/bin/env python3
# Copyright (c) 2023 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import NU5_BRANCH_ID, nuparams
from mergetoaddress_helper import MergeToAddressHelper
def get_new_address(test, node):
account = test.nodes[node].z_getnewaccount()['account']
return test.nodes[node].z_getaddressforaccount(account)['address']
class MergeToAddressUASapling (BitcoinTestFramework):
helper = MergeToAddressHelper(get_new_address, 'ANY_SAPLING')
def setup_chain(self):
self.helper.setup_chain(self)
def setup_network(self, split=False):
self.helper.setup_network(self, [
'-anchorconfirmations=1',
nuparams(NU5_BRANCH_ID, 99999),
])
def run_test(self):
self.helper.run_test(self)
if __name__ == '__main__':
MergeToAddressUASapling().main()
| UTF-8 | Python | false | false | 1,035 | py | 235 | mergetoaddress_ua_sapling.py | 151 | 0.713043 | 0.700483 | 0 | 31 | 32.387097 | 71 |
valeriy-d/simple_task_tracker | 11,304,353,960,303 | bfc00fd6c79f457a30e476cc8525bc695623a9e2 | 863cd9af926718ad8c76463de8347a17e62a17f1 | /apps/main/views.py | e0d5c92457109c63e4c894070cfd22464cc3dcfa | []
| no_license | https://github.com/valeriy-d/simple_task_tracker | 285a733ea43f8099f24af46a87ed37e0abc2931b | a2dd9052081b11e70d2610b9ac73c0e2f0db4213 | refs/heads/master | 2021-06-21T06:32:39.995461 | 2018-05-26T17:01:15 | 2018-05-26T17:01:15 | 134,955,411 | 0 | 0 | null | false | 2021-06-10T20:27:11 | 2018-05-26T11:25:25 | 2019-09-17T08:54:07 | 2021-06-10T20:27:11 | 15 | 0 | 0 | 4 | Python | false | false | from django.db.models import F
from django.views.generic import View
from django.shortcuts import get_object_or_404
from apps.utils import success_response
from apps.utils import json_required
from apps.utils import validate_fields
from .models import TaskModel
from .models import DescriptionModel
# Aliases for fields pulled from related models
aliases = {
'project_id': F('project__id'),
'status_id': F('status__id'),
'author_id': F('author__id'),
'doer_id': F('doer__id'),
'project_name': F('project__name'),
'status_name': F('status__name'),
'author_name': F('author__username'),
'doer_name': F('doer__username'),
}
class TaskListView(View):
"""
    List of tasks, without their descriptions
"""
# @validate_fields(TaskModel)
def get(self, request):
        # Filtering is done on name-like fields only,
        # so add a startswith lookup for every query parameter
filters = {}
for field, value in request.GET.items():
filters.update({field + '__startswith': value})
tasks = TaskModel.objects.all() \
.select_related('project', 'status', 'doer', 'author') \
.annotate(**aliases) \
.filter(**filters) \
.values('id', 'name', *aliases.keys())
return success_response(records=list(tasks), totals=tasks.count())
class TaskDescriptionView(View):
"""
    Get/add/delete comments for a specific task
"""
def get(self, request, task_id):
descriptions = DescriptionModel.objects.filter(taskmodel__id=task_id) \
.annotate(taskmodel_id=F("taskmodel__id")) \
.values('text', 'taskmodel_id')
return success_response(records=list(descriptions))
@json_required('text')
def post(self, request, task_id):
data = request.json_data
print(task_id)
task = TaskModel.objects.get(id=task_id)
description = task.description.create(text=data.get('text', ''))
return success_response(id=description.id)
class CreateTaskView(View):
"""
    Create a task
    POST /task "application/json" {name: <str>, author_id: <int>, doer_id: <int>, project_id: <int>, status_id: <int>}
"""
http_method_names = ['post', ]
@json_required('name', 'author_id', 'doer_id', 'project_id', 'status_id')
def post(self, request):
data = request.json_data
print(data)
task = TaskModel.objects.create(**data)
task = TaskModel.objects.select_related('project', 'status', 'doer', 'author') \
.filter(id=task.id) \
.annotate(**aliases) \
.values('id', 'name', *aliases.keys())
return success_response(status=True, task=list(task)[0])
class ChangeTaskView(View):
def _get_task(self, task_id):
return get_object_or_404(TaskModel, id=task_id)
def delete(self, request, task_id):
task = self._get_task(task_id)
task.delete()
return success_response()
@json_required()
@validate_fields(TaskModel)
def patch(self, request, task_id):
data = request.json_data
task = TaskModel.objects.filter(id=task_id)
task.update(**data)
task = task.select_related('project', 'status', 'doer', 'author') \
.annotate(**aliases) \
.values('id', 'name', *aliases.keys())
return success_response(task=list(task)[0])
| UTF-8 | Python | false | false | 3,722 | py | 10 | views.py | 7 | 0.589649 | 0.587387 | 0 | 106 | 32.358491 | 118 |
eigen-boi/py_code | 1,589,137,939,076 | b26d66398905706044b17faa331fd2881c0d2e3a | 0f3457588cad5b6b03dcb3f59258b83f783e89aa | /tagger_settings.py | bf89613326c59943c8ddc979f97a0e4adf358260 | []
| no_license | https://github.com/eigen-boi/py_code | fa03caf4b027b6cd1d55d1f382018f4285d9d1f6 | 47852ce6e0605ea4ca830b1f5e3ca03b51ee4e9f | refs/heads/master | 2022-12-03T04:32:05.775972 | 2020-08-28T18:41:08 | 2020-08-28T18:41:08 | 284,128,519 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # tagger_settings.py
# define global variables
root_dir = ""
ev_dir = ""
data_dir = ""
data_file_arr = []
new_data_file_arr = []
mc_file_arr = []
identifier = ""
bi_z_min = 1400.0 # [mm] # float
bi_r_min = 2000.0 # [mm] # float
bi_r_max = 6000.0 # [mm] # float
bi_nhit_cleaned_min = 250 # int
po_z_min = 850.0 # [mm] # float
po_r_max = 6000.0 # [mm] # float
po_nhit_cleaned_min = 150 # int
po_nhit_cleaned_max = 350 # int
bipo_delta_r_max = 1000.0 # [mm]
bipo_delta_t_min = 3690.0 # [ns]
bipo_delta_t_max = 1800000.0 # [ns]
is_mc = True
fitName = "partialFitter"
counts = [0, 0] # count in single file (most recent), count across all files | UTF-8 | Python | false | false | 732 | py | 9 | tagger_settings.py | 8 | 0.546448 | 0.474044 | 0 | 29 | 24.275862 | 76 |
dbzahariev/Python-and-Django | 11,931,419,162,225 | 4551e3975d2d226a2bdd1e6fb033e49039755270 | e292ea10855a50c8098ede4da1fd4d0896323e8c | /Python-Basic/lec_2_Simple_Operations_And_Calculations/08_Celsius_To_Fahrenheit.py | 433c34440735253a1909fdeb72e71468aac6cb81 | []
| no_license | https://github.com/dbzahariev/Python-and-Django | f794212a21158d524bd1a7d9d5411cd58ba65f3e | ba15758db3ee0726a7e5c80c96c2b106206ae66a | refs/heads/master | 2020-05-18T18:44:35.030202 | 2019-05-25T13:24:35 | 2019-05-25T13:24:35 | 184,572,887 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | c = float(input())
f = float(c * 1.8 + 32)
print(float("{0:.2f}".format(f)))
| UTF-8 | Python | false | false | 77 | py | 113 | 08_Celsius_To_Fahrenheit.py | 109 | 0.545455 | 0.467532 | 0 | 3 | 24.666667 | 33 |
SPKUMAR65/webscraping | 3,616,362,484,920 | 3beb7eaf9967d1b814dc264f2fcc904fa5bf374f | d574670f72c79a0222b3a5ee66f7336d03a2562d | /new.py | f71a53a8288967705f92b9abef9352877724e989 | []
| no_license | https://github.com/SPKUMAR65/webscraping | b00fd97c5e17631e63847ec2141d95dee00724f3 | 4f22cd6060afd7eabc7acf5007f670fedb818179 | refs/heads/master | 2021-04-13T01:53:27.933976 | 2020-03-22T06:44:22 | 2020-03-22T06:44:22 | 249,126,532 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
my_url = 'https://www.flipkart.com/search?q=iphone&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off'
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
containers = page_soup.findAll("div", {"class":"_1UoZlX"})
#print(len(containers))
print(containers[0].prettify())
container=containers[0]
print(container.div.img["alt"])
price=container.findAll("div",{"class":"col col-5-12 _2o7WAb"})
print(price[0].text)
ratings=container.findAll("div",{"class":"niH0FQ"})
#print(ratings[0].text)
'''
filename = "products.csv"
f=open(filename, "w")
headera="procductname,Pricing,Rating\n"
f.write(headera)
for container in containers:
product_name=container.div.img["alt"]
price_container = container.findAll("div", {"class":"col col-5-12 _2o7WAb"})
price = price_container[0].text.strip()
ratings_container = container.findAll("div",{"class":"niH0FQ"})
ratings = ratings_container[0].text
#print("ProduactName : " + product_name)
#print("Price : " + price)
#print("Ratings : " + ratings)
trim_price = ''.join(price.split(','))
rm_rupee = trim_price.split(('₹'))
add_rs_rupee = "Rs."+rm_rupee[1]
split_price = add_rs_rupee.split('e')
final_price = split_price[0]
split_rating = ratings.split(" ")
final_rating = split_rating[0]
print(product_name.replace(",","|")+","+final_price+","+final_rating+'\n')
f.write(product_name.replace(",","|")+","+final_price+","+final_rating+'\n')
f.close()'''
| UTF-8 | Python | false | false | 1,693 | py | 5 | new.py | 5 | 0.643406 | 0.629213 | 0 | 56 | 28.125 | 123 |
chriscauley/intro_to_programming | 14,216,341,793,251 | 907377cf79bfe44babec450d85ee7f701c74ae62 | d96b05d37e331ae79294c631a295d8f125cdd702 | /room3.py | f9e544f06218e4b3e3cda0e4186443f2fa91f77b | []
| no_license | https://github.com/chriscauley/intro_to_programming | 1bf205cf9ea8821251c03e100cbf7032fcb38452 | fcca0eab59c530eac3fb3d6139f6be4be91445e0 | refs/heads/master | 2016-09-06T01:58:22.889555 | 2013-06-06T00:10:45 | 2013-06-06T00:10:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def number_input(question):
""" Just like raw_input, but forces a number and returns an integer """
while True:
input = raw_input(question)
if input.isdigit():
break
else:
print "Invalid input. Please enter a number."
return int(input)
length = number_input("What is the length of the room?")
width = number_input("What is the width of the room?")
height = number_input("What is the height of the room?")
perimeter = 2 * int(length) + 2 * int(width)
wall_area = perimeter * int(height)
floor_area = length * width # also the ceiling_area!
amounts = {
"trim": perimeter,
"paint": (wall_area + floor_area) / 350, #assuming 350 sq ft per gallon of paint
"carpet": floor_area,
}
prices = {
"trim": 3, #$/ft
"paint": 20, #$/gallon
"carpet": 30, #$/sq ft
}
estimates = {}
total = 0
for key in amounts:
estimates[key] = prices[key]*amounts[key]
print key.upper() + " estimate: $" + str(estimates[key])
total = total + estimates[key]
print "Total estimated cost: $" +str(total)
| UTF-8 | Python | false | false | 1,073 | py | 13 | room3.py | 10 | 0.621622 | 0.608574 | 0 | 39 | 26.512821 | 84 |
gibo-neurips-2021/GIBO | 3,934,190,084,710 | 633176a559f2922980215f49cff86f25ca4beb7d | 82c855bf18a67d9ac2dee8f5599d3a7bccbd1e31 | /gym-lqr/gym_lqr/envs/__init__.py | 9c5ad46d2711efc3e32306bf2ec79d468221bbbd | []
| no_license | https://github.com/gibo-neurips-2021/GIBO | fe414ae69c3cfab843e12585471c35d990c07d21 | f5603a77cbcec26422ef5131261ddc7d48e0d127 | refs/heads/main | 2023-04-30T01:06:01.954125 | 2021-05-28T13:12:18 | 2021-05-28T13:12:18 | 369,857,789 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gym_lqr.envs.lqr_env import LQR_Env
| UTF-8 | Python | false | false | 41 | py | 42 | __init__.py | 16 | 0.780488 | 0.780488 | 0 | 1 | 40 | 40 |
AmrAymanKhalil505/CodeForces | 4,836,133,207,694 | c4601fbabe8dfbd1163f8d542d0353a5689e91ba | ca104f22975c8b157603690c6f5295471042a1d9 | /Python/A675InfiniteSequence.py | f4e3cb50c4480eb46a1e1031d5a3a74bb0ad8a28 | []
| no_license | https://github.com/AmrAymanKhalil505/CodeForces | 73acca3912a7ffa9fe667b5147817621d48003a8 | 1c4cccde82801a84c64601223999398adb7da03f | refs/heads/master | 2021-06-05T12:00:16.638321 | 2021-05-14T15:08:09 | 2021-05-14T15:08:09 | 153,451,550 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
line = input().split(sep=" ")
start = int(line[0])
fav = int(line[1])
inc= int(line[2])
if(fav == start):
print("YES")
elif( inc == 0 ):
print("NO")
else:
    diff = fav - start
    # fav is reachable iff diff is a non-negative integer multiple of inc
    if diff % inc == 0 and diff // inc >= 0:
        print("YES")
    else:
        print("NO")
# if (start < fav and inc > 0 ):
# if((fav-start)%inc == 0 ):
# print("YES")
# else:
# print("NO")
#
# elif (start > fav and inc < 0 ):
#
# elif (start = fav and inc = 0 ):
# else :
# print("NO")
| UTF-8 | Python | false | false | 661 | py | 93 | A675InfiniteSequence.py | 93 | 0.472012 | 0.455371 | 0 | 32 | 19.625 | 66 |
levyvix/Python-random-files | 7,859,790,156,487 | dd87aa75e62ef1e6e193106120c38e7b49544256 | 5605b52cd24537aeaec9467e849efe89a9b7e2ce | /python projects/df3 (1).py | da3dca65c5310d0799d7306475e7f829683a386c | [
"MIT"
]
| permissive | https://github.com/levyvix/Python-random-files | 030d152688e67d6d5ea1503569e882a8b5e0fb8b | 1a8601f006fcb2273b3b65e26a7663c87991e9b9 | refs/heads/master | 2020-03-27T21:02:33.299464 | 2018-09-02T19:55:37 | 2018-09-02T19:55:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n1=input('Numero1: ')
n2=input('Numero2: ')
soma=int(n1)+int(n2)
print('A soma de',n1,'e',n2,'é',soma)
| UTF-8 | Python | false | false | 105 | py | 24 | df3 (1).py | 23 | 0.615385 | 0.538462 | 0 | 5 | 19.8 | 37 |
kunal27071999/KunalAgrawal_week_1 | 13,408,887,918,191 | ecf92d71835a6bb0fa4e979ec45f72856635cd04 | 1f8ddb3d390a24fc7e634c14da2015ee471572b6 | /Week_1_conditional/Week_1_conditional_Q25.py | fd18b56d02bf7deff569c3bb1e347d67f11dff8f | []
| no_license | https://github.com/kunal27071999/KunalAgrawal_week_1 | 6464a3fb3b3acc4223fa981b28f236a123e23ada | 4d7d3219ecfe8c42b06748705ffda0f36e471398 | refs/heads/main | 2023-05-02T11:25:53.954164 | 2021-05-21T17:36:28 | 2021-05-21T17:36:28 | 364,576,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """25. WAP to compyte the log of 2 by adding n terms in the series
1 - 1/2 + 1/3 - 1/4 + 1/5 -... 1/n where n is a positive number"""
n = 10
for i in range(1,n):
sum = sum + 1/i
| UTF-8 | Python | false | false | 185 | py | 141 | Week_1_conditional_Q25.py | 139 | 0.572973 | 0.481081 | 0 | 6 | 29.666667 | 67 |
mooooondh/Programmers | 10,703,058,527,829 | 3996af3e428edb8916e725f2f9150a449f93513c | 194fca2f738235bf842da8b13dd1b1e9fc826967 | /Graph_01/graph_01.py | ed8aeaf4173f2ed2dc9521ceb16e211c5a832631 | []
| no_license | https://github.com/mooooondh/Programmers | ab9dba65186a7373d180e0613963a81a8201217c | c9b8049c3f027e27718d4234e0bca340a6354d79 | refs/heads/master | 2023-03-05T04:23:53.596495 | 2021-02-17T12:59:19 | 2021-02-17T12:59:19 | 304,018,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
def bfs(position, graph, distance):
que= deque()
que.append(position)
while(que):
now= que.popleft()
for i in graph[now]:
if(distance[i]== 0):
que.append(i)
distance[i]= distance[now]+ 1
return distance
def solution(n, edge):
graph= [[] for _ in range(n+ 1)]
distance= [0]* (n+ 1)
distance[1]= 1
for i in edge:
graph[i[0]].append(i[1])
graph[i[1]].append(i[0])
bfs(1, graph, distance)
max_val= max(distance)
answer= distance.count(max_val)
return answer
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]])) | UTF-8 | Python | false | false | 692 | py | 53 | graph_01.py | 38 | 0.524566 | 0.485549 | 0 | 31 | 21.354839 | 76 |
kamit13/python | 15,092,515,081,411 | db0be86e766913a1dbc540a9b6e1075db29b7adf | 3d67c0d4c23ede3f0e9ca8f8bb18319714f3af7f | /importingDataWithPanda.py | 47101db8ec76ac71dff4d95d8e27af58d7cd23d8 | []
| no_license | https://github.com/kamit13/python | b3574016df1cde4e6f45ee26f44f9796cf2c2732 | 98bedb2720f8149f3ec20036b00c354f539f758f | refs/heads/master | 2021-01-02T05:14:14.608098 | 2020-02-15T17:57:48 | 2020-02-15T17:57:48 | 239,503,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #importing the module of the pandas
import pandas as pd
#reading the india.csv into data frame
df=pd.read_csv("/home/amit/Documents/india.csv")
#print the first 5 rows of the data frame
#print(df.head())
#getting the no of rows and columns in the csv file
#print(df.shape)
#printing the five rows
#print(df.iloc[0:5,:])
#print(df.iloc[:,:])
# prints from 5th rows and first 5 columns
#print(df.iloc[5:,:5])
#print(df.loc[:5,"Time period"])
#print(df.describe())
# for computing correlations
print(df.corr())
# computes numerical data ranks
print(df.rank()) | UTF-8 | Python | false | false | 562 | py | 37 | importingDataWithPanda.py | 37 | 0.715302 | 0.701068 | 0 | 19 | 28.578947 | 52 |
minrk/ipython-svn-archive | 14,731,737,868,700 | 87ad8a662673961306ab1fe7765e171da0a2251b | 92eec9d251e24f48fa42160e5061c5fb3120fdbb | /ipython/branches/sprint1/ipython1/core1/test/tcommon.py | 5e777d844e9ebb69d20352d7b4151baf85400a69 | []
| no_license | https://github.com/minrk/ipython-svn-archive | 55dde54f52284c5c3b32ac2a12bb4734e5f8e7f8 | 9b32089282c94c706d819333a3a2388179e99e86 | refs/heads/master | 2022-12-24T19:50:01.359278 | 2020-09-25T09:34:04 | 2020-09-25T09:34:04 | 298,268,923 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Common utilities for testing IPython.
This file is meant to be used as
from tcommon import *
by any test code.
While a bit ugly, this helps us keep all testing facilities in one place, and
start coding standalone test scripts easily, which can then be pasted into the
larger test suites without any modifications required.
"""
# Required modules and packages
# Standard Python lib
import cPickle as pickle
import doctest
import math
import os
import sys
import unittest
from pprint import pformat, pprint
# path to our own installation, so we can find source files under this.
TEST_PATH = os.path.dirname(os.path.abspath(__file__))
# From Ian Bicking's http://svn.colorstudy.com/home/ianb/recipes/minimock.py:
# (c) 2006 Ian Bicking and contributors
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
r"""
minimock is a simple library for doing Mock objects with doctest.
When using doctest, mock objects can be very simple.
Here's an example of something we might test, a simple email sender::
>>> import smtplib
>>> def send_email(from_addr, to_addr, subject, body):
... conn = smtplib.SMTP('localhost')
... msg = 'To: %s\nFrom: %s\nSubject: %s\n\n%s' % (
... to_addr, from_addr, subject, body)
... conn.sendmail(from_addr, [to_addr], msg)
... conn.quit()
Now we want to make a mock ``smtplib.SMTP`` object. We'll have to
inject our mock into the ``smtplib`` module::
>>> smtplib.SMTP = Mock('smtplib.SMTP')
>>> smtplib.SMTP.mock_returns = Mock('smtp_connection')
Now we do the test::
>>> send_email('ianb@colorstudy.com', 'joe@example.com',
... 'Hi there!', 'How is it going?')
Called smtplib.SMTP('localhost')
Called smtp_connection.sendmail(
'ianb@colorstudy.com',
['joe@example.com'],
'To: joe@example.com\nFrom: ianb@colorstudy.com\nSubject: Hi there!\n\nHow is it going?')
Called smtp_connection.quit()
Voila! We've tested implicitly that no unexpected methods were called
on the object. We've also tested the arguments that the mock object
got. We've provided fake return calls (for the ``smtplib.SMTP()``
constructor). These are all the core parts of a mock library. The
implementation is simple because most of the work is done by doctest.
"""
class Mock(object):
def __init__(self, name):
self.mock_name = name
self.mock_returns = None
self.mock_attrs = {}
def __repr__(self):
return '<Mock %s %s>' % (hex(id(self)), self.mock_name)
def __call__(self, *args, **kw):
parts = [repr(a) for a in args]
parts.extend(
'%s=%r' % (items) for items in sorted(kw.items()))
msg = 'Called %s(%s)' % (self.mock_name, ', '.join(parts))
if len(msg) > 80:
msg = 'Called %s(\n %s)' % (
self.mock_name, ',\n '.join(parts))
print msg
return self.mock_returns
def __getattr__(self, attr):
if attr not in self.mock_attrs:
if self.mock_name:
new_name = self.mock_name + '.' + attr
else:
new_name = attr
self.mock_attrs[attr] = Mock(new_name)
return self.mock_attrs[attr]
| UTF-8 | Python | false | false | 3,274 | py | 473 | tcommon.py | 353 | 0.634392 | 0.63256 | 0 | 100 | 31.68 | 97 |
lhaoxuan/gems_server | 5,669,356,844,235 | a62953e46ce4c0fcd5aceb1a5c4a34cd9cea032d | 2bda0c9af2a2f33d5569a39827681dac12850a59 | /models/user_modified.py | 5436a9822a7fb0508f386c0fb9e90bce8bc55011 | []
| no_license | https://github.com/lhaoxuan/gems_server | f1a284fd3b1122b81f9bf04f5bb4b62101c2e13f | 61991f369539c0a64e1b36b9ea771833ea4643c2 | refs/heads/master | 2020-06-04T07:00:27.871465 | 2016-02-17T06:48:07 | 2016-02-17T06:48:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-*- coding: utf-8 -*-
import time
from models import GameModel
from common.exceptions import *
class UserModified(GameModel):
""" 1.处理需通知前端及时更新的数据
2.临时数据,例如保存进战场时的信息,
以便出战场时的结算
Attributs:
guide_flags(dir): 根据value决定key功能的展示方式(0 正常显示, 1不提示但引导, 2提示且引导)
arena 竞技场
pvp 天梯
invade 城战
charactor 主角系统
gacha 抽卡
cards 图鉴系统
task 任务
challenge 挑战模式
team_index_normal 获得新的军旗并进入除竞技场与城战防守外任意军旗界面-1默认转态
team_index_special 玩家第一次获得新的军旗并进入竞技场军旗界面
"""
def __init__(self, uid=''):
        # player attribute data
self.uid = uid
self.modified = {}
self.temp = {}
        self.guide_flags = {}  # feature-guide flags
self.dungeon = {}
@classmethod
def create(cls, uid):
obj = cls(uid)
obj.guide_flags = {
'charactor': 0,
'cards': 1,
'gacha': 1,
'task': 1,
'challeng': 0,
'team_index_normal': 0,
'team_index_special': 0,
}
return obj
def set_modify_info(self, thing, info=None):
if thing == 'cards':
if 'cards' in self.modified:
for card_id in info:
if card_id in self.modified['cards']:
self.modified['cards'][card_id].update(info[card_id])
else:
self.modified['cards'][card_id] = info[card_id]
else:
self.modified['cards'] = info
elif thing == 'cities':
if 'cities' in self.modified:
for city_id in info:
if city_id in self.modified['cities']:
self.modified['cities'][city_id].update(info[city_id])
else:
self.modified['cities'][city_id] = info[city_id]
else:
self.modified['cities'] = info
else:
self.modified[thing] = info
self.put()
def update_modify(self, new_info_dict):
for item, info in new_info_dict.items():
self.set_modify_info(item, info)
self.put()
def add_dungeon_info(self, dungeon_type, info=None):
if info is None:
info = {}
info['time'] = time.time()
self.dungeon[dungeon_type] = info
self.put()
def clear_dungeon_info(self, dungeon_type):
if dungeon_type not in self.dungeon:
raise LogicError("End the wrong fight")
info = self.dungeon.pop(dungeon_type)
self.put()
return info
def has_dungeon_info(self, dungeon_type):
return dungeon_type in self.dungeon
def get_dungeon_info(self, dungeon_type):
""" 检查是否由此战场信息
"""
if dungeon_type not in self.dungeon:
raise LogicError("End the wrong fight")
return self.dungeon[dungeon_type]
def get_flags(self):
""" 主页红点标志 """
flags = []
uproperty = self.user_property
ucards = self.user_cards
utask = self.user_task
uinvade = self.user_invade
uitems = self.user_items
        # has unspent nature points
if uproperty.nature_remain:
flags.append("charactor")
        # has new cards
if ucards.new_card_num:
flags.append("cards")
        # has completed or newly available tasks
if utask.has_new_task:
flags.append("task")
        # has new defense logs
if uinvade.has_new_history:
flags.append("invade")
        # can draw cards (gacha)
if uitems.get_item_num("gachadiamond_item") or uitems.get_item_num("gachacoin_item"):
flags.append("gacha")
return flags
def has_guide_flags(self, flag):
return flag in self.guide_flags
def set_guide_flags(self, flag, sign):
self.guide_flags[flag] = sign
def del_guide_flags(self, flag):
if flag in self.guide_flags:
self.guide_flags.pop(flag)
| UTF-8 | Python | false | false | 4,473 | py | 84 | user_modified.py | 51 | 0.507232 | 0.5038 | 0 | 133 | 29.601504 | 93 |
ivan-guerra/site_problems | 9,955,734,240,079 | b911507b35a4f8fc01b9cf6c491f40d8b007264c | 087f657ab3a8e97bfc0bfc7dad344ea3235e6765 | /hacker_rank/python/itertools/iter_4.py | dc6d3713030686af118b26146fbd38d380e5049b | [
"MIT"
]
| permissive | https://github.com/ivan-guerra/site_problems | 8b68d4fb94f236da107c34b919053683c34d8194 | 069c6d62f6d00c815a1c033953ec48cd3520fd61 | refs/heads/master | 2018-02-13T03:44:15.712715 | 2017-05-18T23:57:56 | 2017-05-18T23:57:56 | 64,614,183 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Itertools.combinations_with_replacement"""
from itertools import combinations_with_replacement
s, k = input().split()
k = int(k)
combos = list(combinations_with_replacement(sorted(s),k))
formatted_combos = [''.join(x) for x in combos]
for x in formatted_combos:
print(x)
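
# Illustration with a fixed string instead of stdin: sorted("HACK") with k=2
# yields ['AA', 'AC', 'AH', 'AK', 'CC', 'CH', 'CK', 'HH', 'HK', 'KK'].
demo = [''.join(c) for c in combinations_with_replacement(sorted("HACK"), 2)]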
| UTF-8 | Python | false | false | 281 | py | 263 | iter_4.py | 263 | 0.72242 | 0.72242 | 0 | 11 | 24.545455 | 57 |
koliambus/mbu-ukraine | 17,549,236,374,843 | 54653a5d5491e59819ca01db8ee6d0db7f481ae9 | b9473fd5e8d5ed099b710c9241e6605da5ab3c64 | /ragoogle/spiders/ternopil.py | 9227f6c87f223e0138bf2651cdc5a113b17f13e1 | []
| no_license | https://github.com/koliambus/mbu-ukraine | e4ae923e6b01f33bb48c4e2cf4a2d018e7cfc95e | dc6ddda6dc57d93feb2560440e607d3ef156f5e7 | refs/heads/master | 2023-08-28T16:43:06.212463 | 2019-10-24T21:20:25 | 2019-10-24T21:20:25 | 205,086,064 | 0 | 0 | null | false | 2023-08-11T19:49:30 | 2019-08-29T05:19:01 | 2019-10-25T09:06:33 | 2023-08-11T19:49:27 | 55 | 0 | 0 | 3 | Python | false | false | # -*- coding: utf-8 -*-
import scrapy
import re
from ragoogle.items.ternopil import MbuItem
from ragoogle.loaders import StripJoinItemLoader
class TernopilSpider(scrapy.Spider):
location_name = "Тернопіль"
name = "ternopil"
allowed_domains = ["tmrada.gov.ua"]
start_urls = ["https://tmrada.gov.ua/normative-documents/mistobudivni-umovi-y-obmegennya-pasporti-privyazki/4361.html"]
custom_settings = {
# specifies exported fields and order
'FEED_EXPORT_FIELDS': ["location_name", "number_in_order", "order_no", "order_date", "customer", "obj",
"address", "changes", "cancellation", "scan_url"],
}
def parse(self, response):
# only first table with data
for index, row in enumerate(response.css("div.post-body>table:first-of-type>tbody>tr")):
# first two are headers, skip
if index < 2:
continue
l = StripJoinItemLoader(item=MbuItem(), selector=row)
# because of errors in html, get td from current root only
l.add_xpath("number_in_order", "./td[position()=1]/span/text()|./td[position()=1]/p/span/text()", re=r"(\d+)\s?")
l.add_css("order_no", "td:nth-child(2) p span::text, td:nth-child(2) span::text", re=r"^\s*№ ?(.*)\s?від")
l.add_css("order_date", "td:nth-child(2) p span::text, td:nth-child(2) span::text", re=r"(\d{1,2}[\. /]?\d{1,2}[\. /]?\d{2,4})[\sр\.]*$")
l.add_css("customer", "td:nth-child(3) p span::text, td:nth-child(3) span::text")
l.add_css("obj", "td:nth-child(4) p span::text, td:nth-child(4) span::text")
l.add_css("address", "td:nth-child(5) p span::text, td:nth-child(5) span::text")
l.add_css("changes", "td:nth-child(6) p span::text, td:nth-child(6) span::text")
l.add_css("cancellation", "td:nth-child(7) p span::text, td:nth-child(7) span::text")
url = row.css("td:nth-child(8) p span a::attr(href), td:nth-child(8) span a::attr(href), td:nth-child(8) a::attr(href)").extract_first()
if url:
l.add_value("scan_url", response.urljoin(url))
yield l.load_item()
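
# Illustration: a standalone check of the order-number / order-date regexes
# used above, applied to a hypothetical cell text (not taken from the site).
if __name__ == "__main__":
    import re
    sample = "№ 123 від 05.04.2018 р."
    print(re.search(r"^\s*№ ?(.*)\s?від", sample).group(1).strip())  # -> 123
    print(re.search(r"(\d{1,2}[\. /]?\d{1,2}[\. /]?\d{2,4})[\sр\.]*$", sample).group(1))  # -> 05.04.2018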
| UTF-8 | Python | false | false | 2,210 | py | 28 | ternopil.py | 26 | 0.58451 | 0.570387 | 0 | 43 | 50.046512 | 149 |
Shirui816/MDAnalysisNumpy | 3,169,685,909,655 | ea53688c8bcd5fd5c738d8d0d8c6871ce0cbb066 | a08a443bfe7ecc8ba5d9de416c41436413c9ac51 | /DataStructure/DtypeDict.py | 7d10e73170fb438e1c6df071a627212f2d41f4b9 | []
| no_license | https://github.com/Shirui816/MDAnalysisNumpy | a4c613493b11efa48b7787cd315ab46df967df0e | d7a4a38d5b4e946f86d0a5545525a5bf2747dba1 | refs/heads/master | 2020-04-10T22:12:12.555894 | 2016-11-21T06:30:36 | 2016-11-21T06:30:36 | 56,371,026 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
Bond = {'names': ('name', 'id1', 'id2'), 'formats': ('S16', 'i8', 'i8')}
Angle = {'names': ('name', 'id1', 'id2', 'id3'), 'formats': ('S16', 'i8', 'i8', 'i8')}
Dihedral = {'names': ('name', 'id1', 'id2', 'id3', 'id4'), 'formats': ('S16', 'i8', 'i8', 'i8', 'i8')}
Type = {'names': 'type', 'formats': 'S1'}
Type = np.dtype('S1')  # overrides the dict form above with a plain dtype
Body = np.dtype('i8')
Mass = np.dtype('f8')
Pos = {'names': ('x', 'y', 'z'), 'formats': ('f8', 'f8', 'f8')}
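# Illustration: the dicts above are valid numpy structured dtypes; e.g. a
# hypothetical array of two bonds with fields name/id1/id2:
if __name__ == '__main__':
    print(np.zeros(2, dtype=np.dtype(Bond)))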
dtypeDict = dict(bond=Bond, angle=Angle, dihedral=Dihedral, type=Type, body=Body) | UTF-8 | Python | false | false | 537 | py | 30 | DtypeDict.py | 28 | 0.52514 | 0.467412 | 0 | 12 | 43.833333 | 102 |
skylightcyber/mythril-classic | 25,769,844,699 | 3f1622ad40830bfcc1eef63496916896a9bff70e | ef6c7cb98026683afdf746fdc72df3f57dc22139 | /mythril/laser/ethereum/plugins/__init__.py | 8265e162d319771bac3188d4e530233d0033b153 | [
"MIT"
]
| permissive | https://github.com/skylightcyber/mythril-classic | 67073e89b13c6b0c7edef6f75657a1ede2c0ce3e | 8254bf1f5c336ab7ad37216fbec1333d7c11990f | refs/heads/develop | 2020-05-07T22:07:43.075507 | 2019-04-10T17:53:40 | 2019-04-10T17:53:40 | 180,933,574 | 0 | 1 | MIT | true | 2019-04-12T05:01:41 | 2019-04-12T05:01:39 | 2019-04-12T01:12:41 | 2019-04-10T17:53:41 | 51,569 | 0 | 0 | 0 | null | false | false | """ Laser plugins
This module contains everything to do with laser plugins
Laser plugins are a way of extending laser's functionality without complicating the core business logic.
Different features that have been implemented in the form of plugins are:
- benchmarking
- path pruning
Plugins also provide a way to implement optimisations outside of the mythril code base and to inject them.
The api that laser currently provides is still unstable and will probably change to suit our needs
as more plugins get developed.
For the implementation of plugins the following modules are of interest:
- laser.plugins.plugin
- laser.plugins.signals
- laser.svm
Which show the basic interfaces with which plugins are able to interact
"""
from mythril.laser.ethereum.plugins.signals import PluginSignal
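
# Illustration: a rough sketch of what a plugin built on these modules might
# look like. The hook name ``initialize`` and its ``symbolic_vm`` argument are
# assumptions for illustration only; the real (still unstable) interface is
# defined in laser.plugins.plugin and laser.svm.
class _ExamplePluginSketch:
    """Hypothetical plugin skeleton; not part of the shipped plugins."""

    def initialize(self, symbolic_vm):  # hook name/signature assumed
        self.signals_seen = 0
        # a real plugin would register its callbacks on symbolic_vm here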
| UTF-8 | Python | false | false | 798 | py | 93 | __init__.py | 37 | 0.813283 | 0.813283 | 0 | 21 | 37 | 106 |
JamesPriest/AOC | 13,357,348,296,653 | a4f929f5044ab09cad70766ca0ace6da00a1b1c9 | c4da0b2aa0e33d472b45ab17ab2391f660da4beb | /2018/13/mine_cart_madness.py | 995853a26661af7266e838181dd6ee8fc47fc517 | []
| no_license | https://github.com/JamesPriest/AOC | 06e29cdd22036021b843af0a7c9e0e34fe5aabed | 587a6b47625b264a5d0781414701b411c175f27b | refs/heads/master | 2021-12-24T05:29:41.189552 | 2021-12-18T10:34:21 | 2021-12-18T10:34:21 | 160,487,811 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def parse_map():
    # Think about the representation of the cart in memory.
pass
class cart:
def __init__(self):
self.turn = "left"
def get_intersection_turn(self):
last_direction = self.turn
turn_mapping = {"left":"straight", "straight":"right", "right":"left"}
self.turn = turn_mapping[last_direction]
return self.turn
| UTF-8 | Python | false | false | 377 | py | 42 | mine_cart_madness.py | 35 | 0.607427 | 0.607427 | 0 | 14 | 25.928571 | 78 |
AsiganTheSunk/python-torcurl | 5,695,126,644,657 | 7e09f1eb565b2ccc35d9b16546817edda11616b5 | 9061cf35d300ba84113a9e86e897ee7594e14353 | /torcurl/TorPyCurl.py | 806edc602ddd664cbbe22bf3bb5c0ec5617cd996 | [
"MIT"
]
| permissive | https://github.com/AsiganTheSunk/python-torcurl | cd1b98b6e58c217e57463c345d058efadafc8a3f | 9489a3b9b0e45bd0ff1f5279d8e4c8ed71a460f7 | refs/heads/master | 2021-01-01T19:45:27.984041 | 2017-09-06T18:06:19 | 2017-09-06T18:06:19 | 98,675,120 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pycurl
from cStringIO import StringIO
from urllib import urlencode
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from ProxyRotator import ProxyRotator
from Response import Response
# from torcurl2.listeners import ExitRelayListener as erl
LOCAL_HOST = '127.0.0.1'
class TorPyCurl():
"""Class
Attributes:
None ---
"""
def __init__(self):
self.proxy_rotator = ProxyRotator()
self.handler = pycurl.Curl()
self.user_agent = UserAgent()
# TODO read configuration from *.conf
        # TODO encrypt the password... it should not be kept in cleartext
def reset_handler(self):
"""Function
Attributes:
None ---
"""
self.handler.close()
self.handler = pycurl.Curl()
def _proxy_setup(self):
"""Function _proxy_setup
Attributes:
None ---
"""
tor_instance = self.proxy_rotator.get_tor_instance()
# More reliable way of counting this
tor_instance.add_connection_use_count()
# Setup tor curl options
tor_proxy_port = tor_instance.socks_port
tor_proxy_ip = tor_instance.proxy_ip
self.handler.setopt(pycurl.PROXY, tor_proxy_ip)
self.handler.setopt(pycurl.PROXYPORT, tor_proxy_port)
self.handler.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
def _curl_setup(self,url, headers={}, attrs={}, ssl=True, timeout=15):
"""Function _curl_setup
Attributes:
            url -- URL of the request
            headers -- headers of the request
            attrs -- attrs of the request
            ssl -- SSL peer-verification flag; defaults to True
            timeout -- request timeout in seconds; defaults to 15
user_agent -- user agent for the request
"""
if attrs:
url = "%s?%s" % (url, urlencode(attrs))
self.handler.setopt(pycurl.URL, str(url))
headers = map(lambda val: "%s: %s" % (val, headers[val]), headers)
self.handler.setopt(pycurl.HTTPHEADER, headers)
self.handler.setopt(pycurl.TIMEOUT, timeout)
self.handler.setopt(pycurl.SSL_VERIFYPEER, ssl)
self.handler.setopt(pycurl.USERAGENT, self.user_agent.random)
def _curl_perform(self):
"""Function _curl_perform
        Attributes:
            None ---
"""
response_buffer = StringIO()
self.handler.setopt(pycurl.WRITEFUNCTION, response_buffer.write)
self.handler.perform()
code = self.handler.getinfo(pycurl.RESPONSE_CODE)
type = self.handler.getinfo(pycurl.CONTENT_TYPE)
data = response_buffer.getvalue()
response_buffer.close()
return Response(code, type, data)
def get(self, url='https://check.torproject.org/', headers={}, attrs={}, ssl=True, timeout=15):
"""Function get
Attributes:
            url -- URL of the request
            headers -- headers of the request
            attrs -- attrs of the request
            ssl -- SSL peer-verification flag; defaults to True
            timeout -- request timeout in seconds; defaults to 15
"""
# Reset of the Curl instance, to ensure that the new exitRelay works properly
self.reset_handler()
# Set request type: GET
self.handler.setopt(pycurl.HTTPGET, True)
# Common
self._proxy_setup()
self._curl_setup(url=url, headers=headers, attrs=attrs, ssl=ssl, timeout=timeout)
try:
return self._curl_perform()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
def post(self, url=None, headers={}, attrs={}, ssl=True, timeout=15):
"""Function post
Attributes:
            url -- URL of the request
            headers -- headers of the request
            attrs -- attrs of the request
            ssl -- SSL peer-verification flag; defaults to True
            timeout -- request timeout in seconds; defaults to 15
"""
# Reset of the Curl instance, to ensure that the new exitRelay works properly
self.reset_handler()
# Set request type: POST
self.handler.setopt(pycurl.POST, True)
self.handler.setopt(pycurl.POSTFIELDS, urlencode(attrs))
# Common
self._proxy_setup()
self._curl_setup(url=url, headers=headers, attrs=attrs, ssl=ssl, timeout=timeout)
try:
return self._curl_perform()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
def put(self, url, headers={}, attrs={}, ssl=True, timeout=15):
"""Function put
Attributes:
            url -- URL of the request
            headers -- headers of the request
            attrs -- attrs of the request
            ssl -- SSL peer-verification flag; defaults to True
            timeout -- request timeout in seconds; defaults to 15
"""
# Reset of the Curl instance, to ensure that the new exitRelay works properly
self.reset_handler()
# Set request type: PUT
encoded_attrs = urlencode(attrs)
request_buffer = StringIO(encoded_attrs)
self.handler.setopt(pycurl.PUT, True)
self.handler.setopt(pycurl.READFUNCTION, request_buffer.read)
self.handler.setopt(pycurl.INFILESIZE, len(encoded_attrs))
# Common
self._proxy_setup()
self._curl_setup(url=url, headers=headers, attrs=attrs, ssl=ssl, timeout=timeout)
try:
return self._curl_perform()
except pycurl.error, error:
errno, errstr = error
            print 'An error occurred: ', errstr
def delete(self, url, headers={}, attrs={}, ssl=True, timeout=15):
"""Function delete
Attributes:
            url -- URL of the request
            headers -- headers of the request
            attrs -- attrs of the request
            ssl -- SSL peer-verification flag; defaults to True
            timeout -- request timeout in seconds; defaults to 15
"""
# Reset of the Curl instance, to ensure that the new exitRelay works properly
self.reset_handler()
# Set request type: DELETE
self.handler.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
# Common
self._proxy_setup()
self._curl_setup(url=url, headers=headers, attrs=attrs, ssl=ssl, timeout=timeout)
try:
return self._curl_perform()
except pycurl.error, error:
errno, errstr = error
            print 'An error occurred: ', errstr
def validate(self):
"""Function validate
Attributes:
None ---
"""
url = 'https://check.torproject.org/'
ssl = True
timeout = 15
try:
response = self.get(url=url, ssl=ssl, timeout=timeout)
soup = BeautifulSoup(response.data, 'html.parser')
status = soup.findAll('h1', {'class': 'not'})
current_address = soup.findAll('p')[0]
print 'TorPyCurl Connection address: ' + str(current_address.strong.text)
if 'Congratulations.' in str(status[0].text).strip():
print 'TorPyCurl Status: Connection PASS'
else:
print 'TorPyCurl Status: Connection FAIL'
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
def _dns_leak_test(self):
        # POST plus at least two cookies are required; use Tamper Data to inspect them
"""Function dns_leak_test
Attributes:
None ---
"""
url = 'https://www.perfect-privacy.com/check-ip/'
ssl = True
timeout = 15
try:
response = self.get(url=url, ssl=ssl, timeout=timeout)
soup = BeautifulSoup(response.data, 'html.parser')
'''
token = (soup.findAll('a', {'id': 'startbtn'}))[0]['href']
print str(url+token)
response = self.post(url=url+token,ssl=ssl, timeout=timeout)
sleep(5)
response = self.get(url=url + token, ssl=ssl, timeout=timeout)
'''
soup = BeautifulSoup(response.data, 'html.parser')
print soup
info = soup.findAll('table')
print info
#print 'TorPyCurl Connection address: ' + str(current_address.strong.text)
#if 'Congratulations.' in str(status[0].text).strip():
# print 'TorPyCurl Status: Connection PASS'
#else:
# print 'TorPyCurl Status: Connection FAIL'
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
'''
def exits(self, url='https://check.torproject.org/exit-addresses'):
return BeautifulSoup(self.get(url=url), 'html.parser')
# TODO Grab stdout line by line as it becomes available.
# TODO Retrieve information about the Exit Node in a more reliable way
def status(self):
try:
erl.ExitRelayListener()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
def login(self, url='', user='', passwd='', ssl=True, timeout=15):
attrs = {'user':user, 'password':passwd}
self.reset_handler()
self.handler.setopt(pycurl.FOLLOWLOCATION, 1)
self.handler.setopt(pycurl.COOKIEFILE, './cookie_test.txt')
self.handler.setopt(pycurl.COOKIEJAR, './cookie_test.txt')
self.handler.setopt(pycurl.POST, True)
self.handler.setopt(pycurl.POSTFIELDS, urlencode(attrs))
self._proxy_setup()
self._curl_setup(url=url, ssl=ssl, timeout=timeout)
try:
return self._curl_perform()
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
''' | UTF-8 | Python | false | false | 10,653 | py | 10 | TorPyCurl.py | 8 | 0.584812 | 0.580588 | 0 | 332 | 31.090361 | 99 |
chakki-works/elephant_sense | 10,067,403,361,266 | 7c230bcdb4d6d4227affccfb987f7b389eb57532 | 48005f304f085195cdb9215c824a904a76441c55 | /scripts/data/make_data.py | 4eddc0a811e6f058c0ae53dab99b43241ca331ef | [
"Apache-2.0"
]
| permissive | https://github.com/chakki-works/elephant_sense | 15f0eb288a7ed36eb0ccb4c29a5a8fc5d4a598f7 | ba7c95e557d8b5a2bdce699fb473de3183a7ca6f | refs/heads/master | 2021-01-23T01:13:10.708327 | 2018-12-10T03:55:29 | 2018-12-10T03:55:29 | 85,890,179 | 14 | 0 | null | false | 2017-04-07T01:04:08 | 2017-03-23T00:38:02 | 2017-03-23T00:56:06 | 2017-04-07T01:04:08 | 202 | 0 | 0 | 0 | Jupyter Notebook | null | null | import csv
import json
import os
import requests
from bs4 import BeautifulSoup
BASE_DIR = os.path.join(os.path.dirname(__file__), '../../data/')
def list_labels():
labeled_file = os.path.join(BASE_DIR, 'raw/labeled_qiita_posts.csv')
with open(labeled_file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
next(reader, None) # Skip header
for row in reader:
url = row[1]
            labels = row[3:]  # columns 4 onward are annotator columns
item_id = url.split('/')[-1]
yield item_id, labels
def get_likecount(url):
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
likecount = soup.find(class_="js-likecount").text
return likecount
def add_annotations():
for item_id, labels in list_labels():
target_dir = 'processed/items'
item_file = os.path.join(BASE_DIR, 'raw/items/{}.json'.format(item_id))
save_file = os.path.join(BASE_DIR, target_dir + '/{}.json'.format(item_id))
if not os.path.isdir(os.path.join(BASE_DIR, target_dir)):
os.mkdir(os.path.join(BASE_DIR, target_dir))
with open(item_file, 'r') as rf, open(save_file, 'w') as wf:
item = json.load(rf)
item['annotations'] = [
{
"annotator-id": annotator_id,
'quality': quality
}
for annotator_id, quality in enumerate(labels)
]
item['likes'] = get_likecount(item['url'])
wf.write(json.dumps(item, indent=4, ensure_ascii=False))
def main():
add_annotations()
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,696 | py | 36 | make_data.py | 22 | 0.564858 | 0.561321 | 0 | 56 | 29.303571 | 83 |
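# Shape of each processed item written above (field values are illustrative):
# {
#     "url": "https://qiita.com/.../items/<item_id>",
#     "annotations": [{"annotator-id": 0, "quality": "1"}, ...],
#     "likes": "42",
#     ...
# }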
falkzach/CSCI595-Deep-Knowledge-Transfer | 18,073,222,415,817 | 99e83be6a4cdbeb1bc8c0ebeaeabd4d82d358adb | 5ec0b0d6d4d793e6b6548453c064f96d06807bff | /checkpoint.py | 451cbafd306edc85b1731789f265700ef198bfb3 | []
| no_license | https://github.com/falkzach/CSCI595-Deep-Knowledge-Transfer | 54e14ab55e062d42ca32e450254c99db69d1b6c1 | 0cf99f4833cb16e8508f93370fb06541c2feb065 | refs/heads/master | 2021-01-19T04:13:14.979966 | 2017-05-10T15:34:57 | 2017-05-10T15:34:57 | 87,359,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import os
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
class SaveCheckpoint:
    def __init__(self):
with tf.Session() as sess:
self.saver = tf.train.Saver()
def save_model(self, experiment):
path = "sessions/" + experiment.get_save_path()
ensure_dir(path)
path += experiment.get_save_name()
saved_path = self.saver.save(experiment.session, path)
print("Session from " + experiment.name + " saved to " + saved_path + ".")
op = experiment.session.graph.get_operations()
print([m.values() for m in op][1])
col = experiment.session.graph.get_all_collection_keys()
print([m for m in col])
class LoadCheckpoint:
def __init__(self, path, name):
sess = tf.Session()
meta_path = path + "\\" + name + ".session.meta"
self.saver = tf.train.import_meta_graph(meta_path,clear_devices=True)
self.saver.restore(sess, tf.train.latest_checkpoint(path))
self.session = sess
print("Loaded Checkpoint: " + name)
op = sess.graph.get_operations()
print([m.values() for m in op][1])
col = sess.graph.get_all_collection_keys()
print([m for m in col])
if __name__ == "__main__":
exit(-1)
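# Usage sketch (hypothetical `experiment` object exposing .session, .name,
# .get_save_path() and .get_save_name(), as save_model above assumes):
#   SaveCheckpoint().save_model(experiment)
#   ckpt = LoadCheckpoint("sessions\\my_experiment", "run1")
#   graph = ckpt.session.graph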
| UTF-8 | Python | false | false | 1,317 | py | 11 | checkpoint.py | 10 | 0.592255 | 0.589977 | 0 | 41 | 31.121951 | 82 |
BadoinoMatteo/SISTEMI_E_RETI_quinta | 2,482,491,124,695 | 702376864df8795f59688c742693e9c744091657 | 9577cc6c14f0a2150c8587f2667b164febf3e0f5 | /cittografia/rsa.py | b05a5928374d7db678ae77c48a6814b36cd9349c | []
| no_license | https://github.com/BadoinoMatteo/SISTEMI_E_RETI_quinta | 913bb38eb30cd755046bbe8c91dad3386d75aec5 | 4753a4c04f47af588e21117f55ab79c9d70734b3 | refs/heads/main | 2023-03-11T14:45:04.146211 | 2021-03-05T07:25:46 | 2021-03-05T07:25:46 | 306,089,828 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
def main():
    p=int(input("enter a prime integer"))
    q=int(input("enter a prime integer"))
n=p*q
m=mcm(p, q)
print(m)
c=trovaC(m)
print(c)
d=trovaD(c,m)
print(d)
    num=int(input("enter the number to encrypt"))
criptato=criptaNum(num,c,n)
print(criptato)
decriptato=decriptaNum(criptato,d,n)
print(f"messaggio decifrato={decriptato}")
def mcd(numA, numB):
if (numA < numB):
numA, numB = numB, numA
while numB > 0:
resto = numA % numB
numA = numB
numB = resto
return numA
def mcm(numA, numB):
numA-=1
numB-=1
return (numA*numB)//mcd(numA, numB)
def trovaC(m):
lista=[]
for c in range(2, m):
if(mcd(c,m)==1):
lista.append(c)
print(lista)
return random.choice(lista)
def trovaD(c,m):
for d in range(2,m):
if((c*d)%m==1):
return d
return None
def criptaNum(a,c,n):
return (pow(a,c)%n)
def decriptaNum(b,d,n):
return (pow(b,d)%n)
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,142 | py | 13 | rsa.py | 12 | 0.534151 | 0.528021 | 0 | 56 | 18.428571 | 52 |
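# Worked example: with p=5, q=11 -> n=55 and m=lcm(4, 10)=20. Choosing c=3
# (gcd(3, 20)=1) gives d=7, since (3*7) % 20 == 1. For the message a=8:
#   criptaNum(8, 3, 55)    -> 8**3  % 55 == 17
#   decriptaNum(17, 7, 55) -> 17**7 % 55 == 8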
mikiihuang/pytorch_related | 455,266,536,728 | 068651a1b07277f355c27e75a38340323bc63aab | eb22f263a94efda72e0b9bb9818d13a981ffcaab | /norm_linearRegression.py | cdd23ce3e2251851ce5740efeffac91c407bb3f8 | []
| no_license | https://github.com/mikiihuang/pytorch_related | 7b9cbe11ead350fd2f3a0408025b12522d53a3f2 | 4e181bc1770847fe6aa3c9b6bc142571442765a1 | refs/heads/master | 2020-04-11T02:48:31.509693 | 2019-01-24T08:49:17 | 2019-01-24T08:49:17 | 161,456,877 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
# unsqueeze: insert a new dimension at the given dim
x = torch.unsqueeze(torch.linspace(-1,1,200),dim=1)
y = x.pow(2)+0.2*torch.rand(x.size())
x, y = Variable(x),Variable(y)
class NN(torch.nn.Module):
    # A network class must inherit from torch.nn.Module; __init__ and forward are its main methods
    # declare the network's modules
def __init__(self,input,hidden,output):
        # call the parent class initializer
super(NN,self).__init__()
self.hidden = torch.nn.Linear(input,hidden)
self.output = torch.nn.Linear(hidden,output)
    # build the forward pass
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.output(x)
return x
net = NN(input=1,hidden=10,output=1)
optimzer = torch.optim.SGD(net.parameters(),lr=0.1)
criteroi = nn.MSELoss()
print(net)
# NN(
# (hidden): Linear(in_features=1, out_features=10, bias=True)
# (output): Linear(in_features=10, out_features=1, bias=True)
# )
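# Note: with input=1, hidden=10, output=1 the network has
# (1*10 + 10) + (10*1 + 1) = 31 trainable parameters.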
plt.ion() # interactive plotting
plt.show()
for i in range(300):
predict = net(x)
loss = criteroi(predict,y)
optimzer.zero_grad()
loss.backward()
optimzer.step()
print("loss:"+str(loss.item()))
if i % 5 == 0:
# plot and show learning process
plt.cla()
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), predict.data.numpy(), 'r-', lw=5)
plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
plt.pause(0.1)
| UTF-8 | Python | false | false | 1,639 | py | 38 | norm_linearRegression.py | 29 | 0.623436 | 0.600395 | 0 | 53 | 27.603774 | 96 |
vanphuong12a2/obs_network | 7,937,099,578,269 | ff9e1360e5768d0198d93cc556640b8ed41eac3d | 7b58df7165c1ea6e8c5be525b424eb733cf136de | /src/plot/plot_explvar.py | c3c3e09c5db0e76a648c0917a3571f51c8ba8a51 | []
| no_license | https://github.com/vanphuong12a2/obs_network | 3989ba2805730f4564c46ec62026afb78ab55d89 | 6e0d2de7964d759f9bc59603e18fee10b420fb5a | refs/heads/master | 2016-08-07T09:55:02.092556 | 2015-05-14T09:02:02 | 2015-05-14T09:02:02 | 33,307,060 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import sys
import numpy as np
import scipy.io
import numpy.ma as ma
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
sys.path.append('../')
import supports
import settings
def plotmaps_explvar(obs, varplot, lons, lats, title, text, fileout, proj, bigmarker=False):
"""
This function receives the data (explained covariances + valid observations) and plots them in a map
"""
plt.clf()
font = {'size' : 4}
matplotlib.rc('font', **font)
zarr = supports.get_bathymetry_mat()
if settings.PLOTTING_REGION == 0:
plt.figure(figsize=( 8.64, 4.32), dpi=100)
else:
if settings.PLOTTING_REGION == 1:
            # clear previous plotting if there is any
if proj == 'mill':
                lon1, lon2 = np.argmin(np.abs(-90 - lons)), np.argmin(np.abs(30 - lons)) # from 90W -> 30E
lat1, lat2 = np.argmin(np.abs(80 - lats)), np.argmin(np.abs(-2 - lats)) # from 80N -> 2S
elif proj == 'lcc':
                lon1, lon2 = np.argmin(np.abs(-90 - lons)), np.argmin(np.abs(45 - lons)) # from 90W -> 45E
                lat1, lat2 = np.argmin(np.abs(80 - lats)), np.argmin(np.abs(-10 - lats)) # from 80N -> 10S
else:
                # the lat and lon are chosen for the North Atlantic
                lon1, lon2 = np.argmin(np.abs(-84 - lons)), np.argmin(np.abs(24 - lons)) # from 84W -> 24E
                lat1, lat2 = np.argmin(np.abs(80 - lats)), np.argmin(np.abs(-10 - lats)) # from 80N -> 10S
obs = np.asarray([o for o in obs if -84 <= o[0] <= 24 and 80 >= o[1] >= -10])
elif settings.PLOTTING_REGION == 2:
            # clear previous plotting if there is any
            # the lat and lon are chosen for the Mediterranean Sea
            lon1, lon2 = np.argmin(np.abs(-6 - lons)), np.argmin(np.abs(48 - lons)) # from 6W -> 48E
            lat1, lat2 = np.argmin(np.abs(48 - lats)), np.argmin(np.abs(24 - lats)) # from 48N -> 24N
obs = np.asarray([o for o in obs if -6 <= o[0] <= 42 and 48 >= o[1] >= 30])
lons = lons[lon1:lon2]
lats = lats[lat1:lat2]
varplot = varplot[lon1:lon2, lat1:lat2]
zarr = zarr[lon1:lon2, lat1:lat2]
plt.figure(figsize=( 3.64, 4.32), dpi=100) #HARD CODE (8.64, 4.32)
mvarplot = ma.array(varplot,mask=np.isnan(varplot))
# create a basemap using the data of the area
if proj == 'aea':
if settings.PLOTTING_REGION == 1:
m = Basemap(llcrnrlon=-72, llcrnrlat=0, urcrnrlon=110, urcrnrlat=70, resolution='l', projection=proj, lat_1=0, lat_2=90, lat_0=(np.min(lats)+np.max(lats))/2, lon_0=(np.min(lons)+np.max(lons))/2)
elif settings.PLOTTING_REGION == 2:
m = Basemap(llcrnrlon=-6, llcrnrlat=24, urcrnrlon=45, urcrnrlat=45, resolution='l', projection=proj, lat_1=0, lat_2=90, lat_0=(np.min(lats)+np.max(lats))/2, lon_0=(np.min(lons)+np.max(lons))/2)
else:
m = Basemap(llcrnrlon=-180, llcrnrlat=-90, urcrnrlon=180, urcrnrlat=90, resolution='l', projection='moll', lat_1=0, lat_2=90, lat_0=(np.min(lats)+np.max(lats))/2, lon_0=(np.min(lons)+np.max(lons))/2)
else:
m = Basemap(llcrnrlon= np.min(lons), llcrnrlat=np.min(lons), urcrnrlon=np.max(lons), urcrnrlat=np.max(lats), resolution='l', projection=proj, lat_1=5, lat_2=30 , lat_0=(np.min(lats)+np.max(lats))/2, lon_0=(np.min(lons)+np.max(lons))/2)
# use this to plot the whole globe
# m = Basemap(projection='moll',lon_0=0,resolution='l')
m.drawmapboundary(fill_color='white', linewidth = 0.1)
# set color for land and water
m.fillcontinents(color=[.7,.7,.7],lake_color='white')
lons_tile, lats_tile = m(np.tile(lons[:,np.newaxis],len(lats)) , np.tile(lats,(len(lons),1)))
# plot the bathymetry mask => grey
im1 = m.pcolormesh(lons_tile, lats_tile, zarr <= settings.MAX_Z, shading='flat', cmap=plt.cm.gray, vmin = -5., vmax = 1.)
im2 = m.pcolormesh(lons_tile, lats_tile, mvarplot, shading='flat', cmap=plt.cm.jet, vmin = 0., vmax = 1.)
#plot the monitoring stations
if len(obs) > 0:
x, y = m(obs[:,0], obs[:,1])
if bigmarker:
m.scatter(x, y, 0.3, marker='o', color='k', linewidth = 0.5)
else:
m.scatter(x, y, 0.3, marker='+', color='k', linewidth = 0.1)
# set the colorbar
y = plt.colorbar(im2, orientation='vertical')
# this colors the land and draw simple black coastal line
m.drawcoastlines(linewidth = 0.1)
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,91.,6.), labels=[1,0,0,0], linewidth = 0.1) #lat
m.drawmeridians(np.arange(-180.,181.,6.), labels=[0,0,0,1], linewidth = 0.1) #lon
# title of the figure
plt.title(title + text, size = 6)
#plt.show()
plt.savefig(fileout, dpi=1000)
plt.close()
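# Usage sketch (argument values are illustrative):
#   plotmaps_explvar(obs, varplot, lons, lats, 'Explained variance ',
#                    'surface', 'explvar.png', 'mill')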
| UTF-8 | Python | false | false | 4,813 | py | 33 | plot_explvar.py | 24 | 0.603781 | 0.552047 | 0 | 106 | 44.396226 | 242 |
davewadestein/Python-Intermediate | 15,522,011,838,502 | b8696d855c83fd2c751d94134db8488e49266230 | 4b3b36da54d2c813cb23e44ef23c3df24bf740d4 | /guess.py | a25411b6c64255eb77f4034ddb15ad83d0f7c38f | []
| no_license | https://github.com/davewadestein/Python-Intermediate | 86f6f35831ae19f385ccd3e071eee5526b30dbf5 | 570cb8e677b5280e2b6de289ef673e1839c35783 | refs/heads/master | 2016-08-11T13:03:09.248010 | 2016-02-09T19:00:21 | 2016-02-09T19:00:21 | 45,193,791 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
my_number = random.randint(1, 100)
guess = 0
while guess != my_number:
guess = int(input("Your guess (0 to give up)? "))
if guess == 0:
print("Sorry that you're giving up!")
break
elif guess > my_number:
print("Guess was too high")
elif guess < my_number:
print("Guess was too low")
else:
print("Congratulations. You guessed it!")
| UTF-8 | Python | false | false | 398 | py | 24 | guess.py | 23 | 0.605528 | 0.58794 | 0 | 15 | 25.533333 | 53 |
felpssc/Python-Desafios | 4,054,449,134,054 | 88fdcb6fef5cb58fd1c39600a22e821fb2d7183c | 53c1961cb2ef83a5b907484efd513a5d617da4ba | /Desafios Extras/desafio-triangulo.py | 8e2a27a53620c7abd3034fede686e1faa86847ab | []
| no_license | https://github.com/felpssc/Python-Desafios | 2db5274872718847fe3736f64a755fdd396ec8a2 | 59ed45522d45625819c1946edaf8e4d422323cfe | refs/heads/master | 2023-04-05T13:06:57.949911 | 2021-04-07T10:00:11 | 2021-04-07T10:00:11 | 355,490,034 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | i = 2
for n in range(7):
if n == 3 or n == 4 or n == 5:
print(" " * (6 - n), " *", " " * (n - i), "*")
i -= 1
else:
print(" " * (6 - n), " *" * n)
| UTF-8 | Python | false | false | 183 | py | 65 | desafio-triangulo.py | 64 | 0.26776 | 0.224044 | 0 | 7 | 24.571429 | 54 |
wlgur011/2DGame_Project | 9,878,424,788,065 | 54c6426cde05d6acc4f6f55e6c025ea16d5e1bdc | f958beddd250283dedcae9dc3071d64668e68ca1 | /GameFrameWork/main_state.py | 1b39e612699e7435fffe40fd1db3a601c0a10a6d | []
| no_license | https://github.com/wlgur011/2DGame_Project | 3eaa45e952a416a40fbf8f7e03af2eeeafb3686e | 009833626f5e65de18dff332294d0c3562c25055 | refs/heads/master | 2020-08-27T07:00:16.603389 | 2019-12-09T12:05:42 | 2019-12-09T12:05:42 | 217,277,343 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import json
import os
from pico2d import *
import win32api
import game_framework
import title_state
import pause_state
from GameObject.Player import CPlayer
import CMap
from GameObject.Bullet import CBullet
from GameObject.Monster import CPlane_1, AirPlane,Ship
from GameObject.UI import CUI
import ListManagement
import SoundManagement
name = "MainState"
Player=None
font = None
Is_gameover=False
ListManager=ListManagement.ListManager()
SoundManager = None
Score =None
def enter():
global Map ,Score , SoundManager
Score= CUI.Score()
ListManager.Player_Lst.append(CPlayer.Player())
ListManager.UI_Lst.append(CUI.Lager_Energy(160,30))
ListManager.UI_Lst.append(CUI.Player_Bomb())
ListManager.UI_Lst.append(CUI.Life())
ListManager.UI_Lst.append(Score)
ListManager.UI_Lst.append(CUI.GameOver())
Map=CMap.Map()
#ListManager.Boss_List.append(Ship.BossShip(1400, 860))
SoundManager=SoundManagement.SoundManager()
pass
def exit():
global Map
del(Map)
ListManager.Delete_AllList()
pass
def pause():
pass
def resume():
pass
def handle_events():
events=get_events()
global Player
for event in events:
if event.type==SDL_QUIT:
game_framework.quit()
elif event.type==SDL_KEYDOWN and event.key==SDLK_ESCAPE:
game_framework.change_state(title_state)
elif event.type==SDL_KEYDOWN and event.key==SDLK_p:
game_framework.push_state(pause_state)
pass
bisAirPlaneMake = True
MakeTerm=0
RedAirPlaneTerm =0
SmallBoss_MakeTerm =0
SmallBossCnt = 2
MiddleBossCnt = 1
FinalBossCnt = 1
Time = 0
bisMiddleBossDead = False
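# Module-level spawn state: the *Term timers accumulate frame_time inside
# update() below and gate how often each enemy wave and boss is created.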
def update():
if Is_gameover is True:
return
global MakeTerm, RedAirPlaneTerm,bisAirPlaneMake,SmallBoss_MakeTerm,SmallBossCnt,MiddleBossCnt,FinalBossCnt
global Time
MakeTerm+=game_framework.frame_time * 1
RedAirPlaneTerm += game_framework.frame_time * 1
SmallBoss_MakeTerm += game_framework.frame_time * 1
Time +=game_framework.frame_time * 1
if MakeTerm >=1 and bisAirPlaneMake is True:
MakeTerm=0
ListManager.Monster_List.append(CPlane_1.LeftPlane1(random.randint(0, 720), 960))
ListManager.Monster_List.append(CPlane_1.RightPlane1(random.randint(0, 720), 960))
ListManager.Monster_List.append(AirPlane.BlueAirPlane(random.randint(0, 300), 960))
ListManager.Monster_List.append(AirPlane.WhiteAirPlane(1000, random.randint(500,960)))
if RedAirPlaneTerm >= 4 and FinalBossCnt >0:
RedAirPlaneTerm=0
ListManager.Monster_List.append(AirPlane.RedAirPlane(random.randint(0, 300), 960))
if SmallBoss_MakeTerm > 10 and SmallBossCnt > 0:
SmallBoss_MakeTerm= 0
SmallBossCnt -=1
ListManager.Monster_List.append(AirPlane.MidAirPlane(1300, 1300, -1))
ListManager.Monster_List.append(AirPlane.MidAirPlane(-580, 1300, 1))
if Time > 20 and MiddleBossCnt > 0:
MiddleBossCnt -=1
ListManager.Monster_List.append(AirPlane.BigAirPlane(360, 1160))
if Time > 20 and bisMiddleBossDead is True and FinalBossCnt>0:
FinalBossCnt-=1
ListManager.Boss_List.append(Ship.BossShip(1400, 860))
bisAirPlaneMake= False
Map.update()
ListManager.update()
pass
def draw():
clear_canvas()
Map.draw()
ListManager.draw()
update_canvas()
pass
| UTF-8 | Python | false | false | 3,418 | py | 21 | main_state.py | 20 | 0.70158 | 0.667057 | 0 | 140 | 23.378571 | 111 |
microsoft/maro | 2,027,224,583,412 | ac7c7a1d73ed4ec6af7fbd9f002520af45d43220 | a29b8d6ae6642ef80d04ae99d721b703de06db69 | /maro/rl/distributed/abs_proxy.py | 6bf17964e900107b839405fd10a8260eeded227e | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | https://github.com/microsoft/maro | 6aab1a4e86fddabf7f242f0d1020d985a5f7a5f3 | b3c6a589ad9036b03221e776a6929b2bc1eb4680 | refs/heads/master | 2023-08-24T16:52:38.250279 | 2023-05-15T04:31:58 | 2023-05-15T04:31:58 | 230,389,247 | 764 | 158 | MIT | false | 2023-07-25T20:59:06 | 2019-12-27T06:48:27 | 2023-07-23T17:43:48 | 2023-07-25T20:59:06 | 115,360 | 729 | 141 | 19 | Python | false | false | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
import zmq
from tornado.ioloop import IOLoop
from zmq import Context
from zmq.eventloop.zmqstream import ZMQStream
from maro.rl.utils.common import get_own_ip_address
class AbsProxy(object):
"""Abstract proxy class that serves as an intermediary between task producers and task consumers.
The proxy receives compute tasks from multiple clients, forwards them to a set of back-end workers for
processing and returns the results to the clients.
Args:
frontend_port (int): Network port for communicating with clients (task producers).
backend_port (int): Network port for communicating with back-end workers (task consumers).
"""
def __init__(self, frontend_port: int, backend_port: int) -> None:
super(AbsProxy, self).__init__()
# ZMQ sockets and streams
self._context = Context.instance()
self._req_socket = self._context.socket(zmq.ROUTER)
self._ip_address = get_own_ip_address()
self._req_socket.bind(f"tcp://{self._ip_address}:{frontend_port}")
self._req_endpoint = ZMQStream(self._req_socket)
self._dispatch_socket = self._context.socket(zmq.ROUTER)
self._dispatch_socket.bind(f"tcp://{self._ip_address}:{backend_port}")
self._dispatch_endpoint = ZMQStream(self._dispatch_socket)
self._event_loop = IOLoop.current()
# register handlers
self._dispatch_endpoint.on_recv(self._send_result_to_requester)
@abstractmethod
def _route_request_to_compute_node(self, msg: list) -> None:
"""Dispatch the task to one or more workers for processing.
The dispatching strategy should be implemented here.
Args:
msg (list): Multi-part message containing task specifications and parameters.
"""
raise NotImplementedError
@abstractmethod
def _send_result_to_requester(self, msg: list) -> None:
"""Return a task result to the client that requested it.
The result aggregation logic, if applicable, should be implemented here.
Args:
msg (list): Multi-part message containing a task result.
"""
raise NotImplementedError
def start(self) -> None:
"""Start a Tornado event loop.
Calling this enters the proxy into an event loop where it starts doing its job.
"""
self._event_loop.start()
def stop(self) -> None:
"""Stop the currently running event loop."""
self._event_loop.stop()
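# A minimal round-robin subclass sketch (hypothetical, for illustration only;
# assumes `import itertools`, and MARO's concrete proxies implement their own
# dispatch strategies):
#
# class RoundRobinProxy(AbsProxy):
#     def __init__(self, frontend_port: int, backend_port: int, worker_ids: list) -> None:
#         super().__init__(frontend_port, backend_port)
#         self._workers = itertools.cycle(worker_ids)
#
#     def _route_request_to_compute_node(self, msg: list) -> None:
#         # ROUTER messages carry the sender identity as the first frame; keep
#         # it so the reply can be routed back, and prepend the worker identity.
#         self._dispatch_endpoint.send_multipart([next(self._workers)] + msg)
#
#     def _send_result_to_requester(self, msg: list) -> None:
#         # msg[0] is the replying worker's identity; forward the rest.
#         self._req_endpoint.send_multipart(msg[1:])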
| UTF-8 | Python | false | false | 2,603 | py | 659 | abs_proxy.py | 397 | 0.667307 | 0.667307 | 0 | 73 | 34.657534 | 106 |
chris-wood/BitcoinPrivacyProject | 19,146,964,228,089 | bc9dbf9892b0798b2b93593debb85e8c8eb3e0ec | 7a4cf1fa39e6fea008f9b816d4e5d1737220f323 | /boomerang/code/src/runSimulations.py | 5ae343fb776a210619adf26b7e5fb4bf5f527eff | []
| no_license | https://github.com/chris-wood/BitcoinPrivacyProject | 589c0344feee20581e73c59b7e0dd3dd5194c582 | b27a3281f1ba50f8a18422ec9527bc78afda1311 | refs/heads/master | 2020-05-09T05:16:07.012558 | 2014-03-14T00:24:30 | 2014-03-14T00:24:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import subprocess
import os
import shutil
import glob
# Declare variables for the delta+1 and delta+2 output
filedump = ""
# Compile!
p = subprocess.Popen('javac -cp "jyaml-1.3.jar:." *.java', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if (len(sys.argv) != 2):
    print "Usage: python runSimulations.py fileListFile"
    sys.exit(1)
else:
    filedump = sys.argv[1]
filedumpfile = open(filedump, 'r')
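# fileListFile is expected to contain one simulation config path per line,
# e.g. (hypothetical paths):
#   configs/runA.yaml
#   configs/runB.yaml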
for fname in filedumpfile:
fname = str(fname.strip())
print >> sys.stderr, "Running: " + fname
prefix = str(os.path.basename(fname).split(".")[0]) # drop extension
p = subprocess.Popen('java -Xmx12g -cp "jyaml-1.3.jar:." Boomerang ' + str(fname), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
fout = open(prefix + ".out", 'w')
for line in p.stdout.readlines():
line = line.strip()
fout.write(line + "\n")
| UTF-8 | Python | false | false | 857 | py | 230 | runSimulations.py | 84 | 0.690782 | 0.677946 | 0 | 26 | 31.846154 | 146 |
MaxHenger/dse14-finding-venusian-volcanoes | 1,211,180,782,915 | b5eeb66253527ac2b7034e16c5e318855bbe1e18 | cdaf89eb70a6f2cc5eb482397334c28373979538 | /Mid-Term concept tradeoff designs/Aircraft/utility.py | 5397fe19702d5be427c307c800a231e6764f931c | []
| no_license | https://github.com/MaxHenger/dse14-finding-venusian-volcanoes | 39976234f8f8fb049fcd5a6fc7b4e4393ad812d7 | 00be05b652de46552acb9923e2a731dd8369db07 | refs/heads/master | 2020-04-06T05:02:57.653741 | 2016-06-28T08:15:24 | 2016-06-28T08:15:24 | 56,502,832 | 3 | 2 | null | false | 2016-06-28T08:15:24 | 2016-04-18T11:38:20 | 2016-05-26T18:48:29 | 2016-06-28T08:15:24 | 36,456 | 2 | 3 | 0 | Python | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 9 20:05:54 2016
@author: MaxHenger
"""
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import os
import pickle
import ast
import time
class settings:
def __init__(self):
# Settable parameters for this specific mission
self.ratioMass0 = 5.0
self.ratioMass1 = 0.0
self.ratioPower0 = 3.0
self.ratioPower1 = 3.0
self.ratioTime = 0.3
self.deltaLatitude = 22.5 * np.pi / 180.0
self.heightLower = 40000
self.heightUpper = 60000
self.timeLower = 2000.0
self.deltaVUpper = 70
self.radiusHeatshield = 2.5
self.massPayload = 100.0
self.powerPayload = 50.0
self.specificCapacityBattery = 0.46e6
self.designLiftCoefficient = 0.4
self.fuselageRadius = 0.4
self.minimumDrag = 0.03
self.oswaldFactor = 0.95
self.propellorArea = np.pi * self.fuselageRadius ** 2.0 * 3.0
self.efficiencyPower = 0.95
self.efficiencyCharging = 0.99
self.efficiencySolarPanel = 0.9 # on top of default efficiency
# Venus' parameters
self.venusMu = 3.24859e14
self.venusRadius = 6051800
self.venusOmega = 2 * np.pi / (243.025 * 24.0 * 60.0 * 60.0)
self.venusFlux = 2600
class dictionaryIO:
def __init__(self):
self.__namesAxes__ = None
self.__valuesAxes__ = None
self.__resultsLinear__ = None
self.__multFactors__ = None
self.__sep__ = ';SEP;SEP;'
def __checkCompoundLength__(self):
if not (self.__resultsLinear__ is None) and \
not (self.__namesAxes__ is None) and \
not (self.__valuesAxes__ is None):
compoundLength = 1.0
for iAxis in range(0, len(self.__valuesAxes__)):
compoundLength *= len(self.__valuesAxes__[iAxis])
if len(self.__resultsLinear__) != compoundLength:
raise ValueError("Linear results do not match compound length of all axes")
def __generateMultFactors__(self):
self.__multFactors__ = [1] * len(self.__valuesAxes__)
for i in range(1, len(self.__valuesAxes__)):
iRev = len(self.__valuesAxes__) - 1 - i
self.__multFactors__[iRev] = self.__multFactors__[iRev + 1] * len(self.__valuesAxes__[iRev + 1])
def setAxes(self, names, values):
if len(names) != len(values):
raise ValueError("Expected length of 'names' and 'values' to be the same")
self.__namesAxes__ = names
self.__valuesAxes__ = values
self.__checkCompoundLength__()
self.__generateMultFactors__()
def setResults(self, results):
self.__resultsLinear__ = results
self.__checkCompoundLength__
def save(self, filename):
fh = open(filename, 'w')
# Write the number of axes
fh.write(str(len(self.__namesAxes__)) + '\n')
for i in range(0, len(self.__namesAxes__)):
# Write all axes
fh.write(self.__namesAxes__[i] + '\n')
fh.write(str(len(self.__valuesAxes__[i])) + '\n')
fh.write(str(self.__valuesAxes__[i][0]))
for j in range(1, len(self.__valuesAxes__[i])):
fh.write(self.__sep__ + str(self.__valuesAxes__[i][j]))
fh.write('\n')
# Write the data
for i in range(0, len(self.__resultsLinear__)):
first = True
for key, value in self.__resultsLinear__[i].items():
# Inefficient seperator writing
if first != True:
fh.write(self.__sep__)
first = False
fh.write(key + '=' + str(pickle.dumps(value, protocol=0)))
fh.write('\n')
def load(self, filename):
self.__namesAxes__ = []
self.__valuesAxes__ = []
self.__resultsLinear__ = []
fh = open(filename, 'r')
# Read the number of columns
line = fh.readline()
if len(line) == 0 or line[0] == '\n':
raise ValueError('Expected to find number of axes')
# Read the columns
numAxes = int(line)
numLinear = 1
for i in range(0, numAxes):
line = fh.readline()
if len(line) == 0 or line[0] == '\n':
raise ValueError("Expected to find axis name for axis " + str(i))
self.__namesAxes__.append(line[:-1])
line = fh.readline()
if len(line) == 0 or line[0] == '\n':
raise ValueError("Expected to find number of values for axis " + str(i) + ": " + self.__namesAxes__[-1])
numValues = int(line)
numLinear *= numValues
line = str(fh.readline())
if len(line) == 0 or line[0] == '\n':
raise ValueError("Expected to find values for axis " + str(i) + ": " + self.__namesAxes__[-1])
seperated = line.split(self.__sep__)
values = [0] * numValues
if numValues != len(seperated):
raise ValueError("Expected " + str(numValues) + " values for axis " + str(i) + ": " +
self.__namesAxes__[-1] + " but got " + str(len(seperated)) + " values")
for j in range(0, len(seperated)):
values[j] = float(seperated[j])
self.__valuesAxes__.append(values)
for i in range(0, numLinear):
line = str(fh.readline())
if len(line) == 0 or line[0] == '\n':
raise ValueError("Unexpected EOF at value " + str(i) + " of " + str(numLinear))
seperated = line.split(self.__sep__)
dictionary = {}
for j in range(0, len(seperated)):
subSeperated = seperated[j].split('=', 1)
dictionary[subSeperated[0]] = pickle.loads(ast.literal_eval(subSeperated[1]), encoding='ASCII')
self.__resultsLinear__.append(dictionary)
self.__generateMultFactors__()
def getValue(self, indices):
if len(indices) != len(self.__valuesAxes__):
raise ValueError("Expected " + str(len(self.__valuesAxes__)) + " indices, " +
"received " + len(indices) + " indices")
compoundIndex = 0
for i in range(0, len(indices)):
compoundIndex += self.__multFactors__[i] * indices[i]
return self.__resultsLinear__[compoundIndex]
def getAxisName(self, indexAxis):
if indexAxis >= len(self.__namesAxes__):
raise ValueError("Axis index " + str(indexAxis) +
" exceeds length " + str(len(self.__namesAxes__)))
return self.__namesAxes__[indexAxis]
def getAxisValues(self, indexAxis):
if indexAxis >= len(self.__valuesAxes__):
raise ValueError("Axis index " + str(indexAxis) +
" exceeds length " + str(len(self.__valuesAxes__)))
return self.__valuesAxes__[indexAxis]
class TimeEstimator:
def __init__(self, totalIterations):
self.start = 0
self.iterationStart = 0
self.stop = 0
self.total = totalIterations
self.current = 0
def __padFront__(self, string, character, total):
string = str(string)
return character * (total - len(string)) + string
def __formatTicks__(self, delta):
delta = int(delta * 1e3)
msecs = delta % 1000
secs = (delta - msecs) / 1000
minutes = secs / 60
hours = minutes / 60
secs = int(secs) % 60
minutes = int(minutes) % 60
hours = int(hours) % 24
return self.__padFront__(hours, '0', 2) + ":" + \
self.__padFront__(minutes, '0', 2) + ":" + \
self.__padFront__(secs, '0', 2) + "." + \
self.__padFront__(msecs, '0', 3)
def startTiming(self):
self.start = time.clock()
def startIteration(self, iteration):
self.iterationStart = time.clock()
def finishedIteration(self, iteration):
self.current = iteration
self.stop = time.clock()
def getTotalElapsed(self):
return self.__formatTicks__(self.stop - self.start)
def getIterationElapsed(self):
return self.__formatTicks__(self.stop - self.iterationStart)
def getEstimatedRemaining(self):
spent = self.stop - self.start
estimation = spent / (self.current + 1) * (self.total - 1 - self.current)
return self.__formatTicks__(estimation)
def getEstimatedTotal(self):
spent = self.stop - self.start
estimation = spent / (self.current + 1) * (self.total)
return self.__formatTicks__(estimation)
def getEstimatedEnd(self):
spent = self.stop - self.start
estimation = spent / (self.current + 1) * (self.total - 1 - self.current)
time.time() + estimation
def drawVenus(ax, lat, lon, file):
# Load image and draw it
image = img.imread(file)
ax.imshow(image, extent=[-180.0, 180.0, -90, 90])
# Overlay specified latitude and longitude grid
ax.set_xticks(lon)
ax.set_yticks(lat)
ax.grid(True, color=[1.0, 1.0, 1.0])
def __testDictionaryIO__():
test = dictionaryIO()
test.setAxes(['a', 'b'], [[0, 1, 2], [25, 50, 75]])
test.setResults([{'a': 2, 'b': 3}, {'a': 2, 'b': 4}, {'a': 2, 'b': 5},
{'a': 3, 'b': 3}, {'a': 3, 'b': 4}, {'a': 3, 'b': 5},
{'a': 4, 'b': 3}, {'a': 4, 'b': 4}, {'a': 4, 'b': 5}])
test.save('testFile.txt')
print(' --- before reloading:')
for i in range(0, 2):
print('axis 1 name =', test.getAxisName(0))
print('axis 1 values =', test.getAxisValues(0))
print('axis 2 name =', test.getAxisName(1))
print('axis 2 values =', test.getAxisValues(1))
for i in range(0, len(test.getAxisValues(0))):
for j in range(0, len(test.getAxisValues(1))):
val = test.getValue([i, j])
print('[', i, ',', j, '] =', val)
if i == 0:
test = dictionaryIO()
test.load('testFile.txt')
print(' --- after reloading:')
#__testDictionaryIO__() | UTF-8 | Python | false | false | 10,876 | py | 95 | utility.py | 81 | 0.505425 | 0.481979 | 0 | 304 | 34.779605 | 120 |
SpyrosMouselinos/Learning-to-solve-geometric-construction-problems-from-images | 7,430,293,424,835 | 96c4963ecb9ae5c16471743bb7e21e8ea077c71c | cfb9c0230974aa35c4b0744385395977d4c9cf67 | /src/py_euclidea/04_delta/03_EquilateralAboutCircle.py | 250e9a0abfb84c07db240699c4f6ff79db22db20 | []
| no_license | https://github.com/SpyrosMouselinos/Learning-to-solve-geometric-construction-problems-from-images | 935bc00a0c498659efbee2c9fca1570ff787eb0c | 480c37e300ee0d3f3c6cb500c2de3f4f1ab5c8a3 | refs/heads/main | 2023-08-19T23:27:36.414324 | 2021-10-11T22:01:35 | 2021-10-11T22:01:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from py_euclidea.constructions import *
import py_euclidea.ConstructionProcess as construction
import itertools
def init(env):
C = env.add_free(337.0, 243.0)
A = env.add_free(434.0, 264.0)
env.add_circle(C, A)
env.set_tools(
"move", "Point", "Line", "Circle",
"Perpendicular_Bisector", "Angle_Bisector",
"Perpendicular", "intersection",
)
env.goal_params(C, A)
def construct_goals(C, A):
C = C.a
A = A.a
lines = []
for ang in (0, -2*np.pi/3, 2*np.pi/3):
v = rotate_vector(A - C, ang)
X = C + v
lines.append(Line(v, np.dot(v, X)))
vertices = [
intersection_tool(l1, l2)
for (l1, l2) in itertools.combinations(lines, 2)
]
return [
segment_tool(X, Y)
for (X, Y) in itertools.combinations(vertices, 2)
]
def get_construction(env, obj):
C, A = [obj[i] for i in env.goal_par_indices]
input_circle = obj[2]
line_1 = line_tool(C, A)
P1, P1_d = intersection_tool(input_circle, line_1)
if same_point(P1, A):
P1 = P1_d
circle_2 = circle_tool(P1, C)
T1, T1_d = intersection_tool(circle_2, line_1)
if same_point(T1, C):
T1 = T1_d
V1, V2 = intersection_tool(circle_2, input_circle)
return [
construction.ConstructionProcess('Line', [C, A]),
construction.ConstructionProcess('Circle', [P1, C]),
construction.ConstructionProcess('Circle', [P1, C]),
construction.ConstructionProcess('Line', [T1, V1]),
construction.ConstructionProcess('Line', [T1, V1]),
construction.ConstructionProcess('Line', [T1, V1]),
construction.ConstructionProcess('Line', [T1, V2]),
construction.ConstructionProcess('Line', [T1, V2]),
construction.ConstructionProcess('Perpendicular', [C, A]),
], [
line_1,
P1,
circle_2,
T1,
V1,
V2,
line_tool(T1, V1),
line_tool(T1, V2),
perp_tool(line_1, A)
]
| UTF-8 | Python | false | false | 2,146 | py | 142 | 03_EquilateralAboutCircle.py | 125 | 0.535415 | 0.502796 | 0 | 72 | 28.805556 | 73 |
AK-1121/code_extraction | 1,984,274,941,482 | f0285a397691ebfa9c402be7654d78df6fbced59 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_10416.py | 0ac892bb8f01c024cb8ecee2b0b48f2c977c37ac | []
| no_license | https://github.com/AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Parsing timezone in format of "-xx:00"
pytz.FixedOffset(-8*60)
| UTF-8 | Python | false | false | 65 | py | 29,367 | python_10416.py | 29,367 | 0.723077 | 0.646154 | 0 | 2 | 31.5 | 40 |
DavidPhilpott/food-tracker-update | 10,316,511,445,475 | cd6d0a7e3a3b7f7c60b3c9446e07126d0cba43d5 | 4e5f30eba202e71b69fbb96b0f8d6f720ff2abce | /app/State.py | 52f92d7726fcd496f3be20307c44682fabb7cdbd | []
| no_license | https://github.com/DavidPhilpott/food-tracker-update | fa1c4abe2ba6d6c828dcdfc8f814e61cc48961b4 | 728b3314abf068655956d073d3ffcde3d5e672f1 | refs/heads/master | 2022-12-22T01:25:39.295973 | 2020-04-22T20:54:06 | 2020-04-22T20:54:06 | 250,893,765 | 0 | 0 | null | false | 2022-12-08T09:33:14 | 2020-03-28T21:07:27 | 2020-04-22T20:54:16 | 2022-12-08T09:33:13 | 113 | 0 | 0 | 5 | Python | false | false | from app.Providers.EnvVarProvider import EnvVarProvider
from app.Providers.AwsParameterStoreProvider import AwsParameterStoreProvider
from app.Providers.LoggingProvider import LoggingProvider
class State:
def __init__(self, env_var_provider=None, aws_parameter_store_provider=None, logging_provider=None):
self._state = {}
self._sessions = {}
self._logging_provider = logging_provider
if logging_provider is None:
self._logging_provider = LoggingProvider()
self._env_var_provider = env_var_provider
if env_var_provider is None:
self._env_var_provider = EnvVarProvider(logging_provider=self._logging_provider)
self._aws_parameter_store_provider = aws_parameter_store_provider
if aws_parameter_store_provider is None:
self._aws_parameter_store_provider = AwsParameterStoreProvider(logging_provider=self._logging_provider,
env_var_provider=self._env_var_provider)
return
def get(self, key: str) -> str:
if not isinstance(key, str):
raise TypeError(f"Variables must be requested as a string. Requested type is {type(key)}.")
self.debug(__name__, f"Fetching state value for '{key}'.")
if key in self._state.keys():
key_value = self._state[key]
else:
try:
self.debug(__name__, f"'{key}' not found in local state. Searching OS env.")
key_value = self._env_var_provider.get_var(key)
except KeyError as exc_info:
self.error(__name__, f"Cannot find '{key}' on local state or OS env.")
raise
if type(key_value) == str:
if key_value.startswith("secret_secure"):
self.debug(__name__, f"'{key}' maps to '{key_value}', so fetching from SSM as a secure string.")
key_value = self._aws_parameter_store_provider.get_secure_string(variable_name=key_value)
elif key_value.startswith("secret_pem"):
self.debug(__name__, f"'{key}' maps to '{key_value}', so fetching from SSM as a PEM key.")
key_value = self._aws_parameter_store_provider.get_secure_pem_key(variable_name=key_value)
elif key_value.startswith("secret"):
self.debug(__name__, f"'{key}' maps to '{key_value}', so fetching from SSM as a regular string.")
key_value = self._aws_parameter_store_provider.get_non_secure_string(variable_name=key_value)
self.debug(__name__, f"Found value for '{key}'.")
return key_value
def set(self, key_pair: dict):
if not isinstance(key_pair, dict):
raise TypeError(f"Variables must be set as a single dict key-pair. Requested type is {type(key_pair)}.")
if len(key_pair.keys()) != 1:
raise ValueError(f"Key-pair submitted must be length 1. Current key-pair dict is length {len(key_pair.keys())}.")
self.debug(__name__, f"Setting state value for '{list(key_pair.keys())[0]}'.")
self._state.update(key_pair)
self.debug(__name__, f"Finished setting value for '{list(key_pair.keys())[0]}'.")
return
def _assemble_key_list_from_args(self, *args) -> list:
self.debug(__name__, f"Assembling key list from {args}.")
if len(args) == 0:
raise ValueError("No values passed to function. Need at least one value to form key list.")
key_list = []
for val in args:
key_list.append(val)
self.debug(__name__, f"Finished assembling key list - {key_list}.")
return key_list
def _get_session(self, session_keys: list):
self.debug(__name__, f"Fetching session from state at {session_keys}.")
structure_to_search = self._sessions
try:
for key in session_keys:
self.debug(__name__, f"Searching sessions at key '{key}'.")
structure_to_search = structure_to_search[key]
except KeyError:
self.debug(__name__, f"Could not find {session_keys} inside sessions.")
raise
return structure_to_search
def get_session(self, *session_args):
self.debug(__name__, f"Fetching session from state at {session_args}.")
session_keys = self._assemble_key_list_from_args(*session_args)
session = self._get_session(session_keys)
self.debug(__name__, f"Finished fetching session for {session_args}.")
return session
def _has_session(self, session_keys: list) -> bool:
try:
self._get_session(session_keys)
self.debug(__name__, f"Found session on state at '{session_keys}'.")
return True
except KeyError:
self.debug(__name__, f"Session '{session_keys}' not found on state.")
return False
def has_session(self, *session_args) -> bool:
self.debug(__name__, f"Checking for session from state at {session_args}.")
session_keys = self._assemble_key_list_from_args(*session_args)
return self._has_session(session_keys)
def _set_session(self, session_keys: list, session_value):
self.debug(__name__, f"Setting session for {session_keys} to '{session_value}'.")
dictionary_layer = self._sessions
for key in session_keys[:-1]:
self.debug(__name__, f"Current dictionary layer is {dictionary_layer}.")
self.debug(__name__, f"Moving to dictionary key '{key}'.")
try:
dictionary_layer = dictionary_layer[key]
self.debug(__name__, f"Moved to dictionary key '{key}'.")
except KeyError:
self.debug(__name__, f"Could not move to key. Creating default.")
dictionary_layer.update({key: {}})
dictionary_layer = dictionary_layer[key]
final_key = session_keys[-1]
self.debug(__name__, f"Setting session value '{session_value}' at final layer key '{final_key}'.")
dictionary_layer[final_key] = session_value
self.debug(__name__, f"Done setting session for {session_keys} to '{session_value}'.")
return
def set_session(self, *session_args):
self.debug(__name__, f"Setting session for {session_args}.")
if len(session_args) < 2:
raise ValueError("Less than two args passed to function. In order to set a session need a path and value.")
session_value = session_args[-1]
session_path_args = session_args[:-1]
session_keys = self._assemble_key_list_from_args(*session_path_args)
self._set_session(session_keys, session_value)
return
def info(self, name, message):
self._logging_provider.info(name, message)
return
def warning(self, name, message):
self._logging_provider.warning(name, message)
return
def debug(self, name, message):
self._logging_provider.debug(name, message)
return
def error(self, name, message):
self._logging_provider.error(name, message)
return
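# Usage sketch (key names are illustrative; values that resolve to strings
# starting with "secret" are fetched from AWS Parameter Store, as above):
#   state = State()
#   token = state.get("github_token")        # local state -> OS env -> SSM
#   state.set({"run_id": "abc123"})
#   state.set_session("github", "api", session_object)
#   if state.has_session("github", "api"):
#       session = state.get_session("github", "api")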
| UTF-8 | Python | false | false | 7,137 | py | 51 | State.py | 45 | 0.603055 | 0.601653 | 0 | 147 | 47.55102 | 125 |
GH-Lim/AlgorithmPractice | 10,685,878,664,329 | 36649589826e7dcf8d6bc1c7e5f231c45056e10e | df30f97d316e899b07b223bc86cfe53345627f06 | /problems/programmers/lv3/종이접기.py | 736862beb076c93dac20153279de0af486129213 | []
| no_license | https://github.com/GH-Lim/AlgorithmPractice | c6a3aa99fa639aa23d685ae14c1754e0605eaa98 | e7b8de2075348fb9fcc34c1d7f211fdea3a4deb0 | refs/heads/master | 2021-06-18T17:21:10.923380 | 2021-04-18T03:43:26 | 2021-04-18T03:43:26 | 199,591,747 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def solution(n):
answer = []
for _ in range(n):
answer = answer + [0] + [int(not(i)) for i in answer[::-1]]
return answer
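# Worked example: solution(3) == [0, 0, 1, 0, 0, 1, 1]. Each fold keeps the
# previous creases, inserts a 0, then appends the previous creases reversed
# and inverted (the standard paper-folding / dragon-curve sequence).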
| UTF-8 | Python | false | false | 142 | py | 187 | 종이접기.py | 186 | 0.521127 | 0.507042 | 0 | 5 | 27.4 | 67 |
kc3/Springboard | 12,086,038,000,088 | 3f3a85cbf4e7e59ee2943837ce1c8d005a68c242 | a1fbd4f9b0e7f218179fecb42b007499624502c0 | /capstone_2/src/models/train_model.py | f9427903109eb542f14da90157d079751ca30a4f | [
"MIT"
]
| permissive | https://github.com/kc3/Springboard | 745ac29596dd3a0c889fdfcb673a2048d0860c58 | 4f092ef635529bd98e91bb5a98015755bd4a7d9e | refs/heads/master | 2021-06-27T14:24:19.871355 | 2019-04-23T18:21:25 | 2019-04-23T18:21:25 | 141,273,416 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import os
from datetime import datetime
from src.models.data_manager import DataManager
from src.models.seqtoseq_model import SeqToSeqModel
from src.models.policy_model import PolicyGradientModel
# Function to train SeqToSeq models
def train_seqtoseq(model_name=None, epochs=100):
#
# Configure logging
#
abs_path = os.path.abspath(os.path.dirname(__file__))
logs_dir = os.path.join(abs_path, '../../logs')
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
os.chmod(logs_dir, 0o777)
log_path = os.path.join(abs_path, '../../logs/run-{0}.log')
logging.basicConfig(filename=log_path.format(datetime.now().strftime('%Y%m%d-%H%M%S')),
level=logging.INFO,
format='%(asctime)s-%(process)d-%(name)s-%(levelname)s-%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Seq2Seq Model Training started.')
d = DataManager()
logging.info('Cornell Data Set loaded...')
# Train individual agent
s_model = SeqToSeqModel(model_name=model_name, epochs=epochs)
s_model.fit(d)
    logging.info('Finished training Seq2Seq Model...')
# Function to train RL models
def train_rl(model_name=None):
#
# Configure logging
#
abs_path = os.path.abspath(os.path.dirname(__file__))
logs_dir = os.path.join(abs_path, '../../logs')
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
os.chmod(logs_dir, 0o777)
log_path = os.path.join(abs_path, '../../logs/run-{0}.log')
logging.basicConfig(filename=log_path.format(datetime.now().strftime('%Y%m%d-%H%M%S')),
level=logging.INFO,
format='%(asctime)s-%(process)d-%(name)s-%(levelname)s-%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('RL Model Training started.')
# Train individual agent
s_model = PolicyGradientModel(model_name=model_name, turns=2)
s_model.fit()
logging.info('Finished training RL Model...')
if __name__ == '__main__':
# train_seqtoseq(model_name='test-policy')
train_rl(model_name='test-rl')
| UTF-8 | Python | false | false | 2,163 | py | 85 | train_model.py | 22 | 0.613037 | 0.606103 | 0 | 66 | 31.757576 | 92 |
vrih/todoist_cli | 16,200,616,664,766 | 50cc6baae6d4d225b450b98ec0e6bea9b14b7abc | 8a8ed6adf9988e910f0f52b36c86990584412a34 | /interactive.py | cebcfa288e9c7f61406d3f59583a10bf97b71b7b | []
| no_license | https://github.com/vrih/todoist_cli | 31720dc88b3dfda44998369c7185afe18ede49bc | f2cb860358a043de001d28d727dcf12dced01713 | refs/heads/master | 2021-01-24T08:12:30.024012 | 2018-08-29T08:05:03 | 2018-08-29T08:05:03 | 122,973,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Interactive todoist"""
from typing import List
import completions
import input_parser as ip
import renderer as rd
import data_accessor as da
import todoist_calls as td
def view(args, date=None) -> None:
"""List interactively"""
try:
if not date:
date = args[args.index("-d") + 1]
except ValueError:
date = None
try:
project = args[args.index("-p") + 1]
except ValueError:
project = None
rd.print_all_tasks(project=project, date=date)
def today(args) -> None:
"""List tasks for today"""
view(args, date="today")
def tomorrow(args) -> None:
"""List tasks for tomorrow"""
view(args, date="tomorrow")
def update(_) -> None:
"""Update a task interactively"""
rd.print_all_tasks()
task_to_update = ip.get_task()
content = input("content: ")
if content == "":
content = None
due = ip.get_due()
priority = ip.get_priority()
labels = ip.get_labels()
td.update_task([task_to_update], due, content, labels, priority)
def add(_) -> None:
"""Add a task interactively"""
content = input("content: ")
project = ip.get_project()
due = ip.get_due()
priority = ip.get_priority()
labels = ip.get_labels()
td.add_task(content, project, due, labels, priority)
def end_task(args, func):
"""Helper for ending tasks"""
if len(args) > 1:
tasks = [int(t, 16) for t in args[1:]]
else:
rd.print_all_tasks()
tasks = [ip.get_task()]
func(tasks)
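# Note: task ids passed on the command line are parsed as hex (base 16), so a
# call like "complete 1a 2f" (illustrative ids) ends tasks 0x1a and 0x2f.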
def complete(args: List[str]) -> None:
"""Complete a task interactively"""
end_task(args, td.complete_task)
def delete(args: List[str]) -> None:
"""Delete a task interactively"""
end_task(args, td.delete_task)
def comments(args: List[str]) -> None:
"""Render cmments"""
### TODO: Move to render function and make pretty
if len(args) > 1:
task = int(args[1], 16)
else:
rd.print_all_tasks()
task = ip.get_task()
for comment in da.notes(task):
print(f"{comment['posted']}:\n{comment['content']}\n")
def projects(_) -> None:
"""Print all projects"""
for project in da.project_names():
print(project)
def sync(_) -> None:
"""Resync"""
td.sync()
FUNCTIONS = {
"list": view,
"today": today,
"tomorrow": tomorrow,
"complete":complete,
"delete": delete,
"update": update,
"add": add,
"projects": projects,
"comments": comments,
"sync": sync
}
def interactive() -> None:
"""Start an interactive session"""
import readline
import shlex
readline.parse_and_bind("tab: complete")
while True:
readline.set_completer(completions.complete)
try:
command = input("(todoist) ")
except KeyboardInterrupt:
return
args = shlex.split(command)
try:
if not args:
continue
FUNCTIONS[args[0]](args)
except KeyError:
continue
except KeyboardInterrupt:
print("")
continue
| UTF-8 | Python | false | false | 3,085 | py | 8 | interactive.py | 7 | 0.578282 | 0.574716 | 0 | 129 | 22.914729 | 68 |
jackiezhung/Learning | 9,517,647,578,087 | 89998325e9d0a86bfd1a55d99d820913ddbc58f1 | bcd6d642287cbd1eedc95d5e6ff44b249db45a04 | /Network/SocketExample/SimpleServer.py | f0e555e8491dec19635b6232a81f951f77eda013 | []
| no_license | https://github.com/jackiezhung/Learning | 6aac759959a1593bee77d49f277378a1311dc770 | b51efb360dc6d1cb651f97b61fe615157166cbe9 | refs/heads/master | 2023-06-13T03:25:52.622150 | 2020-04-18T05:42:38 | 2020-04-18T05:42:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
import socket
from Network import address
server_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_s.bind(address)
server_s.listen(5)
print("Start server on: {}".format(address))
while True:
client_s, client_addr = server_s.accept()
print("Connect from {}".format(client_addr))
while True:
receive = client_s.recv(1024)
if receive != b"exit":
print(receive.decode())
else:
client_s.close()
print("Disconnect from {}".format(client_addr))
break
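# Minimal client sketch (assumes `address` is e.g. ("127.0.0.1", 9999)):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("127.0.0.1", 9999))
#   c.send("hello".encode())
#   c.send(b"exit")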
| UTF-8 | Python | false | false | 557 | py | 10 | SimpleServer.py | 10 | 0.621185 | 0.610413 | 0 | 21 | 25.52381 | 60 |
sdetwiler/pammo | 8,031,588,864,413 | 3bd5ab6f46e1b4a1aaad911cd7b1fc512de73b44 | 4c452179cf4c22a5e9d8ee4a9d0062ad3d6d0a45 | /editor/source/CommonDrawing.py | 9c948e89e76b53b96614ee72b7d12f7130f7a8ba | []
| no_license | https://github.com/sdetwiler/pammo | ed749bdbd150a5665bdabc263005249a821cfa2e | aee306611ae8c681a5f8c03b3b0696e2cf771864 | refs/heads/master | 2021-01-13T01:49:15.414767 | 2015-05-18T22:21:24 | 2015-05-18T22:21:24 | 35,837,663 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import wx
import POI
def drawCollisionShapes(display, gc, rect):
collisionShapes = display.getMap().getCollisionShapes()
# Draw all collision shapes.
for shape in collisionShapes:
if not shape.getEnemiesCollide():
gc.SetBrush(wx.Brush(wx.Color(50, 128, 50, 92)))
gc.SetPen(wx.Pen(wx.Color(64, 192, 64, 168), 3))
else:
gc.SetBrush(wx.Brush(wx.Color(0, 100, 0, 92)))
gc.SetPen(wx.Pen(wx.Color(0, 128, 0, 168), 3))
points = shape.getPoints()
path = gc.CreatePath()
path.MoveToPoint(points[0][0], points[0][1])
for point in points[1:]:
path.AddLineToPoint(point[0], point[1])
path.CloseSubpath()
gc.FillPath(path)
gc.StrokePath(path)
def drawPOIs(display, gc, rect):
# Draw all POIs.
strokeSize = 3
displaySize = 32
pois = display.getMap().getPOIs()
for poi in pois:
type = poi.getType()
if type == POI.PlayerStartTypeName:
gc.SetBrush(wx.Brush(wx.Color(0, 0, 100, 92)))
gc.SetPen(wx.Pen(wx.Color(0, 0, 128, 168), strokeSize))
elif type == POI.InfastructurePointTypeName:
gc.SetBrush(wx.Brush(wx.Color(0, 100, 100, 92)))
gc.SetPen(wx.Pen(wx.Color(0, 128, 128, 168), strokeSize))
elif type == POI.SpawnPointTypeName:
gc.SetBrush(wx.Brush(wx.Color(0, 100, 0, 92)))
gc.SetPen(wx.Pen(wx.Color(0, 128, 0, 168), strokeSize))
else: continue
pos = poi.getPos()
gc.DrawEllipse(pos[0] - displaySize/2 + strokeSize - 1, pos[1] - displaySize/2 + strokeSize - 1,
displaySize - strokeSize*2 + 1, displaySize - strokeSize*2 + 1)
def drawGrid(display, gc, rect, amount):
    # Draw the snap grid if requested.
(worldX, worldY) = display.getMap().getSize()
tileSize = float(amount)
sizeX, sizeY = (int(worldX // tileSize), int(worldY // tileSize))
startX = int(rect[0] // tileSize)
startY = int(rect[1] // tileSize)
endX = int(rect[2] // tileSize + 1)
endY = int(rect[3] // tileSize + 1)
if endX > sizeX: endX = sizeX
if endY > sizeY: endY = sizeY
gc.SetPen(wx.Pen(wx.Color(0, 0, 0, 32), 2))
for x in range(startX, endX+1):
gc.StrokeLine(x*tileSize, 0, x*tileSize, sizeY*tileSize)
for y in range(startY, endY+1):
gc.StrokeLine(0, y*tileSize, sizeX*tileSize, y*tileSize)
def drawSafeZone(display, gc, rect):
(worldX, worldY) = display.getMap().getSize()
bounds = [480/2, 320/2,
worldX - 480/2,
worldY - 320/2]
gc.SetPen(wx.Pen(wx.Color(192, 48, 192, 128), 5))
gc.StrokeLine(bounds[0], bounds[1], bounds[0], bounds[3])
gc.StrokeLine(bounds[2], bounds[1], bounds[2], bounds[3])
gc.StrokeLine(bounds[0], bounds[1], bounds[2], bounds[1])
gc.StrokeLine(bounds[0], bounds[3], bounds[2], bounds[3])
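# Note: the safe zone is the map rectangle inset by half a 480x320 screen on
# each side, i.e. the area the camera can centre on without leaving the map.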
| UTF-8 | Python | false | false | 2,923 | py | 281 | CommonDrawing.py | 244 | 0.590831 | 0.534382 | 0 | 76 | 37.394737 | 104 |
alexandder/sca-gaps | 12,275,016,544,428 | 5c0ff44fbde842de204c32685450fab0c1b78668 | 6d9600183ebdb334c5748896908e54fe589842c4 | /code/lib/ca_lib.py | 8f4ca2179ed9721dfdd31a02e62f2f6dd348f205 | []
| no_license | https://github.com/alexandder/sca-gaps | 78dec341fdbc7fcef5dfe7cd0796289dbb0ba288 | 736082515d9d726cd34d1fbaa99a14128bb9d66c | refs/heads/master | 2021-09-10T15:27:18.211920 | 2018-03-28T12:14:26 | 2018-03-28T12:14:26 | 106,101,925 | 0 | 1 | null | false | 2018-03-28T12:14:27 | 2017-10-07T13:55:47 | 2018-03-13T23:55:44 | 2018-03-28T12:14:27 | 795,560 | 0 | 1 | 0 | Python | false | null | import random
import sys
import lib.ca_data as ca_data
def apply_rule(number, l, c, r):
binary_rule_number = format(number, "#010b")[2:]
neighborhood = int(str(l) + str(c) + str(r), 2)
position = -neighborhood + 7
return int(binary_rule_number[position])
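# Worked example: rule 110 has the binary rule string "01101110"; the
# neighborhood (1, 1, 0) encodes to 6, so position = -6 + 7 = 1 and
# apply_rule(110, 1, 1, 0) returns int("01101110"[1]) = 1, matching the
# standard rule-110 lookup table.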
def applyRuleToVector(a, ruleNumber):
result = []
for i in range(len(a)):
if i == len(a) - 1:
result.append(apply_rule(ruleNumber, a[i - 1], a[i], a[0]))
elif i == 0:
result.append(apply_rule(ruleNumber, a[len(a) - 1], a[i], a[i + 1]))
else:
result.append(apply_rule(ruleNumber, a[i - 1], a[i], a[i + 1]))
return result
def printArray(arr):
for b in arr:
if b == 1:
sys.stdout.write(u"\u25A0")
else:
            sys.stdout.write(' ')
    print()
def vectorToString(arr):
result = ""
for b in arr:
if b == 1:
result += u"\u25A0"
else:
result += " "
return result
def bin_vector_to_string(v):
vs = ""
for bit in v:
vs += str(bit)
return vs
def simulate(initial, T, ruleNumber):
res = []
res.append(initial)
for k in range(T):
res.append(applyRuleToVector(res[len(res) - 1], ruleNumber))
return res
def printSimulation(simulation):
for arr in simulation:
printArray(arr)
def print_simulation_as_table(simulation):
print()
for vec in simulation:
for v in vec:
sys.stdout.write(str(v))
print()
def createStandardInitalVector(N):
initial = []
for i in range(N):
if i == N / 2:
initial.append(1)
else:
initial.append(0)
return initial
def getFrequencies(vector):
res = {"111": 0, "110": 0, "101": 0, "100": 0, "011": 0, "001": 0, "010": 0, "000": 0}
for i in range(len(vector)):
if i == len(vector) - 2:
index = str(vector[i]) + str(vector[i + 1]) + str(vector[0])
res[index] = res[index] + 1
elif i == len(vector) - 1:
index = str(vector[i]) + str(vector[0]) + str(vector[1])
res[index] = res[index] + 1
else:
index = str(vector[i]) + str(vector[i + 1]) + str(vector[i + 2])
res[index] = res[index] + 1
return res
def getFrequenciesOfSimulation(simulation):
result = []
for step in simulation:
result.append(getFrequencies(step))
return result
def get_neighborhoods_count_for_simulation(simulation):
result = {"111": 0, "110": 0, "101": 0, "100": 0, "011": 0, "001": 0, "010": 0, "000": 0}
for t in range(len(simulation)):
freqs = getFrequencies(simulation[t])
for nei in ca_data.elementary_neighborhoods:
result[nei] = result[nei] + freqs[nei]
return result
def getAggregateFrequenciesForEachBlock(frequencies):
    result = {"111": [], "110": [], "101": [], "100": [], "011": [], "001": [], "010": [], "000": []}
    f0 = frequencies[0]
    # running-average bookkeeping; `total` avoids shadowing the built-in sum()
    total = 0
    for b in f0:
        total = 1.0 * total + f0[b]
    for i in range(len(frequencies)):
        f = frequencies[i]
        for block in f:
            if i == 0:
                result[block].append(f[block] / total)
            else:
                prev = total * i * result[block][i - 1]
                result[block].append((prev + f[block]) / (total * (i + 1)))
    return result
def printFrequencies(frequencies):
print('\t000\t 001\t 010\t 011\t 100\t 101\t 110\t 111')
for i in range(len(frequencies)):
f = frequencies[i]
print(str(i) + "\t " + str(f["000"]) + "\t" + str(f["001"]) + "\t" + str(f["010"]) + "\t" + str(
f["011"]) + "\t" + str(f["100"]) + "\t" + str(f["101"]) + "\t" + str(f["110"]) + "\t" + str(f["111"]))
def getFrequenciesForEachBlock(frequencies):
result = {"111": [], "110": [], "101": [], "100": [], "011": [], "001": [], "010": [], "000": []}
for i in range(len(frequencies)):
f = frequencies[i]
for block in f:
result[block].append(f[block])
return result
def create_random_initial_vector(n):
return [random.randint(0,1) for i in range(n)]
def is_consistent_with_id(rule, neighborhood):
return str(apply_rule(rule, int(neighborhood[0]), int(neighborhood[1]), int(neighborhood[2]))) == neighborhood[1]
def get_neighborhoods_consistent_with_id(rule):
    neighborhoods = ['000', '001', '010', '011', '100', '101', '110', '111']
    # materialize as a list so callers can take len() of it under Python 3
    return list(filter(lambda n: is_consistent_with_id(rule, n), neighborhoods))
def get_neighborhoods_non_consistent_with_id(rule):
    neighborhoods = ['000', '001', '010', '011', '100', '101', '110', '111']
    return list(filter(lambda n: not is_consistent_with_id(rule, n), neighborhoods))
def get_maximal_k_for_rule(rule):
if ca_data.getWolframClassForRule(rule) == '3':
return 1500
if ca_data.getWolframClassForRule(rule) == '4':
return 1300
return 240
def get_number_of_neghborhoods_inconsistent_with_id():
result = {}
for r in range(256):
result[r] = len(get_neighborhoods_non_consistent_with_id(r))
return result
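# --- Hedged usage sketch (not part of the original module) ---
# Rule 110 from the standard centered-1 start; assumes lib.ca_data is
# importable so that the module-level import above succeeds.
if __name__ == "__main__":
    start = createStandardInitalVector(31)
    history = simulate(start, 15, 110)
    printSimulation(history)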
| UTF-8 | Python | false | false | 5,317 | py | 29 | ca_lib.py | 26 | 0.533948 | 0.481851 | 0 | 170 | 29.252941 | 117 |
enumatech/sprites-python | 120,259,094,549 | 759593c7ccd266efb571d38c315bece69fe72c00 | 242c51c14da38745931629124aa3b38a7d1ca776 | /src/tests/conftest.py | 6aa0fdc5f0070ac0433532781f81b4426051a88b | [
"MIT"
]
| permissive | https://github.com/enumatech/sprites-python | 8c3fa0003c98d2da1d3221d289bff23f5f8db7af | e2221ff9bf842fa391e9646e1cee969b12218e64 | refs/heads/master | 2022-12-09T20:01:14.434206 | 2018-09-18T06:50:23 | 2018-09-18T06:50:23 | 137,177,091 | 4 | 3 | MIT | false | 2022-12-08T02:48:51 | 2018-06-13T07:13:18 | 2022-04-24T04:45:09 | 2022-12-08T02:48:51 | 149 | 4 | 4 | 6 | Python | false | false | from collections import namedtuple
import eth_account
import pytest
from eth_utils.address import to_checksum_address
from web3 import HTTPProvider, Web3
from web3.middleware import geth_poa_middleware
from ..channel import Channel, ChannelState, Payment
from ..contracts.dappsys import DSToken
from ..contracts.PreimageManager import PreimageManager
from ..contracts.SpritesRegistry import SpritesRegistry
from ..util import (
GAS,
check_tx,
deploy_contract,
fund_account,
fund_token,
generate_preimage,
mint,
)
GETH_URL = "http://localhost:8545"
PARTY_NAMES = ["alice", "bob"]
DEPLOYER = "0xd124b979f746be85706daa1180227e716eafcc5c"
ALICE = "0xa49aad37c34e92236690b93e291ae5f10daf7cbe"
BOB = "0xb357fc3dbd4cdb7cbd96aa0a0bd905dbe56cab77"
CHARLIE = "0xcBE431FF3fdcd4d735df5706e755D0f8726549f0"
DEPLOYER_PK = "0xe33292da27178504b848586dcee3011a7e21ee6ed96f9df17487fd6518a128c7"
ALICE_PK = "0xd8ae722d3a6876fd27907c434968e7373c6fbb985242e545a427531132ef3a71"
BOB_PK = "0x28e58f2f6a924d381e243ec1ca4a2239d2b35ebd9a44cec11aead6848a52630b"
CHARLIE_PK = "0x8e1733c6774268aee3db54901086b1f642f51e60300674ae3b33f1e1217ec7f5"
TOKEN_NAMES = ["WETH", "OAX"]
THIRD_PARTY_NAME = "charlie"
FUND_TOKEN_AMOUNT = 100
DEPOSIT_AMOUNTS = {"alice": 9, "bob": 10}
SEND_AMOUNT = 7
# two parties are equivalent / symmetric in many tests
Account = namedtuple("Account", "address privateKey")
ACCOUNTS = {
name: Account(to_checksum_address(address), private_key)
for name, address, private_key in zip(
["deployer", "alice", "bob", "charlie"],
[DEPLOYER, ALICE, BOB, CHARLIE],
[DEPLOYER_PK, ALICE_PK, BOB_PK, CHARLIE_PK],
)
}
@pytest.fixture(scope="session")
def web3():
w3 = Web3(HTTPProvider(GETH_URL))
# enable eth.account
w3.eth.enable_unaudited_features()
# for POA dev chain, see
# https://web3py.readthedocs.io/en/latest/middleware.html#geth-style-proof-of-authority
w3.middleware_stack.inject(geth_poa_middleware, layer=0)
return w3
# Account generation is slow, on the order of a second.
# We reuse the accounts for the entire session but use function scoped
# token contracts to make sure balances are zero initially.
@pytest.fixture
def mock_address():
return eth_account.Account.create().address
@pytest.fixture(scope="session")
def guy(web3):
return web3.eth.accounts[0]
@pytest.fixture(scope="session")
def tx_args(guy):
return {"from": guy, "gas": GAS}
def _get_account(web3, guy, name):
account = ACCOUNTS[name]
# web3.personal.unlockAccount(account.address, None)
fund_account(web3, guy, account)
return account
@pytest.fixture(scope="session")
def alice(web3, guy):
return _get_account(web3, guy, "alice")
@pytest.fixture(scope="session")
def bob(web3, guy):
return _get_account(web3, guy, "bob")
@pytest.fixture(scope="session")
def charlie(web3, guy):
return _get_account(web3, guy, "charlie")
@pytest.fixture(scope="session")
def deployer(web3):
return web3.eth.accounts[0]
@pytest.fixture(scope="session")
def registry(web3, deployer, preimage_manager):
return deploy_contract(
web3,
deployer,
"SpritesRegistry.sol",
"SpritesRegistry",
SpritesRegistry,
args=[preimage_manager._contract.address],
)
@pytest.fixture(scope="session")
def preimage_manager(web3, deployer):
return deploy_contract(
web3, deployer, "PreimageManager.sol", "PreimageManager", PreimageManager
)
@pytest.fixture(scope="function")
def token(web3, deployer):
token = deploy_contract(
web3, deployer, "dappsys.sol", "DSToken", DSToken, args=[deployer]
)
mint(web3, token, deployer)
return token
@pytest.fixture(scope="function")
def other_token(web3, deployer):
    # deploy an independent second token; calling the `token` fixture
    # function directly is rejected by modern pytest
    other = deploy_contract(
        web3, deployer, "dappsys.sol", "DSToken", DSToken, args=[deployer]
    )
    mint(web3, other, deployer)
    return other
@pytest.fixture
def mock_channel(
web3, mock_address, registry, preimage_manager, acting_party, other_party
):
tx_hash = registry.createChannel(other_party.address, mock_address).transact(
{"from": acting_party.address, "gas": GAS}
)
receipt = check_tx(web3, tx_hash)
channel_id = web3.toInt(hexstr=receipt.logs[0].data)
return Channel(
web3,
registry,
preimage_manager,
mock_address,
channel_id,
acting_party,
other_party,
)
@pytest.fixture
def channel(
web3,
token: DSToken,
registry: SpritesRegistry,
preimage_manager: PreimageManager,
acting_party,
other_party,
):
tx_hash = registry.createChannel(
other_party.address, token._contract.address
).transact({"from": acting_party.address, "gas": GAS})
receipt = check_tx(web3, tx_hash)
channel_id = web3.toInt(hexstr=receipt.logs[0].data)
return Channel(
web3, registry, preimage_manager, token, channel_id, acting_party, other_party
)
@pytest.fixture
def other_channel(
web3,
other_token: DSToken,
registry: SpritesRegistry,
preimage_manager: PreimageManager,
acting_party,
other_party,
):
tx_hash = registry.createChannel(
other_party.address, other_token._contract.address
).transact({"from": acting_party.address, "gas": GAS})
receipt = check_tx(web3, tx_hash)
channel_id = web3.toInt(hexstr=receipt.logs[0].data)
return Channel(
web3,
registry,
preimage_manager,
other_token,
channel_id,
acting_party,
other_party,
)
@pytest.fixture(params=["alice", "bob"])
def acting_party_name(request):
return request.param
@pytest.fixture
def acting_party(acting_party_name, alice, bob):
return alice if acting_party_name == "alice" else bob
@pytest.fixture
def other_party_name(acting_party_name):
return next(name for name in PARTY_NAMES if name != acting_party_name)
@pytest.fixture
def other_party(other_party_name, alice, bob):
return alice if other_party_name == "alice" else bob
@pytest.fixture
def third_party(charlie):
return charlie
@pytest.fixture
def preimage():
return generate_preimage()
@pytest.fixture
def round():
return 0
@pytest.fixture
def new_round():
return 1
@pytest.fixture
def amount():
return 0
@pytest.fixture
def new_amount():
return 0
@pytest.fixture
def payment(amount):
return Payment(amount=amount)
@pytest.fixture
def new_payment(new_amount):
return Payment(amount=new_amount)
@pytest.fixture
def deposits():
return [0, 0]
@pytest.fixture
def new_deposits(deposits):
return deposits
@pytest.fixture
def credits():
return [0, 0]
@pytest.fixture
def new_credits(credits):
return credits
@pytest.fixture
def withdrawals():
return [0, 0]
@pytest.fixture
def new_withdrawals(withdrawals):
return withdrawals
@pytest.fixture
def new_state(new_deposits, new_credits, new_withdrawals, new_round, new_payment):
return ChannelState(
deposits=new_deposits,
credits=new_credits,
withdrawals=new_withdrawals,
round=new_round,
payment=new_payment,
)
@pytest.fixture
def last_state(deposits, credits, withdrawals, round, payment):
return ChannelState(
deposits=deposits,
credits=credits,
withdrawals=withdrawals,
round=round,
payment=payment,
)
@pytest.fixture
def deposit_amount(acting_party_name):
return DEPOSIT_AMOUNTS[acting_party_name]
@pytest.fixture
def send_amount():
return SEND_AMOUNT
def test_send_tokens(web3, token, acting_party, guy):
fund_token(
web3, token=token, sender=guy, to=acting_party.address, amount=FUND_TOKEN_AMOUNT
)
assert token.balanceOf(acting_party.address).call() == FUND_TOKEN_AMOUNT
@pytest.fixture
def with_tokens(web3, token, acting_party, guy):
fund_token(
web3, token=token, sender=guy, to=acting_party.address, amount=FUND_TOKEN_AMOUNT
)
return acting_party
@pytest.fixture
def with_other_tokens(web3, other_token, other_party, guy):
fund_token(
web3,
token=other_token,
sender=guy,
to=other_party.address,
amount=FUND_TOKEN_AMOUNT,
)
return other_party
@pytest.fixture
def channel_with_deposit(channel, acting_party, deposit_amount, with_tokens):
channel.deposit(sender=acting_party, amount=deposit_amount)
return channel
@pytest.fixture
def other_channel_with_deposit(
other_channel, other_party, deposit_amount, with_other_tokens
):
other_channel.deposit(sender=other_party, amount=deposit_amount)
return other_channel
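# Hedged sketch of how a test module can lean on the fixtures above;
# `channel_with_deposit` already funds the party and deposits, so a test
# only asserts on the outcome (the Channel accessor named here is an
# assumption, not part of this file):
# def test_deposit_recorded(channel_with_deposit, deposit_amount):
#     assert channel_with_deposit.deposits()[0] == deposit_amount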
| UTF-8 | Python | false | false | 8,575 | py | 56 | conftest.py | 28 | 0.696093 | 0.655743 | 0 | 377 | 21.745358 | 91 |
pete2fiddy/Mosaicer | 11,347,303,629,104 | 6f4258d8a35440c900bc22f4beb8a2590f5a1484 | cc6d76d42368ac3d809408f2b434fcb900cdbc75 | /Feature/HOGMatch.py | e23fe40e55d6fbde41f57eaaff81b48ebe679149 | []
| no_license | https://github.com/pete2fiddy/Mosaicer | f8a36692a566cb3d417797b276f2ab68891e23a1 | 7b45a5e08520bb9499ed1be0467c7186fa9e96fd | refs/heads/master | 2021-05-23T06:09:34.527547 | 2017-06-30T13:25:16 | 2017-06-30T13:25:16 | 94,837,377 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Feature.MatchType import MatchType
import numpy as np
from Feature.FeatureMatch import FeatureMatch
import cv2
from matplotlib import pyplot as plt
import ImageOp.CVMath as CVMath
import ImageOp.ImageMath as ImageMath
from PIL import Image
from math import pi, sqrt
import ImageOp.HOG as HOG
class HOGMatch(MatchType):
GAUSSIAN_BLUR_WINDOW = (5,5)
GAUSSIAN_BLUR_STD_DEV = 1.0
DEFAULT_SMALL_WINDOW = 8
DEFAULT_NUM_AGGREGATE_WINDOWS = 4
NUM_BINS = 9
THETA_PER_INDEX = pi/float(NUM_BINS)
'''should make this take a NamedArgs object in the future'''
def __init__(self, image1, image2, mask, small_window = None, num_aggregate_windows = None):
self.small_window = small_window if small_window is not None else HOGMatch.DEFAULT_SMALL_WINDOW
self.num_aggregate_windows = num_aggregate_windows if num_aggregate_windows is not None else HOGMatch.DEFAULT_NUM_AGGREGATE_WINDOWS
self.aggregate_window_size = int(sqrt(self.num_aggregate_windows))
MatchType.__init__(self, image1, image2, mask)
def init_features(self):
self.init_hog_maps()
image1_keypoints, self.image1_descriptors = self.hog_map_to_keypoints_and_descriptors(self.hogs1)
image2_keypoints, self.image2_descriptors = self.hog_map_to_keypoints_and_descriptors(self.hogs2)
self.set_features(image1_keypoints, image2_keypoints)
def match_features(self):
        '''Brute-force match the two descriptor sets. The BFMatcher
        parameters below (Hamming norm, cross-check) follow the OpenCV
        docs' recommendation for ORB-style binary descriptors; whether
        they suit HOG equally well is untested.'''
bf_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
kp_matches = bf_matcher.match(self.image1_descriptors, self.image2_descriptors)
out_image = np.zeros((self.image1.shape[0] + self.image2.shape[0], self.image1.shape[1], 3))
match_image = cv2.drawMatches(self.image1, self.features1, self.image2, self.features2, kp_matches, out_image)
feature_matches = FeatureMatch.cv_matches_to_feature_matches(kp_matches, self.features1, self.features2)
self.set_matches(feature_matches)
def init_hog_maps(self):
blur_image1 = cv2.GaussianBlur(cv2.cvtColor(self.image1, cv2.COLOR_RGB2GRAY), HOGMatch.GAUSSIAN_BLUR_WINDOW, HOGMatch.GAUSSIAN_BLUR_STD_DEV)
blur_image2 = cv2.GaussianBlur(cv2.cvtColor(self.image2, cv2.COLOR_RGB2GRAY), HOGMatch.GAUSSIAN_BLUR_WINDOW, HOGMatch.GAUSSIAN_BLUR_STD_DEV)
grad_x1, grad_y1 = CVMath.get_image_gradients(blur_image1, 1, 1)
mags1 = CVMath.get_gradient_mags(grad_x1, grad_y1)
phase1 = np.mod(CVMath.get_phase_image(grad_x1, grad_y1), pi)
grad_x2, grad_y2 = CVMath.get_image_gradients(blur_image2, 1, 1)
mags2 = CVMath.get_gradient_mags(grad_x2, grad_y2)
phase2 = np.mod(CVMath.get_phase_image(grad_x2, grad_y2), pi)
small_hogs1 = self.create_small_hog_map(phase1, mags1)
small_hogs2 = self.create_small_hog_map(phase2, mags2)
self.hogs1 = self.create_large_hog_map(small_hogs1)
self.hogs2 = self.create_large_hog_map(small_hogs2)
'''creates an un-normalized gradient magnitude and angle histogram using small
self.small_window x self.small_window size windows. Output is an array that corresponds
to the [i,j]th small window'''
def create_small_hog_map(self, phase, mags):
small_hogs = np.zeros((mags.shape[0]//self.small_window, mags.shape[1]//self.small_window, HOGMatch.NUM_BINS))
for x in range(0, small_hogs.shape[0]):
for y in range(0, small_hogs.shape[1]):
phase_window = phase[x * self.small_window : (x+1) * self.small_window, y * self.small_window : (y+1) * self.small_window]
mags_window = mags[x * self.small_window : (x+1) * self.small_window, y * self.small_window : (y+1) * self.small_window]
windowed_hist = HOG.HOG_window(phase_window, mags_window, HOGMatch.NUM_BINS)#self.create_windowed_histogram(phase_window, mags_window)
small_hogs[x,y] = windowed_hist
return small_hogs
'''
def create_windowed_histogram(self, phase_window, mags_window):
hist = np.zeros((HOGMatch.NUM_BINS))
flat_phases = phase_window.flatten()
flat_mags = mags_window.flatten()
for i in range(0, flat_phases.shape[0]):
lower_hist_index = int(flat_phases[i]/HOGMatch.THETA_PER_INDEX)
upper_hist_index = lower_hist_index + 1 if lower_hist_index < hist.shape[0] - 1 else 0
proportion_to_lower_index = (flat_phases[i] - (HOGMatch.THETA_PER_INDEX * lower_hist_index))/HOGMatch.THETA_PER_INDEX
proportion_to_upper_index = 1.0 - proportion_to_lower_index
hist[lower_hist_index] += proportion_to_lower_index * float(flat_mags[i])
hist[upper_hist_index] += proportion_to_upper_index * float(flat_mags[i])
return hist
'''
    '''Returns a normalized small_hogs.shape[0] x small_hogs.shape[1] x
    (NUM_BINS * num_aggregate_windows) matrix. At index [i,j] the small
    HOGs of the aggregate window are concatenated into one vector of
    length num_aggregate_windows * num_bins; each slice of n*num_bins to
    (n+1)*num_bins is a single HOG vector (i.e. all indexes equal mod
    num_bins belong to the same angle).'''
def create_large_hog_map(self, small_hogs):
big_hogs = np.zeros((small_hogs.shape[0], small_hogs.shape[1], HOGMatch.NUM_BINS * self.num_aggregate_windows))
for x in range(0, big_hogs.shape[0] - self.aggregate_window_size + 1):
for y in range(0, big_hogs.shape[1] - self.aggregate_window_size + 1):
small_hogs_window = small_hogs[x:x+self.aggregate_window_size, y:y+self.aggregate_window_size]
window_hog_vector = small_hogs_window.flatten()
big_hogs[x,y] = window_hog_vector/np.linalg.norm(window_hog_vector)
return big_hogs
'''for each index of the hog map, creates a keypoint point centered on the window's center
where it is placed on the image. Also returns the HOG descriptors for each keypoint'''
def hog_map_to_keypoints_and_descriptors(self, hog_map):
kps = []
descriptors = []
window_margin = self.small_window//2
for x in range(0, hog_map.shape[0]):
for y in range(0, hog_map.shape[1]):
append_kp = cv2.KeyPoint((y*self.small_window) + window_margin, (x*self.small_window) + window_margin, self.small_window)
kps.append(append_kp)
descriptors.append(hog_map[x,y])
descriptors = np.asarray(descriptors)
'''opencv's descriptor matching method requires the vectors to be uint8'''
descriptors *= 255
descriptors = descriptors.astype(np.uint8)
return kps, descriptors
    '''Takes the list of descriptors and attempts to transform them so as
    to make HOG rotationally invariant. Globally transforms all HOGs in
    image1 to best fit the HOGs of image2. (Note: it may be better to do
    this per comparison, i.e. when two descriptors are scored for match
    quality, transform one to optimally fit the other. The drawback is
    that the rotation would not be robust to noise; the rotation needed
    for each HOG is likely largely uniform, since rotating the image
    rotates everything in it.) Likely not robust to very large rotations
    (> 90 degrees?)
    '''
def fit_descriptors(self):
return None
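# Hedged usage sketch (assumes MatchType.__init__ stores the images/mask
# and that callers drive the pipeline in this order):
# matcher = HOGMatch(image1, image2, mask, small_window=8, num_aggregate_windows=4)
# matcher.init_features()
# matcher.match_features()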
| UTF-8 | Python | false | false | 7,559 | py | 26 | HOGMatch.py | 26 | 0.679455 | 0.662786 | 0 | 129 | 57.596899 | 150 |
Shahakbk/Deep_Learning-VQA_Multimodal_Problems | 9,998,683,877,413 | 7977aab735710ec75d721de336fe8d80320a8e46 | c6a8ece842d9bc73558c4050298f9c533033bdbd | /main.py | 56141c82c44232f6c695e04d316b53c8981bbdb4 | []
| no_license | https://github.com/Shahakbk/Deep_Learning-VQA_Multimodal_Problems | b5b9bad4771e0c5aff9a8197d437203388fb55ad | fabe8f237fcecd6ff80d4a3d41337082ef44aff1 | refs/heads/master | 2022-04-03T13:57:00.180425 | 2020-01-24T09:31:30 | 2020-01-24T09:31:30 | 233,609,386 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import get_data
import data_prep
import images_prep
import utils
import train
def main():
device = utils.check_cuda() # Check for CUDA availability
get_data.get_data() # Load the data
data_prep.prep_qa() # Normalize the Q&A and create json & vocabularies
images_prep.prep_images() # Pre-process the images
train.train() # Start the training process
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 416 | py | 8 | main.py | 7 | 0.673077 | 0.673077 | 0 | 17 | 23.470588 | 75 |
kernel-panic96/Dungeons-Pythons | 18,382,460,031,314 | c4ffde7a8846e21aa7edfb7c5bdbde7cb9453f54 | f0dcaee4d747ff184974fdb9298a1f9e0b715e43 | /tests/cells/tests_empty_cell.py | 863056de75faeb871f3b36535de8b82c5d981caa | []
| no_license | https://github.com/kernel-panic96/Dungeons-Pythons | c3a00a8025c0b7a7e02975eadfb8676840a5dbcf | d09be30886369c10d922aca4e1453d98e7b5cb73 | refs/heads/master | 2020-03-13T09:04:34.010035 | 2018-05-03T13:27:48 | 2018-05-03T13:27:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from src.cells.empty_cell import EmptyCell
from unittest.mock import Mock
class OccupiableEmptyCellTests(unittest.TestCase):
def test_str_on_empty_cell_returns_symbol(self):
self.assertEqual(str(EmptyCell(row=0, col=0)), EmptyCell.sym)
def test_cell_with_occupant_should_trigger_occupant_event(self):
cell = EmptyCell(row=0, col=0)
cell.occupant = Mock()
cell.trigger_enter_event(Mock())
        # assert_called must be invoked; as a bare attribute access the
        # original line checked nothing
        cell.occupant.trigger_enter_event.assert_called()
def test_occupant_trigger_return_true_replace_occupant_with_new_obj(self):
cell = EmptyCell(row=0, col=0)
occupant = Mock()
occupant._on_enter.return_value = True
cell.occupant = occupant
new_occupant = Mock()
new_occupant.sym = 't'
cell.trigger_enter_event(new_occupant)
self.assertEqual(cell.sym, 't')
def test_occupant_trigger_return_false_do_not_replace_occupant(self):
cell = EmptyCell(row=0, col=0)
occupant = Mock()
occupant._on_enter.return_value = False
cell.occupant = occupant
new_occupant = Mock()
new_occupant.sym = 't'
cell.trigger_enter_event(new_occupant)
self.assertEqual(cell.sym, EmptyCell.sym)
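# Hedged addition: lets this module be run directly; the original project
# may rely on an external test runner instead.
if __name__ == '__main__':
    unittest.main()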
| UTF-8 | Python | false | false | 1,263 | py | 26 | tests_empty_cell.py | 24 | 0.655582 | 0.649248 | 0 | 42 | 29.071429 | 78 |
neuromorphs/grill-lmu | 1,047,972,070,495 | a0aca0d6963af90162d807f44d326a38142cdedc | 8137160e1452949c7d6d1b1925ef8b44da4babb2 | /basic/ldn-nengo-python.py | bfc6b2bee9b16b9a2dcee5f98228eae4a54deec2 | [
"MIT"
]
| permissive | https://github.com/neuromorphs/grill-lmu | 8ecda9cf84bf723d72781abec214b9ff848b8797 | 102e2f7684e36d0f043624c9a2e5816b91bc8de0 | refs/heads/master | 2022-12-15T09:22:16.264288 | 2020-09-11T22:08:18 | 2020-09-11T22:08:18 | 286,575,744 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import nengo
import numpy as np
import scipy.linalg
from scipy.special import legendre
class LDN(nengo.Process):
def __init__(self, theta, q, size_in=1):
self.q = q # number of internal state dimensions per input
self.theta = theta # size of time window (in seconds)
self.size_in = size_in # number of inputs
# Do Aaron's math to generate the matrices
# https://github.com/arvoelke/nengolib/blob/master/nengolib/synapses/analog.py#L536
A = np.zeros((q, q))
B = np.zeros((q, 1))
for i in range(q):
B[i] = (-1.)**i * (2*i+1)
for j in range(q):
A[i,j] = (2*i+1)*(-1 if i<j else (-1.)**(i-j+1))
self.A = A / theta
self.B = B / theta
super().__init__(default_size_in=size_in, default_size_out=q*size_in)
def make_step(self, shape_in, shape_out, dt, rng, state=None):
state = np.zeros((self.q, self.size_in))
# Handle the fact that we're discretizing the time step
# https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
Ad = scipy.linalg.expm(self.A*dt)
Bd = np.dot(np.dot(np.linalg.inv(self.A), (Ad-np.eye(self.q))), self.B)
# this code will be called every timestep
def step_legendre(t, x, state=state):
state[:] = np.dot(Ad, state) + np.dot(Bd, x[None, :])
return state.T.flatten()
return step_legendre
def get_weights_for_delays(self, r):
# compute the weights needed to extract the value at time r
# from the network (r=0 is right now, r=1 is theta seconds ago)
r = np.asarray(r)
m = np.asarray([legendre(i)(2*r - 1) for i in range(self.q)])
return m.reshape(self.q, -1).T
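# Hedged aside (not in the original): with q=6, get_weights_for_delays([0, 0.5, 1])
# returns a (3, 6) matrix; applied to the 6 Legendre coefficients it reads the
# window out at "now", half a theta ago, and a full theta ago, exactly how the
# readout connection below uses it.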
model = nengo.Network()
with model:
stim = nengo.Node(nengo.processes.WhiteSignal(period=10, high=4), size_out=1)
ldn = nengo.Node(LDN(theta=0.5, q=6), size_in=1)
nengo.Connection(stim, ldn, synapse=None)
times = [0, 0.5, 1]
w = ldn.output.get_weights_for_delays(times)
readout = nengo.Node(None, size_in=len(times))
nengo.Connection(ldn, readout, transform=w, synapse=None)
p_stim = nengo.Probe(stim)
p_ldn = nengo.Probe(ldn)
p_readout = nengo.Probe(readout)
sim = nengo.Simulator(model)
with sim:
sim.run(10)
import matplotlib.pyplot as plt
plt.figure(figsize=(12,8))
plt.subplot(3, 1, 1)
plt.plot(sim.trange(), sim.data[p_stim])
plt.ylabel('stimulus')
plt.subplot(3, 1, 2)
plt.plot(sim.trange(), sim.data[p_ldn])
plt.ylabel('state $m$')
plt.subplot(3, 1, 3)
theta = ldn.output.theta
for t in times:
plt.plot(sim.trange()+theta*t, sim.data[p_stim], ls='--')
plt.gca().set_prop_cycle(None)
plt.plot(sim.trange(), sim.data[p_readout])
plt.ylabel('readout of history\n(dotted is ideal)')
plt.xlabel('time (s)')
plt.show() | UTF-8 | Python | false | false | 2,913 | py | 5 | ldn-nengo-python.py | 2 | 0.606934 | 0.59183 | 0 | 84 | 33.690476 | 99 |
Tim232/PyExecJS | 13,383,118,096,485 | 410c1c2826e97e9ca6d4aa4061b360fbaa8abbdb | f69b53bd4fda4671760e375111923c846d3e112c | /test_execjs.py | 146865a696c140aac76ca1f7d8b5abe32330998a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/Tim232/PyExecJS | 458d08d969b583c6d4066db5a7323647bfcf6d43 | ab4f962898e0c4cbccf9447c0246d1b721aec9d3 | refs/heads/master | 2023-01-28T21:50:10.659696 | 2020-12-06T08:06:38 | 2020-12-06T08:06:38 | 318,981,752 | 1 | 0 | MIT | true | 2020-12-06T08:02:29 | 2020-12-06T08:02:28 | 2020-12-05T23:02:11 | 2020-04-04T06:49:05 | 154 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
# -*- coding: ascii -*-
from __future__ import unicode_literals
import sys
import os
import doctest
import six
import execjs
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class RuntimeTestBase:
def test_context_call(self):
context = self.runtime.compile("id = function(v) { return v; }")
self.assertEqual("bar", context.call("id", "bar"))
def test_nested_context_call(self):
context = self.runtime.compile("a = {}; a.b = {}; a.b.id = function(v) { return v; }")
self.assertEqual("bar", context.call("a.b.id", "bar"))
def test_context_call_missing_function(self):
context = self.runtime.compile("")
with self.assertRaises(execjs.Error):
context.call("missing")
def test_exec(self):
self.assertIsNone(self.runtime.exec_("1"))
self.assertIsNone(self.runtime.exec_("return"))
self.assertIsNone(self.runtime.exec_("return null"))
self.assertIsNone(self.runtime.exec_("return function() {}"))
self.assertIs(0, self.runtime.exec_("return 0"))
self.assertIs(True, self.runtime.exec_("return true"))
self.assertEqual("hello", self.runtime.exec_("return 'hello'"))
self.assertEqual([1, 2], self.runtime.exec_("return [1, 2]"))
self.assertEqual({"a": 1, "b": 2}, self.runtime.exec_("return {a:1,b:2}"))
self.assertEqual("\u3042", self.runtime.exec_('return "\u3042"')) # unicode char
self.assertEqual("\u3042", self.runtime.exec_(r'return "\u3042"')) # unicode char by escape sequence
self.assertEqual("\\", self.runtime.exec_('return "\\\\"'))
def test_eval(self):
self.assertIsNone(self.runtime.eval(""))
self.assertIsNone(self.runtime.eval(" "))
self.assertIsNone(self.runtime.eval("null"))
self.assertIsNone(self.runtime.eval("function(){}"))
self.assertIs(0, self.runtime.eval("0"))
self.assertIs(True, self.runtime.eval("true"))
self.assertEqual([1, 2], self.runtime.eval("[1, 2]"))
self.assertEqual([1, None], self.runtime.eval("[1, function() {}]"))
self.assertEqual("hello", self.runtime.eval("'hello'"))
self.assertEqual(["red", "yellow", "blue"], self.runtime.eval("'red yellow blue'.split(' ')"))
self.assertEqual({"a": 1, "b": 2}, self.runtime.eval("{a:1, b:2}"))
self.assertEqual({"a": True}, self.runtime.eval("{a:true,b:function (){}}"))
self.assertEqual("\u3042", self.runtime.eval('"\u3042"'))
self.assertEqual("\u3042", self.runtime.eval(r'"\u3042"'))
self.assertEqual(r"\\", self.runtime.eval(r'"\\\\"'))
def test_compile(self):
context = self.runtime.compile("foo = function() { return \"bar\"; }")
self.assertEqual("bar", context.exec_("return foo()"))
self.assertEqual("bar", context.eval("foo()"))
self.assertEqual("bar", context.call("foo"))
def test_this_is_global_scope(self):
self.assertIs(True, self.runtime.eval("this === (function() {return this})()"))
self.assertIs(True, self.runtime.exec_("return this === (function() {return this})()"))
def test_compile_large_scripts(self):
body = "var foo = 'bar';\n" * (10 ** 4)
code = "function foo() {\n" + body + "\n};\nreturn true"
self.assertTrue(self.runtime.exec_(code))
def test_syntax_error(self):
with self.assertRaises(execjs.Error):
self.runtime.exec_(")")
def test_thrown_exception(self):
with self.assertRaises(execjs.Error):
self.runtime.exec_("throw 'hello'")
def test_broken_substitutions(self):
s = '#{source}#{encoded_source}#{json2_source}'
self.assertEqual(s, self.runtime.eval('"' + s + '"'))
class DefaultRuntimeTest(unittest.TestCase, RuntimeTestBase):
def setUp(self):
self.runtime = execjs
class NodeRuntimeTest(unittest.TestCase, RuntimeTestBase):
def setUp(self):
self.runtime = execjs.get('Node')
class NashornRuntimeTest(unittest.TestCase, RuntimeTestBase):
def setUp(self):
self.runtime = execjs.get('Nashorn')
class PhantomJSRuntimeTest(unittest.TestCase, RuntimeTestBase):
def setUp(self):
self.runtime = execjs.get('PhantomJS')
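# The suites above reuse RuntimeTestBase as a mixin: each concrete TestCase
# only pins self.runtime, so every runtime faces identical assertions. A
# hedged sketch for covering one more runtime (the name is an assumption):
# class JScriptRuntimeTest(unittest.TestCase, RuntimeTestBase):
#     def setUp(self):
#         self.runtime = execjs.get('JScript')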
class CommonTest(unittest.TestCase):
def test_empty_path_environ(self):
"""
Some version of passenger-nginx set PATH empty.
"""
orig_path = os.environ['PATH']
try:
del os.environ['PATH']
ctx = execjs.compile("""
function add(x, y) {
return x + y;
}
""")
ctx.call("add", 1, 2)
finally:
os.environ['PATH'] = orig_path
def test_runtime_availability(self):
r = execjs.ExternalRuntime("fail", ["nonexistent"], "")
self.assertFalse(r.is_available())
r = execjs.ExternalRuntime("success", ["python"], "")
self.assertTrue(r.is_available())
def test_attributes_export(self):
for name in execjs.__all__:
self.assertTrue(hasattr(execjs, name), "{} is not defined".format(name))
def load_tests(loader, tests, ignore):
if six.PY3:
tests.addTests(doctest.DocTestSuite(execjs))
return tests
if __name__ == "__main__":
unittest.main()
| UTF-8 | Python | false | false | 5,376 | py | 8 | test_execjs.py | 4 | 0.603423 | 0.591146 | 0 | 141 | 37.12766 | 109 |
NAMEs/Python_Note | 9,526,237,510,349 | 6fa0da0ddc29822afee8709e49fcab970c32fa85 | 27b4d1b7723845812111a0c6c659ef87c8da2755 | /爬虫/爬虫学习02/1_爬虫原理和数据抓取/实例/test02_通过有道词典制作英文转中文翻译器.py | 420f4de5a32c5c4ad1b59decf2e8e6b6d2170f51 | []
| no_license | https://github.com/NAMEs/Python_Note | 59a6eff7b4287aaef04bd69fbd4af3faf56cccb4 | f560e00af37c4f22546abc4c2756e7037adcc40c | refs/heads/master | 2022-04-11T09:32:17.512962 | 2020-03-17T09:30:58 | 2020-03-17T09:30:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
存在问题, 详情可以参照 ”../爬虫学习01/19.py"
"""
from urllib import request, parse
import json
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
key = input("请输入你先查询的单词:")
data = {
"i": key,
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": "15529942853861",
"sign": "df98d565e70c7ab3740ea176f0c5036b",
"ts": "1552994285386",
"bv": "33a62fdcf6913d2da91495dad54778d1",
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_REALTlME",
"typoResult": "false",
}
# data 数据编码
form_data = parse.urlencode(data).encode("utf-8")
# 构建请求,需要注明 method 参数
req = request.Request(url, data=form_data, headers=headers, method="POST")
response = request.urlopen(req)
html = response.read().decode()
# json 数据转换为 字典
html = json.loads(html)
for result in html["translateResult"]:
for i in result:
print("{0} --> {1}".format(i['src'], i['tgt']))
| UTF-8 | Python | false | false | 1,278 | py | 1,119 | test02_通过有道词典制作英文转中文翻译器.py | 505 | 0.607445 | 0.518613 | 0 | 48 | 22.583333 | 136 |
jitendra-singh01/awsgcpremediscripts | 19,378,892,447,242 | 4dd3181e56133a950ebdf4c2233b47a55ec7ffa9 | 7957aaf01a5fc9260990fad2b742a55e9593bba0 | /aws/iam/list_customer_policy.py | 75cb3201bcc7a4fa549b52887db30dcdce100eb3 | []
| no_license | https://github.com/jitendra-singh01/awsgcpremediscripts | 57f916c5fb82e826446cfa1bf9044825a52236b4 | c9aa09e6a1011dde004292a44c026d99b0a93108 | refs/heads/master | 2020-08-31T15:12:30.595045 | 2019-12-19T08:45:33 | 2019-12-19T08:45:33 | 218,719,491 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
from datetime import datetime,timezone
import re
client = boto3.client('iam')
i = 1;
policy_resource_vul = []
policy_action_vul = []
policy_not_vul = []
action_vul_str = "Policy {} is performing all actions. Please specify needed actions."
resource_vul_str = "Policy {} is providing permission tp all resources. Please specify needed resources."
not_vul_str = "Policy {}, has healthy policy statement."
process_policies = 10
max_item = 10
def list_out_iam_policy(next=""):
global i,max_item
count = 1;
if next != "":
policies = client.list_policies(Scope='Local',Marker=next,MaxItems=max_item)
else:
policies = client.list_policies(Scope='Local',MaxItems=max_item)
istruncate = policies['IsTruncated']
tpolicies = policies['Policies']
for p in tpolicies:
get_policy_response = client.get_policy(PolicyArn=p['Arn'])
version_id = get_policy_response['Policy']['DefaultVersionId']
# Get default version of policy
get_policy_version_response = client.get_policy_version(
PolicyArn=p['Arn'],
VersionId=version_id,
)
policy_document = get_policy_version_response['PolicyVersion']['Document']
statements = policy_document['Statement']
#@print(policy_document['Statement'])
for statement in statements:
resource = None
action = None
has_action = 0
has_resource = 0
if isinstance(statement,dict):
if 'Resource' in statement:
resource = str(statement['Resource'])
if "Action" in statement:
action = str(statement['Action'])
if action is not None and re.match(r"[*]",action):
has_action = has_action + 1
if resource is not None and re.match(r"[*]",resource):
has_resource = has_resource + 1
if has_resource>0:
policy_resource_vul.append(resource_vul_str.format(p['PolicyName']))
if has_action>0:
policy_action_vul.append(action_vul_str.format(p['PolicyName']))
if has_action == 0 and has_resource == 0:
policy_not_vul.append(not_vul_str.format(p['PolicyName']))
i = i+1
count = count + 1
if i%10 == 0:
print("Processed "+str(i) + " policies")
if count == len(policies):
if istruncate == True:
list_out_iam_policy(policies['Marker'])
list_out_iam_policy()
print("Processed "+str(i) + " policies")
print("Showing calculated result ....")
if(len(policy_action_vul) > 0):
print("############## Need to specify actions by action name "+str(len(policy_action_vul))+" out of "+len(i-1)+"###########.")
for p in policy_action_vul:
print(p)
if(len(policy_resource_vul) > 0):
print("############## Need to specify resource by name to "+str(len(policy_resource_vul))+" out of "+len(i-1)+"###########")
for p in policy_resource_vul:
print(p)
if(len(policy_not_vul) > 0):
print("############## "+str(len(policy_not_vul))+" Policies are good out of "+str(i-1)+"###########")
for p in policy_not_vul:
print(p) | UTF-8 | Python | false | false | 2,858 | py | 16 | list_customer_policy.py | 13 | 0.660602 | 0.651155 | 0 | 88 | 31.488636 | 127 |
ncm34/smart-seating | 4,346,506,924,812 | a0c1a4184b456ffdb751e309f48c1b839ae1c6a5 | 5550321db4b965c80364be0dae5a73fc7c26da11 | /backend/request.py | ddbc06c06e2f0f27e38321cfd93fb78b9b372495 | []
| no_license | https://github.com/ncm34/smart-seating | 5f9b94c67d9aa47e2109c91df25b528807a87946 | a5224821178689a3861dca5a46d24a2bd6e6dc01 | refs/heads/master | 2021-01-10T07:02:54.019559 | 2015-12-13T18:26:32 | 2015-12-13T18:26:32 | 43,028,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
from flask import Flask
import re
from flask_mail import Mail, Message
app = Flask(__name__)
# app.run(host='localhost', port=5000, request_handler=runtime.MyFancyRequestHandler)
app.config.update(
DEBUG=True,
#EMAIL SETTINGS
MAIL_SERVER='smtp.gmail.com',
MAIL_PORT=465,
MAIL_USE_SSL=True,
MAIL_USERNAME = 'smartseating445@gmail.com',
MAIL_PASSWORD = 'GyH-Rb2-cmY-d9e'
)
# Create mail instance
mail = Mail(app)
contactCard_neel = {'name': 'Neel Mouleeswaran', 'email': 'moulees2@illinois.edu'}
contactCard_srini = {'name': 'Srini Srikumar', 'email': 'srikuma2@illinois.edu'}
contactCard_mitch = {'name': 'Mitchell Appelbaum', 'email': 'appelbm2@illinois.edu'}
contactCard_brady = {'name': 'Brady Salz', 'email': 'salz2@illinois.edu'}
users = { '0A': contactCard_neel, '0B': contactCard_srini, '0C': contactCard_mitch, '0D': contactCard_brady,
'1A': contactCard_neel, '1B': contactCard_srini, '1C': contactCard_mitch, '1D': contactCard_brady,
'2A': contactCard_neel, '2B': contactCard_srini, '2C': contactCard_mitch, '2D': contactCard_brady,
'3A': contactCard_neel, '3B': contactCard_srini, '3C': contactCard_mitch, '3D': contactCard_brady
}
checked_in_users = []
status_string = ['(no student)', '(no student)', '(no student)', '(no student)']
@app.route('/')
def health():
return 'OK'
@app.route('/dash')
def statusStringToRes():
return 'Bench 0: ' + status_string[0] + "\n\n" + 'Bench 1: ' + status_string[1] + "\n\n" + 'Bench 2: ' + status_string[2] + "\n\n" + 'Bench 3: ' + status_string[3]
@app.route('/read/<data>')
def read(data):
global status_string
global checked_in_users
if str(data) in checked_in_users:
return 'Success'
elif str(data) not in checked_in_users:
if str(data) in users:
checked_in_users.append(data)
# print users
# print data
user = users[data]
status_string[int(data[0])] = user['name']
msg = Message("Hi " + user['name'] + ", you checked into eceB-2070, bench #" + data[0] + "!",
sender="smartseating445@gmail.com",
recipients=[user['email']])
msg.body = getMessageBodyForUser(user, data[0])
mail.send(msg)
return 'Success'
else:
return 'Success'
else:
return 'Failure'
@app.route('/reset')
def reset():
global checked_in_users
checked_in_users = []
global status_string
status_string = ['(no student)', '(no student)', '(no student)', '(no student)']
return 'Success' | UTF-8 | Python | false | false | 2,433 | py | 4 | request.py | 4 | 0.663379 | 0.642828 | 0 | 78 | 30.205128 | 164 |
H-Stylo/DevAdvancedTest | 18,708,877,549,672 | d6d531f913b9d87d0d754717ec9da578a999675f | 3318df5cf7fcca5329c43538919ebf4081a6dbaa | /context_manager/context_manager.py | b990b157c4421d894247193ba78abd7904aac6e6 | []
| no_license | https://github.com/H-Stylo/DevAdvancedTest | e8564e499ae7778de336655d42dada31af131282 | f00582c8026053c77c70372a1c8163e9719a79ad | refs/heads/master | 2021-04-09T10:26:47.369030 | 2018-04-03T13:11:43 | 2018-04-03T13:11:43 | 125,430,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # context_manager.py
from sqlite3 import connect
from contextlib import contextmanager
#---------------------------------------------------
def classicContextManager():
"""
Utilsation classique d'un context manager dans le cadre de l'ouverture d'une connexion
ou plus généralement l'ouverture d'un fichier
"""
with connect('context_manager.db') as conn:
cur = conn.cursor()
cur.execute('create table points(x int, y int)')
cur.execute('insert into points(x, y) values (4, 1)')
cur.execute('insert into points(x, y) values (2, 3)')
cur.execute('insert into points(x, y) values (1, 1)')
for row in cur.execute('select x, y from points'):
print(row)
for row in cur.execute('select sum(x * y) from points'):
print(row)
cur.execute('drop table points')
#---------------------------------------------------
@contextmanager
def tempTableGenerator(cur):
"""
Genérateur du context manager permettant de forcer l'ordre du processus de
création et de suppression de la table
"""
print("created table")
cur.execute('create table points(x int, y int)')
try:
yield
finally:
print("dropped table")
cur.execute('drop table points')
####################################################
class TempTable:
"""
Création d'une personalisation d'un context manager pour répondre plus facilement
à la problèmatique de la fonction classicContextManager()
"""
#---------------------------------------------------
def __init__(self, gen):
self.gen = gen
#---------------------------------------------------
def __call__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
return self
#---------------------------------------------------
def __enter__(self):
self.gen_inst = self.gen(*self.args, **self.kwargs)
next(self.gen_inst)
#---------------------------------------------------
def __exit__(self, *args):
next(self.gen_inst, None)
#---------------------------------------------------
def personalisationContextManager():
"""
Utilsation du context manager personalisé pour supprimer le besoin de création
et suppression de la table au sein du code métier
"""
with connect('context_manager.db') as conn:
cur = conn.cursor()
# with TempTable(tempTableGenerator)(cur): # Utilisation de TempTable : pas très esthétique
# et lourd à l'écriture du context context_manager
# --> simplification grace à contextlib possèdant un
# décorateur transformant un decorateur en contextmanager
with tempTableGenerator(cur):
cur.execute('insert into points(x, y) values (4, 1)')
cur.execute('insert into points(x, y) values (2, 3)')
cur.execute('insert into points(x, y) values (1, 1)')
for row in cur.execute('select x, y from points'):
print(row)
for row in cur.execute('select sum(x * y) from points'):
print(row)
####################################################
if __name__ == "__main__":
personalisationContextManager() | UTF-8 | Python | false | false | 2,988 | py | 9 | context_manager.py | 9 | 0.579461 | 0.575084 | 0 | 98 | 29.316327 | 95 |
stryku/hb | 12,970,801,264,383 | c68758933a6eb8f0b1de26439a830fe77e6101eb | e5f10389ed55f7cec382eec92d547b1e8701b850 | /client_server/scripts.py | 2abdc6323fd2c5b6ba1e9deb71ef68fb069d5025 | [
"MIT"
]
| permissive | https://github.com/stryku/hb | 16a21f9282f3246092e84aa5f3c0a7cc12ce04e5 | 9bfea9ecbe31f2e5c620e90434c037a2f38f6356 | refs/heads/master | 2021-07-12T12:46:49.203011 | 2017-10-14T11:56:38 | 2017-10-14T11:56:38 | 103,791,760 | 0 | 0 | null | false | 2017-09-17T18:22:50 | 2017-09-17T00:08:50 | 2017-09-17T18:00:36 | 2017-09-17T18:22:50 | 203 | 0 | 0 | 0 | Python | null | null | import consts
import utils
def textcleaner(in_file, out_file):
command = 'textcleaner -g -e stretch -f 25 -o 20 -t 30 -s 1 -T'.split()
command.append(in_file)
command.append(out_file)
#command = [consts.SCRIPTS_DIR + 'run_cleaner.sh',
# in_file,
# out_file]
return utils.run_process(command)
def tesseract(filename):
command = ['tesseract', '-l', 'hb', '--tessdata-dir', '.', filename, 'stdout']
return utils.run_process(command)
| UTF-8 | Python | false | false | 491 | py | 30 | scripts.py | 27 | 0.613035 | 0.598778 | 0 | 17 | 27.823529 | 82 |
amolenaar/gaphas | 16,217,796,548,394 | afbbe35d54c2c25902624fac43ffaa920457318d | 80812c8f7cc73ea0a4a9bc17596cc7ff1d4861fc | /tests/test_connector.py | 21483eab49344f2c6299d0d77bea7edc18be917e | [
"Apache-2.0"
]
| permissive | https://github.com/amolenaar/gaphas | e4ad3b3bbc22aa4786e7d0285a377cffa7c1385c | 4ae52b99f03e99bd9f4bd055805a9a09c7c51f91 | refs/heads/master | 2021-01-17T16:17:22.927927 | 2021-01-15T02:49:48 | 2021-01-15T02:49:48 | 230,791 | 11 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gaphas.connector import Handle
def test_handle_x_y():
h = Handle()
assert 0.0 == h.pos.x
assert 0.0 == h.pos.y
| UTF-8 | Python | false | false | 130 | py | 107 | test_connector.py | 76 | 0.607692 | 0.576923 | 0 | 7 | 17.571429 | 35 |
abcxs/ctpn_pytorch | 4,475,355,947,781 | 059568d84fa4c4ed125370e10bba84c5cad4ee8c | 607b5b1edf73780993be4f09272590acd305cfcd | /utils/bbox_coder.py | a7a12a94bbb2e179f287662530de5b9db66776bd | []
| no_license | https://github.com/abcxs/ctpn_pytorch | 1f11a501ce3f9b59e9c65437d9d8c58d8ab202cb | 752dbb2677fecc49713730be6f56cd8e7a48bd79 | refs/heads/master | 2022-11-20T03:58:25.349061 | 2020-07-27T06:09:45 | 2020-07-27T06:09:45 | 282,781,183 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import numpy as np
def encode(bboxes, gt_bboxes):
assert bboxes.size(0) == gt_bboxes.size(0)
bboxes = bboxes.float()
gt_bboxes = gt_bboxes.float()
px = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
py = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
pw = bboxes[..., 2] - bboxes[..., 0]
ph = bboxes[..., 3] - bboxes[..., 1]
gx = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
gy = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
gw = gt_bboxes[..., 2] - gt_bboxes[..., 0]
gh = gt_bboxes[..., 3] - gt_bboxes[..., 1]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
return deltas
def decode(bboxes, deltas, max_shape=None, wh_ratio_clip=16 / 1000):
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
# dw[:, :] = 0
# dx[:, :] = 0
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Compute center of each roi
px = ((bboxes[:, 0] + bboxes[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((bboxes[:, 1] + bboxes[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
# Compute width/height of each roi
pw = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(1).expand_as(dw)
ph = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(1).expand_as(dh)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + pw * dx
gy = py + ph * dy
# Convert center-xy/width/height to top-left, bottom-right
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1])
y1 = y1.clamp(min=0, max=max_shape[0])
x2 = x2.clamp(min=0, max=max_shape[1])
y2 = y2.clamp(min=0, max=max_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
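# Hedged round-trip check (not in the original file): encode() then
# decode() should reproduce the ground-truth box up to clamping.
if __name__ == "__main__":
    anchors = torch.tensor([[0., 0., 16., 16.]])
    gts = torch.tensor([[2., 1., 14., 15.]])
    deltas = encode(anchors, gts)
    restored = decode(anchors, deltas, max_shape=(32, 32))
    assert torch.allclose(restored, gts, atol=1e-4)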
| UTF-8 | Python | false | false | 2,108 | py | 15 | bbox_coder.py | 15 | 0.52277 | 0.479127 | 0 | 68 | 30 | 73 |
aristocrates/pathfinder | 8,469,675,514,562 | 186dbf650e3d4c11f4bcb55b3d617f6a25ab72cd | 7bee2b2273613f08dea6f1954752770fe6904c1e | /scroll.py | 88cdb4939c89191a12325c7dcc240528aab10914 | [
"BSD-3-Clause"
]
| permissive | https://github.com/aristocrates/pathfinder | 66361e82d551aafd840f10cd86e584d010cdb821 | 192390f726f3ebbe7a358052743a43f0b4377064 | refs/heads/master | 2021-09-18T12:56:41.201996 | 2018-07-09T07:59:39 | 2018-07-09T07:59:39 | 104,619,908 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Handles scroll to zoom logic
"""
class Zoom:
"""
Enforces minimum and maximum zoom, and rate of zoom
"""
def __init__(self, zoom_thresh = 120, zoom_min = -5, zoom_max = 30):
# the scroll can be no lower than zero
self.scroll = 0
self.zoom_thresh = zoom_thresh
self.zoom_min = zoom_min
self.zoom_max = zoom_max
self.prev_scroll = None
def change_scroll(self, scroll_amount):
if self.scroll + scroll_amount <= (self.zoom_min * self.zoom_thresh):
self.scroll = self.zoom_min * self.zoom_thresh
elif self.scroll + scroll_amount >= (self.zoom_max * self.zoom_thresh):
self.scroll = self.zoom_max * self.zoom_thresh
else:
self.scroll += scroll_amount
def zoom_delta(self):
        if self.prev_scroll is None:
# first time
scroll_delta = self.scroll
else:
scroll_delta = self.scroll - self.prev_scroll
self.prev_scroll = self.scroll
return scroll_delta / self.zoom_thresh
def zoom_level(self):
return self.scroll // self.zoom_thresh
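# Hedged usage sketch: feed accumulated wheel deltas in, read zoom out.
if __name__ == "__main__":
    zoom = Zoom()
    zoom.change_scroll(240)          # e.g. two wheel notches of 120
    assert zoom.zoom_level() == 2
    assert zoom.zoom_delta() == 2.0  # first call reports the full offset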
| UTF-8 | Python | false | false | 1,140 | py | 8 | scroll.py | 5 | 0.585088 | 0.578947 | 0 | 36 | 30.666667 | 79 |
hshore29/ListStats | 8,040,178,815,782 | 2448bd1fac6e88e7e3b66d8a827de897753d0ace | 1e71bd48c0ba7e14d7b9b3783a5e51509bf4eaa8 | /DB Builder/ftp_module.py | 2b68365d79a4328bc97920212865b104c1884af0 | []
| no_license | https://github.com/hshore29/ListStats | 41f53ddda70c696a67eaad993746aeb33bc8617c | 7313969389ffc6bb6bbceb1dfe7a3fc0d8b21ce3 | refs/heads/master | 2016-08-04T06:56:11.163487 | 2015-06-08T03:27:29 | 2015-06-08T03:27:32 | 22,278,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from ftplib import FTP
class Website:
def __init__(self,domain,path,user,passw):
self.ftp = FTP(domain,user,passw)
self.ftp.cwd(path)
def retr(self,file):
with open(file,'wb') as f:
print "Downloading " + file + "..."
self.ftp.retrbinary('RETR %s' % file, lambda data: f.write(data))
def stor(self,file):
with open(file,'rb') as f:
print "Uploading " + file + "..."
self.ftp.storbinary('STOR %s' % file, f)
| UTF-8 | Python | false | false | 520 | py | 22 | ftp_module.py | 16 | 0.544231 | 0.544231 | 0 | 17 | 29.588235 | 77 |
ggangliu/merge-bin-as-one | 17,695,265,267,529 | 31794bd57b90cc6e224badcc8bfdb3a3dda78868 | b85b28e802c3358e7f8f62a4bc77cbb1df0b5ec8 | /merge-bin-as-one.py | 021bb1b293c6fb8bf125656e334d1548041cf8a4 | []
| no_license | https://github.com/ggangliu/merge-bin-as-one | a3cf2203573331f3fdc3c83859f9fae9c9e3a42e | 3784d86c6ded3c4a753cd07810fc94a6b8bf736b | refs/heads/master | 2020-03-28T01:17:26.607980 | 2018-10-25T01:37:50 | 2018-10-25T01:37:50 | 147,493,644 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
This tool merges all the bin files into one image, avoiding the wasted time of burning every bin one by one each time.
'''
from struct import pack
import os
from tqdm import tqdm,trange
__version__ = "0.1"
bin_map_info = []
#First step:
#Parse the config file to get each bin's file name, length, and start address in flash.
with open('address-map.ini') as config_file:
try:
for line in tqdm(config_file.readlines(), desc="Parsing config"):
if not line.lstrip().startswith('#'):
bin_map_info.append([line.split(',')[0].strip(), line.split(',')[1].strip(), line.split(',')[2].strip()])
finally:
config_file.close()
#print(bin_map_info)
#Create a file filled with binary 1s (0xFF bytes) whose size matches the flash of the DOT
with open('One-Entire-Image.bin', 'wb') as one_image:
for i in trange(0xffffff, desc="Generating bin"):
one_image.write(pack('B', 255))
one_image.close()
#Second step:
#Using that information, read each related file in binary form,
#then write it into the file standing in for the flash, one bin at a time.
for item_bin in tqdm(bin_map_info, desc="Merging bin"):
image_name, image_addr, image_len = item_bin
#pbar.set_description("Processing %s" % image_name)
#print(image_name, image_addr, image_len)
if not os.path.exists('full-loads/'+image_name):
        print("\nFailure: %s does not exist!" % ('full-loads/'+image_name))
break
file_size = os.path.getsize('full-loads/'+image_name)
if (file_size > int(image_len, 16)):
print("%s size(%x) greater than the reserved space(%x), failed!" % (image_name, file_size, int(image_len, 16)))
break
with open('full-loads/'+image_name, 'rb') as bin_F:
bin_content = bin_F.read()
bin_F.close()
with open('One-Entire-Image.bin', 'rb+') as target_F:
target_F.seek(int(image_addr, 16), 0)
target_F.write(bin_content)
target_F.flush()
target_F.close()
    #print(" succeeded.")
os.system("pause")
'''
import pickle
F = open('One-Entire-Image.bin', 'wb')
pickle.dump('ff'*0xfff, F)
F.close()
S = open('One-Entire-Image.bin', 'rb')
E = pickle.load(S)
print(E)
'''
| UTF-8 | Python | false | false | 2,281 | py | 4 | merge-bin-as-one.py | 2 | 0.625164 | 0.617273 | 0 | 68 | 32.514706 | 121 |
didarDOS/mysite | 7,687,991,474,186 | c511e1974793cc214b53207974ae759c30ed589a | 5f5fb72cfbb6fd96426f0bfe21aa87324549dc1a | /Library/forms.py | 20cc44262fd36cb64c273fba53b90ae9f6a5cb9a | []
| no_license | https://github.com/didarDOS/mysite | 9cdb0796ab49823b5ea0bb91a29e6ec8709a4e0f | d0f4f2675b63f238a6aa48e7572ce9880b3d3b01 | refs/heads/master | 2020-07-21T08:03:44.371792 | 2019-09-10T13:40:06 | 2019-09-10T13:40:06 | 206,789,695 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
class BookForm(forms.Form):
name = forms.CharField(max_length=300)
author = forms.CharField(max_length=300)
    description = forms.CharField(widget=forms.Textarea)  # TextInput is a widget, not a field; CharField is the assumed intent
price = forms.FloatField() | UTF-8 | Python | false | false | 208 | py | 13 | forms.py | 11 | 0.721154 | 0.692308 | 0 | 7 | 28.857143 | 44 |
fferegrino/n4j-test-setup | 12,378,095,765,183 | bc02d50472eb3d5d7907aa5771bd034c18934741 | 81801bade1455f94aea02d1239e02caa8c488a33 | /InsertAirports.py | 1551eed8fe3d35f6b4fb027854fa531e2022f5e2 | [
"MIT"
]
| permissive | https://github.com/fferegrino/n4j-test-setup | b7a413ab159cccb6cf2f81c8961a9ce7a947f82b | 12ad0fbfa68c3902e00218203ee943c5315057ff | refs/heads/master | 2020-03-26T12:49:07.001214 | 2018-09-05T16:08:17 | 2018-09-05T16:08:17 | 144,909,454 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
# In[1]:
import pandas as pd
import gc
import argparse
from os.path import join
from FlightsDatabase import FlightsDatabase
from transformations import transform_as_is, transform_split_maps, transform_stringify_second_level
from transformations import results_as_they_are, mapify_second_level
class InsertAirports:
airports = []
airlines = []
@staticmethod
def load_data(location):
if InsertAirports.airlines and InsertAirports.airports:
print("Nothing to load")
return
        print("Loading airport data!")
airports_csv = pd.read_csv(join(location, "airports.csv"))
airlines_csv = pd.read_csv(join(location, "airlines.csv"))
for _, r in airports_csv.sort_values(by='IATA_CODE').iterrows():
a = {
'iata': r['IATA_CODE'],
'name': r['AIRPORT'],
'location': {
'country': r['COUNTRY'],
'state': r['STATE'],
'city': r['CITY'],
'coordinates': {
'latitude': r['LATITUDE'],
'longitude': r['LONGITUDE']
}
}
}
InsertAirports.airports.append(a)
for _, r in airlines_csv.sort_values(by='IATA_CODE').iterrows():
a = {
'iata': r['IATA_CODE'],
'name': r['AIRLINE']
}
InsertAirports.airlines.append(a)
del airlines_csv
del airports_csv
gc.collect()
def insert(self, location, instance):
if len(InsertAirports.airlines) == 0 and len(InsertAirports.airports) == 0:
InsertAirports.load_data(location)
# ## Using *strings*
if instance == "3.4.0-plain":
db_string = FlightsDatabase("bolt://localhost:7341", "neo4j", "tokyo")
ti_strings_airlines, _ = db_string.insert(InsertAirports.airlines)
ti_strings, results_insertion_strings = db_string.insert_transform('''UNWIND {props} AS properties
CREATE (a:Airport)
SET a = properties
RETURN a''', InsertAirports.airports, transform_stringify_second_level,
indexing_script='''CREATE INDEX ON :Airport(iata)''')
tr_strings, results_retrieval_strings = db_string.get_maps('''MATCH (a:Airport)
RETURN a
ORDER BY a.iata;''', mapify_second_level)
db_string.close()
return (ti_strings_airlines, ti_strings, tr_strings)
# ## Using APOC
elif instance == "3.4.0-apoc":
db_apoc = FlightsDatabase("bolt://localhost:7342", "neo4j", "tokyo")
ti_apoc_airlines, _ = db_apoc.insert(InsertAirports.airlines)
ti_apoc, results_insertion_apoc = db_apoc.insert_transform('''UNWIND {props} AS properties
CREATE (a:Airport)
SET a = properties.other
WITH a, properties
CALL apoc.convert.setJsonProperty(a, 'location', properties.location)
RETURN a''', InsertAirports.airports, transform_split_maps,
indexing_script='''CREATE INDEX ON :Airport(iata)''')
tr_apoc, results_retrieval_apoc = db_apoc.get_maps('''MATCH (airport:Airport)
WITH apoc.convert.getJsonProperty(airport, 'location') as map, airport
RETURN { iata: airport.iata, name: airport.name, location: map } as a
ORDER BY airport.iata;''', results_as_they_are)
db_apoc.close()
return (ti_apoc_airlines, ti_apoc, tr_apoc)
# ## Using Maps
elif instance == "3.5.0-maps":
db_maps = FlightsDatabase("bolt://localhost:7343", "neo4j", "tokyo")
ti_maps_airlines, _ = db_maps.insert(InsertAirports.airlines)
ti_maps, results_insertion_maps = db_maps.insert_transform('''UNWIND {props} AS properties
CREATE (a:Airport)
SET a = properties
RETURN a''', InsertAirports.airports, transform_as_is,
indexing_script='''CREATE INDEX ON :Airport(iata)''')
tr_maps, results_retrieval_map = db_maps.get_maps('''MATCH (a:Airport)
RETURN a
ORDER BY a.iata;''', results_as_they_are)
db_maps.close()
return (ti_maps_airlines, ti_maps, tr_maps)
| UTF-8 | Python | false | false | 4,485 | py | 15 | InsertAirports.py | 6 | 0.554515 | 0.548272 | 0 | 127 | 34.314961 | 110 |
Skrypnyk81/stepik | 7,121,055,819,411 | 06d53f15caccfbbe700bc3d691794ab378651c3d | 151febf20034aee3c584c2e7ab687252d099da2f | /count_line.py | c2ce55bdefb00b1fb97449c108e2ae2c1c87f916 | []
| no_license | https://github.com/Skrypnyk81/stepik | da3ef3b96c0df94ccbcbb79ba518e796fee248f4 | 1dcc2cb6555fee67604cb6990345c3b9940b1da3 | refs/heads/master | 2021-06-05T01:19:03.009438 | 2020-08-01T07:34:16 | 2020-08-01T07:34:16 | 134,954,763 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
r = requests.get('https://stepic.org/media/attachments/course67/3.6.2/324.txt')
print(r.text.count('\n'))
| UTF-8 | Python | false | false | 123 | py | 18 | count_line.py | 17 | 0.723577 | 0.658537 | 0 | 4 | 29.75 | 79 |
LukeLry/AlienInvasion | 17,343,077,965,983 | 7ff37cdd1f84189b12dd6fd49f3e6ab5d7e0e096 | 77e94730ce847c6c84da1093675e26364d2fcef8 | /game_stats.py | 81da4c8fc49b1d3bdcf601de1d9c940ba39c035c | []
| no_license | https://github.com/LukeLry/AlienInvasion | ec387a7bc4fb0d3f7779c275813daf2bef638f72 | 903dce893f20c53bf9aed5406e5b21f09f41c12f | refs/heads/master | 2020-03-28T23:34:07.551192 | 2018-09-18T14:03:55 | 2018-09-18T14:03:55 | 149,293,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
class GameStats():
"""跟踪游戏的统计信息"""
def __init__(self,ai_settings):
"""初始化统计信息"""
self.ai_settings=ai_settings
self.reset_stats()
#游戏刚启动时处于活跃状态
self.game_active=False
def reset_stats(self):
"""初始化游戏运行期间可能发生变化的统计信息"""
self.ships_left=self.ai_settings.ship_limit
| UTF-8 | Python | false | false | 471 | py | 2 | game_stats.py | 2 | 0.578667 | 0.570667 | 0 | 15 | 24 | 51 |
alhajri/FUDGE | 1,700,807,062,993 | 301be07817e015e498d7d5c38c966583493d8779 | 2b030cb44b0537b1b34de6951656d612edb4a22c | /xData/constant.py | b6b37f87d402b0273bf60c54ee1d5505ab01462d | [
"BSD-3-Clause"
]
| permissive | https://github.com/alhajri/FUDGE | 2c389912addc28ddde51cf7ba455164e47574c89 | 9566131c37b45fc37f5f8ad07903264864575b6e | refs/heads/master | 2021-08-31T20:39:25.834146 | 2017-12-22T19:50:56 | 2017-12-22T19:50:56 | 115,145,816 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # <<BEGIN-copyright>>
# Copyright (c) 2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by the LLNL Nuclear Data and Theory group
# (email: mattoon1@llnl.gov)
# LLNL-CODE-683960.
# All rights reserved.
#
# This file is part of the FUDGE package (For Updating Data and
# Generating Evaluations)
#
# When citing FUDGE, please use the following reference:
# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, "Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10.1016/j.nds.2012.11.008
#
#
# Please also read this link - Our Notice and Modified BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of LLNS/LLNL nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract with the U.S.
# Department of Energy (DOE). This work was produced at Lawrence Livermore
# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
#
# 2. Neither the United States Government nor Lawrence Livermore National Security,
# LLC nor any of their employees, makes any warranty, express or implied, or assumes
# any liability or responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents that its use
# would not infringe privately-owned rights.
#
# 3. Also, reference herein to any specific commercial products, process, or services
# by trade name, trademark, manufacturer or otherwise does not necessarily constitute
# or imply its endorsement, recommendation, or favoring by the United States Government
# or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the United States Government or
# Lawrence Livermore National Security, LLC, and shall not be used for advertising or
# product endorsement purposes.
#
# <<END-copyright>>
__metaclass__ = type
from numericalFunctions import pointwiseXY_C as pointwiseXY_CModule
floatToShortestString = pointwiseXY_CModule.floatToShortestString
from . import base as baseModule
from . import axes as axesModule
class constant( baseModule.xDataFunctional ) :
def __init__( self, _constant, domainMin, domainMax, axes = None, label = None ) :
baseModule.xDataFunctional.__init__( self, self.moniker, label = label, axes = axes )
self.constant = _constant
if( isinstance( domainMin, int ) ) : domainMin = float( domainMin )
        if( not( isinstance( domainMin, float ) ) ) : raise TypeError( 'domainMin not a float instance' )
self.__domainMin = domainMin
if( isinstance( domainMax, int ) ) : domainMax = float( domainMax )
        if( not( isinstance( domainMax, float ) ) ) : raise TypeError( 'domainMax not a float instance' )
self.__domainMax = domainMax
def copy( self ) :
axes = self.axes
if( axes is not None ) : axes = self.axes.copy( )
return( self.__class__( self.constant, self.domainMin, self.domainMax, axes = axes, label = self.label ) )
__copy__ = copy
__deepcopy__ = __copy__
@property
def constant( self ) :
return( self.__constant )
@constant.setter
def constant( self, _constant ) :
if( isinstance( _constant, int ) ) : _constant = float( _constant )
        if( not( isinstance( _constant, float ) ) ) : raise TypeError( 'constant not a float instance' )
self.__constant = _constant
@property
def domainMin( self ) :
return( self.__domainMin )
@property
def domainMax( self ) :
return( self.__domainMax )
@property
def domainUnit( self ) :
return( self.getAxisUnitSafely( self.dimension ) )
@property
def rangeMin( self ) :
return( self.constant )
rangeMax = rangeMin
@property
def rangeUnit( self ) :
return( self.getAxisUnitSafely( 0 ) )
def fixDomainPerUnitChange( self, factors ) :
self.__domainMin *= factors[self.dimension]
self.__domainMax *= factors[self.dimension]
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
valueFormatter = kwargs.get( 'valueFormatter', floatToShortestString )
significantDigits = kwargs.get( 'significantDigits', 15 )
attributeStr = baseModule.xDataCoreMembers.attributesToXMLAttributeStr( self )
XMLList = [ '%s<%s%s constant="%s" domainMin="%s" domainMax="%s">' % ( indent, self.moniker, attributeStr,
valueFormatter( self.constant, significantDigits = significantDigits ),
valueFormatter( self.domainMin, significantDigits = significantDigits ),
valueFormatter( self.domainMax, significantDigits = significantDigits ) ) ]
if( self.isPrimaryXData( ) ) :
if( self.axes is not None ) : XMLList += self.axes.toXMLList( indent = indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
@classmethod
def parseXMLNode( cls, xDataElement, xPath, linkData, axes = None, **kwargs ) :
attrs = { 'constant' : None, 'domainMin' : None, 'domainMax' : None, 'label' : None }
attributes = { 'constant' : float, 'domainMin' : float, 'domainMax' : float, 'label' : str }
for key, item in xDataElement.items( ) :
if( key not in attributes ) : raise TypeError( 'Invalid attribute "%s"' % key )
attrs[key] = attributes[key]( item )
uncertainties = None
for subElement in xDataElement :
if( subElement.tag == 'axes' ) :
axes = axesModule.axes.parseXMLNode( subElement, xPath, linkData )
elif( subElement.tag == 'uncertainties' ) :
from . import uncertainties as uncertaintiesModule
uncertainties = uncertaintiesModule.uncertainties.parseXMLNode( subElement, xPath, linkData )
else :
raise TypeError( 'sub-element "%s" not valid' % subElement.tag )
_constant = attrs.pop( 'constant' )
newConstant = cls( _constant, axes = axes, **attrs )
newConstant.uncertainties = uncertainties
return newConstant
@staticmethod
def parseXMLString( XMLString ) :
from xml.etree import cElementTree
return( constant.parseXMLNode( cElementTree.fromstring( XMLString ), xPath = [], linkData = {} ) )
class constant1d( constant ) :
moniker = 'constant1d'
dimension = 1
def convertUnits( self, unitMap ) :
"""
        unitMap is a dictionary of the form { 'eV' : 'MeV', 'b' : 'mb' }.
"""
factors = self.axes.convertUnits( unitMap )
self.constant *= factors[0]
self.fixDomainPerUnitChange( factors )
self.fixValuePerUnitChange( factors )
def evaluate( self, x ) :
return( self.constant )
| UTF-8 | Python | false | false | 8,605 | py | 335 | constant.py | 150 | 0.678094 | 0.669611 | 0 | 207 | 40.570048 | 321 |
pranavsachdev/Python_Programming_UseCases | 10,685,878,641,037 | 5696cc1907b09a8fa02348e4e0f9dafb04366f85 | cea23ceabe566f9b893b474913fe1b2f3f6ef141 | /Day_3_Numberswith4.py | 845ffe86fcd6793c2a7c8afa4d90c329c3411e3b | []
| no_license | https://github.com/pranavsachdev/Python_Programming_UseCases | eba7b620fc3db88dc2cfe409f0758a9ca44ab4f7 | 09f2c0a0fb2abe1b58225f7cc149d6dbb79fb862 | refs/heads/master | 2020-09-02T22:08:16.075206 | 2019-11-04T11:10:18 | 2019-11-04T11:10:18 | 219,316,962 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 07:37:40 2019
@author: prana
"""
def count_if_four(num):
    present = 0
    for i in range(1, num+1):
        if check_for_4(i):
            present += 1
    return present
def check_for_4(x):
    # return True when any decimal digit of x is 4
    while (x != 0):
        if (x%10 == 4):
            return True
        x = x // 10
    return False
# count_if_four(2000)
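# sanity check: count_if_four(9) == 1, since only the number 4 itself
# contains the digit 4 in 1..9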
if __name__ == "__main__":
for _ in range(int(input())):
print(count_if_four(int(input())))
| UTF-8 | Python | false | false | 477 | py | 5 | Day_3_Numberswith4.py | 4 | 0.48847 | 0.427673 | 0 | 25 | 18.08 | 42 |
travishen/sectw | 2,052,994,398,614 | 8b8092666cebdf03ecbec5c7bb92f5011e351025 | 490b6d6b37cbe36e7b74b9a8d1f1f8d9abcedfb0 | /sectw/util.py | afbee0f65587be781d9a79f9d72db3e18f94cd65 | [
"MIT"
]
| permissive | https://github.com/travishen/sectw | 4e548d8187a4bc4484a547e44cf3e7fb4a20efcd | 862fb79594cdd9eb129d00a61fa433860ead3a22 | refs/heads/master | 2021-10-09T08:20:05.758126 | 2018-12-22T02:48:26 | 2018-12-22T03:02:05 | 108,830,464 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import re
import json
import six
import logging
from collections import namedtuple
from .database.model import hook, Version
from .database import config
class Address(object):
TOKEN_RE = re.compile('''
(?:
(?P<value>.+?)
)
(?:
(?P<unit>地號|地段|小段|區段|鎮段|鎮區|市區|[縣市鄉鎮市區村里段號])
)
''', re.X)
VALUE = 0
UNIT = 1
GLOBAL_REPLACE_RE = re.compile('''
[ 台]
|
[0-9]
''', re.X)
NO_HYPHEN_REPLACE_RE = re.compile('''
[之–—]
''', re.X)
NO_NUM_REPLACE_RE = re.compile('''
(?:
[一二三四五六七八九]?
[一二三四五六七八九]?
十?
[一二三四五六七八九]
)
(?=-|號|地號|$)
''', re.X)
# the strs matched but not in here will be removed
TO_REPLACE_MAP = {
'之': '-', '–': '-', '—': '-',
'台': '臺',
'1': '1', '2': '2', '3': '3', '4': '4', '5': '5',
'6': '6', '7': '7', '8': '8', '9': '9', '0': '0',
'一': '1', '二': '2', '三': '3', '四': '4', '五': '5',
'六': '6', '七': '7', '八': '8', '九': '9',
}
CHINESE_NUMERALS_SET = set('一二三四五六七八九')
@staticmethod
def normalize(s):
if isinstance(s, six.binary_type):
s = s.decode('utf-8')
def replace(m):
found = m.group()
if found in Address.TO_REPLACE_MAP:
return Address.TO_REPLACE_MAP[found]
return ''
def replace_num(m):
found = m.group()
if found in Address.TO_REPLACE_MAP:
return Address.TO_REPLACE_MAP[found]
            if found[0] in Address.CHINESE_NUMERALS_SET or found[0] == '十':
                # for '十一' to '九十九' ('十' is not in CHINESE_NUMERALS_SET,
                # so it must be accepted here or 11-19 would be dropped)
                len_found = len(found)
                if len_found == 2:
                    return '1' + Address.TO_REPLACE_MAP[found[1]]
                if len_found == 3:
                    return Address.TO_REPLACE_MAP[found[0]] + Address.TO_REPLACE_MAP[found[2]]
return ''
s = Address.GLOBAL_REPLACE_RE.sub(replace, s)
s = Address.NO_HYPHEN_REPLACE_RE.sub(replace, s)
while True:
replaced = Address.NO_NUM_REPLACE_RE.sub(replace_num, s)
if s == replaced:
break
s = replaced
return s
@staticmethod
def tokenize(addr_str, normalize=True):
if normalize:
addr_str = Address.normalize(addr_str)
return Address.TOKEN_RE.findall(addr_str)
def __init__(self, addr_str, normalize=True):
self.tokens = Address.tokenize(addr_str, normalize)
def __len__(self):
return len(self.tokens)
@staticmethod
def flat(tokens, sarg=None, *sargs):
return ''.join(''.join(token) for token in tokens[slice(sarg, *sargs)])
def pick_to_flat(self, *idxs):
return ''.join(''.join(self.tokens[idx]) for idx in idxs)
def __repr__(self):
return 'Address(%r)' % Address.flat(self.tokens)
LandType = namedtuple('LandType', ['name', 'units', 'digit'])
_types = (
('county', ['縣', '市']),
('town', ['鄉', '鎮', '區', '市區', '鎮區']),
('village', ['村', '里']),
('section', ['段', '地段', '區段', '鎮段']),
('small_section', ['小段']),
('number', ['號', '地號']),
)
_land_types = [LandType(item[0], item[1], i) for i, item in enumerate(_types)]
class LandAddress(Address):
TOKEN_RE = re.compile('''
(?:
(?P<value>..+?)
)
(?:
(?P<unit>[縣市鄉鎮市區村里])
)
''', re.X)
S_TOKEN_RE = re.compile('''
(?:
(?P<value>.+?)
)
(?:
(?P<unit>地段|段|小段|地號|號)
)
''', re.X)
SEP_SIGN = ','
def __init__(self, addr_str, normalize=False):
super(LandAddress, self).__init__(addr_str, normalize)
for land_type in _land_types:
setattr(self, land_type.name, self.get_match(self.tokens, land_type.units))
def __repr__(self):
return 'LandAddress(%r)' % self.flat()
def pick_to_flat(self, *digits):
return ''.join(''.join(getattr(self, _land_types[d].name)) for d in digits)
@staticmethod
def get_digit(unit):
for land_type in _land_types:
if unit in land_type.units:
return land_type.digit
return None
@staticmethod
def singularize_address(tokens):
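        # split one token stream into one address per parcel: a cut is made
        # wherever the unit level resets (e.g. a '號' token followed by a
        # '縣' token starts a new address)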
def flag(ts):
flags = []
for i, t in enumerate(ts):
try:
cut_here = LandAddress.get_digit(t[1]) - LandAddress.get_digit(ts[i + 1][1]) > 0
flags.append(cut_here)
except IndexError:
flags.append(True)
return [ts[i] + (f,) for i, f in enumerate(flags)]
def pre_flat(ts):
results = []
fr = 0
for i, t in enumerate(ts):
to = i + 1
if t[2]:
results.append((fr, to))
fr = to
return results
flagged_tokens = flag(tokens)
to_flat = pre_flat(flagged_tokens)
return [Address.flat(tokens, fr, to) for fr, to in to_flat]
@staticmethod
def get_match(tokens, units):
def get_first_match(lst):
return next(iter(lst or []), ('', ''))
def get_all_matches(ts, us):
return [(t[Address.VALUE], t[Address.UNIT]) for t in ts if t[Address.UNIT] in us]
all_matches = get_all_matches(tokens, units)
return get_first_match(all_matches)
class Directory(object):
def __init__(self, csv_path):
self.version = Directory.load_csv(csv_path)
@staticmethod
def load_csv(csv_path):
with open(csv_path, 'r') as file:
return json.load(file, object_hook=hook)
def load_db(self, db_path, create_date=None):
try:
config.setup_session(db_path)
with config.session_scope() as session:
if not create_date:
latest_version = Version.get_latest_version(session)
create_date = latest_version.date
self.version = Version.get_version(session, create_date)
except Exception as e:
logging.exception(e)
def find(self, addr_str, take=1):
# state the costs of each type of error for fuzzy_counts sorting
costs = (3, 1, 1)
def sum_cost(fuzzy_counts):
return sum(map(lambda x_y: x_y[0]*x_y[1], zip(fuzzy_counts, costs)))
land_addr = LandAddress(addr_str, normalize=True)
county = land_addr.pick_to_flat(0)
town = land_addr.pick_to_flat(1)
section = land_addr.pick_to_flat(3)
small_section = land_addr.pick_to_flat(4)
number = land_addr.number
if county:
counties = self.version.find(county)
else:
counties = self.version.counties
towns = []
if town:
for c in counties:
towns += c.find(town)
else:
for c in counties:
towns += c.towns
sections = []
if section:
for t in towns:
for s in t.sections:
s.count_section_fuzzy(section)
if small_section:
s.count_small_section_fuzzy(small_section)
sections.append(s)
sections.sort(key=lambda x: sum_cost(x.section_fc))
if small_section:
sections.sort(key=lambda x: sum_cost(x.small_section_fc))
elif small_section:
for t in towns:
for s in t.sections:
s.count_small_section_fuzzy(small_section)
sections.append(s)
sections.sort(key=lambda x: sum_cost(x.small_section_fc))
digit = ''
if number[0]:
digits = number[0].split('-')
if len(digits) == 1:
digits.append('')
digit = digits[0].zfill(4) + digits[1].zfill(4)
return [(s.code6,
s.code7,
s.code6 + digit if digit else '',
s.code7 + digit if digit else '') for s in sections[:take]]
def find_complex(self, addr_str, take=1):
def singularize_number(addr_str):
ins = LandAddress(addr_str, normalize=False)
if ins.number:
value = re.sub(r'[.、;,+及和]|以及', LandAddress.SEP_SIGN, ins.number[0])
ns = [n for n in re.split(LandAddress.SEP_SIGN, value) if n]
# clear other unit's value
front_str = ins.pick_to_flat(0, 1, 2, 3, 4)
front_str = ''.join(e for e in front_str if e.isalnum())
return [front_str + n + ins.number[1] for n in ns]
return []
# separate addresses
tokens = Address.tokenize(addr_str, normalize=False)
addresses = LandAddress.singularize_address(tokens)
parsed_addresses = []
for address in addresses:
parsed_addresses += singularize_number(address)
return [(Address.normalize(address), self.find(address, take=take)) for address in parsed_addresses]
| UTF-8 | Python | false | false | 9,658 | py | 14 | util.py | 9 | 0.500961 | 0.492419 | 0 | 333 | 27.048048 | 108 |
bokulich-lab/RESCRIPt | 4,483,945,869,901 | 953a59146948a093d893b041eaa5a92c119c036a | 820b7409521d0f2517877a2998df11578d62d48a | /rescript/tests/test_filter_length.py | f151ac9d97d4be93ef2dbcdfaf4b371a4107a696 | [
"BSD-3-Clause"
]
| permissive | https://github.com/bokulich-lab/RESCRIPt | 94c49f0f12806909c2588e12fdac97d44b11b493 | c7c6b4e8b0d7e2f1654806e733de5326757b442f | refs/heads/master | 2023-06-10T20:04:03.845458 | 2023-06-05T06:45:42 | 2023-06-05T06:45:42 | 183,673,700 | 61 | 22 | BSD-3-Clause | false | 2023-07-19T16:18:48 | 2019-04-26T18:05:50 | 2023-07-19T12:08:37 | 2023-06-05T09:27:10 | 6,453 | 66 | 21 | 18 | Python | false | false | # ----------------------------------------------------------------------------
# Copyright (c) 2019-2023, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import qiime2
import pandas.util.testing as pdt
from qiime2.plugin.testing import TestPluginBase
from q2_types.feature_data import DNAIterator
from qiime2.plugins import rescript
from rescript.filter_length import _seq_length_within_range
import_data = qiime2.Artifact.import_data
# These tests mostly check the plugin action and validation steps; individual
# filtering options and edge cases for length filtering tested below.
class TestFilterByTaxonomy(TestPluginBase):
package = 'rescript.tests'
def setUp(self):
super().setUp()
self.seqs = import_data(
'FeatureData[Sequence]', self.get_data_path('derep-test.fasta'))
self.taxa = import_data(
'FeatureData[Taxonomy]', self.get_data_path('derep-taxa.tsv'))
# test that nested taxonomic filtering works.
# The test seqs contain a few different taxa and some seqs of different
# lengths; this filters based on multiple criteria, and ensures that nested
# filtering works when more stringent filters are applied for genus/species
# level than at kingdom level.
def test_filter_seqs_length_by_taxon_nested(self):
# if a sequence matches multiple taxonomic terms in the search, we
# grab the most stringent: longest minimum length and/or shortest
# maximum length for filtering.
labels = ['Bacteria', 'Paenibacillus', 'vaginalis', 's__casei']
min_lens = [270, 295, 295, 250]
max_lens = [500, 500, 500, 290]
global_min = 270
global_max = 500
filtered, failed = rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=labels,
min_lens=min_lens, max_lens=max_lens, global_min=global_min,
global_max=global_max)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {'B1', 'B1b', 'C1', 'C2'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B2', 'B3', 'B1a',
'C1a', 'C1b', 'C1c', 'C1d'}
self.assertEqual(failed_ids, exp_failed_ids)
# this test also tests that nested filtering works, but covers two other
# cases:
# 1. keep any seqs that match NONE of the search terms. This case is only
# looking for Lactobacillaceae, so all of the Paenibacillaceae should be
# ignored and retained without checking length (no global filters)
# 2. ensure that the most stringent filter is applied when more specific
# labels are searched: stringent family overrides lenient species. In
# this case, s__acidilacti is an extraneous label (less stringent filter
# than the genus-level filter), but s__damnosus can't get away!
def test_filter_seqs_by_taxon_nested_keep_taxa_without_label_hit(self):
labels = ['f__Lactobacillaceae', 's__acidilacti', 's__pseudocasei']
# all seqs are len=291 nt, except for s__acidilacti and an unknown
# f__Lactobacillaceae that will be filtered here; the more lenient
# s__acidilacti filter is ignored. The more stringent s__pseudocasei
# max_len filter, however, gets applied (as already tested above).
min_lens = [270, 260, 1]
max_lens = [500, 500, 280]
filtered, failed = rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=labels,
min_lens=min_lens, max_lens=max_lens, global_min=None,
global_max=None)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B1a', 'B2',
'B3', 'C1', 'C2'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'B1b', 'C1a', 'C1b', 'C1c', 'C1d'}
self.assertEqual(failed_ids, exp_failed_ids)
# this test makes sure that empty outputs pass
def test_filter_seqs_length_by_taxon_no_seqs_pass_filter(self):
# all seqs are < 300 min_len
filtered, failed = rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=['Bacteria'],
min_lens=[300])
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = set()
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3',
'B1a', 'B1b', 'C1', 'C1a', 'C1b', 'C1c', 'C1d', 'C2'}
self.assertEqual(failed_ids, exp_failed_ids)
# this test makes sure that empty outputs pass
def test_filter_seqs_length_by_taxon_no_failures(self):
# all seqs are > 100 min_len
filtered, failed = rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=['Bacteria'],
min_lens=[100])
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {
'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B1a', 'B1b', 'C1',
'C1a', 'C1b', 'C1c', 'C1d', 'C2'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = set()
self.assertEqual(failed_ids, exp_failed_ids)
def test_filter_seqs_length_by_taxon_no_filters_error(self):
with self.assertRaisesRegex(
ValueError, "No filters were applied.*min_lens, max_lens."):
rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=['Bacteria'])
def test_filter_seqs_length_by_taxon_index_mismatch_error(self):
missing_taxa = import_data(
'FeatureData[Taxonomy]',
self.taxa.view(pd.Series).drop(['C1', 'C2']))
with self.assertRaisesRegex(ValueError, "IDs are missing.*C1, C2"):
rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=missing_taxa,
labels=['Bacteria'], min_lens=[1200])
def test_filter_seqs_length_by_taxon_min_lens_mismatch(self):
with self.assertRaisesRegex(
ValueError, "labels and min_lens must contain"):
rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa, labels=['Bacteria'],
min_lens=[1200, 100])
def test_filter_seqs_length_by_taxon_max_lens_mismatch(self):
with self.assertRaisesRegex(
ValueError, "labels and max_lens must contain"):
rescript.actions.filter_seqs_length_by_taxon(
sequences=self.seqs, taxonomy=self.taxa,
labels=['Bacteria', 'Archaea'], min_lens=None, max_lens=[300])
class TestSeqLengthWithinRange(TestPluginBase):
package = 'rescript.tests'
def setUp(self):
super().setUp()
# fake seq; only length matters here
self.seq = 'A' * 265
# List of taxa filter search terms that match sequence's taxonomy.
# This will always max keys of min/max_lens, as tested above, so we
# just make that assumption in these tests.
self.taxahits = ['Bacteria', 'Paenibacillus']
def test_seq_length_within_range_min_len_false(self):
# taxid mins filtering criteria fail
self.assertFalse(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits,
mins={'Bacteria': 270, 'Paenibacillus': 260}, maxs=None,
global_min=None, global_max=None))
def test_seq_length_within_range_min_len_true(self):
# taxid mins filtering criteria pass
self.assertTrue(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits,
mins={'Bacteria': 260, 'Paenibacillus': 260}, maxs=None,
global_min=None, global_max=None))
def test_seq_length_within_range_max_len_false(self):
# taxid maxs filtering criteria fail
self.assertFalse(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits,
mins=None, maxs={'Bacteria': 270, 'Paenibacillus': 260},
global_min=None, global_max=None))
def test_seq_length_within_range_max_len_true(self):
# taxid maxs filtering criteria pass
self.assertTrue(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits,
mins=None, maxs={'Bacteria': 270, 'Paenibacillus': 270},
global_min=None, global_max=None))
def test_seq_length_within_range_hypothetical_true_no_filters(self):
# never get here, no limits
self.assertTrue(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits, mins=None,
maxs=None, global_min=None, global_max=None))
def test_seq_length_within_range_global_max_true(self):
# global_max pass
self.assertTrue(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits, mins=None,
maxs=None, global_min=None, global_max=270))
def test_seq_length_within_range_global_max_false(self):
# global_max fail
self.assertFalse(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits, mins=None,
maxs=None, global_min=None, global_max=260))
def test_seq_length_within_range_global_min_true(self):
# global_max pass
self.assertTrue(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits, mins=None,
maxs=None, global_min=260, global_max=None))
def test_seq_length_within_range_global_min_false(self):
# global_max fail
self.assertFalse(
_seq_length_within_range(
sequence=self.seq, taxahits=self.taxahits, mins=None,
maxs=None, global_min=270, global_max=None))
# This method is just a vsearch wrapper with basic validation, so save on tests
class TestFilterGlobally(TestPluginBase):
package = 'rescript.tests'
def setUp(self):
super().setUp()
# Lengths: 12 X 291 nt; 4 X 264 nt
self.seqs = import_data(
'FeatureData[Sequence]', self.get_data_path('derep-test.fasta'))
def test_filter_seqs_length_by_taxon_no_filters_error(self):
with self.assertRaisesRegex(
ValueError, "No filters were applied.*global_min, global_max"):
rescript.actions.filter_seqs_length(self.seqs)
def test_filter_seqs_length_by_min_length(self):
# filter out seqs < 270 nt (N = 4)
filtered, failed = rescript.actions.filter_seqs_length(
self.seqs, global_min=270, global_max=None)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3',
'B1a', 'B1b', 'C1', 'C2'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'C1a', 'C1b', 'C1c', 'C1d'}
self.assertEqual(failed_ids, exp_failed_ids)
def test_filter_seqs_length_by_max_length(self):
# filter out seqs > 280 nt (N = 12)
filtered, failed = rescript.actions.filter_seqs_length(
self.seqs, global_min=None, global_max=280)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {'C1a', 'C1b', 'C1c', 'C1d'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3',
'B1a', 'B1b', 'C1', 'C2'}
self.assertEqual(failed_ids, exp_failed_ids)
# this test makes sure that empty outputs pass
def test_filter_seqs_length_all_filtered_out(self):
# all seqs are < 270 or > 280
filtered, failed = rescript.actions.filter_seqs_length(
self.seqs, global_min=270, global_max=280)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = set()
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = {'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3',
'B1a', 'B1b', 'C1', 'C1a', 'C1b', 'C1c', 'C1d', 'C2'}
self.assertEqual(failed_ids, exp_failed_ids)
# this test makes sure that empty outputs pass
def test_filter_seqs_length_no_failures(self):
# all seqs are > 100 min_len
filtered, failed = rescript.actions.filter_seqs_length(
self.seqs, global_min=100, global_max=300)
filtered_ids = {
seq.metadata['id'] for seq in filtered.view(DNAIterator)}
exp_filtered_ids = {
'A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B1a', 'B1b', 'C1',
'C1a', 'C1b', 'C1c', 'C1d', 'C2'}
self.assertEqual(filtered_ids, exp_filtered_ids)
failed_ids = {seq.metadata['id'] for seq in failed.view(DNAIterator)}
exp_failed_ids = set()
self.assertEqual(failed_ids, exp_failed_ids)
class TestFilterTaxa(TestPluginBase):
package = 'rescript.tests'
def setUp(self):
super().setUp()
self.taxa = import_data(
'FeatureData[Taxonomy]', self.get_data_path('derep-taxa.tsv'))
def test_filter_taxa_invalid(self):
with self.assertRaisesRegex(ValueError, "No filtering criteria"):
filtered, = rescript.actions.filter_taxa(self.taxa)
def test_filter_taxa_by_ids(self):
ids = pd.Index(['A1', 'B1'], name='Feature ID')
ids_to_keep = qiime2.Metadata(pd.DataFrame(index=ids))
exp_taxa = pd.Series(
['k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__chondroitinus',
'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; '
'f__Lactobacillaceae; g__Lactobacillus; s__brevis'],
index=ids, name='Taxon')
filtered, = rescript.actions.filter_taxa(
self.taxa, ids_to_keep=ids_to_keep)
pdt.assert_series_equal(filtered.view(pd.Series), exp_taxa)
def test_filter_taxa_by_ids_invalid_ids(self):
ids = pd.DataFrame(
index=pd.Index(['A1', 'B1', 'D5'], name='Feature ID'))
ids_to_keep = qiime2.Metadata(ids)
with self.assertRaisesRegex(ValueError, "IDs are missing.*D5"):
filtered, = rescript.actions.filter_taxa(
self.taxa, ids_to_keep=ids_to_keep)
def test_filter_taxa_by_include(self):
ids = pd.Index(['C1', 'C2'], name='Feature ID')
exp_taxa = pd.Series(
['k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; '
'f__Lactobacillaceae; g__Pediococcus; s__damnosus'] * 2,
index=ids, name='Taxon')
filtered, = rescript.actions.filter_taxa(
self.taxa, include=['damnosus'])
pdt.assert_series_equal(filtered.view(pd.Series), exp_taxa)
# note I slip in a little trick here to test order of operations
# the include statement is run but effectively useless, as exclusion is
# subsequently done at the order level
def test_filter_taxa_by_exclude(self):
ids = pd.Index(['A1', 'A2'], name='Feature ID')
exp_taxa = pd.Series(
['k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__chondroitinus'] * 2,
index=ids, name='Taxon')
filtered, = rescript.actions.filter_taxa(
self.taxa, include=['brevis', 'Paenibacillus'],
exclude=['Lactobacillales', 'alvei'])
pdt.assert_series_equal(filtered.view(pd.Series), exp_taxa)
# but now look here: we exclude taxa, like above, but explicitly add them
# back with ids_to_keep
def test_filter_taxa_by_complex_query(self):
ids = pd.Index(['A1'], name='Feature ID')
ids_to_keep = qiime2.Metadata(pd.DataFrame(index=ids))
exp_taxa = pd.Series(
['k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__chondroitinus',
'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; '
'f__Lactobacillaceae; g__Lactobacillus; s__brevis'],
index=ids.union({'B1'}), name='Taxon')
exp_taxa.index.name = 'Feature ID'
filtered, = rescript.actions.filter_taxa(
self.taxa, ids_to_keep=ids_to_keep, include=['brevis'],
exclude=['o__Bacillales'])
pdt.assert_series_equal(filtered.view(pd.Series), exp_taxa)
def test_filter_taxa_fail_all_filtered_out(self):
with self.assertRaisesRegex(ValueError, "All features were filtered"):
filtered, = rescript.actions.filter_taxa(
self.taxa, exclude=['Bacteria'])
| UTF-8 | Python | false | false | 18,059 | py | 69 | test_filter_length.py | 43 | 0.605958 | 0.588017 | 0 | 381 | 46.39895 | 79 |
nicola-debernardini/LabOfBioinformatics1 | 2,379,411,917,304 | cd53ce8222c4f1272e6ab3590e57bd560c372f18 | fcc8e1b34622f0ad5ee27743073dcf7b5d74662d | /Kunitz_project/Scripts/Set_model_parameters.py | 361b4a49647004371271098945943f6bf26a62c9 | []
| no_license | https://github.com/nicola-debernardini/LabOfBioinformatics1 | 2c69cebb03d1ba5d85db64478c807be25a3827f3 | 4a866f59a5ce692cc82824ef62591aa35a01139a | refs/heads/master | 2020-05-19T07:47:53.480977 | 2019-09-22T08:20:35 | 2019-09-22T08:20:35 | 184,905,491 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/urs/bin/python
############ SCRIPT ##############
# This script determines the optimal threshold for the model on a specific dataset
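# Usage (as implied by the argument parsing below):
#   python Set_model_parameters.py <results_file> [score_column]
# where each line of <results_file> ends with an E-value score and a 0/1
# class label (1 = kunitz, 0 = non-kunitz)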
import sys
import numpy as np
# Function to calculate the confusion matrix
def conf_mat(filename, th, sp = -2, cp =-1): # default arguments define the score position and class position
cm = [[0.0,0.0],[0.0,0.0]] # Confusion matrix
f = open(filename)
for line in f:
v = line.rstrip().split()
if int(v[cp]) == 1:
j = 1 # It belong to the positive set -> kunitz
else:
j = 0 # It belong to the negative set -> non-kunitz
if float(v[sp]) < th:
i = 1 # Predicted as kunitz
else:
i = 0 # Predicted as non-kunitz
# REAL N-K K
# PRED N-K 0,0 TN 0,1 FN
# K 1,0 FP 1,1 TP
cm[i][j] = cm[i][j] +1
return cm
# Function to calculate the performance given a confusion matrix
def print_performance(cm, th):
acc = (cm[0][0]+cm[1][1])/(cm[0][0]+cm[1][1]+cm[1][0]+cm[0][1])
    d = np.sqrt((cm[0][0]+cm[0][1])*(cm[0][0]+cm[1][0])*(cm[1][1]+cm[0][1])*(cm[1][1]+cm[1][0])) # denominator
    if d == 0:
        d = 1
    mc = (cm[0][0]*cm[1][1]-cm[0][1]*cm[1][0])/d # Matthews correlation coefficient
n = float(sum(cm[0])+sum(cm[1]))
FPR = float(cm[1][0])/(float(cm[1][0])+float(cm[0][0])) # FPR = FP /(FP+TN)
TPR = float(cm[1][1])/(float(cm[1][1])+float(cm[0][1])) # TPR = TP /(TP+FN)
print cm, 'TH = ', th, 'Q2 = ', acc, 'MCC = ', mc, 'FPR = ', FPR, 'TPR =', TPR
return mc
# MAIN #
if __name__=='__main__':
filename = sys.argv[1]
sp = -2
    if len(sys.argv) > 2: sp = int(sys.argv[2])-1 # From the command line it is possible to change the default column of the E-value
optimal = float('-inf')
optimal_th = 0
som = 0
recursion = 30
th = 1.0
# Cycle through different threshold and evaluate the performance
for i in range(recursion):
th = th/2.0
cm = conf_mat(filename, th, sp)
som = print_performance(cm, th)
        # Save the threshold generating the highest MCC
if som > optimal:
optimal = som
optimal_th = th
    print '> The optimal threshold is: ', optimal_th
| UTF-8 | Python | false | false | 2,351 | py | 10 | Set_model_parameters.py | 6 | 0.520204 | 0.481923 | 0 | 73 | 31.150685 | 126 |
glory9/miscellaneous | 11,381,663,347,867 | 8e9c19f99fef8459c06d8f494f335eafd5624d1d | ab164bb380108badefc3a9f634cc71302f493bcf | /game-of-life.py | 838d5ae84104d1f89c0fd61c168296606d742139 | []
| no_license | https://github.com/glory9/miscellaneous | 90b580e2a275aed0974de67d5e1e0eecaea7ee18 | 2b4c0086f8b31f4c77616954353badbc17cb3f2d | refs/heads/master | 2022-07-27T11:40:22.943689 | 2020-05-13T21:01:21 | 2020-05-13T21:01:21 | 197,703,528 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def gridGame(grid, k, rules):
    # Assumed semantics (the original draft was incomplete and non-functional):
    # rules[i] == 'alive' means a cell with i live neighbours is alive in the
    # next generation, and k is the number of generations to simulate.
    rows = len(grid)
    cols = len(grid[0]) if rows else 0
    magic = [i for i, rule in enumerate(rules) if rule == 'alive']
    for _ in range(k):
        nxt = [[0] * cols for _ in range(rows)]
        for g in range(rows):
            for h in range(cols):
                count = 0
                # count the live neighbours inside the grid, skipping the cell itself
                for r in range(g - 1, g + 2):
                    for v in range(h - 1, h + 2):
                        if (r, v) != (g, h) and 0 <= r < rows and 0 <= v < cols:
                            count += grid[r][v]
                if count in magic:
                    nxt[g][h] = 1
        grid = nxt
    return grid
#if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
grid_rows = int(input().strip())
grid_columns = int(input().strip())
grid = []
for _ in range(grid_rows):
grid.append(list(map(int, input().rstrip().split())))
k = int(input().strip())
rules_count = int(input().strip())
rules = []
for _ in range(rules_count):
rules_item = input()
rules.append(rules_item)
result = gridGame(grid, k, rules)
print(result)
| UTF-8 | Python | false | false | 1,215 | py | 23 | game-of-life.py | 23 | 0.404115 | 0.390947 | 0 | 54 | 20.462963 | 57 |
pranav102001/IF-ELSE-Python | 5,437,428,621,938 | fa921bd7d0108018f76a7c13cfb306d075d85e81 | 9fa9f799e9437b3bd8d680640267f5fcf4b49f34 | /voting.py | ed43dc987d9d9721c022e4375a60ee1881c22278 | []
| no_license | https://github.com/pranav102001/IF-ELSE-Python | 100479d8f8e67ec220820524e91bdcb0ab31c815 | 291558e402befad739440302440b5ee928aeda20 | refs/heads/main | 2022-12-23T19:44:53.050693 | 2020-10-04T14:15:48 | 2020-10-04T14:15:48 | 301,145,478 | 0 | 0 | null | true | 2020-10-04T14:11:36 | 2020-10-04T14:11:36 | 2020-10-04T12:21:30 | 2020-10-04T12:21:28 | 1 | 0 | 0 | 0 | null | false | false | age = input("Just enter your age!")
if age >= 18:
Print("You are eligible for voting in India!")
else:
Print("You are not eligible for voting!)
| UTF-8 | Python | false | false | 152 | py | 1 | voting.py | 1 | 0.664474 | 0.651316 | 0 | 5 | 29.4 | 50 |
ankurbelbase/django-pushy | 14,723,147,912,608 | 624cc83fef8ca8a6d6ebf41fd8e227ad058ba96b | f5b0b4f242e72a0694d87f6a6da2f6b8158254d4 | /tests/data.py | 92ec1d690ff76e31e661bf05ad2ba7b36efa4a1d | [
"MIT"
]
| permissive | https://github.com/ankurbelbase/django-pushy | 519b1cc257ff9931019d626c4a4ee5402e8a3dcf | ba896c521a960e20175409be67d4527393be13cd | refs/heads/master | 2021-06-23T11:28:52.235206 | 2017-08-06T19:20:19 | 2017-08-06T19:20:19 | 103,645,114 | 0 | 1 | null | true | 2017-09-15T10:28:08 | 2017-09-15T10:28:08 | 2017-08-29T10:59:37 | 2017-08-24T15:57:32 | 94 | 0 | 0 | 0 | null | null | null | from pushjack import (
GCMCanonicalID
)
class ResponseMock:
def __init__(self, status_code):
self.status_code = status_code
self.errors = []
self.canonical_ids = []
def valid_response():
return ResponseMock(200)
def valid_with_canonical_id_response(canonical_id):
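    # pushjack's GCMCanonicalID pairs an old registration id with its
    # replacement; the mock reuses one value for both fields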
canonical_id_obj = GCMCanonicalID(canonical_id, canonical_id)
response = ResponseMock(200)
response.canonical_ids = [canonical_id_obj]
return response
def invalid_with_exception(exc):
response = ResponseMock(400)
response.errors.append(exc)
return response
| UTF-8 | Python | false | false | 593 | py | 27 | data.py | 21 | 0.684654 | 0.669477 | 0 | 27 | 20.962963 | 65 |
IvanStelmakh/toloka-kit | 10,127,532,911,195 | 920479d1d5cddfcf1853f916075d30cb76b368f0 | c4b758f7011480bea589e9088c9bc77d8180cb93 | /src/client/user_skill.py | 3d181f8913bca29d8d6755f5e8a5cd5a74de49b0 | [
"Apache-2.0"
]
| permissive | https://github.com/IvanStelmakh/toloka-kit | 680ee376c2b168d5c9ac28d2f7c927c54ab3f915 | 1d244a69cb053b4a2e1c1856a54cd224a66946a2 | refs/heads/main | 2023-03-11T13:00:14.728576 | 2021-03-01T14:13:45 | 2021-03-01T14:13:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from attr.validators import optional, instance_of
import datetime
from decimal import Decimal
from .primitives.base import attribute, BaseTolokaObject
class SetUserSkillRequest(BaseTolokaObject):
skill_id: str
user_id: str
value: Decimal = attribute(validator=optional(instance_of(Decimal)))
class UserSkill(BaseTolokaObject):
id: str
skill_id: str
user_id: str
value: int
exact_value: Decimal = attribute(validator=optional(instance_of(Decimal)))
created: datetime.datetime
modified: datetime.datetime
| UTF-8 | Python | false | false | 548 | py | 94 | user_skill.py | 91 | 0.751825 | 0.751825 | 0 | 21 | 25.095238 | 78 |
mon4ter/fashionable | 10,333,691,352,799 | fc488129a5cf612a0861f941e1c511081a585336 | 4b8f6588cddf990cf7b43e07b35739a452d35dfd | /src/fashionable/model.py | 886c7675282ab5d83ab1171f53c22d280f82c9bd | [
"MIT"
]
| permissive | https://github.com/mon4ter/fashionable | ddc56dfb0aa549b2e9d2dec0fdf054e8c0f1d524 | 336d918ce835b6fda5459fbba10faf24cac96202 | refs/heads/master | 2021-12-01T03:58:40.659093 | 2021-11-25T19:28:03 | 2021-11-25T19:28:03 | 148,993,734 | 0 | 0 | MIT | false | 2021-11-25T19:28:03 | 2018-09-16T12:32:54 | 2021-10-08T13:04:52 | 2021-11-25T19:28:03 | 107 | 0 | 0 | 0 | Python | false | false | from copy import deepcopy
from itertools import zip_longest
from typing import Any, Dict, Iterable, Mapping, Tuple, Union
from .errors import ValidateError
from .modelmeta import ModelMeta
from .unset import UNSET
from .validation import validate
__all__ = [
'Model',
]
class Model(metaclass=ModelMeta):
@classmethod
def _to_dict(cls, obj: Any) -> dict:
if hasattr(obj, 'to_dict'):
obj = obj.to_dict()
elif hasattr(obj, 'toDict'):
obj = obj.toDict()
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
obj = type(obj)(cls._to_dict(o) for o in (obj.items() if isinstance(obj, dict) else obj))
return obj
def __init__(self, *args, **kwargs):
attributes = getattr(self, '.attributes')
for attr, value in zip(attributes, args):
kwargs.setdefault(attr.name, value)
for attr in attributes:
name = attr.ciname or attr.name
setattr(self, attr.name, next((v for k, v in kwargs.items() if name == k), UNSET))
def __iter__(self):
for attr in getattr(self, '.attributes'):
value = getattr(self, attr.name)
if value is not UNSET:
yield attr.name, value
def __eq__(self, other: Union['Model', Mapping, Iterable, Tuple]):
if not isinstance(other, type(self)):
try:
other = validate(type(self), other, strict=False)
except ValidateError:
return NotImplemented
return all(s == o for s, o in zip_longest(iter(self), iter(other), fillvalue=object()))
def __str__(self):
return '{}({})'.format(type(self).__name__, self._id())
def __repr__(self):
return '{}({})'.format(type(self).__name__, ', '.join('{}={!r}'.format(k, v) for k, v in self))
def __copy__(self) -> 'Model':
return type(self)(**dict(self))
def __deepcopy__(self, *args, **kwargs) -> 'Model':
return type(self)(**{k: deepcopy(v) for k, v in self})
def _id(self):
return getattr(self, getattr(self, '.attributes')[0].name)
def to_dict(self) -> Dict[str, Any]:
return {n: self._to_dict(v) for n, v in self}
toDict = to_dict
| UTF-8 | Python | false | false | 2,262 | py | 24 | model.py | 22 | 0.572944 | 0.572502 | 0 | 71 | 30.859155 | 103 |
Datamine/Perceptron | 1,546,188,256,835 | fb11853fa932846daf467097cc4c82694b39055a | dc9aae92038fede42ef0d3d748e420979c823ef7 | /Image-Generator/gen-squares.py | 772fff7e2d8dc25c239178da12d8eae47fbe3acb | []
| no_license | https://github.com/Datamine/Perceptron | 69782b8414a727c3f53c00eb17a1f2d2e4f9d616 | 1c7766b6a68a9ff53e568a0c6d5f8fae8cd180de | refs/heads/master | 2016-05-19T22:23:27.990086 | 2016-04-05T10:51:18 | 2016-04-05T10:51:18 | 55,475,099 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # John Loeber | contact@johnloeber.com | Python 2.7.10 | April 2016
from plumbing import *
for i in range(ITERATIONS):
# randomly pick three corners (x,y) to define the triangle
# then flip one of the triangle's corners to create a rectangle
while True:
try:
corner_1 = (randint(0,IMG_W), randint(0,IMG_H))
corner_2 = (randint(0,IMG_W), randint(0,IMG_H))
corner_3 = (randint(0,IMG_W), randint(0,IMG_H))
# get the line y = mx+b given by corner_1 and corner_3
# using http://stackoverflow.com/questions/3306838/algorithm-for-reflecting-a-point-across-a-line
m = (corner_3[1] - corner_1[1])/float(corner_3[0] - corner_1[0])
b = corner_1[1] - m*corner_1[0]
d = (corner_2[0] + (corner_2[1]-b)*m)/float(1+m**2)
corner_4 = (int(2*d - corner_2[0]), int(2 * d * m - corner_2[1] + 2 * b))
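            # d is the x-coordinate of the foot of the perpendicular from
            # corner_2 onto y = mx + b; reflecting doubles the displacement
            # to that foot: (2d - x, 2(m*d + b) - y)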
# gotta make sure corner 4 is not out of bounds
if not (0 <= corner_4[0] < IMG_W and 0 <= corner_4[1] < IMG_H):
continue
# check that the area is large enough for analysis (arbitrarily, > 40 sq px)
# n.b. taking the area now will be useful later for testing point-in-triangle
# n.b. we are computing the *signed* area. Heron's formula not appropriate here?
area = (1.0/2) * (-corner_2[1]*corner_3[0] + corner_1[1] * (-corner_2[0] + corner_3[0]) + corner_1[0]* (corner_2[1] - corner_3[1]) + corner_2[0]*corner_3[1])
# check that all three points are unique
if area > 40 and len(set([corner_1, corner_2, corner_3]))==3:
break
except:
continue
print corner_4, corner_2
# to reduce the search space for testing whether or not a point is in the triangle
min_x = min(corner_1[0], corner_2[0], corner_3[0], corner_4[0])
min_y = min(corner_1[1], corner_2[1], corner_3[1], corner_4[1])
max_x = max(corner_1[0], corner_2[0], corner_3[0], corner_4[0])
max_y = max(corner_1[1], corner_2[1], corner_3[1], corner_4[1])
# create a new image
im = Image.new("RGB", (IMG_W, IMG_H), BGCOLOR)
"""
# determine all the coordinates that are inside the triangle
    # using the barycentric method for testing point-in-triangle
# made reference to http://jsfiddle.net/PerroAZUL/zdaY8/1
for x in range(min_x, max_x):
for y in range(min_y, max_y):
s = (corner_1[1] * corner_3[0] - corner_1[0] * corner_3[1] + (corner_3[1] - corner_1[1]) * x + (corner_1[0] - corner_3[0]) * y)
if s >= 0:
t = (corner_1[0] * corner_2[1] - corner_1[1] * corner_2[0] + (corner_1[1] - corner_2[1]) * x + (corner_2[0] - corner_1[0]) * y)
if t >= 0 and (s+t) <= 2 * area:
im.putpixel((x,y), SHAPECOLOR)
# repeating the same process w corner_4 triangle
for x in range(min_x, max_x):
for y in range(min_y, max_y):
s = (corner_1[1] * corner_3[0] - corner_1[0] * corner_3[1] + (corner_3[1] - corner_1[1]) * x + (corner_1[0] - corner_3[0]) * y)
if s >= 0:
t = (corner_1[0] * corner_4[1] - corner_1[1] * corner_4[0] + (corner_1[1] - corner_4[1]) * x + (corner_4[0] - corner_1[0]) * y)
if t >= 0 and (s+t) <= 2 * area:
im.putpixel((x,y), SHAPECOLOR)
"""
for (x,y) in [corner_1, corner_2, corner_3, corner_4]:
try:
if (x,y) == corner_2:
im.putpixel((x,y), (255,0,0))
elif (x,y) == corner_4:
im.putpixel((x,y), (0,255,0))
else:
im.putpixel((x,y), SHAPECOLOR)
except:
continue
# use the timestamp to create a unique filename
im.save("square" + str(int(time())) + str(i) + ".png", "PNG") | UTF-8 | Python | false | false | 3,450 | py | 5 | gen-squares.py | 4 | 0.594203 | 0.532464 | 0 | 75 | 45.013333 | 160 |
FuckBrains/python-note-2019 | 16,466,904,643,801 | f5e22fd88f61105abbcacf7dc0edc86656f936d5 | c235c8a4a655ec19bd68f521b0dcbed36f44ded8 | /code/Python-spider/base/scrapy/proxy_ip/proxy_ip/items.py | ade0e318f32174ee256ba0c296b21052cba9e10e | []
| no_license | https://github.com/FuckBrains/python-note-2019 | d6d83b7ac23a178d0805b0ae94b61482db05390b | 4d4c2c063fee3fc14a4e043db3c989ad36cfe8fc | refs/heads/master | 2023-04-20T16:09:24.240590 | 2020-03-02T10:55:50 | 2020-03-02T10:55:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ProxyIpItem(scrapy.Item):
country = scrapy.Field()
ip = scrapy.Field()
port = scrapy.Field( )
server_location = scrapy.Field()
is_anonymous = scrapy.Field()
protocol_type = scrapy.Field()
speed = scrapy.Field()
connect_time = scrapy.Field()
survival_time = scrapy.Field()
validate_time = scrapy.Field()
source = scrapy.Field()
create_time = scrapy.Field()
def get_insert_sql(self):
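        # returns the parameterized INSERT statement and its value tuple,
        # ready to be passed to a DB-API cursor.execute(sql, params) call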
insert_sql = """
insert into proxy_ip(
country, ip, port, server_location,
is_anonymous, protocol_type, speed, connect_time,
survival_time, validate_time, source, create_time
)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
"""
        params = (
            self["country"], self["ip"], self["port"], self["server_location"],
            self["is_anonymous"], self["protocol_type"], self["speed"], self["connect_time"],
            self["survival_time"], self["validate_time"], self["source"], self["create_time"]
        )
return insert_sql, params | UTF-8 | Python | false | false | 1,293 | py | 48 | items.py | 29 | 0.561485 | 0.560712 | 0 | 38 | 33.052632 | 101 |
RiderMike27/grafiexpress | 13,288,628,839,180 | 0298984068562f3a294542c7d1ca169e311fce6e | 32a790f0b14bc6c95d55af8988f7db7934d890f1 | /ventas/autocomplete.py | 3576dda4ed833dc6059818439fda5c276717765b | []
| no_license | https://github.com/RiderMike27/grafiexpress | f8212db0b306b8ad78320056acd55768172cf5ff | 15e95116599a66f34e65bba95a20417a749b77b8 | refs/heads/master | 2021-07-09T21:48:02.046315 | 2020-01-23T11:58:07 | 2020-01-23T11:58:07 | 235,783,775 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dal import autocomplete
from django.db.models import Q
from ventas.models import *
class RemisionAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
cliente = self.forwarded.get('cliente', None)
if not self.request.user.is_authenticated() or not cliente:
return Remision.objects.none()
qs = Remision.objects.filter(cliente_id=cliente).exclude(pk__in=[i.remision_id for i in VentaRemision.objects.all()])
if self.q:
qs = qs.filter(numero_de_remision__istartswith=self.q)
return qs.order_by("-id")
class VentaAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated():
return Venta.objects.none()
qs = Venta.objects.all()
if self.q:
qs = qs.filter(numero_de_factura__istartswith=self.q)
return qs.order_by("-id")
class FacturaCobroAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
cliente = self.forwarded.get('cliente', None)
if not self.request.user.is_authenticated() or not cliente:
return Venta.objects.none()
qs = Venta.objects.filter(cliente_id=cliente).exclude(saldo=0).exclude(estado=ANULADO)
if self.q:
qs = qs.filter(numero_de_factura__icontains=self.q)
return qs.order_by("-id") | UTF-8 | Python | false | false | 1,406 | py | 262 | autocomplete.py | 211 | 0.662162 | 0.659317 | 0 | 47 | 28.93617 | 125 |
juliebarron/SoftwareDesign | 18,829,136,641,115 | 7fc1ba53dc47fee8998cf7ae9e3e00109ca64382 | a69891f9f457d96beebdc85ce0319eec4bb70a87 | /chap08/rotate.py | a661b1fcc7d28bab5b66ab7c39c980a7309c23b7 | []
| no_license | https://github.com/juliebarron/SoftwareDesign | 2bd40f3d9e70c6bcc967088a92228a845604ab9b | 282fb0e0f274706b470c3b1c2bd37effb1992e0e | refs/heads/master | 2020-12-24T21:54:39.182849 | 2013-12-06T04:19:43 | 2013-12-06T04:19:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def rotate_word(s, n):
word = ''
for letter in s:
u = ord(letter) + n
s = chr(u)
word += s
return word
print rotate_word('cheer', 7) | UTF-8 | Python | false | false | 181 | py | 12 | rotate.py | 11 | 0.458564 | 0.453039 | 0 | 12 | 14.166667 | 29 |
weilancys/qr_gen_gui | 8,151,847,954,988 | f11c30728a388a89f1783108af273551c4446b4f | 3a200602a6394ee7760fa0b6ee5e6b97704b1763 | /setup.py | 9956d4e2082d6f5ad1ce240e038f0d284bff8de9 | [
"MIT"
]
| permissive | https://github.com/weilancys/qr_gen_gui | 5b167a1280da682ceec87ef9baa997b9f77981c5 | 93f747c31b6e74a4d1f532d1dde4a34b476c398e | refs/heads/master | 2023-08-05T15:19:42.228209 | 2021-09-21T05:29:03 | 2021-09-21T05:29:03 | 408,693,955 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name = 'qr_gen',
version = "0.0.1",
author="lbcoder",
author_email="lbcoder@hotmail.com",
description="a simple tool with GUI for generating QR code, for educational purposes.",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/weilancys/qr_gen_gui",
py_modules=['qr_gen', ],
install_requires = [
"qrcode[pil]",
],
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
'gui_scripts': [
'qr_gen = qr_gen:launcher',
],
}
) | UTF-8 | Python | false | false | 820 | py | 4 | setup.py | 2 | 0.589024 | 0.582927 | 0 | 29 | 27.310345 | 91 |