Dataset schema (one row per source file):

| column | type | range |
|---|---|---|
| hexsha | string | 40 chars |
| size | int64 | 1-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | 3-239 chars |
| max_stars_repo_name | string | 5-130 chars |
| max_stars_repo_head_hexsha | string | 40-78 chars |
| max_stars_repo_licenses | list | 1-10 entries |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | 24 chars, nullable |
| max_stars_repo_stars_event_max_datetime | string | 24 chars, nullable |
| max_issues_repo_path | string | 3-239 chars |
| max_issues_repo_name | string | 5-130 chars |
| max_issues_repo_head_hexsha | string | 40-78 chars |
| max_issues_repo_licenses | list | 1-10 entries |
| max_issues_count | int64 | 1-67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | 24 chars, nullable |
| max_issues_repo_issues_event_max_datetime | string | 24 chars, nullable |
| max_forks_repo_path | string | 3-239 chars |
| max_forks_repo_name | string | 5-130 chars |
| max_forks_repo_head_hexsha | string | 40-78 chars |
| max_forks_repo_licenses | list | 1-10 entries |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | 24 chars, nullable |
| max_forks_repo_forks_event_max_datetime | string | 24 chars, nullable |
| content | string | 1-1.03M chars |
| avg_line_length | float64 | 1-958k |
| max_line_length | int64 | 1-1.03M |
| alphanum_fraction | float64 | 0-1 |
---
File: data_processing/__init__.py | 61 bytes | Python (py)
Blob: 4a1cc6430aae49409753e53072d72074d4e901e4
Repo: JHorcasitas/cnn_document_binarization (MIT) @ 075f76aed375ca14a53011f4dfeb12379debb5b3
Stars: 9 (2019-07-21 → 2020-08-21) | Issues: 2 (2019-10-30 → 2020-09-18) | Forks: 3 (2019-07-22 → 2021-06-21)
from .trainer import Trainer
from .evaluator import Evaluator
Stats: avg_line_length 30.5 | max_line_length 32 | alphanum_fraction 0.852459
---
File: rolz_bot/extensions/choose.py | 940 bytes | Python (py)
Blob: 4a1cc8292b3d717a28a5660d68c8047587cfa037
Repo: Reriiru/rolz_org_to_discord (MIT) @ caf860585c551d85729b2d9661e8d801750a7aa5
Stars: 1 (2021-04-15) | Issues: n/a | Forks: 2 (2017-11-05 → 2017-11-20)
import discord
import rolz_bot.format_responses as format_responses
from discord.ext import commands
from rolz_bot.roller import Roller


class Choose(Roller):
    @commands.command(name='choose')
    async def choose(self, *variants: str):
        '''Picks between your variants.'''
        # Re-join the space-split arguments and split on commas so that
        # multi-word variants ("red apple, green pear") stay intact.
        return_variants = ' '.join(variants).split(',')
        dice_query = '1d' + str(len(return_variants))
        right_choice = await self._roll_dice(dice_query)
        response_string = format_responses.choose_string.format(
            return_variants[right_choice['result'] - 1]
        )
        try:
            await self.bot.say(response_string)
        except discord.errors.HTTPException:
            # The chosen variant was too long for a Discord message;
            # fall back to the canned "message too long" response.
            await self.bot.say(format_responses.message_too_long_string)


def setup(bot):
    bot.add_cog(Choose(bot))
Stats: avg_line_length 31.333333 | max_line_length 78 | alphanum_fraction 0.62766
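A note on the command above: it re-joins discord.py's space-split arguments and then splits on commas, so multi-word variants survive. Below is a minimal, dependency-free sketch of that logic with the roll stubbed out; random.randint stands in for Roller._roll_dice, which is an assumption, not the bot's actual roller.

import random

def pick_variant(words):
    # Mirror the command's parsing: join the word tuple, split on commas.
    variants = [v.strip() for v in ' '.join(words).split(',')]
    result = random.randint(1, len(variants))  # stub for the bot's 1dN roll
    return variants[result - 1]

print(pick_variant(('red', 'apple,', 'green', 'pear')))  # 'red apple' or 'green pear'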
---
File: tests/test_authentication.py | 7,160 bytes | Python (py)
Blob: 4a1cc85ba1df64c28dcffbc0e7cd28446034d0ee
Repo: codingedward/book-a-meal-api (MIT) @ 36756abc225bf7e8306330f2c3e223dc32af7869
Stars: n/a | Issues: n/a | Forks: 2 (2018-10-01 → 2020-12-07)
import json
from app import create_app, db
from app.models import User, UserType
from .base import BaseTest
class AuthenticationTestCase(BaseTest):
"""This will test authentication endpoints"""
def setUp(self):
self.app = create_app(config_name='testing')
self.client = self.app.test_client()
self.headers = {'Content-Type' : 'application/json'}
with self.app.app_context():
db.create_all()
def data(self):
return json.dumps({
'username': 'John',
'email': 'john@doe.com',
'password': 'secret',
'password_confirmation': 'secret'
})
def test_can_register(self):
"""Test user can register"""
res = self.client.post(
'api/v1/auth/signup',
data=self.data(),
headers=self.headers
)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully registered account', res.data)
def test_cannot_register_without_email(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_without(['email']),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'email field is required', res.data)
def test_cannot_register_with_invalid_email(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_with({'email': 'hi'}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'email must be a valid email address', res.data)
def test_cannot_register_without_username(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_without(['username']),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'username field is required', res.data)
def test_cannot_register_without_password(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_without(['password']),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'password field is required', res.data)
def test_cannot_register_without_password_confirmation(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_without(['password_confirmation']),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'confirmation does not match', res.data)
def test_cannot_register_without_password_confirmation_matching(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_with({'password_confirmation': 'hi'}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'confirmation does not match', res.data)
def test_cannot_register_without_long_password(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_with({
'password': 'hi',
'password_confirmation': 'hi'
}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'password must be at least', res.data)
def test_cannot_register_without_string_username(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data_with({'username': 12}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'username may contain only letters', res.data)
def test_can_login(self):
"""Test user can login"""
user, headers = self.authUser()
res = self.client.post(
'api/v1/auth/login',
data=json.dumps({
'email': user['email'],
'password': 'secret'
}),
headers=self.headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Successfully logged in', res.data)
def test_cannot_login_without_email(self):
"""Test user can login"""
user, headers = self.authUser()
res = self.client.post(
'api/v1/auth/login',
data=json.dumps({
'password': 'secret'
}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'email field is required', res.data)
def test_cannot_login_without_password(self):
"""Test user can login"""
user, headers = self.authUser()
res = self.client.post(
'api/v1/auth/login',
data=json.dumps({
'email': user['email'],
}),
headers=self.headers
)
self.assertEqual(res.status_code, 400)
self.assertIn(b'password field is required', res.data)
def test_can_get_user(self):
"""Test user can login"""
user, headers = self.authUser()
res = self.client.get(
'api/v1/auth',
headers=headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Successfully retrieved user', res.data)
def test_can_logout(self):
"""Test user can logout"""
user, headers = self.authUser()
res = self.client.delete(
'api/v1/auth/logout',
headers=headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Successfully logged out', res.data)
def test_can_request_password_reset(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data(),
headers=self.headers
)
self.assertEqual(res.status_code, 201)
res = self.client.post(
'api/v1/auth/password-reset',
data=self.data(),
headers=self.headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Password reset created', res.data)
def test_can_make_password_reset(self):
res = self.client.post(
'api/v1/auth/signup',
data=self.data(),
headers=self.headers
)
self.assertEqual(res.status_code, 201)
res = self.client.post(
'api/v1/auth/password-reset',
data=self.data(),
headers=self.headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Password reset created', res.data)
json_res = self.to_dict(res)
res = self.client.put(
'api/v1/auth/password-reset',
data=json.dumps({
'token': json_res['token'],
'password': 'secret2',
'password_confirmation': 'secret2'
}),
headers=self.headers
)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Password successfully reset', res.data)
def tearDown(self):
with self.app.app_context():
db.drop_all()
Stats: avg_line_length 33.302326 | max_line_length 74 | alphanum_fraction 0.574441
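The tests above lean on helpers inherited from BaseTest (data_with, data_without, authUser, to_dict), which is not part of this dump. The following is a minimal sketch of what those helpers plausibly do, purely as an assumption for readability, not the project's actual code:

import json
import unittest

class BaseTest(unittest.TestCase):
    """Hypothetical reconstruction of the helpers used above."""

    def data_with(self, overrides):
        # Default signup payload with some fields overridden.
        payload = json.loads(self.data())
        payload.update(overrides)
        return json.dumps(payload)

    def data_without(self, fields):
        # Default signup payload with some fields removed.
        payload = json.loads(self.data())
        for field in fields:
            payload.pop(field, None)
        return json.dumps(payload)

    def to_dict(self, res):
        # Decode a test-client response body as JSON.
        return json.loads(res.data.decode())

    # authUser() -- register a user and return (user, auth headers) --
    # is also provided by the real base class but is omitted here.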
---
File: home_server/manage.py | 631 bytes | Python (py)
Blob: 4a1cc8f990467348a10504b8eed55901dcef38b9
Repo: dkoleber/WebcamOracle (Unlicense) @ 972f86b7ea341adcee6b068c537296e0d51d6b51
Stars: 2 (2019-11-02 → 2021-04-22) | Issues: n/a | Forks: n/a
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'home_server.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
Stats: avg_line_length 28.681818 | max_line_length 75 | alphanum_fraction 0.684628
---
File: tests/test_utils/test_registry.py | 8,303 bytes | Python (py)
Blob: 4a1cc90f6e7490311a0d14e802bb4afed741c594
Repo: DmitriySidnev/mmcv (Apache-2.0) @ 54ece10ffb144f572c1527b42072f6f925ff4bdf
Stars: 1 (2019-05-31) | Issues: 10 (2020-10-15 → 2021-03-21) | Forks: 1 (2019-12-14)
import pytest
import mmcv
def test_registry():
CATS = mmcv.Registry('cat')
assert CATS.name == 'cat'
assert CATS.module_dict == {}
assert len(CATS) == 0
@CATS.register_module()
class BritishShorthair:
pass
assert len(CATS) == 1
assert CATS.get('BritishShorthair') is BritishShorthair
class Munchkin:
pass
CATS.register_module(Munchkin)
assert len(CATS) == 2
assert CATS.get('Munchkin') is Munchkin
assert 'Munchkin' in CATS
with pytest.raises(KeyError):
CATS.register_module(Munchkin)
CATS.register_module(Munchkin, force=True)
assert len(CATS) == 2
# force=False
with pytest.raises(KeyError):
@CATS.register_module()
class BritishShorthair:
pass
@CATS.register_module(force=True)
class BritishShorthair:
pass
assert len(CATS) == 2
assert CATS.get('PersianCat') is None
assert 'PersianCat' not in CATS
@CATS.register_module(name='Siamese')
class SiameseCat:
pass
assert CATS.get('Siamese').__name__ == 'SiameseCat'
class SphynxCat:
pass
CATS.register_module(name='Sphynx', module=SphynxCat)
assert CATS.get('Sphynx') is SphynxCat
CATS.register_module(name=['Sphynx1', 'Sphynx2'], module=SphynxCat)
assert CATS.get('Sphynx2') is SphynxCat
repr_str = 'Registry(name=cat, items={'
repr_str += ("'BritishShorthair': <class 'test_registry.test_registry."
"<locals>.BritishShorthair'>, ")
repr_str += ("'Munchkin': <class 'test_registry.test_registry."
"<locals>.Munchkin'>, ")
repr_str += ("'Siamese': <class 'test_registry.test_registry."
"<locals>.SiameseCat'>, ")
repr_str += ("'Sphynx': <class 'test_registry.test_registry."
"<locals>.SphynxCat'>, ")
repr_str += ("'Sphynx1': <class 'test_registry.test_registry."
"<locals>.SphynxCat'>, ")
repr_str += ("'Sphynx2': <class 'test_registry.test_registry."
"<locals>.SphynxCat'>")
repr_str += '})'
assert repr(CATS) == repr_str
# name type
with pytest.raises(AssertionError):
CATS.register_module(name=7474741, module=SphynxCat)
# the registered module should be a class
with pytest.raises(TypeError):
CATS.register_module(0)
# can only decorate a class
with pytest.raises(TypeError):
@CATS.register_module()
def some_method():
pass
# begin: test old APIs
with pytest.warns(UserWarning):
CATS.register_module(SphynxCat)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(UserWarning):
CATS.register_module(SphynxCat, force=True)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(UserWarning):
@CATS.register_module
class NewCat:
pass
assert CATS.get('NewCat').__name__ == 'NewCat'
with pytest.warns(UserWarning):
CATS.deprecated_register_module(SphynxCat, force=True)
assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
with pytest.warns(UserWarning):
@CATS.deprecated_register_module
class CuteCat:
pass
assert CATS.get('CuteCat').__name__ == 'CuteCat'
with pytest.warns(UserWarning):
@CATS.deprecated_register_module(force=True)
class NewCat2:
pass
assert CATS.get('NewCat2').__name__ == 'NewCat2'
# end: test old APIs
def test_multi_scope_registry():
DOGS = mmcv.Registry('dogs')
assert DOGS.name == 'dogs'
assert DOGS.scope == 'test_registry'
assert DOGS.module_dict == {}
assert len(DOGS) == 0
@DOGS.register_module()
class GoldenRetriever:
pass
assert len(DOGS) == 1
assert DOGS.get('GoldenRetriever') is GoldenRetriever
HOUNDS = mmcv.Registry('dogs', parent=DOGS, scope='hound')
@HOUNDS.register_module()
class BloodHound:
pass
assert len(HOUNDS) == 1
assert HOUNDS.get('BloodHound') is BloodHound
assert DOGS.get('hound.BloodHound') is BloodHound
assert HOUNDS.get('hound.BloodHound') is BloodHound
LITTLE_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='little_hound')
@LITTLE_HOUNDS.register_module()
class Dachshund:
pass
assert len(LITTLE_HOUNDS) == 1
assert LITTLE_HOUNDS.get('Dachshund') is Dachshund
assert LITTLE_HOUNDS.get('hound.BloodHound') is BloodHound
assert HOUNDS.get('little_hound.Dachshund') is Dachshund
assert DOGS.get('hound.little_hound.Dachshund') is Dachshund
MID_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='mid_hound')
@MID_HOUNDS.register_module()
class Beagle:
pass
assert MID_HOUNDS.get('Beagle') is Beagle
assert HOUNDS.get('mid_hound.Beagle') is Beagle
assert DOGS.get('hound.mid_hound.Beagle') is Beagle
assert LITTLE_HOUNDS.get('hound.mid_hound.Beagle') is Beagle
assert MID_HOUNDS.get('hound.BloodHound') is BloodHound
assert MID_HOUNDS.get('hound.Dachshund') is None
def test_build_from_cfg():
BACKBONES = mmcv.Registry('backbone')
@BACKBONES.register_module()
class ResNet:
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
@BACKBONES.register_module()
class ResNeXt:
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 3
cfg = dict(type='ResNeXt', depth=50, stages=3)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNeXt)
assert model.depth == 50 and model.stages == 3
cfg = dict(type=ResNet, depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
# type defined using default_args
cfg = dict(depth=50)
model = mmcv.build_from_cfg(
cfg, BACKBONES, default_args=dict(type='ResNet'))
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
cfg = dict(depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type=ResNet))
assert isinstance(model, ResNet)
assert model.depth == 50 and model.stages == 4
# not a registry
with pytest.raises(TypeError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
# non-registered class
with pytest.raises(KeyError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, BACKBONES)
# default_args must be a dict or None
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)
# cfg['type'] should be a str or class
with pytest.raises(TypeError):
cfg = dict(type=1000)
model = mmcv.build_from_cfg(cfg, BACKBONES)
# cfg should contain the key "type"
with pytest.raises(KeyError, match='must contain the key "type"'):
cfg = dict(depth=50, stages=4)
model = mmcv.build_from_cfg(cfg, BACKBONES)
# cfg or default_args should contain the key "type"
with pytest.raises(KeyError, match='must contain the key "type"'):
cfg = dict(depth=50)
model = mmcv.build_from_cfg(
cfg, BACKBONES, default_args=dict(stages=4))
# incorrect registry type
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
# incorrect default_args type
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)
# incorrect arguments
with pytest.raises(TypeError):
cfg = dict(type='ResNet', non_existing_arg=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
Stats: avg_line_length 29.653571 | max_line_length 79 | alphanum_fraction 0.650488
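For readers unfamiliar with mmcv, the tests above exercise a registry: a name-to-class mapping populated by a decorator and later used to build objects from config dicts. Here is a self-contained sketch of the core pattern; it is an illustration of the idea, not mmcv's actual implementation:

class Registry:
    """Minimal name->class registry illustrating the pattern under test."""

    def __init__(self, name):
        self.name = name
        self.module_dict = {}

    def register_module(self, name=None, force=False):
        def _register(cls):
            key = name or cls.__name__
            if key in self.module_dict and not force:
                raise KeyError(f'{key} already registered in {self.name}')
            self.module_dict[key] = cls
            return cls
        return _register

    def get(self, key):
        return self.module_dict.get(key)

CATS = Registry('cat')

@CATS.register_module()
class Siamese:
    pass

assert CATS.get('Siamese') is Siamese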
---
File: cleepcli/ci.py | 10,123 bytes | Python (py)
Blob: 4a1cc91f7a79f64961cfd40498ca41638157a94e
Repo: tangb/cleep-cli (MIT) @ 9b6c2c1673a20c95b7bc8e735b25ef892676ef1d
Stars: n/a | Issues: n/a | Forks: n/a
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import zipfile
import os
import glob
import re
import requests
import time
from . import config
from .console import Console
from .check import Check
import subprocess
class Ci():
"""
Continuous Integration helpers
"""
EXTRACT_DIR = '/root/extract'
SOURCE_DIR = '/root/cleep/modules'
CLEEP_COMMAND_URL = 'http://127.0.0.1/command'
CLEEP_CONFIG_URL = 'http://127.0.0.1/config'
def __init__(self):
"""
Constructor
"""
self.logger = logging.getLogger(self.__class__.__name__)
def mod_install_source(self, package_path):
"""
Install module package (zip archive) sources
Args:
package_path (string): package path
Raises:
Exception if error occured
"""
# init
(_, module_name, module_version) = os.path.basename(package_path).split('_')
module_version = module_version.replace('.zip', '')[1:]
self.logger.debug('Installing application %s[%s]' % (module_name, module_version))
# perform some checkings
if not module_version:
raise Exception('Invalid package filename')
        if not re.match(r'\d+\.\d+\.\d+', module_version):
raise Exception('Invalid package filename')
console = Console()
resp = console.command('file --keep-going --mime-type "%s"' % package_path)
if resp['returncode'] != 0:
raise Exception('Unable to check file validity')
filetype = resp['stdout'][0].split(': ')[1].strip()
self.logger.debug('Filetype=%s' % filetype)
if filetype != 'application/zip\\012- application/octet-stream':
raise Exception('Invalid application package file')
# unzip content
self.logger.debug('Extracting archive "%s" to "%s"' % (package_path, self.EXTRACT_DIR))
with zipfile.ZipFile(package_path, 'r') as package:
package.extractall(self.EXTRACT_DIR)
# check structure
if not os.path.exists(os.path.join(self.EXTRACT_DIR, 'backend/modules/%s' % module_name)):
raise Exception('Invalid package structure')
if not os.path.exists(os.path.join(self.EXTRACT_DIR, 'module.json')):
raise Exception('Invalid package structure')
# execute preinst script
preinst_path = os.path.join(self.EXTRACT_DIR, 'scripts', 'preinst.sh')
self.logger.debug('preinst.sh path "%s" exists? %s' % (preinst_path, os.path.exists(preinst_path)))
if os.path.exists(preinst_path):
self.logger.info('Executing "%s" preinst script' % preinst_path)
resp = console.command('cd "%(path)s" && chmod +x "%(script)s" && "%(script)s"' % {
'path': os.path.join(self.EXTRACT_DIR, 'scripts'),
'script': preinst_path,
}, 900)
self.logger.debug('Resp: %s' % resp)
if resp['returncode'] != 0:
raise Exception('Preinst.sh script failed (killed=%s): %s' % (resp['killed'], resp['stderr']))
# install sources
self.logger.info('Installing source files')
os.makedirs(os.path.join(self.SOURCE_DIR, module_name), exist_ok=True)
for filepath in glob.glob(self.EXTRACT_DIR + '/**/*.*', recursive=True):
if filepath.startswith(os.path.join(self.EXTRACT_DIR, 'frontend')):
dest = filepath.replace(os.path.join(self.EXTRACT_DIR, 'frontend/js/modules/%s' % module_name), os.path.join(self.SOURCE_DIR, module_name, 'frontend'))
self.logger.debug(' -> frontend: %s' % dest)
elif filepath.startswith(os.path.join(self.EXTRACT_DIR, 'backend')):
dest = filepath.replace(os.path.join(self.EXTRACT_DIR, 'backend/modules/%s' % module_name), os.path.join(self.SOURCE_DIR, module_name, 'backend'))
self.logger.debug(' -> backend: %s' % dest)
elif filepath.startswith(os.path.join(self.EXTRACT_DIR, 'tests')):
dest = filepath.replace(os.path.join(self.EXTRACT_DIR, 'tests'), os.path.join(self.SOURCE_DIR, module_name, 'tests'))
self.logger.debug(' -> tests: %s' % dest)
elif filepath.startswith(os.path.join(self.EXTRACT_DIR, 'scripts')):
dest = filepath.replace(os.path.join(self.EXTRACT_DIR, 'scripts'), os.path.join(self.SOURCE_DIR, module_name, 'scripts'))
self.logger.debug(' -> scripts: %s' % dest)
else:
dest = filepath.replace(self.EXTRACT_DIR, os.path.join(self.SOURCE_DIR, module_name))
self.logger.debug(' -> other: %s' % dest)
os.makedirs(os.path.dirname(dest), exist_ok=True)
os.rename(filepath, dest)
os.system('cleep-cli modsync --module=%s' % module_name)
# execute postinst script
postinst_path = os.path.join(self.SOURCE_DIR, module_name, 'scripts', 'postinst.sh')
self.logger.debug('postinst.sh path "%s" exists? %s' % (postinst_path, os.path.exists(postinst_path)))
if os.path.exists(postinst_path):
self.logger.info('Executing "%s" postinst script' % postinst_path)
resp = console.command('cd "%(path)s" && chmod +x "%(script)s" && "%(script)s"' % {
'path': os.path.join(self.EXTRACT_DIR, 'scripts'),
'script': postinst_path
}, 900)
self.logger.debug('Resp: %s' % resp)
if resp['returncode'] != 0:
raise Exception('Postinst.sh script failed (killed=%s): %s || %s' % (resp['killed'], resp['stdout'], resp['stderr']))
# install tests python requirements
tests_requirements_path = os.path.join(self.SOURCE_DIR, module_name, 'tests', 'requirements.txt')
if os.path.exists(tests_requirements_path):
self.logger.info('Install tests python dependencies')
resp = console.command('python3 -m pip install --trusted-host pypi.org -r "%s"' % tests_requirements_path, 900)
self.logger.debug('Resp: %s' % resp)
if resp['returncode'] != 0:
raise Exception('Error installing tests python dependencies (killed=%s): %s' % (resp['killed'], resp['stderr']))
        cleep_proc = None  # ensure the finally block below can't hit a NameError
        try:
# start cleep (non blocking)
self.logger.info('Starting Cleep...')
cleep_proc = subprocess.Popen(['cleep', '--noro'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(15)
self.logger.info('Done')
# make sure to have latest modules.json version
self.logger.info('Updating applications list in Cleep')
resp = requests.post(self.CLEEP_COMMAND_URL, json={
'command': 'check_modules_updates',
'to': 'update',
})
resp.raise_for_status()
resp_json = resp.json()
if resp_json['error']:
raise Exception('Check_modules_updates command failed: %s' % resp_json)
# install module in cleep (it will also install deps)
self.logger.info('Installing "%s" application in Cleep' % module_name)
resp = requests.post(self.CLEEP_COMMAND_URL, json={
'command': 'install_module',
'to': 'update',
'params': {
'module_name': module_name,
}
})
resp.raise_for_status()
resp_json = resp.json()
if resp_json['error']:
raise Exception('Install_module command failed: %s' % resp_json)
# wait until end of installation
self.logger.info('Waiting end of application installation')
while True:
time.sleep(1.0)
resp = requests.post(self.CLEEP_COMMAND_URL, json={
'command': 'get_modules_updates',
'to': 'update'
})
resp.raise_for_status()
resp_json = resp.json()
if resp_json['error']:
raise Exception('Get_modules_updates command failed')
module_updates = resp_json['data'].get(module_name)
self.logger.debug('Updates: %s' % module_updates)
if not module_updates:
raise Exception('No "%s" application info in updates' % module_name)
if module_updates['processing'] == False:
if module_updates['update']['failed']:
raise Exception('Application "%s" installation failed' % module_name)
break
# restart cleep
self.logger.info('Restarting cleep...')
cleep_proc.kill()
cleep_proc = subprocess.Popen(['cleep', '--noro'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(15)
self.logger.info('Done')
# check module is installed and running
self.logger.info('Checking application is installed')
resp = requests.post(self.CLEEP_CONFIG_URL)
resp.raise_for_status()
resp_json = resp.json()
module_config = resp_json['modules'].get(module_name)
if not module_config or not module_config.get('started'):
self.logger.error('Found application config: %s' % module_config)
raise Exception('Application "%s" installation failed' % module_name)
self.logger.info('Application and its dependencies installed successfully')
finally:
if cleep_proc:
cleep_proc.kill()
def mod_check(self, module_name):
"""
Perform some checkings (see check.py file) for continuous integration
Args:
module_name (string): module name
Raises:
Exception if error occured
"""
check = Check()
check.check_backend(module_name)
check.check_frontend(module_name)
check.check_scripts(module_name)
check.check_tests(module_name)
Stats: avg_line_length 45.599099 | max_line_length 167 | alphanum_fraction 0.588462
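mod_install_source above assumes package filenames of the form <prefix>_<module>_v<semver>.zip. The path below is a made-up example showing how the first lines of the method decompose such a name:

import os

package_path = '/tmp/cleepmod_mymodule_v1.2.3.zip'  # hypothetical path
_, module_name, module_version = os.path.basename(package_path).split('_')
module_version = module_version.replace('.zip', '')[1:]  # drop '.zip', strip leading 'v'
assert (module_name, module_version) == ('mymodule', '1.2.3')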
---
File: examples/mail/Contacts.py | 3,070 bytes | Python (py)
Blob: 4a1cc9626a456b32c380bc40b5e5ffafd9664071
Repo (stars): allbuttonspressed/pyjs (ECL-2.0, Apache-2.0) @ c726fdead530eb63ee4763ae15daaa58d84cd58f
Repo (issues/forks): andreyvit/pyjamas (ECL-2.0, Apache-2.0) @ 1154abe3340a84dba7530b8174aaddecfc1a0944
Stars: 1 (2018-09-19) | Issues: n/a | Forks: 1 (2019-11-18)
from pyjamas.ui.Composite import Composite
from pyjamas.ui.HTML import HTML
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.Image import Image
from pyjamas.ui.PopupPanel import PopupPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Widget import Widget
from pyjamas.ui.Label import Label
from Logger import Logger
class Contact:
def __init__(self, name, email):
self.photo = "http://code.google.com/webtoolkit/documentation/examples/desktopclone/default_photo.jpg"
self.name = name
self.email = email
class ContactPopup(PopupPanel):
def __init__(self, contact):
# The popup's constructor's argument is a boolean specifying that it
# auto-close itself when the user clicks outside of it.
PopupPanel.__init__(self, True)
inner = VerticalPanel()
nameLabel = Label(contact.name)
emailLabel = Label(contact.email)
inner.add(nameLabel)
inner.add(emailLabel)
panel = HorizontalPanel()
panel.setSpacing(4)
panel.add(Image(contact.photo))
panel.add(inner)
self.add(panel)
self.setStyleName("mail-ContactPopup")
nameLabel.setStyleName("mail-ContactPopupName")
emailLabel.setStyleName("mail-ContactPopupEmail")
class Contacts(Composite):
def __init__(self):
Composite.__init__(self)
self.contacts = []
self.contacts.append(Contact("Benoit Mandelbrot", "benoit@example.com"))
self.contacts.append(Contact("Albert Einstein", "albert@example.com"))
self.contacts.append(Contact("Rene Descartes", "rene@example.com"))
self.contacts.append(Contact("Bob Saget", "bob@example.com"))
self.contacts.append(Contact("Ludwig von Beethoven", "ludwig@example.com"))
self.contacts.append(Contact("Richard Feynman", "richard@example.com"))
self.contacts.append(Contact("Alan Turing", "alan@example.com"))
self.contacts.append(Contact("John von Neumann", "john@example.com"))
self.panel = VerticalPanel()
        # Add all the contacts to the list.
        for contact in self.contacts:
            self.addContact(contact)
self.initWidget(self.panel)
self.setStyleName("mail-Contacts")
def addContact(self, contact):
link = HTML("<a href='javascript:;'>" + contact.name + "</a>")
self.panel.add(link)
# Add a click listener that displays a ContactPopup when it is clicked.
listener = ContactListener(contact, link)
link.addClickListener(listener)
class ContactListener:
def __init__(self, contact, link):
self.cont = contact
self.link = link
def onClick(self, sender):
if (sender == self.link):
popup = ContactPopup(self.cont)
left = self.link.getAbsoluteLeft() + 32
top = self.link.getAbsoluteTop() + 8
popup.setPopupPosition(left, top)
popup.show()
Stats: avg_line_length 35.697674 | max_line_length 110 | alphanum_fraction 0.652769
---
File: fibo.py | 4,356 bytes | Python (py)
Blob: 4a1ccad92599942ce163d63e0910094a2206bf7e
Repo: loopspace/fibonacci_spirals (CC0-1.0) @ 9e72290fe519c340808572e897bf3084869d58d1
Stars: n/a | Issues: n/a | Forks: n/a
import subprocess
import math
import argparse
import platform
parser = argparse.ArgumentParser(description="Generate a Fibonacci spiral")
parser.add_argument('-l','--lua',help="Use lualatex to compile document", action="store_true")
parser.add_argument('-x','--xe',help="Use xelatex to compile document", action="store_true")
parser.add_argument('-v','--view',help="View PDF afterwards", action="store_true")
parser.add_argument('-t','--tikz',help="Create TikZ code", action="store_true")
parser.add_argument('-s','--svg',help="Create SVG code", action="store_true")
parser.add_argument('-c','--colour','--color',nargs='?', default='black', help="Set the line colour")
parser.add_argument('-w','--linewidth','--line-width', '--strokewidth', '--stroke-width',nargs='?', default='1', help="Set the line width")
args = parser.parse_args()
SCALE = 1
PHI = 0.5*(5**0.5-1) # scale factor
LEN = 8 # how many iterations
D = 144 # angle (in degrees) for each arc
R = 5. # radius of largest circle
SA = 30 # start angle
# Set line colour
if args.colour:
COLOUR = args.colour
else:
COLOUR = "black"
# Set line width
if args.linewidth:
WIDTH = args.linewidth
else:
WIDTH = 1
# Set TeX engine
TEX = "pdflatex"
if args.lua:
TEX = "lualatex"
if args.xe:
TEX = "xelatex"
# If requested, how to view the PDF afterwards
SHOW = args.view
if platform.system() == "Linux":
OPEN = "xdg-open" # Linux
elif platform.system() == "Darwin":
OPEN = "open" # Mac OS
else:
OPEN = "" # Dunno what to do for Windows
SHOW = False
if args.svg:
R *= 10
TIKZ = False
curve_start = r'<path d="'
curve_end = r'" />'
picture_start = f'<g stroke="{COLOUR}" fill="none" stroke-width="{WIDTH}">'
picture_end = r'</g>'
preamble = r"""<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTDSVG1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" viewBox="-150 -150 300 300" width="100%" height="100%">
"""
postamble = r"</svg>"
move = lambda x,y: f"M {x:.2f} {y:.2f} "
arc = lambda r,a,D,d,x,y: f"A {r:.2f} {r:.2f} 0 0 {int((d+1)/2)} {x:.2f} {y:.2f} "
else:
TIKZ = True
curve_start = r"\draw "
curve_end = r";"
picture_start = "\\begin{tikzpicture}" + f"[linewidth={WIDTH} pt, color={COLOUR}, x={SCALE}cm, y={SCALE}cm]\n"
picture_end = "\n\\end{tikzpicture}\n\n"
preamble = r"""\documentclass[border=10pt,tikz]{standalone}
\begin{document}
"""
postamble = r"\end{document}"
move = lambda x,y: f"({x},{y}) "
arc = lambda r,a,D,d,x,y: f"arc[radius={r}, start angle={a}, delta angle={D*d}] "
# this is a bit wasteful, but I think a simple thing that works is probably better than a complicated calculation.
def curve(n):
"""Plot a curve that goes in different directions depending on the binary expansion of the argument"""
r = R
a = SA
direction = +1
out = curve_start
x = 0
y = 0
if n == 0:
out += move(0,0)
for i in range(LEN):
if n%2 == 1:
direction *= -1
a = (a+180) % 360 # switch direction and reduce radius
r *= PHI
if n == 1: # are we ready to start drawing?
out += move(x,y)
# update starting point of next maybe-arc
x += -r*math.cos(a * math.pi/180) + r*math.cos( (a + D*direction) * math.pi/180)
y += -r*math.sin(a * math.pi/180) + r*math.sin( (a + D*direction) * math.pi/180)
if n <= 1: # are we drawing?
out += arc(r,a,D,direction,x,y)
a = (a+direction*D) % 360
r *= PHI # reduce radius
n >>= 1
return out + curve_end
def curves():
"""plot all of the possible curves"""
return "\n".join([curve(i) for i in range(2**LEN)])
def full_file():
"""Use standalone class for single-image documents."""
out = preamble
for f in [curves]:
out += picture_start + f() + picture_end
out += postamble
return out
fn = "fibo"
if TIKZ:
tfn = fn + ".tex"
ofn = fn + ".pdf"
else:
tfn = fn + ".svg"
ofn = tfn
with open(tfn,'w') as f:
f.write(full_file())
# compile it
if TIKZ:
    subprocess.call(f"{TEX} {tfn} -o {ofn}", shell=True, executable='/bin/zsh')
if SHOW:
    subprocess.call(f"{OPEN} {ofn}", shell=True, executable='/bin/zsh')
Stats: avg_line_length 30.676056 | max_line_length 139 | alphanum_fraction 0.601928
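The heart of fibo.py is curve(): each 1-bit in the binary expansion of n flips the arc direction (and the radius shrinks by PHI along the way). A small worked replay of just the direction bookkeeping, for illustration:

def direction_sequence(n, length=8):
    # Replays curve()'s control flow: every 1-bit flips the direction.
    direction, out = +1, []
    for _ in range(length):
        if n % 2 == 1:
            direction *= -1
        out.append(direction)
        n >>= 1
    return out

print(direction_sequence(0b101))  # [-1, -1, 1, 1, 1, 1, 1, 1]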
---
File: pickle/pickle.py | 439 bytes | Python (py)
Blob: 4a1ccc86f1c9820e18f3333448927b005b57a7de
Repo: AEMICS/pycopy-lib (PSF-2.0) @ 56f4436123e30be9928662361098a71cae82eecc
Stars: 126 (2019-07-19 → 2022-03-21) | Issues: 38 (2019-08-28 → 2022-03-17) | Forks: 55 (2019-08-02 → 2021-12-22)
HIGHEST_PROTOCOL = 0


def dump(obj, f, protocol=0):
    f.write(repr(obj))


def dumps(obj, protocol=0):
    return repr(obj).encode()


def load(f):
    s = f.read()
    return loads(s)


def loads(s):
    d = {}
    s = s.decode()
    if "(" in s:
        qualname = s.split("(", 1)[0]
        if "." in qualname:
            pkg = qualname.rsplit(".", 1)[0]
            mod = __import__(pkg)
            d[pkg] = mod
    return eval(s, d)
Stats: avg_line_length 19.086957 | max_line_length 44 | alphanum_fraction 0.501139
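This pycopy-lib shim serializes with repr() and deserializes with eval(), so it only round-trips objects whose repr is a valid expression, and it must never be fed untrusted input. A quick illustration:

data = {'a': [1, 2, 3], 'b': ('x', None)}
blob = dumps(data)          # b"{'a': [1, 2, 3], 'b': ('x', None)}"
assert loads(blob) == data  # round-trips because repr(data) is a literal
# Arbitrary objects do not survive: their repr (e.g. '<Foo object at 0x...>')
# is not something loads() can eval.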
---
File: secv_guis/bimask_app/main_window.py | 30,935 bytes | Python (py)
Blob: 4a1ccc9fd019f07f2b8cf725a888dbefd5dd1792
Repo: pshobowale/secv-guis (MIT) @ d916ae2c0bebc5d99a8f08052133b7e536bff231
Stars: 3 (2020-03-31 → 2020-11-10) | Issues: 2 (2021-03-20 → 2021-05-29) | Forks: 1 (2021-03-19)
# -*- coding:utf-8 -*-
"""
This module contains the logic and widgets pertaining to the main window
of the bimask app: An app that allows displaying an image, editing a mask
on it and also displaying/editing a preannotation mask.
It can be used to efficiently annotate large images with pixel precision.
Check instructions.txt for more details.
"""
import os
from PySide2 import QtCore, QtWidgets, QtGui
import numpy as np
from PIL import Image
import json
#
from skimage.filters import apply_hysteresis_threshold
#
from .dialogs import InstructionsDialog, AboutDialog, KeymapsDialog, \
SavedStateTracker
#
from ..masked_scene import MaskedImageScene, DisplayView
from ..base_widgets import FileList, MaskPaintForm, SaveForm
from ..utils import load_img_and_exif, unique_filename
from ..commands import DrawCommand, EraseCommand, DrawOverlappingCommand
from ..objects import PointList
# #############################################################################
# ## APPLICATION LOGIC FOR QUICK MASKING
# #############################################################################
def pmap_to_mask(pmap, upper_percentile, lower_percentile, percentile_max=100):
    """Threshold a probability map into a boolean mask, keeping only the
    pixels whose values fall between the two given percentiles."""
    pmap = np.array(pmap)
    values = np.sort(pmap.flatten())  # ascending pixel values
    up = int((len(values) - 1) * upper_percentile / percentile_max)
    lp = int((len(values) - 1) * lower_percentile / percentile_max)
    pmap[pmap > values[up]] = 0  # zero out everything above the upper cut
    pmap[pmap < values[lp]] = 0  # ... and everything below the lower cut
    return pmap > 0
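# Worked example (illustrative only): with pmap = [0.0, 0.1, ..., 1.0]
# (11 values), upper_percentile=90 and lower_percentile=50 give
# up = int(10 * 90 / 100) = 9 -> values[9] = 0.9 and
# lp = int(10 * 50 / 100) = 5 -> values[5] = 0.5,
# so exactly the pixels lying in [0.5, 0.9] come back True.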
# #############################################################################
# ## WIDGET EXTENSIONS AND COMPOSITIONS TO ADD SPECIFIC LOGIC+LAYOUT
# #############################################################################
class FileLists(QtWidgets.QWidget):
"""
A cluster of 3 file lists: one for images, one for masks and one for
preannotations.
"""
def __init__(self, parent=None, img_extensions=[".png", ".jpg", ".jpeg"],
mask_extensions=None, preannot_extensions=None):
"""
If given, the extensions are case-insensitive lists in the form
``[".png", ".jpg"]`` that filter the files that are shown in the list
by allowing only the given terminations.
"""
super().__init__(parent)
# create widgets
self.img_list = FileList("Images\nfolder", extensions=img_extensions)
self.mask_list = FileList("Masks\nfolder")
self.preannot_list = FileList("Pre-annotations\nfolder")
# add widgets to layout
self.main_layout = QtWidgets.QHBoxLayout()
self.main_layout.addWidget(self.img_list)
self.main_layout.addWidget(self.mask_list)
self.main_layout.addWidget(self.preannot_list)
self.setLayout(self.main_layout)
class IntegratedSaveForm(SaveForm):
"""
A ``SaveForm`` that implements this app's logic, namely, it features 2
masks, one for annot and one for preannot, and saves them as B&W png.
"""
def __init__(self, main_window, default_path=None,
save_dialog_timeout_ms=1000):
"""
:param main_window: A reference to the ``BimaskMainWindow``
:param str default_path: If non given, 'home' is picked.
:param save_dialog_timeout: When successfully saving, a dialog
will pop up, and disappear after this many miliseconds.
"""
super().__init__(None, default_path)
self.main_window = main_window
self.add_checkbox("preannot.", initial_val=False,
initial_txt="_preannot.png")
self.add_checkbox("annot.", initial_txt="_annot.png")
self.add_checkbox("points", initial_txt="_points.json")
# This reference is needed otherwise dialogs get garbage collected?
self.dialog = None
self.dialog_ms = save_dialog_timeout_ms
def save_masks(self, states, suffixes, overwrite):
"""
Overriden method that we don't call directly. See ``SaveForm`` for
interface details.
"""
save_preannot, save_annot, save_points = states
suff_preannot, suff_annot, suff_points = suffixes
img_name = self.main_window.current_img_basename
#
a_pmi = self.main_window.graphics_view.annot_pmi
pa_pmi = self.main_window.graphics_view.preannot_pmi
#
scene = self.main_window.graphics_view.scene()
saved = {}
if save_preannot and pa_pmi is not None:
pa_path = os.path.join(self.save_path, img_name + suff_preannot)
if not overwrite:
pa_path = unique_filename(pa_path)
pa_msk_arr = scene.mask_as_bool_arr(pa_pmi)
self.save_bool_arr_as_img(pa_msk_arr, pa_path, overwrite)
saved["preannotation mask"] = pa_path
if save_annot and a_pmi is not None:
a_path = os.path.join(self.save_path, img_name + suff_annot)
if not overwrite:
a_path = unique_filename(a_path)
msk_arr = scene.mask_as_bool_arr(a_pmi)
self.save_bool_arr_as_img(msk_arr, a_path, overwrite)
saved["annotation mask"] = a_path
if save_points and scene.objects:
state_dict = {k.__name__: [elt.state() for elt in v if elt.state()]
for k, v in scene.objects.items()}
p_path = os.path.join(self.save_path, img_name + suff_points)
if not overwrite:
p_path = unique_filename(p_path)
with open(p_path, "w") as f:
# f.write(str(state_dict))
json.dump(state_dict, f)
saved["point lists"] = p_path
#
if saved:
self.main_window.graphics_view.saved_state_tracker.save(
saved, self.dialog_ms)
def save_bool_arr_as_img(self, arr, outpath, overwrite_existing=False):
"""
Output: RGB PNG image where false is black (0, 0, 0) and true is white
(255, 255, 255).
"""
if not overwrite_existing:
outpath = unique_filename(outpath)
img = Image.fromarray(arr)
img.save(outpath)
class IntegratedDisplayView(DisplayView):
"""
This class implements the main component of the main window: it features a
view of the image and the masks, together with a set of operations that can
be done on them (painting, updating...), and the callback mechanisms to
trigger those operations.
"""
def __init__(self, main_window, scale_percent=15):
"""
:param scale_percent: Each zoom in/out operation will scale the view
by this much (in percent).
"""
super().__init__(scene=None, parent=None, scale_percent=scale_percent)
self._scene = MaskedImageScene()
self.main_window = main_window
self.shape = None
self.setScene(self._scene)
#
self._preannot_pmap = None
self.preannot_pmi = None
self.annot_pmi = None
#
#
self._current_clickdrag_action = None
#
self.saved_state_tracker = None
# MEMORY ACTIONS
def new_image(self, img_path, initial_mask_color=(219, 54, 148, 150),
initial_preannot_color=(102, 214, 123, 100)):
"""
If successful, removes all elements from the scene and the undo stack,
and loads a fresh image and masks. If there are unsaved changes, a
dialog asking for confirmation will pop up.
:returns: True if the action completed successfully, False if the user
decides to abort.
"""
if self.saved_state_tracker is not None:
is_delete_ok = self.saved_state_tracker.delete()
if not is_delete_ok:
# If user didn't want to delete unsaved changes
return False
# Go on with the update
img_arr = load_img_and_exif(img_path)[0]
self.shape = img_arr.shape
self._scene.update_image(img_arr)
        dummy_preannot = np.zeros(img_arr.shape[:2], dtype=bool)
dummy_mask = np.zeros_like(dummy_preannot)
self.preannot_pmi = self._scene.add_mask(
dummy_preannot, initial_preannot_color)
self.annot_pmi = self._scene.add_mask(
dummy_mask, initial_mask_color)
self.fit_in_scene()
#
self.main_window.undo_stack.clear()
#
self.saved_state_tracker = SavedStateTracker()
return True
def preannot_from_path(self, preannot_path, rgba, upper_thresh=100,
lower_thresh=90, normalize=False):
"""
This method is prototype-ish: It loads an ``.npz`` file with and
'entropy' field, expected to have a numpy float matrix with same
shape as the image. Alternatively it takes a greyscale image file
suppoted by PIL.
"""
assert self.scene().img_pmi is not None, \
"You need to load an image first!"
if preannot_path.endswith(".npz") or preannot_path.endswith(".npy"):
self._preannot_pmap = np.load(preannot_path)["entropy"]
else:
img=np.asanyarray(Image.open(preannot_path))
if len(img.shape)>2:
img=img[:,:,0]
self._preannot_pmap = np.asarray(img)
normalize=True
if normalize:
try:
self._preannot_pmap = self._preannot_pmap/np.max(self._preannot_pmap)
except ZeroDivisionError:
pass
m = pmap_to_mask(self._preannot_pmap, upper_thresh, lower_thresh)
self.preannot_pmi = self.scene().replace_mask_pmi(
self.preannot_pmi, m)
#
self.saved_state_tracker.edit()
def mask_from_path(self, mask_path, rgba):
"""
:param mask_path: Path to an image containing a binary mask, where
zero pixels are considered false and non-zero true.
:param rgba: Color of the loaded mask
Loads a binary mask into the scene as an RGBA-colored mask.
"""
assert self.scene().img_pmi is not None, \
"You need to load an image first!"
arr = load_img_and_exif(mask_path)[0]
if len(arr.shape) == 2:
mask = arr > 0
elif len(arr.shape) == 3:
mask = arr.any(axis=-1)
else:
raise RuntimeError("Mask must be rank 2 or 3!")
self.annot_pmi = self.scene().replace_mask_pmi(
self.annot_pmi, mask)
#
self.saved_state_tracker.edit()
# MASK SINGLE-SHOT ACTIONS
def change_preannot_pval(self, upper_thresh, lower_thresh):
"""
Updates the preannot->mask threshold.
"""
if self._preannot_pmap is not None:
new_m = pmap_to_mask(self._preannot_pmap,
upper_thresh,
lower_thresh)
self.preannot_pmi = self.scene().replace_mask_pmi(
self.preannot_pmi, new_m)
#
if self.saved_state_tracker is not None:
self.saved_state_tracker.edit()
def change_preannot_rgba(self, rgba):
"""
Updates the preannot mask color.
"""
if self.preannot_pmi is not None:
m = self.scene().mask_as_bool_arr(self.preannot_pmi)
self.preannot_pmi = self.scene().replace_mask_pmi(
self.preannot_pmi, m, rgba)
def change_annot_rgba(self, rgba):
"""
Updates the annot mask color.
"""
if self.annot_pmi is not None:
m = self.scene().mask_as_bool_arr(self.annot_pmi)
self.annot_pmi = self.scene().replace_mask_pmi(
self.annot_pmi, m, rgba)
# MASK COMPOSITE ACTIONS
def _finish_clickdrag_action(self):
"""
finishes any click+drag action that may be active (does nothing if
none active).
"""
cmd = self._current_clickdrag_action
if cmd is not None:
cmd.finish(self.main_window.undo_stack)
self._current_clickdrag_action = None
def _perform_composite_action(self, action_class, action_args,
construction_args):
"""
This function is the recommended way to perform a composite
action for the following reasons:
1. If ``action_class`` is already running, it simply continues it.
2. If a different composite action was running, it closes it and starts
this one.
3. If no composite action was running, starts this one
And finally performs the action.
:param construction_args: If this action needs to be started, it will
be called via ``cmd = action_class(*construction_args)``
:param action_args: The command will be called via ``cmd(action_args)``
Usage example::
x, y = current_action_position...
pmi = ...
brush_size = ...
rgba = self.scene().mask_pmis[pmi]
self._perform_composite_action(DrawCommand, [x, y],
[pmi, rgba, brush_size])
"""
cmd = self._current_clickdrag_action
# if changed to this action without releasing the prior one, release it
action_changed = action_class is not cmd.__class__
cmd_finished = cmd is not None and cmd.finished
if action_changed:
self._finish_clickdrag_action() # sets current action to None
cmd = self._current_clickdrag_action
# if no open action of this class, create
if cmd is None or cmd_finished:
cmd = action_class(*construction_args)
self._current_clickdrag_action = cmd
cmd.action(*action_args)
def clickdrag_action(self, x, y):
"""
Paint to the currently selected mask, with the currently selected
brush type, at the given position.
The given ``x, y`` position is in 'scene coordinates', i.e. the
position from a mouse event has to be translated as follows::
xpos, ypos = self.mapToScene(event.pos()).toTuple()
self.clickdrag_action(xpos, ypos)
"""
# retrieve pmi info
# expected idx: 0 for preannot, 1 for annot
idx_map = {0: self.preannot_pmi, 1: self.annot_pmi}
mask_idx = self.main_window.paint_form.current_button_idx
pmi = idx_map[mask_idx]
# paint only if this pmi
if pmi is None:
return
# retrieve brush info
p_txt, e_txt, mp_txt = [self.main_window.PAINTER_TXT,
self.main_window.ERASER_TXT,
self.main_window.MASKED_PAINTER_TXT]
brush_type = self.main_window.paint_form.current_brush_type
brush_size = self.main_window.paint_form.current_brush_size
# if no open action exists, create:
did_something = False
if brush_type == p_txt:
rgba = self.scene().mask_pmis[pmi]
self._perform_composite_action(DrawCommand, [x, y],
[pmi, rgba, brush_size])
did_something = True
elif brush_type == e_txt:
self._perform_composite_action(EraseCommand, [x, y],
[pmi, brush_size])
did_something = True
elif brush_type == mp_txt:
rgba = self.scene().mask_pmis[pmi]
ref_pmi = self.preannot_pmi # preannot is always the ref
self._perform_composite_action(DrawOverlappingCommand, [x, y],
[pmi, ref_pmi, rgba, brush_size])
did_something = True
#
if did_something:
self.saved_state_tracker.edit()
def add_point(self, x, y, close_after=False):
"""
"""
if self.scene().img_pmi is None:
return
brush_size = self.main_window.paint_form.current_brush_size
self.scene().object_action(
PointList, [x, y, self.main_window.undo_stack],
[self.scene(), brush_size, (0, 0, 0, 100), (0, 0, 0, 255),
True]) # draw lines
#
if close_after:
self.scene().close_current_object_action(
self.main_window.undo_stack)
# EVENT HANDLING
def on_left_press(self, event):
"""
"""
xpos, ypos = self.mapToScene(event.pos()).toTuple()
brush_type = self.main_window.paint_form.current_brush_type
if brush_type == self.main_window.POINT_LIST_TXT:
mods = event.modifiers()
has_ctrl = bool(mods & QtCore.Qt.ControlModifier)
self.add_point(xpos, ypos, close_after=has_ctrl)
else:
self.clickdrag_action(xpos, ypos)
def on_left_release(self, event):
"""
If there is an open macro command, closes it and adds it to the undo
stack
"""
self._finish_clickdrag_action()
def on_move(self, event, has_left, has_mid, has_right, this_pos, last_pos):
"""
Callback implementation, calls ``clickdrag_action`` if moving while
pressing left.
"""
super().on_move(event, has_left, has_mid, has_right, this_pos,
last_pos)
#
if has_left:
xpos, ypos = self.mapToScene(event.pos()).toTuple()
self.clickdrag_action(xpos, ypos)
class CrackAnnotPaintForm(MaskPaintForm):
"""
A ``MaskPaintForm`` that holds a reference to the app's main window and
connects its callbacks with the main window's corresponding components.
"""
def __init__(self, main_window, brushes, max_brush_size=100, parent=None,
thresh_min=0, thresh_max=1, thresh_num_steps=100):
"""
:param main_window: A reference to the bimask app main window instance.
:param brushes: A list of brush names to be featured in the form.
"""
super().__init__(brushes, max_brush_size, parent, thresh_min,
thresh_max, thresh_num_steps, min_alpha=1)
self.main_window = main_window
self.current_brush_type = self.brush_names[
self.brush_combo_box.currentIndex()]
self.current_brush_size = self.brush_size_slider.value()
self.current_button_idx = None # activate when calling addItem
def button_pressed(self, but):
"""
Setter
"""
self.current_button_idx = self._buttons.index(but)
def threshold_slider_changed(self, t, t2):
"""
:param t : Upper Threshold
:param t2 : Lower Threshold
"""
self.main_window.graphics_view.change_preannot_pval(t,t2)
def rgba_box_changed(self, idx, r, g, b, a):
"""
Update corresponding mask with new RGBA color.
"""
# NOTE: THIS ASSUMES THAT BOX 0 IS ANNOT AND BOX 1 IS PREANNOT!
assert idx in {0, 1}, "This GUI wasn't prepared for more than 2 masks"
# self.main_window.graphics_view.scene().change_mask_color(
# idx, r, g, b, a)
view = self.main_window.graphics_view
# idx_map = {0: view.preannot_pmi, 1: view.annot_pmi}
# pmi = idx_map[idx]
if idx == 0:
view.change_preannot_rgba((r, g, b, a))
self.main_window.preannot_color = (r, g, b, a)
elif idx == 1:
view.change_annot_rgba((r, g, b, a))
self.main_window.mask_color = (r, g, b, a)
def brush_type_changed(self, idx):
"""
Setter
"""
self.current_brush_type = self.brush_names[idx]
def brush_size_changed(self, sz):
"""
Setter
"""
self.current_brush_size = sz
# #############################################################################
# ## MAIN WINDOW
# #############################################################################
class MainWindow(QtWidgets.QMainWindow):
"""
This is the central widget for the bimask application. It is a composition
of all the used elements, together with the logic that binds them.
"""
# These variables handle the preannotation thresholding. Check pmap_to_mask
DISCARD_P_VALUE = 0.5 # Number in range (thresh_slider_max, 1]
THRESH_MIN = 0
THRESH_MAX = 100
THRESH_NUM_STEPS = 100
#
PAINTER_TXT = "Painter"
ERASER_TXT = "Eraser"
MASKED_PAINTER_TXT = "Masked painter"
POINT_LIST_TXT = "Points"
def __init__(self, parent=None, initial_mask_color=(255, 54, 76, 150),
initial_preannot_color=(102, 214, 123, 100),
max_brush_size=200):
"""
"""
super().__init__(parent)
self.graphics_view = IntegratedDisplayView(self)
#
self.mask_color = initial_mask_color
self.preannot_color = initial_preannot_color
#
self.current_img_basename = None
#
self.instructions_dialog = InstructionsDialog()
self.about_dialog = AboutDialog()
self.keymaps_dialog = KeymapsDialog(
{k: v.toString() for k, v in self.keymaps().items()})
# define controller widgets
self.file_lists = FileLists()
self.paint_form = CrackAnnotPaintForm(
self, [self.PAINTER_TXT, self.ERASER_TXT, self.MASKED_PAINTER_TXT,
self.POINT_LIST_TXT],
max_brush_size, thresh_min=self.THRESH_MIN,
thresh_max=self.THRESH_MAX, thresh_num_steps=self.THRESH_NUM_STEPS)
self.save_form = IntegratedSaveForm(self, default_path=None)
self.paint_form.add_item("preannot.", self.preannot_color,
slider_visible=True, activate=False)
self.paint_form.add_item("annot.", self.mask_color,
slider_visible=False, activate=True)
# create controller layout
controller_layout = QtWidgets.QVBoxLayout()
controller_layout.addWidget(self.paint_form)
controller_layout.addWidget(self.save_form)
controller_widget = QtWidgets.QWidget()
controller_widget.setLayout(controller_layout)
self.controller_splitter = QtWidgets.QSplitter()
self.controller_splitter.setOrientation(QtCore.Qt.Vertical)
self.controller_splitter.addWidget(self.file_lists)
self.controller_splitter.addWidget(controller_widget)
# create main layout, add controller and graphics:
self.main_splitter = QtWidgets.QSplitter()
self.main_splitter.setOrientation(QtCore.Qt.Horizontal)
self.main_splitter.addWidget(self.controller_splitter)
self.main_splitter.addWidget(self.graphics_view)
# fine-tune main layout: sizes and such
self.controller_splitter.setMinimumWidth(10)
left_width = self.controller_splitter.width()
right_width = self.graphics_view.width()
self.main_splitter.setSizes([left_width, right_width * 2])
self.setCentralWidget(self.main_splitter)
# add connections
self.file_lists.img_list.file_list.itemDoubleClicked.connect(
lambda elt: self._handle_img_selection(elt.text()))
self.file_lists.mask_list.file_list.itemDoubleClicked.connect(
lambda elt: self._handle_mask_selection(elt.text()))
self.file_lists.preannot_list.file_list.itemDoubleClicked.connect(
lambda elt: self._handle_preannot_selection(elt.text()))
#
self._setup_undo()
self._setup_menu_bar()
self._add_keymaps()
def _setup_undo(self):
"""
Set up undo stack and undo view
"""
self.undo_stack = QtWidgets.QUndoStack(self)
self.undo_view = QtWidgets.QUndoView(self.undo_stack)
self.undo_view.setWindowTitle("Undo View")
self.undo_view.setAttribute(QtCore.Qt.WA_QuitOnClose, False)
def _setup_menu_bar(self):
"""
Set up menu bar: create actions and connect them to methods.
"""
# edit menu
edit_menu = self.menuBar().addMenu("Edit")
self.undo_action = edit_menu.addAction("Undo")
self.undo_action.triggered.connect(self.undo_stack.undo)
self.redo_action = edit_menu.addAction("Redo")
self.redo_action.triggered.connect(self.undo_stack.redo)
edit_menu.addSeparator()
self.view_undo_action = edit_menu.addAction("View undo stack")
self.view_undo_action.triggered.connect(self.undo_view.show)
# help menu
help_menu = self.menuBar().addMenu("Help")
self.keyboard_shortcuts = help_menu.addAction("Keyboard shortcuts")
self.keyboard_shortcuts.triggered.connect(self.keymaps_dialog.show)
self.instructions = help_menu.addAction("Instructions")
self.instructions.triggered.connect(self.instructions_dialog.show)
self.about = help_menu.addAction("About")
self.about.triggered.connect(self.about_dialog.show)
def keymaps(self):
"""
:returns: A dictionary in the form ``name: QtGui.QKeySequence``,
where the
Define this GUI's specific key mappings. Note that this method can
be overriden to return a different mapping, but the ``name``s have
to remain identical, in order to be recognized by ``_add_keymaps``.
"""
d = {
"Undo": QtGui.QKeySequence("Ctrl+Z"),
"Redo": QtGui.QKeySequence("Ctrl+Y"),
"View undo list": QtGui.QKeySequence("Alt+Z"),
#
"Load image path": QtGui.QKeySequence("Ctrl+I"),
"Load mask path": QtGui.QKeySequence("Ctrl+M"),
"Load preannotation path": QtGui.QKeySequence("Ctrl+P"),
#
"Save mask path": QtGui.QKeySequence("Alt+S"),
"Save mask(s)": QtGui.QKeySequence("Ctrl+S"),
#
"Set painter": QtGui.QKeySequence("a"),
"Set eraser": QtGui.QKeySequence("e"),
"Set masked painter": QtGui.QKeySequence("m"),
#
"Next image": QtGui.QKeySequence("Space"),
"Previous image": QtGui.QKeySequence("Ctrl+Space")
}
return d
def _add_keymaps(self):
"""
This function is closeley connected to ``keymaps``. There, the
shortcuts are defined, here, they are applied.
"""
km = self.keymaps()
# add menu shortcuts
self.undo_action.setShortcut(km["Undo"])
self.redo_action.setShortcut(km["Redo"])
self.view_undo_action.setShortcut(km["View undo list"])
# add widget shortcuts
#
self.file_lists.img_list.file_button.setShortcut(km["Load image path"])
self.file_lists.mask_list.file_button.setShortcut(km["Load mask path"])
self.file_lists.preannot_list.file_button.setShortcut(
km["Load preannotation path"])
#
self.save_form.file_dialog_button.setShortcut(km["Save mask path"])
self.save_form.save_button.setShortcut(km["Save mask(s)"])
# Paint region (wheel event has the brush size)
QtWidgets.QShortcut( # combobox shortcuts are a little more complex
km["Set painter"], self.paint_form.brush_combo_box,
lambda: self.paint_form.brush_combo_box.setCurrentText(
self.PAINTER_TXT))
QtWidgets.QShortcut(
km["Set eraser"], self.paint_form.brush_combo_box,
lambda: self.paint_form.brush_combo_box.setCurrentText(
self.ERASER_TXT))
QtWidgets.QShortcut(
km["Set masked painter"], self.paint_form.brush_combo_box,
lambda: self.paint_form.brush_combo_box.setCurrentText(
self.MASKED_PAINTER_TXT))
#
QtWidgets.QShortcut(
km["Next image"], self, lambda: self._switch_img(1))
QtWidgets.QShortcut(
km["Previous image"], self, lambda: self._switch_img(-1))
def _switch_img(self, step=1):
"""
        An alternative to double clicking on an image list item is to call
        this method, which switches to the image located at
        ``current_img + step`` in the list.
"""
curr_idx = self.file_lists.img_list.file_list.currentRow()
nxt_item = self.file_lists.img_list.file_list.item(curr_idx + step)
if nxt_item is not None:
success = self._handle_img_selection(nxt_item.text())
if success:
self.file_lists.img_list.file_list.setCurrentItem(nxt_item)
def _handle_img_selection(self, basename):
"""
This protected method is triggered when double clicking on an
image list item, or called by ``_switch_img``.
"""
abspath = os.path.join(self.file_lists.img_list.dirpath, basename)
success = self.graphics_view.new_image(abspath, self.mask_color,
self.preannot_color)
if self.file_lists.preannot_list is not None:
self.file_lists.preannot_list.update_path(self.file_lists.preannot_list.dirpath,
basename)
if self.file_lists.mask_list is not None:
self.file_lists.mask_list.update_path(self.file_lists.mask_list.dirpath,
basename)
if success:
self.current_img_basename = basename
return success
def _handle_mask_selection(self, basename):
"""
This protected method is triggered when double clicking on an
annotation list item.
"""
abspath = os.path.join(self.file_lists.mask_list.dirpath, basename)
self.graphics_view.mask_from_path(abspath, self.mask_color)
def _handle_preannot_selection(self, basename):
"""
This protected method is triggered when double clicking on a
preannotation list item.
"""
abspath = os.path.join(self.file_lists.preannot_list.dirpath, basename)
        self.graphics_view.preannot_from_path(
            abspath, self.preannot_color)
def wheelEvent(self, event):
"""
        The ``DisplayView`` has zoom functionality associated with the wheel.
        Here we add 'brush size change' functionality for when the wheel is
        rolled while Control is pressed.
"""
mods = event.modifiers()
has_ctrl = bool(mods & QtCore.Qt.ControlModifier)
has_alt = bool(mods & QtCore.Qt.AltModifier)
has_shift = bool(mods & QtCore.Qt.ShiftModifier)
if (has_ctrl, has_alt, has_shift) == (True, False, False):
current = self.paint_form.brush_size_slider.value()
            delta = 1 if event.angleDelta().y() >= 0 else -1
self.paint_form.brush_size_slider.setValue(current + delta)
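# Illustrative sketch, not in the original file: ``keymaps`` is designed to
# be overridden. Assuming the window class above is importable under the
# hypothetical name ``AnnotMainWindow``, a subclass could remap one action
# while keeping the recognized key names intact:
#
#   class MyWindow(AnnotMainWindow):
#       def keymaps(self):
#           km = super().keymaps()
#           km["Next image"] = QtGui.QKeySequence("Right")  # same name, new key
#           return km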
| 41.301736 | 92 | 0.613997 |
4a1ccd2e74aae1ada96fbc6848346a134fba3da8
| 2,767 |
py
|
Python
|
RainMachineDev.indigoPlugin/Contents/Server Plugin/controller.py
|
geoffsharris/Indigo-Rainmachine
|
375d93ae7ee66a8c3037df17022f46168bf832a1
|
[
"MIT"
] | null | null | null |
RainMachineDev.indigoPlugin/Contents/Server Plugin/controller.py
|
geoffsharris/Indigo-Rainmachine
|
375d93ae7ee66a8c3037df17022f46168bf832a1
|
[
"MIT"
] | 5 |
2020-12-16T18:18:00.000Z
|
2021-05-24T15:42:26.000Z
|
RainMachineDev.indigoPlugin/Contents/Server Plugin/controller.py
|
geoffsharris/Indigo-Rainmachine
|
375d93ae7ee66a8c3037df17022f46168bf832a1
|
[
"MIT"
] | null | null | null |
"""Define a RainMachine controller class."""
from datetime import datetime, timedelta
from program import Program
from provision import Provision
from zone import Zone
from watering import Watering
URL_BASE_LOCAL = 'https://{}:{}/api/4'
URL_BASE_REMOTE = 'https://api.rainmachine.com/{}/api/4'
class Controller: # pylint: disable=too-many-instance-attributes
"""Define the controller."""
def __init__(self, request):
"""Initialize."""
self._access_token = None
self._access_token_expiration = None
self._client_request = request
self._host = None
self._ssl = True
self.api_version = None
self.hardware_version = None
self.mac = None
self.name = None
self.software_version = None
self.connection_type = None
# API endpoints:
self.programs = Program(self._request)
self.provisioning = Provision(self._request)
self.zones = Zone(self._request)
self.watering = Watering(self._request)
def _request(self, method, endpoint, **kwargs):
"""Wrap the generic request method to add access token, etc."""
return self._client_request(
method,
'{}/{}'.format(self._host, endpoint),
access_token=self._access_token,
access_token_expiration=self._access_token_expiration,
ssl=self._ssl,
**kwargs
)
class LocalController(Controller):
"""Define a controller accessed over the LAN."""
def __init__(self, request, host, port, ssl):
"""Initialize."""
Controller.__init__(self, request)
self._host = URL_BASE_LOCAL.format(host, port)
self._ssl = ssl
self.connection_type = "local"
    def login(self, password):
        """Authenticate against the device (locally)."""
auth_resp = self._client_request(
"POST",
"{}/auth/login".format(self._host),
json={"pwd": password, "remember": 1},
ssl=False)
self._access_token = auth_resp['access_token']
self._access_token_expiration = datetime.now() + timedelta(seconds=int(auth_resp["expires_in"]) - 10)
class RemoteController(Controller):
"""Define a controller accessed over RainMachine's cloud."""
def login(self, stage_1_access_token, sprinkler_id, password):
"""Authenticate against the device (remotely)."""
auth_resp = self._client_request(
"post",
"https://my.rainmachine.com/devices/login-sprinkler",
access_token=stage_1_access_token,
json={"sprinklerId": sprinkler_id, "pwd": password})
self._access_token = auth_resp['access_token']
self._host = URL_BASE_REMOTE.format(sprinkler_id)
self.connection_type = "cloud"
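# Illustrative usage sketch, not part of the original module. It assumes a
# ``request`` callable with the signature used by ``Controller._request``
# above and a reachable device; host, port and password are made-up
# placeholders:
#
#   controller = LocalController(request, "192.168.1.10", 8080, ssl=True)
#   controller.login("my-device-password")
#   zones = controller.zones  # Zone endpoint wrapper, authenticated requests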
| 32.940476 | 109 | 0.637875 |
4a1ccfb1295319b59de0720edb0c4a91725b5a21
| 272 |
py
|
Python
|
BOJ_Solved/BOJ-1003.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | 7 |
2021-11-19T14:50:59.000Z
|
2022-02-25T20:00:20.000Z
|
BOJ_Solved/BOJ-1003.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | null | null | null |
BOJ_Solved/BOJ-1003.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | null | null | null |
"""
Baekjoon (백준) 1003: Fibonacci Function
"""
T = int(input())
zero = [1, 0, 1]
one = [0, 1, 1]
for i in range(3, 41):
zero.append(one[i - 1])
one.append(one[i - 1] + zero[i - 1])
for _ in range(T):
    number = int(input())
print("{} {}".format(zero[number], one[number]))
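# Worked example (illustrative, not in the original file): fib(3) calls
# fib(0) once and fib(1) twice, so input "3" prints "1 2". The precomputation
# relies on the identity zero[i] == one[i-1]: fib(i) reaches fib(0) exactly
# as often as fib(i-1) reaches fib(1).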
| 17 | 52 | 0.514706 |
4a1ccfd5d0a17686cc05a63478d7a5dc2504340b
| 3,421 |
py
|
Python
|
tests/test_update.py
|
quantus/sqlalchemy-continuum
|
1453888e4e696dac835f8b907b7f819433b27e6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_update.py
|
quantus/sqlalchemy-continuum
|
1453888e4e696dac835f8b907b7f819433b27e6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_update.py
|
quantus/sqlalchemy-continuum
|
1453888e4e696dac835f8b907b7f819433b27e6c
|
[
"BSD-3-Clause"
] | null | null | null |
import sqlalchemy as sa
from tests import TestCase
class TestUpdate(TestCase):
def test_creates_versions_on_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Updated name'
article.content = u'Updated content'
self.session.commit()
self.session.refresh(article)
version = article.versions.all()[-1]
assert version.name == u'Updated name'
assert version.content == u'Updated content'
assert version.transaction.id == version.transaction_id
def test_partial_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.content = u'Updated content'
self.session.commit()
self.session.refresh(article)
version = article.versions.all()[-1]
assert version.name == u'Some article'
assert version.content == u'Updated content'
assert version.transaction.id == version.transaction_id
def test_update_with_same_values(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.refresh(article)
article.name = u'Some article'
self.session.commit()
assert article.versions.count() == 1
def test_stores_operation_type(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Some other article'
self.session.commit()
assert article.versions[-1].operation_type == 1
def test_multiple_updates_within_same_transaction(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.content = u'Updated content'
self.session.flush()
article.content = u'Updated content 2'
self.session.commit()
assert article.versions.count() == 2
version = article.versions.all()[-1]
assert version.name == u'Some article'
assert version.content == u'Updated content 2'
class TestUpdateWithDefaultValues(TestCase):
def create_models(self):
class Article(self.Model):
__tablename__ = 'article'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
updated_at = sa.Column(sa.DateTime, server_default=sa.func.now())
is_editable = sa.Column(sa.Boolean)
self.Article = Article
def test_update_with_default_values(self):
article = self.Article()
article.name = u'Some article'
article.is_editable = False
self.session.add(article)
self.session.commit()
article.is_editable = True
self.session.commit()
article = article.versions.all()[-1]
assert article.name == u'Some article'
| 32.273585 | 77 | 0.625256 |
4a1cd2e25ae1742d83024490fe0b89865168ebd9
| 1,927 |
py
|
Python
|
Integer to Roman.py
|
sugia/leetcode
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
[
"Apache-2.0"
] | null | null | null |
Integer to Roman.py
|
sugia/leetcode
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
[
"Apache-2.0"
] | null | null | null |
Integer to Roman.py
|
sugia/leetcode
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
[
"Apache-2.0"
] | null | null | null |
'''
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given an integer, convert it to a roman numeral. Input is guaranteed to be within the range from 1 to 3999.
Example 1:
Input: 3
Output: "III"
Example 2:
Input: 4
Output: "IV"
Example 3:
Input: 9
Output: "IX"
Example 4:
Input: 58
Output: "LVIII"
Explanation: L = 50, V = 5 and III = 3.
Example 5:
Input: 1994
Output: "MCMXCIV"
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
'''
class Solution(object):
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
dic = {1: 'I', 5: 'V', 10: 'X', 50: 'L', 100: 'C', 500: 'D', 1000: 'M', 4: 'IV', 9: 'IX', 40: 'XL', 90: 'XC', 400: 'CD', 900: 'CM'}
vec = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
res = []
while num > 0:
for val in vec:
while num >= val:
num -= val
res.append(dic[val])
return ''.join(res)
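# Quick sanity checks (illustrative, not in the original file):
#   Solution().intToRoman(58)   == 'LVIII'
#   Solution().intToRoman(1994) == 'MCMXCIV'
# The greedy walk over ``vec`` works because each value in the table is the
# largest symbol (or subtractive pair) that can start the remaining number.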
| 31.080645 | 345 | 0.57862 |
4a1cd3174fb23a25a00689c66f35dd83df025888
| 1,021 |
py
|
Python
|
models/bert.py
|
XJay18/ChineseTextClassification
|
0920af34f68830b842fd6a246d1ee72183fe23d6
|
[
"MIT"
] | null | null | null |
models/bert.py
|
XJay18/ChineseTextClassification
|
0920af34f68830b842fd6a246d1ee72183fe23d6
|
[
"MIT"
] | null | null | null |
models/bert.py
|
XJay18/ChineseTextClassification
|
0920af34f68830b842fd6a246d1ee72183fe23d6
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
from transformers import BertTokenizer, BertModel
PRETRAINED_WEIGHTS = "bert-base-chinese"
class BERT(nn.Module):
def __init__(self, weights=None, hidden_state=768, num_classes=2):
super(BERT, self).__init__()
self.tokenizer = BertTokenizer.from_pretrained(PRETRAINED_WEIGHTS)
if weights is None:
# for training
self.bert = BertModel.from_pretrained(PRETRAINED_WEIGHTS)
else:
# for testing
self.bert = BertModel.from_pretrained(None, **weights)
self.config = self.bert.config
for param in self.bert.parameters():
param.requires_grad = True
self.fc = nn.Linear(hidden_state, num_classes)
def forward(self, x):
ctx, mask = x
_, pooled = self.bert(ctx, attention_mask=mask)
return self.fc(pooled)
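def _example_forward():
    """Illustrative smoke test, not in the original file. Assumes an older
    ``transformers`` release where ``BertModel.__call__`` returns a tuple
    (matching the ``forward`` above) and network access to download the
    pretrained weights."""
    import torch
    net = BERT()
    # a short Chinese test sentence, since the backbone is bert-base-chinese
    ids = torch.tensor([net.tokenizer.encode("这是一条测试文本")])
    mask = torch.ones_like(ids)
    return net((ids, mask))  # logits of shape (1, num_classes)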
if __name__ == '__main__':
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_WEIGHTS)
model = BertModel.from_pretrained(PRETRAINED_WEIGHTS)
| 32.935484 | 74 | 0.670911 |
4a1cd38fdd2e6e1cd83ef42380209e021718c22e
| 857 |
py
|
Python
|
mysite/mysite/urls.py
|
elexihuanchifan/hellopy
|
ee51c0eefca7136b5dca30679117d7d6ecd87d7a
|
[
"MIT"
] | null | null | null |
mysite/mysite/urls.py
|
elexihuanchifan/hellopy
|
ee51c0eefca7136b5dca30679117d7d6ecd87d7a
|
[
"MIT"
] | null | null | null |
mysite/mysite/urls.py
|
elexihuanchifan/hellopy
|
ee51c0eefca7136b5dca30679117d7d6ecd87d7a
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', 'mysite.views.first_page'),
url(r'^west/', include('west.urls')),
]
| 34.28 | 79 | 0.689615 |
4a1cd56daf7433b1b7603fc04015651eea3fd07d
| 14,313 |
py
|
Python
|
inception.py
|
phymhan/ACGAN-PyTorch
|
36dca717ddcac37e77c6ecfe04e6d66575a02555
|
[
"MIT"
] | null | null | null |
inception.py
|
phymhan/ACGAN-PyTorch
|
36dca717ddcac37e77c6ecfe04e6d66575a02555
|
[
"MIT"
] | null | null | null |
inception.py
|
phymhan/ACGAN-PyTorch
|
36dca717ddcac37e77c6ecfe04e6d66575a02555
|
[
"MIT"
] | 1 |
2020-06-16T04:00:18.000Z
|
2020-06-16T04:00:18.000Z
|
''' Inception utilities
This file contains methods for calculating IS and FID, using either
the original numpy code or an accelerated fully-pytorch version that
uses a fast newton-schulz approximation for the matrix sqrt. There are also
methods for acquiring a desired number of samples from the Generator,
and parallelizing the inbuilt PyTorch inception network.
NOTE that Inception Scores and FIDs calculated using these methods will
*not* be directly comparable to values calculated using the original TF
IS/FID code. You *must* use the TF model if you wish to report and compare
numbers. This code tends to produce IS values that are 5-10% lower than
those obtained through TF.
borrowed from Twin-Auxiliary-Classifier GAN code
'''
import numpy as np
from scipy import linalg # For numpy FID
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
import os
import pdb
from utils import ImageSampler
# Module that wraps the inception network to enable use with dataparallel and
# returning pool features and logits.
class WrapInception(nn.Module):
def __init__(self, net):
super(WrapInception, self).__init__()
self.net = net
self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),
requires_grad=False)
self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),
requires_grad=False)
def forward(self, x):
# Normalize x
x = (x + 1.) / 2.0
x = (x - self.mean) / self.std
# Upsample if necessary
if x.shape[2] != 299 or x.shape[3] != 299:
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
# 299 x 299 x 3
x = self.net.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.net.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.net.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.net.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.net.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.net.Mixed_5b(x)
# 35 x 35 x 256
x = self.net.Mixed_5c(x)
# 35 x 35 x 288
x = self.net.Mixed_5d(x)
# 35 x 35 x 288
x = self.net.Mixed_6a(x)
# 17 x 17 x 768
x = self.net.Mixed_6b(x)
# 17 x 17 x 768
x = self.net.Mixed_6c(x)
# 17 x 17 x 768
x = self.net.Mixed_6d(x)
# 17 x 17 x 768
x = self.net.Mixed_6e(x)
# 17 x 17 x 768
x = self.net.Mixed_7a(x)
# 8 x 8 x 1280
x = self.net.Mixed_7b(x)
# 8 x 8 x 2048
x = self.net.Mixed_7c(x)
# 8 x 8 x 2048
pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
# 1 x 1 x 2048
logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
# 1000 (num_classes)
return pool, logits
# A pytorch implementation of cov, from Modar M. Alfadly
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze()
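def _torch_cov_sanity_check():
    """Illustrative check, not in the original file: torch_cov should agree
    with numpy's covariance on the same data. Note that torch_cov centers
    ``m`` in place (the subtraction mutates shared storage), hence the
    clone."""
    x = torch.randn(100, 5)
    tc = torch_cov(x.clone(), rowvar=False)
    nc = np.cov(x.numpy(), rowvar=False)
    return np.allclose(tc.numpy(), nc, atol=1e-5)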
# Pytorch implementation of matrix sqrt, from Tsung-Yu Lin, and Subhransu Maji
# https://github.com/msubhransu/matrix-sqrt
def sqrt_newton_schulz(A, numIters, dtype=None):
with torch.no_grad():
if dtype is None:
dtype = A.type()
batchSize = A.shape[0]
dim = A.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
        Y = A.div(normA.view(batchSize, 1, 1).expand_as(A))
I = torch.eye(dim, dim).view(1, dim, dim).repeat(batchSize, 1, 1).type(dtype)
Z = torch.eye(dim, dim).view(1, dim, dim).repeat(batchSize, 1, 1).type(dtype)
for i in range(numIters):
T = 0.5 * (3.0 * I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y * torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
return sA
# FID calculator from TTUR--consider replacing this with GPU-accelerated cov
# calculations using torch?
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
        print('Warning: covmean has an imaginary component; using its real part')
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
out = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
return out
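def _fid_sanity_check():
    """Illustrative check, not in the original file: the Frechet distance
    between a Gaussian and itself is ~0, and a pure mean shift adds
    ||mu_1 - mu_2||^2 (the covariance terms cancel)."""
    mu, sigma = np.zeros(4), np.eye(4)
    same = numpy_calculate_frechet_distance(mu, sigma, mu, sigma)
    shifted = numpy_calculate_frechet_distance(mu, sigma, mu + 1.0, sigma)
    return same, shifted  # ~0.0 and ~4.0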
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out
# Calculate Inception Score mean + std given softmax'd logits and number of splits
def calculate_inception_score(pred, num_splits=10):
scores = []
for index in range(num_splits):
pred_chunk = pred[index * (pred.shape[0] // num_splits): (index + 1) * (pred.shape[0] // num_splits), :]
kl_inception = pred_chunk * (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))
kl_inception = np.mean(np.sum(kl_inception, 1))
scores.append(np.exp(kl_inception))
return np.mean(scores), np.std(scores)
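def _inception_score_sanity_check():
    """Illustrative check, not in the original file: perfectly uniform
    predictions carry no information, so every split's KL term is 0 and the
    score is exp(0) == 1."""
    pred = np.full((1000, 10), 0.1)
    return calculate_inception_score(pred)  # (~1.0, ~0.0)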
# Loop and run the sampler and the net until it accumulates num_inception_images
# activations. Return the pool, the logits, and the labels (if one wants
# Inception Accuracy the labels of the generated class will be needed)
def accumulate_inception_activations(dataloader, net, num_inception_images=50000):
dataiter = iter(dataloader)
pool, logits, labels = [], [], []
while (torch.cat(logits, 0).shape[0] if len(logits) else 0) < num_inception_images:
with torch.no_grad():
# images, labels_val = sample()
# pool_val, logits_val = net(images.float())
images, labels_val = next(dataiter) # altered
pool_val, logits_val = net(images.cuda()) # altered
pool += [pool_val]
logits += [F.softmax(logits_val, 1)]
labels += [labels_val]
return torch.cat(pool, 0), torch.cat(logits, 0), torch.cat(labels, 0)
# Load and wrap the Inception model
def load_inception_net(parallel=False, train=False):
inception_model = inception_v3(pretrained=True, transform_input=False, init_weights=False)
    if not train:
inception_model = WrapInception(inception_model.eval()).cuda()
else:
inception_model = WrapInception(inception_model.train()).cuda()
if parallel:
print('Parallelizing Inception module...')
inception_model = nn.DataParallel(inception_model)
return inception_model
def prepare_data_statistics(dataloader, datafile, net, num_inception_images=50000):
if not os.path.exists(datafile + '_inception_moments.npz'):
try:
os.makedirs(os.path.split(datafile)[0])
except OSError:
pass
# net = load_inception_net()
pool, logits, labels = accumulate_inception_activations(dataloader, net, num_inception_images)
mu, sigma = np.mean(pool.cpu().numpy(), axis=0), np.cov(pool.cpu().numpy(), rowvar=False)
np.savez(datafile + '_inception_moments.npz', mu=mu, sigma=sigma)
else:
mu = np.load(datafile + '_inception_moments.npz')['mu']
sigma = np.load(datafile + '_inception_moments.npz')['sigma']
return mu, sigma
# This produces a function which takes in an iterator which returns a set number of samples
# and iterates until it accumulates config['num_inception_images'] images.
# The iterator can return samples with a different batch size than used in
# training, using the setting config['inception_batchsize']
def prepare_inception_metrics(dataloader, datafile, parallel, num_inception_images=50000, no_is=False):
    # Load metrics; this is intentionally not in a try-except block so that
    # the script will crash here if it cannot find the Inception moments.
# Load network
net = load_inception_net(parallel)
# By default, remove the "hdf5" from dataset
# dataset = dataset.strip('_hdf5')
# data_mu = np.load(dataset + '_inception_moments.npz')['mu']
# data_sigma = np.load(dataset + '_inception_moments.npz')['sigma']
data_mu, data_sigma = prepare_data_statistics(dataloader, datafile, net, num_inception_images)
def get_inception_metrics(sampler, num_inception_images, num_splits=10,
prints=True, use_torch=False):
pool, logits, labels = accumulate_inception_activations(sampler, net, num_inception_images)
if no_is:
IS_mean, IS_std = 0., 1.
else:
if prints:
print('Gathering activations...')
if prints:
print('Calculating Inception Score...')
IS_mean, IS_std = calculate_inception_score(logits.cpu().numpy(), num_splits)
if prints:
print('Calculating means and covariances...')
if use_torch:
mu, sigma = torch.mean(pool, 0), torch_cov(pool, rowvar=False)
else:
mu, sigma = np.mean(pool.cpu().numpy(), axis=0), np.cov(pool.cpu().numpy(), rowvar=False)
if prints:
print('Covariances calculated, getting FID...')
if use_torch:
FID = torch_calculate_frechet_distance(mu, sigma, torch.tensor(data_mu).float().cuda(),
torch.tensor(data_sigma).float().cuda())
FID = float(FID.cpu().numpy())
else:
FID = numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
# Delete mu, sigma, pool, logits, and labels, just in case
del mu, sigma, pool, logits, labels
return IS_mean, IS_std, FID
return get_inception_metrics
| 41.973607 | 112 | 0.638301 |
4a1cd63af0fcc0e8a9fc9bc2b733cb8bdfa995a2
| 751 |
py
|
Python
|
userbot/modules/sql_helper/gmute_sql.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 |
2021-11-28T16:04:59.000Z
|
2021-11-28T16:04:59.000Z
|
userbot/modules/sql_helper/gmute_sql.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 |
2021-11-28T21:14:32.000Z
|
2021-11-29T09:20:22.000Z
|
userbot/modules/sql_helper/gmute_sql.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 |
2021-11-29T05:25:48.000Z
|
2021-11-29T05:25:48.000Z
|
try:
from userbot.modules.sql_helper import BASE, SESSION
except ImportError:
raise AttributeError
from sqlalchemy import Column, String
class GMute(BASE):
__tablename__ = "gmute"
sender = Column(String(14), primary_key=True)
def __init__(self, sender):
self.sender = str(sender)
GMute.__table__.create(checkfirst=True)
def is_gmuted(_sender_id):
try:
return SESSION.query(GMute).all()
except BaseException:
return None
finally:
SESSION.close()
def gmute(sender):
adder = GMute(str(sender))
SESSION.add(adder)
SESSION.commit()
def ungmute(sender):
rem = SESSION.query(GMute).get((str(sender)))
if rem:
SESSION.delete(rem)
SESSION.commit()
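# Illustrative usage, not in the original file:
#   gmute(123456789)       # persist a user id
#   ungmute(123456789)     # remove it again
#   rows = is_gmuted(0)    # note: returns *all* rows, not a per-user lookup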
| 18.775 | 56 | 0.664447 |
4a1cd8ae8989c0b74f43df575ac55d331929527c
| 125 |
py
|
Python
|
jointly/__init__.py
|
enra64/jointly-1
|
002665e6728dc1dac6a59b7fd0f24456f15ceeba
|
[
"MIT"
] | null | null | null |
jointly/__init__.py
|
enra64/jointly-1
|
002665e6728dc1dac6a59b7fd0f24456f15ceeba
|
[
"MIT"
] | null | null | null |
jointly/__init__.py
|
enra64/jointly-1
|
002665e6728dc1dac6a59b7fd0f24456f15ceeba
|
[
"MIT"
] | null | null | null |
from .abstract_extractor import *
from .shake_extractor import *
from .segment_selector import *
from .synchronizer import *
| 25 | 33 | 0.808 |
4a1cda8c090e7d4b214270515c76796db5a79656
| 3,527 |
py
|
Python
|
g2p_project/g2p/views.py
|
Vivaq/g2p
|
a92e37835851abba12985614aa52e80179025aa3
|
[
"BSD-2-Clause"
] | 1 |
2015-10-02T12:01:17.000Z
|
2015-10-02T12:01:17.000Z
|
g2p_project/g2p/views.py
|
Vivaq/g2p
|
a92e37835851abba12985614aa52e80179025aa3
|
[
"BSD-2-Clause"
] | null | null | null |
g2p_project/g2p/views.py
|
Vivaq/g2p
|
a92e37835851abba12985614aa52e80179025aa3
|
[
"BSD-2-Clause"
] | null | null | null |
#-*- coding: utf-8 -*-
import json
from itertools import product
from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from g2p.forms import DocumentForm
import sqlite3
import os
from g2p.models import Document
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
import unicodedata
from django.contrib.auth.decorators import login_required
class MyList(list):
def __init__(self, *args):
super(MyList, self).__init__(args)
def __add__(self, other):
new_kl = self[:]
new_kl += other
return self.__class__(*new_kl)
def __radd__(self, other):
new_kl = other
new_kl += self[:]
return self.__class__(*new_kl)
def __sub__(self, other):
return self.__class__(*[item for item in self if item not in other])
def __mul__(self, other):
return map(''.join, list(product(self, other)))
def __rmul__(self, other):
return map(''.join, list(product(other, self)))
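# Illustrative semantics, not in the original file (Python 2, where map()
# returns a list):
#   MyList('a', 'b') * MyList('x', 'y') == ['ax', 'ay', 'bx', 'by']
#   MyList('a', 'b') - ['b'] == MyList('a')
# The operators let the phoneme rule sets below be unioned (+), filtered (-)
# and combined pairwise (*).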
def execSetsSplitStringsAndMakeProducts(x):
A = MyList('a', u'ą', 'e', u'ę', 'i', 'o', u'ó', 'u', 'y')
D = MyList('b', 'd', 'g', 'z', u'ź', u'ż')
T = MyList('c', u'ć', 'f', 'h', 'k', 'p', 's', u'ś', 't')
R = MyList('l', u'ł', 'r', 'w')
M = MyList('m', 'n', u'ń', 'j')
V = A+R+M
O = MyList('.', '!', '?', ',', ';', ':', '-', '...', ')', '(', ' ', '#')
O1 = MyList('.', '!', '?', ',', ';', ':', '-', '...', ')', '(', '#')
X = A+D+T+R+M+O
x = json.loads(x[0])
for i, let_rules in enumerate(x.values()):
for j, let_rule in enumerate(let_rules.values()):
for k, phoneme_rules in enumerate(let_rule):
rule = eval(''.join(phoneme_rules))
rule = [value for values in rule for value in (values if isinstance(values, basestring) else [values])]
before_, after_ = rule[:rule.index('_')], rule[rule.index('_')+1:]
rule = [[''.join(res) for res in list(product(*before_))],
[''.join(res) for res in list(product(*after_))]]
x.values()[i].values()[j][k] = rule
return x
@login_required
def downloadData(request):
if request.method == 'POST':
'''with open('C:\data.txt', 'r') as f:
try:
jsn = json.load(f)
except:
jsn = {}
with open('C:\data.txt', 'w') as f:
jsn.update(execSetsSplitStringsAndMakeProducts(request.POST.get('docfile')))
json.dump(jsn, f)'''
db = sqlite3.connect(os.getcwd()+'\database.sqlite3')
c = db.cursor()
db.commit()
request.POST = dict(request.POST)
try:
x = eval(c.execute('select * from g2p_document').next()[1])
x.update(execSetsSplitStringsAndMakeProducts(request.POST.get('docfile')))
request.POST['docfile'] = x
except:
request.POST['docfile'] = execSetsSplitStringsAndMakeProducts(request.POST.get('docfile'))
Document.objects.all().delete()
form = DocumentForm(request.POST)
data = form.save(commit=False)
data.save()
return HttpResponseRedirect(reverse('g2p.views.downloadData'))
else:
form = DocumentForm()
return render_to_response(
'g2p/exp.html',
{'form': form},
context_instance=RequestContext(request)
)
| 37.126316 | 119 | 0.576978 |
4a1cdac65cb961179083c83aeeec50d4b843449e
| 7,384 |
py
|
Python
|
mujoco/vail/main.py
|
amy12xx/lets-do-irl
|
fd469e9fb7426e41b07c83ce4b87962ac3543b1e
|
[
"MIT"
] | 408 |
2019-02-06T00:31:28.000Z
|
2022-03-31T03:21:44.000Z
|
mujoco/vail/main.py
|
LeeonTree/lets-do-irl
|
2ce7496dddf95e9e9f418817150108bd66d65407
|
[
"MIT"
] | 5 |
2020-02-05T04:03:47.000Z
|
2021-08-21T01:30:57.000Z
|
mujoco/vail/main.py
|
LeeonTree/lets-do-irl
|
2ce7496dddf95e9e9f418817150108bd66d65407
|
[
"MIT"
] | 81 |
2019-02-23T20:46:18.000Z
|
2022-03-24T17:43:02.000Z
|
import os
import gym
import pickle
import argparse
import numpy as np
from collections import deque
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from utils.utils import *
from utils.zfilter import ZFilter
from model import Actor, Critic, VDB
from train_model import train_actor_critic, train_vdb
parser = argparse.ArgumentParser(description='PyTorch VAIL')
parser.add_argument('--env_name', type=str, default="Hopper-v2",
help='name of the environment to run')
parser.add_argument('--load_model', type=str, default=None,
help='path to load the saved model')
parser.add_argument('--render', action="store_true", default=False,
                    help='render the environment while sampling (off by default)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discounted factor (default: 0.99)')
parser.add_argument('--lamda', type=float, default=0.98,
help='GAE hyper-parameter (default: 0.98)')
parser.add_argument('--hidden_size', type=int, default=100,
help='hidden unit size of actor, critic and vdb networks (default: 100)')
parser.add_argument('--z_size', type=int, default=4,
help='latent vector z unit size of vdb networks (default: 4)')
parser.add_argument('--learning_rate', type=float, default=3e-4,
help='learning rate of models (default: 3e-4)')
parser.add_argument('--l2_rate', type=float, default=1e-3,
help='l2 regularizer coefficient (default: 1e-3)')
parser.add_argument('--clip_param', type=float, default=0.2,
help='clipping parameter for PPO (default: 0.2)')
parser.add_argument('--alpha_beta', type=float, default=1e-4,
help='step size to be used in beta term (default: 1e-4)')
parser.add_argument('--i_c', type=float, default=0.5,
help='constraint for KL-Divergence upper bound (default: 0.5)')
parser.add_argument('--vdb_update_num', type=int, default=3,
help='update number of variational discriminator bottleneck (default: 3)')
parser.add_argument('--ppo_update_num', type=int, default=10,
help='update number of actor-critic (default: 10)')
parser.add_argument('--total_sample_size', type=int, default=2048,
help='total sample size to collect before PPO update (default: 2048)')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size to update (default: 64)')
parser.add_argument('--suspend_accu_exp', type=float, default=0.8,
help='accuracy for suspending discriminator about expert data (default: 0.8)')
parser.add_argument('--suspend_accu_gen', type=float, default=0.8,
help='accuracy for suspending discriminator about generated data (default: 0.8)')
parser.add_argument('--max_iter_num', type=int, default=4000,
help='maximal number of main iterations (default: 4000)')
parser.add_argument('--seed', type=int, default=500,
help='random seed (default: 500)')
parser.add_argument('--logdir', type=str, default='logs',
help='tensorboardx logs directory')
args = parser.parse_args()
def main():
env = gym.make(args.env_name)
env.seed(args.seed)
torch.manual_seed(args.seed)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
running_state = ZFilter((num_inputs,), clip=5)
print('state size:', num_inputs)
print('action size:', num_actions)
actor = Actor(num_inputs, num_actions, args)
critic = Critic(num_inputs, args)
vdb = VDB(num_inputs + num_actions, args)
actor_optim = optim.Adam(actor.parameters(), lr=args.learning_rate)
critic_optim = optim.Adam(critic.parameters(), lr=args.learning_rate,
weight_decay=args.l2_rate)
vdb_optim = optim.Adam(vdb.parameters(), lr=args.learning_rate)
# load demonstrations
expert_demo, _ = pickle.load(open('./expert_demo/expert_demo.p', "rb"))
demonstrations = np.array(expert_demo)
print("demonstrations.shape", demonstrations.shape)
writer = SummaryWriter(args.logdir)
if args.load_model is not None:
saved_ckpt_path = os.path.join(os.getcwd(), 'save_model', str(args.load_model))
ckpt = torch.load(saved_ckpt_path)
actor.load_state_dict(ckpt['actor'])
critic.load_state_dict(ckpt['critic'])
vdb.load_state_dict(ckpt['vdb'])
running_state.rs.n = ckpt['z_filter_n']
running_state.rs.mean = ckpt['z_filter_m']
running_state.rs.sum_square = ckpt['z_filter_s']
print("Loaded OK ex. Zfilter N {}".format(running_state.rs.n))
episodes = 0
train_discrim_flag = True
for iter in range(args.max_iter_num):
actor.eval(), critic.eval()
memory = deque()
steps = 0
scores = []
while steps < args.total_sample_size:
state = env.reset()
score = 0
state = running_state(state)
for _ in range(10000):
if args.render:
env.render()
steps += 1
mu, std = actor(torch.Tensor(state).unsqueeze(0))
action = get_action(mu, std)[0]
next_state, reward, done, _ = env.step(action)
irl_reward = get_reward(vdb, state, action)
if done:
mask = 0
else:
mask = 1
memory.append([state, action, irl_reward, mask])
next_state = running_state(next_state)
state = next_state
score += reward
if done:
break
episodes += 1
scores.append(score)
score_avg = np.mean(scores)
print('{}:: {} episode score is {:.2f}'.format(iter, episodes, score_avg))
writer.add_scalar('log/score', float(score_avg), iter)
actor.train(), critic.train(), vdb.train()
if train_discrim_flag:
expert_acc, learner_acc = train_vdb(vdb, memory, vdb_optim, demonstrations, 0, args)
print("Expert: %.2f%% | Learner: %.2f%%" % (expert_acc * 100, learner_acc * 100))
if expert_acc > args.suspend_accu_exp and learner_acc > args.suspend_accu_gen:
train_discrim_flag = False
train_actor_critic(actor, critic, memory, actor_optim, critic_optim, args)
        if iter % 100 == 0:
score_avg = int(score_avg)
model_path = os.path.join(os.getcwd(),'save_model')
if not os.path.isdir(model_path):
os.makedirs(model_path)
ckpt_path = os.path.join(model_path, 'ckpt_'+ str(score_avg)+'.pth.tar')
save_checkpoint({
'actor': actor.state_dict(),
'critic': critic.state_dict(),
'vdb': vdb.state_dict(),
'z_filter_n':running_state.rs.n,
'z_filter_m': running_state.rs.mean,
'z_filter_s': running_state.rs.sum_square,
'args': args,
'score': score_avg
}, filename=ckpt_path)
if __name__=="__main__":
main()
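# Example invocations (illustrative; every flag is defined in the argparse
# section above, and ./expert_demo/expert_demo.p must exist):
#   python main.py --env_name Hopper-v2 --render
#   python main.py --env_name Walker2d-v2 --max_iter_num 2000 --logdir logs/walker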
| 40.130435 | 101 | 0.609832 |
4a1cdce4dabe63a150279b3a593d4830ed33681a
| 11,232 |
py
|
Python
|
deals/tests/deals_data.py
|
xuppr/GameDealsDashboard-server
|
a15e90fe6a37431db1b4de565e9e7d54d2aee223
|
[
"MIT"
] | null | null | null |
deals/tests/deals_data.py
|
xuppr/GameDealsDashboard-server
|
a15e90fe6a37431db1b4de565e9e7d54d2aee223
|
[
"MIT"
] | null | null | null |
deals/tests/deals_data.py
|
xuppr/GameDealsDashboard-server
|
a15e90fe6a37431db1b4de565e9e7d54d2aee223
|
[
"MIT"
] | null | null | null |
mock_data = [{
"internalName": "DEUSEXHUMANREVOLUTIONDIRECTORSCUT",
"title": "Deus Ex: Human Revolution - Director's Cut",
"metacriticLink": "\/game\/pc\/deus-ex-human-revolution---directors-cut",
"dealID": "mU%2FbH6z0MsHtcyqBBnv1C29aei%2FU0ZcsW0tNaZjC3xQ%3D",
"storeID": "11",
"gameID": "102249",
"salePrice": "2.99",
"normalPrice": "19.99",
"isOnSale": "1",
"savings": "85.042521",
"metacriticScore": "91",
"steamRatingText": "Very Positive",
"steamRatingPercent": "92",
"steamRatingCount": "18537",
"steamAppID": "238010",
"releaseDate": 1382400000,
"lastChange": 1627327366,
"dealRating": "9.8",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/238010\/capsule_sm_120.jpg?t=1619788192"
}, {
"internalName": "XCOM2COLLECTION",
"title": "XCOM 2 Collection",
"metacriticLink": None,
"dealID": "Sj4Ekwg3zyM34ZrbE3hvOuiZCpshBTDA4WVHu01wp6w%3D",
"storeID": "11",
"gameID": "177716",
"salePrice": "19.99",
"normalPrice": "99.99",
"isOnSale": "1",
"savings": "80.008001",
"metacriticScore": "0",
"steamRatingText": None,
"steamRatingPercent": "0",
"steamRatingCount": "0",
"steamAppID": None,
"releaseDate": 0,
"lastChange": 1627327317,
"dealRating": "9.7",
"thumb": "https:\/\/gamersgatep.imgix.net\/b\/f\/4\/567aacd3255435c3adf1ace4b67884f92e5b44fb.jpg?auto=&w="
}, {
"internalName": "TUMBLESTONE",
"title": "Tumblestone",
"metacriticLink": "\/game\/pc\/tumblestone",
"dealID": "dJNCeHkZV3iaXZQFBSpYh3B2tz6ZuMvBaFpI6d1QYiU%3D",
"storeID": "1",
"gameID": "154838",
"salePrice": "3.74",
"normalPrice": "24.99",
"isOnSale": "1",
"savings": "85.034014",
"metacriticScore": "91",
"steamRatingText": "Very Positive",
"steamRatingPercent": "88",
"steamRatingCount": "119",
"steamAppID": "269710",
"releaseDate": 1468281600,
"lastChange": 1627323031,
"dealRating": "9.7",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/269710\/capsule_sm_120.jpg?t=1625169247"
}, {
"internalName": "JUSTCAUSE2",
"title": "Just Cause 2",
"metacriticLink": "\/game\/pc\/just-cause-2",
"dealID": "Zuw5g7simZBKAGWw5HBDpJTkzM6VtQCs0X6PvE5PN7U%3D",
"storeID": "11",
"gameID": "180",
"salePrice": "1.49",
"normalPrice": "14.99",
"isOnSale": "1",
"savings": "90.060040",
"metacriticScore": "84",
"steamRatingText": "Very Positive",
"steamRatingPercent": "90",
"steamRatingCount": "36051",
"steamAppID": "8190",
"releaseDate": 1269302400,
"lastChange": 1627327441,
"dealRating": "9.6",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/8190\/capsule_sm_120.jpg?t=1593180404"
}, {
"internalName": "ZOMBIENIGHTTERROR",
"title": "Zombie Night Terror",
"metacriticLink": "\/game\/pc\/zombie-night-terror",
"dealID": "kfwmtdgaQCWWgiQOf5nL4d5%2BMuEwy%2Bo5avHGyMJVF4c%3D",
"storeID": "1",
"gameID": "154629",
"salePrice": "1.29",
"normalPrice": "12.99",
"isOnSale": "1",
"savings": "90.069284",
"metacriticScore": "81",
"steamRatingText": "Very Positive",
"steamRatingPercent": "93",
"steamRatingCount": "1923",
"steamAppID": "416680",
"releaseDate": 1468972800,
"lastChange": 1627321444,
"dealRating": "9.4",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/416680\/capsule_sm_120.jpg?t=1582640712"
}, {
"internalName": "REDNECKEDASTROMONSTERSSHOW",
"title": "Redneck Ed: Astro Monsters Show",
"metacriticLink": "\/game\/pc\/redneck-ed-astro-monsters-show",
"dealID": "%2BriI1%2B63K5PCBDSW7YcjfrFIhgc0u2sIkmRpwojt8l4%3D",
"storeID": "1",
"gameID": "220374",
"salePrice": "1.99",
"normalPrice": "19.99",
"isOnSale": "1",
"savings": "90.045023",
"metacriticScore": "53",
"steamRatingText": "Mostly Positive",
"steamRatingPercent": "73",
"steamRatingCount": "26",
"steamAppID": "1237490",
"releaseDate": 1599696000,
"lastChange": 1627323852,
"dealRating": "9.4",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/1237490\/capsule_sm_120.jpg?t=1624543511"
}, {
"internalName": "FREEDOMFORCE",
"title": "Freedom Force",
"metacriticLink": "\/game\/pc\/freedom-force",
"dealID": "7NIk9XNNzmNctW1QTIVzLCrVoDIYbKhjTMyAmNBUo%2FI%3D",
"storeID": "7",
"gameID": "257",
"salePrice": "1.49",
"normalPrice": "5.99",
"isOnSale": "1",
"savings": "75.125209",
"metacriticScore": "90",
"steamRatingText": "Mixed",
"steamRatingPercent": "59",
"steamRatingCount": "355",
"steamAppID": "8880",
"releaseDate": 1016928000,
"lastChange": 1627477943,
"dealRating": "9.3",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/8880\/capsule_sm_120.jpg?t=1569012854"
}, {
"internalName": "WWE2K20DIGITALDELUXEEDITION",
"title": "WWE 2K20 - Digital Deluxe Edition",
"metacriticLink": None,
"dealID": "qROdZ7LIobZof4Tcr8GlEb5A4dgOYyP%2BWzxZh11QBTw%3D",
"storeID": "11",
"gameID": "204473",
"salePrice": "22.49",
"normalPrice": "89.99",
"isOnSale": "1",
"savings": "75.008334",
"metacriticScore": "0",
"steamRatingText": None,
"steamRatingPercent": "0",
"steamRatingCount": "0",
"steamAppID": None,
"releaseDate": 0,
"lastChange": 1627327376,
"dealRating": "9.3",
"thumb": "https:\/\/hb.imgix.net\/6cfe51bf21497b678253ed3e6d7edbc70aa18c87.jpg?auto=compress,format&fit=crop&h=84&w=135&s=4f792354443bd5b0345bef6e647e49a6"
}, {
"internalName": "DIVINITYDRAGONCOMMANDER",
"title": "Divinity: Dragon Commander",
"metacriticLink": "\/game\/pc\/divinity-dragon-commander",
"dealID": "rfnxxA9yFZZ%2BFfxDGsRqLQQubn17Q8NB0SHOChnsL%2BI%3D",
"storeID": "7",
"gameID": "97052",
"salePrice": "3.99",
"normalPrice": "39.99",
"isOnSale": "1",
"savings": "90.022506",
"metacriticScore": "76",
"steamRatingText": "Mostly Positive",
"steamRatingPercent": "73",
"steamRatingCount": "1527",
"steamAppID": "243950",
"releaseDate": 1375747200,
"lastChange": 1627478307,
"dealRating": "9.2",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/243950\/capsule_sm_120.jpg?t=1568735710"
}, {
"internalName": "SYSTEMSHOCK2",
"title": "System Shock 2",
"metacriticLink": "\/game\/pc\/system-shock-2",
"dealID": "tzAVXMgjTwh5hk442e%2FDLgK57c%2FSv4XhH%2B67K3Xvk4k%3D",
"storeID": "7",
"gameID": "96654",
"salePrice": "2.49",
"normalPrice": "9.99",
"isOnSale": "1",
"savings": "75.075075",
"metacriticScore": "92",
"steamRatingText": "Overwhelmingly Positive",
"steamRatingPercent": "95",
"steamRatingCount": "4674",
"steamAppID": "238210",
"releaseDate": 934329600,
"lastChange": 1627309441,
"dealRating": "9.2",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/238210\/capsule_sm_120.jpg?t=1591116073"
}, {
"internalName": "XCOM2",
"title": "XCOM 2",
"metacriticLink": "\/game\/pc\/xcom-2",
"dealID": "sS2YcemCkJJjex6WesVsp%2BgnMOAlqnPmHeiHq1HFihc%3D",
"storeID": "11",
"gameID": "145838",
"salePrice": "9.59",
"normalPrice": "59.99",
"isOnSale": "1",
"savings": "84.014002",
"metacriticScore": "88",
"steamRatingText": "Very Positive",
"steamRatingPercent": "85",
"steamRatingCount": "48016",
"steamAppID": "268500",
"releaseDate": 1454630400,
"lastChange": 1627327429,
"dealRating": "9.1",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/268500\/capsule_sm_120.jpg?t=1600177724"
}, {
"internalName": "THEOFFICEQUEST",
"title": "The Office Quest",
"metacriticLink": "\/game\/pc\/the-office-quest",
"dealID": "IML08yuF8VjsQwlgfgjNDDjw9WCHBN%2FmFAGNhmhlWF4%3D",
"storeID": "1",
"gameID": "187510",
"salePrice": "1.19",
"normalPrice": "11.99",
"isOnSale": "1",
"savings": "90.075063",
"metacriticScore": "72",
"steamRatingText": "Very Positive",
"steamRatingPercent": "95",
"steamRatingCount": "419",
"steamAppID": "810660",
"releaseDate": 1527120000,
"lastChange": 1627411005,
"dealRating": "9.0",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/810660\/capsule_sm_120.jpg?t=1602068195"
}, {
"internalName": "BATTLEFIELD1",
"title": "Battlefield 1",
"metacriticLink": "\/game\/pc\/battlefield-1",
"dealID": "ICV0L0NmwniVHpc4NjfQsDO5gRILOIkqPz05jfxFtCM%3D",
"storeID": "1",
"gameID": "152332",
"salePrice": "4.79",
"normalPrice": "39.99",
"isOnSale": "1",
"savings": "88.022006",
"metacriticScore": "88",
"steamRatingText": "Very Positive",
"steamRatingPercent": "83",
"steamRatingCount": "28733",
"steamAppID": "1238840",
"releaseDate": 1477008000,
"lastChange": 1626989332,
"dealRating": "9.0",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/1238840\/capsule_sm_120.jpg?t=1626725027"
}, {
"internalName": "DEUSEXINVISIBLEWAR",
"title": "Deus Ex: Invisible War",
"metacriticLink": "\/game\/pc\/deus-ex-invisible-war",
"dealID": "3qPSnop7kYkWFlnEh25MnJnPy23yPu3yPKOCWheqjhs%3D",
"storeID": "11",
"gameID": "569",
"salePrice": "0.97",
"normalPrice": "6.99",
"isOnSale": "1",
"savings": "86.123033",
"metacriticScore": "80",
"steamRatingText": "Mixed",
"steamRatingPercent": "57",
"steamRatingCount": "1431",
"steamAppID": "6920",
"releaseDate": 1070323200,
"lastChange": 1627327444,
"dealRating": "9.0",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/6920\/capsule_sm_120.jpg?t=1593593178"
}, {
"internalName": "EVERSPACE",
"title": "Everspace",
"metacriticLink": "\/game\/pc\/everspace",
"dealID": "eKcwMzD%2Bsr2BhW%2BcIb%2FTR1mcNf49dlln2q3p1n1igkw%3D",
"storeID": "7",
"gameID": "157493",
"salePrice": "4.49",
"normalPrice": "29.99",
"isOnSale": "1",
"savings": "85.028343",
"metacriticScore": "79",
"steamRatingText": "Mostly Positive",
"steamRatingPercent": "78",
"steamRatingCount": "7906",
"steamAppID": "396750",
"releaseDate": 1495756800,
"lastChange": 1627477903,
"dealRating": "9.0",
"thumb": "https:\/\/cdn.cloudflare.steamstatic.com\/steam\/apps\/396750\/capsule_sm_120.jpg?t=1618422232"
},{
"internalName": "TOMBRAIDERANNIVERSARY",
"title": "Tomb Raider: Anniversary",
"metacriticLink": "/game/pc/tomb-raider-anniversary",
"dealID": "ryaV37fbWkAIG6ZLy%2FtViuEGOIShgUVV7kpgbYlDqQk%3D",
"storeID": "1",
"gameID": "456",
"salePrice": "0.98",
"normalPrice": "8.99",
"isOnSale": "1",
"savings": "89.098999",
"metacriticScore": "83",
"steamRatingText": "Very Positive",
"steamRatingPercent": "82",
"steamRatingCount": "3621",
"steamAppID": "8000",
"releaseDate": 1181001600,
"lastChange": 1621540355,
"dealRating": "9.3",
"thumb": "https://cdn.cloudflare.steamstatic.com/steam/apps/8000/capsule_sm_120.jpg?t=1592494287"
}]
| 34.990654 | 159 | 0.625623 |
4a1cdff5b505ccf761d6fd53c76d7ac0f82c6ee0
| 15,187 |
py
|
Python
|
cellpack/mgl_tools/DejaVu/GleObjects.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
cellpack/mgl_tools/DejaVu/GleObjects.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | 21 |
2021-10-02T00:07:05.000Z
|
2022-03-30T00:02:10.000Z
|
cellpack/mgl_tools/DejaVu/GleObjects.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: Michel F. SANNER, Sophie Coon
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/DejaVu/GleObjects.py,v 1.32 2008/11/20 00:54:16 vareille Exp $
#
# $Id: GleObjects.py,v 1.32 2008/11/20 00:54:16 vareille Exp $
#
try:
import gle
except ImportError:
print("Sorry you need the GLE extension module")
from DejaVu.viewerFns import checkKeywords
from DejaVu.Geom import Geom
from DejaVu.triangle_strip import Triangle_strip
from opengltk.OpenGL import GL
import numpy.oldnumeric as Numeric
class GleObject(Triangle_strip):
keywords = Triangle_strip.keywords + ["normalStyle", "joinStyle"]
def __init__(self, name=None, check=1, **kw):
self.normalStyle = gle.TUBE_NORM_PATH_EDGE
self.joinStyle = gle.TUBE_JN_ANGLE
Triangle_strip.__init__(*(self, name, check), **kw)
def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
"""set data for this object:
        check=1 : verify that all the keywords present can be handled by this func
        redo=1 : append self to viewer.objectsNeedingRedo
        updateOwnGui=True : allow updating ownGui at the end of this func"""
if "materials" in kw and kw["materials"]:
materials = Numeric.array((kw["materials"]), "f")
else:
materials = Numeric.array(((0.0, 0.0, 1.0, 1.0),), "f")
redoFlags = Triangle_strip.Set(*(self, check, 0), **kw)
nm = kw.get("normalStyle")
# nm can be TUBE_NORM_FACET, TUBE_NORM_EDGE, TUBE_NORM_PATH_EDGE
if nm:
self.normalStyle = self.normalStyle & ~gle.TUBE_NORM_MASK
self.normalStyle = self.normalStyle | nm
gle.gleSetJoinStyle(self.normalStyle | self.joinStyle)
ja = kw.get("joinStyle")
# ja can be TUBE_JN_RAW, TUBE_JN_ANGLE, TUBE_JN_CUT, TUBE_JN_ROUND,
# TUBE_JN_CAP
if ja:
self.joinStyle = self.joinStyle & ~gle.TUBE_JN_MASK
self.joinStyle = self.joinStyle | ja
gle.gleSetJoinStyle(self.normalStyle | self.joinStyle)
return self.redoNow(redo, updateOwnGui, redoFlags)
def extrude(self):
"""Virtual Method to do the extrusion along a 3D path with a 2D shape
using the gle extrusion. We then get the geometry information
using the extrusion method in Feedback mode. This will then be
used to build a triangle strip."""
pass
def asIndexedPolygons(self, run=1, quality=None, **kw):
"""run=0 returns 1 if this geom can be represented as an
IndexedPolygon and None if not. run=1 returns the IndexedPolygon
object."""
if run == 0:
return 1 # yes, I can be represented as IndexedPolygons
faces = self.faceSet.faces.array
verts = self.vertexSet.vertices.array
size = faces.shape
# number of triangles in each face (containing triangle strip
# vertices) from faces array.
ntr = size[1] - 2
# new length of triangles array
nfaces = size[0] * ntr
new_faces = Numeric.zeros((nfaces, 3), "i")
i = 0
for f in faces:
for n in range(ntr):
                if n % 2 == 0:
new_faces[i] = [f[n], f[n + 1], f[n + 2]]
else:
new_faces[i] = [f[n + 2], f[n + 1], f[n]]
i = i + 1
from DejaVu.IndexedPolygons import IndexedPolygons
new_obj = IndexedPolygons(
"gleobj",
vertices=verts,
faces=new_faces,
visible=1,
invertNormals=self.invertNormals,
)
return new_obj
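    # Illustrative note, not in the original file: for a strip [v0, v1, v2, v3]
    # the loop above emits (v0, v1, v2) and (v3, v2, v1); flipping every other
    # triangle keeps a consistent winding order, hence consistent normals.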
class GleExtrude(GleObject):
keywords = GleObject.keywords + ["shape2D", "trace3D", "contourUp", "capsFlag"]
def __init__(self, name=None, check=1, **kw):
if __debug__:
if check:
checkKeywords(*(name, self.keywords), **kw)
GleObject.__init__(*(self, name, 0), **kw)
self.Set(
trace3D=kw.get("trace3D"),
shape2D=kw.get("shape2D"),
contourUp=kw.get("contourUp"),
capsFlag=kw.get("capsFlag"),
)
def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
"""set data for this object:
        check=1 : verify that all the keywords present can be handled by this func
        redo=1 : append self to viewer.objectsNeedingRedo
        updateOwnGui=True : allow updating ownGui at the end of this func"""
redoFlags = 0
capsFlag = kw.get("capsFlag")
if capsFlag is None:
if not hasattr(self, "capsFlag"):
self.capsFlag = 0
else:
self.capsFlag = capsFlag
shape2D = kw.get("shape2D")
if shape2D is None:
if not hasattr(self, "shape2D"):
self.shape2D = None
else:
self.shape2D = shape2D
contourUp = kw.get("contourUp")
if contourUp is None:
if not hasattr(self, "contourUp"):
self.contourUp = (0.0, 0.0, 1.0)
else:
self.contourUp = contourUp
trace3D = kw.get("trace3D")
if trace3D is None:
if not hasattr(self, "trace3D"):
self.trace3D = Numeric.zeros((0, 3), "f")
else:
self.trace3D = trace3D
if "materials" in kw and kw["materials"]:
materials = Numeric.array((kw["materials"]), "f")
redoFlags |= self._redoFlags["redoDisplayListFlag"]
else:
materials = Numeric.array(((0.0, 0.0, 1.0, 1.0),), "f")
if not shape2D is None:
v, n, s = self.extrude()
if self.capsFlag == 1:
v, n, s = self.addCaps(v, n, s)
redoFlags |= self._redoFlags["redoDisplayListFlag"]
if v is not None:
kw["vertices"] = v
if n is not None:
kw["vnormals"] = n
if s is not None:
kw["stripBegin"] = [0] + list(s[:-1, 0])
kw["stripEnd"] = list(s[:, 0])
redoFlags |= GleObject.Set(*(self, check, 0), **kw)
return self.redoNow(redo, updateOwnGui, redoFlags)
def addCaps(self, v, n, s):
""" Method to add front and end caps to the extruded geometry."""
# calculate the length of each strip
lenStrip = 2 * self.shape2D.lenShape
# 1- Front Cap:
# ================
# Get the midPoint of the front cap
frontMid = self.trace3D[1]
# Get the coordinates of the contourPoints of the cap
shapePoints = v[1:lenStrip:2]
# Organize the points so the strip creates the cap
frontCapPts = []
for point in shapePoints.tolist():
frontCapPts.append(point)
frontCapPts.append(frontMid)
# Add the new strip to the front of the vertices
vertices = Numeric.concatenate((frontCapPts, v))
        # Compute the cap normal as the cross product of the edge vectors (M3, M1).
if self.shape2D.vertDup == 0:
fm1 = shapePoints[0] - frontMid
fm3 = shapePoints[1] - frontMid
elif self.shape2D.vertDup == 1:
fm1 = shapePoints[0] - frontMid
fm3 = shapePoints[2] - frontMid
# Cross product
        nc = [
            [
                (fm3[1] * fm1[2] - fm3[2] * fm1[1]),
                (fm3[2] * fm1[0] - fm3[0] * fm1[2]),
                (fm3[0] * fm1[1] - fm3[1] * fm1[0]),
            ],
        ]
frontNorm = Numeric.array(nc * lenStrip, "d")
# Add the normals to the normal array
normals = Numeric.concatenate((frontNorm, n))
lastVert = s[-1][0] + lenStrip
strip = Numeric.concatenate(
(
s,
[
[lastVert, lastVert],
],
)
)
# 2- End cap:
# ================
# Get the midPoint of the end cap
endMid = self.trace3D[-2]
# Get the coordinates of the contourPoints of the last cap
endShape = v[-lenStrip:-1:2]
# Organize the points so the strip creates the cap
endCapPts = []
# Definition of the strip
for point in endShape.tolist():
endCapPts.append(endMid)
endCapPts.append(point)
# Add the new strip to the front of the vertices
vertices = Numeric.concatenate((vertices, endCapPts))
        # Compute the cap normal as the cross product of two vectors defined
        # by the mid cap point and points of the shape.
if self.shape2D.vertDup == 0:
em1 = endShape[0] - endMid
em3 = endShape[1] - endMid
elif self.shape2D.vertDup == 1:
em1 = endShape[2] - endMid
em3 = endShape[0] - endMid
        # Cross product em3 x em1
        nc = [
            [
                (em3[1] * em1[2] - em3[2] * em1[1]),
                (em3[2] * em1[0] - em3[0] * em1[2]),
                (em3[0] * em1[1] - em3[1] * em1[0]),
            ],
        ]
endNorm = Numeric.array(nc * lenStrip, "d")
# Add the normals to the normal array
normals = Numeric.concatenate((normals, endNorm))
lastVert = strip[-1][0] + lenStrip
strip = Numeric.concatenate(
(
strip,
[
[lastVert, lastVert],
],
)
)
return vertices, normals, strip
def extrude(self):
"""Virtual Method to do the extrusion along a 3D path with a 2D shape
using the gle extrusion. We then get the geometry information
using the extrusion method in Feedback mode. This will then be
used to build a triangle strip."""
from gle import glec
gle.gleSetJoinStyle(self.normalStyle | self.joinStyle)
glec.gleFeedBack()
contpts = Numeric.array(self.shape2D.contpts)
contourPoints = contpts[:, :2]
contnorm = Numeric.array(self.shape2D.contnorm)
contourNormals = contnorm[:, :2]
gle.gleExtrusion(
contourPoints,
contourNormals,
self.contourUp,
self.trace3D,
            self.materials[GL.GL_FRONT].prop[0][:, :3],
)
glec.gleTextureMode(0)
v, n, s = glec.gleGetTriangleMesh()
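        # Swap each pair of consecutive vertices and normals; this reverses
        # the winding of the triangles returned by gle's feedback mode
        # (apparently so the strips match DejaVu's orientation convention).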
vinv = Numeric.zeros(v.shape, "d")
vinv[::2] = v[1::2]
vinv[1::2] = v[::2]
ninv = Numeric.zeros(n.shape, "d")
ninv[::2] = n[1::2]
ninv[1::2] = n[::2]
return vinv, ninv, s
def getFaces(self):
"""returns a handle to the faces array"""
        return self.faceSet.faces.array
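# A minimal usage sketch (an illustration, not original code): extruding a
# 2D shape along a 3D path.  The Circle2D import and its keyword arguments
# are assumptions about DejaVu's Shapes module; gle needs one extra guide
# point at each end of the trace, and a GL context may be required (see the
# warning below).
def _gle_extrude_example():
    from DejaVu.Shapes import Circle2D  # assumed shape class
    trace = [[0., 0., -1.], [0., 0., 0.], [0., 0., 5.], [0., 0., 6.]]
    return GleExtrude("tube", shape2D=Circle2D(radius=0.5), trace3D=trace,
                      capsFlag=1)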
#
# WARNING the extrusion in this object ONLY works after this object has
# been added to a viewer
#
class GlePolyCylinder(GleExtrude):
keywords = GleExtrude.keywords + ["trace3D", "radius"]
def __init__(self, name=None, check=1, **kw):
if __debug__:
if check:
                checkKeywords(name, self.keywords, **kw)
r = kw.get("radius")
        if r is None:
r = 1.0
self.radius = r
        GleExtrude.__init__(self, name, 0, **kw)
self.Set(trace3D=kw.get("trace3D"))
def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
"""set data for this object
check=1 : verify that all the keywords present can be handle by this func
redo=1 : append self to viewer.objectsNeedingRedo
updateOwnGui=True : allow to update owngui at the end this func"""
        redoFlags = GleExtrude.Set(self, check, 0, **kw)
if "materials" in kw and kw["materials"]:
materials = Numeric.array((kw["materials"]), "f")
else:
materials = Numeric.array(((0.0, 0.0, 1.0, 1.0),), "f")
self.trace3D = kw.get("trace3D")
        if self.trace3D is None:
v, n, s = (None, None, None)
redoFlags |= self._redoFlags["redoDisplayListFlag"]
else:
v, n, s = self.extrude()
redoFlags |= self._redoFlags["redoDisplayListFlag"]
        if v is not None:
            kw["vertices"] = v
        if n is not None:
            kw["vnormals"] = n
        if s is not None:
kw["stripBegin"] = [0] + list(s[:, 0])
return self.redoNow(redo, updateOwnGui, redoFlags)
def extrude(self):
"""Virtual Method to do the extrusion along a 3D path with a 2D shape
using the gle extrusion. We then get the geometry information
using the extrusion method in Feedback mode. This will then be
used to build a triangle strip."""
from gle import glec
gle.gleSetJoinStyle(self.joinStyle | self.normalStyle)
glec.gleFeedBack()
# DisplayFunction of the old GlePolyCylinder
GL.glColorMaterial(GL.GL_FRONT_AND_BACK, GL.GL_AMBIENT)
GL.glEnable(GL.GL_COLOR_MATERIAL)
# glEnable(GL_LIGHTING)
if self.viewer is not None:
self.viewer.enableOpenglLighting()
colors = self.materials[GL.GL_FRONT].prop[0][:, :3]
gle.glePolyCylinder(self.trace3D, colors, self.radius)
GL.glDisable(GL.GL_COLOR_MATERIAL)
glec.gleTextureMode(0)
v, n, s = glec.gleGetTriangleMesh()
return v, n, s
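# A minimal usage sketch (an illustration, not original code); per the
# warning above, the extrusion only works once the object is in a viewer.
def _gle_polycylinder_example():
    trace = [[0., 0., -1.], [0., 0., 0.], [0., 0., 4.], [0., 0., 5.]]
    return GlePolyCylinder("cyl", trace3D=trace, radius=0.25)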
class GlePolyCone(GlePolyCylinder):
keywords = GleExtrude.keywords + ["trace3D", "radii"]
def __init__(self, name=None, check=1, **kw):
if __debug__:
if check:
                checkKeywords(name, self.keywords, **kw)
        GlePolyCylinder.__init__(self, name, 0, **kw)
        self.Set(**kw)
def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
"""set data for this object:
        check=1 : verify that all of the given keywords can be handled by this function
        redo=1 : append self to viewer.objectsNeedingRedo
        updateOwnGui=True : allow updating ownGui at the end of this function"""
        redoFlags = GlePolyCylinder.Set(self, check, 0, **kw)
r = kw.get("radii")
if r is not None:
assert len(r)
self.radii = r
return self.redoNow(redo, updateOwnGui, redoFlags)
def extrude(self):
"""Extrude a cone with radii specified at each point
of the extrusion"""
assert len(self.radii) == len(self.trace3D)
from gle import glec
gle.gleSetJoinStyle(self.joinStyle | self.normalStyle)
glec.gleFeedBack()
# DisplayFunction of the old GlePolyCylinder
GL.glColorMaterial(GL.GL_FRONT_AND_BACK, GL.GL_AMBIENT)
GL.glEnable(GL.GL_COLOR_MATERIAL)
if self.viewer is not None:
self.viewer.enableOpenglLighting()
colors = self.materials[GL.GL_FRONT].prop[0][:, :3]
gle.glePolyCone(self.trace3D, colors, self.radii)
GL.glDisable(GL.GL_COLOR_MATERIAL)
glec.gleTextureMode(0)
v, n, s = glec.gleGetTriangleMesh()
return v, n, s
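# A minimal usage sketch (an illustration, not original code): a tube whose
# radius varies along the path; len(radii) must equal len(trace3D), as
# asserted in extrude() above.
def _gle_polycone_example():
    trace = [[0., 0., -1.], [0., 0., 0.], [0., 0., 4.], [0., 0., 5.]]
    return GlePolyCone("cone", trace3D=trace, radii=[0.5, 0.5, 0.1, 0.1])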
| 33.159389 | 107 | 0.558833 |
4a1ce02fb3cf3d3e1fe0dd1c77a8cb96ca34981c
| 1,593 |
py
|
Python
|
src/main/py/com/example/sql/sql-s3a-read.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2021-07-18T16:23:56.000Z
|
2021-07-18T16:23:56.000Z
|
src/main/py/com/example/sql/sql-s3a-read.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/main/py/com/example/sql/sql-s3a-read.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
conf = (
SparkConf()
.setAppName("Spark minIO Test")
.set("spark.eventLog.enabled", "true")
.set("spark.eventLog.dir", "file:///apps/hostpath/spark/logs/")
.set("spark.hadoop.fs.s3a.endpoint", "http://localhost:9000")
.set("spark.hadoop.fs.s3a.access.key", "abc")
.set("spark.hadoop.fs.s3a.secret.key", "xyzxyzxyz")
.set("spark.hadoop.fs.s3a.path.style.access", True)
.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
)
spark = SparkSession.builder.appName("Spark minIO Test").config(conf=conf).getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
airlineSchema = StructType([
    StructField("id", IntegerType(), True),
    StructField("airlineName", StringType(), True),
    StructField("alias", StringType(), True),
    StructField("iataCode", StringType(), True),
    StructField("icaoCode", StringType(), True),
    StructField("callsign", StringType(), True),
    StructField("country", StringType(), True),
    StructField("active", StringType(), True)
])
airlinesWithSchema = spark.read.format("csv") \
.option("header", False) \
.option("delimiter", ',') \
.schema(airlineSchema)\
.load("s3a://word-count/flights-data/airlines.csv")
airlinesWithSchema.printSchema()
airlinesWithSchema.show()
# print(spark.sparkContext.wholeTextFiles('s3a://word-count/flights-data/airlines.csv').collect())
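# Note (an addition, not part of the original script): the s3a:// scheme
# needs the hadoop-aws module and a matching AWS SDK on the classpath, e.g.
#
#   spark-submit --packages org.apache.hadoop:hadoop-aws:3.2.0 sql-s3a-read.py
#
# where the hadoop-aws version must match the Hadoop build of your Spark.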
| 37.928571 | 88 | 0.691149 |
4a1ce1376b43f9c4f13bc00b339cabc965673dad
| 143,559 |
py
|
Python
|
env/lib/python3.6/site-packages/pygments/lexers/lisp.py
|
DivyaSDV/pySINDy
|
e7cba8f983e083ef8cdce66c7c1572276717b225
|
[
"MIT"
] | 69 |
2019-02-18T12:07:35.000Z
|
2022-03-12T10:38:32.000Z
|
env/lib/python3.6/site-packages/pygments/lexers/lisp.py
|
DivyaSDV/pySINDy
|
e7cba8f983e083ef8cdce66c7c1572276717b225
|
[
"MIT"
] | 12 |
2018-12-06T22:06:49.000Z
|
2022-02-25T17:40:44.000Z
|
env/lib/python3.6/site-packages/pygments/lexers/lisp.py
|
DivyaSDV/pySINDy
|
e7cba8f983e083ef8cdce66c7c1572276717b225
|
[
"MIT"
] | 28 |
2019-03-22T01:07:13.000Z
|
2022-02-21T16:38:27.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.lisp
~~~~~~~~~~~~~~~~~~~~
Lexers for Lispy languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Error
from pygments.lexers.python import PythonLexer
__all__ = ['SchemeLexer', 'CommonLispLexer', 'HyLexer', 'RacketLexer',
'NewLispLexer', 'EmacsLispLexer', 'ShenLexer', 'CPSALexer',
'XtlangLexer', 'FennelLexer']
class SchemeLexer(RegexLexer):
"""
A Scheme lexer, parsing a stream and outputting the tokens
needed to highlight scheme code.
    This lexer could most probably easily be subclassed to parse
    other LISP-Dialects like Common Lisp, Emacs Lisp or AutoLisp.
This parser is checked with pastes from the LISP pastebin
at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
.. versionadded:: 0.6
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm', '*.ss']
mimetypes = ['text/x-scheme', 'application/x-scheme']
    # list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
keywords = (
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
)
builtins = (
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
)
# valid names for identifiers
# well, names can only not consist fully of numbers
# but this should be good enough for now
valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
tokens = {
'root': [
            # comments - starting with a semicolon
            # and going to the end of the line
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
            # commented form (entire sexpr following)
(r'#;\s*\(', Comment, 'commented-form'),
# signifies that the program text that follows is written with the
# lexical and datum syntax described in r6rs
(r'#!r6rs', Comment),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
# (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join(re.escape(entry) + ' ' for entry in keywords),
Keyword),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join(re.escape(entry) + ' ' for entry in builtins),
Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
'multiline-comment': [
(r'#\|', Comment.Multiline, '#push'),
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form': [
(r'\(', Comment, '#push'),
(r'\)', Comment, '#pop'),
(r'[^()]+', Comment),
],
}
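# A minimal usage sketch (an illustration, not part of this module):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight("(define (square x) (* x x))", SchemeLexer(), HtmlFormatter()))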
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
.. versionadded:: 0.9
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl', 'lisp']
filenames = ['*.cl', '*.lisp']
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
# couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[\w!$%&*+-/<=>?@\[\]^{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
# symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
def __init__(self, **options):
from pygments.lexers._cl_builtins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root': [
default('body'),
],
'multiline-comment': [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form': [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body': [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|\\\n|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"::" + symbol, String.Symbol),
(r":#" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#b[+-]?[01]+(/[01]+)?', Number.Bin),
# octal rational
(r'#o[+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#x[+-]?[0-9a-f]+(/[0-9a-f]+)?', Number.Hex),
# radix rational
(r'#\d+r[+-]?[0-9a-z]+(/[0-9a-z]+)?', Number),
# complex
(r'(#c)(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+a)(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#s)(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#p?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
            (r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
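# A minimal sketch (an illustration, not part of this module) of the name
# remapping done in get_tokens_unprocessed above: symbols lex first as
# Name.Variable and are then promoted when they appear in the builtin tables.
#
#   lexer = CommonLispLexer()
#   for _, tok, val in lexer.get_tokens_unprocessed("(defun f (x) (car x))"):
#       print(tok, repr(val))   # 'defun' and 'car' come back as Name.Builtin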
class HyLexer(RegexLexer):
"""
Lexer for `Hy <http://hylang.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Hy'
aliases = ['hylang']
filenames = ['*.hy']
mimetypes = ['text/x-hy', 'application/x-hy']
special_forms = (
'cond', 'for', '->', '->>', 'car',
'cdr', 'first', 'rest', 'let', 'when', 'unless',
'import', 'do', 'progn', 'get', 'slice', 'assoc', 'with-decorator',
',', 'list_comp', 'kwapply', '~', 'is', 'in', 'is-not', 'not-in',
'quasiquote', 'unquote', 'unquote-splice', 'quote', '|', '<<=', '>>=',
'foreach', 'while',
'eval-and-compile', 'eval-when-compile'
)
declarations = (
'def', 'defn', 'defun', 'defmacro', 'defclass', 'lambda', 'fn', 'setv'
)
hy_builtins = ()
hy_core = (
'cycle', 'dec', 'distinct', 'drop', 'even?', 'filter', 'inc',
'instance?', 'iterable?', 'iterate', 'iterator?', 'neg?',
'none?', 'nth', 'numeric?', 'odd?', 'pos?', 'remove', 'repeat',
'repeatedly', 'take', 'take_nth', 'take_while', 'zero?'
)
builtins = hy_builtins + hy_core
# valid names for identifiers
# well, names can only not consist fully of numbers
# but this should be good enough for now
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
def _multi_escape(entries):
return words(entries, suffix=' ')
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
(r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
# keywords
(r'::?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
include('py-keywords'),
include('py-builtins'),
# highlight the special forms
(_multi_escape(special_forms), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
# highlight them as Keyword.Declarations.
(_multi_escape(declarations), Keyword.Declaration),
# highlight the builtins
(_multi_escape(builtins), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Hy accepts vector notation
(r'(\[|\])', Punctuation),
# Hy accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
'py-keywords': PythonLexer.tokens['keywords'],
'py-builtins': PythonLexer.tokens['builtins'],
}
def analyse_text(text):
if '(import ' in text or '(defn ' in text:
return 0.9
class RacketLexer(RegexLexer):
"""
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly
known as PLT Scheme).
.. versionadded:: 1.6
"""
name = 'Racket'
aliases = ['racket', 'rkt']
filenames = ['*.rkt', '*.rktd', '*.rktl']
mimetypes = ['text/x-racket', 'application/x-racket']
# Generated by example.rkt
_keywords = (
u'#%app', u'#%datum', u'#%declare', u'#%expression', u'#%module-begin',
u'#%plain-app', u'#%plain-lambda', u'#%plain-module-begin',
u'#%printing-module-begin', u'#%provide', u'#%require',
u'#%stratified-body', u'#%top', u'#%top-interaction',
u'#%variable-reference', u'->', u'->*', u'->*m', u'->d', u'->dm', u'->i',
u'->m', u'...', u':do-in', u'==', u'=>', u'_', u'absent', u'abstract',
u'all-defined-out', u'all-from-out', u'and', u'any', u'augment', u'augment*',
u'augment-final', u'augment-final*', u'augride', u'augride*', u'begin',
u'begin-for-syntax', u'begin0', u'case', u'case->', u'case->m',
u'case-lambda', u'class', u'class*', u'class-field-accessor',
u'class-field-mutator', u'class/c', u'class/derived', u'combine-in',
u'combine-out', u'command-line', u'compound-unit', u'compound-unit/infer',
u'cond', u'cons/dc', u'contract', u'contract-out', u'contract-struct',
u'contracted', u'define', u'define-compound-unit',
u'define-compound-unit/infer', u'define-contract-struct',
u'define-custom-hash-types', u'define-custom-set-types',
u'define-for-syntax', u'define-local-member-name', u'define-logger',
u'define-match-expander', u'define-member-name',
u'define-module-boundary-contract', u'define-namespace-anchor',
u'define-opt/c', u'define-sequence-syntax', u'define-serializable-class',
u'define-serializable-class*', u'define-signature',
u'define-signature-form', u'define-struct', u'define-struct/contract',
u'define-struct/derived', u'define-syntax', u'define-syntax-rule',
u'define-syntaxes', u'define-unit', u'define-unit-binding',
u'define-unit-from-context', u'define-unit/contract',
u'define-unit/new-import-export', u'define-unit/s', u'define-values',
u'define-values-for-export', u'define-values-for-syntax',
u'define-values/invoke-unit', u'define-values/invoke-unit/infer',
u'define/augment', u'define/augment-final', u'define/augride',
u'define/contract', u'define/final-prop', u'define/match',
u'define/overment', u'define/override', u'define/override-final',
u'define/private', u'define/public', u'define/public-final',
u'define/pubment', u'define/subexpression-pos-prop',
u'define/subexpression-pos-prop/name', u'delay', u'delay/idle',
u'delay/name', u'delay/strict', u'delay/sync', u'delay/thread', u'do',
u'else', u'except', u'except-in', u'except-out', u'export', u'extends',
u'failure-cont', u'false', u'false/c', u'field', u'field-bound?', u'file',
u'flat-murec-contract', u'flat-rec-contract', u'for', u'for*', u'for*/and',
u'for*/async', u'for*/first', u'for*/fold', u'for*/fold/derived',
u'for*/hash', u'for*/hasheq', u'for*/hasheqv', u'for*/last', u'for*/list',
u'for*/lists', u'for*/mutable-set', u'for*/mutable-seteq',
u'for*/mutable-seteqv', u'for*/or', u'for*/product', u'for*/set',
u'for*/seteq', u'for*/seteqv', u'for*/stream', u'for*/sum', u'for*/vector',
u'for*/weak-set', u'for*/weak-seteq', u'for*/weak-seteqv', u'for-label',
u'for-meta', u'for-syntax', u'for-template', u'for/and', u'for/async',
u'for/first', u'for/fold', u'for/fold/derived', u'for/hash', u'for/hasheq',
u'for/hasheqv', u'for/last', u'for/list', u'for/lists', u'for/mutable-set',
u'for/mutable-seteq', u'for/mutable-seteqv', u'for/or', u'for/product',
u'for/set', u'for/seteq', u'for/seteqv', u'for/stream', u'for/sum',
u'for/vector', u'for/weak-set', u'for/weak-seteq', u'for/weak-seteqv',
u'gen:custom-write', u'gen:dict', u'gen:equal+hash', u'gen:set',
u'gen:stream', u'generic', u'get-field', u'hash/dc', u'if', u'implies',
u'import', u'include', u'include-at/relative-to',
u'include-at/relative-to/reader', u'include/reader', u'inherit',
u'inherit-field', u'inherit/inner', u'inherit/super', u'init',
u'init-depend', u'init-field', u'init-rest', u'inner', u'inspect',
u'instantiate', u'interface', u'interface*', u'invariant-assertion',
u'invoke-unit', u'invoke-unit/infer', u'lambda', u'lazy', u'let', u'let*',
u'let*-values', u'let-syntax', u'let-syntaxes', u'let-values', u'let/cc',
u'let/ec', u'letrec', u'letrec-syntax', u'letrec-syntaxes',
u'letrec-syntaxes+values', u'letrec-values', u'lib', u'link', u'local',
u'local-require', u'log-debug', u'log-error', u'log-fatal', u'log-info',
u'log-warning', u'match', u'match*', u'match*/derived', u'match-define',
u'match-define-values', u'match-lambda', u'match-lambda*',
u'match-lambda**', u'match-let', u'match-let*', u'match-let*-values',
u'match-let-values', u'match-letrec', u'match-letrec-values',
u'match/derived', u'match/values', u'member-name-key', u'mixin', u'module',
u'module*', u'module+', u'nand', u'new', u'nor', u'object-contract',
u'object/c', u'only', u'only-in', u'only-meta-in', u'open', u'opt/c', u'or',
u'overment', u'overment*', u'override', u'override*', u'override-final',
u'override-final*', u'parameterize', u'parameterize*',
u'parameterize-break', u'parametric->/c', u'place', u'place*',
u'place/context', u'planet', u'prefix', u'prefix-in', u'prefix-out',
u'private', u'private*', u'prompt-tag/c', u'protect-out', u'provide',
u'provide-signature-elements', u'provide/contract', u'public', u'public*',
u'public-final', u'public-final*', u'pubment', u'pubment*', u'quasiquote',
u'quasisyntax', u'quasisyntax/loc', u'quote', u'quote-syntax',
u'quote-syntax/prune', u'recontract-out', u'recursive-contract',
u'relative-in', u'rename', u'rename-in', u'rename-inner', u'rename-out',
u'rename-super', u'require', u'send', u'send*', u'send+', u'send-generic',
u'send/apply', u'send/keyword-apply', u'set!', u'set!-values',
u'set-field!', u'shared', u'stream', u'stream*', u'stream-cons', u'struct',
u'struct*', u'struct-copy', u'struct-field-index', u'struct-out',
u'struct/c', u'struct/ctc', u'struct/dc', u'submod', u'super',
u'super-instantiate', u'super-make-object', u'super-new', u'syntax',
u'syntax-case', u'syntax-case*', u'syntax-id-rules', u'syntax-rules',
u'syntax/loc', u'tag', u'this', u'this%', u'thunk', u'thunk*', u'time',
u'unconstrained-domain->', u'unit', u'unit-from-context', u'unit/c',
u'unit/new-import-export', u'unit/s', u'unless', u'unquote',
u'unquote-splicing', u'unsyntax', u'unsyntax-splicing', u'values/drop',
u'when', u'with-continuation-mark', u'with-contract',
u'with-contract-continuation-mark', u'with-handlers', u'with-handlers*',
u'with-method', u'with-syntax', u'λ'
)
# Generated by example.rkt
_builtins = (
u'*', u'*list/c', u'+', u'-', u'/', u'<', u'</c', u'<=', u'<=/c', u'=', u'=/c',
u'>', u'>/c', u'>=', u'>=/c', u'abort-current-continuation', u'abs',
u'absolute-path?', u'acos', u'add-between', u'add1', u'alarm-evt',
u'always-evt', u'and/c', u'andmap', u'angle', u'any/c', u'append', u'append*',
u'append-map', u'apply', u'argmax', u'argmin', u'arithmetic-shift',
u'arity-at-least', u'arity-at-least-value', u'arity-at-least?',
u'arity-checking-wrapper', u'arity-includes?', u'arity=?',
u'arrow-contract-info', u'arrow-contract-info-accepts-arglist',
u'arrow-contract-info-chaperone-procedure',
u'arrow-contract-info-check-first-order', u'arrow-contract-info?',
u'asin', u'assf', u'assoc', u'assq', u'assv', u'atan',
u'bad-number-of-results', u'banner', u'base->-doms/c', u'base->-rngs/c',
u'base->?', u'between/c', u'bitwise-and', u'bitwise-bit-field',
u'bitwise-bit-set?', u'bitwise-ior', u'bitwise-not', u'bitwise-xor',
u'blame-add-car-context', u'blame-add-cdr-context', u'blame-add-context',
u'blame-add-missing-party', u'blame-add-nth-arg-context',
u'blame-add-range-context', u'blame-add-unknown-context',
u'blame-context', u'blame-contract', u'blame-fmt->-string',
u'blame-missing-party?', u'blame-negative', u'blame-original?',
u'blame-positive', u'blame-replace-negative', u'blame-source',
u'blame-swap', u'blame-swapped?', u'blame-update', u'blame-value',
u'blame?', u'boolean=?', u'boolean?', u'bound-identifier=?', u'box',
u'box-cas!', u'box-immutable', u'box-immutable/c', u'box/c', u'box?',
u'break-enabled', u'break-parameterization?', u'break-thread',
u'build-chaperone-contract-property', u'build-compound-type-name',
u'build-contract-property', u'build-flat-contract-property',
u'build-list', u'build-path', u'build-path/convention-type',
u'build-string', u'build-vector', u'byte-pregexp', u'byte-pregexp?',
u'byte-ready?', u'byte-regexp', u'byte-regexp?', u'byte?', u'bytes',
u'bytes->immutable-bytes', u'bytes->list', u'bytes->path',
u'bytes->path-element', u'bytes->string/latin-1', u'bytes->string/locale',
u'bytes->string/utf-8', u'bytes-append', u'bytes-append*',
u'bytes-close-converter', u'bytes-convert', u'bytes-convert-end',
u'bytes-converter?', u'bytes-copy', u'bytes-copy!',
u'bytes-environment-variable-name?', u'bytes-fill!', u'bytes-join',
u'bytes-length', u'bytes-no-nuls?', u'bytes-open-converter', u'bytes-ref',
u'bytes-set!', u'bytes-utf-8-index', u'bytes-utf-8-length',
u'bytes-utf-8-ref', u'bytes<?', u'bytes=?', u'bytes>?', u'bytes?', u'caaaar',
u'caaadr', u'caaar', u'caadar', u'caaddr', u'caadr', u'caar', u'cadaar',
u'cadadr', u'cadar', u'caddar', u'cadddr', u'caddr', u'cadr',
u'call-in-nested-thread', u'call-with-atomic-output-file',
u'call-with-break-parameterization',
u'call-with-composable-continuation', u'call-with-continuation-barrier',
u'call-with-continuation-prompt', u'call-with-current-continuation',
u'call-with-default-reading-parameterization',
u'call-with-escape-continuation', u'call-with-exception-handler',
u'call-with-file-lock/timeout', u'call-with-immediate-continuation-mark',
u'call-with-input-bytes', u'call-with-input-file',
u'call-with-input-file*', u'call-with-input-string',
u'call-with-output-bytes', u'call-with-output-file',
u'call-with-output-file*', u'call-with-output-string',
u'call-with-parameterization', u'call-with-semaphore',
u'call-with-semaphore/enable-break', u'call-with-values', u'call/cc',
u'call/ec', u'car', u'cartesian-product', u'cdaaar', u'cdaadr', u'cdaar',
u'cdadar', u'cdaddr', u'cdadr', u'cdar', u'cddaar', u'cddadr', u'cddar',
u'cdddar', u'cddddr', u'cdddr', u'cddr', u'cdr', u'ceiling', u'channel-get',
u'channel-put', u'channel-put-evt', u'channel-put-evt?',
u'channel-try-get', u'channel/c', u'channel?', u'chaperone-box',
u'chaperone-channel', u'chaperone-continuation-mark-key',
u'chaperone-contract-property?', u'chaperone-contract?', u'chaperone-evt',
u'chaperone-hash', u'chaperone-hash-set', u'chaperone-of?',
u'chaperone-procedure', u'chaperone-procedure*', u'chaperone-prompt-tag',
u'chaperone-struct', u'chaperone-struct-type', u'chaperone-vector',
u'chaperone?', u'char->integer', u'char-alphabetic?', u'char-blank?',
u'char-ci<=?', u'char-ci<?', u'char-ci=?', u'char-ci>=?', u'char-ci>?',
u'char-downcase', u'char-foldcase', u'char-general-category',
u'char-graphic?', u'char-in', u'char-in/c', u'char-iso-control?',
u'char-lower-case?', u'char-numeric?', u'char-punctuation?',
u'char-ready?', u'char-symbolic?', u'char-title-case?', u'char-titlecase',
u'char-upcase', u'char-upper-case?', u'char-utf-8-length',
u'char-whitespace?', u'char<=?', u'char<?', u'char=?', u'char>=?', u'char>?',
u'char?', u'check-duplicate-identifier', u'check-duplicates',
u'checked-procedure-check-and-extract', u'choice-evt',
u'class->interface', u'class-info', u'class-seal', u'class-unseal',
u'class?', u'cleanse-path', u'close-input-port', u'close-output-port',
u'coerce-chaperone-contract', u'coerce-chaperone-contracts',
u'coerce-contract', u'coerce-contract/f', u'coerce-contracts',
u'coerce-flat-contract', u'coerce-flat-contracts', u'collect-garbage',
u'collection-file-path', u'collection-path', u'combinations', u'compile',
u'compile-allow-set!-undefined', u'compile-context-preservation-enabled',
u'compile-enforce-module-constants', u'compile-syntax',
u'compiled-expression-recompile', u'compiled-expression?',
u'compiled-module-expression?', u'complete-path?', u'complex?', u'compose',
u'compose1', u'conjoin', u'conjugate', u'cons', u'cons/c', u'cons?', u'const',
u'continuation-mark-key/c', u'continuation-mark-key?',
u'continuation-mark-set->context', u'continuation-mark-set->list',
u'continuation-mark-set->list*', u'continuation-mark-set-first',
u'continuation-mark-set?', u'continuation-marks',
u'continuation-prompt-available?', u'continuation-prompt-tag?',
u'continuation?', u'contract-continuation-mark-key',
u'contract-custom-write-property-proc', u'contract-exercise',
u'contract-first-order', u'contract-first-order-passes?',
u'contract-late-neg-projection', u'contract-name', u'contract-proc',
u'contract-projection', u'contract-property?',
u'contract-random-generate', u'contract-random-generate-fail',
u'contract-random-generate-fail?',
u'contract-random-generate-get-current-environment',
u'contract-random-generate-stash', u'contract-random-generate/choose',
u'contract-stronger?', u'contract-struct-exercise',
u'contract-struct-generate', u'contract-struct-late-neg-projection',
u'contract-struct-list-contract?', u'contract-val-first-projection',
u'contract?', u'convert-stream', u'copy-directory/files', u'copy-file',
u'copy-port', u'cos', u'cosh', u'count', u'current-blame-format',
u'current-break-parameterization', u'current-code-inspector',
u'current-command-line-arguments', u'current-compile',
u'current-compiled-file-roots', u'current-continuation-marks',
u'current-contract-region', u'current-custodian', u'current-directory',
u'current-directory-for-user', u'current-drive',
u'current-environment-variables', u'current-error-port', u'current-eval',
u'current-evt-pseudo-random-generator',
u'current-force-delete-permissions', u'current-future',
u'current-gc-milliseconds', u'current-get-interaction-input-port',
u'current-inexact-milliseconds', u'current-input-port',
u'current-inspector', u'current-library-collection-links',
u'current-library-collection-paths', u'current-load',
u'current-load-extension', u'current-load-relative-directory',
u'current-load/use-compiled', u'current-locale', u'current-logger',
u'current-memory-use', u'current-milliseconds',
u'current-module-declare-name', u'current-module-declare-source',
u'current-module-name-resolver', u'current-module-path-for-load',
u'current-namespace', u'current-output-port', u'current-parameterization',
u'current-plumber', u'current-preserved-thread-cell-values',
u'current-print', u'current-process-milliseconds', u'current-prompt-read',
u'current-pseudo-random-generator', u'current-read-interaction',
u'current-reader-guard', u'current-readtable', u'current-seconds',
u'current-security-guard', u'current-subprocess-custodian-mode',
u'current-thread', u'current-thread-group',
u'current-thread-initial-stack-size',
u'current-write-relative-directory', u'curry', u'curryr',
u'custodian-box-value', u'custodian-box?', u'custodian-limit-memory',
u'custodian-managed-list', u'custodian-memory-accounting-available?',
u'custodian-require-memory', u'custodian-shutdown-all', u'custodian?',
u'custom-print-quotable-accessor', u'custom-print-quotable?',
u'custom-write-accessor', u'custom-write-property-proc', u'custom-write?',
u'date', u'date*', u'date*-nanosecond', u'date*-time-zone-name', u'date*?',
u'date-day', u'date-dst?', u'date-hour', u'date-minute', u'date-month',
u'date-second', u'date-time-zone-offset', u'date-week-day', u'date-year',
u'date-year-day', u'date?', u'datum->syntax', u'datum-intern-literal',
u'default-continuation-prompt-tag', u'degrees->radians',
u'delete-directory', u'delete-directory/files', u'delete-file',
u'denominator', u'dict->list', u'dict-can-functional-set?',
u'dict-can-remove-keys?', u'dict-clear', u'dict-clear!', u'dict-copy',
u'dict-count', u'dict-empty?', u'dict-for-each', u'dict-has-key?',
u'dict-implements/c', u'dict-implements?', u'dict-iter-contract',
u'dict-iterate-first', u'dict-iterate-key', u'dict-iterate-next',
u'dict-iterate-value', u'dict-key-contract', u'dict-keys', u'dict-map',
u'dict-mutable?', u'dict-ref', u'dict-ref!', u'dict-remove',
u'dict-remove!', u'dict-set', u'dict-set!', u'dict-set*', u'dict-set*!',
u'dict-update', u'dict-update!', u'dict-value-contract', u'dict-values',
u'dict?', u'directory-exists?', u'directory-list', u'disjoin', u'display',
u'display-lines', u'display-lines-to-file', u'display-to-file',
u'displayln', u'double-flonum?', u'drop', u'drop-common-prefix',
u'drop-right', u'dropf', u'dropf-right', u'dump-memory-stats',
u'dup-input-port', u'dup-output-port', u'dynamic->*', u'dynamic-get-field',
u'dynamic-object/c', u'dynamic-place', u'dynamic-place*',
u'dynamic-require', u'dynamic-require-for-syntax', u'dynamic-send',
u'dynamic-set-field!', u'dynamic-wind', u'eighth', u'empty',
u'empty-sequence', u'empty-stream', u'empty?',
u'environment-variables-copy', u'environment-variables-names',
u'environment-variables-ref', u'environment-variables-set!',
u'environment-variables?', u'eof', u'eof-evt', u'eof-object?',
u'ephemeron-value', u'ephemeron?', u'eprintf', u'eq-contract-val',
u'eq-contract?', u'eq-hash-code', u'eq?', u'equal-contract-val',
u'equal-contract?', u'equal-hash-code', u'equal-secondary-hash-code',
u'equal<%>', u'equal?', u'equal?/recur', u'eqv-hash-code', u'eqv?', u'error',
u'error-display-handler', u'error-escape-handler',
u'error-print-context-length', u'error-print-source-location',
u'error-print-width', u'error-value->string-handler', u'eval',
u'eval-jit-enabled', u'eval-syntax', u'even?', u'evt/c', u'evt?',
u'exact->inexact', u'exact-ceiling', u'exact-floor', u'exact-integer?',
u'exact-nonnegative-integer?', u'exact-positive-integer?', u'exact-round',
u'exact-truncate', u'exact?', u'executable-yield-handler', u'exit',
u'exit-handler', u'exn', u'exn-continuation-marks', u'exn-message',
u'exn:break', u'exn:break-continuation', u'exn:break:hang-up',
u'exn:break:hang-up?', u'exn:break:terminate', u'exn:break:terminate?',
u'exn:break?', u'exn:fail', u'exn:fail:contract',
u'exn:fail:contract:arity', u'exn:fail:contract:arity?',
u'exn:fail:contract:blame', u'exn:fail:contract:blame-object',
u'exn:fail:contract:blame?', u'exn:fail:contract:continuation',
u'exn:fail:contract:continuation?', u'exn:fail:contract:divide-by-zero',
u'exn:fail:contract:divide-by-zero?',
u'exn:fail:contract:non-fixnum-result',
u'exn:fail:contract:non-fixnum-result?', u'exn:fail:contract:variable',
u'exn:fail:contract:variable-id', u'exn:fail:contract:variable?',
u'exn:fail:contract?', u'exn:fail:filesystem',
u'exn:fail:filesystem:errno', u'exn:fail:filesystem:errno-errno',
u'exn:fail:filesystem:errno?', u'exn:fail:filesystem:exists',
u'exn:fail:filesystem:exists?', u'exn:fail:filesystem:missing-module',
u'exn:fail:filesystem:missing-module-path',
u'exn:fail:filesystem:missing-module?', u'exn:fail:filesystem:version',
u'exn:fail:filesystem:version?', u'exn:fail:filesystem?',
u'exn:fail:network', u'exn:fail:network:errno',
u'exn:fail:network:errno-errno', u'exn:fail:network:errno?',
u'exn:fail:network?', u'exn:fail:object', u'exn:fail:object?',
u'exn:fail:out-of-memory', u'exn:fail:out-of-memory?', u'exn:fail:read',
u'exn:fail:read-srclocs', u'exn:fail:read:eof', u'exn:fail:read:eof?',
u'exn:fail:read:non-char', u'exn:fail:read:non-char?', u'exn:fail:read?',
u'exn:fail:syntax', u'exn:fail:syntax-exprs',
u'exn:fail:syntax:missing-module',
u'exn:fail:syntax:missing-module-path',
u'exn:fail:syntax:missing-module?', u'exn:fail:syntax:unbound',
u'exn:fail:syntax:unbound?', u'exn:fail:syntax?', u'exn:fail:unsupported',
u'exn:fail:unsupported?', u'exn:fail:user', u'exn:fail:user?',
u'exn:fail?', u'exn:misc:match?', u'exn:missing-module-accessor',
u'exn:missing-module?', u'exn:srclocs-accessor', u'exn:srclocs?', u'exn?',
u'exp', u'expand', u'expand-once', u'expand-syntax', u'expand-syntax-once',
u'expand-syntax-to-top-form', u'expand-to-top-form', u'expand-user-path',
u'explode-path', u'expt', u'externalizable<%>', u'failure-result/c',
u'false?', u'field-names', u'fifth', u'file->bytes', u'file->bytes-lines',
u'file->lines', u'file->list', u'file->string', u'file->value',
u'file-exists?', u'file-name-from-path', u'file-or-directory-identity',
u'file-or-directory-modify-seconds', u'file-or-directory-permissions',
u'file-position', u'file-position*', u'file-size',
u'file-stream-buffer-mode', u'file-stream-port?', u'file-truncate',
u'filename-extension', u'filesystem-change-evt',
u'filesystem-change-evt-cancel', u'filesystem-change-evt?',
u'filesystem-root-list', u'filter', u'filter-map', u'filter-not',
u'filter-read-input-port', u'find-executable-path', u'find-files',
u'find-library-collection-links', u'find-library-collection-paths',
u'find-relative-path', u'find-system-path', u'findf', u'first',
u'first-or/c', u'fixnum?', u'flat-contract', u'flat-contract-predicate',
u'flat-contract-property?', u'flat-contract?', u'flat-named-contract',
u'flatten', u'floating-point-bytes->real', u'flonum?', u'floor',
u'flush-output', u'fold-files', u'foldl', u'foldr', u'for-each', u'force',
u'format', u'fourth', u'fprintf', u'free-identifier=?',
u'free-label-identifier=?', u'free-template-identifier=?',
u'free-transformer-identifier=?', u'fsemaphore-count', u'fsemaphore-post',
u'fsemaphore-try-wait?', u'fsemaphore-wait', u'fsemaphore?', u'future',
u'future?', u'futures-enabled?', u'gcd', u'generate-member-key',
u'generate-temporaries', u'generic-set?', u'generic?', u'gensym',
u'get-output-bytes', u'get-output-string', u'get-preference',
u'get/build-late-neg-projection', u'get/build-val-first-projection',
u'getenv', u'global-port-print-handler', u'group-by', u'group-execute-bit',
u'group-read-bit', u'group-write-bit', u'guard-evt', u'handle-evt',
u'handle-evt?', u'has-blame?', u'has-contract?', u'hash', u'hash->list',
u'hash-clear', u'hash-clear!', u'hash-copy', u'hash-copy-clear',
u'hash-count', u'hash-empty?', u'hash-eq?', u'hash-equal?', u'hash-eqv?',
u'hash-for-each', u'hash-has-key?', u'hash-iterate-first',
u'hash-iterate-key', u'hash-iterate-key+value', u'hash-iterate-next',
u'hash-iterate-pair', u'hash-iterate-value', u'hash-keys', u'hash-map',
u'hash-placeholder?', u'hash-ref', u'hash-ref!', u'hash-remove',
u'hash-remove!', u'hash-set', u'hash-set!', u'hash-set*', u'hash-set*!',
u'hash-update', u'hash-update!', u'hash-values', u'hash-weak?', u'hash/c',
u'hash?', u'hasheq', u'hasheqv', u'identifier-binding',
u'identifier-binding-symbol', u'identifier-label-binding',
u'identifier-prune-lexical-context',
u'identifier-prune-to-source-module',
u'identifier-remove-from-definition-context',
u'identifier-template-binding', u'identifier-transformer-binding',
u'identifier?', u'identity', u'if/c', u'imag-part', u'immutable?',
u'impersonate-box', u'impersonate-channel',
u'impersonate-continuation-mark-key', u'impersonate-hash',
u'impersonate-hash-set', u'impersonate-procedure',
u'impersonate-procedure*', u'impersonate-prompt-tag',
u'impersonate-struct', u'impersonate-vector', u'impersonator-contract?',
u'impersonator-ephemeron', u'impersonator-of?',
u'impersonator-prop:application-mark', u'impersonator-prop:blame',
u'impersonator-prop:contracted',
u'impersonator-property-accessor-procedure?', u'impersonator-property?',
u'impersonator?', u'implementation?', u'implementation?/c', u'in-bytes',
u'in-bytes-lines', u'in-combinations', u'in-cycle', u'in-dict',
u'in-dict-keys', u'in-dict-pairs', u'in-dict-values', u'in-directory',
u'in-hash', u'in-hash-keys', u'in-hash-pairs', u'in-hash-values',
u'in-immutable-hash', u'in-immutable-hash-keys',
u'in-immutable-hash-pairs', u'in-immutable-hash-values',
u'in-immutable-set', u'in-indexed', u'in-input-port-bytes',
u'in-input-port-chars', u'in-lines', u'in-list', u'in-mlist',
u'in-mutable-hash', u'in-mutable-hash-keys', u'in-mutable-hash-pairs',
u'in-mutable-hash-values', u'in-mutable-set', u'in-naturals',
u'in-parallel', u'in-permutations', u'in-port', u'in-producer', u'in-range',
u'in-sequences', u'in-set', u'in-slice', u'in-stream', u'in-string',
u'in-syntax', u'in-value', u'in-values*-sequence', u'in-values-sequence',
u'in-vector', u'in-weak-hash', u'in-weak-hash-keys', u'in-weak-hash-pairs',
u'in-weak-hash-values', u'in-weak-set', u'inexact->exact',
u'inexact-real?', u'inexact?', u'infinite?', u'input-port-append',
u'input-port?', u'inspector?', u'instanceof/c', u'integer->char',
u'integer->integer-bytes', u'integer-bytes->integer', u'integer-in',
u'integer-length', u'integer-sqrt', u'integer-sqrt/remainder', u'integer?',
u'interface->method-names', u'interface-extension?', u'interface?',
u'internal-definition-context-binding-identifiers',
u'internal-definition-context-introduce',
u'internal-definition-context-seal', u'internal-definition-context?',
u'is-a?', u'is-a?/c', u'keyword->string', u'keyword-apply', u'keyword<?',
u'keyword?', u'keywords-match', u'kill-thread', u'last', u'last-pair',
u'lcm', u'length', u'liberal-define-context?', u'link-exists?', u'list',
u'list*', u'list*of', u'list->bytes', u'list->mutable-set',
u'list->mutable-seteq', u'list->mutable-seteqv', u'list->set',
u'list->seteq', u'list->seteqv', u'list->string', u'list->vector',
u'list->weak-set', u'list->weak-seteq', u'list->weak-seteqv',
u'list-contract?', u'list-prefix?', u'list-ref', u'list-set', u'list-tail',
u'list-update', u'list/c', u'list?', u'listen-port-number?', u'listof',
u'load', u'load-extension', u'load-on-demand-enabled', u'load-relative',
u'load-relative-extension', u'load/cd', u'load/use-compiled',
u'local-expand', u'local-expand/capture-lifts',
u'local-transformer-expand', u'local-transformer-expand/capture-lifts',
u'locale-string-encoding', u'log', u'log-all-levels', u'log-level-evt',
u'log-level?', u'log-max-level', u'log-message', u'log-receiver?',
u'logger-name', u'logger?', u'magnitude', u'make-arity-at-least',
u'make-base-empty-namespace', u'make-base-namespace', u'make-bytes',
u'make-channel', u'make-chaperone-contract',
u'make-continuation-mark-key', u'make-continuation-prompt-tag',
u'make-contract', u'make-custodian', u'make-custodian-box',
u'make-custom-hash', u'make-custom-hash-types', u'make-custom-set',
u'make-custom-set-types', u'make-date', u'make-date*',
u'make-derived-parameter', u'make-directory', u'make-directory*',
u'make-do-sequence', u'make-empty-namespace',
u'make-environment-variables', u'make-ephemeron', u'make-exn',
u'make-exn:break', u'make-exn:break:hang-up', u'make-exn:break:terminate',
u'make-exn:fail', u'make-exn:fail:contract',
u'make-exn:fail:contract:arity', u'make-exn:fail:contract:blame',
u'make-exn:fail:contract:continuation',
u'make-exn:fail:contract:divide-by-zero',
u'make-exn:fail:contract:non-fixnum-result',
u'make-exn:fail:contract:variable', u'make-exn:fail:filesystem',
u'make-exn:fail:filesystem:errno', u'make-exn:fail:filesystem:exists',
u'make-exn:fail:filesystem:missing-module',
u'make-exn:fail:filesystem:version', u'make-exn:fail:network',
u'make-exn:fail:network:errno', u'make-exn:fail:object',
u'make-exn:fail:out-of-memory', u'make-exn:fail:read',
u'make-exn:fail:read:eof', u'make-exn:fail:read:non-char',
u'make-exn:fail:syntax', u'make-exn:fail:syntax:missing-module',
u'make-exn:fail:syntax:unbound', u'make-exn:fail:unsupported',
u'make-exn:fail:user', u'make-file-or-directory-link',
u'make-flat-contract', u'make-fsemaphore', u'make-generic',
u'make-handle-get-preference-locked', u'make-hash',
u'make-hash-placeholder', u'make-hasheq', u'make-hasheq-placeholder',
u'make-hasheqv', u'make-hasheqv-placeholder',
u'make-immutable-custom-hash', u'make-immutable-hash',
u'make-immutable-hasheq', u'make-immutable-hasheqv',
u'make-impersonator-property', u'make-input-port',
u'make-input-port/read-to-peek', u'make-inspector',
u'make-keyword-procedure', u'make-known-char-range-list',
u'make-limited-input-port', u'make-list', u'make-lock-file-name',
u'make-log-receiver', u'make-logger', u'make-mixin-contract',
u'make-mutable-custom-set', u'make-none/c', u'make-object',
u'make-output-port', u'make-parameter', u'make-parent-directory*',
u'make-phantom-bytes', u'make-pipe', u'make-pipe-with-specials',
u'make-placeholder', u'make-plumber', u'make-polar', u'make-prefab-struct',
u'make-primitive-class', u'make-proj-contract',
u'make-pseudo-random-generator', u'make-reader-graph', u'make-readtable',
u'make-rectangular', u'make-rename-transformer',
u'make-resolved-module-path', u'make-security-guard', u'make-semaphore',
u'make-set!-transformer', u'make-shared-bytes', u'make-sibling-inspector',
u'make-special-comment', u'make-srcloc', u'make-string',
u'make-struct-field-accessor', u'make-struct-field-mutator',
u'make-struct-type', u'make-struct-type-property',
u'make-syntax-delta-introducer', u'make-syntax-introducer',
u'make-temporary-file', u'make-tentative-pretty-print-output-port',
u'make-thread-cell', u'make-thread-group', u'make-vector',
u'make-weak-box', u'make-weak-custom-hash', u'make-weak-custom-set',
u'make-weak-hash', u'make-weak-hasheq', u'make-weak-hasheqv',
u'make-will-executor', u'map', u'match-equality-test',
u'matches-arity-exactly?', u'max', u'mcar', u'mcdr', u'mcons', u'member',
u'member-name-key-hash-code', u'member-name-key=?', u'member-name-key?',
u'memf', u'memq', u'memv', u'merge-input', u'method-in-interface?', u'min',
u'mixin-contract', u'module->exports', u'module->imports',
u'module->language-info', u'module->namespace',
u'module-compiled-cross-phase-persistent?', u'module-compiled-exports',
u'module-compiled-imports', u'module-compiled-language-info',
u'module-compiled-name', u'module-compiled-submodules',
u'module-declared?', u'module-path-index-join',
u'module-path-index-resolve', u'module-path-index-split',
u'module-path-index-submodule', u'module-path-index?', u'module-path?',
u'module-predefined?', u'module-provide-protected?', u'modulo', u'mpair?',
u'mutable-set', u'mutable-seteq', u'mutable-seteqv', u'n->th',
u'nack-guard-evt', u'namespace-anchor->empty-namespace',
u'namespace-anchor->namespace', u'namespace-anchor?',
u'namespace-attach-module', u'namespace-attach-module-declaration',
u'namespace-base-phase', u'namespace-mapped-symbols',
u'namespace-module-identifier', u'namespace-module-registry',
u'namespace-require', u'namespace-require/constant',
u'namespace-require/copy', u'namespace-require/expansion-time',
u'namespace-set-variable-value!', u'namespace-symbol->identifier',
u'namespace-syntax-introduce', u'namespace-undefine-variable!',
u'namespace-unprotect-module', u'namespace-variable-value', u'namespace?',
u'nan?', u'natural-number/c', u'negate', u'negative?', u'never-evt',
u'new-∀/c', u'new-∃/c', u'newline', u'ninth', u'non-empty-listof',
u'non-empty-string?', u'none/c', u'normal-case-path', u'normalize-arity',
u'normalize-path', u'normalized-arity?', u'not', u'not/c', u'null', u'null?',
u'number->string', u'number?', u'numerator', u'object%', u'object->vector',
u'object-info', u'object-interface', u'object-method-arity-includes?',
u'object-name', u'object-or-false=?', u'object=?', u'object?', u'odd?',
u'one-of/c', u'open-input-bytes', u'open-input-file',
u'open-input-output-file', u'open-input-string', u'open-output-bytes',
u'open-output-file', u'open-output-nowhere', u'open-output-string',
u'or/c', u'order-of-magnitude', u'ormap', u'other-execute-bit',
u'other-read-bit', u'other-write-bit', u'output-port?', u'pair?',
u'parameter-procedure=?', u'parameter/c', u'parameter?',
u'parameterization?', u'parse-command-line', u'partition', u'path->bytes',
u'path->complete-path', u'path->directory-path', u'path->string',
u'path-add-suffix', u'path-convention-type', u'path-element->bytes',
u'path-element->string', u'path-element?', u'path-for-some-system?',
u'path-list-string->path-list', u'path-only', u'path-replace-suffix',
u'path-string?', u'path<?', u'path?', u'pathlist-closure', u'peek-byte',
u'peek-byte-or-special', u'peek-bytes', u'peek-bytes!', u'peek-bytes!-evt',
u'peek-bytes-avail!', u'peek-bytes-avail!*', u'peek-bytes-avail!-evt',
u'peek-bytes-avail!/enable-break', u'peek-bytes-evt', u'peek-char',
u'peek-char-or-special', u'peek-string', u'peek-string!',
u'peek-string!-evt', u'peek-string-evt', u'peeking-input-port',
u'permutations', u'phantom-bytes?', u'pi', u'pi.f', u'pipe-content-length',
u'place-break', u'place-channel', u'place-channel-get',
u'place-channel-put', u'place-channel-put/get', u'place-channel?',
u'place-dead-evt', u'place-enabled?', u'place-kill', u'place-location?',
u'place-message-allowed?', u'place-sleep', u'place-wait', u'place?',
u'placeholder-get', u'placeholder-set!', u'placeholder?',
u'plumber-add-flush!', u'plumber-flush-all',
u'plumber-flush-handle-remove!', u'plumber-flush-handle?', u'plumber?',
u'poll-guard-evt', u'port->bytes', u'port->bytes-lines', u'port->lines',
u'port->list', u'port->string', u'port-closed-evt', u'port-closed?',
u'port-commit-peeked', u'port-count-lines!', u'port-count-lines-enabled',
u'port-counts-lines?', u'port-display-handler', u'port-file-identity',
u'port-file-unlock', u'port-next-location', u'port-number?',
u'port-print-handler', u'port-progress-evt',
u'port-provides-progress-evts?', u'port-read-handler',
u'port-try-file-lock?', u'port-write-handler', u'port-writes-atomic?',
u'port-writes-special?', u'port?', u'positive?', u'predicate/c',
u'prefab-key->struct-type', u'prefab-key?', u'prefab-struct-key',
u'preferences-lock-file-mode', u'pregexp', u'pregexp?', u'pretty-display',
u'pretty-format', u'pretty-print', u'pretty-print-.-symbol-without-bars',
u'pretty-print-abbreviate-read-macros', u'pretty-print-columns',
u'pretty-print-current-style-table', u'pretty-print-depth',
u'pretty-print-exact-as-decimal', u'pretty-print-extend-style-table',
u'pretty-print-handler', u'pretty-print-newline',
u'pretty-print-post-print-hook', u'pretty-print-pre-print-hook',
u'pretty-print-print-hook', u'pretty-print-print-line',
u'pretty-print-remap-stylable', u'pretty-print-show-inexactness',
u'pretty-print-size-hook', u'pretty-print-style-table?',
u'pretty-printing', u'pretty-write', u'primitive-closure?',
u'primitive-result-arity', u'primitive?', u'print', u'print-as-expression',
u'print-boolean-long-form', u'print-box', u'print-graph',
u'print-hash-table', u'print-mpair-curly-braces',
u'print-pair-curly-braces', u'print-reader-abbreviations',
u'print-struct', u'print-syntax-width', u'print-unreadable',
u'print-vector-length', u'printable/c', u'printable<%>', u'printf',
u'println', u'procedure->method', u'procedure-arity',
u'procedure-arity-includes/c', u'procedure-arity-includes?',
u'procedure-arity?', u'procedure-closure-contents-eq?',
u'procedure-extract-target', u'procedure-keywords',
u'procedure-reduce-arity', u'procedure-reduce-keyword-arity',
u'procedure-rename', u'procedure-result-arity', u'procedure-specialize',
u'procedure-struct-type?', u'procedure?', u'process', u'process*',
u'process*/ports', u'process/ports', u'processor-count', u'progress-evt?',
u'promise-forced?', u'promise-running?', u'promise/c', u'promise/name?',
u'promise?', u'prop:arity-string', u'prop:arrow-contract',
u'prop:arrow-contract-get-info', u'prop:arrow-contract?', u'prop:blame',
u'prop:chaperone-contract', u'prop:checked-procedure', u'prop:contract',
u'prop:contracted', u'prop:custom-print-quotable', u'prop:custom-write',
u'prop:dict', u'prop:dict/contract', u'prop:equal+hash', u'prop:evt',
u'prop:exn:missing-module', u'prop:exn:srclocs',
u'prop:expansion-contexts', u'prop:flat-contract',
u'prop:impersonator-of', u'prop:input-port',
u'prop:liberal-define-context', u'prop:object-name',
u'prop:opt-chaperone-contract', u'prop:opt-chaperone-contract-get-test',
u'prop:opt-chaperone-contract?', u'prop:orc-contract',
u'prop:orc-contract-get-subcontracts', u'prop:orc-contract?',
u'prop:output-port', u'prop:place-location', u'prop:procedure',
u'prop:recursive-contract', u'prop:recursive-contract-unroll',
u'prop:recursive-contract?', u'prop:rename-transformer', u'prop:sequence',
u'prop:set!-transformer', u'prop:stream', u'proper-subset?',
u'pseudo-random-generator->vector', u'pseudo-random-generator-vector?',
u'pseudo-random-generator?', u'put-preferences', u'putenv', u'quotient',
u'quotient/remainder', u'radians->degrees', u'raise',
u'raise-argument-error', u'raise-arguments-error', u'raise-arity-error',
u'raise-blame-error', u'raise-contract-error', u'raise-mismatch-error',
u'raise-not-cons-blame-error', u'raise-range-error',
u'raise-result-error', u'raise-syntax-error', u'raise-type-error',
u'raise-user-error', u'random', u'random-seed', u'range', u'rational?',
u'rationalize', u'read', u'read-accept-bar-quote', u'read-accept-box',
u'read-accept-compiled', u'read-accept-dot', u'read-accept-graph',
u'read-accept-infix-dot', u'read-accept-lang', u'read-accept-quasiquote',
u'read-accept-reader', u'read-byte', u'read-byte-or-special',
u'read-bytes', u'read-bytes!', u'read-bytes!-evt', u'read-bytes-avail!',
u'read-bytes-avail!*', u'read-bytes-avail!-evt',
u'read-bytes-avail!/enable-break', u'read-bytes-evt', u'read-bytes-line',
u'read-bytes-line-evt', u'read-case-sensitive', u'read-cdot', u'read-char',
u'read-char-or-special', u'read-curly-brace-as-paren',
u'read-curly-brace-with-tag', u'read-decimal-as-inexact',
u'read-eval-print-loop', u'read-language', u'read-line', u'read-line-evt',
u'read-on-demand-source', u'read-square-bracket-as-paren',
u'read-square-bracket-with-tag', u'read-string', u'read-string!',
u'read-string!-evt', u'read-string-evt', u'read-syntax',
u'read-syntax/recursive', u'read/recursive', u'readtable-mapping',
u'readtable?', u'real->decimal-string', u'real->double-flonum',
u'real->floating-point-bytes', u'real->single-flonum', u'real-in',
u'real-part', u'real?', u'reencode-input-port', u'reencode-output-port',
u'regexp', u'regexp-match', u'regexp-match*', u'regexp-match-evt',
u'regexp-match-exact?', u'regexp-match-peek',
u'regexp-match-peek-immediate', u'regexp-match-peek-positions',
u'regexp-match-peek-positions*',
u'regexp-match-peek-positions-immediate',
u'regexp-match-peek-positions-immediate/end',
u'regexp-match-peek-positions/end', u'regexp-match-positions',
u'regexp-match-positions*', u'regexp-match-positions/end',
u'regexp-match/end', u'regexp-match?', u'regexp-max-lookbehind',
u'regexp-quote', u'regexp-replace', u'regexp-replace*',
u'regexp-replace-quote', u'regexp-replaces', u'regexp-split',
u'regexp-try-match', u'regexp?', u'relative-path?', u'relocate-input-port',
u'relocate-output-port', u'remainder', u'remf', u'remf*', u'remove',
u'remove*', u'remove-duplicates', u'remq', u'remq*', u'remv', u'remv*',
u'rename-contract', u'rename-file-or-directory',
u'rename-transformer-target', u'rename-transformer?', u'replace-evt',
u'reroot-path', u'resolve-path', u'resolved-module-path-name',
u'resolved-module-path?', u'rest', u'reverse', u'round', u'second',
u'seconds->date', u'security-guard?', u'semaphore-peek-evt',
u'semaphore-peek-evt?', u'semaphore-post', u'semaphore-try-wait?',
u'semaphore-wait', u'semaphore-wait/enable-break', u'semaphore?',
u'sequence->list', u'sequence->stream', u'sequence-add-between',
u'sequence-andmap', u'sequence-append', u'sequence-count',
u'sequence-filter', u'sequence-fold', u'sequence-for-each',
u'sequence-generate', u'sequence-generate*', u'sequence-length',
u'sequence-map', u'sequence-ormap', u'sequence-ref', u'sequence-tail',
u'sequence/c', u'sequence?', u'set', u'set!-transformer-procedure',
u'set!-transformer?', u'set->list', u'set->stream', u'set-add', u'set-add!',
u'set-box!', u'set-clear', u'set-clear!', u'set-copy', u'set-copy-clear',
u'set-count', u'set-empty?', u'set-eq?', u'set-equal?', u'set-eqv?',
u'set-first', u'set-for-each', u'set-implements/c', u'set-implements?',
u'set-intersect', u'set-intersect!', u'set-map', u'set-mcar!', u'set-mcdr!',
u'set-member?', u'set-mutable?', u'set-phantom-bytes!',
u'set-port-next-location!', u'set-remove', u'set-remove!', u'set-rest',
u'set-some-basic-contracts!', u'set-subtract', u'set-subtract!',
u'set-symmetric-difference', u'set-symmetric-difference!', u'set-union',
u'set-union!', u'set-weak?', u'set/c', u'set=?', u'set?', u'seteq', u'seteqv',
u'seventh', u'sgn', u'shared-bytes', u'shell-execute', u'shrink-path-wrt',
u'shuffle', u'simple-form-path', u'simplify-path', u'sin',
u'single-flonum?', u'sinh', u'sixth', u'skip-projection-wrapper?', u'sleep',
u'some-system-path->string', u'sort', u'special-comment-value',
u'special-comment?', u'special-filter-input-port', u'split-at',
u'split-at-right', u'split-common-prefix', u'split-path', u'splitf-at',
u'splitf-at-right', u'sqr', u'sqrt', u'srcloc', u'srcloc->string',
u'srcloc-column', u'srcloc-line', u'srcloc-position', u'srcloc-source',
u'srcloc-span', u'srcloc?', u'stop-after', u'stop-before', u'stream->list',
u'stream-add-between', u'stream-andmap', u'stream-append', u'stream-count',
u'stream-empty?', u'stream-filter', u'stream-first', u'stream-fold',
u'stream-for-each', u'stream-length', u'stream-map', u'stream-ormap',
u'stream-ref', u'stream-rest', u'stream-tail', u'stream/c', u'stream?',
u'string', u'string->bytes/latin-1', u'string->bytes/locale',
u'string->bytes/utf-8', u'string->immutable-string', u'string->keyword',
u'string->list', u'string->number', u'string->path',
u'string->path-element', u'string->some-system-path', u'string->symbol',
u'string->uninterned-symbol', u'string->unreadable-symbol',
u'string-append', u'string-append*', u'string-ci<=?', u'string-ci<?',
u'string-ci=?', u'string-ci>=?', u'string-ci>?', u'string-contains?',
u'string-copy', u'string-copy!', u'string-downcase',
u'string-environment-variable-name?', u'string-fill!', u'string-foldcase',
u'string-join', u'string-len/c', u'string-length', u'string-locale-ci<?',
u'string-locale-ci=?', u'string-locale-ci>?', u'string-locale-downcase',
u'string-locale-upcase', u'string-locale<?', u'string-locale=?',
u'string-locale>?', u'string-no-nuls?', u'string-normalize-nfc',
u'string-normalize-nfd', u'string-normalize-nfkc',
u'string-normalize-nfkd', u'string-normalize-spaces', u'string-port?',
u'string-prefix?', u'string-ref', u'string-replace', u'string-set!',
u'string-split', u'string-suffix?', u'string-titlecase', u'string-trim',
u'string-upcase', u'string-utf-8-length', u'string<=?', u'string<?',
u'string=?', u'string>=?', u'string>?', u'string?', u'struct->vector',
u'struct-accessor-procedure?', u'struct-constructor-procedure?',
u'struct-info', u'struct-mutator-procedure?',
u'struct-predicate-procedure?', u'struct-type-info',
u'struct-type-make-constructor', u'struct-type-make-predicate',
u'struct-type-property-accessor-procedure?', u'struct-type-property/c',
u'struct-type-property?', u'struct-type?', u'struct:arity-at-least',
u'struct:arrow-contract-info', u'struct:date', u'struct:date*',
u'struct:exn', u'struct:exn:break', u'struct:exn:break:hang-up',
u'struct:exn:break:terminate', u'struct:exn:fail',
u'struct:exn:fail:contract', u'struct:exn:fail:contract:arity',
u'struct:exn:fail:contract:blame',
u'struct:exn:fail:contract:continuation',
u'struct:exn:fail:contract:divide-by-zero',
u'struct:exn:fail:contract:non-fixnum-result',
u'struct:exn:fail:contract:variable', u'struct:exn:fail:filesystem',
u'struct:exn:fail:filesystem:errno',
u'struct:exn:fail:filesystem:exists',
u'struct:exn:fail:filesystem:missing-module',
u'struct:exn:fail:filesystem:version', u'struct:exn:fail:network',
u'struct:exn:fail:network:errno', u'struct:exn:fail:object',
u'struct:exn:fail:out-of-memory', u'struct:exn:fail:read',
u'struct:exn:fail:read:eof', u'struct:exn:fail:read:non-char',
u'struct:exn:fail:syntax', u'struct:exn:fail:syntax:missing-module',
u'struct:exn:fail:syntax:unbound', u'struct:exn:fail:unsupported',
u'struct:exn:fail:user', u'struct:srcloc',
u'struct:wrapped-extra-arg-arrow', u'struct?', u'sub1', u'subbytes',
u'subclass?', u'subclass?/c', u'subprocess', u'subprocess-group-enabled',
u'subprocess-kill', u'subprocess-pid', u'subprocess-status',
u'subprocess-wait', u'subprocess?', u'subset?', u'substring', u'suggest/c',
u'symbol->string', u'symbol-interned?', u'symbol-unreadable?', u'symbol<?',
u'symbol=?', u'symbol?', u'symbols', u'sync', u'sync/enable-break',
u'sync/timeout', u'sync/timeout/enable-break', u'syntax->datum',
u'syntax->list', u'syntax-arm', u'syntax-column', u'syntax-debug-info',
u'syntax-disarm', u'syntax-e', u'syntax-line',
u'syntax-local-bind-syntaxes', u'syntax-local-certifier',
u'syntax-local-context', u'syntax-local-expand-expression',
u'syntax-local-get-shadower', u'syntax-local-identifier-as-binding',
u'syntax-local-introduce', u'syntax-local-lift-context',
u'syntax-local-lift-expression', u'syntax-local-lift-module',
u'syntax-local-lift-module-end-declaration',
u'syntax-local-lift-provide', u'syntax-local-lift-require',
u'syntax-local-lift-values-expression',
u'syntax-local-make-definition-context',
u'syntax-local-make-delta-introducer',
u'syntax-local-module-defined-identifiers',
u'syntax-local-module-exports',
u'syntax-local-module-required-identifiers', u'syntax-local-name',
u'syntax-local-phase-level', u'syntax-local-submodules',
u'syntax-local-transforming-module-provides?', u'syntax-local-value',
u'syntax-local-value/immediate', u'syntax-original?', u'syntax-position',
u'syntax-property', u'syntax-property-preserved?',
u'syntax-property-symbol-keys', u'syntax-protect', u'syntax-rearm',
u'syntax-recertify', u'syntax-shift-phase-level', u'syntax-source',
u'syntax-source-module', u'syntax-span', u'syntax-taint',
u'syntax-tainted?', u'syntax-track-origin',
u'syntax-transforming-module-expression?',
u'syntax-transforming-with-lifts?', u'syntax-transforming?', u'syntax/c',
u'syntax?', u'system', u'system*', u'system*/exit-code',
u'system-big-endian?', u'system-idle-evt', u'system-language+country',
u'system-library-subpath', u'system-path-convention-type', u'system-type',
u'system/exit-code', u'tail-marks-match?', u'take', u'take-common-prefix',
u'take-right', u'takef', u'takef-right', u'tan', u'tanh',
u'tcp-abandon-port', u'tcp-accept', u'tcp-accept-evt',
u'tcp-accept-ready?', u'tcp-accept/enable-break', u'tcp-addresses',
u'tcp-close', u'tcp-connect', u'tcp-connect/enable-break', u'tcp-listen',
u'tcp-listener?', u'tcp-port?', u'tentative-pretty-print-port-cancel',
u'tentative-pretty-print-port-transfer', u'tenth', u'terminal-port?',
u'the-unsupplied-arg', u'third', u'thread', u'thread-cell-ref',
u'thread-cell-set!', u'thread-cell-values?', u'thread-cell?',
u'thread-dead-evt', u'thread-dead?', u'thread-group?', u'thread-receive',
u'thread-receive-evt', u'thread-resume', u'thread-resume-evt',
u'thread-rewind-receive', u'thread-running?', u'thread-send',
u'thread-suspend', u'thread-suspend-evt', u'thread-try-receive',
u'thread-wait', u'thread/suspend-to-kill', u'thread?', u'time-apply',
u'touch', u'transplant-input-port', u'transplant-output-port', u'true',
u'truncate', u'udp-addresses', u'udp-bind!', u'udp-bound?', u'udp-close',
u'udp-connect!', u'udp-connected?', u'udp-multicast-interface',
u'udp-multicast-join-group!', u'udp-multicast-leave-group!',
u'udp-multicast-loopback?', u'udp-multicast-set-interface!',
u'udp-multicast-set-loopback!', u'udp-multicast-set-ttl!',
u'udp-multicast-ttl', u'udp-open-socket', u'udp-receive!',
u'udp-receive!*', u'udp-receive!-evt', u'udp-receive!/enable-break',
u'udp-receive-ready-evt', u'udp-send', u'udp-send*', u'udp-send-evt',
u'udp-send-ready-evt', u'udp-send-to', u'udp-send-to*', u'udp-send-to-evt',
u'udp-send-to/enable-break', u'udp-send/enable-break', u'udp?', u'unbox',
u'uncaught-exception-handler', u'unit?', u'unspecified-dom',
u'unsupplied-arg?', u'use-collection-link-paths',
u'use-compiled-file-paths', u'use-user-specific-search-paths',
u'user-execute-bit', u'user-read-bit', u'user-write-bit', u'value-blame',
u'value-contract', u'values', u'variable-reference->empty-namespace',
u'variable-reference->module-base-phase',
u'variable-reference->module-declaration-inspector',
u'variable-reference->module-path-index',
u'variable-reference->module-source', u'variable-reference->namespace',
u'variable-reference->phase',
u'variable-reference->resolved-module-path',
u'variable-reference-constant?', u'variable-reference?', u'vector',
u'vector->immutable-vector', u'vector->list',
u'vector->pseudo-random-generator', u'vector->pseudo-random-generator!',
u'vector->values', u'vector-append', u'vector-argmax', u'vector-argmin',
u'vector-copy', u'vector-copy!', u'vector-count', u'vector-drop',
u'vector-drop-right', u'vector-fill!', u'vector-filter',
u'vector-filter-not', u'vector-immutable', u'vector-immutable/c',
u'vector-immutableof', u'vector-length', u'vector-map', u'vector-map!',
u'vector-member', u'vector-memq', u'vector-memv', u'vector-ref',
u'vector-set!', u'vector-set*!', u'vector-set-performance-stats!',
u'vector-split-at', u'vector-split-at-right', u'vector-take',
u'vector-take-right', u'vector/c', u'vector?', u'vectorof', u'version',
u'void', u'void?', u'weak-box-value', u'weak-box?', u'weak-set',
u'weak-seteq', u'weak-seteqv', u'will-execute', u'will-executor?',
u'will-register', u'will-try-execute', u'with-input-from-bytes',
u'with-input-from-file', u'with-input-from-string',
u'with-output-to-bytes', u'with-output-to-file', u'with-output-to-string',
u'would-be-future', u'wrap-evt', u'wrapped-extra-arg-arrow',
u'wrapped-extra-arg-arrow-extra-neg-party-argument',
u'wrapped-extra-arg-arrow-real-func', u'wrapped-extra-arg-arrow?',
u'writable<%>', u'write', u'write-byte', u'write-bytes',
u'write-bytes-avail', u'write-bytes-avail*', u'write-bytes-avail-evt',
u'write-bytes-avail/enable-break', u'write-char', u'write-special',
u'write-special-avail*', u'write-special-evt', u'write-string',
u'write-to-file', u'writeln', u'xor', u'zero?', u'~.a', u'~.s', u'~.v', u'~a',
u'~e', u'~r', u'~s', u'~v'
)
_opening_parenthesis = r'[([{]'
_closing_parenthesis = r'[)\]}]'
_delimiters = r'()[\]{}",\'`;\s'
_symbol = r'(?u)(?:\|[^|]*\||\\[\w\W]|[^|\\%s]+)+' % _delimiters
_exact_decimal_prefix = r'(?:#e)?(?:#d)?(?:#e)?'
_exponent = r'(?:[defls][-+]?\d+)'
_inexact_simple_no_hashes = r'(?:\d+(?:/\d+|\.\d*)?|\.\d+)'
_inexact_simple = (r'(?:%s|(?:\d+#+(?:\.#*|/\d+#*)?|\.\d+#+|'
r'\d+(?:\.\d*#+|/\d+#+)))' % _inexact_simple_no_hashes)
_inexact_normal_no_hashes = r'(?:%s%s?)' % (_inexact_simple_no_hashes,
_exponent)
_inexact_normal = r'(?:%s%s?)' % (_inexact_simple, _exponent)
_inexact_special = r'(?:(?:inf|nan)\.[0f])'
_inexact_real = r'(?:[-+]?%s|[-+]%s)' % (_inexact_normal,
_inexact_special)
_inexact_unsigned = r'(?:%s|%s)' % (_inexact_normal, _inexact_special)
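    # Illustrative matches for the fragments above: "-1.5e3" satisfies
    # _inexact_real via _inexact_normal (simple part "1.5", exponent
    # "e3"), while "+nan.0" satisfies it via _inexact_special.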
tokens = {
'root': [
(_closing_parenthesis, Error),
(r'(?!\Z)', Text, 'unquoted-datum')
],
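        # 'datum' collects the rules shared by the unquoted, quasiquoted
        # and quoted contexts below; each *-datum state includes it and
        # layers its own quote/unquote handling on top.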
'datum': [
            (r'(?s)#;|#![ /]([^\\\n]|\\.)*', Comment),
(u';[^\\n\\r\x85\u2028\u2029]*', Comment.Single),
(r'#\|', Comment.Multiline, 'block-comment'),
# Whitespaces
(r'(?u)\s+', Text),
# Numbers: Keep in mind Racket reader hash prefixes, which
# can denote the base or the type. These don't map neatly
# onto Pygments token types; some judgment calls here.
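            # A few illustrative literals and the tokens these rules
            # yield: 42 -> Integer, 6.02e23 -> Float, 1/3 -> Number,
            # +inf.0 -> Float, #x1F -> Hex, #b101 -> Bin.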
# #d or no prefix
(r'(?i)%s[-+]?\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters),
Number.Integer, '#pop'),
(r'(?i)%s[-+]?(\d+(\.\d*)?|\.\d+)([deflst][-+]?\d+)?(?=[%s])' %
(_exact_decimal_prefix, _delimiters), Number.Float, '#pop'),
(r'(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' %
(_exact_decimal_prefix, _inexact_normal_no_hashes,
_inexact_normal_no_hashes, _inexact_normal_no_hashes,
_delimiters), Number, '#pop'),
# Inexact without explicit #i
(r'(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' %
(_inexact_real, _inexact_unsigned, _inexact_unsigned,
_inexact_real, _inexact_real, _delimiters), Number.Float,
'#pop'),
# The remaining extflonums
(r'(?i)(([-+]?%st[-+]?\d+)|[-+](inf|nan)\.t)(?=[%s])' %
(_inexact_simple, _delimiters), Number.Float, '#pop'),
# #b
(r'(?i)(#[ei])?#b%s' % _symbol, Number.Bin, '#pop'),
# #o
(r'(?i)(#[ei])?#o%s' % _symbol, Number.Oct, '#pop'),
# #x
(r'(?i)(#[ei])?#x%s' % _symbol, Number.Hex, '#pop'),
# #i is always inexact, i.e. float
(r'(?i)(#d)?#i%s' % _symbol, Number.Float, '#pop'),
# Strings and characters
(r'#?"', String.Double, ('#pop', 'string')),
(r'#<<(.+)\n(^(?!\1$).*$\n)*^\1$', String.Heredoc, '#pop'),
(r'#\\(u[\da-fA-F]{1,4}|U[\da-fA-F]{1,8})', String.Char, '#pop'),
(r'(?is)#\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'),
(r'(?s)#[pr]x#?"(\\?.)*?"', String.Regex, '#pop'),
# Constants
(r'#(true|false|[tTfF])', Name.Constant, '#pop'),
# Keyword argument names (e.g. #:keyword)
(r'#:%s' % _symbol, Keyword.Declaration, '#pop'),
# Reader extensions
(r'(#lang |#!)(\S+)',
bygroups(Keyword.Namespace, Name.Namespace)),
(r'#reader', Keyword.Namespace, 'quoted-datum'),
# Other syntax
(r"(?i)\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters, Operator),
(r"'|#[s&]|#hash(eqv?)?|#\d*(?=%s)" % _opening_parenthesis,
Operator, ('#pop', 'quoted-datum'))
],
'datum*': [
(r'`|,@?', Operator),
(_symbol, String.Symbol, '#pop'),
(r'[|\\]', Error),
default('#pop')
],
'list': [
(_closing_parenthesis, Punctuation, '#pop')
],
'unquoted-datum': [
include('datum'),
(r'quote(?=[%s])' % _delimiters, Keyword,
('#pop', 'quoted-datum')),
(r'`', Operator, ('#pop', 'quasiquoted-datum')),
(r'quasiquote(?=[%s])' % _delimiters, Keyword,
('#pop', 'quasiquoted-datum')),
(_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')),
(words(_keywords, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
Keyword, '#pop'),
(words(_builtins, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
Name.Builtin, '#pop'),
(_symbol, Name, '#pop'),
include('datum*')
],
'unquoted-list': [
include('list'),
(r'(?!\Z)', Text, 'unquoted-datum')
],
'quasiquoted-datum': [
include('datum'),
(r',@?', Operator, ('#pop', 'unquoted-datum')),
(r'unquote(-splicing)?(?=[%s])' % _delimiters, Keyword,
('#pop', 'unquoted-datum')),
(_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')),
include('datum*')
],
'quasiquoted-list': [
include('list'),
(r'(?!\Z)', Text, 'quasiquoted-datum')
],
'quoted-datum': [
include('datum'),
(_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')),
include('datum*')
],
'quoted-list': [
include('list'),
(r'(?!\Z)', Text, 'quoted-datum')
],
'block-comment': [
(r'#\|', Comment.Multiline, '#push'),
(r'\|#', Comment.Multiline, '#pop'),
(r'[^#|]+|.', Comment.Multiline)
],
'string': [
(r'"', String.Double, '#pop'),
(r'(?s)\\([0-7]{1,3}|x[\da-fA-F]{1,2}|u[\da-fA-F]{1,4}|'
r'U[\da-fA-F]{1,8}|.)', String.Escape),
(r'[^\\"]+', String.Double)
]
}


class NewLispLexer(RegexLexer):
"""
    For `newLISP <http://www.newlisp.org/>`_ source code (version 10.3.0).
.. versionadded:: 1.5
"""
name = 'NewLisp'
aliases = ['newlisp']
filenames = ['*.lsp', '*.nl', '*.kif']
mimetypes = ['text/x-newlisp', 'application/x-newlisp']
flags = re.IGNORECASE | re.MULTILINE | re.UNICODE
# list of built-in functions for newLISP version 10.3
builtins = (
'^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
'<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
'$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
'$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
'acos', 'acosh', 'add', 'address', 'amb', 'and', 'append-file',
'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
'base64-enc', 'bayes-query', 'bayes-train', 'begin',
'beta', 'betai', 'bind', 'binomial', 'bits', 'callback',
'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
'close', 'command-event', 'cond', 'cons', 'constant',
'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
'def-new', 'default', 'define-macro', 'define',
'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
'last', 'legal?', 'length', 'let', 'letex', 'letn',
'list?', 'list', 'load', 'local', 'log', 'lookup',
'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
'net-send-to', 'net-send-udp', 'net-send', 'net-service',
'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
'read-key', 'read-line', 'read-utf8', 'reader-event',
'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
'write', 'write-char', 'write-file', 'write-line',
'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
)
# valid names
valid_name = r'([\w!$%&*+.,/<=>?@^~|-])+|(\[.*?\])+'
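    # e.g. "my-func", "*global*" and bracketed symbols such as
    # "[my sym]" all qualify as names.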
tokens = {
'root': [
# shebang
(r'#!(.*?)$', Comment.Preproc),
# comments starting with semicolon
(r';.*$', Comment.Single),
# comments starting with #
(r'#.*$', Comment.Single),
# whitespace
(r'\s+', Text),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
# braces
(r'\{', String, "bracestring"),
# [text] ... [/text] delimited strings
(r'\[text\]*', String, "tagstring"),
# 'special' operators...
(r"('|:)", Operator),
# highlight the builtins
(words(builtins, suffix=r'\b'),
Keyword),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Variable),
# the remaining variables
(valid_name, String.Symbol),
# parentheses
(r'(\(|\))', Punctuation),
],
# braced strings...
'bracestring': [
(r'\{', String, "#push"),
(r'\}', String, "#pop"),
('[^{}]+', String),
],
# tagged [text]...[/text] delimited strings...
'tagstring': [
(r'(?s)(.*?)(\[/text\])', String, '#pop'),
],
}


class EmacsLispLexer(RegexLexer):
"""
An ELisp lexer, parsing a stream and outputting the tokens
needed to highlight elisp code.
.. versionadded:: 2.1
"""
name = 'EmacsLisp'
aliases = ['emacs', 'elisp', 'emacs-lisp']
filenames = ['*.el']
mimetypes = ['text/x-elisp', 'application/x-elisp']
flags = re.MULTILINE
# couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[\w!$%&*+-/<=>?@^{}~|]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\]\'\n,;`])' # whitespace or terminating macro characters
# symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent)
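    # e.g. "foo-bar", "string<" and the escaped "\#name" all match;
    # a symbol cannot start with a macro character such as ; " ( or '.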
macros = set((
'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2',
'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare',
'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct',
'cl-defsubst', 'cl-deftype', 'cl-defun', 'cl-destructuring-bind',
'cl-do', 'cl-do*', 'cl-do-all-symbols', 'cl-do-symbols', 'cl-dolist',
'cl-dotimes', 'cl-ecase', 'cl-etypecase', 'eval-when', 'cl-eval-when', 'cl-flet',
'cl-flet*', 'cl-function', 'cl-incf', 'cl-labels', 'cl-letf',
'cl-letf*', 'cl-load-time-value', 'cl-locally', 'cl-loop',
'cl-macrolet', 'cl-multiple-value-bind', 'cl-multiple-value-setq',
'cl-progv', 'cl-psetf', 'cl-psetq', 'cl-pushnew', 'cl-remf',
'cl-return', 'cl-return-from', 'cl-rotatef', 'cl-shiftf',
'cl-symbol-macrolet', 'cl-tagbody', 'cl-the', 'cl-typecase',
'combine-after-change-calls', 'condition-case-unless-debug', 'decf',
'declaim', 'declare', 'declare-function', 'def-edebug-spec',
'defadvice', 'defclass', 'defcustom', 'defface', 'defgeneric',
'defgroup', 'define-advice', 'define-alternatives',
'define-compiler-macro', 'define-derived-mode', 'define-generic-mode',
'define-global-minor-mode', 'define-globalized-minor-mode',
'define-minor-mode', 'define-modify-macro',
'define-obsolete-face-alias', 'define-obsolete-function-alias',
'define-obsolete-variable-alias', 'define-setf-expander',
'define-skeleton', 'defmacro', 'defmethod', 'defsetf', 'defstruct',
'defsubst', 'deftheme', 'deftype', 'defun', 'defvar-local',
'delay-mode-hooks', 'destructuring-bind', 'do', 'do*',
'do-all-symbols', 'do-symbols', 'dolist', 'dont-compile', 'dotimes',
'dotimes-with-progress-reporter', 'ecase', 'ert-deftest', 'etypecase',
'eval-and-compile', 'eval-when-compile', 'flet', 'ignore-errors',
'incf', 'labels', 'lambda', 'letrec', 'lexical-let', 'lexical-let*',
'loop', 'multiple-value-bind', 'multiple-value-setq', 'noreturn',
'oref', 'oref-default', 'oset', 'oset-default', 'pcase',
'pcase-defmacro', 'pcase-dolist', 'pcase-exhaustive', 'pcase-let',
'pcase-let*', 'pop', 'psetf', 'psetq', 'push', 'pushnew', 'remf',
'return', 'rotatef', 'rx', 'save-match-data', 'save-selected-window',
'save-window-excursion', 'setf', 'setq-local', 'shiftf',
'track-mouse', 'typecase', 'unless', 'use-package', 'when',
'while-no-input', 'with-case-table', 'with-category-table',
'with-coding-priority', 'with-current-buffer', 'with-demoted-errors',
'with-eval-after-load', 'with-file-modes', 'with-local-quit',
'with-output-to-string', 'with-output-to-temp-buffer',
'with-parsed-tramp-file-name', 'with-selected-frame',
'with-selected-window', 'with-silent-modifications', 'with-slots',
'with-syntax-table', 'with-temp-buffer', 'with-temp-file',
'with-temp-message', 'with-timeout', 'with-tramp-connection-property',
'with-tramp-file-property', 'with-tramp-progress-reporter',
'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv',
'return-from',
))
special_forms = set((
'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar',
'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1',
'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion',
'save-restriction', 'setq', 'setq-default', 'subr-arity',
'unwind-protect', 'while',
))
builtin_function = set((
'%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=',
'Snarf-documentation', 'abort-recursive-edit', 'abs',
'accept-process-output', 'access-file', 'accessible-keymaps', 'acos',
'active-minibuffer-window', 'add-face-text-property',
'add-name-to-file', 'add-text-properties', 'all-completions',
'append', 'apply', 'apropos-internal', 'aref', 'arrayp', 'aset',
'ash', 'asin', 'assoc', 'assoc-string', 'assq', 'atan', 'atom',
'autoload', 'autoload-do-load', 'backtrace', 'backtrace--locals',
'backtrace-debug', 'backtrace-eval', 'backtrace-frame',
'backward-char', 'backward-prefix-chars', 'barf-if-buffer-read-only',
'base64-decode-region', 'base64-decode-string',
'base64-encode-region', 'base64-encode-string', 'beginning-of-line',
'bidi-find-overridden-directionality', 'bidi-resolved-levels',
'bitmap-spec-p', 'bobp', 'bolp', 'bool-vector',
'bool-vector-count-consecutive', 'bool-vector-count-population',
'bool-vector-exclusive-or', 'bool-vector-intersection',
'bool-vector-not', 'bool-vector-p', 'bool-vector-set-difference',
'bool-vector-subsetp', 'bool-vector-union', 'boundp',
'buffer-base-buffer', 'buffer-chars-modified-tick',
'buffer-enable-undo', 'buffer-file-name', 'buffer-has-markers-at',
'buffer-list', 'buffer-live-p', 'buffer-local-value',
'buffer-local-variables', 'buffer-modified-p', 'buffer-modified-tick',
'buffer-name', 'buffer-size', 'buffer-string', 'buffer-substring',
'buffer-substring-no-properties', 'buffer-swap-text', 'bufferp',
'bury-buffer-internal', 'byte-code', 'byte-code-function-p',
'byte-to-position', 'byte-to-string', 'byteorder',
'call-interactively', 'call-last-kbd-macro', 'call-process',
'call-process-region', 'cancel-kbd-macro-events', 'capitalize',
'capitalize-region', 'capitalize-word', 'car', 'car-less-than-car',
'car-safe', 'case-table-p', 'category-docstring',
'category-set-mnemonics', 'category-table', 'category-table-p',
'ccl-execute', 'ccl-execute-on-string', 'ccl-program-p', 'cdr',
'cdr-safe', 'ceiling', 'char-after', 'char-before',
'char-category-set', 'char-charset', 'char-equal', 'char-or-string-p',
'char-resolve-modifiers', 'char-syntax', 'char-table-extra-slot',
'char-table-p', 'char-table-parent', 'char-table-range',
'char-table-subtype', 'char-to-string', 'char-width', 'characterp',
'charset-after', 'charset-id-internal', 'charset-plist',
'charset-priority-list', 'charsetp', 'check-coding-system',
'check-coding-systems-region', 'clear-buffer-auto-save-failure',
'clear-charset-maps', 'clear-face-cache', 'clear-font-cache',
'clear-image-cache', 'clear-string', 'clear-this-command-keys',
'close-font', 'clrhash', 'coding-system-aliases',
'coding-system-base', 'coding-system-eol-type', 'coding-system-p',
'coding-system-plist', 'coding-system-priority-list',
'coding-system-put', 'color-distance', 'color-gray-p',
'color-supported-p', 'combine-after-change-execute',
'command-error-default-function', 'command-remapping', 'commandp',
'compare-buffer-substrings', 'compare-strings',
'compare-window-configurations', 'completing-read',
'compose-region-internal', 'compose-string-internal',
'composition-get-gstring', 'compute-motion', 'concat', 'cons',
'consp', 'constrain-to-field', 'continue-process',
'controlling-tty-p', 'coordinates-in-window-p', 'copy-alist',
'copy-category-table', 'copy-file', 'copy-hash-table', 'copy-keymap',
'copy-marker', 'copy-sequence', 'copy-syntax-table', 'copysign',
'cos', 'current-active-maps', 'current-bidi-paragraph-direction',
'current-buffer', 'current-case-table', 'current-column',
'current-global-map', 'current-idle-time', 'current-indentation',
'current-input-mode', 'current-local-map', 'current-message',
'current-minor-mode-maps', 'current-time', 'current-time-string',
'current-time-zone', 'current-window-configuration',
'cygwin-convert-file-name-from-windows',
'cygwin-convert-file-name-to-windows', 'daemon-initialized',
'daemonp', 'dbus--init-bus', 'dbus-get-unique-name',
'dbus-message-internal', 'debug-timer-check', 'declare-equiv-charset',
'decode-big5-char', 'decode-char', 'decode-coding-region',
'decode-coding-string', 'decode-sjis-char', 'decode-time',
'default-boundp', 'default-file-modes', 'default-printer-name',
'default-toplevel-value', 'default-value', 'define-category',
'define-charset-alias', 'define-charset-internal',
'define-coding-system-alias', 'define-coding-system-internal',
'define-fringe-bitmap', 'define-hash-table-test', 'define-key',
'define-prefix-command', 'delete',
'delete-all-overlays', 'delete-and-extract-region', 'delete-char',
'delete-directory-internal', 'delete-field', 'delete-file',
'delete-frame', 'delete-other-windows-internal', 'delete-overlay',
'delete-process', 'delete-region', 'delete-terminal',
'delete-window-internal', 'delq', 'describe-buffer-bindings',
'describe-vector', 'destroy-fringe-bitmap', 'detect-coding-region',
'detect-coding-string', 'ding', 'directory-file-name',
'directory-files', 'directory-files-and-attributes', 'discard-input',
'display-supports-face-attributes-p', 'do-auto-save', 'documentation',
'documentation-property', 'downcase', 'downcase-region',
'downcase-word', 'draw-string', 'dump-colors', 'dump-emacs',
'dump-face', 'dump-frame-glyph-matrix', 'dump-glyph-matrix',
'dump-glyph-row', 'dump-redisplay-history', 'dump-tool-bar-row',
'elt', 'emacs-pid', 'encode-big5-char', 'encode-char',
'encode-coding-region', 'encode-coding-string', 'encode-sjis-char',
'encode-time', 'end-kbd-macro', 'end-of-line', 'eobp', 'eolp', 'eq',
'eql', 'equal', 'equal-including-properties', 'erase-buffer',
'error-message-string', 'eval', 'eval-buffer', 'eval-region',
'event-convert-list', 'execute-kbd-macro', 'exit-recursive-edit',
'exp', 'expand-file-name', 'expt', 'external-debugging-output',
'face-attribute-relative-p', 'face-attributes-as-vector', 'face-font',
'fboundp', 'fceiling', 'fetch-bytecode', 'ffloor',
'field-beginning', 'field-end', 'field-string',
'field-string-no-properties', 'file-accessible-directory-p',
'file-acl', 'file-attributes', 'file-attributes-lessp',
'file-directory-p', 'file-executable-p', 'file-exists-p',
'file-locked-p', 'file-modes', 'file-name-absolute-p',
'file-name-all-completions', 'file-name-as-directory',
'file-name-completion', 'file-name-directory',
'file-name-nondirectory', 'file-newer-than-file-p', 'file-readable-p',
'file-regular-p', 'file-selinux-context', 'file-symlink-p',
'file-system-info', 'file-system-info', 'file-writable-p',
'fillarray', 'find-charset-region', 'find-charset-string',
'find-coding-systems-region-internal', 'find-composition-internal',
'find-file-name-handler', 'find-font', 'find-operation-coding-system',
'float', 'float-time', 'floatp', 'floor', 'fmakunbound',
'following-char', 'font-at', 'font-drive-otf', 'font-face-attributes',
'font-family-list', 'font-get', 'font-get-glyphs',
'font-get-system-font', 'font-get-system-normal-font', 'font-info',
'font-match-p', 'font-otf-alternates', 'font-put',
'font-shape-gstring', 'font-spec', 'font-variation-glyphs',
'font-xlfd-name', 'fontp', 'fontset-font', 'fontset-info',
'fontset-list', 'fontset-list-all', 'force-mode-line-update',
'force-window-update', 'format', 'format-mode-line',
'format-network-address', 'format-time-string', 'forward-char',
'forward-comment', 'forward-line', 'forward-word',
'frame-border-width', 'frame-bottom-divider-width',
'frame-can-run-window-configuration-change-hook', 'frame-char-height',
'frame-char-width', 'frame-face-alist', 'frame-first-window',
'frame-focus', 'frame-font-cache', 'frame-fringe-width', 'frame-list',
'frame-live-p', 'frame-or-buffer-changed-p', 'frame-parameter',
'frame-parameters', 'frame-pixel-height', 'frame-pixel-width',
'frame-pointer-visible-p', 'frame-right-divider-width',
'frame-root-window', 'frame-scroll-bar-height',
'frame-scroll-bar-width', 'frame-selected-window', 'frame-terminal',
'frame-text-cols', 'frame-text-height', 'frame-text-lines',
'frame-text-width', 'frame-total-cols', 'frame-total-lines',
'frame-visible-p', 'framep', 'frexp', 'fringe-bitmaps-at-pos',
'fround', 'fset', 'ftruncate', 'funcall', 'funcall-interactively',
'function-equal', 'functionp', 'gap-position', 'gap-size',
'garbage-collect', 'gc-status', 'generate-new-buffer-name', 'get',
'get-buffer', 'get-buffer-create', 'get-buffer-process',
'get-buffer-window', 'get-byte', 'get-char-property',
'get-char-property-and-overlay', 'get-file-buffer', 'get-file-char',
'get-internal-run-time', 'get-load-suffixes', 'get-pos-property',
'get-process', 'get-screen-color', 'get-text-property',
'get-unicode-property-internal', 'get-unused-category',
'get-unused-iso-final-char', 'getenv-internal', 'gethash',
'gfile-add-watch', 'gfile-rm-watch', 'global-key-binding',
'gnutls-available-p', 'gnutls-boot', 'gnutls-bye', 'gnutls-deinit',
'gnutls-error-fatalp', 'gnutls-error-string', 'gnutls-errorp',
'gnutls-get-initstage', 'gnutls-peer-status',
'gnutls-peer-status-warning-describe', 'goto-char', 'gpm-mouse-start',
'gpm-mouse-stop', 'group-gid', 'group-real-gid',
'handle-save-session', 'handle-switch-frame', 'hash-table-count',
'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'hash-table-weakness', 'iconify-frame', 'identity', 'image-flush',
'image-mask-p', 'image-metadata', 'image-size', 'imagemagick-types',
'imagep', 'indent-to', 'indirect-function', 'indirect-variable',
'init-image-library', 'inotify-add-watch', 'inotify-rm-watch',
'input-pending-p', 'insert', 'insert-and-inherit',
'insert-before-markers', 'insert-before-markers-and-inherit',
'insert-buffer-substring', 'insert-byte', 'insert-char',
'insert-file-contents', 'insert-startup-screen', 'int86',
'integer-or-marker-p', 'integerp', 'interactive-form', 'intern',
'intern-soft', 'internal--track-mouse', 'internal-char-font',
'internal-complete-buffer', 'internal-copy-lisp-face',
'internal-default-process-filter',
'internal-default-process-sentinel', 'internal-describe-syntax-value',
'internal-event-symbol-parse-modifiers',
'internal-face-x-get-resource', 'internal-get-lisp-face-attribute',
'internal-lisp-face-attribute-values', 'internal-lisp-face-empty-p',
'internal-lisp-face-equal-p', 'internal-lisp-face-p',
'internal-make-lisp-face', 'internal-make-var-non-special',
'internal-merge-in-global-face',
'internal-set-alternative-font-family-alist',
'internal-set-alternative-font-registry-alist',
'internal-set-font-selection-order',
'internal-set-lisp-face-attribute',
'internal-set-lisp-face-attribute-from-resource',
'internal-show-cursor', 'internal-show-cursor-p', 'interrupt-process',
'invisible-p', 'invocation-directory', 'invocation-name', 'isnan',
'iso-charset', 'key-binding', 'key-description',
'keyboard-coding-system', 'keymap-parent', 'keymap-prompt', 'keymapp',
'keywordp', 'kill-all-local-variables', 'kill-buffer', 'kill-emacs',
'kill-local-variable', 'kill-process', 'last-nonminibuffer-frame',
'lax-plist-get', 'lax-plist-put', 'ldexp', 'length',
'libxml-parse-html-region', 'libxml-parse-xml-region',
'line-beginning-position', 'line-end-position', 'line-pixel-height',
'list', 'list-fonts', 'list-system-processes', 'listp', 'load',
'load-average', 'local-key-binding', 'local-variable-if-set-p',
'local-variable-p', 'locale-info', 'locate-file-internal',
'lock-buffer', 'log', 'logand', 'logb', 'logior', 'lognot', 'logxor',
'looking-at', 'lookup-image', 'lookup-image-map', 'lookup-key',
'lower-frame', 'lsh', 'macroexpand', 'make-bool-vector',
'make-byte-code', 'make-category-set', 'make-category-table',
'make-char', 'make-char-table', 'make-directory-internal',
'make-frame-invisible', 'make-frame-visible', 'make-hash-table',
'make-indirect-buffer', 'make-keymap', 'make-list',
'make-local-variable', 'make-marker', 'make-network-process',
'make-overlay', 'make-serial-process', 'make-sparse-keymap',
'make-string', 'make-symbol', 'make-symbolic-link', 'make-temp-name',
'make-terminal-frame', 'make-variable-buffer-local',
'make-variable-frame-local', 'make-vector', 'makunbound',
'map-char-table', 'map-charset-chars', 'map-keymap',
'map-keymap-internal', 'mapatoms', 'mapc', 'mapcar', 'mapconcat',
'maphash', 'mark-marker', 'marker-buffer', 'marker-insertion-type',
'marker-position', 'markerp', 'match-beginning', 'match-data',
'match-end', 'matching-paren', 'max', 'max-char', 'md5', 'member',
'memory-info', 'memory-limit', 'memory-use-counts', 'memq', 'memql',
'menu-bar-menu-at-x-y', 'menu-or-popup-active-p',
'menu-or-popup-active-p', 'merge-face-attribute', 'message',
'message-box', 'message-or-box', 'min',
'minibuffer-completion-contents', 'minibuffer-contents',
'minibuffer-contents-no-properties', 'minibuffer-depth',
'minibuffer-prompt', 'minibuffer-prompt-end',
'minibuffer-selected-window', 'minibuffer-window', 'minibufferp',
'minor-mode-key-binding', 'mod', 'modify-category-entry',
'modify-frame-parameters', 'modify-syntax-entry',
'mouse-pixel-position', 'mouse-position', 'move-overlay',
'move-point-visually', 'move-to-column', 'move-to-window-line',
'msdos-downcase-filename', 'msdos-long-file-names', 'msdos-memget',
'msdos-memput', 'msdos-mouse-disable', 'msdos-mouse-enable',
'msdos-mouse-init', 'msdos-mouse-p', 'msdos-remember-default-colors',
'msdos-set-keyboard', 'msdos-set-mouse-buttons',
'multibyte-char-to-unibyte', 'multibyte-string-p', 'narrow-to-region',
'natnump', 'nconc', 'network-interface-info',
'network-interface-list', 'new-fontset', 'newline-cache-check',
'next-char-property-change', 'next-frame', 'next-overlay-change',
'next-property-change', 'next-read-file-uses-dialog-p',
'next-single-char-property-change', 'next-single-property-change',
'next-window', 'nlistp', 'nreverse', 'nth', 'nthcdr', 'null',
'number-or-marker-p', 'number-to-string', 'numberp',
'open-dribble-file', 'open-font', 'open-termscript',
'optimize-char-table', 'other-buffer', 'other-window-for-scrolling',
'overlay-buffer', 'overlay-end', 'overlay-get', 'overlay-lists',
'overlay-properties', 'overlay-put', 'overlay-recenter',
'overlay-start', 'overlayp', 'overlays-at', 'overlays-in',
'parse-partial-sexp', 'play-sound-internal', 'plist-get',
'plist-member', 'plist-put', 'point', 'point-marker', 'point-max',
'point-max-marker', 'point-min', 'point-min-marker',
'pos-visible-in-window-p', 'position-bytes', 'posix-looking-at',
'posix-search-backward', 'posix-search-forward', 'posix-string-match',
'posn-at-point', 'posn-at-x-y', 'preceding-char',
'prefix-numeric-value', 'previous-char-property-change',
'previous-frame', 'previous-overlay-change',
'previous-property-change', 'previous-single-char-property-change',
'previous-single-property-change', 'previous-window', 'prin1',
'prin1-to-string', 'princ', 'print', 'process-attributes',
'process-buffer', 'process-coding-system', 'process-command',
'process-connection', 'process-contact', 'process-datagram-address',
'process-exit-status', 'process-filter', 'process-filter-multibyte-p',
'process-id', 'process-inherit-coding-system-flag', 'process-list',
'process-mark', 'process-name', 'process-plist',
'process-query-on-exit-flag', 'process-running-child-p',
'process-send-eof', 'process-send-region', 'process-send-string',
'process-sentinel', 'process-status', 'process-tty-name',
'process-type', 'processp', 'profiler-cpu-log',
'profiler-cpu-running-p', 'profiler-cpu-start', 'profiler-cpu-stop',
'profiler-memory-log', 'profiler-memory-running-p',
'profiler-memory-start', 'profiler-memory-stop', 'propertize',
'purecopy', 'put', 'put-text-property',
'put-unicode-property-internal', 'puthash', 'query-font',
'query-fontset', 'quit-process', 'raise-frame', 'random', 'rassoc',
'rassq', 're-search-backward', 're-search-forward', 'read',
'read-buffer', 'read-char', 'read-char-exclusive',
'read-coding-system', 'read-command', 'read-event',
'read-from-minibuffer', 'read-from-string', 'read-function',
'read-key-sequence', 'read-key-sequence-vector',
'read-no-blanks-input', 'read-non-nil-coding-system', 'read-string',
'read-variable', 'recent-auto-save-p', 'recent-doskeys',
'recent-keys', 'recenter', 'recursion-depth', 'recursive-edit',
'redirect-debugging-output', 'redirect-frame-focus', 'redisplay',
'redraw-display', 'redraw-frame', 'regexp-quote', 'region-beginning',
'region-end', 'register-ccl-program', 'register-code-conversion-map',
'remhash', 'remove-list-of-text-properties', 'remove-text-properties',
'rename-buffer', 'rename-file', 'replace-match',
'reset-this-command-lengths', 'resize-mini-window-internal',
'restore-buffer-modified-p', 'resume-tty', 'reverse', 'round',
'run-hook-with-args', 'run-hook-with-args-until-failure',
'run-hook-with-args-until-success', 'run-hook-wrapped', 'run-hooks',
'run-window-configuration-change-hook', 'run-window-scroll-functions',
'safe-length', 'scan-lists', 'scan-sexps', 'scroll-down',
'scroll-left', 'scroll-other-window', 'scroll-right', 'scroll-up',
'search-backward', 'search-forward', 'secure-hash', 'select-frame',
'select-window', 'selected-frame', 'selected-window',
'self-insert-command', 'send-string-to-terminal', 'sequencep',
'serial-process-configure', 'set', 'set-buffer',
'set-buffer-auto-saved', 'set-buffer-major-mode',
'set-buffer-modified-p', 'set-buffer-multibyte', 'set-case-table',
'set-category-table', 'set-char-table-extra-slot',
'set-char-table-parent', 'set-char-table-range', 'set-charset-plist',
'set-charset-priority', 'set-coding-system-priority',
'set-cursor-size', 'set-default', 'set-default-file-modes',
'set-default-toplevel-value', 'set-file-acl', 'set-file-modes',
'set-file-selinux-context', 'set-file-times', 'set-fontset-font',
'set-frame-height', 'set-frame-position', 'set-frame-selected-window',
'set-frame-size', 'set-frame-width', 'set-fringe-bitmap-face',
'set-input-interrupt-mode', 'set-input-meta-mode', 'set-input-mode',
'set-keyboard-coding-system-internal', 'set-keymap-parent',
'set-marker', 'set-marker-insertion-type', 'set-match-data',
'set-message-beep', 'set-minibuffer-window',
'set-mouse-pixel-position', 'set-mouse-position',
'set-network-process-option', 'set-output-flow-control',
'set-process-buffer', 'set-process-coding-system',
'set-process-datagram-address', 'set-process-filter',
'set-process-filter-multibyte',
'set-process-inherit-coding-system-flag', 'set-process-plist',
'set-process-query-on-exit-flag', 'set-process-sentinel',
'set-process-window-size', 'set-quit-char',
'set-safe-terminal-coding-system-internal', 'set-screen-color',
'set-standard-case-table', 'set-syntax-table',
'set-terminal-coding-system-internal', 'set-terminal-local-value',
'set-terminal-parameter', 'set-text-properties', 'set-time-zone-rule',
'set-visited-file-modtime', 'set-window-buffer',
'set-window-combination-limit', 'set-window-configuration',
'set-window-dedicated-p', 'set-window-display-table',
'set-window-fringes', 'set-window-hscroll', 'set-window-margins',
'set-window-new-normal', 'set-window-new-pixel',
'set-window-new-total', 'set-window-next-buffers',
'set-window-parameter', 'set-window-point', 'set-window-prev-buffers',
'set-window-redisplay-end-trigger', 'set-window-scroll-bars',
'set-window-start', 'set-window-vscroll', 'setcar', 'setcdr',
'setplist', 'show-face-resources', 'signal', 'signal-process', 'sin',
'single-key-description', 'skip-chars-backward', 'skip-chars-forward',
'skip-syntax-backward', 'skip-syntax-forward', 'sleep-for', 'sort',
'sort-charsets', 'special-variable-p', 'split-char',
'split-window-internal', 'sqrt', 'standard-case-table',
'standard-category-table', 'standard-syntax-table', 'start-kbd-macro',
'start-process', 'stop-process', 'store-kbd-macro-event', 'string',
'string-as-multibyte', 'string-as-unibyte', 'string-bytes',
'string-collate-equalp', 'string-collate-lessp', 'string-equal',
'string-lessp', 'string-make-multibyte', 'string-make-unibyte',
'string-match', 'string-to-char', 'string-to-multibyte',
'string-to-number', 'string-to-syntax', 'string-to-unibyte',
'string-width', 'stringp', 'subr-name', 'subrp',
'subst-char-in-region', 'substitute-command-keys',
'substitute-in-file-name', 'substring', 'substring-no-properties',
'suspend-emacs', 'suspend-tty', 'suspicious-object', 'sxhash',
'symbol-function', 'symbol-name', 'symbol-plist', 'symbol-value',
'symbolp', 'syntax-table', 'syntax-table-p', 'system-groups',
'system-move-file-to-trash', 'system-name', 'system-users', 'tan',
'terminal-coding-system', 'terminal-list', 'terminal-live-p',
'terminal-local-value', 'terminal-name', 'terminal-parameter',
'terminal-parameters', 'terpri', 'test-completion',
'text-char-description', 'text-properties-at', 'text-property-any',
'text-property-not-all', 'this-command-keys',
'this-command-keys-vector', 'this-single-command-keys',
'this-single-command-raw-keys', 'time-add', 'time-less-p',
'time-subtract', 'tool-bar-get-system-style', 'tool-bar-height',
'tool-bar-pixel-width', 'top-level', 'trace-redisplay',
'trace-to-stderr', 'translate-region-internal', 'transpose-regions',
'truncate', 'try-completion', 'tty-display-color-cells',
'tty-display-color-p', 'tty-no-underline',
'tty-suppress-bold-inverse-default-colors', 'tty-top-frame',
'tty-type', 'type-of', 'undo-boundary', 'unencodable-char-position',
'unhandled-file-name-directory', 'unibyte-char-to-multibyte',
'unibyte-string', 'unicode-property-table-internal', 'unify-charset',
'unintern', 'unix-sync', 'unlock-buffer', 'upcase', 'upcase-initials',
'upcase-initials-region', 'upcase-region', 'upcase-word',
'use-global-map', 'use-local-map', 'user-full-name',
'user-login-name', 'user-real-login-name', 'user-real-uid',
'user-uid', 'variable-binding-locus', 'vconcat', 'vector',
'vector-or-char-table-p', 'vectorp', 'verify-visited-file-modtime',
'vertical-motion', 'visible-frame-list', 'visited-file-modtime',
'w16-get-clipboard-data', 'w16-selection-exists-p',
'w16-set-clipboard-data', 'w32-battery-status',
'w32-default-color-map', 'w32-define-rgb-color',
'w32-display-monitor-attributes-list', 'w32-frame-menu-bar-size',
'w32-frame-rect', 'w32-get-clipboard-data',
'w32-get-codepage-charset', 'w32-get-console-codepage',
'w32-get-console-output-codepage', 'w32-get-current-locale-id',
'w32-get-default-locale-id', 'w32-get-keyboard-layout',
'w32-get-locale-info', 'w32-get-valid-codepages',
'w32-get-valid-keyboard-layouts', 'w32-get-valid-locale-ids',
'w32-has-winsock', 'w32-long-file-name', 'w32-reconstruct-hot-key',
'w32-register-hot-key', 'w32-registered-hot-keys',
'w32-selection-exists-p', 'w32-send-sys-command',
'w32-set-clipboard-data', 'w32-set-console-codepage',
'w32-set-console-output-codepage', 'w32-set-current-locale',
'w32-set-keyboard-layout', 'w32-set-process-priority',
'w32-shell-execute', 'w32-short-file-name', 'w32-toggle-lock-key',
'w32-unload-winsock', 'w32-unregister-hot-key', 'w32-window-exists-p',
'w32notify-add-watch', 'w32notify-rm-watch',
'waiting-for-user-input-p', 'where-is-internal', 'widen',
'widget-apply', 'widget-get', 'widget-put',
'window-absolute-pixel-edges', 'window-at', 'window-body-height',
'window-body-width', 'window-bottom-divider-width', 'window-buffer',
'window-combination-limit', 'window-configuration-frame',
'window-configuration-p', 'window-dedicated-p',
'window-display-table', 'window-edges', 'window-end', 'window-frame',
'window-fringes', 'window-header-line-height', 'window-hscroll',
'window-inside-absolute-pixel-edges', 'window-inside-edges',
'window-inside-pixel-edges', 'window-left-child',
'window-left-column', 'window-line-height', 'window-list',
'window-list-1', 'window-live-p', 'window-margins',
'window-minibuffer-p', 'window-mode-line-height', 'window-new-normal',
'window-new-pixel', 'window-new-total', 'window-next-buffers',
'window-next-sibling', 'window-normal-size', 'window-old-point',
'window-parameter', 'window-parameters', 'window-parent',
'window-pixel-edges', 'window-pixel-height', 'window-pixel-left',
'window-pixel-top', 'window-pixel-width', 'window-point',
'window-prev-buffers', 'window-prev-sibling',
'window-redisplay-end-trigger', 'window-resize-apply',
'window-resize-apply-total', 'window-right-divider-width',
'window-scroll-bar-height', 'window-scroll-bar-width',
'window-scroll-bars', 'window-start', 'window-system',
'window-text-height', 'window-text-pixel-size', 'window-text-width',
'window-top-child', 'window-top-line', 'window-total-height',
'window-total-width', 'window-use-time', 'window-valid-p',
'window-vscroll', 'windowp', 'write-char', 'write-region',
'x-backspace-delete-keys-p', 'x-change-window-property',
'x-change-window-property', 'x-close-connection',
'x-close-connection', 'x-create-frame', 'x-create-frame',
'x-delete-window-property', 'x-delete-window-property',
'x-disown-selection-internal', 'x-display-backing-store',
'x-display-backing-store', 'x-display-color-cells',
'x-display-color-cells', 'x-display-grayscale-p',
'x-display-grayscale-p', 'x-display-list', 'x-display-list',
'x-display-mm-height', 'x-display-mm-height', 'x-display-mm-width',
'x-display-mm-width', 'x-display-monitor-attributes-list',
'x-display-pixel-height', 'x-display-pixel-height',
'x-display-pixel-width', 'x-display-pixel-width', 'x-display-planes',
'x-display-planes', 'x-display-save-under', 'x-display-save-under',
'x-display-screens', 'x-display-screens', 'x-display-visual-class',
'x-display-visual-class', 'x-family-fonts', 'x-file-dialog',
'x-file-dialog', 'x-file-dialog', 'x-focus-frame', 'x-frame-geometry',
'x-frame-geometry', 'x-get-atom-name', 'x-get-resource',
'x-get-selection-internal', 'x-hide-tip', 'x-hide-tip',
'x-list-fonts', 'x-load-color-file', 'x-menu-bar-open-internal',
'x-menu-bar-open-internal', 'x-open-connection', 'x-open-connection',
'x-own-selection-internal', 'x-parse-geometry', 'x-popup-dialog',
'x-popup-menu', 'x-register-dnd-atom', 'x-select-font',
'x-select-font', 'x-selection-exists-p', 'x-selection-owner-p',
'x-send-client-message', 'x-server-max-request-size',
'x-server-max-request-size', 'x-server-vendor', 'x-server-vendor',
'x-server-version', 'x-server-version', 'x-show-tip', 'x-show-tip',
'x-synchronize', 'x-synchronize', 'x-uses-old-gtk-dialog',
'x-window-property', 'x-window-property', 'x-wm-set-size-hint',
'xw-color-defined-p', 'xw-color-defined-p', 'xw-color-values',
'xw-color-values', 'xw-display-color-p', 'xw-display-color-p',
'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region',
'forward-point',
))
builtin_function_highlighted = set((
'defvaralias', 'provide', 'require',
'with-no-warnings', 'define-widget', 'with-electric-help',
'throw', 'defalias', 'featurep'
))
lambda_list_keywords = set((
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
))
error_keywords = set((
'cl-assert', 'cl-check-type', 'error', 'signal',
'user-error', 'warn',
))

    def get_tokens_unprocessed(self, text):
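        # Refine the raw stream: plain symbols leave the grammar as
        # Name.Variable and are re-tagged here against the word sets
        # above, e.g. setq -> Keyword, mapcar -> Name.Function,
        # &optional -> Keyword.Pseudo; unknown names stay Name.Variable.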
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in EmacsLispLexer.builtin_function:
yield index, Name.Function, value
continue
if value in EmacsLispLexer.special_forms:
yield index, Keyword, value
continue
if value in EmacsLispLexer.error_keywords:
yield index, Name.Exception, value
continue
if value in EmacsLispLexer.builtin_function_highlighted:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.macros:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.lambda_list_keywords:
yield index, Keyword.Pseudo, value
continue
yield index, token, value

    tokens = {
'root': [
default('body'),
],
'body': [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# strings and characters
(r'"', String, 'string'),
(r'\?([^\\]|\\.)', String.Char),
# quoting
(r":" + symbol, Name.Builtin),
(r"::" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
terminated, Number.Float),
# vectors
(r'\[|\]', Punctuation),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read syntax for char tables
(r'#\^\^?', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number.Bin),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+r[+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.|:)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'#\(', Operator, 'body'),
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
'string': [
(r'[^"\\`]+', String),
(r'`%s\'' % symbol, String.Symbol),
(r'`', String),
(r'\\.', String),
(r'\\\n', String),
(r'"', String, '#pop'),
],
}


class ShenLexer(RegexLexer):
"""
Lexer for `Shen <http://shenlanguage.org/>`_ source code.
.. versionadded:: 2.1
"""
name = 'Shen'
aliases = ['shen']
filenames = ['*.shen']
mimetypes = ['text/x-shen', 'application/x-shen']
DECLARATIONS = (
'datatype', 'define', 'defmacro', 'defprolog', 'defcc',
'synonyms', 'declare', 'package', 'type', 'function',
)
SPECIAL_FORMS = (
'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze',
'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output',
'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p',
'@s', '@v',
)
BUILTINS = (
'==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address',
'<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin',
'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd',
'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy',
'difference', 'element?', 'empty?', 'enable-type-theory',
'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external',
'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym',
'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical',
'implementation', 'in', 'include', 'include-all-but', 'inferences',
'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill',
'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand',
'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null',
'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port',
'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile',
'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file',
'read-file-as-bytelist', 'read-file-as-string', 'read-from-string',
'release', 'remove', 'return', 'reverse', 'run', 'save', 'set',
'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput',
'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?',
'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track',
'tuple?', 'undefmacro', 'unify', 'unify!', 'union', 'unprofile',
'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?',
'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file',
'y-or-n?',
)
BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>')
MAPPINGS = dict((s, Keyword) for s in DECLARATIONS)
MAPPINGS.update((s, Name.Builtin) for s in BUILTINS)
MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS)
valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]'
valid_name = '%s+' % valid_symbol_chars
symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars
variable = r'[A-Z]%s*' % valid_symbol_chars
tokens = {
'string': [
(r'"', String, '#pop'),
(r'c#\d{1,3};', String.Escape),
(r'~[ARS%]', String.Interpol),
(r'(?s).', String),
],
'root': [
(r'(?s)\\\*.*?\*\\', Comment.Multiline), # \* ... *\
(r'\\\\.*', Comment.Single), # \\ ...
(r'\s+', Text),
(r'_{5,}', Punctuation),
(r'={5,}', Punctuation),
(r'(;|:=|\||--?>|<--?)', Punctuation),
(r'(:-|:|\{|\})', Literal),
(r'[+-]*\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'[+-]*\d+', Number.Integer),
(r'"', String, 'string'),
(variable, Name.Variable),
(r'(true|false|<>|\[\])', Keyword.Pseudo),
(symbol_name, Literal),
(r'(\[|\]|\(|\))', Punctuation),
],
}
def get_tokens_unprocessed(self, text):
tokens = RegexLexer.get_tokens_unprocessed(self, text)
tokens = self._process_symbols(tokens)
tokens = self._process_declarations(tokens)
return tokens
def _relevant(self, token):
return token not in (Text, Comment.Single, Comment.Multiline)
def _process_declarations(self, tokens):
opening_paren = False
for index, token, value in tokens:
yield index, token, value
if self._relevant(token):
if opening_paren and token == Keyword and value in self.DECLARATIONS:
declaration = value
for index, token, value in \
self._process_declaration(declaration, tokens):
yield index, token, value
opening_paren = value == '(' and token == Punctuation
def _process_symbols(self, tokens):
opening_paren = False
for index, token, value in tokens:
if opening_paren and token in (Literal, Name.Variable):
token = self.MAPPINGS.get(value, Name.Function)
elif token == Literal and value in self.BUILTINS_ANYWHERE:
token = Name.Builtin
opening_paren = value == '(' and token == Punctuation
yield index, token, value
def _process_declaration(self, declaration, tokens):
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if declaration == 'datatype':
prev_was_colon = False
token = Keyword.Type if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if prev_was_colon and token == Literal:
token = Keyword.Type
yield index, token, value
if self._relevant(token):
prev_was_colon = token == Literal and value == ':'
elif declaration == 'package':
token = Name.Namespace if token == Literal else token
yield index, token, value
elif declaration == 'define':
token = Name.Function if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if value == '{' and token == Literal:
yield index, Punctuation, value
for index, token, value in self._process_signature(tokens):
yield index, token, value
else:
yield index, token, value
else:
token = Name.Function if token == Literal else token
yield index, token, value
return
def _process_signature(self, tokens):
for index, token, value in tokens:
if token == Literal and value == '}':
yield index, Punctuation, value
return
elif token in (Literal, Name.Function):
token = Name.Variable if value.istitle() else Keyword.Type
yield index, token, value
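# --- Illustrative sketch, not part of the original Pygments source. ---
# ShenLexer post-processes the raw token stream twice: _process_symbols
# promotes the symbol right after '(' via MAPPINGS, and
# _process_declarations walks declaration forms such as (define ...) to
# type their heads. The helper name below is made up for illustration.
def _demo_shen_tokens(code="(define double X -> (+ X X))"):
    """Print tokens for a small Shen definition; 'double' lexes as Name.Function."""
    for _, tok, val in ShenLexer().get_tokens_unprocessed(code):
        if val.strip():
            print(tok, repr(val))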
class CPSALexer(SchemeLexer):
"""
A CPSA lexer based on the CPSA language as of version 2.2.12
.. versionadded:: 2.1
"""
name = 'CPSA'
aliases = ['cpsa']
filenames = ['*.cpsa']
mimetypes = []
    # list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
_keywords = (
'herald', 'vars', 'defmacro', 'include', 'defprotocol', 'defrole',
'defskeleton', 'defstrand', 'deflistener', 'non-orig', 'uniq-orig',
'pen-non-orig', 'precedes', 'trace', 'send', 'recv', 'name', 'text',
'skey', 'akey', 'data', 'mesg',
)
_builtins = (
'cat', 'enc', 'hash', 'privk', 'pubk', 'invk', 'ltk', 'gen', 'exp',
)
# valid names for identifiers
    # a name just cannot consist entirely of numbers,
    # but this approximation should be good enough for now
valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
# (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
(words(_keywords, suffix=r'\b'), Keyword),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
(words(_builtins, prefix=r'(?<=\()', suffix=r'\b'), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
}
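# --- Illustrative sketch, not part of the original Pygments source. ---
# CPSALexer only swaps in CPSA-specific word lists; tokenization rules
# otherwise mirror the Scheme parent. A hedged usage example (the helper
# name is made up):
def _demo_cpsa_highlight(code="(defprotocol basic basic (defrole init))"):
    """Render a CPSA snippet with ANSI colors via pygments.highlight."""
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    return highlight(code, CPSALexer(), TerminalFormatter())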
class XtlangLexer(RegexLexer):
"""An xtlang lexer for the `Extempore programming environment
<http://extempore.moso.com.au>`_.
This is a mixture of Scheme and xtlang, really. Keyword lists are
taken from the Extempore Emacs mode
(https://github.com/extemporelang/extempore-emacs-mode)
.. versionadded:: 2.2
"""
name = 'xtlang'
aliases = ['extempore']
filenames = ['*.xtm']
mimetypes = []
common_keywords = (
'lambda', 'define', 'if', 'else', 'cond', 'and',
'or', 'let', 'begin', 'set!', 'map', 'for-each',
)
scheme_keywords = (
'do', 'delay', 'quasiquote', 'unquote', 'unquote-splicing', 'eval',
'case', 'let*', 'letrec', 'quote',
)
xtlang_bind_keywords = (
'bind-func', 'bind-val', 'bind-lib', 'bind-type', 'bind-alias',
'bind-poly', 'bind-dylib', 'bind-lib-func', 'bind-lib-val',
)
xtlang_keywords = (
'letz', 'memzone', 'cast', 'convert', 'dotimes', 'doloop',
)
common_functions = (
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', '%', 'abs', 'acos',
'angle', 'append', 'apply', 'asin', 'assoc', 'assq', 'assv',
'atan', 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar',
'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar',
'caddar', 'cadddr', 'caddr', 'cadr', 'car', 'cdaaar',
'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr',
'cddr', 'cdr', 'ceiling', 'cons', 'cos', 'floor', 'length',
'list', 'log', 'max', 'member', 'min', 'modulo', 'not',
'reverse', 'round', 'sin', 'sqrt', 'substring', 'tan',
'println', 'random', 'null?', 'callback', 'now',
)
scheme_functions = (
'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc',
'char->integer', 'char-alphabetic?', 'char-ci<=?', 'char-ci<?',
'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?',
'char-upcase', 'char-upper-case?', 'char-whitespace?',
'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?',
'close-input-port', 'close-output-port', 'complex?',
'current-input-port', 'current-output-port', 'denominator',
'display', 'dynamic-wind', 'eof-object?', 'eq?', 'equal?',
'eqv?', 'even?', 'exact->inexact', 'exact?', 'exp', 'expt',
'force', 'gcd', 'imag-part', 'inexact->exact', 'inexact?',
'input-port?', 'integer->char', 'integer?',
'interaction-environment', 'lcm', 'list->string',
'list->vector', 'list-ref', 'list-tail', 'list?', 'load',
'magnitude', 'make-polar', 'make-rectangular', 'make-string',
'make-vector', 'memq', 'memv', 'negative?', 'newline',
'null-environment', 'number->string', 'number?',
'numerator', 'odd?', 'open-input-file', 'open-output-file',
'output-port?', 'pair?', 'peek-char', 'port?', 'positive?',
'procedure?', 'quotient', 'rational?', 'rationalize', 'read',
'read-char', 'real-part', 'real?',
'remainder', 'scheme-report-environment', 'set-car!', 'set-cdr!',
'string', 'string->list', 'string->number', 'string->symbol',
'string-append', 'string-ci<=?', 'string-ci<?', 'string-ci=?',
'string-ci>=?', 'string-ci>?', 'string-copy', 'string-fill!',
'string-length', 'string-ref', 'string-set!', 'string<=?',
'string<?', 'string=?', 'string>=?', 'string>?', 'string?',
'symbol->string', 'symbol?', 'transcript-off', 'transcript-on',
'truncate', 'values', 'vector', 'vector->list', 'vector-fill!',
'vector-length', 'vector?',
'with-input-from-file', 'with-output-to-file', 'write',
'write-char', 'zero?',
)
xtlang_functions = (
'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!',
'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free',
'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor',
'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now',
'pset!', 'pref-ptr', 'vset!', 'vref', 'aset!', 'aref', 'aref-ptr',
'tset!', 'tref', 'tref-ptr', 'salloc', 'halloc', 'zalloc', 'alloc',
'schedule', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sqrt', 'expt', 'floor', 'ceiling', 'truncate', 'round',
'llvm_printf', 'push_zone', 'pop_zone', 'memzone', 'callback',
'llvm_sprintf', 'make-array', 'array-set!', 'array-ref',
'array-ref-ptr', 'pointer-set!', 'pointer-ref', 'pointer-ref-ptr',
'stack-alloc', 'heap-alloc', 'zone-alloc', 'make-tuple', 'tuple-set!',
'tuple-ref', 'tuple-ref-ptr', 'closure-set!', 'closure-ref', 'pref',
'pdref', 'impc_null', 'bitcast', 'void', 'ifret', 'ret->', 'clrun->',
'make-env-zone', 'make-env', '<>', 'dtof', 'ftod', 'i1tof',
'i1tod', 'i1toi8', 'i1toi32', 'i1toi64', 'i8tof', 'i8tod',
'i8toi1', 'i8toi32', 'i8toi64', 'i32tof', 'i32tod', 'i32toi1',
'i32toi8', 'i32toi64', 'i64tof', 'i64tod', 'i64toi1',
'i64toi8', 'i64toi32',
)
# valid names for Scheme identifiers (names cannot consist fully
# of numbers, but this should be good enough for now)
valid_scheme_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
# valid characters in xtlang names & types
valid_xtlang_name = r'[\w.!-]+'
valid_xtlang_type = r'[]{}[\w<>,*/|!-]+'
tokens = {
# keep track of when we're exiting the xtlang form
'xtlang': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(?<=bind-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-val\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-type\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-alias\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-poly\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-dylib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-val\s)' + valid_xtlang_name, Name.Function),
# type annotations
(r':' + valid_xtlang_type, Keyword.Type),
# types
(r'(<' + valid_xtlang_type + r'>|\|' + valid_xtlang_type + r'\||/' +
valid_xtlang_type + r'/|' + valid_xtlang_type + r'\*)\**',
Keyword.Type),
# keywords
(words(xtlang_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(xtlang_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_xtlang_name, Name.Variable),
],
'scheme': [
# quoted symbols
(r"'" + valid_scheme_name, String.Symbol),
# char literals
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# keywords
(words(scheme_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(scheme_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_scheme_name, Name.Variable),
],
# common to both xtlang and Scheme
'common': [
# comments
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# binary/oct/hex literals
(r'(#b|#o|#x)[\d.]+', Number),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
# true/false constants
(r'(#t|#f)', Name.Constant),
# keywords
(words(common_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(common_functions, prefix=r'(?<=\()'), Name.Function),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
'root': [
# go into xtlang mode
(words(xtlang_bind_keywords, prefix=r'(?<=\()', suffix=r'\b'),
Keyword, 'xtlang'),
include('scheme')
],
}
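# --- Illustrative sketch, not part of the original Pygments source. ---
# The 'root' state above only watches for the bind-* forms; everything
# else lexes as Scheme. A bind form pushes the 'xtlang' state, which then
# pushes on '(' and pops on ')', so xtlang mode ends exactly when the
# form closes. The helper name below is made up for illustration.
def _demo_xtlang_tokens(code="(bind-func my_add (lambda (a:i64) (+ a 1)))"):
    """Show that bind-func switches the lexer into the xtlang state."""
    for _, tok, val in XtlangLexer().get_tokens_unprocessed(code):
        if val.strip():
            # a:i64 should yield a Keyword.Type token for the annotation
            print(tok, repr(val))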
class FennelLexer(RegexLexer):
"""A lexer for the `Fennel programming language <https://fennel-lang.org>`_.
Fennel compiles to Lua, so all the Lua builtins are recognized as well
as the special forms that are particular to the Fennel compiler.
.. versionadded:: 2.3
"""
name = 'Fennel'
aliases = ['fennel', 'fnl']
filenames = ['*.fnl']
# these two lists are taken from fennel-mode.el:
# https://gitlab.com/technomancy/fennel-mode
# this list is current as of Fennel version 0.1.0.
special_forms = (
u'require-macros', u'eval-compiler',
u'do', u'values', u'if', u'when', u'each', u'for', u'fn', u'lambda',
u'λ', u'set', u'global', u'var', u'local', u'let', u'tset', u'doto',
u'set-forcibly!', u'defn', u'partial', u'while', u'or', u'and', u'true',
u'false', u'nil', u'.', u'+', u'..', u'^', u'-', u'*', u'%', u'/', u'>',
u'<', u'>=', u'<=', u'=', u'~=', u'#', u'...', u':', u'->', u'->>',
)
# Might be nicer to use the list from _lua_builtins.py but it's unclear how?
builtins = (
u'_G', u'_VERSION', u'arg', u'assert', u'bit32', u'collectgarbage',
u'coroutine', u'debug', u'dofile', u'error', u'getfenv',
u'getmetatable', u'io', u'ipairs', u'load', u'loadfile', u'loadstring',
u'math', u'next', u'os', u'package', u'pairs', u'pcall', u'print',
u'rawequal', u'rawget', u'rawlen', u'rawset', u'require', u'select',
u'setfenv', u'setmetatable', u'string', u'table', u'tonumber',
u'tostring', u'type', u'unpack', u'xpcall'
)
# based on the scheme definition, but disallowing leading digits and commas
valid_name = r'[a-zA-Z_!$%&*+/:<=>?@^~|-][\w!$%&*+/:<=>?@^~|\.-]*'
tokens = {
'root': [
# the only comment form is a semicolon; goes to the end of the line
(r';.*$', Comment.Single),
(r'[,\s]+', Text),
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
# these are technically strings, but it's worth visually
# distinguishing them because their intent is different
# from regular strings.
(r':' + valid_name, String.Symbol),
# special forms are keywords
(words(special_forms, suffix=' '), Keyword),
# lua standard library are builtins
(words(builtins, suffix=' '), Name.Builtin),
# special-case the vararg symbol
(r'\.\.\.', Name.Variable),
# regular identifiers
(valid_name, Name.Variable),
# all your normal paired delimiters for your programming enjoyment
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
(r'(\{|\})', Punctuation),
]
}
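# --- Illustrative sketch, not part of the original Pygments source. ---
# With the rules above, Fennel special forms lex as Keyword and Lua
# standard-library names as Name.Builtin. The helper name is made up.
def _demo_fennel_tokens(code='(fn greet [name] (print (.. "hi " name)))'):
    """Print Fennel tokens for a tiny function definition."""
    for _, tok, val in FennelLexer().get_tokens_unprocessed(code):
        if val.strip():
            print(tok, repr(val))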
| 53.288419 | 89 | 0.57577 |
4a1ce168c298d2173270cc1f9401d8ad6b2c7eb2 | 32 | py | Python | src/lavatory/__init__.py | sijis/lavatory | 4560adc45539cde061c9077205a0b7e0c0b353d6 | ["Apache-2.0"] | 25 | 2017-10-30T21:22:32.000Z | 2021-05-21T19:44:29.000Z | src/lavatory/__init__.py | sijis/lavatory | 4560adc45539cde061c9077205a0b7e0c0b353d6 | ["Apache-2.0"] | 26 | 2017-10-30T17:37:12.000Z | 2020-09-24T10:54:24.000Z | src/lavatory/__init__.py | gogoair/artifactorypurge | 37eba1c403b0e6707748d0538e01d6efb23c6289 | ["Apache-2.0"] | 10 | 2018-08-01T19:48:28.000Z | 2020-08-23T18:02:41.000Z |
from . import exceptions, utils
| 16 | 31 | 0.78125 |
4a1ce1eece6896baee2e013279b4cc4cf4526fc2 | 1,163 | py | Python | fileprocessor/abstracts.py | ahlusar1989/fileprocessor | ad8557e86b054ebc14d9b2364ddfa9d7104e2391 | ["MIT"] | 1 | 2021-06-27T22:49:02.000Z | 2021-06-27T22:49:02.000Z | fileprocessor/abstracts.py | DonaldWhyte/fileprocessor | 5ccbbae87387b5683b9e5fc5fe559a0d3e50b9c1 | ["MIT"] | null | null | null | fileprocessor/abstracts.py | DonaldWhyte/fileprocessor | 5ccbbae87387b5683b9e5fc5fe559a0d3e50b9c1 | ["MIT"] | null | null | null |
"""Contains the abstract classes for the major components of the library."""
class Searcher:
"""Searches directory for files to process."""
def search(self, rootDirectory):
"""Search directory for files and return list of absolute paths to those files.
Arguments:
rootDirectory -- Root directory to start searching in.
"""
raise NotImplementedError
class Filterer:
"""Filters lists of files based on some criteria."""
def filter(self, fileListing):
"""Filter list of files and return a NEW list containing only the files that passed the filter.
NOTE: This should not alter the original list given.
Arguments:
fileListing -- A list containing the absolute paths of the
files to filter."""
raise NotImplementedError
class Extractor:
"""Extracts data from files."""
def extract(self, filename):
"""Extract data from the file with the given filename.
What this returns depends on what data is to be extracted.
This is determined by the concrete subclasses of Extractor.
Arguments:
filename -- Name of the file to extract data from
"""
raise NotImplementedError
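# --- Illustrative sketch, not part of the original file. ---
# A minimal concrete Searcher built on os.walk, to show how the abstract
# contract above is meant to be satisfied; the class name is made up.
import os
class RecursiveSearcher(Searcher):
    """Search a directory tree and return absolute paths of all files."""
    def search(self, rootDirectory):
        found = []
        for dirpath, _, filenames in os.walk(rootDirectory):
            # os.walk yields each directory once; collect its files
            found.extend(os.path.join(os.path.abspath(dirpath), name)
                         for name in filenames)
        return found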
| 26.431818 | 98 | 0.708512 |
4a1ce2260c0ea16e78a875f08825faf5da422aa8 | 8,252 | py | Python | tests/core/full_node/test_performance.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | ["Apache-2.0"] | null | null | null | tests/core/full_node/test_performance.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | ["Apache-2.0"] | 1 | 2022-03-29T03:05:10.000Z | 2022-03-29T03:05:10.000Z | tests/core/full_node/test_performance.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | ["Apache-2.0"] | null | null | null |
# flake8: noqa: F811, F401
import asyncio
import dataclasses
import logging
import random
import time
from typing import Dict
from clvm.casts import int_to_bytes
import pytest
import pytest_asyncio
import cProfile
from chia.consensus.block_record import BlockRecord
from chia.full_node.full_node_api import FullNodeAPI
from chia.protocols import full_node_protocol as fnp
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.ints import uint64
from tests.wallet_tools import WalletTool
from tests.connection_utils import add_dummy_connection
from tests.core.full_node.stores.test_coin_store import get_future_reward_coins
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
async def get_block_path(full_node: FullNodeAPI):
blocks_list = [await full_node.full_node.blockchain.get_full_peak()]
assert blocks_list[0] is not None
while blocks_list[0].height != 0:
b = await full_node.full_node.block_store.get_full_block(blocks_list[0].prev_header_hash)
assert b is not None
blocks_list.insert(0, b)
return blocks_list
@pytest_asyncio.fixture(scope="module")
async def wallet_nodes_perf(bt):
async_gen = setup_simulators_and_wallets(1, 1, {"MEMPOOL_BLOCK_BUFFER": 1, "MAX_BLOCK_COST_CLVM": 11000000000})
nodes, wallets = await async_gen.__anext__()
full_node_1 = nodes[0]
server_1 = full_node_1.full_node.server
wallet_a = bt.get_pool_wallet_tool()
wallet_receiver = WalletTool(full_node_1.full_node.constants)
yield full_node_1, server_1, wallet_a, wallet_receiver
async for _ in async_gen:
yield _
class TestPerformance:
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_full_block_performance(self, bt, wallet_nodes_perf, self_hostname):
full_node_1, server_1, wallet_a, wallet_receiver = wallet_nodes_perf
blocks = await full_node_1.get_all_full_blocks()
full_node_1.full_node.mempool_manager.limit_factor = 1
wallet_ph = wallet_a.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=wallet_ph,
pool_reward_puzzle_hash=wallet_ph,
)
for block in blocks:
await full_node_1.full_node.respond_block(fnp.RespondBlock(block))
start_height = (
full_node_1.full_node.blockchain.get_peak().height
if full_node_1.full_node.blockchain.get_peak() is not None
else -1
)
incoming_queue, node_id = await add_dummy_connection(server_1, self_hostname, 12312)
fake_peer = server_1.all_connections[node_id]
# Mempool has capacity of 100, make 110 unspents that we can use
puzzle_hashes = []
# Makes a bunch of coins
for i in range(20):
conditions_dict: Dict = {ConditionOpcode.CREATE_COIN: []}
# This should fit in one transaction
for _ in range(100):
receiver_puzzlehash = wallet_receiver.get_new_puzzlehash()
puzzle_hashes.append(receiver_puzzlehash)
output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [receiver_puzzlehash, int_to_bytes(100000000)])
conditions_dict[ConditionOpcode.CREATE_COIN].append(output)
spend_bundle = wallet_a.generate_signed_transaction(
100,
puzzle_hashes[0],
get_future_reward_coins(blocks[1 + i])[0],
condition_dic=conditions_dict,
)
assert spend_bundle is not None
respond_transaction_2 = fnp.RespondTransaction(spend_bundle)
await full_node_1.respond_transaction(respond_transaction_2, fake_peer)
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-1]), fake_peer)
await time_out_assert(10, node_height_at_least, True, full_node_1, start_height + 20)
spend_bundles = []
spend_bundle_ids = []
# Fill mempool
for puzzle_hash in puzzle_hashes[1:]:
coin_record = (await full_node_1.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash))[0]
receiver_puzzlehash = wallet_receiver.get_new_puzzlehash()
if puzzle_hash == puzzle_hashes[-1]:
fee = 100000000 # 100 million (20 fee per cost)
else:
fee = random.randint(1, 100000000)
spend_bundle = wallet_receiver.generate_signed_transaction(
uint64(500), receiver_puzzlehash, coin_record.coin, fee=fee
)
spend_bundles.append(spend_bundle)
spend_bundle_ids.append(spend_bundle.get_hash())
pr = cProfile.Profile()
pr.enable()
start = time.time()
num_tx: int = 0
for spend_bundle, spend_bundle_id in zip(spend_bundles, spend_bundle_ids):
num_tx += 1
respond_transaction = fnp.RespondTransaction(spend_bundle)
await full_node_1.respond_transaction(respond_transaction, fake_peer)
request = fnp.RequestTransaction(spend_bundle_id)
req = await full_node_1.request_transaction(request)
if req is None:
break
end = time.time()
log.warning(f"Num Tx: {num_tx}")
log.warning(f"Time for mempool: {end - start:f}")
assert end - start < 0.001
pr.create_stats()
pr.dump_stats("./mempool-benchmark.pstats")
# Create an unfinished block
peak = full_node_1.full_node.blockchain.get_peak()
assert peak is not None
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = full_node_1.full_node.blockchain.block_record(curr.prev_hash)
mempool_bundle = await full_node_1.full_node.mempool_manager.create_bundle_from_mempool(curr.header_hash)
if mempool_bundle is None:
spend_bundle = None
else:
spend_bundle = mempool_bundle[0]
current_blocks = await full_node_1.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
1,
transaction_data=spend_bundle,
block_list_input=current_blocks,
guarantee_transaction_block=True,
)
block = blocks[-1]
unfinished = UnfinishedBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
pr = cProfile.Profile()
pr.enable()
start = time.time()
res = await full_node_1.respond_unfinished_block(fnp.RespondUnfinishedBlock(unfinished), fake_peer)
end = time.time()
log.warning(f"Res: {res}")
log.warning(f"Time for unfinished: {end - start:f}")
assert end - start < 0.1
pr.create_stats()
pr.dump_stats("./unfinished-benchmark.pstats")
pr = cProfile.Profile()
pr.enable()
start = time.time()
# No transactions generator, the full node already cached it from the unfinished block
block_small = dataclasses.replace(block, transactions_generator=None)
res = await full_node_1.full_node.respond_block(fnp.RespondBlock(block_small))
end = time.time()
log.warning(f"Res: {res}")
log.warning(f"Time for full block: {end - start:f}")
assert end - start < 0.1
pr.create_stats()
pr.dump_stats("./full-block-benchmark.pstats")
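# --- Illustrative sketch, not part of the original test. ---
# The three .pstats dumps written above can be inspected with the stdlib
# pstats module; the helper name and its defaults are made up.
def summarize_profile(path="./full-block-benchmark.pstats", top=20):
    """Print the `top` entries of a cProfile dump, sorted by cumulative time."""
    import pstats
    pstats.Stats(path).sort_stats("cumulative").print_stats(top)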
| 38.203704 | 120 | 0.672322 |
4a1ce2738966ed2821b5da240e59b7090ca221c0 | 703 | py | Python | test/test_db_matches_ui.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | ["Apache-2.0"] | null | null | null | test/test_db_matches_ui.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | ["Apache-2.0"] | null | null | null | test/test_db_matches_ui.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | ["Apache-2.0"] | null | null | null |
from model.group import Group
from model.contact import Contact
def test_group_list(app, db):
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
def test_contact_list(app, db):
ui_list = app.contact.get_contact_list()
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip())
db_list = map(clean, db.get_contact_list())
assert sorted(ui_list, key=Contact.id_or_max) == sorted(db_list, key=Contact.id_or_max)
| 41.352941 | 109 | 0.728307 |
4a1ce2bb77f0d8bc0ba2f647cae20a4d486ca579 | 1,480 | py | Python | 04. Variables and Names/ex4.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null | 04. Variables and Names/ex4.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null | 04. Variables and Names/ex4.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null |
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passenger_per_car = passengers / cars_driven
print("There are", cars, "cars available.")
print("There are only", drivers, "drivers available.")
print("There will be", cars_not_driven, "empty cars today.")
print("We can transport", carpool_capacity, "people today.")
print("We have", passengers, "to carpool today.")
print("We need to put about", average_passenger_per_car, "in each car.")
# Study Drills
# 1. I used 4.0 for space_in_a_car, but is that necessary? What happens if it’s just 4?
# Yes, it still works; in Python 3 only carpool_capacity changes, printing 120 instead of 120.0.
# 2. Remember that 4.0 is a “floating point” number. Find out what that means.
# A floating point number carries a fractional part after the decimal point. E.g.: 3.00, 6.9
# 3. Write comments above each of the variable assignments.
# The names seem self-explanatory, but commenting them is still good practice.
# 4. Make sure you know what = is called (equals) and that it’s making names for things.
# Yeah. '=' is called equals and is an assignment operator.
# 5. Remember that _ is an underscore character.
# Without '_' it would be really hard to define meaningful variable names.
# 6. Try running Python as a calculator like you did before and use variable names
# to do your calculations. Popular variable names are also i, x, and j.
# Good Luck (*.*)
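# --- Illustrative sketch, not part of the original exercise. ---
# Drills 1 and 2 in action (Python 3 REPL):
#   >>> 30 * 4
#   120        # int * int stays an int
#   >>> 30 * 4.0
#   120.0      # any float operand makes the result a float
#   >>> 90 / 30
#   3.0        # '/' is true division, so it always returns a float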
| 38.947368 | 88 | 0.743243 |
4a1ce2fd8d9ffda6580a82da2bf34e4f765840d7 | 3,192 | py | Python | artista/artistFollowing/views.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | ["MIT"] | 17 | 2020-09-21T19:59:23.000Z | 2021-05-16T15:28:41.000Z | artista/artistFollowing/views.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | ["MIT"] | null | null | null | artista/artistFollowing/views.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | ["MIT"] | 2 | 2021-03-13T09:31:30.000Z | 2022-03-19T09:43:15.000Z |
from django.http import HttpResponse
from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse, Http404
from django.views import View
from artista.utils import get_current_user
from htmlmin.decorators import minified_response
from django.shortcuts import get_object_or_404
from .models import ArtistFollow
from register.models import User
from django.db.models import Q
# Create your views here.
class ArtistFollowView(View):
"""
    Artist can follow an artist
**Super Class**
from django.views import View
    **Methods Used:**
        GET, POST
**Context**
user_info: register.User.\n
artists : artistFollowing.ArtistFollow\n
**Models that are used by this Class**
The instance of model register.User.\n
The instance of model artistFollowing.ArtistFollow.\n
**Template:**
View Templates directory: artistArt/templates/follow.html
"""
USER_INFO = None
def get(self, request):
self.USER_INFO = get_current_user(request)
if self.USER_INFO == None:
return redirect('/logout')
artists = ArtistFollow.objects.filter(
user_follower=self.USER_INFO)
# artists = User.objects.filter(
# Q(user_role_id=1) & ~Q(uuid=self.USER_INFO.uuid))
if self.USER_INFO == None:
return redirect('/logout')
context = {
'artists': artists,
'user_info': self.USER_INFO
}
return render(request, 'follow.html', context)
# def post(self, request):
# self.USER_INFO = get_current_user(request)
# artist_id = request.POST.get('artist_id')
# artist_user = get_object_or_404(User, uuid=artist_id)
# print("Current User =", self.USER_INFO)
# print("Current User following =", artist_user.uuid)
# return redirect('artist_follow')
class ArtistFollowViewFormSubmit(View):
"""
    Artist can submit Follow or Unfollow
**Super Class**
from django.views import View
    **Methods Used:**
        POST
**Context**
user_info: register.User.\n
artists : artistFollowing.ArtistFollow\n
**Models that are used by this Class**
The instance of model register.User.\n
The instance of model artistFollowing.ArtistFollow.\n
**Redirect:**
View Redirect Url name: prev_url = request.META['HTTP_REFERER']
"""
USER_INFO = None
def post(self, request, *args, **kwargs):
prev_url = request.META['HTTP_REFERER']
following_uid = kwargs.get('uuid')
if not following_uid:
raise Http404("Page not found")
art = request.POST.get('art')
self.USER_INFO = get_current_user(request)
if self.USER_INFO == None:
return redirect('/logout')
artist_user = get_object_or_404(
User, uuid=following_uid
)
af = ArtistFollow()
af.user_follower = self.USER_INFO
af.user_following = artist_user
af.save()
# {% url 'app_artInfo:artist_single_art_page' data.uuid %}
return redirect(prev_url)
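# --- Illustrative sketch, not part of the original file. ---
# Hypothetical urls.py wiring for the two views above; the route names
# and the <uuid:uuid> converter are assumptions based on the kwargs and
# the 'artist_follow' redirect used in the commented-out code:
#
#   from django.urls import path
#   from .views import ArtistFollowView, ArtistFollowViewFormSubmit
#
#   urlpatterns = [
#       path('follow/', ArtistFollowView.as_view(), name='artist_follow'),
#       path('follow/<uuid:uuid>/', ArtistFollowViewFormSubmit.as_view(),
#            name='artist_follow_submit'),
#   ]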
| 23.470588 | 71 | 0.635338 |
4a1ce3a7ea504fbf2a3cef3d1264890a587eb4db | 97,417 | py | Python | tools/machine_learning/dataset/dataset_1800.py | emiliopomares/flow-separation-prediction | ca4585a8263cd5933889fbd763154e2f3851969a | ["MIT"] | null | null | null | tools/machine_learning/dataset/dataset_1800.py | emiliopomares/flow-separation-prediction | ca4585a8263cd5933889fbd763154e2f3851969a | ["MIT"] | null | null | null | tools/machine_learning/dataset/dataset_1800.py | emiliopomares/flow-separation-prediction | ca4585a8263cd5933889fbd763154e2f3851969a | ["MIT"] | null | null | null |
dataset = [[575, 1.8478238142818033, 335.84132856507614, 4.0, 40.0, 18.0, 1.0], [576, 15.77686287348893, 42.441254859835766, 4.0, 40.0, 18.0, 1.0], [577, 2.5651959697179394, 460.4148384334217, 4.0, 40.0, 18.0, 0.796539971089987], [578, 17.085286455903624, 1722.0569227642409, 4.0, 40.0, 18.0, 0.20868652310905142], [579, 0.8922404456295938, 544.2232815722436, 4.0, 40.0, 18.0, 0.8391968483418055], [580, 1.855625614157908, 324.0611877697628, 4.0, 40.0, 18.0, 1.0], [581, 13.887583805638116, 370.66355345479735, 4.0, 40.0, 18.0, 0.4156268407842482], [582, 4.574319040915774, 1916.680593529377, 4.0, 40.0, 18.0, 0.4731431536067498], [583, 11.620584764397769, 1765.727157030207, 4.0, 40.0, 18.0, 0.3125490112539554], [584, 2.6235186150459104, 1833.1693569752063, 4.0, 40.0, 18.0, 0.5374621823117757], [585, 13.484073962894696, 660.3699928897363, 4.0, 40.0, 18.0, 0.3539297235585189], [586, 14.039143301499495, 1194.4759299020527, 4.0, 40.0, 18.0, 0.2909060179303773], [587, 13.912217642029711, 1554.008389226998, 4.0, 40.0, 18.0, 0.2743234627435817], [588, 13.99433118037543, 1828.6039911998955, 4.0, 40.0, 18.0, 0.26137292500800485], [589, 13.97985812120516, 1457.7115189147835, 4.0, 40.0, 18.0, 0.277698127963447], [590, 14.235082927476668, 1533.990342569619, 4.0, 40.0, 18.0, 0.2684974619970663], [591, 0.14438911944825494, 1552.4111707538157, 4.0, 40.0, 18.0, 0.6646778218759786], [592, 12.838639587489782, 897.1618056405601, 4.0, 40.0, 18.0, 0.3404965898150903], [593, 11.149353897963573, 598.125634720337, 4.0, 40.0, 18.0, 0.42099275208739695], [594, 2.6770942046494586, 1985.9408533826909, 4.0, 40.0, 18.0, 0.5310687434251105], [595, 2.9997563631283897, 1040.9042861817666, 4.0, 40.0, 18.0, 0.6086804760399638], [596, 2.3022674475226914, 307.9928831694318, 4.0, 40.0, 18.0, 1.0], [597, 16.291429852780407, 1256.6722690719946, 4.0, 40.0, 18.0, 0.24371063020372327], [598, 15.922124691331389, 507.48840748529494, 4.0, 40.0, 18.0, 0.32681641236328557], [599, 1.9421270372564932, 296.11422429306833, 4.0, 40.0, 18.0, 1.0], [600, 17.158011255218895, 1370.1534109726924, 4.0, 40.0, 18.0, 0.22156305405117283], [601, 3.7932553942867377, 1121.772643508161, 4.0, 40.0, 18.0, 0.5662626026818638], [602, 14.10521268070536, 34.78839785558823, 4.0, 40.0, 18.0, 1.0], [603, 9.73869075013141, 1581.5507122013316, 4.0, 40.0, 18.0, 0.3625765570093685], [604, 4.274494253604736, 235.24380920742726, 4.0, 40.0, 18.0, 1.0], [605, 3.2295643670867213, 393.2709171003013, 4.0, 40.0, 18.0, 0.8062809231893029], [606, 12.475115053078682, 1915.9055265343711, 4.0, 40.0, 18.0, 0.2878838514964685], [607, 1.6886028928130217, 270.23903604128816, 4.0, 40.0, 18.0, 1.0], [608, 11.690517878266208, 1486.0998001794405, 4.0, 40.0, 18.0, 0.3237148945091826], [609, 15.926532949205589, 1131.5673840607374, 4.0, 40.0, 18.0, 0.25796741296596415], [610, 1.8446899105333436, 1224.3210865622684, 4.0, 40.0, 18.0, 0.6298416489939084], [611, 2.6566105295944635, 731.8422724963048, 4.0, 40.0, 18.0, 0.6871636918077176], [612, 15.125019204779058, 297.8907653170386, 4.0, 40.0, 18.0, 0.4185181467542303], [613, 3.8530850542593447, 1361.0001015077783, 4.0, 40.0, 18.0, 0.5373958378712685], [614, 12.598205126437756, 1717.5123950190948, 4.0, 40.0, 18.0, 0.2939356075449356], [615, 12.334492974283485, 623.3359259430543, 4.0, 40.0, 18.0, 0.3876830117045339], [616, 11.192915263658763, 1966.1376533177634, 4.0, 40.0, 18.0, 0.31352700344854745], [617, 13.997928305359197, 93.70706217274409, 4.0, 40.0, 18.0, 0.7141709148966205], [618, 16.366113492225136, 1236.5923416320818, 4.0, 40.0, 18.0, 
0.2435035777641013], [619, 2.9775330120959294, 1632.2202941746937, 4.0, 40.0, 18.0, 0.5446685841376463], [620, 12.375848772835731, 1454.8269704665809, 4.0, 40.0, 18.0, 0.31088421330658206], [621, 10.573273354771072, 809.9502345850427, 4.0, 40.0, 18.0, 0.4036679768086213], [622, 0.658017470275974, 1174.2198110845593, 4.0, 40.0, 18.0, 0.6885758157602853], [623, 15.388174373590935, 1558.7329300836798, 4.0, 40.0, 18.0, 0.24506860672627045], [624, 11.47959751919403, 495.72811032418923, 4.0, 40.0, 18.0, 0.43667527053115335], [625, 1.0249692741560694, 1714.5459100597297, 4.0, 40.0, 18.0, 0.6123934325550479], [626, 3.3490318260816085, 1587.88311412887, 4.0, 40.0, 18.0, 0.5351478832625405], [627, 13.621056051126965, 375.77630844629044, 4.0, 40.0, 18.0, 0.42020410107137113], [628, 15.612933200029865, 1869.1874865785246, 4.0, 40.0, 18.0, 0.23012146482050813], [629, 14.50967460452426, 81.04786425254812, 4.0, 40.0, 18.0, 0.7461201670586656], [630, 2.5263164804706095, 105.6280113180625, 4.0, 40.0, 18.0, 1.0], [631, 1.8029490922238407, 1029.5067606548166, 4.0, 40.0, 18.0, 0.6610687775529165], [632, 0.7319623297551434, 701.8330982972551, 4.0, 40.0, 18.0, 0.7869646649087125], [633, 0.4386533254903302, 794.8526073599669, 4.0, 40.0, 18.0, 0.7738976400870887], [634, 14.652220310971902, 952.1451985012426, 4.0, 40.0, 18.0, 0.29583094230337575], [635, 0.20246156655845526, 1804.2365401166314, 4.0, 40.0, 18.0, 0.6391260749168856], [636, 2.8725533409172863, 1981.9648539155141, 4.0, 40.0, 18.0, 0.5245258107130394], [637, 12.689631024428062, 1311.634645014224, 4.0, 40.0, 18.0, 0.31189270816357884], [638, 0.9614768236763129, 558.3493409470752, 4.0, 40.0, 18.0, 0.8292573515295004], [639, 12.935603226152619, 1333.9059870755948, 4.0, 40.0, 18.0, 0.30569596499996965], [640, 13.7979345626314, 779.3753407587446, 4.0, 40.0, 18.0, 0.331003864216317], [641, 0.9849375805474148, 825.9890492627081, 4.0, 40.0, 18.0, 0.7400968512949635], [642, 1.4105991726699643, 1700.8898630020053, 4.0, 40.0, 18.0, 0.5979771432431052], [643, 2.8503538554202015, 1064.3038789087652, 4.0, 40.0, 18.0, 0.6111854691231061], [644, 16.04834549540297, 1707.5748299191628, 4.0, 40.0, 18.0, 0.22738107076450115], [645, 16.783303066854362, 471.84431061561446, 4.0, 40.0, 18.0, 0.31643778862826194], [646, 0.39848871808779607, 1240.3359247723351, 4.0, 40.0, 18.0, 0.6908161553575081], [647, 2.4778883843398987, 1787.162704442942, 4.0, 40.0, 18.0, 0.5509850654318513], [648, 13.25885996232254, 1698.8026832869486, 4.0, 40.0, 18.0, 0.2808466520575083], [649, 15.986135974030557, 224.86348916148867, 4.0, 40.0, 18.0, 0.44441779528106345], [650, 13.675228038330282, 473.8153271538389, 4.0, 40.0, 18.0, 0.3878574026483963], [651, 2.5952162624842563, 981.5417910601017, 4.0, 40.0, 18.0, 0.6353139849691855], [652, 2.0463400011399635, 613.7276199815243, 4.0, 40.0, 18.0, 0.7530953898704728], [653, 9.649425541309405, 105.74257917879993, 4.0, 40.0, 18.0, 1.0], [654, 11.838679710747154, 1217.4544316048584, 4.0, 40.0, 18.0, 0.33626602314339615], [655, 2.098502180533995, 1251.1532820311488, 4.0, 40.0, 18.0, 0.6159690017531594], [656, 2.1025685084517787, 98.89633668206652, 4.0, 40.0, 18.0, 1.0], [657, 12.857819799391866, 1359.358143001064, 4.0, 40.0, 18.0, 0.3059279506262172], [658, 2.233221352723336, 906.0579887922141, 4.0, 40.0, 18.0, 0.6649669558186118], [659, 1.7811199402272135, 1448.3686916556367, 4.0, 40.0, 18.0, 0.6063111556787022], [660, 14.448901654879998, 167.34241426955228, 4.0, 40.0, 18.0, 0.5463919600053861], [661, 10.562823930958594, 1297.580763882867, 4.0, 40.0, 18.0, 
0.35997957233633354], [662, 3.272269298565971, 1067.0431698105351, 4.0, 40.0, 18.0, 0.593765244196822], [663, 1.0343862600092883, 667.017705876655, 4.0, 40.0, 18.0, 0.7836515967272167], [664, 0.16856950051587072, 720.6567295694282, 4.0, 40.0, 18.0, 0.8084805065903237], [665, 3.9177560361585195, 66.91061929546387, 4.0, 40.0, 18.0, 1.0], [666, 2.3780276261967783, 1668.1749819231613, 4.0, 40.0, 18.0, 0.5635900242240913], [667, 1.8813117598740123, 150.91226392259642, 4.0, 40.0, 18.0, 1.0], [668, 0.8447262583017403, 386.5987088286651, 4.0, 40.0, 18.0, 1.0], [669, 13.736506161491794, 889.6250282419106, 4.0, 40.0, 18.0, 0.3211736700709351], [670, 2.0090109477892426, 561.9522843904172, 4.0, 40.0, 18.0, 0.7749304621492978], [671, 3.038100630719427, 679.1589007105449, 4.0, 40.0, 18.0, 0.6851159203011229], [672, 3.9884559940760846, 833.3855871576827, 4.0, 40.0, 18.0, 0.6057434054729105], [673, 4.814399039171513, 30.558155481419423, 4.0, 40.0, 18.0, 1.0], [674, 0.7751917829450046, 1577.5845369091087, 4.0, 40.0, 18.0, 0.6351594260173516], [675, 14.960053050776954, 832.9983160485076, 4.0, 40.0, 18.0, 0.30016266965397775], [676, 17.771656535063176, 848.7911707008597, 4.0, 40.0, 18.0, 0.24429667759683385], [677, 1.401096753301573, 1702.1164509374134, 4.0, 40.0, 18.0, 0.5982530514668655], [678, 0.4236798333082352, 317.63818523938863, 4.0, 40.0, 18.0, 1.0], [679, 15.223642065748285, 933.4098161116106, 4.0, 40.0, 18.0, 0.2857079409908941], [680, 9.012045439273502, 1116.6022833618213, 4.0, 40.0, 18.0, 0.41056611918224784], [681, 1.2355168150259568, 1420.4346939390018, 4.0, 40.0, 18.0, 0.6319092021335166], [682, 0.3865424177831063, 473.2484023940222, 4.0, 40.0, 18.0, 0.904518741016756], [683, 1.2963805536393012, 1694.6108822988597, 4.0, 40.0, 18.0, 0.6029557100837556], [684, 1.9197251745615858, 1694.91862221521, 4.0, 40.0, 18.0, 0.5786066164285447], [685, 10.089674593087276, 544.0253517370464, 4.0, 40.0, 18.0, 0.46211496683321573], [686, 0.6334401188915719, 1844.853761865411, 4.0, 40.0, 18.0, 0.6178520401415883], [687, 13.326970265837408, 1834.0964338819465, 4.0, 40.0, 18.0, 0.2738377055667499], [688, 14.666304604439595, 467.8448461972652, 4.0, 40.0, 18.0, 0.3655591708614097], [689, 0.844304414255183, 1076.3594020117616, 4.0, 40.0, 18.0, 0.6959151451407178], [690, 3.6789640910162786, 626.4312648360602, 4.0, 40.0, 18.0, 0.6727236618293195], [691, 2.0051344211178366, 1007.3458777711303, 4.0, 40.0, 18.0, 0.6561383111692533], [692, 2.3481552984134613, 1471.3713178729508, 4.0, 40.0, 18.0, 0.5817223165277476], [693, 4.427135327036443, 1243.094343499476, 4.0, 40.0, 18.0, 0.5295634840686351], [694, 2.190869281670545, 554.5439904875483, 4.0, 40.0, 18.0, 0.7691339438293971], [695, 10.501562239820775, 250.29321404168425, 4.0, 40.0, 18.0, 0.5972936037623792], [696, 1.0579076190132626, 264.2901377145358, 4.0, 40.0, 18.0, 1.0], [697, 14.595094635291549, 1352.0935667481695, 4.0, 40.0, 18.0, 0.27090889639433213], [698, 16.80898248691349, 217.61917192979868, 4.0, 40.0, 18.0, 0.4291036505392953], [699, 0.4008588925910539, 496.7708158830859, 4.0, 40.0, 18.0, 0.8892724306754946], [700, 16.673778109326502, 763.6732093014514, 4.0, 40.0, 18.0, 0.27343576698147964], [701, 4.049172829085666, 878.2315631702593, 4.0, 40.0, 18.0, 0.5945659270380028], [702, 3.0942338736710977, 1363.9170030493724, 4.0, 40.0, 18.0, 0.5643689491583638], [703, 2.126979268729393, 1774.8561400512385, 4.0, 40.0, 18.0, 0.5647131880217934], [704, 3.237293211206092, 191.81932534132739, 4.0, 40.0, 18.0, 1.0], [705, 3.4937049106334515, 484.70155391287017, 4.0, 40.0, 
18.0, 0.7375686883019803], [706, 11.959072857883736, 42.238399809480285, 4.0, 40.0, 18.0, 1.0], [707, 9.832122271157074, 1013.5444884346197, 4.0, 40.0, 18.0, 0.400658804102444], [708, 14.257261734318867, 1290.167101491819, 4.0, 40.0, 18.0, 0.2811251610127445], [709, 13.584067151959738, 1617.345767469841, 4.0, 40.0, 18.0, 0.27790841405159983], [710, 2.86939610720086, 1528.0120869387833, 4.0, 40.0, 18.0, 0.5571594643640032], [711, 15.247977292503393, 78.47662178969892, 4.0, 40.0, 18.0, 0.7221566962049696], [712, 15.117226234056941, 1500.6389271531489, 4.0, 40.0, 18.0, 0.2529794077617841], [713, 0.8943924071003173, 930.034122187956, 4.0, 40.0, 18.0, 0.720893590508253], [714, 13.984266724862009, 248.39513882779175, 4.0, 40.0, 18.0, 0.482014990063243], [715, 2.223386857337172, 146.40521301654854, 4.0, 40.0, 18.0, 1.0], [716, 3.6592465325879995, 575.4090431647485, 4.0, 40.0, 18.0, 0.6909654634158583], [717, 11.527160608092421, 470.6255362849969, 4.0, 40.0, 18.0, 0.44251677112794247], [718, 1.5345921160899505, 933.6775403022946, 4.0, 40.0, 18.0, 0.6907209763934317], [719, 13.495474661190109, 448.9712669713969, 4.0, 40.0, 18.0, 0.3995449302285382], [720, 2.8143152440490398, 754.914262233562, 4.0, 40.0, 18.0, 0.6740759572520959], [721, 12.386140618404264, 617.4674074358072, 4.0, 40.0, 18.0, 0.3874563897442213], [722, 2.489191894483384, 1858.0378326075042, 4.0, 40.0, 18.0, 0.5455838604250699], [723, 14.945166788553095, 977.6845992440178, 4.0, 40.0, 18.0, 0.287696335767216], [724, 15.062033439978197, 586.7926249125157, 4.0, 40.0, 18.0, 0.3301358785168809], [725, 13.315486637462875, 1276.1039521680034, 4.0, 40.0, 18.0, 0.30085902284906213], [726, 5.5250731877212775, 1510.3496624936267, 4.0, 40.0, 18.0, 0.47230268898558975], [727, 2.4576833472407893, 1678.2310828766163, 4.0, 40.0, 18.0, 0.5598075789527722], [728, 15.369419811333646, 1910.4883745286518, 4.0, 40.0, 18.0, 0.23338937847894647], [729, 5.214285816910424, 956.0268177113105, 4.0, 40.0, 18.0, 0.5381743176242986], [730, 1.3077047948296436, 1976.678926789535, 4.0, 40.0, 18.0, 0.581328676181729], [731, 1.3390940699166374, 1338.4119742971081, 4.0, 40.0, 18.0, 0.6369868698126483], [732, 0.9447404261407903, 68.99171372172654, 4.0, 40.0, 18.0, 1.0], [733, 4.371945005856279, 1890.1522564525435, 4.0, 40.0, 18.0, 0.4821625964777501], [734, 13.618513853095315, 1646.6200277329588, 4.0, 40.0, 18.0, 0.27590590005053567], [735, 10.775468892185003, 1976.3410362610828, 4.0, 40.0, 18.0, 0.32235853074822723], [736, 1.0811406407288038, 1443.2954720813782, 4.0, 40.0, 18.0, 0.6359822206273342], [737, 1.0995547545955375, 376.7519579456747, 4.0, 40.0, 18.0, 1.0], [738, 4.301003721146503, 77.81431231638504, 4.0, 40.0, 18.0, 1.0], [739, 15.080118223773836, 76.48070671447462, 4.0, 40.0, 18.0, 0.7425229728210355], [740, 3.574588871061194, 1433.6517065601122, 4.0, 40.0, 18.0, 0.5404133911667138], [741, 3.266803748137959, 768.0183481079831, 4.0, 40.0, 18.0, 0.6508517372826103], [742, 11.43373389005566, 1204.420414885311, 4.0, 40.0, 18.0, 0.3463125292925278], [743, 2.35173742155411, 489.22149464059817, 4.0, 40.0, 18.0, 0.7917123610736267], [744, 12.905201804335201, 1152.2035380398245, 4.0, 40.0, 18.0, 0.3175083258225495], [745, 0.4524338972995281, 1136.9338973099766, 4.0, 40.0, 18.0, 0.7038516116029139], [746, 4.3175697121860726, 650.7705528177835, 4.0, 40.0, 18.0, 0.6374002296428714], [747, 0.39510622901542636, 426.23806126288514, 4.0, 40.0, 18.0, 0.9455163973623548], [748, 14.67896127887815, 1456.1132115988173, 4.0, 40.0, 18.0, 0.26381913942493096], [749, 
0.3555654643300439, 1560.595803351052, 4.0, 40.0, 18.0, 0.6548400085242511], [750, 10.055333964164149, 1696.7235455170785, 4.0, 40.0, 18.0, 0.34975385610025994], [751, 3.585948608898914, 1420.1272427420804, 4.0, 40.0, 18.0, 0.5412431767198689], [752, 13.239128463560627, 1768.3159811873886, 4.0, 40.0, 18.0, 0.27813109147915616], [753, 16.581173790050137, 1774.982634205434, 4.0, 40.0, 18.0, 0.21572823709191316], [754, 12.405257599909454, 1035.7129388087144, 4.0, 40.0, 18.0, 0.3375681394414063], [755, 14.025105691317485, 525.9186634463808, 4.0, 40.0, 18.0, 0.3664283779805332], [756, 2.2627180677654173, 1610.1215579758955, 4.0, 40.0, 18.0, 0.5725099766756114], [757, 3.8456742854216834, 974.6240117342162, 4.0, 40.0, 18.0, 0.5857546073747673], [758, 16.197110055633342, 811.1043442328842, 4.0, 40.0, 18.0, 0.27696357422853746], [759, 2.665567780129208, 1336.0363928181405, 4.0, 40.0, 18.0, 0.5834163594521643], [760, 15.878764458243273, 462.27250530519643, 4.0, 40.0, 18.0, 0.33885961769632117], [761, 16.657701804149767, 624.3021245972809, 4.0, 40.0, 18.0, 0.28958737547857455], [762, 12.4319797348128, 27.981780002591723, 4.0, 40.0, 18.0, 1.0], [763, 2.012088284960268, 203.25557099559074, 4.0, 40.0, 18.0, 1.0], [764, 12.07149921493987, 676.2404177502207, 4.0, 40.0, 18.0, 0.3852280102935016], [765, 3.2847123226633084, 480.74450340340275, 4.0, 40.0, 18.0, 0.7496224546070713], [766, 1.015608512384193, 989.0806757950718, 4.0, 40.0, 18.0, 0.7037137312266106], [767, 4.360325936889063, 1164.2479267295978, 4.0, 40.0, 18.0, 0.5405340479152868], [768, 18.3553886093106, 812.878232633861, 4.0, 40.0, 18.0, 0.23685367650290476], [769, 12.819641028845409, 99.85617473371678, 4.0, 40.0, 18.0, 0.7470961521980516], [770, 14.887398175547531, 955.1368509262782, 4.0, 40.0, 18.0, 0.29053086854876525], [771, 0.3049978880800497, 240.7835315879952, 4.0, 40.0, 18.0, 1.0], [772, 17.127897067130593, 128.52248922310935, 4.0, 40.0, 18.0, 0.5179783036052341], [773, 14.531605080634762, 1792.6363312400038, 4.0, 40.0, 18.0, 0.25257580205051355], [774, 18.277613851490095, 323.99960113230503, 4.0, 40.0, 18.0, 0.33223349252333434], [775, 14.093652353671214, 834.2167547538646, 4.0, 40.0, 18.0, 0.3187851943897908], [776, 13.976500995865905, 1971.4792814333991, 4.0, 40.0, 18.0, 0.25760437353957355], [777, 1.952474272573163, 295.44785041285417, 4.0, 40.0, 18.0, 1.0], [778, 0.08806775032605585, 113.73233728362332, 4.0, 40.0, 18.0, 1.0], [779, 2.453415081795997, 910.7242479224352, 4.0, 40.0, 18.0, 0.6545424458355116], [780, 3.6143013216859554, 1413.6379225118915, 4.0, 40.0, 18.0, 0.5408465932926981], [781, 13.999693357590361, 1563.0900153598675, 4.0, 40.0, 18.0, 0.2720721321312241], [782, 1.3264017323236081, 593.4245618091892, 4.0, 40.0, 18.0, 0.7960560666824285], [783, 10.191735427999252, 1600.8013150448471, 4.0, 40.0, 18.0, 0.3513108574021523], [784, 4.051250394046126, 985.8585314218301, 4.0, 40.0, 18.0, 0.5760435104642214], [785, 14.807307671854748, 320.66778786870094, 4.0, 40.0, 18.0, 0.4147264064944534], [786, 4.268574548409049, 743.3937271179888, 4.0, 40.0, 18.0, 0.6147561722735536], [787, 0.8542702954421335, 1638.3696405059818, 4.0, 40.0, 18.0, 0.6260693567962147], [788, 12.390226678120298, 35.71317038290776, 4.0, 40.0, 18.0, 1.0], [789, 1.3639243293316943, 1868.2981482102077, 4.0, 40.0, 18.0, 0.5868582703677531], [790, 2.188831794594288, 1711.7797448804686, 4.0, 40.0, 18.0, 0.5671193765550071], [791, 15.758645055648472, 530.8267105297234, 4.0, 40.0, 18.0, 0.32530622487733385], [792, 13.059184046144315, 546.4123943138928, 4.0, 40.0, 
18.0, 0.38516009047684285], [793, 0.020813953740415947, 1879.7985181999034, 4.0, 40.0, 18.0, 0.6407069486576307], [794, 13.976962090956986, 618.0348813826838, 4.0, 40.0, 18.0, 0.34917404485508285], [795, 0.14640206387335253, 1894.464504292166, 4.0, 40.0, 18.0, 0.6343405003196935], [796, 2.5511425850656826, 601.9232883752466, 4.0, 40.0, 18.0, 0.7331806458577551], [797, 12.847672292440175, 1261.598687370912, 4.0, 40.0, 18.0, 0.3115043396222614], [798, 12.344080026363585, 622.9195855768322, 4.0, 40.0, 18.0, 0.38750786086516037], [799, 0.9140585736086912, 1918.1368313365947, 4.0, 40.0, 18.0, 0.6009262777088463], [800, 0.35235189836048986, 1200.7010354775734, 4.0, 40.0, 18.0, 0.6986593717380764], [801, 0.42408892485570027, 775.8273006039032, 4.0, 40.0, 18.0, 0.7799235039007982], [802, 12.149370594834728, 49.7844001872184, 4.0, 40.0, 18.0, 1.0], [803, 2.9452057386165458, 675.5148540411165, 4.0, 40.0, 18.0, 0.6903658838768196], [804, 0.8650275146096837, 490.3551333869768, 4.0, 40.0, 18.0, 0.8678914535667861], [805, 11.264888723035527, 279.3339740753467, 4.0, 40.0, 18.0, 0.5452519060100363], [806, 0.9395446287184084, 268.10969545806984, 4.0, 40.0, 18.0, 1.0], [807, 12.320807236805747, 1300.6221934228442, 4.0, 40.0, 18.0, 0.3204935123878617], [808, 11.70632945413205, 1441.745445288523, 4.0, 40.0, 18.0, 0.32579640997107984], [809, 0.35145916095542207, 585.0453061453562, 4.0, 40.0, 18.0, 0.8485895124139439], [810, 4.445773728023266, 1258.4893180743315, 4.0, 40.0, 18.0, 0.5273167945622266], [811, 13.157817112034227, 1302.4115111415156, 4.0, 40.0, 18.0, 0.3027236223450973], [812, 2.6038670669485415, 575.2770357029218, 4.0, 40.0, 18.0, 0.7406538623606801], [813, 14.494374239199553, 950.3095667625149, 4.0, 40.0, 18.0, 0.29917920893048755], [814, 2.678555621471801, 875.2773613567432, 4.0, 40.0, 18.0, 0.6519207852377271], [815, 5.462519171772081, 1565.7578258848418, 4.0, 40.0, 18.0, 0.470256327574106], [816, 3.6925405136339666, 83.83399528926779, 4.0, 40.0, 18.0, 1.0], [817, 0.2115348826045702, 111.75885341485372, 4.0, 40.0, 18.0, 1.0], [818, 14.402890184387994, 692.8296053618269, 4.0, 40.0, 18.0, 0.32817588075576654], [819, 14.053683308306466, 554.168037466839, 4.0, 40.0, 18.0, 0.3594465509401986], [820, 9.822034405531422, 398.14312654946264, 4.0, 40.0, 18.0, 0.5215505855665795], [821, 10.80218553706231, 566.8990284356657, 4.0, 40.0, 18.0, 0.43702722233564545], [822, 4.226574534404808, 1895.5571571711712, 4.0, 40.0, 18.0, 0.4862284241682766], [823, 15.949911231789805, 328.6047615035785, 4.0, 40.0, 18.0, 0.38379127533171353], [824, 1.7421386127462293, 575.514140642602, 4.0, 40.0, 18.0, 0.7826957935291143], [825, 13.812861891077372, 530.6744036266848, 4.0, 40.0, 18.0, 0.37037799072499694], [826, 15.396540461138336, 1005.8720769068121, 4.0, 40.0, 18.0, 0.27668394182518674], [827, 3.7434074240616857, 1624.9930754111701, 4.0, 40.0, 18.0, 0.519149602832212], [828, 0.07647515233241453, 1077.467134469636, 4.0, 40.0, 18.0, 0.7309306554616624], [829, 16.768563736003642, 1401.9601662982361, 4.0, 40.0, 18.0, 0.2269830652049164], [830, 15.636220365831498, 1472.3493807122168, 4.0, 40.0, 18.0, 0.24437477012394507], [831, 1.4410911226772205, 1016.3433066720726, 4.0, 40.0, 18.0, 0.6794864004396505], [832, 13.429268731173906, 1636.0324925040434, 4.0, 40.0, 18.0, 0.2802160985454364], [833, 1.7001405956665383, 497.6687682538203, 4.0, 40.0, 18.0, 0.8204325318854587], [834, 2.2801805994410946, 1982.1214434171384, 4.0, 40.0, 18.0, 0.5450168923984778], [835, 3.6254909244031404, 851.6421890102177, 4.0, 40.0, 18.0, 
0.6168636797014945], [836, 2.4674618747481962, 25.072612129755036, 4.0, 40.0, 18.0, 1.0], [837, 11.309361392384687, 74.83038114989517, 4.0, 40.0, 18.0, 1.0], [838, 2.755439076643698, 517.1786134321785, 4.0, 40.0, 18.0, 0.757905306884278], [839, 2.877880558773736, 513.3027576117254, 4.0, 40.0, 18.0, 0.7536759982981875], [840, 0.12078775050444213, 507.1880300488068, 4.0, 40.0, 18.0, 0.8972672699498528], [841, 1.2444480307310486, 850.347297144776, 4.0, 40.0, 18.0, 0.7220547696064394], [842, 12.346750089330126, 432.5882287162059, 4.0, 40.0, 18.0, 0.4326930156506844], [843, 1.5889424632997833, 1340.7825445181215, 4.0, 40.0, 18.0, 0.6261528541397215], [844, 3.222459948811766, 127.94951038145253, 4.0, 40.0, 18.0, 1.0], [845, 13.84976520946138, 1860.6028498991764, 4.0, 40.0, 18.0, 0.2630875407495844], [846, 10.809450602607129, 697.7146030768455, 4.0, 40.0, 18.0, 0.4122864658711165], [847, 2.6517370985662945, 524.882015156666, 4.0, 40.0, 18.0, 0.7594546559239661], [848, 0.33151114416620175, 148.02595643232797, 4.0, 40.0, 18.0, 1.0], [849, 12.561384381668459, 1707.4417184851957, 4.0, 40.0, 18.0, 0.2952058934400186], [850, 0.2911780698329245, 1984.1116309569888, 4.0, 40.0, 18.0, 0.6216635522127714], [851, 15.034769662755576, 506.00476943493436, 4.0, 40.0, 18.0, 0.3471045303079199], [852, 13.613211001049788, 1621.8526579690747, 4.0, 40.0, 18.0, 0.27712957684978695], [853, 14.40687818096237, 123.64644020573355, 4.0, 40.0, 18.0, 0.6169858411816644], [854, 15.294320483647374, 1024.5299577076414, 4.0, 40.0, 18.0, 0.27731194025122224], [855, 11.771252185889919, 792.1702398240161, 4.0, 40.0, 18.0, 0.3765789395910771], [856, 12.712316898436608, 875.0992355058326, 4.0, 40.0, 18.0, 0.3454188902225476], [857, 1.6596262198930225, 1514.836482990111, 4.0, 40.0, 18.0, 0.6045344845451512], [858, 9.922203260880732, 1763.5137529481794, 4.0, 40.0, 18.0, 0.34982745088830114], [859, 13.988993685467582, 44.92657644876597, 4.0, 40.0, 18.0, 1.0], [860, 11.361187941096702, 949.3578953048755, 4.0, 40.0, 18.0, 0.3697459466883972], [861, 4.7679721273197755, 270.8999790374226, 4.0, 40.0, 18.0, 0.8368747879513082], [862, 13.261838815599832, 511.2917526138542, 4.0, 40.0, 18.0, 0.3883826771684269], [863, 0.3471091839154674, 493.5988467910215, 4.0, 40.0, 18.0, 0.8933765780267001], [864, 0.8367956247061774, 721.331726759914, 4.0, 40.0, 18.0, 0.7757173979973006], [865, 15.587056868126549, 577.3829475502033, 4.0, 40.0, 18.0, 0.3215678688285604], [866, 5.558999515029088, 1219.3662366634514, 4.0, 40.0, 18.0, 0.4952216968075769], [867, 3.501123798345038, 36.39878879439331, 4.0, 40.0, 18.0, 1.0], [868, 12.478975729494595, 552.9496226672502, 4.0, 40.0, 18.0, 0.3983166754960929], [869, 2.191668262788809, 212.63346482514075, 4.0, 40.0, 18.0, 1.0], [870, 0.6651462967474171, 332.0085086621616, 4.0, 40.0, 18.0, 1.0], [871, 0.6520694894744892, 1525.5004930335242, 4.0, 40.0, 18.0, 0.6456586439354453], [872, 3.2854463912199674, 1529.7019305477327, 4.0, 40.0, 18.0, 0.5421830391732159], [873, 4.26479061271794, 1650.8828736240553, 4.0, 40.0, 18.0, 0.5004064865608115], [874, 1.397417393603562, 520.5374198976365, 4.0, 40.0, 18.0, 0.8244495034316459], [875, 17.4020075595963, 1638.5497268464483, 4.0, 40.0, 18.0, 0.2063952702935425], [876, 17.266867430259012, 517.7366361387418, 4.0, 40.0, 18.0, 0.29733518877058607], [877, 4.651958853541073, 1991.7687195777119, 4.0, 40.0, 18.0, 0.4671262272005749], [878, 4.6773427238229655, 1887.7061658877649, 4.0, 40.0, 18.0, 0.47336803999065985], [879, 2.3367373636181195, 26.395310323466973, 4.0, 40.0, 18.0, 1.0], [880, 
1.0425516720644656, 1583.9473784885345, 4.0, 40.0, 18.0, 0.6233641218149952], [881, 10.801450726991098, 515.3670963596301, 4.0, 40.0, 18.0, 0.449610086321201], [882, 3.6448084496851765, 1330.078454856822, 4.0, 40.0, 18.0, 0.5476344589935176], [883, 0.8186557151880045, 1751.4627850640395, 4.0, 40.0, 18.0, 0.6177246429711354], [884, 3.8456552890447084, 484.5436617996804, 4.0, 40.0, 18.0, 0.7208386596494314], [885, 14.179261861894211, 45.97764684225157, 4.0, 40.0, 18.0, 1.0], [886, 12.469770353975969, 1396.0546701729043, 4.0, 40.0, 18.0, 0.31188432431254715], [887, 0.9979036808746822, 482.5891193944024, 4.0, 40.0, 18.0, 0.8653929817154723], [888, 3.6543410591156675, 1511.589633483546, 4.0, 40.0, 18.0, 0.5309946902593086], [889, 2.3109790245019717, 1486.5918912322754, 4.0, 40.0, 18.0, 0.5816964312441228], [890, 1.5279860050850842, 1179.8792252646228, 4.0, 40.0, 18.0, 0.6495763551188402], [891, 16.009572858609232, 1527.8415977201078, 4.0, 40.0, 18.0, 0.2349395566333903], [892, 0.625553047297132, 1375.382578491901, 4.0, 40.0, 18.0, 0.6632309139206402], [893, 1.8700549420915475, 1459.3607464749566, 4.0, 40.0, 18.0, 0.6017235731715382], [894, 4.7203670143848075, 1233.955835525839, 4.0, 40.0, 18.0, 0.5206216359174135], [895, 2.3469275787028274, 143.68895877079933, 4.0, 40.0, 18.0, 1.0], [896, 13.293778841935117, 932.4615150655806, 4.0, 40.0, 18.0, 0.3269208744678481], [897, 2.2745822436563925, 1770.8350103208168, 4.0, 40.0, 18.0, 0.5595061389646725], [898, 13.666554850823916, 1580.8125987337846, 4.0, 40.0, 18.0, 0.2779646088948095], [899, 12.817842657558726, 1103.2719645390746, 4.0, 40.0, 18.0, 0.32279005191624505], [900, 12.625966008465072, 1081.9735094386326, 4.0, 40.0, 18.0, 0.32889863628161914], [901, 13.723473591368872, 934.2190141641606, 4.0, 40.0, 18.0, 0.3174030111597273], [902, 2.1392136415686838, 1777.8941529681579, 4.0, 40.0, 18.0, 0.5640434960950943], [903, 13.969768661699815, 456.11253120090186, 4.0, 40.0, 18.0, 0.3855585390092459], [904, 13.492296797506846, 823.6488026751222, 4.0, 40.0, 18.0, 0.33304210963356434], [905, 2.4006379347625755, 1208.4337139265797, 4.0, 40.0, 18.0, 0.6090798255363604], [906, 3.2996878926064643, 1337.1106049529992, 4.0, 40.0, 18.0, 0.5594818899137194], [907, 0.6492383211452042, 729.2503406045738, 4.0, 40.0, 18.0, 0.7824979378024204], [908, 17.346096572563948, 1116.1596925144515, 4.0, 40.0, 18.0, 0.23304827148437396], [909, 11.76715542138179, 231.2640104783427, 4.0, 40.0, 18.0, 0.5685466033812662], [910, 3.460305294910314, 463.3425516518295, 4.0, 40.0, 18.0, 0.7503076720840286], [911, 9.905165123946508, 1623.4591525304995, 4.0, 40.0, 18.0, 0.35674684537427404], [912, 12.519429777919818, 1087.8976955000057, 4.0, 40.0, 18.0, 0.3308106440616603], [913, 12.319860344214563, 1170.7922649261436, 4.0, 40.0, 18.0, 0.32899488387402576], [914, 2.2228893559090825, 392.33817468647754, 4.0, 40.0, 18.0, 0.8602564526557024], [915, 3.8067427291656584, 1404.3857152789735, 4.0, 40.0, 18.0, 0.5349146406180674], [916, 2.845307779535298, 1224.5948480262102, 4.0, 40.0, 18.0, 0.5894087300981499], [917, 1.5978325186990006, 733.7915602697259, 4.0, 40.0, 18.0, 0.7356911118556473], [918, 12.375250922216058, 1094.1405838746987, 4.0, 40.0, 18.0, 0.333446679789058], [919, 12.664249513564506, 1338.942009483894, 4.0, 40.0, 18.0, 0.31096660152908345], [920, 4.730057276230532, 707.6882882087757, 4.0, 40.0, 18.0, 0.6047021685017591], [921, 1.34200905881488, 445.7578236962116, 4.0, 40.0, 18.0, 0.869228677565014], [922, 19.189200552837196, 1271.8341781178929, 4.0, 40.0, 18.0, 
0.19257084213389974], [923, 0.20754297998301063, 1975.762382694505, 4.0, 40.0, 18.0, 0.6256652310084061], [924, 1.5847315139362266, 1653.4738687150048, 4.0, 40.0, 18.0, 0.5950294363507569], [925, 0.9029107915704686, 1677.1085267452029, 4.0, 40.0, 18.0, 0.6206617133297019], [926, 1.9910239556107405, 1127.1444321415468, 4.0, 40.0, 18.0, 0.6373936481003256], [927, 1.6339303208191573, 868.9373037304671, 4.0, 40.0, 18.0, 0.6998676675829426], [928, 11.786132320694488, 357.5789828613401, 4.0, 40.0, 18.0, 0.47899034894400444], [929, 2.572490815590662, 572.5060453942956, 4.0, 40.0, 18.0, 0.7432340613970874], [930, 11.324076732318105, 1717.409772456833, 4.0, 40.0, 18.0, 0.32109029940016276], [931, 3.2033824826704946, 1381.766083288084, 4.0, 40.0, 18.0, 0.5584738695440018], [932, 0.29935076673756944, 346.4940759742787, 4.0, 40.0, 18.0, 1.0], [933, 3.3149473633066835, 369.4040760496109, 4.0, 40.0, 18.0, 0.8199639826820321], [934, 14.957461489742554, 392.1766853209864, 4.0, 40.0, 18.0, 0.38210275992583703], [935, 10.252068671560986, 878.8502929934763, 4.0, 40.0, 18.0, 0.4036874990181745], [936, 2.3298973235475184, 192.02823441675403, 4.0, 40.0, 18.0, 1.0], [937, 16.133257692608076, 68.96615475196143, 4.0, 40.0, 18.0, 0.7361223643796446], [938, 15.384049651537264, 674.7480437355557, 4.0, 40.0, 18.0, 0.3092093102129602], [939, 11.811459137224123, 1172.9652532967907, 4.0, 40.0, 18.0, 0.34018306472707666], [940, 0.25360697410958943, 394.65344289624267, 4.0, 40.0, 18.0, 1.0], [941, 1.0153955963041492, 1405.8976942428362, 4.0, 40.0, 18.0, 0.6429231137926095], [942, 15.023097872524138, 1612.1185311349077, 4.0, 40.0, 18.0, 0.24963010031947305], [943, 12.674138874068834, 1529.2419566313745, 4.0, 40.0, 18.0, 0.30084850061216206], [944, 14.802983843618815, 164.936432500173, 4.0, 40.0, 18.0, 0.5380645237956828], [945, 1.7974469089941434, 780.51386038894, 4.0, 40.0, 18.0, 0.7134924720574082], [946, 4.917161606969317, 1062.7610082561107, 4.0, 40.0, 18.0, 0.5336437867991229], [947, 12.4743031269495, 929.6669079224592, 4.0, 40.0, 18.0, 0.34558765008358927], [948, 11.621068659871398, 1118.90290438318, 4.0, 40.0, 18.0, 0.34840901764913146], [949, 16.280946670743294, 410.62405010321527, 4.0, 40.0, 18.0, 0.34469277018725086], [950, 3.0558105663569095, 175.23515739061907, 4.0, 40.0, 18.0, 1.0], [951, 11.340051238252313, 180.51325888810882, 4.0, 40.0, 18.0, 0.6411003699273948], [952, 14.277484175725977, 826.0645908403268, 4.0, 40.0, 18.0, 0.3155862057747023], [953, 1.3169760846316896, 1007.6922592236383, 4.0, 40.0, 18.0, 0.6864551690008719], [954, 15.98081088902735, 1312.1921207392581, 4.0, 40.0, 18.0, 0.24634828994169444], [955, 2.7038090341911643, 1070.423555481202, 4.0, 40.0, 18.0, 0.6162027689566242], [956, 0.13162317514869004, 968.7399315897476, 4.0, 40.0, 18.0, 0.7484223591940241], [957, 0.8895438034073955, 394.7437938171988, 4.0, 40.0, 18.0, 0.9472596588449149], [958, 17.6054051883661, 1164.4526642461376, 4.0, 40.0, 18.0, 0.22543712181505488], [959, 0.9803329696935392, 966.5219069922134, 4.0, 40.0, 18.0, 0.7095950885633576], [960, 12.40450221251569, 40.73367000616375, 4.0, 40.0, 18.0, 1.0], [961, 14.38877907140237, 1945.7004454607559, 4.0, 40.0, 18.0, 0.2506475381119266], [962, 13.839385771164153, 1721.227435947506, 4.0, 40.0, 18.0, 0.2678203713392895], [963, 11.300003819707515, 899.41463455827, 4.0, 40.0, 18.0, 0.3760826184598671], [964, 12.621759878623221, 1509.09256699155, 4.0, 40.0, 18.0, 0.3030571471492336], [965, 4.446005752769037, 1215.6389911853855, 4.0, 40.0, 18.0, 0.531810221189528], [966, 
14.12027533741313, 470.96365260444, 4.0, 40.0, 18.0, 0.37783846710475805], [967, 1.2335924052287428, 668.8036679063714, 4.0, 40.0, 18.0, 0.7731438909319109], [968, 13.891702310379722, 1433.5430891509743, 4.0, 40.0, 18.0, 0.28073620473004], [969, 13.07691706050377, 1990.0562638873407, 4.0, 40.0, 18.0, 0.27428850854309267], [970, 0.3741296303842674, 1992.3569398652019, 4.0, 40.0, 18.0, 0.6175748266261021], [971, 1.7737879178494667, 259.0842415595515, 4.0, 40.0, 18.0, 1.0], [972, 11.844469223850624, 685.3142445680993, 4.0, 40.0, 18.0, 0.3895263456632979], [973, 15.994151464447118, 195.3837928601646, 4.0, 40.0, 18.0, 0.46969256643285157], [974, 15.07415742392848, 1188.7953327292678, 4.0, 40.0, 18.0, 0.27083084409540814], [975, 11.51631080098148, 990.392849343815, 4.0, 40.0, 18.0, 0.3621093095794676], [976, 3.7324910274297363, 548.3082993159786, 4.0, 40.0, 18.0, 0.6979905116968695], [977, 10.126931622487524, 1311.0628779016936, 4.0, 40.0, 18.0, 0.3695539493716479], [978, 14.384868256664273, 1801.2232282031146, 4.0, 40.0, 18.0, 0.2550010957197734], [979, 1.5941497499646031, 21.42087368419864, 4.0, 40.0, 18.0, 1.0], [980, 14.311725315503883, 1641.4679331877837, 4.0, 40.0, 18.0, 0.26209851444974125], [981, 11.702490712594281, 1608.85589034457, 4.0, 40.0, 18.0, 0.3178295943147355], [982, 1.6831440624308462, 1937.773598507266, 4.0, 40.0, 18.0, 0.569613400952179], [983, 10.04449510773711, 1337.8640599219123, 4.0, 40.0, 18.0, 0.3697144533402944], [984, 12.265338523875279, 685.4790451609148, 4.0, 40.0, 18.0, 0.3791954762658907], [985, 14.047504992714842, 907.1842125697408, 4.0, 40.0, 18.0, 0.3126033510357102], [986, 0.0812992206315577, 237.74215754091662, 4.0, 40.0, 18.0, 1.0], [987, 16.240518744406902, 383.0087836084393, 4.0, 40.0, 18.0, 0.3549175791697821], [988, 0.06883842704250531, 1404.314806666811, 4.0, 40.0, 18.0, 0.6845397642348533], [989, 13.787439703636503, 497.45265536719654, 4.0, 40.0, 18.0, 0.3790056810984859], [990, 0.27157705024292866, 548.9587537120956, 4.0, 40.0, 18.0, 0.8688478453937651], [991, 15.892046387870389, 284.8597712136106, 4.0, 40.0, 18.0, 0.40718189317192766], [992, 9.848824144688235, 1846.5677455100385, 4.0, 40.0, 18.0, 0.34791635672450527], [993, 2.9839837324850187, 628.7377084119697, 4.0, 40.0, 18.0, 0.7035413612230239], [994, 9.743893770133019, 872.7922343672578, 4.0, 40.0, 18.0, 0.4160575897456665], [995, 3.1725132243825644, 1719.0748664697376, 4.0, 40.0, 18.0, 0.5315448268622537], [996, 14.16220465364361, 1380.1146019027651, 4.0, 40.0, 18.0, 0.27806357499245316], [997, 12.047099646751901, 808.0087418067287, 4.0, 40.0, 18.0, 0.3681542492764767], [998, 0.5304190367814037, 1655.4241578426804, 4.0, 40.0, 18.0, 0.6381701152771689], [999, 14.549367192037938, 1884.0702403816829, 4.0, 40.0, 18.0, 0.24933330781290222], [1000, 10.969079225753841, 51.07680155924689, 4.0, 40.0, 18.0, 1.0], [1001, 14.379505126968896, 1550.2450716680714, 4.0, 40.0, 18.0, 0.2650038234454316], [1002, 12.565066512565895, 208.72732881755675, 4.0, 40.0, 18.0, 0.5628847986029153], [1003, 16.709875990586724, 1443.2650546498917, 4.0, 40.0, 18.0, 0.2261108268887247], [1004, 13.627628051755496, 1244.7532834905965, 4.0, 40.0, 18.0, 0.2964599992301991], [1005, 2.5697766087191978, 280.23877982048634, 4.0, 40.0, 18.0, 1.0], [1006, 2.8154781428106443, 1941.9545128347193, 4.0, 40.0, 18.0, 0.5290188622609497], [1007, 2.6333417921411346, 1076.5628624835515, 4.0, 40.0, 18.0, 0.6181720182582942], [1008, 17.28877762619281, 1874.0882382622772, 4.0, 40.0, 18.0, 0.20020956318710967], [1009, 11.25986242885082, 
1152.8672697173233, 4.0, 40.0, 18.0, 0.35436490976918505], [1010, 2.0208419061258196, 1515.6593149763987, 4.0, 40.0, 18.0, 0.5903298092886061], [1011, 13.814286645733162, 983.7863318379204, 4.0, 40.0, 18.0, 0.31074136158799803], [1012, 12.444316410401754, 982.2880542798202, 4.0, 40.0, 18.0, 0.3415192818676255], [1013, 0.24952696616418857, 1036.7053004115012, 4.0, 40.0, 18.0, 0.7301535922653354], [1014, 13.639792484664886, 295.6283233387311, 4.0, 40.0, 18.0, 0.4594129236510011], [1015, 13.996314281774813, 549.8992191379771, 4.0, 40.0, 18.0, 0.3616955234209215], [1016, 15.921205656186562, 1729.2266149159716, 4.0, 40.0, 18.0, 0.22883476115101697], [1017, 14.134947206063751, 393.084954980827, 4.0, 40.0, 18.0, 0.4019043302681516], [1018, 8.66595361238791, 282.97653182782375, 4.0, 40.0, 18.0, 0.6408100008094858], [1019, 14.71138262666598, 60.97269071044567, 4.0, 40.0, 18.0, 1.0], [1020, 2.382268438061506, 917.076536395473, 4.0, 40.0, 18.0, 0.6564118004625531], [1021, 1.3609303603948204, 1001.8006758262095, 4.0, 40.0, 18.0, 0.6855880264411957], [1022, 3.0509720820353223, 1130.0439406595988, 4.0, 40.0, 18.0, 0.593504458976072], [1023, 2.5284957625784505, 1702.6925102059365, 4.0, 40.0, 18.0, 0.5553988908104479], [1024, 15.292318778395282, 164.5565070065693, 4.0, 40.0, 18.0, 0.5231762056992927], [1025, 12.709092524239063, 1337.838808917731, 4.0, 40.0, 18.0, 0.31013542004721883], [1026, 0.8664897269266063, 942.9426248090579, 4.0, 40.0, 18.0, 0.7195565723760619], [1027, 1.5536892902461885, 389.97253600178186, 4.0, 40.0, 18.0, 0.8975768383165542], [1028, 11.839748995092563, 35.17181476816696, 4.0, 40.0, 18.0, 1.0], [1029, 1.4292313804494934, 1740.6119702955277, 4.0, 40.0, 18.0, 0.5939137749467036], [1030, 1.3363135462155014, 1954.668669092828, 4.0, 40.0, 18.0, 0.5816961163661599], [1031, 2.8060572164743784, 1511.4491807486315, 4.0, 40.0, 18.0, 0.5609479158655598], [1032, 1.6806437079890457, 1155.2749326757967, 4.0, 40.0, 18.0, 0.6465713835704293], [1033, 13.284582033277403, 882.454433596463, 4.0, 40.0, 18.0, 0.33188616968173273], [1034, 12.994307863493844, 697.5910800347838, 4.0, 40.0, 18.0, 0.3597134973980741], [1035, 2.885707761520376, 1620.154783493823, 4.0, 40.0, 18.0, 0.5488468743484286], [1036, 1.7882389506939496, 1287.3707719696315, 4.0, 40.0, 18.0, 0.6243143714742815], [1037, 3.080079185483843, 493.3275516580027, 4.0, 40.0, 18.0, 0.7533814280844582], [1038, 2.028637955841206, 945.993717340121, 4.0, 40.0, 18.0, 0.6662129702129556], [1039, 1.905818935924439, 1285.8710428640877, 4.0, 40.0, 18.0, 0.6196402258174992], [1040, 11.599243904922254, 502.6315839756573, 4.0, 40.0, 18.0, 0.43158195615645856], [1041, 13.790439405291108, 103.64426432372672, 4.0, 40.0, 18.0, 0.6904458550013394], [1042, 4.004757399049852, 67.01069464719315, 4.0, 40.0, 18.0, 1.0], [1043, 14.052258983357287, 222.10776228882835, 4.0, 40.0, 18.0, 0.501652903766701], [1044, 3.227388132069464, 1720.8643397860187, 4.0, 40.0, 18.0, 0.529568658548787], [1045, 10.959534497399321, 996.1713102210934, 4.0, 40.0, 18.0, 0.3747722821016364], [1046, 2.60867208302761, 1007.3720736381559, 4.0, 40.0, 18.0, 0.6301983748350348], [1047, 10.34723334800158, 1855.8283279760872, 4.0, 40.0, 18.0, 0.33661924254169556], [1048, 13.663771853028694, 416.4327323283204, 4.0, 40.0, 18.0, 0.40522574751884216], [1049, 2.926733647466812, 789.2318756014898, 4.0, 40.0, 18.0, 0.6604279212323652], [1050, 3.916952249795078, 1446.2509437736092, 4.0, 40.0, 18.0, 0.527588212328101], [1051, 1.5906441069726882, 227.17592299329092, 4.0, 40.0, 18.0, 1.0], [1052, 
14.751478209741343, 1848.0800359979974, 4.0, 40.0, 18.0, 0.2466063624687294], [1053, 1.7180325519710022, 1784.2677435651497, 4.0, 40.0, 18.0, 0.5793416188712317], [1054, 1.770786282639183, 621.5628077582616, 4.0, 40.0, 18.0, 0.7636640570909402], [1055, 1.7634453515540762, 375.3213031714331, 4.0, 40.0, 18.0, 0.8985040082586395], [1056, 3.289665948419221, 197.39330994853793, 4.0, 40.0, 18.0, 1.0], [1057, 4.321995443049678, 739.2499295139535, 4.0, 40.0, 18.0, 0.613594208215663], [1058, 15.02796313304617, 684.1786932402887, 4.0, 40.0, 18.0, 0.31558062235784945], [1059, 1.7022514916524445, 132.8195324775657, 4.0, 40.0, 18.0, 1.0], [1060, 11.861196214382316, 1726.5552035226012, 4.0, 40.0, 18.0, 0.3092657884234668], [1061, 18.53219405604401, 1345.7697364706196, 4.0, 40.0, 18.0, 0.19962274578731354], [1062, 2.2018003860809126, 500.1605357192023, 4.0, 40.0, 18.0, 0.7936425960912976], [1063, 3.4308501723109535, 1040.2294758666994, 4.0, 40.0, 18.0, 0.5916129904815498], [1064, 3.131734440587331, 355.50835691379723, 4.0, 40.0, 18.0, 0.8411527242435897], [1065, 2.9134224241220155, 229.23854805670973, 4.0, 40.0, 18.0, 1.0], [1066, 14.182680593516672, 1376.92496836616, 4.0, 40.0, 18.0, 0.27783514085000444], [1067, 1.0786995201198697, 822.2766104186211, 4.0, 40.0, 18.0, 0.7365328861340361], [1068, 2.013553879196522, 1400.2014508042894, 4.0, 40.0, 18.0, 0.602115791885369], [1069, 2.7438534237015824, 129.28535173944977, 4.0, 40.0, 18.0, 1.0], [1070, 1.042107562388861, 524.6764616151083, 4.0, 40.0, 18.0, 0.8407905441604646], [1071, 1.0363398012755658, 1090.763973585378, 4.0, 40.0, 18.0, 0.6848039829600742], [1072, 3.237176891741856, 1216.7050270741079, 4.0, 40.0, 18.0, 0.5752419675428667], [1073, 3.6846385922453555, 1022.0347809340302, 4.0, 40.0, 18.0, 0.5844967780427568], [1074, 16.48237661016666, 230.3628634944973, 4.0, 40.0, 18.0, 0.42753973279031254], [1075, 11.887183915844156, 1532.8909206881513, 4.0, 40.0, 18.0, 0.31745651977986195], [1076, 9.795987619152806, 368.4240752495669, 4.0, 40.0, 18.0, 0.5377963414991482], [1077, 1.5725362031766448, 613.1519482784821, 4.0, 40.0, 18.0, 0.7763502871890664], [1078, 16.14442837863233, 1789.6274344911128, 4.0, 40.0, 18.0, 0.22301517771401855], [1079, 3.010681177382583, 525.0947325429249, 4.0, 40.0, 18.0, 0.7418565423427339], [1080, 2.626050049856609, 87.26077739123517, 4.0, 40.0, 18.0, 1.0], [1081, 3.5461655770160334, 721.0241535448812, 4.0, 40.0, 18.0, 0.6508579971217162], [1082, 0.43942912734418327, 1903.2370431310198, 4.0, 40.0, 18.0, 0.6214608856436676], [1083, 14.201089097245724, 1418.6458888791833, 4.0, 40.0, 18.0, 0.27532909801989186], [1084, 15.697789759021756, 71.49820317665981, 4.0, 40.0, 18.0, 0.7420874614185295], [1085, 3.894179835326802, 788.2217364023721, 4.0, 40.0, 18.0, 0.619585289675786], [1086, 1.9379248240751918, 622.0804084562546, 4.0, 40.0, 18.0, 0.7553662922444764], [1087, 2.9280871415937564, 368.8052447090669, 4.0, 40.0, 18.0, 0.8408900945428005], [1088, 14.192762025559434, 394.2730494780759, 4.0, 40.0, 18.0, 0.4001357721309345], [1089, 12.615397903867029, 1710.6393537745012, 4.0, 40.0, 18.0, 0.29387052050302004], [1090, 13.388229013777496, 1019.0095809250246, 4.0, 40.0, 18.0, 0.31715968686528295], [1091, 13.936394137395718, 494.0804879432962, 4.0, 40.0, 18.0, 0.3761526138501655], [1092, 2.6626983703910425, 1331.096613064419, 4.0, 40.0, 18.0, 0.5840920102088519], [1093, 1.6606063674304958, 1330.8185094517041, 4.0, 40.0, 18.0, 0.6243939242790345], [1094, 11.782712083760497, 1672.6850788957563, 4.0, 40.0, 18.0, 0.3131053486626985], [1095, 
15.184319320772518, 1755.46332324165, 4.0, 40.0, 18.0, 0.2414660477031268], [1096, 12.919814450210065, 1136.5904190118838, 4.0, 40.0, 18.0, 0.3183027222015717], [1097, 13.397585541158913, 1420.02894350462, 4.0, 40.0, 18.0, 0.29137754426213663], [1098, 16.039296411196265, 49.81395132343184, 4.0, 40.0, 18.0, 1.0], [1099, 0.03223062208415195, 564.5007444947587, 4.0, 40.0, 18.0, 0.8738943923720012], [1100, 0.3965324674328574, 208.67144682210144, 4.0, 40.0, 18.0, 1.0], [1101, 0.2587454158897944, 492.9092213386541, 4.0, 40.0, 18.0, 0.8978433478561145], [1102, 2.4512282270355894, 969.5733077750301, 4.0, 40.0, 18.0, 0.6435722255483033], [1103, 13.037110622507559, 1978.7926611925182, 4.0, 40.0, 18.0, 0.2752066659479918], [1104, 0.09702801470292499, 1034.568683688643, 4.0, 40.0, 18.0, 0.737549579804514], [1105, 3.7243141512925186, 1252.7413042473172, 4.0, 40.0, 18.0, 0.5531106760686401], [1106, 3.7300823221064467, 1840.6662082327387, 4.0, 40.0, 18.0, 0.5050181829485517], [1107, 0.7390472827273027, 951.1868047048595, 4.0, 40.0, 18.0, 0.7237118480285332], [1108, 14.699203480418248, 790.876583285864, 4.0, 40.0, 18.0, 0.3100833375715713], [1109, 13.744642637595122, 295.211022577771, 4.0, 40.0, 18.0, 0.4565803091908225], [1110, 1.3234921726933306, 1411.1810785263465, 4.0, 40.0, 18.0, 0.6291183294059258], [1111, 10.733819827658278, 1594.8047339872448, 4.0, 40.0, 18.0, 0.3394986221586111], [1112, 15.08396030415058, 1199.8789231506944, 4.0, 40.0, 18.0, 0.2699211489375776], [1113, 14.151848809304024, 1869.221704700846, 4.0, 40.0, 18.0, 0.2572297358093538], [1114, 15.425082152374571, 1928.9469914362653, 4.0, 40.0, 18.0, 0.2320335107672802], [1115, 3.1846824091441563, 56.284039786743236, 4.0, 40.0, 18.0, 1.0], [1116, 15.023770557348968, 89.36796050368335, 4.0, 40.0, 18.0, 0.6852249853431339], [1117, 1.9043719116248894, 810.3152033779872, 4.0, 40.0, 18.0, 0.701132851442026], [1118, 2.476744419555659, 512.9939632842418, 4.0, 40.0, 18.0, 0.7734321965867296], [1119, 5.638224412403431, 947.4189031293905, 4.0, 40.0, 18.0, 0.5248719760731467], [1120, 12.582768429433104, 889.0572248684971, 4.0, 40.0, 18.0, 0.34691433189565624], [1121, 13.209996807812674, 1703.2990868663117, 4.0, 40.0, 18.0, 0.2816933934759444], [1122, 2.571196462356841, 965.5725176028748, 4.0, 40.0, 18.0, 0.6391147897940703], [1123, 15.549839824671203, 860.7425022913329, 4.0, 40.0, 18.0, 0.28533522764891855], [1124, 2.290197107034295, 37.718444738812885, 4.0, 40.0, 18.0, 1.0], [1125, 15.296633491654081, 1631.6983714902794, 4.0, 40.0, 18.0, 0.24385832761706278], [1126, 1.9773314267862319, 824.5442653048218, 4.0, 40.0, 18.0, 0.6943966812207562], [1127, 13.585137721344376, 1533.1725756544176, 4.0, 40.0, 18.0, 0.282034008235188], [1128, 15.724474189120706, 1222.4003815287786, 4.0, 40.0, 18.0, 0.25636618186995946], [1129, 15.054773013057204, 28.788618774487205, 4.0, 40.0, 18.0, 1.0], [1130, 3.935755879936649, 451.2790557553616, 4.0, 40.0, 18.0, 0.7340605450519974], [1131, 12.060374322802431, 1695.8775129845455, 4.0, 40.0, 18.0, 0.3064044663551058], [1132, 0.5699100026349455, 257.01071505759273, 4.0, 40.0, 18.0, 1.0], [1133, 2.337056732561778, 1332.1933599946979, 4.0, 40.0, 18.0, 0.5967274659049205], [1134, 14.929161291553678, 1858.4160649572784, 4.0, 40.0, 18.0, 0.24298573172591478], [1135, 16.37830189256581, 735.8850746398455, 4.0, 40.0, 18.0, 0.28103652066356616], [1136, 3.008408854260301, 732.031875140908, 4.0, 40.0, 18.0, 0.6715223796687633], [1137, 1.9402546562278868, 260.90790268695594, 4.0, 40.0, 18.0, 1.0], [1138, 1.7561927137871818, 
1729.9272434246395, 4.0, 40.0, 18.0, 0.5819820807182222], [1139, 3.3182210403211245, 350.9610525818323, 4.0, 40.0, 18.0, 0.8352300172435078], [1140, 12.010770887643613, 1248.6746024116271, 4.0, 40.0, 18.0, 0.3305460939325732], [1141, 10.85941168088137, 71.47059309401556, 4.0, 40.0, 18.0, 1.0], [1142, 10.980415534436846, 166.37596074320402, 4.0, 40.0, 18.0, 0.6761774118115749], [1143, 1.5617875088281348, 675.6474977311742, 4.0, 40.0, 18.0, 0.7552429145001589], [1144, 13.165972382410674, 1899.8163826372215, 4.0, 40.0, 18.0, 0.2748329138010692], [1145, 12.190708636744622, 751.015848145742, 4.0, 40.0, 18.0, 0.3715866481019394], [1146, 1.3287748158002197, 1381.2224180495753, 4.0, 40.0, 18.0, 0.6323922963778769], [1147, 14.349986748903923, 662.8479716854886, 4.0, 40.0, 18.0, 0.33361783156616204], [1148, 0.8117701028446691, 1330.9808890399793, 4.0, 40.0, 18.0, 0.6605914629692309], [1149, 10.694801028864255, 1180.9791185972229, 4.0, 40.0, 18.0, 0.3654725380201102], [1150, 12.522745003700294, 1288.6646026503458, 4.0, 40.0, 18.0, 0.3169286746814777], [1151, 14.125243173427773, 1704.1461229143479, 4.0, 40.0, 18.0, 0.2631058145280849], [1152, 12.640158288260164, 1325.0179824092363, 4.0, 40.0, 18.0, 0.31216247638923234], [1153, 2.996767138831382, 1340.973855832597, 4.0, 40.0, 18.0, 0.5701560739249362], [1154, 1.0103158459050823, 149.82453054600853, 4.0, 40.0, 18.0, 1.0], [1155, 15.45792707994535, 490.6957143049323, 4.0, 40.0, 18.0, 0.34124500808218117], [1156, 12.632808993338807, 60.51819786436858, 4.0, 40.0, 18.0, 1.0], [1157, 9.995708285889641, 1053.1451951133604, 4.0, 40.0, 18.0, 0.3931658299507808], [1158, 2.3665670302411397, 619.0489234318621, 4.0, 40.0, 18.0, 0.7357972231848412], [1159, 1.992289451710359, 542.7269335929699, 4.0, 40.0, 18.0, 0.7841200540845771], [1160, 1.0581205167158352, 1797.386027870636, 4.0, 40.0, 18.0, 0.60410395312093], [1161, 4.506478622211751, 1922.4010309402897, 4.0, 40.0, 18.0, 0.4763789694653317], [1162, 1.230245115157081, 862.4269173835855, 4.0, 40.0, 18.0, 0.7199665502138691], [1163, 12.27136767117742, 1886.5720539480553, 4.0, 40.0, 18.0, 0.2933905468259819], [1164, 0.25313841335976406, 741.2614845795835, 4.0, 40.0, 18.0, 0.7979299627443198], [1165, 1.478970185735261, 1564.1201914549476, 4.0, 40.0, 18.0, 0.6071810359443801], [1166, 16.09605943976552, 919.6453315499255, 4.0, 40.0, 18.0, 0.2693965632365857], [1167, 4.037566215031811, 66.79749823781658, 4.0, 40.0, 18.0, 1.0], [1168, 19.78364305035553, 1938.2673473014622, 4.0, 40.0, 18.0, 0.16089148757518487], [1169, 2.4027005670192043, 41.94031467342457, 4.0, 40.0, 18.0, 1.0], [1170, 0.6820141827833761, 1399.187879991399, 4.0, 40.0, 18.0, 0.639718489507238], [1171, 19.036076524320162, 123.42767966478047, 4.0, 40.0, 18.0, 0.449049620257609], [1172, 3.1399276566344376, 369.9001366174945, 4.0, 40.0, 18.0, 0.7666244215791429], [1173, 3.236058640430873, 283.40322513670935, 4.0, 40.0, 18.0, 0.8203188141646067], [1174, 1.9691454086109306, 961.7632204257682, 4.0, 40.0, 18.0, 0.6449613351258549], [1175, 2.715182514964546, 1150.6740337312187, 4.0, 40.0, 18.0, 0.5920930222828198], [1176, 27.21311553963702, 1756.2584035565546, 4.0, 40.0, 18.0, 0.0903216643796868], [1177, 16.332278679348473, 1929.1325653020458, 4.0, 40.0, 18.0, 0.21548233630350994], [1178, 18.851358241448246, 164.25249141088568, 4.0, 40.0, 18.0, 0.40659466958377105], [1179, 1.2645877257120057, 128.72759562109485, 4.0, 40.0, 18.0, 1.0], [1180, 2.174052221206166, 1871.2330478166857, 4.0, 40.0, 18.0, 0.5500519158934457], [1181, 0.369264383329146, 79.06962927973483, 4.0, 
40.0, 18.0, 1.0], [1182, 5.015590388923191, 1829.5542832770025, 4.0, 40.0, 18.0, 0.46517364188444893], [1183, 7.9641432612560354, 1659.8180695861129, 4.0, 40.0, 18.0, 0.4003670474723286], [1184, 6.3462868073769085, 1397.883291575914, 4.0, 40.0, 18.0, 0.45477617599031894], [1185, 7.355755954655896, 1807.5327951701765, 4.0, 40.0, 18.0, 0.4074628472567552], [1186, 8.64442960873242, 1303.9973933918432, 4.0, 40.0, 18.0, 0.40467635360271276], [1187, 8.269376490066012, 1475.3787626067367, 4.0, 40.0, 18.0, 0.4029830158676198], [1188, 5.295471215691508, 961.476795656042, 4.0, 40.0, 18.0, 0.5283487182528002], [1189, 7.186759498414793, 798.0421632859379, 4.0, 40.0, 18.0, 0.49340469296409467], [1190, 1.8033674141831544, 1555.069034300783, 4.0, 40.0, 18.0, 0.5852046358304841], [1191, 5.178531140767847, 1602.1361815824275, 4.0, 40.0, 18.0, 0.4737608456775778], [1192, 17.499316946265807, 620.0053180513956, 4.0, 40.0, 18.0, 0.2756750300803742], [1193, 1.9355781932515268, 381.00787564845757, 4.0, 40.0, 18.0, 0.8101138400526966], [1194, 7.336185562775143, 423.59185827148286, 4.0, 40.0, 18.0, 0.5803122245928203], [1195, 18.33887179153779, 1359.1806943668778, 4.0, 40.0, 18.0, 0.2023491071916834], [1196, 7.130302768984083, 1340.6693328992794, 4.0, 40.0, 18.0, 0.43880829257870607], [1197, 13.414444529516082, 128.79726388648123, 4.0, 40.0, 18.0, 0.6071991537058363], [1198, 8.267772881612357, 294.9600472733411, 4.0, 40.0, 18.0, 0.6126383197909909], [1199, 7.337264528588566, 1634.5111683840066, 4.0, 40.0, 18.0, 0.4158109062971177], [1200, 6.9139864083816285, 141.39951824680566, 4.0, 40.0, 18.0, 0.8421334519140697], [1201, 8.175265131631265, 103.74759390533359, 4.0, 40.0, 18.0, 1.0], [1202, 6.315423840438488, 105.9531581358207, 4.0, 40.0, 18.0, 1.0], [1203, 7.977137727503396, 398.44340273995226, 4.0, 40.0, 18.0, 0.5686245839943815], [1204, 8.332407119903376, 1121.741332798518, 4.0, 40.0, 18.0, 0.42516968726602145], [1205, 6.569668765469224, 1981.7682164139594, 4.0, 40.0, 18.0, 0.4182336957011782], [1206, 6.92982025249804, 836.6303225546189, 4.0, 40.0, 18.0, 0.4952506114546419], [1207, 4.724765881516024, 66.19126710533727, 4.0, 40.0, 18.0, 1.0], [1208, 5.496455450181003, 1020.5655269761479, 4.0, 40.0, 18.0, 0.5144030695496474], [1209, 12.23978705101208, 124.71125716357164, 4.0, 40.0, 18.0, 0.6573088266938368], [1210, 16.712612730021203, 926.4867825338386, 4.0, 40.0, 18.0, 0.2572257334400859], [1211, 20.454886096365193, 186.34483810853808, 4.0, 40.0, 18.0, 0.35263237159788147], [1212, 9.077300632255346, 1543.3696335480215, 4.0, 40.0, 18.0, 0.37983852792403305], [1213, 10.002309398630995, 167.45928656638495, 4.0, 40.0, 18.0, 0.6693986801311453], [1214, 5.041531638213447, 539.451421243852, 4.0, 40.0, 18.0, 0.6216735892997367], [1215, 17.295354635393192, 1258.190830900827, 4.0, 40.0, 18.0, 0.22503256583007158], [1216, 7.529610314398463, 81.18094844993291, 4.0, 40.0, 18.0, 1.0], [1217, 8.716881519353677, 522.0598522107705, 4.0, 40.0, 18.0, 0.5034524537641513], [1218, 23.207488562506025, 1183.9987562749156, 4.0, 40.0, 18.0, 0.14273251949545332], [1219, 15.53041038553193, 1653.9244284660335, 4.0, 40.0, 18.0, 0.23876833803890174], [1220, 7.581987642793694, 126.5490051137025, 4.0, 40.0, 18.0, 0.8471405292924625], [1221, 18.100579927708925, 78.56611237387193, 4.0, 40.0, 18.0, 0.5723543398356996], [1222, 10.032954048928659, 1583.0188619122825, 4.0, 40.0, 18.0, 0.3552885933537411], [1223, 3.928618864946684, 1172.098169381809, 4.0, 40.0, 18.0, 0.5476817583031351], [1224, 8.878072541422867, 1052.1277030255112, 4.0, 40.0, 18.0, 
0.41794991221434724], [1225, 7.538354231026863, 1150.5325398662783, 4.0, 40.0, 18.0, 0.4430023174977232], [1226, 3.86430927339438, 159.5061944069398, 4.0, 40.0, 18.0, 1.0], [1227, 6.271635001441852, 714.0497554992917, 4.0, 40.0, 18.0, 0.5362393576330298], [1228, 5.284787910798242, 934.5347648863686, 4.0, 40.0, 18.0, 0.5323052224051645], [1229, 9.357768871113777, 983.1155550614441, 4.0, 40.0, 18.0, 0.4128187349829811], [1230, 1.9260645051823582, 1352.833337687172, 4.0, 40.0, 18.0, 0.5987624743799103], [1231, 2.964067273683936, 559.9733735227916, 4.0, 40.0, 18.0, 0.6943021026302088], [1232, 8.987943269243626, 87.84460272548935, 4.0, 40.0, 18.0, 1.0], [1233, 8.892778007256128, 1879.2127971726704, 4.0, 40.0, 18.0, 0.3681950952851235], [1234, 7.776003883481518, 1269.5906241773212, 4.0, 40.0, 18.0, 0.42744039552339286], [1235, 2.801001159610994, 327.06479995276214, 4.0, 40.0, 18.0, 0.806736806048399], [1236, 5.7188470581733775, 711.1044171570837, 4.0, 40.0, 18.0, 0.5551491587240777], [1237, 7.898725291077167, 917.4608916170924, 4.0, 40.0, 18.0, 0.45666826071995703], [1238, 10.606889008599666, 160.0841741182571, 4.0, 40.0, 18.0, 0.6572256724301155], [1239, 7.432590392373277, 187.00049815458232, 4.0, 40.0, 18.0, 0.7443421818814376], [1240, 13.774078721690827, 1266.0431727037835, 4.0, 40.0, 18.0, 0.2913974431720982], [1241, 6.282444921870322, 38.019598628753926, 4.0, 40.0, 18.0, 1.0], [1242, 21.07774803192213, 554.8461810902066, 4.0, 40.0, 18.0, 0.2219080730434804], [1243, 6.886359730237494, 127.40059719552774, 4.0, 40.0, 18.0, 0.8964373692149963], [1244, 13.42167230468698, 801.5509709291521, 4.0, 40.0, 18.0, 0.3373220817679418], [1245, 23.092273369478555, 50.99786504059964, 4.0, 40.0, 18.0, 0.5337052832409289], [1246, 4.548593126766056, 1453.1644361071328, 4.0, 40.0, 18.0, 0.5025293218844562], [1247, 2.3629948907309637, 922.9084452025695, 4.0, 40.0, 18.0, 0.6362891669369165], [1248, 6.0197603572237455, 156.37500238824265, 4.0, 40.0, 18.0, 0.852839393860757], [1249, 12.755982625323353, 56.05131895169758, 4.0, 40.0, 18.0, 1.0], [1250, 7.6034813542348285, 62.955048796187185, 4.0, 40.0, 18.0, 1.0], [1251, 17.919673159104054, 699.2242419395886, 4.0, 40.0, 18.0, 0.25715009688587154], [1252, 6.555400094542579, 415.27269071849696, 4.0, 40.0, 18.0, 0.6113975663260102], [1253, 0.4683803992454405, 766.3019646419847, 4.0, 40.0, 18.0, 0.7391491333636743], [1254, 24.065839967024313, 56.13669337316097, 4.0, 40.0, 18.0, 0.48409256999326084], [1255, 6.188304545031862, 87.83946702881296, 4.0, 40.0, 18.0, 1.0], [1256, 5.6545761135112595, 409.4800516606527, 4.0, 40.0, 18.0, 0.64720109692411], [1257, 8.875924397475725, 1885.198280422778, 4.0, 40.0, 18.0, 0.36834073945473816], [1258, 5.9812605938000765, 1627.8956165996356, 4.0, 40.0, 18.0, 0.45019048409168305], [1259, 7.563385596100861, 1668.4160934378785, 4.0, 40.0, 18.0, 0.40893350760687874], [1260, 6.767522146498627, 1892.654600288721, 4.0, 40.0, 18.0, 0.41727179473952486], [1261, 9.003768343888353, 924.3897737315632, 4.0, 40.0, 18.0, 0.4275660496560671], [1262, 10.11415998228599, 92.72362753509303, 4.0, 40.0, 18.0, 0.8420548802855079], [1263, 3.1676425521769103, 24.25115770596276, 4.0, 40.0, 18.0, 1.0], [1264, 8.60709620270132, 176.3005333520283, 4.0, 40.0, 18.0, 0.7113186351743177], [1265, 8.574862833918065, 1867.2280891759276, 4.0, 40.0, 18.0, 0.37604187015770435], [1266, 22.568124982996913, 447.49349422921375, 4.0, 40.0, 18.0, 0.21648344682626164], [1267, 7.442154491451388, 1048.0093148569185, 4.0, 40.0, 18.0, 0.45473073539560094], [1268, 6.380765099104327, 
809.1409989911083, 4.0, 40.0, 18.0, 0.5161337468286761], [1269, 6.2703930117405635, 100.32388964712213, 4.0, 40.0, 18.0, 1.0], [1270, 10.891774882555897, 1079.3078057471048, 4.0, 40.0, 18.0, 0.36813708960246255], [1271, 8.95396269186028, 1627.7744256533495, 4.0, 40.0, 18.0, 0.3783056440799314], [1272, 18.44610600865544, 1757.4927772048977, 4.0, 40.0, 18.0, 0.18551712146322075], [1273, 4.6786716661006835, 547.769122013201, 4.0, 40.0, 18.0, 0.632447593403183], [1274, 2.1251800097959395, 892.7901026143173, 4.0, 40.0, 18.0, 0.6502236632149607], [1275, 8.741687911881119, 145.72296449208903, 4.0, 40.0, 18.0, 0.7542550861397427], [1276, 6.0317976118249925, 1525.2067264516947, 4.0, 40.0, 18.0, 0.4547038137911362], [1277, 4.8260706205210875, 996.9702392265958, 4.0, 40.0, 18.0, 0.5388373173621561], [1278, 8.978237387083567, 1320.1616961970205, 4.0, 40.0, 18.0, 0.3958794698591554], [1279, 6.6378293436968, 1161.2106690573914, 4.0, 40.0, 18.0, 0.4660687658328459], [1280, 8.458132265868903, 158.34178015920972, 4.0, 40.0, 18.0, 0.744202484161588], [1281, 6.512084835891488, 1365.227444970359, 4.0, 40.0, 18.0, 0.45284650512938746], [1282, 18.946150935904775, 836.8908630333353, 4.0, 40.0, 18.0, 0.22517478630818474], [1283, 4.197391059439028, 157.4806314253832, 4.0, 40.0, 18.0, 1.0], [1284, 15.704640944044323, 922.7576841594541, 4.0, 40.0, 18.0, 0.27692127673445993], [1285, 8.993572378756085, 1523.1643034307276, 4.0, 40.0, 18.0, 0.38288870048956913], [1286, 9.551762373963772, 137.37838587082774, 4.0, 40.0, 18.0, 0.7367472147108782], [1287, 6.836120334993654, 108.57200969134549, 4.0, 40.0, 18.0, 1.0], [1288, 18.623187829706257, 129.6522516712035, 4.0, 40.0, 18.0, 0.4505472862340692], [1289, 7.711400972402812, 177.20270732729372, 4.0, 40.0, 18.0, 0.7462122889683686], [1290, 26.761811418085376, 118.05657923867592, 4.0, 40.0, 18.0, 0.29554905956925015], [1291, 7.662669097791084, 56.90296588565029, 4.0, 40.0, 18.0, 1.0], [1292, 0.10870203577803483, 1741.7558595401888, 4.0, 40.0, 18.0, 0.6317376280654958], [1293, 1.9473160729516499, 145.14083380839472, 4.0, 40.0, 18.0, 1.0], [1294, 6.737575277744637, 608.2291946195155, 4.0, 40.0, 18.0, 0.5438704744699925], [1295, 19.682639589441887, 198.74232015310795, 4.0, 40.0, 18.0, 0.36004526049807617], [1296, 2.473123880916147, 1716.4999207626365, 4.0, 40.0, 18.0, 0.5500478185539265], [1297, 5.193408794676076, 1418.6348634687708, 4.0, 40.0, 18.0, 0.4859055759995014], [1298, 7.183575667393996, 417.07295184014845, 4.0, 40.0, 18.0, 0.5883269378654972], [1299, 21.150832560894557, 558.1605411763966, 4.0, 40.0, 18.0, 0.22015338400895257], [1300, 6.807668500434356, 22.774253873927528, 4.0, 40.0, 18.0, 1.0], [1301, 14.825811837772338, 1815.779515961615, 4.0, 40.0, 18.0, 0.24641851346536917], [1302, 19.61257903220836, 174.76549651402712, 4.0, 40.0, 18.0, 0.38045717360338593], [1303, 9.44597038411877, 1460.8613774570842, 4.0, 40.0, 18.0, 0.375590569182475], [1304, 8.363350677255537, 108.24812024854873, 4.0, 40.0, 18.0, 1.0], [1305, 11.627619932330308, 462.2438699082355, 4.0, 40.0, 18.0, 0.4386236246487028], [1306, 4.25375068601658, 44.737240506673565, 4.0, 40.0, 18.0, 1.0], [1307, 15.323723976306244, 193.13690043569127, 4.0, 40.0, 18.0, 0.4672625452083837], [1308, 7.67840565342198, 1221.622652501602, 4.0, 40.0, 18.0, 0.4335147827423309], [1309, 10.161472753743686, 637.9307505211873, 4.0, 40.0, 18.0, 0.4380512233028316], [1310, 5.352103221972685, 769.5004046201453, 4.0, 40.0, 18.0, 0.5562587887761353], [1311, 5.94396778460326, 734.3183615782542, 4.0, 40.0, 18.0, 0.5431304519220624], 
[1312, 4.329894397767422, 198.44607248145576, 4.0, 40.0, 18.0, 0.8611783341871606], [1313, 7.793884550435181, 195.9457843165826, 4.0, 40.0, 18.0, 0.7184431228756457], [1314, 1.6458396071403951, 1543.8211216833088, 4.0, 40.0, 18.0, 0.5916827782308387], [1315, 5.2397810323720115, 773.2872602366556, 4.0, 40.0, 18.0, 0.5592971078305247], [1316, 5.071334677576081, 1264.1841390372872, 4.0, 40.0, 18.0, 0.5022489125175499], [1317, 4.915897698343463, 190.8311847495432, 4.0, 40.0, 18.0, 0.8452452350984642], [1318, 18.131443878584584, 1355.144172984478, 4.0, 40.0, 18.0, 0.20603050675229675], [1319, 5.045425961883796, 1263.3857271786644, 4.0, 40.0, 18.0, 0.5031427613586562], [1320, 25.69668842680312, 25.471367110249652, 4.0, 40.0, 18.0, 1.0], [1321, 13.64121552667359, 282.1824957504032, 4.0, 40.0, 18.0, 0.4526475419564154], [1322, 7.894733259432112, 1686.619810566747, 4.0, 40.0, 18.0, 0.4006909265458889], [1323, 3.0672278084490934, 65.44236325424095, 4.0, 40.0, 18.0, 1.0], [1324, 8.26341516439206, 1780.6937209454986, 4.0, 40.0, 18.0, 0.3871309731675182], [1325, 12.025004855539096, 1169.499783664861, 4.0, 40.0, 18.0, 0.3348465579061486], [1326, 4.47725367246014, 61.237256127318005, 4.0, 40.0, 18.0, 1.0], [1327, 7.246862918086097, 573.9746077988173, 4.0, 40.0, 18.0, 0.5356513270883977], [1328, 18.144359786541134, 1899.8874592795703, 4.0, 40.0, 18.0, 0.18598501949557647], [1329, 6.013099458010077, 1658.5314414190225, 4.0, 40.0, 18.0, 0.44767210184258555], [1330, 0.8940771986066971, 1582.2689871819275, 4.0, 40.0, 18.0, 0.6156049967055883], [1331, 4.907385422705094, 729.179555570033, 4.0, 40.0, 18.0, 0.5791881555367441], [1332, 16.99250656770565, 174.90756645855836, 4.0, 40.0, 18.0, 0.44054102068717704], [1333, 5.068817012991641, 127.23313653932355, 4.0, 40.0, 18.0, 1.0], [1334, 25.195788921777797, 199.22631283422874, 4.0, 40.0, 18.0, 0.25507330373329373], [1335, 21.544753706441554, 555.1440084697524, 4.0, 40.0, 18.0, 0.21454677378346265], [1336, 9.916165122980521, 1541.0613415556109, 4.0, 40.0, 18.0, 0.35997462896430976], [1337, 6.526742349405956, 1807.7032492032918, 4.0, 40.0, 18.0, 0.42692317849711736], [1338, 8.563157796132174, 1546.4066817804764, 4.0, 40.0, 18.0, 0.392069891200638], [1339, 0.13033089636797346, 192.62951101195787, 4.0, 40.0, 18.0, 1.0], [1340, 6.99974565403118, 700.5548064422012, 4.0, 40.0, 18.0, 0.5157984088004443], [1341, 7.930998747820943, 1924.7987871999992, 4.0, 40.0, 18.0, 0.388928666683442], [1342, 6.50218765952285, 103.3118977811145, 4.0, 40.0, 18.0, 1.0], [1343, 6.719471840101348, 524.3954111611961, 4.0, 40.0, 18.0, 0.566853611351499], [1344, 9.751484178237913, 1675.8590959706237, 4.0, 40.0, 18.0, 0.3572178883988987], [1345, 0.10556604256910118, 1988.3050719028138, 4.0, 40.0, 18.0, 0.6150523684283697], [1346, 7.465932691532797, 183.1350873155065, 4.0, 40.0, 18.0, 0.7480246811260532], [1347, 7.265061941242257, 1125.8668851620005, 4.0, 40.0, 18.0, 0.45221003013275557], [1348, 6.122643015680119, 505.9987529538203, 4.0, 40.0, 18.0, 0.5931976591557482], [1349, 14.052223142753279, 1646.8249220778218, 4.0, 40.0, 18.0, 0.26745803017459796], [1350, 2.285564024387261, 1736.8013731136873, 4.0, 40.0, 18.0, 0.555062623731482], [1351, 6.739586680709102, 1344.7777862749588, 4.0, 40.0, 18.0, 0.4484823896795065], [1352, 7.164191395642688, 1117.1305572109643, 4.0, 40.0, 18.0, 0.4554428400556505], [1353, 14.849823545415909, 60.51695642447782, 4.0, 40.0, 18.0, 0.7833688915537796], [1354, 1.0194288663691398, 643.6275149273292, 4.0, 40.0, 18.0, 0.7469355692128384], [1355, 4.224066021987014, 
1850.6837171253924, 4.0, 40.0, 18.0, 0.4866177807894939], [1356, 5.28716431248343, 1198.5718831747179, 4.0, 40.0, 18.0, 0.501774866899628], [1357, 8.340162678424349, 21.32494338213747, 4.0, 40.0, 18.0, 1.0], [1358, 9.719696774086742, 1841.7448205686303, 4.0, 40.0, 18.0, 0.35093579334605834], [1359, 16.814467905302354, 436.53341151347615, 4.0, 40.0, 18.0, 0.3242747967897373], [1360, 7.136016058138387, 1805.174997613296, 4.0, 40.0, 18.0, 0.41233348649200013], [1361, 25.680360170614822, 1916.2654263793163, 4.0, 40.0, 18.0, 0.09868157245353541], [1362, 23.099160932893476, 53.988658358854174, 4.0, 40.0, 18.0, 0.5190200093701248], [1363, 8.287422700055004, 984.1446901483798, 4.0, 40.0, 18.0, 0.43950859004598525], [1364, 12.058783954039447, 133.8844636343489, 4.0, 40.0, 18.0, 0.6463895822376701], [1365, 9.570013135856426, 661.2203894418175, 4.0, 40.0, 18.0, 0.44918698955353176], [1366, 17.539737847664163, 1569.7905336636911, 4.0, 40.0, 18.0, 0.20686584129155425], [1367, 16.375565215042805, 57.32755223864203, 4.0, 40.0, 18.0, 0.7305089329251796], [1368, 20.460560887932456, 855.8125840564995, 4.0, 40.0, 18.0, 0.1991373075193943], [1369, 4.55623263295522, 1282.14661520889, 4.0, 40.0, 18.0, 0.5165644340953469], [1370, 7.150778970106229, 881.760361028266, 4.0, 40.0, 18.0, 0.482389076448841], [1371, 6.420536299167115, 128.6662795596822, 4.0, 40.0, 18.0, 1.0], [1372, 9.09740701735269, 1965.287890119186, 4.0, 40.0, 18.0, 0.36004562132144996], [1373, 10.218863171498334, 505.9085800974442, 4.0, 40.0, 18.0, 0.464425772046037], [1374, 14.65441945974307, 252.30961098180072, 4.0, 40.0, 18.0, 0.44288392155964335], [1375, 16.795703707285128, 226.3918446941288, 4.0, 40.0, 18.0, 0.4068243099239856], [1376, 5.823036947728013, 849.3043870869254, 4.0, 40.0, 18.0, 0.5274785131092828], [1377, 24.154257832958088, 387.69631372576947, 4.0, 40.0, 18.0, 0.2053197251258709], [1378, 9.31706074414023, 123.89462226580088, 4.0, 40.0, 18.0, 0.7748116244169757], [1379, 8.065688030040926, 653.1942686991022, 4.0, 40.0, 18.0, 0.4927263221995867], [1380, 6.668595913838589, 186.8616931993778, 4.0, 40.0, 18.0, 0.7760405056050108], [1381, 3.533939419568749, 1290.0786579273663, 4.0, 40.0, 18.0, 0.5489046860897507], [1382, 1.7876579280540317, 646.4495812386665, 4.0, 40.0, 18.0, 0.7155867567216679], [1383, 14.54602461532443, 104.6725867910299, 4.0, 40.0, 18.0, 0.6183518069423478], [1384, 9.720111922566346, 1204.4440584728006, 4.0, 40.0, 18.0, 0.3858636006926133], [1385, 4.2663777942188705, 1708.2784897037623, 4.0, 40.0, 18.0, 0.493654176496372], [1386, 5.912671009190981, 125.81832702215486, 4.0, 40.0, 18.0, 1.0], [1387, 2.864431626963559, 88.6105652125078, 4.0, 40.0, 18.0, 1.0], [1388, 8.828281335803887, 1660.0172449378802, 4.0, 40.0, 18.0, 0.3796882638638138], [1389, 16.976929032103577, 38.75175542146202, 4.0, 40.0, 18.0, 1.0], [1390, 6.173275727101157, 21.81769529618452, 4.0, 40.0, 18.0, 1.0], [1391, 6.394661678869747, 1060.9792765034206, 4.0, 40.0, 18.0, 0.4829041861212237], [1392, 8.091634846899952, 452.38127858847275, 4.0, 40.0, 18.0, 0.5442641075440612], [1393, 16.905222691580484, 1451.3963081595855, 4.0, 40.0, 18.0, 0.22252343435978125], [1394, 5.147401899172413, 481.46956022354743, 4.0, 40.0, 18.0, 0.6371341839088344], [1395, 6.327297979245694, 127.24141945635041, 4.0, 40.0, 18.0, 1.0], [1396, 7.632596554337456, 921.2564358208746, 4.0, 40.0, 18.0, 0.4637740331925716], [1397, 25.825657942170633, 205.80766295754094, 4.0, 40.0, 18.0, 0.24137489469123433], [1398, 6.0584440628799365, 500.5116098297718, 4.0, 40.0, 18.0, 
0.5973426126154691], [1399, 7.9131059479424115, 1597.757707464418, 4.0, 40.0, 18.0, 0.4046379655980164], [1400, 7.286783127722787, 127.77346965847144, 4.0, 40.0, 18.0, 0.8609159256657956], [1401, 3.113734526985728, 642.4370082728271, 4.0, 40.0, 18.0, 0.6647394288477444], [1402, 5.566438287554614, 861.2438352864752, 4.0, 40.0, 18.0, 0.5338140512797686], [1403, 4.090170831269408, 1607.9498981143893, 4.0, 40.0, 18.0, 0.5054999709022908], [1404, 5.519173513705386, 356.23734320744296, 4.0, 40.0, 18.0, 0.6788731723037061], [1405, 2.7230058978339438, 1976.9569970489736, 4.0, 40.0, 18.0, 0.5258695281514262], [1406, 4.539128976956881, 1586.7407604409423, 4.0, 40.0, 18.0, 0.49329369871567663], [1407, 8.763996818005866, 1822.4813705337447, 4.0, 40.0, 18.0, 0.37333197806542145], [1408, 9.172136762076102, 1254.3759358401282, 4.0, 40.0, 18.0, 0.39577254459307415], [1409, 22.55971215400875, 317.4500568026399, 4.0, 40.0, 18.0, 0.24816005365660937], [1410, 0.9795654593731009, 1976.1116351746173, 4.0, 40.0, 18.0, 0.5847560014854973], [1411, 8.508525530073715, 122.17045542643952, 4.0, 40.0, 18.0, 0.8146383914943712], [1412, 7.7627152000885555, 571.6975814005536, 4.0, 40.0, 18.0, 0.5199969511981349], [1413, 4.885724552599617, 747.6209323274771, 4.0, 40.0, 18.0, 0.5763116576610152], [1414, 7.482250265907595, 659.9314748865169, 4.0, 40.0, 18.0, 0.5089215291255028], [1415, 16.63449458091894, 149.8296927390698, 4.0, 40.0, 18.0, 0.4762899092703998], [1416, 8.199745593319292, 255.7337656073644, 4.0, 40.0, 18.0, 0.6435593469955595], [1417, 10.396870821616815, 414.165999681726, 4.0, 40.0, 18.0, 0.4866278132533303], [1418, 3.8197082321208518, 326.2534529390826, 4.0, 40.0, 18.0, 0.7650955605387477], [1419, 5.107839148855279, 206.46602475137428, 4.0, 40.0, 18.0, 0.8168528929261701], [1420, 7.136623751745773, 838.3287723587431, 4.0, 40.0, 18.0, 0.4887489743942604], [1421, 11.018485421148723, 1071.1619237716827, 4.0, 40.0, 18.0, 0.3658433545926178], [1422, 7.411689938374195, 174.4799242029652, 4.0, 40.0, 18.0, 0.7625153719057864], [1423, 5.93757187065406, 179.9555009896015, 4.0, 40.0, 18.0, 0.8166183943957896], [1424, 7.147622801389404, 1309.328395659426, 4.0, 40.0, 18.0, 0.44059117539631326], [1425, 7.748731237222881, 630.7807328388087, 4.0, 40.0, 18.0, 0.5068765977587307], [1426, 4.054742971073745, 1242.8736359334082, 4.0, 40.0, 18.0, 0.5362693768841398], [1427, 16.48206583064995, 756.240680967567, 4.0, 40.0, 18.0, 0.2779619242027664], [1428, 9.319666056394937, 118.7103763672889, 4.0, 40.0, 18.0, 0.7873644770570118], [1429, 9.40316753148614, 1522.841419946728, 4.0, 40.0, 18.0, 0.3729787097983885], [1430, 21.0307354674067, 220.1224659093743, 4.0, 40.0, 18.0, 0.3182457651493458], [1431, 3.5075843326134737, 136.01829096185543, 4.0, 40.0, 18.0, 1.0], [1432, 0.9210005823611191, 184.1876577957321, 4.0, 40.0, 18.0, 1.0], [1433, 9.903996728830823, 1459.2835815847238, 4.0, 40.0, 18.0, 0.36498620823531897], [1434, 19.347748426320095, 57.2984845756346, 4.0, 40.0, 18.0, 0.6172382959089595], [1435, 8.35257194781639, 191.7104433653863, 4.0, 40.0, 18.0, 0.7013766311999595], [1436, 3.6159963555167547, 912.5518876786311, 4.0, 40.0, 18.0, 0.5921725245655639], [1437, 10.585722986769962, 534.282518908241, 4.0, 40.0, 18.0, 0.447736256462454], [1438, 2.360392659617932, 1389.0127337179586, 4.0, 40.0, 18.0, 0.5798697076504878], [1439, 6.423309054741375, 1845.9969610469705, 4.0, 40.0, 18.0, 0.42773102917721917], [1440, 4.9310184094062235, 189.8114709679628, 4.0, 40.0, 18.0, 0.8459776501651886], [1441, 6.756472862851727, 609.2040494682731, 4.0, 
40.0, 18.0, 0.5430375240419054], [1442, 22.14181910290872, 1369.791741661247, 4.0, 40.0, 18.0, 0.14771846747461068], [1443, 7.485527111046827, 1682.4025833949743, 4.0, 40.0, 18.0, 0.40995008233885827], [1444, 7.380650671748898, 79.20259967729757, 4.0, 40.0, 18.0, 1.0], [1445, 20.54353796109101, 166.58739838706174, 4.0, 40.0, 18.0, 0.36704344083301926], [1446, 5.673755828504657, 1085.7861657909748, 4.0, 40.0, 18.0, 0.5015361614639686], [1447, 9.984443326715368, 1808.5984835855545, 4.0, 40.0, 18.0, 0.34632257908593983], [1448, 13.219226349241204, 161.59723985510686, 4.0, 40.0, 18.0, 0.5634615415650952], [1449, 7.021111811196257, 111.67981667752437, 4.0, 40.0, 18.0, 1.0], [1450, 8.540184283794405, 1018.9235970172816, 4.0, 40.0, 18.0, 0.42950546783587545], [1451, 5.459702810904283, 1340.8923404361194, 4.0, 40.0, 18.0, 0.484204256924139], [1452, 14.918864534241061, 168.81417198867373, 4.0, 40.0, 18.0, 0.5024032368379349], [1453, 11.725688011767506, 1591.5073324979096, 4.0, 40.0, 18.0, 0.3178516084122688], [1454, 7.13849244803573, 137.65056449485706, 4.0, 40.0, 18.0, 0.8402491581825245], [1455, 5.007171095240031, 80.09991727899396, 4.0, 40.0, 18.0, 1.0], [1456, 5.264111943684236, 448.8296683598004, 4.0, 40.0, 18.0, 0.6452261103901248], [1457, 6.531901181647383, 115.59103480830353, 4.0, 40.0, 18.0, 1.0], [1458, 4.915021301326687, 1903.0619053882313, 4.0, 40.0, 18.0, 0.4642063693207182], [1459, 9.063858586782045, 158.64134672705455, 4.0, 40.0, 18.0, 0.7192582870148209], [1460, 9.000356201275288, 171.4955053878099, 4.0, 40.0, 18.0, 0.7025235473310786], [1461, 3.6207900837374725, 1227.212346627141, 4.0, 40.0, 18.0, 0.5523525338667007], [1462, 4.559176583172796, 962.7969539049454, 4.0, 40.0, 18.0, 0.552272997307918], [1463, 7.097479716721113, 831.189625528717, 4.0, 40.0, 18.0, 0.4909462324354141], [1464, 7.853843590651697, 1345.2278877031283, 4.0, 40.0, 18.0, 0.42007204358827827], [1465, 15.576567455340557, 442.91370847226943, 4.0, 40.0, 18.0, 0.3499360268169618], [1466, 3.8286033331003066, 1104.8815871998224, 4.0, 40.0, 18.0, 0.558697590944733], [1467, 3.1728991282796795, 1190.268697728775, 4.0, 40.0, 18.0, 0.5714213279096731], [1468, 4.7080831155969864, 1961.9041232881436, 4.0, 40.0, 18.0, 0.4670007609372295], [1469, 9.383962574208145, 47.86997075185706, 4.0, 40.0, 18.0, 1.0], [1470, 5.716170715660127, 1605.4449869355064, 4.0, 40.0, 18.0, 0.4583364631760671], [1471, 6.421737775921175, 766.4220305576592, 4.0, 40.0, 18.0, 0.5219588632484173], [1472, 8.320922043463431, 733.2403755610082, 4.0, 40.0, 18.0, 0.471051349047567], [1473, 5.5026756100814165, 1336.4147369295201, 4.0, 40.0, 18.0, 0.4833100515238367], [1474, 2.873128626161143, 879.5932883855462, 4.0, 40.0, 18.0, 0.6244993168477565], [1475, 10.094220072413082, 1078.3724303164206, 4.0, 40.0, 18.0, 0.38700420057796237], [1476, 22.45088621608383, 1659.564225464918, 4.0, 40.0, 18.0, 0.13405650221037882], [1477, 6.4952578245604755, 1965.8026158327255, 4.0, 40.0, 18.0, 0.42051057755491655], [1478, 8.254938813595167, 198.26463031907002, 4.0, 40.0, 18.0, 0.6973661418436117], [1479, 6.6693098475435395, 95.97260340666128, 4.0, 40.0, 18.0, 1.0], [1480, 8.960095033469251, 718.1037809228001, 4.0, 40.0, 18.0, 0.4555794401231723], [1481, 4.185226598798953, 551.3340797213306, 4.0, 40.0, 18.0, 0.649891517448095], [1482, 7.669206913490655, 142.53548443934395, 4.0, 40.0, 18.0, 0.8055923428312685], [1483, 4.380209233970239, 55.60462969995695, 4.0, 40.0, 18.0, 1.0], [1484, 7.980059686942393, 1784.7853298189943, 4.0, 40.0, 18.0, 0.3941287284858618], [1485, 
0.8487115857203438, 710.7979778009479, 4.0, 40.0, 18.0, 0.7366317340803488], [1486, 23.830501879935564, 367.90587245598323, 4.0, 40.0, 18.0, 0.2144417984749653], [1487, 8.147244185878588, 1649.1988452971443, 4.0, 40.0, 18.0, 0.3966602250652906], [1488, 9.877801977267824, 1236.932159960166, 4.0, 40.0, 18.0, 0.37992647973807897], [1489, 4.90405386155661, 1364.0687250186543, 4.0, 40.0, 18.0, 0.4988081121931583], [1490, 4.219960423087258, 1433.9182882997545, 4.0, 40.0, 18.0, 0.5140779610688632], [1491, 6.960572216316067, 45.60916411877682, 4.0, 40.0, 18.0, 1.0], [1492, 7.607286231016997, 1756.212973066769, 4.0, 40.0, 18.0, 0.40410711860611964], [1493, 7.972396638388323, 103.45973223503046, 4.0, 40.0, 18.0, 1.0], [1494, 8.588974145422014, 115.21985504896168, 4.0, 40.0, 18.0, 0.8310331820405117], [1495, 6.987713123796789, 1816.97476756514, 4.0, 40.0, 18.0, 0.4153834115122825], [1496, 18.630269174539826, 1842.9053613159588, 4.0, 40.0, 18.0, 0.18060055847971512], [1497, 6.200520295863679, 1917.3950639713453, 4.0, 40.0, 18.0, 0.43008645845583837], [1498, 7.615918266417502, 467.0725890266668, 4.0, 40.0, 18.0, 0.5549543656189154], [1499, 13.307852026928668, 1276.8692703322868, 4.0, 40.0, 18.0, 0.3003517095877196], [1500, 10.064607743630653, 305.38473599614144, 4.0, 40.0, 18.0, 0.5448285916695026], [1501, 4.709094586280131, 1449.619378054903, 4.0, 40.0, 18.0, 0.49799362030237193], [1502, 4.830233562377837, 191.0888654280777, 4.0, 40.0, 18.0, 0.8486572829321771], [1503, 10.397938341200392, 1192.4401813826155, 4.0, 40.0, 18.0, 0.3707346897700712], [1504, 6.744045772849609, 1623.913009423283, 4.0, 40.0, 18.0, 0.43091237613891237], [1505, 8.620423257196446, 1605.5347996379483, 4.0, 40.0, 18.0, 0.38718495978718465], [1506, 2.7539527756474196, 1666.7973145429144, 4.0, 40.0, 18.0, 0.5440997199674483], [1507, 6.986253108067194, 136.48008176877158, 4.0, 40.0, 18.0, 0.8506737431950953], [1508, 13.425107250505567, 53.51126764306746, 4.0, 40.0, 18.0, 1.0], [1509, 5.726309774187078, 1037.17538354635, 4.0, 40.0, 18.0, 0.5054826574138978], [1510, 5.2543291010687, 640.5337955897517, 4.0, 40.0, 18.0, 0.5865578833066498], [1511, 6.964501971213778, 75.4965424659672, 4.0, 40.0, 18.0, 1.0], [1512, 1.2573508892105674, 1567.06544289013, 4.0, 40.0, 18.0, 0.6035673937535503], [1513, 7.615602855085757, 1465.4885661681344, 4.0, 40.0, 18.0, 0.4183867721165828], [1514, 15.734496303273243, 1729.4187601811373, 4.0, 40.0, 18.0, 0.23265730411934044], [1515, 22.111617752957965, 178.00951168217637, 4.0, 40.0, 18.0, 0.3247223965745285], [1516, 5.272018057434318, 902.5477051202643, 4.0, 40.0, 18.0, 0.5371807742235534], [1517, 13.485349491868027, 1653.6600863242463, 4.0, 40.0, 18.0, 0.27859676627349844], [1518, 8.452271206511048, 187.14470296177936, 4.0, 40.0, 18.0, 0.70318017012652], [1519, 9.824187525348615, 1250.867803198726, 4.0, 40.0, 18.0, 0.3802090700265861], [1520, 7.535922362931124, 826.1707695009538, 4.0, 40.0, 18.0, 0.4788668629670954], [1521, 6.557982765004695, 661.1834130318531, 4.0, 40.0, 18.0, 0.537754311538409], [1522, 2.4700428738059377, 117.55284934995986, 4.0, 40.0, 18.0, 1.0], [1523, 8.287639368354647, 63.02474683194713, 4.0, 40.0, 18.0, 1.0], [1524, 25.176251828780927, 1849.6062195943505, 4.0, 40.0, 18.0, 0.10436698962403848], [1525, 6.4387423928073115, 183.86033083199905, 4.0, 40.0, 18.0, 0.7898335893714638], [1526, 7.244811254458893, 164.021295935586, 4.0, 40.0, 18.0, 0.7853869983860317], [1527, 5.97809335147173, 1438.2181314880895, 4.0, 40.0, 18.0, 0.46221550714787474], [1528, 6.883738392003519, 771.3117976600419, 
4.0, 40.0, 18.0, 0.5068373102386473], [1529, 22.582957946437688, 992.1618455315597, 4.0, 40.0, 18.0, 0.1599705319409927], [1530, 5.024551319984834, 1829.8700060930769, 4.0, 40.0, 18.0, 0.4649121746169556], [1531, 7.006263563357525, 1019.2135454245756, 4.0, 40.0, 18.0, 0.470015976938189], [1532, 6.852418448016522, 310.20841793395005, 4.0, 40.0, 18.0, 0.6550366633609217], [1533, 16.785762084605487, 1237.0837480758935, 4.0, 40.0, 18.0, 0.23516399147759037], [1534, 7.579072285555343, 835.9238940359734, 4.0, 40.0, 18.0, 0.47635669865830443], [1535, 8.220778097404907, 1814.7100596899054, 4.0, 40.0, 18.0, 0.38654315636533265], [1536, 3.3149561544562367, 294.92083355666637, 4.0, 40.0, 18.0, 0.8081213075033146], [1537, 4.988566134673856, 1729.7295913850178, 4.0, 40.0, 18.0, 0.47149223187710665], [1538, 6.291919924755264, 1013.2081301490877, 4.0, 40.0, 18.0, 0.49111854488694073], [1539, 8.210123389294777, 132.83571857393963, 4.0, 40.0, 18.0, 0.8023259709401551], [1540, 6.0956817478497705, 407.4492472401333, 4.0, 40.0, 18.0, 0.6315458059797406], [1541, 7.517749593949734, 30.114625427491088, 4.0, 40.0, 18.0, 1.0], [1542, 5.709178216642644, 222.05231710538783, 4.0, 40.0, 18.0, 0.7737139533701478], [1543, 5.3265457462019, 619.5962546307791, 4.0, 40.0, 18.0, 0.5891373514914607], [1544, 8.943530596053572, 148.59445754831265, 4.0, 40.0, 18.0, 0.7408676665509704], [1545, 7.96712057254748, 1193.457412722449, 4.0, 40.0, 18.0, 0.42851853175621196], [1546, 15.140816417249377, 121.88343581080954, 4.0, 40.0, 18.0, 0.5623407367180397], [1547, 7.711760890047227, 1914.0161666749566, 4.0, 40.0, 18.0, 0.3948694863190791], [1548, 10.393619046889896, 579.3342631959591, 4.0, 40.0, 18.0, 0.4430654860752856], [1549, 3.2531421951043216, 1613.5838883431766, 4.0, 40.0, 18.0, 0.5315494793734886], [1550, 5.6624236525372496, 1721.256554564797, 4.0, 40.0, 18.0, 0.4532343279865594], [1551, 9.059465828231772, 1845.7991067620585, 4.0, 40.0, 18.0, 0.3657284658490221], [1552, 4.13821052961041, 1044.155612357326, 4.0, 40.0, 18.0, 0.5557068687960173], [1553, 7.557002635510009, 1159.7141364254624, 4.0, 40.0, 18.0, 0.4417935177237833], [1554, 5.422952173682925, 788.888096099482, 4.0, 40.0, 18.0, 0.55040046200748], [1555, 6.341920872683209, 122.49598616193731, 4.0, 40.0, 18.0, 1.0], [1556, 5.942486163575136, 975.413009090001, 4.0, 40.0, 18.0, 0.5062841755766976], [1557, 9.256527097173086, 31.491395161893806, 4.0, 40.0, 18.0, 1.0], [1558, 5.111516108017895, 111.55227211045029, 4.0, 40.0, 18.0, 1.0], [1559, 23.319342545464835, 351.0544985065413, 4.0, 40.0, 18.0, 0.22640154761432907], [1560, 7.67602454730259, 117.96612791305735, 4.0, 40.0, 18.0, 0.8850806152493458], [1561, 2.5803063109338495, 127.32548532685276, 4.0, 40.0, 18.0, 1.0], [1562, 8.483667436909156, 187.94993935838662, 4.0, 40.0, 18.0, 0.7008759650854324], [1563, 8.505485960172487, 560.6523647245923, 4.0, 40.0, 18.0, 0.499923949723935], [1564, 4.927322378695388, 1011.9312471510475, 4.0, 40.0, 18.0, 0.53361455171533], [1565, 5.97610692768192, 90.37361757128325, 4.0, 40.0, 18.0, 1.0], [1566, 17.196907544178057, 173.72181873992542, 4.0, 40.0, 18.0, 0.43659016124799227], [1567, 16.373602472648145, 1484.558772660294, 4.0, 40.0, 18.0, 0.23051325615548954], [1568, 11.25784038332165, 24.365463959628595, 4.0, 40.0, 18.0, 1.0], [1569, 4.539114559930617, 117.82279114081426, 4.0, 40.0, 18.0, 1.0], [1570, 8.137055769416175, 129.55918644115124, 4.0, 40.0, 18.0, 0.8131025884034608], [1571, 7.17321187318156, 1543.3733106520424, 4.0, 40.0, 18.0, 0.4245150767818461], [1572, 4.971997291966351, 
1875.4278383080573, 4.0, 40.0, 18.0, 0.4640214638118571], [1573, 5.389884768575948, 678.0938355673694, 4.0, 40.0, 18.0, 0.5731785904098703], [1574, 6.5352777858328155, 46.48729679491143, 4.0, 40.0, 18.0, 1.0], [1575, 7.083226106729316, 208.11762961437992, 4.0, 40.0, 18.0, 0.7328213729322276], [1576, 7.615889523391522, 98.09827236727384, 4.0, 40.0, 18.0, 1.0], [1577, 4.487274195875061, 1933.6112197503096, 4.0, 40.0, 18.0, 0.4747212362454081], [1578, 13.815818479278862, 1644.0797488667242, 4.0, 40.0, 18.0, 0.2724238693840501], [1579, 6.98248575952951, 93.64992281827371, 4.0, 40.0, 18.0, 1.0], [1580, 6.7717044778518085, 1001.9967188413315, 4.0, 40.0, 18.0, 0.4783501206022522], [1581, 1.530721477890057, 955.9681238030797, 4.0, 40.0, 18.0, 0.662356300931181], [1582, 9.21349684244142, 1534.8545760129753, 4.0, 40.0, 18.0, 0.37700484781190413], [1583, 2.4891278043235117, 760.8017795237251, 4.0, 40.0, 18.0, 0.6612081917676421], [1584, 4.851597685329531, 843.4675315618787, 4.0, 40.0, 18.0, 0.5603282283857064], [1585, 6.284590998314919, 1393.1852870210366, 4.0, 40.0, 18.0, 0.45672079426792156], [1586, 8.573339101412763, 1647.1529565229707, 4.0, 40.0, 18.0, 0.3861399976115557], [1587, 12.485121521140268, 1198.0509007228854, 4.0, 40.0, 18.0, 0.3225931511156782], [1588, 7.188460683795303, 893.7962546201172, 4.0, 40.0, 18.0, 0.47960952583786803], [1589, 13.77065377173318, 1093.5291508078062, 4.0, 40.0, 18.0, 0.3026012238493006], [1590, 11.038071220867437, 1200.4785855359808, 4.0, 40.0, 18.0, 0.35523031589033155], [1591, 11.077642545356793, 112.84857477468286, 4.0, 40.0, 18.0, 0.7286889826134711], [1592, 8.047021404532785, 1430.7417168172196, 4.0, 40.0, 18.0, 0.41023890436230936], [1593, 21.198841542350447, 1594.2306949930814, 4.0, 40.0, 18.0, 0.1517343546149145], [1594, 20.737818059619038, 60.35801817259937, 4.0, 40.0, 18.0, 0.5584882001733179], [1595, 17.65938215847581, 1675.7858999043176, 4.0, 40.0, 18.0, 0.2009636266621825], [1596, 5.523877592864851, 52.319801558893396, 4.0, 40.0, 18.0, 1.0], [1597, 16.83547101508357, 366.7845126799053, 4.0, 40.0, 18.0, 0.34365724979908163], [1598, 5.933608808236918, 636.0701978046965, 4.0, 40.0, 18.0, 0.5641536101602254], [1599, 8.847791782780538, 769.141049772764, 4.0, 40.0, 18.0, 0.4510547688311206], [1600, 7.47757269856208, 1141.4605021462014, 4.0, 40.0, 18.0, 0.4452828344393443], [1601, 6.266790430490555, 1636.7865830191788, 4.0, 40.0, 18.0, 0.4423811551257093], [1602, 5.658318074957867, 657.8552474471037, 4.0, 40.0, 18.0, 0.5684322662153971], [1603, 12.366747639486434, 30.243731314940444, 4.0, 40.0, 18.0, 1.0], [1604, 5.6856981753668565, 646.890151560126, 4.0, 40.0, 18.0, 0.5699237423176763], [1605, 4.102901479730693, 1539.3520779954683, 4.0, 40.0, 18.0, 0.5098209125389793], [1606, 9.486710768467919, 115.7111630325097, 4.0, 40.0, 18.0, 0.7878439089593062], [1607, 1.4851286867285634, 1927.9575067779454, 4.0, 40.0, 18.0, 0.5699106159404842], [1608, 7.911733324718017, 1211.5728815837865, 4.0, 40.0, 18.0, 0.42847589659378715], [1609, 7.465279783636339, 158.83762958229187, 4.0, 40.0, 18.0, 0.7845883288860804], [1610, 10.061320917690406, 712.7425145613392, 4.0, 40.0, 18.0, 0.4282152326245007], [1611, 7.911532290734311, 1659.4390673425883, 4.0, 40.0, 18.0, 0.4016308131481007], [1612, 0.7852919721511977, 713.8500278258549, 4.0, 40.0, 18.0, 0.7384753120770747], [1613, 6.798342348286864, 1226.7443513302426, 4.0, 40.0, 18.0, 0.45563227999479977], [1614, 9.490832905765648, 186.48638196422908, 4.0, 40.0, 18.0, 0.6636319667081898], [1615, 0.1686326172120972, 
112.96335802917251, 4.0, 40.0, 18.0, 1.0], [1616, 4.3852270924502434, 1413.1300533350884, 4.0, 40.0, 18.0, 0.5106265546088355], [1617, 7.569800399757582, 305.361018535936, 4.0, 40.0, 18.0, 0.6312764488097602], [1618, 8.995053900413623, 753.869112218757, 4.0, 40.0, 18.0, 0.4494382332958127], [1619, 5.6886898713491245, 116.17153114751129, 4.0, 40.0, 18.0, 1.0], [1620, 6.465421504831174, 328.68811271429234, 4.0, 40.0, 18.0, 0.6582951005915435], [1621, 3.033598833121677, 60.74334204492873, 4.0, 40.0, 18.0, 1.0], [1622, 1.2218207419255158, 993.9028509830154, 4.0, 40.0, 18.0, 0.6683785854930143], [1623, 5.647488732281726, 888.4292704592026, 4.0, 40.0, 18.0, 0.5272190185840033], [1624, 4.975118572150626, 1604.9612978741588, 4.0, 40.0, 18.0, 0.47919340951601636], [1625, 8.595509165067217, 616.4978532127075, 4.0, 40.0, 18.0, 0.48472541684070736], [1626, 3.8936450683992607, 952.2082528753598, 4.0, 40.0, 18.0, 0.5764878040995821], [1627, 14.95945571875557, 1261.3537089534711, 4.0, 40.0, 18.0, 0.2679700136085218], [1628, 21.489258931217186, 1390.766249458563, 4.0, 40.0, 18.0, 0.15512650429283778], [1629, 9.058376362837839, 1027.7094762257932, 4.0, 40.0, 18.0, 0.4158554383571835], [1630, 6.745155297252995, 104.48587457215604, 4.0, 40.0, 18.0, 1.0], [1631, 6.297808238511056, 148.72929406847962, 4.0, 40.0, 18.0, 0.8559034195789268], [1632, 7.112354676232983, 101.03525569501069, 4.0, 40.0, 18.0, 1.0], [1633, 6.621417997172424, 934.6571671431897, 4.0, 40.0, 18.0, 0.49087067985461497], [1634, 9.2601626407869, 93.45293263013231, 4.0, 40.0, 18.0, 1.0], [1635, 4.94839046578994, 1433.201264508306, 4.0, 40.0, 18.0, 0.49198221309731804], [1636, 7.3770774159517485, 26.21984462745555, 4.0, 40.0, 18.0, 1.0], [1637, 15.259939818081723, 1838.749858724377, 4.0, 40.0, 18.0, 0.23747799788069238], [1638, 1.6316163727905535, 1997.8689443894837, 4.0, 40.0, 18.0, 0.5609012401618487], [1639, 21.61159647687823, 1270.8469648630462, 4.0, 40.0, 18.0, 0.15848005922279051], [1640, 6.953656638490959, 128.4157837046645, 4.0, 40.0, 18.0, 0.8853194288383118], [1641, 4.770794886720471, 1644.9915509672323, 4.0, 40.0, 18.0, 0.48271060946564276], [1642, 7.040030938905816, 1505.6237457215211, 4.0, 40.0, 18.0, 0.4302584230611116], [1643, 4.198527956123838, 1327.5249866495, 4.0, 40.0, 18.0, 0.5237328477055591], [1644, 3.80275626241089, 1191.2424626352936, 4.0, 40.0, 18.0, 0.549944100648603], [1645, 7.30616210679718, 467.2230107454874, 4.0, 40.0, 18.0, 0.5652872563931881], [1646, 18.62998807007149, 1454.2411967535686, 4.0, 40.0, 18.0, 0.193431661063486], [1647, 3.9524177590718677, 645.3976457633727, 4.0, 40.0, 18.0, 0.6324148618618348], [1648, 9.259919710583842, 1213.9180149508372, 4.0, 40.0, 18.0, 0.39660378977259375], [1649, 8.98356941878831, 286.804045740392, 4.0, 40.0, 18.0, 0.5925364993975425], [1650, 4.0754944198128005, 1376.6624989827112, 4.0, 40.0, 18.0, 0.5234017131724842], [1651, 20.614821226854374, 1345.0195517438349, 4.0, 40.0, 18.0, 0.16836566916039566], [1652, 11.235265293180639, 1569.0148716555143, 4.0, 40.0, 18.0, 0.32935230267809584], [1653, 0.640553290943116, 1040.434160903121, 4.0, 40.0, 18.0, 0.6838550977546844], [1654, 7.116371545043857, 1767.5494074908672, 4.0, 40.0, 18.0, 0.4145784334456293], [1655, 2.3344891794774325, 658.2178271523104, 4.0, 40.0, 18.0, 0.6909339400600828], [1656, 8.61949641144609, 161.59626815604886, 4.0, 40.0, 18.0, 0.7325055206590708], [1657, 9.499586520503968, 188.40984985917711, 4.0, 40.0, 18.0, 0.6610333494786171], [1658, 10.891146391818191, 712.0602442357903, 4.0, 40.0, 18.0, 0.4082875018740277], 
[1659, 12.473805884382497, 1066.4056951642303, 4.0, 40.0, 18.0, 0.3328106575333431], [1660, 8.993031963239025, 1206.3474725113097, 4.0, 40.0, 18.0, 0.4033603946066045], [1661, 8.923773680054712, 1395.1969587319325, 4.0, 40.0, 18.0, 0.3922806963897056], [1662, 10.882054019046326, 659.2612501586011, 4.0, 40.0, 18.0, 0.4162864557272494], [1663, 7.798092311334155, 69.88553316718503, 4.0, 40.0, 18.0, 1.0], [1664, 5.905958258587269, 427.398237054045, 4.0, 40.0, 18.0, 0.6299106627822287], [1665, 12.91251293504547, 1034.7296958756046, 4.0, 40.0, 18.0, 0.3255449221157862], [1666, 7.675088266463692, 135.75529350839068, 4.0, 40.0, 18.0, 0.8195281933273749], [1667, 10.91162911921838, 1203.496110055343, 4.0, 40.0, 18.0, 0.3578754283845784], [1668, 3.99511014433471, 1075.01932289308, 4.0, 40.0, 18.0, 0.5567028309953953], [1669, 15.170245914615203, 74.52237344890682, 4.0, 40.0, 18.0, 0.688811699115677], [1670, 15.806257485795008, 672.344855083556, 4.0, 40.0, 18.0, 0.3021638140504407], [1671, 9.63814333889086, 888.0692858547285, 4.0, 40.0, 18.0, 0.4159430760380338], [1672, 4.062994863761005, 1400.1018974708004, 4.0, 40.0, 18.0, 0.5218857895230017], [1673, 14.275762764849793, 814.0398107743806, 4.0, 40.0, 18.0, 0.31716073638197323], [1674, 18.0647737180295, 684.0561790767533, 4.0, 40.0, 18.0, 0.2563523141479301], [1675, 6.918418318805938, 1312.8630956655209, 4.0, 40.0, 18.0, 0.44610895921061156], [1676, 17.609560699900108, 1449.6242792217233, 4.0, 40.0, 18.0, 0.21029334878178543], [1677, 6.858103420229911, 1596.7709393642663, 4.0, 40.0, 18.0, 0.4295534628649423], [1678, 6.646193081492147, 1020.1458001567873, 4.0, 40.0, 18.0, 0.47999532222399177], [1679, 14.333299259831545, 402.4857354677446, 4.0, 40.0, 18.0, 0.39018956793815157], [1680, 3.9357227257517198, 1009.6160389492817, 4.0, 40.0, 18.0, 0.5670290580913139], [1681, 6.235188591895676, 622.2576374884144, 4.0, 40.0, 18.0, 0.5571805936549958], [1682, 2.0887038020060364, 40.56246359817597, 4.0, 40.0, 18.0, 1.0], [1683, 2.4664850934228433, 867.407067948808, 4.0, 40.0, 18.0, 0.6416755508211597], [1684, 4.852197523168505, 933.149900718775, 4.0, 40.0, 18.0, 0.5464985834764098], [1685, 0.6069216968144497, 1168.0099409816653, 4.0, 40.0, 18.0, 0.6680179537471249], [1686, 3.6613774145206364, 1249.6165792574493, 4.0, 40.0, 18.0, 0.5485991145035783], [1687, 5.398352808918952, 669.7048519657511, 4.0, 40.0, 18.0, 0.5747736781707152], [1688, 12.961855027046893, 1162.0163371867548, 4.0, 40.0, 18.0, 0.31488500613896575], [1689, 0.3031297699446073, 129.185641663362, 4.0, 40.0, 18.0, 1.0], [1690, 15.762586835791058, 82.92259714203308, 4.0, 40.0, 18.0, 0.6365326100045656], [1691, 15.479861791265462, 78.79329070177506, 4.0, 40.0, 18.0, 0.6609007784565603], [1692, 22.56249862676315, 1738.761884201823, 4.0, 40.0, 18.0, 0.13078090436600118], [1693, 19.112957069206786, 600.0047537288826, 4.0, 40.0, 18.0, 0.2484033785515952], [1694, 15.888384178132887, 1872.2849137221901, 4.0, 40.0, 18.0, 0.2251701387032182], [1695, 21.73302740338143, 43.675754892077514, 4.0, 40.0, 18.0, 0.6210665384782286], [1696, 22.35524370032606, 52.08387746133504, 4.0, 40.0, 18.0, 0.5493658238475402], [1697, 7.60610363625139, 118.98771347772472, 4.0, 40.0, 18.0, 0.883471450279507], [1698, 16.497331904597434, 337.0259151015339, 4.0, 40.0, 18.0, 0.361238271097317], [1699, 16.72254242516536, 1333.3757859904756, 4.0, 40.0, 18.0, 0.23128208052229698], [1700, 21.2556025054371, 1694.8674591788474, 4.0, 40.0, 18.0, 0.14781331380522963], [1701, 13.101953900006746, 396.0693228205217, 4.0, 40.0, 18.0, 
0.42054517982609846], [1702, 19.55083194228293, 1034.545484455351, 4.0, 40.0, 18.0, 0.20039833453379519], [1703, 20.78584571964179, 1867.5443465788612, 4.0, 40.0, 18.0, 0.1502409427264411], [1704, 9.621310409558534, 567.6272557324186, 4.0, 40.0, 18.0, 0.46617566920175546], [1705, 24.714157198792456, 1915.3556212048488, 4.0, 40.0, 18.0, 0.10472151022251487], [1706, 19.775474825923244, 1817.283487334307, 4.0, 40.0, 18.0, 0.16425677711083322], [1707, 18.077549508166634, 1722.4817249234773, 4.0, 40.0, 18.0, 0.1923978628357989], [1708, 23.17916550338367, 1002.4909877164397, 4.0, 40.0, 18.0, 0.15203873706577736], [1709, 1.3308891148640618, 1825.640079787098, 4.0, 40.0, 18.0, 0.5818667882580332], [1710, 23.18181601780554, 980.3473162986612, 4.0, 40.0, 18.0, 0.15323319278524383], [1711, 4.234333825354664, 823.401527753161, 4.0, 40.0, 18.0, 0.5850696173441955], [1712, 22.941907300107466, 326.0656104286293, 4.0, 40.0, 18.0, 0.23917209296486333], [1713, 7.974280813246165, 1475.619985061003, 4.0, 40.0, 18.0, 0.40943513809536514], [1714, 19.118336420445516, 1375.3049222502161, 4.0, 40.0, 18.0, 0.18932035460981275], [1715, 0.6758817654935079, 167.96636726114173, 4.0, 40.0, 18.0, 1.0], [1716, 13.340897887753394, 683.7157036004362, 4.0, 40.0, 18.0, 0.35476705474396264], [1717, 22.880690187965598, 1790.5640023756032, 4.0, 40.0, 18.0, 0.12590012170617182], [1718, 0.880537397394135, 142.92209867527026, 4.0, 40.0, 18.0, 1.0], [1719, 24.09024460322958, 188.47513080588132, 4.0, 40.0, 18.0, 0.28029616423419723], [1720, 18.648659406238238, 462.34861353463054, 4.0, 40.0, 18.0, 0.28114815564051354], [1721, 9.342950193665873, 1957.851072467311, 4.0, 40.0, 18.0, 0.35508695158398346], [1722, 2.33113846495932, 629.6882187125599, 4.0, 40.0, 18.0, 0.6986872263946223], [1723, 13.744366509705605, 1758.367200113514, 4.0, 40.0, 18.0, 0.26915331924848346], [1724, 9.068127384927246, 1360.0922019048633, 4.0, 40.0, 18.0, 0.39098618425733545], [1725, 10.965096996073745, 772.9182498864528, 4.0, 40.0, 18.0, 0.3987558809815726], [1726, 23.295943137459993, 1312.6174588234753, 4.0, 40.0, 18.0, 0.13657673179334268], [1727, 9.032234003208295, 732.9254307648537, 4.0, 40.0, 18.0, 0.4515769737871771], [1728, 18.267895693882554, 182.55128559575311, 4.0, 40.0, 18.0, 0.4042246847408414], [1729, 2.1202048731287753, 1753.952164170847, 4.0, 40.0, 18.0, 0.5594489317446404], [1730, 5.980206779099389, 327.0958138073652, 4.0, 40.0, 18.0, 0.6779155225371589], [1731, 8.422051753762771, 1032.4055742871856, 4.0, 40.0, 18.0, 0.4311760849371425], [1732, 15.212073329895636, 1962.9687756971878, 4.0, 40.0, 18.0, 0.23464613548352306], [1733, 13.55927264769112, 1034.825914833511, 4.0, 40.0, 18.0, 0.3114553442035314], [1734, 1.813961930744712, 849.0248971772902, 4.0, 40.0, 18.0, 0.6698452595489227], [1735, 5.814377314443184, 180.4212502991614, 4.0, 40.0, 18.0, 0.8211067953789679], [1736, 18.097117132613477, 127.89044561583256, 4.0, 40.0, 18.0, 0.4664430326821901], [1737, 2.8433978869424643, 49.71097111868656, 4.0, 40.0, 18.0, 1.0], [1738, 13.870135390374008, 82.88675289973095, 4.0, 40.0, 18.0, 0.7071546521401205], [1739, 4.2975571797138565, 1072.6389692047155, 4.0, 40.0, 18.0, 0.54670214551155], [1740, 23.4465644967952, 1207.2268328110101, 4.0, 40.0, 18.0, 0.1389989814445305], [1741, 21.558336504250782, 944.3977781049064, 4.0, 40.0, 18.0, 0.1764806189591074], [1742, 22.642546810517896, 79.32561996954387, 4.0, 40.0, 18.0, 0.44486310261551226], [1743, 23.257205536426017, 782.4847906808816, 4.0, 40.0, 18.0, 0.16586493682430567], [1744, 20.3297106094133, 
1532.6350395948778, 4.0, 40.0, 18.0, 0.16535566984090594], [1745, 7.601612577132405, 438.85613842569336, 4.0, 40.0, 18.0, 0.565432669707558], [1746, 20.533567876256793, 123.74129377390427, 4.0, 40.0, 18.0, 0.41302150698065226], [1747, 20.11184514098866, 64.43666065779996, 4.0, 40.0, 18.0, 0.5605086964690513], [1748, 20.96467599726041, 1310.5094970747757, 4.0, 40.0, 18.0, 0.1652938351338709], [1749, 10.694394663692886, 1071.3183206045346, 4.0, 40.0, 18.0, 0.3732300901264402], [1750, 11.009038331513354, 645.1806737580594, 4.0, 40.0, 18.0, 0.4155147861842374], [1751, 23.965946633057523, 186.31153145871704, 4.0, 40.0, 18.0, 0.28401520670589264], [1752, 16.305472713001144, 230.5820099724613, 4.0, 40.0, 18.0, 0.4152747794097998], [1753, 2.0164802133756767, 414.9259012008752, 4.0, 40.0, 18.0, 0.7893316759748789], [1754, 12.577597764246393, 522.904072949603, 4.0, 40.0, 18.0, 0.4016105096535484], [1755, 14.537924432368559, 1621.429842934899, 4.0, 40.0, 18.0, 0.259014058998408], [1756, 5.610243603993884, 1653.5787863792725, 4.0, 40.0, 18.0, 0.45836421643322045], [1757, 24.29808632457799, 164.02014029181785, 4.0, 40.0, 18.0, 0.2943475712690083], [1758, 2.9619784325358918, 180.52944331525876, 4.0, 40.0, 18.0, 1.0], [1759, 16.177258552308913, 730.2512584094902, 4.0, 40.0, 18.0, 0.28710448451694126], [1760, 5.092308259179642, 120.43679712598822, 4.0, 40.0, 18.0, 1.0], [1761, 9.341953599135755, 1427.1125828128668, 4.0, 40.0, 18.0, 0.38019307505029964], [1762, 0.0808929910235262, 130.78839415986135, 4.0, 40.0, 18.0, 1.0], [1763, 10.506599678634865, 55.77020106854454, 4.0, 40.0, 18.0, 1.0], [1764, 6.155716401936137, 1044.1699211597609, 4.0, 40.0, 18.0, 0.49167379269704986]]
| 48,708.5 | 97,416 | 0.731217 |
4a1ce3d450302d04c9c8e3716f1587528aeb712e
| 9,711 |
py
|
Python
|
tests/records/test_api.py
|
audrium/invenio-drafts-resources
|
982daaf92abe3538e8fe025916a6a6f93987a09c
|
[
"MIT"
] | null | null | null |
tests/records/test_api.py
|
audrium/invenio-drafts-resources
|
982daaf92abe3538e8fe025916a6a6f93987a09c
|
[
"MIT"
] | null | null | null |
tests/records/test_api.py
|
audrium/invenio-drafts-resources
|
982daaf92abe3538e8fe025916a6a6f93987a09c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Drafts-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Data access layer tests."""
import pytest
from invenio_search import current_search_client
from jsonschema import ValidationError
from mock_module.api import Draft, ParentRecord, Record
from mock_module.models import DraftMetadata, ParentRecordMetadata, \
ParentState, RecordMetadata
from sqlalchemy import inspect
from sqlalchemy.orm.exc import NoResultFound
#
# Create
#
def test_draft_create_empty(app, db):
"""Test draft creation."""
# Empty draft creation works, and injects a schema.
draft = Draft.create({})
db.session.commit()
assert draft.schema
# JSONSchema validation works.
pytest.raises(
ValidationError,
Draft.create,
{'metadata': {'title': 1}}
)
def test_draft_create_parent(app, db):
"""Test draft creation of the parent record."""
draft = Draft.create({})
db.session.commit()
assert draft.schema.endswith('record-v1.0.0.json')
assert draft.pid
assert draft.parent.schema.endswith('parent-v1.0.0.json')
assert draft.parent.pid
assert draft.model.parent_id == draft.model.parent.id
assert draft.pid.object_uuid != draft.parent.pid.object_uuid
def test_draft_create_parent_state(app, db):
"""Test draft creation of the parent record."""
draft = Draft.create({})
db.session.commit()
# Assert that associated objects were created
assert ParentState.query.count() == 1
assert DraftMetadata.query.count() == 1
assert ParentRecordMetadata.query.count() == 1
assert RecordMetadata.query.count() == 0
def assert_state(d):
# An initial draft is not published, so latest_id/index is None
assert d.model.index == 1
assert d.versions.index == 1
assert d.versions.latest_id is None
assert d.versions.latest_index is None
assert d.versions.next_draft_id == d.id
assert_state(draft)
assert_state(Draft.get_record(draft.id))
def test_record_create_parent_state(app, db):
"""Test draft creation of the parent record."""
draft = Draft.create({})
draft.commit()
db.session.commit()
record = Record.publish(draft)
record.commit()
db.session.commit()
def assert_state(r):
        # After publish, the record itself is the latest version of its parent
assert r.versions.latest_id == r.id
assert r.versions.latest_index == 1
assert r.versions.next_draft_id is None
assert r.versions.index == 1
assert r.versions.is_latest is True
assert r.versions.is_latest_draft is True
assert r.model.index == 1
assert r.model.parent_id == draft.model.parent_id
assert_state(record)
assert_state(Record.get_record(record.id))
def test_draft_create_new_version(app, db):
"""Test draft creation of the parent record."""
# A published record.
record = Record.publish(Draft.create({}))
db.session.commit()
# Create a draft for a new version (happens in service.new_version())
draft = Draft.new_version(record)
draft.commit()
db.session.commit()
record = Record.get_record(record.id)
draft = Draft.get_record(draft.id)
assert record.id != draft.id # different uuids
assert record.parent.id == draft.parent.id # same parent
assert draft.versions.is_latest_draft is True
assert draft.versions.is_latest is False
assert record.versions.is_latest_draft is False
assert record.versions.is_latest is True
def test_draft_parent_state_hard_delete(app, db):
"""Test force deletion of a draft."""
# Initial state: Only draft exists (i.e. no other record versions)
draft = Draft.create({})
db.session.commit()
# happens on:
# - service.delete_draft for an *unpublished* record
draft.delete(force=True)
db.session.commit()
    # Make sure neither the parent nor the parent state is left behind
assert ParentState.query.count() == 0
assert ParentRecordMetadata.query.count() == 0
assert DraftMetadata.query.count() == 0
assert RecordMetadata.query.count() == 0
def test_draft_parent_state_hard_delete_with_parent(app, db):
"""Test force deletion of a draft."""
    # Initial state: A previous record version exists, in addition to draft
draft = Draft.create({})
record = Record.create({}, parent=draft.parent)
db.session.commit()
# happens on:
# - service.delete_draft for an *unpublished* record
draft.delete(force=True)
db.session.commit()
# Make sure parent/parent state is still there
assert ParentState.query.count() == 1
assert ParentRecordMetadata.query.count() == 1
assert RecordMetadata.query.count() == 1
assert DraftMetadata.query.count() == 0
record = Record.get_record(record.id)
assert record.versions.next_draft_id is None
assert record.versions.latest_id == record.id
def test_draft_parent_state_soft_delete(app, db):
"""Test soft deletion of a draft."""
# Simulate a record being edited.
draft = Draft.create({})
record = Record.create({}, parent=draft.parent)
db.session.commit()
# happens on:
# - service.publish()
# - service.delete_draft() for a *published* record
draft.delete(force=False)
db.session.commit()
assert ParentState.query.count() == 1
assert ParentRecordMetadata.query.count() == 1
assert RecordMetadata.query.count() == 1
record = Record.get_record(record.id)
assert record.versions.next_draft_id is None
assert record.versions.latest_id == record.id
#
# Create/Update from draft
#
def test_create_record_from_draft(app, db, example_draft):
"""Test create a record from a draft.
This is used e.g. when publishing a new draft as a record.
"""
record = Record.publish(example_draft)
db.session.commit()
assert example_draft.pid == record.pid
assert example_draft.parent == record.parent
#
# Get
#
def test_draft_get_record(app, db, example_draft):
"""Test draft retrival."""
draft = Draft.get_record(example_draft.id)
# Test that the parent record is properly fetched.
assert draft.parent == example_draft.parent
#
# Delete
#
def test_draft_force_delete(app, db, example_draft):
"""Test draft hard deletion."""
parent_id = example_draft.parent.id
example_draft.delete(force=True)
db.session.commit()
    # Both parent and draft are deleted
pytest.raises(NoResultFound, ParentRecord.get_record, parent_id)
pytest.raises(NoResultFound, Draft.get_record, example_draft.id)
def test_draft_soft_delete(app, db, example_draft):
"""Test draft soft deletion."""
parent_id = example_draft.parent.id
example_draft.delete(force=False)
db.session.commit()
# Parent not deleted, but draft is soft deleted.
assert ParentRecord.get_record(parent_id)
pytest.raises(NoResultFound, Draft.get_record, example_draft.id)
draft = Draft.get_record(example_draft.id, with_deleted=True)
assert draft.parent.id == parent_id
def test_draft_undelete(app, db, example_draft):
"""Test undeleting a draft."""
example_draft.delete()
db.session.commit()
draft = Draft.get_record(example_draft.id, with_deleted=True)
assert draft.is_deleted
draft.undelete()
assert draft.parent.id == example_draft.parent.id
#
# Dumps/loads
#
def test_draft_dump_load_idempotence(app, db, example_draft):
"""Test idempotence of dumps/loads."""
loaded_draft = Draft.loads(example_draft.dumps())
assert example_draft == loaded_draft
# Parent was dumped and loaded
assert example_draft.parent == loaded_draft.parent
assert example_draft.versions.is_latest_draft \
== loaded_draft.versions.is_latest_draft
# Test that SQLAlchemy model was loaded from the JSON and not DB.
assert not inspect(loaded_draft.parent.model).persistent
assert not inspect(loaded_draft.versions._state).persistent
#
# Indexing
#
def test_draft_indexing(app, db, es, example_draft, indexer):
"""Test indexing of a draft."""
# Index document in ES
assert indexer.index(example_draft)['result'] == 'created'
# Retrieve document from ES
data = current_search_client.get(
'draftsresources-drafts-draft-v1.0.0',
id=example_draft.id,
doc_type='_doc'
)
    # Load the ES data and compare.
draft = Draft.loads(data['_source'])
assert draft == example_draft
assert draft.id == example_draft.id
assert draft.revision_id == example_draft.revision_id
assert draft.created == example_draft.created
assert draft.updated == example_draft.updated
assert draft.expires_at == example_draft.expires_at
assert draft.parent == example_draft.parent
assert draft.versions.is_latest_draft == \
example_draft.versions.is_latest_draft
assert draft.versions.index == \
example_draft.versions.index
# Check system fields
assert draft.metadata == example_draft['metadata']
def test_draft_delete_reindex(app, db, es, example_draft, indexer):
"""Test reindexing of a soft-deleted draft."""
draft = example_draft
# Index draft
assert indexer.index(draft)['result'] == 'created'
# Delete record.
draft.delete()
db.session.commit()
assert indexer.delete(draft)['result'] == 'deleted'
    # Update draft and reindex (this will cause trouble unless proper
# optimistic concurrency control is used).
draft.undelete()
draft.commit()
db.session.commit()
assert indexer.index(draft)['result'] == 'created'
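# Illustrative sketch (an assumption, not part of the original suite): the
# "proper optimistic concurrency control" mentioned above can be made explicit
# by indexing with the record revision as an external Elasticsearch version,
# so a write carrying a stale revision is rejected instead of clobbering data.
def index_draft_with_occ(client, draft):
    """Index a draft, letting ES reject writes with an outdated revision."""
    return client.index(
        index='draftsresources-drafts-draft-v1.0.0',
        id=str(draft.id),
        body=draft.dumps(),
        version=draft.revision_id,
        version_type='external_gte',
    )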
| 31.735294 | 76 | 0.702708 |
4a1ce4377c8c080fff7cbb7f0c6167311b82a9c5
| 11,731 |
py
|
Python
|
samples/jobs.py
|
awslabs/aws-iot-device-sdk-python-v2
|
b2f035c0dc17b0a719695f22137688ceae742436
|
[
"Apache-2.0"
] | 4 |
2019-05-08T08:27:49.000Z
|
2019-10-17T00:26:58.000Z
|
samples/jobs.py
|
awslabs/aws-iot-device-sdk-python-v2
|
b2f035c0dc17b0a719695f22137688ceae742436
|
[
"Apache-2.0"
] | 1 |
2019-10-16T17:08:58.000Z
|
2019-10-23T22:13:56.000Z
|
samples/jobs.py
|
awslabs/aws-iot-device-sdk-python-v2
|
b2f035c0dc17b0a719695f22137688ceae742436
|
[
"Apache-2.0"
] | 2 |
2019-05-27T18:39:42.000Z
|
2019-09-06T09:06:56.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import auth, http, io, mqtt
from awsiot import iotjobs
from awsiot import mqtt_connection_builder
from concurrent.futures import Future
import sys
import threading
import time
import traceback
from uuid import uuid4
# - Overview -
# This sample uses the AWS IoT Jobs Service to receive and execute operations
# on the device. Imagine periodic software updates that must be sent to and
# executed on devices in the wild.
#
# - Instructions -
# This sample requires you to create jobs for your device to execute. See:
# https://docs.aws.amazon.com/iot/latest/developerguide/create-manage-jobs.html
#
# - Detail -
# On startup, the sample tries to start the next pending job execution.
# If such a job exists, the sample emulates "doing work" by spawning a thread
# that sleeps for several seconds before marking the job as SUCCEEDED. When no
# pending job executions exist, the sample sits in an idle state.
#
# The sample also subscribes to receive "Next Job Execution Changed" events.
# If the sample is idle, this event wakes it to start the job. If the sample is
# already working on a job, it remembers to try for another when it's done.
# This event is sent by the service when the current job completes, so the
# sample will be continually prompted to try another job until none remain.
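#
# A minimal example job document (an assumption -- the document is arbitrary
# JSON that the Jobs service passes through verbatim to the device; this
# sample only prints it):
#
# {
#     "operation": "reboot",
#     "version": "1.2.3"
# }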
# Using globals to simplify sample code
is_sample_done = threading.Event()
# Parse arguments
import command_line_utils
cmdUtils = command_line_utils.CommandLineUtils("Jobs - Receive and execute operations on the device.")
cmdUtils.add_common_mqtt_commands()
cmdUtils.add_common_proxy_commands()
cmdUtils.add_common_logging_commands()
cmdUtils.register_command("key", "<path>", "Path to your key in PEM format.", True, str)
cmdUtils.register_command("cert", "<path>", "Path to your client certificate in PEM format.", True, str)
cmdUtils.register_command("client_id", "<str>", "Client ID to use for MQTT connection (optional, default='test-*').", default="test-" + str(uuid4()))
cmdUtils.register_command("port", "<int>", "Connection port. AWS IoT supports 433 and 8883 (optional, default=auto).", type=int)
cmdUtils.register_command("thing_name", "<str>", "The name assigned to your IoT Thing", required=True)
cmdUtils.register_command("job_time", "<int>", "Emulate working on a job by sleeping this many seconds (optional, default='5')", default=5, type=int)
# Needs to be called so the command utils parse the commands
cmdUtils.get_args()
mqtt_connection = None
jobs_client = None
jobs_thing_name = cmdUtils.get_command_required("thing_name")
class LockedData:
def __init__(self):
self.lock = threading.Lock()
self.disconnect_called = False
self.is_working_on_job = False
self.is_next_job_waiting = False
locked_data = LockedData()
# Function for gracefully quitting this sample
def exit(msg_or_exception):
if isinstance(msg_or_exception, Exception):
print("Exiting Sample due to exception.")
traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])
else:
print("Exiting Sample:", msg_or_exception)
with locked_data.lock:
if not locked_data.disconnect_called:
print("Disconnecting...")
locked_data.disconnect_called = True
future = mqtt_connection.disconnect()
future.add_done_callback(on_disconnected)
def try_start_next_job():
print("Trying to start the next job...")
with locked_data.lock:
if locked_data.is_working_on_job:
print("Nevermind, already working on a job.")
return
if locked_data.disconnect_called:
print("Nevermind, sample is disconnecting.")
return
locked_data.is_working_on_job = True
locked_data.is_next_job_waiting = False
print("Publishing request to start next job...")
request = iotjobs.StartNextPendingJobExecutionRequest(thing_name=jobs_thing_name)
publish_future = jobs_client.publish_start_next_pending_job_execution(request, mqtt.QoS.AT_LEAST_ONCE)
publish_future.add_done_callback(on_publish_start_next_pending_job_execution)
def done_working_on_job():
with locked_data.lock:
locked_data.is_working_on_job = False
try_again = locked_data.is_next_job_waiting
if try_again:
try_start_next_job()
def on_disconnected(disconnect_future):
# type: (Future) -> None
print("Disconnected.")
# Signal that sample is finished
is_sample_done.set()
def on_next_job_execution_changed(event):
# type: (iotjobs.NextJobExecutionChangedEvent) -> None
try:
execution = event.execution
if execution:
print("Received Next Job Execution Changed event. job_id:{} job_document:{}".format(
execution.job_id, execution.job_document))
# Start job now, or remember to start it when current job is done
start_job_now = False
with locked_data.lock:
if locked_data.is_working_on_job:
locked_data.is_next_job_waiting = True
else:
start_job_now = True
if start_job_now:
try_start_next_job()
else:
print("Received Next Job Execution Changed event: None. Waiting for further jobs...")
except Exception as e:
exit(e)
def on_publish_start_next_pending_job_execution(future):
# type: (Future) -> None
try:
future.result() # raises exception if publish failed
print("Published request to start the next job.")
except Exception as e:
exit(e)
def on_start_next_pending_job_execution_accepted(response):
# type: (iotjobs.StartNextJobExecutionResponse) -> None
try:
if response.execution:
execution = response.execution
print("Request to start next job was accepted. job_id:{} job_document:{}".format(
execution.job_id, execution.job_document))
# To emulate working on a job, spawn a thread that sleeps for a few seconds
job_thread = threading.Thread(
target=lambda: job_thread_fn(execution.job_id, execution.job_document),
name='job_thread')
job_thread.start()
else:
print("Request to start next job was accepted, but there are no jobs to be done. Waiting for further jobs...")
done_working_on_job()
except Exception as e:
exit(e)
def on_start_next_pending_job_execution_rejected(rejected):
# type: (iotjobs.RejectedError) -> None
exit("Request to start next pending job rejected with code:'{}' message:'{}'".format(
rejected.code, rejected.message))
def job_thread_fn(job_id, job_document):
try:
print("Starting local work on job...")
time.sleep(cmdUtils.get_command("job_time"))
print("Done working on job.")
print("Publishing request to update job status to SUCCEEDED...")
request = iotjobs.UpdateJobExecutionRequest(
thing_name=jobs_thing_name,
job_id=job_id,
status=iotjobs.JobStatus.SUCCEEDED)
publish_future = jobs_client.publish_update_job_execution(request, mqtt.QoS.AT_LEAST_ONCE)
publish_future.add_done_callback(on_publish_update_job_execution)
except Exception as e:
exit(e)
def on_publish_update_job_execution(future):
# type: (Future) -> None
try:
future.result() # raises exception if publish failed
print("Published request to update job.")
except Exception as e:
exit(e)
def on_update_job_execution_accepted(response):
# type: (iotjobs.UpdateJobExecutionResponse) -> None
try:
print("Request to update job was accepted.")
done_working_on_job()
except Exception as e:
exit(e)
def on_update_job_execution_rejected(rejected):
# type: (iotjobs.RejectedError) -> None
exit("Request to update job status was rejected. code:'{}' message:'{}'.".format(
rejected.code, rejected.message))
if __name__ == '__main__':
mqtt_connection = cmdUtils.build_mqtt_connection(None, None)
print("Connecting to {} with client ID '{}'...".format(
cmdUtils.get_command(cmdUtils.m_cmd_endpoint), cmdUtils.get_command("client_id")))
connected_future = mqtt_connection.connect()
jobs_client = iotjobs.IotJobsClient(mqtt_connection)
# Wait for connection to be fully established.
    # Note that it's not necessary to wait; commands issued to the
    # mqtt_connection before it's fully connected will simply be queued.
# But this sample waits here so it's obvious when a connection
# fails or succeeds.
connected_future.result()
print("Connected!")
try:
# Subscribe to necessary topics.
        # Note that it **is** important to wait for "accepted/rejected" subscriptions
# to succeed before publishing the corresponding "request".
print("Subscribing to Next Changed events...")
changed_subscription_request = iotjobs.NextJobExecutionChangedSubscriptionRequest(
thing_name=jobs_thing_name)
subscribed_future, _ = jobs_client.subscribe_to_next_job_execution_changed_events(
request=changed_subscription_request,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_next_job_execution_changed)
# Wait for subscription to succeed
subscribed_future.result()
print("Subscribing to Start responses...")
start_subscription_request = iotjobs.StartNextPendingJobExecutionSubscriptionRequest(
thing_name=jobs_thing_name)
subscribed_accepted_future, _ = jobs_client.subscribe_to_start_next_pending_job_execution_accepted(
request=start_subscription_request,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_start_next_pending_job_execution_accepted)
subscribed_rejected_future, _ = jobs_client.subscribe_to_start_next_pending_job_execution_rejected(
request=start_subscription_request,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_start_next_pending_job_execution_rejected)
# Wait for subscriptions to succeed
subscribed_accepted_future.result()
subscribed_rejected_future.result()
print("Subscribing to Update responses...")
# Note that we subscribe to "+", the MQTT wildcard, to receive
# responses about any job-ID.
update_subscription_request = iotjobs.UpdateJobExecutionSubscriptionRequest(
thing_name=jobs_thing_name,
job_id='+')
subscribed_accepted_future, _ = jobs_client.subscribe_to_update_job_execution_accepted(
request=update_subscription_request,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_job_execution_accepted)
subscribed_rejected_future, _ = jobs_client.subscribe_to_update_job_execution_rejected(
request=update_subscription_request,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_job_execution_rejected)
# Wait for subscriptions to succeed
subscribed_accepted_future.result()
subscribed_rejected_future.result()
# Make initial attempt to start next job. The service should reply with
# an "accepted" response, even if no jobs are pending. The response
# will contain data about the next job, if there is one.
try_start_next_job()
except Exception as e:
exit(e)
# Wait for the sample to finish
is_sample_done.wait()
| 39.631757 | 149 | 0.704714 |
4a1ce47dfd54754ce5dcfca198cdb554f5698e15
| 281 |
py
|
Python
|
numberofwaystomakechanges/program.py
|
anzharip/algorithm-and-datastructure
|
b83273330066ace02ecccfe834acdd998d21b5a4
|
[
"MIT"
] | null | null | null |
numberofwaystomakechanges/program.py
|
anzharip/algorithm-and-datastructure
|
b83273330066ace02ecccfe834acdd998d21b5a4
|
[
"MIT"
] | null | null | null |
numberofwaystomakechanges/program.py
|
anzharip/algorithm-and-datastructure
|
b83273330066ace02ecccfe834acdd998d21b5a4
|
[
"MIT"
] | null | null | null |
def numberOfWaysToMakeChange(n, denoms):
    # Classic coin-change counting DP: ways[amount] holds the number of ways
    # to make `amount` using only the denominations processed so far.
    ways = [0] * (n + 1)
    ways[0] = 1  # exactly one way to make 0: use no coins
    for denom in denoms:
        for amount in range(1, n + 1):
            if amount >= denom:
                ways[amount] += ways[amount - denom]
    return ways[n]
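# A quick sanity check of the DP above (hypothetical inputs, not part of the
# original exercise): with n=6 and denoms=[1, 5] there are exactly two ways
# (six 1s, or 5+1), and an amount of 0 always has the single "no coins" way.
if __name__ == "__main__":
    assert numberOfWaysToMakeChange(6, [1, 5]) == 2
    assert numberOfWaysToMakeChange(0, [2, 3]) == 1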
| 21.615385 | 52 | 0.52669 |
4a1ce49f5b92dab44bdccc26f59ee12a7395293f
| 1,621 |
py
|
Python
|
examples/entrypoint.py
|
mediapills/console
|
a4ec98a0205e199649297eedd445186faad1ee27
|
[
"MIT"
] | null | null | null |
examples/entrypoint.py
|
mediapills/console
|
a4ec98a0205e199649297eedd445186faad1ee27
|
[
"MIT"
] | null | null | null |
examples/entrypoint.py
|
mediapills/console
|
a4ec98a0205e199649297eedd445186faad1ee27
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021-2021 MediaPills Console Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mediapills.console import Application
from mediapills.console.outputs import ConsoleOutput
from typing import Dict, Any
if __name__ == "__main__":
app = Application(stdout=ConsoleOutput(), stderr=ConsoleOutput())
@app.entrypoint # type: ignore
def print_me( # dead: disable
stdout: ConsoleOutput, **kwargs: Dict[Any, Any] # dead: disable
) -> None:
"""CLI command print message in STDOUT."""
stdout.write("Application message goes here ...")
app.run()
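    # Expected shell behaviour (an assumption based on the entrypoint
    # registered above; the exact CLI surface is defined by
    # mediapills.console):
    #
    #   $ python entrypoint.py
    #   Application message goes here ...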
| 43.810811 | 72 | 0.74892 |
4a1ce606899318115dedcb61b7435a2448b27848
| 1,424 |
py
|
Python
|
var/spack/repos/builtin/packages/libcuml/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 |
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/libcuml/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 |
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/libcuml/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 |
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libcuml(CMakePackage):
"""cuML is a suite of libraries that implement machine
learning algorithms and mathematical primitives functions
that share compatible APIs with other RAPIDS projects."""
homepage = "https://rapids.ai"
url = "https://github.com/rapidsai/cuml/archive/v0.15.0.tar.gz"
version('0.15.0', sha256='5c9c656ae4eaa94a426e07d7385fd5ea0e5dc7abff806af2941aee10d4ca99c7')
depends_on('cmake@3.14:', type='build')
depends_on('zlib')
depends_on('libcudf@0.8:')
depends_on('cuda@9.2:')
depends_on('blas')
depends_on('nccl@2.4:')
depends_on('treelite')
depends_on('googletest')
depends_on('libcumlprims')
depends_on('mpi')
depends_on('ucx')
root_cmakelists_dir = 'cpp'
def cmake_args(self):
args = []
args.append("-DNCCL_PATH={0}".format(self.spec['nccl'].prefix))
args.append("-DBUILD_CUML_C_LIBRARY=ON")
args.append("-DWITH_UCX=ON")
args.append("-DNVTX=OFF")
args.append("-DBUILD_STATIC_FAISS=ON")
args.append("-DSINGLEGPU=OFF")
args.append("-DENABLE_CUMLPRIMS_MG=ON")
args.append("-DBUILD_CUML_MPI_COMMS=ON")
return args
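# Typical usage once this recipe is available in a Spack repository (an
# assumption -- standard Spack workflow, not part of the package file):
#
#   $ spack install libcuml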
| 30.956522 | 97 | 0.676264 |
4a1ce78d8c117a46f42bc0347dbea562ce82fb33
| 28,179 |
py
|
Python
|
tests/test_verbs.py
|
slint/invenio-oaiserver
|
fd715a5a6af117564f152dbc09938b788ed8bc8d
|
[
"MIT"
] | null | null | null |
tests/test_verbs.py
|
slint/invenio-oaiserver
|
fd715a5a6af117564f152dbc09938b788ed8bc8d
|
[
"MIT"
] | null | null | null |
tests/test_verbs.py
|
slint/invenio-oaiserver
|
fd715a5a6af117564f152dbc09938b788ed8bc8d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test OAI verbs."""
from __future__ import absolute_import
import uuid
from copy import deepcopy
from datetime import datetime, timedelta
from helpers import create_record, run_after_insert_oai_set
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.minters import recid_minter
from invenio_records.api import Record
from invenio_search import current_search
from lxml import etree
from invenio_oaiserver import current_oaiserver
from invenio_oaiserver.minters import oaiid_minter
from invenio_oaiserver.models import OAISet
from invenio_oaiserver.response import NS_DC, NS_OAIDC, NS_OAIPMH
from invenio_oaiserver.utils import datetime_to_datestamp, \
eprints_description, friends_description, oai_identifier_description
NAMESPACES = {'x': NS_OAIPMH, 'y': NS_OAIDC, 'z': NS_DC}
def _xpath_errors(body):
"""Find errors in body."""
return list(body.iter('{*}error'))
def test_no_verb(app):
"""Test response when no verb is specified."""
with app.test_client() as c:
result = c.get('/oai2d')
tree = etree.fromstring(result.data)
assert 'Missing data for required field.' in _xpath_errors(
tree)[0].text
def test_wrong_verb(app):
"""Test wrong verb."""
with app.test_client() as c:
result = c.get('/oai2d?verb=Aaa')
tree = etree.fromstring(result.data)
assert 'This is not a valid OAI-PMH verb:Aaa' in _xpath_errors(
tree)[0].text
def test_identify(app):
"""Test Identify verb."""
# baseUrls for friends element
baseUrls = ['http://example.org/1',
'http://example.org/2']
# parameters for eprints element
content = {'URL': 'http://arXiv.org/arXiv_content.htm'}
    metadataPolicy = {'text': 'Metadata can be used by commercial '
                              'and non-commercial service providers',
                      'URL': 'http://arXiv.org/arXiv_metadata_use.htm'}
    dataPolicy = {'text': 'Full content, i.e. preprints may '
                          'not be harvested by robots'}
submissionPolicy = {'URL': 'http://arXiv.org/arXiv_submission.htm'}
# parameters for oai-identifier element
scheme = 'oai'
repositoryIdentifier = 'oai-stuff.foo.org'
delimiter = ':'
sampleIdentifier = 'oai:oai-stuff.foo.org:5324'
app.config['OAISERVER_DESCRIPTIONS'] = \
[friends_description(baseUrls),
eprints_description(metadataPolicy, dataPolicy,
submissionPolicy, content),
oai_identifier_description(scheme, repositoryIdentifier, delimiter,
sampleIdentifier)]
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify')
assert 200 == result.status_code
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:Identify',
namespaces=NAMESPACES)) == 1
repository_name = tree.xpath('/x:OAI-PMH/x:Identify/x:repositoryName',
namespaces=NAMESPACES)
assert len(repository_name) == 1
assert repository_name[0].text == 'Invenio-OAIServer'
base_url = tree.xpath('/x:OAI-PMH/x:Identify/x:baseURL',
namespaces=NAMESPACES)
assert len(base_url) == 1
assert base_url[0].text == 'http://app/oai2d'
protocolVersion = tree.xpath(
'/x:OAI-PMH/x:Identify/x:protocolVersion',
namespaces=NAMESPACES)
assert len(protocolVersion) == 1
assert protocolVersion[0].text == '2.0'
adminEmail = tree.xpath('/x:OAI-PMH/x:Identify/x:adminEmail',
namespaces=NAMESPACES)
assert len(adminEmail) == 1
assert adminEmail[0].text == 'info@inveniosoftware.org'
earliestDatestamp = tree.xpath(
'/x:OAI-PMH/x:Identify/x:earliestDatestamp',
namespaces=NAMESPACES)
assert len(earliestDatestamp) == 1
deletedRecord = tree.xpath('/x:OAI-PMH/x:Identify/x:deletedRecord',
namespaces=NAMESPACES)
assert len(deletedRecord) == 1
assert deletedRecord[0].text == 'no'
granularity = tree.xpath('/x:OAI-PMH/x:Identify/x:granularity',
namespaces=NAMESPACES)
assert len(granularity) == 1
description = tree.xpath('/x:OAI-PMH/x:Identify/x:description',
namespaces=NAMESPACES)
friends_element = description[0]
for element in friends_element.getchildren():
for child in element.getchildren():
assert child.tag == \
'{http://www.openarchives.org/OAI/2.0/friends/}baseURL'
assert child.text in baseUrls
eprints_root = description[1]
children = eprints_root[0].getchildren()
assert children[0].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}content'
leaves = children[0].getchildren()
assert len(leaves) == 1
assert leaves[0].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}URL'
assert leaves[0].text == content['URL']
assert children[1].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}metadataPolicy'
leaves = children[1].getchildren()
assert len(leaves) == 2
metadataPolicyContents = \
['{http://www.openarchives.org/OAI/2.0/eprints}text',
'{http://www.openarchives.org/OAI/2.0/eprints}URL']
assert set([leaves[0].tag, leaves[1].tag]) == \
set(metadataPolicyContents)
assert set([leaves[0].text, leaves[1].text]) == \
set(metadataPolicy.values())
assert children[2].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}dataPolicy'
leaves = children[2].getchildren()
assert len(leaves) == 1
assert leaves[0].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}text'
assert leaves[0].text == dataPolicy['text']
assert children[3].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}submissionPolicy'
leaves = children[3].getchildren()
assert len(leaves) == 1
assert leaves[0].tag == \
'{http://www.openarchives.org/OAI/2.0/eprints}URL'
assert leaves[0].text == submissionPolicy['URL']
oai_identifier_root = description[2]
children = oai_identifier_root[0].getchildren()
assert children[0].tag == \
'{http://www.openarchives.org/OAI/2.0/oai-identifier}scheme'
assert children[0].text == scheme
assert children[1].tag == \
'{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
'repositoryIdentifier'
assert children[1].text == repositoryIdentifier
assert children[2].tag == \
'{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
'delimiter'
assert children[2].text == delimiter
assert children[3].tag == \
'{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
'sampleIdentifier'
assert children[3].text == sampleIdentifier
def test_identify_earliest_date(app, schema):
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify')
assert 200 == result.status_code
tree = etree.fromstring(result.data)
earliestDatestamp = tree.xpath(
'/x:OAI-PMH/x:Identify/x:earliestDatestamp',
namespaces=NAMESPACES)
assert earliestDatestamp[0].text == '0001-01-01T00:00:00Z'
first_record = create_record(app, {
'_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test0'},
'_oai_id': 1, '$schema': schema
})
first_record.model.created = datetime(2000, 1, 1, 13, 0, 0)
RecordIndexer().index(first_record)
create_record(app, {
'_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test1'},
'_oai_id': 2, '$schema': schema
})
create_record(app, {
'_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test2'},
'_oai_id': 3, '$schema': schema
})
app.extensions['invenio-search'].flush_and_refresh('records')
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify')
assert 200 == result.status_code
tree = etree.fromstring(result.data)
earliestDatestamp = tree.xpath(
'/x:OAI-PMH/x:Identify/x:earliestDatestamp',
namespaces=NAMESPACES)
assert earliestDatestamp[0].text == '2000-01-01T13:00:00Z'
def test_getrecord(app):
"""Test get record verb."""
with app.test_request_context():
pid_value = 'oai:legacy:1'
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {
'_oai': {'id': pid_value},
'title_statement': {'title': 'Test0'},
}
pid = oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
assert pid_value == pid.pid_value
record_updated = record.updated
with app.test_client() as c:
result = c.get(
'/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc'
.format(pid_value))
assert 200 == result.status_code
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:record/x:header',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:record/x:header/x:identifier',
namespaces=NAMESPACES)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:record/x:header/x:identifier/text()',
namespaces=NAMESPACES)
assert identifier == [pid_value]
datestamp = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:record/x:header/x:datestamp/text()',
namespaces=NAMESPACES)
assert datestamp == [datetime_to_datestamp(record_updated)]
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:record/x:metadata',
namespaces=NAMESPACES)) == 1
def test_getrecord_fail(app):
"""Test GetRecord if record doesn't exist."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(
'/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc'
.format('not-exist-pid'))
assert 422 == result.status_code
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _check_xml_error(tree, code):
"""Text xml for a error idDoesNotExist."""
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
error = tree.xpath('/x:OAI-PMH/x:error', namespaces=NAMESPACES)
assert len(error) == 1
assert error[0].attrib['code'] == code
def test_identify_with_additional_args(app):
"""Test identify with additional arguments."""
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify¬AValidArg=True')
tree = etree.fromstring(result.data)
assert 'You have passed too many arguments.' == _xpath_errors(
tree)[0].text
def test_listmetadataformats(app):
"""Test ListMetadataFormats."""
_listmetadataformats(app=app, query='/oai2d?verb=ListMetadataFormats')
def test_listmetadataformats_record(app):
"""Test ListMetadataFormats for a record."""
with app.test_request_context():
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title_statement': {'title': 'Test0'}}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
Record.create(data, id_=record_id)
pid_value = pid.pid_value
db.session.commit()
_listmetadataformats(
app=app,
query='/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
pid_value))
def test_listmetadataformats_record_fail(app):
"""Test ListMetadataFormats for a record that doesn't exist."""
query = '/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
'pid-not-exixts')
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _listmetadataformats(app, query):
"""Try ListMetadataFormats."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListMetadataFormats',
namespaces=NAMESPACES)) == 1
metadataFormats = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat',
namespaces=NAMESPACES)
cfg_metadataFormats = deepcopy(
app.config.get('OAISERVER_METADATA_FORMATS', {}))
assert len(metadataFormats) == len(cfg_metadataFormats)
prefixes = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataPrefix', namespaces=NAMESPACES)
assert len(prefixes) == len(cfg_metadataFormats)
assert all(pfx.text in cfg_metadataFormats for pfx in prefixes)
schemas = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:schema', namespaces=NAMESPACES)
assert len(schemas) == len(cfg_metadataFormats)
assert all(sch.text in cfg_metadataFormats[pfx.text]['schema']
for sch, pfx in zip(schemas, prefixes))
metadataNamespaces = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataNamespace', namespaces=NAMESPACES)
assert len(metadataNamespaces) == len(cfg_metadataFormats)
assert all(nsp.text in cfg_metadataFormats[pfx.text]['namespace']
for nsp, pfx in zip(metadataNamespaces, prefixes))
def test_listsets(app):
"""Test ListSets."""
with app.test_request_context():
current_oaiserver.unregister_signals_oaiset()
with db.session.begin_nested():
a = OAISet(spec='test', name='Test', description='test desc')
db.session.add(a)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setSpec',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription',
namespaces=NAMESPACES
)) == 1
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc',
namespaces=NAMESPACES)
) == 1
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description', namespaces=NAMESPACES)
) == 1
text = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description/text()', namespaces=NAMESPACES)
assert len(text) == 1
assert text[0] == 'test desc'
def test_listsets_invalid_name(app):
"""Test ListSets with invalid unicode character for XML."""
with app.test_request_context():
current_oaiserver.unregister_signals_oaiset()
with db.session.begin_nested():
a = OAISet(spec='test', name=u'uni\x01co\x0bde',
description='test desc')
db.session.add(a)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
assert tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
namespaces=NAMESPACES)[0].text == 'unicode'
def test_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords',
'/oai2d?verb=GetRecord&identifier=123',
'/oai2d?verb=ListIdentifiers'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_fail_not_exist_metadataPrefix(app):
"""Test ListRecords fail not exist metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords&metadataPrefix=not-exist',
'/oai2d?verb=GetRecord&identifier=123&metadataPrefix=not-exist',
'/oai2d?verb=ListIdentifiers&metadataPrefix=not-exist'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
query = '/oai2d?verb=ListRecords&'
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords(app):
"""Test ListRecords."""
total = 12
record_ids = []
with app.test_request_context():
indexer = RecordIndexer()
with db.session.begin_nested():
for idx in range(total):
record_id = uuid.uuid4()
data = {'title_statement': {'title': 'Test{0}'.format(idx)}}
recid_minter(record_id, data)
oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
record_ids.append(record_id)
db.session.commit()
for record_id in record_ids:
indexer.index_by_id(record_id)
current_search.flush_and_refresh('_all')
with app.test_client() as c:
result = c.get('/oai2d?verb=ListRecords&metadataPrefix=oai_dc')
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
namespaces=NAMESPACES)) == 10
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header',
namespaces=NAMESPACES)) == 10
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:identifier', namespaces=NAMESPACES)) == 10
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:datestamp', namespaces=NAMESPACES)) == 10
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:metadata',
namespaces=NAMESPACES)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListRecords/x:resumptionToken', namespaces=NAMESPACES
)[0]
assert resumption_token.text
with app.test_client() as c:
result = c.get(
'/oai2d?verb=ListRecords&resumptionToken={0}'.format(
resumption_token.text
)
)
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
namespaces=NAMESPACES)) == 2
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header',
namespaces=NAMESPACES)) == 2
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:identifier', namespaces=NAMESPACES)) == 2
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:datestamp', namespaces=NAMESPACES)) == 2
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:metadata',
namespaces=NAMESPACES)) == 2
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListRecords/x:resumptionToken', namespaces=NAMESPACES
)[0]
assert not resumption_token.text
# Check from:until range
with app.test_client() as c:
# Check date and datetime timestamps.
for granularity in (False, True):
result = c.get(
'/oai2d?verb=ListRecords&metadataPrefix=oai_dc'
'&from={0}&until={1}'.format(
datetime_to_datestamp(
record.updated - timedelta(days=1),
day_granularity=granularity),
datetime_to_datestamp(
record.updated + timedelta(days=1),
day_granularity=granularity),
)
)
assert result.status_code == 200
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
namespaces=NAMESPACES)) == 10
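# Illustrative client-side sketch (an assumption, not one of the original
# tests): harvesting every page of ListRecords means re-requesting with the
# resumptionToken until the server returns an empty one.
def harvest_all_records(client):
    records, token = [], None
    while True:
        url = ('/oai2d?verb=ListRecords&resumptionToken={0}'.format(token)
               if token else '/oai2d?verb=ListRecords&metadataPrefix=oai_dc')
        tree = etree.fromstring(client.get(url).data)
        records.extend(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
                                  namespaces=NAMESPACES))
        token_el = tree.xpath('/x:OAI-PMH/x:ListRecords/x:resumptionToken',
                              namespaces=NAMESPACES)
        token = token_el[0].text if token_el else None
        if not token:
            return records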
def test_listidentifiers(app):
"""Test verb ListIdentifiers."""
from invenio_oaiserver.models import OAISet
with app.app_context():
current_oaiserver.unregister_signals_oaiset()
# create new OAI Set
with db.session.begin_nested():
oaiset = OAISet(
spec='test0',
name='Test0',
description='test desc 0',
search_pattern='title_statement.title:Test0',
)
db.session.add(oaiset)
db.session.commit()
run_after_insert_oai_set()
with app.test_request_context():
indexer = RecordIndexer()
# create a new record (inside the OAI Set)
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title_statement': {'title': 'Test0'}}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
indexer.index_by_id(record_id)
current_search.flush_and_refresh('_all')
pid_value = pid.pid_value
# get the list of identifiers
with app.test_client() as c:
result = c.get(
'/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc'
)
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers',
namespaces=NAMESPACES)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers/x:header',
namespaces=NAMESPACES)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
namespaces=NAMESPACES
)
assert len(identifier) == 1
assert identifier[0].text == str(pid_value)
datestamp = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:datestamp',
namespaces=NAMESPACES
)
assert len(datestamp) == 1
assert datestamp[0].text == datetime_to_datestamp(record.updated)
# Check from:until range
with app.test_client() as c:
# Check date and datetime timestamps.
for granularity in (False, True):
result = c.get(
'/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc'
'&from={0}&until={1}&set=test0'.format(
datetime_to_datestamp(
record.updated - timedelta(1),
day_granularity=granularity),
datetime_to_datestamp(
record.updated + timedelta(1),
day_granularity=granularity),
)
)
assert result.status_code == 200
tree = etree.fromstring(result.data)
identifier = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
namespaces=NAMESPACES
)
assert len(identifier) == 1
def test_list_sets_long(app):
"""Test listing of sets."""
from invenio_db import db
from invenio_oaiserver.models import OAISet
with app.app_context():
current_oaiserver.unregister_signals_oaiset()
with db.session.begin_nested():
for i in range(27):
oaiset = OAISet(
spec='test{0}'.format(i),
name='Test{0}'.format(i),
description='test desc {0}'.format(i),
search_pattern='title_statement.title:Test{0}'.format(i),
)
db.session.add(oaiset)
db.session.commit()
run_after_insert_oai_set()
with app.test_client() as c:
# First page:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=NAMESPACES)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=NAMESPACES
)[0]
assert resumption_token.text
# Second page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=NAMESPACES)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=NAMESPACES
)[0]
assert resumption_token.text
# Third page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=NAMESPACES)) == 7
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=NAMESPACES
)[0]
assert not resumption_token.text
def test_list_sets_with_resumption_token_and_other_args(app):
"""Test list sets with resumption tokens."""
pass
| 38.131258 | 79 | 0.586323 |
4a1ce7922b310c763c033cd1147d5c54e16f519f
| 4,374 |
py
|
Python
|
contrib/seeds/generate-seeds.py
|
vladlenomg/potatocoin
|
9f0f224de97828ab831e1a3992142a3c40939c3b
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
vladlenomg/potatocoin
|
9f0f224de97828ab831e1a3992142a3c40939c3b
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
vladlenomg/potatocoin
|
9f0f224de97828ab831e1a3992142a3c40939c3b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
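# Illustrative examples (inputs are assumptions, not taken from nodes_*.txt):
#   parse_spec('1.2.3.4', 9999)            -> (::ffff:1.2.3.4 as 16 bytes, 9999)
#   parse_spec('[2001:db8::1]:8333', 9999) -> (2001:db8::1 as 16 bytes, 8333)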
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef POTATO_CHAINPARAMSSEEDS_H\n')
g.write('#define POTATO_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the potato network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // POTATO_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.467626 | 98 | 0.582076 |
4a1ce84eba1f13833edd71f268f7d72f662ca500
| 10,968 |
py
|
Python
|
HSTB/kluster/gui/dialog_daskclient.py
|
giumas/kluster
|
40abd266551a56b693132a7cb12471601f5a02b4
|
[
"CC0-1.0"
] | 18 |
2020-11-01T19:59:33.000Z
|
2022-03-31T22:46:48.000Z
|
HSTB/kluster/gui/dialog_daskclient.py
|
giumas/kluster
|
40abd266551a56b693132a7cb12471601f5a02b4
|
[
"CC0-1.0"
] | 46 |
2020-10-23T13:55:24.000Z
|
2022-03-31T15:58:26.000Z
|
HSTB/kluster/gui/dialog_daskclient.py
|
giumas/kluster
|
40abd266551a56b693132a7cb12471601f5a02b4
|
[
"CC0-1.0"
] | 9 |
2021-03-18T22:28:26.000Z
|
2022-02-23T21:48:09.000Z
|
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal
from dask.distributed import get_client
from HSTB.kluster.gui.common_widgets import SaveStateDialog
from HSTB.kluster.dask_helpers import dask_find_or_start_client
from HSTB.kluster import kluster_variables
class DaskClientStart(SaveStateDialog):
"""
Widget that allows you to manually start the dask client if you need to run it in a specific way. If you don't
use this, we just autostart a default LocalCluster.
"""
def __init__(self, parent=None, title='', settings=None):
super().__init__(parent, settings, widgetname='daskclient')
self.setWindowTitle('Setup Dask Client')
self.client_vbox = QtWidgets.QVBoxLayout()
self.local_box = QtWidgets.QGroupBox('Local Cluster')
self.local_box.setToolTip('Uses your computer resources in starting the Local Cluster, this is what you want when running on a computer normally.')
self.local_box.setCheckable(True)
self.local_box.setChecked(True)
self.local_layout = QtWidgets.QVBoxLayout()
self.number_workers_layout = QtWidgets.QHBoxLayout()
self.number_workers_checkbox = QtWidgets.QCheckBox('Override Number of Workers')
self.number_workers_checkbox.setToolTip('Use this checkbox if you want to set a specific number of workers, default is the number of cores on your machine')
self.number_workers_checkbox.setChecked(False)
self.number_workers_layout.addWidget(self.number_workers_checkbox)
self.number_workers = QtWidgets.QLineEdit('')
self.number_workers.setMaximumWidth(40)
self.number_workers_layout.addWidget(self.number_workers)
self.local_layout.addLayout(self.number_workers_layout)
self.number_threads_layout = QtWidgets.QHBoxLayout()
self.number_threads_checkbox = QtWidgets.QCheckBox('Override Threads per Worker')
self.number_threads_checkbox.setToolTip('Use this checkbox if you want to set a specific number of threads per worker, default is based on the number of cores on your machine')
self.number_threads_checkbox.setChecked(False)
self.number_threads_layout.addWidget(self.number_threads_checkbox)
self.number_threads = QtWidgets.QLineEdit('')
self.number_threads.setMaximumWidth(40)
self.number_threads_layout.addWidget(self.number_threads)
self.local_layout.addLayout(self.number_threads_layout)
self.number_memory_layout = QtWidgets.QHBoxLayout()
self.number_memory_checkbox = QtWidgets.QCheckBox('Override Memory (GB) per Worker')
self.number_memory_checkbox.setToolTip('Use this amount of memory for each worker, default is the max memory available on your system')
self.number_memory_checkbox.setChecked(False)
self.number_memory_layout.addWidget(self.number_memory_checkbox)
self.number_memory = QtWidgets.QLineEdit('')
self.number_memory.setMaximumWidth(40)
self.number_memory_layout.addWidget(self.number_memory)
self.local_layout.addLayout(self.number_memory_layout)
self.local_box.setLayout(self.local_layout)
self.client_vbox.addWidget(self.local_box)
self.remote_box = QtWidgets.QGroupBox('Remote Client')
self.remote_box.setToolTip('Use this when you have set up a Dask Cluster on a remote server, the address given here is the address of that server.')
self.remote_box.setCheckable(True)
self.remote_box.setChecked(False)
self.remote_layout = QtWidgets.QVBoxLayout()
self.remote_ip_layout = QtWidgets.QHBoxLayout()
self.remote_ip_radio = QtWidgets.QRadioButton('By IP ')
self.remote_ip_radio.setChecked(True)
self.remote_ip_layout.addWidget(self.remote_ip_radio)
self.remote_ip_address_label = QtWidgets.QLabel('Address')
self.remote_ip_layout.addWidget(self.remote_ip_address_label)
self.remote_ip_address = QtWidgets.QLineEdit('')
self.remote_ip_address.setInputMask('000.000.000.000;_')
self.remote_ip_address.setMaximumWidth(93)
self.remote_ip_layout.addWidget(self.remote_ip_address)
self.remote_ip_layout.addStretch(1)
self.remote_ip_port_label = QtWidgets.QLabel('Port')
self.remote_ip_layout.addWidget(self.remote_ip_port_label)
self.remote_ip_port = QtWidgets.QLineEdit('')
self.remote_ip_port.setInputMask('00000;_')
self.remote_ip_port.setMaximumWidth(40)
self.remote_ip_layout.addWidget(self.remote_ip_port)
self.remote_layout.addLayout(self.remote_ip_layout)
self.remote_fqdn_layout = QtWidgets.QHBoxLayout()
self.remote_fqdn_radio = QtWidgets.QRadioButton('By FQDN')
self.remote_fqdn_layout.addWidget(self.remote_fqdn_radio)
self.remote_fqdn_address_label = QtWidgets.QLabel('Address')
self.remote_fqdn_layout.addWidget(self.remote_fqdn_address_label)
self.remote_fqdn_address = QtWidgets.QLineEdit('')
self.remote_fqdn_address.setMaximumWidth(140)
self.remote_fqdn_layout.addWidget(self.remote_fqdn_address)
self.remote_fqdn_layout.addStretch(1)
self.remote_fqdn_port_label = QtWidgets.QLabel('Port')
self.remote_fqdn_layout.addWidget(self.remote_fqdn_port_label)
self.remote_fqdn_port = QtWidgets.QLineEdit('')
self.remote_fqdn_port.setInputMask('00000;_')
self.remote_fqdn_port.setMaximumWidth(40)
self.remote_fqdn_layout.addWidget(self.remote_fqdn_port)
self.remote_layout.addLayout(self.remote_fqdn_layout)
self.remote_box.setLayout(self.remote_layout)
self.client_vbox.addWidget(self.remote_box)
self.status_msg = QtWidgets.QLabel('')
self.status_msg.setStyleSheet("QLabel { " + kluster_variables.error_color + "; }")
self.client_vbox.addWidget(self.status_msg)
self.button_layout = QtWidgets.QHBoxLayout()
self.button_layout.addStretch(1)
self.ok_button = QtWidgets.QPushButton('OK', self)
self.button_layout.addWidget(self.ok_button)
self.button_layout.addStretch(1)
self.cancel_button = QtWidgets.QPushButton('Cancel', self)
self.button_layout.addWidget(self.cancel_button)
self.button_layout.addStretch(1)
self.client_vbox.addLayout(self.button_layout)
self.cl = None
self.setLayout(self.client_vbox)
self.remote_box.clicked.connect(self.uncheck_local_box)
self.local_box.clicked.connect(self.uncheck_remote_box)
self.ok_button.clicked.connect(self.setup_client)
self.cancel_button.clicked.connect(self.cancel_client)
self.text_controls = [['number_workers', self.number_workers], ['number_memory', self.number_memory],
['number_threads', self.number_threads], ['remote_ip_address', self.remote_ip_address],
['remote_ip_port', self.remote_ip_port], ['remote_fqdn_address', self.remote_fqdn_address],
['remote_fqdn_port', self.remote_fqdn_port]]
self.checkbox_controls = [['local_box', self.local_box], ['remote_box', self.remote_box],
['number_workers_checkbox', self.number_workers_checkbox],
['number_threads_checkbox', self.number_threads_checkbox],
['number_memory_checkbox', self.number_memory_checkbox]]
self.read_settings()
def uncheck_local_box(self):
self.local_box.setChecked(False)
def uncheck_remote_box(self):
self.remote_box.setChecked(False)
def setup_client(self):
"""
Start a new dask client with the options you have here. Save to the cl attribute so the main window can pull
it out after user hits OK.
"""
if self.local_box.isChecked() or self.remote_box.isChecked():
self.accept()
self.save_settings()
if self.local_box.isChecked():
try: # have to close the existing local cluster/client first if you have one running before you can recreate
client = get_client()
client.close()
except:
pass
numworker = None
threadsworker = None
memoryworker = None
multiprocessing = True
if self.number_workers_checkbox.isChecked():
try:
numworker = int(self.number_workers.text())
if numworker < 1:
numworker = 1
if numworker == 1:
multiprocessing = False
except:
print('Invalid number of workers provided, number must be an integer, ex: 4')
return
if self.number_threads_checkbox.isChecked():
try:
threadsworker = int(self.number_threads.text())
if threadsworker < 1:
threadsworker = 1
except:
print('Invalid number of threads provided, number must be an integer, ex: 2')
return
if self.number_memory_checkbox.isChecked():
try:
memoryworker = str(self.number_memory.text()) + 'GB'
except:
print('Invalid memory per worker provided, number must be an integer, ex: 5')
return
self.cl = dask_find_or_start_client(number_of_workers=numworker, threads_per_worker=threadsworker,
memory_per_worker=memoryworker, multiprocessing=multiprocessing)
else:
if self.remote_ip_radio.isChecked():
full_address = self.remote_ip_address.text() + ':' + self.remote_ip_port.text()
else:
full_address = self.remote_fqdn_address.text() + ':' + self.remote_fqdn_port.text()
print('Starting client at address {}'.format(full_address))
try:
self.cl = dask_find_or_start_client(address=full_address)
            except:  # throws dask socket.gaierror; I'm not bothering to make this explicit
print('Unable to connect to remote Dask instance')
else:
self.status_msg.setText('Please select one of the options above (Local or Remote)')
def cancel_client(self):
self.accept()
if __name__ == '__main__':
try: # pyside2
app = QtWidgets.QApplication()
except TypeError: # pyqt5
app = QtWidgets.QApplication([])
dlog = DaskClientStart()
dlog.show()
if dlog.exec_():
pass
| 51.013953 | 184 | 0.661105 |
4a1ce87ff4ad97dcb2305b7ae819c4e9fc7ad3a4
| 409 |
py
|
Python
|
backend/lyftmylife_32006/wsgi.py
|
crowdbotics-apps/lyftmylife-32006
|
f560c3b68bb66b1b164ac7928f9597f0ee90aa9f
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/lyftmylife_32006/wsgi.py
|
crowdbotics-apps/lyftmylife-32006
|
f560c3b68bb66b1b164ac7928f9597f0ee90aa9f
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/lyftmylife_32006/wsgi.py
|
crowdbotics-apps/lyftmylife-32006
|
f560c3b68bb66b1b164ac7928f9597f0ee90aa9f
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for lyftmylife_32006 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lyftmylife_32006.settings')
application = get_wsgi_application()
| 24.058824 | 78 | 0.794621 |
4a1ce9129e02117856d7ceca5231083ca9d571b9
| 1,378 |
py
|
Python
|
target_athena/formats.py
|
beubeu13220/target-athena
|
ebe012bb6694f3685e4efa616a0acbd75c982fc5
|
[
"Apache-2.0"
] | 4 |
2021-09-08T17:41:57.000Z
|
2021-12-22T03:30:06.000Z
|
target_athena/formats.py
|
beubeu13220/target-athena
|
ebe012bb6694f3685e4efa616a0acbd75c982fc5
|
[
"Apache-2.0"
] | 19 |
2021-05-28T21:48:41.000Z
|
2021-08-23T04:17:01.000Z
|
target_athena/formats.py
|
beubeu13220/target-athena
|
ebe012bb6694f3685e4efa616a0acbd75c982fc5
|
[
"Apache-2.0"
] | 7 |
2021-12-02T19:27:57.000Z
|
2022-03-09T08:23:12.000Z
|
"""Methods for writinig different object formats."""
import os
import csv
import json
def write_csv(filename, record, header=None, delimiter=",", quotechar='"'):
file_is_empty = (not os.path.isfile(filename)) or os.stat(
filename
).st_size == 0
    # Only derive the header when the caller did not supply one.
    if not header:
        if file_is_empty:
            header = record.keys()
        else:
            with open(filename, "r") as csv_file:
                reader = csv.reader(
                    csv_file, delimiter=delimiter, quotechar=quotechar
                )
                first_line = next(reader)
                header = first_line if first_line else record.keys()
    # Athena does not support newline characters in CSV format, so replace
    # each newline character with the literal two-character sequence "\n".
for k, v in record.items():
if isinstance(v, str) and "\n" in v:
record[k] = v.replace("\n", "\\n")
with open(filename, "a") as csv_file:
writer = csv.DictWriter(
csv_file,
header,
extrasaction="ignore",
delimiter=delimiter,
quotechar=quotechar,
)
if file_is_empty:
writer.writeheader()
writer.writerow(record)
def write_jsonl(filename, record):
with open(filename, 'a', encoding='utf-8') as json_file:
json_file.write(json.dumps(record, default=str) + '\n')
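# Minimal usage sketch; the file names and record contents below are
# illustrative assumptions, not part of the target-athena API.
if __name__ == "__main__":
    write_csv("example.csv", {"id": 1, "note": "line1\nline2"})
    write_jsonl("example.jsonl", {"id": 1, "payload": {"a": 1}})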
| 29.956522 | 76 | 0.575472 |
4a1ce92ddef15d3ed22a7325d2db115835928f4e
| 9,530 |
py
|
Python
|
recipes/recipe_modules/bot_update/examples/full.py
|
mycode9998/chromium-tools-depot_tools
|
490961030b55067d50c87e2125c631f5078332a0
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/recipe_modules/bot_update/examples/full.py
|
mycode9998/chromium-tools-depot_tools
|
490961030b55067d50c87e2125c631f5078332a0
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/recipe_modules/bot_update/examples/full.py
|
mycode9998/chromium-tools-depot_tools
|
490961030b55067d50c87e2125c631f5078332a0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'bot_update',
'gclient',
'gerrit',
'tryserver',
'recipe_engine/buildbucket',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/runtime',
]
from recipe_engine import types
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
def RunSteps(api):
api.gclient.use_mirror = True
commit = api.buildbucket.build.input.gitiles_commit
src_cfg = api.gclient.make_config(CACHE_DIR=api.path['cache'].join('git'))
soln = src_cfg.solutions.add()
soln.name = 'src'
soln.url = 'https://chromium.googlesource.com/chromium/src.git'
soln.revision = commit.id or commit.ref or None
api.gclient.c = src_cfg
api.gclient.c.revisions.update(api.properties.get('revisions', {}))
if api.properties.get('deprecated_got_revision_mapping'):
api.gclient.c.got_revision_mapping['src'] = 'got_cr_revision'
else:
api.gclient.c.got_revision_reverse_mapping['got_cr_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_v8_revision'] = 'src/v8'
api.gclient.c.got_revision_reverse_mapping['got_angle_revision'] = (
'src/third_party/angle')
api.gclient.c.repo_path_map.update({
'https://chromium.googlesource.com/angle/angle': (
'src/third_party/angle', 'HEAD'),
'https://chromium.googlesource.com/v8/v8': ('src/v8', 'HEAD'),
'https://webrtc.googlesource.com/src': ('src/third_party/webrtc', 'HEAD'),
})
patch = api.properties.get('patch', True)
clobber = True if api.properties.get('clobber') else False
with_branch_heads = api.properties.get('with_branch_heads', False)
with_tags = api.properties.get('with_tags', False)
refs = api.properties.get('refs', [])
root_solution_revision = api.properties.get('root_solution_revision')
suffix = api.properties.get('suffix')
gerrit_no_reset = True if api.properties.get('gerrit_no_reset') else False
gerrit_no_rebase_patch_ref = bool(
api.properties.get('gerrit_no_rebase_patch_ref'))
patch_refs = api.properties.get('patch_refs')
add_blamelists = api.properties.get('add_blamelists', False)
set_output_commit = api.properties.get('set_output_commit', True)
step_test_data = None
bot_update_output = types.thaw(api.properties.get('bot_update_output'))
if bot_update_output:
step_test_data = lambda: api.json.test_api.output(bot_update_output)
bot_update_step = api.bot_update.ensure_checkout(
patch=patch,
with_branch_heads=with_branch_heads,
with_tags=with_tags,
refs=refs,
clobber=clobber,
root_solution_revision=root_solution_revision,
suffix=suffix,
gerrit_no_reset=gerrit_no_reset,
gerrit_no_rebase_patch_ref=gerrit_no_rebase_patch_ref,
disable_syntax_validation=True,
patch_refs=patch_refs,
add_blamelists=add_blamelists,
set_output_commit=set_output_commit,
step_test_data=step_test_data,
)
if patch:
api.bot_update.deapply_patch(bot_update_step)
if api.properties.get('resolve_chromium_fixed_version'):
api.bot_update.resolve_fixed_revision(bot_update_step.json.output, 'src')
def GenTests(api):
def try_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return api.buildbucket.try_build('chromium/src', 'try', 'linux', **kwargs)
def ci_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return (
api.buildbucket.ci_build('chromium/src', 'ci', 'linux', **kwargs) +
api.properties(patch=False)
)
yield (
api.test('basic') +
ci_build()
)
yield (
api.test('input_commit_with_id_without_repo') +
api.buildbucket.build(Build(
input={
'gitiles_commit': {
'id': 'a' * 40,
},
},
))
)
yield (
api.test('unrecognized_commit_repo') +
ci_build(git_repo='https://unrecognized/repo')
)
yield (
api.test('basic_luci') +
ci_build() +
api.runtime(is_experimental=False, is_luci=True)
)
yield (
api.test('resolve_chromium_fixed_version') +
ci_build() +
api.properties(resolve_chromium_fixed_version=True)
)
yield (
api.test('basic_with_branch_heads') +
ci_build() +
api.properties(
with_branch_heads=True,
suffix='with branch heads'
)
)
yield (
api.test('with_tags') +
api.properties(with_tags=True)
)
yield (
api.test('deprecated_got_revision_mapping') +
try_build() +
api.properties(
deprecated_got_revision_mapping=True,
set_output_commit=False,
)
)
yield (
api.test('refs') +
api.properties(refs=['+refs/change/1/2/333'])
)
yield (
api.test('tryjob_fail') +
try_build() +
api.step_data('bot_update', api.json.invalid(None), retcode=1)
)
yield (
api.test('tryjob_fail_patch') +
try_build() +
api.properties(fail_patch='apply') +
api.step_data('bot_update', retcode=88)
)
yield (
api.test('tryjob_fail_patch_download') +
try_build() +
api.properties(fail_patch='download') +
api.step_data('bot_update', retcode=87)
)
yield (
api.test('clobber') +
api.properties(clobber=1)
)
yield (
api.test('reset_root_solution_revision') +
api.properties(root_solution_revision=api.bot_update.gen_revision('fake-revision'))
)
yield (
api.test('gerrit_no_reset') +
api.properties(gerrit_no_reset=1)
)
yield (
api.test('gerrit_no_rebase_patch_ref') +
api.properties(gerrit_no_rebase_patch_ref=True)
)
yield (
api.test('tryjob_v8') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.properties(revisions={'src/v8': 'abc'})
)
yield (
api.test('tryjob_v8_head_by_default') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8')
)
yield (
api.test('tryjob_gerrit_angle') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('no_apply_patch_on_gclient') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('tryjob_gerrit_v8_feature_branch') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_feature_branch') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_branch_heads') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/branch-heads/67')
)
yield (
api.test('tryjob_gerrit_webrtc') +
try_build(git_repo='https://webrtc.googlesource.com/src')
)
yield (
api.test('multiple_patch_refs') +
api.properties(
patch_refs=[
('https://chromium.googlesource.com/chromium/src@'
'refs/changes/12/34/5'),
'https://chromium.googlesource.com/v8/v8@refs/changes/124/45/6',
],
)
)
yield (
api.test('origin_master') +
ci_build(revision='origin/master')
)
yield (
api.test('add_blamelists') +
ci_build() +
api.properties(
add_blamelists=True,
revisions={'src/v8': 'HEAD'},
)
)
yield (
api.test('no_cp_checkout_a_specific_commit') +
ci_build(revision='a' * 40) +
api.properties(
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_master') +
ci_build(revision='') +
api.properties(
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_a_branch_head') +
ci_build(revision='', git_ref='refs/branch-heads/x') +
api.properties(
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_HEAD') +
ci_build(revision='HEAD') +
api.properties(
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
| 29.78125 | 89 | 0.619098 |
4a1ceaf38773fa0905170c8e89ab1c5dd2a537ec
| 1,049 |
py
|
Python
|
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
def sort(keys):
    """Sort a list of strings in place with 3-way string (MSD) quicksort."""
    _sort(keys, 0, len(keys)-1, 0)
def _sort(keys, lo, hi, start):
    """Recursively sort keys[lo..hi] on the character at index `start`."""
    if hi <= lo:
        return
    # Dijkstra 3-way partition on the character at index `start`
    lt = lo
    gt = hi
    v = get_r(keys[lt], start)
i = lt + 1
while i <= gt:
c = get_r(keys[i], start)
if c < v:
keys[lt], keys[i] = keys[i], keys[lt]
lt += 1
i += 1
elif c > v:
keys[i], keys[gt] = keys[gt], keys[i]
gt -= 1
else:
i += 1
    _sort(keys, lo, lt-1, start)      # keys whose char at `start` is smaller
    if v >= 0:                        # skip exhausted keys (sentinel -1)
        _sort(keys, lt, gt, start+1)  # equal chars: recurse on next position
    _sort(keys, gt+1, hi, start)      # keys whose char at `start` is larger
def get_r(key, i):
    """Return the code point of key[i], or -1 once the key is exhausted."""
    if i < len(key):
        return ord(key[i])
return -1
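# e.g. get_r("she", 0) == ord("s") and get_r("she", 3) == -1; the -1 sentinel
# makes exhausted (shorter) keys sort before longer keys with the same prefix.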
if __name__ == '__main__':
keys = [
'she',
'sells',
'seashells',
'by',
'the',
'seashore',
'the',
'shells',
'she',
'sells',
'are',
'surely',
'seashells',
]
expected = sorted(keys[:])
assert keys != expected
sort(keys)
assert keys == expected
| 16.919355 | 49 | 0.414681 |
4a1ceb094286746a0f0c0ded1dc0541966c411e0
| 120 |
py
|
Python
|
guet/steps/preparation/__init__.py
|
AbhishekMashetty/pairprogrammingmasetty
|
0528d4999b472ec6d94058193275a505eaf2c762
|
[
"Apache-2.0"
] | 13 |
2018-12-21T22:47:28.000Z
|
2021-12-17T14:27:35.000Z
|
guet/steps/preparation/__init__.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 63 |
2018-08-30T11:19:12.000Z
|
2021-05-13T12:11:08.000Z
|
guet/steps/preparation/__init__.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 7 |
2019-05-21T13:52:37.000Z
|
2022-01-30T22:57:21.000Z
|
from ._local_swap import SwapToLocal
from .initialize import InitializePreparation
from .preapration import Preparation
| 30 | 45 | 0.875 |
4a1cebabd16ee14ada0a50ec86a59c6ad58b8874
| 14,344 |
py
|
Python
|
tools/offwaketime.py
|
palao/bcc
|
ed17d7f387ee71afbb64d9b08bf89c2d6c9a74b4
|
[
"Apache-2.0"
] | 4 |
2018-01-29T13:38:50.000Z
|
2021-06-30T07:28:47.000Z
|
tools/offwaketime.py
|
palao/bcc
|
ed17d7f387ee71afbb64d9b08bf89c2d6c9a74b4
|
[
"Apache-2.0"
] | 13 |
2018-02-09T22:24:29.000Z
|
2018-06-18T22:33:29.000Z
|
tools/offwaketime.py
|
palao/bcc
|
ed17d7f387ee71afbb64d9b08bf89c2d6c9a74b4
|
[
"Apache-2.0"
] | 5 |
2018-01-31T05:04:19.000Z
|
2018-06-12T00:45:21.000Z
|
#!/usr/bin/python
#
# offwaketime Summarize blocked time by kernel off-CPU stack + waker stack
# For Linux, uses BCC, eBPF.
#
# USAGE: offwaketime [-h] [-p PID | -u | -k] [-U | -K] [-f] [duration]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 20-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep
import argparse
import signal
import errno
from sys import stderr
# arg validation
def positive_int(val):
try:
ival = int(val)
except ValueError:
raise argparse.ArgumentTypeError("must be an integer")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
def positive_nonzero_int(val):
ival = positive_int(val)
if ival == 0:
raise argparse.ArgumentTypeError("must be nonzero")
return ival
def stack_id_err(stack_id):
    # -EFAULT in get_stackid normally means the stack-trace is not available,
# Such as getting kernel stack trace in userspace code
return (stack_id < 0) and (stack_id != -errno.EFAULT)
# arguments
examples = """examples:
./offwaketime # trace off-CPU + waker stack time until Ctrl-C
./offwaketime 5 # trace for 5 seconds only
./offwaketime -f 5 # 5 seconds, and output in folded format
./offwaketime -m 1000 # trace only events that last more than 1000 usec
./offwaketime -M 9000 # trace only events that last less than 9000 usec
./offwaketime -p 185 # only trace threads for PID 185
./offwaketime -t 188 # only trace thread 188
./offwaketime -u # only trace user threads (no kernel)
./offwaketime -k # only trace kernel threads (no user)
./offwaketime -U # only show user space stacks (no kernel)
./offwaketime -K # only show kernel space stacks (no user)
"""
parser = argparse.ArgumentParser(
description="Summarize blocked time by kernel stack trace + waker stack",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
# Note: this script provides --pid and --tid flags but their arguments are
# referred to internally using kernel nomenclature: TGID and PID.
thread_group.add_argument("-p", "--pid", metavar="PID", dest="tgid",
help="trace this PID only", type=positive_int)
thread_group.add_argument("-t", "--tid", metavar="TID", dest="pid",
help="trace this TID only", type=positive_int)
thread_group.add_argument("-u", "--user-threads-only", action="store_true",
help="user threads only (no kernel threads)")
thread_group.add_argument("-k", "--kernel-threads-only", action="store_true",
help="kernel threads only (no user threads)")
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-d", "--delimited", action="store_true",
help="insert delimiter between kernel/user stacks")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("--stack-storage-size", default=1024,
type=positive_nonzero_int,
help="the number of unique stack traces that can be stored and "
"displayed (default 1024)")
parser.add_argument("duration", nargs="?", default=99999999,
type=positive_nonzero_int,
help="duration of trace, in seconds")
parser.add_argument("-m", "--min-block-time", default=1,
type=positive_nonzero_int,
help="the amount of time in microseconds over which we " +
"store traces (default 1)")
parser.add_argument("-M", "--max-block-time", default=(1 << 64) - 1,
type=positive_nonzero_int,
help="the amount of time in microseconds under which we " +
"store traces (default U64_MAX)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
# signal handler
def signal_ignore(signal, frame):
print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MINBLOCK_US MINBLOCK_US_VALUEULL
#define MAXBLOCK_US MAXBLOCK_US_VALUEULL
struct key_t {
char waker[TASK_COMM_LEN];
char target[TASK_COMM_LEN];
int w_k_stack_id;
int w_u_stack_id;
int t_k_stack_id;
int t_u_stack_id;
u32 t_pid;
u32 t_tgid;
u32 w_pid;
u32 w_tgid;
};
BPF_HASH(counts, struct key_t);
// Key of this hash is PID of waiting Process,
// value is timestamp when it went into waiting
BPF_HASH(start, u32);
struct wokeby_t {
char name[TASK_COMM_LEN];
int k_stack_id;
int u_stack_id;
int w_pid;
int w_tgid;
};
// Key of the hash is PID of the Process to be waken, value is information
// of the Process who wakes it
BPF_HASH(wokeby, u32, struct wokeby_t);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
int waker(struct pt_regs *ctx, struct task_struct *p) {
// PID and TGID of the target Process to be waken
u32 pid = p->pid;
u32 tgid = p->tgid;
if (!(THREAD_FILTER)) {
return 0;
}
// Construct information about current (the waker) Process
struct wokeby_t woke = {};
bpf_get_current_comm(&woke.name, sizeof(woke.name));
woke.k_stack_id = KERNEL_STACK_GET;
woke.u_stack_id = USER_STACK_GET;
woke.w_pid = bpf_get_current_pid_tgid();
woke.w_tgid = bpf_get_current_pid_tgid() >> 32;
wokeby.update(&pid, &woke);
return 0;
}
int oncpu(struct pt_regs *ctx, struct task_struct *p) {
// PID and TGID of the previous Process (Process going into waiting)
u32 pid = p->pid;
u32 tgid = p->tgid;
u64 *tsp;
u64 ts = bpf_ktime_get_ns();
// Record timestamp for the previous Process (Process going into waiting)
if (THREAD_FILTER) {
start.update(&pid, &ts);
}
// Calculate current Process's wait time by finding the timestamp of when
// it went into waiting.
// pid and tgid are now the PID and TGID of the current (waking) Process.
pid = bpf_get_current_pid_tgid();
tgid = bpf_get_current_pid_tgid() >> 32;
tsp = start.lookup(&pid);
if (tsp == 0) {
// Missed or filtered when the Process went into waiting
return 0;
}
u64 delta = ts - *tsp;
start.delete(&pid);
delta = delta / 1000;
if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) {
return 0;
}
// create map key
struct key_t key = {};
struct wokeby_t *woke;
bpf_get_current_comm(&key.target, sizeof(key.target));
key.t_pid = pid;
key.t_tgid = tgid;
key.t_k_stack_id = KERNEL_STACK_GET;
key.t_u_stack_id = USER_STACK_GET;
woke = wokeby.lookup(&pid);
if (woke) {
key.w_k_stack_id = woke->k_stack_id;
key.w_u_stack_id = woke->u_stack_id;
key.w_pid = woke->w_pid;
key.w_tgid = woke->w_tgid;
__builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
wokeby.delete(&pid);
}
counts.increment(key, delta);
return 0;
}
"""
# set thread filter
thread_context = ""
if args.tgid is not None:
thread_context = "PID %d" % args.tgid
thread_filter = 'tgid == %d' % args.tgid
elif args.pid is not None:
thread_context = "TID %d" % args.pid
thread_filter = 'pid == %d' % args.pid
elif args.user_threads_only:
thread_context = "user threads"
thread_filter = '!(p->flags & PF_KTHREAD)'
elif args.kernel_threads_only:
thread_context = "kernel threads"
thread_filter = 'p->flags & PF_KTHREAD'
else:
thread_context = "all threads"
thread_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
bpf_text = bpf_text.replace('MINBLOCK_US_VALUE', str(args.min_block_time))
bpf_text = bpf_text.replace('MAXBLOCK_US_VALUE', str(args.max_block_time))
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(ctx, 0)"
user_stack_get = "stack_traces.get_stackid(ctx, BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
stack_context = "user"
kernel_stack_get = "-1"
elif args.kernel_stacks_only:
stack_context = "kernel"
user_stack_get = "-1"
else:
stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
if args.ebpf:
print(bpf_text)
exit()
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
b.attach_kprobe(event="try_to_wake_up", fn_name="waker")
matched = b.num_open_kprobes()
if matched == 0:
print("0 functions traced. Exiting.")
exit()
# header
if not folded:
print("Tracing blocked time (us) by %s off-CPU and waker stack" %
stack_context, end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
# print a newline for folded output on Ctrl-C
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
need_delimiter = args.delimited and not (args.kernel_stacks_only or
args.user_stacks_only)
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
# handle get_stackid errors
if not args.user_stacks_only:
missing_stacks += int(stack_id_err(k.w_k_stack_id))
missing_stacks += int(stack_id_err(k.t_k_stack_id))
has_enomem = has_enomem or (k.w_k_stack_id == -errno.ENOMEM) or \
(k.t_k_stack_id == -errno.ENOMEM)
if not args.kernel_stacks_only:
missing_stacks += int(stack_id_err(k.w_u_stack_id))
missing_stacks += int(stack_id_err(k.t_u_stack_id))
has_enomem = has_enomem or (k.w_u_stack_id == -errno.ENOMEM) or \
(k.t_u_stack_id == -errno.ENOMEM)
waker_user_stack = [] if k.w_u_stack_id < 1 else \
reversed(list(stack_traces.walk(k.w_u_stack_id))[1:])
waker_kernel_stack = [] if k.w_k_stack_id < 1 else \
reversed(list(stack_traces.walk(k.w_k_stack_id))[1:])
target_user_stack = [] if k.t_u_stack_id < 1 else \
stack_traces.walk(k.t_u_stack_id)
target_kernel_stack = [] if k.t_k_stack_id < 1 else \
stack_traces.walk(k.t_k_stack_id)
if folded:
# print folded stack output
line = [k.target.decode('utf-8', 'replace')]
if not args.kernel_stacks_only:
if stack_id_err(k.t_u_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.t_tgid).decode('utf-8', 'replace')
for addr in reversed(list(target_user_stack)[1:])])
if not args.user_stacks_only:
line.extend(["-"] if (need_delimiter and k.t_k_stack_id > 0 and k.t_u_stack_id > 0) else [])
if stack_id_err(k.t_k_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(list(target_kernel_stack)[1:])])
line.append("--")
if not args.user_stacks_only:
if stack_id_err(k.w_k_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(list(waker_kernel_stack))])
if not args.kernel_stacks_only:
line.extend(["-"] if (need_delimiter and k.w_u_stack_id > 0 and k.w_k_stack_id > 0) else [])
if stack_id_err(k.w_u_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.w_tgid).decode('utf-8', 'replace')
for addr in reversed(list(waker_user_stack))])
line.append(k.waker.decode('utf-8', 'replace'))
print("%s %d" % (";".join(line), v.value))
else:
# print wakeup name then stack in reverse order
print(" %-16s %s %s" % ("waker:", k.waker.decode('utf-8', 'replace'), k.t_pid))
if not args.kernel_stacks_only:
if stack_id_err(k.w_u_stack_id):
print(" [Missed User Stack]")
else:
for addr in waker_user_stack:
print(" %s" % b.sym(addr, k.w_tgid))
if not args.user_stacks_only:
if need_delimiter and k.w_u_stack_id > 0 and k.w_k_stack_id > 0:
print(" -")
if stack_id_err(k.w_k_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in waker_kernel_stack:
print(" %s" % b.ksym(addr))
# print waker/wakee delimiter
print(" %-16s %s" % ("--", "--"))
if not args.user_stacks_only:
if stack_id_err(k.t_k_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in target_kernel_stack:
print(" %s" % b.ksym(addr))
if not args.kernel_stacks_only:
if need_delimiter and k.t_u_stack_id > 0 and k.t_k_stack_id > 0:
print(" -")
if stack_id_err(k.t_u_stack_id):
print(" [Missed User Stack]")
else:
for addr in target_user_stack:
print(" %s" % b.sym(addr, k.t_tgid))
print(" %-16s %s %s" % ("target:", k.target.decode('utf-8', 'replace'), k.w_pid))
print(" %d\n" % v.value)
if missing_stacks > 0:
enomem_str = " Consider increasing --stack-storage-size."
print("WARNING: %d stack traces lost and could not be displayed.%s" %
(missing_stacks, (enomem_str if has_enomem else "")),
file=stderr)
| 36.406091 | 104 | 0.643196 |
4a1cebebff575e77d8f02d9cb43e908aca310f15
| 11,510 |
py
|
Python
|
Code/linkedlist.py
|
Jeromeschmidt/CS-1.3-Core-Data-Structures
|
d897010e17fc5569cf972963fb9337a8ec08aed6
|
[
"MIT"
] | null | null | null |
Code/linkedlist.py
|
Jeromeschmidt/CS-1.3-Core-Data-Structures
|
d897010e17fc5569cf972963fb9337a8ec08aed6
|
[
"MIT"
] | 6 |
2020-02-14T07:54:37.000Z
|
2020-03-10T19:17:43.000Z
|
Code/linkedlist.py
|
Jeromeschmidt/CS-1.3-Core-Data-Structures
|
d897010e17fc5569cf972963fb9337a8ec08aed6
|
[
"MIT"
] | null | null | null |
#!python
###Doubly linked list and tests can be found at: https://github.com/Jeromeschmidt/CS-1.2-Intro-Data-Structures/blob/master/Code/linkedlist.py
class Node(object):
def __init__(self, data):
"""Initialize this node with the given data."""
self.data = data
self.next = None
def __repr__(self):
"""Return a string representation of this node."""
return 'Node({!r})'.format(self.data)
    def __iter__(self):
        node = self
        while node is not None:
            yield node.data
            node = node.next
class LinkedList(object):
def __init__(self, iterable=None):
"""Initialize this linked list and append the given items, if any."""
self.head = None # First node
self.tail = None # Last node
self.size = 0 # Number of nodes
# Append the given items
if iterable is not None:
for item in iterable:
self.append(item)
def __str__(self):
"""Return a formatted string representation of this linked list."""
items = ['({!r})'.format(item) for item in self.items()]
return '[{}]'.format(' -> '.join(items))
def __repr__(self):
"""Return a string representation of this linked list."""
return 'LinkedList({!r})'.format(self.items())
def items(self):
"""Return a list of all items in this linked list.
Best and worst case running time: Theta(n) for n items in the list
because we always need to loop through all n nodes."""
# Create an empty list of results
result = [] # Constant time to create a new list
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Always n iterations because no early exit
# Append this node's data to the results list
result.append(node.data) # Constant time to append to a list
# Skip to the next node
node = node.next # Constant time to reassign a variable
# Now result contains the data from all nodes
return result # Constant time to return a list
def is_empty(self):
"""Return True if this linked list is empty, or False."""
return self.head is None
def length(self):
"""Return the length of this linked list by traversing its nodes.
        Best and worst case running time: Theta(n) for n items, because we
        always traverse all n nodes to count them."""
# Node counter initialized to zero
node_count = 0
# Start at the head node
node = self.head
# Loop until the node is None, which is one node too far past the tail
while node is not None:
# Count one for this node
node_count += 1
# Skip to the next node
node = node.next
# Now node_count contains the number of nodes
return node_count
def get_at_index(self, index):
"""Return the item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
        Best case running time: Omega(1) if the index is 0 (item at the head).
        Worst case running time: O(n) if the index is near the tail, since we
        traverse index nodes from the head."""
# Check if the given index is out of range and if so raise an error
if not (0 <= index < self.size):
raise ValueError('List index out of range: {}'.format(index))
# TODO: Find the node at the given index and return its data
node = self.head
if index == 0:
return node.data
for i in range(index):
node = node.next
return node.data
def insert_at_index(self, index, item):
"""Insert the given item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
        Best case running time: O(1) if index is 0 or self.size (head or tail
        insert). Worst case running time: O(n) for an index near the tail,
        since we traverse the list to find the node before it."""
# Check if the given index is out of range and if so raise an error
if not (0 <= index <= self.size):
raise ValueError('List index out of range: {}'.format(index))
# TODO: Find the node before the given index and insert item after it
if self.size == 0:
node = Node(item)
self.head = self.tail = node
self.size += 1
return
if index == 0:
self.prepend(item)
return
if index == self.size:
self.append(item)
return
node = self.head
for i in range(index-1):
node = node.next
new_node = Node(item)
new_node.next = node.next
node.next = new_node
self.size += 1
return
def append(self, item):
"""Insert the given item at the tail of this linked list.
        Best and worst case running time: O(1), because we keep a tail
        reference and never need to traverse the list."""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign head to new node
self.head = new_node
else:
# Otherwise insert new node after tail
self.tail.next = new_node
# Update tail to new node regardless
self.size += 1
self.tail = new_node
def prepend(self, item):
"""Insert the given item at the head of this linked list.
        Best and worst case running time: O(1), because we keep a head
        reference and never need to traverse the list."""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign tail to new node
self.tail = new_node
else:
# Otherwise insert new node before head
new_node.next = self.head
# Update head to new node regardless
self.size += 1
self.head = new_node
def find(self, quality):
"""Return an item from this linked list satisfying the given quality.
Best case running time: Omega(1) if item is near the head of the list.
Worst case running time: O(n) if item is near the tail of the list or
not present and we need to loop through all n nodes in the list."""
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Up to n iterations if we don't exit early
# Check if this node's data satisfies the given quality function
if quality(node.data): # Constant time to call quality function
# We found data satisfying the quality function, so exit early
return node.data # Constant time to return data
# Skip to the next node
node = node.next # Constant time to reassign a variable
# We never found data satisfying quality, but have to return something
return None # Constant time to return None
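    # Illustrative use (assumed data): ll.find(lambda data: data == 'B')
    # returns 'B' when such an item is present, otherwise None.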
def replace(self, old_item, new_item):
"""Replace the given old_item in this linked list with given new_item
using the same node, or raise ValueError if old_item is not found.
        Best case running time: Omega(1) if old_item is at the head.
        Worst case running time: O(n) if old_item is near the tail or absent,
        since we traverse all n nodes."""
# TODO: Find the node containing the given old_item and replace its
# data with new_item, without creating a new node object
        if self.size == 0:
            raise ValueError('Item not found: {}'.format(old_item))
        # Check if the wanted node is the head node
        if self.head.data == old_item:
            self.head.data = new_item
            return
curr_node = self.head
found = False
#loop until node is found or end of linked list
while curr_node is not None:
if curr_node.data == old_item:
curr_node.data = new_item
found = True
curr_node = curr_node.next
if found == True:
return
if found == False:
raise ValueError('Item not found: {}'.format(old_item))
def delete(self, item):
"""Delete the given item from this linked list, or raise ValueError.
        Best case running time: Omega(1) if the item is at the head.
        Worst case running time: O(n) if the item is near the tail or not
        present, since we traverse all n nodes."""
# Start at the head node
node = self.head
# Keep track of the node before the one containing the given item
previous = None
# Create a flag to track if we have found the given item
found = False
# Loop until we have found the given item or the node is None
while not found and node is not None:
# Check if the node's data matches the given item
if node.data == item:
# We found data matching the given item, so update found flag
found = True
else:
# Skip to the next node
previous = node
node = node.next
# Check if we found the given item or we never did and reached the tail
if found:
# Check if we found a node in the middle of this linked list
if node is not self.head and node is not self.tail:
# Update the previous node to skip around the found node
previous.next = node.next
# Unlink the found node from its next node
node.next = None
# Check if we found a node at the head
if node is self.head:
# Update head to the next node
self.head = node.next
# Unlink the found node from the next node
node.next = None
# Check if we found a node at the tail
if node is self.tail:
# Check if there is a node before the found node
if previous is not None:
# Unlink the previous node from the found node
previous.next = None
# Update tail to the previous node regardless
self.tail = previous
self.size -= 1
else:
# Otherwise raise an error to tell the user that delete has failed
raise ValueError('Item not found: {}'.format(item))
def test_linked_list():
ll = LinkedList()
print(ll)
print('Appending items:')
ll.append('A')
print(ll)
ll.append('B')
print(ll)
ll.append('C')
print(ll)
ll.insert_at_index(2, 'B')
print(ll)
# ll.replace('B', 'E')
# print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
print('Getting items by index:')
for index in range(ll.size):
item = ll.get_at_index(index)
print('get_at_index({}): {!r}'.format(index, item))
print('Deleting items:')
ll.delete('B')
print(ll)
ll.delete('C')
print(ll)
ll.delete('A')
print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
if __name__ == '__main__':
test_linked_list()
| 39.016949 | 141 | 0.58636 |
4a1cebf2fffa4e807660fa084ba5a8ce5e56bd4d
| 448 |
py
|
Python
|
slmpd/migrations/0005_auto_20201202_1710.py
|
phecht/djxpde
|
01c092c1329fc3493ad2baad2dfcd2b10af88008
|
[
"MIT"
] | null | null | null |
slmpd/migrations/0005_auto_20201202_1710.py
|
phecht/djxpde
|
01c092c1329fc3493ad2baad2dfcd2b10af88008
|
[
"MIT"
] | 6 |
2021-04-06T18:19:18.000Z
|
2021-06-10T20:23:03.000Z
|
slmpd/migrations/0005_auto_20201202_1710.py
|
phecht/djxpde
|
01c092c1329fc3493ad2baad2dfcd2b10af88008
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-02 17:10
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('slmpd', '0004_auto_20201130_1748'),
]
operations = [
migrations.AlterModelManagers(
name='crime_neighborhood',
managers=[
('objectsX', django.db.models.manager.Manager()),
],
),
]
| 21.333333 | 65 | 0.59375 |
4a1cec3c5ad826132d680c4c3812165a4e20ae5e
| 6,344 |
py
|
Python
|
mipego/misc.py
|
Basvanstein/MIP-EGO
|
e1ed0b0ea020850c72c4de5efd5dda0a99de571f
|
[
"MIT"
] | 23 |
2018-07-20T17:22:28.000Z
|
2022-02-23T15:41:30.000Z
|
mipego/misc.py
|
Basvanstein/MIP-EGO
|
e1ed0b0ea020850c72c4de5efd5dda0a99de571f
|
[
"MIT"
] | 5 |
2019-03-05T22:09:13.000Z
|
2021-10-08T08:48:43.000Z
|
mipego/misc.py
|
Basvanstein/MIP-EGO
|
e1ed0b0ea020850c72c4de5efd5dda0a99de571f
|
[
"MIT"
] | 14 |
2018-05-15T21:47:57.000Z
|
2021-12-07T02:04:38.000Z
|
"""
Created on Mon May 19 10:17:43 2014
@author: Hao Wang
@email: wangronin@gmail.com
"""
import logging, os, random, string, re
from copy import copy
import numpy as np
# TODO: rewrite those functions in C/Cython
def non_dominated_set_2d(y, minimize=True):
"""
Argument
--------
y : numpy 2d array,
        where each solution occupies a row
"""
y = np.asarray(y)
N, _ = y.shape
if isinstance(minimize, bool):
minimize = [minimize]
minimize = np.asarray(minimize).ravel()
assert len(minimize) == 1 or minimize.shape == (N, )
y *= (np.asarray([-1] * N) ** minimize).reshape(-1, 1)
_ = np.argsort(y[:, 0])[::-1]
y2 = y[_, 1]
ND = []
for i in range(N):
v = y2[i]
if not any(v <= y2[ND]) or len(ND) == 0:
ND.append(i)
return _[ND]
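# Worked example (inputs are illustrative):
#   non_dominated_set_2d([[1, 3], [2, 2], [3, 1], [3, 3]]) -> indices 0, 1, 2;
#   the point [3, 3] is dominated by [2, 2] and is excluded.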
def non_dominated_set_3d(y, minimize=True):
pass
def fast_non_dominated_sort(fitness):
fronts = []
dominated_set = []
mu = fitness.shape[1]
n_domination = np.zeros(mu)
for i in range(mu):
p = fitness[:, i]
p_dominated_set = []
n_p = 0
for j in range(mu):
q = fitness[:, j]
if i != j:
# TODO: verify this part
# check the strict domination
# allow for duplication points on the same front
if all(p <= q) and not all(p == q):
p_dominated_set.append(j)
elif all(p >= q) and not all(p == q):
n_p += 1
dominated_set.append(p_dominated_set)
n_domination[i] = n_p
# create the first front
fronts.append(np.nonzero(n_domination == 0)[0].tolist())
n_domination[n_domination == 0] = -1
i = 0
while True:
for p in fronts[i]:
p_dominated_set = dominated_set[p]
n_domination[p_dominated_set] -= 1
_front = np.nonzero(n_domination == 0)[0].tolist()
n_domination[n_domination == 0] = -1
if len(_front) == 0:
break
fronts.append(_front)
i += 1
return fronts
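# Worked example (illustrative): objectives in rows, individuals in columns,
#   fast_non_dominated_sort(np.array([[1, 2, 3], [3, 2, 1]])) -> [[0, 1, 2]]
# because the three points trade off against each other (a single front).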
# TODO: implement this as a C procedure
def proportional_selection(perf, N, minimize=True, replacement=True):
def select(perf):
perf_min = np.min(perf)
interval = np.cumsum((perf - perf_min) / (np.sum(perf) - perf_min * len(perf)))
return np.nonzero(np.random.rand() <= interval)[0][0]
perf = np.array(perf)
if minimize:
perf = -perf
perf -= np.min(perf)
if replacement:
res = [select(perf) for i in range(N)]
else:
assert N <= len(perf)
perf_ = copy(perf)
idx = list(range(0, len(perf)))
res = []
for i in range(N):
if len(perf_) == 1:
res.append(idx[0])
else:
_ = select(perf_)
res.append(idx[_])
perf_ = np.delete(perf_, _)
del idx[_]
return res
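# Illustrative call: proportional_selection([1., 2., 3.], 2, minimize=False)
# draws two indices, favouring index 2 (the selection itself is stochastic).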
# TODO: double check this one. It causes the explosion of step-sizes in MIES
def handle_box_constraint(x, lb, ub):
"""This function transforms x to t w.r.t. the low and high
boundaries lb and ub. It implements the function T^{r}_{[a,b]} as
described in Rui Li's PhD thesis "Mixed-Integer Evolution Strategies
for Parameter Optimization and Their Applications to Medical Image
Analysis" as alorithm 6.
"""
x = np.asarray(x, dtype='float')
shape_ori = x.shape
x = np.atleast_2d(x)
lb = np.atleast_1d(lb)
ub = np.atleast_1d(ub)
transpose = False
if x.shape[0] != len(lb):
x = x.T
transpose = True
lb, ub = lb.flatten(), ub.flatten()
lb_index = np.isfinite(lb)
up_index = np.isfinite(ub)
valid = np.bitwise_and(lb_index, up_index)
LB = lb[valid][:, np.newaxis]
UB = ub[valid][:, np.newaxis]
y = (x[valid, :] - LB) / (UB - LB)
I = np.mod(np.floor(y), 2) == 0
yprime = np.zeros(y.shape)
yprime[I] = np.abs(y[I] - np.floor(y[I]))
yprime[~I] = 1.0 - np.abs(y[~I] - np.floor(y[~I]))
x[valid, :] = LB + (UB - LB) * yprime
if transpose:
x = x.T
return x.reshape(shape_ori)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# TODO: use relative path for %(pathname)s
class LoggerFormatter(logging.Formatter):
default_time_format = '%m/%d/%Y %H:%M:%S'
default_msec_format = '%s,%02d'
FORMATS = {
logging.DEBUG : '%(asctime)s - [%(name)s.%(levelname)s] {%(pathname)s:%(lineno)d} -- %(message)s',
logging.INFO : '%(asctime)s - [%(name)s.%(levelname)s] -- %(message)s',
logging.WARNING : '%(asctime)s - [%(name)s.%(levelname)s] {%(name)s} -- %(message)s',
logging.ERROR : '%(asctime)s - [%(name)s.%(levelname)s] {%(name)s} -- %(message)s',
'DEFAULT' : '%(asctime)s - %(levelname)s -- %(message)s'
}
def __init__(self, fmt='%(asctime)s - %(levelname)s -- %(message)s'):
LoggerFormatter.FORMATS['DEFAULT'] = fmt
super().__init__(fmt=fmt, datefmt=None, style='%')
def format(self, record):
# Save the original format configured by the user
# when the logger formatter was instantiated
_fmt = self._style._fmt
# Replace the original format with one customized by logging level
self._style._fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
# Call the original formatter class to do the grunt work
fmt = logging.Formatter.format(self, record)
# Restore the original format configured by the user
self._style._fmt = _fmt
return fmt
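# Minimal usage sketch for the formatter above (hypothetical logger name):
#   handler = logging.StreamHandler()
#   handler.setFormatter(LoggerFormatter())
#   logging.getLogger('mipego').addHandler(handler)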
def random_string(k=15):
    return ''.join(random.choices(string.ascii_letters + string.digits, k=k))
def expand_replace(s):
    # expand every ${VAR} occurrence in s using the environment
    for match in re.findall(r'\$\{[^}]*\}', s):
        s = s.replace(match, os.path.expandvars(match))
    return s
if __name__ == '__main__':
# TODO: this goes to unittest
np.random.seed(1)
perf = np.random.randn(20)
print(perf)
print(proportional_selection(perf, 20, minimize=False, replacement=False))
| 29.64486 | 106 | 0.558953 |
4a1cec5b32db6aaccb392896c99e301b41ae83c5
| 26,154 |
py
|
Python
|
bot/exts/fun/trivia_quiz.py
|
katimoth/sir-lancebot
|
ec734d37acc009730eaeefb6f8e51552b1ee1165
|
[
"MIT"
] | 122 |
2020-11-22T17:25:24.000Z
|
2022-03-30T22:53:30.000Z
|
bot/exts/fun/trivia_quiz.py
|
katimoth/sir-lancebot
|
ec734d37acc009730eaeefb6f8e51552b1ee1165
|
[
"MIT"
] | 441 |
2020-11-21T00:06:19.000Z
|
2022-03-25T02:45:56.000Z
|
bot/exts/fun/trivia_quiz.py
|
katimoth/sir-lancebot
|
ec734d37acc009730eaeefb6f8e51552b1ee1165
|
[
"MIT"
] | 183 |
2020-11-22T09:24:47.000Z
|
2022-03-26T08:21:36.000Z
|
import asyncio
import json
import logging
import operator
import random
import re
import string
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Callable, Optional
import discord
from discord.ext import commands, tasks
from rapidfuzz import fuzz
from bot.bot import Bot
from bot.constants import Client, Colours, MODERATION_ROLES, NEGATIVE_REPLIES
logger = logging.getLogger(__name__)
DEFAULT_QUESTION_LIMIT = 7
STANDARD_VARIATION_TOLERANCE = 88
DYNAMICALLY_GEN_VARIATION_TOLERANCE = 97
MAX_ERROR_FETCH_TRIES = 3
WRONG_ANS_RESPONSE = [
"No one answered correctly!",
"Better luck next time...",
]
RULES = (
"No cheating and have fun!",
"Points for each question reduces by 25 after 10s or after a hint. Total time is 30s per question"
)
WIKI_FEED_API_URL = "https://en.wikipedia.org/api/rest_v1/feed/featured/{date}"
TRIVIA_QUIZ_ICON = (
"https://raw.githubusercontent.com/python-discord/branding/main/icons/trivia_quiz/trivia-quiz-dist.png"
)
@dataclass(frozen=True)
class QuizEntry:
"""Stores quiz entry (a question and a list of answers)."""
question: str
answers: list[str]
var_tol: int
class DynamicQuestionGen:
"""Class that contains functions to generate math/science questions for TriviaQuiz Cog."""
N_PREFIX_STARTS_AT = 5
N_PREFIXES = [
"penta", "hexa", "hepta", "octa", "nona",
"deca", "hendeca", "dodeca", "trideca", "tetradeca",
]
PLANETS = [
("1st", "Mercury"),
("2nd", "Venus"),
("3rd", "Earth"),
("4th", "Mars"),
("5th", "Jupiter"),
("6th", "Saturn"),
("7th", "Uranus"),
("8th", "Neptune"),
]
TAXONOMIC_HIERARCHY = [
"species", "genus", "family", "order",
"class", "phylum", "kingdom", "domain",
]
UNITS_TO_BASE_UNITS = {
"hertz": ("(unit of frequency)", "s^-1"),
"newton": ("(unit of force)", "m*kg*s^-2"),
"pascal": ("(unit of pressure & stress)", "m^-1*kg*s^-2"),
"joule": ("(unit of energy & quantity of heat)", "m^2*kg*s^-2"),
"watt": ("(unit of power)", "m^2*kg*s^-3"),
"coulomb": ("(unit of electric charge & quantity of electricity)", "s*A"),
"volt": ("(unit of voltage & electromotive force)", "m^2*kg*s^-3*A^-1"),
"farad": ("(unit of capacitance)", "m^-2*kg^-1*s^4*A^2"),
"ohm": ("(unit of electric resistance)", "m^2*kg*s^-3*A^-2"),
"weber": ("(unit of magnetic flux)", "m^2*kg*s^-2*A^-1"),
"tesla": ("(unit of magnetic flux density)", "kg*s^-2*A^-1"),
}
@classmethod
def linear_system(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a system of linear equations with two unknowns."""
x, y = random.randint(2, 5), random.randint(2, 5)
answer = a_format.format(x, y)
coeffs = random.sample(range(1, 6), 4)
question = q_format.format(
coeffs[0],
coeffs[1],
coeffs[0] * x + coeffs[1] * y,
coeffs[2],
coeffs[3],
coeffs[2] * x + coeffs[3] * y,
)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def mod_arith(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a basic modular arithmetic question."""
quotient, m, b = random.randint(30, 40), random.randint(10, 20), random.randint(200, 350)
ans = random.randint(0, 9) # max remainder is 9, since the minimum modulus is 10
a = quotient * m + ans - b
question = q_format.format(a, b, m)
answer = a_format.format(ans)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def ngonal_prism(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a question regarding vertices on n-gonal prisms."""
n = random.randint(0, len(cls.N_PREFIXES) - 1)
question = q_format.format(cls.N_PREFIXES[n])
answer = a_format.format((n + cls.N_PREFIX_STARTS_AT) * 2)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def imag_sqrt(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a negative square root question."""
ans_coeff = random.randint(3, 10)
question = q_format.format(ans_coeff ** 2)
answer = a_format.format(ans_coeff)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def binary_calc(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a binary calculation question."""
a = random.randint(15, 20)
b = random.randint(10, a)
oper = random.choice(
(
("+", operator.add),
("-", operator.sub),
("*", operator.mul),
)
)
# if the operator is multiplication, lower the values of the two operands to make it easier
if oper[0] == "*":
a -= 5
b -= 5
question = q_format.format(a, oper[0], b)
answer = a_format.format(oper[1](a, b))
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def solar_system(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a question on the planets of the Solar System."""
planet = random.choice(cls.PLANETS)
question = q_format.format(planet[0])
answer = a_format.format(planet[1])
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def taxonomic_rank(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a question on taxonomic classification."""
level = random.randint(0, len(cls.TAXONOMIC_HIERARCHY) - 2)
question = q_format.format(cls.TAXONOMIC_HIERARCHY[level])
answer = a_format.format(cls.TAXONOMIC_HIERARCHY[level + 1])
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def base_units_convert(cls, q_format: str, a_format: str) -> QuizEntry:
"""Generate a SI base units conversion question."""
unit = random.choice(list(cls.UNITS_TO_BASE_UNITS))
question = q_format.format(
unit + " " + cls.UNITS_TO_BASE_UNITS[unit][0]
)
answer = a_format.format(
cls.UNITS_TO_BASE_UNITS[unit][1]
)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
DYNAMIC_QUESTIONS_FORMAT_FUNCS = {
201: DynamicQuestionGen.linear_system,
202: DynamicQuestionGen.mod_arith,
203: DynamicQuestionGen.ngonal_prism,
204: DynamicQuestionGen.imag_sqrt,
205: DynamicQuestionGen.binary_calc,
301: DynamicQuestionGen.solar_system,
302: DynamicQuestionGen.taxonomic_rank,
303: DynamicQuestionGen.base_units_convert,
}
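# Entries in trivia_quiz.json carrying a "dynamic_id" key matching one of the
# IDs above have their "question"/"answer" fields treated as format strings,
# e.g. (hypothetical entry):
#   {"id": 204, "dynamic_id": 204,
#    "question": "What is the square root of -{0}?", "answer": "{0}i"}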
class TriviaQuiz(commands.Cog):
"""A cog for all quiz commands."""
def __init__(self, bot: Bot):
self.bot = bot
self.game_status = {} # A variable to store the game status: either running or not running.
        self.game_owners = {} # A variable to store the ID of the person who started the quiz game in a channel.
self.questions = self.load_questions()
self.question_limit = 0
self.player_scores = defaultdict(int) # A variable to store all player's scores for a bot session.
self.game_player_scores = {} # A variable to store temporary game player's scores.
self.categories = {
"general": "Test your general knowledge.",
"retro": "Questions related to retro gaming.",
"math": "General questions about mathematics ranging from grade 8 to grade 12.",
"science": "Put your understanding of science to the test!",
"cs": "A large variety of computer science questions.",
"python": "Trivia on our amazing language, Python!",
"wikipedia": "Guess the title of random wikipedia passages.",
}
self.get_wiki_questions.start()
def cog_unload(self) -> None:
"""Cancel `get_wiki_questions` task when Cog will unload."""
self.get_wiki_questions.cancel()
@tasks.loop(hours=24.0)
async def get_wiki_questions(self) -> None:
"""Get yesterday's most read articles from wikipedia and format them like trivia questions."""
error_fetches = 0
wiki_questions = []
# trivia_quiz.json follows a pattern, every new category starts with the next century.
start_id = 501
yesterday = datetime.strftime(datetime.now() - timedelta(1), '%Y/%m/%d')
while error_fetches < MAX_ERROR_FETCH_TRIES:
async with self.bot.http_session.get(url=WIKI_FEED_API_URL.format(date=yesterday)) as r:
if r.status != 200:
error_fetches += 1
continue
raw_json = await r.json()
articles_raw = raw_json["mostread"]["articles"]
for article in articles_raw:
question = article.get("extract")
if not question:
continue
                # Normalize the Wikipedia article title: split on whitespace and
                # hyphens, strip punctuation from each word, and rejoin with
                # spaces so every word is cleaned (not just the last one).
                cleaned_title = " ".join(
                    word.strip(string.punctuation)
                    for word in re.split(r"[\s-]", article["normalizedtitle"])
                )
                # Since the extract sometimes contains the article name, the loop
                # below replaces every matching word in the extract with *** of
                # the same length.
                # NOTE: This removes the "answer" in 99% of the cases, but
                # sometimes the Wikipedia title is very different from the words
                # in the extract; for example, the title may be a person's
                # nickname (Bob Ross) whereas the extract uses the full name
                # (Robert Norman Ross), so it comes out as (Robert Norman ****)
                # and (Robert Norman Ross) won't be accepted as a right answer :(
                for word in re.split(r"[\s-]", cleaned_title):
                    word = word.strip(string.punctuation)
                    if not word:
                        continue
                    secret_word = r"\*" * len(word)
                    question = re.sub(rf'\b{re.escape(word)}\b', f"**{secret_word}**", question, flags=re.IGNORECASE)
formatted_article_question = {
"id": start_id,
"question": f"Guess the title of the Wikipedia article.\n\n{question}",
"answer": cleaned_title,
"info": article["extract"]
}
start_id += 1
wiki_questions.append(formatted_article_question)
# If everything has gone smoothly until now, we can break out of the while loop
break
if error_fetches < MAX_ERROR_FETCH_TRIES:
self.questions["wikipedia"] = wiki_questions.copy()
else:
del self.categories["wikipedia"]
logger.warning(f"Not loading wikipedia guess questions, hit max error fetches: {MAX_ERROR_FETCH_TRIES}.")
@staticmethod
def load_questions() -> dict:
"""Load the questions from the JSON file."""
p = Path("bot", "resources", "fun", "trivia_quiz.json")
return json.loads(p.read_text(encoding="utf-8"))
@commands.group(name="quiz", aliases=("trivia", "triviaquiz"), invoke_without_command=True)
async def quiz_game(self, ctx: commands.Context, category: Optional[str], questions: Optional[int]) -> None:
"""
Start a quiz!
Questions for the quiz can be selected from the following categories:
- general: Test your general knowledge.
- retro: Questions related to retro gaming.
- math: General questions about mathematics ranging from grade 8 to grade 12.
- science: Put your understanding of science to the test!
- cs: A large variety of computer science questions.
- python: Trivia on our amazing language, Python!
        - wikipedia: Guess the title of random Wikipedia passages.
(More to come!)
"""
if ctx.channel.id not in self.game_status:
self.game_status[ctx.channel.id] = False
if ctx.channel.id not in self.game_player_scores:
self.game_player_scores[ctx.channel.id] = {}
# Stop game if running.
if self.game_status[ctx.channel.id]:
await ctx.send(
"Game is already running... "
f"do `{Client.prefix}quiz stop`"
)
return
        # Send an embed showing the available categories if the given category is invalid.
if category is None:
category = random.choice(list(self.categories))
category = category.lower()
if category not in self.categories:
embed = self.category_embed()
await ctx.send(embed=embed)
return
topic = self.questions[category]
topic_length = len(topic)
if questions is None:
self.question_limit = min(DEFAULT_QUESTION_LIMIT, topic_length)
else:
if questions > topic_length:
await ctx.send(
embed=self.make_error_embed(
f"This category only has {topic_length} questions. "
"Please input a lower value!"
)
)
return
elif questions < 1:
await ctx.send(
embed=self.make_error_embed(
"You must choose to complete at least one question. "
f"(or enter nothing for the default value of {DEFAULT_QUESTION_LIMIT} questions)"
)
)
return
else:
self.question_limit = questions
# Start game if not running.
if not self.game_status[ctx.channel.id]:
self.game_owners[ctx.channel.id] = ctx.author
self.game_status[ctx.channel.id] = True
start_embed = self.make_start_embed(category)
await ctx.send(embed=start_embed) # send an embed with the rules
await asyncio.sleep(5)
done_questions = []
hint_no = 0
quiz_entry = None
while self.game_status[ctx.channel.id]:
# Exit quiz if number of questions for a round are already sent.
if len(done_questions) == self.question_limit and hint_no == 0:
await ctx.send("The round has ended.")
await self.declare_winner(ctx.channel, self.game_player_scores[ctx.channel.id])
self.game_status[ctx.channel.id] = False
del self.game_owners[ctx.channel.id]
self.game_player_scores[ctx.channel.id] = {}
break
            # hint_no == 0 means no hint or time alert has been sent yet, i.e. this is a new question.
if hint_no == 0:
# Select a random question which has not been used yet.
while True:
question_dict = random.choice(topic)
if question_dict["id"] not in done_questions:
done_questions.append(question_dict["id"])
break
if "dynamic_id" not in question_dict:
quiz_entry = QuizEntry(
question_dict["question"],
quiz_answers if isinstance(quiz_answers := question_dict["answer"], list) else [quiz_answers],
STANDARD_VARIATION_TOLERANCE
)
else:
format_func = DYNAMIC_QUESTIONS_FORMAT_FUNCS[question_dict["dynamic_id"]]
quiz_entry = format_func(
question_dict["question"],
question_dict["answer"],
)
embed = discord.Embed(
colour=Colours.gold,
title=f"Question #{len(done_questions)}",
description=quiz_entry.question,
)
if img_url := question_dict.get("img_url"):
embed.set_image(url=img_url)
await ctx.send(embed=embed)
def check_func(variation_tolerance: int) -> Callable[[discord.Message], bool]:
def contains_correct_answer(m: discord.Message) -> bool:
return m.channel == ctx.channel and any(
fuzz.ratio(answer.lower(), m.content.lower()) > variation_tolerance
for answer in quiz_entry.answers
)
return contains_correct_answer
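            # e.g. with the standard 88% tolerance, answering "color" when the
            # stored answer is "colour" (fuzz.ratio ~ 91) still counts as correct.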
try:
msg = await self.bot.wait_for("message", check=check_func(quiz_entry.var_tol), timeout=10)
except asyncio.TimeoutError:
# In case of TimeoutError and the game has been stopped, then do nothing.
if not self.game_status[ctx.channel.id]:
break
if hint_no < 2:
hint_no += 1
if "hints" in question_dict:
hints = question_dict["hints"]
await ctx.send(f"**Hint #{hint_no}\n**{hints[hint_no - 1]}")
else:
await ctx.send(f"{30 - hint_no * 10}s left!")
                # After two hints/time alerts have been sent, hint_no will be 2.
                # Reaching this branch means all hints/time alerts were sent and the
                # answer still wasn't given, so the bot reveals the answer and moves
                # on to the next question.
else:
if self.game_status[ctx.channel.id] is False:
break
response = random.choice(WRONG_ANS_RESPONSE)
await ctx.send(response)
await self.send_answer(
ctx.channel,
quiz_entry.answers,
False,
question_dict,
self.question_limit - len(done_questions),
)
await asyncio.sleep(1)
hint_no = 0 # Reset the hint counter so that on the next round, it's in the initial state
await self.send_score(ctx.channel, self.game_player_scores[ctx.channel.id])
await asyncio.sleep(2)
else:
if self.game_status[ctx.channel.id] is False:
break
points = 100 - 25 * hint_no
if msg.author in self.game_player_scores[ctx.channel.id]:
self.game_player_scores[ctx.channel.id][msg.author] += points
else:
self.game_player_scores[ctx.channel.id][msg.author] = points
# Also updating the overall scoreboard.
if msg.author in self.player_scores:
self.player_scores[msg.author] += points
else:
self.player_scores[msg.author] = points
hint_no = 0
await ctx.send(f"{msg.author.mention} got the correct answer :tada: {points} points!")
await self.send_answer(
ctx.channel,
quiz_entry.answers,
True,
question_dict,
self.question_limit - len(done_questions),
)
await self.send_score(ctx.channel, self.game_player_scores[ctx.channel.id])
await asyncio.sleep(2)
def make_start_embed(self, category: str) -> discord.Embed:
"""Generate a starting/introduction embed for the quiz."""
rules = "\n".join([f"{index}: {rule}" for index, rule in enumerate(RULES, start=1)])
start_embed = discord.Embed(
title="Quiz game Starting!!",
description=(
f"Each game consists of {self.question_limit} questions.\n"
f"**Rules :**\n{rules}"
f"\n **Category** : {category}"
),
colour=Colours.blue
)
start_embed.set_thumbnail(url=TRIVIA_QUIZ_ICON)
return start_embed
@staticmethod
def make_error_embed(desc: str) -> discord.Embed:
"""Generate an error embed with the given description."""
error_embed = discord.Embed(
colour=Colours.soft_red,
title=random.choice(NEGATIVE_REPLIES),
description=desc,
)
return error_embed
@quiz_game.command(name="stop")
async def stop_quiz(self, ctx: commands.Context) -> None:
"""
        Stop a quiz game if it's running in the channel.
Note: Only mods or the owner of the quiz can stop it.
"""
try:
if self.game_status[ctx.channel.id]:
# Check if the author is the game starter or a moderator.
if ctx.author == self.game_owners[ctx.channel.id] or any(
role.id in MODERATION_ROLES for role in getattr(ctx.author, 'roles', [])
):
self.game_status[ctx.channel.id] = False
del self.game_owners[ctx.channel.id]
self.game_player_scores[ctx.channel.id] = {}
await ctx.send("Quiz stopped.")
await self.declare_winner(ctx.channel, self.game_player_scores[ctx.channel.id])
else:
await ctx.send(f"{ctx.author.mention}, you are not authorised to stop this game :ghost:!")
else:
await ctx.send("No quiz running.")
except KeyError:
await ctx.send("No quiz running.")
@quiz_game.command(name="leaderboard")
async def leaderboard(self, ctx: commands.Context) -> None:
"""View everyone's score for this bot session."""
await self.send_score(ctx.channel, self.player_scores)
@staticmethod
async def send_score(channel: discord.TextChannel, player_data: dict) -> None:
"""Send the current scores of players in the game channel."""
if len(player_data) == 0:
await channel.send("No one has made it onto the leaderboard yet.")
return
embed = discord.Embed(
colour=Colours.blue,
title="Score Board",
description="",
)
embed.set_thumbnail(url=TRIVIA_QUIZ_ICON)
sorted_dict = sorted(player_data.items(), key=operator.itemgetter(1), reverse=True)
for item in sorted_dict:
embed.description += f"{item[0]}: {item[1]}\n"
await channel.send(embed=embed)
@staticmethod
async def declare_winner(channel: discord.TextChannel, player_data: dict) -> None:
"""Announce the winner of the quiz in the game channel."""
if player_data:
highest_points = max(list(player_data.values()))
no_of_winners = list(player_data.values()).count(highest_points)
# Check if more than 1 player has highest points.
if no_of_winners > 1:
winners = []
points_copy = list(player_data.values()).copy()
for _ in range(no_of_winners):
index = points_copy.index(highest_points)
winners.append(list(player_data.keys())[index])
points_copy[index] = 0
winners_mention = " ".join(winner.mention for winner in winners)
else:
author_index = list(player_data.values()).index(highest_points)
winner = list(player_data.keys())[author_index]
winners_mention = winner.mention
await channel.send(
f"{winners_mention} Congratulations "
f"on winning this quiz game with a grand total of {highest_points} points :tada:"
)
def category_embed(self) -> discord.Embed:
"""Build an embed showing all available trivia categories."""
embed = discord.Embed(
colour=Colours.blue,
title="The available question categories are:",
description="",
)
embed.set_footer(text="If a category is not chosen, a random one will be selected.")
for cat, description in self.categories.items():
embed.description += (
f"**- {cat.capitalize()}**\n"
f"{description.capitalize()}\n"
)
return embed
@staticmethod
async def send_answer(
channel: discord.TextChannel,
answers: list[str],
answer_is_correct: bool,
question_dict: dict,
q_left: int,
) -> None:
"""Send the correct answer of a question to the game channel."""
info = question_dict.get("info")
plurality = " is" if len(answers) == 1 else "s are"
embed = discord.Embed(
color=Colours.bright_green,
title=(
("You got it! " if answer_is_correct else "")
+ f"The correct answer{plurality} **`{', '.join(answers)}`**\n"
),
description="",
)
        # Don't check `info is not None` here: empty strings should be filtered out too.
if info:
embed.description += f"**Information**\n{info}\n\n"
embed.description += (
("Let's move to the next question." if q_left > 0 else "")
+ f"\nRemaining questions: {q_left}"
)
await channel.send(embed=embed)
def setup(bot: Bot) -> None:
"""Load the TriviaQuiz cog."""
bot.add_cog(TriviaQuiz(bot))
| 38.689349 | 118 | 0.573947 |
4a1ceecb155c6ec5ae2958bd9b4f9b4cdf20dcaf
| 37,737 |
py
|
Python
|
site/flask/lib/python2.7/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 5 |
2015-01-06T17:01:59.000Z
|
2016-08-13T05:29:24.000Z
|
site/flask/lib/python2.7/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 309 |
2016-10-27T23:47:06.000Z
|
2017-04-02T04:40:21.000Z
|
site/flask/lib/python2.7/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 6 |
2015-01-06T17:02:01.000Z
|
2016-11-11T02:50:27.000Z
|
# oracle/cx_oracle.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname\
[?key=value&key=value...]
:url: http://cx-oracle.sourceforge.net/
Additional Connect Arguments
----------------------------
When connecting with ``dbname`` present, the host, port, and dbname tokens are
converted to a TNS name using
the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
directly as a TNS name.
Additional arguments which may be specified either as query string arguments
on the URL, or as keyword arguments to :func:`.create_engine()` are:
* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
all bind parameters. This is required for LOB datatypes but can be
disabled to reduce overhead. Defaults to ``True``. Specific types
can be excluded from this process using the ``exclude_setinputsizes``
parameter.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
be excluded from the "auto setinputsizes" feature. The type names here
must match DBAPI types that are found in the "cx_Oracle" module namespace,
such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
``(STRING, UNICODE)``.
.. versionadded:: 0.8 specific DBAPI types can be excluded from the
auto_setinputsizes feature via the exclude_setinputsizes attribute.
* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
alternatively an integer value. This value is only available as a URL query
string argument.
* ``threaded`` - enable multithreaded access to cx_oracle connections.
Defaults to ``True``. Note that this is the opposite default of the
cx_Oracle DBAPI itself.
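For illustration, a hypothetical engine configured with several of these
options at once::
    engine = create_engine("oracle+cx_oracle://scott:tiger@host:1521/dbname",
                           arraysize=100,
                           coerce_to_decimal=False,
                           threaded=False)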
.. _cx_oracle_unicode:
Unicode
-------
The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
ability to return string results as Python unicode objects natively.
When used in Python 3, cx_Oracle returns all strings as Python unicode objects
(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
column values that are of type ``VARCHAR`` or other non-unicode string types,
it will return values as Python strings (e.g. bytestrings).
The cx_Oracle SQLAlchemy dialect presents two different options for the use
case of returning ``VARCHAR`` column values as Python unicode objects under
Python 2:
* the cx_Oracle DBAPI has the ability to coerce all string results to Python
unicode objects unconditionally using output type handlers. This has
the advantage that the unicode conversion is global to all statements
at the cx_Oracle driver level, meaning it works with raw textual SQL
statements that have no typing information associated. However, this system
  has been observed to incur significant performance overhead, not only
  because it takes effect for all string values unconditionally, but also
  because cx_Oracle under Python 2 seems to use a pure-Python function call in
  order to do the decode operation, which under cPython can be orders of
  magnitude slower than doing it using C functions alone.
* SQLAlchemy has unicode-decoding services built in, and when using
SQLAlchemy's C extensions, these functions do not use any Python function
calls and are very fast. The disadvantage to this approach is that the
unicode conversion only takes effect for statements where the
:class:`.Unicode` type or :class:`.String` type with
``convert_unicode=True`` is explicitly associated with the result column.
This is the case for any ORM or Core query or SQL expression as well as for
a :func:`.text` construct that specifies output column types, so in the vast
majority of cases this is not an issue. However, when sending a completely
raw string to :meth:`.Connection.execute`, this typing information isn't
present, unless the string is handled within a :func:`.text` construct that
adds typing information.
As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
typing system. This keeps cx_Oracle's expensive Python 2 approach
disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
system is used.
To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
:func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::
from sqlalchemy import text, Unicode
result = conn.execute(
text("select username from user").columns(username=Unicode))
.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
for unicode results of non-unicode datatypes in Python 2, after they were
identified as a major performance bottleneck. SQLAlchemy's own unicode
facilities are used instead.
.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_oracle DBAPI supports a limited subset of Oracle's already limited
RETURNING support. Typically, results can only be guaranteed for at most one
column being returned; this is the typical case when SQLAlchemy uses RETURNING
to get just the value of a primary-key-associated sequence value.
Additional column expressions will cause problems in a non-determinative way,
due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is
required for more complex RETURNING scenarios.
For this reason, stability may be enhanced by disabling RETURNING support
completely; SQLAlchemy otherwise will use RETURNING to fetch newly
sequence-generated primary keys. As illustrated in :ref:`oracle_returning`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
.. seealso::
http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693
- OCI documentation for RETURNING
http://sourceforge.net/mailarchive/message.php?msg_id=31338136
- cx_oracle developer commentary
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
converts these to strings so that the interface of the Binary type is
consistent with that of other backends, and so that the linkage to a live
cursor is not needed in scenarios like result.fetchmany() and
result.fetchall(). This means that by default, LOB objects are fully fetched
unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.
To disable this processing, pass ``auto_convert_lobs=False`` to
:func:`.create_engine()`.
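For example::
    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
                           auto_convert_lobs=False)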
Two Phase Transaction Support
-----------------------------
Two Phase transactions are implemented using XA transactions, and are known
to work in a rudimentary fashion with recent versions of cx_Oracle
as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
considered to be robust and should still be regarded as experimental.
In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
two phase which prevents
a particular DBAPI connection from being consistently usable in both
prepared transactions as well as traditional DBAPI usage patterns; therefore
once a particular connection is used via :meth:`.Connection.begin_prepared`,
all subsequent usages of the underlying DBAPI connection must be within
the context of prepared transactions.
The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
connections. Therefore, due to the above glitch, a DBAPI connection that has
been used in a two-phase operation, and is then returned to the pool, will
not be usable in a non-two-phase context. To avoid this situation,
the application can make one of several choices:
* Disable connection pooling using :class:`.NullPool` (see the sketch after
  this list)
* Ensure that the particular :class:`.Engine` in use is only used
for two-phase operations. A :class:`.Engine` bound to an ORM
:class:`.Session` which includes ``twophase=True`` will consistently
use the two-phase transaction style.
* For ad-hoc two-phase operations without disabling pooling, the DBAPI
connection in use can be evicted from the connection pool using the
:meth:`.Connection.detach` method.
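For example, a minimal sketch of the first option, disabling pooling
entirely::
    from sqlalchemy.pool import NullPool
    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
                           poolclass=NullPool)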
.. versionchanged:: 0.8.0b2,0.7.10
Support for cx_oracle prepared transactions has been implemented
and tested.
.. _cx_oracle_numeric:
Precision Numerics
------------------
The SQLAlchemy dialect goes through a lot of steps to ensure
that decimal numbers are sent and received with full accuracy.
An "outputtypehandler" callable is associated with each
cx_oracle connection object which detects numeric types and
receives them as string values, instead of receiving a Python
``float`` directly, which is then passed to the Python
``Decimal`` constructor. The :class:`.Numeric` and
:class:`.Float` types under the cx_oracle dialect are aware of
this behavior, and will coerce the ``Decimal`` to ``float`` if
the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
optional on :class:`.Numeric`).
Because the handler coerces to ``Decimal`` in all cases first,
the feature can detract significantly from performance.
If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
attributes of the result set column to best determine if
subsequent incoming values should be received as ``Decimal`` as
opposed to int (in which case no processing is added). There are
several scenarios where OCI_ does not provide unambiguous data
as to the numeric type, including some situations where
individual rows may return a combination of floating point and
integer values. Certain values for "precision" and "scale" have
been observed to determine this scenario. When it occurs, the
outputtypehandler receives as string and then passes off to a
processing function which detects, for each returned value, if a
decimal point is present, and if so converts to ``Decimal``,
otherwise to int. The intention is that simple int-based
statements like "SELECT my_seq.nextval() FROM DUAL" continue to
return ints and not ``Decimal`` objects, and that any kind of
floating point value is received as a string so that there is no
floating point loss of precision.
The "decimal point is present" logic itself is also sensitive to
locale. Under OCI_, this is controlled by the NLS_LANG
environment variable. Upon first connection, the dialect runs a
test to determine the current "decimal" character, which can be
a comma "," for European locales. From that point forward the
outputtypehandler uses that character to represent a decimal
point. Note that cx_oracle 5.0.3 or greater is required
when dealing with numerics with locale settings that don't use
a period "." as the decimal character.
.. versionchanged:: 0.6.6
The outputtypehandler supports the case where the locale uses a
comma "," character to represent a decimal point.
.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
"""
from __future__ import absolute_import
from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
import random
import collections
import decimal
import re
class _OracleNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
# cx_oracle accepts Decimal objects and floats
return None
def result_processor(self, dialect, coltype):
# we apply a cx_oracle type handler to all connections
# that converts floating point strings to Decimal().
# However, in some subquery situations, Oracle doesn't
# give us enough information to determine int or Decimal.
# It could even be int/Decimal differently on each row,
# regardless of the scale given for the originating type.
# So we still need an old school isinstance() handler
# here for decimals.
if dialect.supports_native_decimal:
if self.asdecimal:
fstring = "%%.%df" % self._effective_decimal_return_scale
def to_decimal(value):
if value is None:
return None
elif isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(fstring % value)
return to_decimal
else:
if self.precision is None and self.scale is None:
return processors.to_float
elif not getattr(self, '_is_oracle_number', False) \
and self.scale is not None:
return processors.to_float
else:
return None
else:
# cx_oracle 4 behavior, will assume
# floats
return super(_OracleNumeric, self).\
result_processor(dialect, coltype)
class _OracleDate(sqltypes.Date):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return value.date()
else:
return value
return process
class _LOBMixin(object):
def result_processor(self, dialect, coltype):
if not dialect.auto_convert_lobs:
# return the cx_oracle.LOB directly.
return None
def process(value):
if value is not None:
return value.read()
else:
return value
return process
class _NativeUnicodeMixin(object):
if util.py2k:
def bind_processor(self, dialect):
if dialect._cx_oracle_with_unicode:
def process(value):
if value is None:
return value
else:
return unicode(value)
return process
else:
return super(
_NativeUnicodeMixin, self).bind_processor(dialect)
# we apply a connection output handler that returns
# unicode in all cases, so the "native_unicode" flag
# will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_CHAR
class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
def get_dbapi_type(self, dbapi):
return getattr(dbapi, 'UNICODE', dbapi.STRING)
class _OracleText(_LOBMixin, sqltypes.Text):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleLong(oracle.LONG):
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
pass
class _OracleUnicodeText(
_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
def get_dbapi_type(self, dbapi):
return dbapi.NCLOB
def result_processor(self, dialect, coltype):
lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
if lob_processor is None:
return None
string_processor = sqltypes.UnicodeText.result_processor(
self, dialect, coltype)
if string_processor is None:
return lob_processor
else:
def process(value):
return string_processor(lob_processor(value))
return process
class _OracleInteger(sqltypes.Integer):
def result_processor(self, dialect, coltype):
def to_int(val):
if val is not None:
val = int(val)
return val
return to_int
class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
def get_dbapi_type(self, dbapi):
return dbapi.BLOB
def bind_processor(self, dialect):
return None
class _OracleInterval(oracle.INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
pass
class _OracleRowid(oracle.ROWID):
def get_dbapi_type(self, dbapi):
return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
def bindparam_string(self, name, **kw):
quote = getattr(name, 'quote', None)
if quote is True or quote is not False and \
self.preparer._bindparam_requires_quotes(name):
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
else:
return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
def pre_exec(self):
quoted_bind_names = \
getattr(self.compiled, '_quoted_bind_names', None)
if quoted_bind_names:
if not self.dialect.supports_unicode_statements:
# if DBAPI doesn't accept unicode statements,
# keys in self.parameters would have been encoded
# here. so convert names in quoted_bind_names
# to encoded as well.
quoted_bind_names = \
dict(
(fromname.encode(self.dialect.encoding),
toname.encode(self.dialect.encoding))
for fromname, toname in
quoted_bind_names.items()
)
for param in self.parameters:
for fromname, toname in quoted_bind_names.items():
param[toname] = param[fromname]
del param[fromname]
if self.dialect.auto_setinputsizes:
# cx_oracle really has issues when you setinputsizes
# on String, including that outparams/RETURNING
# breaks for varchars
self.set_input_sizes(
quoted_bind_names,
exclude_types=self.dialect.exclude_setinputsizes
)
# if a single execute, check for outparams
if len(self.compiled_parameters) == 1:
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
dbtype = bindparam.type.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if not hasattr(self, 'out_parameters'):
self.out_parameters = {}
if dbtype is None:
raise exc.InvalidRequestError(
"Cannot create out parameter for parameter "
"%r - its type %r is not supported by"
" cx_oracle" %
(bindparam.key, bindparam.type)
)
name = self.compiled.bind_names[bindparam]
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][quoted_bind_names.get(name, name)] = \
self.out_parameters[name]
def create_cursor(self):
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def get_result_proxy(self):
if hasattr(self, 'out_parameters') and self.compiled.returning:
returning_params = dict(
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
return ReturningResultProxy(self, returning_params)
result = None
if self.cursor.description is not None:
for column in self.cursor.description:
type_code = column[1]
if type_code in self.dialect._cx_oracle_binary_types:
result = _result.BufferedColumnResultProxy(self)
if result is None:
result = _result.ResultProxy(self)
if hasattr(self, 'out_parameters'):
if self.compiled_parameters is not None and \
len(self.compiled_parameters) == 1:
result.out_parameters = out_parameters = {}
for bind, name in self.compiled.bind_names.items():
if name in self.out_parameters:
type = bind.type
impl_type = type.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(
self.dialect.dbapi)
result_processor = impl_type.\
result_processor(self.dialect,
dbapi_type)
if result_processor is not None:
out_parameters[name] = \
result_processor(
self.out_parameters[name].getvalue())
else:
out_parameters[name] = self.out_parameters[
name].getvalue()
else:
result.out_parameters = dict(
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
return result
class OracleExecutionContext_cx_oracle_with_unicode(
OracleExecutionContext_cx_oracle):
"""Support WITH_UNICODE in Python 2.xx.
WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
behavior under Python 2.x. This mode in some cases disallows
and in other cases silently passes corrupted data when
non-Python-unicode strings (a.k.a. plain old Python strings)
are passed as arguments to connect(), the statement sent to execute(),
or any of the bind parameter keys or values sent to execute().
This optional context therefore ensures that all statements are
passed as Python unicode objects.
"""
def __init__(self, *arg, **kw):
OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
self.statement = util.text_type(self.statement)
def _execute_scalar(self, stmt):
return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
_execute_scalar(util.text_type(stmt))
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""Result proxy which stuffs the _returning clause + outparams
into the fetch."""
def __init__(self, context, returning_params):
self._returning_params = returning_params
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
returning = self.context.compiled.returning
return [
("ret_%d" % i, None)
for i, col in enumerate(returning)
]
def _buffer_rows(self):
return collections.deque(
[tuple(self._returning_params["ret_%d" % i]
for i, c in enumerate(self._returning_params))]
)
class OracleDialect_cx_oracle(OracleDialect):
execution_ctx_cls = OracleExecutionContext_cx_oracle
statement_compiler = OracleCompiler_cx_oracle
driver = "cx_oracle"
    colspecs = {
sqltypes.Numeric: _OracleNumeric,
# generic type, assume datetime.date is desired
sqltypes.Date: _OracleDate,
sqltypes.LargeBinary: _OracleBinary,
sqltypes.Boolean: oracle._OracleBoolean,
sqltypes.Interval: _OracleInterval,
oracle.INTERVAL: _OracleInterval,
sqltypes.Text: _OracleText,
sqltypes.String: _OracleString,
sqltypes.UnicodeText: _OracleUnicodeText,
sqltypes.CHAR: _OracleChar,
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
oracle.LONG: _OracleLong,
# this is only needed for OUT parameters.
# it would be nice if we could not use it otherwise.
sqltypes.Integer: _OracleInteger,
oracle.RAW: _OracleRaw,
sqltypes.Unicode: _OracleNVarChar,
sqltypes.NVARCHAR: _OracleNVarChar,
oracle.ROWID: _OracleRowid,
}
execute_sequence_format = list
def __init__(self,
auto_setinputsizes=True,
exclude_setinputsizes=("STRING", "UNICODE"),
auto_convert_lobs=True,
threaded=True,
allow_twophase=True,
coerce_to_decimal=True,
coerce_to_unicode=False,
arraysize=50, **kwargs):
OracleDialect.__init__(self, **kwargs)
self.threaded = threaded
self.arraysize = arraysize
self.allow_twophase = allow_twophase
self.supports_timestamp = self.dbapi is None or \
hasattr(self.dbapi, 'TIMESTAMP')
self.auto_setinputsizes = auto_setinputsizes
self.auto_convert_lobs = auto_convert_lobs
if hasattr(self.dbapi, 'version'):
self.cx_oracle_ver = tuple([int(x) for x in
self.dbapi.version.split('.')])
else:
self.cx_oracle_ver = (0, 0, 0)
def types(*names):
return set(
getattr(self.dbapi, name, None) for name in names
).difference([None])
self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
self._cx_oracle_string_types = types("STRING", "UNICODE",
"NCLOB", "CLOB")
self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
self.coerce_to_unicode = (
self.cx_oracle_ver >= (5, 0) and
coerce_to_unicode
)
self.supports_native_decimal = (
self.cx_oracle_ver >= (5, 0) and
coerce_to_decimal
)
self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)
if self.cx_oracle_ver is None:
# this occurs in tests with mock DBAPIs
self._cx_oracle_string_types = set()
self._cx_oracle_with_unicode = False
elif self.cx_oracle_ver >= (5,) and not \
hasattr(self.dbapi, 'UNICODE'):
# cx_Oracle WITH_UNICODE mode. *only* python
# unicode objects accepted for anything
self.supports_unicode_statements = True
self.supports_unicode_binds = True
self._cx_oracle_with_unicode = True
if util.py2k:
# There's really no reason to run with WITH_UNICODE under
# Python 2.x. Give the user a hint.
util.warn(
"cx_Oracle is compiled under Python 2.xx using the "
"WITH_UNICODE flag. Consider recompiling cx_Oracle "
"without this flag, which is in no way necessary for "
"full support of Unicode. Otherwise, all string-holding "
"bind parameters must be explicitly typed using "
"SQLAlchemy's String type or one of its subtypes,"
"or otherwise be passed as Python unicode. "
"Plain Python strings passed as bind parameters will be "
"silently corrupted by cx_Oracle."
)
self.execution_ctx_cls = \
OracleExecutionContext_cx_oracle_with_unicode
else:
self._cx_oracle_with_unicode = False
if self.cx_oracle_ver is None or \
not self.auto_convert_lobs or \
not hasattr(self.dbapi, 'CLOB'):
self.dbapi_type_map = {}
else:
# only use this for LOB objects. using it for strings, dates
# etc. leads to a little too much magic, reflection doesn't know
# if it should expect encoded strings or unicodes, etc.
self.dbapi_type_map = {
self.dbapi.CLOB: oracle.CLOB(),
self.dbapi.NCLOB: oracle.NCLOB(),
self.dbapi.BLOB: oracle.BLOB(),
self.dbapi.BINARY: oracle.RAW(),
}
@classmethod
def dbapi(cls):
import cx_Oracle
return cx_Oracle
def initialize(self, connection):
super(OracleDialect_cx_oracle, self).initialize(connection)
if self._is_oracle_8:
self.supports_unicode_binds = False
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
"""detect if the decimal separator character is not '.', as
is the case with European locale settings for NLS_LANG.
cx_oracle itself uses similar logic when it formats Python
Decimal objects to strings on the bind side (as of 5.0.3),
as Oracle sends/receives string numerics only in the
current locale.
"""
if self.cx_oracle_ver < (5,):
# no output type handlers before version 5
return
cx_Oracle = self.dbapi
conn = connection.connection
# override the output_type_handler that's
# on the cx_oracle connection with a plain
# one on the cursor
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
return cursor.var(
cx_Oracle.STRING,
255, arraysize=cursor.arraysize)
cursor = conn.cursor()
cursor.outputtypehandler = output_type_handler
cursor.execute("SELECT 0.1 FROM DUAL")
val = cursor.fetchone()[0]
cursor.close()
char = re.match(r"([\.,])", val).group(1)
if char != '.':
_detect_decimal = self._detect_decimal
self._detect_decimal = \
lambda value: _detect_decimal(value.replace(char, '.'))
self._to_decimal = \
lambda value: decimal.Decimal(value.replace(char, '.'))
def _detect_decimal(self, value):
if "." in value:
return decimal.Decimal(value)
else:
return int(value)
_to_decimal = decimal.Decimal
def on_connect(self):
if self.cx_oracle_ver < (5,):
# no output type handlers before version 5
return
cx_Oracle = self.dbapi
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
# convert all NUMBER with precision + positive scale to Decimal
# this almost allows "native decimal" mode.
if self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER and \
precision and scale > 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._to_decimal,
arraysize=cursor.arraysize)
# if NUMBER with zero precision and 0 or neg scale, this appears
# to indicate "ambiguous". Use a slower converter that will
# make a decision based on each value received - the type
# may change from row to row (!). This kills
# off "native decimal" mode, handlers still needed.
elif self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER \
and not precision and scale <= 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._detect_decimal,
arraysize=cursor.arraysize)
# allow all strings to come back natively as Unicode
elif self.coerce_to_unicode and \
defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(util.text_type, size, cursor.arraysize)
def on_connect(conn):
conn.outputtypehandler = output_type_handler
return on_connect
def create_connect_args(self, url):
dialect_opts = dict(url.query)
for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
'threaded', 'allow_twophase'):
if opt in dialect_opts:
util.coerce_kw_type(dialect_opts, opt, bool)
setattr(self, opt, dialect_opts[opt])
if url.database:
# if we have a database, then we have a remote host
port = url.port
if port:
port = int(port)
else:
port = 1521
dsn = self.dbapi.makedsn(url.host, port, url.database)
else:
# we have a local tnsname
dsn = url.host
opts = dict(
user=url.username,
password=url.password,
dsn=dsn,
threaded=self.threaded,
twophase=self.allow_twophase,
)
if util.py2k:
if self._cx_oracle_with_unicode:
for k, v in opts.items():
if isinstance(v, str):
opts[k] = unicode(v)
else:
for k, v in opts.items():
if isinstance(v, unicode):
opts[k] = str(v)
if 'mode' in url.query:
opts['mode'] = url.query['mode']
if isinstance(opts['mode'], util.string_types):
mode = opts['mode'].upper()
if mode == 'SYSDBA':
opts['mode'] = self.dbapi.SYSDBA
elif mode == 'SYSOPER':
opts['mode'] = self.dbapi.SYSOPER
else:
util.coerce_kw_type(opts, 'mode', int)
return ([], opts)
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.version.split('.')
)
def is_disconnect(self, e, connection, cursor):
error, = e.args
if isinstance(e, self.dbapi.InterfaceError):
return "not connected" in str(e)
elif hasattr(error, 'code'):
# ORA-00028: your session has been killed
# ORA-03114: not connected to ORACLE
# ORA-03113: end-of-file on communication channel
# ORA-03135: connection lost contact
# ORA-01033: ORACLE initialization or shutdown in progress
# ORA-02396: exceeded maximum idle time, please connect again
# TODO: Others ?
return error.code in (28, 3114, 3113, 3135, 1033, 2396)
else:
return False
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified."""
id = random.randint(0, 2 ** 128)
return (0x1234, "%032x" % id, "%032x" % 9)
def do_executemany(self, cursor, statement, parameters, context=None):
if isinstance(parameters, tuple):
parameters = list(parameters)
cursor.executemany(statement, parameters)
def do_begin_twophase(self, connection, xid):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
result = connection.connection.prepare()
connection.info['cx_oracle_prepared'] = result
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_commit(connection.connection)
else:
oci_prepared = connection.info['cx_oracle_prepared']
if oci_prepared:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
connection.info.pop('cx_oracle_prepared', None)
dialect = OracleDialect_cx_oracle
| 39.024819 | 78 | 0.64181 |
4a1cef46e7a88a9412ae0dc5d6166791b46058bd
| 5,621 |
py
|
Python
|
qnarre/prep/metric/squad2.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
qnarre/prep/metric/squad2.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
qnarre/prep/metric/squad2.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import collections
import datasets as ds
import re
import string
from collections import Counter
class Squad2(ds.Metric):
def _info(self):
return ds.MetricInfo(
description="",
citation="",
inputs_description="",
features=ds.Features(
{
"predictions": {
"id": ds.Value("string"),
"prediction_text": ds.Value("string"),
"no_answer_probability": ds.Value("float32"),
},
"references": {
"id": ds.Value("string"),
"answers": ds.features.Sequence(
{"text": ds.Value("string"), "answer_start": ds.Value("int32")}
),
},
}
),
codebase_urls=[],
reference_urls=[],
)
def _compute(self, preds, refs, no_answer_threshold=1.0):
probs = {p["id"]: p["no_answer_probability"] for p in preds}
preds = {p["id"]: p["prediction_text"] for p in preds}
ds = [{"paragraphs": [{"qas": refs}]}]
has_ans = _make_map(ds)
ans_ids = [k for k, v in has_ans.items() if v]
no_ids = [k for k, v in has_ans.items() if not v]
ms_raw, f1_raw = _raw_scores(ds, preds)
ms = _apply(ms_raw, probs, has_ans, no_answer_threshold)
f1 = _apply(f1_raw, probs, has_ans, no_answer_threshold)
ys = _eval(ms, f1)
if ans_ids:
_merge(ys, _eval(ms, f1, ans_ids), "HasAns")
if no_ids:
_merge(ys, _eval(ms, f1, no_ids), "NoAns")
_best_thresh(ys, preds, ms_raw, f1_raw, probs, has_ans)
return dict(ys)
OPTS = None
def _make_map(ds):
ys = {}
for e in ds:
for p in e["paragraphs"]:
for x in p["qas"]:
ys[x["id"]] = bool(x["answers"]["text"])
return ys
def _raw_scores(ds, preds):
ms = {}
f1 = {}
for e in ds:
for p in e["paragraphs"]:
for q in p["qas"]:
i = q["id"]
if i not in preds:
print(f"Missing prediction for {i}")
continue
x = preds[i]
ts = [t for t in q["answers"]["text"] if _normalize(t)]
ts = ts if ts else [""]
ms[i] = max(_match(x, t) for t in ts)
f1[i] = max(_f1(x, t) for t in ts)
return ms, f1
def _match(x, t):
return int(_normalize(x) == _normalize(t))
def _f1(x, t):
xs = _normalize(x).split() if x else []
ts = _normalize(t).split() if t else []
if len(xs) == 0 or len(ts) == 0:
return int(xs == ts)
common = Counter(ts) & Counter(xs)
s = sum(common.values())
if s == 0:
return 0
precision = 1.0 * s / len(xs)
recall = 1.0 * s / len(ts)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def _apply(scores, probs, has_ans, thresh):
ys = {}
for i, s in scores.items():
if probs[i] > thresh:
ys[i] = float(not has_ans[i])
else:
ys[i] = s
return ys
def _eval(ms, f1, ids=None):
if not ids:
n = len(ms)
return collections.OrderedDict(
[
("exact", 100.0 * sum(ms.values()) / n),
("f1", 100.0 * sum(f1.values()) / n),
("total", n),
]
)
else:
n = len(ids)
return collections.OrderedDict(
[
("exact", 100.0 * sum(ms[i] for i in ids) / n),
("f1", 100.0 * sum(f1[i] for i in ids) / n),
("total", n),
]
)
def _merge(ys, xs, pre):
for x in xs:
ys[f"{pre}_{x}"] = xs[x]
def _best_thresh(ys, preds, ms_raw, f1_raw, probs, has_ans):
ms, m_thresh = _find_best(preds, ms_raw, probs, has_ans)
f1, f1_thresh = _find_best(preds, f1_raw, probs, has_ans)
ys["best_exact"] = ms
ys["best_exact_thresh"] = m_thresh
ys["best_f1"] = f1
ys["best_f1_thresh"] = f1_thresh
def _find_best(preds, scores, probs, has_ans):
y = x = sum(1 for k in has_ans if not has_ans[k])
t = 0.0
ids = sorted(probs, key=lambda k: probs[k])
for i in ids:
if i not in scores:
continue
if has_ans[i]:
d = scores[i]
else:
d = -1 if preds[i] else 0
x += d
if x > y:
y = x
t = probs[i]
return 100.0 * y / len(scores), t
def _normalize(t):
def no_punc(x):
exclude = set(string.punctuation)
return "".join(c for c in x if c not in exclude)
def no_articles(x):
return re.sub(re.compile(r"\b(a|an|the)\b", re.UNICODE), " ", x)
def ws_fix(x):
return " ".join(x.split())
return ws_fix(no_articles(no_punc(t.lower())))
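# Hedged usage sketch (not part of the original module): exercises the helper
# functions on a tiny hand-built dataset. All ids, answer texts, and
# no-answer probabilities below are invented for illustration only.
if __name__ == "__main__":
    demo_refs = [
        {"id": "q1", "answers": {"text": ["Paris"], "answer_start": [0]}},
        {"id": "q2", "answers": {"text": [], "answer_start": []}},
    ]
    demo_preds = {"q1": "Paris", "q2": ""}
    demo_probs = {"q1": 0.1, "q2": 0.9}
    demo_ds = [{"paragraphs": [{"qas": demo_refs}]}]
    demo_has_ans = _make_map(demo_ds)
    demo_ms, demo_f1 = _raw_scores(demo_ds, demo_preds)
    demo_ms = _apply(demo_ms, demo_probs, demo_has_ans, 0.5)
    demo_f1 = _apply(demo_f1, demo_probs, demo_has_ans, 0.5)
    print(dict(_eval(demo_ms, demo_f1)))  # expect exact == f1 == 100.0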
| 29.276042 | 91 | 0.505248 |
4a1cef556be2b6517eff11630e353b2f1987cb24
| 13,954 |
py
|
Python
|
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_saml.py
|
Stienvdh/statrick
|
7b092fc42171e226718a70a285a4b323f2f395ad
|
[
"MIT"
] | null | null | null |
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_saml.py
|
Stienvdh/statrick
|
7b092fc42171e226718a70a285a4b323f2f395ad
|
[
"MIT"
] | null | null | null |
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_saml.py
|
Stienvdh/statrick
|
7b092fc42171e226718a70a285a4b323f2f395ad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_system_saml
short_description: Global settings for SAML authentication.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with parameters rc_failed and rc_succeeded.
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
system_saml:
description: the top level parameters set
required: false
type: dict
suboptions:
acs-url:
type: str
description: 'SP ACS(login) URL.'
cert:
type: str
description: 'Certificate name.'
entity-id:
type: str
description: 'SP entity ID.'
idp-cert:
type: str
description: 'IDP Certificate name.'
idp-entity-id:
type: str
description: 'IDP entity ID.'
idp-single-logout-url:
type: str
description: 'IDP single logout url.'
idp-single-sign-on-url:
type: str
description: 'IDP single sign-on URL.'
login-auto-redirect:
type: str
default: 'disable'
description:
- 'Enable/Disable auto redirect to IDP login page.'
- 'disable - Disable auto redirect to IDP Login Page.'
- 'enable - Enable auto redirect to IDP Login Page.'
choices:
- 'disable'
- 'enable'
role:
type: str
default: 'SP'
description:
- 'SAML role.'
                - 'IDP - IDentity Provider.'
- 'SP - Service Provider.'
choices:
- 'IDP'
- 'SP'
server-address:
type: str
description: 'server address.'
service-providers:
description: no description
type: list
suboptions:
idp-entity-id:
type: str
description: 'IDP Entity ID.'
idp-single-logout-url:
type: str
description: 'IDP single logout url.'
idp-single-sign-on-url:
type: str
description: 'IDP single sign-on URL.'
name:
type: str
description: 'Name.'
prefix:
type: str
description: 'Prefix.'
sp-cert:
type: str
description: 'SP certificate name.'
sp-entity-id:
type: str
description: 'SP Entity ID.'
sp-single-logout-url:
type: str
description: 'SP single logout URL.'
sp-single-sign-on-url:
type: str
description: 'SP single sign-on URL.'
sls-url:
type: str
description: 'SP SLS(logout) URL.'
status:
type: str
default: 'disable'
description:
- 'Enable/disable SAML authentication (default = disable).'
- 'disable - Disable SAML authentication.'
                - 'enable - Enable SAML authentication.'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Global settings for SAML authentication.
fmgr_system_saml:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
system_saml:
acs-url: <value of string>
cert: <value of string>
entity-id: <value of string>
idp-cert: <value of string>
idp-entity-id: <value of string>
idp-single-logout-url: <value of string>
idp-single-sign-on-url: <value of string>
login-auto-redirect: <value in [disable, enable]>
role: <value in [IDP, SP]>
server-address: <value of string>
service-providers:
-
idp-entity-id: <value of string>
idp-single-logout-url: <value of string>
idp-single-sign-on-url: <value of string>
name: <value of string>
prefix: <value of string>
sp-cert: <value of string>
sp-entity-id: <value of string>
sp-single-logout-url: <value of string>
sp-single-sign-on-url: <value of string>
sls-url: <value of string>
status: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/cli/global/system/saml'
]
perobject_jrpc_urls = [
'/cli/global/system/saml/{saml}'
]
url_params = []
module_primary_key = None
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'system_saml': {
'required': False,
'type': 'dict',
'options': {
'acs-url': {
'required': False,
'type': 'str'
},
'cert': {
'required': False,
'type': 'str'
},
'entity-id': {
'required': False,
'type': 'str'
},
'idp-cert': {
'required': False,
'type': 'str'
},
'idp-entity-id': {
'required': False,
'type': 'str'
},
'idp-single-logout-url': {
'required': False,
'type': 'str'
},
'idp-single-sign-on-url': {
'required': False,
'type': 'str'
},
'login-auto-redirect': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'role': {
'required': False,
'choices': [
'IDP',
'SP'
],
'type': 'str'
},
'server-address': {
'required': False,
'type': 'str'
},
'service-providers': {
'required': False,
'type': 'list',
'options': {
'idp-entity-id': {
'required': False,
'type': 'str'
},
'idp-single-logout-url': {
'required': False,
'type': 'str'
},
'idp-single-sign-on-url': {
'required': False,
'type': 'str'
},
'name': {
'required': False,
'type': 'str'
},
'prefix': {
'required': False,
'type': 'str'
},
'sp-cert': {
'required': False,
'type': 'str'
},
'sp-entity-id': {
'required': False,
'type': 'str'
},
'sp-single-logout-url': {
'required': False,
'type': 'str'
},
'sp-single-sign-on-url': {
'required': False,
'type': 'str'
}
}
},
'sls-url': {
'required': False,
'type': 'str'
},
'status': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'system_saml'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 34.539604 | 153 | 0.472911 |
4a1cf1711bf992a92004def2360feec7c7930c41
| 160 |
py
|
Python
|
customer/admin.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
customer/admin.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
customer/admin.py
|
Amankori2307/Park-Here
|
bf30e721577ff6df3b761f8bbb1bfe085ffac14c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Customer, Vehicle
# Register your models here.
admin.site.register(Customer)
admin.site.register(Vehicle)
| 22.857143 | 37 | 0.8125 |
4a1cf1bd60e1db58a00659aa9f6f9e2211900977
| 1,154 |
py
|
Python
|
test/test_day02.py
|
0Bu/advent-of-code-2018
|
04b6088385d031a43109146be98e3e91870af44b
|
[
"MIT"
] | null | null | null |
test/test_day02.py
|
0Bu/advent-of-code-2018
|
04b6088385d031a43109146be98e3e91870af44b
|
[
"MIT"
] | null | null | null |
test/test_day02.py
|
0Bu/advent-of-code-2018
|
04b6088385d031a43109146be98e3e91870af44b
|
[
"MIT"
] | null | null | null |
import unittest
import day02
class Part1(unittest.TestCase):
ids = ["abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"]
def test_get_variance(self):
self.assertDictEqual(day02.get_variance(self.ids[0]), dict.fromkeys(set(self.ids[0]), 1))
self.assertDictEqual(day02.get_variance(self.ids[1]), {"a": 2, "b": 3, "c": 1})
self.assertDictEqual(day02.get_variance(self.ids[2]), {"a": 1, "b": 2, "c": 1, "d": 1, "e": 1})
self.assertDictEqual(day02.get_variance(self.ids[3]), {"a": 1, "b": 1, "c": 3, "d": 1})
self.assertDictEqual(day02.get_variance(self.ids[4]), {"a": 2, "b": 1, "c": 1, "d": 2})
self.assertDictEqual(day02.get_variance(self.ids[5]), {"a": 1, "b": 1, "c": 1, "d": 1, "e": 2})
self.assertDictEqual(day02.get_variance(self.ids[6]), {"a": 3, "b": 3})
def test_get_checksum(self):
self.assertEqual(day02.get_checksum(self.ids), 12)
class Part2(unittest.TestCase):
ids = ["abcde", "fghij", "klmno", "pqrst", "fguij", "axcye", "wvxyz"]
def test_get_common_letters(self):
self.assertEqual(day02.get_common_letters(self.ids), "fgij")
| 44.384615 | 103 | 0.611785 |
4a1cf215d8b7801254c7b86a4707c95c64d0aeaa
| 9,662 |
py
|
Python
|
ptopk_patch_selection/lib/preprocess/preprocess_spec.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
ptopk_patch_selection/lib/preprocess/preprocess_spec.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
ptopk_patch_selection/lib/preprocess/preprocess_spec.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library parsing a preprocessing spec.
A preprocessing spec is a list of preprocessing ops separated by '|' that can be
applied sequentially as a preprocessing function. The preprocessing ops are
provided as input.
By convention the preprocessing function operates on dictionaries of features.
Each op can change the dictionary by modifying, adding or removing dictionary
entries. Dictionary entries should be tensors, keys are strings.
The first argument of the op must be named `features` to which the feature
dictionary will be passed. Additional positional and keyword only arguments can
be defined. The processing spec can define values that will passed to those. The
op must return the feature dictionary.
For convenience ops can also operate on tensors of the feature dictionary
directly. In this case they must accept the names of the tensors (one or
multiple of _FEATURE_TENSORS) and return values for all input tensors.
Example spec: 'fn1|fn2(3)|fn3(keyword=5)'
This will construct the following preprocessing function:
def preprocess_fn(features: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
features = fn1(features)
features = fn2(features, 3)
features = fn3(features, keyword=5)
return features
WARNING: Do not use decorators when defining ops.
"""
import ast
import inspect
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from absl import logging
import tensorflow as tf
# Any type of TF tensor.
Tensor = Union[tf.Tensor, tf.SparseTensor, tf.RaggedTensor]
# Dictionary with features.
Features = Dict[str, Tensor]
TPU_SUPPORTED_TYPES = frozenset(
[tf.float32, tf.int32, tf.complex64, tf.int64, tf.bool, tf.bfloat16])
_FEATURE_TENSORS = ("features", "image", "label", "video", "segmentation_mask",
"instance_masks", "instance_labels", "rng")
OpFn = Callable[Ellipsis, Union[Features, tf.Tensor, Sequence[tf.Tensor]]]
def remove_non_tpu_features(features):
"""Removes all features which types are not supported on TPU."""
for name in list(features):
dtype = features[name].dtype
if dtype not in TPU_SUPPORTED_TYPES:
del features[name]
msg = f"Removing {name!r} because dtype {dtype} is not supported on TPU."
logging.warning(msg)
elif isinstance(features[name], tf.SparseTensor):
del features[name]
msg = f"Removing features {name!r}. Sparse tensors not supported on TPU."
logging.warning(msg)
return features
class PreprocessOp:
"""Represents a processing operating.
A process op consists of a method arguments passed to the method. The method
can modify/add/remove features. For convenience the method can also directly
operate on the tensors directly (_FEATURE_TENSORS).
"""
def __init__(self, fn, kwargs = None):
self._fn = fn
self._kwargs = kwargs or {}
self._argspec = inspect.getfullargspec(inspect.unwrap(fn))
if not self._argspec.args or self._argspec.args[0] not in _FEATURE_TENSORS:
raise ValueError(
f"Method {fn} with argspec {self._argspec} cannot be used as "
f"preprocessing operation. First argument must be one of "
f"{_FEATURE_TENSORS} but was {self._argspec.args}.")
def __call__(self, features):
"""Applies the process op to the given features."""
try:
return self._apply(features)
except:
msg = f"Failed to apply {self!r} to features {features}."
logging.error(msg)
raise
def _apply(self, features):
"""Applies the preprocess op to given features."""
features = features.copy()
# Simple case: Function accepts a feature dictionary.
if self._argspec.args[0] == "features":
      # These functions should always return a feature dictionary, but PyType
# doesn't know this.
return self._fn(features, **self._kwargs) # pytype: disable=bad-return-type
# Handle special case with tensors passed directly.
tensor_names = []
for argname in self._argspec.args:
if argname not in _FEATURE_TENSORS:
break
if argname not in features:
raise ValueError(
f"Tensor {argname} requested by {self._fn} but not available "
f"features ({features}).")
tensor_names.append(argname)
if not tensor_names:
raise ValueError(
f"{self._fn} must either accept a dictionary with features as first "
f"argument called 'features' or any number of tensors (with names in "
f"{_FEATURE_TENSORS}) as positional arguments.")
returned_tensors = self._fn(**{n: features[n] for n in tensor_names},
**self._kwargs)
if len(tensor_names) == 1:
returned_tensors = [returned_tensors]
if len(returned_tensors) != len(tensor_names):
raise ValueError(
f"Number of returned tensors ({returned_tensors}) does not match "
f"number of input tensors ({tensor_names}).")
for i, name in enumerate(tensor_names):
features[name] = returned_tensors[i]
return features
def __str__(self):
"""Returns a valid preprocess spec for this operations."""
name = self._fn.__name__
args = ", ".join([f"{k}={v}" for k, v in self._kwargs.items()])
return f"{name}({args})"
def __repr__(self):
"""Returns a representation string."""
return (f"PreprocessOp(fn={self._fn}, kwargs={self._kwargs}, "
f"argspec={self._argspec})")
def __eq__(self, other):
"""Returns True if other is the same op with the same arguments."""
if not isinstance(other, PreprocessOp):
return False
# We do not check if kwargs simply match default argument values.
# pylint: disable=protected-access
return self._fn == other._fn and self._kwargs == other._kwargs
# pylint: enable=protected-access
class PreprocessFn(object):
"""Chain of preprocessing ops combined to a single preprocessing function."""
def __init__(self,
ops,
*,
only_tpu_features = True):
self._ops = ops
self._only_tpu_features = only_tpu_features
def __call__(self, features):
logging.info("Features before preprocessing: %s", features)
features = features.copy()
for op in self._ops:
features = op(features)
logging.info("Features after op %s: %s", op, features)
if self._only_tpu_features:
features = remove_non_tpu_features(features)
logging.info("Features after preprocessing and cleaning: %s", features)
return features
def __str__(self):
"""Returns a valid preprocess spec for this preprocess function."""
return "|".join([str(op) for op in self._ops])
def _get_op_fn(expr, available_ops):
"""Gets the process op fn from the given expression."""
if isinstance(expr, ast.Call):
fn_name = expr.func.id
elif isinstance(expr, ast.Name):
fn_name = expr.id
else:
raise ValueError(
f"Could not parse function name from expression: {expr!r}.")
name_to_op = {op.__name__: op for op in available_ops}
if fn_name in name_to_op:
return name_to_op[fn_name]
raise ValueError(
f"'{fn_name}' is not available (available ops: {list(name_to_op)}).")
def parse_single_preprocess_op(spec,
available_ops):
"""Parsing the spec for a single preprocess op.
The op can just be the method name or the method name followed by any
arguments (both positional and keyword) to the method.
See the test cases for some valid examples.
Args:
spec: String specifying a single processing operations.
available_ops: Available preprocessing ops.
Returns:
The ProcessOp corresponding to the spec.
"""
try:
expr = ast.parse(spec, mode="eval").body # pytype: disable=attribute-error
except SyntaxError:
raise ValueError(f"{spec!r} is not a valid preprocess op spec.")
fn = _get_op_fn(expr, available_ops)
# Simple case without arguments.
if isinstance(expr, ast.Name):
return PreprocessOp(fn)
assert isinstance(expr, ast.Call)
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {kv.arg: ast.literal_eval(kv.value) for kv in expr.keywords}
if not args:
return PreprocessOp(fn, kwargs)
# Translate positional arguments into keyword arguments.
argspec = inspect.getfullargspec(inspect.unwrap(fn))
available_arg_names = [n for n in argspec.args if n not in _FEATURE_TENSORS]
for i, arg in enumerate(args):
name = available_arg_names[i]
if name in kwargs:
raise ValueError(
f"Argument {name} to op {fn} given both as positional argument "
f"(value: {arg}) and keyword argument (value: {kwargs[name]}).")
kwargs[name] = arg
return PreprocessOp(fn, kwargs)
def parse(spec,
available_ops,
*,
only_tpu_features = True):
"""Parses a preprocess spec; a '|' separated list of preprocess ops."""
if not spec.strip():
ops = []
else:
ops = [
parse_single_preprocess_op(s, available_ops) for s in spec.split("|")
]
return PreprocessFn(ops, only_tpu_features=only_tpu_features)
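# Hedged usage sketch (not part of the original library): defines a toy op and
# parses a spec string into a preprocessing function. The op name `scale_image`
# and the feature values are invented for this example.
def _demo():
    def scale_image(image, factor=2.0):
        # First argument is named `image`, so the op receives that tensor
        # directly instead of the full feature dictionary.
        return image * factor
    preprocess_fn = parse("scale_image(factor=3.0)", [scale_image])
    features = {"image": tf.ones([2, 2]), "label": tf.constant(1)}
    return preprocess_fn(features)
if __name__ == "__main__":
    print(_demo())  # image entries should all equal 3.0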
| 36.323308 | 82 | 0.697475 |
4a1cf25bbe0f82f86e0a0f7054f11a197eee927a
| 1,710 |
py
|
Python
|
benchmark/datasets/femnist/preprocess/get_hashes.py
|
FederalLab/benchmark-lightly
|
ff05a99a19c0e1dcddf16996f922431e09771c4c
|
[
"MIT"
] | null | null | null |
benchmark/datasets/femnist/preprocess/get_hashes.py
|
FederalLab/benchmark-lightly
|
ff05a99a19c0e1dcddf16996f922431e09771c4c
|
[
"MIT"
] | null | null | null |
benchmark/datasets/femnist/preprocess/get_hashes.py
|
FederalLab/benchmark-lightly
|
ff05a99a19c0e1dcddf16996f922431e09771c4c
|
[
"MIT"
] | null | null | null |
# @Author : FederalLab
# @Date : 2021-09-26 00:32:12
# @Last Modified by : Chen Dengsheng
# @Last Modified time: 2021-09-26 00:32:12
# Copyright (c) FederalLab. All rights reserved.
import hashlib
import os
import sys
from benchmark.datasets.femnist.preprocess.utils import load_obj, save_obj
utils_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
utils_dir = os.path.join(utils_dir, 'utils')
sys.path.append(utils_dir)
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
cfd = os.path.join(parent_path, 'data', 'intermediate', 'class_file_dirs')
wfd = os.path.join(parent_path, 'data', 'intermediate', 'write_file_dirs')
class_file_dirs = load_obj(cfd)
write_file_dirs = load_obj(wfd)
class_file_hashes = []
write_file_hashes = []
count = 0
for tup in class_file_dirs:
if (count % 100000 == 0):
print('hashed %d class images' % count)
(cclass, cfile) = tup
file_path = os.path.join(parent_path, cfile)
    with open(file_path, 'rb') as f:
        chash = hashlib.md5(f.read()).hexdigest()
class_file_hashes.append((cclass, cfile, chash))
count += 1
cfhd = os.path.join(parent_path, 'data', 'intermediate', 'class_file_hashes')
save_obj(class_file_hashes, cfhd)
count = 0
for tup in write_file_dirs:
if (count % 100000 == 0):
print('hashed %d write images' % count)
(cclass, cfile) = tup
file_path = os.path.join(parent_path, cfile)
    with open(file_path, 'rb') as f:
        chash = hashlib.md5(f.read()).hexdigest()
write_file_hashes.append((cclass, cfile, chash))
count += 1
wfhd = os.path.join(parent_path, 'data', 'intermediate', 'write_file_hashes')
save_obj(write_file_hashes, wfhd)
| 28.5 | 77 | 0.697076 |
4a1cf2d239530e22293f557e116f3285210da7de
| 1,358 |
py
|
Python
|
Jernej Kos/experiments/classifiers/simple.py
|
shenkev/Exact-Inference-VAE-Robustness
|
37438df2d63b2de19b084153e6c68d528d533837
|
[
"MIT"
] | 3 |
2017-11-06T06:00:23.000Z
|
2017-12-01T20:51:07.000Z
|
Jernej Kos/experiments/classifiers/simple.py
|
shenkev/Exact-Inference-VAE-Robustness
|
37438df2d63b2de19b084153e6c68d528d533837
|
[
"MIT"
] | null | null | null |
Jernej Kos/experiments/classifiers/simple.py
|
shenkev/Exact-Inference-VAE-Robustness
|
37438df2d63b2de19b084153e6c68d528d533837
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.slim as slim
from .. import classifier
class Classifier(classifier.ClassifierBase):
"""Simple classifier with two fully-connected hidden layers."""
name = 'simple-classifier'
def _build(self, x):
# Run the encoder of the underlying model to get the latent representation.
loops = 1 if not self.sample else 10
z_x_sampled = []
        for _ in range(loops):
z_x_sampled.append(self.model.encode_op(x, sample=self.sample))
# Compute the mean sampled value.
z_x = tf.add_n(z_x_sampled) / len(z_x_sampled)
# Classify based on latent space.
with slim.arg_scope([slim.fully_connected],
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': self._training}):
fc1 = slim.fully_connected(z_x, 512, activation_fn=tf.nn.relu, scope='fc1')
fc2 = slim.fully_connected(fc1, 512, activation_fn=tf.nn.relu, scope='fc2')
# Don't use softmax on output due to our loss function.
return slim.fully_connected(fc2, 10, activation_fn=tf.identity, scope='fc_out')
def _build_loss(self, model, labels):
return tf.nn.sparse_softmax_cross_entropy_with_logits(model, labels)
| 41.151515 | 91 | 0.655376 |
4a1cf37d830de67b0ba37c1be37fd60a51ebd41a
| 2,279 |
py
|
Python
|
experiments/ashvin/icml2020/mujoco/pendulum/demo_awr2.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/icml2020/mujoco/pendulum/demo_awr2.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/icml2020/mujoco/pendulum/demo_awr2.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
"""
AWR + SAC from demo experiment
"""
from railrl.demos.source.mdp_path_loader import MDPPathLoader
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
num_epochs=3000,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
replay_buffer_size=int(1E6),
layer_size=256,
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
policy_weight_decay=1e-4,
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=MDPPathLoader,
path_loader_kwargs=dict(
demo_path=["demos/icml2020/mujoco/pendulum.npy"],
# demo_off_policy_path=[
# "ashvin/icml2020/hand/door/demo-bc1/run3/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run4/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run5/video_*.p",
# ],
),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
)
search_space = {
'env': [
# 'half-cheetah',
# 'inv-double-pendulum',
'pendulum',
# 'ant',
# 'walker',
# 'hopper',
# 'humanoid',
# 'swimmer',
],
'seedid': range(3),
'trainer_kwargs.beta': [10, 100],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| 28.135802 | 70 | 0.585344 |
4a1cf39536cd2465ef5fc5b7a272c54efcdd504d
| 10,430 |
py
|
Python
|
sparkdatachallenge.py
|
tomerten/sparkdatachallenge
|
d20dbf5008a4dc5909b886486bb7f5658edd0e73
|
[
"MIT"
] | null | null | null |
sparkdatachallenge.py
|
tomerten/sparkdatachallenge
|
d20dbf5008a4dc5909b886486bb7f5658edd0e73
|
[
"MIT"
] | null | null | null |
sparkdatachallenge.py
|
tomerten/sparkdatachallenge
|
d20dbf5008a4dc5909b886486bb7f5658edd0e73
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Package sparkdatachallenge
==========================
Arrays A and B consisting of N non-negative integers are given.
Together, they represent N real numbers, denoted as C[0], ..., C[N−1].
Elements of A represent the integer parts and the corresponding elements of
B (divided by 1,000,000) represent the fractional parts of the elements of C.
A[I] and B[I] represent
C[I] = A[I] + B[I] / 1,000,000.
##
A pair of indices (P, Q) is multiplicative if 0 ≤ P < Q < N and C[P] * C[Q] ≥ C[P] + C[Q].
##
The package contains several methods to find the number of multiplicative pairs in C.
"""
__version__ = "0.1.1"
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
def check_input(inA: np.array, inB: np.array, scale: int = 1_000_000) -> bool:
"""Check input method.
Parameters
----------
inA : np.array
array containing the integer part
inB : np.array
array containing the decimal part
scale : int, optional
scale factor for the decimal parts, by default 1_000_000
Returns
-------
bool
Check if input is valid.
"""
checkA = (
(inA >= 0).all() and (inA <= 1000).all() and not np.isnan(inA).any() and not inA.size == 0
)
checkB = (
(inB >= 0).all() and (inB < scale).all() and not np.isnan(inB).any() and not inB.size == 0
)
checkC = inA.shape == inB.shape
return checkA and checkB and checkC
def generate_mul_triu(C: np.array) -> np.array:
"""Method to return an upper triangular array,
containing the element by element products of a
given input array C. The upper triangular part
comes from the fact we only want products
where col_idx > row_idx (hence k=-1) as C
    is assumed to be a non-decreasing array of decimal numbers
    and we are looking for multiplicative pairs.
Parameters:
-----------
C : np.array
non-decreasing array of decimal numbers
Returns
-------
np.array
upper triangular array of element by element products
"""
out = C[:, np.newaxis] * C
return np.tril(out, k=-1).T
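# Illustrative check (hedged, not in the original source): for C = [1., 2., 3.]
# the strictly-upper-triangular products returned by generate_mul_triu are
#   [[0., 2., 3.],
#    [0., 0., 6.],
#    [0., 0., 0.]]
# i.e. only products C[P] * C[Q] with P < Q survive.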
def generate_add_triu(C):
"""Method to return an upper triangular array,
containing the element by element sums of a
given input array C. The upper triangular part
    comes from the fact we only want sums
    where col_idx > row_idx (hence k=-1) as C
    is assumed to be a non-decreasing array of decimal numbers
    and we are looking for multiplicative pairs.
Parameters
----------
C : np.array
non-decreasing array of decimal numbers
Returns
-------
np.array
upper triangular array of element by element sums
"""
out = C[:, np.newaxis] + C
return np.tril(out, k=-1).T
def pairs(M: np.array) -> List[tuple]:
"""Method to generate the multiplicative pairs.add()
Parameters
----------
M : np.array
Array containing inequality values.
Returns
-------
List[tuple]
List of pairs as tuples.
"""
    # list of indices that obey the criterion
_sel_idx = np.argwhere(M >= 0)
# indices from upper triangle
triuidx = np.vstack(np.triu_indices_from(M, 1)).T
# indices that are multiplicative
_pairs = _sel_idx[(_sel_idx[:, None] == triuidx).all(-1).any(-1)]
_pairs = [tuple(t) for t in _pairs.tolist()]
return _pairs
def solution_brute1(A: np.array, B: np.array, verbose: bool = True) -> int:
"""Brute force method one - using upper triangular matrices. Expected
to fail with large arrays and it does due to memory issues !!!!
IMPORTANT:
==========
FAILS FOR LARGE ARRAYS!!!!
Parameters
----------
A : np.array
Integer part array
B : np.array
Decimal part array
verbose : bool, optional
to print out of pairs, by default True
Returns
-------
int
number of multiplicative pairs
"""
# generate the decimal numbers
C: np.array = A + B / 1_000_000
# generate the upper triangular arrays
_mul: np.array = generate_mul_triu(C)
_add: np.array = generate_add_triu(C)
_test: np.array = _mul - _add
# if verbose print mul pairs
if verbose:
print(pairs(_test))
# test where pairs are mul
n_mul_pairs: int = np.where(_test[np.triu_indices_from(_test, 1)] >= 0)[0].shape[0]
# if large number return threshold
if n_mul_pairs > 1_000_000_000:
return 1_000_000_000
return n_mul_pairs
def solution_brute2(
A: np.array,
B: np.array,
verbose: bool = True,
threshold: int = 1_000_000_000,
scale: int = 1_000_000,
) -> int:
"""Brute force method based on double for-loop.add()
Parameters
----------
A : np.array
integer part of the decimal numbers
B : np.array
decimal part of the decimal numbers
verbose : bool, optional
Print the mul pairs, by default True
threshold : int, optional
Threshold for breaking the for looop, by default 1_000_000_000
scale : int, optional
scale factor for the decimals, by default 1_000_000
Returns
-------
int
        returns the number of mul pairs if lower than threshold, otherwise returns the threshold value
"""
# generate the floats and sort
C: np.array = np.sort(A + B / scale)
# size
N = A.shape[0]
# init
tups = []
counter = 0
# double for loop - not very efficient
for P in range(N):
# use the sorting to reduce second for loop
for Q in range(P + 1, N):
if (C[P] * C[Q]) >= (C[P] + C[Q]):
counter += 1
if counter == threshold:
return threshold
if verbose:
tups.append((P, Q))
# if verbose print the mul pairs
if verbose:
print(tups)
return counter
def solution_math(
A: np.array, B: np.array, threshold: int = 1_000_000_000, scale: int = 1_000_000
) -> int:
"""Math based method. See tutorial/examples in docs for more details.add()
Parameters
----------
A : np.array
integer part of the decimal numbers
B : np.array
decimal part of the decimal numbers
threshold : int, optional
threshold value for the number of pairs, by default 1_000_000_000
scale : int, optional
scale factor for the decimals, by default 1_000_000
Returns
-------
int
returns number of mul pairs or the threshold value
"""
C: np.array = np.sort(A + B / scale)
# init count
count = 0
# x == 0 => y ==0 => count C[i] = 0.0
nzero = C[C == 0.0].shape[0]
# calculate the number of zero - zero pairs and update count
if nzero > 1:
count = nzero * (nzero - 1) / 2
if count > threshold:
return threshold
# 0 < x < 1 => no solution
# x==1 => no solution
# 1 < x < 2 => y >= x / (x-1)
# inequality is always satisfied for x, y >= 2
# for 1<x<2 we need y>=x/(x-1)
for el in C[(1 < C) & (C < 2)]:
# I ASSUME HERE THE A AND B INDICES ARE THE SAME AS IN THE ORDERED C ARRAY
f = el / (el - 1)
count += C[C >= f].shape[0]
if count > threshold:
return threshold
# case x>=2 and y>=2
k = C[C >= 2.0].shape[0]
count += k * (k - 1) / 2
return int(count)
def compare(A: np.array, B: np.array, P: int, Q: int, scale: int = 1_000_000) -> bool:
"""Comparing composed numbers using there original integer and decimal
values as integers.
Parameters
----------
A : np.array
integer parts
B : np.array
decimal parts
P : int
index
Q : int
index
scale : int, optional
scale for decimals, by default 1_000_000
Returns
-------
bool
return true if multiplicative
"""
# use associativity on integer and decimal part
prodi = A[P] * A[Q] + (A[P] * B[Q]) // scale + (A[Q] * B[P]) // scale
prodd = ((A[P] * B[Q]) % scale) * scale + ((A[Q] * B[P]) % scale) * scale + (B[P] * B[Q])
print(prodi, prodd)
sumi = A[P] + A[Q] + (B[P] + B[Q]) // scale
sumd = ((B[P] + B[Q]) % scale) * scale
print(sumi, sumd)
if prodi > sumi:
return True
elif prodi == sumi:
if prodd >= sumd:
return True
return False
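# Hedged worked example (values are arbitrary and chosen for illustration):
# A = [1, 3], B = [400_000, 500_000] encode C = [1.4, 3.5], and
# 1.4 * 3.5 = 4.9 >= 4.9 = 1.4 + 3.5, so the pair is multiplicative:
#   compare(np.array([1, 3]), np.array([400_000, 500_000]), 0, 1)  # -> True
# (note the function also prints its intermediate scaled integer parts)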
def solution_math2(
A: np.array, B: np.array, threshold: int = 1_000_000_000, scale: int = 1_000_000
) -> int:
"""Math based method. See tutorial/examples in docs for more details.add()
Parameters
----------
A : np.array
integer part of the decimal numbers
B : np.array
decimal part of the decimal numbers
threshold : int, optional
threshold value for the number of pairs, by default 1_000_000_000
scale : int, optional
scale factor for the decimals, by default 1_000_000
Returns
-------
int
returns number of mul pairs or the threshold value
"""
C: np.array = np.sort(A + B / scale)
# init count
count = 0
# x == 0 => y ==0 => count C[i] = 0.0
nzero = C[C == 0.0].shape[0]
# calculate the number of zero - zero pairs and update count
if nzero > 1:
count = nzero * (nzero - 1) / 2
if count > threshold:
return threshold
# 0 < x < 1 => no solution
# x==1 => no solution
# 1 < x < 2 => y >= x / (x-1)
# inequality is always satisfied for x, y >= 2
# for 1<x<2 we need y>=x/(x-1)
# get indices of A == 1
one_idx = np.argwhere(divmod(C, 1)[0].astype(int) == 1)[:, 0]
for idx in one_idx:
# A == 1
# A.B / (A.B - 1) = 1.B / (1.B - 1) = 1.B / 0.B = 1 / 0.B + 1
f = scale / B[idx] * scale # max accuracy in B
fscaled = int(f) + scale
fscaled += 0 if np.ceil(f) else 1 # taking into account the last digit
count += C[scale * C >= fscaled].shape[0]
if count > threshold:
return threshold
# case x>=2 and y>=2
k = C[C >= 2.0].shape[0]
count += k * (k - 1) / 2
return int(count)
if __name__ == "__main__":
tup = (np.array([0, 1, 3]), np.array([0, 400_000, 500_000]), 1)
print(solution_brute1(tup[0], tup[1], verbose=True))
print(solution_brute2(tup[0], tup[1], verbose=True))
print(solution_math(tup[0], tup[1]))
print(solution_math2(tup[0], tup[1]))
# eof
| 26.075 | 98 | 0.582359 |
4a1cf3f3800cf34cd7c0b41291cd36e178aca479
| 608 |
py
|
Python
|
AppDB/appscale/datastore/scripts/data_layout.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | 1 |
2017-04-07T15:33:35.000Z
|
2017-04-07T15:33:35.000Z
|
AppDB/appscale/datastore/scripts/data_layout.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | 1 |
2019-10-15T15:57:53.000Z
|
2019-10-15T15:57:53.000Z
|
AppDB/appscale/datastore/scripts/data_layout.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | 1 |
2019-08-27T05:19:48.000Z
|
2019-08-27T05:19:48.000Z
|
import argparse
import sys
from ..appscale_datastore_batch import DatastoreFactory
# The exit code that indicates the data layout version is unexpected.
INVALID_VERSION_EXIT_CODE = 64
def main():
parser = argparse.ArgumentParser(
description='Checks if the data layout is valid')
parser.add_argument('--db-type', help='The database type')
args = parser.parse_args()
datastore_batch = DatastoreFactory.getDatastore(args.db_type)
try:
is_valid = datastore_batch.valid_data_version_sync()
finally:
datastore_batch.close()
if not is_valid:
sys.exit(INVALID_VERSION_EXIT_CODE)
| 25.333333 | 69 | 0.769737 |
4a1cf4916f780a291c0167f34b69f8197bffc279
| 3,560 |
py
|
Python
|
Cura/Cura/cura/PrinterOutput/Models/ExtruderConfigurationModel.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
Cura/Cura/cura/PrinterOutput/Models/ExtruderConfigurationModel.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
Cura/Cura/cura/PrinterOutput/Models/ExtruderConfigurationModel.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional
from PyQt5.QtCore import pyqtProperty, QObject, pyqtSignal
from .MaterialOutputModel import MaterialOutputModel
class ExtruderConfigurationModel(QObject):
extruderConfigurationChanged = pyqtSignal()
def __init__(self, position: int = -1) -> None:
super().__init__()
self._position = position # type: int
self._material = None # type: Optional[MaterialOutputModel]
self._hotend_id = None # type: Optional[str]
def setPosition(self, position: int) -> None:
self._position = position
@pyqtProperty(int, fset = setPosition, notify = extruderConfigurationChanged)
def position(self) -> int:
return self._position
def setMaterial(self, material: Optional[MaterialOutputModel]) -> None:
if material is None or self._material == material:
return
self._material = material
self.extruderConfigurationChanged.emit()
@pyqtProperty(QObject, fset = setMaterial, notify = extruderConfigurationChanged)
def activeMaterial(self) -> Optional[MaterialOutputModel]:
return self._material
@pyqtProperty(QObject, fset = setMaterial, notify = extruderConfigurationChanged)
def material(self) -> Optional[MaterialOutputModel]:
return self._material
def setHotendID(self, hotend_id: Optional[str]) -> None:
if self._hotend_id != hotend_id:
self._hotend_id = hotend_id
self.extruderConfigurationChanged.emit()
@pyqtProperty(str, fset = setHotendID, notify = extruderConfigurationChanged)
def hotendID(self) -> Optional[str]:
return self._hotend_id
## This method is intended to indicate whether the configuration is valid or not.
    #   The method checks whether the mandatory fields are set.
    #   At the moment it is always valid, since we allow empty materials and variants.
def isValid(self) -> bool:
return True
def __str__(self) -> str:
message_chunks = []
message_chunks.append("Position: " + str(self._position))
message_chunks.append("-")
message_chunks.append("Material: " + self.activeMaterial.type if self.activeMaterial else "empty")
message_chunks.append("-")
message_chunks.append("HotendID: " + self.hotendID if self.hotendID else "empty")
return " ".join(message_chunks)
def __eq__(self, other) -> bool:
if not isinstance(other, ExtruderConfigurationModel):
return False
if self._position != other.position:
return False
# Empty materials should be ignored for comparison
if self.activeMaterial is not None and other.activeMaterial is not None:
if self.activeMaterial.guid != other.activeMaterial.guid:
if self.activeMaterial.guid != "" and other.activeMaterial.guid != "":
return False
else:
# At this point there is no material, so it doesn't matter what the hotend is.
return True
if self.hotendID != other.hotendID:
return False
return True
# Calculating a hash function using the position of the extruder, the material GUID and the hotend id to check if is
# unique within a set
def __hash__(self):
return hash(self._position) ^ (hash(self._material.guid) if self._material is not None else hash(0)) ^ hash(self._hotend_id)
| 40 | 132 | 0.671629 |
4a1cf66b17137fb70cd2b0746a4d5237d2066dc9
| 6,413 |
py
|
Python
|
AppServer/lib/django-1.5/django/contrib/sitemaps/tests/http.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 790 |
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.5/django/contrib/sitemaps/tests/http.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361 |
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.5/django/contrib/sitemaps/tests/http.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155 |
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
from __future__ import unicode_literals
import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils._os import upath
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS,
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
user_sitemap = GenericSitemap({'queryset': User.objects.all()})
def is_user(url):
return isinstance(url['item'], User)
item_in_url_info = all(map(is_user, user_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
Check that a cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
| 43.331081 | 124 | 0.691408 |
4a1cf6b1a18c80ee69373ece1b2d13ffa409aa9b
| 1,169 |
py
|
Python
|
pytanga/components/Cisco/xe/ip/ip.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/Cisco/xe/ip/ip.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/Cisco/xe/ip/ip.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
from typing import List
from pytanga.components import AbstractComponent
class ipComponent(AbstractComponent):
def __init__(self):
self._xmlns = {}
self.attributes = self.setAttributes()
self.parent_xmlns = {}
self._children: List[AbstractComponent] = []
self.childrenData = []
self.tag = 'ip'
@property
def xmlns(self):
return self._xmlns
@xmlns.setter
def xmlns(self, xmlns):
self._xmlns = xmlns
def setAttributes(self):
attributes = {}
return attributes
def add(self, component) -> None:
self._children.append(component)
def remove(self, component) -> None:
self._children.remove(component)
def is_composite(self) -> bool:
return False
def getXMLNS(self):
for child in self._children:
self.parent_xmlns.update(child.getXMLNS())
return self.parent_xmlns
def parse(self, serializer):
self.childrenData = []
self.getXMLNS()
for child in self._children:
self.childrenData.append(child.parse(serializer))
return serializer.parse(self)
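# Hedged usage sketch (not part of pytanga): a minimal stand-in serializer that
# illustrates the traversal contract of parse(). Real serializers ship with the
# library; this stub is invented purely for demonstration.
class _EchoSerializer:
    def parse(self, component):
        return {component.tag: list(component.childrenData)}
if __name__ == "__main__":
    ip = ipComponent()
    print(ip.parse(_EchoSerializer()))  # -> {'ip': []}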
| 24.87234 | 61 | 0.615056 |
4a1cf7679ec6e01110933600be0e62651c5196a4
| 3,239 |
py
|
Python
|
video/cloud-client/labels/labels.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 3 |
2021-01-24T23:42:57.000Z
|
2021-02-17T12:02:12.000Z
|
video/cloud-client/labels/labels.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 320 |
2020-11-08T21:02:43.000Z
|
2022-02-10T10:43:29.000Z
|
video/cloud-client/labels/labels.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 3 |
2019-02-11T16:16:11.000Z
|
2019-04-19T21:34:37.000Z
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to detect labels from a video
based on the image content with the Google Cloud Video Intelligence
API.
For more information, check out the documentation at
https://cloud.google.com/videointelligence/docs.
Usage Example:
python labels.py gs://cloud-ml-sandbox/video/chicago.mp4
"""
# [START video_label_tutorial]
# [START video_label_tutorial_imports]
import argparse
from google.cloud import videointelligence
# [END video_label_tutorial_imports]
def analyze_labels(path):
""" Detects labels given a GCS path. """
# [START video_label_tutorial_construct_request]
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(path, features=features)
# [END video_label_tutorial_construct_request]
print('\nProcessing video for label annotations:')
# [START video_label_tutorial_check_operation]
result = operation.result(timeout=90)
print('\nFinished processing.')
# [END video_label_tutorial_check_operation]
# [START video_label_tutorial_parse_response]
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# [END video_label_tutorial_parse_response]
if __name__ == '__main__':
# [START video_label_tutorial_run_application]
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('path', help='GCS file path for label detection.')
args = parser.parse_args()
analyze_labels(args.path)
# [END video_label_tutorial_run_application]
# [END video_label_tutorial]
| 38.105882 | 75 | 0.725533 |
4a1cf791ef3c7874963fc04d2c955e964b1209f2
| 77 |
py
|
Python
|
src/bytes_encryptor/__init__.py
|
huykingsofm/bytes_encryptor
|
c23dd196b95faea26cb61d495d2c562b11743228
|
[
"MIT"
] | null | null | null |
src/bytes_encryptor/__init__.py
|
huykingsofm/bytes_encryptor
|
c23dd196b95faea26cb61d495d2c562b11743228
|
[
"MIT"
] | null | null | null |
src/bytes_encryptor/__init__.py
|
huykingsofm/bytes_encryptor
|
c23dd196b95faea26cb61d495d2c562b11743228
|
[
"MIT"
] | null | null | null |
from bytes_encryptor.bytes_encryptor import BytesEncryptor, BMPImageEncryptor
| 77 | 77 | 0.922078 |
4a1cf821a8624c53d0f1ac1aca3b407768ec2dbe
| 1,086 |
py
|
Python
|
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/custom_params.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 25 |
2019-12-04T03:09:55.000Z
|
2022-03-08T10:52:06.000Z
|
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/custom_params.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 29 |
2019-12-04T03:00:39.000Z
|
2022-03-02T06:25:44.000Z
|
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/custom_params.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 33 |
2019-12-04T02:51:30.000Z
|
2022-03-24T02:47:38.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Setting dfs.allow.truncate to true in hdfs-site.xml is important for HAWQ's performance;
# HAWQ remains operational, with degraded performance, when it is false.
# This flag decides whether HAWQ fails to start entirely, or starts with that performance
# limitation, when the truncate property is set to false.
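# A hypothetical consumer sketch (the names below are assumptions, not part of this file):
#   from custom_params import enforce_hdfs_truncate
#   if not dfs_allow_truncate and enforce_hdfs_truncate:
#       raise Fail("dfs.allow.truncate must be set to true in hdfs-site.xml")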
enforce_hdfs_truncate = True
| 49.363636 | 155 | 0.802026 |
4a1cf9815e0dd3a9a90cbba87eae59dd758c4d5d
| 79,781 |
py
|
Python
|
transformers_modified/modeling_xlnet.py
|
mattf1n/lm-intervention
|
a35af3d8976931d588a08f5ae6a852587ae0b66f
|
[
"MIT"
] | 2 |
2021-06-17T19:11:04.000Z
|
2021-06-19T17:31:15.000Z
|
transformers_modified/modeling_xlnet.py
|
mattf1n/lm-intervention
|
a35af3d8976931d588a08f5ae6a852587ae0b66f
|
[
"MIT"
] | null | null | null |
transformers_modified/modeling_xlnet.py
|
mattf1n/lm-intervention
|
a35af3d8976931d588a08f5ae6a852587ae0b66f
|
[
"MIT"
] | 1 |
2021-11-18T16:49:04.000Z
|
2021-11-18T16:49:04.000Z
|
""" A copy of transformers/modeling_xlnet.py from the Huggingface
transformers library modified so that the attention module is called with
non-keyword arguments (to make those arguments accessible to the hook).
"""
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
"xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
for name, pointer in tf_to_pt_map.items():
logger.info("Importing {}".format(name))
if name not in tf_weights:
logger.info("{} not in tf pre-trained weights, skipping".format(name))
continue
array = tf_weights[name]
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
logger.info("Transposing")
array = np.transpose(array)
if isinstance(pointer, list):
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
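    # Worked micro-example for rel_shift_bnij (manual trace, qlen=2, klen=2):
    # an input of shape [1, 1, 2, 4] laid out as [[a, b, c, d], [e, f, g, h]]
    # comes out as [[c, d], [f, g]] — each query row is realigned by one extra
    # step, so column j now indexes a fixed relative offset instead of an
    # absolute position.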
def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
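        # shape note: ac, bd and ef all broadcast to [bsz, n_head, qlen, klen], so
        # attn_score[b, n, i, j] is the scaled score of query i attending to key j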
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if self.output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if self.output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if self.output_attentions:
outputs = outputs + (attn_prob,)
return outputs
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
class XLNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.rel_attn = XLNetRelativeAttention(config)
self.ff = XLNetFeedForward(config)
self.dropout = nn.Dropout(config.dropout)
def forward(
self, output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None
):
### MODIFIED ###
# outputs = self.rel_attn(
# output_h,
# output_g,
# attn_mask_h,
# attn_mask_g,
# r,
# seg_mat,
# mems=mems,
# target_mapping=target_mapping,
# head_mask=head_mask,
# )
outputs = self.rel_attn(
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems,
target_mapping,
head_mask,
)
### MODIFIED ###
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = self.ff(output_g)
output_h = self.ff(output_h)
        outputs = (output_h, output_g) + outputs[2:]  # re-append attentions if present
return outputs
class XLNetPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_xlnet
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, XLNetLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, XLNetRelativeAttention):
for param in [
module.q,
module.k,
module.v,
module.o,
module.r,
module.r_r_bias,
module.r_s_bias,
module.r_w_bias,
module.seg_embed,
]:
param.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, XLNetModel):
module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
If ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
You can only uses one of `input_mask` and `attention_mask`
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.output_past = config.output_past
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
mlen: Mask length
::
              same_length=False:       same_length=True:
              <mlen > <  qlen >        <mlen > <  qlen >
           ^ [0 0 0 0 0 1 1 1 1]      [0 0 0 0 0 1 1 1 1]
             [0 0 0 0 0 0 1 1 1]      [1 0 0 0 0 0 1 1 1]
        qlen [0 0 0 0 0 0 0 1 1]      [1 1 0 0 0 0 0 1 1]
             [0 0 0 0 0 0 0 0 1]      [1 1 1 0 0 0 0 0 1]
           v [0 0 0 0 0 0 0 0 0]      [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(next(self.parameters()))
return ret
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
return new_mem.detach()
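    # e.g. with mem_len=3, prev_mem = [t0, t1, t2] and curr_out = [t3, t4], the
    # concatenation is trimmed to the last 3 steps: new_mem = [t2, t3, t4]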
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(next(self.parameters()))
return pos_emb
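    # e.g. for qlen=3, klen=5 with attn_type 'bi' and bi_data=False, fwd_pos_seq
    # counts 5, 4, ..., -2: one sinusoidal embedding per key-minus-query offset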
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetModel.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=False)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
# the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
# but we want a unified interface in the library with the batch size on the first dimension
# so we move here the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = next(self.parameters()).dtype
device = next(self.parameters()).device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). "
            "Please choose one."
        )
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
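            # non_tgt_mask differs from attn_mask only on the diagonal: the content
            # stream (h) may attend to a token's own content, while the query
            # stream (g) keeps the prediction target masked from itself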
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
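            # e.g. token_type_ids [0, 0, 1] gives seg_mat[i, j] = [1, 0] when tokens
            # i and j share a segment and [0, 1] otherwise (one-hot over "same?")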
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
)
output_h, output_g = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (output.permute(1, 0, 2).contiguous(),)
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
outputs = outputs + (new_mems,)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
outputs = outputs + (hidden_states,)
if self.output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
        target_mapping[:, 0, -1] = 1.0  # each batch entry predicts its final position
inputs = {"input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
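    # Minimal usage sketch (illustrative; the generate() kwargs here are assumptions):
    #   model = XLNetLMHeadModel.from_pretrained('xlnet-base-cased')
    #   out = model.generate(input_ids, max_length=40)
    # generate() calls this hook at each step, so only the appended dummy token's
    # position is predicted per decoding step.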
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
Labels for masked language modeling.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted; they depend on `target_mapping`. Note that in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below).
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored, the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetLMHeadModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
# The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
assert labels.shape[0] == 1, 'only one word will be predicted'
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
loss, next_token_logits = outputs[:2] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
logits = self.lm_loss(transformer_outputs[0])
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if present
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForSequenceClassification
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if present
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForTokenClassification
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetForTokenClassification.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
scores = outputs[0]
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[1:]  # Keep mems, hidden states, attentions if present
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
token_type_ids=None,
input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
labels=None,
head_mask=None,
inputs_embeds=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForMultipleChoice
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if present
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForQuestionAnsweringSimple
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
1.0 means token should be masked. 0.0 mean token is not masked.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForQuestionAnswering
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are present
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
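# Hedged decoding sketch (illustrative addition, not part of the original
# module): turning the beam-search outputs of XLNetForQuestionAnswering
# (inference mode, i.e. no start/end positions supplied) into one best
# start/end token pair per sample. The layout assumption follows the view()
# calls in forward() above: the flattened end tensors are indexed as
# (end_candidate * start_n_top + start_candidate). For brevity this does not
# enforce end >= start.
def _best_span_from_beam_outputs(start_top_log_probs, start_top_index,
                                 end_top_log_probs, end_top_index):
    bsz, start_n_top = start_top_index.size()
    end_n_top = end_top_index.size(1) // start_n_top
    # joint log-prob of every (start candidate, end candidate) pair
    joint = start_top_log_probs.unsqueeze(1) + end_top_log_probs.view(
        bsz, end_n_top, start_n_top
    )  # shape (bsz, end_n_top, start_n_top)
    flat = joint.view(bsz, -1).argmax(dim=1)
    end_cand, start_cand = flat // start_n_top, flat % start_n_top
    rows = torch.arange(bsz)
    start_tok = start_top_index[rows, start_cand]
    end_tok = end_top_index.view(bsz, end_n_top, start_n_top)[rows, end_cand, start_cand]
    return start_tok, end_tok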
| 48.945399 | 304 | 0.64098 |
4a1cf9b17bb22437bf2f596bb015536ac2e483b7 | 7,489 | py | Python | juriscraper/pacer/utils.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | ["BSD-2-Clause"] | null | null | null | juriscraper/pacer/utils.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | ["BSD-2-Clause"] | null | null | null | juriscraper/pacer/utils.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | ["BSD-2-Clause"] | null | null | null |
import re
import requests
import tldextract
from lxml import html
from ..lib.exceptions import ParsingException
def get_pacer_court_info():
r = requests.get("https://court-version-scraper.herokuapp.com/courts.json")
return r.json()
def get_courts_from_json(j):
courts = []
for k, v in j.items():
for court in v["courts"]:
court["type"] = k
courts.append(court)
return courts
def get_court_id_from_url(url):
"""Extract the court ID from the URL."""
parts = tldextract.extract(url)
return parts.subdomain.split(".")[1]
def get_pacer_case_id_from_nonce_url(url):
"""Extract the pacer case ID from the URL.
In: https://ecf.almd.uscourts.gov/cgi-bin/DktRpt.pl?56120
Out: 56120
In: https://ecf.azb.uscourts.gov/cgi-bin/iquery.pl?625371913403797-L_9999_1-0-663150
Out: 663150
"""
param = url.split("?")[1]
if "L" in param:
return param.rsplit("-", 1)[1]
return param
def get_pacer_seq_no_from_doc1_url(url):
"""Extract the seq_no from the doc1 URL."""
    match = re.search(r"de_seq_num=(\d+)", url)
if match:
return match.group(1)
else:
return None
def get_pacer_doc_id_from_doc1_url(url):
"""Extract the pacer document ID from the doc1 URL. Coerce the fourth digit
to zero.
In: https://ecf.almd.uscourts.gov/doc1/01712427473
Out: 01702427473
In: /doc1/01712427473
Out: 01702427473
Note that results are strings, not ints, because many of the strings start
with zero.
See tests for more examples.
"""
assert (
"show_case_doc" not in url
), "Cannot get doc1 ID from show_case_doc URL"
url = url.rsplit("/", 1)[1].split("?")[0]
url = url[:3] + "0" + url[4:]
return url
def get_pacer_seq_no_from_doc1_anchor(anchor):
"""Extract the PACER sequence number from an HTML anchor node.
:param anchor: An LXML Element.
:return: None if no sequence number can be found. Otherwise returns the
sequence number.
"""
try:
onclick = anchor.xpath("./@onclick")[0]
except IndexError:
return None
else:
if "goDLS" in onclick:
go_dls_parts = reverse_goDLS_function(onclick)
return go_dls_parts["de_seq_num"]
def reverse_goDLS_function(s):
"""Extract the arguments from the goDLS JavaScript function.
In: goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);
Out: {
'form_post_url': '/doc1/01712427473',
'caseid': '56121',
'de_seq_num': '69',
'got_receipt': '',
'pdf_header': '',
'pdf_toggle_possible': '1',
'magic_num': '',
'hdr': '',
}
The key names correspond to the form field names in the JavaScript on PACER,
but we don't actually know what each of these values does. Our best
speculation is:
- form_post_url: Where the form is posted to. The HTML 'action' attribute.
- caseid: The internal PACER ID for the case.
- de_seq_num: Unclear. This seems to be the internal ID for the document,
but this field can be omitted without any known issues.
- got_receipt: If set to '1', this will bypass the receipt page and
immediately direct you to the page where the PDF is embedded in an
iframe.
- pdf_header: Can be either 1 or 2. 1: Show the header. 2: No header.
    - pdf_toggle_possible: This seems to always be 1. Could be that some courts
      do not allow the header to be turned off, but we haven't discovered that
      yet.
- magic_num: This is used for the "One free look" downloads.
- hdr: Unclear what HDR stands for but on items that have attachments,
passing this parameter bypasses the download attachment screen and takes
you directly to the PDF that you're trying to download. For an example,
see document 108 from 1:12-cv-00102 in tnmd, which is a free opinion that
has an attachment. Note that the eighth parameter was added some time
after 2010. Dockets older than that date only have seven responses.
"""
args = re.findall("'(.*?)'", s)
parts = {
"form_post_url": args[0],
"caseid": args[1],
"de_seq_num": args[2],
"got_receipt": args[3],
"pdf_header": args[4],
"pdf_toggle_possible": args[5],
"magic_num": args[6],
}
try:
parts["hdr"] = args[7]
except IndexError:
# At some point dockets added this eighth parameter. Older ones lack it
parts["hdr"] = None
return parts
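# Illustrative round-trip (demo helper, not part of the original module),
# using the example goDLS string from the docstring above.
def _demo_reverse_goDLS():
    example = ("goDLS('/doc1/01712427473','56121','69','','','1','','');"
               "return(false);")
    parts = reverse_goDLS_function(example)
    assert parts["form_post_url"] == "/doc1/01712427473"
    assert parts["caseid"] == "56121"
    assert parts["de_seq_num"] == "69"
    return parts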
def make_doc1_url(court_id, pacer_doc_id, skip_attachment_page):
"""Make a doc1 URL.
If skip_attachment_page is True, we replace the fourth digit with a 1
instead of a zero, which bypasses the attachment page.
"""
if skip_attachment_page and pacer_doc_id[3] == "0":
# If the fourth digit is a 0, replace it with a 1
pacer_doc_id = pacer_doc_id[:3] + "1" + pacer_doc_id[4:]
return "https://ecf.%s.uscourts.gov/doc1/%s" % (court_id, pacer_doc_id)
def is_pdf(response):
    """Determine whether the item downloaded is a PDF or something else."""
    return response.headers.get("content-type") == "application/pdf"
def is_text(response):
    """Determine whether the item downloaded is a text file or something else."""
    return ".txt" in response.headers.get("content-type", "")
def get_nonce_from_form(r):
"""Get a nonce value from a HTML response. Returns the first nonce that is
found.
:param r: The response object you wish to parse.
:returns A nonce object that can be used to query PACER or None, if no nonce
can be found.
"""
tree = html.fromstring(r.text)
form_attrs = tree.xpath("//form//@action")
for attr in form_attrs:
# The action attr will be a value like:
# ../cgi-bin/HistDocQry.pl?112801540788508-L_1_0-1
# Split on the '?', and return the nonce.
try:
path, nonce = attr.split("?")
except ValueError:
raise ParsingException("Didn't get nonce from PACER form.")
else:
if "-L_" in nonce:
return nonce
return None
BASE_IA_URL = "https://www.archive.org/download"
def get_bucket_name(court, pacer_case_id):
bucketlist = ["gov", "uscourts", court, str(pacer_case_id)]
return ".".join(bucketlist)
def get_docket_filename(court, pacer_case_id):
return ".".join(
[
"gov",
"uscourts",
str(court),
str(pacer_case_id),
"docket.xml",
]
)
def get_document_filename(
court, pacer_case_id, document_number, attachment_number
):
return ".".join(
[
"gov",
"uscourts",
str(court),
str(pacer_case_id),
str(document_number),
str(attachment_number or 0),
"pdf",
]
)
def get_docketxml_url(court, pacer_case_id):
return "%s/%s/%s" % (
BASE_IA_URL,
get_bucket_name(court, pacer_case_id),
get_docket_filename(court, pacer_case_id),
)
def get_pdf_url(court, pacer_case_id, document_number, attachment_number):
return "%s/%s/%s" % (
BASE_IA_URL,
get_bucket_name(court, pacer_case_id),
get_document_filename(
court, pacer_case_id, document_number, attachment_number
),
)
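# Illustrative usage (demo helper, not part of the original module; the court
# and case ID are the example values taken from the docstrings above).
def _demo_ia_urls():
    assert get_bucket_name("almd", 56120) == "gov.uscourts.almd.56120"
    # -> https://www.archive.org/download/gov.uscourts.almd.56120/
    #    gov.uscourts.almd.56120.docket.xml
    return get_docketxml_url("almd", 56120)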
| 29.956 | 88 | 0.631326 |
4a1cfa1b4106a0a3b86b95637ab1cd1212bbe63c | 6,739 | py | Python | vpython/with_notebook.py | kdunn926/vpython-jupyter | b8e816b5ff9eac1d33ce2cfe1e15cfab4900b343 | ["MIT"] | 1 | 2019-07-13T08:21:28.000Z | 2019-07-13T08:21:28.000Z | vpython/with_notebook.py | kdunn926/vpython-jupyter | b8e816b5ff9eac1d33ce2cfe1e15cfab4900b343 | ["MIT"] | null | null | null | vpython/with_notebook.py | kdunn926/vpython-jupyter | b8e816b5ff9eac1d33ce2cfe1e15cfab4900b343 | ["MIT"] | null | null | null |
import os
import time
from threading import Thread
from jupyter_core.paths import jupyter_data_dir
import notebook
import IPython
from IPython.display import display, Javascript
from .vpython import GlowWidget, baseObj, canvas
from .rate_control import ws_queue
from . import __version__
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import json
import asyncio
import logging
def find_free_port():
    s = socket.socket()
    s.bind(('', 0))  # bind to an OS-assigned free port
    port = s.getsockname()[1]
    s.close()  # release the listener so the port can be reused below
    return port
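# Quick sanity sketch (illustrative only; run manually rather than at import
# time): a port reported by find_free_port() should be bindable immediately.
def _demo_find_free_port():
    port = find_free_port()
    probe = socket.socket()
    probe.bind(('', port))  # expected to succeed because the port was free
    probe.close()
    return port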
__SOCKET_PORT = find_free_port()
try:
    import platform  # platform is not imported at the top of this file
    if platform.python_implementation() == 'PyPy':
        __SOCKET_PORT = 9000 + __SOCKET_PORT % 1000  # use a port between 9000 and 9999 for PyPy
except Exception:
    pass
#### Setup for Jupyter VPython
# The following file operations check whether nbextensions already has the correct files.
package_dir = os.path.dirname(__file__) # The location in site-packages of the vpython module
datacnt = len(os.listdir(package_dir+"/vpython_data")) # the number of files in the site-packages vpython data folder
libcnt = len(os.listdir(package_dir+"/vpython_libraries")) # the number of files in the site-packages vpython libraries folder
jd = jupyter_data_dir()
nbdir = jd+'/nbextensions/'
nbdata = nbdir+'vpython_data'
nblib = nbdir+'vpython_libraries'
transfer = True # need to transfer files from site-packages to nbextensions
### If JupyterLab is installed then copy vpython_data directory to static dir in Jupytarlab Application Directory
try:
import jupyterlab
import jupyterlab.commands
except ImportError:
#logging.info("Unable to import jupyterlab")
pass
else:
# We have jupyterlab, is it the right version?
if jupyterlab.__version__ >= '0.35.0':
from os.path import join
labextensions_dir = join(jupyterlab.commands.get_app_dir(), u'static')
notebook.nbextensions.install_nbextension(path=package_dir + "/vpython_data",
nbextensions_dir=labextensions_dir,
overwrite=False,
verbose=0)
if 'nbextensions' in os.listdir(jd):
ldir = os.listdir(nbdir)
if ('vpython_data' in ldir and len(os.listdir(nbdata)) == datacnt and
'vpython_libraries' in ldir and len(os.listdir(nblib)) == libcnt and
'vpython_version.txt' in ldir):
        with open(nbdir+'/vpython_version.txt') as fv:
            v = fv.read()
transfer = (v != __version__) # need not transfer files to nbextensions if correct version's files already there
if transfer:
notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_data",overwrite = True,user = True,verbose = 0)
notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_libraries",overwrite = True,user = True,verbose = 0)
# Wait for files to be transferred to nbextensions:
libready = False
dataready = False
while True:
nb = os.listdir(nbdir)
for f in nb:
if f == 'vpython_data':
if len(os.listdir(nbdata)) == datacnt:
dataready = True
if f == 'vpython_libraries':
if len(os.listdir(nblib)) == libcnt:
libready = True
if libready and dataready: break
# Mark with the version number that the files have been transferred successfully:
fd = open(nbdir+'/vpython_version.txt', 'w')
fd.write(__version__)
fd.close()
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glow.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glowcomm");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/jquery-ui.custom.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glow.min"], function(){console.log("GLOW LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glowcomm"], function(){console.log("GLOWCOMM LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/jquery-ui.custom.min"], function(){console.log("JQUERY LOADED");});}else{element.textContent = ' ';}"""))
time.sleep(1) # allow some time for javascript code above to run before attempting to setup Comm Channel
wsConnected = False
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global wsConnected
wsConnected = True
def on_message(self, message):
ws_queue.put(message)
def on_close(self):
self.stop_tornado()
def stop_tornado(self):
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(ioloop.stop)
def check_origin(self, origin):
return True
def start_server():
asyncio.set_event_loop(asyncio.new_event_loop())
application = tornado.web.Application([(r'/ws', WSHandler),])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(__SOCKET_PORT)
Log = logging.getLogger('tornado.access')
level = logging.getLevelName('WARN')
Log.setLevel(level)
tornado.ioloop.IOLoop.instance().start()
# Removed check for ipykernel version because the old check
# was for 5.0.0 but this works with 4.x too...and 4.x is the first
# version of ipykernel
t = Thread(target=start_server, args=())
t.start()
# Setup Comm Channel and websocket
baseObj.glow = GlowWidget(wsport=__SOCKET_PORT, wsuri='/ws')
while (not wsConnected):
time.sleep(0.1) # wait for websocket to connect
baseObj.trigger() # start the trigger ping-pong process
# Same justification as above for removing the ipykernel check.
# There was also an IPython version check for >=4, which was
# released in Nov 2015. Ok to stop supporting in 2.019 I think.
async def wsperiodic():
while True:
if ws_queue.qsize() > 0:
data = ws_queue.get()
d = json.loads(data)
# Must send events one at a time to GW.handle_msg because
# bound events need the loop code
for m in d:
# message format used by notebook
msg = {'content': {'data': [m]}}
baseObj.glow.handle_msg(msg)
await asyncio.sleep(0.1)
loop = asyncio.get_event_loop()
loop.create_task(wsperiodic())
# Dummy name to import...
_ = None
| 39.180233 | 205 | 0.686155 |
4a1cfad66263be249b24713177c993933b1286a6 | 11,075 | py | Python | model/onmt/Loss.py | wanghm92/rotowire_fg | 67d7534f78368da8cb74bd222f311fc1a8906ba9 | ["MIT"] | 13 | 2019-11-11T12:03:15.000Z | 2022-03-31T20:02:41.000Z | model/onmt/Loss.py | wanghm92/rotowire_fg | 67d7534f78368da8cb74bd222f311fc1a8906ba9 | ["MIT"] | 5 | 2020-07-21T03:15:16.000Z | 2021-02-08T02:27:04.000Z | model/onmt/Loss.py | wanghm92/rotowire_fg | 67d7534f78368da8cb74bd222f311fc1a8906ba9 | ["MIT"] | 8 | 2019-11-12T10:38:20.000Z | 2020-11-16T02:28:47.000Z |
"""
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt
import onmt.io
# TGT_VOCAB_SIZE = 606
# TGT_VOCAB_SIZE = 580
TGT_VOCAB_SIZE = 630
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
    sharding next step predictions and accumulating multiple
    loss computations.
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
tgt_vocab (:obj:`Vocab`) :
torchtext vocab object representing the target output
        normalization (str): normalize by "sents" or "tokens"
"""
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[onmt.io.PAD_WORD]
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns, stage1=True):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
stage1: is it stage1
Returns:
:obj:`onmt.Statistics`: loss statistics
"""
if stage1:
range_ = (0, batch.tgt1.size(0))
else:
range_ = (0, batch.tgt2.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats
def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization, retain_graph=False):
"""Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
Returns:
:obj:`onmt.Statistics`: validation loss statistics
"""
batch_stats = onmt.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self._make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size, retain_graph=retain_graph):
loss, stats = self._compute_loss(batch, **shard)
loss.div(normalization).backward()
batch_stats.update(stats)
return batch_stats
def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum()
return onmt.Statistics(loss[0], non_padding.sum(), num_correct)
def _bottle(self, v):
return v.view(-1, v.size(2))
def _unbottle(self, v, batch_size):
return v.view(-1, batch_size, v.size(1))
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, generator, tgt_vocab, normalization="sents",
label_smoothing=0.0, decoder_type='rnn'):
super(NMTLossCompute, self).__init__(generator, tgt_vocab)
assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)
self.decoder_type = decoder_type
if label_smoothing > 0:
# When label smoothing is turned on,
# KL-divergence between q_{smoothed ground truth prob.}(w)
# and p_{prob. computed by model}(w) is minimized.
# If label smoothing value is set to zero, the loss
# is equivalent to NLLLoss or CrossEntropyLoss.
# All non-true labels are uniformly set to low-confidence.
self.criterion = nn.KLDivLoss(size_average=False)
one_hot = torch.randn(1, len(tgt_vocab))
one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
one_hot[0][self.padding_idx] = 0
self.register_buffer('one_hot', one_hot)
else:
if self.decoder_type == 'pointer':
weight = torch.ones(TGT_VOCAB_SIZE)
else:
weight = torch.ones(len(tgt_vocab))
weight[self.padding_idx] = 0
self.criterion = nn.NLLLoss(weight, size_average=False)
self.confidence = 1.0 - label_smoothing
def _make_shard_state(self, batch, output, range_, attns=None):
if self.decoder_type == 'pointer':
return {
"output": attns['std'],
"target": batch.tgt1_planning[range_[0] + 1: range_[1]]
}
else:
assert False
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
}
def _compute_loss(self, batch, output, target):
if self.decoder_type == 'pointer':
scores = self._bottle(output)
else:
scores = self.generator(self._bottle(output))
gtruth = target.view(-1)
if self.confidence < 1:
tdata = gtruth.data
mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
log_likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))
tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
if mask.dim() > 0:
log_likelihood.index_fill_(0, mask, 0)
tmp_.index_fill_(0, mask, 0)
gtruth = Variable(tmp_, requires_grad=False)
loss = self.criterion(scores, gtruth)
if self.confidence < 1:
# Default: report smoothed ppl.
# loss_data = -log_likelihood.sum(0)
loss_data = loss.data.clone()
else:
loss_data = loss.data.clone()
stats = self._stats(loss_data, scores.data, target.view(-1).data)
return loss, stats
def filter_shard_state(state, requires_grad=True, volatile=False):
for k, v in state.items():
if v is not None:
if isinstance(v, Variable) and v.requires_grad:
v = Variable(v.data, requires_grad=requires_grad,
volatile=volatile)
yield k, v
def shards(state, shard_size, eval=False, retain_graph=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval:
yield filter_shard_state(state, False, True)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, torch.split(v, shard_size))
for k, v in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = ((state[k], v.grad.data) for k, v in non_none.items()
if isinstance(v, Variable) and v.grad is not None)
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads, retain_graph=retain_graph)
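# Minimal sketch (illustrative addition, assuming the old-style torch
# Variable API used throughout this file): in eval mode, shards() yields a
# single filtered state and triggers no backward pass.
def _demo_shards_eval():
    state = {
        "output": Variable(torch.randn(6, 2, 4)),
        "target": Variable(torch.zeros(6, 2).long()),
    }
    for shard in shards(state, shard_size=2, eval=True):
        shard = dict(shard)
        assert set(shard) == {"output", "target"}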
| 38.454861 | 80 | 0.610835 |
4a1cfc11a07055a725ff0a2720e75a79eb3968e9 | 8,064 | py | Python | supports/integration-test/util.py | plusplusjiajia/SSM | 1a5b3c476250c1db2d636a15d00be208634fac9a | ["Apache-2.0"] | null | null | null | supports/integration-test/util.py | plusplusjiajia/SSM | 1a5b3c476250c1db2d636a15d00be208634fac9a | ["Apache-2.0"] | null | null | null | supports/integration-test/util.py | plusplusjiajia/SSM | 1a5b3c476250c1db2d636a15d00be208634fac9a | ["Apache-2.0"] | null | null | null |
import requests
import random
import time
import uuid
# Server info
BASE_URL = "http://localhost:7045"
# Restapi root
REST_ROOT = BASE_URL + "/smart/api/v1"
RULE_ROOT = REST_ROOT + "/rules"
CMDLET_ROOT = REST_ROOT + "/cmdlets"
ACTION_ROOT = REST_ROOT + "/actions"
CLUSTER_ROOT = REST_ROOT + "/cluster"
SYSTEM_ROOT = REST_ROOT + "/system"
CONF_ROOT = REST_ROOT + "/conf"
PRIMARY_ROOT = REST_ROOT + "/primary"
MOVE_TYPE = ["onessd",
"allssd",
"archive"]
TEST_DIR = "/test/"
def random_file_path():
return TEST_DIR + random_string()
def random_string():
return str(uuid.uuid4())
def check_post_resp(resp):
if resp.status_code != 201:
raise IOError("Post fails")
def check_get_resp(resp):
if resp.status_code != 200:
raise IOError("Get fails")
def all_success(cmds):
for cmd in cmds:
try:
if cmd is None or cmd['state'] == "FAILED":
return False
except Exception:
return False
return True
def move_cmdlet(mover_type, file_path):
return submit_cmdlet(mover_type + " -file " + file_path)
def submit_cmdlet(cmdlet_str):
"""
submit cmdlet then return cid
"""
resp = requests.post(CMDLET_ROOT + "/submit", data=cmdlet_str)
return resp.json()["body"]
def get_cmdlet(cid):
"""
get cmdlet json with cid
"""
resp = requests.get(CMDLET_ROOT + "/" + str(cid) + "/info")
return resp.json()["body"]
def wait_for_cmdlet(cid, period=300):
"""
    wait at most `period` seconds (default 300) for the cmdlet to finish
"""
timeout = time.time() + period
    while True:
        cmdlet = get_cmdlet(cid)
        if cmdlet['state'] in ("PENDING", "EXECUTING"):
            time.sleep(1)
        elif cmdlet['state'] in ("DONE", "FAILED"):
            return cmdlet
        if time.time() >= timeout:
            return None
def wait_for_cmdlets(cids, period=300):
failed_cids = []
while len(cids) != 0:
cmd = wait_for_cmdlet(cids[0])
if cmd is None or cmd['state'] == 'FAILED':
failed_cids.append(cids[0])
cids.pop(0)
return failed_cids
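def _demo_submit_and_wait():
    """
    Hedged end-to-end sketch (assumes a running SSM server at BASE_URL and
    uses helpers defined elsewhere in this module): write a random file,
    then block until the cmdlet finishes.
    """
    cid = submit_cmdlet("write -file " + random_file_path() + " -length 1024")
    cmdlet = wait_for_cmdlet(cid)
    return cmdlet is not None and cmdlet["state"] == "DONE"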
def get_rule(rid):
resp = requests.get(RULE_ROOT + "/" + str(rid) + "/info",
data=str(rid))
return resp.json()["body"]
def list_rule():
resp = requests.get(RULE_ROOT + "/list")
return resp.json()["body"]
def submit_rule(rule_str):
resp = requests.post(RULE_ROOT + "/add", data={'ruleText': rule_str})
return resp.json()["body"]
def delete_rule(rid):
requests.post(RULE_ROOT + "/" + str(rid) + "/delete")
def start_rule(rid):
requests.post(RULE_ROOT + "/" + str(rid) + "/start")
def stop_rule(rid):
requests.post(RULE_ROOT + "/" + str(rid) + "/stop")
def get_action(aid):
resp = requests.get(ACTION_ROOT + "/" + str(aid) + "/info")
return resp.json()["body"]
def list_action():
resp = requests.get(ACTION_ROOT + "/list")
return resp.json()["body"]
def read_file(file_path):
cmdlet_str = "read -file " + file_path
return submit_cmdlet(cmdlet_str)
def create_file(file_path, length=1024):
cmdlet_str = "write -file " + file_path + " -length " + str(length)
return submit_cmdlet(cmdlet_str)
def create_random_file(length=1024):
"""
create a random file in /test/
"""
file_path = TEST_DIR + random_string()
cmdlet_str = "write -file " + \
file_path + " -length " + str(length)
wait_for_cmdlet(submit_cmdlet(cmdlet_str))
return file_path
def create_random_file_parallel(dest_path=TEST_DIR, length=1024):
    """
    create a random file in dest_path (defaults to /test/)
    """
    file_path = dest_path + random_string()
    cmdlet_str = "write -file " + \
        file_path + " -length " + str(length)
    return file_path, submit_cmdlet(cmdlet_str)
def create_random_file_parallel_return_file_name(dest_path, length=1024):
"""
create a random file in /dest_path/
"""
file_name = random_string()
file_path = dest_path + file_name
cmdlet_str = "write -file " + \
file_path + " -length " + str(length)
submit_cmdlet(cmdlet_str)
return file_name
def copy_file_to_S3(file_path, dest_path):
"""
move file to S3
"""
cmdlet_str = "copy2s3 -file " + \
file_path + " -dest " + dest_path
return submit_cmdlet(cmdlet_str)
def delete_file(file_path, recursive=True):
cmdlet_str = "delete -file " + file_path
return submit_cmdlet(cmdlet_str)
def append_file(file_path, length=1024):
"""
append random content to file_path
"""
cmdlet_str = "append -file " + file_path + " -length " + str(length)
return submit_cmdlet(cmdlet_str)
def random_move_test_file(file_path):
index = random.randrange(len(MOVE_TYPE))
resp = requests.post(CMDLET_ROOT + "/submit",
data=MOVE_TYPE[index] + " -file " + file_path)
return resp.json()["body"]
def check_storage(file_path):
resp = requests.post(CMDLET_ROOT + "/submit",
data="checkstorage -file " + file_path)
cid = resp.json()["body"]
cmdlet = wait_for_cmdlet(cid)
aid = cmdlet['aids']
return get_action(aid[0])
def move_random_file(mover_type, length):
file_path = TEST_DIR + random_string()
cmd_create = wait_for_cmdlet(create_file(file_path, length))
cmd_move = wait_for_cmdlet(move_cmdlet(mover_type, file_path))
return cmd_create, cmd_move
def move_random_file_twice(mover_type_1, mover_type_2, length):
file_path = TEST_DIR + random_string()
cmd_create = wait_for_cmdlet(create_file(file_path, length))
cmd_move_1 = wait_for_cmdlet(move_cmdlet(mover_type_1, file_path))
cmd_move_2 = wait_for_cmdlet(move_cmdlet(mover_type_2, file_path))
return cmd_create, cmd_move_1, cmd_move_2
def move_randomly(file_path):
"""
Randomly move blocks of a given file
"""
index = random.randrange(len(MOVE_TYPE))
return submit_cmdlet(MOVE_TYPE[index] + " -file " + file_path)
def continually_move(moves, file_path):
cmds = []
for move in moves:
cmds.append(wait_for_cmdlet(move_cmdlet(move, file_path)))
return cmds
def random_move_list(length=10):
"""
    Generate a random move list of the given length.
    Note that neighboring moves must be different.
"""
moves = []
last_move = -1
while length > 0:
random_index = random.randrange(len(MOVE_TYPE))
if random_index != last_move:
last_move = random_index
moves.append(MOVE_TYPE[random_index])
length -= 1
return moves
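# Quick self-check (illustrative, needs no server): neighboring entries of a
# random_move_list() are always different.
def _check_random_move_list():
    moves = random_move_list(20)
    assert all(a != b for a, b in zip(moves, moves[1:]))
    return moves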
def random_move_list_totally(length=10):
"""
    Generate a random move list of the given length.
"""
moves = []
while length > 0:
random_index = random.randrange(len(MOVE_TYPE))
moves.append(MOVE_TYPE[random_index])
length -= 1
return moves
def move_random_task_list(file_size):
"""
    Generate a random file with the given size and
    a random move list (neighboring moves differ).
    Then move this file continually.
"""
file_path = random_file_path()
wait_for_cmdlet(create_file(file_path, file_size))
# check_storage(file_path)
# use a list to save the result
# record the last task
moves = random_move_list(random.randrange(10, 21))
    return continually_move(moves, file_path)
def move_random_task_list_totally(file_size):
"""
    Generate a random file with the given size and
    a random move list.
    Then move this file continually.
"""
file_path = random_file_path()
wait_for_cmdlet(create_file(file_path, file_size))
# check_storage(file_path)
# use a list to save the result
# record the last task
moves = random_move_list_totally(random.randrange(10, 21))
    return continually_move(moves, file_path)
| 25.846154 | 74 | 0.646205 |
4a1cfc1dcc70242836a5aba8afbdc9cf2afc462f | 163 | py | Python | code_all/day09/exercise03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | ["MIT"] | null | null | null | code_all/day09/exercise03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | ["MIT"] | null | null | null | code_all/day09/exercise03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | ["MIT"] | null | null | null |
"""
"""
data01 = 10
def func01(p):
global data01
data01 += 1
p += 1
data02 = 10
func01(data02)
func01(data02)
print(data01) # 12
print(data02) # 10
| 10.1875 | 18 | 0.595092 |
4a1cfc6f67de59a9ed86806a8128bd9ab49b1bc7 | 16,162 | py | Python | resnet/resnet_train_imagenet.py | arjish/meta-meta-classification | 3e1df26a486094bd9cb394ff99d3c29b587b66c3 | ["MIT"] | 3 | 2021-07-30T23:58:35.000Z | 2021-11-11T02:05:58.000Z | resnet/resnet_train_imagenet.py | arjish/meta-meta-classification | 3e1df26a486094bd9cb394ff99d3c29b587b66c3 | ["MIT"] | null | null | null | resnet/resnet_train_imagenet.py | arjish/meta-meta-classification | 3e1df26a486094bd9cb394ff99d3c29b587b66c3 | ["MIT"] | 1 | 2021-09-13T10:12:16.000Z | 2021-09-13T10:12:16.000Z |
import argparse
import os
import random
import shutil
import time
import warnings
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import resnet.resnet_v2 as resnet
# model_names = sorted(name for name in models.__dict__
# if name.islower() and not name.startswith("__")
# and callable(models.__dict__[name]))
model_names = sorted(name for name in resnet.__dict__
if name.startswith("resnet")
and callable(resnet.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet152',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet152)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
# if args.pretrained:
# print("=> using pre-trained model '{}'".format(args.arch))
# model = models.__dict__[args.arch](pretrained=True)
# else:
# print("=> creating model '{}'".format(args.arch))
# model = models.__dict__[args.arch]()
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = resnet.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = resnet.__dict__[args.arch]()
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint_imagenet.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best_imagenet.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
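# Illustrative usage (demo helper, not part of the original script):
# AverageMeter keeps a running weighted average, e.g. of per-batch loss.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(2.0, n=4)  # batch of 4 samples with mean value 2.0
    meter.update(4.0, n=4)  # batch of 4 samples with mean value 4.0
    assert meter.avg == 3.0 and meter.count == 8
    return meter.avg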
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
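# Hedged sanity check (illustrative only): with logits that rank the true
# class first for two of three samples, top-1 accuracy is 200/3 percent.
def _demo_accuracy():
    output = torch.tensor([[0.1, 0.9],
                           [0.8, 0.2],
                           [0.3, 0.7]])
    target = torch.tensor([1, 0, 0])
    (top1,) = accuracy(output, target, topk=(1,))
    assert abs(top1.item() - 200.0 / 3) < 1e-4
    return top1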
if __name__ == '__main__':
main()
| 38.850962 | 91 | 0.602524 |
4a1cfd367375352004d36fd7a31af2463a4c7fba | 5,421 | py | Python | cfm/cli/options.py | bullocke/NRT-CCDC | 49785a28960fd3c6e473c88ae36d062b4d498e23 | ["MIT"] | 5 | 2020-04-08T22:30:23.000Z | 2021-12-15T12:49:12.000Z | cfm/cli/options.py | bullocke/NRT-CCDC | 49785a28960fd3c6e473c88ae36d062b4d498e23 | ["MIT"] | null | null | null | cfm/cli/options.py | bullocke/NRT-CCDC | 49785a28960fd3c6e473c88ae36d062b4d498e23 | ["MIT"] | 6 | 2019-07-25T02:27:28.000Z | 2022-03-02T00:28:07.000Z |
""" YATSM command line interface """
from datetime import datetime as dt
import os
import click
# CLI VALIDATORS
def valid_band(ctx, param, value):
""" Check image band validity (band >= 1)"""
    try:
        band = int(value)
        assert band >= 1
    except (TypeError, ValueError, AssertionError):
        raise click.BadParameter('Band must be an integer >= 1')
return band
# CLI ARGUMENTS
arg_config_file = click.argument(
'config',
nargs=1,
type=click.Path(exists=True, readable=True,
dir_okay=False, resolve_path=True),
metavar='<config>')
arg_output = click.argument(
'output',
metavar='<output>',
type=click.Path(writable=True, dir_okay=False,
resolve_path=True))
arg_mon_csv = click.argument(
'mon_csv',
metavar='<mon_csv>',
type=click.Path(writable=True, dir_okay=False,
resolve_path=True))
arg_total_jobs = click.argument(
'total_jobs',
nargs=1,
type=click.INT,
metavar='<total_jobs>')
def arg_date(var='date', metavar='<date>', date_frmt_key='date_frmt'):
def _arg_date(f):
def callback(ctx, param, value):
try:
value = dt.strptime(value, ctx.params[date_frmt_key])
except KeyError:
raise click.ClickException(
'Need to use `date_format_opt` when using `date_arg`')
except ValueError:
raise click.BadParameter(
'Cannot parse {v} to date with format {f}'.format(
v=value, f=ctx.params['date_frmt']))
else:
return value
return click.argument(var, metavar=metavar, callback=callback)(f)
return _arg_date
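# Hedged usage sketch (illustrative command, not part of this module):
# arg_date must be paired with an eager date-format option so its callback
# can read ctx.params['date_frmt'] (see opt_date_format below). The command
# is built inside a factory so the later definition resolves at call time.
def _demo_date_command():
    @click.command()
    @opt_date_format
    @arg_date()
    def info(date, date_frmt):
        click.echo(date.isoformat())
    return info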
def arg_job_number(f):
def callback(ctx, param, value):
        try:
            value = int(value)
        except (TypeError, ValueError):
            raise click.BadParameter('Must specify an integer >= 0')
if value < 0:
raise click.BadParameter('Must specify an integer >= 0')
elif value == 0:
return value
else:
return value - 1
return click.argument('job_number', nargs=1, callback=callback,
metavar='<job_number>')(f)
# CLI OPTIONS
opt_date_format = click.option(
'--date', 'date_frmt',
default='%Y-%m-%d',
metavar='<format>',
show_default=True,
is_eager=True,
help='Date format')
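# Editor's sketch (not part of YATSM): composing `arg_date` with
# `opt_date_format` on a click command; `_show_date` is a hypothetical
# command used only for illustration. Because the --date option is eager,
# its value is already in ctx.params when the argument callback parses.
@click.command()
@opt_date_format
@arg_date()
def _show_date(date, date_frmt):
    """ Echo a parsed date (illustrative only) """
    click.echo(date.isoformat())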
opt_format = click.option(
'-f', '--format', 'gdal_frmt',
default='GTiff',
metavar='<driver>',
show_default=True,
help='Output format driver')
opt_nodata = click.option(
'--ndv', metavar='<NoDataValue>', type=float, default=-9999,
show_default=True, help='Output NoDataValue')
opt_rootdir = click.option(
'--root',
default='./',
metavar='<directory>',
help='Root timeseries directory',
show_default=True,
type=click.Path(exists=True, file_okay=False,
readable=True, resolve_path=True))
def opt_exampleimg(f):
def callback(ctx, param, value):
# Check if file qualifies alone
if os.path.isfile(value):
_value = value
else:
# Check if path relative to root qualifies
_value = os.path.join(ctx.params['root'], value)
if not os.path.isfile(_value):
raise click.BadParameter('Cannot find example image '
'"{f}"'.format(f=value))
if not os.access(_value, os.R_OK):
raise click.BadParameter('Found example image but cannot '
'read from "{f}"'.format(f=_value))
return os.path.abspath(_value)
return click.option('--image', '-i',
default='example_img',
metavar='<image>',
show_default=True,
help='Example timeseries image',
callback=callback)(f)
def opt_resultdir(f):
def callback(ctx, param, value):
# Check if path qualifies alone
if os.path.isdir(value):
_value = value
else:
# Check if path relative to root qualifies
_value = os.path.join(ctx.params['root'], value)
if not os.path.isdir(_value):
raise click.BadParameter('Cannot find result directory '
'"{d}"'.format(d=value))
if not os.access(_value, os.R_OK):
raise click.BadParameter('Found result directory but cannot '
'read from "{d}"'.format(d=_value))
return os.path.abspath(_value)
return click.option('--result', '-r',
default='YATSM',
metavar='<directory>',
show_default=True,
help='Directory of results',
callback=callback)(f)
# CALLBACKS
def callback_dict(ctx, param, value):
""" Call back for dict style arguments (e.g., KEY=VALUE)
"""
if not value:
return {}
else:
d = {}
for val in value:
if '=' not in val:
raise click.BadParameter(
'Must specify {p} as KEY=VALUE ({v} given)'.format(
p=param, v=value))
else:
k, v = val.split('=', 1)
d[k] = v
return d
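# Editor's sketch: expected behavior of callback_dict outside of click;
# passing None for ctx/param is an assumption for illustration.
def _demo_callback_dict():
    parsed = callback_dict(None, 'extra', ('key=value', 'a=b=c'))
    assert parsed == {'key': 'value', 'a': 'b=c'}  # splits on the first '=' only
    return parsed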
| 30.284916 | 76 | 0.539384 |
4a1cfddaadb0b895767b54df2e1bfe4865042327
| 4,971 |
py
|
Python
|
distantbes/proto/proto/build_status_pb2.py
|
antmicro/distant-bes
|
b8e3e40f0b173cbf3bf2d489640c93a42a20c021
|
[
"Apache-2.0"
] | 5 |
2020-09-28T10:21:26.000Z
|
2021-12-02T19:44:04.000Z
|
distantbes/proto/proto/build_status_pb2.py
|
antmicro/distant-bes
|
b8e3e40f0b173cbf3bf2d489640c93a42a20c021
|
[
"Apache-2.0"
] | null | null | null |
distantbes/proto/proto/build_status_pb2.py
|
antmicro/distant-bes
|
b8e3e40f0b173cbf3bf2d489640c93a42a20c021
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/build_status.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/build_status.proto',
package='google.devtools.build.v1',
syntax='proto3',
serialized_options=b'\n\034com.google.devtools.build.v1B\020BuildStatusProtoP\001\370\001\001',
serialized_pb=b'\n\x18proto/build_status.proto\x12\x18google.devtools.build.v1\x1a\x19google/protobuf/any.proto\"\xc6\x02\n\x0b\x42uildStatus\x12<\n\x06result\x18\x01 \x01(\x0e\x32,.google.devtools.build.v1.BuildStatus.Result\x12%\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\"\xd1\x01\n\x06Result\x12\x12\n\x0eUNKNOWN_STATUS\x10\x00\x12\x15\n\x11\x43OMMAND_SUCCEEDED\x10\x01\x12\x12\n\x0e\x43OMMAND_FAILED\x10\x02\x12\x0e\n\nUSER_ERROR\x10\x03\x12\x10\n\x0cSYSTEM_ERROR\x10\x04\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x05\x12 \n\x1cINVOCATION_DEADLINE_EXCEEDED\x10\x06\x12\x1d\n\x19REQUEST_DEADLINE_EXCEEDED\x10\x08\x12\r\n\tCANCELLED\x10\x07\x42\x35\n\x1c\x63om.google.devtools.build.v1B\x10\x42uildStatusProtoP\x01\xf8\x01\x01\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_BUILDSTATUS_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='google.devtools.build.v1.BuildStatus.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_STATUS', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND_SUCCEEDED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND_FAILED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER_ERROR', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYSTEM_ERROR', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_EXHAUSTED', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVOCATION_DEADLINE_EXCEEDED', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REQUEST_DEADLINE_EXCEEDED', index=7, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELLED', index=8, number=7,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=199,
serialized_end=408,
)
_sym_db.RegisterEnumDescriptor(_BUILDSTATUS_RESULT)
_BUILDSTATUS = _descriptor.Descriptor(
name='BuildStatus',
full_name='google.devtools.build.v1.BuildStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='google.devtools.build.v1.BuildStatus.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='google.devtools.build.v1.BuildStatus.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_BUILDSTATUS_RESULT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=82,
serialized_end=408,
)
_BUILDSTATUS.fields_by_name['result'].enum_type = _BUILDSTATUS_RESULT
_BUILDSTATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_BUILDSTATUS_RESULT.containing_type = _BUILDSTATUS
DESCRIPTOR.message_types_by_name['BuildStatus'] = _BUILDSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BuildStatus = _reflection.GeneratedProtocolMessageType('BuildStatus', (_message.Message,), {
'DESCRIPTOR' : _BUILDSTATUS,
'__module__' : 'proto.build_status_pb2'
# @@protoc_insertion_point(class_scope:google.devtools.build.v1.BuildStatus)
})
_sym_db.RegisterMessage(BuildStatus)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
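# Editor's sketch (appended for illustration; the generated module above says
# DO NOT EDIT, so treat this as separate usage documentation): round-tripping
# a BuildStatus message through the protobuf wire format.
def _demo_build_status():
    status = BuildStatus()
    status.result = BuildStatus.COMMAND_SUCCEEDED  # nested enum, value 1
    payload = status.SerializeToString()           # wire-format bytes
    return BuildStatus.FromString(payload).result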
| 37.37594 | 762 | 0.758198 |
4a1cfddf2ee4f1003ec1bb4f50ca7ca73dafbe18
| 6,052 |
py
|
Python
|
RTAscience/timing/time_gammapy3d_binned_fit.py
|
ambra-dipiano/cta-sag-sci
|
f2e238c323d35badd477ce4030069a0097d550bb
|
[
"BSD-3-Clause"
] | 1 |
2021-01-29T15:17:31.000Z
|
2021-01-29T15:17:31.000Z
|
RTAscience/timing/time_gammapy3d_binned_fit.py
|
ambra-dipiano/cta-sag-sci
|
f2e238c323d35badd477ce4030069a0097d550bb
|
[
"BSD-3-Clause"
] | 5 |
2021-03-17T09:10:36.000Z
|
2021-12-21T16:43:15.000Z
|
RTAscience/timing/time_gammapy3d_binned_fit.py
|
ambra-dipiano/cta-sag-sci
|
f2e238c323d35badd477ce4030069a0097d550bb
|
[
"BSD-3-Clause"
] | null | null | null |
# *******************************************************************************
# Copyright (C) 2020 INAF
#
# This software is distributed under the terms of the BSD-3-Clause license
#
# Authors:
# Ambra Di Piano <ambra.dipiano@inaf.it>
# *******************************************************************************
import time
import sys
import os
texp = sys.argv[1]
first = sys.argv[2]
# start timing
t = time.time()
clock0 = time.time()
import astropy.units as u
from astropy.coordinates import SkyCoord
from regions import CircleSkyRegion
from gammapy.analysis import Analysis, AnalysisConfig
from gammapy.data import EventList, GTI, Observation, Observations
from gammapy.irf import load_cta_irfs
from gammapy.modeling import Fit
from gammapy.estimators import ExcessMapEstimator
from gammapy.estimators.utils import find_peaks
from gammapy.modeling.models import PointSpatialModel, PowerLawSpectralModel, SkyModel, FoVBackgroundModel
timport = time.time() - t
print(f'Imports : {timport} s\n')
t = time.time()
rootpath = str(os.path.dirname(os.path.abspath(__file__))).replace('cta-sag-sci/RTAscience/timing', '')
caldb = f'{rootpath}/caldb/data/cta/prod3b-v2/bcf/South_z20_0.5h/irf_file.fits'
irfs = load_cta_irfs(caldb)
filename = f'{rootpath}/DATA/obs/crab/crab_offax_texp{texp}s_n01.fits'
obs_id = 1
print(f'Fits: {filename.replace(rootpath, "")}\n')
tsetup = time.time() - t
print(f'Setup : {tsetup} s\n')
# read phlist
t = time.time()
events = EventList.read(filename, hdu='EVENTS')
# get GTI
gti = GTI.read(filename, hdu='GTI')
# get pointing
pointing = events.pointing_radec
#print('Pointing :', pointing)
# create observation
observation = Observation.create(
pointing=pointing, obs_id=f'{1:02d}', tstart=gti.table['START']*u.s,
tstop=gti.table['STOP']*u.s, irfs=irfs, reference_time=gti.time_ref)
observation._events = events
#print(observation.gti)
observations = Observations()
observations.append(observation)
# fix pointing info
observation.fixed_pointing_info
tobs = time.time() - t
print(f'Create observation : {tobs} s\n')
# configure a 3d analysis
t = time.time()
config_3d = AnalysisConfig()
config_3d.general.log = {'level': 'warning'}
config_3d.observations.datastore = ''
config_3d.observations.obs_file = filename
# reduction type
config_3d.datasets.type = '3d' # Analysis type is 3D
config_3d.datasets.stack = False # We keep track of datasets in all bunches
# geometry of the map for 3d
config_3d.datasets.geom.wcs.skydir = {'lon': pointing.ra, 'lat': pointing.dec, 'frame': 'icrs'}
config_3d.datasets.geom.wcs.fov = {'width': '10 deg', 'height': '10 deg'}
config_3d.datasets.geom.wcs.binsize = '0.02 deg'
# The FoV radius to use for cutouts
config_3d.datasets.geom.selection.offset_max = 2.5 * u.deg
# reconstructed energy axis for the counts map
config_3d.datasets.geom.axes.energy = dict(min= "0.05 TeV", max="20 TeV", nbins=30)
# true energy axis for the IRF maps (should always be wider range and larger nbins)
config_3d.datasets.geom.axes.energy_true = dict(min= "0.03 TeV", max="30 TeV", nbins=40)
# background
config_3d.datasets.background = {'method': 'fov_background', 'exclusion': None}
# safe mask from IRF and max offset
#config_3d.datasets.safe_mask.methods = ['aeff-default', 'offset-max']
# what maps to compute
config_3d.datasets.map_selection = ['counts', 'exposure', 'background', 'psf', 'edisp']
# save the configuration for later and overwrite if already existing
#config_3d.write(filepath + 'tests/prototype3d.yaml', overwrite=True)
tconf = time.time() - t
print(f'Configuration : {tconf} s\n')
#print(config_3d)
# instantiate data reduction passing directly the config object
t = time.time()
analysis_3d = Analysis(config_3d)
# set observation (single - no list)
analysis_3d.observations = observations
# perform data reduction
analysis_3d.get_datasets()
#print(analysis_3d.get_datasets())
tred = time.time() - t
print(f'Data Reduction : {tred} s\n')
# target significance
t = time.time()
target = {'ra': 83.6331, 'dec': 22.0145}
target = SkyCoord(target['ra'], target['dec'], unit='deg', frame='icrs')
target_region = CircleSkyRegion(target.icrs, 0.1 * u.deg)
stats = analysis_3d.datasets.info_table(cumulative=False)
print(stats['sqrt_ts'])
tstat = time.time() - t
print(f'Statistics: {tstat} s\n')
# modelling
t = time.time()
stacked_3d = analysis_3d.datasets.stack_reduce(name="stacked_3d")
spatial_model = PointSpatialModel(lon_0=target.ra, lat_0=target.dec, frame="icrs")
spectral_model = PowerLawSpectralModel(index=2.48, amplitude=2e-12 * u.Unit("1 / (cm2 s TeV)"), reference=1 * u.TeV)
spectral_model.parameters['index'].frozen = True
spatial_model.parameters['lon_0'].frozen = True
spatial_model.parameters['lat_0'].frozen = True
sky_model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model, name="Crab")
bkg_model = FoVBackgroundModel(dataset_name="stacked_3d")
bkg_model.parameters['norm'].frozen = False
stacked_3d.models = [bkg_model, sky_model]
tmodel = time.time() - t
print(f'Modelling: {tmodel} s\n')
# fitting
t = time.time()
fit = Fit([stacked_3d])
result = fit.run()
#print(result.parameters.to_table())
tfit = time.time() - t
print(f'\nFitting : {tfit} s\n')
# flux
t = time.time()
phflux_err = spectral_model.integral_error(0.05 * u.TeV, 20 * u.TeV)
print(f'\nPH-FLUX {phflux_err.value[0]} +/- {phflux_err.value[1]}')
tflux = time.time() - t
print(f'\nFlux : {tflux} s\n')
ttotal = time.time() - clock0
print(f'Total time: {ttotal} s\n')
print('\n\n-----------------------------------------------------\n\n')
logname = f'{rootpath}/DATA/outputs/crab/gammapy3d_binned_fit.csv'
row = f'{texp} {stats["sqrt_ts"][0]} {phflux_err.value[0]} {phflux_err.value[1]} {ttotal} {timport} {tsetup} {tobs} {tconf} {tred} {tstat} {tmodel} {tfit} {tflux}\n'
if first == 'True':
hdr = 'texp sqrt_ts flux flux_err ttotal timport tsetup tobs tconf tred tstat tmodel tfit tflux\n'
log = open(logname, 'w+')
log.write(hdr)
log.write(row)
log.close()
else:
log = open(logname, 'a')
log.write(row)
log.close()
print(row)
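# Editor's sketch: aggregating the timing log written above; pandas is an
# assumption here (the original script does not import it).
def _summarize_timing_log(path=None):
    import pandas as pd
    df = pd.read_csv(path or logname, sep=' ')
    return df[['texp', 'ttotal', 'tfit']].describe()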
| 36.902439 | 158 | 0.708361 |
4a1cfea51f5a13be30e2d5628d5b2f02cdcdbc7e
| 3,343 |
py
|
Python
|
molecule/command/check.py
|
odyssey4me/molecule
|
9f28e0e57b48403f39e0635ededc266afe52408c
|
[
"MIT"
] | 1 |
2020-09-16T10:23:37.000Z
|
2020-09-16T10:23:37.000Z
|
molecule/command/check.py
|
odyssey4me/molecule
|
9f28e0e57b48403f39e0635ededc266afe52408c
|
[
"MIT"
] | 6 |
2020-09-16T10:14:24.000Z
|
2020-09-20T16:01:11.000Z
|
molecule/command/check.py
|
corserp/molecule
|
48921fa43c3c4647a3f835c79290959af945a522
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Check Command Module."""
import os
import click
from molecule import logger
from molecule.command import base
from molecule import util
LOG = logger.get_logger(__name__)
MOLECULE_PARALLEL = os.environ.get("MOLECULE_PARALLEL", False)
class Check(base.Base):
"""
Check Command Class.
.. program:: molecule check
.. option:: molecule check
Target the default scenario.
.. program:: molecule check --scenario-name foo
.. option:: molecule check --scenario-name foo
Targeting a specific scenario.
.. program:: molecule --debug check
.. option:: molecule --debug check
Executing with `debug`.
.. program:: molecule --base-config base.yml check
.. option:: molecule --base-config base.yml check
Executing with a `base-config`.
.. program:: molecule --env-file foo.yml check
.. option:: molecule --env-file foo.yml check
Load an env file to read variables from when rendering
molecule.yml.
.. program:: molecule --parallel check
.. option:: molecule --parallel check
Run in parallelizable mode.
"""
def execute(self):
"""
Execute the actions necessary to perform a `molecule check` and \
returns None.
:return: None
"""
self.print_info()
self._config.provisioner.check()
@base.click_command_ex()
@click.pass_context
@click.option(
"--scenario-name",
"-s",
default=base.MOLECULE_DEFAULT_SCENARIO_NAME,
help="Name of the scenario to target. ({})".format(
base.MOLECULE_DEFAULT_SCENARIO_NAME
),
)
@click.option(
"--parallel/--no-parallel",
default=MOLECULE_PARALLEL,
help="Enable or disable parallel mode. Default is disabled.",
)
def check(ctx, scenario_name, parallel): # pragma: no cover
"""Use the provisioner to perform a Dry-Run (destroy, dependency, create, \
prepare, converge)."""
args = ctx.obj.get("args")
subcommand = base._get_subcommand(__name__)
command_args = {"parallel": parallel, "subcommand": subcommand}
if parallel:
util.validate_parallel_cmd_args(command_args)
base.execute_cmdline_scenarios(scenario_name, args, command_args)
| 29.584071 | 79 | 0.697876 |
4a1cfefd266d4c1fdcdc23af17a6db50a718ba32
| 945 |
py
|
Python
|
acquisition/tomviz/__main__.py
|
alvarosan/tomviz
|
b53ccb0a07bfe7a33c3fb984c28d9b2658faa64b
|
[
"BSD-3-Clause"
] | null | null | null |
acquisition/tomviz/__main__.py
|
alvarosan/tomviz
|
b53ccb0a07bfe7a33c3fb984c28d9b2658faa64b
|
[
"BSD-3-Clause"
] | null | null | null |
acquisition/tomviz/__main__.py
|
alvarosan/tomviz
|
b53ccb0a07bfe7a33c3fb984c28d9b2658faa64b
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import tomviz
from tomviz.acquisition import server
def main():
parser = argparse.ArgumentParser(
description='Tomviz acquisition server.')
parser.add_argument("-a", "--adapter", help="source adapter to install")
parser.add_argument("-i", "--interface",
help="on what interface the server should run")
parser.add_argument("-p", "--port",
help="on what port the server should run")
parser.add_argument("-d", "--debug", help="turn on debug mode",
action='store_true')
args = parser.parse_args()
if args.port:
server.port = args.port
if args.interface:
server.host = args.interface
if args.adapter:
server.adapter = args.adapter
debug = False
if args.debug:
debug = args.debug
tomviz.setupLogger(debug)
server.start(debug)
if __name__ == '__main__':
main()
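# Editor's note: an illustrative invocation built from the parser flags above
# (the adapter name is a hypothetical placeholder, not a real module):
#   python -m tomviz --interface 127.0.0.1 --port 8080 --debug --adapter MyAdapter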
| 27 | 76 | 0.610582 |
4a1d00459158ca7cdff4abeea30793d94278ef86
| 3,664 |
py
|
Python
|
pyunsplash/src/unobject.py
|
mmangione/pyunsplash
|
c79aff3b0800c71b921b2090dac275247d0c88fc
|
[
"MIT"
] | 40 |
2018-05-22T15:50:10.000Z
|
2022-01-31T10:39:05.000Z
|
pyunsplash/src/unobject.py
|
mmangione/pyunsplash
|
c79aff3b0800c71b921b2090dac275247d0c88fc
|
[
"MIT"
] | 13 |
2017-09-13T03:37:42.000Z
|
2020-10-20T16:04:08.000Z
|
pyunsplash/src/unobject.py
|
mmangione/pyunsplash
|
c79aff3b0800c71b921b2090dac275247d0c88fc
|
[
"MIT"
] | 17 |
2017-01-31T17:30:19.000Z
|
2022-02-09T21:59:41.000Z
|
###############################################################################
# Copyright (c) 2016 Salvatore Ventura <salvoventura@gmail.com>
#
# File: unobject.py
#
# Author: Salvatore Ventura <salvoventura@gmail.com>
# Date: 07 Dec 2016
# Purpose: Base class for Unsplash API objects
#
# Revision: 1
# Comment: What's new in revision 1
# The idea here is that, and object can be created in two ways:
# - by its json body
# the body might be partial, but still the object provides
# shortcut/unified methods to deal with common data extraction
# A reload/refresh command can be issued to fetch data from the
# server, and the body gets updated
# - by its url
# in this case an explicit API call is made to populate body
###############################################################################
from .liblogging import logger
from .rest import Rest
from .settings import API_ROOT
class UnsplashObject(object):
_api_root = API_ROOT
def __init__(self, api_key, source, valid_options=None, **kwargs):
self.api_key = api_key
self.valid_options = valid_options
self.body, self.url, self._agent = None, None, None
self.query_parameters = self._sanitized_query_parameters(kwargs)
self._parse_source(source) # sets self.body, self.url
if not self._agent:
self._agent = Rest(api_key=self.api_key)
def refresh(self):
if not self._agent:
logger.debug('need an agent first')
self._agent = Rest(api_key=self.api_key)
logger.debug('object refresh from url %s', self.url)
r = self._agent.get(self.url, self.query_parameters)
if r.status_code == 200:
logger.debug('status %s: loading object body', r.status_code)
self.body = r.json()
else:
logger.debug('status %s: object body not refreshed', r.status_code)
@property
def id(self):
return self.body.get('id', None)
@property
def links(self):
return self.body.get('links', None)
def _parse_source(self, source):
# guess format based on source type, extract the link to self
if isinstance(source, dict):
logger.debug('Source is a dictionary')
self.body = source
self.url = source.get('links').get('self')
            # TODO: maybe protect and raise appropriate exception in
            # case someone feeds a random dictionary here
        elif isinstance(source, str):
logger.debug('Source is a string')
if source.startswith(self._api_root):
self.url = source
self.refresh()
else:
logger.info('Source is a string, but we did not handle it: %s', source)
raise ValueError('Source is a string, but we did not handle it: %s' % source)
else:
logger.info('Invalid parameter to constructor: %s', source)
raise ValueError('Invalid parameter to constructor: %s' % source)
def _sanitized_query_parameters(self, kwargs):
logger.debug('call _sanitized_query_parameters(%s)', kwargs)
query_params = {}
for key in kwargs:
if self.valid_options and key not in self.valid_options:
logger.info('invalid parameter %s, safely ignoring it', key)
continue
query_params[key] = kwargs[key]
logger.debug(' returning %s', query_params)
return query_params
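# Editor's sketch (not part of the package): constructing an UnsplashObject
# from a dict body; the field values and the 'page' option are assumptions.
def _demo_unsplash_object(api_key):
    body = {'id': 'abc123', 'links': {'self': API_ROOT + '/photos/abc123'}}
    obj = UnsplashObject(api_key, body, valid_options=['page'], page=2)
    return obj.id, obj.query_parameters  # -> ('abc123', {'page': 2})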
| 40.711111 | 112 | 0.578603 |
4a1d010d0e35727228500a6916df36586d449582
| 12,489 |
py
|
Python
|
qa/rpc-tests/p2p-acceptblock.py
|
Masternode/mastercoin
|
6f1a32f3bab23f3376b139513c12bc1e6fc03169
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-acceptblock.py
|
Masternode/mastercoin
|
6f1a32f3bab23f3376b139513c12bc1e6fc03169
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-acceptblock.py
|
Masternode/mastercoin
|
6f1a32f3bab23f3376b139513c12bc1e6fc03169
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("MASTERCOIND", "mastercoind"),
help="bitcoind binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = get_mocktime() + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time + 1))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
set_mocktime(get_mocktime() + 2)
set_node_times(self.nodes, get_mocktime())
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print("Unrequested block too far-ahead not processed")
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| 42.624573 | 107 | 0.651373 |
4a1d01cb7b8653a177dc7a774e8cd56fbf432f58
| 1,037 |
py
|
Python
|
quickstarts/docs/api/python/source/azure/azure_vpng_delete_connection.py
|
NetFoundry/mop-samples
|
b0a3a9a6415a9f9cf3fda526a1e70450f5fe2ec6
|
[
"Apache-2.0"
] | 1 |
2020-05-13T19:30:17.000Z
|
2020-05-13T19:30:17.000Z
|
quickstarts/docs/api/python/source/azure/azure_vpng_delete_connection.py
|
NetFoundry/mop-samples
|
b0a3a9a6415a9f9cf3fda526a1e70450f5fe2ec6
|
[
"Apache-2.0"
] | 3 |
2020-05-22T17:41:46.000Z
|
2021-04-17T02:29:45.000Z
|
quickstarts/docs/api/python/source/azure/azure_vpng_delete_connection.py
|
NetFoundry/mop-samples
|
b0a3a9a6415a9f9cf3fda526a1e70450f5fe2ec6
|
[
"Apache-2.0"
] | 1 |
2020-05-13T19:31:05.000Z
|
2020-05-13T19:31:05.000Z
|
#!/usr/bin/python3
"""Disconnect VPN Site to VPN Gateway."""
import os
from azure.mgmt.network import NetworkManagementClient
from azure.common.credentials import ServicePrincipalCredentials
# setup Azure Login Credentials from Environmental Variables
credentials = ServicePrincipalCredentials(
client_id=os.environ.get('ARM_CLIENT_ID'),
secret=os.environ.get('ARM_CLIENT_SECRET'),
tenant=os.environ.get('ARM_TENANT_ID')
)
# Connect to Azure APIs and get session details
network_client = NetworkManagementClient(credentials, os.environ.get('ARM_SUBSCRIPTION_ID'))
# Delete VPN Site Connection to VPNG
async_vpn_site_connection_deletion = network_client.vpn_connections.delete(
os.environ.get('GROUP_NAME'),
os.environ.get('VPNG_NAME'),
'CONNECTION_' + os.environ.get('VPN_SITE_NAME'),
custom_headers=None,
raw=False,
polling=True
)
async_vpn_site_connection_deletion.wait()
print(async_vpn_site_connection_deletion.result())
print('VPN Site Connection to VPNG Deleted')
| 35.758621 | 93 | 0.768563 |
4a1d043d15ec9bd59a0663c526fb33c6a2a2031d
| 14,132 |
py
|
Python
|
scale/plot.py
|
kridsadakorn/SCALE
|
1fcb9ddec1cfe258c3a6507c3bea551de41a2ee4
|
[
"MIT"
] | null | null | null |
scale/plot.py
|
kridsadakorn/SCALE
|
1fcb9ddec1cfe258c3a6507c3bea551de41a2ee4
|
[
"MIT"
] | null | null | null |
scale/plot.py
|
kridsadakorn/SCALE
|
1fcb9ddec1cfe258c3a6507c3bea551de41a2ee4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
# Author: Xiong Lei
# Created Time : Mon 09 Apr 2018 07:36:48 PM CST
# File Name: plotting.py
# Description:
"""
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import seaborn as sns
# import os
# plt.rcParams['savefig.dpi'] = 300
# plt.rcParams['figure.dpi'] = 300
def sort_by_classes(X, y, classes):
if classes is None:
classes = np.unique(y)
index = []
for c in classes:
ind = np.where(y==c)[0]
index.append(ind)
index = np.concatenate(index)
X = X.iloc[:, index]
y = y[index]
return X, y, classes, index
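# Editor's sketch (illustrative only): sort_by_classes regroups the sample
# columns of X so each class is contiguous; the toy values are assumptions.
def _demo_sort_by_classes():
    X = pd.DataFrame(np.arange(8).reshape(2, 4))  # 2 features x 4 samples
    y = np.array(['b', 'a', 'b', 'a'])
    X_sorted, y_sorted, classes, index = sort_by_classes(X, y, None)
    return y_sorted.tolist(), index.tolist()  # ['a', 'a', 'b', 'b'], [1, 3, 0, 2]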
def plot_confusion_matrix(cm, x_classes=None, y_classes=None,
normalize=False,
title='',
cmap=plt.cm.Blues,
figsize=(4,4),
mark=True,
save=None,
rotation=45,
show_cbar=True,
show_xticks=True,
show_yticks=True,
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Params:
cm: confusion matrix, MxN
x_classes: N
y_classes: M
"""
import itertools
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
if normalize:
cm = cm.astype('float') / cm.sum(axis=0)[np.newaxis, :]
fig = plt.figure(figsize=figsize)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
x_tick_marks = np.arange(len(x_classes))
y_tick_marks = np.arange(len(y_classes))
plt.xticks(x_tick_marks, x_classes, rotation=rotation, ha='right')
plt.yticks(y_tick_marks, y_classes)
ax=plt.gca()
if not show_xticks:
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_xaxis().set_ticklabels([])
if not show_yticks:
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_yaxis().set_ticklabels([])
else:
plt.ylabel('Predicted Cluster')
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
if mark:
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if cm[i, j] > 0.1:
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
if show_cbar:
plt.colorbar(shrink=0.8)
if save:
plt.savefig(save, format='pdf', bbox_inches='tight')
plt.show()
def plot_heatmap(X, y, classes=None, y_pred=None, row_labels=None, colormap=None, row_cluster=False,
cax_title='', xlabel='', ylabel='', yticklabels='', legend_font=10,
show_legend=True, show_cax=True, tick_color='black', ncol=3,
bbox_to_anchor=(0.5, 1.3), position=(0.8, 0.78, .1, .04), return_grid=False,
save=None, **kw):
"""
plot hidden code heatmap with labels
Params:
X: fxn array, n is sample number, f is feature
y: a array of labels for n elements or a list of array
"""
import matplotlib.patches as mpatches # add legend
# if classes is not None:
X, y, classes, index = sort_by_classes(X, y, classes)
# else:
# classes = np.unique(y)
if y_pred is not None:
y_pred = y_pred[index]
classes = list(classes) + list(np.unique(y_pred))
if colormap is None:
colormap = plt.cm.tab20
colors = {c:colormap(i) for i, c in enumerate(classes)}
else:
colors = {c:colormap[i] for i, c in enumerate(classes)}
col_colors = []
col_colors.append([colors[c] for c in y])
col_colors.append([colors[c] for c in y_pred])
else:
if colormap is None:
colormap = plt.cm.tab20
colors = {c:colormap(i) for i, c in enumerate(classes)}
else:
colors = {c:colormap[i] for i, c in enumerate(classes)}
col_colors = [ colors[c] for c in y ]
legend_TN = [mpatches.Patch(color=color, label=c) for c, color in colors.items()]
if row_labels is not None:
row_colors = [ colors[c] for c in row_labels ]
kw.update({'row_colors':row_colors})
kw.update({'col_colors':col_colors})
cbar_kws={"orientation": "horizontal"}
grid = sns.clustermap(X, yticklabels=True,
col_cluster=False,
row_cluster=row_cluster,
cbar_kws=cbar_kws, **kw)
if show_cax:
grid.cax.set_position(position)
grid.cax.tick_params(length=1, labelsize=4, rotation=0)
grid.cax.set_title(cax_title, fontsize=6, y=0.35)
if show_legend:
grid.ax_heatmap.legend(loc='upper center',
bbox_to_anchor=bbox_to_anchor,
handles=legend_TN,
fontsize=legend_font,
frameon=False,
ncol=ncol)
grid.ax_col_colors.tick_params(labelsize=6, length=0, labelcolor='orange')
    if row_cluster and yticklabels != '':
yticklabels = yticklabels[grid.dendrogram_row.reordered_ind]
grid.ax_heatmap.set_xlabel(xlabel)
grid.ax_heatmap.set_ylabel(ylabel, fontsize=8)
grid.ax_heatmap.set_xticklabels('')
grid.ax_heatmap.set_yticklabels(yticklabels, color=tick_color)
grid.ax_heatmap.yaxis.set_label_position('left')
grid.ax_heatmap.tick_params(axis='x', length=0)
grid.ax_heatmap.tick_params(axis='y', labelsize=6, length=0, rotation=0, labelleft=True, labelright=False)
grid.ax_row_dendrogram.set_visible(False)
grid.cax.set_visible(show_cax)
grid.row_color_labels = classes
if save:
plt.savefig(save, format='pdf', bbox_inches='tight')
else:
plt.show()
if return_grid:
return grid
def plot_embedding(X, labels, classes=None, method='tSNE', cmap='tab20', figsize=(4, 4), markersize=4, marker=None,
return_emb=False, save=False, save_emb=False, show_legend=True, show_axis_label=True, **legend_params):
if marker is not None:
X = np.concatenate([X, marker], axis=0)
N = len(labels)
if X.shape[1] != 2:
if method == 'tSNE':
from sklearn.manifold import TSNE
X = TSNE(n_components=2, random_state=124).fit_transform(X)
if method == 'UMAP':
from umap import UMAP
X = UMAP(n_neighbors=30, min_dist=0.1, metric='correlation').fit_transform(X)
if method == 'PCA':
from sklearn.decomposition import PCA
X = PCA(n_components=2, random_state=124).fit_transform(X)
plt.figure(figsize=figsize)
if classes is None:
classes = np.unique(labels)
    # choose a palette automatically when none is given
    if cmap is None:
        if len(classes) <= 10:
            cmap = 'tab10'
        elif len(classes) <= 20:
            cmap = 'tab20'
        else:
            cmap = 'husl'
colors = sns.color_palette(cmap, n_colors=len(classes))
for i, c in enumerate(classes):
plt.scatter(X[:N][labels==c, 0], X[:N][labels==c, 1], s=markersize, color=colors[i], label=c)
if marker is not None:
plt.scatter(X[N:, 0], X[N:, 1], s=10*markersize, color='black', marker='*')
# plt.axis("off")
legend_params_ = {'loc': 'center left',
'bbox_to_anchor':(1.0, 0.45),
'fontsize': 10,
'ncol': 1,
'frameon': False,
'markerscale': 1.5
}
legend_params_.update(**legend_params)
if show_legend:
plt.legend(**legend_params_)
sns.despine(offset=10, trim=True)
if show_axis_label:
plt.xlabel(method+' dim 1', fontsize=12)
plt.ylabel(method+' dim 2', fontsize=12)
if save:
plt.savefig(save, format='pdf', bbox_inches='tight')
else:
plt.show()
if save_emb:
np.savetxt(save_emb, X)
if return_emb:
return X
def corr_heatmap(X, y=None, classes=None,
cmap='RdBu_r',
show_legend=True,
show_cbar=True,
figsize=(5,5),
ncol=3,
distance='pearson',
ticks=None,
save=None,
**kw):
"""
Plot cell-to-cell correlation matrix heatmap
"""
import matplotlib.patches as mpatches # add legend
colormap = plt.cm.tab20
if y is not None:
if classes is None:
classes = np.unique(y)
X, y, classes, index = sort_by_classes(X, y, classes)
colors = {c:colormap(i) for i,c in enumerate(classes)}
col_colors = [ colors[c] for c in y ]
bbox_to_anchor = (0.4, 1.2)
legend_TN = [mpatches.Patch(color=color, label=c) for c,color in colors.items()]
else:
col_colors = None
# else:
# index = np.argsort(ref)
# X = X.iloc[:,index]
# ref = ref[index]
corr = X.corr(method=distance)
cbar_kws={"orientation": "horizontal", "ticks":ticks}
grid = sns.clustermap(corr, cmap=cmap,
col_colors=col_colors,
figsize=figsize,
row_cluster=False,
col_cluster=False,
cbar_kws=cbar_kws,
**kw
)
grid.ax_heatmap.set_xticklabels('')
grid.ax_heatmap.set_yticklabels('')
grid.ax_heatmap.tick_params(axis='x', length=0)
grid.ax_heatmap.tick_params(axis='y', length=0)
if show_legend and (y is not None):
grid.ax_heatmap.legend(loc='upper center',
bbox_to_anchor=bbox_to_anchor,
handles=legend_TN,
fontsize=6,
frameon=False,
ncol=ncol)
if show_cbar:
grid.cax.set_position((0.8, 0.76, .1, .02))
grid.cax.tick_params(length=1, labelsize=4, rotation=0)
grid.cax.set_title(distance, fontsize=6, y=0.8)
else:
grid.cax.set_visible(False)
if save:
plt.savefig(save, format='pdf', bbox_inches='tight')
else:
plt.show()
def feature_specifity(feature, ref, classes, figsize=(6,6), save=None):
"""
    Calculate the feature specificity:
Input:
feature: latent feature
ref: cluster assignments
classes: cluster classes
"""
from scipy.stats import f_oneway
# n_cluster = max(ref) + 1
n_cluster = len(classes)
dim = feature.shape[1] # feature dimension
pvalue_mat = np.zeros((dim, n_cluster))
for i,cluster in enumerate(classes):
for feat in range(dim):
a = feature.iloc[:, feat][ref == cluster]
b = feature.iloc[:, feat][ref != cluster]
pvalue = f_oneway(a,b)[1]
pvalue_mat[feat, i] = pvalue
plt.figure(figsize=figsize)
grid = sns.heatmap(-np.log10(pvalue_mat), cmap='RdBu_r',
vmax=20,
yticklabels=np.arange(10)+1,
xticklabels=classes[:n_cluster],
)
grid.set_ylabel('Feature', fontsize=18)
grid.set_xticklabels(labels=classes[:n_cluster], rotation=45, fontsize=18)
grid.set_yticklabels(labels=np.arange(dim)+1, fontsize=16)
cbar = grid.collections[0].colorbar
cbar.set_label('-log10 (Pvalue)', fontsize=18) #, rotation=0, x=-0.9, y=0)
if save:
plt.savefig(save, format='pdf', bbox_inches='tight')
else:
plt.show()
import os
from .utils import read_labels, reassign_cluster_with_ref
from sklearn.metrics import f1_score, normalized_mutual_info_score, adjusted_rand_score
def lineplot(data, name, title='', cbar=False):
sns.lineplot(x='fraction', y=name, hue='method', data=data, markers=True, style='method', sort=False)
plt.title(title)
if cbar:
plt.legend(loc='right', bbox_to_anchor=(1.25, 0.2), frameon=False)
else:
plt.legend().set_visible(False)
plt.show()
def plot_metrics(path, dataset, ref, fraction):
ARI = []
NMI = []
F1 = []
methods = ['scABC', 'SC3', 'scVI', 'SCALE']
for frac in fraction:
outdir = os.path.join(path, dataset, frac) #;print(outdir)
scABC_pred, _ = read_labels(os.path.join(outdir, 'scABC_predict.txt'))
if os.path.isfile(os.path.join(outdir, 'SC3_predict.txt')):
SC3_pred, _ = read_labels(os.path.join(outdir, 'SC3_predict.txt'))
else:
SC3_pred = None
scVI_pred, _ = read_labels(os.path.join(outdir, 'scVI_predict.txt'))
scale_pred, pred_classes = read_labels(os.path.join(outdir, 'cluster_assignments.txt'))
ari = []
nmi = []
f1 = []
for pred, method in zip([scABC_pred, SC3_pred, scVI_pred, scale_pred], methods):
if pred is None:
ari.append(0)
nmi.append(0)
f1.append(0)
else:
pred = reassign_cluster_with_ref(pred, ref)
ari.append(adjusted_rand_score(ref, pred))
nmi.append(normalized_mutual_info_score(ref, pred))
f1.append(f1_score(ref, pred, average='micro'))
ARI.append(ari)
NMI.append(nmi)
F1.append(f1)
fraction = [ frac.replace('corrupt_', '') for frac in fraction]
ARI = pd.Series(np.concatenate(ARI, axis=0))
NMI = pd.Series(np.concatenate(NMI, axis=0))
F1 = pd.Series(np.concatenate(F1, axis=0))
M = pd.Series(methods * len(fraction))
F = pd.Series(np.concatenate([[i]*len(methods) for i in fraction]))
metrics = pd.concat([ARI, NMI, F1, M, F], axis=1)
metrics.columns = ['ARI', 'NMI', 'F1', 'method', 'fraction']
lineplot(metrics, 'ARI', dataset, False)
lineplot(metrics, 'NMI', dataset, False)
lineplot(metrics, 'F1', dataset, True)
| 33.971154 | 122 | 0.574016 |
4a1d061153f98b6da38154d405565fcc81e1f36f
| 2,268 |
py
|
Python
|
tensorflow/tensorboard/backend/json_util_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 22 |
2017-06-26T01:27:45.000Z
|
2021-06-23T10:00:31.000Z
|
tensorflow/tensorboard/backend/json_util_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 7 |
2017-07-13T09:40:59.000Z
|
2019-04-08T22:46:51.000Z
|
tensorflow/tensorboard/backend/json_util_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 38 |
2017-04-28T04:15:48.000Z
|
2019-09-28T05:11:46.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tensorboard.backend import json_util
_INFINITY = float('inf')
class FloatWrapperTest(googletest.TestCase):
def _assertWrapsAs(self, to_wrap, expected):
"""Asserts that |to_wrap| becomes |expected| when wrapped."""
actual = json_util.Cleanse(to_wrap)
for a, e in zip(actual, expected):
self.assertEqual(e, a)
def testWrapsPrimitives(self):
self._assertWrapsAs(_INFINITY, 'Infinity')
self._assertWrapsAs(-_INFINITY, '-Infinity')
self._assertWrapsAs(float('nan'), 'NaN')
def testWrapsObjectValues(self):
self._assertWrapsAs({'x': _INFINITY}, {'x': 'Infinity'})
def testWrapsObjectKeys(self):
self._assertWrapsAs({_INFINITY: 'foo'}, {'Infinity': 'foo'})
def testWrapsInListsAndTuples(self):
self._assertWrapsAs([_INFINITY], ['Infinity'])
# map() returns a list even if the argument is a tuple.
self._assertWrapsAs((_INFINITY,), ['Infinity',])
def testWrapsRecursively(self):
self._assertWrapsAs({'x': [_INFINITY]}, {'x': ['Infinity']})
def testTuple_turnsIntoList(self):
self.assertEqual(json_util.Cleanse(('a', 'b')), ['a', 'b'])
def testSet_turnsIntoSortedList(self):
self.assertEqual(json_util.Cleanse(set(['b', 'a'])), ['a', 'b'])
def testByteString_turnsIntoUnicodeString(self):
    self.assertEqual(json_util.Cleanse(b'\xc2\xa3'), u'\u00a3')  # £ (pound sterling)
if __name__ == '__main__':
googletest.main()
| 34.892308 | 80 | 0.701058 |
4a1d072f07b2fc956b6cbd68c93ba3caac14ff1a
| 3,305 |
py
|
Python
|
enaml/qt/qt_date_selector.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 26 |
2016-04-01T18:49:31.000Z
|
2020-07-21T22:19:46.000Z
|
enaml/qt/qt_date_selector.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 29 |
2016-02-22T17:40:55.000Z
|
2018-08-21T18:18:36.000Z
|
enaml/qt/qt_date_selector.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 4 |
2015-01-27T01:56:14.000Z
|
2021-02-23T07:21:20.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.date_selector import ProxyDateSelector
from .QtGui import QDateEdit
from .qt_bounded_date import QtBoundedDate, CHANGED_GUARD
class QtDateSelector(QtBoundedDate, ProxyDateSelector):
""" A Qt implementation of an Enaml ProxyDateSelector.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QDateEdit)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the QDateEdit widget.
"""
self.widget = QDateEdit(self.parent_widget())
def init_widget(self):
""" Initialize the widget.
"""
super(QtDateSelector, self).init_widget()
d = self.declaration
self.set_date_format(d.date_format)
self.set_calendar_popup(d.calendar_popup)
self.widget.dateChanged.connect(self.on_date_changed)
#--------------------------------------------------------------------------
# Abstract API Implementation
#--------------------------------------------------------------------------
def get_date(self):
""" Return the current date in the control.
Returns
-------
result : date
The current control date as a date object.
"""
return self.widget.date().toPython()
def set_minimum(self, date):
""" Set the widget's minimum date.
Parameters
----------
date : date
The date object to use for setting the minimum date.
"""
self.widget.setMinimumDate(date)
def set_maximum(self, date):
""" Set the widget's maximum date.
Parameters
----------
date : date
The date object to use for setting the maximum date.
"""
self.widget.setMaximumDate(date)
def set_date(self, date):
""" Set the widget's current date.
Parameters
----------
date : date
The date object to use for setting the date.
"""
self._guard |= CHANGED_GUARD
try:
self.widget.setDate(date)
finally:
self._guard &= ~CHANGED_GUARD
def set_date_format(self, format):
""" Set the widget's date format.
Parameters
----------
format : string
A Python time formatting string.
"""
# XXX make sure Python's and Qt's format strings are the
# same, or convert between the two.
self.widget.setDisplayFormat(format)
def set_calendar_popup(self, popup):
""" Set whether a calendar popup is available on the widget.
Parameters
----------
popup : bool
Whether the calendar popup is enabled.
"""
self.widget.setCalendarPopup(popup)
| 28.247863 | 79 | 0.512557 |
4a1d089742717a8f98faf63c0c97dd43984dedd4
| 1,717 |
py
|
Python
|
onnx/backend/test/case/node/convinteger.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 12,820 |
2017-09-07T07:00:24.000Z
|
2022-03-31T14:41:57.000Z
|
onnx/backend/test/case/node/convinteger.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 3,213 |
2017-09-07T17:48:17.000Z
|
2022-03-31T19:44:57.000Z
|
onnx/backend/test/case/node/convinteger.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 2,922 |
2017-09-07T07:46:00.000Z
|
2022-03-31T15:55:24.000Z
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ConvInteger(Base):
@staticmethod
def export_without_padding(): # type: () -> None
x = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.uint8).reshape((1, 1, 3, 3))
x_zero_point = np.uint8(1)
w = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2))
y = np.array([12, 16, 24, 28]).astype(np.int32).reshape(1, 1, 2, 2)
# ConvInteger without padding
convinteger_node = onnx.helper.make_node('ConvInteger',
inputs=['x', 'w', 'x_zero_point'],
outputs=['y'])
expect(convinteger_node, inputs=[x, w, x_zero_point], outputs=[y],
name='test_convinteger_without_padding')
@staticmethod
def export_with_padding(): # type: () -> None
x = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.uint8).reshape((1, 1, 3, 3))
x_zero_point = np.uint8(1)
w = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2))
y = np.array([1, 3, 5, 3, 5, 12, 16, 9, 11, 24, 28, 15, 7, 15, 17, 9]).astype(np.int32).reshape((1, 1, 4, 4))
# ConvInteger with padding
convinteger_node_with_padding = onnx.helper.make_node('ConvInteger',
inputs=['x', 'w', 'x_zero_point'],
outputs=['y'],
pads=[1, 1, 1, 1],)
expect(convinteger_node_with_padding, inputs=[x, w, x_zero_point], outputs=[y],
name='test_convinteger_with_padding')
| 34.34 | 117 | 0.598719 |
4a1d08b8797a0d581ed34a96a359965b8fbaf54e
| 7,595 |
py
|
Python
|
dgm4nlp/tf/tri.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
dgm4nlp/tf/tri.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
dgm4nlp/tf/tri.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def fill_triangular(x, upper=False, name=None):
"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,
b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(np.sqrt(0.25 + 2. * m) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
For comparison, a pure numpy version of this function can be found in
`util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with ops.name_scope(name, "fill_triangular", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.shape.with_rank_at_least(1)[-1].value is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape[-1].value)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = array_ops.shape(x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = math_ops.cast(
math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
dtype=dtypes.int32)
static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
[None, None])
# We now concatenate the "tail" of `x` to `x` (and reverse one of them).
#
# We do this based on the insight that the input `x` provides `ceil(n/2)`
# rows of an `n x n` matrix, some of which will get zeroed out being on the
# wrong side of the diagonal. The first row will not get zeroed out at all,
# and we need `floor(n/2)` more rows, so the first is what we omit from
# `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
# rows provided by a reversed tail, it is exactly the other set of elements
# of the reversed tail which will be zeroed out for being on the wrong side
    # of the diagonal further up/down the matrix. And, in doing so, we've filled
    # the triangular matrix in a clockwise spiral pattern. Neat!
#
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
if upper:
x_list = [x, array_ops.reverse(x[..., n:], axis=[-1])]
else:
x_list = [x[..., n:], array_ops.reverse(x, axis=[-1])]
new_shape = (
static_final_shape.as_list()
if static_final_shape.is_fully_defined()
else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
x = array_ops.matrix_band_part(
x,
num_lower=(0 if upper else -1),
num_upper=(-1 if upper else 0))
x.set_shape(static_final_shape)
return x
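# Minimal usage sketch (illustrative only, not part of the original module;
# assumes the surrounding TF graph/session context):
#
#   vec = ops.convert_to_tensor([1., 2., 3., 4., 5., 6.])
#   lower = fill_triangular(vec)              # 3x3 lower-triangular matrix
#   upper = fill_triangular(vec, upper=True)  # 3x3 upper-triangular matrix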
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
| 40.614973 | 80 | 0.604872 |
4a1d08ed80d4dc6149ac9cd752adbc47d052d265
| 1,502 |
py
|
Python
|
facial_landmarks/app.py
|
alwaysai/facial_landmarks_with_dlib
|
1eb3a4384cc3304c53fe705fc06cba31972ed4e8
|
[
"Apache-2.0"
] | 4 |
2020-09-03T08:44:22.000Z
|
2020-10-14T18:57:43.000Z
|
facial_landmarks/app.py
|
alwaysai/facial_landmarks_with_dlib
|
1eb3a4384cc3304c53fe705fc06cba31972ed4e8
|
[
"Apache-2.0"
] | null | null | null |
facial_landmarks/app.py
|
alwaysai/facial_landmarks_with_dlib
|
1eb3a4384cc3304c53fe705fc06cba31972ed4e8
|
[
"Apache-2.0"
] | 2 |
2020-09-05T08:26:07.000Z
|
2020-09-24T10:33:22.000Z
|
import edgeiq
import facial_landmarks
import cv2
def main():
text = "Facial Landmarks with Dlib"
try:
shape_predictor = "shape_predictor_68_face_landmarks.dat"
dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)
image_paths = sorted(list(edgeiq.list_images("images/")))
print("Images:\n{}\n".format(image_paths))
with edgeiq.Streamer(queue_depth=len(image_paths), inter_msg_time=3) as streamer:
for image_path in image_paths:
image = cv2.imread(image_path)
image, gray_image = dlib_flm.image_preprocessor(image)
facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_image)
                # Loop to mark up the image
                for (i, rectangle) in enumerate(rectangles):
(x, y, w, h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(rectangle)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(
image, "Face #{}".format(i + 1), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
for facial_coordinate in facial_coordinates:
for (x, y) in facial_coordinate:
cv2.circle(image, (x, y), 3, (255, 0, 0), -1)
streamer.send_data(image, text)
streamer.wait()
finally:
print("Program Ending")
if __name__ == "__main__":
main()
| 36.634146 | 89 | 0.565246 |
4a1d0a151751f58ac1d5d62f0e39aae3f98126db
| 10,524 |
py
|
Python
|
dist/ba_data/python/ba/_hooks.py
|
Bartixxx32/Bombsquad-Ballistica-Modded-Server
|
26d36f07a5b96702e4fbdf172c0d66671f1ee0bd
|
[
"MIT"
] | 21 |
2021-03-29T05:49:35.000Z
|
2022-03-18T09:02:34.000Z
|
dist/ba_data/python/ba/_hooks.py
|
Bartixxx32/Bombsquad-Ballistica-Modded-Server
|
26d36f07a5b96702e4fbdf172c0d66671f1ee0bd
|
[
"MIT"
] | 15 |
2021-04-10T11:08:09.000Z
|
2022-03-22T07:48:58.000Z
|
dist/ba_data/python/ba/_hooks.py
|
Bartixxx32/Bombsquad-Ballistica-Modded-Server
|
26d36f07a5b96702e4fbdf172c0d66671f1ee0bd
|
[
"MIT"
] | 31 |
2021-03-29T05:54:57.000Z
|
2022-03-22T16:58:57.000Z
|
# Released under the MIT License. See LICENSE for details.
#
"""Snippets of code for use by the internal C++ layer.
History: originally I would dynamically compile/eval bits of Python text
from within C++ code, but the major downside there was that none of that was
type-checked so if names or arguments changed I would never catch code breakage
until the code was next run. By defining all snippets I use here and then
capturing references to them all at launch I can immediately verify everything
I'm looking for exists and pylint/mypy can do their magic on this file.
"""
# (most of these are self-explanatory)
# pylint: disable=missing-function-docstring
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
from typing import Sequence, Optional, Any
import ba
def finish_bootstrapping() -> None:
"""Do final bootstrapping related bits."""
from ba._asyncio import setup_asyncio
assert _ba.in_game_thread()
# Kick off our asyncio event handling, allowing us to use coroutines
# in our game thread alongside our internal event handling.
setup_asyncio()
# Ok, bootstrapping is done; time to get the show started.
_ba.app.on_app_launch()
def reset_to_main_menu() -> None:
"""Reset the game to the main menu gracefully."""
_ba.app.return_to_main_menu_session_gracefully()
def set_config_fullscreen_on() -> None:
"""The app has set fullscreen on its own and we should note it."""
_ba.app.config['Fullscreen'] = True
_ba.app.config.commit()
def set_config_fullscreen_off() -> None:
"""The app has set fullscreen on its own and we should note it."""
_ba.app.config['Fullscreen'] = False
_ba.app.config.commit()
def not_signed_in_screen_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='notSignedInErrorText'))
def connecting_to_party_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='internal.connectingToPartyText'),
color=(1, 1, 1))
def rejecting_invite_already_in_party_message() -> None:
from ba._language import Lstr
_ba.screenmessage(
Lstr(resource='internal.rejectingInviteAlreadyInPartyText'),
color=(1, 0.5, 0))
def connection_failed_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='internal.connectionFailedText'),
color=(1, 0.5, 0))
def temporarily_unavailable_message() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(
Lstr(resource='getTicketsWindow.unavailableTemporarilyText'),
color=(1, 0, 0))
def in_progress_message() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(Lstr(resource='getTicketsWindow.inProgressText'),
color=(1, 0, 0))
def error_message() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(Lstr(resource='errorText'), color=(1, 0, 0))
def purchase_not_valid_error() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(Lstr(resource='store.purchaseNotValidError',
subs=[('${EMAIL}', 'support@froemling.net')]),
color=(1, 0, 0))
def purchase_already_in_progress_error() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(Lstr(resource='store.purchaseAlreadyInProgressText'),
color=(1, 0, 0))
def gear_vr_controller_warning() -> None:
from ba._language import Lstr
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(Lstr(resource='usesExternalControllerText'),
color=(1, 0, 0))
def orientation_reset_cb_message() -> None:
from ba._language import Lstr
_ba.screenmessage(
Lstr(resource='internal.vrOrientationResetCardboardText'),
color=(0, 1, 0))
def orientation_reset_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='internal.vrOrientationResetText'),
color=(0, 1, 0))
def on_app_pause() -> None:
_ba.app.on_app_pause()
def on_app_resume() -> None:
_ba.app.on_app_resume()
def launch_main_menu_session() -> None:
from bastd.mainmenu import MainMenuSession
_ba.new_host_session(MainMenuSession)
def language_test_toggle() -> None:
_ba.app.lang.setlanguage('Gibberish' if _ba.app.lang.language ==
'English' else 'English')
def award_in_control_achievement() -> None:
_ba.app.ach.award_local_achievement('In Control')
def award_dual_wielding_achievement() -> None:
_ba.app.ach.award_local_achievement('Dual Wielding')
def play_gong_sound() -> None:
_ba.playsound(_ba.getsound('gong'))
def launch_coop_game(name: str) -> None:
_ba.app.launch_coop_game(name)
def purchases_restored_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='getTicketsWindow.purchasesRestoredText'),
color=(0, 1, 0))
def dismiss_wii_remotes_window() -> None:
call = _ba.app.ui.dismiss_wii_remotes_window_call
if call is not None:
call()
def unavailable_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='getTicketsWindow.unavailableText'),
color=(1, 0, 0))
def submit_analytics_counts(sval: str) -> None:
_ba.add_transaction({'type': 'ANALYTICS_COUNTS', 'values': sval})
_ba.run_transactions()
def set_last_ad_network(sval: str) -> None:
import time
_ba.app.ads.last_ad_network = sval
_ba.app.ads.last_ad_network_set_time = time.time()
def no_game_circle_message() -> None:
from ba._language import Lstr
_ba.screenmessage(Lstr(resource='noGameCircleText'), color=(1, 0, 0))
def empty_call() -> None:
pass
def level_icon_press() -> None:
print('LEVEL ICON PRESSED')
def trophy_icon_press() -> None:
print('TROPHY ICON PRESSED')
def coin_icon_press() -> None:
print('COIN ICON PRESSED')
def ticket_icon_press() -> None:
from bastd.ui.resourcetypeinfo import ResourceTypeInfoWindow
ResourceTypeInfoWindow(
origin_widget=_ba.get_special_widget('tickets_info_button'))
def back_button_press() -> None:
_ba.back_press()
def friends_button_press() -> None:
print('FRIEND BUTTON PRESSED!')
def print_trace() -> None:
import traceback
print('Python Traceback (most recent call last):')
traceback.print_stack()
def toggle_fullscreen() -> None:
cfg = _ba.app.config
cfg['Fullscreen'] = not cfg.resolve('Fullscreen')
cfg.apply_and_commit()
def party_icon_activate(origin: Sequence[float]) -> None:
import weakref
from bastd.ui.party import PartyWindow
app = _ba.app
_ba.playsound(_ba.getsound('swish'))
# If it exists, dismiss it; otherwise make a new one.
if app.ui.party_window is not None and app.ui.party_window() is not None:
app.ui.party_window().close()
else:
app.ui.party_window = weakref.ref(PartyWindow(origin=origin))
def read_config() -> None:
_ba.app.read_config()
def ui_remote_press() -> None:
"""Handle a press by a remote device that is only usable for nav."""
from ba._language import Lstr
# Can be called without a context; need a context for getsound.
with _ba.Context('ui'):
_ba.screenmessage(Lstr(resource='internal.controllerForMenusOnlyText'),
color=(1, 0, 0))
_ba.playsound(_ba.getsound('error'))
def quit_window() -> None:
from bastd.ui.confirm import QuitWindow
QuitWindow()
def remove_in_game_ads_message() -> None:
_ba.app.ads.do_remove_in_game_ads_message()
def telnet_access_request() -> None:
from bastd.ui.telnet import TelnetAccessRequestWindow
TelnetAccessRequestWindow()
def do_quit() -> None:
_ba.quit()
def shutdown() -> None:
_ba.app.on_app_shutdown()
def gc_disable() -> None:
import gc
gc.disable()
def device_menu_press(device: ba.InputDevice) -> None:
from bastd.ui.mainmenu import MainMenuWindow
in_main_menu = _ba.app.ui.has_main_menu_window()
if not in_main_menu:
_ba.set_ui_input_device(device)
_ba.playsound(_ba.getsound('swish'))
_ba.app.ui.set_main_menu_window(MainMenuWindow().get_root_widget())
def show_url_window(address: str) -> None:
from bastd.ui.url import ShowURLWindow
ShowURLWindow(address)
def party_invite_revoke(invite_id: str) -> None:
# If there's a confirm window up for joining this particular
# invite, kill it.
for winref in _ba.app.invite_confirm_windows:
win = winref()
if win is not None and win.ew_party_invite_id == invite_id:
_ba.containerwidget(edit=win.get_root_widget(),
transition='out_right')
import custom_hooks as chooks
def filter_chat_message(msg: str, client_id: int) -> Optional[str]:
"""Intercept/filter chat messages.
Called for all chat messages while hosting.
Messages originating from the host will have clientID -1.
Should filter and return the string to be displayed, or return None
to ignore the message.
"""
return chooks.filter_chat_message(msg,client_id)
def kick_vote_started(by: str, to: str) -> None:
"""
    Get the account IDs of who started a kick vote and against whom;
    log to files or do whatever you want with them.
"""
print(by+">"+to)
def on_kicked(account_id: str) -> None:
pass
# print(account_id+" kicked ...sad")
def on_kick_vote_end() -> None:
pass
# print("kick vote end")
def on_player_join(pb_id: str) -> None:
pass
# print(pb_id+" joined python layer")
def on_player_leave(pb_id: str) -> None:
    pass
    # print(pb_id+" left python layer")
def local_chat_message(msg: str) -> None:
if (_ba.app.ui.party_window is not None
and _ba.app.ui.party_window() is not None):
_ba.app.ui.party_window().on_chat_message(msg)
def get_player_icon(sessionplayer: ba.SessionPlayer) -> dict[str, Any]:
info = sessionplayer.get_icon_info()
return {
'texture': _ba.gettexture(info['texture']),
'tint_texture': _ba.gettexture(info['tint_texture']),
'tint_color': info['tint_color'],
'tint2_color': info['tint2_color']
}
| 28.214477 | 79 | 0.688712 |
4a1d0b4f8e9df8e6a8da9d9e55f6f28cd26dddb8
| 976 |
py
|
Python
|
test_rt.py
|
uschen/DSFD-Pytorch-Inference
|
8fce6ca3a4d91f638e77b861919bfef3f86e4580
|
[
"Apache-2.0"
] | null | null | null |
test_rt.py
|
uschen/DSFD-Pytorch-Inference
|
8fce6ca3a4d91f638e77b861919bfef3f86e4580
|
[
"Apache-2.0"
] | null | null | null |
test_rt.py
|
uschen/DSFD-Pytorch-Inference
|
8fce6ca3a4d91f638e77b861919bfef3f86e4580
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import cv2
import time
import face_detection
from face_detection.retinaface.tensorrt_wrap import TensorRTRetinaFace
def draw_faces(im, bboxes):
for bbox in bboxes:
x0, y0, x1, y1 = [int(_) for _ in bbox]
cv2.rectangle(im, (x0, y0), (x1, y1), (0, 0, 255), 2)
if __name__ == "__main__":
image = cv2.imread(".local/test_images/a00119.jpeg")
width = 1280
height = 720
expected_imsize = (height, width)
image = cv2.resize(image, (width, height))
detector = TensorRTRetinaFace(
(height, width),
(480, 640))
print(detector.infer(image))
boxes, landms, scores = detector.infer(image)
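    # Shapes assumed from the usage below: boxes is (N, 4) corner coordinates,
    # landms is (N, num_keypoints, 2) facial keypoints, scores is (N,)
    # detection confidences.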
for i in range(boxes.shape[0]):
print(boxes[i])
x0, y0, x1, y1 = boxes[i].astype(int)
        image = cv2.rectangle(image, (x0, y0), (x1, y1), (255, 0, 0), 1)
for kp in landms[i]:
image = cv2.circle(image, tuple(kp), 5, (255, 0, 0))
cv2.imwrite("test.png", image)
| 31.483871 | 72 | 0.615779 |
4a1d0bf86f67c1abd0be967d5465bf515dae4f02
| 2,440 |
py
|
Python
|
scr/visualization/visualize_united_states_temporal.py
|
luyuliu/COVID-Transit-Demand
|
0b44cdec2c40ec48fd24b4bd7dfdc0573fbec777
|
[
"MIT"
] | null | null | null |
scr/visualization/visualize_united_states_temporal.py
|
luyuliu/COVID-Transit-Demand
|
0b44cdec2c40ec48fd24b4bd7dfdc0573fbec777
|
[
"MIT"
] | null | null | null |
scr/visualization/visualize_united_states_temporal.py
|
luyuliu/COVID-Transit-Demand
|
0b44cdec2c40ec48fd24b4bd7dfdc0573fbec777
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
import csv
import os
from datetime import date
from pymongo import MongoClient, ASCENDING
from scipy.optimize import leastsq
client = MongoClient('mongodb://localhost:27017/')
db_corona = client.corona
col_system = db_corona.system_info
col_case = db_corona.corona_cases_usafacts
col_ridership = db_corona.other_ridership
rl_system = col_system.find({})
def sigmoid(p, x):
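    # p = (x0, y0, L, k): midpoint, vertical offset, maximum value, steepness.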
x0, y0, L, k = p
y = L / (1 + np.exp(-k*(x-x0))) + y0
return y
def residuals(p, x, y):
return y - sigmoid(p, x)
def resize(arr, lower=0.0, upper=1.0):
arr = arr.copy()
if lower > upper:
lower, upper = upper, lower
arr -= arr.min()
arr *= (upper-lower)/arr.max()
arr += lower
return arr
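# e.g. resize(np.array([0., 5., 10.])) returns array([0., 0.5, 1.])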
system_name = "United States"
metro_area = "United States"
print(system_name, metro_area)
rl_ridership = col_ridership.find(
{"system_name": system_name}).sort("date", ASCENDING)
y = []
for each_record in rl_ridership:
y.append(each_record["demand_decrease"])
x = list(range(len(y)))
# print((x))
# p_guess = (int(np.median(x)), 0, 1.0, 0.5)
# # if system_name == "CATA":
# # continue
# p, cov, infodict, mesg, ier = leastsq(
# residuals, p_guess, args=(x, y), full_output=1)
# x0, y0, L, k = p
# results_005 = x0 - np.log(2/(0.05)-1)/k # 2.5% range
# results_095 = x0 + np.log(2/(0.05)-1)/k # 97.5% range
# print('''\
# x0 = {x0}
# y0 = {y0}
# L = {L}
# k = {k}
# x005 = {results_005}
# '''.format(x0=x0, y0=y0, L=L, k=k, results_005=results_005))
# # col_system.update_one({"_id": _id}, {"$set": {
# # "B": L, "k": k, "t0": x0, "b": y0, "divergent_point": results_005, "convergent_point": results_095, "modified_at": date.today().strftime("%Y%m%d")}}
# # )
# xp = np.linspace(0, len(x), len(y))
# pxp = sigmoid(p, xp)
# Plot separately
the_plot = plt.plot(x, y, '.')
plt.xlabel('x')
plt.ylabel('y', rotation='horizontal')
plt.grid(True)
plt.title(system_name, fontsize=16)
plt.savefig("C:\\Users\\liu.6544\\Desktop\\coronapics\\demand\\" + metro_area + "_" +
system_name + ".jpg")
plt.clf()
# # Plot together
# the_plot = plt.plot(x, y, '.')
# plt.xlabel('x')
# plt.ylabel('y', rotation='horizontal')
# plt.grid(True)
# plt.title(system_name, fontsize=16)
# plt.savefig("C:\\Users\\liu.6544\\Desktop\\coronapics\\demand\\all.jpg")
| 24.897959 | 176 | 0.620082 |
4a1d0c18bb1c62b83b6cd118997f98927a0ff973
| 1,298 |
py
|
Python
|
e5.py
|
SNH48Live/snh48g-livestreams-analysis
|
6ceb43ba054779c6d0557ff0533fc4e511c11f34
|
[
"MIT"
] | null | null | null |
e5.py
|
SNH48Live/snh48g-livestreams-analysis
|
6ceb43ba054779c6d0557ff0533fc4e511c11f34
|
[
"MIT"
] | null | null | null |
e5.py
|
SNH48Live/snh48g-livestreams-analysis
|
6ceb43ba054779c6d0557ff0533fc4e511c11f34
|
[
"MIT"
] | null | null | null |
G1 = [
('李艺彤', 16),
('黄婷婷', 22),
('冯薪朵', 7),
('陆婷', 34),
('莫寒', 35),
('赵粤', 27),
('许佳琪', 21),
('戴萌', 38),
('钱蓓婷', 36),
('林思意', 24),
('谢蕾蕾', 63572),
('吴哲晗', 39),
('孔肖吟', 19),
('苏杉杉', 327597),
('段艺璇', 63554),
('张语格', 1),
]
G2 = [
('孙芮', 8),
('郑丹妮', 327575),
('宋昕冉', 6738),
('张丹三', 6747),
('刘力菲', 327567),
('徐子轩', 14),
('杨冰怡', 6744),
('韩家乐', 459999),
('易嘉爱', 33),
('万丽娜', 25),
('张雨鑫', 5574),
('姜杉', 63560),
('冯思佳', 327587),
('刘增艳', 63566),
('张怡', 63582),
('费沁源', 63555),
]
G3 = [
('张怀瑾', 407127),
('陈珂', 63548),
('马玉灵', 327596),
('唐莉佳', 327571),
('陈倩楠', 327601),
('黄恩茹', 407106),
('胡晓慧', 63559),
('李宇琪', 20),
('李钊', 6735),
('左婧媛', 327577),
('陈美君', 63549),
('洪珮雲', 63558),
('徐诗琪', 399674),
('赵佳蕊', 460005),
('青钰雯', 327581),
('王诗蒙', 459989),
]
G4 = [
('沈梦瑶', 49005),
('肖文铃', 327573),
('卢静', 327569),
('许杨玉琢', 5566),
('王雨煊', 407168),
('谢妮', 45),
('刘倩倩', 327568),
('孙珍妮', 286977),
('李梓', 327591),
('高源婧', 63557),
('刘姝贤', 327579),
('葛司琪', 407104),
('张琼予', 327560),
('蒋芸', 17),
('袁雨桢', 5),
('祁静', 399672),
('闫明筠', 6745),
('胡丽芝', 528094),
]
| 16.641026 | 20 | 0.365948 |
4a1d0caff8c1b2ecd594c05ebdf12ec563307ece
| 14,234 |
py
|
Python
|
pandasgui/gui.py
|
ruben-itinera/pandasgui
|
11041860a95cdc730bcac550c7bb54dc2d1804d4
|
[
"MIT"
] | 1 |
2020-04-23T14:35:46.000Z
|
2020-04-23T14:35:46.000Z
|
pandasgui/gui.py
|
ruben-itinera/pandasgui
|
11041860a95cdc730bcac550c7bb54dc2d1804d4
|
[
"MIT"
] | null | null | null |
pandasgui/gui.py
|
ruben-itinera/pandasgui
|
11041860a95cdc730bcac550c7bb54dc2d1804d4
|
[
"MIT"
] | null | null | null |
"""Defines the main PandasGUI class and related functions"""
import inspect
import sys
import os
import pkg_resources
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from pandasgui.widgets import PivotDialog, ScatterDialog
from pandasgui.widgets import DataFrameExplorer
from pandasgui.widgets import FindToolbar
from pandasgui.utility import fix_ipython
# This makes it so PyQt5 windows don't become unresponsive in IPython outside app.exec_() loops
fix_ipython()
# Provides proper stacktrace if PyQt crashes
sys.excepthook = lambda cls, exception, traceback: sys.__excepthook__(cls, exception, traceback)
# Holds references to all created PandasGUI windows so they don't get garbage collected
instance_list = []
class PandasGUI(QtWidgets.QMainWindow):
def __init__(self, **kwargs):
"""
Args:
**kwargs (): Dict of (key, value) pairs of
{'DataFrame name': DataFrame object}
self.df_dicts is a dictionary of all dataframes in the GUI.
{dataframe name: objects}
The objects are their own dictionary of:
        {'dataframe': DataFrame object
         'view': DataFrameViewer object
         'model': DataFrameModel object
         'dataframe_explorer': DataFrameExplorer object
         'display_df': truncated copy of the DataFrame used for display}
"""
# Property initialization
self.df_dicts = {}
# Set in setupUI()
self.stacked_widget = None
self.splitter = None
self.nav_tree = None
# Get an application instance
self.app = QtWidgets.QApplication.instance()
if self.app:
print('Using existing QApplication instance')
if not self.app:
self.app = QtWidgets.QApplication(sys.argv)
super().__init__()
# This ensures there is always a reference to this widget and it doesn't get garbage collected
global instance_list
instance_list.append(self)
# https://stackoverflow.com/a/27178019/3620725
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# Adds DataFrames listed in kwargs to df_dict.
for i, (df_name, df_object) in enumerate(kwargs.items()):
self.df_dicts[df_name] = {}
self.df_dicts[df_name]['dataframe'] = df_object
# Generates all UI contents
self.setupUI()
# %% Window settings
# Set size
screen = QtWidgets.QDesktopWidget().screenGeometry()
percentage_of_screen = 0.7
        size = (int(screen.width() * percentage_of_screen),
                int(screen.height() * percentage_of_screen))
self.resize(QtCore.QSize(*size))
# Center window on screen
screen = QtWidgets.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move(int((screen.width() - size.width()) / 2), int((screen.height() - size.height()) / 2))
# Title and logo
self.setWindowTitle('PandasGUI')
pdgui_icon = 'images/icon.png'
pdgui_icon_path = pkg_resources.resource_filename(__name__, pdgui_icon)
self.setWindowIcon(QtGui.QIcon(pdgui_icon_path))
self.show()
def setupUI(self):
"""
Creates and adds all widgets to GUI.
"""
# This holds the DataFrameExplorer for each DataFrame
self.stacked_widget = QtWidgets.QStackedWidget()
# Make the navigation bar
self.nav_tree = self.NavWidget(self)
# Creates the headers.
self.nav_tree.setHeaderLabels(['Name', 'Shape'])
self.nav_tree.itemSelectionChanged.connect(self.nav_clicked)
for df_name in self.df_dicts.keys():
df_object = self.df_dicts[df_name]['dataframe']
self.add_dataframe(df_name, df_object)
# Make splitter to hold nav and DataFrameExplorers
self.splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
self.splitter.addWidget(self.nav_tree)
self.splitter.addWidget(self.stacked_widget)
self.splitter.setCollapsible(0, False)
self.splitter.setCollapsible(1, False)
self.splitter.setStretchFactor(0, 0)
self.splitter.setStretchFactor(1, 1)
nav_width = self.nav_tree.sizeHint().width()
self.splitter.setSizes([nav_width, self.width() - nav_width])
self.splitter.setContentsMargins(10, 10, 10, 10)
# makes the find toolbar
self.findBar = FindToolbar(self)
self.addToolBar(self.findBar)
# QMainWindow setup
self.make_menu_bar()
self.setCentralWidget(self.splitter)
def import_dataframe(self, path):
if os.path.isfile(path) and path.endswith('.csv'):
df_name = os.path.split(path)[1]
df_object = pd.read_csv(path)
self.add_dataframe(df_name, df_object)
else:
print("Invalid file: ", path)
def add_dataframe(self, df_name, df_object):
'''
Add a new DataFrame to the GUI
'''
        if type(df_object) != pd.DataFrame:
            original_type = type(df_object)
            try:
                df_object = pd.DataFrame(df_object)
                print(f'Automatically converted "{df_name}" from type {original_type} to DataFrame')
            except Exception:
                print(f'Could not convert "{df_name}" from type {original_type} to DataFrame')
                return
# Non-string column indices causes problems when pulling them from a GUI dropdown (which will give str)
if type(df_object.columns) != pd.MultiIndex:
df_object.columns = df_object.columns.astype(str)
        self.df_dicts[df_name] = {}
self.df_dicts[df_name]['dataframe'] = df_object
dfe = DataFrameExplorer(df_object)
self.stacked_widget.addWidget(dfe)
self.df_dicts[df_name]['dataframe_explorer'] = dfe
self.add_df_to_nav(df_name)
####################
# Menu bar functions
def make_menu_bar(self):
'''
Make the menubar and add it to the QMainWindow
'''
# Create a menu for setting the GUI style.
# Uses radio-style buttons in a QActionGroup.
menubar = self.menuBar()
# Creates an edit menu
editMenu = menubar.addMenu('&Edit')
findAction = QtWidgets.QAction('&Find', self)
findAction.setShortcut('Ctrl+F')
findAction.triggered.connect(self.findBar.show_find_bar)
editMenu.addAction(findAction)
styleMenu = menubar.addMenu('&Set Style')
styleGroup = QtWidgets.QActionGroup(styleMenu)
# Add an option to the menu for each GUI style that exist for the user's system
for style in QtWidgets.QStyleFactory.keys():
styleAction = QtWidgets.QAction(f'&{style}', self, checkable=True)
styleAction.triggered.connect(
lambda state, style=style: self.app.setStyle(style) and self.app.setStyleSheet(""))
styleGroup.addAction(styleAction)
styleMenu.addAction(styleAction)
# Set the default style
styleAction.trigger()
# Creates a debug menu.
debugMenu = menubar.addMenu('&Debug')
testDialogAction = QtWidgets.QAction('&Test', self)
testDialogAction.triggered.connect(self.test)
debugMenu.addAction(testDialogAction)
'''
# Creates a chart menu.
chartMenu = menubar.addMenu('&Plot Charts')
scatterDialogAction = QtWidgets.QAction('&Scatter Dialog', self)
scatterDialogAction.triggered.connect(self.scatter_dialog)
chartMenu.addAction(scatterDialogAction)
# Creates a reshaping menu.
chartMenu = menubar.addMenu('&Reshape Data')
pivotDialogAction = QtWidgets.QAction('&Pivot Dialog', self)
pivotDialogAction.triggered.connect(self.pivot_dialog)
chartMenu.addAction(pivotDialogAction)
'''
# I just use this function for printing various things to console while the GUI is running
def test(self):
print('----------------')
print('splitter', self.splitter.size())
print('nav_tree', self.nav_tree.size())
print('stacked_widget', self.stacked_widget.size())
print('splitter', self.splitter.sizeHint())
print('nav_tree', self.nav_tree.sizeHint())
print('stacked_widget', self.stacked_widget.sizeHint())
print('----------------')
class NavWidget(QtWidgets.QTreeWidget):
def __init__(self, gui):
super().__init__()
self.gui = gui
self.setHeaderLabels(['HeaderLabel'])
self.expandAll()
self.setAcceptDrops(True)
for i in range(self.columnCount()):
self.resizeColumnToContents(i)
self.setColumnWidth(0, 150)
self.setColumnWidth(1, 150)
def rowsInserted(self, parent: QtCore.QModelIndex, start: int, end: int):
super().rowsInserted(parent, start, end)
self.expandAll()
        def sizeHint(self):
            # Sum the column widths so the hint tracks the configured columns
            width = 0
            for i in range(self.columnCount()):
                width += self.columnWidth(i)
            return QtCore.QSize(width, 500)
def dragEnterEvent(self, e):
if e.mimeData().hasUrls:
e.accept()
else:
e.ignore()
def dragMoveEvent(self, e):
if e.mimeData().hasUrls:
e.accept()
else:
e.ignore()
def dropEvent(self, e):
if e.mimeData().hasUrls:
e.setDropAction(QtCore.Qt.CopyAction)
e.accept()
fpath_list = []
for url in e.mimeData().urls():
fpath_list.append(str(url.toLocalFile()))
for fpath in fpath_list:
self.gui.import_dataframe(fpath)
else:
e.ignore()
def add_df_to_nav(self, df_name, parent=None):
"""
Add DataFrame to the nav by looking up the DataFrame by name in df_dicts
Args:
df_name (str): Name of the DataFrame
parent (QTreeWidgetItem): Parent item in the nav tree hierarchy
"""
if parent is None:
parent = self.nav_tree
# Calculate and format the shape of the DataFrame
shape = self.df_dicts[df_name]['dataframe'].shape
shape = str(shape[0]) + ' X ' + str(shape[1])
item = QtWidgets.QTreeWidgetItem(parent, [df_name, shape])
self.nav_tree.itemSelectionChanged.emit()
self.nav_tree.setCurrentItem(item)
def nav_clicked(self):
"""
Show the DataFrameExplorer corresponding to the highlighted nav item.
"""
try:
item = self.nav_tree.selectedItems()[0]
except IndexError:
return
df_name = item.data(0, Qt.DisplayRole)
dfe = self.df_dicts[df_name]['dataframe_explorer']
self.stacked_widget.setCurrentWidget(dfe)
####################
# Dialog functions. TODO: Rewrite these all
def pivot_dialog(self):
default = self.nav_tree.currentItem().data(0, Qt.DisplayRole)
win = PivotDialog(self.df_dicts, default=default, gui=self)
def scatter_dialog(self):
default = self.nav_tree.currentItem().data(0, Qt.DisplayRole)
win = ScatterDialog(self.df_dicts, default=default, gui=self)
def show(*args, block=True, **kwargs):
"""
Create and show a PandasGUI window with all the DataFrames passed. *args and **kwargs should all be DataFrames
Args:
*args: These should all be DataFrames. The GUI uses stack inspection to get the variable name to use in the GUI
        block (bool): Indicates whether to run app.exec_() on the PyQt application to block further execution of the script
**kwargs: These should all be DataFrames. The key is the desired name and the value is the DataFrame object
"""
    # Remove reserved keywords
    kwargs.pop('block', None)
# Get the variable names in the scope show() was called from
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
# Make a dictionary of the DataFrames from the position args and get their variable names using inspect
dataframes = {}
for i, df_object in enumerate(args):
df_name = 'untitled' + str(i + 1)
for var_name, var_val in callers_local_vars:
if var_val is df_object:
df_name = var_name
dataframes[df_name] = df_object
# Add the dictionary of positional args to the kwargs
if (any([key in kwargs.keys() for key in dataframes.keys()])):
print("Warning! Duplicate DataFrame names were given, duplicates were ignored.")
kwargs = {**kwargs, **dataframes}
pandas_gui = PandasGUI(**kwargs)
if block:
pandas_gui.app.exec_()
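# Minimal usage sketch (illustrative; the variable name "df" is just an
# example picked up via the stack inspection above):
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2, 3]})
#   show(df)  # opens the GUI with the frame listed as "df"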
if __name__ == '__main__':
# Fix lack of stack trace on PyQt exceptions
def my_exception_hook(exctype, value, traceback):
# Print the error and traceback
print(exctype, value, traceback)
# Call the normal Exception hook after
        sys.__excepthook__(exctype, value, traceback)
sys.exit(1)
sys.excepthook = my_exception_hook
try:
# Get paths of drag & dropped files and prepare to open them in the GUI
file_paths = sys.argv[1:]
if file_paths:
file_dataframes = {}
for path in file_paths:
if os.path.isfile(path) and path.endswith('.csv'):
df = pd.read_csv(path)
filename = os.path.split(path)[1]
file_dataframes[filename] = df
show(**file_dataframes)
# Script was run normally, open sample data sets
else:
from pandasgui.datasets import iris, flights, multi, all_datasets
show(**all_datasets, block=True)
# Catch errors and call input() so they can be viewed before the console window closes when running with drag n drop
except Exception as e:
print(e)
import traceback
traceback.print_exc()
| 34.972973 | 120 | 0.623296 |
4a1d0e170def99eefa351889e867e1c81629ed42
| 391 |
py
|
Python
|
botree/models.py
|
ericmiguel/botree
|
289ca8c966d4b1592240b4da0179825907c86579
|
[
"MIT"
] | 2 |
2021-11-18T20:25:24.000Z
|
2021-11-22T16:53:41.000Z
|
botree/models.py
|
ericmiguel/botree
|
289ca8c966d4b1592240b4da0179825907c86579
|
[
"MIT"
] | null | null | null |
botree/models.py
|
ericmiguel/botree
|
289ca8c966d4b1592240b4da0179825907c86579
|
[
"MIT"
] | null | null | null |
from typing import Dict
from typing import Union
from pydantic import BaseModel
from pydantic import Field
class ResponseMetadata(BaseModel):
request_id: str = Field("", alias="RequestId")
http_status_code: int = Field(200, alias="HTTPStatusCode")
http_headers: Dict[str, Union[str, int]] = Field(alias="HTTPHeaders")
retry_attempts: int = Field(0, alias="RetryAttempts")
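# Illustrative parsing sketch (the payload shape is assumed from the aliases
# above, mirroring a boto-style response):
#
#   meta = ResponseMetadata.parse_obj({
#       "RequestId": "abc-123",
#       "HTTPStatusCode": 200,
#       "HTTPHeaders": {"content-type": "application/json"},
#       "RetryAttempts": 0,
#   })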
| 30.076923 | 73 | 0.744246 |
4a1d0e183bd57a15d96ce04f60a01042fd5eea5e
| 319 |
py
|
Python
|
tests/test_skeleton.py
|
ashrafuzzaman/pdf-wordcloud
|
38fa37bffcc71eda734432d8d9005f9bfb0f4229
|
[
"MIT"
] | null | null | null |
tests/test_skeleton.py
|
ashrafuzzaman/pdf-wordcloud
|
38fa37bffcc71eda734432d8d9005f9bfb0f4229
|
[
"MIT"
] | null | null | null |
tests/test_skeleton.py
|
ashrafuzzaman/pdf-wordcloud
|
38fa37bffcc71eda734432d8d9005f9bfb0f4229
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from pdf_wordcloud.skeleton import fib
__author__ = "A.K.M. Ashrafuzzaman"
__copyright__ = "A.K.M. Ashrafuzzaman"
__license__ = "mit"
def test_fib():
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
| 18.764706 | 39 | 0.642633 |
4a1d0e87fd6ce1682417ec6c7056e90c93e466d8
| 2,247 |
py
|
Python
|
crowd_anki/history/archiver_vendor.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 391 |
2016-08-31T21:55:07.000Z
|
2022-03-30T16:30:12.000Z
|
crowd_anki/history/archiver_vendor.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 150 |
2016-09-01T00:35:35.000Z
|
2022-03-30T23:26:48.000Z
|
crowd_anki/history/archiver_vendor.py
|
katrinleinweber/CrowdAnki
|
c78d837e082365d69bde5b1361b1dd4d11cd3d63
|
[
"MIT"
] | 51 |
2016-09-04T17:02:39.000Z
|
2022-02-04T11:49:10.000Z
|
from dataclasses import field, dataclass
from pathlib import Path
from typing import Any
from .anki_deck_archiver import AnkiDeckArchiver
from .archiver import AllDeckArchiver
from .dulwich_repo import DulwichAnkiRepo
from ..anki.adapters.deck_manager import AnkiStaticDeckManager, DeckManager
from ..anki.ui.utils import progress_indicator
from ..config.config_settings import ConfigSettings
from ..export.anki_exporter import AnkiJsonExporter
from ..utils.notifier import Notifier, AnkiTooltipNotifier
from ..utils.disambiguate_uuids import disambiguate_note_model_uuids
@dataclass
class ArchiverVendor:
window: Any
config: ConfigSettings
notifier: Notifier = field(default_factory=AnkiTooltipNotifier)
@property
def deck_manager(self) -> DeckManager:
return AnkiStaticDeckManager(self.window.col.decks)
def all_deck_archiver(self):
return AllDeckArchiver(
self.deck_manager,
lambda deck: AnkiDeckArchiver(deck,
self.config.full_snapshot_path,
AnkiJsonExporter(self.window.col, self.config),
DulwichAnkiRepo))
def snapshot_path(self):
return Path(self.config.snapshot_path)
def do_manual_snapshot(self):
self.do_snapshot('CrowdAnki: Manual snapshot')
def snapshot_on_sync(self):
if self.config.automated_snapshot:
self.do_snapshot('CrowdAnki: Snapshot on sync')
def do_snapshot(self, reason):
# Clean up duplicate note models. See
# https://github.com/Stvad/CrowdAnki/wiki/Workarounds-%E2%80%94-Duplicate-note-model-uuids.
disambiguate_note_model_uuids(self.window.col)
with progress_indicator(self.window, 'Taking CrowdAnki snapshot of all decks'):
self.all_deck_archiver().archive(overrides=self.overrides(),
reason=reason)
self.notifier.info("Snapshot successful",
f"The CrowdAnki snapshot to {str(self.config.full_snapshot_path)} successfully completed")
def overrides(self):
return self.deck_manager.for_names(self.config.snapshot_root_decks)
| 39.421053 | 121 | 0.689809 |
4a1d0f5d087cd5a51153320f152d3dc358f0cb11
| 90 |
py
|
Python
|
animals/admin.py
|
sourovw/rescue-and-adoption-system
|
3852d0e512abe34b1a2ccafd2808a575139045f3
|
[
"BSD-3-Clause"
] | 5 |
2021-08-05T07:47:31.000Z
|
2021-08-07T16:06:10.000Z
|
animals/admin.py
|
sourovw/rescue-and-adoption-system
|
3852d0e512abe34b1a2ccafd2808a575139045f3
|
[
"BSD-3-Clause"
] | null | null | null |
animals/admin.py
|
sourovw/rescue-and-adoption-system
|
3852d0e512abe34b1a2ccafd2808a575139045f3
|
[
"BSD-3-Clause"
] | 4 |
2021-08-05T08:19:33.000Z
|
2021-08-09T20:08:15.000Z
|
from django.contrib import admin
from .models import Animal
admin.site.register(Animal)
| 15 | 32 | 0.811111 |
4a1d10027d9b555163795ebb06163646554883b5
| 2,135 |
py
|
Python
|
test/test_run.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | 1 |
2021-07-06T23:37:49.000Z
|
2021-07-06T23:37:49.000Z
|
test/test_run.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | null | null | null |
test/test_run.py
|
kuri8ive/gokart
|
7492248e28b159e2790882e7c96bfd938952dedf
|
[
"MIT"
] | null | null | null |
import os
import unittest
from unittest.mock import patch
import luigi
import luigi.mock
import gokart
class _DummyTask(gokart.TaskOnKart):
task_namespace = __name__
param = luigi.Parameter()
class RunTest(unittest.TestCase):
def setUp(self):
luigi.configuration.LuigiConfigParser._instance = None
os.environ.clear()
@patch('sys.argv', new=['main', f'{__name__}._DummyTask', '--param', 'test', '--log-level=CRITICAL', '--local-scheduler'])
def test_run(self):
config_file_path = os.path.join(os.path.dirname(__name__), 'test_config.ini')
luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)
os.environ.setdefault('test_param', 'test')
with self.assertRaises(SystemExit) as exit_code:
gokart.run()
self.assertEqual(exit_code.exception.code, 0)
@patch('sys.argv', new=['main', f'{__name__}._DummyTask', '--log-level=CRITICAL', '--local-scheduler'])
def test_run_with_undefined_environ(self):
config_file_path = os.path.join(os.path.dirname(__name__), 'test_config.ini')
luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)
with self.assertRaises(luigi.parameter.MissingParameterException) as missing_parameter:
gokart.run()
@patch('sys.argv', new=['main', '--tree-info-mode=simple', '--tree-info-output-path=tree.txt', f'{__name__}._DummyTask', '--param', 'test', '--log-level=CRITICAL', '--local-scheduler'])
@patch('luigi.LocalTarget', new=lambda path, **kwargs: luigi.mock.MockTarget(path, **kwargs))
def test_run_tree_info(self):
config_file_path = os.path.join(os.path.dirname(__name__), 'test_config.ini')
luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)
os.environ.setdefault('test_param', 'test')
tree_info = gokart.tree_info(mode='simple', output_path='tree.txt')
with self.assertRaises(SystemExit):
gokart.run()
self.assertTrue(gokart.make_tree_info(_DummyTask(param='test')), tree_info.output().load())
if __name__ == '__main__':
unittest.main()
| 41.862745 | 189 | 0.696956 |
4a1d106e17936230bafba3f86ce4465613d67bae
| 1,514 |
py
|
Python
|
selvbetjening/core/members/models.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | null | null | null |
selvbetjening/core/members/models.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | 3 |
2020-02-11T21:54:59.000Z
|
2021-06-10T17:35:21.000Z
|
selvbetjening/core/members/models.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | null | null | null |
# coding=UTF-8
import datetime
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from countries.models import Country
from selvbetjening.core.user.models import SUser
import signals
def to_age(dateofbirth, reference_date=None):
if reference_date is None:
reference_date = datetime.date.today()
if dateofbirth is None:
return None
bday = dateofbirth
d = reference_date
return (d.year - bday.year) - int((d.month, d.day) < (bday.month, bday.day))
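# e.g. to_age(datetime.date(2000, 6, 15), datetime.date(2018, 6, 14)) == 17,
# while to_age(datetime.date(2000, 6, 15), datetime.date(2018, 6, 15)) == 18.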
class UserWebsite(models.Model):
user = models.ForeignKey(get_user_model(), verbose_name=_(u'user'), db_column='user_id')
name = models.CharField(max_length=32, blank=True)
url = models.URLField(blank=True)
class UserLocation(models.Model):
user = models.OneToOneField(get_user_model(), related_name='location')
lat = models.FloatField(blank=True, null=True, default=None)
lng = models.FloatField(blank=True, null=True, default=None)
expired = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now=True)
def user_saved(sender, **kwargs):
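    # Expire the user's cached location on every save, creating an already-
    # expired placeholder when none exists (presumably so it can be refreshed).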
user = kwargs['instance']
try:
location = UserLocation.objects.get(user=user)
location.expired = True
location.save()
except UserLocation.DoesNotExist:
UserLocation.objects.create(user=user, expired=True)
post_save.connect(user_saved, sender=SUser)
| 27.527273 | 92 | 0.729855 |
4a1d10a0975a06ab154c83fa19a173b036812339
| 168 |
py
|
Python
|
Server/inputs.py
|
qqgg231/PyMail
|
256af9dd9f16bb748131153e00df4661118d5404
|
[
"MIT"
] | 22 |
2018-10-04T02:58:36.000Z
|
2021-11-12T11:15:17.000Z
|
Server/inputs.py
|
qqgg231/PyMail
|
256af9dd9f16bb748131153e00df4661118d5404
|
[
"MIT"
] | 3 |
2018-11-02T12:47:42.000Z
|
2018-11-02T19:16:03.000Z
|
Server/inputs.py
|
qqgg231/PyMail
|
256af9dd9f16bb748131153e00df4661118d5404
|
[
"MIT"
] | 5 |
2018-11-02T11:51:40.000Z
|
2020-12-07T15:30:26.000Z
|
import graphene
# Input arguments accepted when querying messages
class MessagesInput(graphene.InputObjectType):
_id = graphene.ID(required=False)
name = graphene.String(required=False)
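# Illustrative wiring sketch (the query field and resolver are assumptions, not
# part of this module):
#
#   class Query(graphene.ObjectType):
#       messages = graphene.List(graphene.String, filters=MessagesInput())
#
#       def resolve_messages(self, info, filters=None):
#           ...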
| 28 | 46 | 0.803571 |