Dataset schema (column: type, value/length range; ⌀ = nullable):

repo_name: string, length 7–111
__id__: int64, 16.6k–19,705B
blob_id: string, length 40
directory_id: string, length 40
path: string, length 5–151
content_id: string, length 40
detected_licenses: sequence
license_type: string, 2 classes
repo_url: string, length 26–130
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, length 4–42
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64, 14.6k–687M, ⌀
star_events_count: int64, 0–209k
fork_events_count: int64, 0–110k
gha_license_id: string, 12 classes
gha_fork: bool, 2 classes
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64, 0–10.2M, ⌀
gha_stargazers_count: int32, 0–178k, ⌀
gha_forks_count: int32, 0–88.9k, ⌀
gha_open_issues_count: int32, 0–2.72k, ⌀
gha_language: string, length 1–16, ⌀
gha_archived: bool, 1 class
gha_disabled: bool, 1 class
content: string, length 10–2.95M
src_encoding: string, 5 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 10–2.95M
extension: string, 19 classes
num_repo_files: int64, 1–202k
filename: string, length 4–112
num_lang_files: int64, 1–202k
alphanum_fraction: float64, 0.26–0.89
alpha_fraction: float64, 0.2–0.89
hex_fraction: float64, 0–0.09
num_lines: int32, 1–93.6k
avg_line_length: float64, 4.57–103
max_line_length: int64, 7–931
openstack/python-mistralclient | 2,654,289,805,730 | f4e26cdc57ed6580bfd97a21a9c13c42c640567e | c6492a0727e301a92390b74f0990ddfd8bf22e01 | /mistralclient/tests/unit/v2/test_tasks.py | acb41ff2bf3f7d4995a5eb8100d45e1a020089fe | [
"Apache-2.0"
] | permissive | https://github.com/openstack/python-mistralclient | 6f2b3aff80cda80533ba7e9617fdcc4cc60bfd6e | b94abb44d9d0f92a527dfa1ecf2e4d3c1be6178c | refs/heads/master | 2023-09-03T22:51:50.152827 | 2023-01-09T19:25:50 | 2023-01-20T00:02:46 | 15,885,891 | 30 | 20 | Apache-2.0 | false | 2021-04-23T06:05:58 | 2014-01-13T23:40:34 | 2021-04-21T05:45:21 | 2021-04-22T08:19:01 | 1,387 | 50 | 20 | 0 | Python | false | false | # Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from mistralclient.api.v2.executions import Execution
from mistralclient.api.v2 import tasks
from mistralclient.tests.unit.v2 import base
# TODO(everyone): later we need additional tests verifying all the errors etc.
TASK = {
'id': "1",
'workflow_execution_id': '123',
'name': 'my_task',
'workflow_name': 'my_wf',
'state': 'RUNNING',
'tags': ['deployment', 'demo'],
'result': {'some': 'result'}
}
SUB_WF_EXEC = {
'id': "456",
'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
'workflow_name': 'my_sub_wf',
'workflow_namespace': '',
'task_execution_id': "1",
'description': '',
'state': 'RUNNING',
'input': {}
}
URL_TEMPLATE = '/tasks'
URL_TEMPLATE_ID = '/tasks/%s'
URL_TEMPLATE_SUB_EXECUTIONS = '/tasks/%s/executions%s'
class TestTasksV2(base.BaseClientV2Test):
def test_list(self):
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE,
json={'tasks': [TASK]})
task_list = self.tasks.list()
self.assertEqual(1, len(task_list))
task = task_list[0]
self.assertEqual(
tasks.Task(self.tasks, TASK).to_dict(),
task.to_dict()
)
def test_list_with_fields(self):
field_params = "?fields=id,name"
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE + field_params,
json={'tasks': [TASK]})
self.tasks.list(fields=["id,name"])
self.assertTrue(self.requests_mock.called_once)
def test_list_with_no_limit(self):
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE,
json={'tasks': [TASK]})
task_list = self.tasks.list(limit=-1)
self.assertEqual(1, len(task_list))
last_request = self.requests_mock.last_request
self.assertNotIn('limit', last_request.qs)
def test_get(self):
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
self.requests_mock.get(url, json=TASK)
task = self.tasks.get(TASK['id'])
self.assertEqual(
tasks.Task(self.tasks, TASK).to_dict(),
task.to_dict()
)
def test_rerun(self):
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
self.requests_mock.put(url, json=TASK)
task = self.tasks.rerun(TASK['id'])
self.assertDictEqual(
tasks.Task(self.tasks, TASK).to_dict(),
task.to_dict()
)
body = {
'reset': True,
'state': 'RUNNING',
'id': TASK['id']
}
self.assertDictEqual(body, self.requests_mock.last_request.json())
def test_rerun_no_reset(self):
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
self.requests_mock.put(url, json=TASK)
task = self.tasks.rerun(TASK['id'], reset=False)
self.assertDictEqual(
tasks.Task(self.tasks, TASK).to_dict(),
task.to_dict()
)
body = {
'reset': False,
'state': 'RUNNING',
'id': TASK['id']
}
self.assertDictEqual(body, self.requests_mock.last_request.json())
def test_rerun_update_env(self):
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
self.requests_mock.put(url, json=TASK)
task = self.tasks.rerun(TASK['id'], env={'k1': 'foobar'})
self.assertDictEqual(
tasks.Task(self.tasks, TASK).to_dict(),
task.to_dict()
)
body = {
'reset': True,
'state': 'RUNNING',
'id': TASK['id'],
'env': jsonutils.dumps({'k1': 'foobar'})
}
self.assertDictEqual(body, self.requests_mock.last_request.json())
def test_get_sub_executions(self):
url = self.TEST_URL + URL_TEMPLATE_SUB_EXECUTIONS \
% (TASK['id'], '?max_depth=-1&errors_only=')
self.requests_mock.get(url, json={'executions': [SUB_WF_EXEC]})
sub_execution_list = self.tasks.get_task_sub_executions(TASK['id'])
self.assertEqual(1, len(sub_execution_list))
self.assertDictEqual(
Execution(self.executions, SUB_WF_EXEC).to_dict(),
sub_execution_list[0].to_dict()
)
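# Convenience entry point so this module's tests can be run directly
# (an illustrative sketch; the project normally uses its own test runner):
if __name__ == '__main__':
    import unittest
    unittest.main()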
| UTF-8 | Python | false | false | 4,933 | py | 100 | test_tasks.py | 65 | 0.578553 | 0.566187 | 0 | 164 | 29.079268 | 78 |
SG0032/gestionRevenu | 15,384,572,896,762 | 9788f603f77680f10230e9f01d39f4b3167bdbdb | 35de7d4f08f03732f871169b8823010a2f75ab3e | /core/migrations/0001_initial.py | 01704a43a8c223230fde5364d4f5bd17aa972b2a | [] | no_license | https://github.com/SG0032/gestionRevenu | 701b812972479f87ecb86acdc77e21eadd6a9672 | 36b326a7a4a99da96794af915fe8af983d3b6895 | refs/heads/master | 2023-04-22T00:24:39.546786 | 2021-04-22T00:21:12 | 2021-04-22T00:21:12 | 360,344,110 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2 on 2021-04-22 00:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='enfant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'Enfant',
},
),
migrations.CreateModel(
name='famille',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nomFamille', models.CharField(max_length=255, verbose_name='nom famille')),
('nomP', models.CharField(max_length=255, verbose_name='nom pere')),
('prenomP', models.CharField(max_length=255, verbose_name='prenom pere')),
('etatP', models.CharField(choices=[('D', 'décédé(e)'), ('V', 'vivant(e)')], max_length=200, verbose_name='état pere')),
('professionP', models.CharField(max_length=255, verbose_name='Profession pere')),
('santeP', models.CharField(max_length=200, null=True, verbose_name='sante Pere')),
('revenuP', models.FloatField(default=0.0, verbose_name='revenu Pere')),
('nomM', models.CharField(max_length=255, verbose_name='nom Mère')),
('prenomM', models.CharField(max_length=255, verbose_name='prenom Mère')),
('etatM', models.CharField(choices=[('D', 'décédé(e)'), ('V', 'vivant(e)')], max_length=200, verbose_name='état Mère')),
('professionM', models.CharField(max_length=255, verbose_name='Profession Mère')),
('santeM', models.CharField(max_length=200, null=True, verbose_name='sante Mère')),
('revenuM', models.FloatField(default=0.0, verbose_name='revenu Mère')),
('nmbreEnfant', models.IntegerField(default=0, verbose_name='nombre enfant')),
],
options={
'verbose_name': 'famille',
'ordering': ['revenuP'],
},
),
]
| UTF-8 | Python | false | false | 2,257 | py | 7 | 0001_initial.py | 6 | 0.558181 | 0.534998 | 0 | 47 | 46.723404 | 136 |
juforg/wntc.alfredworkflow | 13,056,700,594,493 | c1285dbaf13f1b3abfcbf848e312a621c905e178 | c829569f18004175119bf96ccaeba1d9144055e3 | /util.py | 1cb0ab4a11b20d66b762b7a5727984303100a5d4 | [
"Apache-2.0"
] | permissive | https://github.com/juforg/wntc.alfredworkflow | e06fa41ae40c64fac834ee226914c8047a51da30 | 9f1ddbd83727a9e972fd26fd63486065021a7ae2 | refs/heads/master | 2020-03-25T13:32:57.435392 | 2019-04-17T14:56:10 | 2019-04-17T14:56:10 | 143,831,496 | 16 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8
import os
import pip
import imp
import requests
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
debug = os.getenv('debug')=="true" and True or False
"""
OSS (Alibaba Cloud) configuration
"""
AccessKeyId = os.getenv('oss.AccessKeyId')
AccessKeySecret = os.getenv('oss.AccessKeySecret')
bucket_name = os.getenv('oss.bucket_name')
endpoint = os.getenv('oss.endpoint')
endpointurl = "http://%s" % endpoint
"""
COS (Tencent Cloud) configuration
"""
cos_bucket_name = os.getenv('cos_bucket_name')
cos_is_cdn = os.getenv('cos_is_cdn')
cos_cdn_domain = os.getenv('cos_cdn_domain')
cos_region = os.getenv('cos_region')
cos_secret_id = os.getenv('cos_secret_id')
cos_secret_key = os.getenv('cos_secret_key')
"""
imgur configuration
"""
imgur_client_id = os.getenv('imgur_client_id')
imgur_client_secret = os.getenv('imgur_client_secret')
imgur_access_token = os.getenv('imgur_access_token')
imgur_refresh_token = os.getenv('imgur_refresh_token')
imgur_use = os.getenv('imgur_use')
imgur_album = os.getenv('imgur_album')
porxyconf = os.getenv('porxyconf')
PROXY_LIST = {
'http': porxyconf ,
'https': porxyconf
}
credentials= []
def notice(msg, title="【万能图床】提示", subtitle=''):
    ''' notice message in notification center'''
os.system('osascript -e \'display notification "%s" with title "%s"\'' % (msg, title))
def install_and_load(package):
pip.main(['install', package])
f, fname, desc = imp.find_module(package)
return imp.load_module(package, f, fname, desc)
"""
Check whether the given cloud provider is configured correctly
"""
def checkConfig(yuncode):
if 'oss' == yuncode:
return checkOssConfig()
elif yuncode == 'cos':
return checkCosConfig()
elif yuncode == 'imgur':
return checkImgurConfig()
else:
return False
"""
Get the codes of all correctly configured cloud providers
"""
def getAllConfiged():
list = []
if (checkOssConfig()):
list.append('oss')
if (checkCosConfig()):
list.append('cos')
if (checkImgurConfig()):
list.append('imgur')
return list
"""检查阿里云云配置是否配全"""
def checkOssConfig():
if (AccessKeyId is not None
and AccessKeySecret is not None
and bucket_name is not None
and endpoint is not None
):
return True
else:
return False
"""检查腾讯云配置是否配全"""
def checkCosConfig():
if (cos_bucket_name is not None
and cos_is_cdn is not None
and cos_region is not None
and cos_secret_id is not None
and cos_secret_key is not None
):
return True
else:
return False
"""检查imgur配置是否配全"""
def checkImgurConfig():
if (((imgur_use is not None
and imgur_use == 'true' )or imgur_use is None)
and imgur_client_id is not None
and imgur_client_secret is not None
and imgur_access_token is not None
and imgur_refresh_token is not None
):
return True
else:
return False
"""
Upload to Alibaba Cloud OSS
"""
def uploadOssObj(objtype, name, obj):
try:
import oss2
except:
oss2 = install_and_load('oss2')
auth = oss2.Auth(AccessKeyId, AccessKeySecret)
bucket = oss2.Bucket(auth, endpointurl, bucket_name)
if debug : notice("上传到阿里云!%s %s" % (endpointurl,bucket_name))
if ('localfile' == objtype):
bucket.put_object_from_file(name, obj)
elif 'url' == objtype:
input = requests.get(obj)
bucket.put_object(name, input)
else:
if debug: notice("阿里云不支持【%s】上传" % objtype)
def getOssMKurl(upload_name):
return '' % (bucket_name,endpoint,upload_name)
"""
Upload to Tencent Cloud COS
"""
def uploadCosObj(objtype, name, obj):
token = ''
config = CosConfig(Region=cos_region, Secret_id=cos_secret_id, Secret_key=cos_secret_key, Token=token)
    # 2. Get the client object
client = CosS3Client(config)
if debug: notice("上传到腾讯云!%s" % ( cos_bucket_name))
if ('localfile' == objtype):
response = client.put_object_from_local_file(
Bucket=cos_bucket_name,
LocalFilePath=obj,
Key=name,
)
if debug: notice("上传到腾讯云返回:%s" % (response))
elif 'url' == objtype:
if debug: notice("腾讯云不支持url上传" )
else:
if debug: notice("腾讯云不支持【%s】上传" % objtype)
def getCosMKurl(upload_name):
if 'true' == cos_is_cdn:
return '' % (cos_bucket_name,cos_cdn_domain,upload_name)
else:
return '' % (cos_bucket_name,upload_name)
"""
Upload to imgur
"""
def uploadImgurObj(objtype, name, obj):
from imgurpython import ImgurClient
import time
client = ImgurClient(imgur_client_id, imgur_client_secret,access_token=imgur_access_token,refresh_token=imgur_refresh_token,proxies=PROXY_LIST)
if debug: notice("上传到imgur!%s" % (imgur_album))
config = {
'album': imgur_album,
'name': name,
'title': name,
'description': '{0} 通过wntc万能图床上传 https://github.com/juforg/wntc.alfredworkflow'.format(time.strftime('%Y/%-m/%-d %H:%M:%S',time.localtime(time.time())))
}
if ('localfile' == objtype):
image = client.upload_from_path(obj, config=config, anon=False)
# print(json.dumps(image))
if debug: notice("上传到imgur返回:%s" % (response))
elif 'url' == objtype:
if debug: notice("imgur不支持url上传" )
else:
if debug: notice("imgur不支持【%s】上传" % objtype)
return '' % (image['link'])
def get_input(string):
''' Get input from console regardless of python 2 or 3 '''
try:
return raw_input(string)
except:
return input(string)
if __name__ == "__main__":
try:
import oss2
except:
print("err p")
| UTF-8 | Python | false | false | 5,973 | py | 5 | util.py | 3 | 0.618709 | 0.616397 | 0 | 209 | 25.904306 | 160 |
dijkstra007/manatal_challenge | 17,076,790,010,332 | cd2e8ec66e1a73fcfc2a910fcd9ce1019c2ca917 | 7e9c6f2a6e5ccb85de7383e4d171f65de36d60f8 | /schools/utils.py | 4c1c435afc46c0d786a866e5b3d8fe950e61ef1b | [] | no_license | https://github.com/dijkstra007/manatal_challenge | 03e7360403b779793d5fc55de37902b8ac4f016e | c02519989eff59099f9caa77be7d73bf3a370a17 | refs/heads/master | 2022-09-21T01:14:27.848401 | 2020-05-31T20:07:45 | 2020-05-31T20:07:45 | 268,277,088 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .models import School
def get_n_students_and_max_students(school):
n_students = Student.objects.filter(
school=school).count()
max_students = School.objects.get(
pk=school.id).max_student
    return n_students, max_students
| UTF-8 | Python | false | false | 255 | py | 11 | utils.py | 8 | 0.690196 | 0.690196 | 0 | 10 | 24.6 | 44
erikwestlund/blog | 6,571,300,003,060 | c68170dd7b43e7b79ce807d506296ab125bedcf0 | 7a79f8d30f764c56f4fdaa4cc07cdd00596183df | /app/posts/views/tagged_posts.py | 969256e1ca519924ed21e4bcf113a1eadd286127 | [] | no_license | https://github.com/erikwestlund/blog | 621deb845789365627140fd5cc9d8b94dc503684 | a3880dceaa7a08dcfb629db78fc2ffe6baf01c36 | refs/heads/master | 2023-01-13T21:41:46.771208 | 2019-03-13T18:03:52 | 2019-03-13T18:03:52 | 55,434,904 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import current_app, render_template
from flask import request
from flask.views import MethodView
from main.models.tag import Tag
from sqlalchemy import desc
from sqlalchemy.sql.functions import now
from posts.models.post import Post
from utils.models.find_or_fail import find_or_fail
from utils.models.paginated_json_response import paginated_json_response
class FetchTaggedPosts(MethodView):
def get(self, slug):
tag = find_or_fail(Tag, Tag.slug == slug)
per_page = int(current_app.config["POSTS_PER_PAGE"])
page = int(request.args.get("page")) if request.args.get("page") else 1
query = (
Post.query.filter(Post.published_at <= now())
.filter(Post.tags.any(slug=slug))
.order_by(desc("published_at"))
)
return paginated_json_response(query=query, per_page=per_page, page=page)
class ShowTaggedPosts(MethodView):
def get(self, slug):
tag = find_or_fail(Tag, Tag.slug == slug)
page = request.args.get("page") or 1
return render_template(
"posts/tagged_posts.html", tag=tag, page=page, title=tag.name
)
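# Hypothetical URL wiring for the two views above (the real routes live in
# the blueprint/app factory elsewhere in this project; paths and endpoint
# names here are assumptions):
# app.add_url_rule('/tags/<slug>', view_func=ShowTaggedPosts.as_view('tagged_posts'))
# app.add_url_rule('/tags/<slug>/posts', view_func=FetchTaggedPosts.as_view('fetch_tagged_posts'))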
| UTF-8 | Python | false | false | 1,159 | py | 168 | tagged_posts.py | 84 | 0.66868 | 0.666954 | 0 | 37 | 30.324324 | 81 |
kush99993s/kiva-DefaultFinder | 5,007,931,903,777 | 9da19abd9e4257d2284309153de976f4d76ec736 | d6172b7ed2b0d16ae153cfa02ea73f623a6662c0 | /code/data_cleaning.py | cb82e2d9af55f797f328dc7145d495d66c499983 | [] | no_license | https://github.com/kush99993s/kiva-DefaultFinder | 5e556d150f5db5e4d5d3bfc8b4f9e88a06dc2615 | 400acff350d9006ae0469ed1d69f2882018e3148 | refs/heads/master | 2020-05-18T18:30:01.206625 | 2015-08-18T20:51:10 | 2015-08-18T20:51:10 | 40,996,170 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
from data_collection import DataCollection
import cPickle
import os.path
class Cleaning(object):
def __init__(self, number_file_read=2):
"""
Preprocess data for EDA and model building
"""
data = DataCollection()
dict_ = data.read_file(number_file_read)
# Convert the data from a dict to pandas df
self.df = self.convert_to_data_frame(dict_)
# Encoding string info to numeric for model building
self.country_code_dict = {}
self.town_dict = {}
self.sector_dict = {}
self.theme_dict = {}
self.geo_level_dict = {}
self.activity_dict = {}
self.repayment_interval_dict = {}
self.status_dic = {}
# Decoding numeric values to string values (i.e. country, activity, etc)
self.country_code_list = list(self.df.country_code.unique())
self.town_list= list(self.df.town.unique())
self.sector_list= list(self.df.sector.unique())
self.theme_list= list(self.df.theme.unique())
self.geo_level_list= list(self.df.geo_level.unique())
self.activity_list = list(self.df.activity.unique())
self.repayment_interval_list = list(self.df.repayment_interval.unique())
self.status_list=(self.df.status.unique())
# This will fill the dictinary to encode string values
self.fill_dictionarys()
self.change_all_variable()
def convert_to_data_frame(self, dictionary_):
"""
Data is stored in JSON, need to transfer into DataFrame for better EDA
"""
df = pd.DataFrame(dictionary_)
return df
def fill_dictionarys(self):
'''
        Map each feature's unique text values to integer codes in a dictionary for easy access
'''
for i, j in enumerate(self.df.country_code.unique()):
self.country_code_dict[j] = i
for i, j in enumerate(self.df.town.unique()):
self.town_dict[j] = i
for i, j in enumerate(self.df.sector.unique()):
self.sector_dict[j] = i
for i, j in enumerate(self.df.theme.unique()):
self.theme_dict[j] = i
for i, j in enumerate(self.df.geo_level.unique()):
self.geo_level_dict[j] = i
for i, j in enumerate(self.df.activity.unique()):
self.activity_dict[j] = i
for i, j in enumerate(self.df.repayment_interval.unique()):
self.repayment_interval_dict[j] = i
for i, j in enumerate(self.df.status.unique()):
self.status_dic[j] = i
def change_all_variable(self):
'''
        All features are text; convert them to numerical values for use in the model and EDA
'''
self.change_gender_number()
self.change_picture()
self.replace_values()
self.delete_columns()
self.change_text()
def replace_values(self):
self.df.country_code = self.df.country_code.apply(lambda x : self.change_country_code(x))
self.df.town = self.df.town.apply(lambda x : self.change_town(x))
self.df.sector = self.df.sector.apply(lambda x : self.change_sector(x))
self.df.theme = self.df.theme.apply(lambda x : self.change_theme(x))
self.df.geo_level = self.df.geo_level.apply(lambda x : self.change_geo_level(x))
self.df.activity = self.df.activity.apply(lambda x : self.chage_activity(x))
self.df.repayment_interval = self.df.repayment_interval.apply(lambda x: self.change_repayment_interval(x))
self.df.status = self.df.status.apply(lambda x : self.change_status(x))
self.df.basket_amount = self.df.basket_amount.fillna(-2)
self.df.repayment_term = self.df.repayment_term.fillna(-2)
def delete_columns(self):
'''
        Remove unnecessary columns from the data
'''
del self.df["bulkEntries"]
del self.df["tags"]
del self.df["bonus_credit_eligibility"]
del self.df["use"]
del self.df["video"]
del self.df["gender"]
def text_change(self,x):
'''
encode text into ascii format
'''
try:
return x.encode("ascii", "ignore").replace("<i>", " ").replace("</i>", " ").replace("<br>", " ").replace("</br>", " ")
except:
None
def change_text(self):
self.df['description'] = self.df['description'].apply(lambda x: self.text_change(x) )
def find_male(self,x):
temp_m = 0
for j in range(len(x)):
if x[j] == "M":
temp_m +=1
return temp_m
def find_female(self, x):
temp_f = 0
for j in range(len(x)):
if x[j] == "F":
temp_f +=1
return temp_f
def change_gender_number(self):
self.df["num_male"] = self.df.gender.apply(lambda x: self.find_male(x))
self.df["num_female"] = self.df.gender.apply(lambda x: self.find_female(x))
self.df["male_ratio"] = self.df.num_male / self.df.num_borrowers
def number_picture(self, x):
number_picture_ = 0
for j in range(len(x)):
if x[j] == True:
number_picture_ += 1
return number_picture_
def change_picture(self):
'''
        Count how many borrowers appear in the picture
'''
self.df["number_of_picture"] = self.df.has_picture.apply(lambda x: self.number_picture(x))
self.df["ratio_of_picture"] = self.df["number_of_picture"]/self.df.num_borrowers
del self.df["has_picture"]
def change_country_code(self, x):
'''
Changing country code to number
'''
if x in self.country_code_dict:
return self.country_code_dict[x]
else:
return -2
def change_town(self, x):
'''
changing town name to number
'''
if x in self.town_dict:
return self.town_dict[x]
else:
return -2
def change_sector(self,x ):
'''
changing categorical sector text to number
'''
if x in self.sector_dict:
return self.sector_dict[x]
else:
return -2
def change_theme(self, x):
if x in self.theme_dict:
return self.theme_dict[x]
else:
return -2
def change_geo_level(self, x):
if x in self.geo_level_dict:
return self.geo_level_dict[x]
else:
return -2
def chage_activity(self, x):
if x in self.activity_dict:
return self.activity_dict[x]
else:
return -2
def change_repayment_interval(self, x):
if x in self.repayment_interval_dict:
return self.repayment_interval_dict[x]
else:
return -2
def change_status(self, x):
if x in self.status_dic:
return self.status_dic[x]
else:
return -2
class Saving(object):
'''
    Save the preprocessed data to a file for easy access, and save the lists
    needed to decode encoded columns back to their original values when required
'''
def __init__(self, number_file_read=2):
clean = Cleaning(number_file_read)
self.df = clean.df
self.country_code_list = clean.country_code_list
self.town_list = clean.town_list
self.sector_list = clean.sector_list
self.theme_list = clean.theme_list
self.geo_level_list = clean.geo_level_list
self.activity_list = clean.activity_list
self.repayment_interval_list = clean.repayment_interval_list
self.status_list = clean.status_list
self.number_of_files = number_file_read
def save_files(self):
'''
        Save to file only the loans that are either defaulted or not defaulted (the two outcome classes)
'''
condition_1 = self.df.status == 1
condition_2 = self.df.status == 0
self.df_new = self.df[condition_1 | condition_2]
self.df_new.to_csv("/home/patanjalichanakya/Documents/Galvanize/find_defaulter/data/%s_two_option.csv" %self.number_of_files, index=False)
dict_ = {"country_code_list":self.country_code_list, "town_list":self.town_list, "sector_list":self.sector_list, "theme_list": self.theme_list, "geo_level_list":self.geo_level_list,\
"activity_list":self.activity_list, "repayment_interval_list": self.repayment_interval_list, "status_list":self.status_list}
for file_name, content in dict_.iteritems():
with open("/home/patanjalichanakya/Documents/Galvanize/find_defaulter/data/%s_%s.pickle" %(file_name, self.number_of_files), 'w') as f:
cPickle.dump(content, f)
class OpenFile(object):
'''
open file tool
'''
def __init__(self):
self.isfile_ = os.path.isfile("/home/patanjalichanakya/Documents/Galvanize/find_defaulter/data/1638.csv")
def openfile(self):
if self.isfile_:
return pd.read_csv("/home/patanjalichanakya/Documents/Galvanize/find_defaulter/data/1638.csv")
else:
saving = Saving(number_file_read = 1638)
saving.save_files()
return pd.read_csv("/home/patanjalichanakya/Documents/Galvanize/find_defaulter/data/1638.csv")
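# Typical entry point (illustrative only): load the cleaned two-class CSV,
# rebuilding it from the raw files on first use.
if __name__ == "__main__":
    cleaned_df = OpenFile().openfile()
    print cleaned_df.shape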
| UTF-8 | Python | false | false | 8,044 | py | 24 | data_cleaning.py | 6 | 0.674043 | 0.66907 | 0 | 260 | 29.938462 | 184 |
Angingun/learn_python | 13,692,355,784,666 | 838ac43ea05942d29441f8c5bd8d7a541c4d3837 | 3f8c980b5a6315569f2a77d3da7540f347f127fb | /day9.1.py | ea34ff1ef08d98bccdb0456478f7873830690883 | [] | no_license | https://github.com/Angingun/learn_python | 930d11f6dd5a830788915301273cff3b6f00fc2f | 77d4fe0a43452d2e6907799caaf92604fb680c90 | refs/heads/master | 2020-07-08T07:54:03.571610 | 2019-08-21T15:07:29 | 2019-08-21T15:24:45 | 203,609,851 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 奥特曼打小怪兽
from abc import ABCMeta, abstractclassmethod
from random import randrange
class Fight(object, metaclass=ABCMeta):
__slots__ = ('_name', '_hp')
def __init__(self, name, hp):
self._name = name
self._hp = hp
@property
def name(self):
return self._name
@property
def hp(self):
return self._hp
@hp.setter
def hp(self, hp):
        self._hp = hp if hp >= 0 else 0
@property
def alive(self):
return self._hp > 0
    @abstractmethod
def attack(self, other):
pass
class UtralMan(Fight):
    __slots__ = ('_mp',)
def __init__(self, name, hp, mp):
super().__init__(name, hp)
self._mp = mp
def attack(self, other):
other.hp -= randrange(15, 25)
def huge_attack(self, other):
if self._mp >= 50:
self._mp -= 50
injury = other.hp * 3 // 4
injury = injury if injury >= 50 else 50
other.hp -= injury
return True
else:
            self.attack(other)
return False
def magic_attack(self, others):
if self._mp >= 20:
self._mp -= 20
for temp in others:
if temp.alive:
temp.hp -= randrange(10, 15)
return True
else:
return False
def resume(self):
incr_point = randrange(1, 10)
self._mp += incr_point
return incr_point
def __str__(self):
return '~~~{}奥特曼~~~\n'.format(self._name) + \
'生命值: {}\n'.format(self._hp) + \
'魔法值:{}\n'.format(self._mp)
# uncompleted
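# Quick demo of the classes above (illustrative; the monster class the
# exercise calls for is still missing, so two Ultramans spar instead):
if __name__ == '__main__':
    u1 = UtralMan('Dyna', 200, 120)
    u2 = UtralMan('Tiga', 200, 120)
    u1.attack(u2)        # plain attack costs no MP
    u1.huge_attack(u2)   # spends 50 MP when available
    print(u2.hp, u2.alive)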
| UTF-8 | Python | false | false | 1,768 | py | 8 | day9.1.py | 7 | 0.476932 | 0.460784 | 0 | 76 | 21.802632 | 53 |
akariv/hasadna-data | 1,537,598,304,928 | bd975e172bea413834b046dc782cc65c23fa44a0 | 96948986fae74d4fb8339719569247ed32fb37fc | /tools/loaders/json_loader.py | 6116700a868175b167d76b5afbc29c2fe52238ee | [] | no_license | https://github.com/akariv/hasadna-data | 3e04edabe11c0de3a0f728a2b2fd089d223d0e1c | fa5b1182090277f0501761b558c12c4938c8bd2a | HEAD | 2016-09-06T01:23:21.643153 | 2011-12-03T17:49:46 | 2011-12-03T17:49:46 | 1,591,832 | 1 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null | from base import Loader
import json
@Loader.loader
class JsonLoader(Loader):
@classmethod
def condition(cls,filename):
return filename.endswith('.json')
def initialize(self,input_file):
self.data = json.load(input_file)
def get_rows(self):
return self.data
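# Sketch of direct use (construction details of the Loader base class live
# in `base` and are assumed here):
# loader = JsonLoader()
# with open('records.json') as f:
#     loader.initialize(f)
# for row in loader.get_rows():
#     print(row)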
| UTF-8 | Python | false | false | 313 | py | 58 | json_loader.py | 12 | 0.645367 | 0.645367 | 0 | 15 | 19.866667 | 41 |
johnpcooke94/project-scrumger-games | 3,891,240,370,410 | b60711affb032386a1ed5342de0057d817705862 | 6279825ecafd178352e41905f2d38979a363d483 | /src/Sprites/riverbank.py | c5525bbd5df66841fb58e35cd5a6abd335271451 | [
"MIT"
] | permissive | https://github.com/johnpcooke94/project-scrumger-games | e6fe386c0d25a515e7e96e2c885622505d61d8de | d8c6bad99ccaf10d3cca05b6fc44799e2f46ad2a | refs/heads/main | 2023-04-10T23:40:39.178191 | 2021-04-20T00:26:46 | 2021-04-20T00:26:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame.sprite
class Riverbank(pygame.sprite.Sprite):
"""
Pygame sprite class for riverbank sprites used for killing the player
"""
# Constructor should be passed an int to indicate which position the sprite should go in
def __init__(self, pos):
"""
- :param pos:
An int representing the "index" of the riverbank. Should be 0-5, with 0 representing the left-most riverbank
and 5 representing the right-most bank.
"""
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([70, 50])
self.image.fill((255, 255, 255))
self.rect = self.image.get_rect()
self.rect.y = 40
self.set_pos(pos)
def set_pos(self, pos):
"""
Helper function called by constructor. Takes the position argument passed to the constructor and places the nest
in the proper slot.
- :param pos:
            An int representing the "index" of the riverbank. Should be 0-5, with 0 representing the left-most riverbank
and 5 representing the right-most bank.
- :return:
None
"""
if pos == 0:
self.rect.x = -15
elif pos == 1:
self.rect.x = 148
elif pos == 2:
self.rect.x = 310
elif pos == 3:
self.rect.x = 470
elif pos == 4:
self.rect.x = 630
elif pos == 5:
self.rect.x = 789
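# Minimal usage sketch (not part of the original game loop): build all six
# riverbanks the position index supports and group them for collision tests.
def build_riverbanks():
    return pygame.sprite.Group(*[Riverbank(pos) for pos in range(6)])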
| UTF-8 | Python | false | false | 1,472 | py | 42 | riverbank.py | 23 | 0.558424 | 0.527174 | 0 | 49 | 29.040816 | 120 |
Brainkackwitz/discord_botv2.4 | 6,038,724,066,179 | 4c842e4cc36de1ea4349ddcd9c507b16604a7201 | a7ec21d917d1e00bd7576b571a8745570172f8cd | /commands/info.py | 10ade59d58f6f37101ad71b48a133d79b4418cb6 | [] | no_license | https://github.com/Brainkackwitz/discord_botv2.4 | 65e215d41830690be500efc5f8dfa5fb05287d1b | 26682e4bbf775026312e8db4a6b3190a98e8651b | refs/heads/master | 2022-04-14T02:02:30.234834 | 2020-04-14T19:58:51 | 2020-04-14T19:58:51 | 255,712,901 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import discord
import asyncio
client = discord.Client()
from commands import STATICS
@asyncio.coroutine
def ex(args, message, client, invoke):
embed = discord.Embed(
title=STATICS.BOTNAME,
color=0xe67e22,
description="version: "+STATICS.description
)
embed.set_author(
name=STATICS.authorname,
icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/7/73/Crown_of_Savoy.svg/1280px-Crown_of_Savoy.svg.png",
url="https://discordapp.com/developers/applications "
)
embed.add_field(
name="ID",
value="327779958250405889",
)
embed.add_field(
name="Help",
value="?help",
inline=False
)
embed.set_footer(
text="Ein bot von Brainkackwitz"
)
embed.set_thumbnail(
url="https://www.welt.de/img/kultur/mobile160730744/9152508057-ci102l-w1024/Die-Wunderbare-Reise-Des-Kleinen-Nils-Holgersson.jpg"
)
yield from client.send_message(message.channel, embed=embed)
| UTF-8 | Python | false | false | 1,064 | py | 24 | info.py | 19 | 0.62688 | 0.574248 | 0 | 34 | 30.294118 | 139 |
eva-yanrong-chen/Nomadic | 2,327,872,321,129 | 8b5a1eaf7af78ddb79eb0a3e5b8e7eab0b2a719d | 3c21e9780aa5e2473b82a341f2d0b3ea841534ce | /migrations/versions/0d186a3e99aa_initial_migration.py | afebc544b8f6cdb3c9a9f8778ff1d50991d3dec1 | [
"MIT"
] | permissive | https://github.com/eva-yanrong-chen/Nomadic | 1dd2bffdcd47824523df7bb77bfeacab1da348a9 | bc56b59b7096eb9eaa6407dd5190a8dc268059f6 | refs/heads/master | 2021-03-02T16:42:00.362150 | 2020-03-11T08:06:52 | 2020-03-11T08:06:52 | 245,884,371 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Initial Migration
Revision ID: 0d186a3e99aa
Revises:
Create Date: 2020-03-11 01:48:46.635189
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0d186a3e99aa'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('client',
sa.Column('id', sa.INTEGER(),
autoincrement=True, nullable=False),
sa.Column('name', sa.VARCHAR(),
autoincrement=False, nullable=True),
sa.Column('description', sa.VARCHAR(),
autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='client_pkey'),
sa.UniqueConstraint('name', name='client_name_key')
)
op.create_table('artist',
sa.Column('id', sa.INTEGER(),
autoincrement=True, nullable=False),
sa.Column('name', sa.VARCHAR(),
autoincrement=False, nullable=True),
sa.Column('portfolio_link', sa.VARCHAR(),
autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='artist_pkey'),
sa.UniqueConstraint('name', name='artist_name_key')
)
op.create_table('project',
sa.Column('id', sa.INTEGER(),
autoincrement=True, nullable=False),
sa.Column('name', sa.VARCHAR(),
autoincrement=False, nullable=True),
sa.Column('client_id', sa.INTEGER(),
autoincrement=False, nullable=False),
sa.Column('is_open', sa.BOOLEAN(),
autoincrement=False, nullable=False),
sa.Column('description', sa.VARCHAR(),
autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
['client_id'], ['client.id'], name='project_client_id_fkey'),
sa.PrimaryKeyConstraint(
'id', 'client_id', name='project_pkey'),
sa.UniqueConstraint('name', name='project_name_key')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('project')
op.drop_table('artist')
op.drop_table('client')
# ### end Alembic commands ###
| UTF-8 | Python | false | false | 2,698 | py | 7 | 0d186a3e99aa_initial_migration.py | 5 | 0.501483 | 0.488881 | 0 | 66 | 39.878788 | 85 |
omkarpat/conversational-movie-recommender | 8,272,107,030,508 | 0fe0d6d185d973f6d953cf0c789ebf13ee1a0cae | d8b9ff20d403f2327ac892a7061fb532357cb170 | /annotation_utils.py | c704feb2f3c135e668a40d56d533be7c120ab9c3 | [
"MIT"
] | permissive | https://github.com/omkarpat/conversational-movie-recommender | 01944e9f32b7be250b599b267015b888450353e1 | 3ee3b4854bbab5eb4730a18972f5cfd5350d9bf4 | refs/heads/main | 2023-01-19T06:39:49.257416 | 2020-12-07T02:06:27 | 2020-12-07T02:06:27 | 307,568,046 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
class DBPedia(object):
# These correspond to labels that have been misrecognized
BLACKLIST_URIS = {"http://dbpedia.org/resource/Glossary_of_tennis_terms",
"http://dbpedia.org/resource/Good_Movie",
"http://dbpedia.org/resource/Sierra_Entertainment",
"http://dbpedia.org/resource/Nice",
"http://dbpedia.org/resource/Take_Care_(album)",
"http://dbpedia.org/resource/Cloning",
"http://dbpedia.org/resource/Blood",
"http://dbpedia.org/resource/Downhill_creep",
"http://dbpedia.org/resource/Movies",
"http://dbpedia.org/resource/Hey_There",
"http://dbpedia.org/resource/Swimming_(sport)",
"http://dbpedia.org/resource/Princess_Falls",
"http://dbpedia.org/resource/Haha_(entertainer)",
"http://dbpedia.org/resource/LOL",
"http://dbpedia.org/resource/Drag_queen",
"http://dbpedia.org/resource/Yea_Football_Club",
"http://dbpedia.org/resource/Oh_Yeah_(Yello_song)",
"http://dbpedia.org/resource/Scalable_Coherent_Interface",
"http://dbpedia.org/resource/CAN_bus",
"http://dbpedia.org/resource/The_New_One_(horse)",
"http://dbpedia.org/resource/Information_technology",
"http://dbpedia.org/resource/The_Glad_Products_Company",
"http://dbpedia.org/resource/AM_broadcasting",
"http://dbpedia.org/resource/To_Heart",
"http://dbpedia.org/resource/National_Organization_for_Women",
"http://dbpedia.org/resource/Hit_or_Miss_(New_Found_Glory_song)",
"http://dbpedia.org/resource/Canada",
"http://dbpedia.org/resource/Different_Things",
"http://dbpedia.org/resource/Norwegian_Trekking_Association",
"http://dbpedia.org/resource/Take_One_(Canadian_magazine)",
"http://dbpedia.org/resource/For_Inspiration_and_Recognition_of_Science_and_Technology",
"http://dbpedia.org/resource/Two_Guys",
"http://dbpedia.org/resource/The_Sydney_Morning_Herald",
"http://dbpedia.org/resource/Booting",
"http://dbpedia.org/resource/Precious_Time_(album)",
"http://dbpedia.org/resource/I\\u0027m_Glad",
"http://dbpedia.org/resource/Social_Democratic_Party_of_Switzerland",
"http://dbpedia.org/resource/International_Maritime_Organization",
"http://dbpedia.org/resource/LOL",
"http://dbpedia.org/resource/Names_of_God_in_Judaism",
"http://dbpedia.org/resource/Ike_Turner",
"http://dbpedia.org/resource/Tricky_Stewart",
"http://dbpedia.org/resource/Movies!",
}
multi_spaces_pattern = re.compile(r"\s+")
def process_text(text):
        return multi_spaces_pattern.sub(" ", text.capitalize())
| UTF-8 | Python | false | false | 2,540 | py | 50 | annotation_utils.py | 24 | 0.698819 | 0.697244 | 0 | 53 | 46.943396 | 92
pythonCore24062021/TA | 335,007,456,554 | b8af288dcd32093cad5a46b2a889f9740c410339 | 4600ab3681aa1fa9609fdc6f1a96e1d53a95cd6f | /HW/achornyi/ta_solution/ui/pages/home_page/_locators.py | 15932f862cb3439e2aa75d50a8075d3d8ae3cd8f | [] | no_license | https://github.com/pythonCore24062021/TA | 039ce203e6d150db84d85b6c06e480b8174af609 | e22f90f43d2770e44f35e4cf4ad38af970e97ae5 | refs/heads/main | 2023-08-25T20:52:38.566403 | 2021-10-20T13:45:56 | 2021-10-20T13:45:56 | 394,335,749 | 0 | 0 | null | false | 2021-10-20T13:45:34 | 2021-08-09T15:05:43 | 2021-10-20T13:44:26 | 2021-10-20T13:45:34 | 21,768 | 0 | 0 | 2 | Python | false | false | RELATIVE_PATH = "index.php?route=common/home"
PAGE_TITLE = "Your Store"
| UTF-8 | Python | false | false | 72 | py | 125 | _locators.py | 123 | 0.736111 | 0.736111 | 0 | 2 | 35 | 45 |
qyqx/TriAquae | 9,251,359,600,467 | fd0f7836b9bf36d67e1114564246f55d934fd288 | 707d0d155aec7cd0506cb3e07920af7b865f7349 | /include/serverList.py | a5fc87c43fede44ae80e587d8e181ee46d7e0809 | [] | no_license | https://github.com/qyqx/TriAquae | 5d7883cdd319d0cac4c23432fcd77890a0226eb5 | 17c00af6f74b588ab7bbc1e638ef16b49185d2e6 | refs/heads/master | 2021-01-01T06:57:32.903697 | 2013-04-03T02:38:35 | 2013-04-03T02:38:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import os,sys,global_env
DelimiterLine = '''\033[32;49;1m Powered by TriAquae
________________________________________________________________________________\033[0m'''
WorkDir=global_env.WorkDir
GroupDir = '%s/conf/server_list' % WorkDir
GroupList = os.listdir(GroupDir)
CountGroups = len(GroupList)
#TotalMenuNumber = 0
RecordLog = global_env.g_env()
class Group:
def ListGroup(self,Show_List = 0,Total_M_Number = 0,Dictionary=0):
GroupList = os.listdir(GroupDir)
global ServerDic
ServerDic = {}
G_num = 0
global Dic
Dic = {}
for group in GroupList:
Dic[ G_num ] = group # Add Group list to Dic dictionary
G_num = G_num + 1
if Show_List == 1:
print DelimiterLine,'\n'
for key in Dic:
ServerNum='%s/%s' %(GroupDir,Dic[key])
count = len(open(ServerNum,'rb').readlines( ))
print '\033[32;40;1m%s. %s [ \033[0m\033[36;1m%s \033[0m\033[32;1m]\033[0m ' % (key,Dic[key],count) # Print Dic list
#print '\033[32;40;1m%s. All the Servers (still under development...)\033[0m ' % G_num
print ' '
print '%s. Modify Group ' % (G_num + 1 )
print '%s. Add/Delete Server' % (G_num + 2)
print '%s. Upload server list' % (G_num + 3)
print DelimiterLine
global TotalMenuNumber
TotalMenuNumber = G_num + 3
if Total_M_Number == 1:
return TotalMenuNumber
if Dictionary == 1:return len(Dic)
#print DelimiterLine
def ChooseGroup(self,Str_Number,Show_G_name=0):
print DelimiterLine
Number = int(Str_Number)
if Number < CountGroups:
print 'Servers in \033[36;1m%s\033[0m\n' % Dic[Number]
if Show_G_name == 0:
Group().ListGroupContent(Number) # Print server list
else:
pass
print DelimiterLine
return Dic[Number] # return the group name
elif Number == TotalMenuNumber: # Modify Group option
print 'The group list file is under directory %s,you can modify the group content through edit those files ' % GroupDir
print DelimiterLine
def ListGroupContent(self,GroupNum):
GroupFileName = '%s/%s' % (GroupDir, Dic[GroupNum])
f = file(GroupFileName)
#print f.read()
S_num = 0
print '\033[36;1mID IP Address Username\033[0m'
while True:
line = f.readline()
if len(line) == 0:
break
if line.startswith('#') is True:continue
IP=line.split()[0]
User = line.split()[1]
ServerDic[ S_num ] = IP # Add server item to ServerDic dictionary
print '%s. %s %s ' %(S_num,ServerDic[S_num],User)
S_num = S_num + 1
#for S_key in ServerDic:
# print '%s. %s ' % (S_key, ServerDic[ S_key ]) # Print server list line
f.close()
def ChooseServerRange(self,StartIP,EndIP):
try:
if StartIP < EndIP:
print DelimiterLine
for i in range(StartIP,EndIP):
print i,'.', ServerDic[i]
except KeyError:
            print '\033[32;40;1m Error: Out of range.\033[0m'
def ListVariable(self,Vname):
#TotalMenuNumber = Group().ListGroup.TotalMenuNumber
print Vname
def AddGroup(self,G_name):
NewGroupName = '%s/Group_%s' % (GroupDir,G_name)
F = file(NewGroupName,'w')
print 'Created group %s successful.' % G_name
RecordLog.op_log('Created new group : \033[36;1m %s \033[0m' % G_name)
#GroupDicKey = Group().ListGroup(0,0,1)
#Dic[GroupDicKey ] = NewGroupName
AddServer = raw_input('Do you want to add new server to Group \033[32;40;1m%s\033[0m [ Y / N ]:' % G_name)
if AddServer == 'Y' or AddServer == 'y':
while True:
NewServerIP2 = raw_input('Input new server IP or Hostname:')
if len(NewServerIP2) == 0:continue
while True:
                IP2_user = raw_input("Input new server's username:")
                if len(IP2_user) == 0:continue
                IP2_pass = raw_input("Input new server's password:")
if len(IP2_pass) == 0:
print 'Error: password cannot be empty,please try again.'
continue
else:break
NewServerIP = '%s %s %s \n' % (NewServerIP2,IP2_user,IP2_pass) #Separate the IP into different line
F.write(NewServerIP)
RecordLog.op_log('Add new server %s to group %s' %(NewServerIP2,G_name))
print 'New server \033[32;40;1m%s\033[0m added successfully.' % NewServerIP2
KeepAddServer = raw_input('Keep on adding new server?:[ Y / N ]')
if KeepAddServer == 'Y' or KeepAddServer == 'y':continue
else:break
F.flush()
F.close()
def DelGroup(self,G_name):
GroupList =os.listdir(GroupDir)
IsFileExist = G_name in GroupList
if IsFileExist is True:
print G_name
D_option = raw_input('Are you sure you want to delete group\033[32;40;1m%s\033[0m [ Y / N ]: ' % G_name)
if D_option == 'Y':
GroupFile='%s/%s' % (GroupDir,G_name)
os.system('rm -rf %s' % GroupFile)
#del Dic[2]
#print Dic
print 'Deleted group %s\n ' % G_name
RecordLog.op_log('Deleted Group %s' % G_name)
#print '\033[31;40;1mGroup will be deleted from list after re-login.\033[0m'
else:
print '\033[33;40;1mNo action,back to main menu.\033[0m'
else:
print '\n\033[31;49;1mError+++: Wrong group name,check again.\033[0m\n'
def RenameGroup(self):
#while True:
print '---------------'
LoadSuccess=False
while not LoadSuccess:
Group_name=raw_input('Which group name do you want to change?:')
GroupFile='%s/%s' %(GroupDir,Group_name)
try:
file(GroupFile)
LoadSuccess=True
except IOError:
print '\033[31;1mGroup name not exist\033[0m'
while True:
NewGroupName2=raw_input('New name:')
NewGroupName = NewGroupName2.strip()
if len(NewGroupName)==0:continue
New_G_file='%s/%s' %(GroupDir,NewGroupName)
GroupRename='mv %s %s' %(GroupFile,New_G_file)
os.system(GroupRename)
msg= '\033[36;1m group name of %s changed to %s\033[0m' %(Group_name,NewGroupName)
print msg
RecordLog.op_log(msg)
break
def UploadServerList(self,GroupName,UploadList,update_option):
#try:
print 'Uploading server list-------------------------------------------'
NewList = file(UploadList)
OldList = file(GroupName,update_option)
while True:
line = NewList.readline()
if len(line) == 0:break
if line.startswith('#') is True:continue
OldList.write(line)
print line,
print 'New server list has successfully uploaded.'
NewList.close()
OldList.close()
def ServerManage(self,option='Add'):
LoadSucess=False;
while not LoadSucess:
try:
GroupName=raw_input('\nInput Group name which the server is in:')
GroupFile='%s/conf/server_list/%s' %(WorkDir,GroupName)
file(GroupFile)
print '\033[32;40;1m------------------------Server List----------------------------\033[0m'
os.system("cat %s|awk '{print $1}'" %GroupFile)
LoadSucess=True
except IOError:
print '\n\033[31;40;1mNo such group name found,please try again.\033[0m'
if option == 'Add':
f=file(GroupFile,'a')
while True:
NewServerIP2 = raw_input('Input new server IP or Hostname:')
if len(NewServerIP2) == 0:continue
while True:
                    IP2_user = raw_input("Input new server's username:")
                    if len(IP2_user) == 0:continue
                    IP2_pass = raw_input("Input new server's password:")
if len(IP2_pass) == 0:
print 'Error: password cannot be empty,please try again.'
continue
else:break
NewServerIP = '%s %s %s \n' % (NewServerIP2,IP2_user,IP2_pass) #Separate the IP into different line
f.write(NewServerIP)
RecordLog.op_log('Add new server %s to group %s' %(NewServerIP2,GroupName))
print 'New server \033[32;40;1m%s\033[0m added successfully.' % NewServerIP2
KeepAddServer = raw_input('Keep on adding new server?:[ Y / N ]')
if KeepAddServer == 'Y' or KeepAddServer == 'y':continue
else:
f.close()
break
elif option == 'Del':
#f=file(GroupFile)
while True:
f=file(GroupFile)
                print '\n\033[33;40;1mNotice: All matched IP addresses will be deleted, be careful\033[0m'
IP=raw_input('Input the server IP which you want to delete:')
if len(IP) ==0:continue
NotMatchedRow = 0
while True:
line = f.readline()
if len(line) ==0:break
OldIP=line.split()[0]
if IP == OldIP:
os.system("grep ^%s %s|awk '{print $1}'" %(IP,GroupFile))
MatchNumbers=os.system("grep ^%s %s|wc -l|xargs echo -e '\033[33;40;1mmatched rows:\033[0m'" %(IP,GroupFile))
DelAllMatches = raw_input('Do you want to delete all the matched rows?(y/n)')
#NotMatchedRow = -1 # set NoteMatchedRow < 0
if DelAllMatches == 'y':
NotMatchedRow = -1 # set NoteMatchedRow < 0
DelIP = "sed -i '/%s/d' %s" %(IP,GroupFile)
os.system(DelIP)
else:break
msg = 'User deleted server from group %s' %GroupName
RecordLog.op_log(msg)
print 'IP has been deleted from group %s ' %GroupName
else:
NotMatchedRow += 1
if NotMatchedRow > 0: print '\033[33;1m 0 matched rows!\033[0m'
#A=Group()
#A.ServerManage('Del')
#except:
# print 'err'
#A = Group()
#A.UploadServerList('Group_2','/tmp/sh.list','a')
#A.ListGroup()
#A.ChooseGroup(1)
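# Example interactive session (assumes conf/server_list exists under
# WorkDir, as global_env provides above):
# g = Group()
# g.ListGroup(Show_List=1)   # print every group plus the action menu
# g.ChooseGroup('0')         # list the servers of the first group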
| UTF-8 | Python | false | false | 9,469 | py | 19 | serverList.py | 17 | 0.596367 | 0.561411 | 0 | 246 | 37.49187 | 131 |
lukysummer/Sentence-Segmentation-w-Seq2Seq | 16,484,084,502,646 | 35a2ede4fcfad45f7af861978f410956c71316a7 | 54e388783cd07582d9da66247381513e173e1482 | /Seq2Seq.py | a5168c03d633d30491045496b30705f7e70b04a9 | [] | no_license | https://github.com/lukysummer/Sentence-Segmentation-w-Seq2Seq | e7980fca4aa4851f9b1b8ad9229dfa429902dc42 | eedc7ced16f62e859fe7313a571c101e3cc59ce4 | refs/heads/master | 2020-09-10T22:36:03.360834 | 2019-11-15T05:52:39 | 2019-11-15T05:52:39 | 221,853,886 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
import random
from .Encoder import Encoder
from .Decoder import Decoder
class Seq2Seq(nn.Module):
def __init__(self, n_vocab, #n_speaker, n_tags,
n_embed_text, #n_embed_speaker, n_embed_tags,
n_embed_dec,
n_hidden_enc, n_hidden_dec, n_layers,
n_output, dropout):
super().__init__()
self.encoder = Encoder(n_vocab=n_vocab, #n_speaker=n_speaker, n_tags=n_tags,
n_embed_text=n_embed_text, #n_embed_speaker=n_embed_speaker, n_embed_tags=n_embed_tags,
n_hidden_enc=n_hidden_enc, n_layers=n_layers, n_hidden_dec=n_hidden_dec,
dropout=dropout)
self.decoder = Decoder(n_output=n_output,
n_embed=n_embed_dec,
n_hidden_enc=n_hidden_enc, n_hidden_dec=n_hidden_dec, n_layers=n_layers,
dropout=dropout)
def forward(self, inputs, targets, tf_ratio=0.5):
''' inputs: [b, input_seq_len(200)]
targets: [b, input_seq_len(200)]'''
device = inputs.device
########################### 1. ENCODER ##############################
h = self.encoder.init_hidden(inputs)
last_layer_enc, last_h_enc = self.encoder(inputs, h)
########################### 2. DECODER ##############################
hidden_dec = last_h_enc #[b, n_layers, n_hidden_dec]
trg_seq_len = targets.size(1)
b = inputs.size(0)
n_output = self.decoder.n_output
output = targets[:, 0]
outputs = torch.zeros(b, n_output, trg_seq_len).to(device)
for t in range(1, trg_seq_len, 1):
output, hidden_dec, att_weights = self.decoder(output, hidden_dec, last_layer_enc)
# att_weights : [b, 1, src_seq_len]
att_weights_table = att_weights if t==1 else torch.cat((att_weights_table, att_weights), dim=1)
outputs[:, :, t] = output #output: [b, n_output]
if random.random() < tf_ratio:
output = targets[:, t]
else:
output = output.max(dim=1)[1]
# attn_weights_table : [b, trg_seq_len, src_seq_len]
return outputs, att_weights_table #[b, n_output, trg_seq_len]
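# Minimal smoke test (dimensions are made-up assumptions, and it presumes the
# package's Encoder/Decoder behave as the forward pass above expects):
if __name__ == "__main__":
    model = Seq2Seq(n_vocab=1000, n_embed_text=128, n_embed_dec=64,
                    n_hidden_enc=256, n_hidden_dec=256, n_layers=2,
                    n_output=5, dropout=0.3)
    src = torch.randint(0, 1000, (4, 200))  # [b, input_seq_len]
    trg = torch.randint(0, 5, (4, 200))     # [b, input_seq_len]
    outputs, attn_table = model(src, trg, tf_ratio=0.5)
    print(outputs.shape, attn_table.shape)  # [4, 5, 200], [4, 199, 200]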
| UTF-8 | Python | false | false | 2,593 | py | 4 | Seq2Seq.py | 4 | 0.475125 | 0.467027 | 0 | 66 | 38.212121 | 118 |
Mrhu1991/hello_code | 1,872,605,745,632 | 92aa93b1f66f96a9dfc16a326b30a16e3408eb9c | 8935ab8356174f0f64a056bc26a522ff79c14428 | /python/Django/webproject/mysite/polls/admin.py | 6574f3f2f3d6e3624d98ab3f854a568651a2d79b | [] | no_license | https://github.com/Mrhu1991/hello_code | 124ee8a936f04b86ff75d65a9b3a872481cb6996 | de371e5948b1c967f83e3e8084de026161ad9951 | refs/heads/master | 2016-09-09T22:43:14.369882 | 2015-03-30T03:38:04 | 2015-03-30T03:38:04 | 24,621,087 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2.7
from django.contrib import admin
from polls.models import Poll
from polls.models import Choice
class ChoiceInline(admin.StackedInline):
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
# fields = ['pub_date','questions']
fieldsets = [
(None,{'fields':['questions']}),
('date_infomation',{'fields':['pub_date'],'classes': ['collapse']}),
]
inlines = [ChoiceInline]
admin.site.register(Poll,PollAdmin)
admin.site.register(Choice)
| UTF-8 | Python | false | false | 479 | py | 69 | admin.py | 46 | 0.707724 | 0.701461 | 0 | 17 | 27.176471 | 70 |
akshaypunwatkar/Caption_assisted_Image_inpainting | 6,528,350,317,285 | 364a4922e9f3f6b4dbc8bc238a8587cc5c87b772 | 9ed55ad2a3c8ff14e9e6d5408ac2a1cb5fa13b59 | /find_closest_captions.py | 2735cf7543ac4472f03db5e550e12e186fcb104f | [
"MIT"
] | permissive | https://github.com/akshaypunwatkar/Caption_assisted_Image_inpainting | d36748178a9274b06fc4cfd3733f1b5c5d854905 | a4fc4703f3727e10a4d584bf60ee90cad4014e73 | refs/heads/master | 2023-01-09T12:26:36.234878 | 2020-11-14T04:19:02 | 2020-11-14T04:19:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from glob import glob
from sentence_transformers.util import pytorch_cos_sim
import torch
from tqdm.notebook import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import pickle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
for p in embedder.parameters():
    p.requires_grad = False
text_files = glob("../data/birds/CUB_200_2011/text/*/*.txt")
image_files = glob("../data/birds/CUB_200_2011/images/*/*.jpg")
corpus = []
for file in tqdm(text_files):
with open(file, "r") as f:
for line in f.readlines():
corpus.append(line)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True, show_progress_bar=True)
with open("./corpus_embeddings.pickle", "wb") as f:
pickle.dump(corpus_embeddings, f)
with open("./corpus_embeddings.pickle", "rb") as f:
ce = pickle.load(f)
near_img_idx_dict = dict()
for i in tqdm(range(0,len(corpus))):
c_emb = embedder.encode([corpus[i]], convert_to_tensor=True)
closest_3 = torch.topk(pytorch_cos_sim(c_emb, ce), 3)[1][0].cpu().numpy()
near_img_idx_dict[i] = closest_3
parts = []
for i in tqdm(range(0, len(ce) // 100 + 1)):
if i != len(ce) // 100:
parts.append(torch.topk(pytorch_cos_sim(ce[i*100:(i+1)*100], ce), k=3, dim=1)[1])
else:
parts.append(torch.topk(pytorch_cos_sim(ce[i*100:], ce), k=3, dim=1)[1])
parts_idxs = [p // 10 for p in parts]
parts_idxs_cpu = [p.cpu() for p in parts_idxs]
all_parts_tensor = torch.cat(parts_idxs_cpu, dim=0)
with open("./nearest_neighbors.pickle", "wb") as f:
pickle.dump(all_parts_tensor, f) | UTF-8 | Python | false | false | 1,742 | py | 24 | find_closest_captions.py | 6 | 0.676234 | 0.649254 | 0 | 65 | 25.815385 | 91 |
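# Spot-check of the mapping (assumes, as the chunked top-k above does, that
# each text file contributes exactly 10 consecutive captions to `corpus`):
caption_idx = 0
nearest_file_idx = int(all_parts_tensor[caption_idx][0])
print(corpus[caption_idx], "->", text_files[nearest_file_idx])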
ThinkmanWang/thinkutils_plus | 8,684,423,895,302 | ff43badb8919dcca8bb2002f51a6823e4b5334a7 | 7b8fac032e4d574765aeace0792b66a689cf7896 | /thinkutils_plus/datetime/datetime.py | c600eb99134aeb9cdbe1ca9f03ef5cb4596f350f | [
"MIT"
] | permissive | https://github.com/ThinkmanWang/thinkutils_plus | f94471e4854f8ed1949eac432ca5df03bcf2f617 | 65d56a1a0cfce22dff08a4f0baea6b4eb08a2e35 | refs/heads/master | 2023-08-24T16:51:12.101273 | 2018-07-18T09:42:12 | 2018-07-18T09:42:12 | 141,403,478 | 0 | 0 | MIT | false | 2023-08-14T21:44:21 | 2018-07-18T08:16:13 | 2018-07-18T09:42:29 | 2023-08-14T21:44:17 | 84 | 0 | 0 | 1 | Python | false | false | import time
from datetime import date, timedelta
import datetime
import calendar
# logger = setup_custom_logger()
def get_timestamp():
return int(time.time())
def get_current_time_str():
temp = time.localtime(time.time())
szTime = time.strftime("%Y-%m-%d %H:%M:%S", temp)
return szTime
def timestamp2str(tt):
t1 = time.localtime(float(tt))
t2 = time.strftime("%Y-%m-%d %H:%M:%S", t1)
return t2
def hour():
temp = time.localtime(time.time())
szTime = time.strftime("%H", temp)
return int(szTime)
def today():
today = date.today()
return today.strftime('%Y-%m-%d')
def yesterday():
yesterday = date.today() + timedelta(-1)
return yesterday.strftime('%Y-%m-%d')
def diff_day(nDiff):
day = date.today() + timedelta(nDiff)
return day.strftime('%Y-%m-%d')
def addmonths(date,months = 0):
targetmonth=months+date.month
try:
if 0 == targetmonth%12:
return date.replace(year=date.year+int(targetmonth/12) - 1,month=12)
else:
return date.replace(year=date.year + int(targetmonth / 12), month=(targetmonth % 12))
except Exception,e:
# There is an exception if the day of the month we're in does not exist in the target month
# Go to the FIRST of the month AFTER, then go back one day.
print e
date.replace(year=date.year+int((targetmonth+1)/12),month=((targetmonth+1)%12),day=1)
date+=datetime.timedelta(days=-1)
return date
def last_day_of_month(any_day):
next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail
return next_month - datetime.timedelta(days=next_month.day)
def first_day_of_month(nDiffMon = 0):
today = date.today()
date1 = addmonths(today.replace(day=1), nDiffMon)
return date1.replace(day=1)
def last_day_of_month(nDiffMon = 0):
def end_day(any_day):
next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail
return next_month - datetime.timedelta(days=next_month.day)
return end_day(first_day_of_month(nDiffMon)) | UTF-8 | Python | false | false | 2,173 | py | 30 | datetime.py | 30 | 0.630465 | 0.612517 | 0 | 67 | 30.462687 | 99 |
django/django | 5,368,709,144,266 | 5f6dbe8694d56cd9199ff9cf4c19c603c550df20 | 069c2295076c482afadfe6351da5ae02be8e18e6 | /tests/check_framework/template_test_apps/same_tags_app_2/templatetags/same_tags.py | 9bec93d8e50a9d43d05b378ba8c50b2b7dd1e658 | [
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] | permissive | https://github.com/django/django | 5eb557f57053631cd4f566f451e43197309dbeeb | c74a6fad5475495756a5bdb18b2cab2b68d429bc | refs/heads/main | 2023-09-01T03:43:44.033530 | 2023-08-31T08:27:32 | 2023-08-31T08:27:32 | 4,164,482 | 73,530 | 38,187 | BSD-3-Clause | false | 2023-09-14T20:03:48 | 2012-04-28T02:47:18 | 2023-09-14T20:03:45 | 2023-09-14T20:03:35 | 240,685 | 73,034 | 29,920 | 145 | Python | false | false | from django.template import Library
register = Library()
| UTF-8 | Python | false | false | 58 | py | 3,311 | same_tags.py | 2,012 | 0.793103 | 0.793103 | 0 | 3 | 18.333333 | 35 |
rodrigoctoledo/ServerFlaUNi | 17,617,955,852,538 | da1d5f98b800551ebc97be2bd14c990007d24cf3 | ef8eb2c20305dd0da0552952eeaf80c4d19a89c7 | /djangofundamentos/tw_clientes/clientes/views.py | 0299dba3515d2178248090824f13366e31187be1 | [] | no_license | https://github.com/rodrigoctoledo/ServerFlaUNi | cfc771c940dd94643f1b314b97c6377b7577fe96 | ca3cc74f931591c2f47cc8e5888c12d692f15dd7 | refs/heads/main | 2023-08-05T10:33:54.585723 | 2021-09-07T23:55:33 | 2021-09-07T23:55:33 | 411,485,407 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import Cliente
from .forms import ClienteForm
# Create your views here.
def listar_clientes(request):
clientes = Cliente.objects.all()
return render(request,'clientes/lista_clientes.html',{'clientes':clientes})
def inserir_cliente(request):
    if request.method == "POST":
        form = ClienteForm(request.POST)
        if form.is_valid():
            form.save()
            form = ClienteForm()  # reset to an empty form after saving
        # an invalid form falls through, so its validation errors are rendered
    else:
        form = ClienteForm()
    return render(request, 'clientes/form_cliente.html', {'form': form})
| UTF-8 | Python | false | false | 523 | py | 4 | views.py | 2 | 0.694073 | 0.694073 | 0 | 18 | 28.055556 | 79 |
petrepa/OV-Mac-OS-Statusbar | 214,748,413,507 | c32f30134f9aa0d1090298ffde0e335ee411f7c8 | ddb0175db8683dcc5d505cab498393f0d5430e14 | /app.py | 03bfaa0e14861b366e95a0a681f4a027f5cae078 | [
"MIT"
] | permissive | https://github.com/petrepa/OV-Mac-OS-Statusbar | 826bb2f332fcebeb3c11fa8c4d9e27bb79e98044 | 6c224a90ba677427339b89341c51b7da3a875979 | refs/heads/master | 2022-07-30T09:08:09.947178 | 2021-04-13T11:23:01 | 2021-04-13T11:23:01 | 162,328,885 | 10 | 0 | MIT | false | 2022-07-06T19:57:26 | 2018-12-18T18:17:47 | 2021-04-13T11:23:04 | 2022-07-06T19:57:23 | 191 | 4 | 0 | 3 | Python | false | false | import rumps
import datetime
from door import door_status
class OV_StatusBarApp(rumps.App):
def __init__(self):
super(OV_StatusBarApp, self).__init__("OV Door Status")
self.icon = 'images/help-circle-outline.png'
self.menu.add(rumps.MenuItem(title='Status'))
self.menu.add(rumps.separator)
@rumps.timer(60*60)
def _get_door_status(self, sender):
if door_status() == True:
self.menu['Status'].title = "OV er ope"
self.icon = 'images/door-open.png'
else:
self.menu['Status'].title = "OV er stengt"
self.icon = 'images/door-closed.png'
if __name__ == "__main__":
OV_StatusBarApp().run() | UTF-8 | Python | false | false | 711 | py | 4 | app.py | 2 | 0.582278 | 0.576653 | 0 | 23 | 29.956522 | 63 |
GSS-Cogs/family-disability | 5,076,651,344,492 | 59d6e1bc40426c3066530a7430f108f79e2a0b7c | 33cc4fc2518420003047e233947071df017b28f7 | /datasets/SG-homelessness-in-scotland-annual-publication/Tables.py | 246ac4573677a80979408e4511c47fcaad91eea0 | [] | no_license | https://github.com/GSS-Cogs/family-disability | e90b3ccf32caa6576b16a890405b8388374463cd | 605a6c341ab18be016e3cf093c2b0cf466dfbe48 | refs/heads/master | 2021-07-13T02:52:56.129524 | 2020-10-22T16:14:34 | 2020-10-22T16:14:34 | 216,540,659 | 1 | 1 | null | false | 2019-12-04T20:15:37 | 2019-10-21T10:27:53 | 2019-12-04T18:58:42 | 2019-12-04T20:15:37 | 441 | 0 | 0 | 32 | Python | false | false | #!/usr/bin/env python
# coding: utf-8
# %%
from gssutils import *
from databaker.framework import *
def left(s, amount):
    # Excel-style LEFT: the first `amount` characters of s
    return s[:amount]
def right(s, amount):
    # Excel-style RIGHT: the last `amount` characters of s
    return s[-amount:]
def mid(s, offset, amount):
    # Excel-style MID: `amount` characters of s starting at `offset`
    return s[offset:offset+amount]
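# An added illustration: for a period label like "2018-19", the Period mapping
# below uses left(x, 2) + right(x, 2) to build "2019", the end year of the period.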
scraper = Scraper('https://www2.gov.scot/Topics/Statistics/Browse/Housing-Regeneration/RefTables/homelessness1819tablescharts')
scraper
# %%
distTables = scraper.distribution(title=lambda t: 'Tables' in t)
tabsTables = {tab.name: tab for tab in distTables.as_databaker()}
# %%
tab = tabsTables['Table 9']
cell = tab.excel_ref('A5')
reason = cell.shift(RIGHT).expand(DOWN).is_not_whitespace().is_not_blank().shift(LEFT)
exclude = tab.excel_ref('A27').expand(DOWN).expand(RIGHT)
exclude2 = tab.excel_ref('N3').expand(RIGHT).expand(DOWN)
reason2 = reason - exclude
period = cell.shift(1,-2).expand(RIGHT).is_not_blank().is_not_whitespace() - exclude2
observations = cell.shift(RIGHT).expand(RIGHT).expand(DOWN).is_not_blank().is_not_whitespace().is_number() - exclude - exclude2
Dimensions = [
HDim(reason2,'Reasons for homelessness application',DIRECTLY,LEFT),
HDim(period,'Period',DIRECTLY,ABOVE),
HDimConst('Measure Type','Count'),
HDimConst('Unit','People')
]
c1 = ConversionSegment(observations, Dimensions, processTIMEUNIT=True)
savepreviewhtml(c1)
# NB:- Find if there is a replacement for 'filter' which can find partial matches rather than full cell matches and replace cell_ref
# %%
new_table = c1.topandas()
import numpy as np
new_table['OBS'].replace('', np.nan, inplace=True)
new_table.dropna(subset=['OBS'], inplace=True)
new_table.rename(columns={'OBS': 'Value'}, inplace=True)
new_table['Value'] = new_table['Value'].astype(int)
new_table['Period'] = new_table['Period'].map(
lambda x: f'gregorian-interval/{left(x,2) + right(x,2)}-03-31T00:00:00/P1Y')
new_table = new_table[['Period','Reasons for homelessness application','Measure Type','Value','Unit']]
new_table['Reasons for homelessness application'] = new_table.apply(lambda x: pathify(x['Reasons for homelessness application']), axis = 1)
new_table['Reasons for homelessness application'] = new_table.apply(lambda x: x['Reasons for homelessness application'].replace('/', 'or'), axis = 1)
new_table
# %%
destinationFolder = Path('out')
destinationFolder.mkdir(exist_ok=True, parents=True)
TITLE = 'Homelessness in Scotland: Applications: Main reason for making an application for homelessness to a Local Authority'
OBS_ID = pathify(TITLE)
GROUP_ID = 'sg-homelessness-in-scotland-annual-publication'
new_table.drop_duplicates().to_csv(destinationFolder / f'{OBS_ID}.csv', index = False)
# +
from gssutils.metadata import THEME
scraper.set_base_uri('http://gss-data.org.uk')
scraper.set_dataset_id(f'gss_data/disability/{GROUP_ID}/{OBS_ID}')
scraper.dataset.title = TITLE
scraper.dataset.family = 'disability'
#scraper.dataset.theme = THEME['health-social-care']
with open(destinationFolder / f'{OBS_ID}.csv-metadata.trig', 'wb') as metadata:
metadata.write(scraper.generate_trig())
# -
schema = CSVWMetadata('https://gss-cogs.github.io/family-disability/reference/')
schema.create(destinationFolder / f'{OBS_ID}.csv', destinationFolder / f'{OBS_ID}.csv-schema.json')
new_table
# %%
tab = tabsTables['Table 10']
cell = tab.excel_ref('A5')
reason = cell.shift(RIGHT).expand(DOWN).is_not_whitespace().is_not_blank().shift(LEFT)
exclude = tab.excel_ref('A19').expand(DOWN).expand(RIGHT)
exclude2 = tab.excel_ref('N3').expand(RIGHT).expand(DOWN)
reason2 = reason - exclude
period = cell.shift(1,-2).expand(RIGHT).is_not_blank().is_not_whitespace() - exclude2
observations = cell.shift(RIGHT).expand(RIGHT).expand(DOWN).is_not_blank().is_not_whitespace().is_number() - exclude - exclude2
Dimensions = [
HDim(reason2,'Reasons for failing to maintain accommodation',DIRECTLY,LEFT),
HDim(period,'Period',DIRECTLY,ABOVE),
HDimConst('Measure Type','Count'),
HDimConst('Unit','People')
]
c1 = ConversionSegment(observations, Dimensions, processTIMEUNIT=True)
savepreviewhtml(c1)
# %%
new_table = c1.topandas()
import numpy as np
new_table['OBS'].replace('', np.nan, inplace=True)
new_table.dropna(subset=['OBS'], inplace=True)
new_table.rename(columns={'OBS': 'Value'}, inplace=True)
new_table['Value'] = new_table['Value'].astype(int)
new_table['Period'] = new_table['Period'].map(
lambda x: f'gregorian-interval/{left(x,2) + right(x,2)}-03-31T00:00:00/P1Y')
new_table = new_table[['Period','Reasons for failing to maintain accommodation','Measure Type','Value','Unit']]
new_table['Reasons for failing to maintain accommodation'] = new_table.apply(lambda x: pathify(x['Reasons for failing to maintain accommodation']), axis = 1)
new_table['Reasons for failing to maintain accommodation'] = new_table.apply(lambda x: x['Reasons for failing to maintain accommodation'].replace('/', '-or'), axis = 1)
new_table = new_table.replace({'Reasons for failing to maintain accommodation' : {
'not-to-do-with-applicant-household-e-g-landlord-selling-property-fire-circumstances-of-other-persons-sharing-previous-property-harassment-by-others-etc' : 'not-to-do-with-applicant-household', }})
new_table
# %%
destinationFolder = Path('out')
destinationFolder.mkdir(exist_ok=True, parents=True)
TITLE = 'Homelessness in Scotland: Applications: Reasons for failing to maintain accommodation prior to application'
OBS_ID = pathify(TITLE)
new_table.drop_duplicates().to_csv(destinationFolder / f'{OBS_ID}.csv', index = False)
# +
from gssutils.metadata import THEME
scraper.set_base_uri('http://gss-data.org.uk')
scraper.set_dataset_id(f'gss_data/disability/{GROUP_ID}/{OBS_ID}')
scraper.dataset.title = TITLE
scraper.dataset.family = 'disability'
#scraper.dataset.theme = THEME['health-social-care']
with open(destinationFolder / f'{OBS_ID}.csv-metadata.trig', 'wb') as metadata:
metadata.write(scraper.generate_trig())
# -
schema = CSVWMetadata('https://gss-cogs.github.io/family-disability/reference/')
schema.create(destinationFolder / f'{OBS_ID}.csv', destinationFolder / f'{OBS_ID}.csv-schema.json')
new_table
# %%
tab = tabsTables['Table 15']
cell = tab.excel_ref('A4')
reason = cell.shift(RIGHT).expand(DOWN).is_not_whitespace().is_not_blank().shift(LEFT)
exclude = tab.excel_ref('A14').expand(DOWN).expand(RIGHT)
exclude2 = tab.excel_ref('N3').expand(RIGHT).expand(DOWN)
reason2 = reason - exclude
period = cell.shift(1,-1).expand(RIGHT).is_not_blank().is_not_whitespace() - exclude2
observations = cell.shift(RIGHT).expand(RIGHT).expand(DOWN).is_not_blank().is_not_whitespace().is_number() - exclude - exclude2
Dimensions = [
HDim(reason2,'Identified support needs of homeless households',DIRECTLY,LEFT),
HDim(period,'Period',DIRECTLY,ABOVE),
HDimConst('Measure Type','Count'),
HDimConst('Unit','People')
]
c1 = ConversionSegment(observations, Dimensions, processTIMEUNIT=True)
savepreviewhtml(c1)
# %%
new_table = c1.topandas()
import numpy as np
new_table['OBS'].replace('', np.nan, inplace=True)
new_table.dropna(subset=['OBS'], inplace=True)
new_table.rename(columns={'OBS': 'Value'}, inplace=True)
new_table['Value'] = new_table['Value'].astype(int)
new_table['Period'] = new_table['Period'].map(
lambda x: f'gregorian-interval/{left(x,2) + right(x,2)}-03-31T00:00:00/P1Y')
new_table = new_table[['Period','Identified support needs of homeless households','Measure Type','Value','Unit']]
new_table['Identified support needs of homeless households'] = new_table.apply(lambda x: pathify(x['Identified support needs of homeless households'].replace('/', 'or')), axis = 1)
new_table
# %%
destinationFolder = Path('out')
destinationFolder.mkdir(exist_ok=True, parents=True)
TITLE = 'Homelessness in Scotland: Applications: Support need identified for those homeless (or threatened with homelessness) households'
OBS_ID = pathify(TITLE)
new_table.drop_duplicates().to_csv(destinationFolder / f'{OBS_ID}.csv', index = False)
# +
from gssutils.metadata import THEME
scraper.set_base_uri('http://gss-data.org.uk')
scraper.set_dataset_id(f'gss_data/disability/{GROUP_ID}/{OBS_ID}')
scraper.dataset.title = TITLE
scraper.dataset.family = 'disability'
scraper.dataset.theme = THEME['health-social-care']
with open(destinationFolder / f'{OBS_ID}.csv-metadata.trig', 'wb') as metadata:
metadata.write(scraper.generate_trig())
# -
schema = CSVWMetadata('https://gss-cogs.github.io/family-disability/reference/')
schema.create(destinationFolder / f'{OBS_ID}.csv', destinationFolder / f'{OBS_ID}.csv-schema.json')
new_table
| UTF-8 | Python | false | false | 8,796 | py | 171 | Tables.py | 42 | 0.708466 | 0.697428 | 0 | 213 | 40.253521 | 278 |
nathayoung/personal | 12,730,283,109,145 | b08b09d712c3ea06b1a1511dcc8b95e877138a20 | 176bc1f8c6bf4cbf88296d06711ff25baff23de5 | /School_Work/CIS_240/Exercise4_Floats/Ex4_Floats_EmployeePay.py | 5056b88f9f73ff40b2a627c3f12c6af61c2f081b | [] | no_license | https://github.com/nathayoung/personal | 4e29fecb2576c5916f63f2896120c9be228745bf | c1b214625bebe4960b1129a5361ef4edd0a05d7d | refs/heads/master | 2023-08-09T09:55:17.807229 | 2023-08-08T00:16:39 | 2023-08-08T00:16:39 | 188,744,568 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Declare Variables
# Get values for Variables
wage = float(input("Enter the wage: $"))
regularHours = float(input("Enter the regular hours: "))
overtimeHours = float(input("Enter the overtime hours: "))
# Do calculations
totalPay=((wage*regularHours)+((wage*overtimeHours)*1.5))
# display results
print("The total weekly pay is $" + str(totalPay)) | UTF-8 | Python | false | false | 348 | py | 249 | Ex4_Floats_EmployeePay.py | 64 | 0.732759 | 0.727011 | 0 | 9 | 37.777778 | 58 |
DreamLose/python- | 14,577,119,020,528 | 4b9bcdf11213c0610edc21dc693ab4d00ffcc53a | 69958e59f7b8bb02304425fe06b2039219229ec9 | /Day30From组件/FromProject/app01/forms.py | 9d1a7c6274f8b836f6b7c9de0b6c4fd1158b3e9b | [
"MIT"
] | permissive | https://github.com/DreamLose/python- | 467f955a773073db6c3825ef8c0819018aecc01f | 865249d7dba1fb1452189bde59c378aab8c99c9b | refs/heads/main | 2023-01-31T00:19:45.285440 | 2020-12-07T05:20:28 | 2020-12-07T05:20:28 | 309,921,294 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms as dforms
from django.forms import fields
class UserForm(dforms.Form):
username = fields.CharField(max_length=32)
email = fields.EmailField(max_length=32) | UTF-8 | Python | false | false | 188 | py | 200 | forms.py | 156 | 0.771277 | 0.75 | 0 | 6 | 30.5 | 46 |
yuto-moriizumi/AtCoder | 13,443,247,641,480 | f62dfa056d222d46c28207f485a21f0c3d79059b | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /ABC136/ABC136b.py | 44d5352c29f72a805b2f8577383820f41cebf75b | [
"Unlicense"
] | permissive | https://github.com/yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ABC136b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
c = 0
# count integers in [1, n] whose decimal representation has an odd number of digits
for i in range(1, n+1):
if (len(str(i)) % 2 == 1):
c += 1
print(c)
| UTF-8 | Python | false | false | 181 | py | 661 | ABC136b.py | 661 | 0.574586 | 0.508287 | 0 | 12 | 14.083333 | 30 |
amarotta1/FinalDise-oDeSistemas | 7,748,121,027,256 | 1aa19981970a8157f2ea65062e95f9084be1b7da | 68f0ca5ccde2c9b57881b15b3e7530c23e92ea12 | /Patrones/Abstract Factory Banco/ICuenta.py | 4ebca8a466fb03740feeef83a60d463424cbc18e | [] | no_license | https://github.com/amarotta1/FinalDise-oDeSistemas | 2a9eee925d6a0f772df08afcf2a7b444fa1f46e8 | e760b7b0d36a90be202b6878bed1475aade3047d | refs/heads/main | 2023-02-02T10:57:01.758609 | 2020-12-16T14:56:15 | 2020-12-16T14:56:15 | 322,011,321 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from abc import ABC , abstractmethod
class ICuenta(ABC):
@abstractmethod
def funcion(self):
pass | UTF-8 | Python | false | false | 114 | py | 87 | ICuenta.py | 87 | 0.675439 | 0.675439 | 0 | 7 | 15.428571 | 36 |
messyoxd/DesignPatterns | 8,881,992,369,335 | 9a3838343071eea1fa8e361b7172a63608530b59 | f209e1238b04c1b00a199e7320de2527e00714e5 | /ChainOfResponsability/ComChainOfResponsability/DataNascimentoHandler.py | 00e389f4b1a53b9777aa4f58924053f1b02985c0 | [] | no_license | https://github.com/messyoxd/DesignPatterns | b2e9e2b7d026cfa4a95f2dad62622f3ac3e9f41b | 934a9995287599e3f610fe1674f5eee13901bd26 | refs/heads/master | 2020-07-06T23:40:00.121028 | 2019-12-11T10:05:37 | 2019-12-11T10:05:37 | 203,174,366 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ChainOfResponsability import ChainOfResponsability
class DataNascimentoHandler(ChainOfResponsability):
def __init__(self, next=None):
self._next = next
def handle(self, dados, campo):
if campo == "Data de nascimento":
aux = dados
data = ""
for item in aux.split("/"):
data += item+"-"
data = data[:-1]
return data
else:
if self._next != None:
return self._next.handle(dados,campo)
else:
return None
| UTF-8 | Python | false | false | 570 | py | 23 | DataNascimentoHandler.py | 21 | 0.515789 | 0.514035 | 0 | 19 | 29 | 55 |
stardust85/Office365-REST-Python-Client | 4,372,276,745,760 | c5c62284dbcf7e75c24beb6f44c7a29ea749b95d | 2de2437bbf480f6518554bcb204106dd37262023 | /office365/graph/onedrive/folder.py | eb880bf08e15c8c303d64a079950395580118878 | [
"MIT"
] | permissive | https://github.com/stardust85/Office365-REST-Python-Client | 386e5bba16cdee1472b7e23d405a4bf9b6f5e73a | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | refs/heads/master | 2022-09-29T19:44:02.166438 | 2020-06-03T23:12:40 | 2020-06-03T23:12:40 | 269,356,313 | 0 | 0 | MIT | true | 2020-06-04T12:41:03 | 2020-06-04T12:41:02 | 2020-06-03T23:12:49 | 2020-06-04T00:18:42 | 6,191 | 0 | 0 | 0 | null | false | false | from office365.runtime.client_value_object import ClientValueObject
class Folder(ClientValueObject):
def __init__(self):
super(Folder, self).__init__()
| UTF-8 | Python | false | false | 167 | py | 36 | folder.py | 34 | 0.706587 | 0.688623 | 0 | 7 | 22.857143 | 67 |
korbu/miros | 6,399,501,309,293 | a854a2a94f2247eececb5fd0eb14982671604fb3 | c5ef3699be70031c0b736477e457d1a125809bfe | /examples/philosophers.py | 87c6582d7a3e3994862977475d995fc7ca7223e9 | [] | no_license | https://github.com/korbu/miros | b9f13a2d1e75e1b66b1081589046062961a40b7f | ab70722a7ec7b660e48452be80504d5390de19dd | refs/heads/master | 2023-01-05T00:57:24.612699 | 2020-11-03T16:23:46 | 2020-11-03T16:23:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import random
from miros import Event
from miros import spy_on
from miros import signals
from functools import partial
from miros import ActiveObject
from miros import return_status
NUMBER_OF_PHILOSOPHERS = 5
class Fork():
Free = 0
Used = 1
def right(n, num):
return ((n + (num - 1)) % num)
def left(n, num):
return ((n + 1) % num)
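# An added note: with NUMBER_OF_PHILOSOPHERS = 5, right(0, 5) == 4 and
# left(0, 5) == 1, i.e. the two neighbouring seats of seat 0 in the ring.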
class Philosopher(ActiveObject):
def __init__(self, n):
name = "philosopher_{}".format(n)
super().__init__(name)
self.n = n
self.subscribe(Event(signal=signals.EAT))
@spy_on
def thinking(philosopher, e):
status = return_status.UNHANDLED
if(e.signal == signals.ENTRY_SIGNAL):
philosopher.post_fifo(
Event(signal=signals.Tired),
period=random.randint(1, 10),
times=1,
deferred=True)
status = return_status.HANDLED
elif(e.signal == signals.Tired):
status = philosopher.trans(hungry)
else:
philosopher.temp.fun = philosopher.top
status = return_status.SUPER
return status
@spy_on
def hungry(philosopher, e):
status = return_status.UNHANDLED
if(e.signal == signals.ENTRY_SIGNAL):
philosopher.publish(
Event(signal=signals.HUNGRY, payload=philosopher.n))
status = return_status.HANDLED
elif(e.signal == signals.EAT):
if e.payload == philosopher.n:
status = philosopher.trans(eating)
else:
status = return_status.HANDLED
else:
philosopher.temp.fun = philosopher.top
status = return_status.SUPER
return status
@spy_on
def eating(philosopher, e):
status = return_status.UNHANDLED
if(e.signal == signals.ENTRY_SIGNAL):
philosopher.post_fifo(
Event(signal=signals.Full),
period=random.randint(1, 10),
times=1,
deferred=True)
status = return_status.HANDLED
elif(e.signal == signals.Full):
status = philosopher.trans(thinking)
elif(e.signal == signals.EXIT_SIGNAL):
philosopher.publish(
Event(signal=signals.DONE, payload=philosopher.n))
status = return_status.HANDLED
else:
philosopher.temp.fun = philosopher.top
status = return_status.SUPER
return status
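# Added summary of each philosopher's state flow (not in the original file):
#   thinking --Tired--> hungry --EAT(n)--> eating --Full--> thinking
# HUNGRY and DONE are published to the Table below, which arbitrates the forks
# and publishes EAT back to the philosophers.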
class Table(ActiveObject):
def __init__(self, name, number_of_philosophers):
super().__init__(name)
self.subscribe(Event(signal=signals.DONE))
self.subscribe(Event(signal=signals.HUNGRY))
self.forks = [Fork.Free for i in range(number_of_philosophers)]
self.is_hungry = [False for i in range(number_of_philosophers)]
self.to_the_right_of = partial(right, num=number_of_philosophers)
self.to_the_left_of = partial(left, num=number_of_philosophers)
@spy_on
def serving(table, e):
status = return_status.UNHANDLED
if (e.signal == signals.HUNGRY):
n = e.payload
m = table.to_the_left_of(n)
if (table.forks[m] == Fork.Free) and (table.forks[n] == Fork.Free):
table.forks[m] = Fork.Used
table.forks[n] = Fork.Used
table.is_hungry[n] = False
table.publish(Event(signal=signals.EAT, payload=n))
else:
table.is_hungry[n] = True
status = return_status.HANDLED #TODO: Not sure why book example handles this in the else...
elif (e.signal == signals.DONE):
n = e.payload
#set down the forks
table.forks[n] = Fork.Free
m = table.to_the_right_of(n)
table.forks[m] = Fork.Free
    # check whether the right-hand neighbour is hungry and whether that neighbour's other fork is free
if table.is_hungry[m] and (table.forks[m] == Fork.Free) :
table.forks[n] = Fork.Used
table.forks[m] = Fork.Used
table.is_hungry[m] = False
table.publish(Event(signal=signals.EAT, payload=m))
m = table.to_the_left_of(n) # check the left neighbour
n = table.to_the_left_of(m) # left fork of the left neighbour
    # check whether the left-hand neighbour is hungry and whether that neighbour's other fork is free
if table.is_hungry[m] and (table.forks[n] == Fork.Free) :
table.forks[m] = Fork.Used
table.forks[n] = Fork.Used
table.is_hungry[m] = False
table.publish(Event(signal=signals.EAT, payload=m))
status = return_status.HANDLED
else:
table.temp.fun = table.top #or chart.top for the top most
status = return_status.SUPER
return status
if __name__ == "__main__":
table = Table(name='table1',
number_of_philosophers=NUMBER_OF_PHILOSOPHERS)
table.start_at(serving)
time.sleep(0.1)
for n in range(NUMBER_OF_PHILOSOPHERS):
philosopher = Philosopher(n)
philosopher.start_at(thinking)
philosopher.live_trace = True
time.sleep(60)
| UTF-8 | Python | false | false | 4,680 | py | 618 | philosophers.py | 111 | 0.64594 | 0.642094 | 0 | 161 | 27.062112 | 96 |
RRuma/RestApiDjango | 10,728,828,336,707 | 6132d270ae12e35e89cc468b7914598ebfe77124 | f87fcb7a166603e4b1213e84bb6880a6592ebc0c | /category/serializers.py | 08c15f3f11d83c5c2ce439b39d3989595a05c8e8 | [] | no_license | https://github.com/RRuma/RestApiDjango | 0a9d2469694bbdf5fc7437967f867bced119d233 | 4e23f56bab312c971540941e7dec7a2fb7c5397f | refs/heads/master | 2020-04-23T19:33:46.987734 | 2019-02-24T11:12:44 | 2019-02-24T11:12:44 | 171,408,808 | 0 | 0 | null | false | 2019-02-24T11:12:45 | 2019-02-19T05:07:30 | 2019-02-20T05:34:18 | 2019-02-24T11:12:45 | 54 | 0 | 0 | 0 | Python | false | null | from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Category
# title = serializers.CharField(max_length=120)
# description = serializers.CharField()
# body = serializers.CharField()
#
#
# # author_id = serializers.IntegerField()
#
# def create(self, validated_data):
# return Category.objects.create(**validated_data)
# class categoryUserSerializer(serializers.ModelSerializer):
# class Meta:
# model = User
# fields = ("id")
class categorySerializer(serializers.ModelSerializer):
#user = categoryUserSerializer(read_only=True)
class Meta:
model = Category
fields = ( "categoryname", "description") | UTF-8 | Python | false | false | 703 | py | 21 | serializers.py | 20 | 0.71266 | 0.708393 | 0 | 27 | 25.074074 | 60 |
omoogo/practice-python | 7,962,869,395,085 | 821d4880c6501be5d9353fec1e58206b1e94293e | 8470f9a49eceea7d5ad249512fc173e2bb66851e | /exercise1.py | 638119c1196f0be8db3e0157bc74c0e888bd0c63 | [] | no_license | https://github.com/omoogo/practice-python | e962d1bc12dffb09b0ffc2fbb06d3a62c544d0c5 | 3736432ebc1fb0864ef6d8356da1c2cbdd77133f | refs/heads/master | 2022-09-23T02:03:19.444927 | 2020-05-26T01:46:38 | 2020-05-26T01:46:38 | 255,214,607 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Title: Character Input
# Create a program that asks the user to enter their name and their age.
# Print out a message addressed to them that tells them the year that they will turn 100 years old.
# Extras:
# Add on to the previous program by asking the user for another number and
# printing out that many copies of the previous message. (Hint: order of operations exists in Python)
# Print out that many copies of the previous message on separate lines.
# (Hint: the string "\n" is the same as pressing the ENTER button)
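# An added note (not part of the original exercise text): the extras can also be
# done without a loop, e.g. print((message + "\n") * number_of_times_to_print),
# since "\n" ends each line; the loop below prints one copy per iteration instead.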
import datetime
name = input('Please enter your name: ')
age = int(input('Please enter your age: '))
year_at_age_100 = datetime.datetime.today().year + (100 - age)
message = f'Hi {name}. You will turn 100 in the year {year_at_age_100}'
print(message)
number_of_times_to_print = int(input('Wait, can I have another number please? '))
for i in range(0, number_of_times_to_print):
print(message) | UTF-8 | Python | false | false | 931 | py | 10 | exercise1.py | 10 | 0.730397 | 0.713212 | 0 | 26 | 34.846154 | 101 |
emilyty/pythonProject | 4,028,679,345,288 | ce91ff9def978259e3a745b9066b1e3987690e5d | c3bf9049e76883f4dfd27c54db6ae5af6c0115f4 | /testingJob/__init__.py | 69b7720d08e7ef5714169e4af05734de51985694 | [] | no_license | https://github.com/emilyty/pythonProject | f3a1795f383b11a43cdb611cb4320cabdf270959 | 928521451a9a77c8f353a678df63f04e8f863354 | refs/heads/master | 2021-04-15T04:42:05.680588 | 2018-06-20T09:04:22 | 2018-06-20T09:04:22 | 126,923,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Author: emily
# @Date : 2018/3/27 11:34
#@Contact : emilyty@163.com | UTF-8 | Python | false | false | 97 | py | 5 | __init__.py | 5 | 0.57732 | 0.42268 | 0 | 4 | 22.75 | 27 |
pyjhzwh/Efficient-Deep-Learning-for-Point-Clouds | 3,212,635,546,202 | bb70d7ff4bd1075de77974dbf40e51a32dfcaa04 | e552903c9308f635fab01984cb2f4a852b1fa427 | /launcher.py | 6647bde9bdd33007c9415133a19bd8cfeffaa653 | [] | no_license | https://github.com/pyjhzwh/Efficient-Deep-Learning-for-Point-Clouds | 8a7e1f530597545518dce1f695ced8ec332c80a8 | 921383a37813b41949659e9a6c5cd68c81f5f3a1 | refs/heads/master | 2023-04-14T17:06:07.779719 | 2021-04-22T02:27:00 | 2021-04-22T02:27:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
This script is used to compile and launch different network.
'''
import numpy as np
import argparse
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
parser = argparse.ArgumentParser()
parser.add_argument('--compile', type=str, default=None, help='Compile libraries in the models. To compile a specific network, use: --compile [NETWORK_NAME]; to compile all models, use: --compile all')
parser.add_argument('--download', type=str, default=None, help='Download the dataset for the models. To download the dataset for a specific network, use: --download [NETWORK_NAME]; to download all datasets, use: --download all')
parser.add_argument('--list_models', help='List all model names.')  # note: declared but not handled below
parser.add_argument('--run', type=str, default=None, help='Evaluate the model with Fully Delayed-Aggregation.')
parser.add_argument('--train', type=str, default=None, help='Train the model with Fully Delayed-Aggregation.')
# NOTE: argparse's type=bool treats any non-empty string (even "False") as True,
# so these switches are defined as plain flags: pass e.g. --use_baseline with no value.
parser.add_argument('--use_baseline', action='store_true', help='Use the baseline without any kind of Delayed-Aggregation.')
parser.add_argument('--use_limited', action='store_true', help='Use Limited Delayed-Aggregation.')
parser.add_argument('--segmentation', action='store_true', help='Execute the segmentation version.')
FLAGS = parser.parse_args()
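# Example invocations (an added sketch; names must match the model tables below):
#   python launcher.py --compile pointnet2
#   python launcher.py --download all
#   python launcher.py --run dgcnn --use_baseline
#   python launcher.py --train pointnet2 --segmentation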
COMPILE_MODELS = ['pointnet2', 'frustum-pointnets', 'DensePoint']
'''
Compile necessary modules
'''
if FLAGS.compile == 'all':
for m in COMPILE_MODELS:
dir_path = './Networks/%s' % m
if os.path.exists(dir_path):
print('cd %s' % dir_path)
os.system('cd %s; python compile.py' % dir_path)
else:
print('[ERROR]: can\'t find the path %s' % dir_path)
exit()
elif FLAGS.compile in COMPILE_MODELS:
dir_path = './Networks/%s' % FLAGS.compile
if os.path.exists(dir_path):
print('cd %s' % dir_path)
os.system('cd %s; python compile.py' % dir_path)
else:
print('[ERROR]: can\'t find the path %s' % dir_path)
exit()
elif FLAGS.compile is not None:
print('[ERROR]: can\'t find the model %s to compile.' % FLAGS.compile)
exit()
'''
Download datasets
'''
DOWNLOAD_SCRIPTS = {
'pointnet2' : 'download.py',
'frustum-pointnets' : 'download.py',
'ldgcnn' : 'download.py',
'dgcnn' : 'download.py',
'DensePoint' : 'download.py'
}
if FLAGS.download == 'all':
for key in DOWNLOAD_SCRIPTS:
dir_path = './Networks/%s' % key
if os.path.exists(dir_path):
print('run %s/%s' % (dir_path, DOWNLOAD_SCRIPTS[key]))
os.system('cd %s; python %s' % (dir_path, DOWNLOAD_SCRIPTS[key]))
dir_path = './Networks/%s/part_seg' % key
if os.path.exists(dir_path):
print('run %s/%s' % (dir_path, DOWNLOAD_SCRIPTS[key]))
os.system('cd %s; python %s' % (dir_path, DOWNLOAD_SCRIPTS[key]))
print ('done!')
exit()
elif FLAGS.download in DOWNLOAD_SCRIPTS:
if FLAGS.segmentation:
dir_path = './Networks/%s/part_seg' % FLAGS.download
else:
dir_path = './Networks/%s' % FLAGS.download
if os.path.exists(dir_path):
print('cd %s' % dir_path)
os.system('cd %s; python %s' % (dir_path, DOWNLOAD_SCRIPTS[FLAGS.download]))
else:
print('[ERROR]: can\'t find the path %s' % dir_path)
print ('done!')
exit()
elif FLAGS.download is not None:
print('[ERROR]: can\'t find the model %s\'s dataset to download.' % FLAGS.download)
exit()
'''
Evaluate models
'''
RUN_MODELS = {
'pointnet2' : 'python evaluate.py',
'frustum-pointnets' : 'bash scripts/command_test_v2.sh',
'ldgcnn' : 'python evaluate.py --log_dir log_new --model_cnn ldgcnn',
'dgcnn' : 'python evaluate.py',
'DensePoint' : 'python evaluate.py'
}
RUN_BASELINES = {
'pointnet2' : 'python evaluate-baseline.py',
'frustum-pointnets' : 'bash scripts/command_test_v2_baselline.sh',
'ldgcnn' : 'python evaluate.py --log_dir log_baseline --model_cnn ldgcnn_baseline',
'dgcnn' : 'python evaluate-baseline.py',
'DensePoint' : 'python evaluate-baseline.py'
}
RUN_LIMITED = {
'pointnet2' : 'python evaluate-limited.py',
'frustum-pointnets' : 'bash scripts/command_test_v2_limited.sh',
'ldgcnn' : 'python evaluate.py --log_dir log_new --model_cnn ldgcnn',
'dgcnn' : 'python evaluate.py',
'DensePoint' : 'python evaluate.py'
}
# Evaluate models
if FLAGS.segmentation:
dir_path = './Networks/%s/part_seg' % FLAGS.run
else:
dir_path = './Networks/%s' % FLAGS.run
if FLAGS.run in RUN_MODELS and os.path.exists(dir_path):
print('cd %s' % dir_path)
if FLAGS.use_baseline:
print('launching baseline version for %s ...\n' % FLAGS.run)
os.system('cd %s; %s' % (dir_path, RUN_BASELINES[FLAGS.run]))
elif FLAGS.use_limited:
print('launching limited delayed-aggregation version for %s ...\n' % FLAGS.run)
os.system('cd %s; %s' % (dir_path, RUN_LIMITED[FLAGS.run]))
else:
print('launching fully delayed-aggregation version for %s ...\n' % FLAGS.run)
os.system('cd %s; %s' % (dir_path, RUN_MODELS[FLAGS.run]))
exit()
elif FLAGS.run is not None:
print('[ERROR]: can\'t find the model %s to run.' % FLAGS.run)
exit()
'''
Train models
'''
TRAIN_MODELS = {
'pointnet2' : 'python train.py',
'frustum-pointnets' : 'bash scripts/command_train_v2.sh',
'ldgcnn' : 'python train.py --log_dir log_new --model ldgcnn',
'dgcnn' : 'python train.py',
'DensePoint' : 'bash train.sh'
}
TRAIN_BASELINES = {
'pointnet2' : 'python train-baseline.py',
'frustum-pointnets' : 'bash scripts/command_train_v2_baselline.sh',
'ldgcnn' : 'python train.py --log_dir log_baseline --model ldgcnn_baseline',
'dgcnn' : 'python train-baseline.py',
'DensePoint' : 'bash train-baseline.sh'
}
TRAIN_LIMITED = {
'pointnet2' : 'python train-limited.py',
'frustum-pointnets' : 'bash scripts/command_train_v2_limited.sh',
'ldgcnn' : 'python train.py --log_dir log_new --model ldgcnn',
'dgcnn' : 'python train.py',
'DensePoint' : 'bash train.sh'
}
# Train models
if FLAGS.segmentation:
dir_path = './Networks/%s/part_seg' % FLAGS.train
else:
dir_path = './Networks/%s' % FLAGS.train
if FLAGS.train in TRAIN_MODELS and os.path.exists(dir_path):
print('cd %s' % dir_path)
if FLAGS.use_baseline:
print('training baseline verstion for %s ...\n' % FLAGS.train)
os.system('cd %s; %s' % (dir_path, TRAIN_BASELINES[FLAGS.train]))
elif FLAGS.use_limited:
print('training limited delayed-aggregation version for %s ...\n' % FLAGS.train)
os.system('cd %s; %s' % (dir_path, TRAIN_LIMITED[FLAGS.train]))
else:
print('training fully delayed-aggregation version for %s ...\n' % FLAGS.train)
os.system('cd %s; %s' % (dir_path, TRAIN_MODELS[FLAGS.train]))
exit()
elif FLAGS.train is not None:
print('[ERROR]: can\'t find the model %s to train.' % FLAGS.train)
exit()
| UTF-8 | Python | false | false | 7,062 | py | 48 | launcher.py | 25 | 0.637921 | 0.635939 | 0 | 187 | 36.764706 | 236 |
emlynoregan/appenginetaskutils | 8,203,387,563,415 | 67792ec940a3d466069607be6ac26952fbe60dd6 | 34daf4ef4f8a07a453d7241433a3179a336aac93 | /experiments/incrementaccountsnaive.py | c9f54bd8d9567e127b2ef0b330297e10bf0ff1d4 | [
"Apache-2.0"
] | permissive | https://github.com/emlynoregan/appenginetaskutils | 9badc311b66b60e1a1519b7a910a47878f60b04e | 755cc7cbe4b9badfc1d50f8bd7ebea6e1aae50ee | refs/heads/master | 2021-01-14T08:13:44.128680 | 2018-05-22T08:21:04 | 2018-05-22T08:21:04 | 82,026,170 | 12 | 5 | Apache-2.0 | false | 2019-10-23T00:29:16 | 2017-02-15T06:05:03 | 2019-04-23T16:12:53 | 2019-10-23T00:29:15 | 97 | 11 | 4 | 1 | Python | false | false | from model.account import Account
from google.appengine.ext import ndb
def IncrementAccountsExperimentNaive():
def Go():
def AddFreeCredit(creditamount):
            def ProcessOnePage(cursor):
                # fetch_page returns (results, next cursor, more-pages flag)
                accounts, cursor, kontinue = Account.query().fetch_page(
                    100, start_cursor = cursor
                )
                for account in accounts:
                    account.balance += creditamount
                ndb.put_multi(accounts)
                if kontinue:
                    # "naive": recurse through every page within this one request
                    ProcessOnePage(cursor)
ProcessOnePage(None)
AddFreeCredit(10)
return "Increment Accounts (Naive)", Go
| UTF-8 | Python | false | false | 653 | py | 34 | incrementaccountsnaive.py | 27 | 0.569678 | 0.562021 | 0 | 18 | 35.277778 | 72 |
Jigar710/Python_Programs | 8,100,308,336,554 | acc8bfb8e873a31c2060055cc176b277f3c92557 | 9afbb6993450d1e0c3bae68e86844bd06d4419ee | /lst_pograms/count1.py | 34ae489b7256bd9b6ea791f8e7c83585c1ef8c05 | [] | no_license | https://github.com/Jigar710/Python_Programs | 6f331caac30878655d4cca4ad97d4214c0262088 | 714a6306487eb6712f32ccb51b6a2407a81873fa | refs/heads/main | 2023-02-25T12:24:44.874199 | 2021-01-28T15:43:24 | 2021-01-28T15:43:24 | 332,869,164 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | lst = [5,1,2,3,4,5]
print(lst.count(5))   # 2: the value 5 appears twice
print(lst.count(50))  # 0: 50 is not in the list
KaliEtQuali/Dataset-augmentation-Python | 18,545,668,825,773 | 4cf1a50fe6241d8b576aeece046a5a23a78739c2 | 09a2305a7937d29098c499905fb462c8571c888d | /src/create_correspondances_json_from_files.py | 6b43933680da52d81b07afb129f23b728db1a185 | [] | no_license | https://github.com/KaliEtQuali/Dataset-augmentation-Python | 64d3499bd47b14466c4665f1a47982164cac26df | c51ccda02ab92e9d383f87a2f661f6f95a147a62 | refs/heads/master | 2020-03-27T15:42:50.671045 | 2018-08-31T00:41:36 | 2018-08-31T00:41:36 | 146,734,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import os
import cv2
import json
from input_dir import INPUT_DIR
def create_correspondances_json_from_files():
rotations_file_dir = os.path.join(INPUT_DIR, '../output/Orientations').replace("\\","/")
output_dir = os.path.join(INPUT_DIR, '../output').replace("\\","/")
rotation_files = [os.path.join(rotations_file_dir, f).replace("\\","/") for f in os.listdir(rotations_file_dir) if f.endswith(('.txt'))]
json_dic = {}
for rotation_file in rotation_files:
image_name = rotation_file.split('/')[-1].split('.')[0] + '.png'
with open(rotation_file) as r_file:
rotation = r_file.read()
# Convert rotation file to float array
rotation = rotation.split(" ")[1:]
for i in range(3):
rotation[i] = float(rotation[i].rstrip())
        R = np.zeros((3,3))
        # cv2.Rodrigues converts the axis-angle vector into a 3x3 rotation matrix (written into R)
        cv2.Rodrigues(np.asarray(rotation), R)
rotation_dic = {"angles": rotation, "rotation": R.tolist()}
json_dic[image_name] = rotation_dic
print("{} added".format(image_name))
translations_file_dir = os.path.join(INPUT_DIR, '../output/Translations').replace("\\","/")
translation_files = [os.path.join(translations_file_dir, f).replace("\\","/") for f in os.listdir(translations_file_dir) if f.endswith(('.txt'))]
for translation_file in translation_files:
image_name = translation_file.split('/')[-1].split('.')[0] + '.png'
with open(translation_file) as t_file:
translation = t_file.read()
# Convert translation file to float array
translation = translation.split(" ")[1:]
for i in range(3):
translation[i] = float(translation[i].rstrip())
json_dic[image_name]["center"] = translation
print("{} added".format(image_name))
with open(os.path.join(output_dir, 'correspondances_by_name.json'), 'w') as outfile:
json.dump(json_dic, outfile)
if __name__ == "__main__":
create_correspondances_json_from_files()
| UTF-8 | Python | false | false | 1,997 | py | 185 | create_correspondances_json_from_files.py | 40 | 0.611918 | 0.605909 | 0 | 46 | 42.413043 | 149 |
dewlytg/Python-example | 11,269,994,186,010 | c4a96173597da95b32601e7c05cd178267267c34 | 085406a6754c33957ca694878db9bbe37f84b970 | /ATM+购物商城程序/utils/handler.py | 89923e5536e0c0530f45cebf41f15b835965cfb8 | [] | no_license | https://github.com/dewlytg/Python-example | 82157958da198ce42014e678dfe507c72ed67ef0 | 1e179e4037eccd9fefabefd252b060564a2eafce | refs/heads/master | 2021-01-01T18:36:08.868861 | 2019-01-18T10:39:08 | 2019-01-18T10:39:08 | 98,375,528 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import json,os
class ShopFileHandler(object):
def __init__(self,filename):
self.filename = filename
self.data = json.load(open(self.filename))
def __getitem__(self, item):
return self.data[item]
    def __setitem__(self, key, value, is_locked=False):
        # note: `is_locked` keeps its default when invoked as handler[key] = value
        self.data[key] = {"password": value, "is_locked": is_locked}
def __delitem__(self, key):
del self.data[key]
@property
def userinfo(self):
return self.data
def save2file(self):
fd = open(self.filename,"w")
json.dump(self.data,fd)
fd.close()
class CreditFileHandler(object):
def __init__(self,filename):
self.filename = filename
self.data = json.load(open(self.filename))
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value,amount=10000):
self.data[key] = {"password":value,"amount":amount}
def __delitem__(self, key):
del self.data[key]
@property
def creditinfo(self):
return self.data
def save2file(self):
fd = open(self.filename,"w")
json.dump(self.data,fd)
fd.close()
class ShoppingHandler(object):
def __init__(self,filename):
self.filename = filename
self.data = json.load(open(self.filename))
@property
def get_shop_list(self):
for index,value in enumerate(self.data):
print("%s) 商品名称[%s] 商品价格 [%s]" %(index,value["name"],value["price"]))
@property
def get_shop_name_price_dict(self):
result = {}
for i in self.data:
result[i["name"]] = int(i["price"])
return result
@property
def cheapest_shop_price(self):
price_list = []
for i in self.data:
price_list.append(int(i["price"]))
return min(price_list)
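# Example usage (an added sketch; "goods.json" is a hypothetical file holding
# a list such as [{"name": ..., "price": ...}, ...]):
#   shop = ShoppingHandler("goods.json")
#   shop.get_shop_list            # prints the numbered goods
#   cheapest = shop.cheapest_shop_price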
class CreditCarHandler(object):
def __init__(self,filename):
self.filename = filename
self.username = os.path.basename(self.filename)
self.data = json.load(open(self.filename))
def plus(self,money):
self.data[self.username]["amount"] += int(money)
def subtraction(self,money):
self.data[self.username]["amount"] -= int(money)
def save(self):
fd = open(self.filename,"w")
json.dump(self.data,fd)
fd.close()
if __name__ == "__main__":
pass | UTF-8 | Python | false | false | 2,492 | py | 232 | handler.py | 174 | 0.560582 | 0.557754 | 0 | 93 | 24.645161 | 81 |
ahpearce/langapp | 12,171,937,323,486 | e45d264f6c556a6ba0a98857eabbf65f7a038433 | d3c3174652ee7920d3dfdd8c07d4e642930458f8 | /langapp/backend/text.py | acb1b816aafc9ccc6b3135f20636ea8c689c7673 | [] | no_license | https://github.com/ahpearce/langapp | 668d167560593d1485d1e47aab086fc425e21902 | abcf696905383f3bf98dfa7f880774d974896137 | refs/heads/master | 2016-05-26T17:50:50.947961 | 2015-06-25T22:36:20 | 2015-06-25T22:36:20 | 38,077,985 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from nltk import sent_tokenize
import difflib
class Text:
def __init__(self, content, language):
self.content = str(content)
self.language = str(language)
    def __str__(self):
        # __str__ must return a string
        return self.content
def set_content(self, content):
self.content = str(content)
def get_content(self):
return self.content
def set_language(self, language):
self.language = str(language)
def get_language(self):
return self.language
def get_sentences(self):
return sent_tokenize(self.content)
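    # Example usage (an added sketch):
    #   a = Text("One. Two.", "en"); b = Text("One. Three.", "en")
    #   a.compare_sentences(b)  # prints a difflib-style diff of the sentence lists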
def compare_sentences(self, text):
original = self.get_sentences()
comparison = text.get_sentences()
d = difflib.Differ()
diff = d.compare(original, comparison)
        print('\n'.join(diff))
| UTF-8 | Python | false | false | 794 | py | 4 | text.py | 3 | 0.618388 | 0.618388 | 0 | 33 | 23.060606 | 46 |
MukundKulkarni/OpenCV-Projects | 10,067,403,363,521 | db7c3a5c4d8144b0fb253629b631e28896bf83c4 | 1b0e4d18203da588f1c7de9127970810fd2bd899 | /Basics/image-quantization.py | 33949bdb575ce48f2a640a94a6a1ba6b097c2a75 | [] | no_license | https://github.com/MukundKulkarni/OpenCV-Projects | c5a97b1b52f0de7d2f6d0ce162dfd9d8cc60545a | 1e0d2ed155beecc02db5688182b279472f70819f | refs/heads/master | 2021-06-02T07:36:51.638114 | 2020-10-08T05:59:42 | 2020-10-08T05:59:42 | 147,081,724 | 0 | 1 | null | false | 2020-10-08T05:59:44 | 2018-09-02T12:09:56 | 2020-07-09T04:53:03 | 2020-10-08T05:59:43 | 101,211 | 0 | 1 | 0 | Python | false | false | from sklearn.cluster import MiniBatchKMeans
import argparse
import cv2
import numpy as np
def resize(image,y ):
r = float(y)/image.shape[1] # Maintain the aspect ratio
dim = (y, int(image.shape[0]*r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
return resized
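# An added note: width is forced to `y` and height scales by the same ratio,
# e.g. a 1600x1200 input resized with y=800 becomes 800x600.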
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required= True, help = "path to image")
ap.add_argument("-o", "--output", required = False, help = "path of output image")
ap.add_argument("-c", "--clusters", required = True, type = int, help = "number of clusters")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
image = resize(image, 800)
(h,w) = image.shape[:2]
# quantize in L*a*b* space, where euclidean distance better matches perceived colour difference
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# flatten to a (num_pixels, 3) feature matrix for k-means
image = image.reshape(image.shape[0] * image.shape[1], 3)
# cluster the pixel colours, then map each pixel to its nearest cluster centre
clt = MiniBatchKMeans(n_clusters = args["clusters"])
labels = clt.fit_predict(image)
quant = clt.cluster_centers_.astype("uint8")[labels]
quant = quant.reshape((h, w, 3))
image = image.reshape((h, w, 3))
quant = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR)
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
cv2.imshow("Image",np.hstack([image, quant]))
cv2.waitKey(0)
| UTF-8 | Python | false | false | 1,172 | py | 32 | image-quantization.py | 30 | 0.68686 | 0.662969 | 0 | 44 | 25.636364 | 93 |
amolmishra23/edx-adminxblock | 15,693,810,534,705 | c8031333c978afa6d2e8b1de4aa66f98d993f676 | f5e4d5fd7091bae4a6a1547eaefdaedae0eb8e91 | /adminxblock/adminxblock.py | b0ca6084ec4606e465a3175c26cd71aeba3489d7 | [] | no_license | https://github.com/amolmishra23/edx-adminxblock | 48d9cd7a087271c23b8cce0b47fd1c2bb6e82607 | 976540075290ff0f510f409c73ffda0742d41305 | refs/heads/master | 2020-12-02T22:45:49.016236 | 2018-01-14T05:54:56 | 2018-01-14T05:54:56 | 96,178,810 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This project is Open edX server task automator using XBlock.
# The objective of the project is to create an XBlock which will perform
# all the tasks which a system adminstrator frequently performs in the
# lifecycle of the Open edX server.
import pkg_resources
from xblock.core import XBlock
from xblock.fields import Scope, String
from xblock.fragment import Fragment
import logging
log = logging.getLogger(__name__)
class AdminXBlock(XBlock):
# This is the variable which carries the final result to be displayed from the python code to javascript code
success = String(
default='', scope=Scope.user_state,
help="Result variable",
)
userid = String(
default='', scope=Scope.user_state,
help="User ID",
)
# Handy helper for getting resources from our kit.
def resource_string(self, path):
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
    # The primary view, shown to students when they open the student view of the xblock.
    # Here we link all the files needed for the student view: the html, css and js files.
    # As this project is meant to be viewed and accessed only by admins (not students or staff),
    # we begin by getting the ID of the logged-in user and checking whether they are
    # a superuser. If they are, they are allowed to access the XBlock;
    # otherwise they are not.
def student_view(self, context=None):
self.userid = self.xmodule_runtime.user_id
import MySQLdb
db = MySQLdb.connect("localhost", "root", "", "edxapp")
cursor = db.cursor()
sql = "select is_superuser from auth_user where id="+str(self.userid)
cursor.execute(sql)
results = cursor.fetchall()
        l = []
        for row in results:
            l.append(row[0])
        if l and l[0] == 1:  # guard against an empty result set
html = self.resource_string("static/html/adminxblock.html")
frag = Fragment(html.format(self=self))
frag.add_css(self.resource_string("static/css/adminxblock.css"))
frag.add_javascript(self.resource_string("static/js/src/adminxblock.js"))
frag.initialize_js('StudioView')
return frag
else:
html = self.resource_string("static/html/studioview.html")
frag = Fragment(html.format(self=self))
frag.add_css(self.resource_string("static/css/studioview.css"))
frag.add_javascript(self.resource_string("static/js/src/studioview.js"))
frag.initialize_js('AdminXBlock')
return frag
    # The secondary view, shown to the admin when opening the studio view of the xblock.
    # Here we link all the files needed for the studio view: the html, css and js files.
    # Since this project is meant to be viewed and accessed only by admins, the entire
    # functionality is available in this view itself.
def studio_view(self, context=None):
html = self.resource_string("static/html/studioview.html")
frag = Fragment(html.format(self=self))
frag.add_css(self.resource_string("static/css/studioview.css"))
frag.add_javascript(self.resource_string("static/js/src/studioview.js"))
frag.initialize_js('AdminXBlock')
return frag
# Refer to the documentation at
# https://goo.gl/D6MyJ5
# for more details of server administration
# This is the main handler where all the functionality of xblock will be performed.
# The functionalities to be performed will be explained below in detail
@XBlock.json_handler
def perform(self, data, suffix=''):
# The modules needed for the working of this xblock
import os
import time
import MySQLdb
import shutil
from subprocess import Popen, PIPE
from xml.dom import minidom
        # data['detail'] is the data sent from the js file (received from the user), embedded in the ajax request.
        # Next we split it for use in the program: it arrives in the form
        # option_selected + parameters_for_that_option
        # and gets stored in the `list` variable.
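        # e.g. data['detail'] == "d05 student@example.com" would select user
        # activation for that email (an added illustration; the option codes
        # d01..d08 are handled below, and the email here is hypothetical).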
list = data['detail'].split()
# this is the variable in which we will be concatenating whatever result we get and we will finally send to be displayed in result html page
res = ''
        # Delete operation. The objective is to delete a particular course on the server.
        # We move to the directory '/edx/app/edxapp/edx-platform' on the server (via the os module)
        # and execute the management command in a shell using Popen,
        # pre-feeding the two "yes" confirmations on stdin (subprocess module).
        # After that, to verify whether the course was actually deleted, we check the
        # 'course_overviews_courseoverview' table in the edxapp MySQL database:
        # if the course is still present in the table, the operation failed and we report failure;
        # if the course is no longer in the table, the operation succeeded and we report success.
if list[0] == 'd01':
sql_user = 'root'
database = 'edxapp'
db_mysql = MySQLdb.connect(user=sql_user, db=database)
query = "select id from course_overviews_courseoverview where id='"+list[1]+"'";
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query)
courses1 = mysql_cursor.fetchall()
l1 = []
for b1 in courses1:
for c1 in b1:
l1.append(c1)
if len(l1)==1:
os.chdir('/edx/app/edxapp/edx-platform')
command = '/edx/bin/python.edxapp ./manage.py cms --settings=aws delete_course ' + list[1]
p = Popen(command, shell=True, stdin=PIPE)
p.stdin.write("y\n")
p.stdin.write("y\n")
p.wait()
query = "select id from course_overviews_courseoverview where id='"+list[1]+"'";
db_mysql = MySQLdb.connect(user=sql_user, db=database)
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query)
courses = mysql_cursor.fetchall()
l = []
for b in courses:
for c in b:
l.append(c)
if len(l)==1:
res += 'Deletion of the course "' + list[1] + '" including all dependancies of the course, its associated data and associated users has failed'
else:
res += 'Deletion of the course "' + list[1] + '" including all dependancies of the course, its associated data and associated users is successful'
else:
res += 'Course not available'
        # Import a course from a git repository and create a new course out of it on the server, on the fly.
        # On the front end, the user enters only a git url, e.g. https://github.com/edx/edx-demo-course.git
        # At import time a folder named after the last part of the url is created in the temp folder,
        # so we extract that part of the url; here it is edx-demo-course.
        # We then move this folder to the location where we plan to install it.
        # -------------------------------------------------------------------------------
        # Besides that, we also extract the name of the course from the 'course.xml' file
        # located in the directory we imported from git.
        # After installing the code at that location, we check whether the course name appears
        # in the 'course_overviews_courseoverview' table of the edxapp MySQL database:
        # if the course is in the table, the operation succeeded and we report success;
        # if it is not, the operation failed and we report failure.
elif list[0] == 'd02':
os.chdir('/var/tmp')
course = list[1].split('/')[-1].split('.')[0]
log.info('course name is found as '+course)
shutil.rmtree(course, ignore_errors=True)
comm = 'git clone '+list[1]
log.info('now git cloning using --- '+comm)
p = Popen(comm.encode('utf-8'), shell=True, stdin=PIPE)
p.wait()
log.info('cloning is over')
location = course+'/course.xml'
xmldoc = minidom.parse(location.encode('utf-8'))
itemlist = xmldoc.getElementsByTagName('course')
org = itemlist[0].attributes['org'].value
course1 = itemlist[0].attributes['course'].value
url_name = itemlist[0].attributes['url_name'].value
course_url = org + '+' + course1 + '+' + url_name
log.info(course_url)
os.chdir('/edx/app/edxapp/edx-platform')
log.info('dir changed and starting to execute')
command = '/edx/bin/python.edxapp ./manage.py cms --settings=aws import /edx/var/edxapp/data /var/tmp/' + course
p1 = Popen(command, shell=True, stdin=PIPE)
p1.wait()
sql_user = 'root'
database = 'edxapp'
db_mysql = MySQLdb.connect(user=sql_user, db=database)
query = "select id from course_overviews_courseoverview"
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query)
courses = mysql_cursor.fetchall()
l = []
for b in courses:
for c in b:
l.append(c)
flag = False
for elem in l:
if elem.endswith(course_url):
flag = True
break
if flag==True:
res += 'Import course contents(by creating a new course at the time of import) of course "' + course + '" from git repository is successful'
else:
res += 'Import course contents(by creating a new course at the time of import) of course "' + course + '" from git repository has failed'
        # Running the asset collection commands.
        # The commands below follow the documentation directly but are currently commented out;
        # please check the referenced modules for more details.
elif list[0] == 'd03':
# os.chdir('..')
# os.popen('. /edx/app/edxapp/edxapp_env').read()
# os.chdir('/edx/app/edxapp/edx-platform')
# command1 = 'paver update_assets cms --settings=aws'
# command2 = 'paver update_assets lms --settings=aws'
# p1 = Popen(command1, shell=True, stdin=PIPE)
# p1.wait()
# p2 = Popen(command2, shell=True, stdin=PIPE)
# p2.wait()
# res += 'Running of the asset collection commands is successful'
res += 'Functionality development still in progress'
        # Here the objective is to export the course contents to a git repository.
# Functionality is yet to be implemented
elif list[0] == 'd04':
res += 'Functionality development still in progress'
        # Here the objective is to activate the user.
        # We need to modify the is_active variable in the auth_user table of the edxapp mysql database;
        # as soon as we change its value to 1, the user gets activated.
        # For the sake of verification, we also check that is_active has changed for that particular row,
        # and accordingly send feedback to the user regarding the success or failure of the query.
elif list[0] == 'd05':
email = list[1]
db = MySQLdb.connect("localhost", "root", "", "edxapp")
cursor = db.cursor()
sql = "select is_active from auth_user where email='" + email + "'"
cursor.execute(sql)
results = cursor.fetchall()
temp = 0
l=[]
for row in results:
l.append(row[0])
if len(l) == 1:
sql = "update auth_user set is_active = 1 WHERE email = '" + email + "'"
cursor.execute(sql)
db.commit()
sql = "select is_active from auth_user where email='" + email + "'"
cursor.execute(sql)
results = cursor.fetchall()
temp = 0
for row in results:
temp = int(row[0])
if (temp == 1):
res += 'Activation of the user was successful'
else:
res += 'Activation of the user was not successful'
else:
res += 'User is not available to be activated'
        # Here the objective is to deactivate the user.
        # We need to modify the is_active variable in the auth_user table of the edxapp mysql database;
        # as soon as we change its value to 0, the user gets deactivated.
        # For the sake of verification, we also check that is_active has changed for that particular row,
        # and accordingly send feedback to the user regarding the success or failure of the query.
elif list[0] == 'd06':
email = list[1]
db = MySQLdb.connect("localhost", "root", "", "edxapp")
cursor = db.cursor()
sql = "select is_active from auth_user where email='" + email + "'"
cursor.execute(sql)
results = cursor.fetchall()
temp = 0
l=[]
for row in results:
l.append(row[0])
if len(l) == 1:
sql = "update auth_user set is_active = 0 WHERE email = '" + email + "'"
cursor.execute(sql)
db.commit()
sql = "select is_active from auth_user where email='" + email + "'"
cursor.execute(sql)
results = cursor.fetchall()
temp = 1
for row in results:
temp = int(row[0])
if (temp == 0):
res += 'Deactivation of the user was successful'
else:
res += 'Deactivation of the user was not successful'
else:
res += 'User is not available to be activated'
        # Certificate generation for a particular user in a particular course.
        # We first go to the location '/edx/app/edxapp/edx-platform'
        # and execute the command for certificate generation.
        # To verify that the certificate was actually generated, we check the
        # certificates_generatedcertificate table in the edxapp mysql database and, if it is available,
        # join it with the auth_userprofile table,
        # and in the end look up the name of the user whose certificate was generated.
elif list[0] == 'd07':
os.chdir('/edx/app/edxapp/edx-platform')
command = "/edx/bin/python.edxapp ./manage.py lms --settings aws regenerate_user -u " + list[1] + " -c " + list[2] + " --insecure"
p = Popen(command, shell=True, stdin=PIPE)
p.wait()
time.sleep(10)
db5 = MySQLdb.connect("localhost", "root", "", "edxapp")
cursor = db5.cursor()
sql = "select c.name from auth_userprofile a,certificates_generatedcertificate c where a.name = c.name and a.mailing_address='"+list[1]+"' and c.status='downloadable'"
log.info(sql)
log.info(type(list[1]))
cursor.execute(sql)
results = cursor.fetchall()
log.info(results)
names_list = []
for row in results:
names_list.append(row[0])
            if names_list:
                log.info('certificate generated for ' + names_list[0])
if results:
res += 'Certificate generation for particular user was successful'
else:
res += 'Certificate generation for particular user was unsuccessful'
        # Certificate generation for all the users of a particular course.
        # We first go to the location '/edx/app/edxapp/edx-platform'
        # and execute the command for certificate generation.
        # To verify that the certificates were actually generated, we check the
        # certificates_generatedcertificate table in the edxapp mysql database.
        # We then collect the names of users for whom a certificate was generated
        # and of those for whom it was not,
        # and print both lists on the final screen for the user.
elif list[0] == 'd08':
db = MySQLdb.connect("localhost", "root", "", "edxapp")
cursor = db.cursor()
sql2="select name from certificates_generatedcertificate where course_id = '" + list[1] + "'"
cursor.execute(sql2)
results2 = cursor.fetchall()
names_list2 = []
for row in results2:
names_list2.append(row[0])
if names_list2:
os.chdir('/edx/app/edxapp/edx-platform')
command = "/edx/bin/python.edxapp ./manage.py lms --settings aws ungenerated_certs -c " + list[1] + " --insecure"
p = Popen(command, shell=True, stdin=PIPE)
p.wait()
sql = "select name from certificates_generatedcertificate where course_id = '" + list[1] + "' and status = 'downloadable'"
sql1 = "select name from certificates_generatedcertificate where course_id = '" + list[1] + "' and status != 'downloadable'"
cursor.execute(sql)
results = cursor.fetchall()
cursor.execute(sql1)
results1 = cursor.fetchall()
names_list=[]
names_list1=[]
for row in results:
names_list.append(row[0])
for row in results1:
names_list1.append(row[0])
if not names_list:
res += 'Certificate generation was totally unsuccessful'
else:
res += 'Certificate generation was successful for:<br><ol><li>'+'</li><li>'.join(names_list)+'</li></ol><br><br><br>'
if names_list1:
res += 'Certificate generation was unsuccessful for:<br><ol><li>'+'</li><li>'.join(names_list1)+'</li></ol>'
else:
res+='Course itself is not available'
        # whichever branch we went through, we now have a result to display to
        # the user -- either success or failure. We store that result in the
        # success variable (an XBlock String field) and return it as the
        # response to the incoming JSON request.
        # this marks the end of the perform handler.
self.success = res
print self.success
return {"success": self.success}
# This is to create the scenarios if we would like to see in the workbench while developing your XBlock.
@staticmethod
def workbench_scenarios():
return [
("AdminXBlock",
"""<adminxblock/>
"""),
("Multiple AdminXBlock",
"""<vertical_demo>
<adminxblock/>
<adminxblock/>
<adminxblock/>
</vertical_demo>
"""),
]
| UTF-8 | Python | false | false | 19,646 | py | 11 | adminxblock.py | 4 | 0.584903 | 0.578896 | 0 | 396 | 48.611111 | 179 |
czming/cpy5p4 | 17,205,639,028,494 | 55744240d302356f498900f02855fa05aab79e1b | 652aba70f616b73595b68a6d48c7edbd47eda6cb | /q3_find_gcd.py | 0c5978f0ad095fa5a7e558cd2aefa9954a7c9983 | [] | no_license | https://github.com/czming/cpy5p4 | 7eaea266f30d1d4fc2f964efd00ea52878a75951 | 8a113aee8cd42e27e961efa3fd8343ad8d6ba060 | refs/heads/master | 2016-08-11T14:16:58.006851 | 2016-02-15T11:28:45 | 2016-02-15T11:28:45 | 51,683,506 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #find gcd
def find_gcd(number_one, number_two):
array = [number_one, number_two]
array.sort()
    #numbers are sorted in ascending order, so the smallest one is the first object in array
if array[1] % array[0] == 0:
return array[0]
else:
return find_gcd(array[0], array[1] % array[0])
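
# Quick sanity check (editorial example): find_gcd(48, 18) recurses through
# find_gcd(18, 12) and find_gcd(12, 6) before returning 6.
if __name__ == '__main__':
    print(find_gcd(48, 18))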
| UTF-8 | Python | false | false | 306 | py | 8 | q3_find_gcd.py | 8 | 0.614379 | 0.591503 | 0 | 9 | 32.444444 | 76 |
Aasthaengg/IBMdataset | 10,505,490,011,977 | 8be40d934b2a1fe0a67d5f16e2ff86db4df7a526 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00002/s985758471.py | d984421c810a532b21fbc2c2b792e273c544069b | [] | no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf8 -*-
import sys
import math
for s in sys.stdin:
a,b = map(int, s.split(' '))
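    # digit count of a + b is floor(log10(a + b)) + 1 (valid for a + b >= 1);
    # e.g. the line "123 877" gives a + b = 1000, so the program prints 4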
print int(math.log10(a+b)) + 1 | UTF-8 | Python | false | false | 132 | py | 202,060 | s985758471.py | 202,055 | 0.583333 | 0.55303 | 0 | 6 | 21.166667 | 34 |
rossbenedict/Challenges | 4,904,852,687,714 | 104cfa22329001ea02067e8d2ff8ed08551033f0 | d5878c1f53b392b050089b1819c111432d0d6bce | /Challenges/challenge6/challenge6.py | 5acfe5b19b78b10208c5cbbaaa67f1adcc98cec8 | [] | no_license | https://github.com/rossbenedict/Challenges | 23e31bffe3b55140fc00251b3e83566ed3b9d4bd | c156a26f98333bf4720456aa689ea0a8f9367752 | refs/heads/master | 2020-09-26T06:48:40.403709 | 2020-01-24T00:28:36 | 2020-01-24T00:28:36 | 226,193,822 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Challenge6(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome("../chromedriver")
def tearDown(self):
self.driver.close()
    # on error, grab a screenshot
    # decide where to store the screenshot
    # write the library to work on both Mac and Windows
    # create a folder and keep track of the path
    # need to print out the full path and/or open the image at the end
    # don't overwrite existing images --
    # recommended: use a time stamp in the file name
    # imaging libraries like Pillow are not thread safe and only capture the
    # on-top browser window, so use what's built into the webdriver instead:
    #   element.screenshot_as_png
    #   element.screenshot_as_base64 (saves as a string)
    # capture from Selenium:
    #   driver.save_screenshot('/data/image.png')
    # add how to save to a specific place
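    # Editorial sketch of the helper described above; the folder name and file
    # naming scheme are assumptions, not part of the original challenge code.
    def take_screenshot(self, name):
        import os
        folder = os.path.join(os.getcwd(), "screenshots")
        if not os.path.isdir(folder):
            os.makedirs(folder)
        # timestamp the file name so existing images are never overwritten
        path = os.path.join(folder, "%s_%d.png" % (name, int(time.time())))
        self.driver.save_screenshot(path)
        print("saved screenshot to " + path)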
#capturing exceptions
    def trycatch(self):
        try:
            print(x)  # x is undefined on purpose and raises NameError
        except NameError:
            print("an exception example")
        except BaseException:
            # something else went wrong: capture a screenshot, then re-raise
            self.take_screenshot("challenge6")
            raise

    # go to copart, search models, enter a good model and check it, then a bad model
    def test_challenge6(self):
        model_value = "altima S"
        try:
            self.driver.get("https://www.copart.com")
            filter_name = "Model"
            # walk the filter headings on the left-hand panel and expand "Model"
            elements = self.driver.find_elements(
                By.XPATH, "//*[@id='filters-collapse-1']//li//a")
            count = 0
            for e in elements:
                count = count + 1
                if e.text == filter_name:
                    e.click()
                    txt_element = self.driver.find_element(
                        By.XPATH,
                        "//*[@id='collapseinside" + str(count) + "']/form/div/input")
                    txt_element.send_keys(model_value)
                    check_element = self.driver.find_element(
                        By.XPATH,
                        "//*[@id='collapseinside" + str(count)
                        + "']//abbr[@value='" + model_value + "']")
                    check_element.click()
        except:
            print("an exception occurred")
            print("you wanted " + model_value)
            print("but these are the available checkboxes:")
            checkbox_elements = self.driver.find_elements(
                By.XPATH, "//*[@id='collapseinside4']//input[@type='checkbox']")
            for e in checkbox_elements:
                print(e.get_attribute("value"))
            # TODO from the notes: wrap this in a GetAllCheckboxes helper
            # (gac = GetAllCheckboxes(); gac.show()) and move the filter
            # handling into its own class that walks
            # //*[@id='filters-collapse-1']//li//a or clicks by link text
| UTF-8 | Python | false | false | 2,714 | py | 18 | challenge6.py | 18 | 0.630066 | 0.62675 | 0 | 81 | 32.481481 | 133 |
diesseits/Pieberry2 | 11,218,454,616,371 | dffebd3bdca6c3df1ddd477ac84af9ac99f741d7 | 35d49391a310d47a80fb892ca4cdeec9a9458e7b | /pieberry/pieutility/date.py | 15758aa0a135945cc8a5e59003fbfde3713a28de | [] | no_license | https://github.com/diesseits/Pieberry2 | 4f653be45e88cd93adb4a48d6446776fcb4e107c | f2fe4bb4a0542b96db3717277f9ed8319bab8dfa | refs/heads/master | 2021-01-21T13:11:09.540679 | 2012-10-24T22:54:09 | 2012-10-24T22:54:09 | 3,077,719 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import wx, datetime
def wxdate2pydate(date):
assert isinstance(date, wx.DateTime)
if date.IsValid():
ymd = map(int, date.FormatISODate().split('-'))
return datetime.date(*ymd)
else:
return None
def pydate2wxdate(date):
assert isinstance(date, (datetime.datetime, datetime.date))
tt = date.timetuple()
dmy = (tt[2], tt[1]-1, tt[0])
return wx.DateTimeFromDMY(*dmy)
def fmtdate(date):
'''Hack for ui - work around the fact that datetime's strftime
shits itself at dates prior to 1900'''
d = str(date.day) if date.day > 9 else '0%d' % date.day
m = str(date.month) if date.month > 9 else '0%d' % date.month
return '%d-%s-%s' % (date.year, m, d)
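
# Editorial usage sketch (not part of the original module): fmtdate handles
# pre-1900 dates that strftime rejects.
if __name__ == '__main__':
    print(fmtdate(datetime.date(1850, 3, 7))) # -> 1850-03-07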
| UTF-8 | Python | false | false | 734 | py | 117 | date.py | 114 | 0.613079 | 0.594005 | 0 | 22 | 32.318182 | 66 |
darkh14/HR-Product | 16,612,933,532,578 | 96ff8f7d52c1a5639470dedcaacb7b8bd9b1085b | 2c6d4c1b021ac83686275536d6b96098ca53f6af | /http_procession.py | fe1553d7b4c859cdeb8ca6951fa6f0d92b0c5782 | [] | no_license | https://github.com/darkh14/HR-Product | e003d5cea7a53ad0b6946b1bb0fc8c0028d09a12 | 7045f83c8126443dc840ae82b6458c120e8babf2 | refs/heads/main | 2023-05-07T03:42:28.548152 | 2021-05-22T07:49:29 | 2021-05-22T07:49:29 | 311,224,128 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import machine_learning as ml
from parsing import parsing_tool
import xml.etree.ElementTree as ET
import json
import mongo_connection
import filter
import data_processing
class HTTPProcessor:
def __init__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
self.parameters = {}
self.parameters_is_set = False
self.output = {}
self.output_values = {}
self.db_connector = None
self.status = ''
self.error = ''
def set_parameters(self):
if self.environ['REQUEST_METHOD'] == 'POST':
content_length = int(self.environ.get('CONTENT_LENGTH')) if self.environ.get('CONTENT_LENGTH') else 0
par_string = ''
if content_length:
par_string = self.environ['wsgi.input'].read(content_length)
else:
par_list = self.environ.get('wsgi.input')
if par_list:
for par_element in par_list:
par_string = par_element
if par_string:
self.parameters = self.parameters_from_json(par_string)
self.parameters_is_set = True
def process(self):
if not self.parameters_is_set:
self.set_parameters()
if not self.parameters_is_set:
self.status = 'error'
            self.error = 'Error setting parameters'
else:
if not self.parameters.get('request_type'):
self.status = 'error'
                self.error = "Parameter 'request_type' not found"
else:
if self.parameters['request_type'] == 'test':
for key in self.parameters:
self._add_parameter_to_output_(key, self.parameters[key] + ' test!')
self.status = 'OK'
elif self.parameters['request_type'] == 'get_fitting_cvs':
ids, error = ml.find_fitting_ids(**self.parameters)
if error:
self.status = 'error'
self.error = error
else:
self._add_parameter_to_output_('fitting_cvs', ids)
self.status = 'OK'
elif self.parameters['request_type'] == 'get_all_cvs':
ids, error = ml.get_all_ids(**self.parameters)
if error:
self.status = 'error'
self.error = error
else:
self._add_parameter_to_output_('all_cvs', ids)
self.status = 'OK'
elif self.parameters['request_type'] == 'set_cv_vacancy_labels':
is_set, error = ml.set_cv_vacancy_labels(**self.parameters)
if not is_set:
self.status = 'error'
self.error = error
else:
self.status = 'OK'
elif self.parameters['request_type'] == 'set_vacancies':
is_set, error = ml.set_vacancies(**self.parameters)
if not is_set:
self.status = 'error'
self.error = error
else:
self.status = 'OK'
elif self.parameters['request_type'] == 'set_profiles':
is_set, error = ml.set_profiles(**self.parameters)
if not is_set:
self.status = 'error'
self.error = error
else:
self.status = 'OK'
elif self.parameters['request_type'] == 'refill_cv_collection':
job_id, status, error = parsing_tool.refill_cv_collection(**self.parameters)
self.status = status
self.error = error
self._add_parameter_to_output_('job_id', job_id)
elif self.parameters['request_type'] == 'check_job_status':
self.db_connector = mongo_connection.MongoDBConnector()
id_filter = {}
if self.parameters.get('filter'):
id_filter.update(self.parameters.get('filter'))
limit = self.parameters.get('limit')
job_lines = self.db_connector.read_jobs(id_filter, limit)
self._add_parameter_to_output_('jobs', job_lines)
self.status = 'OK'
elif self.parameters['request_type'] == 'delete_jobs':
self.db_connector = mongo_connection.MongoDBConnector()
id_filter = {}
if self.parameters.get('filter'):
id_filter.update(self.parameters.get('filter'))
self.db_connector.delete_jobs(id_filter)
self.status = 'OK' if not self.db_connector.error else 'error'
self.error = self.db_connector.error
elif self.parameters['request_type'] == 'set_filter_collection':
self.db_connector = mongo_connection.MongoDBConnector()
result, error = filter.set_filter_collection(**self.parameters)
self.status = 'OK' if not error else 'error'
self.error = error
elif self.parameters['request_type'] == 'get_filter_collection':
print(self.parameters)
if self.parameters.get('collection_name') == 'filter_sites' and self.parameters.get('from_parsers'):
collection, error = parsing_tool.get_site_table_settings_from_parsers(**self.parameters)
else:
collection, error = filter.get_filter_collection(**self.parameters)
self._add_parameter_to_output_('filter_collection', collection)
self.status = 'OK' if not error else 'error'
self.error = error
elif self.parameters['request_type'] == 'get_filter_collection_names':
collection_names, error = filter.get_filter_collection_names(**self.parameters)
self._add_parameter_to_output_('collection_names', collection_names)
self.status = 'OK' if not error else 'error'
self.error = error
elif self.parameters['request_type'] == 'delete_filter_collection':
result, error = filter.delete_filter_collection(**self.parameters)
self.status = 'OK' if not error else 'error'
self.error = error
elif self.parameters['request_type'] == 'transform_cv':
result, error = data_processing.transform(**self.parameters)
self.status = 'OK' if not error else 'error'
self.error = error
elif self.parameters['request_type'] == 'fit_cv':
result, error = data_processing.fit(**self.parameters)
self.status = 'OK' if not error else 'error'
self.error = error
else:
self.status = 'error'
                    self.error = "Unknown request type '{}'".format(self.parameters['request_type'])
self._add_parameter_to_output_('status', self.status)
self._add_parameter_to_output_('error', self.error)
output_str = json.dumps(self.output, ensure_ascii=False).encode()
output_len = len(output_str)
self.start_response('200 OK', [('Content-type', 'text/html'), ('Content-Length', str(output_len))])
return [output_str]
def _add_parameter_to_output_(self, key, value):
self.output[key] = value
@staticmethod
def parameters_from_xml(xml_string):
parameters_dict = dict()
root = ET.fromstring(xml_string)
if root.tag.count('Structure') > 0:
for child in root:
if child.tag.count('Property') > 0:
for sub_child in child:
if sub_child.tag.count('Value') > 0:
parameters_dict[child.attrib['name']] = sub_child.text
return parameters_dict
    @staticmethod
    def parameters_from_json(json_string):
        return json.loads(json_string)
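
    # Editorial note: the long if/elif chain in process() above could be
    # collapsed into a dispatch table. A minimal sketch, with handler names
    # mirroring a few of the branches (illustrative only, not wired in):
    #
    #     HANDLERS = {
    #         'get_fitting_cvs': ml.find_fitting_ids,
    #         'get_all_cvs': ml.get_all_ids,
    #         'set_cv_vacancy_labels': ml.set_cv_vacancy_labels,
    #     }
    #     handler = HANDLERS.get(self.parameters['request_type'])
    #     if handler is None:
    #         self.status, self.error = 'error', 'Unknown request type'
    #     else:
    #         result, self.error = handler(**self.parameters)
    #         self.status = 'OK' if not self.error else 'error'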
def process(environ, start_response):
processor = HTTPProcessor(environ, start_response)
output = processor.process()
return output | UTF-8 | Python | false | false | 8,678 | py | 33 | http_procession.py | 15 | 0.512906 | 0.5121 | 0 | 226 | 37.402655 | 120 |
jimgregory/raspberrypi | 5,188,320,499,625 | fe702508d953366e3ae4099bd9e5c63067bbaad8 | 13b164c58b6818cd1a918b929e627052d188220b | /scripts/input-polling.py | e6a4d8030f3673ec134d979723cd001cc3195bf1 | [] | no_license | https://github.com/jimgregory/raspberrypi | 85aef9dff176aa31367159daee3625be269eb446 | bb8fdae7f017ba3736efea2a2b053721ebb9af7a | refs/heads/master | 2021-01-10T18:49:22.355368 | 2015-12-01T13:31:10 | 2015-12-01T13:31:10 | 35,719,911 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python
# detects a change on an input pin by polling every second
# exits after 10 seconds
import RPi.GPIO as GPIO
from time import sleep
# pin the switch is attached to (be sure to include a resistor in series)
pin = 21
# set which pin numbering system to use
# BOARD = pin layout on board
# BCM = pins layout on Broadcom chip
GPIO.setmode(GPIO.BCM)
# setup pin as input with (optional) pull-up resistor 'up'
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
for i in xrange(10):
print str(i) + '. Input is ' + str(GPIO.input(pin))
sleep(1)
# reset the pin to default state (this is important!)
GPIO.cleanup()
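
# Editorial sketch: instead of polling, RPi.GPIO can fire a callback on edge
# changes (bouncetime below is an illustrative debounce value in ms):
#
#   def on_change(channel):
#       print 'input changed on pin ' + str(channel)
#   GPIO.add_event_detect(pin, GPIO.BOTH, callback=on_change, bouncetime=200)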
| UTF-8 | Python | false | false | 644 | py | 8 | input-polling.py | 5 | 0.701863 | 0.690994 | 0 | 25 | 24.72 | 69 |
disissaikat/cfc_2020 | 14,568,529,102,603 | 5cd4c6434a7dbc098084d70839f5c2b97d6c248f | dd00da0254875c877a35a59ae372391484a9631c | /ngo_app_code/disasters.py | 8863947dbbfa8383c1e389d5c13da18442d0f502 | [] | no_license | https://github.com/disissaikat/cfc_2020 | 9da8476f0eb26946b2d5fd1e63191e44573e8ab2 | 8222f9d1f6a8ba66190cf0dce19f9a5f15ebe789 | refs/heads/master | 2022-11-21T11:37:58.781244 | 2020-07-30T16:05:26 | 2020-07-30T16:05:26 | 282,971,756 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
def get_disasters():
response = json.loads(requests.get("https://api.reliefweb.int/v1/disasters?appname=togetherly&profile=list&preset=latest&slim=1").text)
    # collect the name of each disaster returned by the ReliefWeb API
    disaster = []
    for item in response["data"]:
        print(item["fields"]["name"])
        disaster.append(item["fields"]["name"])
return disaster | UTF-8 | Python | false | false | 397 | py | 24 | disasters.py | 16 | 0.654912 | 0.649874 | 0 | 11 | 34.272727 | 139 |
Ashwini2103/Ashwini-Portfolio | 12,635,793,810,942 | 6428564fde74555b1a4109a22f8f0c1d8512c72c | 08ad9ca76a7934205b02571b3f91eaf5255bcf8b | /Airbnb_new_user_booking.py | 79db8bb2124a1fe1a4fc53d7ace3242e23afb055 | [] | no_license | https://github.com/Ashwini2103/Ashwini-Portfolio | 6c3bc06b440dac1bf22343e0ccf5a47df3174e50 | 5a8653faf94bdad125285bb3f6cba0f2076bf591 | refs/heads/master | 2020-04-05T20:37:12.132846 | 2018-11-13T15:22:59 | 2018-11-13T15:22:59 | 157,188,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 09:57:14 2018
@author: Dad
"""
## Airbnb New Booking Dataset
''' Predicting the destination country in which new Airbnb users will make
their first booking. We start by importing the libraries and reading the
.csv files.
'''
import pandas as pd
files=['age_gender_bkts.csv','countries.csv','sessions.csv','train_users_2.csv','test_users.csv']
#%%
## Placing files into dictionary data
data={}
for i in files:
d=pd.read_csv(r'E:\Airbnb Dataset\{0}'.format(i))
data[i.replace('.csv','')]=d
#%%
## Displaying contents present in dictionary
for k,v in data.items():
print('\n'+k+'\n')
print(v.head())
#%%
## Checking datatypes of key 'countries' in dictionary
data['countries'].dtypes
#%%
## Displaying contents in countries
data['countries'].head()
#%%
## Checking datatypes of key 'age_gender_bkts' in dictionary
data['age_gender_bkts'].dtypes
#%%
## Displaying contents in age_gender_bkts
data['age_gender_bkts'].head()
#%%
## Converting column 'year' in 'age_gender_bkts' to datatype 'int'
data['age_gender_bkts']['year']=data['age_gender_bkts']['year'].astype(int)
#%%
## Checking datatypes of key 'age_gender_bkts' in dictionary
data['age_gender_bkts'].dtypes
#%%
## Checking datatypes of key 'train_users_2' in dictionary
data['train_users_2'].dtypes
#%%
## Displaying contents in train_users_2
data['train_users_2'].head()
#%%
## Counting user_id's in 'train_users_2'
data['train_users_2']['id'].value_counts()
#%%
## Checking datatypes of key 'sessions' in dictionary
data['sessions'].dtypes
#%%
## Displaying contents in sessions
data['sessions'].head()
#%%
## Counting user_id's in 'sessions'
data['sessions']['user_id'].value_counts()
#%%
## Creating a new dataframe 'sessions' and assigning data['sessions'] to it
sessions=data['sessions']
#%%
## Checking for null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
## Dropping rows containing null id's in dataframe 'sessions'
sessions=sessions[sessions.user_id.notnull()]
#%%
## Checking for further null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
## Null values in feature 'action' relate to feature 'action_type'
sessions[sessions.action.isnull()].action_type.value_counts()
#%%
## Mark the rows whose 'action' is null: this assignment sets *every* column
## of those rows (including user_id) to 'message', so they can be dropped next
sessions[sessions.action.isnull()]='message'
#%%
## Drop the marked rows, i.e. the rows that originally had a null 'action'
sessions=sessions[sessions.user_id!='message']
#%%
## Checking for further null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
## Null values in feature 'action_type' relate to feature 'action_detail'
sessions[sessions.action_type.isnull()].action_detail.value_counts()
#%%
''' Filling null values in columns 'action_type' and 'action_detail' by
grouping on 'user_id' and 'action' inside the function
min_null_values_action_type_detail
'''
def min_null_values_action_type_detail(df,feature):
## Filling common values for each user and action
new_df=pd.DataFrame(df.groupby(['user_id','action'])[feature].value_counts())
new_df.rename(columns={feature:'Count'},inplace=True)
new_df=new_df.reset_index()
new_df_max=pd.DataFrame(new_df.groupby(['user_id','action'])['Count'].max())
new_df_max=new_df_max.reset_index()
## Merging 2 dataframes
new_df_max=new_df_max.merge(new_df,on=['user_id','action','Count'])
del new_df_max['Count']
## Merge with main dataframe
df=df.merge(new_df_max,left_on=['user_id','action'],right_on=['user_id','action'],how='left')
return df
#%%
## Passing values while calling function
sessions=min_null_values_action_type_detail(sessions,'action_type')
#%%
sessions=min_null_values_action_type_detail(sessions,'action_detail')
#%%
## Replacing null values to cols 'action_type' and 'action_detail'
sessions.loc[sessions.action_type_x.isnull(),'action_type_x']=sessions.action_type_y
#%%
sessions.loc[sessions.action_detail_x.isnull(),'action_detail_x']=sessions.action_detail_y
#%%
## Assigning values of action_type_x and action_detail_x tp features action_type and action_detail
sessions['action_type']=sessions.action_type_x
sessions['action_detail']=sessions.action_detail_x
#%%
## Dropping few cols from Dataframe sessions
sessions=sessions.drop(['action_type_x','action_detail_x','action_type_y','action_detail_y'],axis=1)
#%%
## Checking for further null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
''' Filling null values in columns 'action_type' and 'action_detail' by
grouping on 'action' inside the function min_null_values_action_type_detail
'''
def min_null_values_action_type_detail(df,feature):
new_df=pd.DataFrame(df.groupby(['action'])[feature].value_counts())
new_df.rename(columns={feature:'Count'},inplace=True)
new_df=new_df.reset_index()
new_df_max=pd.DataFrame(new_df.groupby(['action'])['Count'].max())
new_df_max=new_df_max.reset_index()
## Merging two dataframes
new_df_max=new_df_max.merge(new_df,on=['action','Count'])
del new_df_max['Count']
## Merge with main dataframe
df=df.merge(new_df_max,left_on=['action'],right_on=['action'],how='left')
return df
#%%
## Passing values to the function
sessions=min_null_values_action_type_detail(sessions,'action_type')
#%%
sessions=min_null_values_action_type_detail(sessions,'action_detail')
#%%
## Replacing null values to cols 'action_type' and 'action_detail'
sessions.loc[sessions.action_type_x.isnull(),'action_type_x']=sessions.action_type_y
#%%
sessions.loc[sessions.action_detail_x.isnull(),'action_detail_x']=sessions.action_detail_y
#%%
## Assigning values of action_type_x and action_detail_x to features action_type and action_detail
sessions['action_type']=sessions.action_type_x
sessions['action_detail']=sessions.action_detail_x
#%%
## Dropping few cols from Dataframe sessions
sessions=sessions.drop(['action_type_x','action_detail_x','action_type_y','action_detail_y'],axis=1)
#%%
## Checking for further null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
## We can see 'Track Page View' and 'Lookup' actions having null value in entire dataset
sessions.loc[sessions['action']=='lookup','action_type']='lookup'
sessions.loc[sessions['action']=='lookup','action_detail']='lookup'
#%%
sessions.loc[sessions['action']=='Track Page View','action_type']='Track Page View'
sessions.loc[sessions['action']=='Track Page View','action_detail']='Track Page View'
#%%
## Filling missing values using 'missing' in dataset
sessions.action_type=sessions.action_type.fillna("missing")
sessions.action_detail=sessions.action_detail.fillna("missing")
#%%
## Checking for further null values in features of dataframe 'sessions'
sessions.isnull().sum()
#%%
sessions.dtypes
#%%
## Changing datatype of feature 'secs_elapsed' to 'float'
sessions['secs_elapsed']=sessions['secs_elapsed'].astype(float)
#%%
## Filling null values in column 'secs_elapsed' with the median per 'action'
def median_values_secs_elapsed(df,feature):
nw_df=pd.DataFrame(df.groupby(['action'],as_index=False)['secs_elapsed'].median())
df=df.merge(nw_df,left_on=['action'],right_on=['action'],how='left')
return df
#%%
## Passing values to the function
sessions=median_values_secs_elapsed(sessions,'secs_elapsed')
#%%
# Replacing null values using col secs_elapsed_y
sessions.loc[sessions.secs_elapsed_x.isnull(),'secs_elapsed_x']=sessions.secs_elapsed_y
#%%
## Assigning values of secs_elapsed_x to secs_elapsed
sessions['secs_elapsed']=sessions.secs_elapsed_x
#%%
## Dropping cols in sessions
sessions=sessions.drop(['secs_elapsed_x','secs_elapsed_y'],axis=1)
#%%
''' Creating a new dataframe new_session by grouping various cols ['user_id','action','device_type','action_type','action_detail']
and finding mean of 'secs_elapsed'
'''
new_session=pd.DataFrame(sessions.groupby(['user_id','action','device_type','action_type','action_detail'])['secs_elapsed'].mean())
#%%
## Resetting the index
new_session=new_session.reset_index()
#%%
## Counting values in feature 'device_type'
new_session['device_type'].value_counts()
#%%
## Segregating device_types into their respective categories by grouping them into lists
apple_device=['Mac Desktop','iPhone','iPad Tablet','iPodtouch']
android_device=['Android Phone','Android App Unknown Phone/Tablet','Tablet']
windows_device=['Windows Desktop','Windows Phone']
other_device=['Linux Desktop','Chromebook','Blackberry','Opera Phone']
#%%
## Creating a dictionary by appending values of list
device_types={'apple_device':apple_device,
'android_device':android_device,
'windows_device':windows_device,
'other_device':other_device}
#%%
## Creating columns for key cols and naming in form of 0 and 1
for device in device_types:
new_session[device]=0
new_session.loc[new_session.device_type.isin(device_types[device]),device]=1
#%%
## Dropping col 'device_type' from dataframe new_session
new_session=new_session.drop(['device_type'],axis=1)
#%%
## Total time each user spends on the website
time_spent=pd.DataFrame(sessions.groupby(['user_id'])['secs_elapsed'].sum())
#%%
## Resetting index in dataframe time_spent
time_spent=time_spent.reset_index()
#%%
## Merging time_spent dataframe with new_session dataframe
new_session=new_session.merge(time_spent,left_on='user_id',right_on='user_id',how='left')
#%%
new_session['duration']=new_session.secs_elapsed_y
#%%
## Dropping cols in new_session
new_session=new_session.drop(['secs_elapsed_x','secs_elapsed_y'],axis=1)
#%%
## Dropping duplicates in dataframe new_session
new_session=new_session.drop_duplicates()
#%%
## Checking null values in dataframe new_session
new_session.isnull().sum()
#%%
## Merging new_session with dataframe data[train_users_2]
train1=data['train_users_2'].merge(new_session,left_on=data['train_users_2']['id'],right_on=new_session['user_id'],how='inner')
#%%
## Users in data['train_users_2'] that have no session data
train2=data['train_users_2'][~data['train_users_2']['id'].isin(new_session['user_id'])]
#%%
## Concatenate 2 dataframes train1 and train2 into train
train=pd.concat([train1,train2])
#%%
## Merging new_session with dataframe data[test_users]
test1=data['test_users'].merge(new_session,left_on=data['test_users']['id'],right_on=new_session['user_id'],how='inner')
#%%
## Users in data['test_users'] that have no session data
test2=data['test_users'][~data['test_users']['id'].isin(new_session['user_id'])]
#%%
test=pd.concat([test1,test2])
#%%
## Combining train and test dataframes together
df=pd.concat([train,test])
#%%
## Dropping user_id and key_0 (duplicates of id) along with first_device_type
df=df.drop(['key_0','user_id','first_device_type'],axis=1)
#%%
## Checking for null values in dataframe df
df.isnull().sum()
#%%
## Replacing null values by 0 in list cols
cols=['android_device','apple_device','duration','windows_device','other_device']
df[cols]=df[cols].fillna(0)
#%%
## Finding out null values in dataframe df
df.isnull().sum()
#%%
## Replace null values using "missing" in list col1
col1=['action','action_type','action_detail']
df[col1]=df[col1].fillna("missing")
#%%
## Finding out null values in dataframe df
df.isnull().sum()
#%%
## Checking which country_destination the rows with a null language belong to
df[df.language.isnull()].country_destination.value_counts()
#%%
def min_language_null_values(frame,feature):
n_df=pd.DataFrame(frame.groupby(['country_destination'])['language'].value_counts())
n_df.rename(columns={'language':'count'},inplace=True)
n_df=n_df.reset_index()
n_df_new=pd.DataFrame(n_df.groupby(['country_destination'])['count'].max())
n_df_new=n_df_new.reset_index()
## Merging two dataframes
n_df_new=n_df_new.merge(n_df,on=['country_destination','count'])
## Merging with main dataframe
frame=frame.merge(n_df_new,left_on=['country_destination'],right_on=['country_destination'],how='left')
return frame
#%%
## Passing values to function min_language_null_values
df=min_language_null_values(df,'language')
#%%
# Replacing null values using col language_y
df.loc[df.language_x.isnull(),'language_x']=df.language_y
#%%
## Assigning values of col language_x to col language
df['language']=df.language_x
#%%
## Dropping cols from dataframe
df=df.drop(['language_x','language_y','count'],axis=1)
#%%
## Checking for null values in dataframe df
df.isnull().sum()
#%%
## Finding out null values in col first_affiliate_tracked
df[df.first_affiliate_tracked.isnull()]['affiliate_channel'].value_counts()
#%%
## Filling null values in first_affiliate_tracked
def min_first_affiliate_null(frame,feature):
nf_frame=pd.DataFrame(frame.groupby(['affiliate_channel','affiliate_provider'])['first_affiliate_tracked'].value_counts())
nf_frame.rename(columns={'first_affiliate_tracked':'count'},inplace=True)
nf_frame=nf_frame.reset_index()
## Finding max value in it
nf_frame_max=pd.DataFrame(nf_frame.groupby(['affiliate_channel','affiliate_provider'])['count'].max())
## Resetting the index of dataframe nf_frame_max
    nf_frame_max=nf_frame_max.reset_index()
## Merging two dataframes
nf_frame_max=nf_frame_max.merge(nf_frame,on=['affiliate_channel','affiliate_provider','count'])
## Merging with main dataframe
frame=frame.merge(nf_frame_max,left_on=['affiliate_channel','affiliate_provider'],right_on=['affiliate_channel','affiliate_provider'],how='left')
return frame
#%%
df=min_first_affiliate_null(df,'first_affiliate_tracked')
#%%
## Replacing null values in 'first_affiliate_tracked_x'
df.loc[df.first_affiliate_tracked_x.isnull(),'first_affiliate_tracked_x']=df.first_affiliate_tracked_y
#%%
## Assigning values of 'first_affiliate_tracked_x' to col 'first_affiliate_tracked'
df['first_affiliate_tracked']=df.first_affiliate_tracked_x
#%%
## Dropping cols from dataframe df
df=df.drop(['first_affiliate_tracked_x','first_affiliate_tracked_y'],axis=1)
#%%
## Checking for null values in dataframe df
df.isnull().sum()
#%%
## Filling null values in col 'date_first_booking'
df['date_first_booking']=df['date_first_booking'].fillna(df['date_first_booking'].mode()[0])
#%%
## Checking for null values in dataframe df
df.isnull().sum()
#%%
## Converting feature 'timestamp_first_active' to 'datetime' datatype
df['timestamp_first_active']=df['timestamp_first_active'].astype(str)
#%%
from datetime import datetime
df['timestamp_first_active']=df['timestamp_first_active'].apply(lambda x:datetime.strptime(x,'%Y%m%d%H%M%S'))
#%%
## Converting timestamp_first_active into year,month and date:
df['timestamp_hour']=df['timestamp_first_active'].map(lambda x: x.hour)
df['timestamp_minute']=df['timestamp_first_active'].map(lambda x: x.minute)
df['timestamp_second']=df['timestamp_first_active'].map(lambda x: x.hour)
df['timestamp_year']=df['timestamp_first_active'].map(lambda x: x.year)
df['timestamp_month']=df['timestamp_first_active'].map(lambda x: x.month)
df['timestamp_day']=df['timestamp_first_active'].map(lambda x: x.day)
df['timestamp_weekday']=df['timestamp_first_active'].map(lambda x: x.weekday())
#%%
## Dropping column 'timestamp_first_active'
df=df.drop(['timestamp_first_active'],axis=1)
#%%
## Converting cols 'date_account_created' and 'date_first_booking' to 'datetime' datatype
df['date_account_created']=df['date_account_created'].astype(str)
df['date_first_booking']=df['date_first_booking'].astype(str)
#%%
df['date_account_created']=df['date_account_created'].apply(lambda x:datetime.strptime(x,'%Y-%m-%d'))
df['date_first_booking']=df['date_first_booking'].apply(lambda x:datetime.strptime(x,'%Y-%m-%d'))
#%%
## Feature 'time_to_first_booking' is a difference between 'date_account_created' and 'date_first_booking'
df['time_to_first_booking']=df['date_account_created']-df['date_first_booking']
#%%
## Univariate analysis of DataFrame df
df['country_destination'].value_counts(normalize=True).plot.bar(title='Destination_Country')
#%%
## Bi-variate analysis
# 1. Action Feature
Action=pd.crosstab(df['action'],df['country_destination'])
#%%
Action.plot(kind='bar',figsize=(10,10),stacked=True)
#%%
## Using a cut-off value to reduce the categories in feature 'action'
cut_off=1378
other_actions=[]
for action,count in df.action.value_counts().iteritems():
if count < cut_off:
other_actions.append(action)
df.loc[df.action.isin(other_actions),"action"]="other_action"
#%%
df.action.value_counts()
#%%
# 2. Action_Type Feature
action_type=pd.crosstab(df['action_type'],df['country_destination'])
action_type.plot(kind='bar',figsize=(10,10),stacked=True)
#%%
## Using a cut-off value to reduce the categories in feature 'action_type'
other_action_type=[]
for action_type,count in df.action_type.value_counts().iteritems():
if count < cut_off:
other_action_type.append(action_type)
df.loc[df.action_type.isin(other_action_type),"action_type"]="other"
#%%
df.action_type.value_counts()
#%%
# 3. Action_Detail Feature
action_detail=pd.crosstab(df['action_detail'],df['country_destination'])
action_detail.plot(kind='bar',figsize=(10,10),stacked=True)
#%%
## Using a cut-off value to reduce the categories in feature 'action_detail'
other_action_detail=[]
for action_detail,count in df.action_detail.value_counts().iteritems():
if count < cut_off:
other_action_detail.append(action_detail)
df.loc[df.action_detail.isin(other_action_detail),"action_detail"]="other"
#%%
df.action_detail.value_counts()
#%%
# 4. affiliate_channel Feature
affiliate_channel=pd.crosstab(df['affiliate_channel'],df['country_destination'])
affiliate_channel.plot(kind='bar',figsize=(10,10),stacked=True)
#%%
# 5. affiliate_provider Feature
affiliate_provider=pd.crosstab(df['affiliate_provider'],df['country_destination'])
affiliate_provider.plot(kind='bar',figsize=(10,10),stacked=True)
#%%
## Using cut-off value to reduce the categories in feature 'affiliate_provider'
other_affiliate_providers=[]
for affiliate_provider,count in df.affiliate_provider.value_counts().iteritems():
if count < cut_off:
other_affiliate_providers.append(affiliate_provider)
df.loc[df.affiliate_provider.isin(other_affiliate_providers),"affiliate_provider"]="other"
#%%
df.affiliate_provider.value_counts()
#%%
# 6.first_browser Feature
first_browser=pd.crosstab(df['first_browser'],df['country_destination'])
#%%
## Create a new feature for mobile browsers
mobile_browsers=['Mobile Safari','Chrome Mobile','Android Browser','Mobile Firefox','IE Mobile']
df.loc[df.first_browser.isin(mobile_browsers),"first_browser"]="Mobile"
#%%
## Categorizing other_browsers using cut_off
other_browsers=[]
for browser,count in df.first_browser.value_counts().iteritems():
if count < cut_off:
other_browsers.append(browser)
df.loc[df.first_browser.isin(other_browsers),"first_browser"]="Other"
#%%
df.first_browser.value_counts()
#%%
# 7. gender Feature
gender=pd.crosstab(df['gender'],df['country_destination'])
#%%
## 8. signup_app Feature
signup_app=pd.crosstab(df['signup_app'],df['country_destination'])
#%%
## 9. signup_method Feature
signup_method=pd.crosstab(df['signup_method'],df['country_destination'])
#%%
## 10.language Feature
language=pd.crosstab(df['language'],df['country_destination'])
#%%
## Categorizing language into English / non-English
other_lang=[]
for language,count in df.language.value_counts().iteritems():
if count < 275:
other_lang.append(language)
df.loc[df.language.isin(other_lang),"language"]="other"
#%%
df['language']=df.language.map(lambda x: 0 if x == 'en' else 1)
#%%
df['language'].value_counts()
#%%
## signup_flow is the page from which the user signed up (0 -> home page)
df['signup_flow_simple']=df['signup_flow'].map(lambda x: 0 if x == 0 else 1)
#%%
## Analysis of feature 'first_booking'
df['year_first_booking']=df.date_first_booking.dt.year
df['month_first_booking']=df.date_first_booking.dt.month
df['weekday_first_booking']=df.date_first_booking.dt.weekday
#%%
## Value count in first_booking(yearly,monthly and weekday)
df['year_first_booking'].value_counts()
#%%
df['month_first_booking'].value_counts()
#%%
df['weekday_first_booking'].value_counts()
#%%
## Analysis of feature 'Account_created'
df['year_acct_created']=df.date_account_created.dt.year
df['month_acct_created']=df.date_account_created.dt.month
df['weekday_acct_created']=df.date_account_created.dt.weekday
#%%
## Value count in 'date_account_created'(yearly,monthly and weekday)
df['year_acct_created'].value_counts()
#%%
df['month_acct_created'].value_counts()
#%%
df['weekday_acct_created'].value_counts()
#%%
## Drop features which do not add more information in model building
df=df.drop(['date_first_booking','date_account_created','timestamp_hour','timestamp_minute','timestamp_second','timestamp_year','timestamp_month','timestamp_day','timestamp_weekday','year_acct_created','month_acct_created','weekday_acct_created','time_to_first_booking','age','signup_flow'],axis=1)
#%%
## Seperate target feature 'country_destination' from other input features
label=pd.DataFrame(df['country_destination'])
#%%
## Drop target feature 'country_destination' from input variables
df=df.drop(['country_destination'],axis=1)
#%%
## Displaying datatypes of df
df.dtypes
#%%
## Splitting data in terms of categorical and continous features
cat_features=[]
cont_features=[]
for feature in df.columns:
if df[feature].dtype == float or df[feature].dtype == 'int64':
cont_features.append(feature)
elif df[feature].dtype == object:
cat_features.append(feature)
#%%
## Date features need to be moved to the categorical list
date_lst=['year_first_booking','month_first_booking','weekday_first_booking']
for feature in date_lst:
    if feature in cont_features:
        cont_features.remove(feature)
    if feature not in cat_features:
        cat_features.append(feature)
#%%
## Create dummies for categorical features
for feature in cat_features:
dummies=pd.get_dummies(df[feature],prefix=feature,drop_first=False)
## Drop non-important features
df=df.drop(feature,axis=1)
df=pd.concat([df,dummies],axis=1)
print("{} is complete".format(feature))
#%%
#######################################################################
''' Model Building Phase
'''
#######################################################################
'''Using LDA (Linear Discriminant Analysis) to reduce the feature space.
The fit is restricted to the first 650,000 rows, since fitting on more data throws an out-of-memory error.'''
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda=LinearDiscriminantAnalysis(n_components=3)
df_lda=lda.fit_transform(df[:650000],label[:650000])
#%%
## Applying K-Fold cross validation technique on data
## (note: the loop below overwrites the split variables on every iteration,
## so only the last fold's train/validation split is used by the models that follow)
from sklearn.model_selection import KFold
kf=KFold(n_splits=20,shuffle=True,random_state=42)
for train_index,val_index in kf.split(df_lda):
x_train,x_val=df_lda[train_index],df_lda[val_index]
y_train,y_val=label.iloc[train_index],label.iloc[val_index]
#%%
## Model Building using Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
lr=LogisticRegression(penalty='l2',C=0.01,multi_class='ovr',max_iter=300,solver='lbfgs',n_jobs=-1,random_state=42)
lr.fit(x_train,y_train)
y_lr_pred=lr.predict(x_val)
print("Accuracy score",metrics.accuracy_score(y_val,y_lr_pred))
#%%
# Model Building using Decision Trees
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
dtc=DecisionTreeClassifier(max_depth=10,min_samples_split=90,min_samples_leaf=20,random_state=42)
dtc.fit(x_train,y_train)
y_dtc_predict=dtc.predict(x_val)
acc_dtc_score=metrics.accuracy_score(y_val,y_dtc_predict)
acc_dtc_score
#%%
## Model Building using RandomForest
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
rfc=RandomForestClassifier(n_estimators=200,max_depth=10,min_samples_split=90,n_jobs=-1,max_features=0.13,random_state=20,verbose=10)
rfc.fit(x_train,y_train)
y_rfc_pred=rfc.predict(x_val)
acc_rfc_score=metrics.accuracy_score(y_val,y_rfc_pred)
acc_rfc_score
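#%%
## Editorial sketch (not in the original script): gather the three accuracy
## scores computed above for a quick side-by-side comparison
results = {'logistic_regression': metrics.accuracy_score(y_val, y_lr_pred),
           'decision_tree': acc_dtc_score,
           'random_forest': acc_rfc_score}
for name, score in sorted(results.items(), key=lambda kv: -kv[1]):
    print(name, round(score, 4))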
| UTF-8 | Python | false | false | 23,829 | py | 3 | Airbnb_new_user_booking.py | 1 | 0.716774 | 0.710017 | 0 | 599 | 38.75793 | 298 |
pombredanne/django-oscar-cybersource | 6,287,832,144,998 | 5e1e2c12ec8e2f912965d2b1cee65a40e692e29f | 6007252aac3a9163d601e9aa986d13555f05631b | /cybersource/actions.py | 2d6eeb4c86297bc8ed119736068d3030cbf57d90 | [
"ISC"
] | permissive | https://github.com/pombredanne/django-oscar-cybersource | 9fe9e84517068ea53f7626112d7595a45071b3da | c522088112f9437867aac779c06923ee40aa242e | refs/heads/master | 2021-01-18T05:41:13.422445 | 2016-05-29T01:09:55 | 2016-05-29T01:09:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
from . import settings, signature
import random
import time
import re
class SecureAcceptanceAction(object):
access_key = settings.ACCESS
currency = settings.DEFAULT_CURRENCY
date_format = settings.DATE_FORMAT
locale = settings.LOCALE
profile_id = settings.PROFILE
signed_field_names = set()
transaction_type = ''
unsigned_field_names = set()
def fields(self):
names = self.signed_field_names | self.unsigned_field_names
fields = { name: '' for name in names }
data, signed_fields = self.build_request_data()
fields.update(data)
signed_fields = signed_fields | set(['signed_date_time', 'signed_field_names', 'unsigned_field_names'])
unsigned_fields = set(fields.keys()) - signed_fields
fields['signed_date_time'] = datetime.utcnow().strftime(self.date_format)
fields['signed_field_names'] = ','.join(signed_fields)
fields['unsigned_field_names'] = ','.join(unsigned_fields)
signer = signature.SecureAcceptanceSigner()
fields['signature'] = signer.sign(fields, signed_fields)
return fields
def build_request_data(self):
data = {
'access_key': self.access_key,
'currency': self.currency,
'locale': self.locale,
'profile_id': self.profile_id,
'transaction_type': self.transaction_type,
'transaction_uuid': self.generate_uuid(),
}
data.update( self.build_signed_data() )
signed_fields = self.signed_field_names | set(data.keys())
data.update( self.build_unsigned_data() )
return data, signed_fields
def build_signed_data(self):
return {}
def build_unsigned_data(self):
return {}
def generate_uuid(self):
return '%s%s' % (int(time.time()), random.randrange(0, 100))
class CreateAndAuthorizePaymentToken(SecureAcceptanceAction):
signed_field_names = set([
'access_key',
'profile_id',
'transaction_uuid',
'signed_field_names',
'unsigned_field_names',
'signed_date_time',
'locale',
'transaction_type',
'reference_number',
'customer_ip_address',
'device_fingerprint_id',
'payment_method',
'ship_to_forename',
'ship_to_surname',
'ship_to_address_line1',
'ship_to_address_line2',
'ship_to_address_city',
'ship_to_address_state',
'ship_to_address_country',
'ship_to_address_postal_code',
'ship_to_phone',
'currency',
'amount',
'line_item_count',
])
unsigned_field_names = set([
'bill_to_forename',
'bill_to_surname',
'bill_to_address_line1',
'bill_to_address_line2',
'bill_to_address_city',
'bill_to_address_state',
'bill_to_address_postal_code',
'bill_to_address_country',
'bill_to_phone',
'bill_to_email',
'card_type',
'card_number',
'card_expiry_date',
'card_cvn',
])
transaction_type = 'authorization,create_payment_token'
url = settings.ENDPOINT_PAY
def __init__(self, order_number, order_total, basket, **kwargs):
self.order_number = order_number
self.order_total = order_total
self.basket = basket
self.shipping_address = kwargs.get('shipping_address')
self.billing_address = kwargs.get('billing_address')
self.customer_ip_address = kwargs.get('customer_ip_address')
self.device_fingerprint_id = kwargs.get('fingerprint_session_id')
        self.extra_fields = kwargs.get('extra_fields') or {}  # default to {} so data.update() below cannot receive None
def build_signed_data(self):
data = {}
# Basic order info
data['payment_method'] = 'card'
data['reference_number'] = self.order_number
data['currency'] = self.order_total.currency
data['amount'] = str(self.order_total.incl_tax)
# Add shipping and billing info
if self.shipping_address:
data['ship_to_forename'] = self.shipping_address.first_name
data['ship_to_surname'] = self.shipping_address.last_name
data['ship_to_address_line1'] = self.shipping_address.line1
data['ship_to_address_line2'] = self.shipping_address.line2
data['ship_to_address_city'] = self.shipping_address.line4
data['ship_to_address_state'] = self.shipping_address.state
data['ship_to_address_postal_code'] = self.shipping_address.postcode
data['ship_to_phone'] = re.sub('[^0-9]', '', self.shipping_address.phone_number.as_rfc3966)
data['ship_to_address_country'] = self.shipping_address.country.code
if self.billing_address:
data['bill_to_forename'] = self.billing_address.first_name
data['bill_to_surname'] = self.billing_address.last_name
data['bill_to_address_line1'] = self.billing_address.line1
data['bill_to_address_line2'] = self.billing_address.line2
data['bill_to_address_city'] = self.billing_address.line4
data['bill_to_address_state'] = self.billing_address.state
data['bill_to_address_postal_code'] = self.billing_address.postcode
data['bill_to_address_country'] = self.billing_address.country.code
# Add line item info
i = 0
for line in self.basket.all_lines():
data['item_%s_name' % i] = line.product.title
data['item_%s_sku' % i] = line.stockrecord.partner_sku
data['item_%s_quantity' % i] = str(line.quantity)
data['item_%s_unit_price' % i] = str(line.unit_price_incl_tax)
i += 1
data['line_item_count'] = str(i)
# Other misc fields
for field in ('customer_ip_address', 'device_fingerprint_id'):
value = getattr(self, field, None)
if value is not None:
data[field] = value
data.update(self.extra_fields)
return data
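
# Editorial usage sketch (the objects below are illustrative stand-ins for the
# Oscar order number, Price total and Basket the constructor expects):
#
#   action = CreateAndAuthorizePaymentToken('100042', total, basket,
#                                           customer_ip_address='127.0.0.1',
#                                           fingerprint_session_id=None,
#                                           extra_fields={})
#   form_fields = action.fields()   # signed dict to POST to action.url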
| UTF-8 | Python | false | false | 6,068 | py | 11 | actions.py | 10 | 0.59855 | 0.594265 | 0 | 168 | 35.119048 | 111 |
cacosandon/stream-interface | 15,805,479,679,389 | 10cce81707f58e67e3ed2d03c2d661c5f631201d | 19f36cd125d55b0a078124005dd61760ed90ef53 | /animation.py | da18801f111f6d1f14d4d5dc8f0a565a1761182b | [] | no_license | https://github.com/cacosandon/stream-interface | 094924b75268bac2f0a0be65ef89bbbcf2414019 | 8544d603e4be8fd02ea4244a6c87f6482c9f36d8 | refs/heads/master | 2022-11-24T07:24:17.792004 | 2020-07-11T20:45:50 | 2020-07-11T20:45:50 | 276,810,117 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from functions import superposicion
from functions import velocity_SourceSink, velocity_uniformDiag, velocity_uniformX, velocity_uniformY, velocity_Vortex
from functions import stream_SourceSink, stream_uniformDiag, stream_uniformX, stream_uniformY, stream_Vortex
from functions import vel_inf_A, vel_inf_SS, vel_inf_V, vel_inf_X, vel_inf_Y
import numpy as np
def graph(params, what, potencia, corriente, presion, puntos):
"""
[(U), (U), (U, alpha), (M, x, y), (M, x, y), (gamma, x, y)], [Boolean, Boolean, Boolean, Boolean, Boolean, Boolean]
"""
U_x = params[0][0]
U_y = params[1][0]
U_diag, alpha = params[2]
alpha = alpha * np.pi
M_sink, x_sink, y_sink = params[4]
M_sink = -M_sink
M_source, x_source, y_source = params[3]
gamma, xv, yv = params[5]
    N = 60                          # number of points in each direction
    x_start, x_end = -20, 20        # limits in the x direction
    y_start, y_end = -10, 10        # limits in the y direction
    x = np.linspace(x_start, x_end, N) # 1D array with the X coordinates
    y = np.linspace(y_start, y_end, N) # 1D array with the Y coordinates
    X, Y = np.meshgrid(x,y) # generate a mesh grid (grid of points)
# Vortex
u_vortex, v_vortex = velocity_Vortex(gamma, xv, yv)
psi_vortex = stream_Vortex(gamma, xv, yv)
inf_vortex = vel_inf_V(gamma, xv, yv)
# Sink
u_sink, v_sink = velocity_SourceSink(M_sink, x_sink, y_sink)
psi_sink = stream_SourceSink(M_sink, x_sink, y_sink)
inf_sink = vel_inf_SS(M_sink, x_sink, y_sink)
# Source
u_source, v_source = velocity_SourceSink(M_source, x_source, y_source)
psi_source = stream_SourceSink(M_source, x_source, y_source)
inf_source = vel_inf_SS(M_source, x_source, y_source)
# Uniform X
u_x, v_x = velocity_uniformX(U_x)
psi_uniform_X = stream_uniformX(U_x, X, Y)
inf_x = U_x
# Uniform Y
u_y, v_y = velocity_uniformY(U_y)
psi_uniform_Y = stream_uniformX(U_y, X, Y)
inf_y = U_y
# Uniform Alpha
u_diag, v_diag = velocity_uniformDiag(U_diag, alpha)
psi_diag = stream_uniformDiag(U_diag, alpha, X, Y)
inf_a = U_diag
inf_list = [inf_x, inf_y, inf_a, inf_source, inf_sink, inf_vortex]
u_list = [u_x, u_y, u_diag, u_source, u_sink, u_vortex]
v_list = [v_x, v_y, v_diag, v_source, v_sink, v_vortex]
psi_list = [psi_uniform_X, psi_uniform_Y, psi_diag, psi_source, psi_sink, psi_vortex]
def get_boolean(index):
return what[index]
new_ulist = []
new_vlist = []
new_psilist = []
new_inf = 0
for i in range(len(u_list)):
if get_boolean(i):
new_ulist.append(u_list[i])
new_vlist.append(v_list[i])
new_psilist.append(psi_list[i])
new_inf += inf_list[i]
return superposicion(new_ulist, new_vlist, new_psilist, new_inf, potencia, corriente, presion, puntos)
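
# Editorial usage sketch (values are illustrative): a uniform x-flow of speed 1
# superposed with a source of strength 5 at the origin; the boolean list picks
# which of the six elementary flows participate.
#
#   params = [(1,), (0,), (0, 0), (5, 0, 0), (0, 0, 0), (0, 0, 0)]
#   what = [True, False, False, True, False, False]
#   graph(params, what, potencia=False, corriente=True, presion=False, puntos=[])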
| UTF-8 | Python | false | false | 2,877 | py | 7 | animation.py | 4 | 0.619296 | 0.611982 | 0 | 84 | 33.119048 | 123 |
fukmar/ds4a_team8 | 7,035,156,458,622 | 7e751536bc562ff84c9cf0bd2cc6459596e8dd54 | 5b99ea7e39dd4d507585e4a229c3e6712bdadce1 | /Data Ingestion V2/Raw/Amplitude/get_data_amplitude.py | 8f6afc411d9630b0e8fbf4c73370e85e3181cd11 | [] | no_license | https://github.com/fukmar/ds4a_team8 | c162b2a24cdba5e551b112e5dccf9ba8e0570c17 | 17fb99b61e9e857f65b326a53cfbb98622a0edc5 | refs/heads/main | 2023-06-23T16:45:58.050112 | 2021-07-22T20:21:03 | 2021-07-22T20:21:03 | 361,181,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import pyspark.sql.functions as func
from awsglue.dynamicframe import DynamicFrame
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.sql.types import *
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from pyspark.sql import SparkSession
from awsglue.job import Job
import pyspark.sql.functions as F
import json
import boto3
import ast
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import gc
from pyspark.conf import SparkConf
import pandas as pd
import os
from io import BytesIO
import awswrangler as wr
os.environ['PYSPARK_SUBMIT_ARGS'] = "--packages=com.amazonaws:aws-java-sdk-bundle:1.11.271,org.apache.hadoop:hadoop-aws:3.1.2 pyspark-shell"
print('Reading parameters')
# ----------------------------------------------------------------------------------
print('NOW:', datetime.now())
args = getResolvedOptions(sys.argv,
['bucket_amplitude_data',
'today',
'kms_key_arn',
'recommendations_bucket'])
bucket_amplitude_data = args['bucket_amplitude_data']
recommendations_bucket = args['recommendations_bucket']
kms_key_id = args['kms_key_arn']
today = args['today']
#--------------------------------------------------------------------------------------------------------------
#https://stackoverflow.com/questions/52932459/accessing-s3-bucket-from-local-pyspark-using-assume-role
print('Creating S3/SSM client objects')
# ----------------------------------------------------------------------------------
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
ssm = boto3.client('ssm')
#--------------------------------------------------------------------------------------------------------------
print('Parameters:')
path_key_amplitude = 'ar/amplitude/tb_ar_amplitude_events_stage/'
## DATE RANGE
#print('1. DATE CALCULATION')
##Today moved to the first of the month, minus 1 day
#today = datetime.strptime(today, '%Y-%m-%d').date().replace(day=1)
#last_day=(today-pd.offsets.DateOffset(days=1)).date()
##
#first_day=(last_day-pd.offsets.DateOffset(days=365)).date()
#
#print('2. Date interval analyzed: ',first_day,'and',last_day)
def first_and_last(today):
fecha=datetime.strptime(today, '%Y-%m-%d').date()
first_day=fecha.replace(day=1)
next_month = fecha.replace(day=28) + timedelta(days=4)
last_day_of_month = next_month - timedelta(days=next_month.day)
return first_day,last_day_of_month
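# e.g. first_and_last('2021-05-12') returns (date(2021, 5, 1), date(2021, 5, 31))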
print('Declaring functions')
def list_objects_function(bucket_, first_day, last_day, keys_, retrieve_last=False):
    sts = boto3.client('sts')
    response = sts.assume_role(
        RoleArn='arn:aws:iam::514405401387:role/aws-rol-ml-read-stage-prod', # production role we assume ("impersonate") in order to access the production S3 buckets
        RoleSessionName='sesion-dsr-recomendaciones', # name we give the session
        DurationSeconds=3600 # how long the session lasts; this is the default when the parameter is not specified.
    )
    s3 = boto3.client(
        's3',
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken']
    )
    paginator = s3.get_paginator('list_objects_v2')
    # use the function parameters instead of the module-level globals so the
    # helper lists whatever bucket/prefix it is called with
    pages = paginator.paginate(Bucket=bucket_, Prefix=keys_)
    files_in_bucket=[]
    for page in pages:
        files_page=[key['Key'] for key in page['Contents']]
        files_in_bucket+=files_page
    files_objets = [f"s3a://{bucket_}/" + i for i in files_in_bucket if
                    (keys_ in i) and (i.find('.parquet') >= 0)]
df_bucket_files = pd.DataFrame({
'key': [i[:(i.find('dt=') + 14)] for i in files_objets],
'path': files_objets,
'date': pd.to_datetime([i[(i.find('dt=') + 3):(i.find('dt=') + 13)] for i in files_objets])
})
files=list(df_bucket_files.loc[df_bucket_files['date'].between(str(first_day),str(last_day)),'path'].values)
return files
map_events = {
"cuoti_selecciona_elegircuotas" : "cuotificaciones",
"cuoti_sigue_seleccion_consumos" : "cuotificaciones",
"prestamos_selecciona_simular_prestamo": "prestamos",
"prestamos_espera": "prestamos",
"general_ingresa_promociones": "promociones",
"recargas_click_empezar": "recargas",
"recargas_click_repetir": "recargas",
"transferencia_selecciona_tieneuala": "transferencia_c2c",
"transferencia_selecciona_notieneuala": "transferencia_cvu",
"general_ingresa_cobros": "cobros",
"cobros_acepta_tyc" : "cobros",
"cobros_elige_link": "cobros",
"cobros_elige_mpos": "cobros",
"pagos_empezar": "pago_servicios",
"click_inversiones":"inversiones"
}
eventos_recommendations = list(map_events.keys())
#-----------------------------------------------------------------------------------------------------------------
first_day,last_day = first_and_last(today)
print('First day',first_day)
print('Last day',last_day)
files_objets_amplitude = list_objects_function(bucket_amplitude_data, first_day, last_day ,path_key_amplitude)
print(f'There are {len(files_objets_amplitude)} amplitude files in the folder')
#df_amplitude = spark.read.parquet(*files_objets_amplitude).select(['user_id',"os_name","event_type","event_time"])
sts = boto3.client('sts')
response = sts.assume_role(
    RoleArn='arn:aws:iam::514405401387:role/aws-rol-ml-read-stage-prod', # production role we assume ("impersonate") in order to access the production S3 buckets
    RoleSessionName='sesion-dsr-spark', # name we give the session
    DurationSeconds=3600 # how long the session lasts; this is the default when the parameter is not specified.
)
print('Spark configuration')
spark_conf = SparkConf().setAll([
("spark.hadoop.fs.s3.enableServerSideEncryption", "true"),
("spark.hadoop.fs.s3.serverSideEncryption.kms.keyId", kms_key_id)
])
sc = SparkContext(conf=spark_conf)
glueContext = GlueContext(sc)
spark = glueContext.spark_session
logger = glueContext.get_logger()
spark._jsc.hadoopConfiguration().set("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider")
spark._jsc.hadoopConfiguration().set("fs.s3a.access.key", response["Credentials"]["AccessKeyId"])
spark._jsc.hadoopConfiguration().set("fs.s3a.secret.key", response["Credentials"]["SecretAccessKey"])
spark._jsc.hadoopConfiguration().set("fs.s3a.session.token", response["Credentials"]["SessionToken"])
print(f"Hadoop version = {sc._jvm.org.apache.hadoop.util.VersionInfo.getVersion()}")
df_amplitude = spark.read.parquet(*files_objets_amplitude).select(['user_id',"os_name","event_type","event_time"])
df_amplitude=df_amplitude.filter(df_amplitude.event_type.isin(eventos_recommendations))
df_amplitude = df_amplitude.withColumn('year_month', F.date_format(df_amplitude.event_time, 'yyyy-MM'))  # 'yyyy' is the calendar year; 'YYYY' is week-based and misbehaves around New Year
df_amplitude = df_amplitude.drop("event_time")
df_amplitude = df_amplitude.na.replace(map_events, subset="event_type")  # dict-based replace: each event name is mapped to its product family
df_amplitude = (df_amplitude
.groupBy(['user_id', 'event_type', 'year_month'])
.agg(F.count('event_type').alias('cant'),
F.max('os_name').alias('os_name'))
.groupBy(['user_id','year_month','os_name'])
.pivot("event_type")
.agg(F.sum('cant'))
.na.fill(0)
)
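# Resulting shape: one row per (user_id, year_month, os_name) and one count column
# per product family from map_events, with missing combinations filled with 0.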
print(spark.sparkContext.getConf().getAll())
#### NEW boto3 instance to use the stage buckets #####
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
ssm = boto3.client('ssm')
df_pandas=df_amplitude.toPandas()
df_pandas['dt'] = first_day
wr.s3.to_parquet(df_pandas,
path='s3://{}/data/raw/amplitude/'.format(recommendations_bucket),
dataset=True,
partition_cols=['dt'],
mode="append",
concurrent_partitioning=True,
index=False)
print('Files location:', f's3://{recommendations_bucket}/data/raw/amplitude/dt={str(first_day)}')
#DELETE $FOLDER$
def retrieve_files(path, file_type, list_dates):
    # List every object in the bucket, then keep the keys that match the prefix,
    # contain at least one of the date markers, and match the requested file type.
    bucket = path.split('/')[2]
    prefix = '/'.join(path.split('/')[3:])
    list_objects = list(s3.Bucket(bucket).objects.all())
    list_objects = [f's3://{bucket}/{i.key}' for i in list_objects
                    if (i.key.find(prefix) >= 0) and any(x in i.key.lower() for x in list_dates) and (i.key.find(file_type) >= 0)]
    return list_objects
delete_files = retrieve_files(path=f's3://{recommendations_bucket}/data/', file_type='$folder$', list_dates=['$folder$'])
print('Files to delete', delete_files)
files_keys = [{'Key': '/'.join(f.split('/')[3:])} for f in delete_files]
if len(files_keys)>0:
s3_client.delete_objects(Bucket=recommendations_bucket,
Delete={'Objects':files_keys})
del delete_files
gc.collect()
df_amplitude.show()  # show() does the printing itself and returns None
print((df_amplitude.count(), len(df_amplitude.columns)))
| UTF-8 | Python | false | false | 9,183 | py | 18 | get_data_amplitude.py | 5 | 0.641339 | 0.628366 | 0 | 231 | 38.701299 | 177 |
CidadeIluminada/TCMCidadeIluminada | 9,096,740,778,313 | 68589d573519788da5223beba0ed448a8d522e40 | 531cedff011ab1b2e7455e96434226a33d61b3b1 | /cidadeiluminada/protocolos/__init__.py | cfa92a45ae937c6b29f8b50b0366a819487708e3 | [] | no_license | https://github.com/CidadeIluminada/TCMCidadeIluminada | e2bd31be7084540be6a5f6cc95658d4123597f0a | 8befe1f373608f24451352d85f2edf061baaba0d | refs/heads/master | 2020-04-12T16:06:11.320492 | 2015-12-13T13:36:11 | 2015-12-13T13:36:11 | 30,217,823 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: UTF-8
from __future__ import absolute_import
from cidadeiluminada.protocolos import models, rotas # NOQA
def init_app(app):
rotas.init_app(app)
| UTF-8 | Python | false | false | 162 | py | 21 | __init__.py | 14 | 0.728395 | 0.722222 | 0 | 8 | 19.25 | 60 |
baobabsoluciones/corn | 6,158,983,143,933 | 55481f5dde272c6c9417e357aaa363d36535a9d2 | b78d623cd698508dc7c589af76aca38b537932a1 | /cornflow-server/cornflow/cli/tools/endpoint_tools.py | d39c52dd03fc6761bdfefd86ca29e6115add3467 | [
"Apache-2.0"
] | permissive | https://github.com/baobabsoluciones/corn | 35ff257abedf028e2d7b94363283097d605a2bdc | f37fee176fb5c3e7276a728aa8661eedfb49fba7 | refs/heads/master | 2023-07-08T18:24:41.841203 | 2023-05-05T08:03:30 | 2023-05-05T08:03:30 | 262,996,192 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Endpoints
SP8 = 8 * " "
SP12 = 12 * " "
class EndpointGenerator:
def __init__(self, table_name, app_name, model_name, schemas_names):
self.table_name = table_name
self.app_name = app_name
self.model_name = model_name
self.schemas_names = schemas_names
self.descriptions = {
"base": "Endpoint used to manage the table",
"bulk": "Endpoint used to perform bulk operations on the table",
"detail": "Endpoint used to perform detail operations on the table"
}
def generate_endpoints_imports(self, roles):
"""
Generate the import text for an endpoint.
:param roles: list of roles to import
:return: import text
"""
return (
"# Imports from libraries\n"
"from flask_apispec import doc, marshal_with, use_kwargs\n\n"
"# Import from internal modules\n"
"from cornflow.endpoints.meta_resource import BaseMetaResource\n\n"
f"from ..models import {self.model_name}\n"
f"from ..schemas import {', '.join(self.schemas_names.values())}\n\n"
"from cornflow.shared.authentication import authenticate, Auth\n"
f"from cornflow.shared.const import {', '.join(roles)}\n"
)
def get_type_methods(self, methods, ep_type):
"""
Select the methods of the table to use in the type of endpoint.
:param methods: list of methods used for this table
:param ep_type: type of endpoint (base, bulk or detail)
:return:
"""
        name_types = dict(base="list", bulk="bulk", detail="detail")
return [v[0] for v in [m.split("_") for m in methods] if v[1] == name_types[ep_type]]
def generate_endpoint_description(self, methods, ep_type="base"):
"""
Generate the description of an endpoint.
:param methods: list of available methods.
:param ep_type: type of endpoint (base, bulk or detail)
:return: the description text
"""
type_methods = self.get_type_methods(methods, ep_type)
description = self.descriptions[ep_type]
app_name = f' of app {self.app_name}' if self.app_name is not None else ""
res = ' """\n'
res += f" {description} {self.table_name}{app_name}.\n\n"
res += f" Available methods: [{', '.join(type_methods)}]\n"
res += ' """\n'
return res
def generate_endpoint_init(self):
res = " def __init__(self):\n"
res += SP8 + "super().__init__()\n"
res += SP8 + f"self.data_model = {self.model_name}\n"
res += SP8 + f"self.unique = ['id']\n"
return res
def generate_endpoint_get_all(self):
schema_name = self.schemas_names["one"]
res = " @doc(\n"
res += SP8 + 'description="Get list of all the elements in the table",\n'
res += SP8 + f'tags=["{self.app_name}"],\n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f" @marshal_with({schema_name}(many=True))\n"
res += " def get(self, **kwargs):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to get all the rows of the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += (
SP8
+ ":return: A list of objects with the data, and an integer with the HTTP status code.\n"
)
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.get_list(**kwargs)\n"
return res
def generate_endpoint_get_one(self):
schema_name = self.schemas_names["one"]
res = " @doc(\n"
res += SP8 + 'description="Get one element of the table",\n'
res += SP8 + f'tags=["{self.app_name}"],\n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f" @marshal_with({schema_name})\n"
res += " def get(self, idx):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to get a row of the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":param idx: ID of the row\n"
res += (
SP8
+ ":return: A dictionary with the response data and an integer with the HTTP status code.\n"
)
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.get_detail(idx=idx)\n"
return res
def generate_endpoint_post(self):
schema_marshal = self.schemas_names["one"]
schema_kwargs = self.schemas_names["postRequest"]
res = " @doc(\n"
res += SP8 + 'description="Add a new row to the table",\n'
res += SP8 + f'tags=["{self.app_name}"],\n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f" @marshal_with({schema_marshal})\n"
res += f' @use_kwargs({schema_kwargs}, location="json")\n'
res += " def post(self, **kwargs):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to add a row to the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":return: An object with the data for the created row,\n"
res += SP8 + "and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.post_list(data=kwargs)\n"
return res
def generate_endpoint_delete_one(self):
res = " @doc(\n"
res += SP8 + 'description="Delete one row of the table",\n'
res += SP8 + f'tags=["{self.app_name}"], \n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += " def delete(self, idx):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to delete a row of the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":param idx: ID of the row\n"
res += (
SP8
+ ":return: A dictionary with a message (error if authentication failed, "
+ "or the execution does not exist or\n"
)
res += SP8 + "a message) and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.delete_detail(idx=idx)\n"
return res
def generate_endpoint_put(self):
schema_name = self.schemas_names["editRequest"]
res = " @doc(\n"
res += SP8 + 'description="Edit one row of the table",\n'
res += SP8 + f'tags=["{self.app_name}"], \n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f' @use_kwargs({schema_name}, location="json")\n'
res += " def put(self, idx, **data):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to edit a row of the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":param idx: ID of the row\n"
res += (
SP8
+ ":return: A dictionary with a message (error if authentication failed, "
+ "or the execution does not exist or\n"
)
res += SP8 + "a message) and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.put_detail(data=data, idx=idx)\n"
return res
def generate_endpoint_patch(self):
schema_name = self.schemas_names["editRequest"]
res = " @doc(\n"
res += SP8 + 'description="Patch one row of the table",\n'
res += SP8 + f'tags=["{self.app_name}"], \n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f' @use_kwargs({schema_name}, location="json")\n'
res += " def patch(self, idx, **data):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to patch a row of the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":param idx: ID of the row\n"
res += (
SP8
+ ":return: A dictionary with a message (error if authentication failed, "
+ "or the execution does not exist or\n"
)
res += SP8 + "a message) and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.patch_detail(data=data, idx=idx)\n"
return res
def generate_endpoint_post_bulk(self):
schema_marshal = self.schemas_names["one"]
schema_kwargs = self.schemas_names["postBulkRequest"]
res = " @doc(\n"
res += SP8 + 'description="Add several new rows to the table",\n'
res += SP8 + f'tags=["{self.app_name}"],\n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f" @marshal_with({schema_marshal}(many=True))\n"
res += f' @use_kwargs({schema_kwargs}, location="json")\n'
res += " def post(self, **kwargs):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to add several new rows to the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":return: An object with the data for the created row,\n"
res += SP8 + "and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.post_bulk(data=kwargs)\n"
return res
def generate_endpoint_put_bulk(self):
schema_marshal = self.schemas_names["one"]
schema_kwargs = self.schemas_names["putBulkRequest"]
res = " @doc(\n"
res += SP8 + 'description="Updates several rows of the table or adds them if they do not exist",\n'
res += SP8 + f'tags=["{self.app_name}"],\n'
res += " )\n"
res += " @authenticate(auth_class=Auth())\n"
res += f" @marshal_with({schema_marshal}(many=True))\n"
res += f' @use_kwargs({schema_kwargs}, location="json")\n'
res += " def put(self, **kwargs):\n"
res += SP8 + '"""\n'
res += SP8 + "API method to add several new rows to the table.\n"
res += (
SP8
+ "It requires authentication to be passed in the form of a token that has to be linked to\n"
)
res += SP8 + "an existing session (login) made by a user.\n\n"
res += SP8 + ":return: An object with the data for the created row,\n"
res += SP8 + "and an integer with the HTTP status code.\n"
res += SP8 + ":rtype: Tuple(dict, integer)\n"
res += SP8 + '"""\n'
res += SP8 + "return self.post_bulk_update(data=kwargs)\n"
return res
def generate_endpoint(self, method):
        ep_map = dict(
            get_list=self.generate_endpoint_get_all,
            post_list=self.generate_endpoint_post,
            get_detail=self.generate_endpoint_get_one,
            put_detail=self.generate_endpoint_put,
            patch_detail=self.generate_endpoint_patch,
            delete_detail=self.generate_endpoint_delete_one,
            post_bulk=self.generate_endpoint_post_bulk,
            put_bulk=self.generate_endpoint_put_bulk,
        )
return ep_map[method]()
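

# Hypothetical usage sketch (the names below are illustrative, not part of the
# cornflow codebase): it stitches the generator methods together into one module.
def _demo_endpoint_module():
    gen = EndpointGenerator(
        table_name="instance",
        app_name="example_app",
        model_name="InstanceModel",
        schemas_names={
            "one": "InstanceSchema",
            "postRequest": "InstancePostRequest",
            "editRequest": "InstanceEditRequest",
            "postBulkRequest": "InstancePostBulkRequest",
            "putBulkRequest": "InstancePutBulkRequest",
        },
    )
    parts = [
        gen.generate_endpoints_imports(["SERVICE_ROLE"]),
        "class InstanceEndpoint(BaseMetaResource):",
        gen.generate_endpoint_description(["get_list", "post_list"]),
        gen.generate_endpoint_init(),
        gen.generate_endpoint("get_list"),
        gen.generate_endpoint("post_list"),
    ]
    return "\n".join(parts)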
| UTF-8 | Python | false | false | 12,640 | py | 416 | endpoint_tools.py | 280 | 0.53307 | 0.525079 | 0 | 288 | 42.888889 | 144 |
frankyrumple/smc | 19,026,705,143,754 | e2f3c2525f1a1f1cdffae89e1743a2fc34b89fe3 | 72a7fad3f95a29b38a5255559b8c7e7948e17cfc | /controllers/test.py | d4a217ab023986abde745cafb00ac09feb1b6e0a | [
"MIT"
] | permissive | https://github.com/frankyrumple/smc | ffe73f04792a338b3d2c8c8aeeabf3cf9b453491 | 975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6 | refs/heads/master | 2021-01-11T03:08:05.594878 | 2018-02-08T07:38:57 | 2018-02-08T07:38:57 | 71,087,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# try something like
#from ednet.ad import AD
#from ednet import Util, SequentialGUID, AppSettings, W2Py, Student
#from ednet import *
from ednet.ad import AD
from ednet.faculty import Faculty
from ednet.util import Util
from ednet.canvas import Canvas
from pytube import YouTube
import os
import ldap
import sys
def test():
#db_canvas = current.db_canvas
#sql = "select * from users"
#rows = db_canvas.executesql(sql)
#test = Canvas.EnsureAdminAccessToken()
#student_test = Canvas.EnsureStudentAccessToken("s777777")
initial_run = cache.ram("startup", lambda:True, time_expire=3600)
cache_time = cache.ram("tmptime", lambda:time.ctime(), time_expire=30)
return locals()
def index():
yt = YouTube()
# Set the video URL.
ret = ""
tmpurl = "http://youtu.be/BthR2vlqrLo!!1"
tmpurl = tmpurl.replace("/embed/", "/watch?v=")
yt.url = tmpurl
ret += str(yt.videos)
yt.filename = "tempvid"
# Get the highest res mp4
ret += str(type(yt.filter('mp4')))
f = yt.filter('mp4')[-1]
try:
f.download()
except Exception, e:
ret += str(e)
#test = {}
#ret = isinstance(test, dict)
#AD.Connect()
#cn="cn=ropulsipher,OU=CSE,OU=Faculty,DC=cbcc,DC=pencollege,DC=net"
#cn_name = AD.GetNameFromLDAPPath(cn)
#ret = ""
#p1 = AD.GetParentLDAPPath(cn, 1)
#p2 = AD.GetParentLDAPPath(cn, 2)
#r = AD._ldap.search_s(p2, ldap.SCOPE_SUBTREE, "(name=" + str(p2) + ")" , ['distinguishedName'])
#AD._errors.append("Found Object : " + str(r))
#cn = "OU=CSE," + cn
#ret = AD.MakePathCN(cn)
#ret = AD.CreateUser('walshb', cn)
#errors = AD._errors
#AD.Close()
#path = sys.path
#a = Util.ParseName("bob smith")
#b = Student.GetQuota("777777")
#c = Student.QueueActiveDirectoryImports("SIMPLE SHEET")
#d = Student.ProcessADStudent()
#e = AD.GetCN("OU=Students,DC=cbcc,DC=pencollege,DC=net")
#f = AD.GetCN("CN=s777777,OU=Students,DC=cbcc,DC=pencollege,DC=net")
#createou = AD.CreateOU("OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#creategroup = AD.CreateGroup("CN=TestGroup,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#createdn = AD.GetDN("1st", "2nd")
#createuser = AD.CreateUser("s777777", "OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#addtogroup = AD.AddUserToGroup("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "CN=TestGroup,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#setpassword = AD.SetPassword("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "SID7777772")
#enableuser = AD.EnableUser("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#updateuser = AD.UpdateUserInfo("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net", "s777777@cored.com", "bob", "smith", "smith, bob", description="Student account", id_number="s777777", home_drive_letter="", home_directory="", login_script="", profile_path="", ts_allow_login='FALSE')
#disableuser = AD.DisableUser("CN=s777777,OU=StudentsTest,DC=cbcc,DC=pencollege,DC=net")
#setpass = AD.SetPassword("CN=s777780,OU=Students,DC=cbcc,DC=pencollege,DC=net", "123f")
#groupdn = AD.GetLDAPObject("CN=Students,OU=StudentGroups,DC=cbcc,DC=pencollege,DC=net")
#cn = AD.GetLDAPObject("OU=StudentGroups,DC=cbcc,DC=pencollege,DC=net")
#setpass = Faculty.SetPassword("walshb", "12345612ABC")
#ad_errors = AD._errors
return dict(vars=locals())
| UTF-8 | Python | false | false | 3,559 | py | 22 | test.py | 15 | 0.652149 | 0.62068 | 0 | 108 | 31.953704 | 297 |
Spittie/cavestory | 10,264,971,871,767 | 36de240687721ee2aab64b7db7dd769853aedb24 | 1533aa3e2928a4a4b5c9b3ea05de75e6f86878df | /src/game.py | d2cc1ab639ba5c44ac0770b33a6efab5b296895e | [
"MIT"
] | permissive | https://github.com/Spittie/cavestory | f39c5ff10f84e9c4fd03313b43f24685ea012bec | cde0bbeace37a32cf8e41d53aeee16935bae0d55 | refs/heads/master | 2021-01-22T06:45:27.048921 | 2014-02-28T15:47:33 | 2014-02-28T15:47:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
from graphics import Graphics
from sprite import Sprite
class Game(object):
def __init__(self, height=640, width=480, fps=60):
self.height = height
self.width = width
self.fps = fps
pygame.init()
self.graphics = Graphics(self.height, self.width)
self.clock = pygame.time.Clock()
self.quote = Sprite("../con/MyChar.bmp", 0, 0, 32, 32)
def __del__(self):
pygame.quit()
def eventloop(self):
running = True
while running:
# Handle input
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Handle everything else
self.update()
self.draw()
            # cap the loop at the configured frame rate (self.fps, 60 by default)
            print(self.clock.get_fps())
            self.clock.tick(self.fps)
def update(self):
pass
def draw(self):
self.quote.draw(self.graphics, 55, 55)
pygame.display.flip() | UTF-8 | Python | false | false | 1,008 | py | 4 | game.py | 3 | 0.53373 | 0.513889 | 0 | 45 | 21.422222 | 62 |
arnort20/ChuckNorris | 6,425,271,107,338 | 5c8ccdfb01d973181d33f1fdf9370ac36ac62a29 | 75db7ba1a3019a8681a350a9b8d745341e1efd7a | /Logic_classes/contract_logic.py | cd4bf31f0f3afc3b653112c0f663b3942ee19053 | [] | no_license | https://github.com/arnort20/ChuckNorris | a3211456460f184e2be8a014b3e3cab3a3296931 | f5655cac9adfc96eb686bc05f830d959a71dfd40 | refs/heads/main | 2023-01-28T15:55:21.834478 | 2020-12-11T21:25:23 | 2020-12-11T21:25:23 | 317,488,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #from Data_classes.DataAPI import DataAPI
from Data_classes.DataAPI import DataAPI as dAPI
from datetime import datetime
class Contract_logic:
def __init__(self):
self.dAPI = dAPI()
now = datetime.now()
self.current_date = now.strftime("%Y.%m.%d")
def get_contract(self, contractID):
#get a contract object, containing all information regarding a contract
contract = self.dAPI.get_contract(contractID)
return contract
def change_contract(self, contractID, change_value):
#sends contractID and dict with value for change to the data layer
self.dAPI.change_Contract(contractID, change_value)
def delete_current_contract(self, contractID):
#for when a contract gets invalidated
self.dAPI.delete_contract(contractID)
    def make_contract(self, employeeID, customerID, vehicleID, location, start_date, end_date):
        # Makes the contract; all contracts are unpaid by default.
        # Contract IDs are generated automatically so no two contracts share the same ID.
contractID = self.dAPI.contract_makeID()
paid = "no"
self.dAPI.add_contract(contractID, employeeID, customerID, vehicleID, location, start_date, end_date,paid,self.current_date)
def all_contracts(self):
#get all the contracts as objects
return self.dAPI.get_contracts()
def check_vehicle_reservations(self, vehicle_ID):
"""
#a function to see at what times the vehicle is reserved
#returns a list of tuples containing two strings
#returns None if there's no reservations for that car
#used by the logic wrapper in check_reservations
"""
all_contracts = self.all_contracts()
vehicle_contracts = []
for cont in all_contracts:
if vehicle_ID == cont.vehicle_id:
vehicle_contracts.append(cont)
if vehicle_contracts:
dates_reserved = []
for reservation in vehicle_contracts:
dates = (reservation.start_date, reservation.end_date)
dates_reserved.append(dates)
return dates_reserved
else:
return None | UTF-8 | Python | false | false | 2,230 | py | 33 | contract_logic.py | 23 | 0.656951 | 0.656951 | 0 | 55 | 39.563636 | 132 |
hope-seo/my-first-blog | 18,605,798,340,398 | 985f1c663ce6c350a8188555665d4152634befae | be80c2be175aa24930d05d5ee32cc7d4c268807f | /blog/cafe.py | 789c07c19d0c9516fb60f11220fc16107a78829c | [] | no_license | https://github.com/hope-seo/my-first-blog | 9e64236aeda388fce0b07b121374001b18489850 | 698ff8f65f4d193fdbd85080502f224ab2521781 | refs/heads/master | 2021-01-10T10:03:28.956886 | 2016-02-25T01:00:02 | 2016-02-25T01:00:02 | 52,331,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .models import Brand
from django.shortcuts import render
# from .forms import PostForm
def brand_list(request):
items = Brand.objects.all()
return render(request, 'blog/cafe_brandlist.html',{'items': items})
def menu_list(request):
items = Brand.objects.all()
return render(request, 'blog/cafe_menulist.html',{'items': items}) | UTF-8 | Python | false | false | 350 | py | 3 | cafe.py | 2 | 0.717143 | 0.717143 | 0 | 12 | 28.25 | 71 |
PabloJoel/mcts | 9,216,999,840,825 | 47956bd77d4a06bc345688beae605a2dc8c5bc7b | b7b8d077980eb8a4e91d4836fef316dcd925bc2f | /game_start.py | ae722816b9ef3c7f4f663f6a8a0fab16e295ee0b | [] | no_license | https://github.com/PabloJoel/mcts | ec605261ec26112a85cc0ef75e6b5886de68c410 | 4486c07399a09d22799f757f99700e4b8992c0b5 | refs/heads/main | 2023-08-22T17:12:31.592820 | 2021-10-14T15:29:29 | 2021-10-14T15:29:29 | 390,261,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
import sys
sys.path.append("..")
import games.CheckersGame as cg
from visual.MainMenu import menu
import visual.VisualModel as vm
class Game_Start():
def __init__(self, size):
self.size_screen = size
def start(self):
pygame.init()
pygame.font.init()
#Screen creation
screen = pygame.display.set_mode((self.size_screen,self.size_screen))
#Main loop
running = True
while running:
#Events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
running = False
#Menu to select type of players
res = menu(screen, self.size_screen)
if isinstance(res, str):
running = False
elif isinstance(res, tuple):
#Play the game
vm.play(screen, self.size_screen, cg.CheckersGame(), res[0], res[1], res[2], res[3])
pygame.quit()
quit()
gs = Game_Start(500)
gs.start() | UTF-8 | Python | false | false | 1,053 | py | 26 | game_start.py | 26 | 0.549858 | 0.54321 | 0 | 44 | 22.954545 | 100 |
bivin1999/code | 2,104,534,017,487 | fef10be2c629feeb2deba4d86e8039616bff3cc3 | 28ca5727a642a24e840463e7cb12e47f32d22578 | /code12.py | 9ebae5f5b1b3e8fe0e80b92f5f1a7fd460214323 | [] | no_license | https://github.com/bivin1999/code | ee54282b6615e021ccd7e4c70c8ca319f26e69b3 | 152118111ecc45655713587362ed34c35705bda2 | refs/heads/master | 2020-06-16T10:58:07.311117 | 2019-07-06T16:25:28 | 2019-07-06T16:25:28 | 195,547,991 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #divisible sum pairs
#link : https://www.hackerrank.com/challenges/divisible-sum-pairs/problem
def divisibleSumPairs(n, k, ar):
s=0
for i in range(n-1):
a=ar[i]
for x in ar[i+1:]:
if (a+x)%k==0:
s+=1
return s
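# An O(n + k) alternative (sketch; same answer, counting residues modulo k
# instead of scanning all pairs):
def divisibleSumPairsFast(n, k, ar):
    from collections import Counter
    c = Counter(x % k for x in ar)
    total = c[0] * (c[0] - 1) // 2          # pairs where both residues are 0
    for r in range(1, k // 2 + 1):
        if r == k - r:                      # middle residue when k is even
            total += c[r] * (c[r] - 1) // 2
        else:                               # residues r and k-r pair up
            total += c[r] * c[k - r]
    return total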
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
ar = list(map(int, input().strip().split(' ')))
result = divisibleSumPairs(n, k, ar)
print(result)
| UTF-8 | Python | false | false | 459 | py | 20 | code12.py | 20 | 0.533769 | 0.522876 | 0 | 21 | 19.857143 | 73 |
vinay5656/Python | 2,576,980,395,449 | a21e4e667f63059327a6ad1afc8e7e3175320f78 | 762fef5249e213f6175d8446a337081a1e9c10b4 | /Guess My Number/guessnumber.py | 07b0eb09cab9ec901f147ae94a672b2ee9dfcb77 | [] | no_license | https://github.com/vinay5656/Python | 8f4e27ebb7950f1b032cbb210506e7d1c93db527 | f1f9540d80999f25eab89f4d05a8e2a5269f71ed | refs/heads/main | 2023-06-29T16:18:30.041054 | 2021-08-04T05:58:35 | 2021-08-04T05:58:35 | 389,193,241 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from art1 import logo1
from art2 import logo2
import random
print("Welcome to the Number Guess Game !")
# myNumber = random.randrange(1,101)
# print(myNumber)
again = True
areYouSuccesssed = False
def campareNumber(number_1, number_2):
    global areYouSuccesssed  # write the module-level flag that the game loops below read
    if number_1 == number_2:
        areYouSuccesssed = True
        return "Correct Guess"
    else:
        return "Too low" if number_1 > number_2 else "Too high"
def guessNumber(attempts) :
isItYourNumber = int(input("Guess a number"))
whatsAboutNumber=campareNumber(myNumber,isItYourNumber)
print(whatsAboutNumber)
if whatsAboutNumber != "Correct Guess" :
return attempts - 1
return 0
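# guessNumber returns the attempts left after a wrong guess, or 0 on a correct
# guess; either way the while-loops below stop once it reaches 0.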
while again == True :
    areYouSuccesssed = False  # start each round with the success flag cleared
    myNumber = random.randrange(1,101)
    # print(myNumber)  # debug leftover: printing the secret number gives the game away
    difficultyLevel = input("choose a difficulty level : Hard or Easy : ")
if difficultyLevel == "Hard" :
print(logo2)
attempts = 5
remainingAttempts = guessNumber(attempts)
while not areYouSuccesssed and remainingAttempts>0 :
print(f"You have {remainingAttempts} attempts remaining to guess the number")
remainingAttempts=guessNumber(remainingAttempts)
elif difficultyLevel == "Easy" :
print(logo1)
attempts = 10
remainingAttempts = guessNumber(attempts)
while not areYouSuccesssed and remainingAttempts>0 :
print(f"You have {remainingAttempts} attempts remaining to guess the number")
remainingAttempts=guessNumber(remainingAttempts)
else :
print("Please choose a difficulty level")
if input("Do you want to play Again y/n") == "n":
again=False
| UTF-8 | Python | false | false | 1,702 | py | 21 | guessnumber.py | 20 | 0.648061 | 0.632197 | 0 | 48 | 33.458333 | 89 |
DilipBDabahde/PythonExample | 6,296,422,087,862 | dbcb19353ebdb922dadd86bd237c51e532013721 | 682319f56c17e949bab0d6e418838d33977dd760 | /Assignment_8/Thread_List_Even_Odd_Sum.py | 09dc9536a5072fcb82196e77d3eac821a7aae1ff | [] | no_license | https://github.com/DilipBDabahde/PythonExample | 8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b | 669762a8d9ee81ce79416d74a4b6af1e2fb63865 | refs/heads/master | 2020-08-23T01:05:44.788080 | 2020-07-25T21:59:52 | 2020-07-25T21:59:52 | 216,511,985 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
3.Design python application which creates two threads as evenlist and oddlist. Both the
threads accept list of integers as parameter. Evenlist thread add all even elements
from input list and display the addition. Oddlist thread add all odd elements from input
list and display the addition.
'''
import threading
def EvenList(listxx):
isum = 0;
for i in range(len(listxx)):
if listxx[i]%2==0:
isum += listxx[i];
print("Sum of all Even nums is: ",isum);
def OddList(listxx):
isum = 0;
for i in range(len(listxx)):
if listxx[i]%2 !=0:
isum += listxx[i];
print("Sum of all Odd nums is: ",isum);
def main():
size = int(input("Enter size of list: "));
listx=[];
for i in range(size):
listx.append(int(input("Enter num: ")));
print("Given list is: ",listx);
evenlist = threading.Thread(target = EvenList, args=(listx,));
oddlist = threading.Thread(target = OddList, args=(listx,));
#EvenList(listx);
#OddList(listx);
# using threads to call above defined methods indirectly with start()
    evenlist.start();
    oddlist.start();
    # wait for both workers so main() does not return before their sums are printed
    evenlist.join();
    oddlist.join();
if __name__ == "__main__":
main();
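# Alternative sketch using the standard library's concurrent.futures, shown for
# comparison only (the assignment above specifically asks for threading.Thread):
# each sum is returned to the caller instead of being printed inside the thread.
def sums_with_futures(nums):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=2) as pool:
        even = pool.submit(lambda: sum(n for n in nums if n % 2 == 0))
        odd = pool.submit(lambda: sum(n for n in nums if n % 2 != 0))
        return even.result(), odd.result()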
| UTF-8 | Python | false | false | 1,110 | py | 309 | Thread_List_Even_Odd_Sum.py | 308 | 0.673874 | 0.667568 | 0 | 52 | 20.346154 | 88 |
songcser/geopy | 16,320,875,751,581 | c4ff93aacd282f087ac409360aa323ddc4f98a9e | b28c1d4265668af5c5aed94da9a73fefc7b48018 | /geopy/geocoders/tencent.py | 0ebc1ec7b2f28cef3151b4d30594e5e016cf23eb | [
"MIT"
] | permissive | https://github.com/songcser/geopy | 916ba9656685d930de843f18aaeee827ac09978b | fa1cca11bbf0cbafc2bf2e3f653684815caa8ed1 | refs/heads/master | 2020-04-06T04:44:11.354023 | 2017-05-03T05:56:08 | 2017-05-03T05:56:08 | 82,897,988 | 0 | 0 | null | true | 2017-02-23T07:29:09 | 2017-02-23T07:29:09 | 2017-02-23T06:32:40 | 2017-01-29T14:17:28 | 1,445 | 0 | 0 | 0 | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
GeocoderAuthenticationFailure,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("Tencent", )
class Tencent(Geocoder):
"""
Geocoder using the Tencent Maps v1 API. Documentation at:
http://lbs.qq.com/webservice_v1/guide-geocoder.html
"""
def __init__(
self,
api_key,
scheme='http',
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None
):
"""
Initialize a customized Tencent geocoder using the v1 API.
.. versionadded:: 1.0.0
:param string api_key: The API key required by Tencent Map to perform
geocoding requests. API keys are managed through the Tencent APIs
console (http://lbs.qq.com/mykey.html).
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
            Default is 'http'; only http is currently supported.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(Tencent, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
)
self.api_key = api_key
self.scheme = scheme
self.doc = {}
self.api = 'http://apis.map.qq.com/ws/geocoder/v1/'
@staticmethod
def _format_components_param(components):
"""
        Format the components dict to something Tencent understands.
"""
return "|".join(
(":".join(item)
for item in components.items()
)
)
def geocode(
self,
query,
exactly_one=True,
timeout=None,
city=None,
):
"""
Geocode a location query.
:param string query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'output': 'json',
'address': self.format_string % query,
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one=exactly_one
)
def reverse(self, query, timeout=None): # pylint: disable=W0221
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'output': 'json',
'location': self._coerce_point_to_string(query),
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_reverse_json(
self._call_geocoder(url, timeout=timeout)
)
@staticmethod
def _parse_reverse_json(page):
"""
Parses a location from a single-result reverse API call.
"""
place = page.get('result')
location = place['formatted_address']['recommend'].encode('utf-8')
latitude = place['location']['lat']
longitude = place['location']['lng']
return Location(location, (latitude, longitude), place)
def _parse_json(self, page, exactly_one=True):
"""
Returns location, (latitude, longitude) from JSON feed.
"""
place = page.get('result', None)
if not place:
self._check_status(page.get('status'))
return None
def parse_place(place):
"""
Get the location, lat, lng from a single JSON place.
"""
location = place.get('title')
latitude = place['location']['lat']
longitude = place['location']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(place)
else:
return [parse_place(item) for item in place]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
        if status == 0:
            # Status 0 means success; with no result payload this is simply zero results.
            return
        if status == 110:
            raise GeocoderQueryError(
                'Request source is not authorized.'
            )
        elif status == 306:
            raise GeocoderQueryError(
                'Request contains invalid content; check the query string.'
            )
        elif status == 310:
            raise GeocoderAuthenticationFailure(
                'Invalid request parameters.'
            )
        elif status == 311:
            raise GeocoderQuotaExceeded(
                'Bad key format.'
            )
        else:
            raise GeocoderQueryError('Unknown error: %s' % status)
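

# Usage sketch (requires a real API key from http://lbs.qq.com/mykey.html):
#     geocoder = Tencent("your-api-key")
#     location = geocoder.geocode(u"Beijing")
#     print(location.latitude, location.longitude)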
| UTF-8 | Python | false | false | 6,071 | py | 2 | tencent.py | 2 | 0.557202 | 0.551707 | 0 | 194 | 29.943299 | 82 |
yellowssi/study | 11,836,929,911,681 | c93cf77dc7cd381eadd2cd7f4fed5f47ea1a159e | 6acabc1c11098282226846a14d6f974746879dac | /patternRecognition/pattern_recognition.py | 126f67fdf14f17ea5e7a451d6063482c08b69b62 | [] | no_license | https://github.com/yellowssi/study | 3ce8946f2ea72ef88c3a93795c5d92400bcd1200 | 7135d3484a52e72f71c8e7281afb911dec522cc8 | refs/heads/master | 2018-08-24T01:06:45.159786 | 2018-06-03T01:21:45 | 2018-06-03T01:21:45 | 124,374,876 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bayes import Bayes
from fisher import Fisher
from kNN import kNN
import matplotlib.pyplot as plt
trains3 = []
trainshw = []
trainshs = []
trainsws = []
def get_data(file, gender):
global trains3, trainshw, trainshs, trainsws
if gender == 'boy':
try:
data = open(file)
for line in data.readlines():
height, weight, shoe_size = line.split()
trains3.append([float(height), float(weight), float(shoe_size), 1])
trainshw.append([float(height), float(weight), 1])
trainshs.append([float(height), float(shoe_size), 1])
trainsws.append([float(weight), float(shoe_size), 1])
except IOError:
return
elif gender == 'girl':
try:
data = open(file)
for line in data.readlines():
height, weight, shoe_size = line.split()
trains3.append([float(height), float(weight), float(shoe_size), 0])
trainshw.append([float(height), float(weight), 0])
trainshs.append([float(height), float(shoe_size), 0])
trainsws.append([float(weight), float(shoe_size), 0])
except IOError:
return
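# Each parsed sample is [feature values..., label]; label 1 marks boys and 0 marks
# girls, matching the class codes the classifiers below expect.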
def main():
get_data('data/boy82.txt', 'boy')
get_data('data/boy83.txt', 'boy')
get_data('data/boynew.txt', 'boy')
get_data('data/girl35.txt', 'girl')
get_data('data/girl42.txt', 'girl')
get_data('data/girlnew.txt', 'girl')
test3 = []
testhw = []
tesths = []
testws = []
boys = open('data/boy.txt')
girls = open('data/girl.txt')
print("1) Bayes")
print("2) Fisher")
print("3) kNN")
choice = input("Input the algorithm: ")
if choice == '1':
for line in boys.readlines():
height, weight, shoe_size = line.split()
test3.append([[float(height)], [float(weight)], [float(shoe_size)], 1])
testhw.append([[float(height)], [float(weight)], 1])
tesths.append([[float(height)], [float(shoe_size)], 1])
testws.append([[float(weight)], [float(shoe_size)], 1])
for line in girls.readlines():
height, weight, shoe_size = line.split()
test3.append([[float(height)], [float(weight)], [float(shoe_size)], 0])
testhw.append([[float(height)], [float(weight)], 0])
tesths.append([[float(height)], [float(shoe_size)], 0])
testws.append([[float(weight)], [float(shoe_size)], 0])
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.plot([0, 1.0], [0, 1.0], color='red')
b3 = Bayes(trains3)
b3.paint(test3, 'b')
bhw = Bayes(trainshw)
bhw.paint(testhw, 'g')
bhs = Bayes(trainshs)
bhs.paint(tesths, 'r')
bws = Bayes(trainsws)
bws.paint(testws, 'y')
elif choice == '2':
for line in boys.readlines():
height, weight, shoe_size = line.split()
test3.append([[float(height)], [float(weight)], [float(shoe_size)], 1])
testhw.append([[float(height)], [float(weight)], 1])
tesths.append([[float(height)], [float(shoe_size)], 1])
testws.append([[float(weight)], [float(shoe_size)], 1])
for line in girls.readlines():
height, weight, shoe_size = line.split()
test3.append([[float(height)], [float(weight)], [float(shoe_size)], 0])
testhw.append([[float(height)], [float(weight)], 0])
tesths.append([[float(height)], [float(shoe_size)], 0])
testws.append([[float(weight)], [float(shoe_size)], 0])
f3 = Fisher(trains3)
fhw = Fisher(trainshw)
fhs = Fisher(trainshs)
fws = Fisher(trainsws)
print("1) ROC")
print("2) Line")
choice = input("Choose: ")
if choice == '1':
f3.paint(test3, 'b')
fhw.paint(testhw, 'g')
fhs.paint(tesths, 'r')
fws.paint(testws, 'y')
elif choice == '2':
print("1) Height And Weight")
print("2) Height And Shoe Size")
print("3) Weight And Shoe Size")
choice = input("Choose: ")
if choice == '1':
fhw.paint_line(testhw, 'c')
elif choice == '2':
fhs.paint_line(tesths, 'r')
elif choice == '3':
fws.paint_line(testws, 'y')
elif choice == '3':
for line in boys.readlines():
height, weight, shoe_size = line.split()
test3.append([float(height), float(weight), float(shoe_size), 1])
testhw.append([float(height), float(weight), 1])
tesths.append([float(height), float(shoe_size), 1])
testws.append([float(weight), float(shoe_size), 1])
for line in girls.readlines():
height, weight, shoe_size = line.split()
test3.append([float(height), float(weight), float(shoe_size), 0])
testhw.append([float(height), float(weight), 0])
tesths.append([float(height), float(shoe_size), 0])
testws.append([float(weight), float(shoe_size), 0])
choice = input("Input K:")
if choice == '1':
k1 = kNN(trainshs, 1)
print(k1.test(tesths))
k1.paint()
elif choice == '3':
k3 = kNN(trainshs, 3)
print(k3.test(tesths))
k3.paint()
elif choice == '5':
k5 = kNN(trainshs, 5)
print(k5.test(tesths))
k5.paint()
plt.show()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 5,604 | py | 77 | pattern_recognition.py | 31 | 0.523911 | 0.50571 | 0 | 145 | 37.648276 | 83 |
webkom/lego | 13,640,816,165,109 | a2c987d15d26328c58d122cf5f4b90e3e1ed35e8 | 9be47cdc14abab79458d3fe4d1a1d39169f3c94f | /lego/apps/gallery/migrations/0007_gallery_public_metadata.py | 7f4d935d9a52256cf77c4d029776c74ad33555d4 | [
"MIT"
] | permissive | https://github.com/webkom/lego | ae370f0881a8dcaa21caaa9b0d0cc552a6799e12 | 2c1909fd84fe3b3e0a9d3792c4bcc51089ad5a87 | refs/heads/master | 2023-08-07T17:06:21.159973 | 2023-07-13T19:18:36 | 2023-07-13T19:18:36 | 24,420,075 | 53 | 33 | MIT | false | 2023-09-12T07:48:21 | 2014-09-24T15:13:29 | 2023-08-20T11:34:56 | 2023-09-12T07:48:21 | 11,229 | 53 | 19 | 163 | Python | false | false | # Generated by Django 2.0.3 on 2018-04-10 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("gallery", "0006_auto_20171210_1610")]
operations = [
migrations.AddField(
model_name="gallery",
name="public_metadata",
field=models.BooleanField(default=False),
)
]
| UTF-8 | Python | false | false | 380 | py | 726 | 0007_gallery_public_metadata.py | 587 | 0.623684 | 0.542105 | 0 | 15 | 24.333333 | 59 |
kyamashiro/atcoder | 7,602,092,145,561 | 8ee62aba7888d2d8cf9bfc04270f351ba649034d | 9bb16f8fbf9f562f1171a3bbff8318a47113823b | /abc244/abc244_d/main.py | 4f0d1876e22637309cfedc94d7dbb02a81665a7e | [] | no_license | https://github.com/kyamashiro/atcoder | 83ab0a880e014c167b6e9fe9457e6972901353fc | 999a7852b70b0a022a4d64ba40d4048ee4cc0c9c | refs/heads/master | 2022-06-01T03:01:39.143632 | 2022-05-22T05:38:42 | 2022-05-22T05:38:42 | 464,391,209 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# from typing import *
YES = 'Yes'
NO = 'No'
# def solve(n: int, a: List[int]) -> str:
def solve(N, S):
cnt = 0
for i in range(3):
if N[i] == S[i]:
cnt += 1
if cnt == 1:
return NO
return YES
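# Informal reading of the fixed-point test above: exactly one matching position
# means the other two entries are swapped, an odd permutation that an even
# number of swaps can never produce, hence No; zero or three matches are even
# permutations, hence Yes.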
# generated by oj-template v4.8.1 (https://github.com/online-judge-tools/template-generator)
def main():
    N = input().split()
    S = input().split()
a = solve(N, S)
print(a)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 522 | py | 147 | main.py | 115 | 0.522989 | 0.507663 | 0 | 29 | 17 | 92 |
AdamZhouSE/pythonHomework | 17,111,149,710,012 | 8d0299f1c7ef2fd2fb8123eede87961bd4fa244d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2936/60782/304890.py | 640b6e0bbfb944d799ced36196e57333a138da74 | [] | no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | print('310-1010 2') | UTF-8 | Python | false | false | 19 | py | 45,079 | 304890.py | 43,489 | 0.684211 | 0.263158 | 0 | 1 | 19 | 19 |
nagagopi19/Python_learning_curve | 12,189,117,218,594 | 4c4e54e1690184f46be1f9be8cf43f1d1c00921f | c80df62eee7b4b23827ca9e128326edbcc21dcee | /Used Defined Functions/First-Class Functions_2.py | 3c2930ef7673ac59a5270c4bb9d6bac3c54c858b | [] | no_license | https://github.com/nagagopi19/Python_learning_curve | ad39a312eca81f341231939673e5195c45d7bd9f | b0487d758f38e97cc0aaea3a81d13474c23a21b5 | refs/heads/master | 2023-06-01T13:46:15.646609 | 2021-06-09T09:54:44 | 2021-06-09T09:54:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Here, html_tag is a first-class function that can be assigned to a variable(line 12),
passed as an argument (lines 12, 14, 16), or returned from other functions.
Remember: a first-class function must return the enclosed (inner) function itself, without executing it."""
def html_tag(tag):
def wrap_text(msg):
print("<{0}>{1}</{0}>".format(tag, msg))
    return wrap_text # no parentheses after the function name: return the function object, don't call it
print_h1 = html_tag('h1') # assign the closure returned by the first-class function to a variable
'''The argument passed to print_h1 is forwarded to the enclosed wrap_text function,
so the variable behaves exactly like that inner function.'''
print_msg1 = print_h1('Hello') #using a Closure function
print_msg2 = print_h1('My name is Kiran')
print_msg3 = print_h1("I am learning Python") | UTF-8 | Python | false | false | 840 | py | 255 | First-Class Functions_2.py | 190 | 0.697619 | 0.675 | 0 | 19 | 43.263158 | 94 |
RonanLeanardo11/Python-Lectures | 11,209,864,660,839 | de424af19cabbeb32f7b7c0016def124bacb1ea8 | 0f23dfd24dfd09258e6b953e154c2e79af6f55cf | /Lectures, Labs & Exams/Lecture Notes/Lect 5 - Classes and Definitions/Lect 6 - Objects 2/Lect 6 Example 3 - Karen's Solution.py | bdd4ede9a3a7de2936ce2b5dfa5c4c416f0781bd | [] | no_license | https://github.com/RonanLeanardo11/Python-Lectures | 224c33d4c98636665b925d5ef7c06828f365e026 | 0af024e1f72b4ae7861c3320961510c5b8472b70 | refs/heads/master | 2020-05-06T20:02:57.427535 | 2019-04-08T20:23:29 | 2019-04-08T20:23:29 | 180,218,871 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Create a class called DebitCard:
# Class Variables
# balance
class DebitCard:
#balance = 0
__balance = 0
def __init__(self):
self.__balance = 10
# lodge(amount)
# One argument, amount. Adds amount to balance if amount if > 0
# Returns True if successful, False if unsuccessful
def lodge(self, amount):
if amount > 0:
self.__balance += amount
return True
else:
return False
# withdraw(amount)
# Returns True if the amount is less than the current balance and the amount is > 0. If this is True it
# removes the amount from the balance. If not True return False.
def withdraw(self, amount):
if amount > 0 and amount < self.__balance:
self.__balance -= amount
return True
else:
return False
    # print_details()
    # method that prints all of the DebitCard's details
def print_details(self):
print("Balance :", self.__balance)
# Test the class, create an instance and pass in arguments
# Create a debit card
# Print details
# Withdraw 2 euro and print an appropriate message whether successful or not
# Print details
# Lodge 180 euro, and print appropriate message if successful or not
karenAcc = DebitCard()
karenAcc.print_details()
if karenAcc.withdraw(2):
print("Money withdrew successfully")
else:
print("Error, not enough funds")
karenAcc.print_details()
karenAcc.balance = 5000 # direct access to the (now private) variable: name mangling stores it as _DebitCard__balance, so this 5000 only creates a separate attribute and won't lodge
if karenAcc.lodge(180):
print("Money Lodged successfully")
else:
print("error contact bank")
karenAcc.print_details()
| UTF-8 | Python | false | false | 1,805 | py | 46 | Lect 6 Example 3 - Karen's Solution.py | 43 | 0.637183 | 0.623662 | 0 | 61 | 28.04918 | 146 |
patchgreen99/advanced_vision_1 | 2,525,440,813,702 | c82041a2faf3d173baa27302a4c81fb8c410f475 | 13bac31eb687ccec72be21774b6ef5220d8c6f28 | /tochroma.py | e3474287a3f448ef2cc96694d3e5e14fc0442b38 | [] | no_license | https://github.com/patchgreen99/advanced_vision_1 | 91751c12f1407efe0a30d8d30bc9ebd550e5e0b3 | 8e874e30ea75048151e95f5efc45fa1a5286aeb0 | refs/heads/master | 2020-03-28T17:21:22.962945 | 2018-10-15T11:50:27 | 2018-10-15T11:50:27 | 148,781,030 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import the necessary packages
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import io
import matplotlib.pyplot as plt
import argparse
import glob
import cv2,os
images = sorted(glob.glob("day2/*.jpg"))
done_images = sorted(glob.glob("chroma/*.jpg"))
def mynorm(image):
image = cv2.normalize(image,image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
return image
def toChroma(image):
for x in xrange(image.shape[0]):
for y in xrange(image.shape[1]):
            try:
                p = image[x,y]
                sump = sum(p)
                if sump == 0:
                    continue  # black pixel: chromaticity is undefined, skip to avoid dividing by zero
                image[x,y] = [255*p[0]/sump, 255*p[1]/sump, 255*p[2]/sump]
            except KeyboardInterrupt:
                break
return image
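# Vectorized sketch of the same conversion (assumes numpy, which OpenCV already
# depends on); not used by the loop below, shown only as a faster equivalent:
import numpy as np

def toChromaFast(image):
    f = image.astype(np.float32)
    s = f.sum(axis=2, keepdims=True)
    s[s == 0] = 1  # guard black pixels against divide-by-zero
    return (255.0 * f / s).astype(image.dtype)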
for i,x in enumerate(images):
done_images = sorted(glob.glob("chroma/*.jpg"))
done_images = [os.path.basename(x)==os.path.basename(y) for y in done_images]
done = any(done_images)
print str(i) + " / " + str(len(images)) + " Done ( ", 100*i / len(images), "% )"
if done: continue
im1 = cv2.imread(x)
im2 = toChroma(im1)
cv2.imshow("NEW",im2)
xs = str(i)
while len(xs) < len(str(len(images))): xs = "0" + xs
filename = "chroma/"+ x[5:]
print "saved: ", filename
cv2.imwrite(filename, im2)
    # cv2.waitKey(2)  # uncomment so the imshow window actually refreshes each frame
| UTF-8 | Python | false | false | 1,250 | py | 9 | tochroma.py | 7 | 0.6792 | 0.6488 | 0 | 43 | 28.069767 | 98 |
chuckharmston/yorumipsum | 13,297,218,770,994 | 06f5d0d796e31a20a970392e1cbfbffc9ed48494 | 95b279e00f2cc026fbbc381f64a6c998c82ea270 | /yorumipsum/urls.py | f404b26432c6629c7a3f72ae21853fb88290310d | [] | no_license | https://github.com/chuckharmston/yorumipsum | 6316b26827dfee8c6312b02936e7e48a89b93a44 | 94c244397965a9faaff1d18ad90ae619eb0212b6 | refs/heads/master | 2016-09-05T16:56:25.016891 | 2012-05-27T16:09:22 | 2012-05-27T16:09:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^404/', TemplateView.as_view(template_name='404.html')),
url(r'^500/', TemplateView.as_view(template_name='500.html')),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
'show_indexes': True
}),
)
urlpatterns += staticfiles_urlpatterns()
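# Note: the static/media fallbacks above are DEBUG-only conveniences; in
# production these files are expected to be served by the web server itself.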
| UTF-8 | Python | false | false | 745 | py | 21 | urls.py | 16 | 0.679195 | 0.663087 | 0 | 23 | 31.391304 | 70 |
MayerMax/pythonHome | 2,147,483,657,994 | 8561c648b36b25e8dc2171edcd56c29771b9a6ae | ef07224f1e089b310e4177c8d20d6c8ff4ad40ea | /untitled/introToPython/intro.py | d51f95173847788c3971b97123bf0753537c388b | [] | no_license | https://github.com/MayerMax/pythonHome | b135a0824fed288189c6350e278a38e15b4af09d | 03027c80b6c310634f75fc9c5811947a5927e578 | refs/heads/master | 2016-08-12T17:09:15.346000 | 2016-03-26T20:19:30 | 2016-03-26T20:19:30 | 53,084,584 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Максим'
# -*- coding: utf8 -*-
a = b'abc' # create a bytes literal so we can check what bytes objects can do
if a.isdigit():
print(a[0])
else:
print('none')
def tuple_creation():
    t = 1, 'asd', 'lol', False, (2,1j) # a tuple: a sequence of different objects packed into one value
print(t) # print the whole sequence
print(t[0]) # call the exactly element you need
u = 1, t, False # put one tuple inside another and add something else
print(u[1][2])
y = False, # make tuple of one element or y = tuple() - create empty tuple
tuple_creation()
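# Aside: tuples are immutable, so e.g. t[0] = 0 inside tuple_creation would raise
# TypeError; lists (demonstrated next) are the mutable counterpart.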
def list_creation():
a = ['1',2,3,"4"] # list creation
a[0] = 0 # as list is mutable we can change it from inside
print(a[0])
a[1] = [2,3,4] # put a list inside a list
print(a[1])
print("-----")
list_creation()
print("----")
def bytes_array_creation():
b = bytearray(b'ab') # creation of bytearray
b.append(97) # add a symbol "a" (as it's number is 97) to the 'ab'
print(b) # 'a,b,a'
b[1] = 97
print(b)
bytes_array_creation() | UTF-8 | Python | false | false | 1,101 | py | 12 | intro.py | 11 | 0.599087 | 0.573516 | 0 | 51 | 20.490196 | 108 |
pppppp040/text-classifier-py | 2,602,750,208,238 | fa2f7f3096b3419ff5bc4ec3b879af3329b4b2a3 | b125ba703fb014470c2619f0b3a171b2fae40b4b | /NaiveBayes/testMultiClassifier.py | 5b49e457c38bb41bc02ac013ee96ee7d80163abe | [] | no_license | https://github.com/pppppp040/text-classifier-py | 557e55aedb8bc3969350bc589b5c0eaf61fcec0c | 6186a6d3ead582e39513284c1fe77f7eba774df1 | refs/heads/master | 2021-10-11T05:01:24.136209 | 2019-01-22T11:32:26 | 2019-01-22T11:32:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! -*- coding:utf-8 -*-
# __author__ = "houlisha"
import random
import multiClassifier as nb
trainFile = "../data/mostClass.train.seg"
featureFile = "../data/mostClass.features"
modelFile = "../data/mostClass.model"
if __name__ == "__main__":
all_datas = [line.strip() for line in open(trainFile).readlines()]
random.shuffle(all_datas)
train_datas = all_datas[:3 * len(all_datas) / 5]
test_datas = all_datas[3 * len(all_datas) / 5:]
print "特征选择..."
feature_words = nb.featureSelecter(train_datas, "chi")
print "共选择特征数量:", len(feature_words)
nb.save_features(feature_words, featureFile)
feature_prob, category_prior, categorys = nb.train(train_datas, feature_words)
nb.save_model(feature_prob, category_prior, categorys, modelFile)
feature_prob, category_prior, categorys = nb.load_model(modelFile)
nb.predict(feature_prob, category_prior, categorys, test_datas) | UTF-8 | Python | false | false | 970 | py | 14 | testMultiClassifier.py | 11 | 0.658228 | 0.652954 | 0 | 30 | 29.666667 | 82 |
Lekomrat/Game-raw- | 14,946,486,203,083 | 17341cc76cd5efbbf6b515f72a92a7d64eb4f66f | 28eb4f35e557b4d589c914cbb253dff4458d3a23 | /Game.py | 46ada115c66151db171ebdc470911461038b2390 | [] | no_license | https://github.com/Lekomrat/Game-raw- | ae8b332b00462998d8c3b55bf7854737c7cdda91 | dbaa2e55aa0c50575f84ee31ce207d614c7f4d0f | refs/heads/master | 2020-06-18T19:07:57.291521 | 2019-07-11T14:41:46 | 2019-07-11T14:41:46 | 196,412,567 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import random
actions_words=['say','walk','info','fight','start']
fightings_words=['hit','item','run']
locations_words=['forest','village']
items=['health_potion','mana_potion']
info_actions=['say', 'walk','sleep','items','equip']
players_weapons=["arm","sword_iron","axe_iron","spear_iron"]
players_magics=["fire","lighting"]
forest=["Wolf","Big_wolf"]
damage={"arm":1,"sword_iron":8, "axe_iron":6, "spear_iron":10,"Wolf":3, "Big_wolf":6}
stamina={"arm":0,"sword_iron":2, "axe_iron":1, "spear_iron":3,"Wolf":2,"Big_wolf":4}
enemy_stamina={"Wolf":6,"Big_wolf":16}
armor={"leather":1,"iron":2,"dragon":4,"Wolf":0,"Big_wolf":1}
health={"Wolf":20,"Big_wolf":50,"health_potion":15}
magic={"Wolf":0,"Big_wolf":0,"fire":6,"lighting":10}
mana={"Wolf":0,"Big_wolf":0,"fire":1,"lighting":3,"mana_potion":12}
XP={"Wolf":5,"Big_wolf":10}
player_location='village'
equiped_weapon='sword_iron'
equiped_magic='fire'
equiped_armor='leather'
Health_heal=40
Mana_heal=30
Stamina_heal=30
def Get_input():
if len(inp) <=0:
print('Try to say ":info info".')
elif len(inp) > 2:
print('So many words. Try command "info {}".'.format(inp[0]))
elif len(inp)==1:
print('Need second word.Try command "info {}".'.format(inp[0]))
class Game:
def __init__(self,all_words,main_word):
self.all_words=all_words
self.main_word=main_word
def Say(self):
print('You said:{}'.format(sub_word))
def Walk(self):
print('Now going to {}'.format(sub_word))
print('You are in the {}.'.format(sub_word))
if sub_word=='forest':
print('Enter ":start fighting" for battle')
def Info(self):
if sub_word == 'walk':
print('village, forest - available words for "walk".')
elif sub_word == 'info':
print('walk, fighting - available words for "info".')
elif sub_word == 'fighting':
            print('Usable commands: change (arm, sword_iron, axe_iron or spear_iron), attack, magic, item (health_potion or mana_potion), run away.')
else:
print('Unknown action word')
#Player stats
class Player:
def __init__(self,player_health,player_weapon,player_mana,player_stamina,player_armor,player_magic,player_magic_equiped,player_xp):
self.player_health=player_health
self.player_weapon=equiped_weapon
self.player_mana=player_mana
self.player_stamina=player_stamina
self.player_armor=player_armor
self.player_magic=player_magic
self.player_magic_equiped=equiped_magic
self.player_xp=player_xp
#Enemy stats and actions
class Creatures(Player):
def __init__(self,mob_health,mob_mana,mob_stamina,mob_attack,mob_magic,mob_armor):
self.mob_health=mob_health
self.mob_mana=mob_mana
self.mob_stamina=mob_stamina
self.mob_attack=mob_attack
self.mob_magic=mob_magic
self.mob_armor=mob_armor
def Fighting(self):
        print('The battle begins.')
        random_numb = random.randint(1,10)
        if random_numb not in (1, 2):  # 8 rounds in 10 spawn the common enemy, otherwise the big one
print(comm_enemy,'appear!')
enemy_name=comm_enemy
enemy=Creatures(health[comm_enemy],mana[comm_enemy],enemy_stamina[comm_enemy],damage[comm_enemy],magic[comm_enemy],armor[comm_enemy])
else:
print(big_enemy,'appear!')
enemy_name=big_enemy
enemy=Creatures(health[big_enemy],mana[big_enemy],enemy_stamina[big_enemy],damage[big_enemy],magic[big_enemy],armor[big_enemy])
def mob_crithit():
player.player_health-=int(round(enemy.mob_attack*2))
enemy.mob_stamina-=stamina[enemy_name]
def mob_magic():
player.player_health-=int(round(enemy.mob_magic))
enemy.mob_mana-=mana[enemy_name]
def player_hit():
enemy.mob_health-=int(round(damage[player.player_weapon]*(1 - enemy.mob_armor/10)))
player.player_stamina-=stamina[player.player_weapon]
def player_criticalhit():
enemy.mob_health-=int(round(damage[player.player_weapon]*2))
player.player_stamina-=stamina[player.player_weapon]
enemy.mob_stamina=stamina[enemy_name]
def player_magic_hit():
enemy.mob_health-=int(round(magic[player.player_magic_equiped]))
player.player_mana-=mana[player.player_magic_equiped]
def mob_hit():
player.player_health-=int(round(enemy.mob_attack*(1 - player.player_armor/10)))
enemy.mob_stamina-=stamina[enemy_name]
while True:
if enemy.mob_health > 0 and player.player_health > 0:
random_numb=random.randint(1,10)
print('HP-',player.player_health,'; Stamina-',player.player_stamina,'; Mana-',player.player_mana,'; Damage-',damage[player.player_weapon])
print('Equiped weapon -',player.player_weapon)
print('Enemy stats: HP-',enemy.mob_health,'; Stamina-',enemy.mob_stamina,'; Armor-',enemy.mob_armor,'; Mana-',enemy.mob_mana)
inp=input(': ').split()
if len(inp)==2:
act_word=inp[0]
sub_word=inp[1]
#command for leave
if act_word== 'run' and sub_word=='away':
print('Leaving battle.')
break
#command for items
elif act_word == 'item':
if sub_word == 'health_potion':
if player.player_health < 36:
player.player_health+=health[sub_word]
else:
player.player_health=50
elif sub_word == 'mana_potion':
if player.player_mana < 29:
player.player_mana+=mana[sub_word]
else:
player.player_mana=30
else:
print('Unknown item')
#command for change weapon
elif act_word=='change':
if sub_word in players_weapons and not sub_word == player.player_weapon:
player.player_weapon=sub_word
print('Done.')
elif sub_word in players_weapons and sub_word == player.player_weapon:
                            print('You already have', sub_word, 'equipped')
elif sub_word not in players_weapons:
if sub_word in players_magics and sub_word == player.player_magic_equiped:
                                print('You already have the magic', sub_word)
elif sub_word in players_magics and not sub_word == player.player_magic_equiped:
equiped_magic=sub_word
player.player_magic_equiped=sub_word
print('Done.')
else:
print('You have no',sub_word)
else:
print('Unknown command')
elif len(inp)==1:
act_word=inp[0]
#command for attack
if act_word=='attack':
if player.player_stamina > stamina[player.player_weapon]:
if random_numb == 10:
player_criticalhit()
else:
if enemy.mob_stamina==0:
player_criticalhit()
else:
player_hit()
random_numb=random.randint(1,10)
if random_numb == 10:
mob_crithit()
else:
mob_hit()
else:
print("You can't fight with this weapon. Leave this battle and restore some stramina or change weapon.")
#command for magic
if act_word=='magic':
if player.player_mana > mana[player.player_magic_equiped]:
player_magic_hit()
else:
print("You can't use this magic. Leave this battle and restore some mana or change magic.")
else:
print('Unknown command.')
elif enemy.mob_health <= 0:
print(enemy_name,'is dead.')
print(XP[enemy_name],'XP earned')
player.player_xp+=XP[enemy_name]
break
elif player.player_health <=0:
print("Oops, you died.")
break
#Player args: 1-hp, 2-weapon, 3-mana, 4-stamina, 5-armor, 6-magic damage, 7-equipped magic, 8-XP
player = Player(50,equiped_weapon,6,24,armor[equiped_armor],magic[equiped_magic],equiped_magic,0)
print('Try to write "info info".')
while True:
if player.player_health > 0:
inp = input(': ').split()
if not len(inp)==2:
Get_input()
else:
act_word = inp[0]
sub_word = inp[1]
if act_word in actions_words:
talk = Game(inp,act_word)
if act_word == 'say':
talk.Say()
elif act_word=='walk':
if sub_word in locations_words:
if not player_location==sub_word:
player_location=sub_word
talk.Walk()
else:
print('You are already in {}'.format(sub_word))
else:
                        print('Unknown location. Try command "info walk".')
elif act_word=='info':
talk.Info()
elif act_word=='start' and sub_word=='fighting':
if player_location=='forest':
comm_enemy="Wolf"
big_enemy="Big_wolf"
Creatures.Fighting(0)
else:
                print('Unknown action. Try command "info info".')
else:
break
| UTF-8 | Python | false | false | 10,723 | py | 1 | Game.py | 1 | 0.497622 | 0.487084 | 0 | 241 | 42.443983 | 154 |
kevwjin/ContactsWebScraper | 12,876,312,001,479 | 57a7c14076166cde8556e36500fbd5791ea7847a | 9223e41fa2489085dc4af37e5856fb8741bc11a6 | /WebContactScraper.py | 7b6b2d6d3535f0c65c59c0823ae1c989a68be192 | [] | no_license | https://github.com/kevwjin/ContactsWebScraper | d522ccf9d41682e7d954f2aa0639ca6165e6ca19 | 165b598399db5178f70e225eab681724620f9533 | refs/heads/master | 2023-04-13T19:58:30.306724 | 2023-03-21T04:52:19 | 2023-03-21T04:52:19 | 288,385,997 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Demo file for Spyder Tutorial
# Hans Fangohr, University of Southampton, UK
import requests
from bs4 import BeautifulSoup
import re
from sortedcontainers import SortedSet
class WebsiteContacts:
"""
    The WebsiteContacts module extracts contact information from a website given a url.
    Given a url, the module extracts the emails, facebooks, instagrams, twitters, and linkedins
    found on pages whose url contains 'about', 'contact', 'our team', or 'board of'.
"""
def __init__(self, url="", emails=None, facebooks=None, instagrams=None, twitters=None, linkedins=None):
self._url = url
emptyCnt = 0
if emails == None:
emails = SortedSet()
emptyCnt += 1
if facebooks == None:
facebooks = SortedSet()
emptyCnt += 1
if instagrams == None:
instagrams = SortedSet()
emptyCnt += 1
if twitters == None:
twitters = SortedSet()
emptyCnt += 1
if linkedins == None:
linkedins = SortedSet()
emptyCnt += 1
self._emails = emails
self._facebooks = facebooks
self._instagrams = instagrams
self._twitters = twitters
self._linkedins = linkedins
# only find contacts if all fields other than url are empty
if url != "" and emptyCnt == 5:
self.__find_contacts()
def __sorted_set_to_string(self, convert_set):
"""Converts the sorted set to a string with values separated by commas.
Pre:
self: WebsiteContacts object
convert_set: the set to be converted
Post:
None
Return:
returnStr: the formatted string from the convert_set
"""
returnStr = ''
for i in range(len(convert_set)):
returnStr += convert_set[i] + ', '
returnStr = returnStr[:len(returnStr)-2]
return returnStr
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
@property
def emails(self):
return self.__sorted_set_to_string(self._emails)
@emails.setter
def emails(self, emails):
self._emails = emails
@property
def facebooks(self):
return self.__sorted_set_to_string(self._facebooks)
@facebooks.setter
def facebooks(self, facebooks):
self._facebooks = facebooks
@property
def instagrams(self):
return self.__sorted_set_to_string(self._instagrams)
@instagrams.setter
def instagrams(self, instagrams):
self._instagrams = instagrams
@property
def twitters(self):
return self.__sorted_set_to_string(self._twitters)
@twitters.setter
def twitters(self, twitters):
self._twitters = twitters
@property
def linkedins(self):
return self.__sorted_set_to_string(self._linkedins)
@linkedins.setter
def linkedins(self, linkedins):
self._linkedins = linkedins
def __extract_urls(self, html, root):
"""Extracts all URLs with shared roots from a given webpage.
Pre:
self: WebsiteContacts object
html: html of webpage
root: root of URL
Post:
None
Return:
URLs: set of URLs associated with the given root
"""
#print("Root is:", root)
#print()
soup = BeautifulSoup(html, 'lxml')
urls = set()
for a in soup.find_all('a', href=True):
if (a['href'].find(root) == 0):
urls.add(a['href'])
#print("Found URL:", a['href'])
# else if only relative URL
elif (len(a['href']) > 0 and a['href'][0] == '/'):
urls.add(root + a['href'])
#print("Found URL:", root + a['href'])
#print(urls)
return urls
def __decode_email(self, email):
"""Decodes encoded email.
Pre:
self: WebsiteContacts object
email: encoded email
Post:
None
Return:
decoded: decoded email
"""
decoded = ''
key = int(email[:2], 16)
for i in range(2, len(email)-1, 2):
decoded += chr(int(email[i:i+2], 16)^key) # ^ stands for XOR
return decoded
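    # Worked example (illustrative values, not from the original repo): with key
    # byte 0x42, the hex string "4223200221266c212d2f" decodes to "ab@cd.com";
    # XOR-ing each payload byte with the first (key) byte recovers one character.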
def __find_contacts(self):
"""Finds emails of a blog given the root URL.
Pre:
self - WebsiteContacts object
Post:
self.emails: updated set of emails
self.facebooks: updated set of facebooks
self.instagrams: updated set of instagrams
self.twitters: updated set of twitters
self.twitters: updated set of linkedins
Return:
None
"""
# find domain start given root URL
domain_start = 0
for i in range(len(self._url)-1, 0, -1):
if self._url[i] == ".":
domain_start = i
break
# find domain end given root URL
domain_end = 0
for i in range(len(self._url)-1, domain_start, -1):
if self._url[i] == '/':
domain_end = i
break
if domain_end == 0:
domain_end = len(self._url)-1
domain = self._url[domain_start : domain_end]
# setup for while loop conditions
visited = set()
urls = [self._url]
i = 0
while i < min(200, len(urls)):
url = urls[i]
i += 1
# if URL in visited then skip to the next iteration
if url in visited:
continue
# otherwise add the URL to set of visited URLs
visited.add(url)
# print URL to be tried
#print("Trying:", url);
try:
html = requests.get(url, headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36"}).text
except:
print ("HTML from", url, "was not extracted.")
# if HTML was not extracted, continue to next iteration
continue
if len(re.findall('(contact|about|our team|board of)', url)) != 0:
# list of emails found on URL
                url_emails = re.findall(r'[a-z0-9-_]+@[a-z0-9-_]+\.com', html, re.IGNORECASE)
encoded_url_emails = re.findall("data-cfemail=\"[a-z0-9]{38}\"", html)
# list of SNS contact found on URL
                url_facebooks = re.findall(r'facebook\.com/[@a-z0-9-_]+/?"', html, re.IGNORECASE)
                url_instagrams = re.findall(r'instagram\.com/[@a-z0-9-_]+/?"', html, re.IGNORECASE)
                url_twitters = re.findall(r'twitter\.com/[@a-z0-9-_]+/?"', html, re.IGNORECASE)
                url_linkedins = re.findall(r'linkedin\.com/[@a-z0-9-_]+/?"', html, re.IGNORECASE)
# find email contacts
for email in url_emails:
self._emails.add(email.lower())
for email in encoded_url_emails:
self._emails.add(self.__decode_email(email[-39:-1]).lower())
# find SNS contacts
for facebook in url_facebooks:
if (facebook[-2 : -1] == '/'):
self._facebooks.add(facebook.lower()[:-2])
else:
self._facebooks.add(facebook.lower()[:-1])
for instagram in url_instagrams:
if (instagram[-2 : -1] == '/'):
self._instagrams.add(instagram.lower()[:-2])
else:
self._instagrams.add(instagram.lower()[:-1])
for twitter in url_twitters:
if (twitter[-2 : -1] == '/'):
self._twitters.add(twitter.lower()[:-2])
else:
self._twitters.add(twitter.lower()[:-1])
for linkedins in url_linkedins:
if (linkedins[-2 : -1] == '/'):
self._linkedins.add(linkedins.lower()[:-2])
else:
self._linkedins.add(linkedins.lower()[:-1])
# look for additional URLs only if its the first iteration
if i == 1:
for next_urls in self.__extract_urls(html, url[:url.find(domain)+4]):
urls.append(next_urls);
def __repr__(self):
"""Convert to formal string, for repr().
Pre:
self: WebsiteContacts object
Post:
None
Return:
string representing WebsiteContact constructor with corresponding contents
"""
return "WebsiteContacts({}, {}, {}, {}, {}, {})".format(self._url, self._emails, self._facebooks, self._instagrams, self._twitters, self._linkedins)
def return_contacts(self, contacts):
"""The contacts of the contact type are printed.
Pre:
self: WebsiteContacts object
contacts: set or sorted set of contacts
Post:
None
Return:
items in contacts returned
"""
returnStr = ""
if len(contacts) == 0:
returnStr = 'None\n'
else:
for contact in contacts:
returnStr += contact + '\n'
return returnStr
def __str__(self):
"""All contacts in the object are formatted to a string.
Pre:
self: WebsiteContacts object
Post:
all contacts printed
Return:
None
"""
returnStr = 'URL: ' + self.url + '\n'
returnStr += 'Email(s):\n' + self.return_contacts(self._emails) + '\n'
returnStr += 'Facebook(s):\n' + self.return_contacts(self._facebooks) + '\n'
returnStr += 'Instagram(s):\n' + self.return_contacts(self._instagrams) + '\n'
returnStr += 'Twitter(s):\n' + self.return_contacts(self._twitters) + '\n'
returnStr += 'LinkedIn(s):\n' + self.return_contacts(self._linkedins)
return returnStr
if __name__ == '__main__':
contact = WebsiteContacts('http://fitlittlecookie.wordpress.com/')
print(str(contact)) | UTF-8 | Python | false | false | 10,756 | py | 4 | WebContactScraper.py | 3 | 0.505857 | 0.49656 | 0 | 318 | 32.827044 | 195 |
mtshrmn/horrible-downloader | 14,551,349,222,944 | 603a1c05527c4178cba688723ae402e15f554ab7 | b4ee0d716b234eede629d09719ec276eeb141b89 | /test/test_cmd_funcs.py | 3b454ca0028bf7e1a643910fdf7630e6d32fb54c | [
"MIT"
] | permissive | https://github.com/mtshrmn/horrible-downloader | b15d6c7f086b42aea70b4492f90cb20ed9df46e2 | f6fd6abcc4022dedf8c79b01a8f06e57a9d7695c | refs/heads/master | 2022-12-24T14:58:25.231657 | 2020-10-02T07:53:33 | 2020-10-02T07:53:33 | 138,234,790 | 11 | 7 | MIT | false | 2020-09-20T15:20:50 | 2018-06-22T00:17:01 | 2020-09-17T15:51:02 | 2020-09-20T15:20:49 | 220 | 28 | 7 | 4 | Python | false | false | import os
import sys
import pytest
from itertools import combinations
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from HorribleDownloader.cmd import valid_qualities, episode_filter
def test_quality_verification():
    for r in range(1, 4):  # every non-empty subset of the three qualities
for qualities in combinations(["480", "720", "1080"], r):
assert valid_qualities(qualities)
def test_episode_filter_generation():
data = [
("1,2,3,4", [1, 3, 4.5, 5], [1, 3]),
("1,3,5-7", [0.5, 1, 2, 5, 6], [1, 5, 6]),
("=<3,9>", [0, 0.1, 2.9, 3, 5, 9, 10.5], [0, 0.1, 2.9, 3, 10.5])
]
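    # Inferred from the cases above: filter strings accept comma-separated episode
    # numbers, closed ranges like "5-7", and open bounds like "=<3" / "9>".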
for query, episodes, expected_output in data:
def ep_filter(episode, filter_str=query):
return episode_filter(episode, filter_str)
filtered = list(filter(ep_filter, episodes))
assert filtered == expected_output
| UTF-8 | Python | false | false | 871 | py | 15 | test_cmd_funcs.py | 7 | 0.602755 | 0.535017 | 0 | 27 | 31.259259 | 79 |
corl19paper273/midlevel-visual-priors | 12,927,851,594,178 | b081ac263c3e602a9616f9ddf4a51bce207e67f3 | 1abce5fa8185575ca58a37e8dbdd1352a37b5a51 | /evkit/models/actor_critic_module_curiosity.py | 6cc46107ef8917fcd4a7a255c86492e52afad5dc | [
"MIT"
] | permissive | https://github.com/corl19paper273/midlevel-visual-priors | a76efe6c470709b88b230043bb3a2f4dd5054aa2 | 9dbfeabf072ee9c841fefb1f854c8bc6ca8f1cc1 | refs/heads/master | 2023-04-02T16:12:28.566110 | 2019-07-07T21:49:11 | 2019-07-07T21:49:11 | 195,699,842 | 0 | 0 | MIT | false | 2023-03-25T00:00:32 | 2019-07-07T21:46:06 | 2019-07-07T21:51:08 | 2023-03-25T00:00:28 | 380,394 | 0 | 0 | 1 | Python | false | false |
from .actor_critic_module import NaivelyRecurrentACModule
class ForwardInverseACModule(NaivelyRecurrentACModule):
'''
    This module adds forward and inverse (dynamics) models on top of the perception unit.
'''
def __init__(self, perception_unit, forward_model, inverse_model, use_recurrency=False, internal_state_size=512):
super().__init__(perception_unit, use_recurrency, internal_state_size)
self.forward_model = forward_model
self.inverse_model = inverse_model | UTF-8 | Python | false | false | 501 | py | 16 | actor_critic_module_curiosity.py | 13 | 0.710579 | 0.704591 | 0 | 12 | 40.75 | 117 |
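# Hypothetical usage sketch (the sub-model arguments are placeholders):
#   module = ForwardInverseACModule(perception_unit, forward_model, inverse_model,
#                                   use_recurrency=True, internal_state_size=512)
# Per the file name, the forward/inverse heads presumably feed curiosity-style
# intrinsic rewards.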
Bill78Zhang/Data-Mining-and-Warehousing-Lab | 3,530,463,166,627 | f392abfb6c459fb9c47272a3789bc08b60634b34 | f6affb8b53f0b4c1a637955abc513141c5dc4004 | /mknn.py | 0d20d2050695083490393f0e71197abe17e2123a | [] | no_license | https://github.com/Bill78Zhang/Data-Mining-and-Warehousing-Lab | 52e258b1defdb8fcd878b6a98ada19ed1a07913e | 34de179d4a92a672ae693ad7963bf91bada89536 | refs/heads/master | 2020-05-29T13:14:04.021569 | 2018-10-23T10:16:32 | 2018-10-23T10:16:32 | 189,153,222 | 1 | 0 | null | true | 2019-05-29T04:57:08 | 2019-05-29T04:57:07 | 2018-10-23T10:16:34 | 2018-10-23T10:16:33 | 165 | 0 | 0 | 0 | null | false | false | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 15:58:57 2018
@author: Student
"""
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('car.csv')
#assign name to the columns
dataset.columns = ['buying','maint','doors','persons','lug_boot','safety','classes']
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
#Categorical values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X = X.apply(LabelEncoder().fit_transform)
onehotencoder = OneHotEncoder(categorical_features=[2,3])
X = onehotencoder.fit_transform(X).toarray()
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
y_train = y_train.ravel()
y_test = y_test.ravel()
for K in range(25):
K_value = K+1
neigh = KNeighborsClassifier(n_neighbors = K_value, weights='distance', algorithm='auto')
neigh.fit(X_train, y_train)
y_pred = neigh.predict(X_test)
print "Accuracy is ", accuracy_score(y_test,y_pred)*100,"% for K-Value:",K_value
'''plt.plot(neighbors, MSE)
plt.xlabel('Number of Neighbors K')
plt.ylabel('Misclassification Error')
plt.show()''' | UTF-8 | Python | false | false | 1,462 | py | 29 | mknn.py | 24 | 0.713406 | 0.694938 | 0 | 45 | 30.533333 | 93 |
advanced4/covid19-predict | 11,330,123,776,986 | b97cb627023ecd697b2b677db29882c824523c16 | bb9d73016624de23ca6135beb688c03c7106f62f | /example-settings.py | abae4765ccfb0778e224139d53ef52e92fdb79cd | [] | no_license | https://github.com/advanced4/covid19-predict | f3ee231b7f04e8f585164ccb3698691a3ea5f7ef | d70418070ff50dc36cf46058047e1a1bf8383160 | refs/heads/master | 2021-04-22T02:21:01.557983 | 2020-05-05T20:43:00 | 2020-05-05T20:43:00 | 249,843,228 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | file_to_load = "example"
population = 1000 | UTF-8 | Python | false | false | 42 | py | 8 | example-settings.py | 5 | 0.738095 | 0.642857 | 0 | 2 | 20.5 | 24 |
trhn94/pythonexamples | 2,963,527,463,098 | 0e226efa1f7289ccaf799f8ff70db0bf107f3e66 | e2d2c16dec3e748b39f6ec45c39f294818d1f92f | /alıstırma12.py | 80a386c2b0e96579406ea860c7a5487d656f8479 | [] | no_license | https://github.com/trhn94/pythonexamples | 8e1287192391b0caf4c22c5094731c8c24e1830f | 66d440621781c8a103fb02ee1547bcaaa80b2a8f | refs/heads/master | 2021-01-25T14:21:45.150995 | 2018-03-03T12:00:26 | 2018-03-03T12:00:26 | 123,682,581 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import randrange as rr
import math

x = rr(1, 100)
print(x)
ubnt-amber/selenium-v5 | 1,451,698,985,188 | 353e4f4809c4170e50497c6628ca9e518b3f30ce | c5c36ddb89cf347a6090d76808bea1cc740445e5 | /reusable_functions_generic.py | 4312eab3c6d87d05e4a6f35311b7035f00be9fd1 | [] | no_license | https://github.com/ubnt-amber/selenium-v5 | 500d22bab98fc9d5ce74b66359c5350670be6f95 | 27bafe00fb309832910aba07784fd333f1cd1109 | refs/heads/master | 2016-08-09T00:15:18.195803 | 2016-03-05T15:43:29 | 2016-03-05T15:43:29 | 49,012,692 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import time, sys, inspect #re, signal, os, os.path, subprocess,
from reusable_variables import *
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
# --------------------
# Get elements
# --------------------
# Get elements with xpath
# driver: webdriver
# searchElement: css element(s) to search for
# searchString: value string(s) to search for
# elementType: css element type
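# e.g. getElements(driver, ['btn', 'primary'], 'Save', 'class') builds the XPath
#   //*[contains(@class, 'btn') and contains(@class, 'primary') and contains(text(), 'Save')]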
def getElements(driver, searchElement, searchString, elementType):
try:
# Initialize variables
upperElementType = convertTo(elementType, 'UPSTR')
xType = elementType
string = ''
finalString = ''
if ( upperElementType == 'ID' ):
xType = 'id'
elif ( upperElementType == 'CLASS' ):
xType = 'class'
if isNull(xType) == False: # xType != ''
if isinstance(searchElement, list):
string = '' # Clear the string value
for obj in searchElement:
string += "contains(@" + xType + ", '" + obj + "') and "
finalString += string[ 0 : len(string)-5 ]
else:
finalString += "contains(@" + xType + ", '" + searchElement + "')"
if isNull(searchString) == False: # searchString != ''
string = '' # Clear the string value
if isNull(finalString) == False: # finalString != ''
string = ' and '
if isinstance(searchString, list):
for obj in searchString:
string += "contains(text(), '" + obj + "') and "
finalString += string[ 0 : len(string)-5 ]
else:
finalString += string + "contains(text(), '" + searchString + "')"
#if debug == True:
#print finalString
return driver.find_elements_by_xpath("//*[" + finalString + "]")
except Exception as inst:
print "--------------------------------------------------"
print "Invalid getElements():"
print "Element={0}, searchString={1}, Type={2}" .format(convertTo(searchElement,'STR'), convertTo(searchString,'STR'), elementType)
print "----------"
print "Type:"
print type(inst)
print "----------"
print "Inst:"
print inst
print "--------------------------------------------------\n"
quit(driver)
# Find element by type
# args:
# (3)
# driver: webdriver
# searchEle: element to find
# elemType: type of element
# (4)
# driver: webdriver
# searchEle: element to find
# elemType: type of element
# quitTest: quit script if fails (default True)
# (5)
# driver: webdriver
# searchEle: element to find
# elemType: type of element
# quitTest: quit script if fails
# findVisible: find only visible elements (default False)
def findElementByType(*args):
meetsArgRequirement(args, 3)
# Initialize variables
driver = args[0]
searchEle = args[1]
elemType = args[2]
quitTest = args[3] if len(args) >=4 else True # Override with passed quitTest
findVisible = args[4] if len(args) >= 5 else False
upperElemType = convertTo(elemType, 'UPSTR')
if upperElemType == 'NAME':
elemsFound = driver.find_elements_by_name(searchEle)
elif upperElemType == 'ID':
elemsFound = driver.find_element_by_id(searchEle)
else:
elemsFound = getElements(driver, searchEle, '', elemType)
"""
elif upperElemType == 'CLASS':
elemsFound = getElements(driver, searchEle, '', elemType)
else:
print 'Find element by {0} does not exist\n' .format(upperElemType)
if quitTest == True:
quit(driver)
"""
if isinstance(elemsFound, list) == False:
elemsFound = [elemsFound]
if len(elemsFound) == 0:
return [] # Return null
if findVisible == True:
for elem in elemsFound:
if elem.is_displayed():
return elem # Return the first element found
return elemsFound[0]
# Wait for an element to become present on the screen
# driver: webdriver
# time: wait time
# searchElement: css element(s) to search for
# searchString: value string(s) to search for
# elementType: css element type
def waitForElementPresent(driver, time, searchElement, searchString, elementType):
try:
# Initialize variables
upperElementType = convertTo(elementType, 'UPSTR')
xType = elementType
string = ''
finalString = ''
if ( upperElementType == 'ID' ):
xType = 'id'
#return WebDriverWait(driver, time).until(EC.presence_of_element_located((By.ID, searchElement)))
elif ( upperElementType == 'CLASS' ):
xType = 'class'
#if ( upperElementType == "CUSTOM" ):
#finalString = "@customattribute='" + searchElement + "'"
#return WebDriverWait(driver, time).until(EC.visibility_of_element_located((By.XPATH, "//*[" + finalString + "]")))
if isNull(xType) == False: # xType != ''
if isinstance(searchElement, list):
string = '' # Clear string value
for obj in searchElement:
string += "contains(@" + xType + ", '" + obj + "') and "
finalString += string[ 0 : len(string)-5 ]
else:
finalString += "contains(@" + xType + ", '" + searchElement + "')"
if isNull(searchString) == False: # searchString != ''
string = '' # Clear string value
if isNull(finalString) == False: # finalString != ''
string = ' and '
if isinstance(searchString, list):
for obj in searchString:
string += "contains(text(), '" + obj + "') and "
finalString += string[ 0 : len(string)-5 ]
else:
finalString += string + "contains(text(), '" + searchString + "')"
#if debug == True:
#print finalString
return WebDriverWait(driver, time).until(EC.visibility_of_element_located((By.XPATH, "//*[" + finalString + "]")))
except Exception as inst:
print "--------------------------------------------------"
print "Invalid waitForElementPresent():"
print getCallerInfo() + "\n"
print "Element={0}, searchString={1}, Type={2}, TimeOut={3}" .format(convertTo(searchElement, 'STR'), convertTo(searchString, 'STR'), elementType, convertTo(time, 'STR'))
print "----------"
print "Type:"
print type(inst)
print "----------"
print "Inst:"
print inst
print "--------------------------------------------------\n"
quit(driver)
# --------------------
# Script functions
# --------------------
# Define the driver.
# unifiController: controller link
def getDriver(unifiController):
# Initialize variables
driver = webdriver.Firefox() # Get local session of firefox
driver.get(unifiController) # Load page
driver.maximize_window() # Maximize the page
return driver
# Delay the script
# sleepTime: time to delay
def sleep(sleepTime):
time.sleep(sleepTime)
# Check if object is null.
# obj: object for comparison
def isNull(obj):
if obj is None or obj == '' or (type(obj) == list and len(obj) == 0):
return True
return False
# Print the function name.
# function: function name
def printFunctionName(function):
print "**************************************************"
print convertTo(function, 'UPSTR')
print "**************************************************"
# Set the element's text.
# element: element to be modified
# text: text to be set
def setText(element, text):
element.clear()
element.send_keys(text)
print "Entered value:\"{0}\" into field:\"{1}\"\n" .format(convertTo(text, 'STR'), convertTo(element.get_attribute('name'), 'STR'))
# Check if the values are matching.
# driver: webdriver
# actualVal: current value
# expectedVal: expected value
def getMatching(driver, actualVal, expectedVal):
if convertTo(actualVal, 'UPSTR') != convertTo(expectedVal, 'UPSTR'):
print "The values do not match. Actual={0}, Expected={1}\n" .format(convertTo(actualVal, 'STR'), convertTo(expectedVal, 'STR'))
return False
return True
# Convert the value to the desired type.
# val: value to convert
# instType: desired type
def convertTo(val, instType):
upperInstType = str(instType).upper()
if ('STR' in upperInstType) or ('STRING' in upperInstType):
if isinstance(val, str) == False:
val = str(val)
if 'UP' in upperInstType:
val = val.upper()
elif upperInstType == 'LIST':
if isinstance(val, list) == False:
val = [val]
else:
print "Invalid convertTo(): instType={0}" .format(instType)
if debug:
print getCallerInfo()
print ''
return val
# Get caller information for error debugging.
def getCallerInfo():
# Use [1] to get the caller
# Use [2] to get the caller of the caller
(frame, filename, line_number, function_name, lines, index) = inspect.getouterframes(inspect.currentframe())[2]
if convertTo(function_name, 'STR') == '<module>':
function_name = 'main'
return "Called from {0}: {1}() on line {2}" .format(filename, function_name, line_number)
# Ensure the passed arguments meet the requirements.
# args:
# (2)
# arg: arguments for the caller
# minLength: minimum arguments for the caller
# (3)
# arg: arguments for the caller
# minLength: minimum arguments for the caller
# maxLength: maximum arguments for the caller
def meetsArgRequirement(*args):
# Required arguments
if len(args) < 2:
print "Invalid meetsArgRequirement(): {0} args found" .format(len(args))
if debug:
print getCallerInfo()
print ''
quitScript()
# Initialize variables
arg = args[0] # Arguments to compare
minLength = args[1] # Minimum number of arguments
maxLength = args[2] if len(args) >= 3 else '' # Maximum number of arguments
argLength = len(arg)
callerFn = ''
if (argLength < minLength) or ((isNull(maxLength) == False) and (argLength > maxLength)):
callerFn = inspect.stack()[1][3]
if argLength == 0:
print "Invalid meetsArgRequirement(). {0} args found" .format(argLength)
else:
print "Invalid meetsArgRequirement():"
for idx, arg in enumerate(args):
print "arg[{0}]={1}" .format(idx, arg)
if debug:
print getCallerInfo()
print ''
quitScript()
# Print the arguments in a columnar format.
# args:
# (1)
# data: values on screen
# (2)
# data: values on screen
# headers: name of values
def prettyPrint(*args):
meetsArgRequirement(args, 1)
# Initialize variables
padding = 2
finalHeader = ''
data = args[0]
headers = args[1] if len(args) >= 2 else '' # Override with passed headers
headersWidth = max(len(word) for word in headers[:-1]) if isinstance(headers, list) else len(headers) # Width of headers
dataWidth = max(len(word) for row in data for word in row[:-1]) # Width of data
colWidth = max(headersWidth, dataWidth) + padding # Width of columns
#print 'headersWidth='+str(headersWidth) + ', dataWidth='+str(dataWidth) + ', colWidth='+str(colWidth)
rows = []
headerRows = []
for header in headers:
headerRows.append(convertTo(header, 'UPSTR'))
rows.append(headerRows)
for row in data:
rowVals = []
for word in row:
rowVals.append(convertTo(word, 'str'))
rows.append(rowVals)
widths = [max(map(len, col))+padding for col in zip(*rows)]
for row in rows:
print ("".join((val.ljust(width) for val, width in zip(row, widths)))).join("".ljust(padding))
"""
if isinstance(headers, list):
for header in headers:
finalHeader += ''.join(convertTo(header,'UPSTR').ljust(colWidth))
else:
finalHeader += convertTo(headers, 'STR')
print finalHeader.join(''.ljust(padding))
for row in data:
print (''.join(word.ljust(colWidth) for word in row)).join(''.ljust(padding))
"""
print ''
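# Example usage (illustrative): prettyPrint([['ok', '200'], ['fail', '404']], ['status', 'code'])
# prints the upper-cased headers and rows, each column padded to its widest cell.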
# Close the browser and quit application.
# driver: webdriver
def quit(driver):
printFunctionName("QUIT")
sleep(2)
driver.quit()
sys.exit()
# Quit the script wihout changing the app state.
def quitScript():
sys.exit("Quitting the application unexpectedly.") | UTF-8 | Python | false | false | 11,964 | py | 9 | reusable_functions_generic.py | 9 | 0.639 | 0.633567 | 0 | 388 | 29.837629 | 172 |
mnestis/advent2015 | 7,567,732,422,188 | 90ad4279bb844ef1c92d2420cd73e5fedf421171 | 6232fdbcdd1951faefef1477d6e8a026ce5a253c | /12/part2.py | 8b86a182004bd12c41d6d56f7904bb17f0566ade | [
"MIT"
] | permissive | https://github.com/mnestis/advent2015 | 3c191d33e284fb206a91316dd026198c11b32852 | 36d85230f77d23f246604db34c4c7a103b70780a | refs/heads/master | 2021-01-10T06:06:28.436860 | 2015-12-25T05:16:58 | 2015-12-25T05:16:58 | 47,896,749 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import json
def process_json_file(input_filename):
json_obj = json.load(open(input_filename))
return process_obj(json_obj)
def process_obj(obj):
running_total = 0
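    # Part 2 rule (AoC 2015 day 12): any object containing the value "red" is
    # skipped entirely; the for/else only sums when the loop exits without break.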
for key in obj:
if obj[key] == "red":
break
else:
for key in obj:
if isinstance(obj[key], int):
running_total += obj[key]
elif isinstance(obj[key], list):
running_total += process_list(obj[key])
elif isinstance(obj[key], unicode):
pass
else:
running_total += process_obj(obj[key])
return running_total
def process_list(lst):
running_total = 0
for item in lst:
if isinstance(item, int):
running_total += item
elif isinstance(item, list):
running_total += process_list(item)
elif isinstance(item, unicode):
pass
else:
running_total += process_obj(item)
return running_total
if __name__=="__main__":
print process_json_file("input.txt")
| UTF-8 | Python | false | false | 1,107 | py | 36 | part2.py | 35 | 0.535682 | 0.533875 | 0 | 50 | 21.04 | 55 |
lilongsy/python-tools | 12,807,592,486,736 | f1c03b2b4d384771e6eecb2d4d2bfa4d850d7f3b | 8c175ba523ff076d93f652951e52e17f6a281f66 | /xls2mysql/xls2mysql.py | 114c1ad214e6b62e0006ac844599233a23b99d8f | [] | no_license | https://github.com/lilongsy/python-tools | fa568e0db7dabfa5cb2ff7c6d25e1fe04c2f6e39 | 5bc32b0a5f81fb0a7279ee835bfc973eb46b8420 | refs/heads/master | 2020-12-24T05:23:29.453487 | 2017-05-16T08:56:10 | 2017-05-16T08:56:10 | 62,382,184 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import MySQLdb
import configparser
import os
import xlrd
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))
# connect database
dbc = configparser.ConfigParser()
dbc.read(PATH('../db.ini'))
conn = MySQLdb.connect(host=dbc.get("db", 'db_host'), user=dbc.get("db", 'db_user'), passwd=dbc.get("db", 'db_pass'), db=dbc.get("db", 'db_database'), port=int(dbc.get("db", 'db_port')), charset='utf8')
cur = conn.cursor(MySQLdb.cursors.DictCursor)
# read xls file
work_book = xlrd.open_workbook(filename=PATH('wechat.xls'))
sheets = work_book.sheets()
for sheet in sheets:
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
value = sheet.cell(row, col).value
value = str(int(value)) if isinstance(value, float) or isinstance(value, int) else value
values.append(value)
        # skip rows whose wechatname already exists in the table
wechatname = values[1]
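        # parameterized queries below let the driver handle quoting/escaping safely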
cur.execute("SELECT count(*) as count FROM `iqilu_wechat` WHERE `wechatname`='%s'" % wechatname)
count = cur.fetchone()['count']
if not count:
print "INSERT %s" % values[0]
cur.execute("INSERT INTO `iqilu_wechat` (`wid`, `wechat`, `wechatname`, `catid`, `status`) "
"VALUES (NULL, '%s', '%s', '%s', '%s');" % tuple(values))
conn.commit()
cur.close()
conn.close()
| UTF-8 | Python | false | false | 1,447 | py | 10 | xls2mysql.py | 7 | 0.619903 | 0.616448 | 0 | 41 | 34.292683 | 202 |
dkmiller/tidbits | 10,058,813,440,225 | feb97b122c792dfa4c80fc162a4a9b02b0a505cc | b6f35e66fdf0d007d58baea60a38b48e5b143892 | /2021/2021-11-04_mnist-e2e/components/convert_idx_to_npy/run.py | c2e34b7a21e944c8067dcf029652e3119283792b | [] | no_license | https://github.com/dkmiller/tidbits | 4467983c1d2b8a6495fbe1677d06be642303df22 | ccc6560cf8ff1a211f6b22c16da90692907ec638 | refs/heads/master | 2023-08-31T19:10:57.967971 | 2023-08-31T15:40:12 | 2023-08-31T15:40:12 | 19,216,030 | 0 | 0 | null | false | 2023-09-05T05:37:25 | 2014-04-27T20:14:39 | 2021-12-13T23:32:41 | 2023-09-05T05:37:24 | 43,968 | 0 | 0 | 43 | TeX | false | false | """
Imitate: https://stackoverflow.com/a/44712152 .
"""
import idx2numpy
import numpy as np
import os
from pathlib import Path
import sys
print(f"Arguments: {sys.argv}")
in_dir = Path(sys.argv[1])
out_dir = Path(sys.argv[2])
files = in_dir.glob("*")
for in_path in files:
out_path = out_dir / (in_path.name + ".npy")
if os.path.isfile(in_path):
print(f"{in_path} -> {out_path}")
arr = idx2numpy.convert_from_file(str(in_path))
np.save(str(out_path), arr)
| UTF-8 | Python | false | false | 496 | py | 482 | run.py | 136 | 0.629032 | 0.604839 | 0 | 27 | 17.37037 | 55 |
Edenzzzz/yoloface-master | 6,116,033,446,441 | ff865266c63ec7f2d64593aa619da0e69f0bf24d | cc164c16c28644aa0e5f9970d75b40af22c41062 | /resources/yoloface/Python39/Lib/ctypes/test/test_pep3118.py | 252511c25cdc400e272bbfd396842fa96c485724 | [] | no_license | https://github.com/Edenzzzz/yoloface-master | 03b5f46035f874ec29c39ba8dc1db942b2898cc8 | 148b96c2a4874064fb60fc4644822513b74e7d85 | refs/heads/main | 2023-07-08T01:47:39.428313 | 2022-10-06T04:08:39 | 2022-10-06T04:08:39 | 394,413,651 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:d8e35b9aa00030159f5907c4e0db059fb5d6fe5fafb498d27942f9c7a6c697d4
size 8516
| UTF-8 | Python | false | false | 129 | py | 1,383 | test_pep3118.py | 1,076 | 0.883721 | 0.51938 | 0 | 3 | 42 | 75 |
IndomitableIbises/Bangazon-API-Sprint-1 | 14,431,090,140,231 | db2276820326e8d931373ce2988d3ee13b496880 | e39971c5ac21fbdb72685043b6097cc09df6ae12 | /bangazon/API/migrations/0001_initial.py | 41bb67023b9091182dcd1b58186dfd3e7226082e | [] | no_license | https://github.com/IndomitableIbises/Bangazon-API-Sprint-1 | cf834693f8d8dc79864f86841b18742821fa6f64 | 273b000263900da31fc7b600ca2d3e379cad6342 | refs/heads/master | 2020-03-23T22:24:55.966311 | 2018-07-31T20:24:15 | 2018-07-31T20:24:15 | 142,174,514 | 0 | 0 | null | false | 2018-07-31T20:59:51 | 2018-07-24T15:00:28 | 2018-07-31T20:24:17 | 2018-07-31T20:46:31 | 3,270 | 0 | 0 | 3 | Python | false | null |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Computer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('purchase_date', models.DateTimeField(auto_now_add=True)),
('decom_date', models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_date', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('last_login', models.DateTimeField(auto_now_add=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('budget', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
],
),
migrations.CreateModel(
name='Emp_Training',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('supervisor', models.BooleanField(default=False)),
('name', models.CharField(max_length=30)),
('computer_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Computer')),
('department_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Department')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('completed', models.BooleanField(default=False)),
('customer_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Customer')),
],
),
migrations.CreateModel(
name='Payment_Type',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('account_num', models.IntegerField(unique=True)),
('customer_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Customer')),
],
),
migrations.CreateModel(
name='Prod_Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Order')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
('description', models.CharField(max_length=250)),
('quantity', models.IntegerField(default=1)),
('customer_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='API.Customer')),
],
),
migrations.CreateModel(
name='Product_Type',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Training_Prog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('start_date', models.DateTimeField(blank=True, null=True)),
('end_date', models.DateTimeField(blank=True, null=True)),
('max_attendees', models.PositiveIntegerField()),
],
),
migrations.AddField(
model_name='product',
name='type_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='API.Product_Type'),
),
migrations.AddField(
model_name='prod_order',
name='product_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Product'),
),
migrations.AddField(
model_name='order',
name='payment_id',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='API.Payment_Type'),
),
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(through='API.Prod_Order', to='API.Product'),
),
migrations.AddField(
model_name='emp_training',
name='employee_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Employee'),
),
migrations.AddField(
model_name='emp_training',
name='training_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Training_Prog'),
),
]
| UTF-8 | Python | false | false | 6,446 | py | 8 | 0001_initial.py | 6 | 0.561589 | 0.557245 | 0 | 138 | 45.695652 | 142 |
hirenbioinfo/Dev | 15,298,673,527,858 | 97b04b29065409fd478dc2c7ccb841fcea146f77 | f3d50139ceb1f64628fc8fc10d53da381c290c37 | /get-unmapped-from-mgrast/get-list-of-hits.py | f4f8e9e2fb79c885f7dd3e1e990c40ec080cd50c | [] | no_license | https://github.com/hirenbioinfo/Dev | d06d83d1adbfe9858dbdc5e636a8d7ea8181cc4a | 96307fd788a381e6f46584f85e22addca8b3b024 | refs/heads/master | 2020-12-29T02:07:27.686495 | 2014-10-30T17:55:18 | 2014-10-30T17:55:18 | 28,400,857 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
seen = set()
for line in open(sys.argv[1]):
    orf = line.rstrip().split('\t')[0]
    seen.add(orf)

for orf in seen:
    print orf
| UTF-8 | Python | false | false | 156 | py | 162 | get-list-of-hits.py | 156 | 0.538462 | 0.525641 | 0 | 9 | 16.333333 | 36 |
CHAKFI/Python | 7,559,142,469,172 | b4235384043b7355e48fe48fd4a9e15ea2802b85 | 701f162f186047c44e0964fbc4a8a66b3a2486d1 | /TP3/Exo3/ex3.3.py | be93d3ea09875736aae029e4d35d4b37aea7a999 | [] | no_license | https://github.com/CHAKFI/Python | 6447ee7253258afc3786a7c68db0c2b227c5ca45 | 6ace8ce61780d2300422393361bfd0d29522965c | refs/heads/master | 2020-04-03T18:11:08.491447 | 2018-12-31T12:39:24 | 2018-12-31T12:39:24 | 155,474,083 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | nbr = int(input('Veuillez entrer entrer : \n'))
if nbr < 10:
    print('Price: 5 DH')
elif nbr <= 20:
    print('Price: 4 DH')
else:
    print('Price: 3 DH')
alex-zaplik/crypto_labs | 6,201,932,819,059 | c4c28c387f2f9e4921776a8050fed25d320f50e3 | 8daa7c40acafd3210353c8cb223ad992193234d6 | /Lab_4/libs/utils.py | f923757fe3b5e880fc3c126301b3b0676528ab4d | [] | no_license | https://github.com/alex-zaplik/crypto_labs | f96f4ef5286dba4b49de36d6a6fa672313c75904 | 10cc99dfb62c4ec58bd39add03f77e52429a20cf | refs/heads/main | 2023-05-22T10:09:12.833286 | 2021-06-13T10:41:04 | 2021-06-13T10:41:04 | 353,636,284 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import functools
import random
def duplets(hex_input: str):
    if len(hex_input) % 2 != 0:
        hex_input = '0' + hex_input
    iterable = iter(hex_input)
    while True:
        try:
            yield next(iterable) + next(iterable)
        except StopIteration:
            return  # PEP 479: StopIteration must not leak out of a generator
def hex_from_bytes(bytes_input: bytes):
return bytes_input.hex()
def byte_from_hex(hex_input: str):
return int(hex_input, base=16).to_bytes(length=1, byteorder='big')
def bytes_from_hex(hex_input: str):
return bytearray.fromhex(hex_input)
def random_bytes(bytes_size):
return bytes(random.getrandbits(8) for _ in range(bytes_size))
def increment_bytes(bytes_number):
length = len(bytes_number)
incremented_int = int.from_bytes(bytes_number, byteorder='big') + 1
return incremented_int.to_bytes(length, byteorder='big')
def xor_bytes(left, right):
return bytes(left_byte ^ right_byte for left_byte, right_byte in zip(left, right))
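# Example usage (values are illustrative):
#   list(duplets("0a1b2c"))               -> ['0a', '1b', '2c']
#   hex_from_bytes(b'\x0f')               -> '0f'
#   xor_bytes(b'\x0f\xf0', b'\xff\x00')   -> b'\xf0\xf0'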
| UTF-8 | Python | false | false | 901 | py | 31 | utils.py | 13 | 0.680355 | 0.671476 | 0 | 36 | 24.027778 | 86 |
tunglx/group14_k57ca | 11,871,289,635,488 | 4a3f17d97821401d41cf3260152d7409cff9ce2f | 8d10d1fdfe4dafd95dcfdd413b0c86d9c72e9d67 | /periods/views.py | 6e105b2c0016ee354253e7b294ea9d871ffc8373 | [] | no_license | https://github.com/tunglx/group14_k57ca | 02b7aeba064aeb9391b33f31e5a5a038b00b0d9f | f384b3b51a561d7fd6752228baf485246f65c5a3 | refs/heads/master | 2021-01-18T20:48:20.361190 | 2014-03-17T01:32:47 | 2014-03-17T01:32:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
# Create your views here.
from models import Period
from forms import EditPeriodForm
def index(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/users/auth_login')
periods_array = Period.objects.all()
context = {'periods_array': periods_array}
return render(request, 'periods/index.html', context)
def detail(request, pid="0"):
if not request.user.is_authenticated():
return HttpResponseRedirect('/users/auth_login')
objs = []
periods = Period.objects.filter(id=pid)
if not periods.exists():
return HttpResponse("Period not found!")
for period in periods:
objs.append(period)
content = {
'period_id': pid,
'periods': objs,
}
return render(request, 'periods/detail/index.html', content)
def edit(request, pid="0"):
if not request.user.is_authenticated():
return HttpResponseRedirect('/users/auth_login')
if request.method == 'POST': # if the form has been submitted...
form = EditPeriodForm(request.POST) # A form bound to the POST data
if form.is_valid() and form.check_conflict(pid): # All validation rules pass
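            # position encodes the timetable slot as (day - 1) * 10 + start, i.e.
            # it assumes at most 10 teaching slots per day (same formula in add()).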
Period.objects.filter(id=pid).update(code=form.cleaned_data['code'],
name=form.cleaned_data['name'],
lecturer=form.cleaned_data['lecturer'],
position=(int(form.cleaned_data['day']) - 1) * 10 + int(form.cleaned_data['start']),
length=form.cleaned_data['length'])
return HttpResponseRedirect('/periods/id='+pid) # Redirect after POST
else:
return HttpResponse("Wrong Input! Try again, pls")
else:
form = EditPeriodForm()
return render(request, 'periods/edit/index.html', {
'form': form})
def add(request, pid="0"):
if not request.user.is_authenticated():
return HttpResponseRedirect('/users/auth_login')
if request.method == 'POST': # if the form has been submitted...
form = EditPeriodForm(request.POST) # A form bound to the POST data
if form.is_valid() and form.check_conflict(pid): # All validation rules pass
new_period = Period(
code=form.cleaned_data['code'],
name=form.cleaned_data['name'],
lecturer=form.cleaned_data['lecturer'],
position=(int(form.cleaned_data['day']) - 1) * 10 + int(form.cleaned_data['start']),
length=form.cleaned_data['length'],
timetable_id=pid)
new_period.save()
return HttpResponseRedirect('/') # Redirect after POST
else:
return HttpResponse("Wrong Input! Try again, pls")
else:
form = EditPeriodForm()
return render(
request,
'periods/edit/index.html', {
'form': form
}
) | UTF-8 | Python | false | false | 3,197 | py | 3 | views.py | 2 | 0.576791 | 0.573976 | 0 | 84 | 37.071429 | 133 |
xavoliva/CAT | 19,370,302,522,772 | 15303b40f73e2bd81dd1f9b2395d3f8beba7a240 | 5c6a8cd15955f7ca5f822b17b56c37c36ca4144d | /reference/UCL/dataloaders/femnist.py | 027494456488bad185b5ca0571c759bfb9214530 | [
"MIT"
] | permissive | https://github.com/xavoliva/CAT | 57e48eb958d10f17071797645f4836ed33ae74a7 | 5f32ada1eed4bf4de4488840bd3ae7163e9dd22b | refs/heads/main | 2023-01-22T16:06:40.200292 | 2020-12-08T17:38:30 | 2020-12-08T17:38:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os,sys
import numpy as np
import torch
from torchvision import datasets,transforms
import json
from torch.utils.data import Dataset
from sklearn.utils import shuffle
from PIL import Image
########################################################################################################################
def get(seed=0,fixed_order=False,pc_valid=0.10,tasknum = 10,args=0):
data={}
taskcla=[]
size=[1,28,28]
# MNIST
mean=(0.1307,)
std=(0.3081,)
dat={}
    if 'small' in args.note:
        data_type = 'small'
    elif 'full' in args.note:
        data_type = 'full'
    else:
        raise ValueError("args.note must contain 'small' or 'full'")
train_dataset = FEMMNISTTrain(root_dir='/home/zixuan/KAN/image/dat/femnist/'+data_type+'/iid/train/',transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
dat['train'] = train_dataset
test_dataset = FEMMNISTTest(root_dir='/home/zixuan/KAN/image/dat/femnist/'+data_type+'/iid/test/',transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
dat['test'] = test_dataset
# number of example
# x = FEMMNISTTrain(root_dir='/home/zixuan/KAN/image/dat/femnist/'+data_type+'/iid/train/',transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
# y=torch.utils.data.DataLoader(x,batch_size=1,shuffle=True)
# print(len([0 for user, image, target in y]))
#
# x = FEMMNISTTest(root_dir='/home/zixuan/KAN/image/dat/femnist/'+data_type+'/iid/test/',transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
# y=torch.utils.data.DataLoader(x,batch_size=1,shuffle=True)
# print(len([0 for user, image, target in y]))
#
# number of example
#
users = [x[0] for x in set([user for user,image,target in torch.utils.data.DataLoader(dat['train'],batch_size=1,shuffle=True)])]
users.sort()
print('users: ',users)
print('users length: ',len(users))
    # each FEMNIST user forms one task; the label space has 62 classes (ncla below)
#
for task_id,user in enumerate(users):
data[task_id]={}
data[task_id]['name'] = 'femnist-'+str(user)
data[task_id]['ncla'] = 62
for s in ['train','test']:
loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=True)
for task_id,user in enumerate(users):
data[task_id][s]={'x': [],'y': []}
for user,image,target in loader:
label=target.numpy()[0]
data[users.index(user[0])][s]['x'].append(image)
data[users.index(user[0])][s]['y'].append(label)
# # "Unify" and save
for n,user in enumerate(users):
for s in ['train','test']:
data[n][s]['x']=torch.stack(data[n][s]['x']).view(-1,size[0],size[1],size[2])
data[n][s]['y']=torch.LongTensor(np.array(data[n][s]['y'],dtype=int)).view(-1)
# Validation
# for t in data.keys():
# data[t]['valid'] = {}
# data[t]['valid']['x'] = data[t]['train']['x'].clone()
# data[t]['valid']['y'] = data[t]['train']['y'].clone()
# Validation
for t in data.keys():
r=np.arange(data[t]['train']['x'].size(0))
r=np.array(shuffle(r,random_state=seed),dtype=int)
nvalid=int(pc_valid*len(r))
ivalid=torch.LongTensor(r[:nvalid])
itrain=torch.LongTensor(r[nvalid:])
data[t]['valid']={}
data[t]['valid']['x']=data[t]['train']['x'][ivalid].clone()
data[t]['valid']['y']=data[t]['train']['y'][ivalid].clone()
data[t]['train']['x']=data[t]['train']['x'][itrain].clone()
data[t]['train']['y']=data[t]['train']['y'][itrain].clone()
# # Others
n=0
for t in data.keys():
taskcla.append((t,data[t]['ncla']))
n+=data[t]['ncla']
data['ncla']=n
#
return data,taskcla,size
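# Returned structure: data[t] holds 'name', 'ncla' and 'train'/'valid'/'test'
# splits with 'x' (image tensors) and 'y' (labels); taskcla is a list of
# (task_id, n_classes) pairs and size is the input shape [channels, height, width].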
########################################################################################################################
# customize dataset class
class FEMMNISTTrain(Dataset):
"""Federated EMNIST dataset."""
def __init__(self, root_dir, transform=None):
self.transform = transform
self.size=[1,28,28]
self.x = []
self.y = []
self.user = []
for file in os.listdir(root_dir):
with open(root_dir+file) as json_file:
data = json.load(json_file) # read file and do whatever we need to do.
for key, value in data['user_data'].items():
for type, data in value.items():
if type == 'x':
self.x.append(torch.from_numpy(np.array(data)))
elif type == 'y':
self.y.append(data)
for _ in range(len(data)):
self.user.append(key)
        # number of distinct classes present in the training data
        print(len(set([b for a in self.y for b in a])))
self.x=torch.cat(self.x,0).view(-1,self.size[1],self.size[2])
self.y=torch.LongTensor(np.array([d for f in self.y for d in f],dtype=int)).view(-1).numpy()
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
user = self.user[idx]
x = self.x[idx]
y = self.y[idx]
x = x.data.numpy()
x = Image.fromarray(x)
# x = Image.fromarray((x * 255).astype(np.uint8))
if self.transform:
x = self.transform(x)
return user,x,y
class FEMMNISTTest(Dataset):
"""Federated EMNIST dataset."""
def __init__(self, root_dir, transform=None):
self.transform = transform
self.size=[1,28,28]
self.x = []
self.y = []
self.user = []
for file in os.listdir(root_dir):
with open(root_dir+file) as json_file:
data = json.load(json_file) # read file and do whatever we need to do.
for key, value in data['user_data'].items():
for type, data in value.items():
if type == 'x':
self.x.append(torch.from_numpy(np.array(data)))
elif type == 'y':
self.y.append(data)
for _ in range(len(data)):
self.user.append(key)
self.x=torch.cat(self.x,0).view(-1,self.size[1],self.size[2])
self.y=torch.LongTensor(np.array([d for f in self.y for d in f],dtype=int)).view(-1).numpy()
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
user = self.user[idx]
x = self.x[idx]
y = self.y[idx]
x = x.data.numpy()
x = Image.fromarray(x)
# x = Image.fromarray((x * 255).astype(np.uint8))
if self.transform:
x = self.transform(x)
return user,x,y
| UTF-8 | Python | false | false | 6,800 | py | 83 | femnist.py | 39 | 0.531618 | 0.521029 | 0 | 203 | 32.492611 | 190 |
rootid23/fft-py | 2,336,462,230,004 | 54feaa9d09ff553ff873071f7dc9dce0c53c34f2 | fad3016136563a930e5ec98fd69782a339c570ba | /tricks/remove-duplicates.py | acdbe0c74820af99076274f7fbae312f2d0419df | [] | no_license | https://github.com/rootid23/fft-py | cabfafbca4a3ff0d5df17586d950a4c35029e678 | 182c864ec8b9d62d40a7a91ccc323d37de1dc223 | refs/heads/master | 2022-12-10T02:15:25.583804 | 2019-05-30T10:52:26 | 2019-05-30T10:52:26 | 124,316,458 | 0 | 0 | null | false | 2022-12-07T23:48:22 | 2018-03-08T01:08:20 | 2019-05-30T10:53:00 | 2022-12-07T23:48:22 | 1,670 | 0 | 0 | 5 | Python | false | false |
# For example, given nums = [0, 1, 1, 0, 3, 3, 0, 3, 4, 5, 0, 5, 5, 12], after calling your
#function, nums should be [1, 3, 4, 5, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0].
#Move idx in forward direction
def removeDuplicates(nums) :
if(not nums) : return nums
idx = 0
prev = 0
n = len(nums)
for i in range(n) :
if(nums[i] != 0 and prev != nums[i]) :
nums[idx] = nums[i]
prev = nums[idx]
idx += 1
else :
nums[i] = 0
for i in range(idx, n) :
nums[i] = 0
return nums
print ( removeDuplicates ( [ 0, 1, 1, 0, 3, 3, 0, 3, 4, 5, 0, 5, 5, 12] ) )
| UTF-8 | Python | false | false | 587 | py | 239 | remove-duplicates.py | 199 | 0.519591 | 0.432709 | 0 | 22 | 25.590909 | 91 |
minikiller/flask-feme | 6,296,422,106,055 | 982a619bd0cd11e71808e0752aeb06a9b131ce69 | 9605f02e509ae70754c35b0fb4eda2d224ae6a64 | /app/resources/__init__.py | 18e973882067bc1f0a006adc81f7145c912d6535 | [] | no_license | https://github.com/minikiller/flask-feme | e8bfbb759dffa6ce4b61466c951d645b71161120 | 3727987b639cc17c043e97a25ac0bc95d1cb8aa1 | refs/heads/main | 2023-03-31T18:10:58.166097 | 2021-04-07T08:15:19 | 2021-04-07T08:15:19 | 340,590,583 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .example import ns as example_ns
from ..utils import PatchedApi
from flask_restful import Api
from flask import Blueprint
from .trade_views import TradeListApi, TradeApi
from .setting_views import SettingListApi, SettingApi
from .user import UserList, UserUpdate
from .role import RoleList, RoleUpdate
from .service import ListService, MDStartService, MDStopService, MEStartService, MEStopService
# api = PatchedApi()
# api.add_namespace(example_ns)
def initialize_trades(api):
api.add_resource(TradeListApi, '/trades')
api.add_resource(TradeApi, '/trade/<trade_id>')
def initialize_users(api):
# Users
api.add_resource(UserList, '/users')
api.add_resource(UserUpdate, '/user/<int:id>')
def initialize_roles(api):
# Roles
api.add_resource(RoleList, '/roles')
api.add_resource(RoleUpdate, '/role/<int:id>')
def initialize_settings(api):
    # Settings
api.add_resource(SettingListApi, '/settings')
api.add_resource(SettingApi, '/setting/<int:setting_id>')
def initialize_service(api):
# service
api.add_resource(ListService, '/ListService')
api.add_resource(MDStartService, '/MDStartService')
api.add_resource(MDStopService, '/MDStopService')
api.add_resource(MEStartService, '/MEStartService')
api.add_resource(MEStopService, '/MEStopService')
def initialize_api(api):
initialize_users(api)
initialize_roles(api)
initialize_trades(api)
initialize_settings(api)
initialize_service(api)
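# Hypothetical wiring sketch (not part of this module): create an Api on a
# blueprint and pass it in, e.g.
#   api_bp = Blueprint('api', __name__)
#   api = Api(api_bp)
#   initialize_api(api)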
| UTF-8 | Python | false | false | 1,482 | py | 29 | __init__.py | 17 | 0.732794 | 0.732794 | 0 | 50 | 28.64 | 94 |
glosoftgroup/school-erp | 3,607,772,549,776 | 7b94807240ddac667f18aa0d88e568be961a326d | 28f24439203f2996710fab379fae5fb5b354a023 | /app_dir/modules/workload/class_allocation/models.py | d43f343b994b48fa5e400d58edf6a53845d8486f | [] | no_license | https://github.com/glosoftgroup/school-erp | 18cc376f1416d368f83573cecb3fbc16184e5532 | a51bddab6fd5f804ded42d0db7539acf8fad9216 | refs/heads/master | 2021-03-30T15:33:15.233425 | 2018-08-14T15:35:30 | 2018-08-14T15:35:30 | 120,430,345 | 0 | 1 | null | false | 2018-08-14T15:35:31 | 2018-02-06T09:17:35 | 2018-08-14T10:27:50 | 2018-08-14T15:35:31 | 6,243 | 0 | 0 | 0 | JavaScript | false | null | from __future__ import unicode_literals
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.timezone import now
from django.contrib.auth import get_user_model
from app_dir.modules.academics.academic_year.models import AcademicYear
from app_dir.modules.academics.classes.models import Class
from app_dir.modules.academics.subject.models import Subject
from app_dir.modules.term.models import Term
User = get_user_model()
class ClassAllocation(models.Model):
teacher = models.ForeignKey(User)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
classTaught = models.ForeignKey(Class, on_delete=models.CASCADE)
term = models.ForeignKey(Term, on_delete=models.CASCADE)
academicYear = models.ForeignKey(AcademicYear, on_delete=models.CASCADE)
hours = models.CharField(
pgettext_lazy('ClassAllocation field', 'hours_per_week'), max_length=128)
    updated_at = models.DateTimeField(
        pgettext_lazy('ClassAllocation field', 'updated at'), auto_now=True, null=True)
    created = models.DateTimeField(pgettext_lazy('ClassAllocation field', 'created'),
                                   default=now, editable=False)

    class Meta:
        app_label = 'class_allocation'
        verbose_name = pgettext_lazy('ClassAllocation model', 'class allocation')
        verbose_name_plural = pgettext_lazy('ClassAllocation model', 'class allocations')
def __str__(self):
return str(self.teacher.username)
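# Usage sketch (hypothetical queryset; `hours` is stored as a CharField, so
# it is cast to int before summing):
#
#     allocations = ClassAllocation.objects.filter(teacher=user, term=term)
#     total_hours = sum(int(a.hours) for a in allocations)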
| UTF-8 | Python | false | false | 1,461 | py | 315 | models.py | 237 | 0.722108 | 0.720055 | 0 | 36 | 39.472222 | 81 |
silky/bell-ppls | 4,166,118,301,665 | 7b79373bbf68761aa74986f970c42d51405f7464 | 8edd63a42469bf09fcad1c1070995ceda6e49646 | /env/lib/python2.7/site-packages/observations/r/attitude.py | 22900084cbcc314043baeb3e73dcab7cc8dcae9c | [] | no_license | https://github.com/silky/bell-ppls | fa0b5418f40dab59de48b7220ff30caba5945b56 | 369e7602c810b694a70ac1e875017480c8910ac8 | refs/heads/master | 2020-04-06T08:40:28.588492 | 2018-11-01T06:51:33 | 2018-11-01T06:51:33 | 157,312,221 | 1 | 0 | null | true | 2018-11-13T03:04:18 | 2018-11-13T03:04:18 | 2018-11-13T02:32:47 | 2018-11-01T06:55:56 | 124,660 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def attitude(path):
"""The Chatterjee–Price Attitude Data
From a survey of the clerical employees of a large financial
organization, the data are aggregated from the questionnaires of the
approximately 35 employees for each of 30 (randomly selected)
departments. The numbers give the percent proportion of favourable
responses to seven questions in each department.
A data frame with 30 observations on 7 variables. The first column are
the short names from the reference, the second one the variable names in
the data frame:
+--------+--------------+-----------+-------------------------------------+
| Y | rating | numeric | Overall rating |
+--------+--------------+-----------+-------------------------------------+
| X[1] | complaints | numeric | Handling of employee complaints |
+--------+--------------+-----------+-------------------------------------+
| X[2] | privileges | numeric | Does not allow special privileges |
+--------+--------------+-----------+-------------------------------------+
| X[3] | learning | numeric | Opportunity to learn |
+--------+--------------+-----------+-------------------------------------+
| X[4] | raises | numeric | Raises based on performance |
+--------+--------------+-----------+-------------------------------------+
| X[5] | critical | numeric | Too critical |
+--------+--------------+-----------+-------------------------------------+
  | X[6]   | advance      | numeric   | Advancement                         |
+--------+--------------+-----------+-------------------------------------+
Chatterjee, S. and Price, B. (1977) *Regression Analysis by Example*.
New York: Wiley. (Section 3.7, p.68ff of 2nd ed.(1991).)
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `attitude.csv`.
Returns:
Tuple of np.ndarray `x_train` with 30 rows and 7 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'attitude.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/datasets/attitude.csv'
maybe_download_and_extract(path, url,
save_file_name='attitude.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
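# Usage sketch (the path argument is an assumption; the CSV is downloaded on
# first use):
#
#     x_train, metadata = attitude('~/data')
#     x_train.shape              # -> (30, 7)
#     list(metadata['columns'])  # rating, complaints, ..., advance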
| UTF-8 | Python | true | false | 2,958 | py | 830 | attitude.py | 823 | 0.505074 | 0.494587 | 0 | 71 | 40.633803 | 77 |
Mechachleopteryx/polya | 2,353,642,101,519 | ad50c74f884e8ed6298cec02a03ced3d7c404017 | aac95cbe821ce402ee15b7874a7462f295a8086f | /polya/main/main.py | fe06d25f869c9756f46e42e64b192a31fb397884 | [
"Apache-2.0"
] | permissive | https://github.com/Mechachleopteryx/polya | c674ec35732bc50e482fdac80cde012190d32727 | 6d611bf47185249a96f4cf7ee9b3884bc70a15ac | refs/heads/master | 2021-09-22T01:25:31.806248 | 2015-10-01T19:49:26 | 2015-10-01T19:49:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ####################################################################################################
#
# main.py
#
# Authors:
# Jeremy Avigad
# Rob Lewis
# Cody Roux
#
# Contains the main module for running the Polya inequality prover, with some prepackaged
# solving methods.
#
####################################################################################################
from __future__ import division
#import polya.main.terms as terms
import polya.main.messages as messages
import polya.modules.polyhedron.lrs as lrs
# import polya.modules.polyhedron.poly_add_module as poly_add_module
# import polya.modules.polyhedron.poly_mult_module as poly_mult_module
# import polya.modules.fourier_motzkin.fm_add_module as fm_add_module
# import polya.modules.fourier_motzkin.fm_mult_module as fm_mult_module
import polya.interface.solve_util as solve_util
import polya.interface.example as example
from terms import Var, Vars, UVar, Func, Contradiction, exp, log, minm, maxm, floor, ceil, root
from formulas import Forall, Implies, And, Or, Not
from polya.modules.polyhedron.poly_mult_module import PolyMultiplicationModule
from polya.modules.polyhedron.poly_add_module import PolyAdditionModule
from polya.modules.fourier_motzkin.fm_add_module import FMAdditionModule
from polya.modules.fourier_motzkin.fm_mult_module import FMMultiplicationModule
from polya.modules.congruence_closure_module import CongClosureModule
from polya.modules.axiom_module import AxiomModule
from polya.modules.exponential_module import ExponentialModule
from polya.modules.absolute_value_module import AbsModule
from polya.modules.minimum_module import MinimumModule
from polya.modules.nth_root_module import NthRootModule
from polya.modules.builtins_module import BuiltinsModule
from polya.main.blackboard import Blackboard
from polya.interface.example import run_examples
from polya.main.messages import set_verbosity, quiet, modules, low, normal, debug
####################################################################################################
#
# System configuration
#
####################################################################################################
solver_options = ['fm', 'poly']
default_solver = 'none'
default_split_depth = 0
default_split_breadth = 0
try:
import cdd
have_cdd = True
except Exception:
have_cdd = False
if lrs.lrs_path and lrs.redund_path and have_cdd:
default_solver = 'poly'
else:
default_solver = 'fm'
def show_configuration():
"""
Prints information about the present components at verbosity level INFO.
"""
messages.announce('', messages.INFO)
messages.announce('Welcome to the Polya inequality prover.', messages.INFO)
messages.announce('Looking for components...', messages.INFO)
if lrs.lrs_path is None:
messages.announce('lrs not found.', messages.INFO)
else:
messages.announce('lrs found (path: {0!s}).'.format(lrs.lrs_path), messages.INFO)
if lrs.redund_path is None:
messages.announce('redund not found.', messages.INFO)
else:
messages.announce('redund found (path: {0!s}).'.format(lrs.redund_path), messages.INFO)
if have_cdd:
messages.announce('cdd found.', messages.INFO)
else:
messages.announce('cdd not found.', messages.INFO)
messages.announce('', messages.INFO)
def set_solver_type(s):
"""
Sets the solver to a given method, s, in solver_options.
"""
if s in solver_options:
messages.announce('Setting solver type: {0!s}'.format(s), messages.INFO)
global default_solver
default_solver = s
else:
        messages.announce('Error: {0!s} is not in the list of possible arithmetic solvers'.format(s),
messages.INFO)
messages.announce('solver options = {0!s}'.format(solver_options), messages.INFO)
def set_split_defaults(split_depth, split_breadth):
"""
Sets the default split depth and breadth.
"""
global default_split_depth, default_split_breadth
default_split_depth, default_split_breadth = split_depth, split_breadth
####################################################################################################
#
# Prepackaged solving methods
#
####################################################################################################
def solve(*assertions):
"""
Runs the default modules on the given assertions, using default solver and split settings.
Arguments:
-- assertions: a list of TermComparisons, ie 5*x < 3*y
Returns True if the assertions are contradictory, False otherwise.
"""
return solve_util.solve(default_split_depth, default_split_breadth, default_solver, *assertions)
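# Usage sketch (the particular inequalities are assumptions; Var and the
# overloaded comparisons come from the imports above):
#
#     x = Var('x')
#     y = Var('y')
#     solve(x > 0, y > x, y < 0)   # True: the hypotheses are contradictory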
def run(B):
"""
Runs the default modules on the given Blackboard object, using default solver and split
settings.
"""
return solve_util.run(B, default_split_depth, default_split_breadth, default_solver)
def Solver(assertions=list(), terms=list(), axioms=list(), modules=list(),
split_depth=default_split_depth, split_breadth=default_split_breadth,
solver_type=default_solver):
"""
Instantiates a Solver object.
Arguments:
-- assertions: a list of TermComparisons to assert to the new Solver. Defaults to empty.
-- axioms: a list of Axioms to assert to the Solver's axiom module. Defaults to empty.
-- modules: a list of modules for the solver to use. Defaults to all available modules.
-- split_depth: How many successive (cumulative) case splits to try.
-- split_breadth: How many split options to consider.
-- solver_type: 'fm' or 'poly' arithmetic.
"""
return solve_util.Solver(split_depth, split_breadth, assertions, terms, axioms, modules,
solver_type)
def Example(hyps=None, terms=None, conc=None, axioms=None, modules=None, omit=False, comment=None,
split_depth=default_split_depth, split_breadth=default_split_breadth):
"""
Instantiates an Example object. Used to create lists of test problems.
Arguments:
-- hyps: a list of TermComparisons, the hypotheses
    -- conc: a TermComparison to try to derive. Defaults to False, i.e., show hyps
is contradictory.
-- axioms: a list of extra axioms to use.
-- modules: a list of modules to use. Defaults to all available modules.
-- omit: the example will not run if omit=True. Defaults to False.
-- comment: prints comment when the example is run.
    -- split_depth, split_breadth: as in Solver.
"""
return example.Example(hyps, terms, conc, axioms, modules, omit, comment,
split_depth, split_breadth, default_solver) | UTF-8 | Python | false | false | 6,721 | py | 40 | main.py | 28 | 0.649011 | 0.647671 | 0 | 171 | 38.309942 | 100 |
scott198510/python_map_construction | 12,146,167,542,732 | a0e305ed9b4098c3cced12cfb7da267541d9fbb5 | eacab90fc5d2d9e6255445fca369df6c2fe4137b | /src/importer/sample_hog_feature.py | 49311dd6428b521d16e447f6f06292a7adff68bc | [] | no_license | https://github.com/scott198510/python_map_construction | 9a94a3670f203f138a63d6b3fb6f4315d6320849 | ff153c6a59467c8de35d3a7d647ce7f415c54c19 | refs/heads/master | 2021-05-31T21:31:00.113005 | 2014-05-15T22:30:05 | 2014-05-15T22:30:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
Created on Wed Oct 09 16:56:33 2013
@author: ChenChen
"""
import sys
import cPickle
import math
import random
import time
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as CM
from matplotlib.collections import LineCollection
from scipy import spatial
from scipy import signal
from skimage.transform import hough_line,hough_line_peaks, probabilistic_hough_line
from skimage.filter import canny
from skimage.morphology import skeletonize
from sklearn.cluster import DBSCAN, MeanShift, estimate_bandwidth
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import gps_track
from point_cloud import PointCloud
import l1_skeleton_extraction
import const
def main():
compute_canonical_dir = False
GRID_SIZE = 2.5 # in meters
# Target location and radius
# test_point_cloud.dat
LOC = (447772, 4424300)
R = 500
# test_point_cloud1.dat
#LOC = (446458, 4422150)
#R = 500
if len(sys.argv) != 3:
print "ERROR! Correct usage is:"
print "\tpython sample_hog_feature.py [sample_point_cloud.dat] [sample_direction.dat]"
return
with open(sys.argv[1], 'rb') as fin:
sample_point_cloud = cPickle.load(fin)
with open(sys.argv[2], 'rb') as fin:
sample_directions = cPickle.load(fin)
sample_point_kdtree = spatial.cKDTree(sample_point_cloud.locations)
mean_pt = np.mean(sample_point_cloud.locations, axis=0)
K = 10
n_bin = 16
delta_angle = 360.0 / n_bin
alpha = 0.0
sample_features = []
for sample_idx in range(0, sample_point_cloud.locations.shape[0]):
dist, nb_idxs = sample_point_kdtree.query(sample_point_cloud.locations[sample_idx], K)
loc_feature = sample_point_cloud.locations[sample_idx] - mean_pt
loc_feature = loc_feature / 1000 * alpha
hog_feature = np.array([0.0]*n_bin)
for pt_idx in nb_idxs:
for direction in sample_directions[pt_idx]:
angle = np.arccos(np.dot(direction, np.array([1.0, 0.0])))
angle = np.rad2deg(angle)
if direction[1] < 0:
angle = 360 - angle
angle_bin = int(angle / delta_angle)
hog_feature[angle_bin] += 1
sum_feature = sum(hog_feature)
if sum_feature > 1e-3:
hog_feature /= sum(hog_feature)
sample_feature = np.concatenate((loc_feature, hog_feature), axis=0)
sample_features.append(sample_feature)
sample_features = np.array(sample_features)
print "There are %d samples."%len(sample_features)
# DBSCAN clustering
db = DBSCAN(eps=0.05, min_samples=5).fit(sample_features)
core_samples = db.core_sample_indices_
labels = db.labels_
# number of clusters, ignoring noise if present
unique_labels = set(labels)
## MeanShift clustering
#bandwidth = estimate_bandwidth(sample_features, quantile=0.2, n_samples=500)
#ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
#ms.fit(sample_features)
#labels = ms.labels_
#unique_labels = set(labels)
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print "There are %d clusters"%n_clusters_
horizontal_road = np.array([0.0]*(n_bin+2))
horizontal_road[0] = 0.25
horizontal_road[1] = 0.25
horizontal_road[-1] = 0.25
horizontal_road[-2] = 0.25
#distances = np.array([0.0]*sample_point_cloud.locations.shape[0])
#for sample_idx in range(0, sample_point_cloud.locations.shape[0]):
# vec = sample_features[sample_idx] - horizontal_road
# distances[sample_idx] = np.linalg.norm(vec)
#print "min distance: ",min(distances)
fig = plt.figure(figsize=const.figsize)
ax = fig.add_subplot(111, aspect='equal')
for k in unique_labels:
        markersize = 12
        if k == -1:
            # noise points (label -1) are drawn small and in black
            color = 'k'
            markersize = 6
        else:
            color = const.colors[np.random.randint(7)]
        class_members = [index[0] for index in np.argwhere(labels == k)]
        cluster_sample_locations = sample_point_cloud.locations[class_members]
        ax.plot(cluster_sample_locations[:,0],
                cluster_sample_locations[:,1],
                '.', color=color, markersize=markersize)
#ax.plot(sample_point_cloud.locations[:,0],
# sample_point_cloud.locations[:,1],
# '.', color='gray')
#error = 0.8
#ax.plot(sample_point_cloud.locations[distances<error,0],
# sample_point_cloud.locations[distances<error,1],
# 'r.')
#ax.plot(sample_point_cloud.locations[sample_idx,0],
# sample_point_cloud.locations[sample_idx,1],
# 'or')
ax.set_xlim([LOC[0]-R, LOC[0]+R])
ax.set_ylim([LOC[1]-R, LOC[1]+R])
#ax = fig.add_subplot(122)
#ax.plot(np.arange(n_bin), hog_feature, '.-')
#ax.set_xlim([0,n_bin+1])
#ax.set_ylim([-0.1,1.1])
plt.show()
return
if __name__ == "__main__":
sys.exit(main())
| UTF-8 | Python | false | false | 5,237 | py | 72 | sample_hog_feature.py | 71 | 0.609891 | 0.580867 | 0 | 157 | 31.356688 | 94 |
ikibardin/DepositBot | 9,363,028,713,576 | f6238db167b91da425c334c72e772fe255383d16 | 73929c17b65a26d67df83f4eccb7e5c55af7e0b6 | /database/converter.py | cb2abe0c54c081c3cae3af832177501d9ac99afe | [] | no_license | https://github.com/ikibardin/DepositBot | 8c50a0ef82fcfa3d6b39d3b985e5ddab3b3bbdff | 993f36eeb6564ec3e1718cd61ec74d4ca1d6ecde | refs/heads/master | 2021-01-20T13:58:09.739012 | 2017-07-20T19:23:47 | 2017-07-20T19:23:47 | 90,543,579 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
from telebot import types
import windows
import events
from bank import Bank
from constants import errorquotes
class Converter:
def __init__(self, database):
self._db = database
@staticmethod
def user_to_row(user):
if not isinstance(user, types.User):
raise TypeError(errorquotes.TYPE_ERROR.format(types.User,
type(user)))
return [user.id, user.first_name, user.last_name, user.username]
@staticmethod
def row_to_user(row):
if not isinstance(row, sqlite3.Row):
raise TypeError(errorquotes.ROW_EXPECTED.format(type(row)))
return types.User(id=row['id'],
first_name=row['first_name'],
last_name=row['last_name'],
username=row['username'])
@staticmethod
def chat_to_row(chat):
if not isinstance(chat, types.Chat):
raise TypeError(errorquotes.TYPE_ERROR.format(types.Chat,
type(chat)))
return [chat.id, chat.type, chat.title]
@staticmethod
def row_to_chat(row):
if not isinstance(row, sqlite3.Row):
raise TypeError(errorquotes.ROW_EXPECTED.format(type(row)))
return types.Chat(id=row['id'],
type=row['type'],
title=row['title'])
def event_to_row(self, event):
if not isinstance(event, events.Event):
raise TypeError(
errorquotes.TYPE_ERROR.format(events.Event, type(event)))
result = [event.datetime,
self._db._get_bank_id(event.chat_id),
event.who.id]
if event.type is events.EventType.CHANGE:
result.append('CHANGE')
elif event.type is events.EventType.SET:
result.append('SET')
else:
raise TypeError(errorquotes.UNKNOWN_EVENT.format(type(event)))
result.extend([event.number, event.description])
return result
@staticmethod
def row_to_event(row):
assert isinstance(row, sqlite3.Row)
user = types.User(
id=row['user_id'],
first_name=row['u_first_name'],
last_name=row['u_last_name'],
username=row['u_username']
)
if row['type'] == 'CHANGE':
event_type = events.EventType.CHANGE
elif row['type'] == 'SET':
event_type = events.EventType.SET
else:
raise TypeError('Unknown event type.')
result = events.Event(
chat_id=row['chat_id'],
user=user,
event_type=event_type,
number=row['what'],
description=row['descr'],
datetime_=row['datetime'],
is_deleted=row['is_deleted'],
id_=row['event_id']
)
return result
@staticmethod
def window_to_row(window):
""" Returns list of arguments of the window to be stored
in the database. """
if not isinstance(window, windows.DepositBotWindow):
raise TypeError(
errorquotes.TYPE_ERROR.format(windows.DepositBotWindow,
type(window))
)
result = [
window.user.id,
window.chat.id,
window.message_id
]
return result
def row_to_window(self, row):
""" row should be tuple-like object:
(user_id, first_name, last_name, username,
chat_id, chat_type, chat_title,
window_message_id, window_type, window_mode, window_number)"""
if not isinstance(row, sqlite3.Row):
raise TypeError(errorquotes.ROW_EXPECTED.format(type(row)))
user = types.User(id=row['user_id'],
first_name=row['first_name'],
last_name=row['last_name'],
username=row['username'])
chat = types.Chat(id=row['chat_id'],
type=row['c_type'],
title=row['c_title'])
return windows.DefaultWindow(bot=self._db._bot,
user=user,
chat=chat,
message_id=row['w_message_id'])
def row_to_bank(self, row):
if not isinstance(row, sqlite3.Row):
raise TypeError(errorquotes.ROW_EXPECTED.format(type(row)))
chat = types.Chat(id=row['c_id'],
type=row['c_type'],
title=row['c_title'])
owner = types.User(id=row['u_id'],
first_name=row['u_first_name'],
last_name=row['u_last_name'],
username=row['u_username'])
return Bank(chat, owner, self._db)
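# Round-trip sketch (hypothetical objects; `db` stands for the database
# wrapper whose private helpers are used above):
#
#     conv = Converter(db)
#     row_values = conv.user_to_row(user)     # [id, first_name, last_name, username]
#     same_user = Converter.row_to_user(row)  # `row` must be an sqlite3.Row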
| UTF-8 | Python | false | false | 4,936 | py | 33 | converter.py | 30 | 0.513371 | 0.512156 | 0 | 135 | 35.562963 | 74 |
hwalterm/DjangoProject | 13,082,470,384,280 | 02c2065b0b56e4a4ef17350acc79459de1485d9f | 6ddadcc1ffef686cc11d5f1b65656358836caf4f | /Django_Forus/blog/urls.py | 6f949396a0b1d842cfecc8386cf2becaeeae9fb2 | [] | no_license | https://github.com/hwalterm/DjangoProject | 501865c887f39894d9b144ffd070548e180dba51 | fe0ae574c7c022dd483d6c978f10af44313d0c4d | refs/heads/master | 2020-09-17T08:48:42.365297 | 2019-11-26T01:40:56 | 2019-11-26T01:40:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path, include
from django.conf.urls import url
from .views import (
PostListView, PostDetailView, PostCreateView,PostDeleteView, PostUpdateView, home)
from . import views
urlpatterns = [
path('', home, name ='blog-home'),
path('post/<int:id>/', PostDetailView, name ='post-detail'),
path('about/', views.about, name = 'blog-about'),
path('post/new/',PostCreateView.as_view(success_url="/blog/"), name = 'post-create'),
path('post/<int:pk>/delete', PostDeleteView.as_view(), name = 'post-delete'),
    path('post/<int:pk>/update', PostUpdateView.as_view(), name = 'post-update'),
path('post/reply/<int:uid>', views.replyPost, name='replyPost'),
url(r'^reply_post/([\w\-]{36})?/?$', views.replyPost, name='replyPost'),
] | UTF-8 | Python | false | false | 776 | py | 24 | urls.py | 17 | 0.667526 | 0.664948 | 0 | 17 | 44.705882 | 89 |
vlsd/nlsymb | 6,305,012,034,941 | 9a90e5f9978c6b5ab851c58d63d1693d89a9522b | ccbc35fd0307325fe7f87a898b7c750807958fb8 | /nlsymb/__init__.py | a831dd00cafc0a21fd7b0383859c976e0243fe12 | [
"MIT"
] | permissive | https://github.com/vlsd/nlsymb | 7ba5744589da096b3b4ad868e48f320c987fe9b2 | 525414cdd23b2efe97ef0436794f1fe2792c8a7d | refs/heads/master | 2021-01-01T18:54:19.966141 | 2014-10-28T03:32:30 | 2014-10-28T03:32:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sympy as sym
from functools import reduce
import time
import scipy
from scipy.integrate import ode
import scipy.interpolate
from copy import deepcopy
from timeout import TimeoutError
from termcolor import colored
# from matutils import matmult
def matmult(*x):
"""
Shortcut for standard matrix multiplication.
matmult(A,B,C) returns A*B*C.
"""
return reduce(np.dot, x)
class Trajectory():
    # a class to represent a trajectory, takes lists of points and
# returns interpolation objects (callables)
def __init__(self, *args):
# takes as arguments the names of the fields it stores
for name in args:
setattr(self, '_' + name, [])
self._t = []
self.tmax = None
self.tmin = None
self.feasible = False
# def __call__(self, t):
# evaluates at t if there is only one series stored
# TODO make sure this works; not really necessary now
# num = 0
# for k in self.__dict__.keys():
# if k[0] is not '_':
# num += 1
# key = k
# if num is 1:
# func = getattr(self, key)
# return func(t)
def addpoint(self, t, **kwargs):
        # keyword arguments in the form x=val
        if not self._t:
self.tmax = t
self.tmin = t
else:
if t > self.tmax:
self.tmax = t
if t < self.tmin:
self.tmin = t
self._t.append(t)
for name, val in kwargs.iteritems():
current = getattr(self, '_' + name)
setattr(self, '_' + name, current + [val])
def reset(self):
# used for resetting all the args to []
# does not delete interpolation objects already created
for k in self.__dict__.keys():
            if k[0] == '_':
                setattr(self, k, [])
self._t = []
def interpolate(self):
for k in self.__dict__.keys():
            if k[0] == '_' and k[1:] != 't':
ifunc = interxpolate(self._t, getattr(self, k),
axis=0, kind='slinear')
setattr(self, k[1:], ifunc)
def __add__(self, other):
if len(other._t) > len(self._t):
return other + self
names = (set(self.__dict__) & set(other.__dict__)) - {'t', '_t'}
        names = {k[1:] for k in names if k[0] == '_'}
tj = Trajectory(*names)
other.interpolate()
for t in self._t:
tj.addpoint(t, **{n: (getattr(self, n)(t) + getattr(other, n)(t))
for n in names})
tj.interpolate()
tj.feasible = False
# use the most restrictive time limits
tj.tlims = (max(self.tlims[0], other.tlims[0]),
min(self.tlims[1], other.tlims[1]))
return tj
def __neg__(self):
return -1.0*self
def __rmul__(self, scalar):
# multiplies everything by the scalar
        names = {k[1:] for k in self.__dict__.keys()
                 if k[0] == '_' and k[1:] != 't'}
out = Trajectory(*names)
for t in self._t:
out.addpoint(t, **{n: (scalar * getattr(self, n)(t))
for n in names})
out.interpolate()
out.feasible = False
out.tlims = self.tlims
return out
""" old version of add, see above for new version
def __add__(self, other):
out = deepcopy(self)
for (t, x, u) in zip(out._t, out._x, out._u):
x += direction.z(t)
u += direction.v(t)
out.interpolate()
out.feasible = False
return out
"""
def xtoq(self, s):
self._q = map(s.xtopq, self._x)
self.interpolate()
def xtonq(self, s):
self._q = map(s.xtoq, self._x)
self.interpolate()
def __getstate__(self):
temp = self.__dict__.copy()
for k in temp.keys():
            if k[0] == '_':
pass
elif k not in ['tmin', 'tmax', 'feasible']:
del temp[k]
return temp
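# Trajectory usage sketch (the field names and sample values are assumptions):
#
#     tj = Trajectory('x', 'u')
#     for t in (0.0, 0.5, 1.0):
#         tj.addpoint(t, x=np.array([t, t ** 2]), u=np.array([1.0]))
#     tj.interpolate()
#     tj.x(0.25)   # interpolated state at t = 0.25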
class LineSearch():
def __init__(self, func, grad, alpha=1, beta=1e-8):
# func takes a point
# grad takes a point and a direction
self.func = func
self.grad = grad
self.alpha = alpha
self.beta = beta
def search(self):
x = self.x
p = self.p
# grad = self.grad(x, p)
grad = 1
func = self.func(x)
gamma = self.alpha
while True:
try:
if self.func(x + gamma * p) > \
func + self.beta * gamma * grad:
gamma = gamma / 2
print("decreasing gamma to %e" % gamma)
# this will not work with the -O flag
assert gamma > 1e-15, gamma
else:
break
except TimeoutError:
gamma = gamma / 10
print("Timed out, decreasing gamma to %e" % gamma)
except OverflowError:
gamma = gamma / 10
print("Error in VODE, decreasing gamma to %e" % gamma)
self.gamma = gamma
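# LineSearch usage sketch (the quadratic objective is an assumption; note
# that the point x and direction p are set as attributes before searching):
#
#     ls = LineSearch(func=lambda x: x ** 2,
#                     grad=lambda x, p: 2 * x * p)
#     ls.x, ls.p = 3.0, -1.0
#     ls.search()    # backtracks until the sufficient-decrease test passes
#     ls.gamma       # the accepted step size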
class Timer():
def __init__(self, fmts=""):
self.fmts = fmts + " took %fs to run"
def __enter__(self):
self.start = time.time()
def __exit__(self, *args):
delta = time.time() - self.start
print(self.fmts % delta)
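# Timer usage sketch:
#
#     with Timer("integration"):
#         do_work()   # on exit prints "integration took <elapsed>s to run"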
def sysIntegrate(func, init, control=None, phi=None, debug=False,
tlims=(0, 10), jac=None, method='bdf', **kw):
"""
func(t, x, u): returns xdot
init: the value of x at tlims[0]
'tlims': (ta, tb), ta < tb
'control': a Controller() instance
'jac': jac(t, x, u) the jacobian of func. used only if provided,
not used if 'control' is provided
'method': see the 'method' argument for the 'vode' integrator
'debug': if True, prints debug statements
'phi': phi(x) that returns the distance to the switching plane
'jumps': [(tj,fj), ...] list of times and jump matrices
fj is a matrix that multiplies x at the jump time
'delfunc': delf(t, x, u) a callable that returns a jump matrix
"""
ti, tf = tlims
t, x = ([ti], [init])
solver = ode(func, jac)
solver.set_integrator('vode',
max_step=1e-2,
method=method)
solver.set_initial_value(init, ti)
if control is not None:
solver.set_f_params(control)
dim = len(init)
jumps_out = []
jumps_in = kw['jumps'] if 'jumps' in kw else []
while solver.successful() and solver.t <= tf:
solver.integrate(tf, relax=True, step=True)
xx = solver.y
if jumps_in:
for (tj, fj) in jumps_in:
if t[-1] < tj and tj < solver.t:
xx = matmult(fj,xx)
solver.set_initial_value(xx, solver.t)
x.append(xx)
t.append(solver.t)
if phi:
dp, dn = map(phi, x[-2:]) # distance prev, distance next
if dp * dn < 0: # if a crossing occured
# use interpolation (linear) to find the time
# and config at the jump
# TODO do a line search instead: scipy.optimize.brentq()
alpha = dp / (dn - dp)
tcross = t[-2] - alpha * (t[-1] - t[-2])
xcross = x[-2] - alpha * (x[-1] - x[-2])
# replace the wrong values
t[-1], x[-1] = (tcross, xcross)
# obtain jump term
if 'delfunc' in kw:
delf = kw['delfunc']
jmatrix = delf(tcross, xcross)
jumps_out.append((tcross, jmatrix))
# reset integration
solver.set_initial_value(xcross, tcross)
if debug:
print("found intersection at t=%f" % tcross)
# make the last point be exactly at tf
# xf = x[-2] + (tf - t[-2])*(x[-1] - x[-2])/(t[-1] - t[-2])
# x[-1] = xf
# t[-1] = tf
return (t[:-1], x[:-1], jumps_out)
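# sysIntegrate usage sketch (the dynamics and initial state are assumptions;
# with control=None the integrator calls func as func(t, x)):
#
#     def f(t, x):
#         return [-x[0]]                  # xdot = -x
#     t, x, jumps = sysIntegrate(f, [1.0], tlims=(0.0, 1.0))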
# a wrapper around interp1d that also extrapolates
class interxpolate(scipy.interpolate.interp1d):
def __call__(self, x):
try:
return super(interxpolate, self).__call__(x)
except ValueError as e:
# TODO make sure this is only triggered for the
# proper exception. Maybe use error numbers?
xs, ys = (self.x, self.y)
if x < xs[0] - 2e-2 or x > xs[-1] + 2e-2:
print "ERROR: Interpolation called out of bounds at time %f" % x
raise
# if it is within tolerance simply extrapolate
if x < xs[0]:
return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
raise
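# interxpolate usage sketch: identical to interp1d inside the sampled range,
# with linear extrapolation tolerated up to 2e-2 beyond either endpoint:
#
#     f = interxpolate([0.0, 1.0], [0.0, 2.0])
#     f(0.5)    # -> 1.0
#     f(1.01)   # -> 2.02, extrapolated just past the right endpoint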
| UTF-8 | Python | false | false | 9,134 | py | 17 | __init__.py | 14 | 0.491898 | 0.482921 | 0 | 294 | 30.068027 | 80 |
CodyRichter/cr_dynamic | 2,052,994,382,128 | c3732e933e222a8467b8bece4b3ecdbb51245818 | bdc92a08f3ca3c74f3b49a2b0f4ea998e2cb9116 | /reports/admin.py | efe878f36608b1a302614198f038402acc7c4d39 | [] | no_license | https://github.com/CodyRichter/cr_dynamic | fcb7f08b48b680b0645ca6824be6fa0041971bff | f326e55fad149381f0bbc497c4e9a8b620d8f8b4 | refs/heads/master | 2022-02-28T22:46:59.837432 | 2020-06-06T11:34:54 | 2020-06-06T11:34:54 | 202,457,212 | 0 | 0 | null | false | 2022-02-10T08:56:29 | 2019-08-15T02:10:07 | 2020-06-06T11:34:58 | 2022-02-10T08:56:27 | 1,499 | 0 | 0 | 8 | JavaScript | false | false | from django.contrib import admin
from .models import Post, Interaction
admin.site.register(Post)
admin.site.register(Interaction)
| UTF-8 | Python | false | false | 133 | py | 42 | admin.py | 22 | 0.81203 | 0.81203 | 0 | 7 | 18 | 37 |