repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
RL89pl/usb_test | 19,602,230,750,324 | 6780dcd89b277a5ed726eb63bfd22bfd522022e4 | d11c147586c218079940afea337fe267c7f68d5b | /usb_test.py | 67f047e37a5ae7f618cc7d5370cb7770fc0ed620 | []
| no_license | https://github.com/RL89pl/usb_test | 0e263532ecd4683647c8e3fda5e10bbed62625d0 | 094ff03a768ef6cc2d86af6140af46948b62658b | refs/heads/master | 2020-06-13T03:09:18.448770 | 2019-06-30T12:30:18 | 2019-06-30T12:30:18 | 194,513,577 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env/python3
import time, os, sys
import usb.core
import pyudev
def writetofile(filename, mysizeMB):
    """Write a test string to *filename* repeatedly until roughly
    *mysizeMB* megabytes have been written, then delete the file.

    Used as the unit of work for the disk-speed measurement below.
    """
    mystring = "The quick brown fox jumps over the lazy dog"
    writeloops = int(1000000 * mysizeMB / len(mystring))
    # 'with' guarantees the file handle is closed even when a write
    # fails mid-loop; the original open()/close() pair leaked the
    # handle on error, and its bare try/except only re-raised anyway.
    with open(filename, 'w') as f:
        for _ in range(writeloops):
            f.write(mystring)
    os.remove(filename)
##############
def diskspeedmeasure(dirname):
    """Return the measured write speed to *dirname* in MB/s.

    Method: keep writing a 1 MB test file until at least 0.5 seconds
    have elapsed, then divide the megabytes written by the elapsed time.
    """
    filesize = 1   # MB written per writetofile() call
    maxtime = 0.5  # measurement window in seconds
    filename = os.path.join(dirname, 'outputTESTING.txt')
    start = time.time()
    loopcounter = 0
    while True:
        # The original wrapped this call in a try/except whose only
        # action was `raise`; letting exceptions propagate naturally
        # is equivalent and clearer.
        writetofile(filename, filesize)
        loopcounter += 1
        diff = time.time() - start
        if diff > maxtime:
            break
    return (loopcounter * filesize) / diff
############## Start of main
if __name__ == "__main__":
    # Watch udev for USB plug events; on each insertion, mount the stick,
    # measure its write speed, and report the result on the console.
    time.sleep(3)
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    monitor.filter_by(subsystem='usb')
    # monitor.poll blocks until the next udev event; iter(..., None)
    # turns that into an endless event stream.
    for device in iter(monitor.poll, None):
        time.sleep(3)
        if device.action == 'add':
            # print("----------------------------------------")
            # print('{} podlaczone'.format(device))
            # print("----------------------------------------")
            # usb.core.find() with no filters returns the first USB
            # device found — NOTE(review): with several devices attached
            # this may not be the one that was just inserted; confirm.
            dev = usb.core.find()
            idVendor = hex(dev.idVendor)
            idProduct = hex(dev.idProduct)
            # print(idVendor)
            # print(idProduct)
            time.sleep(3)
            # Assumes the stick always shows up as /dev/sda1 — TODO confirm.
            os.system("sudo mount /dev/sda1 /media/usb")
            print("***********")
            print("Rozpoczecie")
            print("***********")
            dirname = "/media/usb"
            try:
                speed = diskspeedmeasure(dirname)
                print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                print("Predkosc zapisu na dysku: %.2f Mb na sekunde" % speed)
                print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
            except IOError as e:
                # errno 13 = EACCES: treat permission failure as a bad stick.
                if e.errno == 13:
                    print("************************************************************************")
                    print("Problem zapisu na dysku. Prawdopodobnie uszkodzone USB")
                    print("************************************************************************")
            except:
                print("************************")
                print("Cos poszlo nie tak")
                print("************************")
                raise
            os.system("sudo umount /media/usb")
            print("***********************************************")
            print("Zakonczone")
            print("***********************************************")
#lsblk
| UTF-8 | Python | false | false | 2,651 | py | 2 | usb_test.py | 1 | 0.516786 | 0.50811 | 0 | 91 | 28.131868 | 101 |
rrana/django-simple_blog | 18,811,956,770,367 | 83df158a0525fda5335b74ac6c7fae31c3e85e50 | a0453439f0a94e0baa8a25477af0e6c8605b1c69 | /project/context_processors/vars.py | 7caca2d39debd22e5ea368981a1744181de3f424 | []
| no_license | https://github.com/rrana/django-simple_blog | 82656f880f0e7f46eee04a9c90f1cf166a96331e | 1a6cd48a352db2ea5444f803af592a19e7e9e7a5 | refs/heads/master | 2021-01-09T06:00:47.158432 | 2011-10-02T20:45:03 | 2011-10-02T20:45:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import project.settings
from project.apps.blog.models import Group, Entry
def setting_vars(request):
    """Template context processor exposing project-wide settings."""
    return {
        'PROJECT_NAME': project.settings.PROJECT_NAME,
        'TITLE': project.settings.TITLE,
    }
def menu_vars(request):
    """Template context processor exposing all blog groups, ordered by name."""
    return {'menu': Group.objects.all().order_by('name')}
def draft_count_vars(request):
return {
'draft_count': Entry.objects.filter(author = request.user.id, draft = True).order_by('-date_pub').count(),
} | UTF-8 | Python | false | false | 536 | py | 30 | vars.py | 17 | 0.619403 | 0.617537 | 0 | 23 | 22.347826 | 114 |
nicktang1983/python | 6,768,868,498,427 | c08c02c3b9665ba5a317e215f6800b745810bf43 | 0098258043540a0e9e00137d7ef14a02c1c5026e | /py_module/src/importBackend.py | 30f121ac8e80760397cb0b316d2db70860c72424 | []
| no_license | https://github.com/nicktang1983/python | 60c3a9031eb795fe71fd6c97fbe806f7cda948ed | aa6ba97e7f5756dc21cf058330b091538d2adc73 | refs/heads/master | 2018-02-22T09:44:57.318786 | 2017-05-13T05:08:27 | 2017-05-13T05:08:27 | 50,985,832 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Author: nick_tang
# @Date: 2016-10-12 11:36:27
# @Last Modified by: nick_tang
# @Last Modified time: 2017-02-04 21:15:31
import sys
from pprint import pprint
import fibo
import moduleA
from moduleB import *
# The imports above bind sys, fibo, moduleA and (via the star import)
# everything moduleB exports into this module's namespace.
print('-'*30)
print('Current namespaces')
pprint(locals())
# Imported modules are registered in sys.modules; CPython also writes
# cached bytecode (pyc) files for them on first import.
print('-'*30)
print('Current sys.modules')
print(sys.modules['sys'])
print(sys.modules['fibo'])
print(sys.modules['moduleA'])
print(sys.modules['moduleB'])
| UTF-8 | Python | false | false | 565 | py | 332 | importBackend.py | 208 | 0.699115 | 0.640708 | 0 | 24 | 22.5 | 52 |
pmdproject2020/UploadFile_FlaskApp | 9,887,014,718,691 | d7be8918d0c7a559e8a009127568e9ead46030d8 | 4d087c23662299e42d15afd2535b2f3781f2fa54 | /img_upload_working.py | 1f3c0070df7f9fef902c3286c806e1f3bfdccdd1 | []
| no_license | https://github.com/pmdproject2020/UploadFile_FlaskApp | 8e0810fe0f6a46f959eab2af7e386c44cb0be91c | 2848b0d4ee53a0350c4b842ee01483e6a0255eaf | refs/heads/master | 2020-12-21T07:48:40.244062 | 2020-01-26T19:26:14 | 2020-01-26T19:26:14 | 236,363,800 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""img_upload_working.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WY-HtFC_SjCjFKoTRafXSqjzwApFJvRU
"""
# NOTE: '!pip' is IPython/Colab cell magic, not Python — this file was
# exported from a notebook and only runs unmodified inside one.
!pip install flask-ngrok
from flask import Flask , render_template, request
from flask_ngrok import run_with_ngrok
from os.path import join as pathJoin
from os.path import getsize as fSize
import os
app=Flask(__name__)
app.config['SECRET_KEY']="Dhusss Shala"
# Absolute upload directory (Colab filesystem path).
app.config['IMAGE_UPLOAD']="/content/static/uploaded_images"
path_to_img=app.config['IMAGE_UPLOAD']
# Expose the dev server through a public ngrok tunnel.
run_with_ngrok(app)
@app.route('/', methods=["POST", "GET"])
def index_upload():
    """Serve the upload form (GET) and handle image uploads (POST).

    On a successful upload the saved image is rendered via img.html;
    otherwise the form is re-rendered with an error message.
    """
    if request.method == "POST":
        # .get() avoids a KeyError when the 'myImage' field is missing
        # entirely (the original indexed request.files['myImage'] directly,
        # which would 500 on a malformed POST).
        uploaded = request.files.get('myImage')
        if uploaded is not None and uploaded.filename != '':
            print(uploaded)  # debug output kept from the original
            uploaded.save(pathJoin(app.config['IMAGE_UPLOAD'], uploaded.filename))
            # Relative path so the template's <img src> resolves into
            # the static folder.
            path = pathJoin("../static/uploaded_images", uploaded.filename)
            return render_template("img.html", fileName=path)
        # Missing or empty file: ask the user to upload again.
        return render_template("index.html", val=False, msg="Plz Upload Again", sz=0)
    return render_template('index.html', msg="Upload The Image", sz=0)
"""
@app.route("/showImg")
def show_img(filename):
path=pathJoin(app.config['IMAGE_UPLOAD'],filename)
render_template('img.html',fileName=path)
"""
if __name__=="__main__":
    # run_with_ngrok wraps app.run, so this also opens the public tunnel.
    app.run()
    #os.rmdir(pathJoin(path_to_img))
| UTF-8 | Python | false | false | 1,709 | py | 1 | img_upload_working.py | 1 | 0.691047 | 0.684026 | 0 | 54 | 30.62963 | 150 |
yijxiang/flack | 15,109,694,965,152 | 233f729fbb1da3ef861f85248ca216e63f8529a3 | 0383eec4050f449903ac572384fb80b7c1f33c67 | /flack.py | 412f31ac05f2610ab482aaa0a552b4f62634e6de | [
"MIT"
]
| permissive | https://github.com/yijxiang/flack | d7777415d3c71530c6c605953efcd32c6ace07cc | d14e5328a96abd735f73d7eaf0ce256e8c5a5c51 | refs/heads/master | 2021-05-31T11:45:46.889053 | 2016-04-13T04:26:53 | 2016-05-11T19:21:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import binascii
import os
import threading
import time
from flask import Flask, render_template, url_for as _url_for, request, \
abort, jsonify, g
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from flask_bootstrap import Bootstrap
from werkzeug.security import generate_password_hash, check_password_hash
from markdown import markdown
import bleach
from bs4 import BeautifulSoup
import requests
# Directory containing this file; used to locate the SQLite database.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# NOTE(review): hard-coded secret key — fine for a demo, but a deployed
# instance should load it from the environment.
app.config['SECRET_KEY'] = '51f52814-0071-11e6-a247-000ec6c2372c'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
    'DATABASE_URL', 'sqlite:///' + os.path.join(basedir, 'db.sqlite'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Flask extensions
db = SQLAlchemy(app)
Bootstrap(app)
# Authentication objects for username/password auth, token auth, and a
# token optional auth that is used for open endpoints.
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth('Bearer')
token_optional_auth = HTTPTokenAuth('Bearer')
# We use a list to calculate requests per second (see before_request /
# get_stats below): one timestamp per request, pruned to a 15 s window.
request_stats = []
def timestamp():
    """Return the current Unix time, truncated to whole seconds."""
    now = time.time()
    return int(now)
def url_for(*args, **kwargs):
    """Wrapper around Flask's url_for that produces external URLs by default."""
    kwargs.setdefault('_external', True)
    return _url_for(*args, **kwargs)
class User(db.Model):
    """The User model.

    Stores credentials (a password hash plus an optional bearer token)
    and presence information (``last_seen_at``/``online``) for the chat.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # All timestamp columns hold integer Unix times (see timestamp()).
    created_at = db.Column(db.Integer, default=timestamp)
    updated_at = db.Column(db.Integer, default=timestamp, onupdate=timestamp)
    last_seen_at = db.Column(db.Integer, default=timestamp)
    nickname = db.Column(db.String(32), nullable=False, unique=True)
    password_hash = db.Column(db.String(256), nullable=False)
    token = db.Column(db.String(64), nullable=True, unique=True)
    online = db.Column(db.Boolean, default=True)
    messages = db.relationship('Message', lazy='dynamic', backref='user')

    @property
    def password(self):
        # Write-only attribute: the plaintext password is never stored.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
        self.token = None  # if user is changing passwords, also revoke token

    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_token(self):
        """Creates a 64 character long randomly generated token."""
        self.token = binascii.hexlify(os.urandom(32)).decode('utf-8')
        return self.token

    def ping(self):
        """Marks the user as recently seen and online."""
        self.last_seen_at = timestamp()
        self.online = True

    @staticmethod
    def create(data):
        """Create a new user from a request payload dictionary."""
        user = User()
        user.from_dict(data, partial_update=False)
        return user

    def from_dict(self, data, partial_update=True):
        """Import user data from a dictionary.

        When *partial_update* is False, a missing field aborts the
        request with a 400 error.
        """
        for field in ['nickname', 'password']:
            try:
                setattr(self, field, data[field])
            except KeyError:
                if not partial_update:
                    abort(400)

    def to_dict(self):
        """Export user to a dictionary (the API's JSON representation)."""
        return {
            'id': self.id,
            'created_at': self.created_at,
            'updated_at': self.updated_at,
            'nickname': self.nickname,
            'last_seen_at': self.last_seen_at,
            'online': self.online,
            '_links': {
                'self': url_for('get_user', id=self.id),
                'messages': url_for('get_messages', user_id=self.id),
                'tokens': url_for('new_token')
            }
        }

    @staticmethod
    def find_offline_users():
        """Find users that haven't been active and mark them as offline."""
        # A user counts as gone after 60 seconds without a ping().
        users = User.query.filter(User.last_seen_at < timestamp() - 60,
                                  User.online == True).all()  # noqa
        for user in users:
            user.online = False
            db.session.add(user)
        db.session.commit()
class Message(db.Model):
    """The Message model.

    ``source`` is the user-supplied Markdown; ``html`` is the rendered,
    sanitized version kept in sync by the on_changed_source event.
    """
    __tablename__ = 'messages'
    id = db.Column(db.Integer, primary_key=True)
    created_at = db.Column(db.Integer, default=timestamp)
    updated_at = db.Column(db.Integer, default=timestamp, onupdate=timestamp)
    source = db.Column(db.Text, nullable=False)
    html = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    @staticmethod
    def create(data):
        """Create a new message authored by the currently logged-in user."""
        msg = Message(user=g.current_user)
        msg.from_dict(data, partial_update=False)
        return msg

    def from_dict(self, data, partial_update=True):
        """Import message data from a dictionary.

        When *partial_update* is False, a missing field aborts the
        request with a 400 error.
        """
        for field in ['source']:
            try:
                setattr(self, field, data[field])
            except KeyError:
                if not partial_update:
                    abort(400)

    def to_dict(self):
        """Export message to a dictionary (the API's JSON representation)."""
        return {
            'id': self.id,
            'created_at': self.created_at,
            'updated_at': self.updated_at,
            'source': self.source,
            'html': self.html,
            'user_id': self.user.id,
            '_links': {
                'self': url_for('get_message', id=self.id),
                'user': url_for('get_user', id=self.user.id)
            }
        }

    @staticmethod
    def on_changed_source(target, value, oldvalue, initiator):
        """SQLAlchemy event that automatically renders the message to HTML."""
        # first render markdown to HTML with a tag whitelist
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

        # next see if we have any URLs that we can expand
        for link in BeautifulSoup(target.html, 'html5lib').select('a'):
            url = link.get('href', '')
            try:
                # A timeout keeps a slow remote host from blocking the
                # request that posted the message indefinitely (the
                # original call had no timeout).  RequestException also
                # covers timeouts, invalid URLs and SSL failures, not
                # just refused connections.
                rv = requests.get(url, timeout=5)
            except requests.exceptions.RequestException:
                continue
            if rv.status_code == 200:
                soup = BeautifulSoup(rv.text, 'html5lib')
                title_tags = soup.select('title')
                if len(title_tags) > 0:
                    title = title_tags[0].string.strip()
                else:
                    title = url
                description = 'No description found.'
                for meta in soup.select('meta'):
                    if meta.get('name', '').lower() == 'description':
                        description = meta.get('content', description).strip()
                        break
                # add the detail of the link to the rendered message
                tpl = ('<blockquote><p><a href="{url}">{title}</a></p>'
                       '<p>{desc}</p></blockquote>')
                target.html += tpl.format(url=url, title=title,
                                          desc=description)
# Re-render the sanitized HTML whenever a message's Markdown source changes.
db.event.listen(Message.source, 'set', Message.on_changed_source)
@basic_auth.verify_password
def verify_password(nickname, password):
    """Password verification callback; marks the user online on success."""
    user = User.query.filter_by(nickname=nickname).first()
    if user is not None and user.verify_password(password):
        user.ping()
        db.session.add(user)
        db.session.commit()
        g.current_user = user
        return True
    return False
@basic_auth.error_handler
def password_error():
    """Return a 401 error to the client."""
    # To avoid login prompts in the browser, use the "Bearer" realm.
    body = jsonify({'error': 'authentication required'})
    headers = {'WWW-Authenticate': 'Bearer realm="Authentication Required"'}
    return body, 401, headers
@token_auth.verify_token
def verify_token(token):
    """Token verification callback; marks the user online on success."""
    user = User.query.filter_by(token=token).first()
    if user is not None:
        user.ping()
        db.session.add(user)
        db.session.commit()
        g.current_user = user
        return True
    return False
@token_auth.error_handler
def token_error():
    """Return a 401 error to the client."""
    headers = {'WWW-Authenticate': 'Bearer realm="Authentication Required"'}
    return jsonify({'error': 'authentication required'}), 401, headers
@token_optional_auth.verify_token
def verify_optional_token(token):
    """Alternative token authentication that allows anonymous logins."""
    if token != '':
        # A token was supplied, so it has to be a valid one.
        return verify_token(token)
    # No token: treat the caller as anonymous and let the request through.
    g.current_user = None
    return True
@app.before_first_request
def before_first_request():
    """Start a background thread that looks for users that leave."""
    def find_offline_users():
        # Poll every 5 s; each pass flags users idle for more than 60 s.
        while True:
            User.find_offline_users()
            db.session.remove()  # release this thread's DB session between passes
            time.sleep(5)

    if not app.config['TESTING']:
        # NOTE(review): the thread is not a daemon, so the interpreter
        # cannot exit while it runs — confirm this is intentional.
        thread = threading.Thread(target=find_offline_users)
        thread.start()
@app.before_request
def before_request():
    """Record this request and drop stats older than 15 seconds."""
    now = timestamp()
    cutoff = now - 15
    while request_stats and request_stats[0] < cutoff:
        request_stats.pop(0)
    request_stats.append(now)
@app.route('/')
def index():
    """Serve client-side application."""
    # The single-page app in templates/index.html drives the /api endpoints.
    return render_template('index.html')
@app.route('/api/users', methods=['POST'])
def new_user():
    """
    Register a new user.
    This endpoint is publicly available.
    """
    user = User.create(request.get_json() or {})
    if User.query.filter_by(nickname=user.nickname).first() is not None:
        abort(400)  # nickname is already taken
    db.session.add(user)
    db.session.commit()
    response = jsonify(user.to_dict())
    response.status_code = 201
    response.headers['Location'] = url_for('get_user', id=user.id)
    return response
@app.route('/api/users', methods=['GET'])
@token_optional_auth.login_required
def get_users():
    """
    Return list of users.
    This endpoint is publicly available, but if the client has a token it
    should send it, as that indicates to the server that the user is online.
    """
    query = User.query.order_by(User.updated_at.asc(), User.nickname.asc())
    online_arg = request.args.get('online')
    if online_arg:
        # Anything other than '0' means "online users only".
        query = query.filter_by(online=(online_arg != '0'))
    since_arg = request.args.get('updated_since')
    if since_arg:
        query = query.filter(User.updated_at > int(since_arg))
    return jsonify({'users': [u.to_dict() for u in query.all()]})
@app.route('/api/users/<id>', methods=['GET'])
@token_optional_auth.login_required
def get_user(id):
    """
    Return a user.
    This endpoint is publicly available, but if the client has a token it
    should send it, as that indicates to the server that the user is online.
    """
    user = User.query.get_or_404(id)
    return jsonify(user.to_dict())
@app.route('/api/users/<id>', methods=['PUT'])
@token_auth.login_required
def edit_user(id):
    """
    Modify an existing user.
    This endpoint is requires a valid user token.
    Note: users are only allowed to modify themselves.
    """
    user = User.query.get_or_404(id)
    # Users may only edit their own account.
    if user != g.current_user:
        abort(403)
    user.from_dict(request.get_json() or {})
    db.session.add(user)
    db.session.commit()
    return '', 204
@app.route('/api/tokens', methods=['POST'])
@basic_auth.login_required
def new_token():
    """
    Request a user token.
    This endpoint is requires basic auth with nickname and password.
    """
    # Reuse the existing token when one was issued earlier; only
    # generate (and persist) a new one if none exists yet.
    if g.current_user.token is None:
        g.current_user.generate_token()
        db.session.add(g.current_user)
        db.session.commit()
    return jsonify({'token': g.current_user.token})
@app.route('/api/tokens', methods=['DELETE'])
@token_auth.login_required
def revoke_token():
    """
    Revoke a user token.
    This endpoint is requires a valid user token.
    """
    # Clearing the token invalidates it; a new one must be requested
    # via POST /api/tokens.
    g.current_user.token = None
    db.session.add(g.current_user)
    db.session.commit()
    return '', 204
@app.route('/api/messages', methods=['POST'])
@token_auth.login_required
def new_message():
    """
    Post a new message.
    This endpoint is requires a valid user token.
    """
    message = Message.create(request.get_json() or {})
    db.session.add(message)
    db.session.commit()
    response = jsonify(message.to_dict())
    response.status_code = 201
    response.headers['Location'] = url_for('get_message', id=message.id)
    return response
@app.route('/api/messages', methods=['GET'])
@token_optional_auth.login_required
def get_messages():
    """
    Return list of messages.
    This endpoint is publicly available, but if the client has a token it
    should send it, as that indicates to the server that the user is online.
    """
    oldest_allowed = timestamp() - 24 * 60 * 60
    since = int(request.args.get('updated_since', '0'))
    # Never return more than a day's worth of history.
    since = max(since, oldest_allowed)
    query = Message.query.filter(Message.updated_at > since).order_by(
        Message.updated_at)
    return jsonify({'messages': [m.to_dict() for m in query.all()]})
@app.route('/api/messages/<id>', methods=['GET'])
@token_optional_auth.login_required
def get_message(id):
    """
    Return a message.
    This endpoint is publicly available, but if the client has a token it
    should send it, as that indicates to the server that the user is online.
    """
    message = Message.query.get_or_404(id)
    return jsonify(message.to_dict())
@app.route('/api/messages/<id>', methods=['PUT'])
@token_auth.login_required
def edit_message(id):
    """
    Modify an existing message.
    This endpoint is requires a valid user token.
    Note: users are only allowed to modify their own messages.
    """
    message = Message.query.get_or_404(id)
    # Only the author may edit a message.
    if message.user != g.current_user:
        abort(403)
    message.from_dict(request.get_json() or {})
    db.session.add(message)
    db.session.commit()
    return '', 204
@app.route('/stats', methods=['GET'])
def get_stats():
    """Return the average request rate over the last 15 seconds."""
    rate = len(request_stats) / 15
    return jsonify({'requests_per_second': rate})
if __name__ == '__main__':
    # Create tables on first run, then serve on all interfaces.
    db.create_all()
    app.run(host='0.0.0.0', debug=True)
| UTF-8 | Python | false | false | 14,442 | py | 5 | flack.py | 3 | 0.614389 | 0.60601 | 0 | 448 | 31.236607 | 78 |
Aliersh/thinkcspy | 19,404,662,249,375 | 744d461ddde210814a7cbc1ef03e575eb916f068 | c5fe05b1295c5425e1640c2b581767f6864714c9 | /Chapter_8/ex_8_11.py | 86b01f85020142e80774a6039a825254f659896e | []
| no_license | https://github.com/Aliersh/thinkcspy | 52be2508db5cc824d5c87b491f4ecbf5783084bb | 3f8bdd8978d0e902c255e3bf04090c54fcd33956 | refs/heads/master | 2022-11-12T03:00:17.469349 | 2020-07-01T01:27:04 | 2020-07-01T01:27:04 | 271,703,063 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''Write a function to uniformly enlarge an image by a factor of 2 (double the size).'''
# I couldn't solve this exercise myself; the reference answer is below.
import image
def double(oldimage, factor=2):
    """Return a copy of *oldimage* uniformly enlarged by *factor*.

    Each source pixel becomes a factor x factor block in the result.
    The default factor of 2 preserves the original behavior (doubling).
    """
    oldw = oldimage.getWidth()
    oldh = oldimage.getHeight()
    newim = image.EmptyImage(oldw * factor, oldh * factor)
    for row in range(oldh):
        for col in range(oldw):
            pixel = oldimage.getPixel(col, row)
            # Replicate the pixel across its enlarged block.
            for dr in range(factor):
                for dc in range(factor):
                    newim.setPixel(factor * col + dc, factor * row + dr, pixel)
    return newim
# Load the sample image, open a window twice its size, and draw the
# enlarged copy into it.
img = image.Image("luther.jpg")
win = image.ImageWin(img.getWidth()*2, img.getHeight()*2)
bigimg = double(img)
bigimg.draw(win)
win.exitonclick() | UTF-8 | Python | false | false | 778 | py | 66 | ex_8_11.py | 66 | 0.641388 | 0.619537 | 0 | 29 | 25.862069 | 88 |
restato/Algorithms | 12,850,542,177,601 | 5dad4c44686873e788db766a7c079261a6aad32d | c1049c60bd1dc8d68d14c32a0ba766f10ca7e42c | /Tree/populating_next_right_pointers_in_each_node(level).py | d8b5fd2a769215bea5304bc886a87ef2e067a09d | []
| no_license | https://github.com/restato/Algorithms | 0bb109943e41e1549b52a0cabe07446c06ebad90 | 2536cf3d7cd248088d37e5ddcd4470a6333305ab | refs/heads/master | 2022-04-28T15:25:39.315352 | 2022-03-22T15:35:01 | 2022-03-22T15:35:01 | 168,792,210 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://leetcode.com/problems/populating-next-right-pointers-in-each-node
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
    def level_order_traversal(self, root):
        """Connect each node's `next` pointer to its right neighbour on the
        same level (None for the rightmost node) and return the root.

        Single-pass BFS: within each level, the previously popped node's
        `next` is pointed at the current node.  This replaces the original
        two-queue + per-level dict approach, which kept every node of the
        tree in memory and used `== None` instead of `is None`.
        """
        if root is None:
            return None
        queue = deque([root])
        while queue:
            level_size = len(queue)
            prev = None
            for _ in range(level_size):
                node = queue.popleft()
                if prev is not None:
                    prev.next = node
                prev = node  # rightmost node's next stays None
                if node.left is not None:
                    queue.append(node.left)
                if node.right is not None:
                    queue.append(node.right)
        return root

    def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':
        """LeetCode entry point: populate next-right pointers."""
        return self.level_order_traversal(root)
| UTF-8 | Python | false | false | 1,536 | py | 27 | populating_next_right_pointers_in_each_node(level).py | 26 | 0.50651 | 0.5 | 0 | 49 | 30.346939 | 101 |
isaintnik/ml-rank | 16,269,336,130,522 | dfda8b6f3d96724888588654fb17de2e4cd58bab | 137df3d0c74ba6d8a0cde625b3ab5310497d2775 | /ffs_service.py | a1a3061f4bdbcf2e09f87666c9b078202bed1322 | []
| no_license | https://github.com/isaintnik/ml-rank | d79311ab74854e366cc65884e8de218e4e0f2069 | e97121db61d5b29d94c7383b60d113233d739a21 | refs/heads/master | 2020-04-01T09:43:20.710492 | 2020-02-06T12:02:35 | 2020-02-06T12:02:35 | 153,087,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import warnings
import time
from functools import partial
from mlrank.preprocessing.dichotomizer import DichotomizationImpossible
from mlrank.submodular.metrics import log_likelihood_regularized_score_multiplicative_balanced
from mlrank.submodular.optimization.ffs_parallel import ForwardFeatureSelectionCompositeClient
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore" # Also affect subprocesses
import numpy as np
from sklearn.externals import joblib
from itertools import product
from mlrank.benchmarks.holdout_bench import HoldoutBenchmark
from mlrank.benchmarks.traintest_bench import TrainTestBenchmark
from config import (
ALGO_PARAMS,
HYPERPARAMS
)
def benchmark_holdout(
    service_eval_host: str,
    service_eval_port: str,
    service_store_host: str,
    service_store_port: str,
    dataset,
    decision_function,
    lambda_param,
    bins,
):
    """Run the holdout feature-selection benchmark for one dataset /
    decision-function / hyperparameter combination and return its result.

    NOTE(review): service_store_host / service_store_port are accepted
    but never used in this body — confirm whether they should be wired
    into the optimizer.
    """
    # Materialize and cache the dataset's features before benchmarking.
    dataset['data'].load_from_folder()
    dataset['data'].process_features()
    dataset['data'].cache_features()

    if bins >= dataset['data'].get_target().size * 0.8 + 1:
        # NOTE(review): `key` here is a global set in the __main__ loop,
        # not a parameter of this function — fragile if called elsewhere.
        print(key, bins, 'very small dataset for such dichtomization.')
        raise DichotomizationImpossible(bins, int(dataset['data'].get_target().size * 0.8))

    dfunc = decision_function['classification']
    # Bind the regularization strength into the score function.
    score_function = partial(log_likelihood_regularized_score_multiplicative_balanced, _lambda=lambda_param)

    bench = HoldoutBenchmark(
        ForwardFeatureSelectionCompositeClient(
            server_clc=service_eval_host,
            port_clc=service_eval_port,
            decision_function=decision_function['type'],
            score_function=score_function,
            n_bins=bins,
            train_share=0.9,
            n_cv_ffs=8,
        ),
        decision_function=dfunc,
        requires_linearisation=decision_function['type'] != 'gbdt',
        n_holdouts=100,
        n_jobs=24
    )

    return bench.benchmark(dataset['data'])
def benchmark_train_test(
    service_eval_host: str,
    service_eval_port: str,
    dataset,
    decision_function,
    lambda_param,
    bins,
):
    """Run the fixed train/test feature-selection benchmark for one
    dataset / decision-function / hyperparameter combination, print the
    elapsed wall-clock time, and return the benchmark result.
    """
    # Materialize and cache the pre-split train/test features.
    dataset['data'].load_train_from_file()
    dataset['data'].load_test_from_file()
    dataset['data'].process_features()
    dataset['data'].cache_features()

    y_train = dataset['data'].get_train_target()

    if bins >= y_train.size * 0.8 + 1:
        # NOTE(review): `key` here is a global set in the __main__ loop,
        # not a parameter of this function — fragile if called elsewhere.
        print(key, bins, 'very small dataset for such dichtomization.')
        raise DichotomizationImpossible(bins, int(y_train.size * 0.8))

    # Bind the regularization strength into the score function.
    score_function = partial(log_likelihood_regularized_score_multiplicative_balanced, _lambda=lambda_param)

    bench = TrainTestBenchmark(
        optimizer=ForwardFeatureSelectionCompositeClient(
            server_clc=service_eval_host,
            port_clc=service_eval_port,
            decision_function=decision_function['type'],
            score_function=score_function,
            n_bins=bins,
            train_share=0.9,
            n_cv_ffs=8,
        ),
        decision_function=decision_function['classification'],
        requires_linearisation=decision_function['type'] != 'gbdt'
    )

    start_time = time.time()
    result = bench.benchmark(dataset['data'])
    print("--- %s seconds ---" % (time.time() - start_time))

    return result
EVAL_SERVICE_HOST = '35.193.250.1'
EVAL_SERVICE_PORT = '5001'


if __name__ == '__main__':
    np.random.seed(42)

    # Smoke-test that the output folder is writable before a long run.
    joblib.dump('test', "./data/testdoc.bin")

    results = {}

    for dataset, decision_function in product([ALGO_PARAMS['dataset'][1]], ALGO_PARAMS['decision_function']):
        dfunc = decision_function[dataset['problem']]
        key = "{}, {}".format(dataset['name'], dfunc.__class__.__name__)
        results[key] = list()

        print('>>', key)

        for lambda_param, bins in product(HYPERPARAMS['lambda'], HYPERPARAMS['bins']):
            print('>> >>', lambda_param, bins)

            if decision_function['type'] not in dataset['supported']:
                continue

            predictions = None
            try:
                if dataset['type'] == 'holdout':
                    # BUG FIX: benchmark_holdout requires the evaluation and
                    # store service endpoints as its first four arguments;
                    # the original call omitted them and raised a TypeError.
                    # The store endpoint is currently unused inside
                    # benchmark_holdout, so the eval endpoint is passed for
                    # both — TODO confirm the intended store host/port.
                    predictions = benchmark_holdout(
                        EVAL_SERVICE_HOST,
                        EVAL_SERVICE_PORT,
                        EVAL_SERVICE_HOST,
                        EVAL_SERVICE_PORT,
                        dataset,
                        decision_function,
                        lambda_param,
                        bins
                    )
                elif dataset['type'] == 'train_test':
                    predictions = benchmark_train_test(
                        EVAL_SERVICE_HOST,
                        EVAL_SERVICE_PORT,
                        dataset,
                        decision_function,
                        lambda_param,
                        bins
                    )
                else:
                    print('unknown target type')
            except DichotomizationImpossible as e:
                print(str(e))
                continue

            results[key].append({
                'bins': bins,
                'lambda': lambda_param,
                'result': predictions
            })

            joblib.dump(results, f"./data/{dataset['name']}_composite_gbdt_1.bin")
| UTF-8 | Python | false | false | 5,054 | py | 41 | ffs_service.py | 39 | 0.601702 | 0.594183 | 0 | 163 | 30.006135 | 109 |
beccae1225/Python_Challenge | 790,274,019,278 | e016ded78469c911d0de4de0aae7420a5420996f | 80168b068e4802328b808a98aab2826b20fa7299 | /PyPoll/main_redo.py | af9bcb916192db1bba3f56a8d56e21c2309be369 | []
| no_license | https://github.com/beccae1225/Python_Challenge | 5a58e0879e380b4e54d758fd04e601badeede157 | f2e0a94da88e863a550df2696500f2acb20f68a0 | refs/heads/master | 2022-11-13T07:13:51.690982 | 2020-07-11T19:54:37 | 2020-07-11T19:54:37 | 273,612,173 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import csv

# Raw ballot data: columns are Voter ID, County, Candidate.
election_data_csv = os.path.join("Resources", "election_data.csv")

total_votes = 0
candidate_list = []
unique_list = []

with open(election_data_csv) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    csv_header = next(csvreader)  # skip the header row
    for row in csvreader:
        total_votes += 1
        candidate_list.append(row[2])
        if row[2] not in unique_list:
            unique_list.append(row[2])

print("Election Results\n")
print("---------------------------\n")
print(f"Total Votes: {total_votes}\n")
print("---------------------------\n")

# Tally votes and percentages per candidate, remembering each
# candidate's own numbers for the file report below.
results = []
max_vote_count = 0
winner = None
for candidate in unique_list:
    votes_per_person = candidate_list.count(candidate)
    if votes_per_person > max_vote_count:
        max_vote_count = votes_per_person
        winner = candidate
    percent = round(((votes_per_person / total_votes) * 100), 5)
    results.append((candidate, percent, votes_per_person))
    print(candidate + ": " + str(percent) + "% (" + str(votes_per_person) + ")" + "\n")

print("-----------------------------\n")
print(f"The winner is: {winner}")
print("-----------------------------\n")

output_file = os.path.join("Analysis", "Vote_results.txt")
with open(output_file, "w") as datafile:
    datafile.write(f"Election Results\n")
    datafile.write(f"----------------------\n")
    datafile.write(f"Total Votes: {total_votes}\n")
    # BUG FIX: the original wrote the loop index and the *last*
    # candidate's percent/count on every line; write each candidate's
    # own name and tally instead.
    for candidate, percent, votes_per_person in results:
        datafile.write(f"{candidate} : {percent} % ( {votes_per_person})\n")
    datafile.write(f"--------------------")
datafile.write(f"The winner is: {winner}") | UTF-8 | Python | false | false | 1,778 | py | 4 | main_redo.py | 3 | 0.559618 | 0.552306 | 0 | 65 | 26.369231 | 79 |
minimav/project-euler | 7,284,264,564,987 | 4b083d48291dc029cbb760c4d37307f67b0e11fa | 90e44c7afbf262e4baf9f4e8fd6c40c16008b12b | /old_solutions/projecteuler20.py | 781922008e9548ac8ab98c39a90a9d9d574d1378 | []
| no_license | https://github.com/minimav/project-euler | 980474227a4534a45cbc10b54ebb064e4ea6df48 | ddcbd7d874466e61d33913826ebd2ccbf9f1520d | refs/heads/master | 2015-08-11T13:48:38.811962 | 2015-06-24T21:18:02 | 2015-06-24T21:18:02 | 19,576,426 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import *
import time
s = time.time()
def fac(n):
    """Return n! computed recursively.

    Bug fix: the original only treated n == 1 as the base case, so
    fac(0) recursed forever (RecursionError).  Both 0! and 1! are 1,
    so any n <= 1 terminates the recursion.
    """
    if n <= 1:
        return 1
    return n * fac(n - 1)
# Project Euler 20: sum of the digits of 100!.
# Bug fixes: the original loop ran `while i < 100`, leaving f[-1] == 99!
# (one factorial short of what the problem asks for), re-derived every
# factorial with the recursive fac() (quadratic work, plus a discarded
# duplicate call each iteration), and shadowed the builtin `sum`.
f = [1]
for i in range(2, 101):
    f.append(f[-1] * i)  # f[k-1] == k!, built incrementally
digit_sum = 0
for digit in str(f[-1]):
    digit_sum += int(digit)
print (digit_sum)
print (time.time() -s)
| UTF-8 | Python | false | false | 379 | py | 132 | projecteuler20.py | 125 | 0.427441 | 0.387863 | 0 | 26 | 12.5 | 28 |
eminamitani/HDNNP-tools | 9,311,489,118,665 | 749c0b1b662d60f2c63f316c15317b1e178f1628 | 4b026650a63ffde8093641384d6cc06317843f44 | /BulkSi/checkRMSE-TC/plotRMSETCdata-T5L7-3.py | 84c5e02696593f3793a05670282c2c1a5360847c | []
| no_license | https://github.com/eminamitani/HDNNP-tools | 3426ec92e7071d0c0a50ed36c12481003b472c03 | 27d4d8a598cc3aa5b5450a76404a1df97c027818 | refs/heads/master | 2023-03-14T23:21:19.843507 | 2021-03-28T02:08:25 | 2021-03-28T02:08:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import csv,sys
import re
import matplotlib.pyplot as plt
"""
This script is for plotting scatter of force/RMSE & TC err from
2100 samples in /Si-200917/T5L7/result/RMSETCdata.csv
"""
if __name__ == '__main__':
    # Input/output locations for the T5L7 run (hard-coded cluster paths).
    T5L7folder="/home/okugawa/HDNNP/Si-200917/T5L7/"
    T5L7data= T5L7folder+"result/RMSETCdata.csv"
    smalldata="/home/okugawa/HDNNP/Si-200917/small/result/RMSETCdata.csv"
    # Sample-group names recognised in column 0 of the CSV.
    grps= ["TH2-L7","TH3-L7","TH3-LM5","TMH2-L7","TM3-L7","TM3-LM5",
           "TML2-L7","TL3-L7","TL3-LM5","TL2-L7","all60"]
    plotfile=T5L7folder+"result/RMSETC-T5L7-2100.png"
    # Per-group scatter colour/marker; the extra 12th entry is for the
    # "small" data set read from the second file.
    colors=["cyan","b","b","lime","green","green","olive","red","red","orange",
            "brown","grey"]
    marks=[".",".","+",".",".","+",".",".","+",".",".","."]
    lbls=["T:H2-L:7","T:H3-L:7","T:H3-L:M5","T:MH2-L:7",
          "T:M3-L:7","T:M3-L:M5","T:ML2-L:7","T:L3-L:7",
          "T:L3-L:M5","T:L2-L:7","T:5-L:7","small"]
    #Plotting force/RMSE and TC error of each sample
    with open(T5L7data,'r') as f1, open(smalldata,'r') as f2:
        RMSETC = []
        T5L7dt = csv.reader(f1)
        for row in T5L7dt:
            # Row names look like "<group>-<n>" or "<grp>-<sub>-<n>";
            # strip the trailing sample number to recover the group.
            dname = re.split('[-]',row[0])
            if len(dname)==3:
                dataname = dname[0]+"-"+dname[1]
            elif len(dname)==2:
                dataname = dname[0]
            else:
                print(f'Data name length error: {len(dname)}')
                sys.exit()
            if dataname in grps:
                grpindex= grps.index(dataname)
                # Store [RMSE, |TC error|, colour, marker] per sample.
                RMSETC.append([float(row[1]),abs(float(row[2])),colors[grpindex],
                               marks[grpindex]])
        sm0599= csv.reader(f2)
        for row in sm0599:
            if 'small-' in row[0]:
                dname = re.split('[-]',row[0])
                # Only "small" samples numbered above 30 are included.
                if int(dname[1])>30:
                    RMSETC.append([float(row[1]),abs(float(row[2])),"grey","."])
    #Plotting force/RMSE and TC error of each sample
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    plt.title("force/RMSE & TC err of each 2100 samples (Temp:5 LC:7)")
    ax1.set_xlabel("TC Err (fm 112.1W/m-K:300K)")
    ax1.set_ylabel("force/RMSE (meV/ang)")
    ax1.grid(True)
    plt.rcParams["legend.edgecolor"] ='green'
    plt.rcParams["legend.borderpad"] ='0.3' #Space between handle & edge
    plt.rcParams["legend.handlelength"] ='1' #Length of handle
    plt.rcParams["legend.handletextpad"] ='0.2' #Space between handle & label
    plt.rcParams["legend.columnspacing"] ='1' #Space between columns
    plt.rcParams["legend.labelspacing"] ='0.2' #Space between row of label
    for RTdata in RMSETC:
        ax1.scatter(RTdata[1],RTdata[0],c=RTdata[2],marker=RTdata[3])
    left, right = ax1.get_xlim()
    # Widen the x range by 30% so the legend does not cover the data.
    ax1.set_xlim(left, right*1.3)
    #ax2 is only for plotting legend of all kind of data
    ax2 = ax1.twinx()
    # Re-plot the last point once per style purely to build legend handles.
    for k in range(12):
        ax2.scatter(RTdata[1],RTdata[0],c=colors[k],marker=marks[k],label=lbls[k])
    handler2, label2 = ax2.get_legend_handles_labels()
    ax1.legend(handler2, label2,loc='upper right')
    fig.delaxes(ax2)
    plt.savefig(plotfile)
plt.close() | UTF-8 | Python | false | false | 3,137 | py | 132 | plotRMSETCdata-T5L7-3.py | 128 | 0.564233 | 0.510041 | 0 | 79 | 38.721519 | 82 |
kshitij1235/Windows10Activation | 3,599,182,618,574 | ca34e83668a6a0dfb93d2716cc1362098d4550aa | 7228e82648ebd6d486e5c85b0f96e3a5fe458543 | /windows10 activation/windows_activation_source_code.py | b933df628fdfa635bd952c54ed8a6b8b5094e9ec | []
| no_license | https://github.com/kshitij1235/Windows10Activation | 379d600f33f0ee48f8a6a91b6dea11bdb5fcf034 | 06aa974da7d797d6408cd184645a700265f294f0 | refs/heads/main | 2023-08-20T21:29:53.701119 | 2021-10-24T17:31:29 | 2021-10-24T17:31:29 | 420,074,658 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """windows actvation pakages"""
import threading
from tkinter import ttk
import os
import tkinter
from PIL import ImageTk, Image
def unlock():
    """Show the progress bar and run the activation commands on a
    background thread, so the Tk mainloop stays responsive."""
    # `pb` is the module-level ttk.Progressbar created in __main__.
    pb.place(x=10, y=170)
    threading.Thread(target=commands).start()
def commands():
    """Run the Windows KMS activation steps, advancing the progress bar.

    Runs on a worker thread (see unlock()).  The slmgr.vbs calls install
    a generic Windows 10 Pro key, point activation at a KMS server and
    then activate.  NOTE(review): os.system("c:") / os.system("cd ...")
    only change directory inside a throwaway subshell, not this process
    -- presumably intended to target C:/WINDOWS/system32; confirm.
    """
    pb['value'] += 20
    os.system("c:")
    pb['value'] += 10
    os.system("cd C:/WINDOWS/system32")
    pb['value'] += 30
    # Install the generic (publicly documented) Windows 10 Pro KMS key.
    os.system("slmgr.vbs /ipk W269N-WFGWX-YVC9B-4J6C9-T83GX")
    pb['value'] += 10
    # Point activation at the KMS host.
    os.system("slmgr.vbs /skms kms.lotro.cc")
    pb['value'] += 20
    # Trigger the activation attempt.
    os.system("slmgr.vbs /ato")
if __name__ == "__main__":
    # Build the single-window activation GUI.
    root = tkinter.Tk()
    root.config(bg="#218DBF")
    root.title("Windows Activation")
    # Fix: the original used bare `except:`, which also swallows
    # SystemExit/KeyboardInterrupt.  The icon and banner image are
    # optional, so only ordinary failures (e.g. missing file) are ignored.
    try:
        root.iconbitmap('./icon.ico')
    except Exception:
        pass
    # Fixed-size window.
    root.geometry("300x230")
    root.minsize(300, 230)
    root.maxsize(300, 230)
    try:
        image1 = Image.open("icon.ico")
        test = ImageTk.PhotoImage(image1)
        label1 = tkinter.Label(image=test, bg="#218DBF")
        # Keep a reference so Tk does not garbage-collect the image.
        label1.image = test
        label1.place(x=70, y=1)
    except Exception:
        pass
    lab = tkinter.Label(root, text="! make sure you open this software as admin", bg="lightblue")
    lab.place(y=120, x=10)
    # Progress bar driven by commands(); placed by unlock() when clicked.
    pb = ttk.Progressbar(root, orient='horizontal', mode='determinate', length=280)
    B = tkinter.Button(root, text="Activate", command=unlock)
    B.place(x=100, y=200)
    root.mainloop()
| UTF-8 | Python | false | false | 1,516 | py | 2 | windows_activation_source_code.py | 1 | 0.600923 | 0.55343 | 0 | 60 | 23.266667 | 97 |
ajiexw/old-zarkpy | 4,861,903,003,602 | b0ea0b136073cf519dcdce3c3b836e0fbf6b5fee | 5e8404b864402563e17d7d02c130c81b56e94323 | /web/cgi/testing/__init__.py | b87e9ea9c1e80d3aa6f357ba21e8e1c62838edf5 | []
| no_license | https://github.com/ajiexw/old-zarkpy | ea1b82f96d6066c88f2dabb3f4f8ef6c61fbad96 | 718c4817fbad53daa221652e5b9d576867e92f99 | refs/heads/master | 2016-09-06T13:45:11.832938 | 2013-10-09T09:25:01 | 2013-10-09T09:25:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
'''注意:python自带一个库库叫做test,请勿重名!'''
| UTF-8 | Python | false | false | 78 | py | 195 | __init__.py | 144 | 0.708333 | 0.6875 | 0 | 2 | 23 | 33 |
samarth-kashyap/dopplervel | 10,703,058,518,130 | 8dc2a2c2b927e9209cca24126c88c4461e19dc0c | 6b3e1de2a7695e0b31f11b38a0c411b3eea9ae3c | /magnetogram/vecmagneto_ts.py | b61b9b5ff8c7c458a6956766907a1b44f5dff088 | []
| no_license | https://github.com/samarth-kashyap/dopplervel | efb29320d665cf75cbbab05c813317b6b793f7c3 | a3d8bff33b521b8391f23142fcd2411d5d030b71 | refs/heads/master | 2023-05-06T23:58:47.376040 | 2021-05-24T20:20:27 | 2021-05-24T20:20:27 | 265,152,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# {{{ analyze_blocks_plot(u, comp, freqarr, num_blocks, lmax):
def analyze_blocks_plot(u, comp, freqarr, num_blocks, lmax):
    """Plot the |B(sigma)| spectrum of every ell block on a 2x4 grid.

    u        : 2-D array -- frequency along axis 0, one column per block
               (assumed; TODO confirm against the caller in __main__)
    comp     : 0 radial, 1 poloidal, 2 toroidal -- selects the title label
    freqarr  : frequencies (micro-Hz) matching u's first axis
    num_blocks, lmax : number of ell bands and maximum harmonic degree
    """
    # title_str is only assigned for comp in {0, 1, 2}; any other value
    # raises NameError below -- presumably never happens; confirm.
    if comp==0:
        title_str = " $\\sqrt{\sum_{s, t} |B^r_{st}(\sigma)|^2}$ s $\in$ "
    elif comp==1:
        title_str = " $\sqrt{\sum_{s, t} s(s+1)|B^p_{st}(\sigma)|^2}$ s $\in$ "
    elif comp==2:
        title_str = " $\sqrt{\sum_{s, t} s(s+1)|B^t_{st}(\sigma)|^2}$ s $\in$ "
    block_size = lmax // num_blocks
    # From here on lmin/lmax track the current band; this deliberately
    # reuses (and clobbers) the lmax parameter name.
    lmin, lmax = 0, 0
    fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(20, 8))
    mask_pos = freqarr >= 0  # only plot non-negative frequencies
    for i in range(num_blocks):
        print(f"Block number = {i+1} of {num_blocks}")
        lmin = lmax
        lmax = lmin + block_size
        xst = np.arange(lmax+1)  # NOTE(review): unused
        axs.flatten()[i].semilogy(freqarr[mask_pos], abs(u[:, i])[mask_pos], 'black')
        axs.flatten()[i].set_title(title_str + f"({lmin}, {lmax})")
        axs.flatten()[i].set_xlabel(" $\sigma$ in $\mu$Hz")
        axs.flatten()[i].set_ylabel("Magnetic field in gauss")
        # axs.flatten()[i].legend()
    plt.tight_layout()
    fig.show()
    return None
# }}} analyze_blocks_plot(u, comp, freqarr, num_blocks, lmax)
# {{{ def sum_st(alm, ell, comp):
def sum_st(alm, ell, comp):
    """Return sqrt(sum over s,t of |B_st|^2), optionally s(s+1)-weighted.

    alm  : array of spectral coefficients B_st for one ell block
    ell  : array of the corresponding harmonic degrees s
    comp : 0 -> radial component, plain |B|^2 sum; any other value ->
           apply the s(s+1) weight used for the p/t components.

    Bug fix: the plot titles built in analyze_blocks_plot label these
    quantities as sqrt(sum |B|^2), but the original applied np.sqrt
    element-wise *before* .sum(), i.e. it returned sum(|B|).  The square
    root now wraps the (weighted) sum.
    """
    power = np.abs(alm) ** 2
    if comp != 0:
        power = power * ell * (ell + 1)
    return np.sqrt(power.sum())
# }}} def sum_st(alm, ell, comp):
# {{{ def analyze_blocks(u, comp, num_blocks, lmax):
def analyze_blocks(u, comp, num_blocks, lmax):
    """Reduce the coefficients in `u` to one amplitude per ell band.

    Splits degrees 0..lmax into num_blocks contiguous bands and applies
    sum_st() to each.  Relies on the module-level `ellArr` (degree of
    each coefficient in `u`, set in the __main__ block).  Returns a
    float array of length num_blocks.
    """
    block_size = lmax // num_blocks
    lmin, lmax = 0, 0  # current band limits; deliberately reuses the name lmax
    # Fix: np.float was removed in NumPy 1.24 -- it was simply an alias
    # for the builtin float, which is used here instead.
    u_block = np.zeros(num_blocks, dtype=float)
    for i in range(num_blocks):
        lmin = lmax
        lmax = lmin + block_size
        mask_block = (ellArr <= lmax)*(ellArr > lmin)
        ell, alm = ellArr[mask_block], u[mask_block]
        u_block[i] = sum_st(alm, ell, comp)
    return u_block
# }}} analyze_blocks(u, comp, num_blocks, lmax)
# {{{ def get_alm(comp):
def get_alm(comp):
    """Load the daily spherical-harmonic coefficient file for one
    component: 0 -> radial (Br), 1 -> poloidal (Bp), 2 -> toroidal (Bt).

    Builds the file name from the module-level `data_dir` and `suffix`
    (set in the __main__ block).  Fix: the original fell through with an
    unrelated NameError when `comp` was anything other than 0/1/2; a
    clear ValueError is raised instead.
    """
    prefixes = {0: "BrlmA", 1: "BplmA", 2: "BtlmA"}
    if comp not in prefixes:
        raise ValueError(f"comp must be 0, 1 or 2, got {comp!r}")
    alm = np.load(f"{data_dir}/{prefixes[comp]}.{suffix}.npy")
    return alm
# }}} get_alm(comp)
if __name__ == "__main__":
    # Days per month processed in 2019 (sep onwards commented out).
    max_dates = {"jan": 31, "feb": 28, "mar": 31, "apr": 30, "may": 31,
                 "jun": 30, "jul": 31, "aug": 30} #, "sep": 31}
    months = {"jan": 1, "feb": 2, "mar": 3, "apr": 4, "may": 5,
              "jun": 6, "jul": 7, "aug": 8} #, "sep": 9}
    data_dir = "/scratch/g.samarth/HMIDATA/magnetogram"
    # ellArr/emmArr give the harmonic degree/order of every coefficient.
    arrlm = np.load(f"{data_dir}/arrlm.npz")
    ellArr, emmArr = arrlm['ellArr'], arrlm['emmArr']
    lmax = ellArr.max()
    tot_blocks = 8
    # 400 rows is an upper bound on the number of days; trimmed below.
    # Fix: np.float was removed in NumPy 1.24 -- it was an alias for the
    # builtin float, which is used here instead.
    Br_time = np.zeros((400, tot_blocks), dtype=float)
    Bp_time = np.zeros((400, tot_blocks), dtype=float)
    Bt_time = np.zeros((400, tot_blocks), dtype=float)
    time_count = 0
    for month in months:
        print(f"month = {month}")
        for i in range(1, max_dates[month]+1):
            try:
                # `suffix` is the module-level date tag read by get_alm().
                suffix = f"2019{months[month]:02d}{i:02d}"
                Br, Bp, Bt = get_alm(0), get_alm(1), get_alm(2)
                Br_time[time_count, :] = analyze_blocks(Br, 0, tot_blocks, lmax)
                Bp_time[time_count, :] = analyze_blocks(Bp, 1, tot_blocks, lmax)
                Bt_time[time_count, :] = analyze_blocks(Bt, 2, tot_blocks, lmax)
                time_count += 1
            except FileNotFoundError:
                # Missing days are simply skipped.
                pass
    # Trim to the days actually loaded, then move to the frequency domain.
    Br_time = Br_time[:time_count, :].copy()
    Bp_time = Bp_time[:time_count, :].copy()
    Bt_time = Bt_time[:time_count, :].copy()
    Br_freq = np.fft.fft(Br_time, axis=0)
    Bp_freq = np.fft.fft(Bp_time, axis=0)
    Bt_freq = np.fft.fft(Bt_time, axis=0)
    time_arr = np.arange(time_count)
    # One sample per day; convert cycles/s to micro-Hz.
    freq_arr = np.fft.fftfreq(len(time_arr), d=24*60*60)*1e6 # microHz
    analyze_blocks_plot(Br_freq, 0, freq_arr, tot_blocks, lmax)
    analyze_blocks_plot(Bp_freq, 1, freq_arr, tot_blocks, lmax)
    analyze_blocks_plot(Bt_freq, 2, freq_arr, tot_blocks, lmax)
| UTF-8 | Python | false | false | 4,113 | py | 66 | vecmagneto_ts.py | 52 | 0.546317 | 0.522733 | 0 | 117 | 34.153846 | 85 |
superspma/Word-Query-Server-and-Client | 652,835,036,878 | 282c3bb435324f65e12d0cbcc36337bcee863539 | e28d1a8f8e50e7cc0a1374677f943a15942d0e3b | /dict/demo.py | 68f9001329d408bb403b1a001c692bb4a4b7ec36 | []
| no_license | https://github.com/superspma/Word-Query-Server-and-Client | 11e3aad2cdefcf3be16d0ab6546269768323b391 | ef9651c399d9ddad594a01d8ca61179ac52e2fdb | refs/heads/master | 2020-06-06T10:38:10.735707 | 2019-06-20T01:37:32 | 2019-06-20T01:37:32 | 192,717,245 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
客户端界面简述
"""
while True:
print("第一界面")
cmd = input(">>>:")
if cmd == "go":
while True:
print("第二界面")
cmd = input(">>>:")
if cmd == "out":
break
| UTF-8 | Python | false | false | 250 | py | 4 | demo.py | 2 | 0.359091 | 0.359091 | 0 | 13 | 15.923077 | 31 |
sharathgowda815/py8 | 17,428,977,324,062 | c2b5f72b6b6c61b263110b10cb40f9d4b6b1499c | 486732078c0d767bf34d828395841b7d9c00cecd | /assignment_2/task_1/3/3.py | e675053b36b6b92b87584e7a1e00d03f8d75e380 | []
| no_license | https://github.com/sharathgowda815/py8 | 94dc1e80000a16f6bda06fa7e0b978d25440b7cc | 93e79dc13caf5f08c6a53c5c5aefcf8f3a1b3fb2 | refs/heads/master | 2022-08-02T15:08:49.599213 | 2020-05-25T14:04:04 | 2020-05-25T14:04:04 | 262,832,958 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def longestWord(words):
length_of_word = len(words[0])
longest_Word = words[0]
for word in words:
if(len(word) > length_of_word):
length_of_word = len(word)
longest_Word = word
return longest_Word
# Demo: report the longest of three sample words via longestWord() above.
print('Longest Word is ' + str(longestWord(words = ['PHP', 'Exercises', 'Backend'])))
asmaur/azo-projeto | 10,642,928,967,480 | b5657206819d29cc893b2fbad324cb9beff3e3d6 | a648fd77abe6bf808c2d55b0b1f7426f0043e759 | /apps/azo/cms_app.py | 3e7d8aa324b18de9055e3a18490bbdecf7e15dd3 | []
| no_license | https://github.com/asmaur/azo-projeto | d4a4b6ed3dcabb0da61d36498a818c675ee61b07 | 9f44572b7225f6c3631bac5458ddc66ac95b725c | refs/heads/master | 2018-09-12T20:43:08.466842 | 2016-07-25T20:51:20 | 2016-07-25T20:51:20 | 63,534,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
#from .menu import StaffSubMenu
class AzoApp(CMSApp):
    """django CMS apphook that attaches the `apps.azo.urls` URLconf to
    whichever CMS page this application is hooked onto."""
    name = _('Azo')  # label shown in the CMS admin (lazily translated)
    urls = ['apps.azo.urls', ]
    app_name = 'azo'  # URL namespace, e.g. {% url 'azo:...' %}
    #menus = [StaffSubMenu, ]
# Register the apphook so it appears in the CMS page "Application" list.
apphook_pool.register(AzoApp)
mboylevt/cred | 14,302,241,120,366 | 8e4c5af2a90d8fbee3ddb437ea0f31ab8f6d2c46 | a7533d98b56f8956f3e5155c3360106b512a51af | /lib/RecordLib.py | 366fc3d57116496ec69f59b66de37c76469b722b | []
| no_license | https://github.com/mboylevt/cred | a77413c1cdd5b159bb8c00e168308994a390c1c6 | 818719ce30d6dfd1ba53d3bae3dbe644ab5e359a | refs/heads/master | 2021-01-25T05:15:50.347044 | 2014-05-11T15:29:19 | 2014-05-11T15:29:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'matt'
from models.Models import Record
def add_record(session, student_id, record_type_id, class_id, date, dow, score):
    """
    Insert one Record row and commit immediately.

    :param session: SQLAlchemy session bound to the models
    :param student_id: id of the student the record belongs to
    :param record_type_id: id of the record type
    :param class_id: id of the class the record was taken in
    :param date: stored as Record.date_of_record
    :param dow: day of week, stored as Record.day_of_week
    :param score: numeric score for the record
    """
    session.add(Record(student_id=student_id, record_type_id=record_type_id,
                       class_id=class_id, date_of_record=date, day_of_week=dow, score=score))
    session.commit()
def list_records_per_student(session, student_id):
    """
    Print id/student_id/score for every record belonging to a student.
    Not particularly useful in data analysis, mostly for debugging's sake.
    NOTE(review): Python 2 print statement -- this module is py2-only.
    """
    for instance in session.query(Record).order_by(Record.id).filter(Record.student_id == student_id):
        print instance.id, instance.student_id, instance.score
def get_records_per_student(session, student_id):
    """
    Fetch every Record row for one student, ordered by primary key.

    Returns the fully materialised list (query `.all()`).
    """
    query = session.query(Record)
    query = query.order_by(Record.id)
    query = query.filter(Record.student_id == student_id)
    return query.all()
def calculate_percentage(session, student_id, class_id, record_type):
    """
    Tally a student's records of one type within one class and print
    the earned/total counts.

    NOTE(review): despite the name, no percentage is computed -- the
    output is "earned/total"; confirm whether a ratio was intended.
    A record counts as earned when its score equals 1.
    Python 2 print statement -- this module is py2-only.
    """
    total_points = 0
    points_earned = 0
    for instance in session.query(Record).filter(Record.student_id == student_id, Record.record_type_id == record_type, Record.class_id == class_id):
        total_points = total_points + 1
        if instance.score == 1:
            points_earned = points_earned + 1
    print "For record_type {type}, student {student} earned {poss}/{total}".format(type=record_type,student=student_id,poss=points_earned,total=total_points)
maxs-im/Tasks | 16,810,502,032,431 | 87434e7f9b3339b6d6e3dfdc923e6ace5584f9ee | 7c24ab6786e9d65fd3baf4f3122c088de6a33844 | /Catch/scripts/catch_test_run.py | 833d68a053c4bbb9cceab369c390669122dd0973 | [
"BSL-1.0"
]
| permissive | https://github.com/maxs-im/Tasks | deb8451cd79ae24ca898eaf5995b90f8b5df8836 | bf26ce060aeee75130dca33ac7a13cecdf5bd87a | refs/heads/master | 2017-10-06T07:40:32.568753 | 2017-06-24T01:26:50 | 2017-06-24T01:26:50 | 94,937,511 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import os
from catch_test_case import TestCaseApprovedParser
from catch_test_case import TestCaseResultParser
from catch_test_case import TestCaseData
from catch_conditions import RandomOutput
class TestRunData:
    """In-memory model of one Catch self-test run.

    Holds the header (appname/version), the parsed test cases, the
    trailing results line, any mid-run console output (`output`,
    remembered together with the line offset `outputLine` so it can be
    re-emitted near the same position when regenerating files) and the
    counters/streams used by the JUnit and XML reports.
    """
    def __init__(self):
        self.appname = ""
        self.version = ""
        self.testcases = []
        self.results = ""
        # Free-form console output captured mid run + where it occurred.
        self.output = []
        self.outputLine = 0
        self.writtenOutput = False
        # JUnit-report fields.
        self.sysout = []
        self.syserr = []
        self.errors = ""
        self.failures = ""
        self.tests = ""
    def __eq__(self, other):
        # Structural equality over all attributes.
        return self.__dict__ == other.__dict__
    def __repr__(self):
        result = "[" + self.appname + ", " + self.version + " [ "
        suffix = ""
        for case in self.testcases:
            result += suffix
            result += repr(case)
            suffix = ", "
        result += " ]"
        result += self.results
        result += " ]"
        return result
    def empty(self):
        # A run counts as empty until an application header was parsed.
        if len(self.appname):
            return False
        return True
    def generateApprovedLines(self):
        """Render the run in approved-baseline format, cases in parse
        order.  Captured console output is re-inserted exactly once, as
        close as possible to the line index where it appeared."""
        if self.empty():
            raise Exception("Empty test run..." + repr(self))
        lines = []
        self.writtenOutput = False
        if not(self.writtenOutput) and len(self.output) > 0 and self.outputLine == 0:
            lines += self.output
            self.writtenOutput = True
        if len(self.appname):
            lines.append("")
            lines.append("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            lines.append(self.appname + " is a " + self.version + " host application.")
            lines.append("Run with -? for options")
            lines.append("")
        for case in self.testcases:
            lines += case.generateApprovedLines()
            if not(self.writtenOutput) and len(self.output) > 0 and len(lines) >= self.outputLine:
                lines += self.output
                self.writtenOutput = True
        lines.append("===============================================================================")
        lines.append(self.results)
        lines.append("")
        return lines
    def generateSortedApprovedLines(self):
        """Same as generateApprovedLines, but cases sorted by name."""
        if self.empty():
            raise Exception("Empty test run..." + repr(self))
        lines = []
        self.writtenOutput = False
        if not(self.writtenOutput) and len(self.output) > 0 and self.outputLine == 0:
            lines += self.output
            self.writtenOutput = True
        if len(self.appname):
            lines.append("")
            lines.append("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            lines.append(self.appname + " is a " + self.version + " host application.")
            lines.append("Run with -? for options")
            lines.append("")
        sortedTestcases = sorted(self.testcases, key=lambda x: x.name, reverse=False)
        for case in sortedTestcases:
            lines += case.generateApprovedLines()
            if not(self.writtenOutput) and len(self.output) > 0 and len(lines) >= self.outputLine:
                lines += self.output
                self.writtenOutput = True
        lines.append("===============================================================================")
        lines.append(self.results)
        lines.append("")
        return lines
    def generateResultLines(self):
        """Render using each case's raw result lines (as parsed from an
        actual test-run log, before approval)."""
        if self.empty():
            raise Exception("Empty test run..." + repr(self))
        lines = []
        self.writtenOutput = False
        if not(self.writtenOutput) and len(self.output) > 0 and self.outputLine == 0:
            lines += self.output
            self.writtenOutput = True
        if len(self.appname):
            lines.append("")
            lines.append("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            lines.append(self.appname + " is a " + self.version + " host application.")
            lines.append("Run with -? for options")
            lines.append("")
        for case in self.testcases:
            lines += case.generateResultLines()
            if not(self.writtenOutput) and len(self.output) > 0 and len(lines) >= self.outputLine:
                lines += self.output
                self.writtenOutput = True
        lines.append("===============================================================================")
        lines.append(self.results)
        lines.append("")
        return lines
    def generateUnapprovedLines(self, outputLine):
        """Render for diffing against the approved baseline: the real
        app name/version are replaced by the "CatchSelfTest"/"<version>"
        placeholders; `outputLine` says where to re-insert output."""
        if self.empty():
            raise Exception("Empty test run..." + repr(self))
        lines = []
        self.writtenOutput = False
        #print "U:",outputLine,",",self.output
        if not(self.writtenOutput) and len(self.output) > 0 and outputLine == 0:
            lines += self.output
            self.writtenOutput = True
        if len(self.appname):
            lines.append("")
            lines.append("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            lines.append("CatchSelfTest" + " is a " + "<version>" + " host application.")
            lines.append("Run with -? for options")
            lines.append("")
        for case in self.testcases:
            lines += case.generateUnapprovedLines()
            if not(self.writtenOutput) and len(self.output) > 0 and len(lines) >= outputLine:
                lines += self.output
                self.writtenOutput = True
        lines.append("===============================================================================")
        lines.append(self.results)
        lines.append("")
        return lines
    def generateSortedUnapprovedLines(self, outputLine):
        """Sorted-by-name variant of generateUnapprovedLines."""
        if self.empty():
            raise Exception("Empty test run..." + repr(self))
        lines = []
        self.writtenOutput = False
        #print "U:",outputLine,",",self.output
        if not(self.writtenOutput) and len(self.output) > 0 and outputLine == 0:
            lines += self.output
            self.writtenOutput = True
        if len(self.appname):
            lines.append("")
            lines.append("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            lines.append("CatchSelfTest" + " is a " + "<version>" + " host application.")
            lines.append("Run with -? for options")
            lines.append("")
        sortedTestcases = sorted(self.testcases, key=lambda x: x.name, reverse=False)
        for case in sortedTestcases:
            lines += case.generateUnapprovedLines()
            if not(self.writtenOutput) and len(self.output) > 0 and len(lines) >= outputLine:
                lines += self.output
                self.writtenOutput = True
        lines.append("===============================================================================")
        lines.append(self.results)
        lines.append("")
        return lines
    def generateSortedUnapprovedJunit(self):
        """Emit the run as JUnit-style XML lines, cases sorted by class
        name and then test name."""
        lines = []
        #print "U:",outputLine,",",self.output
        lines.append("<testsuites>")
        l = "  <testsuite name=\""
        l += self.appname
        l += "\" errors=\""
        l += self.errors
        l += "\" failures=\""
        l += self.failures
        l += "\" tests=\""
        l += self.tests
        l += "\" hostname=\"tbd\" time=\"{duration}\" timestamp=\"tbd\">"
        lines.append(l)
        sortedTestcases = sorted(self.testcases, key=lambda x: x.classname, reverse=False)
        sortedTestcases = sorted(sortedTestcases, key=lambda x: x.name, reverse=False)
        #sortedTestcases = self.testcases
        for case in sortedTestcases:
            lines += case.generateUnapprovedJunit()
        if len(self.sysout) > 0:
            lines.append("  <system-out>")
            for l in self.sysout:
                lines.append(l)
            lines.append("  </system-out>")
        if len(self.syserr) > 0:
            lines.append("  <system-err>")
            for l in self.syserr:
                lines.append(l)
            lines.append("  </system-err>")
        lines.append("  </testsuite>")
        lines.append("</testsuites>")
        return lines
    def generateSortedUnapprovedXml(self):
        """Emit the run in Catch's own XML reporter format (one Group,
        with OverallResults repeated at group and root level)."""
        lines = []
        #print "U:",outputLine,",",self.output
        lines.append("<Catch name=\"" + self.appname + "\">")
        lines.append("  <Group name=\"~_\">")
        sortedTestcases = sorted(self.testcases, key=lambda x: x.classname, reverse=False)
        sortedTestcases = sorted(sortedTestcases, key=lambda x: x.name, reverse=False)
        #sortedTestcases = self.testcases
        for case in sortedTestcases:
            lines += case.generateUnapprovedXml()
        l = "<OverallResults successes=\""
        # successes="663" failures="109"
        l += self.tests
        l += "\" failures=\""
        l += self.failures
        l += "\"/>"
        lines.append("    " + l)
        lines.append("  </Group>")
        lines.append("  " + l)
        lines.append("</Catch>")
        return lines
    def addTestCase(self, name):
        """Append and return a fresh TestCaseData with the given name."""
        testcase = TestCaseData()
        testcase.name = name
        testcase.nameParts.append(name)
        self.testcases.append(testcase)
        return testcase
    def addClassTestCase(self, cls, name):
        """Append and return a fresh TestCaseData with class + name set."""
        testcase = TestCaseData()
        testcase.classname = cls
        testcase.name = name
        testcase.nameParts.append(name)
        self.testcases.append(testcase)
        return testcase
    def addSysout(self, output):
        # Lines for the JUnit <system-out> section.
        self.sysout = output
    def addSyserr(self, output):
        # Lines for the JUnit <system-err> section.
        self.syserr = output
class TestRunApprovedParser:
    """Line-oriented state machine rebuilding a TestRunData from an
    'approved' baseline file.

    States: NONE (before the ~~~ banner), VERSION_EXPECTED (banner seen,
    waiting for the "<app> is a <version> host application." line),
    TEST_CASE_EXPECTED (feeding lines to the per-case parser until the
    === footer) and END_RUN_INFO (waiting for the results summary).
    """
    NONE = 0
    VERSION_EXPECTED = 1
    TEST_CASE_EXPECTED = 2
    END_RUN_INFO = 3
    # Matches e.g. "CatchSelfTest is a <version> host application."
    versionParser = re.compile( r'(.*)is a (<version>*).*' )
    def __init__(self):
        self.state = self.NONE
        self.current = TestRunData()
        self.testcaseParser = TestCaseApprovedParser()
        self.lineNumber = 0
    def parseApprovedLine(self,line):
        """Consume one line; returns the completed TestRunData once the
        results line has been read, else None."""
        result = None
        if self.state == self.NONE:
            if line.startswith("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"):
                self.state = self.VERSION_EXPECTED
            elif len(line):
                raise Exception("Unknown parse line: '" + line + "'")
        elif self.state == self.VERSION_EXPECTED:
            m = self.versionParser.match(line)
            if m:
                self.current.appname = m.group(1).strip()
                self.current.version = m.group(2).strip()
                self.state = self.TEST_CASE_EXPECTED
            elif len(line):
                raise Exception("Unknown parse line: '" + line + "'")
        elif self.state == self.TEST_CASE_EXPECTED:
            if line == "Run with -? for options":
                pass
            else:
                testcase = None
                try:
                    testcase = self.testcaseParser.parseApprovedLine(line)
                except RandomOutput as e:
                    # The case parser signals stray console output via this
                    # exception; remember it and roughly where it started.
                    #print "E:", self.lineNumber, ", ",e.output
                    self.current.output = e.output
                    self.current.outputLine = self.lineNumber - 10
                if isinstance(testcase, TestCaseData):
                    self.current.testcases.append(testcase)
                if line.startswith("==============================================================================="):
                    self.state = self.END_RUN_INFO
        elif self.state == self.END_RUN_INFO:
            if len(line):
                self.current.results = line.strip()
                result = self.current
        self.lineNumber += 1
        return result
class TestRunApprovedHandler:
    """Parses a whole approved-baseline file into a TestRunData and can
    write it back out, optionally with test cases sorted by name."""
    def __init__(self, filePath):
        rawFile = open( filePath, 'r' )
        parser = TestRunApprovedParser()
        lineNumber = 0
        self.current = None
        for line in rawFile:
            line = line.rstrip()
            #print "L:", lineNumber, "'",line,"'"
            result = parser.parseApprovedLine(line)
            if isinstance(result, TestRunData):
                self.current = result
            lineNumber += 1
        if not(isinstance(self.current, TestRunData) ):
            raise Exception("File could not be parsed: '" + filePath + "'")
    def writeRawFile(self,filePath):
        # NOTE(review): opened in binary mode but written with str objects
        # -- a Python 2 idiom; under Python 3 this raises TypeError.
        rawWriteFile = open( filePath, 'wb' )
        lines = self.current.generateApprovedLines()
        for line in lines:
            rawWriteFile.write(line + "\n")
    def writeSortedRawFile(self,filePath):
        rawWriteFile = open( filePath, 'wb' )
        lines = self.current.generateSortedApprovedLines()
        for line in lines:
            rawWriteFile.write(line + "\n")
class TestRunResultParser:
    """State machine that parses the raw console log of an actual Catch
    run (real version string, unlike the approved-file parser).

    Before the ~~~ banner, stray lines are collected into `output`; a
    hard-coded 10-line signature identifies the known random-output
    block produced by the self test and records where it started.
    """
    NONE = 0
    VERSION_EXPECTED = 1
    TEST_CASE_EXPECTED = 2
    END_RUN_INFO = 3
    # Matches e.g. "CatchSelfTest is a Catch v1.0 b52 host application."
    versionParser = re.compile( r'(.*)is a (Catch v[0-9]*.[0-9]* b[0-9]*).*' )
    def __init__(self):
        self.state = self.NONE
        self.current = TestRunData()
        self.testcaseParser = TestCaseResultParser()
        self.lineNumber = 0
    def parseResultLine(self,line):
        """Consume one log line; returns the completed TestRunData once
        the results summary has been read, else None."""
        result = None
        if self.state == self.NONE:
            if line.startswith("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"):
                self.state = self.VERSION_EXPECTED
            elif len(line):
                self.current.output.append(line.strip())
                if len(self.current.output) == 10:
                    # Recognise the fixed 10-line random-output block the
                    # self test emits, and note its starting line.
                    if (self.current.output[0] == "Message from section one" and self.current.output[1] == "Message from section two" and
                        self.current.output[2] == "Some information" and self.current.output[3] == "An error" and
                        self.current.output[4] == "Message from section one" and self.current.output[5] == "Message from section two" and
                        self.current.output[6] == "Some information" and self.current.output[7] == "An error" and
                        self.current.output[8] == "hello" and self.current.output[9] == "hello" ):
                        self.current.outputLine = self.lineNumber - 9
        elif self.state == self.VERSION_EXPECTED:
            m = self.versionParser.match(line)
            if m:
                self.current.appname = m.group(1).strip()
                self.current.version = m.group(2).strip()
                self.state = self.TEST_CASE_EXPECTED
            elif len(line):
                raise Exception("Unknown parse line: '" + line + "'")
        elif self.state == self.TEST_CASE_EXPECTED:
            if line == "Run with -? for options":
                pass
            else:
                testcase = None
                try:
                    testcase = self.testcaseParser.parseResultLine(line)
                except RandomOutput as e:
                    # Stray console output signalled by the case parser.
                    #print "E:", self.lineNumber, ", ",e.output
                    self.current.output = e.output
                    self.current.outputLine = self.lineNumber - 10
                if isinstance(testcase, TestCaseData):
                    self.current.testcases.append(testcase)
                if line.startswith("==============================================================================="):
                    self.state = self.END_RUN_INFO
        elif self.state == self.END_RUN_INFO:
            if len(line):
                self.current.results = line.strip()
                result = self.current
        self.lineNumber += 1
        return result
class TestRunResultHandler:
    """Parses a raw test-run log file into a TestRunData and writes the
    various 'unapproved' output flavours."""
    def __init__(self, filePath):
        rawFile = open( filePath, 'r' )
        parser = TestRunResultParser()
        lineNumber = 0
        self.current = None
        for line in rawFile:
            line = line.rstrip()
            #print "L:", lineNumber, "'",line,"'"
            result = parser.parseResultLine(line)
            if isinstance(result, TestRunData):
                self.current = result
            lineNumber += 1
        if not(isinstance(self.current, TestRunData) ):
            raise Exception("File could not be parsed: '" + filePath + "'")
    def writeRawFile(self,filePath):
        # NOTE(review): this variant uses os.linesep while the other
        # writers use "\n" -- inconsistent on Windows; confirm intent.
        # Binary mode + str writes is a Python 2 idiom (TypeError on py3).
        rawWriteFile = open( filePath, 'wb' )
        lines = self.current.generateResultLines()
        for line in lines:
            rawWriteFile.write(line + os.linesep)
    def writeUnapprovedFile(self,filePath,outputLine):
        rawWriteFile = open( filePath, 'wb' )
        lines = self.current.generateUnapprovedLines(outputLine)
        for line in lines:
            rawWriteFile.write(line + "\n")
    def writeSortedUnapprovedFile(self,filePath,outputLine):
        rawWriteFile = open( filePath, 'wb' )
        lines = self.current.generateSortedUnapprovedLines(outputLine)
        for line in lines:
            rawWriteFile.write(line + "\n")
| UTF-8 | Python | false | false | 14,108 | py | 68 | catch_test_run.py | 49 | 0.615466 | 0.610717 | 0 | 430 | 31.809302 | 122 |
zakir360hossain/MyProgramming | 17,291,538,346,377 | 7a1150d3d771befcadabc9cce273b3414832f8a7 | 9881f114b357d334b5010b74f9d8ee4d9f124d87 | /Languages/Python/Learning/topic2/duck_typing/ex1.py | e3a1ad301499e9a2fa55e9da15dc3a7fdfcafa51 | []
| no_license | https://github.com/zakir360hossain/MyProgramming | 55dc6b796805a8497d62c245188c9de326fb8426 | 4b810b08a8bef0adc9a76e9c3082dada06516e53 | refs/heads/master | 2023-06-11T00:35:54.681794 | 2021-04-19T03:56:21 | 2021-04-19T03:56:21 | 219,240,979 | 1 | 0 | null | false | 2021-01-06T01:58:04 | 2019-11-03T02:27:16 | 2020-12-30T05:42:23 | 2021-01-06T01:58:03 | 43,256 | 0 | 0 | 2 | Jupyter Notebook | false | false |
class Laptop:
    """Host object: delegates the actual work to whatever IDE-like
    object is handed in (duck typing -- no isinstance checks)."""
    def code(self, ide):
        # Any object exposing an execute() method works here.
        ide.execute()
class PyCharm:
    """IDE stand-in whose execute() mimics a compile-and-run cycle."""
    def execute(self):
        # Report each build stage in order.
        for stage in ("Compiling", "Running"):
            print(stage)
class myEditor:
    """Alternative IDE stand-in with a longer build pipeline; satisfies
    the same implicit execute() protocol as PyCharm."""
    def execute(self):
        # Report each build stage in order.
        for stage in ("Convention Checking", "Compiling", "Debugging", "Running"):
            print(stage)
ide = PyCharm()
mac = Laptop()
mac.code(ide)
# Swap in a different editor object and hand it to another Laptop.
ide = myEditor()
# The variable's "type" never mattered: Laptop.code only requires an
# execute() method.  If it behaves like a duck, it is a duck.
mac2 = Laptop()
mac2.code(ide)
| UTF-8 | Python | false | false | 545 | py | 588 | ex1.py | 428 | 0.686239 | 0.682569 | 0 | 29 | 17.62069 | 102 |
student513/qrating | 12,017,318,526,544 | 186dd51a0dadcdab2d6a2784966a97d5b61e3261 | ef9978aed4eadad7043a2d1ec5dfd17a43b0f241 | /blog/migrations/0002_question_category.py | 5622f841d3745151637ea0a8cc6beb7af695d475 | []
| no_license | https://github.com/student513/qrating | c15581fff0d87ab6c09fe2295cc2185121cc3a16 | 664a43418dfeb16d2b22f0abdff145d34c03ada2 | refs/heads/master | 2021-02-28T05:50:04.851723 | 2019-09-02T08:56:43 | 2019-09-02T08:56:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.3 on 2019-08-29 13:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the `category` choice
    field (Korean display labels, default 'economy') to blog.Question.
    Generated code -- do not hand-edit the operations."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='question',
            name='category',
            field=models.CharField(choices=[('economy', '경제학'), ('programming', '프로그래밍'), ('math', '수학'), ('management', '경영학'), ('cpa', 'CPA/고시'), ('etc', '기타')], default='economy', max_length=15),
        ),
    ]
| UTF-8 | Python | false | false | 556 | py | 24 | 0002_question_category.py | 10 | 0.565134 | 0.524904 | 0 | 18 | 28 | 198 |
oddlama/forge | 12,086,037,991,254 | f81c2d13e5c89a2db17f4a0a6ce3bff4b41cdb70 | e81d97e0151f37dab9980f602d615e907582127f | /test/inventory/mock_inventories/missing_definition.py | 8bb1603ba5780149fb7e00755af9653234141717 | [
"MIT"
]
| permissive | https://github.com/oddlama/forge | 08fe8b2d792b6a59428e27361c199e063dbe4d9f | e270a021c45666c8b22250f1ae7a1534fe6040d3 | refs/heads/main | 2023-09-02T13:28:53.206632 | 2022-06-04T12:38:58 | 2022-06-04T12:38:58 | 329,370,687 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # No hosts= definition
| UTF-8 | Python | false | false | 23 | py | 75 | missing_definition.py | 71 | 0.73913 | 0.73913 | 0 | 1 | 22 | 22 |
3098701248/python | 7,971,459,306,066 | cc8ab78c443173a89c7da60678e21ca620d6bfaf | e5151fd95c3f4b53eeb91116e2ae93a8b318041e | /_function.py | c273c745f4619f968581a5b013ff996e93e498e1 | []
| no_license | https://github.com/3098701248/python | efbb924b65833adffa919947f64831154477d938 | 9913e3011625931e4f71c3ca22b92f99d3ebba8f | refs/heads/master | 2018-10-10T11:23:52.677604 | 2018-09-28T09:14:37 | 2018-09-28T09:14:37 | 105,228,116 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
def say(name, *, major='CS'):
print('{} majored in {}.'.format(name, major))
say('I', major='AI')
| UTF-8 | Python | false | false | 128 | py | 43 | _function.py | 43 | 0.5625 | 0.5625 | 0 | 8 | 15 | 50 |
HaroldXu97/Exercise | 5,609,227,324,194 | d91568a480361b360b3d379a3afc971d2b0b8ec2 | 8b94dda5e951ca1d421b4df2986b1ac1f22941e2 | /PartitionArrayIntoThreePartsWithEqualSum/PartitionArrayIntoThreePartsWithEqualSum_2.py | ba43fc50f15f04ddee2d430c2b63456bc203aa0d | []
| no_license | https://github.com/HaroldXu97/Exercise | aa8cb708e0036871f3271ef45f1cbf8bd0f6030b | 90079d92278d5f06a7893ec91be4d92a3fb41d13 | refs/heads/master | 2020-08-04T16:45:52.229155 | 2019-11-14T22:17:41 | 2019-11-14T22:17:41 | 212,207,834 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Question: how to check whether x is before 2x?
class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
return (lambda x,y: x in y and 2*x in y)(sum(A)//3,itertools.accumulate(A)) | UTF-8 | Python | false | false | 207 | py | 82 | PartitionArrayIntoThreePartsWithEqualSum_2.py | 82 | 0.676329 | 0.661836 | 0 | 4 | 51 | 83 |
KataOcean/openhub-scraping | 11,278,584,158,479 | e3cf2ac41697ce77c9fd27a2d6f8e366b1de7dfe | 6c48adcc295c2e64daca18259172247db3e6dba9 | /src/main.py | fc3fdca9abd8f1fdc654be61a52a0417a7745b36 | [
"MIT"
]
| permissive | https://github.com/KataOcean/openhub-scraping | 4c66ffe6f08a18ff26f33eea3a26884c4d237833 | 1708891172f93947a533231f42beb85cf8259579 | refs/heads/master | 2022-12-12T01:05:06.345399 | 2019-12-20T15:30:19 | 2019-12-20T15:30:19 | 227,323,729 | 0 | 0 | MIT | false | 2022-12-08T03:19:20 | 2019-12-11T09:16:07 | 2019-12-20T15:30:31 | 2022-12-08T03:19:18 | 20 | 0 | 0 | 2 | Python | false | false | import requests
import time
import argparse
import urllib.parse
import re
import os
import sys
from bs4 import BeautifulSoup
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tag', default='gui')
parser.add_argument('-e', '--explicit_tags', nargs='*')
parser.add_argument('-i', '--index', default=1)
args = parser.parse_args()
def get_repos_url(relative_url):
soup = get_soup(urllib.parse.urljoin(
'https://www.openhub.net/p/', relative_url + '/enlistments'))
repos_url = soup.select_one(
'tr.enlistment td'
).get_text().split()[0]
return repos_url
def get_soup(url):
time.sleep(5)
r = requests.get(url)
return BeautifulSoup(r.content, 'html.parser')
def write_csv(path, text):
if not os.path.exists(os.path.dirname(path)):
os.mkdir(os.path.dirname(path))
with open(path, mode='a') as f:
f.write(text + '\n')
explicit_list = args.explicit_tags
def is_explicit(soup):
if not explicit_list:
return False
tags = [x.get_text().strip() for x in soup.select('a.tag')]
for tag in tags:
for explicit_tag in explicit_list:
if explicit_tag in tag:
return True
return False
tqdm.write('start')
index = int(args.index)
repos_table_path = os.path.join(args.tag + '/', 'repos_table.csv')
not_found_repos_table_path = os.path.join(
args.tag + '/', 'not_found_repos_table.csv')
isEnd = False
while not isEnd:
try:
tqdm.write('page : ' + str(index))
query = {'names': args.tag, 'page': str(index)}
encoded_query = urllib.parse.urlencode(query)
soup = get_soup(
'https://www.openhub.net/tags?' + encoded_query)
list_root = soup.select_one('div#projects_index_list')
if not list_root:
isEnd = True
for project in tqdm(list_root.select('div.well')):
try:
title_content = project.select_one('h2.title a')
url = title_content.get('href')
title = title_content.get_text()
tqdm.write(title)
info_content = project.select_one('div.stats')
pattern = r'([\d\.]+).*'
loc = re.match(pattern, info_content.select_one(
'a').get_text()).group(1)
if is_explicit(project):
raise Exception
if float(loc) <= 0:
raise Exception
detail = get_soup(urllib.parse.urljoin(
'https://www.openhub.net/p/', url))
if is_explicit(detail):
raise Exception
repos_url = get_repos_url(url)
if not 'github' in repos_url:
continue
tqdm.write(repos_url)
write_csv(repos_table_path, title + ',' + repos_url)
except:
write_csv(not_found_repos_table_path, title)
pass
except:
tqdm.write('error occured page :' + str(index))
break
index += 1
| UTF-8 | Python | false | false | 3,103 | py | 6 | main.py | 4 | 0.555914 | 0.553335 | 0 | 114 | 26.219298 | 69 |
BerilBBJ/scraperwiki-scraper-vault | 2,774,548,875,428 | 5056072282c37fe80bb0ecd29cb3debd869619a0 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/S/san/kw_realty.py | 6a9156224b73e3c0931563b4fdc4ca78ce417eb8 | []
| no_license | https://github.com/BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Scrapes KW Realty associates' details
import scraperwiki
import lxml.html
# Records counter, used as ID in records
counter = 1
# Get root element
def scrape_content(url):
html = scraperwiki.scrape(url)
root = lxml.html.fromstring(html)
return root
# Scrape cities
def parse_cities(root):
div = root.cssselect('div.podsOfficeList')[1]
#city = div.cssselect('ul a')[0].attrib['href']
#print city
for el in div.cssselect('ul a:first-child'):
city = el.text_content()
url = el.attrib['href']
parse_associates(url)
# Get Associates' data from each city
def parse_associates (url):
global counter
root = scrape_content(url)
for el in root.cssselect('div.agentPod'):
res = dict()
res['name'] = el.cssselect('b')[0].text_content()
res['office'] = el.cssselect('span')[0].text_content()
res['phone'] = el.cssselect('li.phone')[0].text_content()
mail_data = el.cssselect('li.email a')[0].attrib['href']
res['email'] = decipher_mail(mail_data)
res['id'] = counter
print scraperwiki.sqlite.save(unique_keys=['id'], data=res)
counter += 1
def decipher_mail(data):
data = data[15:].rstrip(')').split(',')
return let(data[0].strip('"'), data[1], data[2])
def let(grandfather,alchemy,tree):
grandfather += ' '
length = len(grandfather)
horse = 0
drawer = ''
index = 0
while (index < length):
horse = 0;
while(ord(grandfather[index]) != 32):
horse = horse * 10
horse = horse + ord(grandfather[index]) - 48
index = index + 1
drawer += chr(shake(horse,int(alchemy),int(tree)))
index = index + 1
# if (arguments[3]):
# drawer += arguments[3]
return drawer
def shake(people,farm,historian):
if (historian % 2 == 0):
mathematical = 1;
message = 1
while (message <= historian / 2):
memory = (people*people) % farm;
mathematical = (memory*mathematical) % farm
message = message + 1
else:
mathematical = people;
member = 1
while( member <= historian / 2):
memory = (people*people) % farm;
mathematical = (memory*mathematical) % farm;
member = member + 1
return mathematical
#src = 'http://www.kw.com/kw/OfficeSearchSubmit.action?stateProvId=TX'
src = 'http://kwallen.yourkwoffice.com/mcj/user/AssociateSearchSubmitAction.do?orgId=2021&rows=100'
#root = scrape_content(src)
#parse_cities(root)
parse_associates(src)
# Scrapes KW Realty associates' details
import scraperwiki
import lxml.html
# Records counter, used as ID in records
counter = 1
# Get root element
def scrape_content(url):
html = scraperwiki.scrape(url)
root = lxml.html.fromstring(html)
return root
# Scrape cities
def parse_cities(root):
div = root.cssselect('div.podsOfficeList')[1]
#city = div.cssselect('ul a')[0].attrib['href']
#print city
for el in div.cssselect('ul a:first-child'):
city = el.text_content()
url = el.attrib['href']
parse_associates(url)
# Get Associates' data from each city
def parse_associates (url):
global counter
root = scrape_content(url)
for el in root.cssselect('div.agentPod'):
res = dict()
res['name'] = el.cssselect('b')[0].text_content()
res['office'] = el.cssselect('span')[0].text_content()
res['phone'] = el.cssselect('li.phone')[0].text_content()
mail_data = el.cssselect('li.email a')[0].attrib['href']
res['email'] = decipher_mail(mail_data)
res['id'] = counter
print scraperwiki.sqlite.save(unique_keys=['id'], data=res)
counter += 1
def decipher_mail(data):
data = data[15:].rstrip(')').split(',')
return let(data[0].strip('"'), data[1], data[2])
def let(grandfather,alchemy,tree):
grandfather += ' '
length = len(grandfather)
horse = 0
drawer = ''
index = 0
while (index < length):
horse = 0;
while(ord(grandfather[index]) != 32):
horse = horse * 10
horse = horse + ord(grandfather[index]) - 48
index = index + 1
drawer += chr(shake(horse,int(alchemy),int(tree)))
index = index + 1
# if (arguments[3]):
# drawer += arguments[3]
return drawer
def shake(people,farm,historian):
if (historian % 2 == 0):
mathematical = 1;
message = 1
while (message <= historian / 2):
memory = (people*people) % farm;
mathematical = (memory*mathematical) % farm
message = message + 1
else:
mathematical = people;
member = 1
while( member <= historian / 2):
memory = (people*people) % farm;
mathematical = (memory*mathematical) % farm;
member = member + 1
return mathematical
#src = 'http://www.kw.com/kw/OfficeSearchSubmit.action?stateProvId=TX'
src = 'http://kwallen.yourkwoffice.com/mcj/user/AssociateSearchSubmitAction.do?orgId=2021&rows=100'
#root = scrape_content(src)
#parse_cities(root)
parse_associates(src)
| UTF-8 | Python | false | false | 5,212 | py | 9,616 | kw_realty.py | 7,963 | 0.595165 | 0.579048 | 0 | 179 | 28.111732 | 99 |
mikpim01/ReflexGame | 10,737,418,271,469 | 8d803c17f826eeac7b49ed484fd734df8bd57de6 | 101da39fc18efd02f72d43140348138517b506bb | /ReflexGame.py | 8f25d6f1cc3487edfc2ccfc882d3b9eb3012e60d | []
| no_license | https://github.com/mikpim01/ReflexGame | e319b44e9b32f5277a18bd9714ecc82a2701b1c2 | bb6625203cf13092a5dc2e4110372d8f6c43ac32 | refs/heads/main | 2023-01-05T04:22:18.324210 | 2020-10-18T10:19:46 | 2020-10-18T10:19:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #initialising pygame
import pygame
import sys
import time
from utils import *
pygame.init()
screen = pygame.display.set_mode((width,height))
def drawGrid():
blockSize = 20 #Set the size of the grid block
grid_width = pow(blockSize,2)
centering = (width - grid_width)//2
for x in range(20):
for y in range(20):
rect = pygame.Rect(x*blockSize + centering, y*blockSize + (centering-40),blockSize, blockSize)
pygame.draw.rect(screen,WHITE,rect, 1)
def main():
#initialising display
background = GRAY
screen.fill(background)
# pygame.display.update()
#creating event loop
running = True
while running:
drawGrid()
for event in pygame.event.get():
# print(event)
if(event.type == pygame.QUIT):
# screen.fill(RED)
# pygame.display.update()
# time.sleep(1)
running = False
pygame.display.update()
pygame.quit()
quit()
main()
| UTF-8 | Python | false | false | 1,032 | py | 2 | ReflexGame.py | 1 | 0.585271 | 0.573643 | 0 | 42 | 23.571429 | 106 |
ONSdigital/eq-questionnaire-runner | 4,922,032,539,962 | 219c0af0400cac281a8804a9f660b96be5eace3e | 6f866eb49d0b67f0bbbf35c34cebe2babe2f8719 | /tests/integration/questionnaire/test_questionnaire_page_titles.py | afa5d2f976714d04b196cfeaf093e8282c5496db | [
"MIT",
"LicenseRef-scancode-proprietary-license"
]
| permissive | https://github.com/ONSdigital/eq-questionnaire-runner | 681b0d081f9cff0ee4ae3017ecc61f7390d553bf | 87e7364c4d54fee99e6a5e96649123f11c4b53f1 | refs/heads/main | 2023-09-01T21:59:56.733363 | 2023-08-31T15:07:55 | 2023-08-31T15:07:55 | 219,752,509 | 12 | 18 | MIT | false | 2023-09-14T11:37:31 | 2019-11-05T13:32:18 | 2023-06-13T14:56:23 | 2023-09-14T11:37:30 | 26,445 | 7 | 11 | 8 | Python | false | false | from tests.integration.integration_test_case import IntegrationTestCase
class TestQuestionnairePageTitles(IntegrationTestCase):
def test_introduction_has_introduction_in_page_title(self):
# Given, When
self.launchSurvey("test_submit_with_custom_submission_text")
# Then
self.assertEqualPageTitle("Introduction - Submit without summary")
def test_should_have_question_in_page_title_when_loading_questionnaire(self):
# Given
self.launchSurvey("test_submit_with_custom_submission_text")
# When
self.post(action="start_questionnaire")
# Then
self.assertEqualPageTitle(
"What is your favourite breakfast food - Submit without summary"
)
def test_should_have_question_in_page_title_on_submit_page(self):
# Given
self.launchSurvey("test_submit_with_custom_submission_text")
# When
self.post(action="start_questionnaire")
self.post({"breakfast-answer": ""})
# Then
self.assertEqualPageTitle("Submit your questionnaire - Submit without summary")
def test_should_have_question_in_page_title_on_submit_page_with_summary(self):
# Given
self.launchSurvey("test_percentage")
# When
self.post({"answer": ""})
self.post({"answer-decimal": ""})
# Then
self.assertEqualPageTitle(
"Check your answers and submit - Percentage Field Demo"
)
def test_should_have_survey_in_page_title_on_thank_you(self):
# Given
self.launchSurvey("test_submit_with_custom_submission_text")
self.post(action="start_questionnaire")
self.post({"breakfast-answer": ""})
# When submit
self.post()
# Then
self.assertEqualPageTitle(
"We’ve received your answers - Submit without summary"
)
def test_session_timed_out_page_title(self):
# Given
self.launchSurvey("test_submit_with_custom_submission_text")
# When
self.get("/session-expired")
# Then
self.assertEqualPageTitle("Page is not available - Submit without summary")
def test_should_have_content_title_in_page_title_on_interstitial(self):
# Given
self.launchSurvey("test_interstitial_page")
self.post(action="start_questionnaire")
# When
self.post({"favourite-breakfast": ""})
# Then
self.assertEqualPageTitle("Breakfast interstitial - Interstitial Pages")
def test_html_stripped_from_page_titles(self):
# Given
self.launchSurvey("test_markup")
# When
# Then
self.assertEqualPageTitle("This is a title with emphasis - Markup test")
def test_should_have_question_title_in_page_title_on_question(self):
# Given
self.launchSurvey("test_checkbox")
# When
# Then
self.assertEqualPageTitle(
"Which pizza toppings would you like? - Other input fields"
)
def test_should_not_use_names_in_question_page_titles(self):
# Given
self.launchSurvey(
"test_placeholder_full", display_address="68 Abingdon Road, Goathill"
)
# When
self.post({"first-name": "Kevin", "last-name": "Bacon"})
# Then
self.assertEqualPageTitle("What is … date of birth? - Placeholder Test")
def test_content_page_should_use_nested_content_text_in_page_title_if_it_exists(
self,
):
# Given
self.launchSurvey("test_interstitial_page_title")
# When
# Then
self.assertEqualPageTitle("Your RU name: … - Interstitial Page Titles")
def test_should_have_error_in_page_title_when_fail_validation(self):
# Given
self.launchSurvey("test_checkbox")
# When
self.post()
# Then
self.assertEqualPageTitle(
"Error: Which pizza toppings would you like? - Other input fields"
)
| UTF-8 | Python | false | false | 4,004 | py | 772 | test_questionnaire_page_titles.py | 599 | 0.625313 | 0.624812 | 0 | 113 | 34.380531 | 87 |
mover-io/raspee | 11,244,224,383,580 | 94c9c9248268383efcd78ccb81d3c70a7500df1d | 976ebbddb3010d3e3e2151b7a9192685fc076cd0 | /raw_switch_test.py | 9f1a588f68071a4cacc120d982c2cd3b440202d8 | []
| no_license | https://github.com/mover-io/raspee | 7dae17ca8dd4873ae38a90d4d4d1b04b706676b0 | a803a177fdb73d3089153d78e8e1d76c02c40798 | refs/heads/master | 2016-09-06T11:17:41.437405 | 2013-11-08T22:06:55 | 2013-11-08T22:06:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN)
# Loop and output the value of input pin 0
while True:
input = GPIO.input(18)
print state
time.sleep(1)
| UTF-8 | Python | false | false | 195 | py | 6 | raw_switch_test.py | 6 | 0.723077 | 0.692308 | 0 | 11 | 16.727273 | 42 |
Juan8bits/holbertonschool-higher_level_programming | 352,187,331,246 | c9fc35da121789c0058bc87472ac1b8d8c6a9972 | e2b168b9a229592fa7ead4a4d1c32c8653a86aca | /0x0B-python-input_output/8-class_to_json.py | c2f21957303f2b59a63c5df9e457b9f740575c0f | []
| no_license | https://github.com/Juan8bits/holbertonschool-higher_level_programming | ce7aea7ccdee448b666bd2080485f6ec3850febd | 289470b9193a5eecc1cad2fa464172dc93ec66f8 | refs/heads/main | 2023-04-22T02:44:34.271963 | 2021-05-12T23:54:41 | 2021-05-12T23:54:41 | 319,356,637 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
"""Function that returns the dictionary description
with simple data structure (list, dictionary,
string, integer and boolean) for JSON serialization
of an object.
"""
def class_to_json(obj):
"""class_to_json function"""
return obj.__dict__
| UTF-8 | Python | false | false | 279 | py | 104 | 8-class_to_json.py | 89 | 0.698925 | 0.695341 | 0 | 11 | 24.363636 | 54 |
PatrykDagiel/Python_Dawson | 11,441,792,889,167 | 9e18ca0c910a39afcedd81193e4b16ecdffb726e | 01494c3ac2e3281d71066ee220628afc452beb70 | /Chapter IV/dostep_swobodny.py | 24b2e875deb371d3f45b0076b3882a8bab9ddd20 | []
| no_license | https://github.com/PatrykDagiel/Python_Dawson | b3a4aab8dbb875eda54c0cd46ceed3650edc3dc7 | d3a04a5041df5ac728e2596331521191f941f536 | refs/heads/master | 2020-07-17T12:58:47.390099 | 2017-10-11T21:11:37 | 2017-10-11T21:11:37 | 94,321,332 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
word = "indeks"
high=len(word)
low=-len(word)
for i in range(10):
position = random.randrange(low, high)
print("word[", position, "]\t", word[position])
input("\nAby zakonczyc program nacisnij enter") | UTF-8 | Python | false | false | 225 | py | 32 | dostep_swobodny.py | 30 | 0.684444 | 0.675556 | 0 | 11 | 19.545455 | 51 |
edwardmpgh/event_portal | 17,583,596,112,172 | 44771e6f9893e56b0e0f0c0f54fc178227213b89 | a2ce1913d68f82acb8c5ad3a3019d3f00ee4b95e | /events/migrations/0042_attendee_comment.py | 3ea8fc507fae95d78074bc4e6ff8bebbf39af766 | []
| no_license | https://github.com/edwardmpgh/event_portal | b0cc1376ffe7549c17c4d7e164f860c22cb19a53 | 244a116ee3b9aff913aaf06413acb82c8eed2348 | refs/heads/main | 2023-05-27T17:49:42.352668 | 2021-06-16T15:31:36 | 2021-06-16T15:31:36 | 377,543,366 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2 on 2019-04-16 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0041_attendee_title'),
]
operations = [
migrations.AddField(
model_name='attendee',
name='comment',
field=models.TextField(blank=True, null=True),
),
]
| UTF-8 | Python | false | false | 388 | py | 79 | 0042_attendee_comment.py | 49 | 0.587629 | 0.541237 | 0 | 18 | 20.555556 | 58 |
tyj144/noisystudent-lite | 15,530,601,751,449 | 356ade281c19e185fe9381c4204fe6dc769e03c4 | 3a6b2532f9cfc2842b040045c5cfc6c79da4c8fb | /experiments/data_experiments/compare_sets.py | 527c3334b7d21e7e7cc3ec69755e5ed43869c045 | []
| no_license | https://github.com/tyj144/noisystudent-lite | 6102939d290be74356b20be21aaf8f4de67fed28 | 95bc74b74e09d8211d8121e17264086b9525d52f | refs/heads/main | 2023-02-10T19:29:42.453898 | 2020-12-30T22:05:38 | 2020-12-30T22:05:38 | 308,987,774 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Find percentage of overlap between Tiny ImageNet and ImageNet-A labels.
Results:
74 / 200 of Tiny ImageNet labels are in ImageNet-A, 0.37
Tiny ImageNet # of labels: 200
ImageNet-A # of labels: 200
'''
with open("datasets/TinyImageNet_ids.txt", 'r') as file:
tiny_imgnet_ids = file.read().split('\n')
with open("datasets/ImageNetA_ids.txt", 'r') as file:
lines = file.read().split('\n')
imgnet_a_ids = dict([tuple(line.split(' ', 1)) for line in lines])
count = 0
overlap = []
print('Overlapping IDs:')
for id in tiny_imgnet_ids:
if id in imgnet_a_ids:
count += 1
print(id, imgnet_a_ids[id])
overlap.append(id)
print('Tiny ImageNet # of labels:', len(tiny_imgnet_ids))
print('ImageNet-A # of labels:', len(imgnet_a_ids))
print('% of Tiny ImageNet IDs in ImageNet-A', count, '/',
len(tiny_imgnet_ids), count / len(tiny_imgnet_ids))
'''
Overlapping IDs:
n04067472 reel
n04540053 volleyball
n04099969 rocking chair
n07749582 lemon
n01641577 American bullfrog
n02802426 basketball
n09246464 cliff
n03891332 parking meter
n02106662 German Shepherd Dog
n02279972 monarch butterfly
n04146614 school bus
n04507155 umbrella
n03854065 organ
n03804744 nail
n02486410 baboon
n01944390 snail
n04275548 spider web
n07695742 pretzel
n01774750 tarantula
n07753592 banana
n02233338 cockroach
n02236044 mantis
n07583066 guacamole
n04456115 torch
n01855672 goose
n01882714 koala
n02669723 academic gown
n02165456 ladybug
n02099601 Golden Retriever
n02948072 candle
n02206856 bee
n02814860 lighthouse
n01910747 jellyfish
n04133789 sandal
n02268443 dragonfly
n07734744 mushroom
n04562935 water tower
n03014705 chest
n02190166 fly
n03670208 limousine
n04366367 suspension bridge
n03026506 Christmas stocking
n02906734 broom
n01770393 scorpion
n04118538 rugby ball
n04179913 sewing machine
n02123394 Persian cat
n02793495 barn
n02730930 apron
n03388043 fountain
n02837789 bikini
n04399382 teddy bear
n03355925 flagpole
n03250847 drumstick
n03255030 dumbbell
n02883205 bow tie
n01698640 American alligator
n01784675 centipede
n04376876 syringe
n03444034 go-kart
n04532670 viaduct
n07768694 pomegranate
n02999410 chain
n03617480 kimono
n02410509 bison
n02226429 grasshopper
n02231487 stick insect
n02085620 Chihuahua
n02129165 lion
n03837869 obelisk
n02815834 beaker
n07720875 bell pepper
n12267677 acorn
n02504458 African bush elephant
'''
| UTF-8 | Python | false | false | 2,478 | py | 23 | compare_sets.py | 12 | 0.75908 | 0.513317 | 0 | 107 | 22.158879 | 75 |
pokabu55/ImageViewer | 15,539,191,691,728 | 30ff0abef62761ac1a06b74b56147075fca11ee8 | 2fdd741dc9ca66b8c79c5890ea8ce99d2b9fbdb1 | /ImageViewer.py | 9cd152a2db7e2590cbd15f90683b56169d79714b | []
| no_license | https://github.com/pokabu55/ImageViewer | d69cc6e70f95aff2afee81dedf55e363fd9265ce | 74da14d9f1cfc5d2c45e9be8f5e71f2be16ff341 | refs/heads/master | 2022-07-11T23:24:48.318280 | 2022-07-06T11:34:14 | 2022-07-06T11:34:14 | 193,224,941 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import numpy as np
import tkinter as tk
import tkinter.ttk as ttk
from PIL import Image, ImageTk
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
# アプリケーション(GUI)クラス
class Application(tk.Frame):
DEBUG_LOG = True
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.create_widgets()
def create_widgets(self):
print('DEBUG:----{}----'.format(sys._getframe().f_code.co_name)) if self.DEBUG_LOG else ""
# 実行
root = tk.Tk()
myapp = Application(master=root)
myapp.master.title("My Application") # タイトル
myapp.master.geometry("1000x500") # ウィンドウの幅と高さピクセル単位で指定(width x height)
myapp.mainloop() | UTF-8 | Python | false | false | 810 | py | 23 | ImageViewer.py | 13 | 0.689041 | 0.679452 | 0 | 30 | 23.366667 | 98 |
fbesserer/DataStructuresAndAlgorithms-Problemset2 | 798,863,936,779 | b4fb1bb95ed0d409fc4f993fd08f94d2b4cf7cb9 | d1239316b8e96ca44e8efc3f72d4a2d453d19513 | /problem_6_unsorted_int_arr.py | a9a957c0f9d4ab27c047b3091133042015ce5dd8 | []
| no_license | https://github.com/fbesserer/DataStructuresAndAlgorithms-Problemset2 | b52b28f0a228b5467df3ea18f5799c9791e75c43 | f75eaf4dd66330ec70a8192685bb18ba1b9a0a70 | refs/heads/master | 2023-06-02T04:08:34.511605 | 2021-06-22T10:05:43 | 2021-06-22T10:05:43 | 379,223,615 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## Example Test Case of Ten Integers
import random
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
if not ints:
return
min = ints[0]
max = ints[0]
# loop over vals and check if smaller than min or greater than max
for int in ints:
if int < min:
min = int
elif int > max:
max = int
return (min, max)
l = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(l)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
l = [i for i in range(1, 9)] # a list containing 1 - 8
random.shuffle(l)
print ("Pass" if ((1, 8) == get_min_max(l)) else "Fail")
l = [i for i in range(2, 5)] # a list containing 2 - 4
random.shuffle(l)
print ("Pass" if ((2, 4) == get_min_max(l)) else "Fail")
l = [i for i in range(-10, -1)] # a list containing -10 - -1
random.shuffle(l)
print ("Pass" if ((-10, -2) == get_min_max(l)) else "Fail")
l = [] # an empty list
print ("Pass" if (None == get_min_max(l)) else "Fail")
# runtime = O(n) - single traversal
| UTF-8 | Python | false | false | 1,163 | py | 15 | problem_6_unsorted_int_arr.py | 8 | 0.577816 | 0.552021 | 0 | 45 | 24.844444 | 70 |
ChrisLR/BasicDungeonRL | 15,753,940,071,770 | 227d104c0c351074e9ec0150f1b044306285ee75 | 51108a50ffb48ad154f587c230045bb783f22240 | /bflib/characters/races/elf.py | cae7f2bf367643b771b198cdd39113b8ac3825b2 | [
"MIT"
]
| permissive | https://github.com/ChrisLR/BasicDungeonRL | c90bd0866c457557cccbad24e14689d5d6db7b00 | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | refs/heads/master | 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 | MIT | false | 2019-08-05T16:28:23 | 2017-09-20T21:35:19 | 2018-11-11T00:40:35 | 2019-08-05T16:28:22 | 659 | 3 | 0 | 0 | Python | false | false | from datetime import timedelta
from bflib import dice, languages, restrictions, units
from bflib.characters import abilityscores, specialabilities, savingthrows
from bflib.characters.races.base import Race
class Elf(Race):
name = "Elf"
average_height = units.Feet(5)
average_weight = units.Pound(130)
average_lifespan = timedelta(days=438000)
restriction_set = restrictions.RestrictionSet(
ability_score=restrictions.AbilityScoreRestrictionSet(
minimum_set=abilityscores.AbilityScoreSet(intelligence=9),
maximum_set=abilityscores.AbilityScoreSet(constitution=17),
),
classes=restrictions.ClassRestrictionSet(
access_combined=True
),
hit_dice_max_size=restrictions.HitDiceMaxSizeRestriction(dice.D6)
)
racial_language = languages.Elvish
special_ability_set = specialabilities.SpecialAbilitySet((
specialabilities.Darkvision,
specialabilities.DetectSecretDoor,
specialabilities.GhoulParalysisImmunity,
specialabilities.SurpriseResistance
))
saving_throw_set = savingthrows.SavingThrowSet(
paralysis_stone=-1,
spells=-2,
wands=-2
)
| UTF-8 | Python | false | false | 1,209 | py | 544 | elf.py | 541 | 0.712159 | 0.698098 | 0 | 35 | 33.542857 | 74 |
derooie/agile | 369,367,208,552 | 445a1c36dca13026c8668651c4fccdd75f5f10ab | d13b3abfbbc5b84cda87d3462ba6e548065d467e | /src/accounts/tests/test_settings.py | 0a05ddd11416e99753ba3c8c59ed1619b00d954a | []
| no_license | https://github.com/derooie/agile | f6a41bd950b2b301c749346c71cabd1ea557ee8e | 8bd105d471fd53ab12eb2f3e13681922c045fa31 | refs/heads/master | 2021-06-23T16:22:21.836242 | 2018-11-16T12:28:18 | 2018-11-16T12:28:18 | 199,092,960 | 0 | 0 | null | false | 2021-06-10T21:46:22 | 2019-07-26T23:46:04 | 2019-07-26T23:46:35 | 2021-06-10T21:46:20 | 5,043 | 0 | 0 | 1 | JavaScript | false | false | from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
from accounts.models import AgileUser, Team
from accounts.forms import AppSettingsForm
class SettingsViewTest(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user('test_user', 'test@test.com', 'testPassword')
self.team = Team.objects.create(team_name='Test team')
self.agile_user = AgileUser.objects.create(user=self.user, team=self.team)
def test_settings_view_no_access(self):
self.client.login(username='test_user', password='wrongTestPassword')
response = self.client.get(reverse('accounts:settings', kwargs={'pk': 1}))
self.assertEqual(response.status_code, 302)
def test_settings_view_access(self):
self.client.login(username='test_user', password='testPassword')
response = self.client.get(reverse('accounts:settings', kwargs={'pk': 1}))
self.assertEqual(response.status_code, 200)
# Valid form data, line is accepted chart type
def test_form_valid(self):
form = AppSettingsForm(data={'team': self.team, 'user': self.user, 'chart_type': 'line'})
self.assertTrue(form.is_valid())
# Invalid form data
def test_form_invalid(self):
form = AppSettingsForm(data={'team': self.team, 'user': self.user, 'chart_type': 'pie'})
self.assertFalse(form.is_valid())
| UTF-8 | Python | false | false | 1,457 | py | 43 | test_settings.py | 19 | 0.684969 | 0.679478 | 0 | 34 | 41.852941 | 97 |
mauronunez/api | 4,922,032,533,287 | 38ed114b61388faef3595c7b8c682755c50cb828 | bd3724258c1426ba768a5d6431a571c7849f2aa2 | /endpoints/licitacion.py | 3dd54dbde40460d2d7002bd134eeb964e639e87d | []
| no_license | https://github.com/mauronunez/api | d95b034fcef05a49f560f2c584032401a95115e5 | 41668080e72165406e3cc5d8a927e259c876e995 | refs/heads/master | 2020-12-31T02:02:18.876230 | 2015-11-27T23:06:07 | 2015-11-27T23:06:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import operator
import dateutil.parser
import falcon
from models import models_api
from utils.myjson import JSONEncoderPlus
from utils.mypeewee import ts_match
class LicitacionId(object):
@models_api.database.atomic()
def on_get(self, req, resp, licitacion_id=None):
# Obtener la licitacion
try:
if '-' in licitacion_id:
licitacion = models_api.Licitacion.get(models_api.Licitacion.codigo_licitacion == licitacion_id)
elif licitacion_id.isdigit():
licitacion_id = int(licitacion_id)
licitacion = models_api.Licitacion.get(models_api.Licitacion.id_licitacion == licitacion_id)
else:
raise models_api.Licitacion.DoesNotExist()
except models_api.Licitacion.DoesNotExist:
raise falcon.HTTPNotFound()
response = {
'id': licitacion.id_licitacion,
'codigo': licitacion.codigo_licitacion,
'nombre': licitacion.nombre_licitacion,
'descripcion': licitacion.descripcion_licitacion,
'organismo': {
'id': licitacion.id_organismo,
'categoria': licitacion.nombre_ministerio,
'nombre': licitacion.nombre_organismo
},
'unidad': {
'nombre': licitacion.nombre_unidad,
'rut': licitacion.rut_unidad,
'region': licitacion.region_unidad,
'comuna': licitacion.comuna_unidad,
'direccion': licitacion.direccion_unidad
},
'usuario': {
'cargo': licitacion.cargo_usuario_organismo,
'nombre': licitacion.nombre_usuario_organismo,
'rut': licitacion.rut_usuario_organismo
},
'responsable_contrato': {
'nombre': licitacion.nombre_responsable_contrato,
'telefono': licitacion.fono_responsable_contrato,
'email': licitacion.email_responsable_contrato
},
'responsable_pago': {
'nombre': licitacion.nombre_responsable_pago,
'email': licitacion.email_responsable_pago
},
'estado': licitacion.estado,
'fecha_cambio_estado': licitacion.fecha_cambio_estado,
'fecha_creacion': licitacion.fecha_creacion,
'fecha_publicacion': licitacion.fecha_publicacion,
'fecha_inicio': licitacion.fecha_inicio,
'fecha_final': licitacion.fecha_final,
'fecha_cierre': licitacion.fecha_cierre,
'fecha_adjudicacion': licitacion.fecha_adjudicacion,
'n_items': licitacion.items_totales,
'adjudicacion': {
'n_items': licitacion.items_adjudicados,
'monto': int(licitacion.monto_total) if licitacion.monto_total else None,
'acta': licitacion.url_acta,
} if licitacion.monto_total else None, # Only if there is an monto_total
'categorias': [
{
'id': licitacion.id_categoria_nivel1[i],
'nombre': licitacion.categoria_nivel1[i],
}
for i in range(len(licitacion.id_categoria_nivel1))]
}
response = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
callback = req.params.get('callback', None)
if callback:
response = "%s(%s)" % (callback, response)
resp.body = response
# TODO Definir orden de resultados
class Licitacion(object):
    """Falcon resource that lists licitaciones, filterable via query string.

    Supported query parameters:
        q                   -- full-text search over nombre/descripcion
        categoria_producto  -- level-1 category id(s)
        producto            -- level-3 category id(s)
        estado              -- estado code(s)
        organismo           -- organismo id(s)
        proveedor           -- winning company id(s)
        monto               -- "min|max" range(s) over monto_total
        fecha_publicacion   -- "min|max" date range(s)
        fecha_cierre        -- "min|max" date range(s)
        fecha_adjudicacion  -- "min|max" date range(s)
        orden               -- monto | -monto | fecha_publicacion | -fecha_publicacion
        pagina              -- 1-based page number
    """

    MAX_RESULTS = 10

    @staticmethod
    def _int_list_param(req, param):
        """Read *param* as a list of ints, or None when absent/empty.

        Raises falcon.HTTPBadRequest when any value is not an integer; the
        error detail embeds the parameter name, matching the messages the
        previous inline parsing produced.
        """
        values = req.params.get(param, None)
        if not values:
            return None
        if isinstance(values, basestring):
            values = [values]
        try:
            return [int(value) for value in values]
        except ValueError:
            raise falcon.HTTPBadRequest(
                "Parametro incorrecto", "%s debe ser un entero" % param)

    @staticmethod
    def _range_param(req, param, field, parse_value, bad_value_msg):
        """Build an ORed peewee clause from "min|max" range value(s) of *param*.

        Each bound is parsed with *parse_value*; either bound may be empty
        for an open-ended range.  Returns None when the parameter is absent
        or no range produced a clause.  Raises falcon.HTTPBadRequest on a
        missing pipe separator or an unparsable bound (*bad_value_msg*).
        """
        raw = req.params.get(param, None)
        if not raw:
            return None
        if isinstance(raw, basestring):
            raw = [raw]
        clauses = []
        for bounds in raw:
            bounds = bounds.split('|')
            try:
                lower = parse_value(bounds[0]) if bounds[0] else None
                upper = parse_value(bounds[1]) if bounds[1] else None
            except IndexError:
                raise falcon.HTTPBadRequest(
                    "Parametro incorrecto",
                    "Los valores en %s deben estar separados por un pipe [|]" % param)
            except ValueError:
                raise falcon.HTTPBadRequest("Parametro incorrecto", bad_value_msg)
            # NOTE(review): truthiness (not "is not None") kept from the
            # original implementation, so a numeric bound of 0 is treated
            # as absent -- confirm whether that is intended.
            if lower and upper:
                clauses.append((field >= lower) & (field <= upper))
            elif lower:
                clauses.append(field >= lower)
            elif upper:
                clauses.append(field <= upper)
        if clauses:
            return reduce(operator.or_, clauses)
        return None

    @models_api.database.atomic()
    def on_get(self, req, resp):
        # Start from all licitaciones and narrow with ANDed where clauses.
        licitaciones = models_api.Licitacion.select()
        wheres = []
        order_bys = []

        # Free-text search over name and description.
        q_q = req.params.get('q', None)
        if q_q:
            # TODO Try to make just one query over one index instead of two or more ORed queries
            wheres.append(
                ts_match(models_api.Licitacion.nombre_licitacion, q_q) |
                ts_match(models_api.Licitacion.descripcion_licitacion, q_q))

        # Integer-list filters: (param name, clause builder).  Order matters
        # only for matching the original clause sequence.
        list_filters = (
            ('categoria_producto',
             lambda v: models_api.Licitacion.id_categoria_nivel1.contains_any(v)),
            ('producto',
             lambda v: models_api.Licitacion.id_categoria_nivel3.contains_any(v)),
            ('estado', lambda v: models_api.Licitacion.estado << v),
            ('organismo', lambda v: models_api.Licitacion.id_organismo << v),
            ('proveedor',
             lambda v: models_api.Licitacion.empresas_ganadoras.contains_any(v)),
        )
        for param, make_clause in list_filters:
            values = self._int_list_param(req, param)
            if values:
                wheres.append(make_clause(values))

        # Range filters: (param, model field, bound parser, parse-error message).
        parse_date = lambda s: dateutil.parser.parse(
            s, dayfirst=True, yearfirst=True).date()
        range_filters = (
            ('monto', models_api.Licitacion.monto_total, int,
             "Los valores en monto deben ser enteros"),
            ('fecha_publicacion', models_api.Licitacion.fecha_publicacion, parse_date,
             "El formato de la fecha en fecha_publicacion no es correcto"),
            ('fecha_cierre', models_api.Licitacion.fecha_cierre, parse_date,
             "El formato de la fecha en fecha_cierre no es correcto"),
            ('fecha_adjudicacion', models_api.Licitacion.fecha_adjudicacion, parse_date,
             "El formato de la fecha en fecha_adjudicacion no es correcto"),
        )
        for param, field, parse_value, bad_value_msg in range_filters:
            clause = self._range_param(req, param, field, parse_value, bad_value_msg)
            if clause is not None:
                wheres.append(clause)

        # Ordering: optional '-' prefix selects descending order; rows with a
        # NULL sort key are excluded, as before.  Unknown values (or a
        # repeated ?orden= parameter, which arrives as a list) are ignored.
        q_orden = req.params.get('orden', None)
        if q_orden and isinstance(q_orden, basestring):
            descending = q_orden.startswith('-')
            order_field = {
                'monto': models_api.Licitacion.monto_total,
                'fecha_publicacion': models_api.Licitacion.fecha_publicacion,
            }.get(q_orden[1:] if descending else q_orden)
            if order_field is not None:
                wheres.append(order_field.is_null(False))
                order_bys.append(
                    order_field.desc() if descending else order_field.asc())

        if wheres:
            licitaciones = licitaciones.where(*wheres)
        if order_bys:
            licitaciones = licitaciones.order_by(*order_bys)

        # 1-based page number; anything non-numeric falls back to page 1.
        q_pagina = req.params.get('pagina', '1')
        q_pagina = max(int(q_pagina) if q_pagina.isdigit() else 1, 1)

        response = {
            'n_licitaciones': licitaciones.count(),
            'licitaciones': [
                {
                    'id': licitacion['id_licitacion'],
                    'codigo': licitacion['codigo_licitacion'],
                    'nombre': licitacion['nombre_licitacion'],
                    'descripcion': licitacion['descripcion_licitacion'],
                    'organismo': {
                        'id': licitacion['id_organismo'],
                        'categoria': licitacion['nombre_ministerio'],
                        'nombre': licitacion['nombre_organismo'],
                    },
                    'fecha_publicacion': licitacion['fecha_publicacion'],
                    'fecha_cierre': licitacion['fecha_cierre'],
                    'fecha_adjudicacion': licitacion['fecha_adjudicacion'],
                    'estado': licitacion['estado'],
                    'fecha_cambio_estado': licitacion['fecha_cambio_estado'],
                    'n_items': licitacion['items_totales'],
                    'adjudicacion': {
                        'n_items': licitacion['items_adjudicados'],
                        'monto': int(licitacion['monto_total']) if licitacion['monto_total'] else None,
                        'acta': licitacion['url_acta'],
                    } if licitacion['monto_total'] else None,  # Only if there is monto_total
                }
                for licitacion in licitaciones.paginate(
                    q_pagina, Licitacion.MAX_RESULTS).dicts()
            ]
        }
        resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class LicitacionIdItem(object):
    """Falcon resource that lists the items of a single licitacion."""

    @models_api.database.atomic()
    def on_get(self, req, resp, licitacion_id):
        """Return the item list of licitacion *licitacion_id* as JSON.

        Responds 404 when the id is not numeric.  Supports pagination via
        the ``pagina`` query parameter (page size 10) and JSONP via the
        ``callback`` query parameter.
        """
        # A non-numeric id can never match a licitacion -> 404.  The previous
        # code raised models_api.LicitacionIdItem.DoesNotExist but only caught
        # models_api.Licitacion.DoesNotExist -- in peewee each model gets its
        # own sibling DoesNotExist class, so the exception escaped as a 500
        # instead of producing the intended 404.
        if not licitacion_id.isdigit():
            raise falcon.HTTPNotFound()
        items = models_api.LicitacionIdItem.select().filter(
            models_api.LicitacionIdItem.licitacion == int(licitacion_id))

        # Count before pagination so n_items reflects the whole result set.
        n_items = items.count()

        # Optional 1-based pagination; non-numeric values fall back to page 1.
        q_page = req.params.get('pagina', None)
        if q_page:
            q_page = max(int(q_page) if q_page.isdigit() else 1, 1)
            items = items.paginate(q_page, 10)

        response = {
            'n_items': n_items,
            'items': [
                {
                    'adjudicacion': {
                        'cantidad': float(item['cantidad_adjudicada']),
                        'monto_unitario': int(item['monto_pesos_adjudicado']) if item['monto_pesos_adjudicado'] else None,
                        'monto_total': int(item['monto_total']) if item['monto_total'] else None,
                        'fecha': item['fecha_adjudicacion'],
                        'proveedor': {
                            'id': item['id_empresa'],
                            'nombre': item['nombre_empresa'],
                            'rut': item['rut_sucursal']
                        }
                    } if item['id_empresa'] else None,
                    'codigo_categoria': item['codigo_categoria'],
                    'nombre_categoria': item['categoria_global'],
                    'codigo_producto': item['codigo_producto'],
                    'nombre_producto': item['nombre_producto'],
                    'descripcion': item['descripcion'],
                    'unidad': item['unidad_medida'],
                    'cantidad': item['cantidad']
                }
                for item in items.dicts().iterator()]
        }
        response = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)

        # JSONP support: wrap the payload in the requested callback.
        callback = req.params.get('callback', None)
        if callback:
            response = "%s(%s)" % (callback, response)
        resp.body = response
| UTF-8 | Python | false | false | 18,484 | py | 16 | licitacion.py | 15 | 0.574064 | 0.572441 | 0 | 435 | 41.491954 | 193 |
moraygrieve/pysys | 2,757,369,033,368 | 728857a0f9757b87d2eadcd0b46f93cbaa7b170a | 113d2f4b998c049a981d5c82260c0a7316d7a8a2 | /pysys-examples/internal/testcases/PySys_internal_056/run.py | 69d59a1a18cb9395027b52ef5dbd5c5a26e25f81 | []
| no_license | https://github.com/moraygrieve/pysys | a9055b56fc7fb8c7ec3c50b641b2bfa1939e1c9c | 3f93cbedbb806b6c53de89358025f93c740ebdc3 | refs/heads/master | 2020-04-29T10:39:42.850088 | 2019-02-27T18:07:13 | 2019-02-27T18:07:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pysys.constants import *
from pysys.basetest import BaseTest
class PySysTest(BaseTest):
def execute(self):
pass
def validate(self):
self.assertTrue(True)
self.assertTrue(False)
self.addOutcome(TIMEDOUT, 'simulated timeout')
self.assertTrue(True)
self.assertGrep('not_there', expr="")
self.checkOutcome()
def checkOutcome(self):
outcome = self.getOutcome()
self.log.info('Outcome is %s' % self.outcome)
self.log.info('Outcome is %s' % LOOKUP[outcome])
self.outcome = []
if outcome == BLOCKED: self.addOutcome(PASSED)
else: self.addOutcome(FAILED)
| UTF-8 | Python | false | false | 616 | py | 115 | run.py | 97 | 0.685065 | 0.685065 | 0 | 22 | 25.727273 | 50 |
0x706972686f/experimentation | 274,877,920,057 | 6962097a80904286c850d7faad6c9476e2618a70 | c0413119e4e6c459cd9acc9e5459ed6d4cffc71a | /python37example.py | 37ca1e7aeff4bacce270c98a6bcc5864aec5550c | []
| no_license | https://github.com/0x706972686f/experimentation | ace3f73da0d3f63f9889ed925c13ca23e58a7fd9 | 68c592e8e25e0f227e05ef11d0d9e23d665720d4 | refs/heads/master | 2020-07-29T19:57:41.884138 | 2019-09-21T07:15:07 | 2019-09-21T07:15:07 | 209,941,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This is an experiment to play with some of the new features of Python 3.7
Specifically
-Dataclasses (https://docs.python.org/3/library/dataclasses.html)
-Coroutines and Tasks (https://docs.python.org/3/library/asyncio-task.html)
-Breakpoint (https://docs.python.org/3/library/functions.html#breakpoint)
-Annotations and Typing (https://docs.python.org/3/library/typing.html)
Other features include:
-The introduction of nanosecond precision in Python
(by using an int instead of a float) - the Time package doesn't support
this level of precision though, you need to use astropy.
-Dictionaries retaining order (they'll iterate over in the same order as
inserted)
-importing data files with importlib.resources
-Executing code with python3.7 -X importtime my_script.py to see how long
it takes to import
"""
from __future__ import annotations # From future uses the python4 version of annotations, that postpone evaluations of annotations
from dataclasses import dataclass
from typing import * # Dict, Tuple, List, Set
import asyncio
# Annotations allow for proper and easier to use typing
Person = Dict[str, int]
@dataclass
#Dataclass means that __init__(), __repr__(), and __eq__() are created automatically.
class BankAccount:
name: str
age: int
money: float = 0.0
async def greeting(self) -> str:
# breakpoint() - replaces import pdb; pdb.set_trace() If you uncomment this you'll need to ensure that PYTHONBREAKPOINT environment variable to continue
return "Hello " + self.name
async def balance(self) -> float:
return self.money
async def change_money(self, change: float) -> None:
self.money += change
async def person_information(self) -> Person:
person = {}
person[self.name] = self.age
return person
async def is_child(self) -> bool:
if self.age < 18:
return True
else:
return False
async def Jons_Account() -> None:
name: str = 'Jon Snow'
age: int = 20
money: float = 100.0
jon: BankAccount = BankAccount(name, age, money)
print(await jon.greeting())
print("You currently have " + str(await jon.balance()) + " dollars in your account")
print("Is " + jon.name + " a child? " + str(await jon.is_child()))
await jon.change_money(-250.0)
print("You currently have " + str(await jon.balance()) + " dollars in your account")
async def Sansas_Account() -> None:
name: str = 'Sansa Stark'
age: int = 15
money: float = 0.0
sansa: BankAccount = BankAccount(name, age, money)
print(await sansa.greeting())
print("You currently have " + str(await sansa.balance()) + " dollars in your account")
print("Is " + sansa.name + " a child? " + str(await sansa.is_child()))
await sansa.change_money(1000.01)
print("You currently have " + str(await sansa.balance()) + " dollars in your account")
# Using async to execute the functions in parallel
async def main() -> None:
await asyncio.gather(
Sansas_Account(),
Jons_Account(),
)
if __name__ == "__main__":
asyncio.run(main()) | UTF-8 | Python | false | false | 3,214 | py | 1 | python37example.py | 1 | 0.656192 | 0.645924 | 0 | 92 | 32.956522 | 160 |
BanosLopezDA/BEATCLOU | 2,671,469,686,112 | 900830e0714ad15caff5c08cfe5129d6c56da8b3 | 47a19b098719d020feb7bacd93aaefe5c11b4afc | /SistemaAudios/models.py | 689c1d995077566c463e7529b2bdd2fd0bf78c26 | []
| no_license | https://github.com/BanosLopezDA/BEATCLOU | a8860bb3e0fd5cff6450d5a800c3cbc0c7a13a5e | 3ceedfeb9bb88525eab3444287dc104add2660b6 | refs/heads/master | 2023-02-10T22:57:56.653201 | 2021-01-08T00:56:09 | 2021-01-08T00:56:09 | 327,760,178 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
codigoSeguridad = models.CharField(max_length=20, null = True, blank=True)
class Categoria(models.Model):
idCategoria = models.AutoField(primary_key=True)
nombre = models.CharField(max_length=16)
descripcion = models.CharField(max_length=50)
def __str__(self):
return self.nombre
class Recurso(models.Model):
idRecurso = models.AutoField(primary_key=True)
nombre = models.CharField(max_length=16)
descripcion = models.CharField(max_length=56)
archivo = models.FileField(upload_to='audios/')
categoria = models.ForeignKey(Categoria, null = True, blank = True, on_delete=models.CASCADE)
usuario = models.ForeignKey(User, null = True, blank = True, on_delete=models.CASCADE)
def __str__(self):
return self.nombre
class Lista(models.Model):
idLista = models.AutoField(primary_key=True)
descripcion = models.CharField(max_length=56)
tipo = models.BooleanField()
usuario = models.ForeignKey(User, null = True, blank = True, on_delete=models.CASCADE)
class DetalleLista(models.Model):
idDetalle = models.AutoField(primary_key=True)
lista = models.ForeignKey(Lista, null = True, blank = True, on_delete=models.CASCADE)
recurso = models.ForeignKey(Recurso, null = True, blank = True, on_delete=models.CASCADE)
| UTF-8 | Python | false | false | 1,439 | py | 22 | models.py | 9 | 0.719944 | 0.711605 | 0 | 37 | 37.891892 | 97 |
gfanto/aiopika | 18,219,251,282,874 | 02932e2779dbe2bfec812ef783564aeefef0de83 | 490cee06cfdd914ee1505b4f00384602ae103bf2 | /aiopika/frame.py | 28140c338e5564a76f0eca1b8d257b224cb32ba6 | [
"MIT"
]
| permissive | https://github.com/gfanto/aiopika | 33ef4ad58c7bd49686a3351799f0ab90b24d8272 | c2da34e3169dc1592cbf89076fd6024285e2f206 | refs/heads/master | 2022-03-28T01:46:20.359848 | 2019-12-30T22:33:23 | 2019-12-30T22:33:23 | 217,752,525 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import struct
import asyncio
from typing import Union, Iterable
from . import exceptions
from . import spec
from . import amqp_object
LOGGER = logging.getLogger(__name__)
class Frame(amqp_object.AMQPObject):
"""Base Frame object mapping. Defines a behavior for all child classes for
assignment of core attributes and implementation of the a core _marshal
method which child classes use to create the binary AMQP frame.
"""
NAME = 'Frame'
def __init__(self, frame_type, channel_number):
"""Create a new instance of a frame
:param int frame_type: The frame type
:param int channel_number: The channel number for the frame
"""
self.frame_type = frame_type
self.channel_number = channel_number
def _marshal(self, pieces):
"""Create the full AMQP wire protocol frame data representation
:rtype: bytes
"""
payload = b''.join(pieces)
return struct.pack('>BHI', self.frame_type, self.channel_number,
len(payload)) + payload + bytes((spec.FRAME_END,))
def marshal(self):
"""To be ended by child classes
:raises NotImplementedError
"""
raise NotImplementedError
class Method(Frame):
"""Base Method frame object mapping. AMQP method frames are mapped on top
of this class for creating or accessing their data and attributes.
"""
NAME = 'METHOD'
def __init__(self, channel_number, method):
"""Create a new instance of a frame
:param int channel_number: The frame type
:param pika.Spec.Class.Method method: The AMQP Class.Method
"""
Frame.__init__(self, spec.FRAME_METHOD, channel_number)
self.method = method
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
pieces = self.method.encode()
pieces.insert(0, struct.pack('>I', self.method.INDEX))
return self._marshal(pieces)
class Header(Frame):
"""Header frame object mapping. AMQP content header frames are mapped
on top of this class for creating or accessing their data and attributes.
"""
NAME = 'Header'
def __init__(self, channel_number, body_size, props):
"""Create a new instance of a AMQP ContentHeader object
:param int channel_number: The channel number for the frame
:param int body_size: The number of bytes for the body
:param pika.spec.BasicProperties props: Basic.Properties object
"""
Frame.__init__(self, spec.FRAME_HEADER, channel_number)
self.body_size = body_size
self.properties = props
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
pieces = self.properties.encode()
pieces.insert(
0, struct.pack('>HxxQ', self.properties.INDEX, self.body_size))
return self._marshal(pieces)
class Body(Frame):
"""Body frame object mapping class. AMQP content body frames are mapped on
to this base class for getting/setting of attributes/data.
"""
NAME = 'Body'
def __init__(self, channel_number, fragment):
"""
Parameters:
- channel_number: int
- fragment: unicode or str
"""
Frame.__init__(self, spec.FRAME_BODY, channel_number)
self.fragment = fragment
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
return self._marshal([self.fragment])
class Heartbeat(Frame):
"""Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped
on to this class for a common access structure to the attributes/data
values.
"""
NAME = 'Heartbeat'
def __init__(self):
"""Create a new instance of the Heartbeat frame"""
Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
return self._marshal(list())
class ProtocolHeader(amqp_object.AMQPObject):
"""AMQP Protocol header frame class which provides a pythonic interface
for creating AMQP Protocol headers
"""
NAME = 'ProtocolHeader'
def __init__(self, major=None, minor=None, revision=None):
"""Construct a Protocol Header frame object for the specified AMQP
version
:param int major: Major version number
:param int minor: Minor version number
:param int revision: Revision
"""
self.frame_type = -1
self.major = major or spec.PROTOCOL_VERSION[0]
self.minor = minor or spec.PROTOCOL_VERSION[1]
self.revision = revision or spec.PROTOCOL_VERSION[2]
def marshal(self):
"""Return the full AMQP wire protocol frame data representation of the
ProtocolHeader frame
:rtype: str
"""
return b'AMQP' + struct.pack('BBBB', 0, self.major, self.minor,
self.revision)
def get_key(method_frame: Method):
return type(method_frame.method)
def is_method(frame_value: Frame) -> bool:
return isinstance(frame_value, Method)
def is_header(frame_value: Frame) -> bool:
return isinstance(frame_value, Header)
def is_heartbeat(frame_value: Frame) -> bool:
return isinstance(frame_value, Heartbeat)
def is_body(frame_value: Frame) -> bool:
return isinstance(frame_value, Body)
def is_protocol_header(frame_value: Frame) -> bool:
return isinstance(frame_value, ProtocolHeader)
def is_method_instance(
frame_value: Frame,
cls_or_iter: Union[type, Iterable[type]]
) -> bool:
ism = is_method(frame_value)
if not ism:
return False
if isinstance(cls_or_iter, Iterable):
return isinstance(frame_value.method, tuple(cls_or_iter))
else:
return isinstance(frame_value.method, cls_or_iter)
def is_method_instance_of(
channel_number: int,
cls_or_iter: Union[type, Iterable[type]],
frame_value: Frame
) -> bool:
right_channel = frame_value.channel_number == channel_number
if not right_channel:
return False
if isinstance(cls_or_iter, Iterable):
return is_method_instance(frame_value, tuple(cls_or_iter))
else:
return is_method_instance(frame_value, cls_or_iter)
def has_content(frame_value: Method):
return spec.has_content(frame_value.method.INDEX)
class FrameDecoder:
def __init__(self, reader):
self._reader = reader
self._lock = asyncio.Lock()
async def read_frame(self):
async with self._lock:
header_data = await self._reader.readexactly(spec.FRAME_HEADER_SIZE)
try:
if header_data[0:4] == b'AMQP':
major, minor, revision = struct.unpack_from(
'BBB',
header_data,
5
)
return ProtocolHeader(major, minor, revision)
except struct.error:
pass
try:
(frame_type, channel_number, frame_size) = struct.unpack(
'>BHL', header_data)
except struct.error:
raise exceptions.InvalidFrameError('Invalid frame header')
frame_data = await self._reader.readexactly(frame_size)
frame_end = await self._reader.readexactly(spec.FRAME_END_SIZE)
if frame_end != bytes((spec.FRAME_END,)):
raise exceptions.InvalidFrameError("Invalid FRAME_END marker")
if frame_type == spec.FRAME_METHOD:
method_id = struct.unpack_from('>I', frame_data)[0]
method = spec.methods[method_id]()
method.decode(frame_data, 4)
return Method(channel_number, method)
elif frame_type == spec.FRAME_HEADER:
class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data)
properties = spec.props[class_id]()
properties.decode(frame_data[12:])
return Header(channel_number, body_size, properties)
elif frame_type == spec.FRAME_BODY:
return Body(channel_number, frame_data)
elif frame_type == spec.FRAME_HEARTBEAT:
return Heartbeat()
raise exceptions.InvalidFrameError(f"Unknown frame type: {frame_type}")
| UTF-8 | Python | false | false | 8,471 | py | 12 | frame.py | 10 | 0.617401 | 0.61563 | 0 | 294 | 27.809524 | 80 |
smolynets/ajax_test | 8,349,416,453,896 | 103c9bca202a8cd48284539712a8a712de35a29e | a2478aaaa2b470365f2eaa4687895653f15588b3 | /ajax/urls.py | b19d6e5ce9be8e9b1681b11b06445361fe753655 | []
| no_license | https://github.com/smolynets/ajax_test | 7b188a417c045816b4d7d310870d686acfbeb579 | b0ad375e461dc9636c52bdbb1b7f93d0081eea58 | refs/heads/master | 2021-05-12T04:12:01.183162 | 2018-03-28T19:00:55 | 2018-03-28T19:00:55 | 117,154,811 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.test, name='main'),
url(r'^adding', views.adding, name='adding'),
]
| UTF-8 | Python | false | false | 197 | py | 7 | urls.py | 5 | 0.675127 | 0.675127 | 0 | 8 | 23.5 | 49 |
indico/indico | 5,918,464,938,537 | cf77a451b01190f15935be256588d18863c91fb0 | 7af0ff378525ef6132f74bac0b1eb54ce4c40c08 | /indico/modules/events/sessions/blueprint.py | b550c85cf08a4550e5921924cca890ce79dca24a | [
"MIT"
]
| permissive | https://github.com/indico/indico | 1126ee0ac3e9d36510a64989ce71be9c02680831 | 463951511d3a8409f944f98f29875c4323f3e897 | refs/heads/master | 2023-08-31T11:15:00.092526 | 2023-08-30T11:07:25 | 2023-08-30T11:07:25 | 2,113,067 | 1,549 | 429 | MIT | false | 2023-09-13T20:09:56 | 2011-07-27T13:56:30 | 2023-09-10T02:22:28 | 2023-09-13T20:09:56 | 244,382 | 1,524 | 383 | 689 | Python | false | false | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import partial
from indico.modules.events.sessions.controllers.compat import compat_session
from indico.modules.events.sessions.controllers.display import (RHDisplaySession, RHDisplaySessionList,
RHExportSessionTimetableToPDF, RHExportSessionToICAL)
from indico.modules.events.sessions.controllers.management.sessions import (RHCreateSession, RHCreateSessionType,
RHDeleteSessions, RHDeleteSessionType,
RHEditSessionType, RHExportSessionsCSV,
RHExportSessionsExcel, RHExportSessionsPDF,
RHManageSessionBlock, RHManageSessionTypes,
RHModifySession, RHSessionACL,
RHSessionACLMessage, RHSessionBlocks,
RHSessionPersonList, RHSessionProtection,
RHSessionREST, RHSessionsList)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('sessions', __name__, template_folder='templates', virtual_template_folder='events/sessions',
url_prefix='/event/<int:event_id>')
_bp.add_url_rule('/manage/sessions/', 'session_list', RHSessionsList)
_bp.add_url_rule('/manage/sessions/create', 'create_session', RHCreateSession, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/delete', 'delete_sessions', RHDeleteSessions, methods=('POST',))
_bp.add_url_rule('/manage/sessions/sessions.csv', 'export_csv', RHExportSessionsCSV, methods=('POST',))
_bp.add_url_rule('/manage/sessions/sessions.xlsx', 'export_excel', RHExportSessionsExcel, methods=('POST',))
_bp.add_url_rule('/manage/sessions/sessions.pdf', 'export_pdf', RHExportSessionsPDF, methods=('POST',))
_bp.add_url_rule('/manage/sessions/<int:session_id>', 'session_rest', RHSessionREST, methods=('PATCH', 'DELETE'))
_bp.add_url_rule('/manage/sessions/<int:session_id>/blocks', 'session_blocks', RHSessionBlocks)
_bp.add_url_rule('/manage/sessions/<int:session_id>/modify', 'modify_session', RHModifySession, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/person-list/', 'person_list', RHSessionPersonList, methods=('POST',))
_bp.add_url_rule('/manage/sessions/<int:session_id>/protection', 'session_protection', RHSessionProtection,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/<int:session_id>/blocks/<int:block_id>', 'manage_session_block',
RHManageSessionBlock, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/<int:session_id>/acl', 'acl', RHSessionACL)
_bp.add_url_rule('/manage/sessions/<int:session_id>/acl-message', 'acl_message', RHSessionACLMessage)
# Session types
_bp.add_url_rule('/manage/sessions/types/', 'manage_types', RHManageSessionTypes)
_bp.add_url_rule('/manage/sessions/types/create', 'create_type', RHCreateSessionType, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/types/<int:session_type_id>', 'manage_type', RHEditSessionType,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/sessions/types/<int:session_type_id>/delete', 'delete_type', RHDeleteSessionType,
methods=('POST',))
# Display
_bp.add_url_rule('/sessions/mine', 'my_sessions', RHDisplaySessionList)
_bp.add_url_rule('/sessions/<int:session_id>/', 'display_session', RHDisplaySession)
_bp.add_url_rule('/sessions/<int:session_id>/session.ics', 'export_ics', RHExportSessionToICAL)
_bp.add_url_rule('/sessions/<int:session_id>/session-timetable.pdf', 'export_session_timetable',
RHExportSessionTimetableToPDF)
# Legacy URLs
_compat_bp = IndicoBlueprint('compat_sessions', __name__, url_prefix='/event/<int:event_id>')
_compat_bp.add_url_rule('/session/<legacy_session_id>/', 'session',
partial(compat_session, 'display_session'))
_compat_bp.add_url_rule('/session/<legacy_session_id>/session.ics', 'session_ics',
partial(compat_session, 'export_ics'))
_compat_bp.add_url_rule('/my-conference/sessions', 'my_sessions', make_compat_redirect_func(_bp, 'my_sessions'))
_compat_bp.add_url_rule('!/sessionDisplay.py', 'session_modpython',
make_compat_redirect_func(_compat_bp, 'session',
view_args_conv={'confId': 'event_id',
'sessionId': 'legacy_session_id'}))
| UTF-8 | Python | false | false | 5,130 | py | 2,723 | blueprint.py | 1,673 | 0.614425 | 0.612865 | 0 | 74 | 68.324324 | 120 |
VB6Hobbyst7/3-Dimensional-Modeling | 10,290,741,657,396 | 8e9546b532cf2a4f47c6d762666a03879d3d0fd3 | 7dea487156aa5e716780fe9cb23b4d586eaf0859 | /script.py | 79b5dadec93642b880aa30c3bd098013aa80cfdb | []
| no_license | https://github.com/VB6Hobbyst7/3-Dimensional-Modeling | bae7b04e65c5ef4322906301b01e516cbcdd8339 | 02b3f01e567cbe976afbe3acc7bf9aea8f2bd539 | refs/heads/master | 2021-05-18T23:50:50.416212 | 2019-07-14T07:12:03 | 2019-07-14T07:12:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 22:20:34 2019
@author:Wei Huajing
@company:Nanjing University
@e-mail:jerryweihuajing@126.com
@title:seismic profile model construction-execution script
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import copy as cp
import sys,os
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
from Object.o_discrete_point import discrete_point
from Module import Image as Im
from Module import Depth as Dep
from Module import Display as Dis
from Module import Dictionary as Dict
from Module import Initialize as Init
from Module import Interpolation as Int
from Module import TargetExtraction as TE
load_path='./Data/1.bmp'
images_paths=['./Data/flat_1.bmp',
'./Data/flat_2.bmp',
'./Data/flat_3.bmp']
'''
demand:
target abstraction in seismic image
'''
def FractionSurface(load_path,k):
    """Extract the k-th fraction from the seismic profile image at
    *load_path* and return its top depth samples concatenated with its
    bottom depth samples.

    NOTE(review): the caller unpacks each element as ``(pos_z, pos_y)``
    -- confirm that ordering against Depth.FractionDepth.
    """
    # Load the image and build its RGB matrix
    img_rgb=Init.LoadImage(load_path)
    # Build the RGB-related lists/dict (base-adjusted variant)
    #rgb_dict=Init.InitDict(img_rgb)
    rgb_dict=Init.InitDict(img_rgb,base_adjust=True)
#    print(rgb_dict)
    # Convert the RGB matrix into a tag matrix
    img_tag=Im.RGB2Tag(img_rgb,rgb_dict)
    # Initialize the fractions; layers/faults are derived but unused here
    total_fractions=Init.InitFractions(img_rgb,img_tag,rgb_dict)
    total_layers=Init.InitLayers(total_fractions)
    total_faults=Init.InitFaults(total_fractions)
    # Dead code from an earlier revision: collected one top surface per
    # layer instead of a single fraction's top+bottom.
#    #all surfaces in this image
#    surfaces=[]
#
#    for this_layer in total_layers:
#
#        surfaces.append(Dep.FractionDepth(this_layer,'top'))
#
#    return surfaces
    return Dep.FractionDepth(total_fractions[k],'top')+Dep.FractionDepth(total_fractions[k],'bottom')
#list of discrete points
images=[]
for k in range(2):
discrete_points=[]
pos_x=0
'''Display ERROR'''
for this_load_path in images_paths:
#different profile
pos_x+=5
#surface of this fraction
pos_surface=FractionSurface(this_load_path,k)
# print(len(pos_surface))
for pos_z,pos_y in pos_surface:
#new discrete_point object
new_discrete_point=discrete_point()
new_discrete_point.pos_x=pos_x
new_discrete_point.pos_y=pos_y
new_discrete_point.pos_z=pos_z
discrete_points.append(new_discrete_point)
pixel_step=1
#interpolation
this_img=Int.GlobalIDWInterpolation(discrete_points,pixel_step)
plt.figure()
plt.imshow(this_img,cmap='terrain')
#collect all the image
images.append(this_img)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors=['red','blue']
count=0
for this_img in images:
#3D coordinates
X,Y,Z=[],[],[]
for i in range(np.shape(this_img)[0]):
for j in range(np.shape(this_img)[1]):
X.append(j)
Y.append(i)
Z.append(this_img[i,j])
ax.plot_trisurf(X, Y, Z,color=colors[count])
plt.show()
count+=1
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
#plt.axis('equal')
#plt.zlim(min(Z),max(Z)) | UTF-8 | Python | false | false | 3,457 | py | 11 | script.py | 11 | 0.616721 | 0.606123 | 0 | 152 | 21.355263 | 101 |
kvsaijayanthkrishna/Python_Programming | 14,027,363,212,224 | f4402d4e5fbf9ea40d21be3431f30fb1e13adc97 | 5fbd4d5dae0091e829a9b41a7e00f6c5f8e1656e | /leap year or not.py | c51619ab05afbce623e3e57c99e417b6cf8dee7a | []
| no_license | https://github.com/kvsaijayanthkrishna/Python_Programming | 999ec6a91643f0d03757d7b6b1010f6dc1da130d | 0816c84c7010f550125b9e8e259104bee027eb53 | refs/heads/main | 2023-01-22T05:26:42.948436 | 2020-11-18T06:49:50 | 2020-11-18T06:49:50 | 307,591,168 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #. WAP to check whether entered year is leap year or not
def is_leap(year):
    """Return True for Gregorian leap years.

    A year is a leap year when it is divisible by 4, except century
    years (divisible by 100), which must also be divisible by 400.
    """
    if year % 100 == 0:
        return year % 400 == 0
    return year % 4 == 0


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on stdin.
    year = int(input("enter a year\t:"))
    if is_leap(year):
        # Message unified: the original printed "is leap year" for one
        # branch and "is a leap year" for the other.
        print(year, "is a leap year")
    else:
        print(year, "is not a leap year")
| UTF-8 | Python | false | false | 256 | py | 51 | leap year or not.py | 50 | 0.699219 | 0.660156 | 0 | 5 | 49.2 | 61 |
gogulakiran/TaskCompletionPortal | 987,842,524,402 | 7ec4f7ff1455d6269983866ed00656b90bedd84d | 1f9b349edcd4ae0b33d4474c32b50c9e9980913c | /cal/forms.py | 9133130baf9873ba32fa31beb9cbd3c4230373a9 | []
| no_license | https://github.com/gogulakiran/TaskCompletionPortal | c37f70f505fdf56eff624d2c44bbf91f1fd32e00 | 06bc3f40c2ee2c2d373a861ed9c20c56105db7b2 | refs/heads/master | 2022-04-16T19:38:57.330664 | 2020-04-13T06:40:57 | 2020-04-13T06:40:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.forms import ModelForm, DateInput
| UTF-8 | Python | false | false | 46 | py | 7 | forms.py | 6 | 0.847826 | 0.847826 | 0 | 1 | 45 | 45 |
manim-kindergarten/manim | 18,296,560,709,400 | e634e80b927a7e22978add6151c20a7dd7eb9d9a | 7a6644b553316ece2498e4f8f629454e0b379d23 | /manimlib/utils/space_ops.py | d6ccc280d0c318ed26be5922599c077c4462bd5d | [
"MIT"
]
| permissive | https://github.com/manim-kindergarten/manim | 9f17cac6c1c4db5db6e7f4edfe4885eee9ec1f5e | 99fe80a55cdc5c2fcc249b3645d7f1cd19852bcd | refs/heads/master | 2023-06-27T19:44:05.384032 | 2022-12-08T04:00:27 | 2022-12-08T04:00:27 | 245,434,121 | 130 | 27 | MIT | true | 2023-06-17T07:15:31 | 2020-03-06T14:00:57 | 2023-05-19T09:55:15 | 2023-01-29T13:37:43 | 77,198 | 112 | 11 | 3 | Python | false | false | from __future__ import annotations
from functools import reduce
import math
import operator as op
import platform
from mapbox_earcut import triangulate_float32 as earcut
import numpy as np
from scipy.spatial.transform import Rotation
from tqdm import tqdm as ProgressDisplay
from manimlib.constants import DOWN, OUT, RIGHT
from manimlib.constants import PI, TAU
from manimlib.utils.iterables import adjacent_pairs
from manimlib.utils.simple_functions import clip
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Callable, Iterable, Sequence
import numpy.typing as npt
def cross(v1: np.ndarray, v2: np.ndarray) -> list[np.ndarray]:
    """Cross product of two 3-vectors, returned as a plain Python list."""
    ax, ay, az = v1[0], v1[1], v1[2]
    bx, by, bz = v2[0], v2[1], v2[2]
    return [
        ay * bz - az * by,
        az * bx - ax * bz,
        ax * by - ay * bx,
    ]
def get_norm(vect: Iterable) -> float:
    """Euclidean norm of an arbitrary-length vector."""
    total = sum(x * x for x in vect)
    return total ** 0.5
def normalize(vect: np.ndarray, fall_back: np.ndarray | None = None) -> np.ndarray:
    """Return *vect* scaled to unit length.

    A zero vector cannot be normalized: in that case *fall_back* is
    returned if supplied, otherwise a zero vector of the same length.
    """
    norm = get_norm(vect)
    if norm > 0:
        return np.array(vect) / norm
    elif fall_back is not None:
        return fall_back
    else:
        return np.zeros(len(vect))
# Operations related to rotation
def quaternion_mult(*quats: Sequence[float]) -> list[float]:
    """Hamilton product of any number of quaternions.

    Quaternions are (x, y, z, w) with the real part last, matching the
    scipy Rotation convention.  With no arguments the identity
    quaternion is returned.
    """
    product = [0, 0, 0, 1] if not quats else quats[0]
    for quat in quats[1:]:
        x1, y1, z1, w1 = product
        x2, y2, z2, w2 = quat
        product = [
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
            w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        ]
    return product
def quaternion_from_angle_axis(
angle: float,
axis: np.ndarray,
) -> list[float]:
return Rotation.from_rotvec(angle * normalize(axis)).as_quat()
def angle_axis_from_quaternion(quat: Sequence[float]) -> tuple[float, np.ndarray]:
rot_vec = Rotation.from_quat(quat).as_rotvec()
norm = get_norm(rot_vec)
return norm, rot_vec / norm
def quaternion_conjugate(quaternion: Iterable) -> list:
    """Return the conjugate as a new list: x, y, z negated, w kept."""
    conj = list(quaternion)
    conj[:3] = [-c for c in conj[:3]]
    return conj
def rotate_vector(
vector: Iterable,
angle: float,
axis: np.ndarray = OUT
) -> np.ndarray | list[float]:
rot = Rotation.from_rotvec(angle * normalize(axis))
return np.dot(vector, rot.as_matrix().T)
def rotate_vector_2d(vector: Iterable, angle: float):
# Use complex numbers...because why not
z = complex(*vector) * np.exp(complex(0, angle))
return np.array([z.real, z.imag])
def rotation_matrix_transpose_from_quaternion(quat: Iterable) -> np.ndarray:
return Rotation.from_quat(quat).as_matrix()
def rotation_matrix_from_quaternion(quat: Iterable) -> np.ndarray:
return np.transpose(rotation_matrix_transpose_from_quaternion(quat))
def rotation_matrix(angle: float, axis: np.ndarray) -> np.ndarray:
"""
Rotation in R^3 about a specified axis of rotation.
"""
return Rotation.from_rotvec(angle * normalize(axis)).as_matrix()
def rotation_matrix_transpose(angle: float, axis: np.ndarray) -> np.ndarray:
return rotation_matrix(angle, axis).T
def rotation_about_z(angle: float) -> list[list[float]]:
    """3x3 rotation matrix about the z-axis, as nested Python lists."""
    c = math.cos(angle)
    s = math.sin(angle)
    return [
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ]
def rotation_between_vectors(v1, v2) -> np.ndarray:
if np.all(np.isclose(v1, v2)):
return np.identity(3)
return rotation_matrix(
angle=angle_between_vectors(v1, v2),
axis=np.cross(v1, v2)
)
def z_to_vector(vector: np.ndarray) -> np.ndarray:
return rotation_between_vectors(OUT, vector)
def angle_of_vector(vector: Sequence[float]) -> float:
    """Polar angle theta of *vector* projected onto the xy plane."""
    x, y = vector[0], vector[1]
    return np.angle(complex(x, y))
def angle_between_vectors(v1: np.ndarray, v2: np.ndarray) -> float:
"""
Returns the angle between two 3D vectors.
This angle will always be btw 0 and pi
"""
n1 = get_norm(v1)
n2 = get_norm(v2)
if n1 == 0 or n2 == 0:
return 0
cos_angle = np.dot(v1, v2) / np.float64(n1 * n2)
return math.acos(clip(cos_angle, -1, 1))
def project_along_vector(point: np.ndarray, vector: np.ndarray) -> np.ndarray:
matrix = np.identity(3) - np.outer(vector, vector)
return np.dot(point, matrix.T)
def normalize_along_axis(
array: np.ndarray,
axis: np.ndarray,
) -> np.ndarray:
norms = np.sqrt((array * array).sum(axis))
norms[norms == 0] = 1
buffed_norms = np.repeat(norms, array.shape[axis]).reshape(array.shape)
array /= buffed_norms
return array
def get_unit_normal(
v1: np.ndarray,
v2: np.ndarray,
tol: float = 1e-6
) -> np.ndarray:
v1 = normalize(v1)
v2 = normalize(v2)
cp = cross(v1, v2)
cp_norm = get_norm(cp)
if cp_norm < tol:
# Vectors align, so find a normal to them in the plane shared with the z-axis
new_cp = cross(cross(v1, OUT), v1)
new_cp_norm = get_norm(new_cp)
if new_cp_norm < tol:
return DOWN
return new_cp / new_cp_norm
return cp / cp_norm
###
def thick_diagonal(dim: int, thickness: int = 2) -> np.ndarray:
    """Binary (uint8) matrix with ones within *thickness* of the diagonal."""
    indices = np.arange(dim)
    dist_to_diag = np.abs(indices[:, np.newaxis] - indices[np.newaxis, :])
    return (dist_to_diag < thickness).astype('uint8')
def compass_directions(n: int = 4, start_vect: np.ndarray = RIGHT) -> np.ndarray:
angle = TAU / n
return np.array([
rotate_vector(start_vect, k * angle)
for k in range(n)
])
def complex_to_R3(complex_num: complex) -> np.ndarray:
return np.array((complex_num.real, complex_num.imag, 0))
def R3_to_complex(point: Sequence[float]) -> complex:
return complex(*point[:2])
def complex_func_to_R3_func(
complex_func: Callable[[complex], complex]
) -> Callable[[np.ndarray], np.ndarray]:
return lambda p: complex_to_R3(complex_func(R3_to_complex(p)))
def center_of_mass(points: Iterable[npt.ArrayLike]) -> np.ndarray:
points = [np.array(point).astype("float") for point in points]
return sum(points) / len(points)
def midpoint(
point1: Sequence[float],
point2: Sequence[float]
) -> np.ndarray:
return center_of_mass([point1, point2])
def line_intersection(
    line1: Sequence[Sequence[float]],
    line2: Sequence[Sequence[float]]
) -> np.ndarray:
    """
    Intersection point of two (infinite) lines, each described by a
    pair of points.  Raises if the lines are parallel.
    """
    (a1, a2), (b1, b2) = line1, line2
    x_diff = (a1[0] - a2[0], b1[0] - b2[0])
    y_diff = (a1[1] - a2[1], b1[1] - b2[1])

    def det2(u, v):
        # 2x2 determinant of the column vectors u and v
        return u[0] * v[1] - u[1] * v[0]

    denom = det2(x_diff, y_diff)
    if denom == 0:
        raise Exception("Lines do not intersect")
    d = (det2(a1, a2), det2(b1, b2))
    return np.array([det2(d, x_diff) / denom, det2(d, y_diff) / denom, 0])
def find_intersection(
p0: npt.ArrayLike,
v0: npt.ArrayLike,
p1: npt.ArrayLike,
v1: npt.ArrayLike,
threshold: float = 1e-5
) -> np.ndarray:
"""
Return the intersection of a line passing through p0 in direction v0
with one passing through p1 in direction v1. (Or array of intersections
from arrays of such points/directions).
For 3d values, it returns the point on the ray p0 + v0 * t closest to the
ray p1 + v1 * t
"""
p0 = np.array(p0, ndmin=2)
v0 = np.array(v0, ndmin=2)
p1 = np.array(p1, ndmin=2)
v1 = np.array(v1, ndmin=2)
m, n = np.shape(p0)
assert(n in [2, 3])
numer = np.cross(v1, p1 - p0)
denom = np.cross(v1, v0)
if n == 3:
d = len(np.shape(numer))
new_numer = np.multiply(numer, numer).sum(d - 1)
new_denom = np.multiply(denom, numer).sum(d - 1)
numer, denom = new_numer, new_denom
denom[abs(denom) < threshold] = np.inf # So that ratio goes to 0 there
ratio = numer / denom
ratio = np.repeat(ratio, n).reshape((m, n))
return p0 + ratio * v0
def get_closest_point_on_line(
    a: np.ndarray,
    b: np.ndarray,
    p: np.ndarray
) -> np.ndarray:
    """
    Foot of the perpendicular from p onto segment ab, clamped to the
    segment: if the projection falls beyond the segment, the nearer
    endpoint is returned.
    """
    # Parametrize the segment as t*a + (1 - t)*b with t in [0, 1].
    t = np.dot(p - b, a - b) / np.dot(a - b, a - b)
    t = min(max(t, 0), 1)
    return (t * a) + ((1 - t) * b)
def get_winding_number(points: Iterable[float]) -> float:
total_angle = 0
for p1, p2 in adjacent_pairs(points):
d_angle = angle_of_vector(p2) - angle_of_vector(p1)
d_angle = ((d_angle + PI) % TAU) - PI
total_angle += d_angle
return total_angle / TAU
##
def cross2d(a: np.ndarray, b: np.ndarray) -> np.ndarray:
if len(a.shape) == 2:
return a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0]
else:
return a[0] * b[1] - b[0] * a[1]
def tri_area(
    a: Sequence[float],
    b: Sequence[float],
    c: Sequence[float]
) -> float:
    """Area of triangle abc, using only the xy components (shoelace)."""
    shoelace = (
        a[0] * (b[1] - c[1])
        + b[0] * (c[1] - a[1])
        + c[0] * (a[1] - b[1])
    )
    return 0.5 * abs(shoelace)
def is_inside_triangle(
p: np.ndarray,
a: np.ndarray,
b: np.ndarray,
c: np.ndarray
) -> bool:
"""
Test if point p is inside triangle abc
"""
crosses = np.array([
cross2d(p - a, b - p),
cross2d(p - b, c - p),
cross2d(p - c, a - p),
])
return np.all(crosses > 0) or np.all(crosses < 0)
def norm_squared(v: Sequence[float]) -> float:
    """Squared Euclidean norm of a 3-vector (no square root taken)."""
    x, y, z = v[0], v[1], v[2]
    return x * x + y * y + z * z
# TODO, fails for polygons drawn over themselves
def earclip_triangulation(verts: np.ndarray, ring_ends: list[int]) -> list:
"""
Returns a list of indices giving a triangulation
of a polygon, potentially with holes
- verts is a numpy array of points
- ring_ends is a list of indices indicating where the ends of new paths are
"""
rings = [
list(range(e0, e1))
for e0, e1 in zip([0, *ring_ends], ring_ends)
]
def is_in(point, ring_id):
return abs(abs(get_winding_number([i - point for i in verts[rings[ring_id]]])) - 1) < 1e-5
def ring_area(ring_id):
ring = rings[ring_id]
s = 0
for i, j in zip(ring[1:], ring):
s += cross2d(verts[i], verts[j])
return abs(s) / 2
# Points at the same position may cause problems
for i in rings:
verts[i[0]] += (verts[i[1]] - verts[i[0]]) * 1e-6
verts[i[-1]] += (verts[i[-2]] - verts[i[-1]]) * 1e-6
# First, we should know which rings are directly contained in it for each ring
right = [max(verts[rings[i], 0]) for i in range(len(rings))]
left = [min(verts[rings[i], 0]) for i in range(len(rings))]
top = [max(verts[rings[i], 1]) for i in range(len(rings))]
bottom = [min(verts[rings[i], 1]) for i in range(len(rings))]
area = [ring_area(i) for i in range(len(rings))]
# The larger ring must be outside
rings_sorted = list(range(len(rings)))
rings_sorted.sort(key=lambda x: area[x], reverse=True)
def is_in_fast(ring_a, ring_b):
# Whether a is in b
return reduce(op.and_, (
left[ring_b] <= left[ring_a] <= right[ring_a] <= right[ring_b],
bottom[ring_b] <= bottom[ring_a] <= top[ring_a] <= top[ring_b],
is_in(verts[rings[ring_a][0]], ring_b)
))
chilren = [[] for i in rings]
ringenum = ProgressDisplay(
enumerate(rings_sorted),
total=len(rings),
leave=False,
ascii=True if platform.system() == 'Windows' else None,
dynamic_ncols=True,
desc="SVG Triangulation",
delay=3,
)
for idx, i in ringenum:
for j in rings_sorted[:idx][::-1]:
if is_in_fast(i, j):
chilren[j].append(i)
break
res = []
# Then, we can use earcut for each part
used = [False] * len(rings)
for i in rings_sorted:
if used[i]:
continue
v = rings[i]
ring_ends = [len(v)]
for j in chilren[i]:
used[j] = True
v += rings[j]
ring_ends.append(len(v))
res += [v[i] for i in earcut(verts[v, :2], ring_ends)]
return res
| UTF-8 | Python | false | false | 12,520 | py | 217 | space_ops.py | 88 | 0.590176 | 0.566454 | 0 | 452 | 26.699115 | 98 |
Der-Eddy/file-info | 8,787,503,088,029 | 40c4e7b964bd0aaac7b2c6ec21d368f1e834e34b | 75e25efb77df8310f7541444163704b0dbf3230e | /fileinfo.py | d8ba41dceefddc457ed28bab7fc2ecf573b7d10d | [
"Unlicense"
]
| permissive | https://github.com/Der-Eddy/file-info | a54da8210b9e132f29138ed281e79f348382bd0d | 2bf0b45ea882172aa03e29b4cc761267be400b57 | refs/heads/master | 2021-01-22T22:07:40.404392 | 2017-03-22T23:48:16 | 2017-03-22T23:48:16 | 85,506,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import hashlib
import datetime
import platform
from tkinter import Tk, Label, Entry, StringVar, filedialog
class Fileinfo:
    """Gathers metadata (name, size, timestamps) and cryptographic
    digests (MD5/SHA1/SHA256/SHA512) for one file, and can present
    them on the console or in a small Tkinter window.
    """

    def __init__(self, filePath):
        self.file = filePath
        self.getStats()

    def getStats(self):
        """Read name, size and timestamps via os.stat, compute the
        digests, then prepare the Tk root window for GUI output.
        """
        self.name = os.path.basename(self.file)
        self.stats = os.stat(self.file)
        self.size = self.stats.st_size
        self.KBSize = round(self.size / 1024, 3)
        self.MBSize = round(self.size / 1024 / 1024, 3)
        self.ATime = datetime.datetime.fromtimestamp(self.stats.st_atime)
        self.MTime = datetime.datetime.fromtimestamp(self.stats.st_mtime)
        self.CTime = datetime.datetime.fromtimestamp(self.stats.st_ctime)
        self.getHashes()
        self.initGUI()

    def getHashes(self):
        """Compute all four digests of the file's contents."""
        self.MD5 = self.getHash(hashlib.md5())
        self.SHA1 = self.getHash(hashlib.sha1())
        self.SHA256 = self.getHash(hashlib.sha256())
        self.SHA512 = self.getHash(hashlib.sha512())

    def getHash(self, hashAlgorithm=None):
        """Return the hex digest of the file, read in 64 KiB chunks.

        *hashAlgorithm* is a hashlib hash object; a fresh SHA-256 is
        created when omitted.  (The previous default
        ``hashAlgorithm=hashlib.sha256()`` was evaluated once at
        definition time, so repeated default calls kept updating the
        same object and returned wrong digests after the first call.)
        """
        if hashAlgorithm is None:
            hashAlgorithm = hashlib.sha256()
        blocksize = 65536
        algo = hashAlgorithm
        with open(self.file, 'rb') as file:
            buffer = file.read(blocksize)
            while len(buffer) > 0:
                algo.update(buffer)
                buffer = file.read(blocksize)
        return algo.hexdigest()

    def makeReadable(self, format='GUI'):
        """Reformat the collected attributes in place as display
        strings for either console or GUI output.  Destructive: the
        numeric/datetime attributes are replaced by strings.
        """
        formatDate = '%d.%m.%y %H:%M:%S'
        if format == 'Console':
            self.name = f'Name: {self.name}'
            self.size = f'Size: {self.MBSize} MB ({self.size} Bytes)'
            self.ATime = f'Last Access: {self.ATime:{formatDate}}'
            self.MTime = f'Last Modified: {self.MTime:{formatDate}}'
            self.CTime = f'Created at: {self.CTime:{formatDate}}'
            self.MD5 = f'MD5: {self.MD5}'
            self.SHA1 = f'SHA1: {self.SHA1}'
            self.SHA256 = f'SHA256: {self.SHA256}'
            self.SHA512 = f'SHA512: {self.SHA512}'
        elif format == 'GUI':
            self.size = f'{self.MBSize} MB ({self.size} Bytes)'
            self.ATime = f'{self.ATime:{formatDate}}'
            self.MTime = f'{self.MTime:{formatDate}}'
            self.CTime = f'{self.CTime:{formatDate}}'

    def consoleOutput(self):
        """Print every collected attribute to stdout, then keep the
        console open on Windows."""
        self.makeReadable('Console')
        entryList = [self.name, self.size, self.ATime, self.MTime, self.CTime,
                     self.MD5, self.SHA1, self.SHA256, self.SHA512]
        for entry in entryList:
            print(entry)
        # Was `info.holdPrompt()`, which only worked because __main__
        # happened to bind a module-level global named `info`; call
        # through the instance instead.
        self.holdPrompt()

    def initGUI(self):
        """Create the Tk root (also needed for filedialog.askopenfilename)."""
        self.app = Tk()

    def GUIOutput(self):
        """Show all attributes in read-only Entry widgets."""
        self.makeReadable('GUI')
        self.app.geometry('900x200')
        self.app.wm_title(f'{self.name} - {self.SHA256}')
        labelList = ['Name: ', 'Size: ', 'Last Access: ', 'Last Modified: ',
                     'Created at: ', 'MD5: ', 'SHA1: ', 'SHA256: ', 'SHA512: ']
        for i, labelName in enumerate(labelList, start=0):
            Label(self.app, text=labelName).grid(row=i, sticky='W')
        entryList = [self.name, self.size, self.ATime, self.MTime, self.CTime,
                     self.MD5, self.SHA1, self.SHA256, self.SHA512]
        for i, entry in enumerate(entryList, start=0):
            tmpEntry = Entry(self.app, width=130)
            tmpEntry.insert(0, entry)
            # Tkinter refuses insertions on a readonly Entry, so the
            # state is only applied after the text is in place.
            tmpEntry.configure(state='readonly')
            tmpEntry.grid(row=i, column=1)
        self.app.mainloop()

    @staticmethod
    def holdPrompt():
        """On Windows, keep the console window open until Enter."""
        if platform.system() == 'Windows':
            input("Press enter to close the program")
if __name__ == '__main__':
try:
file = sys.argv[1]
except IndexError:
#root.withdraw()
file = filedialog.askopenfilename()
#file = 'C:\\Users\\Eduard\\Desktop\\GifCam.exe'
info = Fileinfo(file)
info.GUIOutput()
'''
try:
if sys.argv[2].lower == 'nogui' or sys.argv[2].lower == '--nogui':
info.consoleOutput()
else:
info.GUIOutput()
except IndexError:
info.GUIOutput()
'''
| UTF-8 | Python | false | false | 4,229 | py | 2 | fileinfo.py | 1 | 0.573658 | 0.54812 | 0 | 119 | 34.537815 | 117 |
Tribruin/AdventOfCode | 3,934,190,069,309 | 38aedfc8ef5fb3b08bf1a8af64307740ca973249 | c1c859cdae98593270d74a3baffc6317f0279a28 | /2020/Day12/main.py | 314f91b476098dd1df085f4364599fcd76135767 | []
| no_license | https://github.com/Tribruin/AdventOfCode | a7b97d19609bfc8cb7f6f5e3587d85bba5745c82 | 93709de6469093ef8d137c1dca377ce3393b4dac | refs/heads/master | 2023-08-17T22:29:39.646596 | 2023-08-05T01:43:57 | 2023-08-05T01:43:57 | 225,481,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append("/Users/rblount/Scripts/AdventOfCode/2020/tools")
from AOC import AOC
move_options = {"N": (0, -1), "E": (1, 0), "S": (0, 1), "W": (-1, 0)}
move_directions = list(move_options.values())
dir_chars = list(move_options.keys())
turn_chars = ["L", "R"]
move_char = ["F"]
class Ferry_Nav:
    """State for AoC 2020 day 12: the ferry's position, its heading
    vector, and a waypoint.  y grows downward, matching the N/S deltas
    in the module-level move_options table."""

    def __init__(self):
        self.x, self.y = 0, 0
        # Waypoint starts 10 east, 1 north of the ferry.
        self.wp_x, self.wp_y = 10, -1
        self.move_x, self.move_y = move_options["E"]

    def turn_ferry(self, direction, degrees):
        """Rotate the heading by *degrees* (a multiple of 90)."""
        for _ in range(degrees // 90):
            hx, hy = self.move_x, self.move_y
            if direction == "R":
                self.move_x, self.move_y = -hy, hx
            else:
                self.move_x, self.move_y = hy, -hx

    def move_ferry(self, direction, move_value):
        """Translate the ferry *move_value* units toward *direction*."""
        dx, dy = move_options[direction]
        self.x += dx * move_value
        self.y += dy * move_value

    def forward_part1(self, move_value):
        """Part 1 'F': advance along the current heading."""
        self.x += self.move_x * move_value
        self.y += self.move_y * move_value

    def move_waypoint(self, direction, move_value):
        """Translate the waypoint *move_value* units toward *direction*."""
        dx, dy = move_options[direction]
        self.wp_x += dx * move_value
        self.wp_y += dy * move_value

    def forward_part2(self, move_value):
        """Part 2 'F': jump toward the waypoint *move_value* times."""
        self.x += self.wp_x * move_value
        self.y += self.wp_y * move_value

    def turn_waypoint(self, direction, degrees):
        """Rotate the waypoint about the ferry by *degrees*."""
        for _ in range(degrees // 90):
            wx, wy = self.wp_x, self.wp_y
            if direction == "R":
                self.wp_x, self.wp_y = -wy, wx
            else:
                self.wp_x, self.wp_y = wy, -wx

    def manhattan_dist(self):
        """L1 distance of the ferry from its starting point."""
        return abs(self.x) + abs(self.y)
def part1():
ferry = Ferry_Nav()
for step in steps:
char, value = step["move"], step["value"]
# print(f"{char}{value}: ", end="")
if char in dir_chars:
ferry.move_ferry(char, value)
elif char in turn_chars:
ferry.turn_ferry(char, value)
else:
ferry.forward_part1(value)
# print(f"x={ferry.x} y={ferry.y} dir={ferry.direction}")
print(ferry.x, ferry.y, ferry.manhattan_dist())
def part2():
ferry = Ferry_Nav()
for step in steps:
char, value = step["move"], step["value"]
# print(f"{char}{value}: ", end="")
if char in dir_chars:
ferry.move_waypoint(char, value)
elif char in turn_chars:
ferry.turn_waypoint(char, value)
else:
ferry.forward_part2(value)
# print(f"x={ferry.x} y={ferry.y} wp_x = {ferry.wp_x} wp_y = {ferry.wp_y}")
print(ferry.x, ferry.y, ferry.manhattan_dist())
a = AOC(12, test=False)
steps = [{"move": x[0], "value": int(x[1:])} for x in a.read_lines()]
part1()
part2() | UTF-8 | Python | false | false | 2,827 | py | 181 | main.py | 171 | 0.540149 | 0.524231 | 0 | 96 | 28.458333 | 83 |
NGnius/casl | 5,720,896,449,181 | c552d3a7d756f50a44b1b3d5ee350000a42af807 | 7e246c308597762dccb129883706fb5f827b1f05 | /examples/net_debug.py | c2295d7d8523341f05e7b87df5cbb5bb3178feec | []
| no_license | https://github.com/NGnius/casl | b54bdd26003e582d77bb04b4e80e13c34074b4ad | db5bc4fbf6819ba89d0258e4c24a7fa85273d145 | refs/heads/master | 2023-03-01T08:52:31.681391 | 2021-02-05T03:12:43 | 2021-02-05T03:12:43 | 330,711,583 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import socket
import sys
import json
ip = "127.0.0.1"
port = 42069
# Create socket for server
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Do Ctrl+c to exit the program !!")
s.bind(("0.0.0.0", 3198))
# Let's send data through UDP protocol
while True:
data, addr = s.recvfrom(8192)
payload = json.loads(data.decode('utf-8'))
print("Received:", payload)
response = json.dumps({"action": {"type": "Custom"}})
s.sendto(response.encode('utf-8'), (ip, port))
print("Responded")
#data, address = s.recvfrom(4096)
#print("\n\n 2. Client received : ", data.decode('utf-8'), "\n\n")
# close the socket
s.close()
| UTF-8 | Python | false | false | 672 | py | 13 | net_debug.py | 10 | 0.644345 | 0.596726 | 0 | 26 | 24.846154 | 70 |
koliankolin/public_vk | 13,176,959,681,376 | e18ec9c952640a97fdabc8c27917c1f83d21cb74 | 7000409eea0bb0d5cdb60514672b91e2501b27ea | /Classes/vk/Post.py | bdc7eb1955b83905290c9df4bc750762186f6ce5 | []
| no_license | https://github.com/koliankolin/public_vk | 81e9b91874f29837c6b6845035d12affa02fd623 | 99b8390e3561680bcd6fa29f5fab1f0203c0d1f3 | refs/heads/master | 2022-11-17T17:42:03.819301 | 2020-07-13T09:27:20 | 2020-07-13T09:27:20 | 276,551,951 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import constants
from Classes.vk.Base import Base
from Classes.vk.Image import ImageCls
from Classes.image.NewsLoaderMirror import NewsLoaderMirror
from tqdm import tqdm
import time
class Post(Base):
def __init__(self):
super().__init__()
def post(self, publish_dates=()):
for i, date in enumerate(publish_dates):
# try:
self.api.method('wall.post', {
'owner_id': -constants.VK_GROUP_ID,
'from_group': 1,
# 'message': f'{new.teaser_en}\n\n{new.text_en}\n\n---------------------------------------\n\n{new.teaser_ru}\n\n{new.text_ru}\n\nИсточник: {new.source.capitalize()}',
'attachments': ImageCls().loadMem(),
'publish_date': date,
'signed': 0,
})
time.sleep(3)
# except:
# print('Something went wrong')
# exit(1)
# print('All posted')
| UTF-8 | Python | false | false | 967 | py | 15 | Post.py | 13 | 0.514077 | 0.509906 | 0 | 27 | 34.518519 | 184 |
palanceli/MachineLearningSample | 10,617,159,164,557 | 98618dc35419f9a92faa2a6413ac2a1eab457dd2 | f7a9153bb65b5e6fc8ea12198383c464ed8e315c | /LinearRegression/sample1.py | f36be5027fa465b2377cb32e43acfd60638a460f | []
| no_license | https://github.com/palanceli/MachineLearningSample | 398cf9bf1d1d5cff09f22618b610b839881b4ef7 | 177a891e97f553b8b63803c7ec8b595dc2f45b8b | refs/heads/master | 2021-01-22T05:01:07.515658 | 2018-05-20T16:08:01 | 2018-05-20T16:08:01 | 102,275,158 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding:utf-8 -*-
import logging
import os
import random
import pandas
import io
import sklearn
import sklearn.linear_model
import matplotlib
import matplotlib.pyplot
import mpl_toolkits.mplot3d
import numpy
class DictReader(object):
    """Reads the word dictionary at <parent of cwd>/data/dict.txt.

    Each line is expected as "<hanzi>\t<pinyin>", where the pinyin
    field may be wrapped in braces and followed by a trailing comma.
    """
    def __init__(self):
        self.path = os.path.join(os.path.dirname(os.getcwd()), 'data', 'dict.txt')
        # logging.debug(self.path)
        self.lineData = self.readData()
    def readData(self):
        # Return [(hanzi string, pinyin string), ...] parsed from the dictionary
        lineData = []
        with open(self.path) as f:
            for line in f:
                line = line.strip('\n').strip('\r')
                hanzi, pinyin = line.split('\t')
                hanzi = hanzi.strip()
                pinyin = pinyin.strip('{').strip('}').strip(',')
                lineData.append((hanzi, pinyin))
        return lineData
    def ReadRandomLines(self, lineNum):
        # Randomly sample lineNum entries, returned as [(hanzi, pinyin), ...].
        # Sampling is with replacement, so duplicates are possible.
        result = []
        for i in range(lineNum):
            iLine = int(len(self.lineData) * random.random())
            result.append(self.lineData[iLine])
        return result
class DataCreator(object):
def readRandomLines(self, dictReader):
# 随机读取cHanziLines条词 和 cPinyinLines条拼音,返回它们对应的字节数
weight = 0.01 # 随机读取的词条数占总词条数的比例关系
cHanziLines = int(len(dictReader.lineData) * random.random() * weight)
lineData = dictReader.ReadRandomLines(cHanziLines)
hanziString = ''
for (hanzi, pinyin) in lineData:
hanziString += hanzi
# logging.debug('%d %s' % (len(hanzi), hanzi))
cbHanzi = len(hanziString)
pinyinString = ''
cPinyinLines = int(len(dictReader.lineData) * random.random()* weight)
lineData = dictReader.ReadRandomLines(cPinyinLines)
for (hanzi, pinyin) in lineData:
pinyinString += pinyin
# logging.debug('%d %s' % (len(pinyin.encode('utf-8')), pinyin))
cbPinyin = len(pinyinString)
return (cHanziLines, cPinyinLines, cbHanzi, cbPinyin)
def CreateSampleForSingleFeature(self, nSample):
# 产生nSample个样本:[(中文词条数,文件大小), *]
result = []
dictReader = DictReader()
for i in range(nSample):
cHanziLines, cPinyinLines, cbHanzi, cbPinyin = self.readRandomLines(dictReader)
result.append((cHanziLines, cbHanzi))
return result
def CreateSampleForDoubleFeatures(self, nSample):
# 产生nSample个样本:[(中文词条数,拼音条数, 文件大小), *]
result = []
dictReader = DictReader()
for i in range(nSample):
cHanziLines, cPinyinLines, cbHanzi, cbPinyin = self.readRandomLines(dictReader)
result.append((cHanziLines, cPinyinLines, cbHanzi + cbPinyin))
return result
def SingleFeatureLearning():
# 单变量线性回归学习过程
dc = DataCreator()
# 生成训练样本
cSamples = 30 # 训练样本个数
samples = dc.CreateSampleForSingleFeature(cSamples)
csvData = 'lines,bytes\n'
for s in samples:
csvData += '%d,%d\n' % (s[0], s[1])
# 将训练样本读入dataFrame
dataFrame = pandas.read_csv(io.StringIO(csvData.decode('utf-8')))
logging.debug(dataFrame)
# 建立线性回归模型
regr = sklearn.linear_model.LinearRegression()
# 拟合
regr.fit(dataFrame['lines'].values.reshape(-1, 1), dataFrame['bytes']) # reshape(-1, 1)是什么意思?
# 生成测试样本
cSample = 5 # 测试样本个数
samples = dc.CreateSampleForSingleFeature(cSample)
csvTestData = 'lines,bytes\n'
for s in samples:
csvTestData += '%d,%d\n' % (s[0], s[1])
# 将训练样本读入dataFrame
testDataFrame = pandas.read_csv(io.StringIO(csvTestData.decode('utf-8')))
print(testDataFrame)
# 预测10000条词的大小
logging.debug(regr.predict(10000))
# 画图
# 1. 训练样本的点
matplotlib.pyplot.scatter(dataFrame['lines'], dataFrame['bytes'], color='blue')
# 2. 测试样本的点
matplotlib.pyplot.scatter(testDataFrame['lines'], testDataFrame['bytes'], marker='x', color='green')
# 3. 拟合直线
matplotlib.pyplot.plot(dataFrame['lines'], regr.predict(dataFrame['lines'].values.reshape(-1, 1)), color='red')
#
matplotlib.pyplot.title('words num - file bytes relationship')
matplotlib.pyplot.ylabel('file bytes')
matplotlib.pyplot.xlabel('words num')
matplotlib.pyplot.xlim(0)
matplotlib.pyplot.ylim(0)
matplotlib.pyplot.show()
class DoubleFeatureLearning(object):
def createSampleDataFrame(self, dataCreator, cSamples):
samples = dataCreator.CreateSampleForDoubleFeatures(cSamples)
csvData = 'hanziLines,pinyinLines,bytes\n'
for s in samples:
csvData += '%d,%d,%d\n' % (s[0], s[1], s[2])
# 将训练样本读入dataFrame
dataFrame = pandas.read_csv(io.StringIO(csvData.decode('utf-8')))
# logging.debug(dataFrame)
return dataFrame
def Main(self):
# 二元线性回归学习过程
dataCreator = DataCreator()
# 生成训练样本
trainingDataFrame = self.createSampleDataFrame(dataCreator, 30)
# 建立线性回归模型
regr = sklearn.linear_model.LinearRegression()
# 拟合
regr.fit(trainingDataFrame[['hanziLines', 'pinyinLines']].values.reshape(-1, 2), trainingDataFrame['bytes'])
# 生成测试样本
testingDataFrame = self.createSampleDataFrame(dataCreator, 5)
# 验证预测
predictBytes = regr.predict(testingDataFrame[['hanziLines', 'pinyinLines']].values.reshape(-1, 2))
print(predictBytes)
# 打印测试样本和预测结果
print(pandas.concat([testingDataFrame, pandas.Series(predictBytes, name="predict")], axis=1))
# 画图
fig = matplotlib.pyplot.figure()
ax = mpl_toolkits.mplot3d.Axes3D(fig)
# 绘制训练样本
ax.scatter(trainingDataFrame['hanziLines'], trainingDataFrame['pinyinLines'], trainingDataFrame['bytes'])
# 绘制测试样本
ax.scatter(testingDataFrame['hanziLines'], testingDataFrame['pinyinLines'], testingDataFrame['bytes'], marker='+', color='red')
# 绘制预测样本
ax.scatter(testingDataFrame['hanziLines'], testingDataFrame['pinyinLines'], predictBytes, marker='X', color='green')
# 绘制预测平面
xSurf, ySurf = numpy.meshgrid(numpy.linspace(trainingDataFrame['hanziLines'].min(), trainingDataFrame['hanziLines'].max(), 100),
numpy.linspace(trainingDataFrame['pinyinLines'].min(), trainingDataFrame['pinyinLines'].max(), 100))
zSurf = predictBytes[0] * xSurf + predictBytes[1] * ySurf + predictBytes[2]
# ax.plot_surface(xSurf, ySurf, zSurf, color='None', alpha = 0.4)
# 设置坐标轴
matplotlib.pyplot.title('[hanziLines, pinyinLines] - file bytes relationship')
ax.set_xlabel('hanziLines')
ax.set_ylabel('pinyinLines')
ax.set_zlabel('bytes')
matplotlib.pyplot.show()
if __name__ == '__main__':
logFmt = '%(asctime)s %(lineno)04d %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=logFmt, datefmt='%H:%M',)
learner = DoubleFeatureLearning()
learner.Main()
| UTF-8 | Python | false | false | 7,004 | py | 16 | sample1.py | 12 | 0.682157 | 0.67221 | 0 | 203 | 30.689655 | 133 |
frequent-nomad/python-start | 8,211,977,497,223 | 16d4cb722004b82534aa78dd810d215c7a7cbca5 | 4950b1819a19a9a94c1fba08a8efa6f183bda58d | /Codewars test1.py | 38ba9f0dc705ec58fd9930e33d218d5e982dd32f | []
| no_license | https://github.com/frequent-nomad/python-start | 903f31b5be79bea55fba4061f689ba1de4703540 | ed7be4ee5a6cf8fbe03e7270126cecc9ddb23b19 | refs/heads/master | 2018-09-22T22:42:11.234892 | 2018-06-06T19:48:06 | 2018-06-06T19:48:06 | 135,634,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# You are going to be given a word. return the middle character of the word.
# If the word's length is odd, return the middle character.
# If the word's length is even, return the middle 2 characters.
# #Examples:
# Kata.getMiddle("test") should return "es"
# Kata.getMiddle("testing") should return "t"
# Kata.getMiddle("middle") should return "dd"
# Kata.getMiddle("A") should return "A"
def get_middle(word):
if len(word) == 1:
return word
elif (len(word)) % 2 == 0:
half_number = int(len(word) * 0.5)
result = (word[(half_number - 1)]) + (word[half_number])
return result
else:
half_number = int(len(word) * 0.5)
result = (word[(half_number)])
return result
get_middle("zGSvprCzFsxdXUNQmrDN")
| UTF-8 | Python | false | false | 774 | py | 14 | Codewars test1.py | 14 | 0.630491 | 0.618863 | 0 | 28 | 26.607143 | 76 |
ToddPeterson/QCReview | 18,580,028,561,716 | 151298215e9255acf0e9e6249d08cb3b57e930d1 | dbb0be329cf29d44b806ce11d07c5e2db11822bd | /qc_review/tests/test_forms.py | 53bd0e9b341f84fc82339eaa24083f6d7fb09070 | []
| no_license | https://github.com/ToddPeterson/QCReview | 585c244c43870a70f963d49ae43c423b9562a0b1 | 0fb0f8e415128ba3b14163928baaf54b093b557f | refs/heads/master | 2022-05-26T21:45:53.440238 | 2020-06-06T14:06:37 | 2020-06-06T14:06:37 | 211,562,633 | 1 | 0 | null | false | 2022-04-22T22:24:52 | 2019-09-28T21:16:22 | 2020-06-06T14:06:40 | 2022-04-22T22:24:50 | 21 | 1 | 0 | 2 | Python | false | false | from django.test import TestCase
from qc_review.forms import QCRunSelectForm
from .utils import create_qcrun, create_suite
class TestForms(TestCase):
def setUp(self):
self.run1 = create_qcrun(123, 'ADAMSDEV')
self.suite1 = create_suite(self.run1, 'adamspy', 10, 5, 1)
self.run2 = create_qcrun(456, 'ADAMSDEV2')
self.suite2 = create_suite(self.run2, 'adamspy', 10, 5, 1)
def test_qcrun_select_valid_data(self):
form = QCRunSelectForm(data={
'codeline': 'ADAMSDEV',
'changelist': 123
})
self.assertTrue(form.is_valid())
def test_qcrun_select_no_data(self):
form = QCRunSelectForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 2)
def test_qcrun_select_invalid_codeline(self):
form = QCRunSelectForm(data={
'codeline': 'XXX',
'changelist': 123
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
def test_qcrun_select_invalid_changelist(self):
form = QCRunSelectForm(data={
'codeline': 'ADAMSDEV2',
'changelist': 0
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
| UTF-8 | Python | false | false | 1,285 | py | 26 | test_forms.py | 20 | 0.603891 | 0.578988 | 0 | 41 | 30.341463 | 66 |
oscarjb/sensor-box | 1,305,670,101,149 | 20f22bc5aaf6cfb672fb779134aa4fa035d6b759 | f14b8c37e72dc46f006c54d54a673c40d5398bbb | /server/migrations/0010_auto_20200228_1104.py | 9ef1d76c12b393f220fd6009a977889a3c052416 | []
| no_license | https://github.com/oscarjb/sensor-box | cbe2bb4fc0a292b73b7baddc4ecec42250ab5dbd | 19b7d46a9bec9025b300bc827465597e896cb534 | refs/heads/main | 2023-02-16T07:11:03.035084 | 2021-01-13T12:00:43 | 2021-01-13T12:00:43 | 305,773,983 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.7 on 2020-02-28 10:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0009_auto_20200213_1159'),
]
operations = [
migrations.AddField(
model_name='test',
name='outcome',
field=models.CharField(choices=[('Positive', 'Positive'), ('Negative', 'Negative')], default='Negative', max_length=10),
),
migrations.AlterField(
model_name='test',
name='date',
field=models.DateField(default=datetime.datetime(2020, 2, 28, 11, 4, 36, 642850)),
),
]
| UTF-8 | Python | false | false | 697 | py | 50 | 0010_auto_20200228_1104.py | 39 | 0.56241 | 0.48924 | 0 | 24 | 27.041667 | 132 |
PaPiix/verificationcog | 9,861,244,925,338 | 3e8d361b8c26f9bd837bb0ac509c0be6f4e72dc6 | 7cbb735e70c55f47868aa9cd7166a2b07be514a3 | /verify.py | 34d04ef22a45f9ff2d5afcac28e8f5eda13e6e5d | []
| no_license | https://github.com/PaPiix/verificationcog | f1e90543fd36e60f3c85b0edd596a6519262475c | cc67cb895583d3de7e6c2c4333a117bf26ecf9ac | refs/heads/master | 2021-09-09T16:58:34.712985 | 2018-03-18T11:44:43 | 2018-03-18T11:44:43 | 125,718,555 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
class verify:
"""Verify"""
def __init__(self, bot):
self.bot = bot
self.settings = dataIO.load_json('data/verificationsettings.json')
for s in self.settings:
self.settings[s]['usercache'] = []
def save_json(self):
dataIO.save_json("data/verification/settings.json", self.settings)
@commands.group(name="verify", pass_context=True, no_pm=True)
async def appset(self, ctx):
"""configuration settings"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
def initial_config(self, server_id):
"""makes an entry for the server, defaults to turned off"""
if server_id not in self.settings:
self.settings[server_id] = {'inactive': True,
'output': [],
'cleanup': False,
'usercache': [],
'multiout': False
}
self.save_json()
@checks.admin_or_permissions(Manage_server=True)
@appset.command(name="reset", pass_context=True, no_pm=True)
async def fix_cache(self, ctx):
"""Reset cache for verification forms"""
server = ctx.message.server
self.initial_config(ctx.message.server.id)
self.settings[server.id]['usercache'] = []
self.save_json()
await self.bot.say("Cache has been reset")
@checks.admin_or_permissions(Manage_server=True)
@appset.command(name="roles", pass_context=True, no_pm=True)
async def rolecreation(self, ctx):
server = ctx.message.server
author = ctx.message.author
aprole = discord.utils.get(server.roles, name="Verfied")
if aprole not in server.roles:
await self.bot.create_role(server, name="Verfied")
await self.bot.say("Roles has been set!")
else:
await self.bot.say("Roles already present")
@checks.admin_or_permissions(Manage_server=True)
@appset.command(name="channel", pass_context=True, no_pm=True)
async def setoutput(self, ctx, chan=None):
"""Sets a channel to embed the output of verified application once finished."""
server = ctx.message.server
if server.id not in self.settings:
self.initial_config(server.id)
if chan in self.settings[server.id]['output']:
return await self.bot.say("Channel already set as output")
for channel in server.channels:
if str(chan) == str(channel.id):
if self.settings[server.id]['multiout']:
self.settings[server.id]['output'].append(chan)
self.save_json()
return await self.bot.say("Channel added to output list")
else:
self.settings[server.id]['output'] = [chan]
self.save_json()
return await self.bot.say("Channel set as output")
await self.bot.say("I could not find a channel with that id")
@checks.admin_or_permissions(Manage_server=True)
@appset.command(name="toggle", pass_context=True, no_pm=True)
async def reg_toggle(self, ctx):
"""Toggle verification applications for the server"""
server = ctx.message.server
if server.id not in self.settings:
self.initial_config(server.id)
self.settings[server.id]['inactive'] = \
not self.settings[server.id]['inactive']
self.save_json()
if self.settings[server.id]['inactive']:
await self.bot.say("Verification disabled.")
else:
await self.bot.say("Verification enabled.")
@commands.command(name="apply", pass_context=True)
async def application(self, ctx):
""""Verify your self by following the prompts"""
author = ctx.message.author
server = ctx.message.server
aprole = discord.utils.get(server.roles, name="Verified")
if server.id not in self.settings:
return await self.bot.say("Verification applications are not setup on this server!")
if self.settings[server.id]['inactive']:
return await self.bot.say("We are not currently accepting verification applications, Try again later, Thanks!")
if aprole in author.roles:
await self.bot.say("{}You are already verified on this server!".format(author.mention))
else:
await self.bot.say("{}Lets start the Verification".format(author.mention))
while True:
avatar = author.avatar_url if author.avatar \
else author.default_avatar_url
em = discord.Embed(timestamp=ctx.message.timestamp, title="ID: {}".format(author.id), color=discord.Color.blue())
em.set_author(name='Verification for {}'.format(author.name), icon_url=avatar)
agemsg = await self.bot.send_message(author, "What is your Username? (eg: PaPí#0001)")
while True:
age = await self.bot.wait_for_message(channel=agemsg.channel, author=author, timeout=30)
if age is None:
await self.bot.send_message(author, "Sorry you took to long, please try again later!")
break
else:
em.add_field(name="Username: ", value=age.content, inline=True)
break
if age is None:
break
timemsg = await self.bot.send_message(author, "How old are you?")
while True:
time = await self.bot.wait_for_message(channel=timemsg.channel, author=author, timeout=30)
if time is None:
await self.bot.send_message(author, "Timed out, Please run command again.")
break
else:
em.add_field(name="Age:", value=time.content, inline=True)
break
if time is None:
break
nationmsg = await self.bot.send_message(author, "What is your Gender?")
while True:
nation = await self.bot.wait_for_message(channel=nationmsg.channel, author=author, timeout=30)
if nation is None:
await self.bot.send_message(author, "Timed out Please run command again")
break
else:
em.add_field(name="Gender: ", value=nation.content, inline=True)
break
if nation is None:
break
activemsg = await self.bot.send_message(author, "Personality? ( Exmaple: Kinky, Dominant, Straight, Gay, Lesbian, Bisexual or Transgender")
while True:
active = await self.bot.wait_for_message(channel=activemsg.channel, author=author, timeout=60)
if active is None:
await self.bot.send_message(author, "Timed Out. Please re-run command and try again!")
break
else:
em.add_field(name="Personality: ", value=active.content, inline=False)
break
if active is None:
break
whymsg = await self.bot.send_message(author, "Status? (Example: Taken or Single)")
while True:
why = await self.bot.wait_for_message(channel=whymsg.channel, author=author, timeout=60)
if why is None:
await self.bot.send_message(author, "Timed out, Please Re-Run command and try again!")
break
else:
em.add_field(name="Status: ", value=why.content, inline=False)
aprole = discord.utils.get(server.roles, name="Verified")
await self.bot.add_roles(author, aprole)
await self.bot.send_message(author, "You have finished the verification Process. And you are granted to all the public channels. Thank you!")
break
if why is None:
break
for output in self.settings[server.id]['output']:
where = server.get_channel(output)
if where is not None:
await self.bot.send_message(where, embed=em)
break
break
return
def check_folder():
f = 'data/verification'
if not os.path.exists(f):
os.makedirs(f)
def check_file():
f = 'data/verification/settings.json'
if dataIO.is_valid_json(f) is False:
dataIO.save_json(f, {})
def setup(bot):
check_folder()
check_file()
n = staffapp(bot)
bot.add_cog(n)
| UTF-8 | Python | false | false | 9,145 | py | 2 | verify.py | 1 | 0.553478 | 0.551947 | 0 | 193 | 46.378238 | 165 |
mxmua/airflow_project | 5,660,766,912,252 | 8208f1c1e10cf5a145e79003ba73aadb67b37be5 | e96721568355e62a380af9bb8e1c04a00af6bddb | /air_project_dag.py | 689f4a3c3e0021e9313b70516e043b22e5cecff0 | [
"MIT"
]
| permissive | https://github.com/mxmua/airflow_project | 1c99d6226719cdd315e00582fc35bf503a705cd4 | 0b88cb4a7fcdca4fb875abc633a641275d5c79f2 | refs/heads/master | 2022-11-18T17:02:38.872436 | 2020-07-06T19:34:23 | 2020-07-06T19:34:23 | 273,556,461 | 0 | 0 | MIT | true | 2020-07-06T19:34:25 | 2020-06-19T17:59:52 | 2020-06-21T12:25:13 | 2020-07-06T19:34:24 | 77 | 0 | 0 | 0 | Python | false | false | from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.python_operator import PythonOperator
from datetime import timedelta
import requests
import re
import csv
import os
import air_project as libs
import secur.credentials as ENV
args = {
'owner': 'air101',
'start_date': days_ago(1),
'retries': 0,
'retry_delay': timedelta(minutes=1),
}
def on_failure_action(*args, **kwargs):
alert_text = '--- Failure ---'
libs.bot_message(alert_text)
def sla_miss_action(*args, **kwargs):
alert_text = '--- SLA MISSED ---'
libs.bot_message(message_text=alert_text)
def load_links_from_gsheet(gsheet_url: str, stage_filename: str) -> None:
libs.write_list_to_csv(['url'],
libs.get_url_from_gsheet(
table_url=gsheet_url,
auth_json_file=ENV.GSHEET_KEY_FILE),
stage_filename)
def parse_links_watchers(stage_filename: str, result_filename: str) -> None:
libs.csv_parser(uploaded_sheet_file=stage_filename,
parsed_file_name=result_filename)
with DAG(dag_id='air101_project',
default_args=args,
schedule_interval=timedelta(days=1),
sla_miss_callback=sla_miss_action,
on_failure_callback=on_failure_action,
) as dag:
load_links_from_gsheet = PythonOperator(
task_id='load_links_from_gsheet',
python_callable=load_links_from_gsheet,
# provide_context=True,
op_kwargs={'gsheet_url': ENV.TABLE_URL,
'stage_filename': ENV.UPLOADED_GSHEET_FILE},
)
parse_links_watchers = PythonOperator(
task_id='parse_links_watchers',
python_callable=parse_links_watchers,
# provide_context=True,
op_kwargs={'stage_filename': ENV.UPLOADED_GSHEET_FILE,
'result_filename': ENV.PARSED_DATA_SET_FILE}
)
write_to_gsheet = PythonOperator(
task_id='write_to_gsheet',
python_callable=libs.write_to_gsheet
)
send_report = PythonOperator(
task_id='send_report',
python_callable=libs.render_and_send_report,
op_kwargs={'parsed_file_name': ENV.PARSED_DATA_SET_FILE},
)
load_links_from_gsheet >> parse_links_watchers >> \
write_to_gsheet >> send_report
| UTF-8 | Python | false | false | 2,357 | py | 5 | air_project_dag.py | 3 | 0.619856 | 0.615613 | 0 | 81 | 28.098765 | 76 |
TonyFlury/py-importjson | 8,650,064,143,531 | f645ae92ecf1cbb05c96edf1ed569f4c589e2b4a | 6e9b6e978e8100c6432703f1591202b6212eac63 | /sandbox/classproperty.py | 022a9382c254a7c213e244ea53e57e31434f11bb | []
| no_license | https://github.com/TonyFlury/py-importjson | 44670edb2e507cfe600c16f64dce8119e73b2b6f | 1b86453afd20a32d4124b6ee7061a70f8e28fc61 | refs/heads/master | 2021-01-10T15:19:04.065297 | 2019-05-27T22:28:12 | 2019-05-27T22:28:12 | 44,206,947 | 8 | 3 | null | false | 2019-04-09T10:18:44 | 2015-10-13T21:49:02 | 2019-04-07T09:50:28 | 2019-04-09T10:18:44 | 1,115 | 6 | 3 | 14 | Python | false | false | #!/usr/bin/env python
"""
# importjson : Implementation of classproperty.py
Summary :
<summary of module/class being implemented>
Use Case :
As a <actor> I want <outcome> So that <justification>
Testable Statements :
Can I <Boolean statement>
....
"""
__version__ = "0.1"
__author__ = 'Tony Flury : anthony.flury@btinternet.com'
__created__ = '14 Dec 2015'
import inspect
class ClassPropertyMetaClass(type):
def __setattr__(self, key, value):
if key in self.__dict__:
obj = self.__dict__.get(key)
if obj and type(obj) is ClassPropertyDescriptor:
return obj.__set__(self, value)
else:
return super(ClassPropertyMetaClass, self).__setattr__(key, value)
class ClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("can't set attribute")
if inspect.isclass(obj):
type_ = obj
obj = None
else:
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
class Bar(object):
__metaclass__ = ClassPropertyMetaClass
_x = 1
y = 1
@classproperty
def x(self):
print "fetching x"
return Bar._x
@x.setter
def x(self,value):
print "setting x before {} - after {}".format(Bar._x, value)
Bar._x = value
print "After setting x to value {}".format(Bar._x)
if __name__ == "__main__":
print Bar.x, Bar.y
Bar.x = 3
Bar.y = 4
assert Bar.x == 3
assert Bar.x == 4
| UTF-8 | Python | false | false | 2,117 | py | 20 | classproperty.py | 6 | 0.578649 | 0.572036 | 0 | 87 | 23.333333 | 78 |
holen/Python | 10,720,238,401,222 | ebdfad095411d3333af405b035f7c7b59a9e2eaa | 95f9c734c4bf5de8e5d0adff9ac2cf0228df75ac | /mailcode/adjectStrategy-new.py | e34c8811373cff85842fd7ed71fae9d25a1bf0c8 | []
| no_license | https://github.com/holen/Python | 7a996b13ff2224084397223879c380169d47ff8c | 506fff291d6e9c6f80c30a51cc3b77e9dd048468 | refs/heads/master | 2022-12-12T22:12:51.561716 | 2019-10-16T03:08:00 | 2019-10-16T03:08:00 | 14,278,665 | 1 | 0 | null | false | 2022-12-08T00:51:26 | 2013-11-10T15:29:59 | 2019-10-16T03:08:15 | 2022-12-08T00:51:24 | 3,972 | 0 | 0 | 5 | Python | false | false | #-* coding:UTF-8 -*
#!/usr/bin/env python
import common.mdb as mdb
import sys, argparse
from getloadstr import getLoadInfo
from getserverip import getServerIp
from showresource import getresource
def deloldstg(domainkey, owner_value):
sql = " delete from strategy where domain_key = '%s' and owner_value = %s "
try:
resource_conn = mdb.get_resource_conn()
#print sql % (domainkey, owner_value)
row_info = mdb.exe_update_sql(resource_conn, sql % (domainkey, owner_value), False, True, False, False)
print row_info
except Exception,e:
print e
sys.exit()
def selectstg(domainkey, owner_value):
sql = " select * from strategy s where s.domain_key = '%s' and s.owner_value = %s "
try:
resource_conn = mdb.get_resource_conn()
#print sql % (domainkey, owner_value)
result = mdb.exe_sql(resource_conn, sql % (domainkey, owner_value), False, True)
return result
except Exception,e:
print e
sys.exit()
def insertnewstg(server_id, server_ip, domainkey, resource_ids, owner_type, owner_value, for_test_msg, switch_type, switch_value, init_group_size, min_group_size):
insert_load_sql = '''
INSERT INTO `strategy`
(`server_id`, `server_ip`, `domain_key`, `resource_ids`, `owner_type`, `owner_value`, `for_test_msg`, `switch_type`, `switch_value`, `init_group_size`, `min_group_size`, `last_update_time`, `from_domains`, `remark`)
VALUES
('%s', '%s', '%s', '%s', '%s', %s, %s, '%s', %s, %s, %s, '2015-03-22 16:28:00', NULL, NULL);
'''
try:
resource_conn = mdb.get_resource_conn()
#print insert_load_sql % (server_id, server_ip, domainkey, resource_ids, owner_type, owner_value, for_test_msg, switch_type, switch_value, init_group_size, min_group_size)
row_info = mdb.exe_update_sql(resource_conn, insert_load_sql % (server_id, server_ip, domainkey, resource_ids, owner_type, owner_value, for_test_msg, switch_type, switch_value, init_group_size, min_group_size), False, True, False, False)
print row_info
except Exception,e:
print e
sys.exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="配置通道\n\nExample:\n QQ: python %(prog)s -d qqdomain -c 80008 -l 2\n 163: python %(prog)s -d netease -c 80008 -r 799\n sina: python %(prog)s -d sinadomain -c 8008 -r 799")
parser.add_argument("-d", "--domainkey", action="store", dest='domainkey', default='qqdomain',
help="domainkey: qqdomain, netease, sinadomain, elink.other, default")
parser.add_argument("-D", "--delete", action="store_true", help="Delete strategy!")
parser.add_argument("-c", "--owner_value", required=True, type=int, action="store", dest='owner_value',
help="client id")
parser.add_argument("-l", "--load_id", action="store", dest='load_id', help="load_id just for QQ domain")
parser.add_argument("-r", "--resource_ids", action="store", dest='resource_ids', help="resource_id for strategy")
parser.add_argument("-t", "--for_test_msg", action="store", type=int, dest='for_test_msg', default=0,
help="test msg, 0 or 1 ")
parser.add_argument("-o", "--owner_type", action="store", dest="owner_type", default="Client",
help="owner_type: Message, Project, Division, Client, Branch, Common")
args = parser.parse_args()
min_group_size = 1
owner_value = args.owner_value
domainkey = args.domainkey
if domainkey == "qqdomain":
switch_type = "ByGroup"
switch_value = 12
else:
switch_type = "ByTime"
switch_value = 720
if args.delete :
deloldstg(domainkey, owner_value)
print "Done! delete old strategy on client_id: %s " % owner_value
sys.exit()
owner_type = args.owner_type
for_test_msg = args.for_test_msg
flag = selectstg(domainkey, owner_value)
if(flag):
deloldstg(domainkey, owner_value)
if args.resource_ids:
rids = args.resource_ids
rinfos = getresource(rids)
for rinfo in rinfos:
server_id = rinfo['server_id']
server_ip = getServerIp(rinfo['server_id'])
resource_ids = rinfo['rids']
init_group_size = rinfo['count']
try:
resource_conn = mdb.get_resource_conn()
insertnewstg(server_id, server_ip, domainkey, resource_ids, owner_type, owner_value, for_test_msg,
switch_type, switch_value, init_group_size, min_group_size)
print "Done! Add a new strategy at client_id: %s on %s " % (owner_value, domainkey)
except Exception,e:
print e
sys.exit()
elif args.load_id:
load_id = args.load_id
load_array = getLoadInfo(load_id)
for load_list in load_array:
try:
resource_conn = mdb.get_resource_conn()
server_id = load_list['server_id']
server_ip = getServerIp(load_list['server_id'])
resource_ids = load_list['rids']
init_group_size = load_list['count']
insertnewstg(server_id, server_ip, domainkey, resource_ids, owner_type, owner_value, for_test_msg,
switch_type, switch_value, init_group_size, min_group_size)
print "Done! Add a new strategy at client_id: %s on %s " % (owner_value, domainkey)
except Exception,e:
print e
sys.exit()
else:
print "No enough argument, please get resource_ids or load_id !"
sys.exit()
| UTF-8 | Python | false | false | 5,828 | py | 171 | adjectStrategy-new.py | 147 | 0.595361 | 0.587113 | 0 | 120 | 47.5 | 245 |
Yoonhyungseon/codeReview | 5,042,291,639,560 | 9701c744802b3ee0f2185a537a96174be817c9b4 | 7adc1c5eb910fc7d7320c666b2ec6ffa5d855a64 | /pythonWorkspace/LeeBros/1_Basic(26)/3_문자열 다루기/2_부분문자열 위치 구하기.py | c891df93e94224a9ebdcd58122d3d651e0af10da | []
| no_license | https://github.com/Yoonhyungseon/codeReview | 87661f38ba9b022a60b37a9896c642e3970bc27c | c006e662d2956f977fc96755a5d0a2256975bc10 | refs/heads/main | 2023-08-05T19:42:22.437149 | 2021-09-16T14:43:59 | 2021-09-16T14:43:59 | 407,200,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
주어진 입력 문자열에 대하여 목적 문자열이 부분 문자열로 존재하는 경우, 부분 문자열의 시작 인덱스를 출력하는 코드를 작성해보세요. 인덱스는 0부터 시작한다고 가정합니다.
입력 형식
첫 번째 줄에는 입력 문자열이 주어지고,
두 번째 줄에는 목적 문자열이 주어집니다.
1 ≤ 목적 문자열의 길이(M) ≤ 입력 문자열의 길이 (N) ≤ 1,000
출력 형식
주어진 입력 문자열에 대하여 목적 문자열이 부분 문자열로 존재하는 경우, 부분 문자열의 시작 인덱스를 출력하고, 목적 문자열이 부분 문자열로 존재하지 않는 경우 -1을 출력합니다.
단 목적 문자열이 입력 문자열 내에 여러 번 나타나는 경우, 가장 앞선 인덱스를 출력해줍니다.
입출력 예제
예제1
입력:
apple
pp
출력:
1
예제2
입력:
banana
na
출력:
2
예제3
입력:
leebros
mango
출력:
-1
'''
a = input()
b = input()
switch = 0
for i in range(len(a)-len(b)+1):
if a[i:i+len(b)] == b:
print(i)
switch = 1
break
if switch == 0: print(-1) | UTF-8 | Python | false | false | 1,154 | py | 88 | 2_부분문자열 위치 구하기.py | 81 | 0.611465 | 0.582803 | 0 | 50 | 11.58 | 100 |
Tawfik-Metwally/Python | 14,929,306,357,162 | 8544adcbeaeeb4f10d902a96fe23e7c58baaf43b | b34f5da31f6a5df430f558f959527254fcf62b43 | /12-Bhaskara.py | c6f07a64c92e22a2353c3b837df4db622f477fea | []
| no_license | https://github.com/Tawfik-Metwally/Python | c090f706a6f91eb2d7a82edf1681b2beb68e6c8f | d6b38ca8b828f3deb2d9539ee6d159defaa5dcd1 | refs/heads/master | 2020-12-19T14:04:41.230898 | 2020-01-23T09:14:38 | 2020-01-23T09:14:38 | 235,755,534 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = float(input("Digite o numero a: "))
b = float(input("Digite o numero b: "))
c = float(input("Digite o numero c: "))
delta = (b**2) - (4 * a * c)
import math
if delta < 0:
print("esta equação não possui raízes reais")
else:
raiz1 = (-b + math.sqrt(delta)) / (2 * a)
raiz2 = (-b - math.sqrt(delta)) / (2 * a)
if raiz1 > raiz2:
R1 = int(raiz1)
R2 = int(raiz2)
else:
R1 = int(raiz2)
R2 = int(raiz1)
if delta == 0:
print("a raiz desta equação é", raiz1)
else:
print("as raízes da equação são",R2,"e",R1)
| UTF-8 | Python | false | false | 590 | py | 15 | 12-Bhaskara.py | 15 | 0.537133 | 0.500864 | 0 | 20 | 27.95 | 51 |
AR85AR/stiner_django | 1,675,037,267,083 | 7be55bf8a5c9183528b78b57d43a985b0b66e8c4 | 03eeb74899a8d44fdd0587cd06f413b4a67a1f30 | /contact/forms.py | 15dfa92235146da7838a7f1324d4274ccf047674 | []
| no_license | https://github.com/AR85AR/stiner_django | 74ef202a5e14145bba2d4ad3350f5796d056ead8 | d7346d725756f3a90749b682bbbdcbe1b45e65d3 | refs/heads/master | 2023-05-27T16:27:52.995170 | 2021-06-10T08:00:58 | 2021-06-10T08:00:58 | 362,386,010 | 1 | 0 | null | true | 2021-06-10T08:01:51 | 2021-04-28T08:00:09 | 2021-06-10T07:59:00 | 2021-06-10T08:01:50 | 159,282 | 0 | 0 | 0 | CSS | false | false | from django import forms
class EmailForm(forms.Form):
""" Formularz wysyłania wiadomości email """
name = forms.CharField(max_length=25,
widget=forms.TextInput(attrs={'placeholder': 'Podaj swoje imię', 'class': 'name'}))
email = forms.EmailField(widget=forms.TextInput(attrs={'placeholder': 'Podaj swój adres email', 'class': 'email'}))
comments = forms.CharField(required=False, widget=forms.Textarea(
attrs={'placeholder': 'Tu wpisz wiadomość do Nas.', 'class': 'text'}))
| UTF-8 | Python | false | false | 534 | py | 178 | forms.py | 88 | 0.662879 | 0.659091 | 0 | 11 | 47 | 119 |
Fieoner/padmiss-daemon | 171,798,740,053 | c61c35cfb08da6f0b63aff9adec51d59048ee274 | c621719284f603127376f15bf6d95be85909177d | /hid.py | 58da6946bb9c75cd223649676b6173ba7507c005 | []
| no_license | https://github.com/Fieoner/padmiss-daemon | 228f38e2da7f3a93aa1c45eb919f01ccd0185970 | 6a59d9b51b2781d09f29444c2a110e96b9b45194 | refs/heads/master | 2020-06-17T10:38:31.611655 | 2019-07-07T12:02:32 | 2019-07-07T12:02:32 | 195,898,953 | 0 | 0 | null | true | 2019-07-08T23:26:45 | 2019-07-08T23:26:45 | 2019-07-07T12:02:49 | 2019-07-07T12:02:47 | 177 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
import sys
import usb
import logging
import time
import os
from pprint import pprint
from config import PadmissConfig, ScannerConfig
log = logging.getLogger(__name__)
def listDevices():
ret = []
dev = usb.core.find(find_all=True)
for d in dev:
vendor = str(("%x" % d.idVendor).zfill(4))
product = str(("%x" % d.idProduct).zfill(4))
ret.append({'idVendor': vendor, 'idProduct': product, 'port_number': d.port_number, 'bus': d.bus})
return ret
class RFIDReader(object):
def __init__(self, scannerConfig: ScannerConfig):
self.scannerConfig = scannerConfig
result = self.connect()
if result == False:
raise RuntimeError('Not found')
def _get_find_match(self):
match = {}
if (len(self.scannerConfig.id_vendor) != 0):
match["idVendor"] = int(self.scannerConfig.id_vendor, 16)
if (len(self.scannerConfig.id_product) != 0):
match["idProduct"] = int(self.scannerConfig.id_product, 16)
if (self.scannerConfig.port_number is not None):
match["port_number"] = self.scannerConfig.port_number
if (self.scannerConfig.bus is not None):
match["bus"] = self.scannerConfig.bus
return match
def connect(self):
self.cfg = None
self.intf = None
self.detached = False
self.last_pressed = set()
match = self._get_find_match()
self.dev = usb.core.find(**match)
if self.dev is None:
log.debug('Device not found with search: %s', match)
return False
log.debug('Found device %s', repr(self.dev))
try:
self._find_intf()
except RuntimeError:
return False
if os.name != 'nt':
try:
if self.dev.is_kernel_driver_active(self.intf.bInterfaceNumber):
log.debug('Detaching kernel driver from %s', repr(self))
self.dev.detach_kernel_driver(self.intf.bInterfaceNumber)
self.detached = True
except NotImplementedError:
log.debug('Detaching kernel driver not supported on this platform')
try:
log.debug('Setting BOOT protocol on %s', repr(self))
self.dev.ctrl_transfer(0b00100001, 0x0B, 0, self.intf.bInterfaceNumber, 0)
except:
self.release()
raise
return True
def _find_intf(self):
for cfg in self.dev:
for intf in cfg:
if intf.bInterfaceClass == 3 and intf.bInterfaceSubClass == 1 and intf.bInterfaceProtocol == 1:
self.cfg = cfg
self.intf = intf
return
raise RuntimeError('%s does not appear to be RFID reader' % repr(self))
def find(self):
result = False
while result == False:
time.sleep(5)
log.debug('Searching ...')
try:
self.release()
result = self.connect()
except usb.core.USBError as e:
result = False
log.debug('Found it again')
def poll(self, initial_timeout = 500, key_timeout = 20):
typed = []
timeout = initial_timeout
while True:
try:
ep = self.intf[0]
data = ep.read(8, timeout)
# Hardcoded BOOT protocol decoding
mods = data[0]
pressed = set()
new_keys = []
for key in data[2:]:
# Specials:
# 0 = NoEvent
# 1 = ErrorRollOver
# 2 = POSTFail
# 3 = ErrorUndefined
if key == 0:
continue
elif key == 1:
return None
elif key == 2:
raise RuntimeError('%s reports POSTFail' % repr(self))
elif key == 3:
raise RuntimeError('%s reports ErrorUndefined' % repr(self))
else:
if not key in self.last_pressed:
new_keys.append(key)
pressed.add(key)
self.last_pressed = pressed
typed.extend(new_keys)
except usb.core.USBError as e:
# Ignore timeouts, why isn't there a better way to do this in PyUSB?!
if e.errno == 110 or e.errno == 10060: # 110 for linux, 10060 for windows
break
elif e.errno == 19 or e.errno == 5:
log.debug("Disconnect err")
self.find()
return ''
else:
raise
timeout = key_timeout
return ''.join(self._translate(key) for key in typed)
def _translate(self, key):
if key < 30 or key > 40:
raise RuntimeError('Missing lookup entry for key code %d' % key)
if key < 39:
return chr(ord('1') + (key - 30))
elif key == 39:
return '0'
elif key == 40:
return '\n'
def release(self):
usb.util.dispose_resources(self.dev)
if os.name != 'nt':
if self.detached:
log.debug('Reattaching kernel driver to %s', repr(self))
try:
self.dev.attach_kernel_driver(self.intf.bInterfaceNumber)
except usb.core.USBError:
log.exception('Error while reattaching kernel driver')
finally:
self.detached = False
def __repr__(self):
if self.intf:
return 'Bus %d device %d interface %d' % (self.dev.bus, self.dev.address, self.intf.bInterfaceNumber)
else:
return 'Bus %d device %d' % (self.dev.bus, self.dev.address)
# idVendor 0x08ff AuthenTec, Inc.
# idProduct 0x0009
if __name__ == '__main__':
pprint(listDevices())
r = RFIDReader(idVendor=0x08ff, idProduct=0x0009)
print('Starting read loop')
try:
while True:
data = r.poll()
if data:
print('poll result:')
print(data)
finally:
r.release()
| UTF-8 | Python | false | false | 6,399 | py | 15 | hid.py | 10 | 0.504298 | 0.490077 | 0 | 201 | 30.835821 | 113 |
TimothySeah/SOSPrinceton | 6,055,903,897,428 | f00650bc3264fbc206ba7c5a98d2f9de319a72cd | 444fa360b62816ee87d64ffc51036b00990330aa | /pickle.py | c7d5fa1dfe7168769c1cf738d8b46db46c488d8c | []
| no_license | https://github.com/TimothySeah/SOSPrinceton | 8cc09b77129d737886013ac7358ed54375841257 | 644c9c3da2ae87b637fec310ecdeb33ac8a6f6b6 | refs/heads/master | 2021-01-10T15:10:15.946859 | 2015-11-08T03:46:01 | 2015-11-08T03:46:01 | 45,764,549 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2
import numpy as np
from sklearn import mixture
from sklearn import svm
from sklearn import preprocessing
from sklearn import multiclass
from sklearn import metrics
from sklearn import cross_validation
from matplotlib.pyplot import *
def loadData(dataset):
X = np.genfromtxt(dataset + '_X.dat')
y = np.genfromtxt(dataset + '_y.dat')
return (X,y)
def generatePublicVector(X):
mu = np.mean(X,axis=0)
sigma = np.cov(X.T)
Z = gaussianMixtureSamples([mu],[sigma],samples=3*X.shape[1])
return Z
def gaussianMixtureSamples(centroids,ccov,mc=None,samples=1):
cc = centroids
D = len(cc[0])
# Check if inputs are ok:
K = len(cc)
if mc is None: # Default equally likely clusters
mc = np.ones(K) / K
if len(ccov) != K:
raise ValueError, "centroids and ccov must contain the same number" +"of elements."
if len(mc) != K:
raise ValueError, "centroids and mc must contain the same number" +"of elements."
# Check if the mixing coefficients sum to one:
EPS = 1E-15
if np.abs(1-np.sum(mc)) > EPS:
raise ValueError, "The sum of mc must be 1.0"
# Cluster selection
cs_mc = np.cumsum(mc)
cs_mc = np.concatenate(([0], cs_mc))
sel_idx = np.random.rand(samples)
# Draw samples
res = np.zeros((samples, D))
for k in range(K):
idx = (sel_idx >= cs_mc[k]) * (sel_idx < cs_mc[k+1])
ksamples = np.sum(idx)
drawn_samples = np.random.multivariate_normal(cc[k], ccov[k], ksamples)
res[idx,:] = drawn_samples
return res
def generateUniformSample(low,high):
sample = np.zeros(len(low))
for i in range(len(low)):
foo = np.random.uniform(low[i],high[i])
sample[i] = foo
return sample
def generateUserPrivacyParameters(Z, dim, userData, alpha=0.3, dist='Gaussian'):
M = Z.shape[1]
sigmaZ = np.cov(Z.T)
epsilon = np.random.multivariate_normal(np.zeros(M),alpha*sigmaZ)
Ru = np.array([])
## note that other matrix here is the transposed version of the ones in the paper, except Ru. Ru has the same dim as in the paper.
if dist == 'Gaussian':
Ru = np.random.normal(0,1,(dim,M))
else:
Ru = np.random.uniform(0,1,(dim,M))
Zu = np.inner((Z+epsilon),Ru)
Xu = np.inner((userData+epsilon),Ru)
return (Ru,epsilon,Zu,Xu)
def regression(Zu,Zc):
invQ = np.linalg.pinv(Zu)
theta = np.inner(invQ,Zc.T)
return theta
def reconstruction(Xu,theta):
Xhat = np.inner(Xu,theta.T)
return Xhat
def reconstructionError(X,Xhat):
# 2-norm
diff = Xhat-X
foo = np.inner(diff,diff)
re_2norm = np.sqrt(np.diag(foo))
(re_rmse,re_R2) = rmseAndR2(Xhat,X)
return (re_2norm,re_rmse,re_R2)
def rmseAndR2(Xhat,Xtrue):
xhat = Xhat.T
xtrue = Xtrue.T
sum_y2 = 0
sum_yp = 0
sum_p2 = 0
sum_y = 0
n = 0
for i in range(len(xtrue)):
sum_y2 += xtrue[i] * xtrue[i]
sum_yp += xtrue[i] * xhat[i]
sum_p2 += xhat[i] * xhat[i]
sum_y += xtrue[i]
n += 1
R2 = 1 - ((sum_y2 - 2*sum_yp + sum_p2)/(sum_y2 - (sum_y*sum_y)/n))
rmse = np.sqrt((sum_y2 - 2*sum_yp + sum_p2)/n)
return (rmse,R2)
def ovrSVM(X,y,svmKernel):
labelValues = range(int(min(y)),int(max(y))+1)
y2 = preprocessing.label_binarize(y,classes=labelValues)
clf = multiclass.OneVsRestClassifier(svm.SVC(kernel=svmKernel, probability=True))
clf.fit(X,y2)
return clf
def performance(prediction, target):
acc = metrics.accuracy_score(target, prediction, normalize=True)
return acc
def randomSplit(X,y,user,svmKernel='rbf',perturb = True, dim = 10, noiseInt = 0.3,RDist = 'Gaussian'):
accuracy = np.array([])
re = np.array([])
for i in range(20):
# leave 20% out for testing
skf = cross_validation.StratifiedKFold(user,n_folds=5,shuffle=True)
for cv_i,test_i in skf:
train_user = user[cv_i]
train_X = X[cv_i]
train_y = y[cv_i]
if perturb:
# 1. the cloud creates public vectors
Z = generatePublicVector(train_X)
# 2. users perturb the pub vectors and the training vectors
(Ru, epsilon, Zu, Xu) = generateUserPrivacyParameters(Z,dim,train_X,alpha=noiseInt,dist = RDist)
# 3. regression by the cloud
theta = regression(Zu, Z)
# 4. the cloud reconstructs the training vectors
Xhat = reconstruction(Xu, theta)
else:
Xhat = train_X
# do training here
clf = ovrSVM(Xhat,train_y,svmKernel)
test_user = user[test_i]
test_X = X[test_i]
test_y = y[test_i]
# do testing here
prediction = clf.predict(test_X)
labelValues = range(int(min(y)),int(max(y))+1)
test_y2 = preprocessing.label_binarize(test_y,classes=labelValues)
# record performance
foo = performance(prediction, test_y2)
accuracy = np.append(accuracy, foo)
(twoNorm,rmse,r2) = reconstructionError(train_X,Xhat)
re = np.append(re,twoNorm)
break #use only one test set and then re-shuffle
mean_acc = np.mean(accuracy)
mean_re = np.mean(re)
return (mean_acc, mean_re)
def main():
(X, y) = loadData('wine')
user = np.zeros(y.shape)
# no privacy case
(baseline_acc, baseline_re) = randomSplit(X,y,user,perturb=False)
# with diff dimensions
dimensions = (10,7,5,2)
accuracy1 = np.array([])
re1 = np.array([])
for d in dimensions:
(foo, bar) = randomSplit(X,y,user,perturb = True, dim = d)
accuracy1 = np.append(accuracy1,foo)
re1 = np.append(re1, bar)
plot(dimensions,(1-baseline_acc+accuracy1)*100)
xlim(10,2)
gca().yaxis.grid(True)
title('Reduction in accuracy vs dimension')
ylabel('% accuracy')
xlabel('dimension')
show()
plot(dimensions,re1)
xlim(10,2)
gca().yaxis.grid(True)
title('Reconstruction error vs dimension')
ylabel('Reconstruction error')
xlabel('dimension')
show()
# with diff noise intensity
alphas = (0.1, 0.3, 0.5, 0.75, 1)
accuracy2 = np.array([])
re2 = np.array([])
for a in alphas:
(foo, bar) = randomSplit(X,y,user,perturb = True, dim = 10, noiseInt=a)
accuracy2 = np.append(accuracy2,foo)
re2 = np.append(re2, bar)
plot(alphas,(1-baseline_acc+accuracy2)*100)
gca().yaxis.grid(True)
title('Reduction in accuracy vs noise intensity')
ylabel('% accuracy')
xlabel('noise intensity')
show()
plot(alphas,re2)
gca().yaxis.grid(True)
title('Reconstruction error vs noise intensity')
ylabel('Reconstruction error')
xlabel('noise intensity')
show()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 6,233 | py | 4 | pickle.py | 4 | 0.664688 | 0.646238 | 0 | 250 | 23.924 | 131 |
rpytel1/occlusion-vs-data-augmentations | 8,323,646,625,357 | 2ed5fde458527e2d76254fc7f174a5347250d7f9 | 635006678377a79d62e2e003abd744ff837f3fd6 | /code/Top_down/lib/utils/cutout_util.py | b7ebe40501c7def750119e388860b8f25e3be858 | [
"BSD-3-Clause"
]
| permissive | https://github.com/rpytel1/occlusion-vs-data-augmentations | 80a5228bdac19cb0f09db4c05d37519806bf2474 | 6a21650e578c3cc8b166dea23528d4b76b93dfde | refs/heads/master | 2023-02-16T02:30:50.191259 | 2021-01-15T11:15:41 | 2021-01-15T11:15:41 | 305,066,044 | 8 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import json
import math
import shutil
import os
from lib.utils.keypoints_constants import part_mapping, advanced_parts, keypoint_names
def get_image_mean_color(img):
return img.mean(axis=0).mean(axis=0)
def get_xy(center, widths, img_dims):
x, y = center
width_x, width_y = widths
h, w = img_dims
y1 = int(np.clip(y - width_y / 2, 0, h))
y2 = int(np.clip(y + width_y / 2, 0, h))
x1 = int(np.clip(x - width_x / 2, 0, w))
x2 = int(np.clip(x + width_x / 2, 0, w))
return x1, x2, y1, y2
def rect_cutout(image, x_center, y_center, x_length, y_length, color):
x1, x2, y1, y2 = get_xy((x_center, y_center), (x_length, y_length), image.shape[:-1])
image[y1:y2, x1:x2] = color
return image
def circle_cutout(image, x_center, y_center, radius, color):
h, w = image.shape[:-1]
if x_center >= 0 and x_center < image.shape[0] and y_center >= 0 and y_center < image.shape[1]:
x_center = max(x_center, 0)
y_center = max(y_center, 0)
x, y = np.ogrid[-y_center:h - y_center, -x_center:w - x_center]
mask = x * x + y * y <= radius * radius
image[mask] = color
return image
def circle_blur(image, x_center, y_center, radius):
# 9 times std (as it is 3 times
blurred_img = cv2.GaussianBlur(image, (2*radius-1, 2*radius-1), 0)
mask = np.zeros(image.shape, dtype=np.uint8)
mask = circle_cutout(mask, x_center, y_center, radius, np.array([255, 255, 255]))
out = np.where(mask == np.array([255, 255, 255]), blurred_img, image)
return out
def blur(image, x_center, y_center, x_length, y_length):
blurred_img = cv2.GaussianBlur(image, (31, 31), 0)
mask = np.zeros(image.shape, dtype=np.uint8)
mask = rect_cutout(mask, x_center, y_center, x_length, y_length, [255, 255, 255])
out = np.where(mask == np.array([255, 255, 255]), blurred_img, image)
return out
def get_image_dict(annotations):
images_list = [(elem['id'], elem['file_name']) for elem in annotations['images']]
return dict(images_list)
def extract_json(annotations, dataset):
keypoints_dict = {}
if dataset == "coco":
images_dict = get_image_dict(annotations)
for elem in annotations['annotations']:
file_name = images_dict[elem['image_id']]
if file_name not in keypoints_dict:
keypoints_dict[file_name] = []
keypoints = extract_keypoints(elem['keypoints'], dataset)
if keypoints != {}:
keypoints_dict[file_name].append(keypoints)
else:
for elem in annotations:
if elem['image'] not in keypoints_dict:
keypoints_dict[elem['image']] = []
keypoints = extract_keypoints(elem['joints'], "mpii")
if keypoints != {}:
keypoints_dict[elem['image']].append(keypoints)
return keypoints_dict
def extract_keypoints(keypoints_json, dataset):
keypoints_pos = {}
if dataset == "coco":
for keypoint_id in range(len(keypoints_json) // 3):
if keypoints_json[keypoint_id * 3 + 2] != 0:
keypoints_pos[keypoint_names[dataset][keypoint_id]] = (
keypoints_json[keypoint_id * 3], keypoints_json[keypoint_id * 3 + 1])
else:
keypoints_pos = {}
for i, elem in enumerate(keypoints_json):
keypoints_pos[keypoint_names[dataset][i]] = (elem[0], elem[1])
return keypoints_pos
def get_centerpoint(keypoints, key1, key2):
keypoint1 = np.array(keypoints[key1])
keypoint2 = np.array(keypoints[key2])
return np.mean(np.array([keypoint1, keypoint2]), axis=0)
def get_shifted_point(keypoints, key1, key2, ratio):
keypoint1 = np.array(keypoints[key1])
keypoint2 = np.array(keypoints[key2])
dist = ratio * abs(keypoint1 - keypoint2)
return keypoint2 - dist
def get_dist(keypoints, key1, key2, key3=None):
keypoint1 = np.array(keypoints[key1])
keypoint2 = np.array(keypoints[key2])
dist = abs(keypoint1 - keypoint2)
min_dist = np.flip(0.25 * dist)
dist = np.max([dist, min_dist], axis=0)
if key3 and key3 in keypoints.keys():
keypoint3 = np.array(keypoints[key3])
dist2 = abs(keypoint2 - keypoint3)
dist = np.array([dist, dist2]).max(axis=0)
return dist
def calculate_center_with_dist(keypoints, chosen_keypoints):
chosen_keypoints_pos = []
for key in chosen_keypoints:
if key in keypoints:
chosen_keypoints_pos.append(keypoints[key])
if len(chosen_keypoints_pos) > 1:
chosen_keypoints_arr = np.array(chosen_keypoints_pos)
min_pos = np.min(chosen_keypoints_arr, axis=0)
max_pos = np.max(chosen_keypoints_arr, axis=0)
dists = (max_pos - min_pos)
center_point = np.mean(np.array([max_pos, min_pos]), axis=0)
return center_point, dists
return (0, 0), (0, 0)
def get_cords_with_width(keypoints, part, dataset):
"""
Method using various heuristics to return coordinates of middle point for box
:param img: image on which
:param keypoints: dictionary of positions of various keypoints
:param part: part which will be occluded
:return: coordinates of a center point of box, width in X and Y of a box
"""
if part == "head":
if dataset == "coco":
out = calculate_center_with_dist(keypoints, advanced_parts[dataset]['head'])
return out[0], (1.2 * out[1][0], 1.2 * out[1][1])
else:
if "head_top" in keypoints.keys() and "upper_neck" in keypoints.keys():
dists = get_dist(keypoints, "head_top", "upper_neck")
return get_centerpoint(keypoints, "head_top", "upper_neck"), (2*dists[0], dists[1])
elif part == "hip":
if "left_hip" in keypoints.keys() and "right_hip" in keypoints.keys():
return get_centerpoint(keypoints, "left_hip", "right_hip"), 2 * get_dist(keypoints, "left_hip",
"right_hip")
elif part == "left_arm":
if "left_wrist" in keypoints.keys() and "left_elbow" in keypoints.keys() and "left_shoulder" in keypoints.keys():
return calculate_center_with_dist(keypoints, ["left_wrist", "left_elbow", "left_shoulder"])
elif "left_wrist" in keypoints.keys() and "left_elbow" in keypoints.keys():
return keypoints['left_elbow'], 2 * get_dist(keypoints, "left_wrist", "left_elbow", "left_shoulder")
elif "left_shoulder" in keypoints.keys() and "left_elbow" in keypoints.keys():
return keypoints['left_elbow'], 2 * get_dist(keypoints, "left_shoulder", "left_elbow", "left_wrist")
elif "left_shoulder" in keypoints.keys() and "left_wrist" in keypoints.keys():
return get_centerpoint(keypoints, "left_shoulder", "left_wrist"), get_dist(keypoints, "left_wrist",
"left_shoulder")
elif part == "right_arm":
if "right_wrist" in keypoints.keys() and "right_elbow" in keypoints.keys() and "right_shoulder" in keypoints.keys():
return calculate_center_with_dist(keypoints, ["right_wrist", "right_elbow", "right_shoulder"])
elif "right_wrist" in keypoints.keys() and "right_elbow" in keypoints.keys():
return keypoints['right_elbow'], 2 * get_dist(keypoints, "right_wrist", "right_elbow", "right_shoulder")
elif "right_shoulder" in keypoints.keys() and "right_elbow" in keypoints.keys():
return keypoints['right_elbow'], 2 * get_dist(keypoints, "right_shoulder", "right_elbow")
elif "right_shoulder" in keypoints.keys() and "right_wrist" in keypoints.keys():
return get_centerpoint(keypoints, "right_shoulder", "right_wrist"), get_dist(keypoints, "right_wrist",
"right_shoulder")
elif part == "left_leg":
if "left_hip" in keypoints.keys() and "left_knee" in keypoints.keys() and "left_ankle" in keypoints.keys():
return calculate_center_with_dist(keypoints, ["left_ankle", "left_knee", "left_hip"])
elif "left_hip" in keypoints.keys() and "left_knee" in keypoints.keys():
return keypoints['left_knee'], 2 * get_dist(keypoints, "left_hip", "left_knee", "left_ankle")
elif "left_ankle" in keypoints.keys() and "left_knee" in keypoints.keys():
return keypoints['left_knee'], 2 * get_dist(keypoints, "left_ankle", "left_knee")
elif "left_ankle" in keypoints.keys() and "left_hip" in keypoints.keys():
return get_centerpoint(keypoints, "left_ankle", "left_hip"), get_dist(keypoints, "left_ankle", "left_hip")
elif part == "right_leg":
if "right_hip" in keypoints.keys() and "right_knee" in keypoints.keys() and "right_ankle" in keypoints.keys():
return calculate_center_with_dist(keypoints, ["right_ankle", "right_knee", "right_hip"])
elif "right_hip" in keypoints.keys() and "right_knee" in keypoints.keys():
return keypoints['right_knee'], 2 * get_dist(keypoints, "right_hip", "right_knee", "right_ankle")
elif "right_ankle" in keypoints.keys() and "right_knee" in keypoints.keys():
return keypoints['right_knee'], 2 * get_dist(keypoints, "right_ankle", "right_knee", "right_ankle")
elif "right_ankle" in keypoints.keys() and "right_hip" in keypoints.keys():
return get_centerpoint(keypoints, "right_ankle", "right_hip"), get_dist(keypoints, "right_ankle",
"right_hip", "right_knee")
elif part == "corpus":
return calculate_center_with_dist(keypoints, advanced_parts[dataset]["corpus"])
elif part == "upper_body":
return calculate_center_with_dist(keypoints, advanced_parts[dataset]["upper_body"])
elif part == "lower_body":
return calculate_center_with_dist(keypoints, advanced_parts[dataset]["lower_body"])
elif part == "right_side":
return calculate_center_with_dist(keypoints, advanced_parts[dataset]["right_side"])
elif part == "left_side":
return calculate_center_with_dist(keypoints, advanced_parts[dataset]["left_side"])
return None # Fallback option
def blurr_img(img, keypoints_list, part, dataset, width=6):
concetrated_cutout = part not in part_mapping[dataset].keys()
for keypoints in keypoints_list:
if concetrated_cutout:
if part in keypoints:
# width = 6 # As it is in the ground truth
x, y = keypoints[part]
img = circle_blur(img, x, y, width)
return img, (x, y), (width, width)
else:
cords = get_cords_with_width(keypoints, part, dataset)
if cords:
(x, y), (width_x, width_y) = cords
img = blur(img, x, y, width_x, width_y)
return img, (x, y), (width_x, width_y)
return img, (0, 0), (0, 0)
def cutout_img(img, keypoints_list, part, mean_coloring, dataset, width = 6):
color = [0, 0, 0]
concetrated_cutout = part not in part_mapping[dataset].keys()
if mean_coloring:
color = get_image_mean_color(img)
for keypoints in keypoints_list:
if concetrated_cutout:
if part in keypoints:
x, y = keypoints[part]
img = circle_cutout(img, x, y, width, color)
return img, (x, y), (width, width)
else:
cords = get_cords_with_width(keypoints, part, dataset)
if cords:
(x, y), (width_x, width_y) = cords
img = rect_cutout(img, x, y, width_x, width_y, color)
return img, (x, y), (width_x, width_y)
return img, (0, 0), (0, 0)
def copy_image_files(path, dst):
if not os.path.exists(dst):
os.mkdir(dst)
for file in os.listdir(path):
if file.endswith(".jpg"):
shutil.copyfile(path + file, dst + file)
def remove_annotation(keypoint_arr, joints_vis, center_pos, widths):
for id in range(keypoint_arr.shape[0]):
pos = (keypoint_arr[id][0], keypoint_arr[id][1])
if is_within_box(pos, center_pos, widths):
keypoint_arr[id] = [0, 0, 0]
joints_vis[id] = [0, 0, 0]
return keypoint_arr, joints_vis
def is_within_box(pos, center_pos, widths):
x_lower = center_pos[0] - widths[0]
x_upper = center_pos[0] + widths[0]
y_lower = center_pos[1] - widths[1]
y_upper = center_pos[1] + widths[1]
if widths[0] == 0 or widths[0] == 0:
return False
return x_lower < pos[0] <= x_upper and y_lower <= pos[1] <= y_upper
| UTF-8 | Python | false | false | 12,817 | py | 23 | cutout_util.py | 18 | 0.596395 | 0.580635 | 0 | 304 | 41.161184 | 124 |
Anseik/algorithm | 9,895,604,659,851 | d0ff06942c1144038806859c4b04e04b23416bed | d7cfe98faeb0fe1b4ce02d54d8bbedaca82764f7 | /1105_문제풀이/swea_5208_전기버스2_solution.py | 2d0fdaf23fe06de6f2a7b558d603164d22819ffb | []
| no_license | https://github.com/Anseik/algorithm | 27cb5c8ec9692cf705a8cea1d60e079a7d78ef72 | 925404006b84178682206fbbb3b989dcf4c3dee9 | refs/heads/master | 2023-02-26T00:02:01.696624 | 2021-02-03T14:10:28 | 2021-02-03T14:10:28 | 301,753,927 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.stdin = open('swea_5208_전기버스2_solution.txt')
# idx : 정류장 번호 / cnt : 배터리 교환횟수 / remain : 배터리 잔량
def backtrack(idx, cnt, remain):
global min_cnt
# 다음 정류장 도착하면 remain -1
remain -= 1
if cnt >= min_cnt: # 중간 배터리 교환 횟수가 정답후보 이상이면 리턴
return
if idx == N:
min_cnt = cnt
return
# 배터리를 교환하는 경우
backtrack(idx + 1, cnt + 1, arr[idx])
# 배터리를 교환하지 않는 경우(배터리 잔량이 남아있어야만 교환하지 않을 수 있다.)
if remain > 0:
backtrack(idx + 1, cnt, remain)
T = int(input())
for tc in range(1, T + 1):
arr = list(map(int, input().split()))
N = arr[0]
min_cnt = 0xffffffff
backtrack(2, 0, arr[1])
print('#{} {}'.format(tc, min_cnt))
| UTF-8 | Python | false | false | 886 | py | 450 | swea_5208_전기버스2_solution.py | 435 | 0.557143 | 0.531429 | 0 | 29 | 23.103448 | 52 |
Ilade-s/QRCode_V1 | 16,827,681,870,067 | 7929bcab0a988da32c3e08c0ef0a1e3bd6df1680 | f8822f01114a62da72c7207e6e9da3b5b08c3786 | /QRcode_Std.py | 7a6c4ba73bb0d34071bc79c0b556b4bccdfc773c | [
"MIT"
]
| permissive | https://github.com/Ilade-s/QRCode_V1 | 888c27cb8d2d1cd7d8f2a8eb5199a7ee0b4411a3 | 49d20c3d3ee4930851a0b8183fccf075cf4205fa | refs/heads/main | 2023-03-30T19:01:48.921183 | 2021-03-30T11:41:01 | 2021-03-30T11:41:01 | 349,127,294 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Module des fonctions pour décoder un QR code V1 (plus petit mode)
La fonction pricipale est lire_QRCode(QR_code)
Quand lancé en main, proposera une interface avec toutes les fonctionnalités du programme
-----------
Pour utlisation dans un autre programme, utiliser FuncLaunch(ImgName) pour un lancement sans interface ni texte
-----------
"""
from raw_data import get_bits,get_raw_data
from QR_ImgToMat import ImgToMat # Conversion image QR en matrice exploitable
from creer_qr import CreerQR # Création de QR code
def FuncLaunch(ImgName):
""" Fonction qui permet de lancer le décodage d'un QR code à partir du nom de l'image :
SANS INTERFACE // SANS TEXTE DE DEBUG
Paramètre d’entrée :
-------------------
ImgName : str
Nom de l'image (sans extension de fichier), doit être au format .png
Paramètre de sortie :
-------------------
Value : str
Texte décodé du QR_Code
"""
QR_code = ImgToMat(ImgName, False)
value = lire_QRCode(QR_code, False)
return value
def mask(type) :
""" Fonction qui génère la matrice de masque en fonction du type
mat_mask = mask(type)
Paramètre d’entrée :
-------------------
type : code compris entre 0 et 7
type de masque à générer
Paramètre de sortie :
-------------------
mat_mask : matrice 21X21
la matrice générée dépend du type
"""
mat_mask = [[0 for i in range(21)] for j in range(21)]
if type==0: # Damier fin
for i in range(21):
for j in range(21):
if (i+j)%2 == 0:
mat_mask[j][i] = 1
elif type==1: # Horizontales
for i in range(21):
for j in range(21):
if j%2 == 0:
mat_mask[j][i] = 1
elif type==2: # Verticales
for i in range(21):
for j in range(21):
if i%3 == 0:
mat_mask[j][i] = 1
elif type==3: # Diagonales
for i in range(21):
for j in range(21):
if (i+j)%3 == 0:
mat_mask[j][i] = 1
elif type==4: # Damier gros carreaux
for j in range(21):
for i in range(21):
if (j//2+i//3)%2 == 0:
mat_mask[j][i] = 1
elif type==5: # Carreaux avec losanges
for i in range(21):
for j in range(21):
if (i*j)%2+(i*j)%3 == 0:
mat_mask[j][i] = 1
elif type==6: # Très noir
for i in range(21):
for j in range(21):
if ((i*j)%3+i*j)%2 == 0:
mat_mask[j][i] = 1
elif type==7: # Taches éclatées
for i in range(21):
for j in range(21):
if ((i*j)%3+i+j)%2 == 0:
mat_mask[j][i] = 1
return mat_mask
def format(QR_code) :
""" fonction qui retourn le taux de correction appliqué et le type de masque appliqué au QR code
(correction, masque) = format(QR_code)
Paramètre d’entrée :
-------------------
QR_code : Matrice 21 X 21
QR_code à décoder
Paramètre de sortie :
-------------------
(correction, masque) : (int, int)
correction : int L 01
M 00
Q 11
H 10
masque : int de 0 à 7
"""
# mask = " 10 101 0000010010"
masque = 0
correction = 0
# recherche en colonne 8, de l.16 à l.18 (masque) puis l.19 et l.20 (correction)
for l in range(3): # Masque
masque += QR_code[l+16][8]*(2**l)
masque = masque^5 # Decodage XOR
for l in range(2): # Correction
correction += QR_code[l+19][8]*(2**l)
correction = correction^2 # Decodage XOR
return (correction, masque)
def decode(QR_code,QR_masque) :
"""Fonction qui applique le QR_masque au QR_code
QR_Decode = decode(QR_code,masque)
Paramètre d’entrée :
------------------
QR_code : Matrice 21 X 21
QR_code à décoder
masque : Matrice 21 X 21
masque à appliquer au QR_code
Paramètre de sortie :
--------------------
QR_Decode : Matrice 21 X 21
"""
QR_Decode = QR_code
# Boucles XOR
# Haut
for l in range(8):
for c in range(9,13):
QR_Decode[l][c] = QR_code[l][c]^QR_masque[l][c]
# Milieu
for l in range(9,13):
for c in range(21):
QR_Decode[l][c] = QR_code[l][c]^QR_masque[l][c]
# Bas
for l in range(13,21):
for c in range(9,21):
QR_Decode[l][c] = QR_code[l][c]^QR_masque[l][c]
return QR_Decode
def get_mode(raw_data) :
"""
(mode,nb_car) = get_mode(Raw_Data)
Paramètre d’entrée :
------------------
Raw_Data : Données brutes issue d’un Qrcode vesrion 1.
Raw_Data : int contenant les données brutes du QR code
mode de codage sur 4 bits,
le nombre de caractères codés sur :
8 bits pour l'ASCII,
9 bits pour l'alphanumérique,
10 bits pour le numérique
Paramètre de sortie :
--------------------
(mode, nb_car) : (int, int)
Mode : int
1 : Numérique
2 : Alphanumérique
4 : ASCII
nb_car : int
Nombre de caractères encodés.
"""
mode = get_bits(raw_data,0,4)
# Recherche nombre caractères en fonction de l'encodage
if mode==1:
nb_car = get_bits(raw_data,4,10)
elif mode==2:
nb_car = get_bits(raw_data,4,9)
elif mode==4:
nb_car = get_bits(raw_data,4,8)
else:
nb_car = 0
#mode = 2
#nb_car = 2
return (mode,nb_car)
def lire_ASCII(raw_data,nb_car) :
"""
Paramètre d’entrée :
--------------------
Raw_Data : Données brutes issue d’un Qrcode vesrion 1.
nb_car : int
Nombre de caractères à lire
Paramètre de sortie :
--------------------
message : string
message contenu dans le QR_code
"""
message = ""
for i in range(nb_car):
message += chr(get_bits(raw_data,12+i*8,8))
return message
def lire_AlphaNum(raw_data,nb_car):
"""
Paramètre d’entrée :
--------------------
Raw_Data : Données brutes issue d’un Qrcode vesrion 1.
nb_car : int
Nombre de caractères à lire
Paramètre de sortie :
--------------------
message : str
message contenu dans le QR_code
"""
dict_chr = {
0:"0",
1:"1",
2:"2",
3:"3",
4:"4",
5:"5",
6:"6",
7:"7",
8:"8",
9:"9",
10:"A",
11:"B",
12:"C",
13:"D",
14:"E",
15:"F",
16:"G",
17:"H",
18:"I",
19:"J",
20:"K",
21:"L",
22:"M",
23:"N",
24:"O",
25:"P",
26:"Q",
27:"R",
28:"S",
29:"T",
30:"U",
31:"V",
32:"W",
33:"X",
34:"Y",
35:"Z",
36:" ",
37:"$",
38:"%",
39:"*",
40:"+",
41:"-",
42:".",
43:"/",
44:":",
}
# Technique :
# 45*a + b = int(bin) sur 11 bits
# Donc a = bin/45 arrondi à 0
# et b = bin%45 (reste)
message = ""
if nb_car%2==0: # Tous 11 bits (pair)
passe = 0
while nb_car>len(message):
valueBin = get_bits(raw_data,13+passe*11,11)
message += dict_chr[int(valueBin/45)]\
+dict_chr[valueBin%45]
passe+=1
else: # Dernier caractère sur 6 bits (impair)
# Caractères par paire sur 11 bits
passe = 0
while nb_car-len(message)>=2:
valueBin = get_bits(raw_data,13+passe*11,11)
message += dict_chr[int(valueBin/45)]\
+dict_chr[valueBin%45]
passe+=1
# Dernier caractère
message += dict_chr[get_bits(raw_data,13+passe*11,6)]
return message
def lire_Num(raw_data,nb_car):
"""
Paramètre d’entrée :
--------------------
Raw_Data : Données brutes issue d’un Qrcode version 1.
nb_car : int
Nombre de caractères à lire
Paramètre de sortie :
--------------------
message : str
message contenu dans le QR_code
"""
message = ""
if nb_car%3==0: # Uniquement groupes de 3 chiffres (10 bits)
nGrp = int(nb_car/3)
for i in range(nGrp):
message += str(get_bits(raw_data,14+i*10,10))
if len(message)<nb_car:
message = "0"+message
else: # Dernier groupe en 4 bits (1 chiffre) ou 7 bits (2 chiffres)
nGrpDe3 = int(nb_car/3)
nCaracDernierGrp = nb_car%3
nBitsDernierGrp = 1+nCaracDernierGrp*3
for i in range(nGrpDe3):
Data = str(get_bits(raw_data,14+i*10,10))
message += Data
message += str(get_bits(raw_data,14+nGrpDe3*10,nBitsDernierGrp))
if len(message)<nb_car:
message = "0"+message
return message
def lire_QRCode(QR_code, Debug=True) :
CorrDict = {0:"M",1:"L",3:"Q",2:"H"}
message = ""
# Recherche format
(correction, masque) = format(QR_code)
# Création masque
masque_mat = mask(masque)
# Décodage avec masque
QR_decode = decode(QR_code, masque_mat)
# Récupération données QR code
Raw_Data = get_raw_data(QR_decode)
# Récupération encodage
(mode,nb_car) = get_mode(Raw_Data)
# Choix def en fonction de l'encodage
if mode==1: # Numérique
message = lire_Num(Raw_Data, nb_car)
elif mode==2: # Alphanumérique
message = lire_AlphaNum(Raw_Data, nb_car)
elif mode==4: # ASCII
message = lire_ASCII(Raw_Data, nb_car)
else: # mode incorrect
print("mode incorrect :",mode)
if Debug:
print("============================================================================")
print("\t\tDEBUG")
print("Niveau de correction :",CorrDict[correction])
print("Masque :",masque)
print("Nombre de caractères :",nb_car)
print("Encodage :",mode)
print("\t\tDEBUG")
print("============================================================================")
return message
if __name__ == "__main__":
print("============================================================================")
print("Bienvenue dans mon programme de decodage d'un QR Code")
print("Vous pouvez soit :")
print('\t1 : Decoder un exemple de QR Code (texte : "Exemple")')
print("\t2 : Decoder un QR Code à partie d'une image de votre choix")
print("\t (format png, à mettre dans le dossier 'Img_QR')")
print("\t3 : Créer un QR code (stocké dans le dossier 'Img_QR') : EXPERIMENTAL")
print("\t (format png, encodage non contrôlable, choisira le plus optimal, nom en QR_xxx.png)")
print("============================================================================")
print("\a")
Choix = input("Choix (1, 2 ou 3) : ")
if Choix=="2": # Décodage à partir d'une image
ImgName = "QR_"+input("\tNom de l'image (sans extension de fichier, seulement le texte inconnu : QR_xxx.png) : ")
print("============================================================================")
print("\t\tIMAGE")
QR_code = ImgToMat(ImgName)
print("\t\tIMAGE")
msg = lire_QRCode(QR_code)
print("Message :",msg)
elif Choix=="3": # Création QR Code
print("============================================================================")
print("Limite de caractères :")
print("\tASCII : 11 caractères")
print("\tAlphanumérique : 16 caractères")
print("\tNumérique : 27 caractères")
print("============================================================================")
Data = input("\tTexte à encoder : ")
ImgName = "QR_"+input("\tNom de l'image du QR code (sans extension de fichier) (Laisser vide pour nom=data) : ")
if ImgName=="QR_":
ImgName += Data
CreerQR(Data, ImgName)
print("\t\tFAIT")
else: # Décodage d'un exemple de QR Code
QR_code = [ # ASCII "Exemple"
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0],
]
msg = lire_QRCode(QR_code)
print("Message :",msg)
| UTF-8 | Python | false | false | 14,296 | py | 6 | QRcode_Std.py | 4 | 0.457439 | 0.399209 | 0 | 425 | 32.336471 | 121 |
MountPOTATO/Elevator-Dispatching | 14,242,111,588,769 | d3b3bd044f31f07cca4d00ce2e6aabd62cec4537 | 51828b2d8cec8b090b581e7573125e4834a962d6 | /src/main.py | 76e721defc4d0341f63193febec99fe3e29405ce | []
| no_license | https://github.com/MountPOTATO/Elevator-Dispatching | a249d5bd0eac3c76fa27731b6628fdf72a9f1d38 | 941c7f3ac62f4f4ce6abb6faf5f1f99e334858ba | refs/heads/master | 2023-04-12T02:36:46.991490 | 2021-05-21T01:51:03 | 2021-05-21T01:51:03 | 362,339,027 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Author: mount_potato
Date: 2021-04-28 12:47:13
LastEditTime: 2021-05-12 22:22:29
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Elevator-Dispatching\src\main.py
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from utils import *
from elevator_ui import *
class ElevatorInterface(QtWidgets.QMainWindow,Ui_MainWindow):
def __init__(self):
super(ElevatorInterface,self).__init__()
self.setupUi(self)
self.setWindowTitle('Operator System Elevator Dispatcher')
self.setWindowIcon(QIcon('resources/window_icon.png'))
if __name__=='__main__':
app=QApplication(sys.argv)
main_window=ElevatorInterface()
main_window.show()
sys.exit(app.exec())
| UTF-8 | Python | false | false | 760 | py | 6 | main.py | 4 | 0.705263 | 0.665789 | 0 | 31 | 23.387097 | 66 |
Yansb/pythonAprender | 9,199,819,971,383 | d583c8f5d4d6a3cae45b8c9a52cf34e5d97d2618 | 12b8ca1390643c0a9e8fa5539e291b39f1a53406 | /Learning/motorbike.py | de3b25acb5ad3a09892a2d82d8cbe73eadd2a336 | []
| no_license | https://github.com/Yansb/pythonAprender | 710d11a6b8a9c4414c1cb7496d13cc92aef2ccd2 | 93080b35ea9b69420b430cadce7ec949cbcd3ba0 | refs/heads/master | 2020-04-06T17:26:16.283323 | 2018-11-16T03:49:50 | 2018-11-16T03:49:50 | 157,659,194 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | bike={"make": "Honda", "model": "250 dream", "colour": "red", "engine size": 250}
print(bike["make"])
| UTF-8 | Python | false | false | 102 | py | 40 | motorbike.py | 39 | 0.598039 | 0.539216 | 0 | 2 | 50 | 81 |
doublejy715/Problem-Solve | 206,158,466,267 | 4f18468f7ccf3cdbd9a4c34f2497cdf7706e2655 | 739e91039c05943352a3fc07e768641f74097482 | /Python_Practice/insert_sort.py | 265f3ca96076f5088fd661a5a017cc6cbd11199e | []
| no_license | https://github.com/doublejy715/Problem-Solve | 651182079ded1a9da3478dd30a4c4507894de85e | 57d5a672a48103769c8cc022cb7132d988624600 | refs/heads/master | 2023-07-13T09:51:23.145427 | 2021-08-31T04:35:04 | 2021-08-31T04:35:04 | 234,250,382 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 단순히 스왑 형식으로 마무리 할 수 있다.
def Insert_Sort(data):
    """Sort *data* in place (ascending) by repeated adjacent swaps and return it."""
    for right in range(1, len(data)):
        # Bubble the new element left until it meets a smaller-or-equal one.
        pos = right
        while pos > 0 and data[pos] < data[pos - 1]:
            data[pos], data[pos - 1] = data[pos - 1], data[pos]
            pos -= 1
    return data
eironside/PMLDM | 14,903,536,554,618 | d1621d1285cd37c0b1b8165406be5dbd5cfdd24a | f586b45973b3f6eb8939dff6326a6a2aaa399dba | /ngce/wrapper/NRCS_Contours.py | cf86ea8851ff2c3275549271974ef60455529e1a | []
| no_license | https://github.com/eironside/PMLDM | e9648f4e2f1d9f82792dd2902a9d9274487b1f05 | e15c2e2ba5abd87adf6ec047b18a4fcb19ee053e | refs/heads/master | 2021-01-02T22:47:02.761359 | 2020-04-30T16:40:26 | 2020-04-30T16:40:26 | 99,386,267 | 3 | 2 | null | false | 2020-04-16T16:10:59 | 2017-08-04T23:22:07 | 2020-02-03T18:22:36 | 2020-04-16T16:10:59 | 39,155 | 1 | 1 | 0 | Python | false | false | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# NRCS_Contours.py
# Created on: 2019-04-11
# Description: Wrapper for the contour generation for NRCS
# ---------------------------------------------------------------------------
import ALClasses
import arcpy
import os
import sys
filePath = os.path.abspath(__file__)
toolsPath = "\\".join(filePath.split("\\")[0:-3])
sys.path.append(toolsPath)
import ngce.pmdm.RunUtil
import traceback
# Set global variables
# Parallel lists: unitcodes[0][i] is the unit code passed on to the contour
# tool, unitcodes[1][i] is the matching display name accepted as user input.
unitcodes = [["MT", "FT", "US_FT"],["Meters", "Feet (International)", "Feet (US Survey)"]]
# Output contour feature-class names that are cleared from Contours.gdb
# before each run.
outputfcs = ['Contours_OCS', 'Contours_WM']
# Temporary shapefiles (deleted before each run) used for controlling execution.
tempshps = ['footprints_clip_cont.shp', 'footprints_clip_md.shp']
# Name of the output file geodatabase created under the output folder.
gdbname = 'Contours.gdb'
def main():
    """Validate the tool parameters, stage a temporary reference mosaic with
    clipped footprints and an AOI, then launch the parallel NRCS
    contour-generation tool.

    Parameters are read with arcpy.GetParameter*(0..7): input type
    ('Folder' or 'Mosaic'), input folder, input mosaic, output folder,
    contour spacing, vertical units, optional AOI feature class, and a
    create-labels flag.  Any invalid input raises arcpy.ExecuteError,
    which is reported through arcpy.AddError.
    """
    try:
        # Read parameters from command line
        inType = arcpy.GetParameterAsText(0)
        inFolder = arcpy.GetParameterAsText(1)
        inMosaic = arcpy.GetParameterAsText(2)
        outFolder = arcpy.GetParameterAsText(3)
        inSpace = arcpy.GetParameter(4)
        inUnits = arcpy.GetParameterAsText(5)
        aoiFC = arcpy.GetParameterAsText(6)
        outLabels = arcpy.GetParameter(7)

        inUnitCode = ""

        # Check input parameters exist and are set correctly
        if inType == '#' or not inType:
            arcpy.AddError("Input type not set")
            raise arcpy.ExecuteError

        # Check inType has a valid value
        # Also check that the Folder or Mosaic exist as appropriate
        if inType == "Folder":
            if not inFolder or inFolder == '#':
                arcpy.AddError("Input folder not set")
                raise arcpy.ExecuteError
        elif inType == "Mosaic":
            if not inMosaic or inMosaic == '#':
                arcpy.AddError("Input mosaic not set")
                raise arcpy.ExecuteError
        else:
            arcpy.AddError("Input type incorrect = {}".format(inType))
            raise arcpy.ExecuteError

        # Check that the output folder exists
        if not outFolder or outFolder == '#':
            arcpy.AddError("Output folder not set")
            raise arcpy.ExecuteError
        elif not arcpy.Exists(outFolder):
            arcpy.AddError("Output folder does not exist")
            raise arcpy.ExecuteError

        # Check the required contour spacing is of correct type
        if inSpace is None:
            arcpy.AddError("Contour spacing not set")
            raise arcpy.ExecuteError
        elif isinstance(inSpace, str):
            if inSpace == '#':
                arcpy.AddError("Contour spacing not set")
                raise arcpy.ExecuteError
            else:
                inSpace = int(inSpace)
        # BUG FIX: this was `not isinstance(inSpace, int) or isinstance(inSpace, long)`,
        # which by operator precedence rejected every long value.
        elif not isinstance(inSpace, (int, long)):
            arcpy.AddError("Contour spacing set to the wrong type {}".format(type(inSpace)))
            raise arcpy.ExecuteError

        # Check the vertical units
        if not inUnits or inUnits == '#':
            arcpy.AddError("Vertical units not set")
            raise arcpy.ExecuteError
        elif inUnits not in unitcodes[1]:
            arcpy.AddError("Vertical units not set correctly = {}".format(inUnits))
            raise arcpy.ExecuteError
        else:
            # Map the display name to the unit code used by the contour tool.
            inUnitCode = unitcodes[0][unitcodes[1].index(inUnits)]

        # Check that the AOI exists if supplied
        if not aoiFC or aoiFC == '#':
            aoiFC = None
        elif not arcpy.Exists(aoiFC):
            arcpy.AddError("AOI folder does not exist")
            raise arcpy.ExecuteError

        # Set default in case outLabels is not set
        outLabels_bool = True
        if outLabels is None:
            outLabels_bool = True
        elif isinstance(outLabels, bool):
            outLabels_bool = outLabels
        elif isinstance(outLabels, str):
            if outLabels.lower() == 'true' or outLabels == '#':
                outLabels_bool = True
            elif outLabels.lower() == 'false':
                outLabels_bool = False
            else:
                arcpy.AddError('Create label option set to incorrect value {}, should be \'true\' or \'false\''.format(outLabels))
                raise arcpy.ExecuteError
        else:
            arcpy.AddError('Create label option set incorrectly, of type {}'.format(type(outLabels)))
            raise arcpy.ExecuteError

        # Make sure that the output gdb exists
        outGdb = os.path.join(outFolder, gdbname)
        if not arcpy.Exists(outGdb):
            arcpy.CreateFileGDB_management(outFolder, gdbname)
            if not arcpy.Exists(outGdb):
                arcpy.AddError('Output file geodatabase {} does not exist and cannot be created'.format(outGdb))
                raise arcpy.ExecuteError

        # Clear out existing feature classes
        for fc in outputfcs:
            out_contours_fc = os.path.join(str(outGdb), fc)
            with ALClasses.FeatureClass(out_contours_fc) as contours_fc:
                if contours_fc.delete() == False:
                    raise arcpy.ExecuteError

        # Delete temp shapefiles used for controlling execution
        for tempsh in tempshps:
            out_footprint_shp = os.path.join(outFolder, tempsh)
            with ALClasses.FeatureClass(out_footprint_shp) as footprint_shp:
                if footprint_shp.delete() == False:
                    raise arcpy.ExecuteError

        # Create temporary mosaic
        # If this is an existing mosaic create a temporary reference mosaic
        # Otherwise, make a new mosaic from the folder of data
        refMosaic = None
        if inType == "Folder":
            # Get reference from first image
            firstraster = None
            walk = arcpy.da.Walk(inFolder, datatype="RasterDataset")
            for dirpath, dirnames, filenames in walk:
                if filenames:
                    firstraster = ALClasses.Raster(dirpath,filenames[0])
                    break
            if firstraster is None:
                arcpy.AddError('Cannot find rasters in folder {}'.format(inFolder))
                raise arcpy.ExecuteError
            spref = firstraster.spatialreference()
            refMosaic = ALClasses.TempMosaicDataset()
            refMosaic.createmosaic(proj=spref)
            refMosaic.addrasters(inFolder)
        else:
            # Make reference by copying from original so that we can apply clip
            origMosaic = ALClasses.MosaicDataset(inMosaic)
            spref = origMosaic.spatialreference()
            refMosaic = ALClasses.TempMosaicDataset()
            refMosaic.createmosaic(proj=spref)
            refMosaic.addtable(origMosaic,exclude_overviews=True)

        # Now create a temporary feature class from the footprints
        fps = ALClasses.TempFeatureClass("TempFootprints")
        refMosaic.exportgeometry(fps, where_clause="Category = 1", geometry_type="FOOTPRINT")

        # If we do not have an AOI supplied use the boundary of the temp mosaic
        refAOI = None
        if not aoiFC:
            refAOI = ALClasses.TempFeatureClass("TempAOI")
            refMosaic.exportgeometry(refAOI, geometry_type="BOUNDARY")
        else:
            origAOI = ALClasses.FeatureClass(aoiFC)
            refAOI = ALClasses.TempFeatureClass("TempAOI")
            origAOI.copyfeatures(refAOI)
            # Now clip the footprints to the AOI
            tempfps = fps
            fps = ALClasses.TempFeatureClass("TempFootprintClip")
            tempfps.clip(refAOI, fps)
            # Also clip the mosaic footprints to the AOI
            refMosaic.deleteexternalrasters(refAOI)
            refMosaic.importgeometry (fps,'UriHash',geometry_type="FOOTPRINT")

        # Make sure that the other reference mosaic does not exist
        # Defined as a temporary mosaic will ensure that it gets deleted when out of scope
        with ALClasses.MosaicDataset('{}_Cprep'.format(refMosaic.fname)) as derivedMosaic:
            if derivedMosaic.delete() == False:
                raise arcpy.ExecuteError

        # Add call to generate contours tool here!
        arcpy.AddMessage('Parameters:')
        arcpy.AddMessage('   Mosaic = {}'.format(refMosaic.fname))
        arcpy.AddMessage('   Mosaic _Cprep = {}'.format(derivedMosaic.fname))
        arcpy.AddMessage('   Footprints = {}'.format(fps.fname))
        arcpy.AddMessage('   Output folder = {}'.format(outFolder))
        arcpy.AddMessage('   Contour spacing = {}'.format(inSpace))
        arcpy.AddMessage('   Vertical units = {}'.format(inUnitCode))
        arcpy.AddMessage('   AOI = {}'.format(refAOI.fname))
        arcpy.AddMessage('   Create labels = {}'.format(outLabels))

        PATH = r'ngce\pmdm\c\C01ProcessContoursFromMDParallel.py'
        args = [refMosaic.fname, fps.fname, outFolder, inSpace, inUnitCode]

        ngce.pmdm.RunUtil.runTool(PATH, args, log_path=outFolder)

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages(2)
        # Return tool error messages for use with a script tool
        arcpy.AddError(msgs)
        # Print tool error messages for use in Python/PythonWin
        print(msgs)
    except Exception:
        # Top-level boundary: report any unexpected error (narrowed from a
        # bare except so SystemExit/KeyboardInterrupt still propagate).
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)
        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
# Script entry point when executed as a standalone geoprocessing tool.
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 9,974 | py | 88 | NRCS_Contours.py | 63 | 0.594746 | 0.591638 | 0 | 239 | 40.728033 | 130 |
alexjorenby/ConvNNSandbox | 14,723,147,938,148 | 79df36f1744bd7eb5d37121b833cc031abde9580 | 9237bb56fbd9b5bd677a5d84c1e1c756e63fdcb7 | /Scripts/helpers.py | d12b6c13ec2b96043b152663ace814b7ca58cea9 | []
| no_license | https://github.com/alexjorenby/ConvNNSandbox | 41d56b8dd79aa124fccef01c69ee831e7d6fd7a7 | 521e9a2ed7617f2bb95a5d49c82303b1332e3724 | refs/heads/master | 2020-07-02T11:55:30.352509 | 2019-08-19T03:31:55 | 2019-08-19T03:31:55 | 201,520,871 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
class PrintDot(keras.callbacks.Callback):
    """Keras callback that prints one dot per epoch, starting a new line
    every 100 epochs, as a compact training progress indicator."""

    def on_epoch_end(self, epoch, logs):
        start_new_line = (epoch % 100 == 0)
        if start_new_line:
            print('')
        print('.', end='')
def plot_history(history):
    """Plot training/validation accuracy and loss curves from a Keras
    training history object, then show both figures."""
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    # One figure per metric pair: (y-axis label, train key, val key,
    # train legend label, val legend label).
    panels = [
        ('Accuracy', 'acc', 'val_acc', 'Train Acc', 'Val Acc'),
        ('Loss', 'loss', 'val_loss', 'Train Loss', 'Val Loss'),
    ]
    for ylabel, train_key, val_key, train_label, val_label in panels:
        plt.figure()
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.plot(hist['epoch'], hist[train_key], label=train_label)
        plt.plot(hist['epoch'], hist[val_key], label=val_label)
        plt.legend()
    plt.show()
def encode_target(df, target, one_hot=True, num_outputs=1108):
    """Encode df[target] in place and return (df, fitted_encoder).

    With one_hot=True each cell becomes a one-hot list padded with zeros
    to num_outputs entries and a fitted OneHotEncoder is returned;
    otherwise each cell becomes a single-element integer-label list and
    a fitted LabelEncoder is returned.
    """
    values = np.array(df[[target]])
    if one_hot:
        onehot_encoder = OneHotEncoder(sparse=False, categories='auto')
        onehot_encoded = onehot_encoder.fit_transform(values)
        Z = onehot_encoded.tolist()
        # Pad every one-hot row out to num_outputs columns.  (Originally a
        # list comprehension used purely for its side effect, which built
        # and discarded a throwaway list of Nones.)
        for row in Z:
            row.extend([0] * (num_outputs - len(row)))
        df[target] = Z
        encoder = onehot_encoder
    else:
        label_encoder = LabelEncoder()
        integer_encoded = label_encoder.fit_transform(values)
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
        df[target] = integer_encoded.tolist()
        encoder = label_encoder
    return df, encoder
burnSTATION/spgt | 5,205,500,394,007 | d73531c64889b54a392c5d5e72fcfa9d258e2e3d | 466735b3ae1473e4d6254d2aa6cac99959966fd2 | /ready_service.py | 76aa2fb711597864faf81f6201160473fa498033 | []
| no_license | https://github.com/burnSTATION/spgt | fada79c55ab9739a8836f6d8467d1f26457ea7ad | 7006a3778127acbc912501da182457a8dc72930b | refs/heads/master | 2020-12-06T22:20:16.599883 | 2016-09-06T11:19:35 | 2016-09-06T11:19:35 | 67,461,258 | 1 | 0 | null | false | 2016-09-16T23:49:46 | 2016-09-06T01:09:16 | 2016-09-06T15:50:28 | 2016-09-16T23:49:45 | 14 | 1 | 0 | 3 | Python | null | null | from listeners import OnClientFullyConnect, OnClientDisconnect, OnLevelInit
from messages import SayText2
from players.entity import Player
from commands.say import SayCommand
from engines.server import engine_server
### GLOBALS & CONSTANTS ###
# Chat tag prefixed to every message; \x02/\x01 are in-game colour control bytes.
CHAT_PREFIX = "[\x02eSN\x01]"
# Maps a connected client's player index -> CreatePlayer record.
players = {}
# Module-wide flag: True while the server is in its (endless) warmup period.
is_warmup_period = True
class CreatePlayer():
    """Per-client record tracking identity and ready state for the match."""

    def __init__(self, username):
        """Store the username and initialise default state for a new client."""
        self.username = username
        # Fresh clients start not-ready, unprivileged, with no known Steam ID.
        self.is_ready, self.permission_level, self.steam_id = False, 0, ""
def endless_warmup():
    """Pause the warmup timer indefinitely while the warmup flag is set."""
    # Idiom fix: truthiness test instead of `== True`.
    if is_warmup_period:
        engine_server.server_command('mp_warmup_pausetimer 1;')
def try_start_match(index):
    """Start the match if every tracked player is ready, otherwise tell
    the requesting client (by index) that the plugin is still waiting.

    NOTE(review): the guard only requires >= 1 tracked player while the
    chat message says 10 players — confirm the intended threshold.
    """
    everyone_ready = len(players) >= 1 and all_players_ready(players)
    if everyone_ready:
        SayText2(CHAT_PREFIX + "The match will now begin").send(index)
        start_match()
    else:
        SayText2(CHAT_PREFIX + "Still waiting for 10 players to be ready to start the match").send(index)
def all_players_ready(players):
    """Return True when every record in the *players* dict has is_ready
    set (vacuously True for an empty dict), else False."""
    # Idiomatic all() over a generator replaces the manual loop/early-return.
    return all(value.is_ready for value in players.values())
def start_match():
    """Leave warmup, announce going live, and restart the game in 10 seconds."""
    # BUG FIX: without `global`, the assignment below created a dead local
    # and the module-level warmup flag stayed True forever.
    global is_warmup_period
    is_warmup_period = False
    engine_server.server_command('mp_warmup_end;')
    SayText2(CHAT_PREFIX + "!LIVE ON NEXT RESTART!").send()
    engine_server.server_command('mp_restartgame 10;')
@OnLevelInit
def on_level_init(map_name):
    """On every map change, re-enter warmup and keep it paused."""
    # BUG FIX: without `global`, the assignment below created a dead local
    # and endless_warmup() read a stale module-level flag.
    global is_warmup_period
    is_warmup_period = True
    endless_warmup()
@OnClientFullyConnect
def on_client_fully_connect(index):
    """Register a newly connected client in the players table."""
    connected = Player(index)
    players[connected.index] = CreatePlayer(connected.name)
@OnClientDisconnect
def on_client_disconnect(index):
    """Drop a departing client from the players table.

    Uses pop() with a default so a client that was never registered
    (e.g. connected before the plugin loaded) does not raise KeyError.
    """
    players.pop(Player(index).index, None)
@SayCommand('.players')
def list_players(command, index, team):
    """Send the requesting client a chat list of every tracked player
    and their READY/NOT READY status."""
    # is_ready is only ever assigned True/False in this module, so a
    # plain truthiness test matches the original is-True/is-False chain.
    all_players = [
        "{} : {}, ".format(value.username,
                           "READY" if value.is_ready else "NOT READY")
        for value in players.values()
    ]
    SayText2(CHAT_PREFIX + str(all_players)).send(index)
@SayCommand('.ready')
def make_player_ready(command, index, team):
    """Mark the chatting player READY and try to start the match."""
    current_player = players[Player(index).index]
    if current_player.is_ready:
        SayText2(CHAT_PREFIX + "You're already \x06READY").send(index)
    else:
        current_player.is_ready = True
        SayText2(CHAT_PREFIX + "You have been marked as \x06READY").send(index)
        try_start_match(index)
@SayCommand('.notready')
def make_player_notready(command, index, team):
    """Mark the chatting player NOT READY."""
    current_player = players[Player(index).index]
    if current_player.is_ready:
        current_player.is_ready = False
        SayText2(CHAT_PREFIX + "You have been marked as \x02NOT READY").send(index)
    else:
        SayText2(CHAT_PREFIX + "You're already \x02NOT READY").send(index)
Fawaz441/scholarsbank-pya | 8,186,207,712,960 | b0c0765bf00f5740aad1ea388448e96acd3105b6 | 2356bc6be0b8938c30ca07d36ec8bfd352094405 | /accounts/models.py | 244e8a539cde524a8e3822d275556e66d2312d0b | []
| no_license | https://github.com/Fawaz441/scholarsbank-pya | 3ccfa74dd1afd0c53972612c2810d3efbf85a1d6 | 56458ea5325e8f92c7989037879284a9409cf8ef | refs/heads/master | 2022-07-30T20:45:09.166321 | 2020-05-12T12:46:10 | 2020-05-12T12:46:10 | 263,336,879 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User,PermissionsMixin
from django.urls import reverse
# Create your models here.
class Student(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE,related_name='student')
uploads = models.IntegerField(default=0)
def __str__(self):
return self.user.username + " student account "
class Seller(models.Model):
user = models.OneToOneField(User,on_delete=models.SET_NULL,related_name='seller',null=True,blank=True)
number = models.CharField(max_length=50)
whatsapp_number = models.CharField(blank=True,null=True,max_length=50)
location = models.CharField(max_length=100)
full_name = models.CharField(max_length=100)
def __str__(self):
return self.user.username + " seller account " | UTF-8 | Python | false | false | 824 | py | 60 | models.py | 28 | 0.728155 | 0.714806 | 0 | 21 | 38.285714 | 106 |
fblrainbow/Python | 3,118,146,279,802 | aa9e2d94751ba2e40ab956c333ae2f44ecbf69e7 | fd0f25debda5eb51b8d404e78661752bf6eb1e5f | /python3.6.5/2018/第三季度/8月/27/class.py | 5fd6861ef33e91db078af312317c6d8bf5d5052e | []
| no_license | https://github.com/fblrainbow/Python | 53f5be4de065dbde2809f69de41fa276107b176a | 525604e756e1107183ae8fcc4d6dd611ea0f34ef | refs/heads/master | 2021-01-20T14:03:11.263929 | 2019-01-06T15:19:33 | 2019-01-06T15:19:33 | 90,551,373 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#coding=utf-8
#__author__ = 'Administrator'
# class Man(object):
# def __init__(self, name, gender):
# self.name = name
# self.__gender = gender
# def get_gender(self):
# return self.__gender
# def set_gender(self,gender):
# self.__gender=gender
# bart = Man('Bart', 'male')
# if bart.get_gender() != 'male':
# print('测试失败!')
# else:
# bart.set_gender('female')
# if bart.get_gender() != 'female':
# print('测试失败!')
# else:
# print('测试成功! The sex of Bart is %s' %bart._Man__gender)
class Student(object):
    """Student exercise class: a public name plus a private score whose
    setter enforces the 0..100 range."""

    def __init__(self, name, score):
        # NOTE(review): __init__ assigns directly and does not validate;
        # only set_score() enforces the 0..100 range.
        self.name = name
        self.__score = score

    def get_score(self):
        """Return the current score."""
        return self.__score

    def set_score(self, score):
        """Set the score, rejecting values outside 0..100 with ValueError."""
        if not 0 <= score <= 100:
            raise ValueError('bad score!')
        self.__score = score
# Test
# NOTE(review): the -10 below bypasses set_score() validation because
# __init__ assigns the score directly, so a negative score is printed.
Lisa=Student('lisa',-10)
print('Her name is %s and her score is %d ' %(Lisa.name,Lisa.get_score()))
| UTF-8 | Python | false | false | 1,057 | py | 1,117 | class.py | 177 | 0.532556 | 0.525753 | 0 | 35 | 27.4 | 74 |
foxlf823/relation_classification | 15,307,263,443,730 | 18ef367c9b688129b28ce7e8d53322f30e8d11df | a28e8416614e280ee7463479f801b634daf26475 | /capsulenet.py | 91a5d0994e7c933b23821816e7e2296ef7ccf8b8 | []
| no_license | https://github.com/foxlf823/relation_classification | c8eb8510420f4da7b82bb7fda28e20a674ce36b3 | 334e0bda8a807d668635254a939990c8c23246c1 | refs/heads/master | 2021-05-02T03:25:50.078806 | 2018-02-26T05:27:16 | 2018-02-26T05:27:16 | 120,898,055 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
from torch import nn
from torch.autograd import Variable
from capsulelayers import DenseCapsule, PrimaryCapsule
import pyt_acnn as pa
import math
import torch.nn.functional as F
class CapsuleNet(nn.Module):
    """Capsule network for relation classification.

    Pipeline (see forward()): word + position embeddings -> Conv2d ->
    PrimaryCapsule -> DenseCapsule (dynamic routing) -> per-class scores,
    scored either via a learned class matrix (CRCNN-style ranking loss)
    or via capsule vector norms (margin loss), selected by use_crcnn_loss.
    Written against an old PyTorch API (Variable, .data[0]).
    """

    def __init__(self, max_len, embedding, pos_embed_size,
                 pos_embed_num, slide_window, class_num,
                 num_filters, keep_prob, routings, embfinetune, pad_embfinetune, use_crcnn_loss,
                 include_other):
        """Build all layers and embedding tables.

        embedding: numpy array of pretrained word vectors (vocab x dim).
        embfinetune / pad_embfinetune: whether word / padding embeddings
        are trainable Parameters or fixed Variables.
        include_other: when False, the 'Other' class (id class_num-1) is
        excluded from the capsule outputs and handled specially in the
        loss/predict methods.
        num_filters is currently unused (channel sizes are hard-coded).
        """
        super(CapsuleNet, self).__init__()
        self.dw = embedding.shape[1]# word emb size
        self.vac_len = embedding.shape[0]+1
        self.dp = pos_embed_size # position emb size
        self.d = self.dw + 2 * self.dp # word representation size
        self.np = pos_embed_num # position emb number
        self.include_other = include_other
        self.other_id = class_num-1 # only used when no other
        if include_other:
            self.nr = class_num # relation class number
        else:
            self.nr = class_num-1 # relation class number
        self.k = slide_window # convolutional window size
        self.n = max_len # sentence length
        self.keep_prob = keep_prob # dropout keep probability
        self.routings = routings
        # Hard-coded channel/capsule dimensions for the three layers below.
        self.conv1_out_channel = 256 #256
        self.primarycap_out_channel = 256#256 # 8*32
        self.primarycap_dim = 8 #8
        self.densecap_input_channel = 32#32
        self.densecap_dim = 16#16
        # Padding-word embedding: trainable random vector or fixed zeros.
        if pad_embfinetune:
            self.pad_emb = pa.myCuda(Variable(torch.randn(1, self.dw), requires_grad=True))
        else:
            self.pad_emb = pa.myCuda(Variable(torch.zeros(1, self.dw)))
        # Pretrained word embeddings: Parameter (fine-tuned) or fixed Variable.
        if embfinetune:
            # self.other_emb = nn.Parameter(torch.from_numpy(embedding[1:, :]))
            self.other_emb = nn.Parameter(torch.from_numpy(embedding[:, :]))
        else:
            # self.other_emb = pa.myCuda(Variable(torch.from_numpy(embedding[1:, :])))
            self.other_emb = pa.myCuda(Variable(torch.from_numpy(embedding[:, :])))
        # self.dropout_word = nn.Dropout(1-self.keep_prob)
        # Relative-position embeddings (skipped entirely when dp == 0).
        if self.dp != 0:
            if pad_embfinetune:
                self.pad_pos_emb = pa.myCuda(Variable(torch.randn(1, self.dp), requires_grad=True))
            else:
                self.pad_pos_emb = pa.myCuda(Variable(torch.zeros(1, self.dp)))
            self.other_pos_emb = nn.Parameter(torch.FloatTensor(self.np-1, self.dp))
            self.other_pos_emb.data.normal_(0, 1)
            # self.dist1_embedding = nn.Embedding(self.np, self.dp)
            # self.dist2_embedding = self.dist1_embedding
        # self.dropout_dist1 = nn.Dropout(1-self.keep_prob)
        # self.dropout_dist2 = nn.Dropout(1-self.keep_prob)
        # Layer 1: Just a conventional Conv2D layer
        self.conv1 = nn.Conv2d(1, self.conv1_out_channel, kernel_size=(self.k, self.d), stride=1, padding=0)
        self.last = self.n - self.k + 1
        # self.W_res1_1 = nn.Parameter(torch.FloatTensor(self.d, 1))
        # self.W_res1_1.data.uniform_(-math.sqrt(6. / (self.d)) , math.sqrt(6. / (self.d)))
        # self.W_res1_2 = nn.Parameter(torch.FloatTensor(self.last, self.n))
        # self.W_res1_2.data.uniform_(-math.sqrt(6. / (self.last+self.n)) , math.sqrt(6. / (self.last+self.n)))
        # self.dropout_conv1 = nn.Dropout(1-self.keep_prob)
        # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_caps, dim_caps]
        # output channel should consider capsule dim, e.g., 32*8=256
        self.primarycaps = PrimaryCapsule(self.conv1_out_channel, self.primarycap_out_channel, self.primarycap_dim, kernel_size=(2*self.k, 1), stride=1, padding=0)
        self.lastlast = self.last - 2*self.k + 1
        # self.W_res2_1 = nn.Parameter(torch.FloatTensor(self.last, 1))
        # self.W_res2_1.data.uniform_(-math.sqrt(6. / (self.last)) , math.sqrt(6. / (self.last)))
        # self.W_res2_2 = nn.Parameter(torch.FloatTensor(self.primarycap_dim, self.primarycap_out_channel))
        # self.W_res2_2.data.uniform_(-math.sqrt(6. / (self.primarycap_dim+self.primarycap_out_channel)) , math.sqrt(6. / (self.primarycap_dim+self.primarycap_out_channel)))
        # self.dropout_primary = nn.Dropout(1-self.keep_prob)
        # Layer 3: Capsule layer. Routing algorithm works here.
        self.digitcaps = DenseCapsule(in_num_caps=self.densecap_input_channel*self.lastlast, in_dim_caps=self.primarycap_dim,
                                      out_num_caps=self.nr, out_dim_caps=self.densecap_dim, routings=routings)
        # self.dropout_dense = nn.Dropout(1-self.keep_prob)
        # self.linear = nn.Linear(self.n*self.d, self.nr)
        # self.W_res3_1 = nn.Parameter(torch.FloatTensor(self.d, 1))
        # self.W_res3_1.data.uniform_(-math.sqrt(6. / (self.d)) , math.sqrt(6. / (self.d)))
        # self.W_res3_2 = nn.Parameter(torch.FloatTensor(self.n, self.densecap_dim))
        # self.W_res3_2.data.uniform_(-math.sqrt(6. / (self.n+self.densecap_dim)) , math.sqrt(6. / (self.n+self.densecap_dim)))
        self.relu = nn.ReLU()
        # x = pa.myCuda(Variable(torch.LongTensor([[word_dict['has']]*self.n]*2)))
        # x_embedding = torch.cat((self.pad_emb, self.other_emb),0)
        # x_embed = torch.matmul(pa.one_hot2(x.contiguous().view(2,self.n,1), self.vac_len), x_embedding)
        # pass
        # a = nn.Linear(111,222)
        # pass
        self.use_crcnn_loss = use_crcnn_loss
        # Learned class-representation matrix, only needed for CRCNN scoring.
        if use_crcnn_loss:
            self.W_class = nn.Parameter(torch.FloatTensor(self.nr, self.densecap_dim))
            stdv = math.sqrt(6. / (self.nr+self.densecap_dim))
            self.W_class.data.uniform_(-stdv, stdv)

    def forward(self, x, e1, e2, dist1, dist2):
        """Return per-class scores of shape (batch, nr).

        x: (batch, n) word ids; dist1/dist2: (batch, n) position-bucket
        ids relative to the two entities.  e1/e2 (entity word ids) are
        currently unused — the input-attention code that consumed them
        is commented out below.
        """
        bz = x.data.size()[0]
        # Embedding lookup via one-hot matmul (row 0 is the padding vector).
#         x_embedding = torch.cat((self.pad_emb, self.other_emb),0)
        x_embedding = torch.cat((self.pad_emb, self.other_emb),0)
        x_embed = torch.matmul(pa.one_hot2(x.contiguous().view(bz,self.n,1), self.vac_len), x_embedding)
        # x_embed = self.dropout_word(x_embed)
        if self.dp !=0:
            # dist1_embed = self.dist1_embedding(dist1) # (batch, length, postion_dim)
            # dist2_embed = self.dist2_embedding(dist2) # (batch, length, postion_dim)
            # NOTE: position table puts the padding row LAST (unlike words).
            pos_embedding = torch.cat((self.other_pos_emb, self.pad_pos_emb),0)
            dist1_embed = torch.matmul(pa.one_hot2(dist1.contiguous().view(bz,self.n,1), self.np), pos_embedding)
            dist2_embed = torch.matmul(pa.one_hot2(dist2.contiguous().view(bz,self.n,1), self.np), pos_embedding)
            # dist1_embed = self.dropout_dist1(dist1_embed)
            # dist2_embed = self.dropout_dist2(dist2_embed)
            x_concat = torch.cat((x_embed, dist1_embed, dist2_embed), 2) # (batch, length, word_dim+2*postion_dim)
        else:
            x_concat = x_embed
        # input attention
        # e1_embed = torch.matmul(pa.one_hot2(e1.contiguous().view(bz,1,1), self.vac_len), x_embedding)
        # e2_embed = torch.matmul(pa.one_hot2(e2.contiguous().view(bz,1,1), self.vac_len), x_embedding)
        # A1 = torch.matmul(x_embed, e1_embed.permute(0,2,1)) # (batch, length, 1)
        # A2 = torch.matmul(x_embed, e2_embed.permute(0,2,1))
        # alpha1 = F.softmax(A1, dim=1)
        # alpha2 = F.softmax(A2, dim=1)
        # alpha = torch.div(torch.add(alpha1, alpha2), 2)
        # R = torch.mul(x_concat, alpha) # (batch, length, word_dim+2*postion_dim)
        # Add a singleton channel dim for Conv2d: (batch, 1, n, d).
        x_concat = x_concat.view(bz, 1, self.n, self.d)
        # x_concat = R.view(bz, 1, self.n, self.d)
        # y_conv1 = self.relu(self.conv1(x_concat))
        y_conv1 = self.relu(self.conv1(x_concat))
        # y = self.dropout_conv1(y)
        # y_res1 = y_conv1 + F.relu(torch.matmul(self.W_res1_2, torch.matmul(x_concat, self.W_res1_1)).expand(-1, self.conv1_out_channel, -1, -1))
        y_primary = self.primarycaps(y_conv1)
        # y = self.dropout_primary(y)
        # y_res2 = y_primary+F.relu(torch.matmul(self.W_res2_2, torch.matmul(y_res1.squeeze(-1), self.W_res2_1)).permute(0,2,1).expand(-1, self.densecap_input_channel*self.lastlast, -1))
        y = self.digitcaps(y_primary) # [bz, nr, dim_caps]
        # y = self.dropout_dense(y)
        # y = y_digit + F.relu(torch.matmul(torch.matmul(x_concat, self.W_res3_1).squeeze(-1), self.W_res3_2).expand(-1, 19, -1))
        if self.use_crcnn_loss:
            # Score = dot product of each class capsule with its class vector.
            y = torch.matmul(y.view(bz, self.nr, 1, self.densecap_dim), self.W_class.view(self.nr, self.densecap_dim, 1))
            y = y.view(bz, self.nr)
        else:
            # Score = capsule vector length (norm) per class.
            y = y.norm(dim=-1)
        # y = self.linear(x_concat.view(bz, -1))
        return y

    def loss_func(self, by, y_pred):
        """Dispatch to the CRCNN ranking loss or the capsule margin loss."""
        if self.use_crcnn_loss:
            loss = self._crcnn_loss(by, y_pred)
        else:
            loss = self._caps_loss(by, y_pred)
        return loss

    def _crcnn_loss(self, by, y_pred):
        """CRCNN-style pairwise ranking loss over scores y_pred (bz, nr).

        by: (bz) gold class ids.  When include_other is False, 'Other'
        examples (id == other_id) contribute no positive term and their
        (fake) class-0 score is masked out of the negative-class max.
        """
        bz = by.size()[0]
        m_pos = 2.5
        m_neg = 0.5
        r = 2
        # y_pred (bz, 18), by may contain 'other' (id=18), which leads out of index
        if self.include_other:
            new_by = by
        else:
            # other_mask: 0 where gold is 'Other', 1 elsewhere.
            other_mask = pa.myCuda(Variable(torch.LongTensor(by.size())))
            other_mask.fill_(self.other_id)
            other_mask.ne_(by)
            new_by = by*other_mask # mask other to 0, although 0 correspond to a class, we will mask its score later
        y_true = pa.myCuda(Variable(torch.zeros(y_pred.size()))).scatter_(1, new_by.view(-1, 1), 1.)
        # s_gold: score of the gold class for each example.
        s_gold = torch.matmul(y_true.view(bz, 1, self.nr), y_pred.view(bz, self.nr, 1)).view(new_by.size())
        left = torch.log(1+torch.exp(r*(m_pos - s_gold)))
        if self.include_other == False:
            left.mul_(other_mask.float())
        mask = pa.one_hot1(new_by.view(bz,1), self.nr, -1000) # mask gold
        if self.include_other == False:
            aaaa = other_mask.view(bz, 1).expand(-1, self.nr).float()
            mask = mask*aaaa # mask fake 0 (actually other)
        masked_y = torch.add(y_pred, mask)
        # s_neg: best score among the non-gold (and non-masked) classes.
        s_neg = torch.max(masked_y, dim=1)[0]
        right = torch.log(1+torch.exp(r*(m_neg+s_neg)))
        loss = left+right
        loss = loss.mean()
        return loss

    def _caps_loss(self, by, y_pred):
        '''
        Capsule margin loss (clamp-based, margins 0.9/0.1, 0.5 down-weight
        on absent classes), averaged over the batch.

        by: (bz)
        y_pred: (bz, nr)
        '''
        bz = by.size()[0]
        m_pos = 0.9
        m_neg = 0.1
        if self.include_other:
            new_by = by
        else:
            # Map 'Other' gold labels to 0 and zero their one-hot row below.
            other_mask = pa.myCuda(Variable(torch.LongTensor(by.size())))
            other_mask.fill_(self.other_id)
            other_mask.ne_(by)
            new_by = by*other_mask
        y_true = pa.myCuda(Variable(torch.zeros(y_pred.size()))).scatter_(1, new_by.view(-1, 1), 1.)
        if self.include_other == False:
            aaaa = other_mask.view(bz, 1).expand(-1, self.nr).float()
            y_true.mul_(aaaa.float()) # mask fake 0 (actually other)
        L = y_true * torch.clamp(m_pos - y_pred, min=0.) ** 2 + \
            0.5 * (1 - y_true) * torch.clamp(y_pred - m_neg, min=0.) ** 2
        L_margin = L.sum(dim=1).mean()
        return L_margin

    def predict(self, by, y_pred):
        """Return (batch accuracy, predicted class ids) using the decision
        rule matching the configured loss."""
        if self.use_crcnn_loss:
            accuracy, answer = self._crcnn_predict(by, y_pred)
        else:
            accuracy, answer = self._cap_predict(by, y_pred)
        return accuracy, answer

    def _cap_predict(self, by, y_pred):
        """Argmax over capsule norms; when include_other is False, a max
        score below the 0.1 margin is predicted as 'Other'."""
        bz = by.data.size()[0]
        correct = 0
        m_neg = 0.1
        if self.include_other:
            predict = y_pred.max(1)[1]
        else: # this code is only correct when other is the last id
            max_score, temp = y_pred.max(1)
            mask1 = max_score.gt(m_neg)
            not_other_predict = mask1.long()*temp
            mask2 = max_score.lt(m_neg)
            other_predict = pa.myCuda(Variable(torch.LongTensor(by.size()))).fill_(self.other_id)
            other_predict = mask2.long()*other_predict
            predict = not_other_predict + other_predict
        correct = predict.eq(by).cpu().sum().data[0]
        return correct / bz, predict

    def _crcnn_predict(self, by, y_pred):
        """Argmax over CRCNN scores; when include_other is False, a max
        score below 0 is predicted as 'Other'."""
        bz = by.data.size()[0]
        correct = 0
        if self.include_other:
            predict = y_pred.max(1)[1]
        else: # this code is only correct when other is the last id
            max_score, temp = y_pred.max(1)
            mask1 = max_score.gt(0)
            not_other_predict = mask1.long()*temp
            mask2 = max_score.lt(0)
            other_predict = pa.myCuda(Variable(torch.LongTensor(by.size()))).fill_(self.other_id)
            other_predict = mask2.long()*other_predict
            predict = not_other_predict + other_predict
        correct = predict.eq(by).cpu().sum().data[0]
        return correct / bz, predict
| UTF-8 | Python | false | false | 13,293 | py | 10 | capsulenet.py | 7 | 0.558715 | 0.535094 | 0 | 310 | 41.858065 | 186 |
Valeriitsoy/HW | 17,282,948,403,246 | 5299b2eec2cdc09fe2f0377137b4b595f383ea8b | 9ba12fc26836841693cfbe66e6b4c714bcf290b3 | /lesson03/task_6.py | e12e112fe028cea68457235ddab7d56c42b5eaa6 | []
| no_license | https://github.com/Valeriitsoy/HW | ea48bb9113af4c8af613a8dd322f0c3fc6745308 | 2291538d9b5069fb01d3eece60bd24bc55dbcd3e | refs/heads/master | 2020-12-20T03:35:24.857882 | 2020-02-18T20:27:43 | 2020-02-18T20:27:43 | 235,948,022 | 0 | 0 | null | false | 2020-02-18T20:27:44 | 2020-01-24T05:57:09 | 2020-02-15T17:00:08 | 2020-02-18T20:27:44 | 19 | 0 | 0 | 0 | Python | false | false | def int_func(word):
return word.capitalize()
# Print the sample word capitalised.
print(int_func('text'))
# Read words from the user (prompt is Russian: "Enter a set of lowercase
# Latin words") and print each one capitalised.
print(list(map(int_func, input('Введите набор слов на латинце в нижнем регситре: ').split())))
| UTF-8 | Python | false | false | 215 | py | 45 | task_6.py | 41 | 0.68 | 0.68 | 0 | 8 | 20.625 | 95 |
langenhagen/experiments-and-tutorials | 18,872,086,298,962 | 0683a81d2caf0de4949799a5d2d354a738c73b13 | fa3f5368cbba48de3b9c57c79785e51086afb04d | /Python/inheritance-hello.py | ad1228f82a34d437c255c2e3b42459f18a8c24a4 | []
| no_license | https://github.com/langenhagen/experiments-and-tutorials | 8f853675e0d8718581c33ff099fcb35c8958f315 | 9598af1b8be7ebe8462a0bbfc87a6edfa5063741 | refs/heads/master | 2023-08-03T15:07:38.757388 | 2023-07-31T16:15:34 | 2023-07-31T16:15:34 | 211,196,519 | 4 | 1 | null | false | 2022-03-27T10:02:49 | 2019-09-26T23:15:40 | 2022-01-05T14:28:47 | 2022-03-27T10:02:48 | 254,076 | 1 | 1 | 15 | HTML | false | false | """
Showcase inheritance.
"""
print("---1---")
class C:
def __init__(self):
print("Hello from C.__init__!")
class D(C):
pass
c = C() # calls the print
d = D() # also calls the print
| UTF-8 | Python | false | false | 207 | py | 1,602 | inheritance-hello.py | 1,027 | 0.512077 | 0.507246 | 0 | 18 | 10.5 | 39 |
Hellofafar/Leetcode | 11,364,483,488,334 | c691a91222818c9d56d22abd8ba983378800b579 | 6ac0bba8c1851e71529269c0d9d89a7c8fa507f2 | /Medium/467.py | ee21d5c6a072c61604aaf74d66d17f56c37701a3 | []
| no_license | https://github.com/Hellofafar/Leetcode | e81dc85689cd6f9e6e9756beba070cb11e7b192e | 7a459e9742958e63be8886874904e5ab2489411a | refs/heads/master | 2021-05-16T07:07:19.823953 | 2020-02-17T03:00:09 | 2020-02-17T03:00:09 | 103,690,780 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ------------------------------
# 467. Unique Substrings in Wraparound String
#
# Description:
# Consider the string s to be the infinite wraparound string of "abcdefghijklmnopqrstuvwxyz",
# so s will look like this: "...zabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd....".
# Now we have another string p. Your job is to find out how many unique non-empty substrings of
# p are present in s. In particular, your input is the string p and you need to output the number
# of different non-empty substrings of p in the string s.
# Note: p consists of only lowercase English letters and the size of p might be over 10000.
# Example 1:
# Input: "a"
# Output: 1
# Explanation: Only the substring "a" of string "a" is in the string s.
# Example 2:
# Input: "cac"
# Output: 2
# Explanation: There are two substrings "a", "c" of string "cac" in the string s.
# Example 3:
# Input: "zab"
# Output: 6
# Explanation: There are six substrings "z", "a", "b", "za", "ab", "zab" of string "zab" in the
# string s.
#
# Version: 1.0
# 10/16/19 by Jianfa
# ------------------------------
class Solution:
    def findSubstringInWraproundString(self, p: str) -> int:
        """Count distinct non-empty substrings of p that occur in the
        infinite wraparound string ...zabcdefghijklmnopqrstuvwxyzab...

        For every letter we track the length of the longest "wraparound
        run" (consecutive alphabet letters, with 'z' wrapping to 'a')
        that ends at that letter.  Each such run of length L ending at a
        letter contributes exactly L distinct substrings ending there,
        so the answer is the sum over all letters.
        """
        if len(p) <= 1:
            return len(p)
        # letter -> length of the longest wraparound run ending at it
        longest_run = {p[0]: 1}
        run = 1
        for prev, cur in zip(p, p[1:]):
            # cur extends the run iff it is the alphabet successor of prev
            # (modulo 26, so 'z' followed by 'a' also counts).
            run = run + 1 if (ord(cur) - ord(prev)) % 26 == 1 else 1
            if run > longest_run.get(cur, 0):
                longest_run[cur] = run
        return sum(longest_run.values())
# Used for testing
if __name__ == "__main__":
    # Instantiate only when run as a script; no cases are exercised here.
    test = Solution()
# ------------------------------
# Summary:
# Idea from: https://leetcode.com/problems/unique-substrings-in-wraparound-string/discuss/95439/Concise-Java-solution-using-DP
# Main idea is the max number of unique substring ENDS with a letter equals to the length of
# max contiguous substring ends with that letter. Example "abcd", the max number of unique
# substring ends with 'd' is 4, apparently they are "abcd", "bcd", "cd" and "d"
# At first my idea is to check the substring STARTS with a letter, but it's not easy to update
# max number during traverse, because you don't have the exact number for the new letter to
# update.
#
# O(n) time and O(n) space | UTF-8 | Python | false | false | 2,661 | py | 540 | 467.py | 537 | 0.6118 | 0.59752 | 0 | 74 | 34.972973 | 126 |
MuteMeteor/PythonLearning | 9,345,848,841,926 | 6e2aa4597029e25178e027a196b4ae377c999f78 | d6e2322a19508ad9f172fd09dd3ce59babe53da3 | /01 Python3/Jan-kenPunch.py | badac2e441aa382b0979c4e8e8577c984a16c919 | [
"MIT"
]
| permissive | https://github.com/MuteMeteor/PythonLearning | a14034af0abef9923279e0cee511c94c021b738e | 9400b119e16be262a7fe61250f3f2f5946018607 | refs/heads/master | 2021-05-13T20:41:21.409910 | 2018-01-23T10:23:44 | 2018-01-23T10:23:44 | 116,915,226 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
#1. Prompt for and read the player's move (0 = scissors, 1 = rock, 2 = paper).
player = int(input("请输入 0剪刀 1石头 2布:"))
#2. Let the computer pick a random move in the same 0-2 range.
computer = random.randint(0,2)
#3. Compare the two moves and print the corresponding result.
#if the player wins (scissors beats paper, rock beats scissors, paper beats rock):
if (player==0 and computer==2) or (player==1 and computer==0) or (player==2 and computer==1):
    print("You win.")
#elif both chose the same move, it is a draw:
elif player==computer:
    print("Draw.")
else:
    print("You lose.")
| UTF-8 | Python | false | false | 459 | py | 6 | Jan-kenPunch.py | 6 | 0.660969 | 0.621083 | 0 | 17 | 19.588235 | 93 |
karthikpappu/pyc_source | 13,314,398,637,683 | 9574f54f0c17c00637358b03d0776301af09fbf7 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/ClueBin-0.2.3-py2.5/pastebin.py | 94bc01dd98b9d072a123df3986778ccb0c170a0a | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.5 (62131)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.linux-i686/egg/cluebin/pastebin.py
# Compiled at: 2008-06-27 12:04:19
import webob
from StringIO import StringIO
import pygments
from pygments import lexers
from pygments import formatters
from pygments import util
from xml.sax import saxutils
from cluebin import paste as pastebase
from cluebin import utils
class PasteBinApp(object):
    """WSGI app representing a pastebin.
    >>> app = PasteBinApp()
    """
    # Cookie names used to remember a visitor's last-used language/author.
    COOKIE_LANGUAGE = 'cluebin.last_lang'
    COOKIE_AUTHOR = 'cluebin.last_author'
    def __init__(self, display_tag_line=True):
        # pmanager owns paste persistence; make_app() may swap its datastore.
        self.pmanager = pastebase.PasteManager()
        self.display_tag_line = display_tag_line
    def __call__(self, environ, start_response):
        # WSGI entry point: dispatch PATH_INFO's first segment to a method of
        # the same name (default: index), then wrap its output in the page
        # chrome unless the handler already set a non-200 status (redirects).
        request = webob.Request(environ)
        response = webob.Response(content_type='text/html')
        out = StringIO()
        handler = self.index
        pieces = [ x for x in environ['PATH_INFO'].split('/') if x ]
        if pieces and hasattr(self, pieces[0]):
            handler = getattr(self, pieces[0])
        handler(request, response, out, *pieces[1:])
        if response.status_int != 200:
            return response(environ, start_response)
        version = '0.2.2'
        tag_line = ''
        if self.display_tag_line:
            tag_line = 'ClueBin v%s by <a href="http://www.serverzen.com">ServerZen Software</a>' % version
        # Page chrome: inline CSS header plus footer markup around the handler output.
        top = '\n    <html>\n      <head>\n        <title>PasteBin</title>\n        <style>\n          PRE { margin: 0; }\n          .code, .linenos { font-size: 90%; }\n          .source { border: 1px #999 dashed; margin: 0; padding: 1em }\n          .left { width: 70%; float: left; }\n          .right { margin-left: 2em; width: 20%; float: left; }\n          .field { margin-bottom: 1em; }\n          .field LABEL { font-weight: bold; width: 20%; display: block; float: left; }\n          .field INPUT { width: 80% }\n          .field TEXTAREA { width: 100%; height: 10em }\n          .previous_paste DD { margin-left: 0; }\n          .clear { display: block; clear; both; }\n          .header { font-size: 90%; float: right; }\n        </style>\n      </head>'
        top += '<body><div id="main"><div class="header">%s</div>' % tag_line
        footer = ''
        bottom = '<div class="footer">%s</div><div class="clear"><!-- --></div></div></body></html>' % footer
        response.unicode_body = top + out.getvalue() + bottom
        return response(environ, start_response)
    def paste_listing(self, request, response, out):
        # Render the "Previous Pastes" sidebar as an HTML list of links.
        print >> out, '<fieldset><legend>Previous Pastes</legend><ul>'
        for pobj in self.pmanager.get_pastes():
            if pobj.date is not None:
                pdate = pobj.date.strftime('%x at %X')
            else:
                pdate = 'UNKNOWN'
            print >> out, '<li><a href="%s">Post by: %s on %s</a></li>' % (
             utils.url(request, 'pasted/%i' % pobj.pasteid),
             pobj.author_name, pdate)
        print >> out, '</ul></fieldset>'
        return
    def preferred_author(self, request):
        # Author from the request params, falling back to the remembered cookie.
        author_name = request.params.get('author_name', '')
        if not author_name:
            author_name = request.cookies.get(self.COOKIE_AUTHOR, '')
        if isinstance(author_name, str):
            author_name = unicode(author_name, 'utf-8')
        return author_name
    def preferred_language(self, request):
        # NOTE(review): no return statement here, so callers always get None;
        # likely the decompiler dropped a trailing ``return language`` -- verify
        # against the original ClueBin 0.2.x source before relying on it.
        language = request.cookies.get(self.COOKIE_LANGUAGE, '')
        if isinstance(language, str):
            language = unicode(language, 'utf-8')
    def index(self, request, response, out, msg='', paste_obj=None):
        # Main page: optional flash message, optional highlighted previous
        # paste, the paste submission form and the sidebar listing.
        if msg:
            msg = '<div class="message">%s</div>' % msg
        paste = ''
        language = self.preferred_language(request)
        if paste_obj is not None:
            paste = paste_obj.paste or ''
            try:
                if paste_obj.language:
                    l = lexers.get_lexer_by_name(paste_obj.language)
                else:
                    l = lexers.guess_lexer(paste_obj.paste)
                language = l.aliases[0]
            except util.ClassNotFound, err:
                l = lexers.TextLexer()
            # NOTE(review): decompiler artifact -- this bare ``else:`` with a
            # dedented body is not valid Python; the original presumably ran
            # the highlighting below inside the ``if paste_obj`` branch.
            else:
            formatter = formatters.HtmlFormatter(linenos=True, cssclass='source')
            formatted_paste = pygments.highlight(paste, l, formatter)
            print >> out, '\n            <style>%s</style>\n            <dl class="previous_paste">\n              <dt>Previous Paste</dt>\n              <dd>Format: %s</dd>\n              <dd>%s</dd>\n            </dl>\n            ' % (formatter.get_style_defs(), l.name, formatted_paste)
        # Build the <select> of all pygments lexers, pre-selecting the
        # preferred language when it matches a lexer's primary alias.
        lexer_options = '<option value="">-- Auto-detect --</option>'
        all = [ x for x in lexers.get_all_lexers() ]
        all.sort()
        for (name, aliases, filetypes, mimetypes_) in all:
            selected = ''
            if language == aliases[0]:
                selected = ' selected'
            lexer_options += '<option value="%s"%s>%s</option>' % (aliases[0],
             selected,
             name)
        print >> out, '\n        %s\n        <div class="left">\n        ' % msg
        print >> out, '\n        <form action="%(action)s" method="POST">\n          <fieldset>\n            <legend>Paste Info</legend>\n            <div class="field">\n              <label for="author_name">Name</label>\n              <input type="text" name="author_name" value="%(author_name)s" />\n            </div>\n            <div class="field">\n              <label for="language">Language</label>\n              <select name="language">\n%(lexers)s\n              </select>\n            </div>\n            <div class="field">\n              <label for="paste">Paste Text</label>\n              <textarea name="paste">%(paste)s</textarea>\n            </div>\n            <input type="submit" />\n          </fieldset>\n        </form>\n        </div>\n        ' % {'action': utils.url(request, 'paste'), 'paste': saxutils.escape(paste),
         'lexers': lexer_options,
         'author_name': self.preferred_author(request)}
        print >> out, '<div class="right">'
        self.paste_listing(request, response, out)
        print >> out, '</div>'
        return
    def pasted(self, request, response, out, *args):
        # /pasted/<id>: show the main page with the selected paste rendered.
        pobj = self.pmanager.get_paste(args[0])
        self.index(request, response, out, paste_obj=pobj)
    def paste(self, request, response, out):
        # /paste: store the submitted form, remember author/language in
        # cookies, then redirect to the new paste's page.
        if not request.params.get('paste', None):
            self.index(request, response, out, msg='* You did not fill in body')
        else:
            paste = request.params['paste']
            author_name = request.params['author_name']
            language = request.params['language']
            response.set_cookie(self.COOKIE_AUTHOR, author_name)
            response.set_cookie(self.COOKIE_LANGUAGE, language)
            # Normalize all form fields to unicode (this is Python 2 code).
            if isinstance(author_name, str):
                author_name = unicode(author_name, 'utf-8')
            if isinstance(language, str):
                language = unicode(language, 'utf-8')
            if isinstance(paste, str):
                paste = unicode(paste, 'utf-8')
            pobj = self.pmanager.save_paste(author_name, paste, language)
            newurl = utils.url(request, 'pasted/%s' % str(pobj.pasteid))
            response.status = '301 Moved Permanently'
            response.headers['Location'] = newurl
        return
def make_app(global_config, datastore=None):
    """PasteDeploy-style application factory.

    Builds a PasteBinApp and, when ``datastore`` is given, wires it into
    the app's paste manager.  ``global_config`` is accepted only to match
    the factory signature and is not used.
    """
    application = PasteBinApp()
    if datastore is None:
        return application
    application.pmanager.datastore = datastore
    return application
def build_datastore(datastore_name, *datastore_args):
    """Resolve ``datastore_name`` (a dotted "module.attr" path) through
    utils.importattr and call the resulting factory with ``datastore_args``.
    """
    factory = utils.importattr(datastore_name)
    return factory(*datastore_args)
def main(cmdargs=None):
    """Command-line entry point: parse options, optionally build a
    persistent datastore, and serve the pastebin via wsgiref.

    serve_forever() blocks until interrupted; the trailing ``return 0``
    is only reached if the server loop exits.
    """
    from wsgiref import simple_server
    import sys, optparse
    logger = utils.setup_logger()
    if cmdargs is None:
        cmdargs = sys.argv[1:]
    # Known datastore factories, listed in --help for convenience.
    storages = ['cluebin.googledata.GooglePasteDataStore',
     'cluebin.sqldata.SqlPasteDataStore']
    parser = optparse.OptionParser()
    parser.add_option('-i', '--interface', dest='interface', default='0.0.0.0', help='Interface to listen on (by default it is 0.0.0.0 which is shorthand for all interfaces)')
    parser.add_option('-p', '--port', dest='port', default='8080', help='Port to listen on (by default 8080)')
    parser.add_option('-s', '--storage', dest='storage_name', default='', help='Storage to use for pastes (by default non-persistent), cluebin-provided options are: %s' % str(storages))
    (opts, args) = parser.parse_args(cmdargs)
    datastore = None
    if opts.storage_name:
        # Remaining positional args are handed to the datastore factory.
        datastore = build_datastore(opts.storage_name, *args)
        logger.info('Using storage: %s' % opts.storage_name)
        logger.info('Using storage arguments: %s' % str(args))
    # BUG FIX: make_app's first parameter is the (unused) global_config
    # mapping; the datastore must be the second argument.  The previous
    # ``make_app(datastore)`` passed it as global_config, so any configured
    # datastore was silently ignored and the app was always non-persistent.
    app = make_app({}, datastore)
    server = simple_server.make_server(opts.interface, int(opts.port), app)
    logger.info('ClueBin now listening on %s:%s' % (opts.interface, opts.port))
    server.serve_forever()
    return 0
if __name__ == '__main__':
import sys
sys.exit(main()) | UTF-8 | Python | false | false | 9,398 | py | 114,545 | pastebin.py | 111,506 | 0.561077 | 0.549159 | 0 | 184 | 50.081522 | 931 |
yang5426/pythonCrawler | 15,539,191,689,676 | f776b69610fcb1073787fb9382a944ce2a7c3c75 | b8df4370781b091efff72134af86914d1a0757dc | /pthonCrawler/MongoDBSave.py | 76391909376e8fa5435f370d2ba8492b36f2d46f | []
| no_license | https://github.com/yang5426/pythonCrawler | da87e78f48f7bcd413573571ab975a35def8cb48 | 73030b59bb97c742e5b5026d7b8787cffd7e2a57 | refs/heads/master | 2022-12-13T05:56:13.811053 | 2020-09-10T09:34:40 | 2020-09-10T09:34:40 | 292,805,667 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pymongo
# PyMongo walk-through: connect, insert, query, sort, update and delete
# documents in the ``test.students`` collection on a local MongoDB server.
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.test
# db = client['test']
collection = db.students
# collection = db['students']
student = {
    'id': '20170101',
    'name': 'Jordan',
    'age': 20,
    'gender': 'male'
}
# NOTE(review): Collection.insert/remove and Cursor.count used below were
# deprecated in PyMongo 3.x and removed in 4.x -- verify the installed
# driver version before running this script.
# result = collection.insert(student)
# print(result)
student1 = {
    'id': '20170101',
    'name': 'Jordan',
    'age': 20,
    'gender': 'male'
}
student2 = {
    'id': '20170202',
    'name': 'Mike',
    'age': 21,
    'gender': 'male'
}
# result = collection.insert([student1, student2])
# print(result)
student = {
    'id': '20170101',
    'name': 'Jordan',
    'age': 20,
    'gender': 'male'
}
# result = collection.insert_one(student)
# print(result)
# print(result.inserted_id)
student1 = {
    'id': '20170101',
    'name': 'Jordan',
    'age': 20,
    'gender': 'male'
}
student2 = {
    'id': '20170202',
    'name': 'Mike',
    'age': 21,
    'gender': 'male'
}
# result = collection.insert_many([student1, student2])
# print(result)
# print(result.inserted_ids)
# Single-document query by field value.
print('查询')
result = collection.find_one({'name': 'Mike'})
print(type(result))
print(result)
# Query by ObjectId; this needs the objectid helper from the bson package.
from bson.objectid import ObjectId
result = collection.find_one({'_id': ObjectId('5f4f3afc00bf77e47d030070')})
print(result)
print()
# Multi-document queries with find(): equality, comparison and regex filters.
print('find 方法 多条查询')
results = collection.find({'age': 20})
print(results)
for result in results:
    print(result)
results = collection.find({'age': {'$gt': 20}})
results = collection.find({'name': {'$regex': '^M.*'}})
for result in results:
    print(result)
# Counting matches.
print("计数")
count = collection.find().count()
print(count)
count = collection.find({'age': 20}).count()
print(count)
# Sorting ascending and descending by a field.
print('排序')
results = collection.find().sort('name', pymongo.ASCENDING)
print([result['name'] for result in results])
results = collection.find().sort('name', pymongo.DESCENDING)
print([result['name'] for result in results])
# Pagination with skip()/limit().
print('偏移')
results = collection.find().sort('name', pymongo.ASCENDING).skip(8).limit(2)
print([result['name'] for result in results])
# For very large collections, page by _id instead of skip() for efficiency.
results = collection.find({'_id': {'$gt': ObjectId('5f4f3b9fb8594a94a9effe96')}})
print([result['name'] for result in results])
# Without $set the whole document is replaced by the student dict; any
# other existing fields would be deleted.
print('更新')
condition = {'name': 'Jordan'}
student = collection.find_one(condition)
student['age'] = 25
result = collection.update_one(condition, {'$set': student})
print(result)
print(result.matched_count, result.modified_count)
condition = {'age': {'$gt': 20}}
result = collection.update_many(condition, {'$inc': {'age': 1}})
print(result)
print(result.matched_count, result.modified_count)
# Deletion: legacy remove() versus delete_one()/delete_many().
print('删除')
result = collection.remove({'name': 'Kevin'})
print(result)
result = collection.delete_one({'name': 'Kevin'})
print(result)
print(result.deleted_count)
result = collection.delete_many({'age': {'$lt': 25}})
print(result.deleted_count)
# PyMongo also offers combined methods such as find_one_and_delete,
# find_one_and_replace and find_one_and_update, which look a document up
# and then delete, replace or update it in one step.
# Indexes can be managed as well, via create_index, create_indexes and
# drop_index.
| UTF-8 | Python | false | false | 3,366 | py | 30 | MongoDBSave.py | 13 | 0.66343 | 0.62589 | 0 | 143 | 20.608392 | 105 |
WorkRock/python_lab | 9,414,568,345,312 | 12bfd9f88cfa58fc86b2bd0439145579e5d8edda | 2862a0f5a5e344650300365a6c9f1e2c9bd5a0d4 | /HomeWork/15/lab5_12.py | 8596f4cfd32012eab5edf0420b52f7d75c44ad97 | []
| no_license | https://github.com/WorkRock/python_lab | 8da4e377eb1c0a2302f30154acaf9e4400c34863 | 01c82b050addd4261e1aaace3b19ee1e9307f27d | refs/heads/master | 2021-09-01T23:50:12.381411 | 2017-12-29T08:01:31 | 2017-12-29T08:01:31 | 109,681,471 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
주제 :
01. intSet 클래스는 정수들의 집합이다. 정수들을 관리하는 리스트 selfvals를 애트리뷰트로 가진다
02. 새로운 정수 e를 추가하는 insert 메소드 이미 있다면 추가하지 않음.
03. e가 정수집합에 포함되어 있는지 확인하는 member메소드인 involve(True, False 반환)
04. e를 제거하는 remove메소드. 단, e가 해당 집합에 없다면 '적당한 오류 메세지 출력'
05. 집합 형식의 문자열로 변환시켜주는 __str__메소드. 단, 정수들은 정렬되어 반환되어야 한다.
06. intSet을 저장하는 변수 s를 정의
07. s에 5, 3, 7을 삽입
08. s를 정렬하여 출력
09. s에 8이 있는지 결과 출력
10. s에 3이 있는지 결과 출력
11. s에서 3 제거
12. s에서 4 제거
13. s를 정렬하여 출력
작성일 : 17. 11. 06.
작성자 : 201632023 이지훈
"""
class intSet:
    """A set of integers backed by a duplicate-free list in ``self.vals``.

    Korean user-facing messages are preserved from the original lab spec.
    """

    def __init__(self, *e):
        # Initial members come in as positional arguments.  NOTE(review):
        # like the original, the constructor does not deduplicate them;
        # insert() is the only guarded entry point.
        self.vals = list(e)

    def insert(self, e):
        """Add integer e unless it is already present; print the outcome."""
        if self.involve(e):
            return print("이미 존재!")
        self.vals.append(e)
        return print("삽입 완료 : " + str(e))

    def involve(self, e):
        """Return True if e is a member (the 'member' method of the spec)."""
        # Fixed idiom: membership test directly, instead of an
        # if/return-True/return-False ladder.
        return e in self.vals

    def remove(self, e):
        """Remove e, returning a status message (error message if absent)."""
        if self.involve(e):
            self.vals.remove(e)
            return "삭제 완료 : " + str(e)
        return "삭제하려는 정수가 존재하지 않습니다. : " + str(e)

    def __str__(self):
        # Render members sorted WITHOUT mutating self.vals: the original
        # called self.vals.sort() here, hiding a state change inside str().
        # The printed output is identical.
        return "현재 만들어진 집합 : " + str(sorted(self.vals))
# Steps 06-13 of the assignment: exercise the intSet API.
s = intSet(5,3,7)
s.insert(6)
s.insert(3) # duplicate: not added
print(s)
print(s.involve(8)) # False
print(s.involve(3)) # True
print(s.remove(3))
print(s.remove(4)) # 4 is absent -> error message
print(s) | UTF-8 | Python | false | false | 1,741 | py | 44 | lab5_12.py | 41 | 0.573705 | 0.528287 | 0 | 56 | 21.428571 | 61 |
sjy428034591/python | 7,249,904,826,954 | 9138b1d63c751b60769680fec37a2cfcc87de4a9 | 03bda622ab7bcc736ff71e829b32895cb33cf4df | /call_satr.py | e75fb5f2491bcddc025a9fb631140a7d321e6e7e | []
| no_license | https://github.com/sjy428034591/python | a0aaab81cf3048640dbb8c2fb8fa51ed0a60fbb0 | 49bbff00cffb4bc355c2eaab0c2af9b2758f14d8 | refs/heads/master | 2020-05-20T05:21:50.231016 | 2019-05-09T13:16:27 | 2019-05-09T13:16:27 | 185,403,005 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import star
# Exercise the local ``star`` module: print its greeting constant, then call
# pstar() with the default argument and with an explicit value.
print(star.hi)
star.pstar()
star.pstar(30) # presumably the star count -- confirm in star.py
| UTF-8 | Python | false | false | 56 | py | 37 | call_satr.py | 37 | 0.732143 | 0.696429 | 0 | 5 | 10.2 | 14 |
KL35Ronaldo/MegaUploaderbot | 4,312,147,178,123 | a7aa4441f72a67df218cec035cd0020b56ccd062 | b2c44782b33a063bdb5066e92a924b596e5215d8 | /helper/downloader/tgDL.py | 0ceded81ff45c6c009ee359a9f605ff7e483b5d3 | [
"Apache-2.0"
]
| permissive | https://github.com/KL35Ronaldo/MegaUploaderbot | 1e414ea93d2d5e801ac25eceaefd2fcf535b3577 | ddd58980ee568f696d226b52750778ca138416e3 | refs/heads/master | 2023-09-02T17:19:52.795920 | 2021-11-19T16:41:56 | 2021-11-19T16:41:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # !/usr/bin/env python3
"""Importing"""
from pyrogram.errors import exceptions
# Importing Required developer defined data
from helper.downloader.downloadingData import *
class TgDown:
    """Download a Telegram media message to disk while live-editing a
    progress message (pyrogram client)."""
    def __init__(self, bot, msg, process_msg, Downloadfolder):
        # msg: the media message to download; process_msg: the status
        # message that gets edited with progress; Downloadfolder: target dir.
        self.msg = msg
        self.bot = bot
        self.process_msg = process_msg
        self.Downloadfolder = Downloadfolder
    async def start(self):
        # Run the download; returns True on success, None (after cleanup)
        # when the download yielded no file.
        async def __editProgressMsg(current, total):
            # Render an 18-segment progress bar plus MB/percentage/speed/ETA.
            completedFloat = (current/1024)/1024
            completed = int(completedFloat)
            stream = current/total
            progress = int(18*stream)
            progress_bar = '■' * progress + '□' * (18 - progress)
            percentage = int((stream)*100)
            speed = round((completedFloat/(time() - t1)), 1)
            if speed == 0:
                speed = 0.1
            remaining = int((((total - current)/1024)/1024)/speed)
            try:
                self.process_msg = await self.process_msg.edit_text(f"<b>Downloading... !! Keep patience...\n {progress_bar}\n📊Percentage: {percentage}%\n✅Completed: {completed} MB\n🚀Speed: {speed} MB/s\n⌚️Remaining Time: {remaining} seconds</b>", parse_mode = 'html')
            except exceptions.bad_request_400.MessageNotModified:
                pass
            finally:
                # NOTE(review): time.sleep() inside a coroutine blocks the
                # whole event loop for a second per update -- presumably a
                # crude rate limit; asyncio.sleep would be non-blocking.
                sleep(1)
        def __progressBar(current, total):
            # pyrogram calls this sync callback; schedule the async edit.
            self.bot.loop.create_task(__editProgressMsg(current, total))
        # NOTE(review): t1 is a module-level global shared by the nested
        # coroutine above; concurrent downloads would overwrite each other's
        # start time -- confirm only one TgDown runs at a time.
        global t1
        t1 = time()
        self.filename = await self.msg.download(file_name = self.Downloadfolder, progress = __progressBar)
        if self.filename:
            self.filename = path.basename(self.filename)
            try:
                self.n_msg = await self.process_msg.edit_text(BotMessage.uploading_msg, parse_mode = 'html')
            except exceptions.bad_request_400.MessageNotModified:
                pass
            else:
                return True
        else:
            # Nothing was downloaded: drop the work folder and tell the user.
            rmtree(self.Downloadfolder)
            await self.process_msg.delete()
            await self.msg.reply_text(BotMessage.uploading_unsuccessful, parse_mode = 'html')
            return
| UTF-8 | Python | false | false | 2,200 | py | 22 | tgDL.py | 19 | 0.579212 | 0.561813 | 0 | 58 | 36.637931 | 268 |
xiaopp123/machine-learning | 13,005,160,977,517 | aee412cfba456121e3cf926efd33517065d33ec9 | c1b62fef790878d3e4f917bcc9f6ff56c6f639bf | /PCA/pca.py | d3d70599cf703fc2fc25abe32ea41d992a86bb77 | []
| no_license | https://github.com/xiaopp123/machine-learning | 86a11af2919985b7a8d6fd75f79fc99a611e0915 | 0e27972d48837b294953e18b1e6bfd88bdb03575 | refs/heads/master | 2021-09-14T00:04:24.254311 | 2018-05-06T10:43:17 | 2018-05-06T10:43:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#import numpy as np
#
#X = [[2, 0, -1.4],
# [2.2, 0.2, -1.5],
# [2.4, 0.1, -1],
# [1.9, 0, -1.2]]
#
##print (np.mean(X, axis=0))
##print (np.cov(np.array(X).T))
#
#w, v = np.linalg.eig(np.array([[1, -2],
# [2, -3]]))
##print('特征值:{}\n特征向量:{}'.format(w, v))
#
#x = np.mat([[0.9, 2.4, 1.2, 0.5, 0.3, 1.8, 0.5, 0.3, 2.5, 1.3],
# [1, 2.6, 1.7, 0.7, 0.7, 1.4, 0.6, 0.6, 2.6, 1.1]])
#
#x = x.T
#T = x - x.mean(axis=0)
## print T
#C = np.cov(x.T)
##print C
#w, v = np.linalg.eig(C)
#v_ = np.mat(v[:, 0])
#v_ = v_.T
#y = T * v_
##print y
# Project the 4-D iris data onto its first two principal components and
# scatter-plot the three species in different colors.  (Python 2 syntax:
# note the print statement below.)
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
data = load_iris()
y = data.target
X = data.data
pca = PCA(n_components=2)
reduced_X = pca.fit_transform(X)
print reduced_X
# Split the projected points into one x/y list pair per class label.
red_x, red_y = [],[]
blue_x, blue_y = [], []
green_x, green_y = [], []
for i in range(len(reduced_X)):
    if y[i] == 0:
        red_x.append(reduced_X[i][0])
        red_y.append(reduced_X[i][1])
    elif y[i] == 1:
        blue_x.append(reduced_X[i][0])
        blue_y.append(reduced_X[i][1])
    else:
        green_x.append(reduced_X[i][0])
        green_y.append(reduced_X[i][1])
# One scatter call per class so each gets its own color and marker.
plt.scatter(red_x, red_y, c='r', marker='x')
plt.scatter(green_x, green_y, c='g', marker='D')
plt.scatter(blue_x, blue_y, c='b', marker='.')
plt.show()
| UTF-8 | Python | false | false | 1,400 | py | 13 | pca.py | 12 | 0.512301 | 0.457308 | 0 | 59 | 22.423729 | 64 |
dherna/Development-Tools | 13,348,758,386,320 | 40659acc79648074b983f7bee13d71867a9e047b | 3554b165fd042d12a84aa3c9c78b582df71f1f80 | /ejercicio1.py | 46d4968f3ff26bbf7f7b2e407e95ca248354975b | []
| no_license | https://github.com/dherna/Development-Tools | eb70ae9a7714c4b9d7a55d9760d0736ae89056a8 | 4bd99deb7abdb99262ad576cca449c5a5a159319 | refs/heads/master | 2021-01-19T14:09:30.879241 | 2012-05-02T22:03:39 | 2012-05-02T22:03:39 | 2,400,493 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Print "login --> shell" for every entry in /etc/passwd (Python 2 code:
# ``file`` builtin and print statements).
try:
    fichero = file('/etc/passwd','r')
except IOError:
    # /etc/passwd missing: assume this is not a Linux system.
    print "Esto no es un sistama Linux"
else:
    while True:
        linea = fichero.readline()
        if not linea: break
        # passwd fields are colon-separated; 0 = login name, 6 = shell.
        dato = linea.split(":")
        print dato[0]+" --> "+dato[6]
print "fin del ejercicio1"
| UTF-8 | Python | false | false | 299 | py | 9 | ejercicio1.py | 7 | 0.591973 | 0.58194 | 0 | 13 | 21.923077 | 43 |
deepfunc/ita | 16,647,293,255,655 | c8732e96ba58664607aae751617e36d93b08125a | 98fc79b9e67cf896b6ee6fe766e560e4a7f08c2b | /search/randomized_select.py | 43f1f0f74b473ee3820bb2bcc70b1281ca63b2ed | []
| no_license | https://github.com/deepfunc/ita | d9bcaa20717bc43edc6870ab3def378cb899635b | ea25e196ee61118c3aba9ff23323ac1da54296ed | refs/heads/master | 2020-07-23T19:50:10.115603 | 2020-01-14T09:58:48 | 2020-01-14T09:58:48 | 207,688,260 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sort.quick_sort import QuickSort
class RandomizedSelect(object):
    """Select the i-th smallest element of a list via randomized
    quickselect (expected O(n))."""
    def __init__(self, list_):
        # The algorithm partitions in place, so only a real list is accepted.
        if not isinstance(list_, list):
            raise Exception('list_ must be a list!')
        self._list = list_
    def select(self, i):
        """Return the i-th smallest element (1-based rank i)."""
        seq = self._list
        if i <= 0:
            raise Exception('i must be greater than 0!')
        if i > len(seq):
            raise Exception('i must be equal or less than %d!' % len(seq))
        return self.randomized_select(seq, 0, len(seq) - 1, i)
    @staticmethod
    def randomized_select(a, p, r, i):
        """Quickselect on a[p..r]: the i-th smallest of that sub-range."""
        if p == r:
            return a[p]
        pivot = QuickSort.randomized_partition1(a, p, r)
        # rank of the pivot element within a[p..r] (1-based)
        rank = pivot - p + 1
        if i == rank:
            return a[pivot]
        if i < rank:
            return RandomizedSelect.randomized_select(a, p, pivot - 1, i)
        return RandomizedSelect.randomized_select(a, pivot + 1, r, i - rank)
| UTF-8 | Python | false | false | 949 | py | 36 | randomized_select.py | 36 | 0.518797 | 0.510204 | 0 | 34 | 26.382353 | 73 |
dana6691/deep-learning | 283,467,878,372 | befa7d6c9a031b2918d6e66b1865070e0aea4d8d | 655159b76898d468a009163f5a894e343c40ffad | /Basic.py | 3f30f7b18cd781b0d796eb7a35dcce2fe9605d0c | []
| no_license | https://github.com/dana6691/deep-learning | adc5f10f3b6008f550997727b18f7acec1c3610d | 91a1c6d03f80742f0ad979432fde0dac60230390 | refs/heads/master | 2020-09-12T17:42:20.947460 | 2019-11-18T20:50:55 | 2019-11-18T20:50:55 | 222,498,796 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import numpy as np
def basic_sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)); z may be a scalar or an ndarray."""
    negative_exp = np.exp(-z)
    return 1 / (1 + negative_exp)
print(basic_sigmoid(3)) # sanity check: ~0.9525741268224334
# GRADED FUNCTION: sigmoid_derivative
def sigmoid_derivative(x):
    """Gradient of the sigmoid: s(x) * (1 - s(x)), elementwise for arrays."""
    sig = 1 / (1 + np.exp(-x))
    return sig * (1 - sig)
# Elementwise derivative over a small sample vector.
x = np.array([1, 2, 3])
print ("sigmoid_derivative(x) = " + str(sigmoid_derivative(x)))
# GRADED FUNCTION: image2vector
def image2vector(image):
    """
    Argument:
    image -- a numpy array of shape (length, height, depth)

    Returns:
    v -- a vector of shape (length*height*depth, 1)
    """
    # BUG FIX: the previous body computed image[1:] * image[1:] (an
    # elementwise square of a slice), which matches neither the docstring
    # nor a column vector.  Flatten every entry into shape (L*H*D, 1).
    v = image.reshape(image.shape[0] * image.shape[1] * image.shape[2], 1)
    return v
# This is a 3 by 3 by 2 array, typically images will be (num_px_x, num_px_y,3) where 3 represents the RGB values
# Demo fixture for image2vector; values are arbitrary floats in [0, 1).
image = np.array([[[ 0.67826139, 0.29380381],
        [ 0.90714982, 0.52835647],
        [ 0.4215251 , 0.45017551]],
       [[ 0.92814219, 0.96677647],
        [ 0.85304703, 0.52351845],
        [ 0.19981397, 0.27417313]],
       [[ 0.60659855, 0.00533165],
        [ 0.10820313, 0.49978937],
        [ 0.34144279, 0.94630077]]])
print ("image2vector(image) = " + str(image2vector(image)))
| UTF-8 | Python | false | false | 1,364 | py | 1 | Basic.py | 1 | 0.559647 | 0.423417 | 0 | 61 | 21.262295 | 112 |
asnogordo/eth2.tax | 17,540,646,456,673 | 05134b76f6a073101bf9e9f40e4027b3e9e06a61 | 7665a4bd77679fc33ad30c1786c4795910df0270 | /src/indexer/balances.py | 85442fd188be5c68fc919516ecd5209f35413823 | []
| no_license | https://github.com/asnogordo/eth2.tax | 7705dbeeeca1fedd5f78d3c4a959b43f7c137272 | 2e24b6193b413d152d9252a115c550dd7fa78aae | refs/heads/main | 2023-05-09T20:34:52.339376 | 2021-06-05T11:43:59 | 2021-06-05T11:52:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
from contextlib import contextmanager
import asyncio
import pytz
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from tqdm import tqdm
from prometheus_client import start_http_server, Gauge
from shared.setup_logging import setup_logging
from shared.config import config
from providers.beacon_node import BeaconNode, GENESIS_DATETIME
from db.tables import Balance
logger = setup_logging(name=__file__)
engine = create_engine(config["db"]["url"], executemany_mode="batch")
START_DATE = "2020-01-01"
ALREADY_INDEXED_SLOTS = set()
slots_with_missing_balances = Gauge(
"slots_with_missing_balances",
"Slots for which balances still need to be indexed and inserted into the database",
)
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations."""
    session = Session(bind=engine)
    try:
        yield session
        session.commit()
    # Bare except (incl. KeyboardInterrupt/SystemExit): roll back whatever
    # the block did, then re-raise; the session is always closed.
    except:
        session.rollback()
        raise
    finally:
        session.close()
async def index_balances():
    """Compute every end-of-day slot (per common timezone) since START_DATE,
    fetch validator balances for slots not yet indexed, and batch-insert
    them into the balance table, updating the Prometheus gauge as it goes.
    """
    global ALREADY_INDEXED_SLOTS
    start_date = datetime.date.fromisoformat(START_DATE)
    end_date = datetime.date.today() + datetime.timedelta(days=1)
    beacon_node = BeaconNode(
        host=config["beacon_node"]["host"],
        port=config["beacon_node"]["port"],
        response_timeout=300,
    )
    logger.info(f"Indexing balances for {len(pytz.common_timezones)} timezones.")
    slots_needed = set()
    logger.debug(f"Calculating the needed slot numbers...")
    # For each timezone, collect the slot at the (capped) start plus the
    # slot at 23:59:59 local time of every day up to today.
    for timezone in pytz.common_timezones:
        timezone = pytz.timezone(timezone)
        start_dt = datetime.datetime.combine(start_date, datetime.time.min)
        end_dt = datetime.datetime.combine(end_date, datetime.time.min)
        start_dt = timezone.localize(start_dt)
        end_dt = timezone.localize(end_dt)
        # Cap the start datetime at genesis
        start_dt = max(start_dt.astimezone(pytz.utc), GENESIS_DATETIME)
        start_dt = start_dt.astimezone(timezone)
        # Cap the end date at today
        end_dt = min(
            end_dt.astimezone(pytz.utc),
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc),
        )
        end_dt = end_dt.astimezone(timezone)
        initial_slot = await BeaconNode.slot_for_datetime(start_dt)
        slots = [initial_slot]
        current_dt = start_dt.replace(hour=23, minute=59, second=59)
        datetimes = []
        while current_dt <= end_dt:
            datetimes.append(current_dt)
            current_dt += datetime.timedelta(days=1)
        head_slot = await beacon_node.head_slot()
        # Resolve all day-end datetimes to slot numbers concurrently.
        slots.extend(
            await asyncio.gather(
                *(beacon_node.slot_for_datetime(dt) for dt in datetimes)
            )
        )
        # Cap slots at head slot
        slots = [s for s in slots if s < head_slot]
        for slot in slots:
            slots_needed.add(slot)
    # Remove slots that have already been retrieved previously
    logger.info("Removing previously retrieved slots")
    if len(ALREADY_INDEXED_SLOTS) == 0:
        # Cold start: seed the in-memory cache from the database.
        with session_scope() as session:
            ALREADY_INDEXED_SLOTS = [
                s for s, in session.query(Balance.slot).distinct().all()
            ]
    # NOTE(review): set.remove raises KeyError if an already-indexed slot is
    # not in slots_needed (e.g. after START_DATE changes) -- presumably the
    # two sets always overlap; confirm, or .discard would be safer.
    for s in ALREADY_INDEXED_SLOTS:
        slots_needed.remove(s)
    # Order the slots - to retrieve the balances for the oldest slots first
    slots_needed = sorted(slots_needed)
    logger.info(f"Getting balances for {len(slots_needed)} slots")
    slots_with_missing_balances.set(len(slots_needed))
    # Commit every few slots so progress survives a crash mid-run.
    commit_every = 3
    current_tx = 0
    with session_scope() as session:
        for slot in tqdm(slots_needed):
            current_tx += 1
            # Store balances in DB
            logger.debug(f"Executing insert statements for slot {slot}")
            balances_for_slot = await beacon_node.balances_for_slot(slot)
            if len(balances_for_slot) == 0:
                # No balances available for slot (yet?), move on
                continue
            session.execute(
                "INSERT INTO balance(validator_index, slot, balance) VALUES(:validator_index, :slot, :balance)",
                [
                    {
                        "validator_index": balance.validator_index,
                        "slot": balance.slot,
                        "balance": balance.balance,
                    }
                    for balance in balances_for_slot
                ],
            )
            ALREADY_INDEXED_SLOTS.append(slot)
            slots_with_missing_balances.dec(1)
            if current_tx == commit_every:
                logger.debug("Committing")
                current_tx = 0
                session.commit()
        session.commit()
if __name__ == "__main__":
# Start metrics server
start_http_server(8000)
from time import sleep
loop = asyncio.get_event_loop()
try:
while True:
loop.run_until_complete(index_balances())
logger.info("Sleeping for a minute now")
sleep(60)
except KeyboardInterrupt:
loop.close()
| UTF-8 | Python | false | false | 5,102 | py | 13 | balances.py | 11 | 0.610937 | 0.604665 | 0 | 161 | 30.689441 | 112 |
Royal-Devendra01/Manager | 3,358,664,448,715 | ae0dea7a8f93501f244c158d5605f07ec45f4711 | f034a29ce5748fdd3cf21cbdd968c3464205f5a5 | /Manager/modules/rules.py | 563b4fd170d952c461ff15e3019d76d33a53efbd | [
"MIT"
]
| permissive | https://github.com/Royal-Devendra01/Manager | b20fad5226293faec2bcf13ebfe14a4d4e6ce088 | 0cc3e585cd79f25c0af77946074c869599def128 | refs/heads/master | 2023-04-08T07:37:07.361563 | 2021-04-13T18:22:07 | 2021-04-13T18:22:07 | 339,805,559 | 0 | 1 | MIT | true | 2021-04-13T08:59:59 | 2021-02-17T17:40:26 | 2021-04-13T07:50:02 | 2021-04-13T08:59:59 | 281 | 0 | 1 | 0 | Python | false | false | from typing import Optional
from telegram import Message, Update, Bot, User
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.error import BadRequest
from telegram.ext import CommandHandler, run_async, Filters
from telegram.utils.helpers import escape_markdown
import Manager.modules.sql.rules_sql as sql
from Manager import dispatcher
from Manager.modules.helper_funcs.chat_status import user_admin
from Manager.modules.helper_funcs.string_handling import markdown_parser
@run_async
def get_rules(bot: Bot, update: Update):
    """Handler for /rules: forward the current chat to send_rules()."""
    chat_id = update.effective_chat.id
    send_rules(update, chat_id)
# Do not async - not from a handler
def send_rules(update, chat_id, from_pm=False):
    """Deliver a chat's rules: to the user's PM when from_pm is True (the
    /start deep-link path), otherwise as a "contact me in PM" button reply
    in the group.  Falls back to a "no rules set" message either way.
    """
    bot = dispatcher.bot
    user = update.effective_user  # type: Optional[User]
    try:
        chat = bot.get_chat(chat_id)
    except BadRequest as excp:
        # Stale /start deep-link to a chat the bot can no longer resolve.
        if excp.message == "Chat not found" and from_pm:
            bot.send_message(user.id, "The rules shortcut for this chat hasn't been set properly! Ask admins to "
                                      "fix this.")
            return
        else:
            raise
    rules = sql.get_rules(chat_id)
    text = f"The rules for *{escape_markdown(chat.title)}* are:\n\n{rules}"
    if from_pm and rules:
        bot.send_message(user.id, text, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=True)
    elif from_pm:
        bot.send_message(user.id, "The group admins haven't set any rules for this chat yet. "
                                  "This probably doesn't mean it's lawless though...!")
    elif rules:
        # In-group: link the user to a PM deep-link carrying the chat id.
        update.effective_message.reply_text("Contact me in PM to get this group's rules.",
                                            reply_markup=InlineKeyboardMarkup(
                                                [[InlineKeyboardButton(text="Rules",
                                                                       url=f"t.me/{bot.username}?start={chat_id}")]]))
    else:
        update.effective_message.reply_text("The group admins haven't set any rules for this chat yet. "
                                            "This probably doesn't mean it's lawless though...!")
@run_async
@user_admin
def set_rules(bot: Bot, update: Update):
    """Handle /setrules (admin only): store the message text after the
    command as this chat's rules, preserving Markdown formatting.

    NOTE(review): when called with no argument (len(args) != 2) this is a
    silent no-op — no reply is sent to the user.
    """
    chat_id = update.effective_chat.id
    msg = update.effective_message  # type: Optional[Message]
    raw_text = msg.text
    args = raw_text.split(None, 1)  # use python's maxsplit to separate cmd and args
    if len(args) == 2:
        txt = args[1]
        # Negative offset shifts message-entity positions left by the
        # length of the stripped "/setrules " prefix so they line up
        # with txt.
        offset = len(txt) - len(raw_text)  # set correct offset relative to command
        markdown_rules = markdown_parser(txt, entities=msg.parse_entities(), offset=offset)
        sql.set_rules(chat_id, markdown_rules)
        update.effective_message.reply_text("Successfully set rules for this group.")
@run_async
@user_admin
def clear_rules(bot: Bot, update: Update):
    """Handle /clearrules (admin only): wipe this chat's stored rules."""
    sql.set_rules(update.effective_chat.id, "")
    update.effective_message.reply_text("Successfully cleared rules!")
def __stats__():
    """One-line module statistic shown in the bot's stats output."""
    chat_count = sql.num_chats()
    return f"{chat_count} chats have rules set."
def __import_data__(chat_id, data):
    """Restore this chat's rules from an exported backup dict."""
    info = data.get('info', {})
    sql.set_rules(chat_id, info.get('rules', ""))
def __migrate__(old_chat_id, new_chat_id):
    """Move stored rules when Telegram upgrades a group to a supergroup."""
    sql.migrate_chat(old_chat_id, new_chat_id)
def __chat_settings__(chat_id, user_id):
    """Per-chat settings summary line.

    Fixes the user-visible grammar typo "it's" -> "its".
    """
    return f"This chat has had its rules set: `{bool(sql.get_rules(chat_id))}`"
__help__ = """
• `/rules`*:* get the rules for this chat.
*Admins only:*
• `/setrules <your rules here>`*:* set the rules for this chat.
• `/clearrules`*:* clear the rules for this chat.
"""
__mod_name__ = "Rules"  # display name in the bot's module list
# /rules is open to everyone; set/clear are admin-gated by the
# @user_admin decorator on their callbacks. All three are group-only.
GET_RULES_HANDLER = CommandHandler("rules", get_rules, filters=Filters.group)
SET_RULES_HANDLER = CommandHandler("setrules", set_rules, filters=Filters.group)
RESET_RULES_HANDLER = CommandHandler("clearrules", clear_rules, filters=Filters.group)
dispatcher.add_handler(GET_RULES_HANDLER)
dispatcher.add_handler(SET_RULES_HANDLER)
dispatcher.add_handler(RESET_RULES_HANDLER)
| UTF-8 | Python | false | false | 4,077 | py | 72 | rules.py | 67 | 0.647261 | 0.646524 | 0 | 111 | 35.675676 | 118 |
zhenxinlei/StockDataUtil | 19,043,885,010,887 | 71c3f753e4c476085577ec8574458930b14ea1e1 | 30e0d9e4d25e1420aecec42d9b1f53e2b80297a4 | /com/datautil/RankingEng/StockRankingEng.py | 69899bb6ad289f4268b95c8385df5375c73ef3f1 | []
| no_license | https://github.com/zhenxinlei/StockDataUtil | c173363796d283a1476b3bdf51481805cfdc74a7 | 03b81b0f5b773653d7153a558c11109d90811dcf | refs/heads/master | 2020-04-22T05:48:40.937534 | 2019-09-14T09:02:41 | 2019-09-14T09:02:41 | 25,719,488 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Zhenxin Lei'
import numpy as np
import pandas
import com.datautil.RiskCal.EqCVarCalImp as cVarCal
def rankReOverCvar(reMx, cvar ):
    '''
    Rank assets (columns) by mean return divided by CVaR.

    :param reMx: 2-D array of per-period returns, one column per asset
    :param cvar: per-asset CVaR row used as the risk denominator
    :return:
    ranking result of stocks( column index); NaN scores are treated as
    -inf so they rank last
    '''
    score = np.mean(reMx, 0) / cvar
    score = np.array(score[0])
    # Push NaN scores to the bottom of the ranking.
    score[np.isnan(score)] = -float("inf")
    # argsort of argsort converts sort order into per-element ranks.
    return score.argsort().argsort()
def momRanking(reData, rankPers, holdPers, topPercent):
    """
    Momentum-style stock selection over rolling windows.

    :param reData:
        asset return DataFrame (rows = periods, columns = symbols)
    :param rankPers: length of each ranking window, in periods
    :param holdPers: holding period; the window start advances by this step
    :param topPercent: percentage of top-ranked symbols to keep per window
    :return:
        DataFrame of recommended symbols, one row per rebalance date
    """
    eqRe = reData.values
    [r, c] = np.shape(eqRe)
    date = []
    symbol = []
    # CVaR confidence level for the risk denominator.
    alpha =0.06
    i = 0
    while i + rankPers - 1 <= r - 1:
        # NOTE(review): this slice covers rankPers-1 rows, not rankPers —
        # confirm whether the window is intentionally one period short.
        subperiod = eqRe[i:i + rankPers - 1, :]
        cVar = cVarCal.eqCVarCalImp(subperiod, alpha)
        # Debug output left in place.
        print('return ', subperiod)
        print("c Var ",cVar)
        ranking = rankReOverCvar(subperiod, cVar)
        # Highest-ranked columns first, truncated to the top percentile.
        symbolindex = np.argsort(ranking)[::-1]
        symbolindex = symbolindex[:int(len(ranking) * topPercent / 100)]
        # date.append(i + rankPers)
        date.append(reData.index[i + rankPers])
        # NOTE(review): 'a' is computed but never used.
        a=reData.axes[1][symbolindex]
        symbol.append(reData.axes[1][symbolindex])
        # date=date.append([reData.axes[0][i]])
        #symbol=symbol.append(reData.axes[1][symbolindex])
        # Stop when the next hold+rank window would run past the data.
        if i+holdPers+rankPers>r-1:
            break
        i = i + holdPers
    # Column labels 1..k for the k recommended slots per date.
    rankindex = [i + 1 for i in range(int(c * topPercent / 100))]
    recomStkData = pandas.DataFrame(symbol, date, rankindex)
    print("recom stock ", recomStkData)
    #print(rankindex)
    return recomStkData
| UTF-8 | Python | false | false | 1,813 | py | 29 | StockRankingEng.py | 16 | 0.570877 | 0.558742 | 0 | 72 | 24.125 | 72 |
GuazP/Python | 16,612,933,514,693 | fd1a912a39dc9243e75b062ed6a994141bd284b8 | d2ffcebe0fe74f3b59661a7c51fe77f9c62598d2 | /Alghoritms/Sort_by_unregular_value/sort_cards.py | d87990743486e07b17912987e2cd94cbeab0f77d | []
| no_license | https://github.com/GuazP/Python | f7cecdb4aab1f852d04bbac3dad09f0407353f21 | 41e0a7ce8b0840fb27c3c892e5ff294816424082 | refs/heads/master | 2020-06-10T17:46:36.757444 | 2019-06-26T00:52:48 | 2019-06-26T00:52:48 | 193,697,149 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def main():
    """Demo: sort three shuffled decks with sortuj and one dict with sortuj2."""
    test = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
    test2 = ['6', '8', '10', 'Q', 'A', '3', '5', '7', '9', 'J', 'K', '2', '4']
    test3 = ['10', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'J', 'K', 'Q']
    print (sortuj(test))
    print (sortuj(test2))
    print (sortuj(test3))
    talia = {"A" : 1, "K" : 13, "Q" : 12, "10" : 10, "J" : 11, "8" : 8, "9" : 9, "7" : 7, "6" : 6, "5" : 5, "4" : 4, "3" : 3, "2" : 2}
    print (sortuj2(talia)) #Result ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
talia = {"A" : 1, "K" : 13, "Q" : 12, "10" : 10, "J" : 11, "8" : 8, "9" : 9, "7" : 7, "6" : 6, "5" : 5, "4" : 4, "3" : 3, "2" : 2}
return [i for i in sorted(slownik, key=talia.get)]
def sortuj(L):
value_of_str = {"A": 1, "J": 11, "Q": 12, "K": 13}
for i in L:
try:
value_of_str[i] = value_of_str.get(i, int(i))
except ValueError:
pass
return [i for i in sorted(value_of_str, key=value_of_str.get)]
# Script entry point.
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 1,085 | py | 60 | sort_cards.py | 58 | 0.38894 | 0.290323 | 0 | 26 | 40.692308 | 134 |
devdonghyun/Algorithm_Class | 16,432,544,906,416 | b3a9bc0128c5bc096e940c32664aee24b6e085ad | 662db11f5f7c698488d44e6c58567446167afb63 | /newLeftAlign.py | dfb705e5d08f367eb0ce6b5f5028ef6a749ef068 | []
| no_license | https://github.com/devdonghyun/Algorithm_Class | 2b6ed459033b05a8ca0e6179d3aee4c8efcd847e | a08b1f5b12bdc0e0724e33c183ee93d73a3d926b | refs/heads/main | 2023-02-26T11:53:30.659999 | 2021-02-08T14:59:25 | 2021-02-08T14:59:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | W = int(input())
words = input().split()
# code below
# Word-wrap DP with cubic penalty: DP[i][j] holds the cubed leftover
# space if words i..j share one line of width W, or inf when they
# overflow; cost[i] is the minimal total penalty for wrapping words
# i..end. (W is read from stdin above this block.)
length = len(words)
DP = [[0] * length for i in range(length)]
cost = [0 for i in range(length)]
for i in range(length):
    if i == length-1:
        break
    # Leftover space with only word i on the line (raw, not yet cubed).
    DP[i][i] = W - len(words[i])
    for j in range(i+1, length):
        # Extend the line by word j plus one separating space.
        DP[i][j] = DP[i][j-1] - len(words[j]) - 1
        # NOTE(review): this branch temporarily rewinds j to finalize the
        # single-word cell DP[i][i] (cube it or mark inf) before doing the
        # same for DP[i][j] — mutating the loop variable is fragile but
        # range() rebinds j on the next iteration, so it is self-contained.
        if j-i == 1:
            j -= 1
            if DP[i][j] < 0:
                DP[i][j] = float('inf')
            else:
                DP[i][j] = DP[i][j] ** 3
            j += 1
            if DP[i][j] < 0:
                DP[i][j] = float('inf')
            else:
                DP[i][j] = DP[i][j] ** 3
        else:
            if DP[i][j] < 0:
                DP[i][j] = float('inf')
            else:
                DP[i][j] = DP[i][j] ** 3
        # Special-case the final row: finalize DP[length-1][length-1]
        # here since the outer loop breaks before reaching i = length-1.
        if i == length-2 and j == length-1:
            i += 1
            DP[i][i] = W - len(words[i])
            if DP[i][j] < 0:
                DP[i][j] = float('inf')
                break
            else:
                DP[i][j] = DP[i][j] ** 3
                break
# Back-to-front pass: cost[i] = min over split points j of
# (cost of line words i..j-1) + (best cost of the rest).
for i in range(length-1, -1, -1):
    cost[i] = DP[i][length-1]
    for j in range(length-1, i, -1):
        if DP[i][j-1] == float('inf'):
            continue
        prevCost = cost[j] + DP[i][j-1]
        if cost[i] > prevCost:
            cost[i] = prevCost
print(cost[0])
| UTF-8 | Python | false | false | 1,346 | py | 23 | newLeftAlign.py | 23 | 0.372214 | 0.350669 | 0 | 54 | 23.925926 | 49 |
DatTobbes/Analyse-Retail-Data | 18,915,035,995,693 | 5c70c622f225abbb445b63354d626cfd45e695a5 | dbc88423d3ecc11fc21f408bb9e17db51deca0e7 | /data_aggregation/read_price_data.py | 5d84700ea3990fd706a34d1752c902be43bde6f2 | []
| no_license | https://github.com/DatTobbes/Analyse-Retail-Data | 459a8e1da09fcd5c37f863a251c9bb330ae8347c | 0225f789c896f61705e87a2d3f0851aad37c02d4 | refs/heads/master | 2020-04-02T02:41:20.758096 | 2018-11-13T09:10:05 | 2018-11-13T09:10:05 | 153,922,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from bokeh.plotting import figure, output_file, show
class HistoricalPriceDataReader:
    """Loads a tab-separated price-history export and slices out per-device series."""

    def __init__(self, path='../data/price_data_reader.csv'):
        """Read the raw export; one row per device, columns are daily snapshots."""
        self.df = pd.read_csv(path, sep='\t')

    def get_historical_data_for_device(self, item="Apple iPhone 7 Plus (32GB)"):
        """Return a DataFrame with min_price / max_price / availability
        columns (28 rows each) for the device named *item*.

        Fixes the original, which called ``.values`` first and then
        ``reset_index`` on the resulting ndarray — an AttributeError.
        """
        device = self.df.loc[self.df['name'] == item].transpose()
        device.columns = ['prices']
        device = device.reset_index(drop=True)
        # Values use a comma decimal separator in the export.
        prices = device['prices'].str.replace(',', '.')
        # NOTE(review): row ranges 2:30 and 29:57 overlap at row 29 —
        # confirm against the CSV layout; slice bounds kept from the
        # original.
        min_prices = prices.iloc[2:30].values
        max_prices = prices.iloc[29:57].values
        availability = prices.iloc[57:85].values
        result = pd.DataFrame(
            data=[np.asarray(min_prices, dtype=np.float32),
                  np.asarray(max_prices, dtype=np.float32),
                  np.asarray(availability, dtype=np.float32)],
            dtype=np.float32)
        result = result.transpose()
        result.columns = ['min_price', 'max_price', 'availability']
        return result

    def plot(self, df):
        """Scatter max price against availability and open the HTML plot."""
        p = figure()
        # Fixed: the frame produced above names this column
        # 'availability', not 'ava'.
        p.scatter(np.asarray(df['max_price'], dtype=np.float32),
                  np.asarray(df['availability'], dtype=np.float32),
                  fill_color="red")
        output_file("scatter.html")
        show(p)
if __name__ == "__main__":
reader = HistoricalPriceDataReader()
print(reader.get_historical_data_for_device()) | UTF-8 | Python | false | false | 1,432 | py | 15 | read_price_data.py | 8 | 0.584497 | 0.567737 | 0 | 43 | 32.325581 | 105 |
Yaeeun-Kang/Djangotest | 2,585,570,350,073 | d2b891c7fe1e3edc0a3282857e6395be293046bb | d103018a6e612c5342d789e911e399d2a468244c | /Wordcount/wordcount_app/views.py | a32771815c02d4ac3f6145db5071212f0a8df304 | []
| no_license | https://github.com/Yaeeun-Kang/Djangotest | 05e49e3c2c715359689a68bb12246de8618b4c3a | 8568cdd64820829165ab55820f75815c5556c8dc | refs/heads/master | 2022-11-20T22:09:08.062013 | 2020-07-26T11:29:15 | 2020-07-26T11:29:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def test(request):
    """Render the first test page."""
    return render(request, 'test.html')
def test1(request):
    """Render the second test page."""
    return render(request, 'test1.html')
def test2(request):
return render(request, 'test2.html') | UTF-8 | Python | false | false | 306 | py | 5 | views.py | 1 | 0.712418 | 0.699346 | 0 | 15 | 19.466667 | 40 |
rodalbert/Flask | 11,149,735,135,712 | 401837c44fa03d55ea59891e1248ac85fa28ba7e | 239c4d66d4e57c075526cc2f81cbbb025d7d2cf5 | /hello_world.py | 076cb35beac1e820991852aff3cfa6abc52e22ba | []
| no_license | https://github.com/rodalbert/Flask | a7985156010b7b79574e9435d4b017988dbbc363 | 7746d08a490ee41e6567a404bc2b79ff4d6322d4 | refs/heads/main | 2023-02-05T22:56:53.052753 | 2020-12-23T19:47:39 | 2020-12-23T19:47:39 | 323,988,294 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""Our first Flask application."""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the front page of the Flask app."""
    greeting = "Hello world!"
    return greeting
# Run the development server only when executed directly, not on import.
# (Removed the un-idiomatic parentheses around the condition.)
if __name__ == "__main__":
    app.run(debug=True)
| UTF-8 | Python | false | false | 278 | py | 2 | hello_world.py | 1 | 0.607914 | 0.604317 | 0 | 15 | 17.533333 | 43 |
gkgg123/TIL | 19,559,281,071,408 | 8aedb29daa78775966391312a2504297068cc087 | cb97973dc79a58f8b785cd94f7c129c9492694a1 | /Python/csv_ex/daum_read.py | fea1c25557c6359dbba6e3b570f18dd9b3884994 | []
| no_license | https://github.com/gkgg123/TIL | 6aa25cd2e9390bded3f842ce1e6843a9081cf1b0 | cbbd55190893550cd40920fe1cd732b4da1830a0 | refs/heads/master | 2023-07-23T23:08:22.542224 | 2022-07-27T01:02:42 | 2022-07-27T01:02:42 | 235,233,061 | 0 | 0 | null | false | 2023-07-18T23:31:17 | 2020-01-21T01:38:15 | 2022-07-24T12:40:30 | 2023-07-18T23:31:17 | 54,245 | 0 | 0 | 6 | Python | false | false | import csv
# Stream ranking.csv and print the rank/value columns of every row.
with open('ranking.csv','r',encoding='utf-8',newline='') as csvfile:
    csv_reader=csv.DictReader(csvfile)
    for row in csv_reader:
print(row['rank'],row['value']) | UTF-8 | Python | false | false | 186 | py | 299 | daum_read.py | 240 | 0.655914 | 0.650538 | 0 | 6 | 30.166667 | 68 |
mike-fang/led_micro | 13,013,750,926,765 | d1a4d052c1f78961414f9f2c3d2986a9cfe3514c | caaf7723580684886559dedba9a0cfa19036243d | /capture_msi_spin.py | de962b7fe1430d85a4bc2144adbb7691576d281a | []
| no_license | https://github.com/mike-fang/led_micro | 27214b5d9e67abd3dbc85c2962be13bb82c83723 | c08105b1cd84836fed2dea11074e1d47d13f099a | refs/heads/master | 2022-11-28T10:46:09.647242 | 2020-08-02T19:44:22 | 2020-08-02T19:44:22 | 275,946,959 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ft245r import relay_ft245r
from ft245r.relay_ft245r import FT245R
import sys
from time import sleep
import numpy as np
from spin_spin import Stepper
from elp_usb_cam import ELP_Camera
import matplotlib.pylab as plt
from msi_proc import *
import cv2
import os
from led_controller import LED_Controller
def init_rb():
    """Connect to the first FT245R relay board found and return it.

    Exits the process if no board is attached.
    """
    rb = FT245R()
    dev_list = rb.list_dev()
    if len(dev_list) == 0:
        print('No FT245R devices found')
        sys.exit()
    dev = dev_list[0]
    print('Using device with serial number ' + str(dev.serial_number))
    rb.connect(dev)
    return rb
def capture_ms_img(cam, rb, stepper, exposures=None, n_leds=8, pause=0):
    """Capture a multispectral image by stepping through filter positions.

    Returns a (H, W, n_channels * n_leds) uint8 array with one image per
    filter stacked along the channel axis. *exposures* may be a per-filter
    sequence or a single scalar.
    """
    # NOTE(review): constructed but never used — confirm whether the
    # LED_Controller side effects in __init__ are needed here.
    led_control = LED_Controller(rb, stepper)
    H, W, n_channels = cam.img_shape
    ms_img = np.zeros((H, W, n_channels*n_leds), dtype=np.uint8)
    cam.set_auto_wb(False)
    state = np.zeros(8)
    # Relay channel 4 — presumably the illumination source; confirm wiring.
    state[4] = 1
    rb.set_state(state)
    stepper.engage()
    for n in range(n_leds):
        stepper.goto_filter(n)
        if exposures is not None:
            try:
                cam.set_exp(exposures[n])
            except:
                # exposures is a scalar, not indexable — use it for all filters.
                cam.set_exp(exposures)
        sleep(pause)
        # Grab several frames and keep only the last so the camera
        # settles on the new exposure — TODO confirm warm-up count.
        for _ in range(10):
            img = cam.capture_img()
        print(img.mean())
        ms_img[:, :, n*n_channels:(n+1)*n_channels] = img
    stepper.disengage()
    # Power everything off on the way out.
    rb.set_state(np.zeros(8))
    return ms_img
def get_exposures(cam, rb, n_leds=8, pause=.5, target=128, tol=10, n_iter=20):
    """Binary-search one camera exposure per LED so the mean frame
    luminance lands within *tol* of *target*.

    Searches exposure in [0, 5000] for at most *n_iter* halvings per LED
    (also stops once the bracket narrows below 10). Returns an array of
    n_leds exposure values; all relays are switched off before returning.
    """
    exposures = []
    for n in range(n_leds):
        # Turn on nth led
        state = np.zeros(n_leds)
        state[n] = 1
        rb.set_state(state)
        sleep(pause)
        high = 5000
        low = 0
        for _ in range(n_iter):
            exposure = (high + low)/2.
            if high - low < 10:
                break
            cam.set_exp(exposure)
            # Discard a few frames so the new exposure takes effect;
            # only the last frame is measured — TODO confirm count.
            for _ in range(5):
                img = cam.capture_img()
            mean_lum = img.mean()
            if mean_lum < target - tol:
                print(f'too low -- mean lum: {mean_lum:.2f}, high: {high:.2f}, low: {low:.2f}')
                low = exposure
            elif mean_lum > target + tol:
                print(f'too high -- mean lum: {mean_lum:.2f}, high: {high:.2f}, low: {low:.2f}')
                high = exposure
            else:
                print(f'good enough -- mean lum: {mean_lum:.2f}, high: {high:.2f}, low: {low:.2f}')
                break
        exposures.append(exposure)
    rb.set_state(np.zeros(n_leds))
    return np.array(exposures)
# Script entry point: capture a 4-filter multispectral image at a fixed
# exposure and display each filter's RGB slice.
if __name__ == '__main__':
    cam = ELP_Camera(0)
    rb = init_rb()
    # Auto-calibration path, disabled in favour of a fixed exposure below.
    #exposures = get_exposures(cam, rb, n_leds=5, pause=.5, target=75,tol=5)
    #np.save('./exposures.npy', exposures)
    exposures = 200
    stepper = Stepper(config_file='spinspin_config.json', pulse_time=0.0005)
    ms_img = capture_ms_img(cam, rb, stepper, n_leds=4, exposures=exposures, pause=.5)
    for n in range(4):
        plt.figure(figsize=(10, 6))
        plt.imshow(ms_img[:, :, n*3:(n+1)*3])
    plt.show()
| UTF-8 | Python | false | false | 3,024 | py | 27 | capture_msi_spin.py | 24 | 0.553571 | 0.525463 | 0 | 98 | 29.836735 | 99 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.