repo_name | __id__ | blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_url | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_fork | gha_event_created_at | gha_created_at | gha_updated_at | gha_pushed_at | gha_size | gha_stargazers_count | gha_forks_count | gha_open_issues_count | gha_language | gha_archived | gha_disabled | content | src_encoding | language | is_vendor | is_generated | length_bytes | extension | num_repo_files | filename | num_lang_files | alphanum_fraction | alpha_fraction | hex_fraction | num_lines | avg_line_length | max_line_length |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
idlecool/freshdomainnames-GAE | 11,871,289,625,605 | 7910a91ccbcd3459c40be0eeb3d1f96b097a2a6b | e961c29c4d496f2e3d3f8c928453b0a00a82d236 | /worldnews.py | 735557a538c678e35ee77eebb4c6057511cfad4f | [] | no_license | https://github.com/idlecool/freshdomainnames-GAE | 5282da34dad7142dce6c6aae422b8d802f5089f3 | d8db3988733d26de823c38c918031e4d53d87cb1 | refs/heads/master | 2021-01-18T10:27:51.908710 | 2010-12-10T12:04:59 | 2010-12-10T12:04:59 | 1,123,545 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
""" retrieve random words from news feeds """
import urllib
import random
import re
from BeautifulSoup import BeautifulStoneSoup

stream = ""


def _worldnews():
    # webservice = 'http://www.thehindu.com/sci-tech/technology/?service=rss'
    webservice = 'http://www.thehindu.com/news/international/?service=rss'
    global stream
    if stream == "":
        stream = urllib.urlopen(webservice).read()
    soup = BeautifulStoneSoup(stream)
    feedtitles = soup.findAll('title')
    feedlinks = soup.findAll('link')
    feeddescriptions = soup.findAll('description')
    feeds = [feedtitles, feedlinks, feeddescriptions]
    return feeds


def _randomfeed():
    feeds = _worldnews()
    [feedtitles, feedlinks, feeddescriptions] = feeds
    numfeed = len(feedtitles)
    # index 0 is the channel title, so pick from 1 onwards
    anyfeed = random.randrange(1, numfeed)
    feedtitle = feedtitles[anyfeed]
    feedlink = feedlinks[anyfeed]
    feeddescription = feeddescriptions[anyfeed]
    feedtitlestr = feedtitle.string.encode('utf-8')
    feedlinkstr = feedlink.string.encode('utf-8')
    feeddescriptionstr = feeddescription.prettify().split("\n")[2]
    feed = [feedtitlestr, feedlinkstr, feeddescriptionstr]
    return feed


def _getproperwords():
    feed = _randomfeed()
    [feedstring, feedlink, feeddescription] = feed
    stringaslist = re.split(r"[^a-zA-Z]", feedstring)
    for num in xrange(stringaslist.count("")):
        stringaslist.remove("")
    properwords = []
    for word in stringaslist:
        if len(word) > 4 and len(word) < 10:
            properwords.append(word)
    return properwords, feed


def getwords():
    properwords, feed = _getproperwords()
    # retry until the headline yields at least one usable word
    while len(properwords) < 1:
        properwords, feed = _getproperwords()
    feedlen = len(properwords)
    words = []
    word1 = properwords[random.randrange(0, feedlen)]
    #properwords.remove(word1)
    #word2 = properwords[random.randrange(0,feedlen-1)]
    words.extend([word1, ])
    return words, feed


if __name__ == "__main__":
    words, feed = getwords()
    print words[0]
    print "has been selected from:", feed[0]
    print feed[1]
    print feed[2]
| UTF-8 | Python | false | false | 2,153 | py | 10 | worldnews.py | 6 | 0.669763 | 0.660474 | 0 | 73 | 28.493151 | 77 |
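A note on the empty-string cleanup in _getproperwords above: re.split emits an empty string wherever two delimiters are adjacent (or at either end of the string), which is why the loop counts and removes them. A minimal standalone illustration (not part of the original repo):

import re

headline = "U.S., U.K. talks resume"
parts = re.split(r"[^a-zA-Z]", headline)
# adjacent delimiters like ", " produce empty strings:
# ['U', 'S', '', '', 'U', 'K', '', 'talks', 'resume']
cleaned = [word for word in parts if word]  # idiomatic one-pass filter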
smullinmatteo/One-Month-Coursework | 2,491,081,070,409 | dfdef9f20135813eefa9fc66875e2dcb64b382bd | 8d7337b3eca5ac8c9c971f1b36ed42469ea89578 | /Python Coursework/tip-caplculator.py | c033d86da67ed1a9d509e41aa27ef09c09ff2354 | [] | no_license | https://github.com/smullinmatteo/One-Month-Coursework | 7798302cc01226641e2c6f4390410386e4b9abf9 | 75c9d5611fde38ae63e36bf4745ef222361bdf22 | refs/heads/main | 2023-06-03T15:36:53.943019 | 2021-06-18T17:42:22 | 2021-06-18T17:42:22 | 368,909,800 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# takes the bill amount from the user, then suggests three bill+tip totals for 12%, 15%, and 20%
bill_amount = float(input("What is the bill total?").strip("$"))
print(
    f"Here are your options: "
    f"12%: Tip ${bill_amount*.12:.2f} | Total ${bill_amount*1.12:.2f}, "
    f"15%: Tip ${bill_amount*.15:.2f} | Total ${bill_amount*1.15:.2f}, "
    f"20%: Tip ${bill_amount*.20:.2f} | Total ${bill_amount*1.20:.2f}"
)
James120393/comp110-worksheets | 16,904,991,282,448 | cc1b61741f14b782f84815ebf3b00aa0532e346b | a2d6b96f1a629693db178fb42614f6c811d288be | /comp110-worksheet-3/WebServer/webserver.py | f5ca771b62479cbc5c242057f59489865d632e8e | [] | no_license | https://github.com/James120393/comp110-worksheets | 3be49a81811e14995f9640f905b534792460f685 | 8865ac18e9af72074dffe2960b601df75ac0e5c1 | refs/heads/master | 2021-01-18T13:28:36.283612 | 2016-05-30T15:40:13 | 2016-05-30T15:40:13 | 43,420,629 | 0 | 0 | null | true | 2015-09-30T08:16:28 | 2015-09-30T08:16:28 | 2015-09-30T07:54:37 | 2015-09-30T08:07:46 | 0 | 0 | 0 | 0 | null | null | null |
#!/usr/bin/python
# Turn on debug mode.
import cgitb
cgitb.enable()

print("Content-Type: text/html; charset=utf-8\n\n")

import cgi
form = cgi.FieldStorage()
if 'user' not in form:
    print("Something Went Wrong")
else:
    print("Hello " + str(form.getvalue("user")))

# Connect to the database.
import pymysql
conn = pymysql.connect(
    db='HighScores',
    user='root',
    passwd='',
    host='localhost')
c = conn.cursor()

my_first_int = 2
my_first_str = 'Two'

# Insert some example data; the values are passed as parameters so pymysql
# quotes them correctly (assumes the High_Score table has the columns
# Caught, Lost and ID, which is what the original three inserts named).
c.execute("INSERT INTO High_Score (Caught, Lost, ID) VALUES (%s, %s, %s)", (2, 3, 1))
# c.execute("INSERT INTO users VALUES (" + my_first_int + ", '" + my_first_str + ...
conn.commit()

# Print the contents of the database.
c.execute("SELECT * FROM High_Score")
print([(r[0], r[1]) for r in c.fetchall()])
| UTF-8 | Python | false | false | 917 | py | 20 | webserver.py | 15 | 0.666303 | 0.65867 | 0 | 38 | 23.131579 | 81 |
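The hand-concatenated INSERT that was left commented out above is the classic motivation for parameterized queries; a minimal sketch (the users table and its two columns are illustrative, not from the original worksheet):

import pymysql

conn = pymysql.connect(db='HighScores', user='root', passwd='', host='localhost')
with conn.cursor() as cursor:
    # The driver quotes each parameter, so ints and strings both work
    # and user-supplied input cannot break out of the statement.
    cursor.execute("INSERT INTO users VALUES (%s, %s)", (2, 'Two'))
conn.commit()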
jandrovins/Taller2ProyectoIntegradorI | 5,033,701,717,033 | 3eb2130cbcf76c182f4f8955c9fabdf9c73760ce | cc6d3210265037d7d4a34619a033efadac2ce1fc | /measure/views.py | 55306840ae498a96d554850c345c59bb0f1af7a8 | [] | no_license | https://github.com/jandrovins/Taller2ProyectoIntegradorI | 2ddbb8ccef44fb08a9bcdb699d62e634c751c789 | 368c32a85c2fba8436725340a578763180129cd1 | refs/heads/master | 2022-07-20T17:33:15.964850 | 2020-05-19T21:23:35 | 2020-05-19T21:23:35 | 255,716,097 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import get_object_or_404, render
import requests


# Create your views here.
def measure(request):
    error = False
    # Check whether the POST request carries the form parameters
    if request.method == "POST":  # when the user sends registration info:
        args = {
            'codigo': request.POST['codigo'],
            'latitud': request.POST['latitud'],
            'longitud': request.POST['longitud'],
            'terreno': request.POST['terreno'],
            'area': request.POST['area'],
        }
        if int(args['area']) <= 0:
            error = True
        else:
            # Forward the new measure to the web service
            response = requests.post('http://127.0.0.1:8000/measure/', args)
            measure_json = response.json()
    # Issue a GET request to the web service
    response = requests.get('http://127.0.0.1:8000/measure/')
    # Decode the response as JSON
    measures = response.json()
    # Render the response with the measure template
    return render(request, "measure/measure.html", {'measures': measures, 'error': error})
| UTF-8 | Python | false | false | 1,127 | py | 2 | views.py | 1 | 0.637011 | 0.615658 | 0 | 29 | 37.758621 | 90 |
krishna0512/benchmark | 10,977,936,433,867 | 3ca9fe525791d63201d6d4b00ab3340c62ee72cc | 2a431aed19c3a37381e1bbeb0295531c39bdd1b2 | /benchmark/views.py | e9f988c995727886aef0e2edfa25a0cf21095b9a | [] | no_license | https://github.com/krishna0512/benchmark | 2e84925f08880ad7271a379fec379016cae90e6c | a6962ea5f35c387cbb55d4008ccbdd57a180120f | refs/heads/master | 2020-04-08T14:54:08.715927 | 2019-01-12T09:09:27 | 2019-01-12T09:09:27 | 150,698,627 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random

from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import generic, View

from benchmark.forms import UserRegistrationForm, SubmissionForm
from benchmark.models import Document, Language, Dataset, Resource, TaskCategory, Submission, Modality


# Create your views here.
class DatasetListView(LoginRequiredMixin, generic.ListView):
    """Generic list-based view for datasets.

    default template name - 'benchmark/dataset_list.html'
    default context object name - 'dataset_list'
    Uses LoginRequiredMixin to require a logged-in user,
    similar to the login_required decorator:
    - - @method_decorator(login_required)
    - - def dispatch(self, *args, **kwargs):
    - -     return super().dispatch(*args, **kwargs)
    """
    model = Dataset


class ResourceListView(generic.ListView):
    """Generic list-based view for resources.

    default template name - 'benchmark/resource_list.html'
    default context object name - 'resource_list'
    """
    model = Resource


class DocumentDetailView(generic.DetailView):
    """Generic detail-based view for task selection.

    default template name - 'benchmark/document_detail.html'
    default context name - 'document', which contains exactly one instance
    of Document, as described by the pk given in the url
    """
    model = Document

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['language_list'] = Language.objects.all()
        return context


class LeaderboardDetailView(generic.DetailView):
    """Generic detail-based view for the leaderboard of a task category.

    default template name - 'benchmark/task_category_detail.html'
    default context name - 'tc', which contains exactly one instance
    of TaskCategory, as described by the pk given in the url
    """
    model = TaskCategory
    template_name = 'benchmark/leaderboard.html'
    context_object_name = 'tc'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['form'] = SubmissionForm(tc_id=self.kwargs['pk'])
        return context


class RegisterView(View):
    context = {}
    form_class = UserRegistrationForm
    template_name = 'registration/register.html'

    def add_new_user(self, form):
        """Takes the form passed to the POST method
        and returns the newly created user, which is not yet logged in.
        """
        email = form.cleaned_data['email']
        username = form.cleaned_data['username']
        first_name = form.cleaned_data['first_name']
        last_name = form.cleaned_data['last_name']
        password = form.cleaned_data['password']
        user = User.objects.create_user(
            username,
            email,
            password,
            first_name=first_name,
            last_name=last_name
        )
        user.profile.affiliation_name = form.cleaned_data['affiliation_name']
        user.profile.dob = form.cleaned_data['dob']
        user.save()
        return authenticate(username=username, password=password)

    def get(self, request, *args, **kwargs):
        """Display empty form"""
        self.context['form'] = self.form_class()
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        self.context['form'] = form
        if form.is_valid():
            user = self.add_new_user(form)
            login(request, user)
            messages.success(request, 'User successfully registered and logged in')
            return redirect(reverse('benchmark:index'))
        messages.error(request, 'Please check and correct the errors in form below.')
        return render(request, self.template_name, self.context)


class DummyLeaderboardView(View):
    """This is a dummy leaderboard page whose only task
    is to redirect the request coming from the task_selection
    page to the leaderboard page after calculating tc for the given parameters.
    """
    def post(self, request, *args, **kwargs):
        if not (request.POST['modality'] and request.POST['language']):
            messages.error(request, 'Please select a valid Modality and Language')
            return redirect('benchmark:document_detail', pk=self.kwargs['document_id'])
        tc = TaskCategory.objects.get(
            modality__id=int(request.POST['modality']),
            task__id=self.kwargs['task_id'],
            language__id=int(request.POST['language'])
        )
        return redirect('benchmark:leaderboard', pk=tc.id)


def uploadSubmission(request, tc_id):
    """
    This function processes the result of the submission form,
    does all the preprocessing, and then redirects to the previous leaderboard page.
    The data for choosing the leaderboard page comes from the tc_id obtained via GET.
    """
    tc = TaskCategory.objects.get(id=tc_id)
    if request.method == 'POST':
        form = SubmissionForm(request.POST, request.FILES, tc_id=tc_id)
        if form.is_valid():
            dataset = Dataset.objects.get(id=form.cleaned_data['dataset'])
            user = request.user
            title = form.cleaned_data['title']
            description = form.cleaned_data['description']
            authors = form.cleaned_data['authors']
            paper_link = form.cleaned_data['paper_link']
            is_result_public = form.cleaned_data['is_result_public']
            s = Submission(
                dataset=dataset,
                user=user,
                task_category=tc,
                title=title,
                description=description,
                authors=authors,
                paper_link=paper_link,
                is_result_public=is_result_public,
                result=request.FILES['result'],
                # right now the eval measure is random; in the future we can use
                # a job scheduler to schedule a job, compute the measures, and
                # only after that save the submission in the database.
                # evaluation_measure_1 = str(get_evaluation_measure(request.FILES['result'])),
                evaluation_measure_1='{0:.2f}'.format(random.uniform(20.0, 80.0)),
                evaluation_measure_2='{0:.2f}'.format(random.uniform(20.0, 80.0)),
                evaluation_measure_3='{0:.2f}'.format(random.uniform(20.0, 80.0))
            )
            try:
                s.save()
                if s.evaluate_measure(request.FILES['result']):
                    messages.success(request, 'Submission form saved Successfully')
                else:
                    messages.error(request, 'Failed to Evaluate the Submission')
            except Exception:
                messages.error(request, 'Failed to save Submission form')
    return redirect('benchmark:leaderboard', pk=tc_id)


def delete_submission(request):
    """
    Deletes the given submission from the id and redirects the
    user to the leaderboard page.
    TODO: redirect the user to the mymethods page instead of the leaderboard page.
    TODO: Make this method more secure.
    Right now it is highly insecure, because any person can access the
    delete_submission url with an arbitrary sub_id and delete that submission.
    To make it more secure, check that the user is logged in and can delete
    only their own submissions.
    """
    sid = request.GET.get('sub_id', None)
    try:
        s = Submission.objects.get(pk=sid)
        tid = s.task_category.id
        s.delete()
    except Exception:
        messages.warning(request, 'Invalid Submission ID to delete_submission')
        return redirect('benchmark:index')
    messages.success(request, '<Submission: {}> Deleted!'.format(sid))
    return redirect('benchmark:leaderboard', pk=tid)


def getLeaderboardTableData(request, tc_id):
    """Ajax view that returns the leaderboard table."""
    data = []
    s = Submission.objects.filter(task_category__id=tc_id)
    for i in s:
        # functionality for is_result_public
        if i.is_result_public or i.user == request.user:
            data.append(i.getJSONDict())
        else:
            # this submission should not be displayed on the leaderboard
            pass
    return JsonResponse({'data': data})


def getmyMethodsTableData(request, tc_id):
    """Ajax view that returns the mymethods table for logged in users."""
    data = []
    s = Submission.objects.filter(task_category__id=tc_id).filter(user=request.user)
    for i in s:
        data.append(i.getJSONDict())
    return JsonResponse({'data': data})
| UTF-8 | Python | false | false | 8,815 | py | 148 | views.py | 44 | 0.645604 | 0.642428 | 0 | 216 | 39.810185 | 102 |
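The decorator-based alternative sketched in the DatasetListView docstring above, written out as a runnable view (a sketch that is equivalent in effect to using LoginRequiredMixin):

from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import generic

from benchmark.models import Dataset


class DatasetListViewDecorated(generic.ListView):
    """Same behaviour as DatasetListView, using method_decorator
    instead of LoginRequiredMixin."""
    model = Dataset

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)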
attilapalfi92/machine-learning-ingatlancom-predictor | 10,969,346,515,784 | 799ab0fcf1eda2d2b38a22d6275280fc3a7cd9ec | 68fdbd773931c072da76c7fe9b28a4117f801f33 | /constants.py | 4a9dc2c021f9e09f483aa426470a552b7998449a | [] | no_license | https://github.com/attilapalfi92/machine-learning-ingatlancom-predictor | a766197af710e0a690d4136ad0a0845ada7202cc | 25748d212e497b25d7bcf42560b875805f4a2dc7 | refs/heads/master | 2022-07-20T08:40:29.223785 | 2019-01-28T09:31:03 | 2019-01-28T09:31:03 | 122,330,260 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
BUILDING_MATERIAL = (
    ("Csúszózsalus lakás bérleti joga eladó", "Csúszózsalus lakás bérleti joga eladó"),
    ("Eladó csúszózsalus lakás", "Eladó csúszózsalus lakás"),
    ("Eladó panel lakás", "Eladó panel lakás"),
    ("Eladó tégla építésű lakás", "Eladó tégla építésű lakás"),
    ("Panel lakás bérleti joga eladó", "Panel lakás bérleti joga eladó"),
    ("Tégla építésű lakás bérleti joga eladó", "Tégla építésű lakás bérleti joga eladó"),
    ("Új építésű lakás lakóparkban", "Új építésű lakás lakóparkban")
)

COMFORT = (
    ("duplakomfortos", "duplakomfortos"),
    ("félkomfortos", "félkomfortos"),
    ("komfort nélküli", "komfort nélküli"),
    ("komfortos", "komfortos"),
    ("luxus", "luxus"),
    ("nincs megadva", "nincs megadva"),
    ("összkomfortos", "összkomfortos")
)

CONDITION = (
    ("befejezetlen", "befejezetlen"),
    ("felújítandó", "felújítandó"),
    ("felújított", "felújított"),
    ("jó állapotú", "jó állapotú"),
    ("közepes állapotú", "közepes állapotú"),
    ("nincs megadva", "nincs megadva"),
    ("új építésű", "új építésű"),
    ("újszerű", "újszerű")
)

HEATING = (
    ("cserépkályha", "cserépkályha"),
    ("egyéb", "egyéb"),
    ("egyéb kazán", "egyéb kazán"),
    ("elektromos", "elektromos"),
    ("gáz (cirko)", "gáz (cirko)"),
    ("gáz (konvektor)", "gáz (konvektor)"),
    ("fan-coil", "fan-coil"),
    ("gázkazán", "gázkazán"),
    ("falfűtés", "falfűtés"),
    ("vegyes tüzelésű kazán", "vegyes tüzelésű kazán"),
    ("házközponti", "házközponti"),
    ("távfűtés", "távfűtés"),
    ("padlófűtés", "padlófűtés"),
    ("mennyezeti hűtés-fűtés", "mennyezeti hűtés-fűtés"),
    ("hőszivattyú", "hőszivattyú"),
    ("megújuló energia", "megújuló energia")
)

PARKING = (
    ("nincs megadva", "nincs megadva"),
    ("teremgarázs hely", "teremgarázs hely"),
    ("teremgarázs hely - benne van az árban", "teremgarázs hely - benne van az árban"),
    ("teremgarázs hely - bérelhető", "teremgarázs hely - bérelhető"),
    ("teremgarázs hely - kötelező megvenni", "teremgarázs hely - kötelező megvenni"),
    ("teremgarázs hely - megvásárolható", "teremgarázs hely - megvásárolható"),
    ("udvari beálló", "udvari beálló"),
    ("udvari beálló - benne van az árban", "udvari beálló - benne van az árban"),
    ("udvari beálló - bérelhető", "udvari beálló - bérelhető"),
    ("udvari beálló - kötelező kibérelni", "udvari beálló - kötelező kibérelni"),
    ("udvari beálló - kötelező megvenni", "udvari beálló - kötelező megvenni"),
    ("udvari beálló - megvásárolható", "udvari beálló - megvásárolható"),
    ("utca, közterület", "utca, közterület"),
    ("utca, közterület - fizetős övezet", "utca, közterület - fizetős övezet"),
    ("utca, közterület - ingyenes", "utca, közterület - ingyenes"),
    ("önálló garázs", "önálló garázs"),
    ("önálló garázs - benne van az árban", "önálló garázs - benne van az árban"),
    ("önálló garázs - bérelhető", "önálló garázs - bérelhető"),
    ("önálló garázs - kötelező megvenni", "önálló garázs - kötelező megvenni"),
    ("önálló garázs - megvásárolható", "önálló garázs - megvásárolható")
)

SUB_TYPE = (
    ("Csúszózsalus lakás bérleti joga eladó", "Csúszózsalus lakás bérleti joga eladó"),
    ("Eladó csúszózsalus lakás", "Eladó csúszózsalus lakás"),
    ("Eladó panel lakás", "Eladó panel lakás"),
    ("Eladó tégla építésű lakás", "Eladó tégla építésű lakás"),
    ("Panel lakás bérleti joga eladó", "Panel lakás bérleti joga eladó"),
    ("Tégla építésű lakás bérleti joga eladó", "Tégla építésű lakás bérleti joga eladó"),
    ("Új építésű lakás lakóparkban", "Új építésű lakás lakóparkban")
)

TOILET_TYPE = (
    ("egy helyiségben", "egy helyiségben"),
    ("külön helyiségben", "külön helyiségben"),
    ("külön és egyben is", "külön és egyben is"),
    ("nincs megadva", "nincs megadva")
)
| UTF-8 | Python | false | false | 4,212 | py | 48 | constants.py | 19 | 0.652208 | 0.652208 | 0 | 89 | 41.741573 | 89 |
jbwyme/flask-proxy-example | 12,249,246,766,972 | f5a547a2dc7b68d3f57bf28fd78e6e0f681df473 | 09f36de5cef13064d0654258fc864d5cfe4462ed | /flask_proxy/tests/test_mixpanel_calls.py | 4658f7b0477a14efe379c8e9d1cda72771c435c5 | [] | no_license | https://github.com/jbwyme/flask-proxy-example | e0666d719630a9dc604845989e79de7d985e9829 | ddc83f40d8b54da51837438363e7b017e199234c | refs/heads/master | 2023-03-01T20:06:54.316465 | 2021-02-11T18:41:57 | 2021-02-11T18:41:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import base64
import json

event_data = {
    'event': 'test event',
    'properties': {
        'token': '6888bfdec29d84ab2d36ae18c57b8535',
        'str': 'string',
        'int': 1,
        'float': 1.235,
        'object': {'one': 1, 'two': 2},
        'list': ['one', 'two'],
    }
}
event_encoded = base64.b64encode(str.encode(json.dumps(event_data))).decode('utf-8')


class TestTrack:

    def test_get_no_data(self, client):
        resp = client.get('/track?verbose=1')
        assert resp.json['status'] == 0

    def test_get(self, client):
        resp = client.get('/track?verbose=1&ip=1&data=%s' % event_encoded)
        assert resp.json['status'] == 1

    def test_post(self, client):
        resp = client.post('/track', data={'verbose': 1, 'ip': 1, 'data': json.dumps(event_data)})
        assert resp.json['status'] == 1
| UTF-8 | Python | false | false | 854 | py | 2 | test_mixpanel_calls.py | 2 | 0.569087 | 0.521077 | 0 | 30 | 27.466667 | 98 |
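For reference, the data parameter in the tests above is just the JSON event base64-encoded; a standalone round-trip check (not part of the test suite, payload contents are illustrative):

import base64
import json

payload = {'event': 'test event', 'properties': {'token': 'abc123'}}
encoded = base64.b64encode(json.dumps(payload).encode('utf-8')).decode('utf-8')
decoded = json.loads(base64.b64decode(encoded))
assert decoded == payload  # the encoding is lossless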
minhphung171093/phucthien | 15,925,738,740,052 | bbee8364e921c0779541a28fd7f3c7edcc5fbf60 | c14f0bce37cfaf376b33d9d99cb6ec452a8563e4 | /addons-phucthien/green_erp_phucthien_hr/hr_payroll.py | c84093fc852e3c3ae34be8783d613b04798be212 | [] | no_license | https://github.com/minhphung171093/phucthien | 3f3975a60419441ec8d3bbdb8d21583e588d9b95 | 4547374db8d25eca082c4a13e23d5752104bd79c | refs/heads/master | 2016-09-11T02:30:28.840903 | 2015-10-05T04:33:48 | 2015-10-05T04:33:48 | 28,329,699 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import time
from openerp import pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp import netsvc
import httplib
from openerp import SUPERUSER_ID


class hr_payslip(osv.osv):
    _inherit = 'hr.payslip'

    def get_worked_day_lines(self, cr, uid, contract_ids, date_from, date_to, context=None):
        """
        @param contract_ids: list of contract id
        @return: returns a list of dict containing the input that should be applied for the given contract between date_from and date_to
        """
        def was_on_leave(employee_id, datetime_day, context=None):
            res = False
            day = datetime_day.strftime("%Y-%m-%d")
            holiday_ids = self.pool.get('hr.holidays').search(cr, uid, [('state','=','validate'),('employee_id','=',employee_id),('type','=','remove'),('date_from','<=',day),('date_to','>=',day)])
            if holiday_ids:
                res = self.pool.get('hr.holidays').browse(cr, uid, holiday_ids, context=context)[0].holiday_status_id.name
            return res

        res = []
        for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):
            if not contract.working_hours:
                continue
            attendances = {
                'name': _("Normal Working Days paid at 100%"),
                'sequence': 1,
                'code': 'Normal',
                'number_of_days': 26,
                'number_of_hours': 208,
                'contract_id': contract.id,
            }
            res += [attendances]
        return res

    def get_inputs(self, cr, uid, contract_ids, date_from, date_to, context=None):
        res = []
        contract_obj = self.pool.get('hr.contract')
        rule_obj = self.pool.get('hr.salary.rule')

        structure_ids = contract_obj.get_all_structures(cr, uid, contract_ids, context=context)
        rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)
        sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x: x[1])]
        depend = {'name': 'Người phụ thuộc',
                  'code': 'DEPEND'}
        for contract in contract_obj.browse(cr, uid, contract_ids, context=context):
            for rule in rule_obj.browse(cr, uid, sorted_rule_ids, context=context):
                if rule.input_ids:
                    for input in rule.input_ids:
                        inputs = {
                            'name': input.name,
                            'code': input.code,
                            'contract_id': contract.id,
                        }
                        res += [inputs]
            depend.update({'amount': contract.employee_id.depend_qty, 'contract_id': contract.id})
            res += [depend]
        return res

hr_payslip()


class hr_template(osv.osv):
    _name = "hr.template"
    _columns = {
        'name': fields.char('Name', 128, required=True),
        'template_line': fields.one2many('hr.template.line', 'template_id', 'Template Line'),
    }
    _sql_constraints = [
        # the constraint must reference the existing `name` column
        # (the original said `code`, which this model does not define)
        ('name_uniq', 'unique (name)', 'The name must be unique !'),
    ]

    def copy(self, cr, uid, ids, default={}, context=None, done_list=[], local=False):
        record = self.browse(cr, uid, ids, context=context)
        if not default:
            default = {}
        default = default.copy()
        default['name'] = (record['name'] or '') + '(copy)'
        return super(hr_template, self).copy(cr, uid, ids, default, context=context)

hr_template()


class hr_template_line(osv.osv):
    _name = "hr.template.line"
    _columns = {
        'name': fields.many2one('hr.salary.rule', 'Rule', required=True),
        'template_id': fields.many2one('hr.template', 'Template'),
        'sequence': fields.integer('Sequence'),
    }

hr_template_line()
| UTF-8 | Python | false | false | 4,246 | py | 295 | hr_payroll.py | 159 | 0.566171 | 0.561925 | 0 | 99 | 41.757576 | 196 |
dovietdungcntt1402/demopy | 2,525,440,809,251 | 0e948f7631c7299b8faf21d65df6fd0e58b8596c | 5f53d6241f6d3c9e00bfcbf91b64ffdcaad57e24 | /Untitled-1.py | f79e3f227609aec4fac130b938522213a487f28c | [] | no_license | https://github.com/dovietdungcntt1402/demopy | 005c599804c9d7a0ab0367fcc602027c2dd89de8 | d73cf22628de6841dd3a88f2174482dbc557f62b | refs/heads/master | 2023-08-01T00:33:17.684197 | 2021-09-11T04:55:17 | 2021-09-11T04:55:17 | 405,286,593 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
print("Enter a number:")
n = int(input())
flag = True
if n < 2:
    flag = False
elif n == 2:
    flag = True
elif n % 2 == 0:
    flag = False
else:
    for i in range(3, n, 2):
        if n % i == 0:
            flag = False
if flag:
    print(n, "is a prime number")
else:
    print(n, "is not a prime number")
print( n, "không phải") | UTF-8 | Python | false | false | 322 | py | 2 | Untitled-1.py | 2 | 0.496753 | 0.470779 | 0 | 17 | 17.176471 | 30 |
bodii/test-code | 8,924,942,083,991 | 4c4490d0191e77e7139b2b3b621b616295799b09 | 4e89d371a5f8cca3c5c7e426af1bcb7f1fc4dda3 | /python/Flask_web/app1/model/post.py | 9fdda7ea8d136aaa27e5fc4ee35f3da93022c7c7 | [] | no_license | https://github.com/bodii/test-code | f2a99450dd3230db2633a554fddc5b8ee04afd0b | 4103c80d6efde949a4d707283d692db9ffac4546 | refs/heads/master | 2023-04-27T16:37:36.685521 | 2023-03-02T08:38:43 | 2023-03-02T08:38:43 | 63,114,995 | 4 | 1 | null | false | 2023-04-17T08:28:35 | 2016-07-12T01:29:24 | 2023-01-07T16:01:52 | 2023-04-17T08:28:30 | 29,190 | 2 | 0 | 7 | Go | false | false |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Blog post model '''
from main import db
from datetime import datetime
# Store the Markdown source of a post in the database
from markdown import markdown
import bleach


class Post(db.Model):
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    body_html = db.Column(db.Text)

    @staticmethod
    def generate_fake(count=100):
        """Generate fake blog posts."""
        from random import seed, randint
        import forgery_py
        # assumes the User model (with an `author` backref on Post)
        # lives next to `db` in the main module
        from main import User

        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            # the sentence count was truncated in the source; 5 is illustrative
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
        db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        allowed_tags = [
            'a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
            'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
            'h1', 'h2', 'h3', 'p',
        ]
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True
        ))


# register the handler itself, not its name as a string
db.event.listen(Post.body, 'set', Post.on_changed_body)
| UTF-8 | Python | false | false | 1,752 | py | 4,003 | post.py | 2,624 | 0.536557 | 0.530071 | 0 | 52 | 30.615385 | 74 |
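What on_changed_body does to a post body, traced on a concrete string (a standalone sketch; requires the markdown and bleach packages, and the tag list here is shortened for the example):

import bleach
from markdown import markdown

source = "**bold** <script>alert('x')</script> visit http://example.com"
html = bleach.linkify(bleach.clean(
    markdown(source, output_format='html'),
    tags=['a', 'b', 'em', 'p', 'strong'], strip=True))
# The script tag is stripped, the bare URL becomes an <a> link,
# and **bold** arrives as <strong>bold</strong> inside a <p>.
print(html)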
dnguyen85/kiilib_python | 4,982,162,082,970 | 819d780a1daf39e72badff838fa096151c6e04a3 | 7bfa5f949e031f924c916a3955081a5f3fd71a3f | /kiilib/kii.py | 6ebd14d823397f6a6de9fc4e7d0bb461551dea3 | ["Apache-2.0"] | permissive | https://github.com/dnguyen85/kiilib_python | 37c9d8b8de807d051accfd4d5c3f2c82b90f1512 | c6a70c53d4d89d6782daac8a62e91246699f2abf | refs/heads/master | 2020-05-29T11:03:41.795702 | 2014-09-17T13:29:30 | 2014-09-17T13:29:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import client
import kiiobject
import bucket


class KiiContext(object):
    def __init__(self, app_id, app_key, url):
        self.app_id = app_id
        self.app_key = app_key
        self.url = url
        self.factory = client.KiiClientFactory()
        self.access_token = None

    def newClient(self):
        return self.factory.newClient()


class CloudException(Exception):
    def __init__(self, code, body):
        self.code = code
        self.body = body

    def __repr__(self):
        return 'HTTP %d %s' % (self.code, self.body)

    __str__ = __repr__


class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class KiiApp(object):
    """
    >>> # just a singleton check =)
    >>> KiiApp().__repr__() == KiiApp().__repr__()
    True
    """
    __metaclass__ = Singleton

    def getPath(self):
        return ""

    def __repr__(self):
        return "app-scope"

APP_SCOPE = KiiApp()


class KiiUser(object):
    """
    A user in Kii Cloud. This class is immutable.
    """
    def __init__(self, userID=None, **fields):
        self.id = userID
        self.data = {k: v for (k, v) in fields.iteritems()}

    def getPath(self):
        if self.id == None:
            raise Exception("tried to generate URL while id is None")
        return '/users/%s' % (self.id)

    def __getattr__(self, name):
        return self.data.get(name)

    def __repr__(self):
        return "KiiUser(id:%s, %s)" % (self.id, ', '.join(["%s:%s" % (k, v) for (k, v) in self.data.iteritems()]))


class KiiGroup(object):
    def __init__(self, id):
        self.id = id

    def getPath(self):
        # leading slash, for consistency with KiiUser.getPath
        return '/groups/%s' % (self.id)


class AppAPI(object):
    def __init__(self, context):
        self.context = context
        self.objectAPI = kiiobject.ObjectAPI(context)
        self.bucketAPI = bucket.BucketAPI(context)

    def _login(self, body):
        url = '%s/oauth2/token' % self.context.url
        client = self.context.newClient()
        client.method = "POST"
        client.url = url
        client.setContentType('application/json')
        client.setKiiHeaders(self.context, False)

        (code, body) = client.send(body)
        if code != 200:
            raise CloudException(code, body)
        self.context.access_token = body['access_token']
        id = body['id']
        return KiiUser(id)

    def login(self, userIdentifier, password):
        return self._login(
            body={
                'username': userIdentifier,
                'password': password
            }
        )

    def loginAsAdmin(self, client_id, client_secret):
        return self._login(
            body={
                'client_id': client_id,
                'client_secret': client_secret
            }
        )

    def signup(self, username, password, **extFields):
        url = '%s/apps/%s/users' % (self.context.url, self.context.app_id)
        body = {
            'password': password
        }
        if extFields != None:
            for k, v in extFields.items():
                body[k] = v
        if username != None:
            body['loginName'] = username

        client = self.context.newClient()
        client.method = "POST"
        client.url = url
        client.setContentType('application/json')
        client.setKiiHeaders(self.context, False)

        (code, body) = client.send(body)
        if code != 201:
            raise CloudException(code, body)
        id = body['userID']
        return KiiUser(userID=id, loginName=username, **extFields)


def doctest():
    import doctest
    doctest.testmod()
| UTF-8 | Python | false | false | 3,788 | py | 19 | kii.py | 16 | 0.546199 | 0.544351 | 0 | 132 | 27.689394 | 114 |
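The Singleton metaclass above caches one instance per class; a quick illustration using the class defined in the module (Python 2 __metaclass__ syntax, matching the file):

class Config(object):
    __metaclass__ = Singleton

    def __init__(self):
        self.values = {}

a = Config()
b = Config()
assert a is b  # __call__ returned the cached instance the second time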
cs2be/VisualDL | 18,313,740,574,964 | fc9399f9e1475e7b9aa3f34582b0b027f61d6bdf | ada58cae168778afdf40aef9570a996a6ae7180f | /visualdl/server/storage_mock.py | 98690fd5adaa7bb5e74f686cc79553f9884cc80f | ["Apache-2.0"] | permissive | https://github.com/cs2be/VisualDL | 71adefa4f206d45cfa3c8d708be8e47ecf642b62 | 1574cc022746684446019091c3e99dc1cf7dacb4 | refs/heads/develop | 2021-05-10T10:08:52.915335 | 2018-02-06T19:14:10 | 2018-02-06T19:14:10 | 118,949,453 | 0 | 0 | Apache-2.0 | true | 2018-01-30T23:55:06 | 2018-01-25T18:10:48 | 2018-01-25T18:10:50 | 2018-01-30T23:53:28 | 3,014 | 0 | 0 | 0 | C++ | false | null |
import random

import numpy as np


def add_scalar(writer, mode, tag, num_steps, skip):
    with writer.mode(mode) as my_writer:
        scalar = my_writer.scalar(tag)
        for i in range(num_steps):
            if i % skip == 0:
                scalar.add_record(i, random.random())


def add_image(writer,
              mode,
              tag,
              num_samples,
              num_passes,
              step_cycle,
              shape=[50, 50, 3]):
    with writer.mode(mode) as writer_:
        image_writer = writer_.image(tag, num_samples, step_cycle)

        for pass_ in xrange(num_passes):
            image_writer.start_sampling()
            for ins in xrange(2 * num_samples):
                data = np.random.random(shape) * 256
                data = np.ndarray.flatten(data)
                image_writer.add_sample(shape, list(data))
            image_writer.finish_sampling()


def add_histogram(writer, mode, tag, num_buckets):
    with writer.mode(mode) as writer:
        histogram = writer.histogram(tag, num_buckets)
        for i in range(10):
            histogram.add_record(i, np.random.normal(
                0.1 + i * 0.01, size=1000))
| UTF-8 | Python | false | false | 1,176 | py | 21 | storage_mock.py | 12 | 0.547619 | 0.529762 | 0 | 38 | 29.947368 | 66 |
predictable-success/predictable_success | 790,274,001,807 | 809294c22cca9ab2286b222447d3be62bc971a9c | 6d9112d77b2864ac2d4b8b3135149f1c8eb07901 | /leadership_styles/migrations/0011_auto_20160914_1427.py | 8b8d40ba5ba37daf82efcb82fa... | [] | no_license | https://github.com/predictable-success/predictable_success | 77b880cefe0fe363572bc43f72ac558c405c820e | 7cdbdcd5686781b4ac8bf4a3cd60c34ac4cee0f5 | refs/heads/master | 2021-01-19T01:09:10.251217 | 2017-05-05T17:39:29 | 2017-05-05T17:39:29 | 64,931,697 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('leadership_styles', '0010_auto_20160914_1039'),
    ]

    operations = [
        migrations.AlterField(
            model_name='teamleadershipstyle',
            name='quiz_requests',
            field=models.ManyToManyField(related_name='team_leadership_styles', null=True, to='leadership_styles.QuizUrl', blank=True),
        ),
        migrations.AlterField(
            model_name='teamleadershipstyle',
            name='team_members',
            field=models.ManyToManyField(related_name='team_leadership_styles', null=True, to='org.Employee', blank=True),
        ),
    ]
| UTF-8 | Python | false | false | 761 | py | 354 | 0011_auto_20160914_1427.py | 159 | 0.629435 | 0.607096 | 0 | 24 | 30.708333 | 135 |
danielhensilva/exercism | 10,797,547,789,090 | 14f8130ba480dc319ea38a03f5209aeb7401d8a2 | 4b5d084d8fb4d34aec7ff500ceb41af18d36cace | /python/armstrong-numbers/armstrong_numbers.py | fc9f144444e1407a375fe832a4126867f262d86e | [] | no_license | https://github.com/danielhensilva/exercism | beb3a4b655a2a82bc61a331e9e7bd1f71f89883c | 92bdd05299bd8c48209e5320f4f037f194dca2af | refs/heads/master | 2023-06-16T12:17:12.868664 | 2021-07-07T06:30:26 | 2021-07-07T06:30:26 | 115,460,736 | 0 | 0 | null | false | 2021-04-05T14:25:08 | 2017-12-26T22:59:12 | 2021-04-05T14:24:48 | 2021-04-05T14:25:06 | 3,227 | 0 | 0 | 0 | JavaScript | false | false |
import math


def is_armstrong(number):
    """Return True when number equals the sum of its digits,
    each raised to the power of the digit count."""
    result = 0
    expected = number
    power = math.floor(math.log10(number) + 1)
    while number > 0:
        digit = number % 10
        # integer division keeps `number` an int (plain /= drifts into floats)
        number = (number - digit) // 10
        result += pow(digit, power)
    return result == expected
| UTF-8 | Python | false | false | 288 | py | 142 | armstrong_numbers.py | 52 | 0.569444 | 0.538194 | 0 | 15 | 18.133333 | 46 |
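A quick sanity check for is_armstrong with two known Armstrong numbers and one near miss:

assert is_armstrong(153)     # 1**3 + 5**3 + 3**3 == 153
assert is_armstrong(9474)    # 9**4 + 4**4 + 7**4 + 4**4 == 9474
assert not is_armstrong(154)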
pkgcore/pkgcheck | 7,129,645,716,458 | f44c7d18acf56e6a40d1b2f4787f8e35c00812f9 | a041397bebfee1c7ed9a5d26891b9836dc350a4c | /tests/test_api.py | cf546ee556353ebfbb44447e11176b5eca986ff8 | ["BSD-3-Clause"] | permissive | https://github.com/pkgcore/pkgcheck | d0912fcdfce08d840d40fbbaaab404c79c444a1c | a159aca5222ca9ef08087ee7aa16325fe136080a | refs/heads/master | 2023-09-01T18:47:21.862067 | 2023-09-01T15:49:58 | 2023-09-01T15:49:58 | 13,432,888 | 27 | 33 | BSD-3-Clause | false | 2023-09-14T11:04:33 | 2013-10-09T04:17:55 | 2023-03-12T22:21:41 | 2023-09-14T11:04:25 | 6,568 | 29 | 26 | 94 | C | false | false |
import multiprocessing
import os
import signal

import pytest

from pkgcheck import PkgcheckException, scan
from pkgcheck import objects


class TestScanApi:
    @pytest.fixture(autouse=True)
    def _setup(self, testconfig):
        self.base_args = ["--config", testconfig]
        self.scan_args = ["--config", "no", "--cache", "no"]

    def test_argparse_error(self, repo):
        with pytest.raises(PkgcheckException, match="unrecognized arguments"):
            scan(["-r", repo.location, "--foo"])

    def test_no_scan_args(self):
        pipe = scan(base_args=self.base_args)
        assert pipe.options.target_repo.repo_id == "standalone"

    def test_no_base_args(self, repo):
        assert [] == list(scan(self.scan_args + ["-r", repo.location]))

    def test_keyword_import(self):
        """Keyword classes are importable from the top-level module."""
        from pkgcheck import NonsolvableDeps, Result

        assert issubclass(NonsolvableDeps, Result)

    def test_module_attributes(self):
        """All keyword class names are shown for the top-level module."""
        import pkgcheck

        assert set(objects.KEYWORDS) < set(dir(pkgcheck))

    def test_sigint_handling(self, repo):
        """Verify SIGINT is properly handled by the parallelized pipeline."""

        def run(queue):
            """Pipeline test run in a separate process that gets interrupted."""
            import sys
            import time
            from functools import partial
            from unittest.mock import patch

            from pkgcheck import scan

            def sleep():
                """Notify testing process then sleep."""
                queue.put("ready")
                time.sleep(100)

            with patch("pkgcheck.pipeline.Pipeline.__iter__") as fake_iter:
                fake_iter.side_effect = partial(sleep)
                try:
                    iter(scan([repo.location]))
                except KeyboardInterrupt:
                    queue.put(None)
                    sys.exit(0)
                queue.put(None)
                sys.exit(1)

        mp_ctx = multiprocessing.get_context("fork")
        queue = mp_ctx.SimpleQueue()
        p = mp_ctx.Process(target=run, args=(queue,))
        p.start()
        # wait for pipeline object to be fully initialized then send SIGINT
        for _ in iter(queue.get, None):
            os.kill(p.pid, signal.SIGINT)
        p.join()
        assert p.exitcode == 0
| UTF-8 | Python | false | false | 2,468 | py | 802 | test_api.py | 114 | 0.58671 | 0.584279 | 0 | 74 | 32.351351 | 80 |
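The handshake pattern in test_sigint_handling above (the child signals readiness through a queue, the parent then delivers SIGINT) reduced to a minimal standalone sketch; the "fork" start method makes this POSIX-only:

import multiprocessing
import os
import signal
import time


def worker(queue):
    queue.put("ready")  # tell the parent we reached the critical section
    try:
        time.sleep(100)
    except KeyboardInterrupt:
        queue.put(None)


if __name__ == "__main__":
    mp_ctx = multiprocessing.get_context("fork")
    queue = mp_ctx.SimpleQueue()
    p = mp_ctx.Process(target=worker, args=(queue,))
    p.start()
    for _ in iter(queue.get, None):   # wait for "ready", then interrupt
        os.kill(p.pid, signal.SIGINT)
    p.join()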
enesdurmus/Infected_Mario | 7,782,480,778,552 | d7c0416ec740b18faa6d6cec64bc5656b178c3d0 | 6109fdd0410fdf4f7ad033cfbbc375d029f61356 | /Elements.py | 88959361c4b8c63527bdc2aa5bfdff81b291b8e3 | [] | no_license | https://github.com/enesdurmus/Infected_Mario | d1353416c726d8ed692660e7e9146d93f15b053a | 584dc280f2b4806d9b4667a35e4e5b952ef2e5d0 | refs/heads/master | 2022-12-29T10:08:20.920430 | 2020-10-15T14:15:29 | 2020-10-15T14:15:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame


class Element(pygame.sprite.Sprite):
    def __init__(self, width, height, image_path, x=0, y=0):
        super(Element, self).__init__()
        self.width = width
        self.height = height
        self.image = pygame.image.load(image_path)
        self.image = pygame.transform.scale(self.image, (width, height))
        self.rect = self.image.get_rect()
        self.move_to(x, y)

    def move_to(self, x, y):
        self.rect.x = x
        self.rect.y = y


class Map:
    @staticmethod
    def getMap(map_name):
        file = open(map_name)
        data = file.read()
        file.close()
        data = data.split('\n')
        game_map = []
        for x in data:
            a = []
            for y in range(len(x)):
                a.append(x[y])
            game_map.append(a)
        return game_map


class Bullet(Element):
    def __init__(self, width, height, image_path, x=0, y=0):
        # pass the real coordinates through instead of hard-coded zeros
        super().__init__(width, height, image_path, x, y)

    def move_up(self, pixels=5):
        self.rect.y -= pixels

    def move_right(self, pixels=5):
        self.rect.x += pixels

    def move_left(self, pixels=5):
        self.rect.x -= pixels
| UTF-8 | Python | false | false | 1,235 | py | 7 | Elements.py | 3 | 0.510931 | 0.503644 | 0 | 44 | 25.977273 | 72 |
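A minimal usage sketch for these sprites (the image path and surface sizes are placeholders, not assets from the original game):

import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
bullet = Bullet(8, 16, "images/bullet.png", x=100, y=200)  # hypothetical asset path
bullet.move_up(10)  # bullet.rect.y is now 190
screen.blit(bullet.image, bullet.rect)
pygame.display.flip()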
avkim/kdcount | 12,515,534,750,857 | b1676dabdf353e29fbe54bdeb22324662590f8f8 | 0685ad05fdfe84d11c608c517f5cacd6e805b77b | /kdcount/cluster.py | 16f6405bb70f71368c7d9fc5084c47d70428d1ed | [] | no_license | https://github.com/avkim/kdcount | 8e45f66f5084c47d70428d1ed8c2da7502140ca0 | fe565d59bcf8f87c9a24c32451829990c18df8d3 | refs/heads/master | 2017-04-30T08:06:18.735555 | 2014-12-10T23:40:59 | 2014-12-10T23:40:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy
from models import dataset
import utils
from sys import stdout


class fof(object):
    def __init__(self, data, linking_length, np=None, verbose=False):
        self.data = data
        self.linking_length = linking_length
        # iterations
        self.iterations = 0
        perm = utils.empty(len(data), dtype='intp')
        head = utils.empty(len(data), dtype='intp')
        head[:] = numpy.arange(len(data), dtype='intp')

        llfactor = 8
        while llfactor > 0:
            op = 1
            ll = self.linking_length / llfactor
            while op > 0:
                op = self._once(perm, head, np, ll)
                self.iterations = self.iterations + 1
                if verbose:
                    print 'FOF iteration', self.iterations, op, llfactor
                    stdout.flush()
                if llfactor != 1:
                    # coarse levels run a single sweep; only the final
                    # level (llfactor == 1) iterates until convergence
                    break
            llfactor = llfactor // 2

        u, labels = numpy.unique(head, return_inverse=True)
        self.N = len(u)
        length = utils.bincount(labels, 1, self.N)
        # for example old labels == 5 is the longest halo
        # then a[0] == 5
        # we want to replace in labels 5 to 0
        # thus we need an array inv[5] == 0
        a = length.argsort()[::-1]
        length = length[a]
        inv = numpy.empty(self.N, dtype='intp')
        inv[a] = numpy.arange(self.N)
        #print inv.max(), inv.min()
        self.labels = inv[labels]
        self.length = length

        self.offset = numpy.empty_like(length)
        self.offset[0] = 0
        self.offset[1:] = length.cumsum()[:-1]
        self.indices = self.labels.argsort()

    def find(self, groupid):
        """ return all of the indices of particles of groupid """
        return self.indices[self.offset[groupid]
                :self.offset[groupid] + self.length[groupid]]

    def sum(self, weights=None):
        """ return the sum of weights of each object """
        if weights is None:
            weights = self.data._weights
        if weights is None:
            weights = 1.0
        return utils.bincount(self.labels, weights, self.N)

    def center(self, weights=None):
        """ return the center of each object """
        if weights is None:
            weights = self.data._weights
        if weights is None:
            weights = 1.0
        mass = utils.bincount(self.labels, weights, self.N)
        cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
        for d in range(self.data.pos.shape[-1]):
            cp[..., d] = utils.bincount(self.labels, weights *
                    self.data.pos[..., d], self.N)
            cp[..., d] /= mass
        return cp

    def _once(self, perm, head, np, ll):
        """ fof iteration,
            head[i] is the index of the head particle of the FOF group i
            is currently in
            perm is a scratch space for permutation;
            in each iteration head[i] is replaced with perm[head[i]]
        """
        tree = self.data.tree
        if np != 0:
            p = list(utils.divide_and_conquer(tree, tree, 10000))
        else:
            p = [(tree, tree)]
        #print 'p', len(p)

        ops = [0]

        with utils.MapReduce(np=np) as pool:
            chunksize = 1024 * 1024

            # fill perm with no changes
            def init(i):
                s = slice(i, i + chunksize)
                a, b, c = s.indices(len(head))
                perm[s] = numpy.arange(a, b)
            pool.map(init, range(0, len(head), chunksize))

            # calculate perm, such that if two groups are
            # merged, the head is set to the smaller particle index
            def work(iwork):
                n1, n2 = p[iwork]
                operations = [0]

                def process(r, i, j):
                    if len(r) == 0: return
                    # print iwork, 'len(r)', len(r)
                    # update the head id;
                    # only for those that would decrease
                    mask2 = head[i] > head[j]
                    i = i[mask2]
                    j = j[mask2]
                    ni = head[i]
                    nj = head[j]
                    # we will replace in head all ni-s to nj
                    # find the minimal replacement of ni
                    arg = numpy.lexsort((ni, -nj))
                    ni = ni[arg]
                    nj = nj[arg]
                    # find the last item in each i
                    lasts = (ni[1:] != ni[:-1]).nonzero()[0]
                    ni = ni[lasts]
                    nj = nj[lasts]
                    # write to each entry, once, in order
                    # minimizing memory clashes from many ranks;
                    # the algorithm is stable against racing
                    # but it would slow down the cache if each rank
                    # were directly writing.
                    with pool.critical:
                        mask = perm[ni] > nj
                        ni = ni[mask]
                        nj = nj[mask]
                        perm[ni] = nj
                    operations[0] += len(ni)
                    # print iwork, 'len(r)', len(i)
                n1.enum(n2, ll, process, bunch=10000 * 8)
                # print 'work', iwork, 'done'
                return operations[0]

            def reduce(op):
                # count number of operations
                #print ops[0]
                ops[0] += op
            pool.map(work, range(len(p)), reduce=reduce)

            # replace; this is done repeatedly
            # since it is possible we count a progenitor
            # into a merged progenitor.
            # in that case we do not want to do another
            # round of expensive tree walk
            def work2(i):
                s = slice(i, i + chunksize)
                N = 1
                while N > 0:
                    tmp = perm[head[s]]
                    N = (head[s] != tmp).sum()
                    head[s] = tmp
            pool.map(work2, range(0, len(head), chunksize))
        return ops[0]
| UTF-8 | Python | false | false | 6,118 | py | 16 | cluster.py | 8 | 0.471559 | 0.459791 | 0 | 165 | 36.072727 | 72 |
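The work2 step above is pointer jumping: repeatedly replacing each head with its head's head until nothing changes, which flattens merge chains left behind by the tree walk. On a toy array (standalone, numpy only):

import numpy

# head[i] points at the current group head of particle i;
# chains like 5 -> 4 -> 2 -> 1 -> 0 exist right after merging.
head = numpy.array([0, 0, 1, 2, 2, 4])
N = 1
while N > 0:
    tmp = head[head]            # jump one link
    N = (head != tmp).sum()     # count entries that still moved
    head = tmp
print(head)                     # [0 0 0 0 0 0] -- every chain collapsed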
Escalation99/Workev | 1,657,857,407,024 | 015d14a1c4fa69e59ece763163c60e14178fdb58 | 782efe22f3251a701796e68e82fbce27c2ce2d8f | /Employee/migrations/0046_auto_20200719_1500.py | 7e8a9308ba9fa006f160cb55ed4fd2dc40b3ab2d | [] | no_license | https://github.com/Escalation99/Workev | ffc10e64776bf90d206a4a7a8ef3655c22f0223b | c2312c54c152b823e991ef5955b5d2df7ff58222 | refs/heads/main | 2023-03-13T05:36:54.386719 | 2021-03-06T10:27:06 | 2021-03-06T10:27:06 | 310,613,595 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-07-19 08:00
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('Employee', '0045_auto_20200712_2050'),
    ]

    operations = [
        migrations.CreateModel(
            name='SubTask',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=255)),
                ('description', models.CharField(blank=True, max_length=255)),
                ('finished', models.BooleanField(default=False)),
                ('belongs_to', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primary_task', to='Employee.Task')),
            ],
        ),
        migrations.AlterField(
            model_name='meeting',
            name='category',
            field=models.CharField(choices=[('Alignment Meeting', 'Alignment Meeting'), ('Regular Meeting', 'Regular Meeting'), ('Sprint Planning', 'Sprint Planning'), ('Sprint Retrospective', 'Sprint Retrospective'), ('Code Review', 'Code Review')], default='Regular Meeting', max_length=50),
        ),
        migrations.AlterField(
            model_name='meeting',
            name='division',
            field=models.CharField(choices=[('Fullstack Developer', 'Fullstack Developer'), ('Backend Developer', 'Backend Developer'), ('Project Manager', 'Project Manager'), ('Scrum Master', 'Scrum Master'), ('UI/UX Designer', 'UI/UX Designer'), ('All', 'All'), ('Vice CEO', 'Vice CEO'), ('Intern', 'Intern'), ('Junior Developer', 'Junior Developer'), ('Frontend Developer', 'Frontend Developer'), ('CEO', 'CEO')], default='All', max_length=50),
        ),
        migrations.AlterField(
            model_name='meeting',
            name='type',
            field=models.CharField(choices=[('Seminar', 'Seminar'), ('Online', 'Online'), ('Regular', 'Regular')], default='Regular', max_length=255),
        ),
    ]
| UTF-8 | Python | false | false | 2,133 | py | 73 | 0046_auto_20200719_1500.py | 26 | 0.607126 | 0.585091 | 0 | 41 | 51.02439 | 447 |
tipech/spatialnet | 8,306,466,761,794 | bbf62644d57483af2f5124c4746e4665919f8350 | 413b5df0d34e63d264a2407a178b19c4370ad18d | /common/generic/iterators/__init__.py | 377c380f884fde69b349c85d62797513744b6a30 | [] | no_license | https://github.com/tipech/spatialnet | c1f21890ef01dca55c5e34ff77f962c70165c9d5 | 0394980efc628bfedd4fd504079a534418cbb89a | refs/heads/main | 2023-04-07T00:26:14.378092 | 2021-04-29T18:41:30 | 2021-04-29T18:41:30 | 301,519,197 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python

from .base_stream import *
# from .spatial_stream import *
# from .temporal_stream import *
| UTF-8 | Python | false | false | 120 | py | 48 | __init__.py | 46 | 0.683333 | 0.683333 | 0 | 5 | 22 | 32 |
OlgaRabodzei/ikea_availability | 19,490,561,597,777 | 0be8712c3232c42279e5ff7dc8e60e5854515c53 | 0149513b56e6c3de5e94ce9dab4ac2f539402be8 | /ikea_availability_api.py | 8431024e27b45213ba2ea1073193a01b074079c9 | [] | no_license | https://github.com/OlgaRabodzei/ikea_availability | 834d25bc13c41d3c798b1ff901376bb9d4b23109 | 08cf9b9467f26648f108634c010d3b2600eca875 | refs/heads/main | 2022-12-24T06:48:45.166021 | 2020-10-07T16:11:40 | 2020-10-07T16:11:40 | 301,714,544 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
from xml.etree import ElementTree

# product_id_low_availability = '50219044'
# product_id_high_availability = '90263902'


class IkeaAvailability:

    def __init__(self, product_id):
        url_base = 'https://www.ikea.com/ua/uk/iows/catalog/availability/'
        response = requests.get(url_base + product_id)
        if response.status_code != 200:
            # TODO better error handling.
            # raise Exception('Sorry! Service is unavailable.')
            return
        # TODO Can the generator raise an error?
        self.response_content = ElementTree.fromstring(response.content)

    def is_product_available(self):
        for stock in self.response_content.findall('availability/localStore/stock'):
            if stock.find('isSoldInStore') is None or stock.find('availableStock') is None:
                continue
            # Does the product belong to the store's portfolio?
            is_in_store = True if stock.find('isSoldInStore').text == 'true' else False
            amount_available = int(stock.find('availableStock').text)
            # Check if the product is available in the stock.
            if is_in_store and amount_available:
                return True
        return False

    def availability_forecast(self):
        forecast_dates = {}
        for stock in self.response_content.findall('availability/localStore/stock'):
            if stock.find('forecasts') is None:
                continue
            forecast_dates = {
                forecast.find('validDate').text: forecast.find('availableStock').text
                for forecast
                in stock.find('forecasts')
            }
        return forecast_dates
| UTF-8 | Python | false | false | 1,700 | py | 3 | ikea_availability_api.py | 2 | 0.618375 | 0.607185 | 0 | 42 | 39.428571 | 91 |
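How the findall/find calls above walk the availability XML, shown on a hand-made snippet that mimics the structure (the real IKEA response carries many more fields):

from xml.etree import ElementTree

xml = """
<ir>
  <availability>
    <localStore buCode="100">
      <stock>
        <isSoldInStore>true</isSoldInStore>
        <availableStock>12</availableStock>
      </stock>
    </localStore>
  </availability>
</ir>
"""
root = ElementTree.fromstring(xml)
for stock in root.findall('availability/localStore/stock'):
    print(stock.find('availableStock').text)  # -> 12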
kingianfong/battleship-plus | 4,406,636,451,411 | cfc098fbf2b03885566f2190957d0f9a0c506bcc | 9d42cd3ef7de195d2c54b7c5f1593e74b56918da | /gui.py | 84bb189a595abda86eada806e4083890b18d7032 | [] | no_license | https://github.com/kingianfong/battleship-plus | 3907b353bd87e9ea1d1eadcab1a17c969e58c678 | 231eb575b6c7c772aeb1de16b7cf01571ac513ef | refs/heads/master | 2022-01-20T22:13:21.029569 | 2019-07-21T14:33:58 | 2019-07-21T14:33:58 | 198,060,877 | 1 | 1 | null | false | 2019-07-21T14:32:34 | 2019-07-21T13:39:26 | 2019-07-21T13:39:29 | 2019-07-21T14:32:34 | 0 | 0 | 0 | 0 | null | false | false |
import matplotlib.pyplot as plt
import warnings
from board import *
from matplotlib import style

warnings.filterwarnings("ignore")  # ignores matplotlib warning
style.use("seaborn")  # sets style for plots


def colour(state_number):
    """ Returns the character corresponding to a specific colour. """
    colours = ['r', 'g', 'b', 'c', 'y', 'm']
    if state_number > len(ships_dict):
        state_number -= len(ships_dict) + 1  # ensures ships keep colour after hits
    return colours[state_number % len(colours)]


def display_boards(player_board, cpu_board):
    """ Shows the states of both players' boards. """
    plt.suptitle("CLOSE WINDOW TO CONTINUE", fontsize=16)

    for (x, y, z), state_number in player_board.items():
        # plotting points on player board
        L = get_state(player_board, (x, y, z))
        C = colour(state_number)
        M = None
        if z == 1:
            plt.subplot(2, 2, 1)
            plt.title("Your surface")
        else:
            plt.subplot(2, 2, 2)
            plt.title("Your subsea")
        if "hit" in L:
            M = "x"
        if "empty" in L:
            C = "k"
        if state_number != 0:
            plt.scatter(x, y, label=L, color=C, marker=M)

    for (x, y, z), state_number in cpu_board.items():
        # plotting points on cpu board
        L = get_state(cpu_board, (x, y, z))
        M = "x"
        if z == 1:
            plt.subplot(2, 2, 3)
            plt.title("Opponent surface")
        else:
            plt.subplot(2, 2, 4)
            plt.title("Opponent subsea")
        if "hit" in L:  # plots only if there are hits
            C = "k"
            if "empty" not in L:
                L = "ship " + L.split()[1]  # renamed label
                C = "r"
            plt.scatter(x, y, label=L, color=C, marker=M)

    ticks = [i for i in range(11)]
    ticklabels = [str(i) for i in range(11)]
    for i in range(1, 5):
        # adjusts settings for all subplots
        plt.subplot(2, 2, i)
        plt.subplots_adjust(right=0.8, wspace=0.8, hspace=0.45)
        plt.xlim(0, 11)
        plt.ylim(0, 11)
        plt.xticks(ticks, ticklabels)
        plt.yticks(ticks, ticklabels)
        plt.xlabel("x axis")
        plt.ylabel("y axis")
        plt.grid(True)

    handles, labels = plt.gca().get_legend_handles_labels()
    handle_list, label_list = [], []
    for handle, label in zip(handles, labels):
        if label not in label_list:
            handle_list.append(handle)
            label_list.append(label)
    plt.legend(handle_list, label_list,
               bbox_to_anchor=(1.6, 0.3))

    plt.show()


if __name__ == "__main__":
    for i in range(1):
        board1 = new_cpu_board()
        to_hit1 = (3, 3, 1)
        board1 = hit(board1, to_hit1)  # ensures hitting the same spot twice is okay
        to_hit2 = (5, 5, 0)
        board1 = hit(board1, to_hit2)
        display_boards(board1, board1)
| UTF-8 | Python | false | false | 3,055 | py | 8 | gui.py | 6 | 0.520786 | 0.500491 | 0 | 93 | 31.849462 | 84 |
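The handle/label dedup loop above is a standard matplotlib trick for keeping one legend entry per label when scatter is called point by point; isolated into a tiny sketch:

import matplotlib.pyplot as plt

for x in range(3):
    plt.scatter(x, x, color='r', label='ship 1')  # same label three times

handles, labels = plt.gca().get_legend_handles_labels()
unique = dict(zip(labels, handles))  # keeps one handle per label
plt.legend(unique.values(), unique.keys())
plt.show()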
LiamLowndes/CodeSignal-Solutions | 6,347,961,698,144 | 7647a25eba798431991e276c6d647c9622e051c1 | 27c45093ffdb65758b1f7e69825187710fe3e9e4 | /isCryptSolution/Answer.py | 6bed2b1ad057960aa0b760d17ed697be88fa469d | [] | no_license | https://github.com/LiamLowndes/CodeSignal-Solutions | f4f805d0bb6c4129968053248789cae6c048be30 | 37bcaee0785e8f63217cf5f8b9eb7fcaa783361a | refs/heads/master | 2021-01-26T07:16:49.514708 | 2020-03-01T18:01:42 | 2020-03-01T18:01:42 | 243,362,165 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def isCryptSolution(crypt, solution):
    combo = []
    # make combo a list of the three numbers separated by "_"
    for i in range(3):
        for j in crypt[i]:
            for k in solution:
                if k[0] == j:
                    combo.append(k[1])
                    break
        combo.append("_")

    newCombo = [[]]
    count = 0
    # break the numbers into their three individual parts and store them in the 2D array newCombo
    for i in combo:
        if i == "_":
            newCombo.append([])
            count += 1
        else:
            newCombo[count].append(i)
    newCombo.remove([])

    # check for leading zeros
    for i in range(3):
        if (newCombo[i][0] == '0') and len(newCombo[i]) > 1:
            return False

    # check validity using the numb function
    if numb(newCombo[0]) + numb(newCombo[1]) != numb(newCombo[2]):
        return False
    return True


# creates the integer value for a list of digits (e.g. it converts [1, 2, 3] into 123)
def numb(arr):
    num = 0
    lenArr = len(arr)
    count = -1
    for i in range(lenArr - 1, -1, -1):
        count += 1
        num += int(arr[count]) * (10 ** i)
    return num
| UTF-8 | Python | false | false | 1,212 | py | 6 | Answer.py | 5 | 0.512376 | 0.490099 | 0 | 47 | 24.787234 | 94 |
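A worked check with the classic cryptarithm; this mapping satisfies SEND + MORE = MONEY, so the function should return True:

crypt = ["SEND", "MORE", "MONEY"]
solution = [["O", "0"], ["M", "1"], ["Y", "2"], ["E", "5"],
            ["N", "6"], ["D", "7"], ["R", "8"], ["S", "9"]]
# SEND = 9567, MORE = 1085, MONEY = 10652 and 9567 + 1085 == 10652
print(isCryptSolution(crypt, solution))  # True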
hetacharya12/hetresume | 11,982,958,787,061 | 8b009c883b0e7b2988d400f61a71b9f5b5451492 | 27e182a66490e1285646d4c2dfa55a0c175a4cb1 | /Jobs/views.py | 629b035295d3bd482ee2857a80b7c5bf4f48ac6f | [] | no_license | https://github.com/hetacharya12/hetresume | 7995417daf65ccf26c9d1cac79be97cde23b3ed7 | 909391885d31588ef216800381443c2d69e695d4 | refs/heads/master | 2020-03-27T12:06:50.676652 | 2018-09-19T19:34:31 | 2018-09-19T19:34:31 | 146,526,941 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import Job, Myimage
from datetime import datetime
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
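# Quick check (illustrative): diff_month(datetime(2018, 9, 1), datetime(2014, 7, 7))
# = (2018 - 2014) * 12 + (9 - 7) = 50; only year and month are compared, days are ignored.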
def home(request):
jobs = Job.objects
images = Myimage.objects
    no_of_months = diff_month(datetime.now(), datetime(2014, 7, 7)) + 20  # months since July 2014, plus a fixed offset of 20
    for imageurls in images.all():
        myimageurl = imageurls.image.url  # keeps only the last image's URL; note it is not passed to the template below
    return render(request, 'jobs/index-5.html', {'jobs': jobs, 'no_of_months': no_of_months})
| UTF-8 | Python | false | false | 488 | py | 5 | views.py | 3 | 0.692623 | 0.657787 | 0 | 14 | 33.857143 | 90 |
PalytoxinRK/DeepLearningFlappyBird | 2,817,498,581,781 | 1a7fcff2884ac1196b668bb15a40a2e4ca5e700f | 10a50f83677a26bb1e451d9f9f63c340ffe83a61 | /DQN-NATURE.py | b6a3feab6954d62fbc1b97c9024b1466af95aca4 | [] | no_license | https://github.com/PalytoxinRK/DeepLearningFlappyBird | 544fabd46e5b0cdd97b527ce31dce6c61b837bac | 0dd48407d98eb8fb5c10a247e578028d7019ec71 | refs/heads/master | 2020-05-26T09:34:09.489370 | 2019-05-23T07:58:37 | 2019-05-23T07:58:37 | 188,187,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from __future__ import print_function # Python 3 style print
import tensorflow as tf
import cv2
import sys
sys.path.append("game/")
import wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque # double-ended queue
# Hyperparameters
GAME = 'bird' # game name
ACTIONS = 2 # number of action types: up or down
GAMMA = 0.99 # Q-learning discount factor
OBSERVE = 100. # number of replay samples to observe before training
EXPLORE = 200000. # frames over which to anneal epsilon
FINAL_EPSILON = 0.001 # final epsilon for epsilon-greedy action selection
INITIAL_EPSILON = 0.01 # initial epsilon for epsilon-greedy action selection
REPLAY_MEMORY = 50000 # maximum size of the replay memory
BATCH = 32 # minibatch size for random sampling
FRAME_PER_ACTION = 1
UPDATE_TIME = 100 # interval (in steps) for refreshing the target network
class DQN_NATURE:
def __init__(self):
        # initialize the replay memory
self.memory = deque()
        # initialize the step counter (drives model checkpointing and the epsilon schedule)
self.timeStep = 0
self.epsilon = INITIAL_EPSILON
        # build the online Q network
self.stateInput,self.QValue,self.W_conv1,self.b_conv1,self.W_conv2,self.b_conv2,self.W_conv3,self.b_conv3,self.W_fc1,self.b_fc1,self.W_fc2,self.b_fc2 = self.createNetwork()
        # build the target Q network
self.stateInputT,self.QValueT,self.W_conv1T,self.b_conv1T,self.W_conv2T,self.b_conv2T,self.W_conv3T,self.b_conv3T,self.W_fc1T,self.b_fc1T,self.W_fc2T,self.b_fc2T = self.createNetwork()
        # ops that copy the online Q network weights into the target network (tf.assign performs the copy)
self.copyTargetQNetworkOperation = [self.W_conv1T.assign(self.W_conv1),self.b_conv1T.assign(self.b_conv1),self.W_conv2T.assign(self.W_conv2),self.b_conv2T.assign(self.b_conv2),self.W_conv3T.assign(self.W_conv3),self.b_conv3T.assign(self.b_conv3),self.W_fc1T.assign(self.W_fc1),self.b_fc1T.assign(self.b_fc1),self.W_fc2T.assign(self.W_fc2),self.b_fc2T.assign(self.b_fc2)]
        # build the loss function and training op
self.createTrainingMethod()
        # saving and loading the network model
        # TensorFlow saves via a Saver; a Saver instance is normally obtained with tf.train.Saver() before the Session() is created
self.saver = tf.train.Saver()
self.sess = tf.InteractiveSession()
self.sess.run(tf.initialize_all_variables())
        # if a checkpoint exists, load the existing model
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.sess, checkpoint.model_checkpoint_path)  # fixed: bare 'sess' is undefined in this scope
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
    # initialize the current state by stacking the first frame four times
def setInitState(self,observation):
self.currentState = np.stack((observation, observation, observation, observation), axis = 2)
    # helpers for building the CNN
    # weights: tf.truncated_normal(shape, mean, stddev):
    # shape is the output tensor shape, mean the mean and stddev the standard deviation of a truncated normal distribution
    # in TensorFlow, defining and initializing a variable are separate steps: tf.Variable(initializer, name), where initializer supplies the initial value and name is an optional variable name
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev = 0.01)
return tf.Variable(initial)
    # biases: tf.constant creates a constant tensor in TensorFlow
def bias_variable(self, shape):
initial = tf.constant(0.01, shape = shape)
return tf.Variable(initial)
    # convolution: tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)
    # input  -- input image Tensor [batch, in_height, in_width, in_channels]
    # filter -- kernel Tensor [filter_height, filter_width, in_channels, out_channels]
    # strides -- per-dimension stride of the sliding window; for images this is usually [1, stride, stride, 1]
    # padding -- "SAME": output size = input size / stride; "VALID": output size = (input size - filter size + 1) / stride, rounded up
def conv2d(self, x, W, stride):
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
    # pooling: tf.nn.max_pool(value, ksize, strides, padding, name=None)
    # value -- input [batch, height, width, channels]
    # ksize -- pooling window size, a 4-vector, usually [1, height, width, 1] since we do not pool over batch or channels
    # strides -- per-dimension stride of the window, usually [1, stride, stride, 1]
    # padding -- "SAME" or "VALID"
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
    # build the CNN model: stateInput -> QValue
    def createNetwork(self):
        # first conv layer, kernel 8*8*4*32
        W_conv1 = self.weight_variable([8, 8, 4, 32])
        b_conv1 = self.bias_variable([32])
        # second conv layer, kernel 4*4*32*64
        W_conv2 = self.weight_variable([4, 4, 32, 64])
        b_conv2 = self.bias_variable([64])
        # third conv layer, kernel 3*3*64*64
        W_conv3 = self.weight_variable([3, 3, 64, 64])
        b_conv3 = self.bias_variable([64])
        # first fully connected layer, 1600 - 512
        W_fc1 = self.weight_variable([1600, 512])
        b_fc1 = self.bias_variable([512])
        # second fully connected layer, 512 - 2
        W_fc2 = self.weight_variable([512, ACTIONS])
        b_fc2 = self.bias_variable([ACTIONS])
        # input layer
        stateInput = tf.placeholder("float", [None, 80, 80, 4])
        # first hidden layer + pooling, tf.nn.relu activation, 80*80*4 -> 20*20*32 (80/4 = 20)
        h_conv1 = tf.nn.relu(self.conv2d(stateInput, W_conv1, 4) + b_conv1) # 80*80*4 -> 20*20*32 80/4 = 20
        h_pool1 = self.max_pool_2x2(h_conv1) # 20*20*32 -> 10*10*32 20/2 = 10
        # second hidden layer (only one pooling layer is used overall)
        h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2, 2) + b_conv2) # 10*10*32 -> 5*5*64 10/2 = 5
        # h_pool2 = max_pool_2x2(h_conv2)
        # third hidden layer
        h_conv3 = tf.nn.relu(self.conv2d(h_conv2, W_conv3, 1) + b_conv3) # 5*5*64 -> 5*5*64 5/1 = 5
        # h_pool3 = max_pool_2x2(h_conv3)
        # Reshape
        #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
        h_conv3_flat = tf.reshape(h_conv3, [-1, 1600]) # 5*5*64 = 1600 n*1600 --1600
        # fully connected layer
        h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1) # 1600*512 -- 512
        # output layer
        # readout layer: the Q value of each action
        QValue = tf.matmul(h_fc1, W_fc2) + b_fc2 # 512*2 -- 2
return stateInput,QValue,W_conv1,b_conv1,W_conv2,b_conv2,W_conv3,b_conv3,W_fc1,b_fc1,W_fc2,b_fc2
    # copy the online Q network into the target Q network
    def copyTargetQNetwork(self):
        self.sess.run(self.copyTargetQNetworkOperation)
    # loss function
    def createTrainingMethod(self):
        # actionInput is the one-hot action taken and yInput the training label; Q_action multiplies the network
        # output by actionInput and sums along axis 1, and the cost is the squared label/output difference
        self.actionInput = tf.placeholder("float", [None, ACTIONS]) # action taken
        self.yInput = tf.placeholder("float", [None]) # label
        Q_action = tf.reduce_sum(tf.multiply(self.QValue, self.actionInput), reduction_indices=1)
        self.cost = tf.reduce_mean(tf.square(self.yInput - Q_action))
        # train_step minimizes the cost with the Adam optimizer
        self.train_step = tf.train.AdamOptimizer(1e-6).minimize(self.cost)
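    # Worked example of the target (illustrative numbers, not from the original code):
    # with GAMMA = 0.99, reward r = 0.1 and max_a Q_target(s', a) = 2.0, the label is
    # y = 0.1 + 0.99 * 2.0 = 2.08, and training pulls Q(s, chosen action) toward 2.08.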
    # train the network
    def trainNetwork(self): # image input -> conv/fully connected layers -> TF training step
        # gradient descent
        # sample a minibatch of training data
minibatch = random.sample(self.memory, BATCH)
state_batch = [d[0] for d in minibatch]
action_batch = [d[1] for d in minibatch]
reward_batch = [d[2] for d in minibatch]
next_state_batch = [d[3] for d in minibatch]
        # compute the labels
y_batch = []
QValue_batch = self.QValueT.eval(feed_dict = {self.stateInputT : next_state_batch})
for i in range(0, BATCH):
terminal = minibatch[i][4]
# if terminal, only equals reward
if terminal:
y_batch.append(reward_batch[i])
else:
y_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))
# perform gradient step
self.train_step.run(feed_dict = {
self.yInput : y_batch,
self.actionInput : action_batch,
self.stateInput : state_batch
})
# save progress every 10000 iterations
if self.timeStep % 10000 == 0:
self.saver.save(self.sess, 'saved_networks/' + GAME + '-dqn', global_step = self.timeStep)
        # refresh the target Q network
if self.timeStep % UPDATE_TIME == 0:
self.copyTargetQNetwork()
    # push the new observation into the replay memory
def setPerception(self,nextObservation,action,reward,terminal):
newState = np.append(self.currentState[:,:,1:], nextObservation, axis = 2)
self.memory.append((self.currentState, action, reward, newState, terminal))
        # keep the replay memory from growing unbounded
if len(self.memory) > REPLAY_MEMORY:
self.memory.popleft()
        # start training the network
if self.timeStep > OBSERVE:
self.trainNetwork()
# print info
state = ""
if self.timeStep <= OBSERVE:
state = "observe"
elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print ("TIMESTEP", self.timeStep, "/ STATE", state, \
"/ EPSILON", self.epsilon)
self.currentState = newState
self.timeStep = self.timeStep + 1
def getAction(self):
        # epsilon-greedy: choose a random action with probability epsilon
QValue = self.QValue.eval(feed_dict={self.stateInput : [self.currentState]})[0]
action = np.zeros([ACTIONS])
action_index = 0
if self.timeStep % FRAME_PER_ACTION == 0:
if random.random() <= self.epsilon:
print("----------Random Action----------")
action_index = random.randrange(ACTIONS)
action[action_index] = 1
else:
print("----------QNetwork Action----------")
action_index = np.argmax(QValue)
action[action_index] = 1
else:
action[0] = 1 # do nothing
        # anneal epsilon
if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
return action
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
return np.reshape(observation,(80,80,1))
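# Shape walkthrough for preprocess (illustrative): a raw frame (e.g. 288x512x3 BGR) is
# resized to 80x80, converted to grayscale, thresholded to {0, 255}, and reshaped to
# 80x80x1 so that four consecutive frames can be stacked into the 80x80x4 network input.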
def playGame():
    # initialize the DQN-Nature agent
    brain = DQN_NATURE()
    # open the game and its emulator interface
    flappyBird = game.GameState()
    # start the game
    # obtain the initial state
    action0 = np.array([1,0]) # do nothing
    observation0, reward0, terminal = flappyBird.frame_step(action0)
    # first resize the frame to 80*80, then convert it to grayscale
    observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
    # binarize the grayscale image
    ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
    brain.setInitState(observation0)
    # main game loop
    while True:  # was 'while 1!= 0', equivalent but less idiomatic
action = brain.getAction()
nextObservation,reward,terminal = flappyBird.frame_step(action)
nextObservation = preprocess(nextObservation)
brain.setPerception(nextObservation,action,reward,terminal)
def main():
playGame()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 11,414 | py | 3 | DQN-NATURE.py | 2 | 0.671439 | 0.627672 | 0 | 266 | 34.729323 | 372 |
CedricLeon/scripts | 2,001,454,799,715 | c3b43461b7c75446311033eda2e81f23ddbfa852 | 42e30e13c182e8afa0b6406b773d8a1a805ac65b | /python/database/balance_10CSV-dtb_from_unbalanced.py | 5a1ab4f88fb57524af860dcbd321cbda8bbc9869 | [] | no_license | https://github.com/CedricLeon/scripts | b498c5526f4ea38bcdf1c8bd2ac7a7e3028c3042 | 1f231c0fbbc4d2ab57f9c847bfeb25275a97d4f4 | refs/heads/master | 2023-07-09T10:43:09.754505 | 2021-08-20T12:47:30 | 2021-08-20T12:47:30 | 379,915,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import numpy as np
from tqdm import tqdm
from os import listdir
from os.path import isfile, join
import shutil
import csv
import time
import re
import math
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
print(bcolors.HEADER + "This script is used to balance a database of .csv files owning features for 10 CUs. Please check full_DTB_management.py for it's full usage." + bcolors.ENDC)
print("Example: python3.6 /home/cleonard/dev/stage/scripts/python/database/balance_10CSV-dtb_from_unbalanced.py /media/cleonard/alex/cedric_TPG-VVC/unbalanced_datasets/32x64/ /media/cleonard/alex/cedric_TPG-VVC/balanced_datasets/ /media/cleonard/alex/cedric_TPG-VVC/Composition_unbalanced_dtb.txt /media/cleonard/alex/cedric_TPG-VVC/balanced_datasets/AllDtbCompo.txt")
# Defining paths
path_dataset_origin = sys.argv[1] # '/home/cleonard/Data/features/32x32_unbalanced/'
path_dataset_arrival = sys.argv[2] # '/home/cleonard/Data/features/balanced2/'
recap_file = sys.argv[3] # '/media/cleonard/alex/cedric_TPG-VVC/Composition_unbalanced_dtb.txt'
store_file = sys.argv[4] # '/media/cleonard/alex/cedric_TPG-VVC/unbalanced_datasets/AllDtbCompo.txt'
# Create arrival directory if it doesn't exist (warning: it is NOT emptied if it already exists)
path_dataset_arrival = str(path_dataset_arrival + path_dataset_origin.split("/")[-2] + "_balanced/")
if not os.path.isdir(path_dataset_arrival):
print("Create directory \"", path_dataset_arrival, "\"")
os.mkdir(path_dataset_arrival)
# Get dtb name
dtb = path_dataset_origin.split('/')[-2]
# Init the min elements of each class
# For "/home/cleonard/Data/features/unbalanced/" min class is "TTH" with 119375 elements
nbCus = np.array([])
with open(recap_file, "r") as file:
for line in file:
if re.search(dtb, line):
# Get CUs repartition (avoid dtb_name and total)
words = line.split(' ')
words = words[1:]
words = words[:-1]
# Avoid nbFeatures in the count
i = 0
for w in words:
if w and w[0] == "[":
words = words[i:]
break
i += 1
for w in words:
w = w.replace("[", "")
w = w.replace(",", "")
w = w.replace("]", "")
nbCus = np.append(nbCus, int(w))
# Compute the size of the smallest non-empty class and print it
min_count = int(min(nbCus[ nbCus != 0 ]))  # renamed from 'min' to stop shadowing the builtin
print(str(nbCus) + bcolors.OKCYAN + " Min: " + str(min_count) + bcolors.ENDC)
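# Example (hypothetical counts): with nbCus = [500, 300, 120, 0, 250, 400], the zero entry
# is ignored and min_count = 120, so at most 120 CUs of each split type are copied below.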
# Picking every file
fichiers = [f for f in listdir(path_dataset_origin) if isfile(join(path_dataset_origin, f))]
# Shuffle files
np.random.shuffle(fichiers)
count = [0,0,0,0,0,0]
# Init index to rename files
copiedFiles = 0
# Browse files
for file in tqdm(fichiers):
# Open each file in the repertory
with open(path_dataset_origin+file) as csv_file:
# Open the file as a .csv and specify the delimiter
csv_reader = csv.reader(csv_file, delimiter=',')
# Count which line we are (needed to avoid computing the first line)
line_count = 0
for row in csv_reader:
# Avoid first line (contain column names)
if line_count == 0:
line_count += 1
else:
line_count += 1
# Get the split name
splitString = row[2]
# Deduce split number
if splitString == "NS":
split = 0;
elif splitString == "QT":
split = 1;
elif splitString == "BTH":
split = 2;
elif splitString == "BTV":
split = 3;
elif splitString == "TTH":
split = 4;
elif splitString == "TTV":
split = 5;
else:
print("WTF : ", splitString)
sys.exit("Unknown split name in : ", csv_file, ", at line ", line_count)
# Balance the database
            if count[split] < min_count:
# Transform row in a string by concatenating each word and a ','
data = ""
for word in row[1:]: # We don't take the first word wich corresponds to the "CU" number in the original csv file: no interest
data += word + ','
data = data[:-1]
# Create or overwrite a file and write data in it
arrivalFile = path_dataset_arrival + str(copiedFiles) + ".csv"
                with open(arrivalFile, "w") as out_file:  # 'with' closes the file and avoids rebinding the loop variable 'file'
                    out_file.write(data)
# Increment the index of copied files
count[split] += 1
copiedFiles += 1
# Compute Total and check if there is as much copied files as expected
total = 0
for cnt in count:
total = total + cnt
check = bcolors.OKGREEN
if copiedFiles != total:
check = bcolors.WARNING
# Print and store results
print("Count: " + str(count) + ", total: " + check + str(total) + bcolors.ENDC + " Copied files: " + check + str(copiedFiles) + bcolors.ENDC)
with open(store_file, "a") as file:
file.write(str(dtb)+"_balanced"+" "+str(count)+" "+str(total)+"\n\n")
| UTF-8 | Python | false | false | 5,458 | py | 27 | balance_10CSV-dtb_from_unbalanced.py | 24 | 0.570722 | 0.552767 | 0 | 150 | 35.386667 | 368 |
codenation-dev/squad-6-ad-python-women-magalu-1 | 19,550,691,155,921 | 9dfefa82a8b4f97f528309cb1b5f8b6ab5b7d0d6 | 37cd91d9d795c49e1a1da77b4b40ff42e387dd20 | /central-erros/errorcenter/api/serializers.py | 4e3125349426a70b8ffa7841beab9add7e7a5966 | [] | no_license | https://github.com/codenation-dev/squad-6-ad-python-women-magalu-1 | 07afba99044246e0b701f3a996bd4bc770b39771 | 5d702bb8ee19fcfcd954bab2cba763283cfa4306 | refs/heads/master | 2022-12-17T09:27:07.690706 | 2020-06-06T09:20:06 | 2020-06-06T09:20:06 | 204,016,066 | 0 | 3 | null | false | 2022-12-08T02:35:19 | 2019-08-23T14:39:01 | 2020-06-06T09:20:09 | 2022-12-08T02:35:19 | 8,041 | 0 | 2 | 7 | CSS | false | false | from rest_framework import serializers
import django.contrib.auth.password_validation as validators
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.core import exceptions
from django.db import models
from django.views.decorators.csrf import csrf_exempt
from .models import (Log,
Origin,
Environment,
Level)
class LogSerializer(serializers.ModelSerializer):
class Meta:
model = Log
fields = ('title',
'details',
'number_events',
'occurrence_date',
'active',
'environment',
'level',
'origin',
'user'
)
read_only_fields = ['occurrence_date', 'active']
class OriginSerializer(serializers.ModelSerializer):
class Meta:
model = Origin
fields = ['description']
read_only_fields = ['description']
class LevelSerializer(serializers.ModelSerializer):
class Meta:
model = Level
fields = ['description']
read_only_fields = ['description']
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'email', 'password']
def validate_password(self, data):
try:
validators.validate_password(data, self.instance)
except exceptions.ValidationError as e:
raise serializers.ValidationError(e.messages)
return data
def validate_email(self, data):
users = User.objects.filter(email=data)
        if users:
raise serializers.ValidationError(["email must be unique"])
return data
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
user.is_active = True
user.save()
return user
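# Minimal usage sketch (illustrative; assumes a configured Django project with DRF):
# serializer = UserSerializer(data={'username': 'jane', 'email': 'jane@example.com',
#                                   'password': 'S0me-strong-pass'})
# if serializer.is_valid():
#     user = serializer.save()  # calls create(), which activates and saves the user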
class EnvironmentSerializer(serializers.ModelSerializer):
class Meta:
model = Environment
fields = ['id', 'description']
read_only_fields = ['description']
| UTF-8 | Python | false | false | 2,145 | py | 13 | serializers.py | 7 | 0.591142 | 0.591142 | 0 | 75 | 27.6 | 71 |
caglaruba/Company-Management-backend-Python-Django | 1,786,706,426,016 | 25301e75c4b50dc1ad1b80253f24fe885fc7a0e1 | 76e64405b7e51f1b6e630538769ea9b2205b3c5c | /models/permission.py | e43cc18662c7a59cfa13a54ff404753814d026cd | [] | no_license | https://github.com/caglaruba/Company-Management-backend-Python-Django | dbe962598da32e6ad9779a60db800fe1b0aa892c | e0ed001c22f1f7b231d7d9cf6f0d566cfff3a745 | refs/heads/master | 2022-02-15T05:28:56.378600 | 2016-10-29T06:47:33 | 2016-10-29T06:47:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from models import Base
class Permission(Base):
__tablename__ = "permission"
id = Column(Integer, primary_key=True, autoincrement=True)
role_id = Column(Integer, ForeignKey("role.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False)
role = relationship("Role", backref=backref("permissions", order_by=id, cascade="all,delete-orphan", ))
action = Column(String(length=256), nullable=False)
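# Usage sketch (illustrative; assumes an open SQLAlchemy session and an existing Role row):
# role = session.query(Role).first()
# session.add(Permission(role=role, action="invoice.read"))
# session.commit()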
| UTF-8 | Python | false | false | 623 | py | 31 | permission.py | 28 | 0.757624 | 0.752809 | 0 | 19 | 31.789474 | 108 |
JPAkira/Ez4Admin | 953,482,778,762 | 3338613d8b1759ce4fc4ed81c12f31e65f091b49 | 123fb9317cfbe8a250bb64e2ffc721d1224ad7ec | /apps/vendas/views/salvar_produto_na_venda.py | 3a1306fda9a8d904477656f88815f72bb629521e | [] | no_license | https://github.com/JPAkira/Ez4Admin | d0d63462b289ed59ebd57a39ecd14badbd6fc312 | 059d72878a75522271c886b04025814213429617 | refs/heads/main | 2023-04-29T22:25:12.150675 | 2021-05-23T23:55:24 | 2021-05-23T23:55:24 | 370,127,119 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import messages
from django.shortcuts import redirect
from apps.estoque.models import Produto
from apps.vendas.models import Venda, Produto_Venda
def salvar_produto(request):
    '''Authenticating access to the page'''
if not request.user.is_authenticated:
return redirect('login')
    '''Request parameters from the page'''
venda_id = request.POST['venda_id']
codigo = request.POST['codigo']
quantidade = request.POST['quantidade']
    ''' Look up the product; return an error if it does not exist '''
try:
produto = Produto.objects.get(pk=codigo)
except Produto.DoesNotExist:
messages.error(request, 'Produto não encontrado')
        return redirect('adicionar_produto', venda_id)  # pass the id as an arg, matching the redirect at the end of this view
    ''' Look up the sale '''
try:
venda = Venda.objects.get(pk=venda_id)
except Venda.DoesNotExist:
messages.error(request, 'Essa venda não existe ou foi apagada')
return redirect('vendas')
    ''' Compute the total (quantity * unit price) '''
total = float(produto.preco.replace(",", ".")) * int(quantidade)
    ''' Keep the value nicely formatted, with only 2 digits after the decimal point '''
total = float("{:.2f}".format(total))
    ''' Fetch the sale's current values and convert them to their proper types '''
valor_total = float(venda.valor_total)
volume_de_compra = int(venda.volume_de_compra)
    ''' Add the newly added product to the sale's totals '''
venda.valor_total = valor_total + total
venda.volume_de_compra = volume_de_compra + int(quantidade)
    ''' Save the updated sale '''
venda.save()
    ''' Record the product sale in the auxiliary table '''
venda_produto = Produto_Venda.objects.create(venda_id=venda, produto_id=produto, quantidade=quantidade,
total=total)
venda_produto.save()
return redirect('adicionar_produto', venda_id) | UTF-8 | Python | false | false | 1,944 | py | 66 | salvar_produto_na_venda.py | 48 | 0.665635 | 0.664603 | 0 | 52 | 36.288462 | 107 |
ahujaradhika/jkim | 8,830,452,787,259 | 53aa9881bcaf14202a1adde48a284fe8fc8d6b7d | 09486b2afc10f1e6b02b6475d37c15971f7a6e31 | /fit_meta_d_SSE.py | 24d675a035eb458295bd621a3a5e0b433e0456a0 | [] | no_license | https://github.com/ahujaradhika/jkim | 17260d1cda570a2389f6b9edaa3433f76778b22d | 7396188329cc37b1e6d1cb00df447ccd696911f1 | refs/heads/main | 2023-02-20T12:07:52.812309 | 2021-01-13T09:04:48 | 2021-01-13T09:04:48 | 323,440,378 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 13:23:43 2017
fit = fit_meta_d_SSE(obs_HR2_rS1,
obs_FAR2_rS1,
obs_HR2_rS2,
obs_FAR2_rS2,
cprime,
s,
d_min,
d_max,
d_grain)
Given response-conditional type 2 hit rates and type 2 false alarm rates,
as well as the empirically estimated relative criterion cprime = c / d',
use a signal detection theory model to estimate meta-d',
the value of d' that would have been expected to generate the observed
type 2 data.
Estimation is done by testing different values for meta-d' to see what
value gives the best fit to the observed data (best fit = minimizes sum
squared error between observed and expected type 2 HR and type 2 FAR).
required inputs
---------------
obs_HR2_rS1, obs_FAR2_rS1 :
Arrays of observed type 2 hit rate (p(high confidence|correct "S1" response))
and type 2 false alarm rate (p(high confidence|incorrect "S1" response)).
The size of each array is N-1, where N is the number of options for
rating confidence. So for instance, if you use a confidence scale with
4 levels of confidence, these arrays should contain 3 elements each.
The i_th element corresponds to the type 2 HR and type 2 FAR found by
considering all confidence ratings of i+1 or higher to be "high
confidence".
The i_th element of obs_HR2_rS1 must correspond to the i_th element of
obs_FAR2_rS1. Otherwise, ordering of the data is not important.
obs_HR2_rS2, obs_FAR2_rS2 : same as above, for "S2" responses
cprime :
The relative type 1 criterion.
c' = c / d', where
d' = z(type 1 HR) - z(type 1 FAR)
c = -.5 * [z(type 1 HR) + z(type 1 FAR)]
and z is the inverse of the normal cumulative distribution function.
If s != 1, specify c' in units of the S1 distribution, as follows.
d' = (1/s)*z(type 1 HR) - z(type 1 FAR)
c = [ -1 / (1+s) ] * [z(type 1 HR) + z(type 1 FAR)]
optional inputs
---------------
s :
The ratio of the standard deviations of the evidence distributions for
stimulus classes S1 and S2. Can be estimated from rating data.
If unspecified, s = 1
d_min :
    The minimum value for meta-d' that will be tested.
If unspecified, d_min = -5
d_max :
The maximum value for meta-d' that will be tested.
If unspecified, d_max = 5
d_grain :
The step size used in testing values of meta-d'.
If unspecified, d_grain = .01
output (as a python dictionary)
------
fit['meta_d'] :
meta_d' value that minimizes the SSE between observed and expected type 2
data. If s != 1, meta_d' is specified in units of the S1 distribution.
fit['meta_c'] :
The value of type 1 criterion c used in conjunction with meta_d'.
meta_c / meta_d = cprime, the constant type 1 criterion specified in the
input. If s != 1, meta-c is specified in units of the S1 distribution.
fit['s'] :
The value of s used in the type 2 data fitting, where s = sd(S1) / sd(S2)
fit['t2c_rS1'] :
Values for the type 2 criteria that, along with meta-d' and c', provide
the best fit for type 2 data for "S1" responses
fit['t2c_rS2'] :
Likewise, for "S2" responses
fit['SSE'] :
Sum of squared errors between observed and expected type 2 data
fit['est_HR2_rS1'] :
The type 2 hit rates for "S1" responses expected from meta_d, meta_c, s,
and t2c_rS1
fit['obs_HR2_rS1'] :
Empirically observed type 2 hit rates for "S1" responses
fit['est_FAR2_rS1'], fit['obs_FAR2_rS1'], fit['est_HR2_rS2'], ...
Likewise as above, for expected and observed type 2 FAR for "S1"
responses and type 2 HR and type 2 FAR for "S2" responses
26/10/2017 wrote it
"""
import numpy as np
import sys
import scipy.stats as sstats
from math import sqrt
def fit_meta_d_SSE(
obs_HR2_rS1,
obs_FAR2_rS1,
obs_HR2_rS2,
obs_FAR2_rS2,
cprime,
s=1,
d_min=-5,
d_max=5,
d_grain=0.01
):
# Initialize analysis
nRatings = len(obs_HR2_rS1)
    ds = np.linspace(d_min, d_max, num=int(round((d_max - d_min)/d_grain + 1)))  # int cast: newer numpy rejects a float num
ds = [round(i, 2) for i in ds]
SSEmin = float('inf')
meta_d = []
meta_c = []
t2c_rS1 = []
t2c_rS2 = []
est_HR2_rS1 = []
est_FAR2_rS1 = []
est_HR2_rS2 = []
est_FAR2_rS2 = []
# Search for meta-d' that minimizes type 2 SSE
for i in range(len(ds)):
# Initialize parameters for current level of meta-d'
d = ds[i]
c = cprime * d
S1mu = - d / 2
S2mu = d / 2
S1sd = 1
S2sd = 1 / s
lowerL = S1mu - 5*max([S1sd, S2sd])
upperL = S2mu + 5*max([S1sd, S2sd])
        x = np.linspace(lowerL, upperL, num=int(round((upperL - lowerL) / 0.001 + 1)))  # int cast as above
diff = min(abs(x - c))
c_ind = np.argmin(abs(x - c))
HRs = 1 - sstats.norm.cdf(x, loc=S2mu, scale=S2sd)
FARs = 1 - sstats.norm.cdf(x, loc=S1mu, scale=S1sd)
# Fit type 2 data for S1 responses
SSE_rS1 = []
rS1_ind = []
est_HR2s_rS1 = (1 - FARs[:c_ind]) / (1 - FARs[c_ind])
est_FAR2s_rS1 = (1 - HRs[:c_ind]) / (1 - HRs[c_ind])
for n in range(nRatings):
SSE = (est_HR2s_rS1 - obs_HR2_rS1[n])**2 + (est_FAR2s_rS1 - obs_FAR2_rS1[n])**2
SSE_rS1.append(min(SSE))
rS1_ind.append(np.argmin(SSE))
# Fit type 2 data for S2 responses
SSE_rS2 = []
rS2_ind = []
est_HR2s_rS2 = HRs[c_ind:] / HRs[c_ind]
est_FAR2s_rS2 = FARs[c_ind:] / FARs[c_ind]
for n in range(nRatings):
SSE = (est_HR2s_rS2 - obs_HR2_rS2[n])**2 + (est_FAR2s_rS2 - obs_FAR2_rS2[n])**2
SSE_rS2.append(min(SSE))
rS2_ind.append(np.argmin(SSE))
# Update analysis
SSEtot = sum(SSE_rS1) + sum(SSE_rS2)
if SSEtot < SSEmin:
SSEmin = SSEtot
meta_d = d
meta_c = c
t2c_rS1 = x[np.array(rS1_ind)]
t2c_rS2 = x[np.array([h + c_ind - 1 for h in rS2_ind])]
est_HR2_rS1 = est_HR2s_rS1[np.array(rS1_ind)]
est_FAR2_rS1 = est_FAR2s_rS1[np.array(rS1_ind)]
est_HR2_rS2 = est_HR2s_rS2[np.array(rS2_ind)]
est_FAR2_rS2 = est_FAR2s_rS2[np.array(rS2_ind)]
# Package output
fit = {
'meta_d1': meta_d,
'meta_c1': meta_c,
's': s,
't2c1_rS1': t2c_rS1.tolist(),
't2c1_rS2': t2c_rS2.tolist(),
'SSE': SSEmin,
'est_HR2_rS1': est_HR2_rS1.tolist(),
'obs_HR2_rS1': obs_HR2_rS1,
'est_FAR2_rS1': est_FAR2_rS1.tolist(),
'obs_FAR2_rS1': obs_FAR2_rS1,
'est_HR2_rS2': est_HR2_rS2.tolist(),
'obs_HR2_rS2': obs_HR2_rS2,
'est_FAR2_rS2': est_FAR2_rS2.tolist(),
'obs_FAR2_rS2': obs_FAR2_rS2
}
return fit
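# Toy call (illustrative values only; a 4-level confidence scale yields 3-element arrays):
# fit = fit_meta_d_SSE([0.6, 0.4, 0.2], [0.3, 0.15, 0.05],
#                      [0.65, 0.45, 0.25], [0.35, 0.2, 0.1],
#                      cprime=0.0)
# print(fit['meta_d1'], fit['SSE'])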
| UTF-8 | Python | false | false | 6,757 | py | 13 | fit_meta_d_SSE.py | 8 | 0.596567 | 0.552908 | 0 | 223 | 29.278027 | 91 |
kshirsagarsiddharth/Algorithms_and_Data_Structures | 7,670,811,614,499 | 553b69562942e52d08524d87dcd8b3841b82e899 | 246acaa90888e81caea746940dba68f93682ee5c | /Tree/AVL_TREE/minimum_nodes_in_avl_tree.py | 6cc9f8a18b374c02d06fcde99acffb31f44b1281 | [] | no_license | https://github.com/kshirsagarsiddharth/Algorithms_and_Data_Structures | 17fcbede52f58fe561cdf5387e2914fb1c62496e | 48560d8a4608383849dfa2d61289d3021bf79510 | refs/heads/master | 2021-05-21T22:08:51.573746 | 2021-01-16T07:20:04 | 2021-01-16T07:20:04 | 252,822,788 | 0 | 0 | null | false | 2020-04-21T22:38:32 | 2020-04-03T19:35:57 | 2020-04-21T22:25:57 | 2020-04-21T22:38:31 | 52 | 0 | 0 | 0 | Python | false | false | def AVLnode(height):
if height == 0:
return 1
elif height == 1:
return 2
return (1 + AVLnode(height - 1) + AVLnode(height - 2))
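# Recurrence check (illustrative): N(0)=1, N(1)=2, N(2)=1+2+1=4, N(3)=1+4+2=7,
# so the fewest nodes an AVL tree of height 3 can have is AVLnode(3) == 7.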
def rangeCount(root,a,b):
if root == None:
return 0
elif root.data > b:
return rangeCount(root.left,a,b)
    elif root.data < a:  # was '< 1', which broke the lower bound of the range
return rangeCount(root.right,a,b)
elif root.data >= a and root.data <= b:
return rangeCount(root.left,a,b) + rangeCount(root.right,a,b) + 1 | UTF-8 | Python | false | false | 488 | py | 216 | minimum_nodes_in_avl_tree.py | 212 | 0.559426 | 0.538934 | 0 | 18 | 25.222222 | 73 |
pepetreshere/odoo-ecuador-contabilidad | 2,216,203,147,442 | 18c998c2605916cf82a5fa0f90d315fb981ec414 | fb166da6760f57a292d23f072eea3cba01df004d | /ecua_invoice/__openerp__.py | 316397d80489c464f49fe53702437f9fc9ddb96a | [] | no_license | https://github.com/pepetreshere/odoo-ecuador-contabilidad | ab39ae90f6c06b66881b674482e3f41044c41d85 | 97a45d4c1be8eba0c7ea76ed2cd76b46a58cf7a0 | refs/heads/master | 2021-01-18T19:31:55.280085 | 2015-09-03T23:09:39 | 2015-09-03T23:09:39 | 9,312,068 | 5 | 10 | null | false | 2015-10-20T14:11:26 | 2013-04-09T03:09:40 | 2015-10-12T16:39:27 | 2015-10-20T14:10:53 | 5,218 | 5 | 4 | 2 | Python | null | null | # -*- encoding: utf-8 -*-
########################################################################
#
# @authors: Andres Calle, Andrea García
# Copyright (C) 2013 TRESCLOUD CÍA LTDA
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see http://www.gnu.org/licenses.
########################################################################
{
"name": "Ecuador Invoices / Facturas Ecuatorianas",
"version": "1.0",
# 'sequence': 4,
'complexity': "easy",
"author": "TRESCLOUD CÍA LTDA",
"website": "http://www.trescloud.com/",
"category": "Ecuadorian Regulations",
# TODO agregar dependencia a aeroo
"depends": ['base',
'sale',
'stock',
'account',
'account_accountant',
'base_optional_quick_create',
'l10n_ec_niif_minimal'],
"description": """
In Ecuador additional rules applies to invoices:
- it is needed to store the "current" name, address, VAT and phone.
- it is needed to store the invoice number
- voiding invoices allowed only to account managers
This module is part of a bigger framework of "ecua" modules developed by
TRESCLOUD, EcuadorEnLinea y 4RSOFT.
Author,
Andres Calle,
Andrea García,
Patricio Rangles
TRESCLOUD Cía Ltda.
""",
"init_xml": [],
"update_xml": [
'security/ir.model.access.csv',
'security/sale_security.xml',
'views/invoice_view.xml',
'views/shop_view.xml',
'views/res_users_view.xml',
'views/sale_create_invoice_view.xml',
'views/printer_point_view.xml',
'views/sequence_view.xml',
],
"installable": True,
"auto_install": False,
"application": False,
}
| UTF-8 | Python | false | false | 2,506 | py | 84 | __openerp__.py | 35 | 0.581367 | 0.57617 | 0 | 70 | 34.728571 | 77 |
deets/clang-complete | 14,594,298,895,372 | 9f4aff247ad5e21ce8770e543ca0449cc0cff0d2 | 26de8ced3173349a6463d194abe93b8968505432 | /clangcomplete/robot.py | 194f112c4405dd884c02199e6600a2b6cba5f73e | [
"MIT"
] | permissive | https://github.com/deets/clang-complete | 97d36395383937a313c092b45d2b16b64173f38b | de371620f69cd93697918096c989d3b624a3bf7a | refs/heads/master | 2016-09-05T11:16:20.142274 | 2013-12-20T14:42:23 | 2013-12-20T14:42:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from abl.util import Bunch
from abl.robot import Robot
from .api import mainloop
logger = logging.getLogger(__name__)
class ClangCompleteRobot(Robot):
CONFIG_NAME = ".clang-completerc"
SEARCH_PATHS = "~"
EXCEPTION_MAILING = "diez.roggisch@ableton.com"
AUTHOR = "diez.roggisch@ableton.com"
def __init__(self):
super(ClangCompleteRobot, self).__init__()
# replace the commandline parser
# because we don't accept any
self.parser = Bunch(parse_args=lambda argv: (
Bunch(
raise_exceptions=None,
config=None,
logfile=None,
loglevel=None,
logformat=None,
config_spec=False,
default_config=None,
), []))
def work(self):
logger.info("Starting clang-complete")
mainloop()
| UTF-8 | Python | false | false | 939 | py | 11 | robot.py | 10 | 0.545261 | 0.545261 | 0 | 38 | 23.657895 | 53 |
alexmilesyounger/dates_times_py | 17,884,243,850,426 | e072121e77d5922da4c974b50def00bdecf866c4 | 3d80cde09b253383208a13b62bbbcdc1d5fa2627 | /time_machine_second_challenge.py | 4100d06499a1b509fefb5b274eb68cc5fcb5cfc5 | [] | no_license | https://github.com/alexmilesyounger/dates_times_py | 6100161826b3324b188d5d82e9ce0b6a4d660dd3 | 3417f013b6bc0103bb24a2449967e05f2082cb1b | refs/heads/master | 2021-01-23T13:31:53.471784 | 2015-08-25T02:02:41 | 2015-08-25T02:02:41 | 40,062,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
starter = datetime.datetime.now()
def time_machine(my_int, my_string):
# minutes
if my_string.lower() == "minutes":
endtime = starter + datetime.timedelta(minutes=my_int)
# hours
elif my_string.lower() == "hours":
endtime = starter + datetime.timedelta(hours=my_int)
# days
elif my_string.lower() == "days":
endtime = starter + datetime.timedelta(days=my_int)
# years
elif my_string.lower() == "years":
endtime = starter + datetime.timedelta(days=(my_int * 365))
    else:
        raise ValueError("unit must be minutes, hours, days, or years")  # 'pass' here left endtime undefined
return endtime
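# Illustrative calls (results depend on import time, since 'starter' is captured then):
# time_machine(5, "minutes")  -> starter + 5 minutes
# time_machine(3, "years")    -> starter + 3 * 365 days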
| UTF-8 | Python | false | false | 527 | py | 13 | time_machine_second_challenge.py | 13 | 0.688805 | 0.683112 | 0 | 20 | 25.35 | 61 |
cerebis/pygraphistry | 4,587,025,079,422 | 46e74e55206f9828831de1e364cde47d74e5684e | fab5ff31f3dcc1a96e4c6fc10c11e7dba807333d | /setup.py | 7fd877bc88e2d848b7ef12f9b068bebdba61ef3d | [
"BSD-3-Clause"
] | permissive | https://github.com/cerebis/pygraphistry | 93da90f4a680ca098e18b8f072eed7f4a91db5b8 | bea05207c7e1c410a9f55460dca4b633e78456e2 | refs/heads/master | 2023-07-07T11:18:02.044204 | 2021-08-15T04:19:39 | 2021-08-15T04:19:39 | 396,559,421 | 0 | 0 | BSD-3-Clause | true | 2021-08-16T01:18:50 | 2021-08-16T01:13:03 | 2021-08-16T01:13:04 | 2021-08-16T01:18:50 | 118,043 | 0 | 0 | 1 | null | false | false | #!/usr/bin/env python
from setuptools import setup, find_packages
#FIXME: prevents pyproject.toml - same as https://github.com/SciTools/cartopy/issues/1270
import versioneer
def unique_flatten_dict(d):
return list(set(sum( d.values(), [] )))
core_requires = ['numpy', 'pandas >= 0.17.0', 'pyarrow >= 0.15.0', 'requests', 'protobuf >= 2.6.0']
stubs = [
'pandas-stubs', 'types-requests'
]
dev_extras = {
'docs': ['sphinx==3.4.3', 'sphinx_autodoc_typehints==1.11.1', 'sphinx-rtd-theme==0.5.1'],
'test': ['flake8', 'mock', 'mypy', 'pytest'] + stubs,
'build': ['build']
}
base_extras = {
'igraph': ['python-igraph'],
'networkx': ['networkx==2.2'],
'gremlin': ['gremlinpython'],
'bolt': ['neo4j', 'neotime'],
'nodexl': ['openpyxl', 'xlrd'],
'jupyter': ['ipython']
}
extras_require = {
**base_extras,
**dev_extras,
#kitchen sink for users -- not recommended
'all': unique_flatten_dict(base_extras),
#kitchen sink for contributors
'dev': unique_flatten_dict(base_extras) + unique_flatten_dict(dev_extras),
}
setup(
name='graphistry',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages = ['graphistry'],
platforms='any',
description = 'A visual graph analytics library for extracting, transforming, displaying, and sharing big graphs with end-to-end GPU acceleration',
long_description=open("./README.md").read(),
long_description_content_type='text/markdown',
url='https://github.com/graphistry/pygraphistry',
download_url= 'https://pypi.python.org/pypi/graphistry/',
python_requires='>=3.6',
author='The Graphistry Team',
author_email='pygraphistry@graphistry.com',
install_requires=core_requires,
extras_require=extras_require,
license='BSD',
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Environment :: GPU :: NVIDIA CUDA',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: Log Analysis',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Sociology',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Widget Sets',
'Topic :: System :: Distributed Computing'
],
keywords=['cugraph', 'cudf', 'dask', 'GPU', 'Graph', 'GraphX', 'Gremlin', 'igraph', 'Jupyter', 'Neo4j', 'Network', 'NetworkX', 'Notebook', 'Pandas', 'Plot', 'Rapids', 'RDF', 'Splunk', 'Spark', 'Tinkerpop', 'Visualization']
)
| UTF-8 | Python | false | false | 3,256 | py | 22 | setup.py | 14 | 0.628071 | 0.615172 | 0 | 88 | 36 | 228 |
ncmiller/JobPrep | 13,941,463,868,900 | 6c05bc2d90a613c357661b8080f9dddc766bd788 | 958237ee432357df5a6fca7a281cb9988db64496 | /hackerrank/algorithms/palindrome_index.py | 27971663d828d0f6d274337ee108264f69d58072 | [] | no_license | https://github.com/ncmiller/JobPrep | a256ea458e8878303e4763f95fa1280424b197d1 | 6db2156d209bbb833fbf01f997ca4e15f7243d8a | refs/heads/master | 2016-04-22T09:05:19.004921 | 2016-04-13T16:35:14 | 2016-04-13T16:35:14 | 52,219,664 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | T = int(raw_input())
for t in xrange(T):
s = raw_input()
l = len(s)
front = 0
back = l-1
deleted = 0
    delete_idx = -1
    # first pass: at a mismatch, try deleting the character on the FRONT side
    while front < back:
if s[front] != s[back]:
deleted += 1
delete_idx = front
if deleted >= 2:
break
back += 1
front += 1
back -= 1
if deleted <= 1:
print delete_idx
continue
front = 0
back = l-1
deleted = 0
    delete_idx = -1
    # second pass: at a mismatch, try deleting the character on the BACK side instead
    while front < back:
if s[front] != s[back]:
deleted += 1
delete_idx = back
if deleted >= 2:
break
front -= 1
front += 1
back -= 1
if deleted <= 1:
print delete_idx
else:
print "-1" | UTF-8 | Python | false | false | 805 | py | 44 | palindrome_index.py | 39 | 0.403727 | 0.37764 | 0 | 39 | 19.666667 | 31 |
deepenupreti/Phonenumbers | 13,993,003,454,266 | 8ecc13f5739377f888fe75088f595153216a06fa | 05acb6cf79bb779fdb9b870b02f96d672ddf3b44 | /program.py | 77ad9a5e6b86d56488f1906207a36c2f3a0b4588 | [] | no_license | https://github.com/deepenupreti/Phonenumbers | 73f24ed6453707c52b6e349c4bd229ec48e87737 | 2db85b66756ec3d18d85a51ec1e2cecbbf30c013 | refs/heads/master | 2023-03-13T06:34:40.721805 | 2021-03-02T12:51:26 | 2021-03-02T12:51:26 | 343,773,387 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import phonenumbers, xlrd
from phonenumbers import geocoder, carrier
def error_handling_func(phone_num):
if (phonenumbers.is_valid_number(phone_num) == True) and (phonenumbers.is_possible_number(phone_num) == True):
return 1
else:
return 0
def single_phonenumber(phone_num):
if phone_num[0] == '+':
phone_num = phonenumbers.parse(phone_num, None)
elif phone_num[0] == '0':
phone_num = phonenumbers.parse(phone_num, 'NP')
return phone_num
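# Illustrative behavior (numbers made up): '+97714233750' is parsed with no default region,
# '014233750' is parsed with region 'NP' (Nepal); any other prefix is returned unparsed.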
def info_db(phone_num):
phone_num = phonenumbers.format_number(phone_num, phonenumbers.PhoneNumberFormat.NATIONAL)
file_location = "C:\\Users\\malcolmjohn\\Desktop\\advanced python\\Projects\\phone number\\landline.xlsx"
workbook = xlrd.open_workbook(file_location)
sheet = workbook.sheet_by_index(0)
for i in range(sheet.nrows):
if phone_num[1:3] == sheet.cell_value(i,1):
district = sheet.cell_value(i,0)
break
return district
def display_phonenumber(phone_num, district):
print phone_num
print "Country:{}, District:{}".format(repr(geocoder.description_for_number(phone_num, 'en')), district)
print "Network used:{}".format(repr(carrier.name_for_number(phone_num, 'en')))
def main():
phone_num = raw_input("Enter your phonenumber:")
phone_num = single_phonenumber(phone_num)
error_value = error_handling_func(phone_num)
if error_value == 1:
district = info_db(phone_num)
display_phonenumber(phone_num, district)
else:
print "Phone_number is not valid"
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,666 | py | 4 | program.py | 4 | 0.636855 | 0.630252 | 0 | 44 | 36.704545 | 113 |
KOOKOKOK/rt | 4,707,284,197,979 | 0aefa15e6083ff8916e27ce17d846f8406c9de07 | f9a5d4bfa760d5a725878cd1306abf525c209688 | /runtime_tools/inference.py | 814611937f9796105be4071fea679c98b75fc62d | [] | no_license | https://github.com/KOOKOKOK/rt | 7b505be4582eb78616f0e4d97c8049e5d9a2461f | 94ea571ade4c9a66157cad460a7b67769f3f1a3d | refs/heads/main | 2023-02-18T06:19:07.913799 | 2021-01-14T13:11:10 | 2021-01-14T13:11:10 | 329,618,513 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import cv2
from rt.utils import common
from rt.utils.common import load_and_resize,shuffle_and_normalize
import torch
import tensorrt as trt
def inference(model,img_path):
model.eval()
img = cv2.imread(img_path)
    height, width = img.shape[:2]  # original frame size (not used further here)
image_raw, image = load_and_resize(img_path, (320, 320))
image = torch.tensor(shuffle_and_normalize(image))
with torch.no_grad():
start = time.time()
results = model(image)
end = time.time()
print('Torch Infer time: %s Seconds' % (end - start))
return results
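# Usage sketch (illustrative; assumes a trained torch.nn.Module and a local test image):
# results = inference(model, 'testdata/bear1.jpg')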
def inference_trt(engine_file_path ,input_image_path,h=320,w=320):
# engine_file_path = 'checkpoints/nanodet_m.trt'
# input_image_path = 'testdata/bear1.jpg'
TRT_LOGGER = trt.Logger()
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
engine_trt = runtime.deserialize_cuda_engine(f.read())
image_raw, image = load_and_resize(input_image_path, (h, w))
image = shuffle_and_normalize(image)
with engine_trt as engine, engine.create_execution_context() as context:
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
# Do inference
print('Running inference on image {}...'.format(input_image_path))
inputs[0].host = image
start = time.time()
trt_outputs = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs,
stream=stream)
end = time.time()
print('RT Infer time: %s Seconds' % (end - start))
return trt_outputs | UTF-8 | Python | false | false | 1,667 | py | 9 | inference.py | 7 | 0.620276 | 0.609478 | 0 | 47 | 34.489362 | 108 |
h2oai/h2o-3 | 10,797,547,794,201 | 1054e2e17bae7a781b80c3edea0209f9b0b6205e | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_unique.py | c1d04b69159ce1c0253da82fad1e84c1e855ceaf | [
"Apache-2.0"
] | permissive | https://github.com/h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | false | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | 2023-09-13T16:15:03 | 2023-09-14T18:05:38 | 619,600 | 6,476 | 1,997 | 2,722 | Jupyter Notebook | false | false | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
import numpy as np
from h2o.frame import H2OFrame
def h2o_H2OFrame_unique():
"""
Python API test: h2o.frame.H2OFrame.unique()
"""
python_lists = np.random.randint(-5,5, (100, 1))
h2oframe = h2o.H2OFrame(python_obj=python_lists)
newFrame = h2oframe.unique()
allLevels = h2oframe.asfactor().levels()[0]
assert_is_type(newFrame, H2OFrame) # check return type
assert len(allLevels)==newFrame.nrow, "h2o.H2OFrame.unique command is not working." # check shape
newFrame = newFrame.asfactor() # change to enum to make sure elements are string type
for rowIndex in range(newFrame.nrow): # check values
        assert newFrame[rowIndex, 0] in allLevels, "h2o.H2OFrame.unique command is not working." # check values
pyunit_utils.standalone_test(h2o_H2OFrame_unique)
| UTF-8 | Python | false | false | 944 | py | 6,198 | pyunit_h2oH2OFrame_unique.py | 5,451 | 0.707627 | 0.676907 | 0 | 26 | 35.307692 | 110 |
wuhongyi/DjangoNote | 13,950,053,795,317 | af4178a577a95b7c3b6af3f7f694777dc00f1466 | 65cc6a8877896ef69dd03d7b5eee5bed56e5371f | /example/attpc-daq/web/attpcdaq/daq/migrations/0013_auto_20160613_2126.py | 7bbc6b57091023bb7220c2a4d55de96249baa7b8 | [] | no_license | https://github.com/wuhongyi/DjangoNote | 34bdb9e82fc379e19b1df0bd7c90e504fa70a40d | 81ad949ff895feda8131d8bdf5fa1439f962ae37 | refs/heads/master | 2020-05-02T17:54:12.270297 | 2019-05-22T14:37:32 | 2019-05-22T14:37:32 | 178,112,720 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-13 21:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('daq', '0012_experiment_user'),
]
operations = [
migrations.RenameField(
model_name='datasource',
old_name='config',
new_name='selected_config',
),
migrations.RemoveField(
model_name='configid',
name='ecc_server',
),
migrations.RemoveField(
model_name='datasource',
name='ecc_server',
),
migrations.AddField(
model_name='configid',
name='data_source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='daq.DataSource'),
),
migrations.AddField(
model_name='datasource',
name='ecc_ip_address',
field=models.GenericIPAddressField(default='127.0.0.1', verbose_name='ECC server IP address'),
preserve_default=False,
),
migrations.AddField(
model_name='datasource',
name='ecc_port',
field=models.PositiveIntegerField(default=8083, verbose_name='ECC server port'),
),
migrations.AddField(
model_name='datasource',
name='is_transitioning',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='ECCServer',
),
]
| UTF-8 | Python | false | false | 1,622 | py | 246 | 0013_auto_20160613_2126.py | 114 | 0.564735 | 0.546239 | 0 | 53 | 29.603774 | 125 |
felipemanfrin/Python-Zero-ao-Zeno | 5,875,515,261,355 | 40f2a03fdb3cc15ee9bff6b2e9fa3830c34569f7 | d22a2fbb9adb82644c5665242661bad172550552 | /venv/ex76.py | 36b76c94d213f8437e8b41e09a63837d8426d53e | [] | no_license | https://github.com/felipemanfrin/Python-Zero-ao-Zeno | e98ba3e4b974e88801b8bc947f461b125bc665b8 | d6d08aa17071f77170bbd105452b0d05586131c8 | refs/heads/master | 2022-07-29T19:38:41.729178 | 2020-05-25T01:02:18 | 2020-05-25T01:02:18 | 265,356,280 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x = tuple
x = ()  # rebind x to an empty tuple: the line above bound the tuple *type*, not a tuple instance
while True:
    nome = str(input('Digite o nome do produto: '))
    valor = float(input('Digite o valor do produto: '))
    x += (nome, valor)  # tuples are immutable, so grow by concatenating a new (name, price) pair
    continue1 = str(input('Voce quer continuar[s/n]? ')).upper().strip()
    if continue1 == 'N':  # was 'SN', which a single-letter answer can never match
        break
print(x) | UTF-8 | Python | false | false | 325 | py | 127 | ex76.py | 126 | 0.553846 | 0.541538 | 0 | 12 | 26.166667 | 74 |
xunzhang/soapy | 1,889,785,625,162 | d1405108c2e5ee8ec07d05a7a3a4b7786e6a0c48 | 023aaa7995d33389b9bb7e821114316fd0f11f76 | /gen_html.py | e59e483ce4353853bf95117fcd0c3cafe37ac41c | [] | no_license | https://github.com/xunzhang/soapy | 52e3e309d821b04ea46c94ef89fc30b4a6a32d20 | 48c0fed26594d92917c47c5ce238d7f082dfd862 | refs/heads/master | 2021-01-18T19:30:40.030193 | 2017-01-23T16:07:41 | 2017-01-23T16:07:41 | 63,696,212 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
import sys
try:
from optparse import OptionParser
except:
print 'optparse module required'
exit(0)
from soapy.fill import filler
#from soapy.conf import load_cfg
#from soapy.fill import fill_header, fill_footer
if __name__ == '__main__':
optpar = OptionParser()
optpar.add_option('-o', '--output',
action = 'store', type = 'string',
dest = 'output', help = 'output html file')
optpar.add_option('-c', '--config',
action = 'store', type = 'string',
dest = 'config', help = 'config file to specify code source and etc, must be json format')
(options, args) = optpar.parse_args()
if (not options.output) or (not options.config):
print 'Incorrect usage!\nSee usage with "python gen.py --help".'
sys.exit(0)
    elif not options.output.endswith('.html'):
        print 'output file must end with .html'
        sys.exit(0)
    elif not options.config.endswith('.json'):
        print 'config file must be json format'
        sys.exit(0)
    else:
        print 'output', options.output
        handler = filler(options.output, options.config)
        handler.fill()
        #print load_cfg(options.config)
        #fill_header()
| UTF-8 | Python | false | false | 1,281 | py | 7 | gen_html.py | 5 | 0.596409 | 0.593286 | 0 | 37 | 33.594595 | 110 |
shichao1986/daily_exercise | 14,413,910,292,577 | 98bd65505c2c7449d8e8068600e0192d1e1f7f73 | ce990caf3e29ae79796bd2675d2efa79567a711c | /logging_demo_a.py | 8d418f508f92a526f01df43d4d9d352f9db71459 | [] | no_license | https://github.com/shichao1986/daily_exercise | 8531ad046178929d30c1030a2ac0e00aacbda9d6 | eeacbac58045339141296bd68d4a82796861fd1d | refs/heads/master | 2020-04-10T16:29:12.277994 | 2019-12-08T15:53:47 | 2019-12-08T15:53:47 | 161,146,833 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Time: 2019/10/10 16:43
Author: shichao
Change log:
Important notes:
"""
import logging
LOGGER = logging.getLogger(__name__)
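# Handler configuration is left to the importing script; with no configuration the root
# logger drops debug/info records, so call e.g. logging.basicConfig(level=logging.DEBUG) first.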
def test_logger_log_a():
LOGGER.debug('debug message:{}'.format(__name__))
LOGGER.info('info message:{}'.format(__name__))
LOGGER.warning('warning message:{}'.format(__name__))
LOGGER.error('error message:{}'.format(__name__))
LOGGER.critical('critical message:{}'.format(__name__)) | UTF-8 | Python | false | false | 460 | py | 27 | logging_demo_a.py | 23 | 0.619266 | 0.58945 | 0 | 20 | 20.85 | 59 |
DunClickMeBro/teamcornertable | 10,385,230,971,774 | 8f71fe79b7f96fe2e549df538c6fa8b3b453a861 | e38f58f3922cb43cc164f5a74e49bee4b0ab91af | /Testing Folder/Bottom Frame.py | 60425c0d26b9ad2f4d50f60a2d8b420bc4e9a983 | [] | no_license | https://github.com/DunClickMeBro/teamcornertable | c61a08c3817d890e9731c6de624d87e23fedfa53 | b178f4b972429a6d816d27f2e0d6cac61edcee21 | refs/heads/master | 2016-06-01T09:12:12.925490 | 2015-04-16T04:51:15 | 2015-04-16T04:51:15 | 29,829,309 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Michael'
# currently not used since I realized I had to change it for each page
# create another frame (bottom) to hold the Enter and Done buttons
try:
    from Tkinter import Frame, Button, SUNKEN  # Python 2
except ImportError:
    from tkinter import Frame, Button, SUNKEN  # Python 3
# 'parent' and the 'show' callback are assumed to be defined by the page using this snippet
bottomframe = Frame(parent, bd=2, relief=SUNKEN)
bottomframe.grid(row=7, column=0, sticky="e", columnspan=20)
enterbutton = Button(bottomframe, text='Enter')
enterbutton.grid(row=0, column=0, sticky="e")
donebutton = Button(bottomframe, text='Done', command=show)
donebutton.grid(row=0, column=1, sticky="e")
| UTF-8 | Python | false | false | 473 | py | 31 | Bottom Frame.py | 30 | 0.735729 | 0.716702 | 0 | 15 | 30.533333 | 70 |
lucasugarcia/email-dispatcher | 14,147,622,314,021 | 719215056c838885538d432b057ca56e080c686a | 49c49578b659f24df821904d6b27e61ccddd9e64 | /dispatcher.py | d29858d1627ec0be54e6318137ca585aab344e43 | [] | no_license | https://github.com/lucasugarcia/email-dispatcher | 684c91e699a004df69112c087c2e7fd6dd48134c | f6a002a3e200f714ddc986ce8b73d38b94fb6181 | refs/heads/master | 2020-07-09T03:29:43.529473 | 2019-08-22T19:59:51 | 2019-08-22T19:59:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import smtplib
emails_destino = ["email1@email", "email2@email"]
email_origem = "seuemail@email"
password = "asenhadoseuemail"
assunto = "Assunto do e-mail"
de = "Seu Nome <seuemail@email>"
nome_arquivo = "CaminhoENomeDoArquivo.extensao"
texto_email = "Texto do e-mail"
# build and send one message per recipient
for x in range(0, len(emails_destino)):
msg = MIMEMultipart()
msg["From"] = de
msg["To"] = emails_destino[x]
msg["Subject"] = assunto
    # attach the file (base64-encoded) to the message
    anexo = MIMEBase("application", "extensaoDoArquivo", filename = nome_arquivo)
anexo.set_payload(open(nome_arquivo, "rb").read())
encoders.encode_base64(anexo)
anexo.add_header("Content-Disposition", "attachment", filename = nome_arquivo)
msg.attach(anexo)
msg.attach(MIMEText(texto_email, "plain"))
server = smtplib.SMTP("smtpDoSeuProvedor:porta")
server.starttls()
server.login(email_origem, password)
server.sendmail(email_origem, msg["To"], msg.as_string())
server.quit() | UTF-8 | Python | false | false | 1,098 | py | 1 | dispatcher.py | 1 | 0.708561 | 0.704007 | 0 | 39 | 27.179487 | 82 |
Shehan29/MagicHand | 9,620,726,771,443 | 6cfef3f97b47f33df1b621c18184be77df9e1a18 | b42e07a20e7140db0f46d4a09ee8e9742be3a336 | /tracking/rotations.py | 7565941c710354552b4e276b7a4e37a41ee4efc6 | [] | no_license | https://github.com/Shehan29/MagicHand | b8ca7875f17325b4144b6c34975a1dd37cbc4868 | 8fccac033771fdf2f53fb41536b7a9d3136ae0a7 | refs/heads/master | 2021-03-30T18:22:12.131980 | 2018-04-30T04:02:46 | 2018-04-30T04:02:46 | 123,230,141 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import sin, cos, radians
import cv2
def rotate_image(image, angle):
if angle == 0: return image
height, width = image.shape[:2]
rot_mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 0.9)
result = cv2.warpAffine(image, rot_mat, (width, height), flags=cv2.INTER_LINEAR)
return result
def rotate_point(pos, img, angle):
if angle == 0: return pos
x = pos[0] - img.shape[1]*0.4
y = pos[1] - img.shape[0]*0.4
newx = x*cos(radians(angle)) + y*sin(radians(angle)) + img.shape[1]*0.4
newy = -x*sin(radians(angle)) + y*cos(radians(angle)) + img.shape[0]*0.4
return int(newx), int(newy), pos[2], pos[3]
# rotationValues = [val for pair in zip([x for x in range(21)], [-x for x in range(21)]) for val in pair]
# rotationValues.remove(0)
rotationValues = [0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10, 11, -11, 12, -12, 13, -13, 14, -14, 15, -15, 16, -16, 17, -17, 18, -18, 19, -19, 20, -20]
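# The hard-coded list matches the commented-out generator above: angles in order of
# increasing magnitude with alternating sign, so small rotations are tried first.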
| UTF-8 | Python | false | false | 973 | py | 5 | rotations.py | 4 | 0.600206 | 0.501542 | 0 | 21 | 45.333333 | 182 |
gberriz/datarail-2.0 | 6,940,667,156,850 | 0ab631010330d3737e873612a694157da3a6f2d5 | 8fdcf5600565d44931013553a3edf1b41047cb3d | /src/keymapper.py | 391fa56bee2afec1dcfba1c2c5dab62d20085458 | [] | no_license | https://github.com/gberriz/datarail-2.0 | b310720c4f3054f3078a2e7cd892d184924324e4 | 4a6d132f2faa1e2f0e16360a9aefa6b5cd0c5a6b | refs/heads/master | 2021-01-10T11:21:20.763195 | 2012-03-01T21:07:09 | 2012-03-01T21:07:09 | 1,007,001 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from multikeydict import MultiKeyDict as mkd
class SimpleKeyMapper(dict):
__max_ids = 2**32
def __init__(self, seq=None, offset=0,
_id_min=-__max_ids/2, _id_max=__max_ids/2, **kwargs):
self._offset = offset
self._inverse = inv = []
self._len = len(inv)
if seq is None:
id_min = kwargs.pop('id_min', _id_min)
id_max = kwargs.pop('id_max', _id_max)
if kwargs:
raise TypeError('unrecognized keyword(s): %s' %
', '.join(kwargs.keys()))
max_ids = id_max - id_min
if max_ids < 0:
raise ValueError('id_max - id_min must be nonnegative')
if not id_min <= offset <= id_max:
raise ValueError('offset out of range')
self._state = self._offset
def _newid(id_min=id_min, id_max=id_max, max_ids=max_ids):
if self._len >= max_ids:
raise ValueError('no more ids available')
ret = self._state
if ret >= id_max:
assert ret == id_max
self._state = ret = id_min
self._state += 1
            s = ret if ret >= self._offset else ret + max_ids  # was self._max_ids, an attribute that is never defined
assert self._len == s - self._offset
return ret
self.seq = _newid
else:
if 'id_min' in kwargs or 'id_max' in kwargs:
raise TypeError('specifying id_min or id_max is '
'incompatible with specifying seq')
self.seq = lambda: next(seq)
super(SimpleKeyMapper, self).__init__()
def __call__(self, key):
return super(SimpleKeyMapper, self).__getitem__(unicode(key))
def __getitem__(self, i):
ii = self._inverse_index(i)
try:
return self._inverse[ii]
except IndexError:
raise KeyError('mapper index out of range')
def __setitem__(self, i, v):
raise TypeError('read-only access')
def getid(self, key):
try:
ret = self(key)
except KeyError:
key, ret = unicode(key), self.seq()
super(SimpleKeyMapper, self).__setitem__(key, ret)
self._update_inverse(key)
return ret
def todict(self):
return dict(self)
key2idmap = todict
def id2keymap(self):
return dict((v, k) for k, v in self.items())
def _inverse_index(self, i):
if not isinstance(i, int):
raise TypeError('argument must be an integer')
return i - self._offset
def _update_inverse(self, ukey):
inverse = self._inverse
inverse.append(ukey)
self._len = len(inverse)
del __max_ids
class KeyMapper(object):
def __init__(self, arg0, *rest):
if type(arg0) == int:
if len(rest) > 0:
raise TypeError('(only one int argument allowed)')
args = (None for _ in xrange(arg0))
else:
args = (arg0,) + rest
def _getmapper(m):
return (SimpleKeyMapper(m) if m is None
or hasattr(m, '__iter__')
or hasattr(m, '__next__')
else m)
self.mappers = tuple(map(_getmapper, args))
def __call__(self, key):
# NOTE: key must be a tuple (or at least a sequence)
return tuple([mpr(v) for mpr, v in zip(self.mappers, key)])
def __getitem__(self, i):
# NOTE: i must be a tuple (or at least a sequence)
return tuple([mpr[v] for mpr, v in zip(self.mappers, i)])
def __setitem__(self, i, v):
raise TypeError('read-only access')
def getid(self, key):
# NOTE: key must be a tuple (or at least a sequence)
return tuple([mpr.getid(v) for mpr, v in zip(self.mappers, key)])
def todict(self, _seqtype=tuple):
return _seqtype(m.todict() for m in self.mappers)
def key2idmap(self, _seqtype=tuple):
return _seqtype(m.key2idmap() for m in self.mappers)
def id2keymap(self, _seqtype=tuple):
return _seqtype(m.id2keymap() for m in self.mappers)
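

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): KeyMapper turns
    # tuples of arbitrary keys into tuples of integer ids, and back.
    km = KeyMapper(2)                      # two independent SimpleKeyMappers
    assert km.getid(('a', 'x')) == (0, 0)
    assert km.getid(('b', 'x')) == (1, 0)  # 'x' keeps the id it already has
    assert km[(1, 0)] == (u'b', u'x')      # inverse lookup by id tuple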
| UTF-8 | Python | false | false | 4,239 | py | 66 | keymapper.py | 66 | 0.514508 | 0.510026 | 0 | 149 | 27.449664 | 73 |
Nian-Jingqing/GeomSeq | 6,923,487,284,310 | bb457d0c0c0a7cffb96b56d80c77682372ffbc1c | c31ecf2f0714af85900464e3128409296237c184 | /GeomSeq_analyses/0_Create_epochs.py | 80db9fc113c572dea7fcb492b98ffca8508d0777 | [] | no_license | https://github.com/Nian-Jingqing/GeomSeq | 0909443f7f6dd8ac53bac975308452b4225f5e88 | 296b50ade1040d171a5ab0fc1960d396e794bc6b | refs/heads/master | 2023-06-12T01:06:22.280760 | 2021-07-04T18:05:59 | 2021-07-04T18:05:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
===========
0_Create_epochs.py
===========
Script to epoch the data after preprocessing
Author: Fosca Al Roumi <fosca.al.roumi@gmail.com>
"""
from GeomSeq_functions import epoching_funcs
from GeomSeq_analyses import config
# Here choose the identifier of your subjects
subject = config.subjects_list[0]
# epochs data from the primitive part of the experiment
epoching_funcs.compute_epochs(subject, tmin=-0.2, tmax=0.6, decim=1)
# from the sequence part of the experiment
epoching_funcs.compute_epochs(subject, tmin=-0.65, tmax=0.6, decim=1, block_type='sequences')
# from the localizer part of the experiment
epoching_funcs.compute_epochs(subject, tmin=-0.2, tmax=0.6, decim=1, block_type='localizer')
# from the sequence part of the experiment : epoch on the full sequence
epoching_funcs.compute_epochs(subject, tmin=0, tmax=0.433*8, decim=4, block_type='sequences',full_seq_block='full_seq')
# from the sequence part of the experiment : epoch on the full 12 repetitions of the sequences
epoching_funcs.compute_epochs(subject, tmin=0, tmax=0.433*8*12, decim=4, block_type='sequences',full_seq_block='full_block')
| UTF-8 | Python | false | false | 1,126 | py | 16 | 0_Create_epochs.py | 14 | 0.748668 | 0.716696 | 0 | 27 | 40.62963 | 124 |
22lizabeth/Text_Based_Adventure_Game | 9,594,956,965,475 | 07f1e56d78d4b5aadab355d765b5511abe73aebe | c1f5967d6afdbf9dfe3eb5f27bd80be2f0808be1 | /main.py | 1795f6ab5947072ef58d717abaf7d7d8828eae71 | [] | no_license | https://github.com/22lizabeth/Text_Based_Adventure_Game | e6125e50babcf9d8c6fb4c14cadbba7a4e613438 | 402016421b5408d0ca77836302a861f267517425 | refs/heads/master | 2021-01-26T09:13:35.419767 | 2020-02-27T01:00:23 | 2020-02-27T01:00:23 | 243,399,339 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from adventureGame import *
# MAIN
gameObj = AdventureGame()
gameObj.playGame()
print("Goodbye!")
| UTF-8 | Python | false | false | 100 | py | 7 | main.py | 6 | 0.74 | 0.74 | 0 | 7 | 13.285714 | 27 |
DmitryVlaznev/leetcode | 2,224,793,073,453 | 58abed363990acb713ae94307a30384b442567e7 | 078918048099dfa2454cfac2d449ea3d77fbec55 | /849-maximize-distance-to-closest-person.py | a3cd6d758c07fab2f69ecde521ae0ecdc5ad2b89 | [] | no_license | https://github.com/DmitryVlaznev/leetcode | 931784dcc4b465eebda7d22311f5bf5fa879f068 | b2a2afdfc725330545c9a2869fefc7d45ec594bc | refs/heads/master | 2023-06-10T05:42:34.992220 | 2023-06-05T09:54:10 | 2023-06-05T09:54:30 | 241,064,389 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 849. Maximize Distance to Closest Person
# You are given an array representing a row of seats where seats[i] = 1
# represents a person sitting in the ith seat, and seats[i] = 0
# represents that the ith seat is empty (0-indexed).
# There is at least one empty seat, and at least one person sitting.
# Alex wants to sit in the seat such that the distance between him and
# the closest person to him is maximized.
# Return that maximum distance to the closest person.
# Example 1:
# Input: seats = [1,0,0,0,1,0,1]
# Output: 2
# Explanation:
# * If Alex sits in the second open seat (i.e. seats[2]), then the
# closest person has distance 2.
# * If Alex sits in any other open seat, the closest person has distance
# 1.
# * Thus, the maximum distance to the closest person is 2.
# Example 2:
# Input: seats = [1,0,0,0]
# Output: 3
# Explanation:
# * If Alex sits in the last seat (i.e. seats[3]), the closest person is
# 3 seats away.
# * This is the maximum distance possible, so the answer is 3.
# Example 3:
# Input: seats = [0,1]
# Output: 1
# Constraints:
# 2 <= seats.length <= 2 * 10^4
# seats[i] is 0 or 1.
# At least one seat is empty.
# At least one seat is occupied.
import math
from typing import List

from utils import checkValue
class Solution:
def maxDistToClosest(self, seats: List[int]) -> int:
p, max_side_gap, max_int_gap = 0, 0, 0
while not seats[p]:
max_side_gap += 1
p += 1
q = len(seats) - 1
while not seats[q]:
max_side_gap = max(max_side_gap, len(seats) - q)
q -= 1
s = None
while p < q:
if seats[p]:
if s is not None:
max_int_gap = max(max_int_gap, p - s)
s = None
else:
if s is None:
s = p
p += 1
if s is not None:
max_int_gap = max(max_int_gap, p - s)
return max(max_side_gap, math.ceil(max_int_gap / 2))
# print("max_side_gap", max_side_gap)
# print("max_int_gap", max_int_gap)
t = Solution()
checkValue(2, t.maxDistToClosest([1, 0, 0, 0, 1, 0, 1]))
checkValue(2, t.maxDistToClosest([1, 0, 1, 0, 0, 0, 1]))
checkValue(2, t.maxDistToClosest([1, 0, 0, 0, 0, 1, 0, 1]))
checkValue(2, t.maxDistToClosest([1, 0, 1, 0, 0, 0, 0, 1]))
checkValue(3, t.maxDistToClosest([1, 0, 0, 0]))
checkValue(3, t.maxDistToClosest([0, 0, 0, 1]))
checkValue(1, t.maxDistToClosest([0, 1]))
checkValue(1, t.maxDistToClosest([1, 0]))
checkValue(3, t.maxDistToClosest([0, 0, 0, 1, 0, 0]))
checkValue(3, t.maxDistToClosest([0, 0, 1, 0, 0, 0]))
| UTF-8 | Python | false | false | 2,646 | py | 584 | 849-maximize-distance-to-closest-person.py | 578 | 0.595238 | 0.55291 | 0 | 89 | 28.730337 | 72 |
antekarin05/programsko | 2,671,469,708,861 | db1c56121b8af01439c482bfeb0bde9cbb41f392 | 1202df05bd8f5e19593aa44465334dc77e4b5740 | /5/Funkcije.py | dc356ac2e90afeb9b4587b3589ab35c164231bd1 | [] | no_license | https://github.com/antekarin05/programsko | 5f25e9248da59c3e32137eb161fe5a84af5ae5f7 | 487e12a6eb5cfa1d93867ea6f1cdc36ee77814a8 | refs/heads/master | 2022-09-10T21:27:23.961082 | 2020-06-03T12:04:20 | 2020-06-03T12:04:20 | 255,997,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import Likovi
import math
from math import pi
def opseg(lik):
    if isinstance(lik, Likovi.Kruznica):
        return 2 * lik.r * pi
    elif isinstance(lik, Likovi.Kvadrat):
        return 4 * lik.a
def povrsina(lik):
    if isinstance(lik, Likovi.Kruznica):
        return math.pow(lik.r, 2) * pi
    elif isinstance(lik, Likovi.Kvadrat):
        return lik.a**2
if __name__ == "__main__":
print('*** Test Funkcije ***')
print(opseg.__name__)
print(povrsina.__name__) | UTF-8 | Python | false | false | 521 | py | 8 | Funkcije.py | 7 | 0.564299 | 0.556622 | 0 | 20 | 24.15 | 43 |
HoangLong08/Web-shoes | 9,371,618,685,277 | dd3b15ac032dfd86c11a5b99594bb10eba9086d6 | eefeaf895f281a9e89faaed9b4c9549692c7203e | /back-end/bep/controllers/city_controller.py | 630240a4dda0faaab94e07a712209e0aece2aed8 | [] | no_license | https://github.com/HoangLong08/Web-shoes | 180dde94c43140525f95b7cc750809e005fe775c | 267ebaef086febce018fdfe29354e2a5b9f183bb | refs/heads/master | 2023-07-13T18:46:53.914630 | 2021-08-24T09:15:41 | 2021-08-24T09:15:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from configdb.connectdb import connectdb
class City:
def __init__(self) -> None:
pass
    def getAllCity(self):
        sql = "SELECT * FROM `cities`;"
        rows = connectdb.executeQuery(sql)
        result = []
        for row in rows:
            result.append({
                "id": row[0],
                "district": row[1],
                "type": row[2],
            })
        return result
machrisaa/opensafely-job-server | 231,928,244,003 | fb3c366ab03ddc70e8887bb04c45f14cb3c40d85 | 245b30d6afd80ac48700cccc3a97b6f63c1e4a90 | /jobserver/authorization/permissions.py | b7c64edf0363df8c2ddabaa189d15fde4bbc2b7a | [] | no_license | https://github.com/machrisaa/opensafely-job-server | 8e1995ad4f9cca2cbc146d7c687ea5fe63a085a9 | e2dd9b85c896ead390e002c197acd9f263a556e5 | refs/heads/main | 2023-05-01T23:42:24.190138 | 2021-04-28T13:41:51 | 2021-04-28T13:41:51 | 363,146,896 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | cancel_job = "cancel_job"
check_output = "check_output"
invite_org_members = "invite_org_members"
invite_project_members = "invite_project_members"
manage_project_members = "manage_project_members"
manage_project_workspaces = "manage_project_workspaces"
publish_output = "publish_output"
review_project = "review_project"
run_job = "run_job"
| UTF-8 | Python | false | false | 342 | py | 105 | permissions.py | 77 | 0.766082 | 0.766082 | 0 | 9 | 37 | 55 |
Inpurple/Leetcode | 4,672,924,436,898 | 226a9d34289fb09b51c11f897a208186615b47f1 | 6a1975a11de163ce0e6a5f001df41758bea3686b | /11. Container With Most Water/solution_双指针.py | 83f3582b3d4430d43d5f816679eaca78df27f564 | [] | no_license | https://github.com/Inpurple/Leetcode | 7f08e0e500d37913e9244f08ea8f603b3fc1ce88 | df2bcca72fd303100dbcd73d1dfae44467abbb44 | refs/heads/master | 2020-05-20T02:17:08.430557 | 2019-09-22T07:51:28 | 2019-09-22T07:51:28 | 185,327,908 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
思路:
要求容器的容量,取决于 min(height[p1],height[p2])*(p2-p1)
关键问题是 短板,当然也有两板之间的距离,不过短板可以作为移动的判断条件,目的是使 短板更高一些
两个指针分别从首尾向中间缩,哪个板更短,就移动一下但面积只记录更大的面积
为什么短板可以当做移动判断的条件:
如果移动长一点的短板,那么整个面积肯定是变小的,所以要想要面积变大必须移动短一点的短板,有可能通过比较面积的最大值变大
"""
p1=0
p2=len(height)-1
maxarea=0
while abs(p2-p1)>=1:
area=min(height[p2],height[p1])*abs(p2-p1)
maxarea=max(maxarea,area)
if height[p1]> height[p2]:
p2=p2-1
else:
p1=p1+1
return maxarea
| UTF-8 | Python | false | false | 1,039 | py | 150 | solution_双指针.py | 146 | 0.546763 | 0.51223 | 0 | 28 | 23.75 | 59 |
qianrenjian/NLP-text-summarization | 14,431,090,135,008 | c009d864f6ca8c435681ec5f102a3eee83e7eafa | 87dc823c6b96aed6f9e0f3883db748fdf6d1414d | /src/extractive_summarizer.py | 3546b5ef4bc100b938a831e39e135c9b9e6b5d62 | [] | no_license | https://github.com/qianrenjian/NLP-text-summarization | cc0475ecb0255a41bc2506af00d5174b4344f4dc | e6a9647ea1c3d148664d8edad797a108c99d3eb2 | refs/heads/master | 2020-07-11T02:18:43.615924 | 2019-08-23T23:33:36 | 2019-08-23T23:33:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import nltk for text preprocessing, tokenizing
# import beautifulsoup to parse URL input option
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import pandas as pd
import bs4 as BeautifulSoup
import urllib.request
# placeholder pass-through: hand back the raw article text to be summarized
def summarize_text(doc):
    return doc
def d_table(article_text):
# using nltk stopwords; sklearn stopwords also an option
stop_words = set(stopwords.words("english"))
# convert article words into tokens
words = word_tokenize(article_text)
    # get root words
    stemmer = PorterStemmer()
    # make and add words to frequency dict
    frequent = dict()
    for word in words:
        word = stemmer.stem(word)
if word in stop_words:
continue
if word in frequent:
frequent[word] += 1
else:
frequent[word] = 1
return frequent
# code for calculating word probabilities and which ones are included in summary
def sent_scores(sentences, frequent):
    sent_probs = dict()
    # each sentence is keyed by its first 9 characters
    for s in sentences:
        s_count_no_stop = 0
        for word in frequent:
            if word in s.lower():
                s_count_no_stop += 1
                if s[:9] in sent_probs:
                    sent_probs[s[:9]] += frequent[word]
                else:
                    sent_probs[s[:9]] = frequent[word]
        if s_count_no_stop > 0:
            # normalize by matched-word count so long sentences are not favored
            sent_probs[s[:9]] = sent_probs[s[:9]] / s_count_no_stop
    return sent_probs
# returns sentence averages
def sent_avg(sent_probs):
    total = 0
    for line in sent_probs:
        total += sent_probs[line]
    return total / len(sent_probs)
# determining, counting sentences most likely to yield best words for summary
def construct_summary(sentences, sent_probs, threshold):
counter = 0
summary = ''
for s in sentences:
if s[:9] in sent_probs and sent_probs[s[:9]] >= (threshold):
summary += " " + s
counter += 1
return summary
def gist(sentences, sent_probs):
    # return the sentence with the highest score
    s_max, k_max = 0, None
    for k, v in sent_probs.items():
        if v > s_max:
            s_max = v
            k_max = k
    for i, s in enumerate(sentences):
        if s[:9] == k_max:
            return sentences[i]
    return ''
# the summary
# threshold parameter can be tuned to preference
def summarize(content):
# frequency dict
frequent = d_table(content)
# tokenizing sentences
sentences = sent_tokenize(content)
# scoring words via sentences
sentence_score = sent_scores(sentences, frequent)
# establishing threshold
threshold = sent_avg(sentence_score)
    return construct_summary(sentences, sentence_score, 1.5 * threshold)
if __name__ == '__main__':
    # pandas.read_csv has no `columns` keyword; usecols selects the two columns
    text = pd.read_csv('1st_1000_2_cols.csv', usecols=['headline', 'text'])
    X = text['text']
    y = text['headline']
    content = summarize_text(X.iloc[0])
    reference_summary = y.iloc[0]
    system_summary = summarize(content)
    print(content)
    print(reference_summary)
    print(system_summary)
| UTF-8 | Python | false | false | 3,307 | py | 3 | extractive_summarizer.py | 1 | 0.608406 | 0.599637 | 0 | 133 | 23.827068 | 81 |
Hurin0/public_transport_wro | 12,987,981,141,362 | e1c9722d2983092b80b59df4603fa6eeef853e86 | 5a0656e1d358aa0be6b6c5f3e895e34a55b76d18 | /cities.py | 45f1069a31ecdfeef598fb3d167a1025e7752448 | [] | no_license | https://github.com/Hurin0/public_transport_wro | cc1908b9994920dd385cf53b80b8f491a81921c7 | a26beda6dde52d5194cc50b09ff37dbca26d8ef3 | refs/heads/main | 2023-05-06T13:20:13.044224 | 2021-06-01T09:48:32 | 2021-06-01T09:48:32 | 372,461,791 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ..app import db
class City(db.Model):
__tablename__ = "cities1"
city_id = db.Column(db.Integer, nullable=False, primary_key=True)
city_name = db.Column(db.String(50), nullable=False, unique=True)
routes = db.relationship('Routes', backref='city', lazy='dynamic')
def __repr__(self):
return f"{self.city_id}, {self.city_name}"
| UTF-8 | Python | false | false | 364 | py | 28 | cities.py | 25 | 0.642857 | 0.634615 | 0 | 12 | 29.25 | 70 |
brujua/Perceptron | 8,933,531,983,361 | 8ae6f4ea5f57c854b955a53d1f1dda9a0b9729b4 | f2a131235269920d817296b85db07b62ffe74051 | /perceptron.py | ab1deb3e66f3f7f7980b976e6f807b2e3979e22e | [] | no_license | https://github.com/brujua/Perceptron | f7a36954d6382e2884161b964ce1e49e5db28809 | 2cd583b64f943c6e0d69a216f67ddbf7f5badf0c | refs/heads/master | 2020-05-20T19:52:46.080556 | 2019-05-15T22:34:39 | 2019-05-15T22:34:39 | 185,732,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import numpy as np
from random import randint
from typing import List
import matplotlib.pyplot as plt
DIMENSION = 2
ERROR_ARGS = "Invalid Argument. \nUsage: python " + sys.argv[0] + " <data-set-file> \
(tab separated, each data point in one line)"
ERROR_NOT_CONVERGED = "Error, training did not converged, the training data must be non linearly separable"
PLOT_TITLE = "Perceptron"
MAX_EPOCHS = 1000
LEARNING_RATE = 0.1
_weights = []
def main(*args):
file_name = args[0]
init_weights()
training_points = parse_training_data(file_name)
trained = train(training_points)
if trained:
draw_plot(training_points, _weights)
else:
print(ERROR_NOT_CONVERGED)
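
# Perceptron learning rule: for every misclassified point, nudge each weight by
# +/- LEARNING_RATE * x_i (and the bias by +/- LEARNING_RATE), repeating until a
# full epoch passes with no errors or MAX_EPOCHS is reached.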
def train(training_points: List) -> bool:
epoch = 0
finished = False
while not finished and epoch < MAX_EPOCHS:
finished = True
epoch += 1
for point in training_points:
result = evaluate(point[:DIMENSION])
desired_result = point[DIMENSION]
if result != desired_result:
finished = False
if result < desired_result:
for i in range(0, DIMENSION):
_weights[i] += point[i] * LEARNING_RATE
_weights[DIMENSION] += LEARNING_RATE # Bias
else: # result is > than desired
for i in range(0, DIMENSION):
_weights[i] -= point[i] * LEARNING_RATE
_weights[DIMENSION] -= LEARNING_RATE # Bias
return finished
def evaluate(input_: List) -> int:
input_.append(1)
if np.dot(input_, _weights) > 0:
return 1
else:
return 0
def init_weights():
for _ in range(0, DIMENSION+1):
_weights.append(randint(-5, 5))
def parse_training_data(file_name: str) -> List:
training_points = []
with open(file_name) as file:
for line in file.readlines():
data = line.split("\t")
if len(data) is (DIMENSION + 1):
point = []
for i in range(0, len(data)):
point.append(int(data[i]))
training_points.append(point)
return training_points
def draw_plot(data_points: List, weights: List):
plt.figure(figsize=(10, 6))
plt.grid(True)
for point in data_points:
plt.plot(point[0], point[1], 'ro' if (point[2] == 1.0) else 'bo')
slope = -(weights[2] / weights[1]) / (weights[2] / weights[0])
intercept = -weights[2] / weights[1]
_draw_line(slope, intercept)
plt.title(PLOT_TITLE)
plt.show()
def _draw_line(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--')
if __name__ == '__main__':
    if len(sys.argv) != 2:
print(ERROR_ARGS)
else:
main(*sys.argv[1:])
| UTF-8 | Python | false | false | 2,943 | py | 1 | perceptron.py | 1 | 0.573904 | 0.560652 | 0 | 102 | 27.852941 | 107 |
aalramez16/cat-or-dog-image-recognition-project | 15,393,162,831,702 | 57f1e7cd476388ca1b1939d2f9bef9f1ae146a96 | 33a23392894397ca764a2eb3c488843cbd97b9f6 | /test2.py | 9148b567f7f1efdbb4f5cfef30578d26b80e1ac5 | [] | no_license | https://github.com/aalramez16/cat-or-dog-image-recognition-project | a1e1c1995dcc1c429317f702514889e0e68f703d | 051bae3bbec491739f3c6b66e89c22d53066b429 | refs/heads/master | 2020-12-15T07:52:37.562452 | 2020-01-20T06:52:00 | 2020-01-20T06:52:00 | 235,036,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 21:38:00 2019
@author: deser
"""
'''
Portions of this code are referenced from the following links:
https://medium.com/nybles/create-your-first-image-recognition-classifier-using-cnn-keras-and-tensorflow-backend-6eaab98d14dd
https://keras.io/visualization/
'''
import keras
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import numpy as np
import glob
from keras.preprocessing import image
print('test 1')
keras.callbacks.Callback()
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import model_from_json
#from keras import Model
print('test 2')
weights_path = "model.h5"
model = load_model('model.h5')
model.load_weights('model.h5')
train_datagen = ImageDataGenerator(
rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True
)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory(
r'dataset/trainingset',
target_size = (64,64),
batch_size = 32,
class_mode = 'binary'
)
testlista = glob.glob('hand-drawn/*.jpg')
testlistb = glob.glob('hand-drawn/*.png')
testlist = testlista+testlistb
print('test 3')
comparelist = []
for i in range(len(testlist)):
test_image = image.load_img(testlist[i], target_size = (64, 64))
    test_image = image.img_to_array(test_image)  # "=", not "-": the original line was a no-op
test_image = np.expand_dims(test_image, axis = 0)
result = model.predict(test_image)
training_set.class_indices
    if result[0][0] >= 0.5:
prediction = 'dog'
else:
prediction = 'cat'
if i < 9:
comparelist.append(['cat',prediction])
else:
comparelist.append(['dog',prediction])
print(comparelist)
numcorrect = 0
for i in range(len(comparelist)):
if(comparelist[i][0] == comparelist[i][1]):
numcorrect = numcorrect + 1
print(numcorrect)
print(len(comparelist))
print(numcorrect/len(comparelist))
from PIL import Image
im = Image.open(testlist[0])
im.show()
print(comparelist[0])
#r'hand-drawn/cat.png'
'''
from PIL import Image
import tkinter
from tkinter import filedialog
import os
#tkinter.Tk.withdraw()
in_path = filedialog.askopenfilename()
print(in_path)
im = Image.open(in_path)
width,height = img.size()
im.show()
bi = Image.new('RGBA',(width+10,height+(.2*height)),'white')
bi.paste(img,(5,5,(width+5),(height+5)))
bi.show()
'''
# import display (used below); the original IPython Image import was
# immediately shadowed by PIL's Image, so bring in display instead
from IPython.display import display
from PIL import Image
'''
for i in range (18):
print(testlist[i])
'''
for i in range(18):
path=testlist[i]
size = 256, 256
im = Image.open(path)
im.thumbnail(size, Image.ANTIALIAS)
display(im)
print("Prediction: " + comparelist[i][1] + "\nActual: " + comparelist[i][0])
| UTF-8 | Python | false | false | 3,083 | py | 3 | test2.py | 2 | 0.648394 | 0.62277 | 0 | 132 | 21.295455 | 124 |
ArchiveTeam/NewsGrabber-Services | 18,107,582,152,840 | 44ac99735cbedefd7f4d5e20def24da184c683f4 | 83e2fe88d4537403ee710b631d7d3e8c65415092 | /services/web__alger_info_com.py | f77bda3d49317dcf38a32a054fde6ef193ffa61f | [] | no_license | https://github.com/ArchiveTeam/NewsGrabber-Services | b01f560b18bd5395673132321c16bcf7602608f1 | 2d52eb06a6ca767f4b1d1e623505fa427b6af459 | refs/heads/master | 2020-01-23T21:40:57.220775 | 2019-07-01T09:56:38 | 2019-07-01T09:56:38 | 74,690,301 | 3 | 8 | null | false | 2019-06-03T19:49:47 | 2016-11-24T16:41:59 | 2019-06-03T19:49:36 | 2019-06-03T19:49:46 | 1,460 | 3 | 5 | 0 | Python | false | false | refresh = 86400
version = 20161106.01
urls = ['http://www.alger-info.com/',
'https://feeds.feedburner.com/alger-info-articles?format=xml']
regex = [r'^https?:\/\/[^\/]*alger-info\.com', r'^https?:\/\/[^\/]*feedburner\.com']
videoregex = []
liveregex = [] | UTF-8 | Python | false | false | 256 | py | 659 | web__alger_info_com.py | 659 | 0.632813 | 0.574219 | 0 | 8 | 31.125 | 84 |
nicolasessisbreton/fython | 15,298,673,519,691 | b9717a71b2a8df9bbc302d434eef1f0679e482b4 | febb7a4b889c2f40637e2b688eb770cf0809226f | /fython/code/code.py | 2aae0cff5f5cf3ef7a74bd2f0769ac2e5f1109d2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/nicolasessisbreton/fython | 68253552c626640b5efc2a7cea9384c8e0425c08 | 988f5a94cee8b16b0000501a22239195c73424a1 | refs/heads/master | 2021-01-10T07:10:06.793158 | 2017-08-25T17:27:05 | 2017-08-25T17:27:05 | 50,076,320 | 48 | 3 | null | false | 2016-08-21T17:16:12 | 2016-01-21T02:30:31 | 2016-08-20T17:08:04 | 2016-08-21T17:16:11 | 481 | 20 | 2 | 1 | Python | null | null | from fython.unit import *
class Code(Unit):
unit = l.code
def __init__(s, bofx):
s.module = bofx.module
s.lineno = bofx.lineno
s.raw = []
s.lexem = []
s.linecod = []
s ^ bofx
# +: add linecod
def __add__(s, other):
s ^ other
if not other.is_eofx:
s.linecod.append(other)
if s.nb_linecod > 1:
a, b = s.linecod[-2:]
a.next_linecod = b
b.previous_linecod = a
return s
def clone(s, module):
bofx = s.raw[0].clone(module)
c = Code(bofx)
for n in s.raw[1:]:
c + n
return c | UTF-8 | Python | false | false | 535 | py | 534 | code.py | 330 | 0.558879 | 0.551402 | 0 | 39 | 12.74359 | 31 |
unsw-cse-comp3900-9900-21T1/capstone-project-3900-w18a-let-s-chat | 5,488,968,235,213 | 625758506e5f2139d8c40067b9ffe91a56bf77ac | e4e9370bfb5b5ad5c8a2c9fbbc4bbfab6ed1f434 | /ecommerce/store/migrations/0009_remove_product_seller.py | 38348bf10b1cdd8ff5af16ac2fabca63e6a1a8c6 | [] | no_license | https://github.com/unsw-cse-comp3900-9900-21T1/capstone-project-3900-w18a-let-s-chat | efc5c9bede823f33209474475d8047c7c246e1ad | e45cf4414ed62dc679e26804dae45af5e61cbf63 | refs/heads/main | 2023-04-10T10:44:08.592759 | 2021-04-23T09:07:12 | 2021-04-23T09:07:12 | 343,309,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.7 on 2021-03-17 09:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0008_auto_20210317_1938'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='seller',
),
]
| UTF-8 | Python | false | false | 344 | py | 63 | 0009_remove_product_seller.py | 36 | 0.549419 | 0.459302 | 0 | 17 | 18.235294 | 47 |
gray-panda/grayrepo | 1,606,317,791,976 | dc9bb2a9046371f07f41a5538e251f3a62cb81e7 | d132dc16d0dd2e5b313600ff93f57047967f2a54 | /2021_flareon/10_wizardcult/02_spells.py | d843dd4e636208e5e76f89ed7a4c67056d2f52e2 | [] | no_license | https://github.com/gray-panda/grayrepo | 49b6b273cef918d25b0984cdd9d3d8ab090f2b37 | 72d315151c6f1325cb4f98a57eaa5fe904657961 | refs/heads/master | 2021-11-22T06:26:28.432843 | 2021-10-23T06:54:28 | 2021-10-23T06:54:28 | 42,939,804 | 70 | 14 | null | null | null | null | null | null | null | null | null | null | null | null | null | SPELLS = {}
SPELLS["Eldritch Blast"] = 0
SPELLS["Mass Heal"] = 1
SPELLS["Fireball"] = 2
SPELLS["Dominate Monster"] = 3
SPELLS["Detect Magic"] = 4
SPELLS["Stone Shape"] = 5
SPELLS["Clairvoyance"] = 6
SPELLS["Aid"] = 7
SPELLS["Detect Thoughts"] = 8
SPELLS["Shapechange"] = 9
SPELLS["Fire Shield"] = 10
SPELLS["Pass without Trace"] = 11
SPELLS["Antipathy/Sympathy"] = 12
SPELLS["Sleet Storm"] = 13
SPELLS["Dominate Person"] = 14
SPELLS["Tree Stride"] = 15
SPELLS["Passwall"] = 0x10
SPELLS["Shatter"] = 0x11
SPELLS["Giant Insect"] = 0x12
SPELLS["Revivify"] = 0x13
SPELLS["Circle of Death"] = 0x14
SPELLS["Divination"] = 0x15
SPELLS["Comprehend Languages"] = 0x16
SPELLS["Faerie Fire"] = 0x17
SPELLS["True Polymorph"] = 0x18
SPELLS["Searing Smite"] = 0x19
SPELLS["Dimension Door"] = 0x1a
SPELLS["Shield"] = 0x1b
SPELLS["Enlarge/Reduce"] = 0x1c
SPELLS["Illusory Script"] = 0x1d
SPELLS["Resistance"] = 0x1e
SPELLS["Earthquake"] = 0x1f
SPELLS["Contagion"] = 0x20
SPELLS["Bless"] = 0x21
SPELLS["Raise Dead"] = 0x22
SPELLS["Guidance"] = 0x23
SPELLS["Expeditious Retreat"] = 0x24
SPELLS["Grease"] = 0x25
SPELLS["Message"] = 0x26
SPELLS["Elemental Weapon"] = 0x27
SPELLS["Fear"] = 0x28
SPELLS["Clone"] = 0x29
SPELLS["Wrathful Smite"] = 0x2a
SPELLS["Astral Projection"] = 0x2b
SPELLS["Flaming Sphere"] = 0x2c
SPELLS["Disguise Self"] = 0x2d
SPELLS["Maze"] = 0x2e
SPELLS["Slow"] = 0x2f
SPELLS["Polymorph"] = 0x30
SPELLS["Weird"] = 0x31
SPELLS["Finger of Death"] = 0x32
SPELLS["Protection from Energy"] = 0x33
SPELLS["Nondetection"] = 0x34
SPELLS["Animal Friendship"] = 0x35
SPELLS["Spike Growth"] = 0x36
SPELLS["Goodberry"] = 0x37
SPELLS["Calm Emotions"] = 0x38
SPELLS["Antilife Shell"] = 0x39
SPELLS["Cone of Cold"] = 0x3a
SPELLS["Identify"] = 0x3b
SPELLS["Power Word Stun"] = 0x3c
SPELLS["Control Water"] = 0x3d
SPELLS["Thorn Whip"] = 0x3e
SPELLS["Power Word Kill"] = 0x3f
SPELLS["Blink"] = 0x40
SPELLS["Locate Creature"] = 0x41
SPELLS["Command"] = 0x42
SPELLS["Contingency"] = 0x43
SPELLS["Prismatic Wall"] = 0x44
SPELLS["Blade Ward"] = 0x45
SPELLS["Scrying"] = 0x46
SPELLS["Dominate Beast"] = 0x47
SPELLS["Sacred Flame"] = 0x48
SPELLS["Guards and Wards"] = 0x49
SPELLS["Arcane Eye"] = 0x4a
SPELLS["Mirage Arcane"] = 0x4b
SPELLS["Magic Mouth"] = 0x4c
SPELLS["Glyph of Warding"] = 0x4d
SPELLS["Friends"] = 0x4e
SPELLS["Sending"] = 0x4f
SPELLS["Stinking Cloud"] = 0x50
SPELLS["Compulsion"] = 0x51
SPELLS["Dancing Lights"] = 0x52
SPELLS["Darkness"] = 0x53
SPELLS["Invisibility"] = 0x54
SPELLS["Spare the Dying"] = 0x55
SPELLS["Wall of Fire"] = 0x56
SPELLS["Flame Blade"] = 0x57
SPELLS["Feather Fall"] = 0x58
SPELLS["Magic Weapon"] = 0x59
SPELLS["Purify Food and Drink"] = 0x5a
SPELLS["Spirit Guardians"] = 0x5b
SPELLS["Witch Bolt"] = 0x5c
SPELLS["Animate Objects"] = 0x5d
SPELLS["Gaseous Form"] = 0x5e
SPELLS["Lightning Bolt"] = 0x5f
SPELLS["Move Earth"] = 0x60
SPELLS["Disintegrate"] = 0x61
SPELLS["Mass Healing Word"] = 0x62
SPELLS["Meld into Stone"] = 0x63
SPELLS["Hellish Rebuke"] = 0x64
SPELLS["Aura of Life"] = 0x65
SPELLS["Augury"] = 0x66
SPELLS["Conjure Elemental"] = 0x67
SPELLS["Spider Climb"] = 0x68
SPELLS["Hold Person"] = 0x69
SPELLS["Project Image"] = 0x6a
SPELLS["Heroism"] = 0x6b
SPELLS["Crown of Madness"] = 0x6c
SPELLS["Mirror Image"] = 0x6d
SPELLS["Ray of Sickness"] = 0x6e
SPELLS["Bane"] = 0x6f
SPELLS["Wish"] = 0x70
SPELLS["Contact Other Plane"] = 0x71
SPELLS["Etherealness"] = 0x72
SPELLS["Blinding Smite"] = 0x73
SPELLS["Shield of Faith"] = 0x74
SPELLS["Vampiric Touch"] = 0x75
SPELLS["Shillelagh"] = 0x76
SPELLS["Programmed Illusion"] = 0x77
SPELLS["Remove Curse"] = 0x78
SPELLS["Major Image"] = 0x79
SPELLS["Insect Plague"] = 0x7a
SPELLS["Color Spray"] = 0x7b
SPELLS["Prismatic Spray"] = 0x7c
SPELLS["Charm Person"] = 0x7d
SPELLS["Arms of Hadar"] = 0x7e
SPELLS["Dream"] = 0x7f
SPELLS["Dissonant Whispers"] = 0x80
SPELLS["Teleport"] = 0x81
SPELLS["Dispel Magic"] = 0x82
SPELLS["Forbiddance"] = 0x83
SPELLS["Misty Step"] = 0x84
SPELLS["Cloud of Daggers"] = 0x85
SPELLS["Gentle Repose"] = 0x86
SPELLS["Phantasmal Force"] = 0x87
SPELLS["Circle of Power"] = 0x88
SPELLS["Stoneskin"] = 0x89
SPELLS["Sunbeam"] = 0x8a
SPELLS["Fire Storm"] = 0x8b
SPELLS["Gust of Wind"] = 0x8c
SPELLS["Find Steed"] = 0x8d
SPELLS["Druidcraft"] = 0x8e
SPELLS["Confusion"] = 0x8f
SPELLS["Bestow Curse"] = 0x90
SPELLS["Flesh to Stone"] = 0x91
SPELLS["Arcane Gate"] = 0x92
SPELLS["Ray of Frost"] = 0x93
SPELLS["Greater Invisibility"] = 0x94
SPELLS["Regenerate"] = 0x95
SPELLS["Burning Hands"] = 0x96
SPELLS["Wall of Ice"] = 0x97
SPELLS["True Strike"] = 0x98
SPELLS["Silence"] = 0x99
SPELLS["Banishing Smite"] = 0x9a
SPELLS["Commune with Nature"] = 0x9b
SPELLS["Time Stop"] = 0x9c
SPELLS["Conjure Celestial"] = 0x9d
SPELLS["Magic Jar"] = 0x9e
SPELLS["True Seeing"] = 0x9f
SPELLS["Transport via Plants"] = 0xa0
SPELLS["Teleportation Circle"] = 0xa1
SPELLS["Spiritual Weapon"] = 0xa2
SPELLS["Prayer of Healing"] = 0xa3
SPELLS["Awaken"] = 0xa4
SPELLS["Conjure Woodland Beings"] = 0xa5
SPELLS["Cloudkill"] = 0xa6
SPELLS["Imprisonment"] = 0xa7
SPELLS["Branding Smite"] = 0xa8
SPELLS["Ray of Enfeeblement"] = 0xa9
SPELLS["See Invisibility"] = 0xaa
SPELLS["Word of Recall"] = 0xab
SPELLS["Silent Image"] = 0xac
SPELLS["Eyebite"] = 0xad
SPELLS["Cordon of Arrows"] = 0xae
SPELLS["Globe of Invulnerability"] = 0xaf
SPELLS["Wind Walk"] = 0xb0
SPELLS["Continual Flame"] = 0xb1
SPELLS["Power Word Heal"] = 0xb2
SPELLS["Web"] = 0xb3
SPELLS["Protection from Poison"] = 0xb4
SPELLS["Grasping Vine"] = 0xb5
SPELLS["Telekinesis"] = 0xb6
SPELLS["Heat Metal"] = 0xb7
SPELLS["Harm"] = 0xb8
SPELLS["Antimagic Field"] = 0xb9
SPELLS["Jump"] = 0xba
SPELLS["Greater Restoration"] = 0xbb
SPELLS["Chain Lightning"] = 0xbc
SPELLS["Knock"] = 0xbd
SPELLS["Blade Barrier"] = 0xbe
SPELLS["Scorching Ray"] = 0xbf
SPELLS["Zone of Truth"] = 0xc0
SPELLS["Moonbeam"] = 0xc1
SPELLS["Light"] = 0xc2
SPELLS["Magic Circle"] = 0xc3
SPELLS["Hail of Thorns"] = 0xc4
SPELLS["Heal"] = 0xc5
SPELLS["Blur"] = 0xc6
SPELLS["Water Breathing"] = 0xc7
SPELLS["Cure Wounds"] = 0xc8
SPELLS["Enhance Ability"] = 0xc9
SPELLS["Suggestion"] = 0xca
SPELLS["Water Walk"] = 0xcb
SPELLS["Conjure Barrage"] = 0xcc
SPELLS["Arcane Lock"] = 0xcd
SPELLS["Reverse Gravity"] = 0xce
SPELLS["Planar Ally"] = 0xcf
SPELLS["Mass Suggestion"] = 0xd0
SPELLS["False Life"] = 0xd1
SPELLS["Longstrider"] = 0xd2
SPELLS["Detect Evil and Good"] = 0xd3
SPELLS["Guiding Bolt"] = 0xd4
SPELLS["Glibness"] = 0xd5
SPELLS["Speak with Dead"] = 0xd6
SPELLS["Call Lightning"] = 0xd7
SPELLS["Death Ward"] = 0xd8
SPELLS["Create Undead"] = 0xd9
SPELLS["Beacon of Hope"] = 0xda
SPELLS["Alter Self"] = 0xdb
SPELLS["Acid Splash"] = 0xdc
SPELLS["Phantom Steed"] = 0xdd
SPELLS["Planar Binding"] = 0xde
SPELLS["Prestidigitation"] = 0xdf
SPELLS["Animate Dead"] = 0xe0
SPELLS["Mind Blank"] = 0xe1
SPELLS["Sleep"] = 0xe2
SPELLS["Divine Favor"] = 0xe3
SPELLS["Telepathy"] = 0xe4
SPELLS["Vicious Mockery"] = 0xe5
SPELLS["Blight"] = 0xe6
SPELLS["Barkskin"] = 0xe7
SPELLS["Counterspell"] = 0xe8
SPELLS["Conjure Fey"] = 0xe9
SPELLS["Find Traps"] = 0xea
SPELLS["Animal Shapes"] = 0xeb
SPELLS["Speak with Plants"] = 0xec
SPELLS["True Resurrection"] = 0xed
SPELLS["Warding Bond"] = 0xee
SPELLS["Flame Strike"] = 0xef
SPELLS["Healing Word"] = 0xf0
SPELLS["Wall of Thorns"] = 0xf1
SPELLS["Wind Wall"] = 0xf2
SPELLS["Seeming"] = 0xf3
SPELLS["Chill Touch"] = 0xf4
SPELLS["Lesser Restoration"] = 0xf5
SPELLS["Guardian of Faith"] = 0xf6
SPELLS["Meteor Swarm"] = 0xf7
SPELLS["Shocking Grasp"] = 0xf8
SPELLS["Commune"] = 0xf9
SPELLS["Destructive Wave"] = 0xfa
SPELLS["Staggering Smite"] = 0xfb
SPELLS["Create or Destroy Water"] = 0xfc
SPELLS["Sunburst"] = 0xfd
SPELLS["Forcecage"] = 0xfe
SPELLS["Tongues"] = 0xff
import re
import hashlib
def decode_spells(data):
global SPELLS
output = bytearray()
lines = data.split("\n")
    for line in lines:
        if "I cast " not in line:
            continue  # skip blank or irrelevant lines
        pos1 = line.find("I cast ")
        pos2 = line.find(" on the ")
        spellname = line[pos1+len("I cast "):pos2]
        #print(spellname)
        output.append(SPELLS[spellname])
pos3 = line.find("for ")
if pos3 != -1:
pos4 = line.find(" damage", pos3)
diceroll = line[pos3+len("for "):pos4]
dices = diceroll.split("d")
#print(dices)
output.append(int(dices[0]))
output.append(int(dices[1]))
return bytes(output)
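
# Example: a log line of the form
#   "I cast Eldritch Blast on the goblin for 3d7 damage"
# decodes to bytes([0x00, 3, 7]) -- the spell id followed by the two dice values.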
def main():
# First spell fight (Cmdline ls)
with open("spellcast1.txt", "r") as f:
data = f.read()
fight1 = decode_spells(data)
out = bytearray()
for i in range(len(fight1)):
out.append(fight1[i] ^ 0xa2)
print(bytes(out))
# Second spell fight (fileread png)
with open("spellcast2.txt", "r") as f:
data = f.read()
fight2 = decode_spells(data)
print(fight2[:0x200])
print(len(fight2))
with open("encrypted.png", "wb") as f:
f.write(fight2)
print("Spellfight 2 written to file")
# testing data with running 256 bytes
with open("running_example.txt", "r") as f:
data = f.read()
fight3 = decode_spells(data)
print(fight3)
print(len(fight3))
print(hashlib.sha1(fight3).hexdigest())
print("")
ROM1 = [90, 132, 6, 69, 174, 203, 232, 243, 87, 254, 166, 61, 94, 65, 8, 208, 51, 34, 33, 129, 32, 221, 0, 160, 35, 175, 113, 4, 139, 245, 24, 29, 225, 15, 101, 9, 206, 66, 120, 62, 195, 55, 202, 143, 100, 50, 224, 172, 222, 145, 124, 42, 192, 7, 244, 149, 159, 64, 83, 229, 103, 182, 122, 82, 78, 63, 131, 75, 201, 130, 114, 46, 118, 28, 241, 30, 204, 183, 215, 199, 138, 16, 121, 26, 77, 25, 53, 22, 125, 67, 43, 205, 134, 171, 68, 146, 212, 14, 152, 20]
print(len(ROM1))
ROM2 = [185, 155, 167, 36, 27, 60, 226, 58, 211, 240, 253, 79, 119, 209, 163, 12, 72, 128, 106, 218, 189, 216, 71, 91, 250, 150, 11, 236, 207, 73, 217, 17, 127, 177, 39, 231, 197, 178, 99, 230, 40, 54, 179, 93, 251, 220, 168, 112, 37, 246, 176, 156, 165, 95, 184, 57, 228, 133, 169, 252, 19, 2, 81, 48, 242, 105, 255, 116, 191, 89, 181, 70, 23, 194, 88, 97, 153, 235, 164, 158, 137, 238, 108, 239, 162, 144, 115, 140, 84, 188, 109, 219, 44, 214, 227, 161, 141, 80, 247, 52]
print(len(ROM2))
ROM3 = [213, 249, 1, 123, 142, 190, 104, 107, 85, 157, 45, 237, 47, 147, 21, 31, 196, 136, 170, 248, 13, 92, 234, 86, 3, 193, 154, 56, 5, 111, 98, 74, 18, 223, 96, 148, 41, 117, 126, 173, 233, 10, 49, 180, 187, 186, 135, 59, 38, 210, 110, 102, 200, 76, 151, 198]
print(len(ROM3))
ROM4 = [97, 49, 49, 95, 109, 89, 95, 104, 111, 109, 49, 101, 115, 95, 104, 52, 116, 51, 95, 98, 52, 114, 100, 115]
print(len(ROM4))
print("".join([chr(x) for x in ROM4]))
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 10,416 | py | 473 | 02_spells.py | 219 | 0.676459 | 0.549827 | 0 | 330 | 30.563636 | 474 |
Kobold/best_vids | 3,693,671,886,794 | 1cd6b15b89fcd91cedfb211d11dc3e7325d92725 | 898fda7c7f87ea5a512733f6485f6ce35e131c88 | /best_vids.py | 67bef04c0821f35d2d273c01b90a3f683ab76beb | [] | no_license | https://github.com/Kobold/best_vids | 1074dd55b08718214c895fa62c73393538ff9d7d | ad27fe150f9ca8c90dbc1d51c2a3a5b0891da3e7 | refs/heads/master | 2020-06-08T23:27:01.049465 | 2014-06-18T12:37:22 | 2014-06-18T12:37:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import click
import dataset
import httplib2
import json
import os
import sys
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
BEST_VIDS_DIR = os.path.dirname(os.path.realpath(__file__))
db = dataset.connect('sqlite:///' + os.path.join(BEST_VIDS_DIR, 'mydatabase.db'))
class DefaultGroup(click.Group):
"""If the command name is not found, try doing bestof username."""
def get_command(self, ctx, cmd_name):
if cmd_name not in self.commands:
name = ctx.args[0]
channel_table = db['channel']
channel = channel_table.find_one(id=name) or channel_table.find_one(title=name)
if channel is not None:
# The default Context.invoke_subcommand will slice off the first
# argument of `ctx.args`, so we add a spare copy of the channel
# name to the args.
ctx.args.append(name)
return self.commands.get('bestof')
return self.commands.get(cmd_name)
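
# e.g. `best_vids.py somechannel` is treated as `best_vids.py bestof somechannel`
# once that channel exists in the local database.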
@click.group(invoke_without_command=True, cls=DefaultGroup)
@click.pass_context
def cli(ctx):
# Display scraped channels if there's no argument given.
if ctx.invoked_subcommand is None:
ctx.forward(list_)
@cli.command()
@click.argument('username')
def scrape(username):
"""Scrape a given user's videos and ratings."""
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Cloud Console at
# https://cloud.google.com/console.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = os.path.join(BEST_VIDS_DIR, "client_secrets.json")
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Cloud Console
https://cloud.google.com/console
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(BEST_VIDS_DIR, CLIENT_SECRETS_FILE))
# This OAuth 2.0 access scope allows for read-only access to the authenticated
# user's account, but not other types of account access.
YOUTUBE_READONLY_SCOPE = "https://www.googleapis.com/auth/youtube.readonly"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
message=MISSING_CLIENT_SECRETS_MESSAGE,
scope=YOUTUBE_READONLY_SCOPE)
storage = Storage(os.path.join(BEST_VIDS_DIR, "best_vids.py-oauth2.json"))
credentials = storage.get()
if credentials is None or credentials.invalid:
flags = argparser.parse_args([])
credentials = run_flow(flow, storage, flags)
print 'Building service...'
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
# Retrieve the contentDetails part of the channel resource for the
# authenticated user's channel.
print 'Fetching channels...'
channels_response = youtube.channels().list(
forUsername=username,
part="contentDetails,snippet",
).execute()
# Try searching by id
if len(channels_response["items"]) == 0:
print 'Fetching channels by id...'
channels_response = youtube.channels().list(
id=username,
part="contentDetails,snippet",
).execute()
assert len(channels_response["items"]) == 1
channel = channels_response["items"][0]
channel_table = db['channel']
channel_table.upsert(dict(
channel_id=channel['id'],
data=json.dumps(channel),
title=channel['snippet']['title'],
), ['channel_id'])
# From the API response, extract the playlist ID that identifies the list
# of videos uploaded to the authenticated user's channel.
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
# Retrieve the list of videos uploaded to the authenticated user's channel.
print 'Fetching videos in list: ' + uploads_list_id
playlistitems_list_request = youtube.playlistItems().list(
playlistId=uploads_list_id,
part="snippet",
maxResults=50
)
video_table = db['videos']
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
# Print information about each video.
for playlist_item in playlistitems_list_response["items"]:
title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
videos_response = youtube.videos().list(
id=video_id,
part='statistics',
).execute()
assert len(videos_response["items"]) == 1
video = videos_response["items"][0]
like_count = int(video['statistics']['likeCount'])
dislike_count = int(video['statistics']['dislikeCount'])
print "%s (%s) %d, %d" % (title, video_id, like_count, dislike_count)
video_table.upsert(dict(
video_id=video_id,
data=json.dumps(video),
channel_fk=channel['id'],
like_count=like_count,
dislike_count=dislike_count,
title=title,
), ['video_id'])
playlistitems_list_request = youtube.playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
def as_ratio(video):
likes = max(video['like_count'], 1)
total = likes + float(video['dislike_count'])
return likes / total
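
# e.g. 90 likes / 10 dislikes -> 0.9; max(likes, 1) keeps the ratio defined and
# nonzero for videos with no likes.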
@cli.command()
@click.argument('username')
@click.pass_context
def bestof(ctx, username):
"""Best rated videos for a given user."""
channel_table = db['channel']
channel = channel_table.find_one(id=username) or channel_table.find_one(title=username)
if channel is None:
ctx.fail('No such username: %s' % username)
video_table = db['videos']
videos = video_table.find(channel_fk=channel['channel_id'])
template = u"""{:05.3f}\t{:,}\t{:,}\t{} ({:,} views)
https://www.youtube.com/watch?v={}
"""
for v in sorted(videos, key=lambda v: (as_ratio(v), v['like_count'])):
data = json.loads(v['data'])
print template.format(
as_ratio(v),
v['like_count'],
v['dislike_count'],
v['title'],
int(data['statistics']['viewCount']),
v['video_id'])
@cli.command('list')
def list_():
"""List the channels already in the database."""
channel_table = db['channel']
for channel in channel_table.all():
print '{} - {}'.format(channel['id'], channel['title'])
if __name__ == '__main__':
cli()
| UTF-8 | Python | false | false | 7,508 | py | 4 | best_vids.py | 1 | 0.6561 | 0.652238 | 0 | 211 | 34.582938 | 91 |
a303990366/crawler | 18,098,992,193,044 | 018a5a442757a23491e0947c0812404e29c15b35 | 132464813a10c48992094ff59b58ca309dd1ecc3 | /scrapy/instagram.py | 232a59dc9eb818f12a9999c79215e5c37d6ed833 | [] | no_license | https://github.com/a303990366/crawler | 23534e7ce7b73915f63bcaceddfb2ab5695ebf15 | 0cc88a1e68b1bc1a111a111bb69209cb06b99442 | refs/heads/master | 2020-11-24T00:54:23.146232 | 2020-07-14T07:19:41 | 2020-07-14T07:19:41 | 227,891,488 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
import json
import time
from ..items import ArticleItem, CommentItem, RepliesItem
import re
# Flow:
# 1. Visit the target page and extract the account id from the HTML with a regex.
# 2. Query the post endpoint with the account id to get the post JSON, and read
#    end_cursor as the paging parameter for the next query.
# 3. From the post JSON, collect each post's short_code, the key used to query comments.
# 4. Query the comment endpoint with the short_code to get the comment JSON, and read
#    comment_end_cursor as the paging parameter for the next query (comment replies
#    can be collected along the way).
class InstagramSpider(scrapy.Spider):
name='instagram'
website=[
'https://www.instagram.com/typhoon_mi/',
'https://www.instagram.com/cwb_earthquake/',
'https://www.instagram.com/weather_0228/',
'https://www.instagram.com/weather_centre/',
'https://www.instagram.com/weather.taiwan/',
'https://www.instagram.com/professional_meteorology_hksar/',
'https://www.instagram.com/weatherrisk/'
]
custom_settings={
'ITEM_PIPELINES':{
            'tutorial.pipelines.InstagramPipeline':300  # change to your project's pipeline
        }
    }  # the pipeline dedicated to this spider
def change_time(self,item):
t=int(item)
t=time.localtime(t)
return time.strftime("%Y-%m-%d",t)
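    # e.g. change_time(1609459200) -> '2021-01-01' (the exact date depends on
    # the machine's timezone, since time.localtime is used)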
def start_requests(self):
for web in range(0,len(self.website)):
url=self.website[web]
yield scrapy.Request(url,callback=self.parse)
def parse(self,response):
pattern=r'"profilePage_\d+"'
temp=re.search(pattern,response.text).group()
pattern='\d+'
account_id=re.search(pattern,temp).group()
url_origin='https://www.instagram.com/graphql/query/?query_hash=d496eb541e5c789274548bf473cc553e&variables={"id":"account_id","first":50}'
url=url_origin.replace('account_id',account_id)
yield scrapy.Request(url,callback=self.article_parse,meta={'url_origin':url})
def article_parse(self,response):
time.sleep(5)
data=json.loads(response.text)
end_cursor=data['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']
items_a=ArticleItem()
for i in range(0,len(data['data']['user']['edge_owner_to_timeline_media']['edges'])):
post_time=self.change_time(int(data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['taken_at_timestamp']))
pattern=r'\d{4}'
post_time_year=re.search(pattern,str(post_time)).group()
if int(post_time_year)>=2018:
context=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['edge_media_to_caption']['edges'][0]['node']['text']
like_num=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['edge_media_preview_like']['count']
post_id=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['id']
short_code=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['shortcode']
author_id=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']["owner"]['id']
author_name=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']["owner"]['username']
items_a['author_name']=author_name
items_a['author_id']=author_id
items_a['context']=context
items_a['platform_id']=self.name
items_a['post_id']=post_id
items_a['post_time']=post_time
items_a['like_num']=like_num
comment_count=data['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['edge_media_to_comment']['count']
if int(comment_count)>0:
url='https://www.instagram.com/graphql/query/?query_hash=a92f76e852576a71d3d7cef4c033a95e&variables={"shortcode":"short_code","child_comment_count":3,"fetch_comment_count":40,"parent_comment_count":24,"has_threaded_comments":true}'.replace('short_code',short_code)
yield scrapy.Request(url,callback=self.comment_parse,meta={'post_id':post_id,
'short_code':short_code})
yield items_a
if end_cursor !=None:
url=response.meta['url_origin'].split('}')[0]+',"after":"'+end_cursor+'"}'
yield scrapy.Request(url,callback=self.article_parse,meta={'url_origin':response.meta['url_origin']})
def comment_parse(self,response):
items_c=CommentItem()
items_r=RepliesItem()
data=json.loads(response.text)
time.sleep(5)#5
for i in range(0,len(data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'])):
comment_id=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['id']#comment_id
context=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['text']#context
post_time=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['created_at']#post_time
author_name=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['owner']['username']#author_name
like_num=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_liked_by']['count']#like_num
items_c['comment_id']=comment_id
items_c['author_name']=author_name
items_c['context']=context
items_c['post_id']=response.meta['post_id']
items_c['post_time']=self.change_time(post_time)
items_c['platform_id']=self.name
items_c['like_num']=like_num
reply_count=len(data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'])
if reply_count<=10:
for j in range(0,reply_count):
reply_id=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'][j]['node']['id']
context=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'][j]['node']['text']
post_time=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'][j]['node']['created_at']
author_id=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'][j]['node']['owner']['id']
author_name=data['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['edge_threaded_comments']['edges'][j]['node']['owner']['username']
comment_id=items_c['comment_id']
post_id=items_c['post_id']
platform_id=items_c['platform_id']
items_r['reply_id']=reply_id
items_r['author_id']=author_id
items_r['author_name']=author_name
items_r['context']=context
items_r['post_id']=post_id
items_r['post_time']=self.change_time(post_time)
items_r['comment_id']=comment_id
items_r['platform_id']=platform_id
yield items_r
else:
#yield scrapy.Request(url_reply,callback=self.reply_parse)
pass
yield items_c
comment_end_cursor=data['data']['shortcode_media']['edge_media_to_parent_comment']['page_info']['end_cursor']
if comment_end_cursor != None and comment_end_cursor != "{\"bifilter_token\": \"KA8BAgAoAP______AAAAAAAA\"}":
url_origin='https://www.instagram.com/graphql/query/?query_hash=bc3296d1ce80a24b1b6e40b1e72903f5&variables={"shortcode":"short_code","first":50}'.replace('short_code',response.meta['short_code'])
url=url_origin.split('}')[0]+',"after":"'+comment_end_cursor+'"}'
yield scrapy.Request(url,callback=self.comment_parse,meta={'post_id':response.meta['post_id'],
'url_origin':url_origin,
'short_code':response.meta['short_code']})
def reply_parse(self,response):
pass
| UTF-8 | Python | false | false | 8,617 | py | 18 | instagram.py | 6 | 0.571258 | 0.559213 | 0 | 145 | 56.731034 | 284 |
shriram1807/text-analytics-python | 13,546,326,874,729 | c5756a121cfe66ee2a4580b07b61f6cfeca1244e | 081e77f3e774fb34bd2683d044032b74f2159c3f | /CapstoneA4_rangarajan_shriram.py | 0f5db52d0b32380fc3034ac4ac4de3925190ee8e | [] | no_license | https://github.com/shriram1807/text-analytics-python | 76000b48efaaffea512eb029ee07794a89851e25 | ed9d9dec78dbe8ae35f27fd09679cfb38484f251 | refs/heads/master | 2016-09-13T14:55:14.213954 | 2016-05-04T19:49:52 | 2016-05-04T19:49:52 | 58,079,727 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Shriram Rangarajan
#MISM BIDA - Python capstone
#Activity 4
#In this activity, we strip the HTML tags from the webpage and display its contents.
#The text is then cleansed of punctuation marks and stopwords,
#and the words in the final corpus are displayed in alphabetical order.
import urllib.request
import string
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from collections import Counter, defaultdict
web_url = "http://www.opera.com/docs/changelogs/unified/2600/"
request_content = urllib.request.urlopen(web_url)
#store in soup object
soup = BeautifulSoup(request_content.read(),"html.parser")
for content in soup(["script", "style"]):
content.extract()
# get the text content
content_text = soup.get_text()
print(content_text)
# Removing punctuation
print('**********************************************************************************************')
punc_rem = set(string.punctuation)
new_text = ''.join(ch for ch in content_text if ch not in punc_rem)
new_text_list =[ch for ch in content_text if ch not in punc_rem]
print('Punctuation removed text --')
print(new_text)
# stop word removal
print('***********************************************************************************************')
stop_text = ' '.join([word for word in new_text.split() if word not in stopwords.words("english")])
stop_list = [word for word in new_text.split() if word not in stopwords.words("english")]
#Displaying words in ascending order
print("Words in ascending order--")
nl=sorted(stop_list,key=str.lower)
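# key=str.lower makes the ordering case-insensitive, e.g. 'Apple' sorts before 'banana'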
for i in nl:
print(i)
| UTF-8 | Python | false | false | 1,618 | py | 5 | CapstoneA4_rangarajan_shriram.py | 5 | 0.662546 | 0.658838 | 0 | 49 | 32 | 104 |
mdekauwe/pydesica | 1,872,605,785,716 | 5e6c1cce7046e356194c7c7ada6fd6f169fa6dd6 | aabe0cb3ed70cc363a6cfeee2dcc38f5b4f69dcc | /src/run_each_pft.py | 0aa31efe50b62fc568c012d717eeb490e7a13a2f | [] | no_license | https://github.com/mdekauwe/pydesica | 31e0dd64a9602143c799a68400ea600338856c1f | 23597e9bf073a3172b4badfaa8ef8c2cddbf2285 | refs/heads/master | 2021-06-11T02:46:57.576793 | 2020-04-06T14:10:37 | 2020-04-06T14:10:37 | 128,168,696 | 2 | 2 | null | false | 2018-04-13T04:02:14 | 2018-04-05T06:42:33 | 2018-04-13T03:37:19 | 2018-04-13T04:02:14 | 624 | 0 | 1 | 0 | Python | false | null | #!/usr/bin/env python
# coding: utf-8
"""
Run each PFT
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (19.09.2019)"
__email__ = "mdekauwe@gmail.com"
import pandas as pd
import sys
import numpy as np
import matplotlib.pyplot as plt
import os
from generate_met_data import generate_met_data
from canopy import Canopy, FarquharC3
from math import isclose
from calc_pet import calc_net_radiation, calc_pet_energy
import constants as c
from old_desica import Desica
#from desica import Desica
from desica import plot_time_to_mortality
import itertools
import multiprocessing as mp
import random
params = pd.read_csv("outputs/params.csv", index_col=None)
params.index = params["trait"]
pfts = ["rf", "wsf", "dsf", "grw", "saw"]
#pfts = ["rf"]
for pft in pfts:
print(pft)
p = params[pft]
#
## Generate trait space...
#
# min, max, mean
lai = {}
lai["rf"] = (4.78, 6.94, 5.86)
lai["wsf"] = (3.46, 6.19, 4.83)
lai["dsf"] = (1.43, 4.75, 3.09)
lai["grw"] = (1.27, 3.39, 2.33)
lai["saw"] = (0.34, 1.67, 1.0)
lai_low, lai_high, lai_mu = lai[pft]
lat = -35.76
lon = 148.0
Tmax = 35.
RH = 10.
time_step = 30
met = generate_met_data(Tmin=15, Tmax=Tmax, RH=RH, ndays=720,
lat=lat, lon=lon, time_step=time_step)
Dmax = np.max(met.vpd)
Dmean = np.mean(met.vpd)
g0 = 0.0
theta_J = 0.85
Rd25 = 0.92
Q10 = 1.92
Vcmax25 = p.Vcmax
Jmax25 = p.Jmax
Eav = 58550.0
deltaSv = 629.26
Eaj = 29680.
deltaSj = 631.88
FAO = False
psi_stem0 = -0.5
psi_f = p.psiv
kp_sat = p.kpsat
g1 = p.g1
s50 = p.s50
sf = p.sf
AL = lai_mu
psi_e = -1.32 * c.KPA_2_MPA # Sandy clay loam, MPa
b = 6.77
#psi_e = -3.17 * c.KPA_2_MPA # Silty clay clay loam, MPa
#b = 10.39 # Silty clay, SW retention curve param
F = Canopy(g1=g1, g0=g0, theta_J=theta_J, Rd25=Rd25, Q10=Q10,
Vcmax25=Vcmax25, Jmax25=Jmax25, Eav=Eav,
deltaSv=deltaSv, Eaj=Eaj, deltaSj=deltaSj)
D = Desica(psi_stem0=psi_stem0, psi_f=psi_f, F=F, g1=g1, stop_dead=True,
FAO=FAO, kp_sat=kp_sat, s50=s50, sf=sf, AL=AL,
force_refilling=False)
out, day_of_death = D.run_simulation(met)
#odir = "/Users/mdekauwe/Desktop/refilling_plots"
#odir = "/Users/mdekauwe/Desktop/new_plots"
odir = "/Users/mdekauwe/Desktop/old_plots"
if not os.path.exists(odir):
os.makedirs(odir)
plot_time_to_mortality(odir, out, time_step, to_screen=False, pft=pft)
| UTF-8 | Python | false | false | 2,603 | py | 42 | run_each_pft.py | 36 | 0.591241 | 0.5267 | 0 | 103 | 24.271845 | 76 |
sgossage/parmcmc | 9,947,144,283,732 | 640119f3bf74b2595b76770dfda7d668827bbeda | 54bbad4556c503a04db8a3df9e04004f07c7a0e8 | /mk_calcsfhparam_partials.py | 1fa155541e8214d3e105aed047762bf0435d9bb0 | [] | no_license | https://github.com/sgossage/parmcmc | 105ebb3b1448de2dd6f36a3bca2802c8fa4f78a3 | 6f716ae09bb83b59c549cb3e8248366185a67996 | refs/heads/master | 2020-04-05T18:43:47.728094 | 2019-06-17T20:17:37 | 2019-06-17T20:17:37 | 140,914,190 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import matplotlib as mpl
mpl.use('Agg')
from match.scripts.fileio import calcsfh_input_parameter
import os
import sys
import numpy as np
from mk_paramfs import mk_agegrid
from fileio import parse_fname
# sys.argv[x]: 1, 'v' filter (bluer); 2, 'i' filter (redder); 3, photometry file; 4
#def parse_fname(photfn):
# photometry file has bfx.x_avx.x_tx.xx_x.xx_logZx.xx_dmod_x.xx.phot.mags
# this takes these parameters from the file name so that they may be used
# in calcsfh parameter file specification.
# Add these parameters to the cluster photometry file names too.
# bf = float((photfn.split('bf')[-1]).split('_')[0])
# av = float((photfn.split('av')[-1]).split('_')[0])
# agebin = (photfn.split('_t')[-1]).split('_')[:2]
# try:
# agebin = list(map(float, agebin))
# except ValueError:
# age string is just a delta tx.xx, no subsequent endpoint.
# agebin = [float((photfn.split('_t')[-1]).split('_')[0])]
# use smallest model resolution, 0.02 to define endpoint.
# agebin.append(agebin[0]+0.02)
# logZ = float((photfn.split('logZ')[-1]).split('_')[0])
# if "dmod" in photfn:
# dmod = float((photfn.split('dmod')[-1]).split('_')[0])
# else:
# assumes that dmod is 0 if not included in file name.
# dmod = 0.0
# return bf, av, agebin, logZ, dmod
if __name__ == '__main__':
"""
Parameters are specified by the photometry file name. Right now,
the search in av and dmod (and bf) are disabled. The magnitude limit
is set to 8, this may need to be changed per cluster/data. Exclude &
combine gates need to be changed manually right now. Ways to automate?
Also, makes an "age grid" at the end. This makes separate calcsfh
parameter files from the main file, where each new file varies the
age bin. This is done in order to ultimately create an age grid of
Hess diagrams from calcsfh, each fit in ssp mode, in order to act
as potential components of a composite model population.
"""
# Set the parameters...specific to a given cluster?
vfilter_name = sys.argv[1]
ifilter_name = sys.argv[2]
photfn = sys.argv[3]
# not dynamic. make it dynamic??
phot_dir = 'phot_mock'
v = np.genfromtxt(os.path.join(os.getcwd(), phot_dir, photfn), usecols=(0,))
i = np.genfromtxt(os.path.join(os.getcwd(), phot_dir, photfn), usecols=(1,))
vmax = 22.5
imax = vmax
good_i = (-99.0 < v) & (v < vmax) & (-99.0 < i) & (i < imax)
print(good_i)
v = v[good_i]
i = i[good_i]
# try getting errors:
try:
verr = np.genfromtxt(os.path.join(os.getcwd(), phot_dir, photfn.replace('.phot', '.err')), usecols=(0,))
berr = np.genfromtxt(os.path.join(os.getcwd(), phot_dir, photfn.replace('.phot', '.err')), usecols=(1,))
bverr = np.genfromtxt(os.path.join(os.getcwd(), phot_dir, photfn.replace('.phot', '.err')), usecols=(2,))
except IOError:
pass
bf, av, agebin, logZ, dmod = parse_fname(photfn)
# mag cap is not dynamic -- can be troublesome.
vmi = v - i
#mag_cap = 8.0
#vmax = mag_cap
#imax = mag_cap
# vmax = np.amax(v) + 1.5
# imax = np.amax(i) + 1.5
vmin = np.amin(v) - 1.5
imin = np.amin(i) - 1.5
vmi_max = np.amax(vmi) + 0.5
vmi_min = np.amin(vmi) - 0.5
# dav needs to be dynamic if not fixed. Can't be zero!
dav = 0.01
av0 = av #- dav*2.0
if av0 < 0.0:
av0 = 0.0
av1 = av #+ dav*2.0
# ddmod needs to be dynamic if not fixed. Can't be zero!
ddmod = 1.0
dmod0 = dmod #- ddmod*2.0
if dmod0 < 0.0:
dmod0 = 0.0
dmod1 = dmod #+ ddmod*2.0
# set the magnitude and color bin sizes to avg. error size; use suggested dm=0.10 and dc=0.05
# as lower lims to bin size.
try:
# all zeros used to indicate column not available -- maybe change to e.g. inf??
if all(verr == 0):
raise NameError
vbin = float("{:.2f}".format(np.mean(verr)))
if vbin < 0.10:
vbin = 0.10
except NameError:
vbin = 0.10
try:
if all(bverr == 0):
raise NameError
vibin = float("{:.2f}".format(np.mean(bverr)))
if vibin < 0.05:
vibin = 0.05
except NameError:
vibin = 0.05
# params set here:
params = {'dmod0': dmod0, 'dmod1': dmod1, 'ddmod': ddmod, 'av0': av0, 'av1': av1, 'dav': dav,
'dlogz': 0.02, 'logzmax': logZ+0.01, 'logzmin': logZ-0.01, 'tmax': max(agebin), 'tmin': min(agebin), 'tbin': 0.02,
'v': vfilter_name, 'i': ifilter_name, 'vmin': vmin, 'vmax': vmax, 'imin': imin, 'imax':imax,
'vimin': vmi_min, 'vimax': vmi_max, 'vistep': vibin, 'vstep': vbin, 'bf': bf}
# Exclude & combine gates (should be dynamic for each cluster):
nexclude_gates = 0
ex_gate_pts = None #[1.0, 6.0, vmi_max, 6.0, vmi_max, vmin, 1.0, vmin]
if ex_gate_pts != None:
exclude_gates = "{:d} ".format(nexclude_gates) + " ".join([str(element) for element in ex_gate_pts])
else:
exclude_gates = "{:d}".format(nexclude_gates)
ncombine_gates = 0
cb_gate_pts = None
if cb_gate_pts != None:
combine_gates = "{:d} ".format(ncombine_gates) + " ".join([str(element) for element in cb_gate_pts])
else:
combine_gates = "{:d}".format(ncombine_gates)
gate_line = exclude_gates + " " + combine_gates + "\n"
# Write to file:
photbase = photfn.split(".phot")[0]
paramf_name = os.path.join(os.getcwd(), 'csfh_param', "{:s}.param".format(photbase))
with open(paramf_name, 'w+') as outf:
# Use Phil & Dan's code to auto write param file w/ above parameters (doesn't do exclude gates automatically):
outf.write(calcsfh_input_parameter(power_law_imf=False, **params))
# Get lines of that file:
outf.seek(0)
lines = outf.readlines()
outf.seek(0)
# Manually replace the exclude gates line:
lines[7] = gate_line
for line in lines:
outf.write(line)
# creates copies of the calcsfh param file where age is varied.
mk_agegrid(paramf_name, t0=8.30, tf=9.80, tbin=0.02)
| UTF-8 | Python | false | false | 6,255 | py | 11 | mk_calcsfhparam_partials.py | 11 | 0.589129 | 0.563709 | 0 | 175 | 34.742857 | 129 |
NateWeiler/Resources | 18,571,438,594,164 | 54e5f3ec60a93873f50149a376b31ba1774d6d16 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Pygame/pygame-menu/pygame_menu/examples/multi_input.py | 17536f5b7079e6ad836e03ba05d724a606f53a99 | [
"MIT"
] | permissive | https://github.com/NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | false | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | 2021-12-09T12:39:04 | 2022-09-08T15:20:18 | 2,434,051 | 1 | 0 | 32 | null | false | false | version https://git-lfs.github.com/spec/v1
oid sha256:d4a9997a4a55c678f7f8424769a7127d174a4d6217024a739f504d2dcc6db013
size 11449
| UTF-8 | Python | false | false | 130 | py | 36,207 | multi_input.py | 16,386 | 0.884615 | 0.469231 | 0 | 3 | 42.333333 | 75 |
sayinala/projects | 9,509,057,623,517 | dfd27aaadef6365244ca5b19fe046913d563bcca | def5f8e031f2ed3e152383b9491eebf38e2e31f1 | /pedestrian-detector/box/box.py | 85d90ef50bdf585d24a384bf06fb348f0ab4ebf4 | [] | no_license | https://github.com/sayinala/projects | bf5cc42da90e59d21d321105ebe1c010bcc5f870 | 022c7e41820abd6d80f2a35f275faa5c69f3a5ba | refs/heads/master | 2021-01-10T04:02:45.073835 | 2015-10-02T18:29:23 | 2015-10-02T18:29:23 | 43,569,079 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
from bucket import Bucket
import os
file_path = os.path.dirname(os.path.realpath(__file__))
rgb = cv2.imread(file_path + "/output.jpg")
hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
horiz_hist = []
LEFT = Bucket(90, 270, (255, 0, 0))
RIGHT= Bucket(270, 90, (0, 0, 255))
buckets = [LEFT, RIGHT]
width = hsv.shape[1]
height = hsv.shape[0]
NOISE_THRESHOLD = 30
NOISE_GAIN = 0.1
BLACK_THRESHOLD = NOISE_THRESHOLD * 1.25
MIN_HEIGHT = 60
for x in range(width):
for y in range(height):
hue = 2 * hsv[y, x, 0]
val = hsv[y, x, 2]
for bucket in buckets:
if val > NOISE_THRESHOLD:
value = val
else:
value = val * NOISE_GAIN
bucket.weighted_add(hue, value, False)
for bucket in buckets:
bucket.commit()
offset = 0
FIXED_OFFSET = 1
"""
for bucket in buckets:
for bound in bucket:
start, end = bound
cv2.rectangle(rgb, (start, 5+offset), (end, 475 -offset), bucket.color, 1)
offset += FIXED_OFFSET
"""
VERTICAL_TOLERANCE = 30
for bucket in buckets:
for bound in bucket:
start, end = bound
active = False
tolerance = VERTICAL_TOLERANCE
for y in range(height):
val = max(hsv[y, x, 2] for x in range(start, end))
if active and val > BLACK_THRESHOLD:
tolerance = VERTICAL_TOLERANCE
if not active and val > BLACK_THRESHOLD:
active = True
start_y = y
if active and val < BLACK_THRESHOLD:
tolerance -= 1
if tolerance <= 0:
active = False
if (y - start_y) > MIN_HEIGHT:
cv2.rectangle(rgb, (start, start_y), (end, y), bucket.color, 1)
tolerance = VERTICAL_TOLERANCE
"""
for x in range(hsv.shape[1]):
left_bkt = 0
right_bkt = 0
# amt = 0
NOISE_THRESHOLD = 3
for y in range(hsv.shape[0]):
hue = hsv[y, x, 0]
val = hsv[y, x, 2]
if hue > 0 and hue < 180:
if val > NOISE_THRESHOLD:
left_bkt += val
else:
if val > NOISE_THRESHOLD:
right_bkt += val
# amt += hsv[y, x, 2]
horiz_hist.append((left_bkt, right_bkt))
left_active = False
right_active = False
left_start = 0
right_start = 0
TOLERANCE = 20
ACTIVE_TOLERANCE = 480 * 5
DEACTIVE_TOLERANCE = 480 * 5
left_tolerance = TOLERANCE
right_tolerance = TOLERANCE
columns = []
for index, buckets in enumerate(horiz_hist):
left = buckets[0]
right = buckets[1]
if not left_active:
if left > ACTIVE_TOLERANCE:
left_active = True
left_start = index
else:
left_tolerance -= 1
if left < DEACTIVE_TOLERANCE and left_tolerance <= 0:
left_active = False
columns.append((left_start, index, "left"))
left_tolerance = TOLERANCE
if not right_active:
if right > ACTIVE_TOLERANCE:
right_active = True
right_start = index
else:
right_tolerance -= 1
if right < DEACTIVE_TOLERANCE and right_tolerance <= 0:
right_active = False
columns.append((right_start, index, "right"))
right_tolerance = TOLERANCE
for col in columns:
if col[2] == "left":
cv2.rectangle(rgb, (col[0], 5+offset), (col[1], 475-offset), (0, 0, 255), 1)
offset += FIXED_OFFSET
elif col[2] == "right":
cv2.rectangle(rgb, (col[0], 5+offset), (col[1], 475-offset), (255, 0, 0), 1)
offset += FIXED_OFFSET
"""
cv2.imshow('frame1', rgb)
k = cv2.waitKey() & 0xff
| UTF-8 | Python | false | false | 3,705 | py | 16 | box.py | 12 | 0.547099 | 0.51552 | 0 | 154 | 23.058442 | 87 |
NiekKeijzer/hermes | 11,940,009,112,494 | ce9e3197021783bc883ca55fa9228b01b059001a | 66644de0d9495fca87fe1484e41632358e2cf5e2 | /hermes/forms/migrations/0003_auto_20210501_1339.py | 4aad11575e5058b80a4d4f08030eefead69767ec | [
"MIT"
] | permissive | https://github.com/NiekKeijzer/hermes | c1e455f95ae5f563b985d749bbc68f8c6a633f69 | 48c2e015ab5b299dbbc488a8934af76cabf144cb | refs/heads/main | 2023-04-22T21:06:41.098389 | 2021-05-06T17:11:22 | 2021-05-06T17:11:22 | 353,753,796 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.7 on 2021-05-01 13:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("forms", "0002_form_validate_referrer"),
]
operations = [
migrations.RenameField(
model_name="site",
old_name="domain",
new_name="url",
),
]
| UTF-8 | Python | false | false | 360 | py | 68 | 0003_auto_20210501_1339.py | 41 | 0.563889 | 0.511111 | 0 | 18 | 19 | 49 |
dgole/ledBasic | 15,822,659,538,426 | d85c807547cb6a2ab13e04edaf03c93d0c69fc16 | 5cda94b78b05decfb12ad19826cde9c3a76090e9 | /python/myCode/demo1.py | bfd1774d94bcd37068c3d8e4753fea7fb9bf0bd4 | [
"BSD-2-Clause"
] | permissive | https://github.com/dgole/ledBasic | f8b9030565741fd809c1f09d4ce29daf9b68272b | 4a8e77f103022c7f7014ca2b15b73f2b3f56755a | refs/heads/master | 2021-05-07T19:05:05.907164 | 2017-11-20T01:26:09 | 2017-11-20T01:26:09 | 108,885,033 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from neopixel import *
import lib
import numpy as np
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(lib.LED_COUNT, lib.LED_PIN, lib.LED_FREQ_HZ, lib.LED_DMA, lib.LED_INVERT, lib.LED_BRIGHTNESS, lib.LED_CHANNEL, lib.LED_STRIP)
strip.begin()
def setPixelsWithArray(strip, a):
for i in range(strip.numPixels()):
strip.setPixelColor(i, Color(int(a[i,0]), int(a[i,1]), int(a[i,2])))
strip.show()
def normalizeArray(a):
return 255 * a/np.amax(a)
a = np.zeros([strip.numPixels(), 3])
center = 30
width = 5
for i in range(center-width, center+width+1):
distance = np.absolute(i-center)
distanceNorm = float(distance)/float(width)
a[i,0] = 1.0 - distanceNorm + 0.2
a = np.square(a)
a = normalizeArray(a)
plusOrMinus = 1
while True:
if center == 59-width or center == 0+width: plusOrMinus*=-1
a = np.roll(a, plusOrMinus, axis=0)
setPixelsWithArray(strip, a)
center+=plusOrMinus
time.sleep(0.00)
| UTF-8 | Python | false | false | 955 | py | 8 | demo1.py | 8 | 0.708901 | 0.682723 | 0 | 35 | 26.285714 | 151 |
vinaychittora/lbr | 11,742,440,616,162 | 2e7a408bd3bc8cb14dc2989f0b91640c80972721 | 67fe82fe52febcea04042463475508e167647b87 | /alembic/versions/3e17098a2ea7_create_user_listing_and_rental_table.py | 4653d823b25b12d7d96e2d626d806c3cf2320c57 | [
"BSD-2-Clause-Views"
] | permissive | https://github.com/vinaychittora/lbr | 0c6f87de67192f8115be9c460754619f04bc022d | 7867f76014e2c51fa9046bef7a3c31aeba6b7c69 | refs/heads/master | 2021-01-17T08:04:19.795503 | 2016-06-21T09:10:48 | 2016-06-21T09:10:48 | 61,619,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """create user, listing and rental table
Revision ID: 3e17098a2ea7
Revises: None
Create Date: 2014-09-10 17:53:40.102969
"""
# revision identifiers, used by Alembic.
revision = '3e17098a2ea7'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'users',
sa.Column('guid', sa.Unicode(200), primary_key=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('password_hash', sa.Unicode(200)),
sa.Column('has_password', sa.Boolean, nullable=False),
sa.Column('name', sa.Unicode(200)),
sa.Column('email', sa.Unicode(200), nullable=False),
sa.Column('account_uri', sa.Unicode(200)),
)
op.create_table(
'listings',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('bike_type', sa.Unicode(200)),
sa.Column('owner_guid', sa.Unicode(200)),
)
op.create_table(
'rentals',
sa.Column('guid', sa.Unicode(200), primary_key=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('listing_guid', sa.Unicode(200), nullable=False),
sa.Column('debit_uri', sa.Unicode(200)),
sa.Column('owner_guid', sa.Unicode(200), nullable=False),
sa.Column('buyer_guid', sa.Unicode(200)),
)
pass
def downgrade():
op.drop_table('users')
op.drop_table('listings')
op.drop_table('rentals')
pass
| UTF-8 | Python | false | false | 1,581 | py | 53 | 3e17098a2ea7_create_user_listing_and_rental_table.py | 31 | 0.617963 | 0.572423 | 0 | 54 | 28.277778 | 67 |
baiyongzhen/senko | 15,788,299,790,113 | c4d94a0d01cce7ce69ac93c2eddfac59a3eaa826 | d7123f0bac683232376162402a89d7bb1ceec5ed | /senko-app/project/migrations/versions/001_Add_users_table.py | 68ffeec92538c4c2c1a86c13add963af3eee4268 | [] | no_license | https://github.com/baiyongzhen/senko | a39a48c1b26317b252df97ba6fea04c77664c40f | 092ab1409f3493e7da3971a16db728ca3f87f9bf | refs/heads/master | 2020-04-23T10:06:09.116089 | 2018-07-30T08:39:37 | 2018-07-30T08:39:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
from sqlalchemy import *
from migrate import *
meta = MetaData()
users = Table(
'users', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('email', String(255), nullable=False, unique=True),
Column('username', String(255), nullable=False, unique=False),
Column('password', String(120), nullable=False, unique=False),
Column('created_at', DateTime, nullable=False, server_default=text('CURRENT_TIMESTAMP')),
Column('updated_at', DateTime, nullable=False, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta.bind = migrate_engine
users.create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
users.drop()
| UTF-8 | Python | false | false | 944 | py | 16 | 001_Add_users_table.py | 9 | 0.688559 | 0.679025 | 0 | 27 | 33.962963 | 124 |
rolinawu/TicTacToe | 5,574,867,590,197 | 98230ac3ac86f861be6ce7cf9b6e77a4deaeece6 | b78835ce271a65b230473336cce40acde39e81df | /Model.py | 8f1d9489518b6a2eaa424c602f4c596e6ff4cf0c | [] | no_license | https://github.com/rolinawu/TicTacToe | d884038dcd7d5b1f4ec7b23ff7c0d79b079637a3 | 673df29952762a7e22ff3c01dcfbffb053679631 | refs/heads/master | 2021-01-10T04:23:54.209906 | 2016-01-02T15:42:34 | 2016-01-02T15:42:34 | 48,497,563 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
this is where all the magic(algorithms) happens
'''
class Calc(object):
def __init__(self):
self.Xcells = []
self.Ocells = []
#self.Emptycells =
def getXcells(self):
'''
Return: the cell numbers of the list of X cells on the board
'''
return self.Xcells
def getOcells(self):
'''
Return: the cell numbers of the list of X cells on the board
'''
return self.Ocells
def addXcells(self, cell):
'''
Parameter: Int
Purpose: add the new cell to list Xcells
'''
self.Xcells.append(cell)
print ('adding %d to Xcells' % cell)
def addOcells(self, cell):
'''
Parameter: Int
Purpose: add the new cell to list Ocells
'''
self.Ocells.append(cell)
print ('adding %d to Ocells' % cell)
#self.Ocells += 2**cell
def cellexist(self, cell):
'''
'''
print ((cell in self.Xcells) or (cell in self.Ocells))
'''
'''
return ((cell in self.Xcells) or (cell in self.Ocells))
def sumofXcells(self):
return sum(map(lambda x: 2**x, self.Xcells))
def sumofOcells(self):
return sum(map(lambda x: 2**x, self.Ocells)) | UTF-8 | Python | false | false | 1,070 | py | 4 | Model.py | 4 | 0.640187 | 0.637383 | 0 | 52 | 19.596154 | 62 |
kkmjkim/coding-problems | 11,295,764,020,448 | bac746904983cd87711ed35024bb4c77b127b27c | 689fa98621bd5ccc68b72bb9e3444076f7a9278b | /programmers/level2/42578_위장m.py | 3f46c1637b9892e0a1a517702b9f355e30803e3c | [] | no_license | https://github.com/kkmjkim/coding-problems | f5a560635eeab38d36f285b94b30ea2b5c31896b | 187a6d100d95554e4062f8a48a28ef2ec15677e4 | refs/heads/main | 2023-07-24T02:46:22.822961 | 2021-08-17T01:17:39 | 2021-08-17T01:17:39 | 370,715,231 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 레벨2-해시: 위장
# https://programmers.co.kr/learn/courses/30/lessons/42578
# 정확성: 100 / 100
def solution2(clothes):
answer = 1
style = {}
for i in clothes:
if i[1] not in style:
style[i[1]] = 1
else:
style[i[1]] += 1
for i in style.values():
answer *= (i + 1)
return str(answer - 1)
print(solution2([["yellow_hat", "headgear"], ["blue_sunglasses", "eyewear"], ["green_turban", "headgear"]]))
| UTF-8 | Python | false | false | 479 | py | 72 | 42578_위장m.py | 71 | 0.546638 | 0.494577 | 0 | 19 | 23.263158 | 108 |
alexeyproskuryakov/read | 2,370,821,968,183 | 0a015a2a6b8774caa58f8b4ff87c282cccd7f0fb | 29e95efb4ba16567b9763d23a48c79e9ed56e5dd | /wsgi/rr_people/__init__.py | ce6e28576b7a7a9f41fdf0aa48390fda80eb8e87 | [] | no_license | https://github.com/alexeyproskuryakov/read | e0ee2b304de9b4b7c7b27bdff0d31f6feca75f14 | 604476145ddb49425b7c49491b8de51ab9846b9b | refs/heads/master | 2022-02-05T02:01:00.825251 | 2016-10-16T01:11:00 | 2016-10-16T01:11:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import random
import re
import praw
import time
from praw.objects import MoreComments
from stemming.porter2 import stem
from wsgi import properties
DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"
USER_AGENTS = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; chromeframe/12.0.742.112)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30618; In",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SHC-KIOSK; SHC-Mac-5FE3; SHC-Unit-K0816; SHC-KMT; .NET C",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; InfoPath",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30618; In",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30618; In",
"Mozilla/5.0 (webOS/1.4.3; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pixi/1.1",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.4) Gecko/20100611 Firefox/3.6.4 GTB7.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR",
]
A_POST = "post"
A_VOTE = "vote"
A_COMMENT = "comment"
A_CONSUME = "consume"
A_SUBSCRIBE = "subscribe"
A_FRIEND = "friend"
A_SLEEP = "sleep"
S_BAN = "ban"
S_WORK = "work"
S_SLEEP = "sleep"
S_UNKNOWN = "unknown"
S_STOP = "stop"
S_SUSPEND = "suspend"
S_TERMINATED = "terminated"
S_END = "end"
START_TIME = "t_start"
END_TIME = "t_end"
LOADED_COUNT = "loaded_count"
IS_ENDED = "ended"
IS_STARTED = "started"
PROCESSED_COUNT = "processed_count"
CURRENT = "current"
log = logging.getLogger("rr_people")
POSTS_TTL = 60 * 10
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class _RedditPostsCache():
__metaclass__ = Singleton
def __init__(self):
self._posts_cache = {}
self._posts_cache_timings = {}
def get_posts(self, sub):
if sub in self._posts_cache:
if (self._posts_cache_timings.get(sub) - time.time()) < POSTS_TTL:
return self._posts_cache[sub]
else:
del self._posts_cache[sub]
del self._posts_cache_timings[sub]
def set_posts(self, sub, posts):
self._posts_cache[sub] = posts
self._posts_cache_timings[sub] = time.time()
class RedditHandler(object):
def __init__(self, user_agent=None):
self.reddit = praw.Reddit(user_agent=user_agent or random.choice(USER_AGENTS))
self.subreddits_cache = {}
self.posts_comments_cache = {}
self.posts_cache = _RedditPostsCache()
def get_subreddit(self, name):
if name not in self.subreddits_cache:
subreddit = self.reddit.get_subreddit(name)
self.subreddits_cache[name] = subreddit
else:
subreddit = self.subreddits_cache.get(name)
return subreddit
def get_hot_and_new(self, subreddit_name, sort=None, limit=properties.DEFAULT_LIMIT):
try:
result = self.posts_cache.get_posts(subreddit_name)
if not result:
subreddit = self.get_subreddit(subreddit_name)
hot = list(subreddit.get_hot(limit=limit))
log.info("%s hot loaded limit: %s, result: %s" % (subreddit_name, limit, len(hot)))
new = list(subreddit.get_new(limit=limit))
log.info("%s new loaded limit: %s, result: %s" % (subreddit_name, limit, len(new)))
result_dict = dict(map(lambda x: (x.fullname, x), hot), **dict(map(lambda x: (x.fullname, x), new)))
log.info("Will search for dest posts candidates at %s posts in %s" % (len(result_dict), subreddit_name))
result = result_dict.values()
if sort:
result.sort(cmp=sort)
self.posts_cache.set_posts(subreddit_name, result)
return result
except Exception as e:
log.exception(e)
return []
def search(self, query):
copies = list(self.reddit.search(query))
return copies
token_reg = re.compile("[\\W\\d]+")
def normalize(comment_body, serialise=lambda x: x):
res = []
if isinstance(comment_body, (str, unicode)):
tokens = token_reg.split(comment_body.lower().strip())
for token in tokens:
if len(token) > 2:
res.append(stem(token))
return serialise(res)
def hash_word(word):
return hash(stem(word))
def tokens_equals(tokens, another_tokens, more_than_perc=50):
o = set(tokens)
t = set(another_tokens)
intersection = o.intersection(t)
return float(len(intersection)) >= ((float(len(o) + len(t)) / 2) * more_than_perc) / 100
def cmp_by_created_utc(x, y):
return int(x.created_utc - y.created_utc)
def cmp_by_comments_count(x, y):
return x.num_comments - y.num_comments
def check_on_exclude(text, exclude_dict):
c_tokens = set(normalize(text))
for token in c_tokens:
if hash(token) in exclude_dict:
return False, None
return True, c_tokens
if __name__ == '__main__':
rh = RedditHandler()
posts = rh.get_hot_and_new("videos", limit=10)
for post in posts:
print post.fullname
| UTF-8 | Python | false | false | 7,426 | py | 29 | __init__.py | 21 | 0.621196 | 0.54794 | 0 | 190 | 38.084211 | 227 |
joylinmimi/YOLO-data-training | 1,340,029,808,654 | 46f1586a1262b650b58f4e2bc1d031c04225215b | e45070e9d28647f98fbd791ff358f2027cb45400 | /remove_unused.py | cf7aa98432dd269b7cc571894727afb0cbd99968 | [] | no_license | https://github.com/joylinmimi/YOLO-data-training | 595c8df0ef3003a05dcd34ec97faf0c417f4fb80 | 5743b15a75a0403aa6c46956ffe7f782dd6c7ac3 | refs/heads/master | 2020-04-15T05:09:25.775893 | 2019-03-08T07:36:07 | 2019-03-08T07:36:07 | 164,410,901 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import shutil, sys
dir='C:\\Users\\USER\\Desktop\\data20181226 - 複製\\data\\img'
for filename in os.listdir(dir):
if filename.endswith(".txt"):
filename2=os.path.splitext(filename)[0]
f = open(filename,"r")
f2 = open(filename2+'-2.txt',"w")
for line in f:
print(line[0])
if line[0]=='4':
f2.write(line)
f.close()
f2.close()
shutil.move(os.path.join(dir, filename2+'-2.txt'), os.path.join(dir, filename))
| UTF-8 | Python | false | false | 445 | py | 6 | remove_unused.py | 6 | 0.646259 | 0.600907 | 0 | 15 | 28.4 | 81 |
MITRECND/crits | 1,846,835,977,460 | ecb878a3b59c383a75b5ad86383fe198b3f82fcd | 7baafcdd99b5c9cea88c42b279d47154b527ac24 | /crits/settings.py | 1f498c49412abb587d1c35a744e54883a35d2107 | [
"MIT"
] | permissive | https://github.com/MITRECND/crits | bbf0752d5de63995ef812d93c73dd47fe006c590 | 81fc042efe61a252ee3433432f7bd7f0f11b217d | refs/heads/master | 2020-04-04T23:15:00.542469 | 2017-05-11T14:51:32 | 2017-05-11T14:51:32 | 20,405,972 | 22 | 6 | null | true | 2014-07-21T22:12:50 | 2014-06-02T14:03:44 | 2014-07-16T07:48:55 | 2014-07-21T22:12:50 | 3,289 | 1 | 1 | 0 | JavaScript | null | null | # CRITs environment chooser
import errno
import glob
import os
import sys
import django
import subprocess
from pymongo import ReadPreference, MongoClient
from mongoengine import connect
from mongoengine import __version__ as mongoengine_version
from pymongo import version as pymongo_version
from distutils.version import StrictVersion
sys.path.insert(0, os.path.dirname(__file__))
# calculated paths for django and the site
# used as starting points for various other paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Version
CRITS_VERSION = '4-master'
#the following gets the current git hash to be displayed in the footer and
#hides it if it is not a git repo
try:
HIDE_GIT_HASH = False
#get the short hand of current git hash
GIT_HASH = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=SITE_ROOT).strip()
#get the long hand of the current git hash
GIT_HASH_LONG = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=SITE_ROOT).strip()
#get the git branch
GIT_BRANCH = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=SITE_ROOT).strip()
except:
#if it is not a git repo, clear out all values and hide them
GIT_HASH = ''
GIT_HASH_LONG = ''
HIDE_GIT_HASH = True
GIT_BRANCH = ''
APPEND_SLASH = True
TEST_RUN = False
# Get Django version
django_version = django.get_version()
#Check mongoengine version (we got it from import)
if StrictVersion(mongoengine_version) < StrictVersion('0.10.0'):
old_mongoengine = True
#raise Exception("Mongoengine versions prior to 0.10 are no longer supported! Please see UPDATING!")
else:
old_mongoengine = False
# Set to DENY|SAMEORIGIN|ALLOW-FROM uri
# Default: SAMEORIGIN
# More details: https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
#X_FRAME_OPTIONS = 'ALLOW-FROM https://www.example.com'
# Setup for runserver or Apache
if 'runserver' in sys.argv:
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
elif 'test' in sys.argv:
TEST_RUN = True
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
else:
DEVEL_INSTANCE = False
SERVICE_MODEL = 'process'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
LOGIN_URL = "/login/"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
# MongoDB Default Configuration
# Tip: To change database settings, override by using
# template from config/database_example.py
MONGO_HOST = 'localhost' # server to connect to
MONGO_PORT = 27017 # port MongoD is running on
MONGO_DATABASE = 'crits' # database name to connect to
MONGO_SSL = False # whether MongoD has SSL enabled
MONGO_USER = '' # username used to authenticate to mongo (normally empty)
MONGO_PASSWORD = '' # password for the mongo user
MONGO_REPLICASET = None # Name of RS, if mongod in replicaset
# File storage backends
S3 = "S3"
GRIDFS = "GRIDFS"
# DB to use for files
FILE_DB = GRIDFS
# S3 buckets
BUCKET_PCAPS = "pcaps"
BUCKET_OBJECTS = "objects"
BUCKET_SAMPLES = "samples"
# Import custom Database config
dbfile = os.path.join(SITE_ROOT, 'config/database.py')
if os.path.exists(dbfile):
execfile(dbfile)
if TEST_RUN:
MONGO_DATABASE = 'crits-unittest'
# Read preference to configure which nodes you can read from
# Possible values:
# primary: queries are sent to the primary node in a replicSet
# secondary: queries are allowed if sent to primary or secondary
# (for single host) or are distributed to secondaries
# if you are connecting through a router
# More info can be found here:
# http://api.mongodb.org/python/current/api/pymongo/index.html
MONGO_READ_PREFERENCE = ReadPreference.PRIMARY
# MongoDB default collections
COL_ACTORS = "actors" # main collection for actors
COL_ACTOR_IDENTIFIERS = "actor_identifiers" # main collection for actor identifiers
COL_ACTOR_THREAT_IDENTIFIERS = "actor_threat_identifiers" # actor threat identifiers
COL_ACTOR_THREAT_TYPES = "actor_threat_types" # actor threat types
COL_ACTOR_MOTIVATIONS = "actor_motivations" # actor motivations
COL_ACTOR_SOPHISTICATIONS = "actor_sophistications" # actor sophistications
COL_ACTOR_INTENDED_EFFECTS = "actor_intended_effects" # actor intended effects
COL_ANALYSIS_RESULTS = "analysis_results" # analysis results
COL_AUDIT_LOG = "audit_log" # audit log entries
COL_BACKDOORS = "backdoors" # backdoors
COL_BUCKET_LISTS = "bucket_lists" # bucketlist information
COL_CAMPAIGNS = "campaigns" # campaigns list
COL_CERTIFICATES = "certificates" # certificates list
COL_COMMENTS = "comments" # comments collection
COL_CONFIG = "config" # config collection
COL_COUNTS = "counts" # general counts for dashboard
COL_DIVISION_DATA = "division_data" # information on divisions within company
COL_DOMAINS = "domains" # root domains with FQDNs and IP information
COL_EFFECTIVE_TLDS = "effective_tlds" # list of effective TLDs from Mozilla to determine root domains
COL_EMAIL = "email" # main email collection
COL_EVENTS = "events" # main events collection
COL_EVENT_TYPES = "event_types" # event types for events
COL_EXPLOITS = "exploits" # exploits
COL_FILETYPES = "filetypes" # list of filetypes in system generated by MapReduce
COL_IDB_ACTIONS = "idb_actions" # list of available actions to be taken with indicators
COL_INDICATORS = "indicators" # main indicators collection
COL_INTERNAL_LOCATIONS = "internal_locations" # site locations for company
COL_IPS = "ips" # IPs collection
COL_LOCATIONS = "locations" # Locations collection
COL_NOTIFICATIONS = "notifications" # notifications collection
COL_OBJECTS = "objects" # objects that are files that have been added
COL_OBJECT_TYPES = "object_types" # types of objects that can be added
COL_PCAPS = "pcaps" # main pcaps collection
COL_RAW_DATA = "raw_data" # main raw data collection
COL_RAW_DATA_TYPES = "raw_data_types" # list of available raw data types
COL_RELATIONSHIP_TYPES = "relationship_types" # list of available relationship types
COL_SAMPLES = "sample" # main samples collection
COL_SCREENSHOTS = "screenshots" # main screenshots collection
COL_SECTOR_LISTS = "sector_lists" # sector lists information
COL_SECTORS = "sectors" # available sectors
COL_SERVICES = "services" # list of services for scanning
COL_SIGNATURES = "signatures" # main signature collection
COL_SIGNATURE_TYPES = "signature_types" # list of available signature types
COL_SIGNATURE_DEPENDENCY = "signature_dependency" # list of available signature dependencies
COL_SOURCE_ACCESS = "source_access" # source access ACL collection
COL_SOURCES = "sources" # source information generated by MapReduce
COL_STATISTICS = "statistics" # list of statistics for different objects (campaigns, for example)
COL_TARGETS = "targets" # target information for use in email
COL_USERS = "users" # main users collection
COL_USER_ROLES = "user_roles" # main user roles collection
COL_YARAHITS = "yarahits" # yara hit counts for samples
# MongoDB connection pool
if MONGO_USER:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL,
replicaset=MONGO_REPLICASET, username=MONGO_USER, password=MONGO_PASSWORD)
else:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL,
replicaset=MONGO_REPLICASET)
# Get config from DB
c = MongoClient(MONGO_HOST, MONGO_PORT, ssl=MONGO_SSL)
db = c[MONGO_DATABASE]
if MONGO_USER:
db.authenticate(MONGO_USER, MONGO_PASSWORD)
coll = db[COL_CONFIG]
crits_config = coll.find_one({})
if not crits_config:
crits_config = {}
# Populate settings
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE: we are setting ALLOWED_HOSTS to ['*'] by default which will work
# everywhere but is insecure for production installations (no less secure
# than setting DEBUG to True). This is done because we can't anticipate
# the host header for every CRITs install and this should work "out of
# the box".
ALLOWED_HOSTS = crits_config.get('allowed_hosts', ['*'])
COMPANY_NAME = crits_config.get('company_name', 'My Company')
CLASSIFICATION = crits_config.get('classification', 'unclassified')
CRITS_EMAIL = crits_config.get('crits_email', '')
CRITS_EMAIL_SUBJECT_TAG = crits_config.get('crits_email_subject_tag', '')
CRITS_EMAIL_END_TAG = crits_config.get('crits_email_end_tag', True)
DEBUG = crits_config.get('debug', True)
if crits_config.get('email_host', None):
EMAIL_HOST = crits_config.get('email_host', None)
if crits_config.get('email_port', None):
EMAIL_PORT = int(crits_config.get('email_port', None))
ENABLE_API = crits_config.get('enable_api', False)
ENABLE_TOASTS = crits_config.get('enable_toasts', False)
GIT_REPO_URL = crits_config.get('git_repo_url', '')
HTTP_PROXY = crits_config.get('http_proxy', None)
INSTANCE_NAME = crits_config.get('instance_name', 'My Instance')
INSTANCE_URL = crits_config.get('instance_url', '')
INVALID_LOGIN_ATTEMPTS = crits_config.get('invalid_login_attempts', 3) - 1
LANGUAGE_CODE = crits_config.get('language_code', 'en-us')
LDAP_AUTH = crits_config.get('ldap_auth', False)
LDAP_SERVER = crits_config.get('ldap_server', '')
LDAP_BIND_DN = crits_config.get('ldap_bind_dn', '')
LDAP_BIND_PASSWORD = crits_config.get('ldap_bind_password', '')
LDAP_USERDN = crits_config.get('ldap_userdn', '')
LDAP_USERCN = crits_config.get('ldap_usercn', '')
LOG_DIRECTORY = crits_config.get('log_directory', os.path.join(SITE_ROOT, '..', 'logs'))
LOG_LEVEL = crits_config.get('log_level', 'INFO')
QUERY_CACHING = crits_config.get('query_caching', False)
RT_URL = crits_config.get('rt_url', None)
SECURE_COOKIE = crits_config.get('secure_cookie', True)
SERVICE_DIRS = tuple(crits_config.get('service_dirs', []))
SERVICE_MODEL = crits_config.get('service_model', SERVICE_MODEL)
SERVICE_POOL_SIZE = int(crits_config.get('service_pool_size', 12))
SESSION_TIMEOUT = int(crits_config.get('session_timeout', 12)) * 60 * 60
SPLUNK_SEARCH_URL = crits_config.get('splunk_search_url', None)
TEMP_DIR = crits_config.get('temp_dir', '/tmp')
TIME_ZONE = crits_config.get('timezone', 'America/New_York')
ZIP7_PATH = crits_config.get('zip7_path', '/usr/bin/7z')
ZIP7_PASSWORD = crits_config.get('zip7_password', 'infected')
REMOTE_USER = crits_config.get('remote_user', False)
PASSWORD_COMPLEXITY_REGEX = crits_config.get('password_complexity_regex', '(?=^.{8,}$)((?=.*\d)|(?=.*\W+))(?![.\n])(?=.*[A-Z])(?=.*[a-z]).*$')
PASSWORD_COMPLEXITY_DESC = crits_config.get('password_complexity_desc', '8 characters, at least 1 capital, 1 lowercase and 1 number/special')
DEPTH_MAX = crits_config.get('depth_max', '10')
TOTAL_MAX = crits_config.get('total_max', '250')
REL_MAX = crits_config.get('rel_max', '50')
TOTP = crits_config.get('totp', False)
COLLECTION_TO_BUCKET_MAPPING = {
COL_PCAPS: BUCKET_PCAPS,
COL_OBJECTS: BUCKET_OBJECTS,
COL_SAMPLES: BUCKET_SAMPLES
}
# check Log Directory
if not os.path.exists(LOG_DIRECTORY):
LOG_DIRECTORY = os.path.join(SITE_ROOT, '..', 'logs')
# Custom settings for Django
_TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# DATE and DATETIME Formats
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s.u'
PY_DATE_FORMAT = '%Y-%m-%d'
PY_TIME_FORMAT = '%H:%M:%S.%f'
PY_DATETIME_FORMAT = ' '.join([PY_DATE_FORMAT, PY_TIME_FORMAT])
OLD_PY_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
PY_FORM_DATETIME_FORMATS = [PY_DATETIME_FORMAT, OLD_PY_DATETIME_FORMAT]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, '../extras/www')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/'
STATIC_ROOT = os.path.join(SITE_ROOT, '../extras/www/static')
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
#https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
_TEMPLATE_LOADERS = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.load_template_source',
])
]
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': 'unix:/data/memcached.sock',
# }
#}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
_TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'crits.core.views.base_context',
'crits.core.views.collections',
'crits.core.views.user_context',
]
ROOT_URLCONF = 'crits.urls'
_TEMPLATE_DIRS = [
os.path.join(SITE_ROOT, '../documentation'),
os.path.join(SITE_ROOT, 'core/templates'),
os.path.join(SITE_ROOT, 'actors/templates'),
os.path.join(SITE_ROOT, 'backdoors/templates'),
os.path.join(SITE_ROOT, 'core/dashboard/templates'),
os.path.join(SITE_ROOT, 'campaigns/templates'),
os.path.join(SITE_ROOT, 'certificates/templates'),
os.path.join(SITE_ROOT, 'comments/templates'),
os.path.join(SITE_ROOT, 'config/templates'),
os.path.join(SITE_ROOT, 'domains/templates'),
os.path.join(SITE_ROOT, 'emails/templates'),
os.path.join(SITE_ROOT, 'events/templates'),
os.path.join(SITE_ROOT, 'exploits/templates'),
os.path.join(SITE_ROOT, 'indicators/templates'),
os.path.join(SITE_ROOT, 'ips/templates'),
os.path.join(SITE_ROOT, 'locations/templates'),
os.path.join(SITE_ROOT, 'objects/templates'),
os.path.join(SITE_ROOT, 'pcaps/templates'),
os.path.join(SITE_ROOT, 'raw_data/templates'),
os.path.join(SITE_ROOT, 'relationships/templates'),
os.path.join(SITE_ROOT, 'samples/templates'),
os.path.join(SITE_ROOT, 'screenshots/templates'),
os.path.join(SITE_ROOT, 'services/templates'),
os.path.join(SITE_ROOT, 'signatures/templates'),
os.path.join(SITE_ROOT, 'stats/templates'),
os.path.join(SITE_ROOT, 'targets/templates'),
os.path.join(SITE_ROOT, 'core/templates/dialogs'),
os.path.join(SITE_ROOT, 'campaigns/templates/dialogs'),
os.path.join(SITE_ROOT, 'comments/templates/dialogs'),
os.path.join(SITE_ROOT, 'locations/templates/dialogs'),
os.path.join(SITE_ROOT, 'objects/templates/dialogs'),
os.path.join(SITE_ROOT, 'raw_data/templates/dialogs'),
os.path.join(SITE_ROOT, 'relationships/templates/dialogs'),
os.path.join(SITE_ROOT, 'screenshots/templates/dialogs'),
os.path.join(SITE_ROOT, 'signatures/templates/dialogs'),
]
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'core/static'),
os.path.join(SITE_ROOT, 'actors/static'),
os.path.join(SITE_ROOT, 'backdoors/static'),
os.path.join(SITE_ROOT, 'dashboards/static'),
os.path.join(SITE_ROOT, 'campaigns/static'),
os.path.join(SITE_ROOT, 'certificates/static'),
os.path.join(SITE_ROOT, 'comments/static'),
os.path.join(SITE_ROOT, 'domains/static'),
os.path.join(SITE_ROOT, 'emails/static'),
os.path.join(SITE_ROOT, 'events/static'),
os.path.join(SITE_ROOT, 'exploits/static'),
os.path.join(SITE_ROOT, 'indicators/static'),
os.path.join(SITE_ROOT, 'ips/static'),
os.path.join(SITE_ROOT, 'locations/static'),
os.path.join(SITE_ROOT, 'objects/static'),
os.path.join(SITE_ROOT, 'pcaps/static'),
os.path.join(SITE_ROOT, 'raw_data/static'),
os.path.join(SITE_ROOT, 'relationships/static'),
os.path.join(SITE_ROOT, 'samples/static'),
os.path.join(SITE_ROOT, 'screenshots/static'),
os.path.join(SITE_ROOT, 'services/static'),
os.path.join(SITE_ROOT, 'signatures/static'),
os.path.join(SITE_ROOT, 'config/static'),
os.path.join(SITE_ROOT, 'targets/static'),
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'crits.core.user.CRITsUser'
# http://django-debug-toolbar.readthedocs.org/en/latest/configuration.html#debug-toolbar-panels
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'vcs_info_panel.panels.GitInfoPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'template_timings_panel.panels.TemplateTimings.TemplateTimings',
'template_profiler_panel.panels.template.TemplateProfilerPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.sql.SQLPanel',
]
INTERNAL_IPS = '127.0.0.1'
if old_mongoengine:
INSTALLED_APPS = (
'crits.core',
'crits.dashboards',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crits.actors',
'crits.campaigns',
'crits.certificates',
'crits.domains',
'crits.emails',
'crits.events',
'crits.indicators',
'crits.ips',
'crits.locations',
'crits.objects',
'crits.pcaps',
'crits.raw_data',
'crits.relationships',
'crits.samples',
'crits.screenshots',
'crits.services',
'crits.signatures',
'crits.stats',
'crits.targets',
'tastypie',
'tastypie_mongoengine',
'mongoengine.django.mongo_auth',
'template_timings_panel',
'template_profiler_panel',
'debug_toolbar_mongo',
'vcs_info_panel',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Only needed for mongoengine<0.10
'crits.core.user.AuthenticationMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
SESSION_ENGINE = 'mongoengine.django.sessions'
SESSION_SERIALIZER = 'mongoengine.django.sessions.BSONSerializer'
AUTHENTICATION_BACKENDS = (
'crits.core.user.CRITsAuthBackend',
)
else:
INSTALLED_APPS = (
'crits.core',
'crits.dashboards',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crits.actors',
'crits.campaigns',
'crits.certificates',
'crits.domains',
'crits.emails',
'crits.events',
'crits.indicators',
'crits.ips',
'crits.locations',
'crits.objects',
'crits.pcaps',
'crits.raw_data',
'crits.relationships',
'crits.samples',
'crits.screenshots',
'crits.services',
'crits.signatures',
'crits.stats',
'crits.targets',
'tastypie',
'tastypie_mongoengine',
'django_mongoengine',
'django_mongoengine.mongo_auth',
'template_timings_panel',
'template_profiler_panel',
'debug_toolbar_mongo',
'vcs_info_panel',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
SESSION_ENGINE = 'django_mongoengine.sessions'
SESSION_SERIALIZER = 'django_mongoengine.sessions.BSONSerializer'
AUTHENTICATION_BACKENDS = (
#'django_mongoengine.mongo_auth.backends.MongoEngineBackend',
'crits.core.user.CRITsAuthBackend',
)
if REMOTE_USER:
AUTHENTICATION_BACKENDS = (
'crits.core.user.CRITsRemoteUserBackend',
)
if old_mongoengine:
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'crits.core.user.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
else:
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
MONGODB_DATABASES = {
"default": {
"name": 'crits',
"host": '127.0.0.1',
"password": None,
"username": None,
"tz_aware": True, # if you using timezones in django (USE_TZ = True)
},
}
# Handle logging after all custom configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': "%(levelname)s %(asctime)s %(name)s %(message)s"
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'normal': {
'level': LOG_LEVEL,
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': os.path.join(LOG_DIRECTORY, 'crits.log'),
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'crits': {
'handlers': ['normal'],
'propagate': True,
'level': 'DEBUG',
},
},
}
# Handle creating log directories if they do not exist
for handler in LOGGING['handlers'].values():
log_file = handler.get('filename')
if log_file:
log_dir = os.path.dirname(log_file)
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError as e:
# If file exists
if e.args[0] == errno.EEXIST:
pass
# re-raise on error that is not
# easy to automatically handle, such
# as permission errors
else:
raise
# CRITs Types
CRITS_TYPES = {
'Actor': COL_ACTORS,
'AnalysisResult': COL_ANALYSIS_RESULTS,
'Backdoor': COL_BACKDOORS,
'Campaign': COL_CAMPAIGNS,
'Certificate': COL_CERTIFICATES,
'Comment': COL_COMMENTS,
'Domain': COL_DOMAINS,
'Email': COL_EMAIL,
'Event': COL_EVENTS,
'Exploit': COL_EXPLOITS,
'Indicator': COL_INDICATORS,
'IP': COL_IPS,
'Notification': COL_NOTIFICATIONS,
'PCAP': COL_PCAPS,
'RawData': COL_RAW_DATA,
'Sample': COL_SAMPLES,
'Screenshot': COL_SCREENSHOTS,
'Signature': COL_SIGNATURES,
'Target': COL_TARGETS,
}
# Custom template lists for loading in different places in the UI
SERVICE_NAV_TEMPLATES = ()
SERVICE_CP_TEMPLATES = ()
SERVICE_TAB_TEMPLATES = ()
# discover services
for service_directory in SERVICE_DIRS:
if os.path.isdir(service_directory):
sys.path.insert(0, service_directory)
for d in os.listdir(service_directory):
abs_path = os.path.join(service_directory, d, 'templates')
if os.path.isdir(abs_path):
_TEMPLATE_DIRS += (abs_path,)
nav_items = os.path.join(abs_path, '%s_nav_items.html' % d)
cp_items = os.path.join(abs_path, '%s_cp_items.html' % d)
view_items = os.path.join(service_directory, d, 'views.py')
if os.path.isfile(nav_items):
SERVICE_NAV_TEMPLATES = SERVICE_NAV_TEMPLATES + ('%s_nav_items.html' % d,)
if os.path.isfile(cp_items):
SERVICE_CP_TEMPLATES = SERVICE_CP_TEMPLATES + ('%s_cp_items.html' % d,)
if os.path.isfile(view_items):
if '%s_context' % d in open(view_items).read():
context_module = '%s.views.%s_context' % (d, d)
_TEMPLATE_CONTEXT_PROCESSORS += (context_module,)
for tab_temp in glob.glob('%s/*_tab.html' % abs_path):
head, tail = os.path.split(tab_temp)
ctype = tail.split('_')[-2]
name = "_".join(tail.split('_')[:-2])
SERVICE_TAB_TEMPLATES = SERVICE_TAB_TEMPLATES + ((ctype, name, tail),)
# Allow configuration of the META or HEADER variable is used to find
# remote username when REMOTE_USER is enabled.
REMOTE_USER_META = 'REMOTE_USER'
# The next example could be used for reverse proxy setups
# where your frontend might pass Remote-User: header.
#
# WARNING: If you enable this, be 100% certain your backend is not
# directly accessible and this header could be spoofed by an attacker.
#
# REMOTE_USER_META = 'HTTP_REMOTE_USER'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
#'APP_DIRS': False,'
'DIRS': _TEMPLATE_DIRS,
'OPTIONS': {
#'dirs' : #_TEMPLATE_DIRS,
'context_processors' : _TEMPLATE_CONTEXT_PROCESSORS,
'debug' : _TEMPLATE_DEBUG,
'loaders' : _TEMPLATE_LOADERS,
},
},
]
if StrictVersion(django_version) < StrictVersion('1.8.0'):
TEMPLATE_DEBUG = _TEMPLATE_DEBUG
TEMPLATE_DIRS = _TEMPLATE_DIRS
TEMPLATE_CONTEXT_PROCESSORS = _TEMPLATE_CONTEXT_PROCESSORS
# Import custom settings if it exists
csfile = os.path.join(SITE_ROOT, 'config/overrides.py')
if os.path.exists(csfile):
execfile(csfile)
| UTF-8 | Python | false | false | 29,697 | py | 285 | settings.py | 174 | 0.626461 | 0.623935 | 0 | 731 | 39.625171 | 142 |
zeculesu/vitamo | 15,942,918,631,725 | af56cfabe06221008c76e6388c5ab74fc6959598 | 49f0a72b785d124fd4304bdb8b5c17257b85112c | /work_api.py | ef0ef600026a2fe5665c2f2862b2f0108e95456b | [] | no_license | https://github.com/zeculesu/vitamo | 2ceb63c764b218aa84db382d721f6e3ea16f5a3b | c77f6ee44153f89f66d92a18a6920ab1850de6b3 | refs/heads/main | 2023-04-19T16:47:16.386537 | 2021-05-17T13:23:04 | 2021-05-17T13:23:04 | 354,602,573 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import request
import requests
from utils import get_response_json
def add_new_users(user_name, email, password):
basic_url = f'{request.host_url}api/users'
response = get_response_json(requests.post(basic_url, data={'username': user_name,
'email': email,
'password': password}))
if not response:
return 'Server Error'
if response.get('message') is not None and response.get('message') != 'OK':
return response['message']
def authorize_user(login, password):
basic_url = f'{request.host_url}api/authorize'
response = get_response_json(requests.get(basic_url, data={'username': login,
'password': password}))
if not response:
return None, 'Not Found error'
if not response.get('token'):
return None, response.get('message', 'Unknown error')
return response['token'], None
def get_users(token):
basic_url = f'{request.host_url}api/users'
response = get_response_json(requests.get(basic_url, data={'token': token}))
if not response:
return None, 'Not Found error'
if response.get('users') is None:
return None, response.get('message', 'Unknown error')
return response['users'], None
def get_chats(token, host_url=None):
host_url = host_url if host_url is not None else request.host_url
url = f'{host_url}api/chats'
response = get_response_json(requests.get(url, data={'token': token}))
if response.get('message') is not None:
return None, response['message']
if not response:
return 'Server Error'
return response.get('chats'), None
def get_chat(chat_id, token):
url = f'{request.host_url}api/chats/{chat_id}'
response = get_response_json(requests.get(url, data={'token': token}))
if response.get('message') is not None:
return None, response['message']
return response.get('chat'), None
def add_chat_api(title, members, logo, token):
basic_url = f'{request.host_url}api/chats'
response = get_response_json(requests.post(basic_url, data={'title': title,
'users': members,
'logo': logo,
'token': token}))
if response.get('message') != 'OK':
return response['message']
return True
def edit_chat_api(chat_id, title, members, logo, token):
basic_url = f'{request.host_url}api/chats/{chat_id}'
response = get_response_json(requests.put(basic_url, data={'title': title,
'users': members,
'logo': logo,
'token': token}))
if response.get('message') != 'OK':
return response['message']
return True
def delete_chat_api(chat_id, token):
basic_url = f'{request.host_url}api/chats/{chat_id}'
response = get_response_json(requests.delete(basic_url, data={'token': token}))
    if response.get('message') != 'OK':
        return response.get('message', 'Server Error')
return True
def edit_user_api(user_id, token, username=None, password=None, email=None,
description=None, logo=None):
basic_url = f'{request.host_url}api/users/{user_id}'
response = get_response_json(requests.put(basic_url, data={'username': username,
'password': password,
'email': email,
'description': description,
'logo': logo,
'token': token}))
    if response.get('message') != 'OK':
        return response.get('message', 'Server Error')
return True
| UTF-8 | Python | false | false | 4,145 | py | 33 | work_api.py | 22 | 0.517732 | 0.517732 | 0 | 101 | 40.039604 | 90 |
chris4540/DD2424-dl-proj | 1,821,066,144,146 | d9b59fa5dbd9039a4b725783ed52604e8d8d562c | 11ad14fba26c98a51e4f3968d39679c5609e1003 | /progress_kd/tests/test_aux.py | ff047913583692ae49755f3325808aef3ec2c87e | [
"MIT"
] | permissive | https://github.com/chris4540/DD2424-dl-proj | a42ecbdf61f984f3d8881502a7d874f7daa2dc50 | b928ec9ce792930c4ace269091da2c9b3433928e | refs/heads/master | 2020-09-19T19:16:37.664188 | 2019-11-26T19:50:58 | 2019-11-26T19:50:58 | 224,273,303 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This script checks that the teacher model's weights carry over correctly to the auxiliary model
"""
import torch
from models.vgg import Vgg
from models.vgg_aux import AuxiliaryVgg
if __name__ == "__main__":
teacher = Vgg('VGG16', batch_norm=True)
chkpt = torch.load("vgg16bn_teacher.tar")
teacher.load_state_dict(chkpt['state_dict'])
aux1 = AuxiliaryVgg(teacher, 1, batch_norm=True)
aux1.drop_teacher_subnet_blk()
data = {
'state_dict': aux1.state_dict()
}
torch.save(data, 'aux1_state.tar')
teacher_state = torch.load("vgg16bn_teacher.tar")['state_dict']
aux1_state = torch.load("aux1_state.tar")['state_dict']
assert torch.equal(
teacher_state['features.7.weight'].float().to('cpu'),
aux1_state['features.10.weight'].float())
# net.drop_teacher_subnet_blk()
# print("# of params = ", get_sum_params(net))
# for k in range(2, 6):
# student = AuxiliaryVgg(net, k)
# student.drop_teacher_subnet_blk()
# print("# of params = ", get_sum_params(student))
# net = student
| UTF-8 | Python | false | false | 1,075 | py | 13 | test_aux.py | 11 | 0.626977 | 0.609302 | 0 | 32 | 32.53125 | 81 |
jdlafferty/covid-19 | 6,244,882,484,029 | 72dfc3322fec85f228ad9b2570eccb1dd3f2dc44 | 3ef6113527aeacade64f5e16af44e8ebb7b1644a | /app.py | a8c33cde9fc5407d21daad7584b927965ef1579a | [] | no_license | https://github.com/jdlafferty/covid-19 | 7e8160981a84b1485e285286a1c595690d55e901 | 53747603c3f2e77cdbf1c5b1171dfc15f81b5c72 | refs/heads/master | 2021-05-18T10:24:11.883251 | 2021-02-28T15:48:38 | 2021-02-28T15:48:38 | 251,209,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template, request
import json
import herepy
import plotly
import datetime
import dateutil.parser
import pandas as pd
import numpy as np
# initial geocoder and read in NYTimes data
geocoderApi = herepy.GeocoderApi('VbY-MyI6ZT9U8h-Y5GP5W1YaOzQuvNnL4aSTulNEyEQ')
def lat_lon_of_address(addr):
response = geocoderApi.free_form(addr)
result = response.as_json_string()
res = eval(result)
(lat, lon) = (res['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']['Latitude'],
res['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']['Longitude'])
return (lat, lon)
def county_state_of_address(addr):
response = geocoderApi.free_form(addr)
result = response.as_json_string()
res = eval(result)
state = res['Response']['View'][0]['Result'][0]['Location']['Address']['AdditionalData'][1]['value']
county = res['Response']['View'][0]['Result'][0]['Location']['Address']['AdditionalData'][2]['value']
return (county, state)
def process_most_recent_data():
df_counties = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv", dtype={"fips": str})
df_states = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv", dtype={"fips": str})
df_geo = pd.read_csv("https://raw.githubusercontent.com/jdlafferty/covid-19/master/data/geo-counties.csv", dtype={"fips": str})
df_census = pd.read_csv("https://raw.githubusercontent.com/jdlafferty/covid-19/master/data/county_2019_census.csv")
last_date = max([dateutil.parser.parse(d) for d in np.array(df_counties['date'])])
most_recent_date = last_date.strftime("%Y-%m-%d")
most_recent_date_long = last_date.strftime("%A %B %-d, %Y")
print("Most recent data: %s" % most_recent_date_long)
df_recent = df_counties[df_counties['date']==most_recent_date]
df_recent = df_recent.sort_values('cases', ascending=False)
df_recent = df_recent.reset_index().drop('index',1)
df_recent = pd.merge(df_recent, df_geo)
df_recent = pd.merge(df_recent, df_census, how='left', on=['county','state'])
df_recent = df_recent[df_recent['county'] != 'Unknown']
df_recent['population'] = np.array(df_recent['population'], dtype='int')
cases = np.array(df_recent['cases'])
population = np.array(df_recent['population'])
cases_per_100k = np.round(100000*np.array(cases/population),1)
df_recent['cases_per_100k'] = cases_per_100k
return (df_recent, most_recent_date_long)
df, most_recent_date = process_most_recent_data()
df.head()
def get_location_of_address(addr, df):
try:
response = geocoderApi.free_form(addr)
result = response.as_json_string()
res = eval(result)
(lat, lon) = (res['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']['Latitude'],
res['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']['Longitude'])
state = res['Response']['View'][0]['Result'][0]['Location']['Address']['AdditionalData'][1]['value']
county = res['Response']['View'][0]['Result'][0]['Location']['Address']['AdditionalData'][2]['value']
if df[(df['county']==county) & (df['state']==state)].shape[0] == 0:
raise Exception('InvalidStateCounty')
return ((lat, lon), (county, state))
except:
raise Exception('InvalidAddress')
def prepare_data_layout(df, address=None, mark='cases', min_cases=1, scale=3.0):
df['text'] = df['county'] + ', ' + df['state'] + '<br>' + \
(df['cases']).astype(str) + ' cases, ' + (df['deaths']).astype(str) + ' deaths<br>' + \
(df['cases_per_100k']).astype(str) + ' cases per 100k people'
df = df[df['cases'] >= min_cases]
df = df[df['county']!='Unknown']
df['type'] = np.zeros(len(df))
if address != None:
try:
((this_lat, this_lon), (this_county, this_state)) = get_location_of_address(address, df)
county_record = df[(df['county']==this_county) & (df['state']==this_state)]
this_text = '%s<br>County: %s' % (address, np.array(county_record['text'])[0])
td = pd.DataFrame(county_record)
cases = np.array([10000])
population = np.array(county_record['population'])
td['cases'] = cases
cases_per_100k = np.round(100000*np.array(cases/population),1)
td['cases_per_100k'] = cases_per_100k
td['text'] = [this_text]
td['type'] = [1]
df = df.append(td)
except:
print("Invalid address: " + address)
colors = ['rgba(255,0,0,0.2)', 'rgba(0,255,0,0.2)']
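    # type 0 rows (county data) are drawn red; the single type 1 row, added
    # above when an address matches, is drawn green with an inflated case
    # count so its marker stands out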
if mark=='cases':
sizes = df['cases']/scale
else:
sizes = df['cases_per_100k']/scale
data = [dict(type = 'scattergeo',
locationmode = 'USA-states',
lon = df['lon'],
lat = df['lat'],
text = df['text'],
marker = dict(
size = sizes,
color = pd.Series([colors[int(t)] for t in df['type']]),
line = dict(width = 0.5, color = 'black'),
sizemode = 'area'
))
]
title_text = "Data from The New York Times<br>github.com/nytimes/covid-19-data<br>%s" % most_recent_date
layout = dict(
width = 1400,
height = 800,
margin={"r":0,"t":0,"l":0,"b":0},
showlegend = False,
title = dict(
text = title_text,
y = 0.05,
x = 0.85,
xanchor = 'left',
yanchor = 'bottom',
font=dict(
family="Times New Roman",
size=10,
color="#7f7f7f"
)
),
geo = dict(
scope = 'usa',
showland = True,
landcolor = 'rgb(240, 240, 240)'
)
)
return (data, layout)
data, layout = prepare_data_layout(df, mark='cases_per_100k', scale=1.75)
# run flask server
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
addr = request.args.get('address')
marker = request.args.get('marker_view')
options = {}
if marker == 'cases':
mark = 'cases'
scale = 3.0
options['mark'] = 'unchecked'
else:
mark = 'cases_per_100k'
scale = 1.75
options['mark'] = 'checked'
    if addr is None or addr == '':
options['location'] = ''
print(options)
else:
print(addr)
options['location'] = addr
print(options)
data, layout = prepare_data_layout(df, address=addr, mark=mark, scale=scale)
return render_template('index.html',
data=json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder),
layout=json.dumps(layout, cls=plotly.utils.PlotlyJSONEncoder),
options=options)
| UTF-8 | Python | false | false | 6,872 | py | 14 | app.py | 5 | 0.577707 | 0.555006 | 0 | 181 | 36.966851 | 132 |
rubnsbarbosa/python3 | 17,549,236,378,093 | 384b72b6fc75b01110ad15440b22b22bdb574ae1 | b53df9ea08473ee9b26876983b22c1de0f27b642 | /02_DecisionStructure/e16.py | 77f1849103f1e0c673739e2d2a5cb1939a95520a | [] | no_license | https://github.com/rubnsbarbosa/python3 | 080c198b88382622c087954c05ff1e3e9643eb15 | d5202edb594290863c7dcd969bb009d9828bee6a | refs/heads/master | 2022-12-03T23:17:36.288105 | 2020-07-23T17:55:39 | 2020-07-23T17:55:39 | 196,201,653 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math as m
a = int(input('Enter side A: '))
if a == 0:
exit()
else:
    b = float(input('Enter side B: '))
    c = float(input('Enter side C: '))
delta = b**2 - 4 * a * c
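    # delta is the discriminant: negative -> no real roots, zero -> one real
    # root, positive -> two distinct real roots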
def checkRoots(dlt):
        if dlt < 0:
            print('The equation has no real roots')
        elif dlt == 0:
            root = (-b + m.sqrt(dlt)) / (2*a)
            print('The equation has only one real root: {}'.format(root))
        elif dlt > 0:
            root1 = (-b + m.sqrt(dlt)) / (2*a)
            root2 = (-b - m.sqrt(dlt)) / (2*a)
            print('The equation has two real roots.\nr1: {} and r2: {}'.format(root1, root2))
checkRoots(delta)
| UTF-8 | Python | false | false | 616 | py | 81 | e16.py | 73 | 0.547078 | 0.522727 | 0 | 23 | 25.782609 | 90 |
taras-ua/SpectralGraphTheory | 9,337,258,944,393 | f95ef56e1b1f68b5de335d1c0575e84e39c19a0f | 23002657ab509db0b5bfe6077b8913f83dcb76bb | /app/views.py | b509345e00104548ec21bbb5786efd506cc049be | [
"Apache-2.0"
] | permissive | https://github.com/taras-ua/SpectralGraphTheory | b08d5147374e17779ae3de0e229225435257483c | c7540d5c674194ec0da965be18bd3becaf69d5f3 | refs/heads/master | 2021-05-15T02:21:26.792375 | 2015-06-07T23:17:48 | 2015-06-07T23:17:48 | 30,770,988 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.http import HttpResponse
from django import forms
import app.controllers.main as controller
import math
class GraphForm(forms.Form):
MODEL_CHOICES = (('erdos-renyi', 'Erdős–Rényi model'), ('bollobas-riordan', 'Bollobás–Riordan model'), ('new-model', 'Model of forgetting'))
model = forms.ChoiceField(widget=forms.Select, choices=MODEL_CHOICES)
nodes = forms.IntegerField(min_value=1)
subnodes = forms.IntegerField(min_value=1, required=False)
initial_weight = forms.FloatField(min_value=math.ldexp(1.0, -53), required=False)
forget_coef = forms.FloatField(min_value=math.ldexp(1.0, -53), required=False)
probability = forms.FloatField(min_value=0, max_value=1, required=False)
def home(request):
gform = GraphForm(initial={'model': 'erdos-renyi'})
return render_to_response('home.html', {'gform': gform}, context_instance=RequestContext(request))
def graph(request):
if request.method == 'GET':
graph_json, degrees, fractions, degreebynode, matrix, name, data, direct, diameter, nodes, edges, subnodes = controller.get_graph(request)
return render_to_response('graph.html', {'nodes': graph_json['nodes'],
'edges': graph_json['links'],
'nodesnumber': nodes,
'edgesnumber': edges,
'directed': direct,
'diameter': diameter,
'degrees': degrees,
'fractions': fractions,
'degreebynode': degreebynode,
'matrix': matrix,
'model': name, 'modeldata': data,
'subnodes': subnodes},
context_instance=RequestContext(request))
if request.method == 'POST':
form = GraphForm(request.POST)
if form.is_valid():
return redirect(controller.build_graph_request(form))
else:
return HttpResponse(status=500)
| UTF-8 | Python | false | false | 2,393 | py | 16 | views.py | 9 | 0.541073 | 0.534786 | 0 | 46 | 50.847826 | 146 |
harrisonBirkner/PythonSP20 | 77,309,443,795 | 9ea0b0fdedd23897d849f409ec30a1f4682d47c7 | 5fd87b2c657efc067c6104127225f1b645a9592a | /DictionaryFun/DictionaryFun/DictionaryFun/DictionaryFun.py | 2eaf972c739db5c2a5bf0db2a51cb2fb0f915e45 | [] | no_license | https://github.com/harrisonBirkner/PythonSP20 | 7d4284f7b4cd18e99d5a4a12d7ca29d4ae4c25d6 | 989a22652953f0223a2c6c8f5b7b3c106627b15d | refs/heads/master | 2023-01-12T12:43:00.245557 | 2020-11-15T20:09:36 | 2020-11-15T20:09:36 | 256,615,492 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Dictionary and Set Examples
#phoneBook = { 'Beatrice': '555-1111', 'Katie':'555-2222', 'Chris':'555-3333'}
#print(phoneBook)
#print(phoneBook['Katie'])
#if 'Chris' not in phoneBook:
# print("No number available for Chris")
#else:
# print(phoneBook['Chris'])
#Try adding to the dictionary
#phoneBook['Wilbur'] = '123-0987'
#phoneBook['Beatrice'] = '867-5309'
#print(phoneBook)
#Delete a value from the phonebook
#del phoneBook['Katie']
#print(phoneBook)
#Find number of elements in our dictionary
#numItems = len(phoneBook)
#print(numItems)
#Creating an empty dictionary
#phoneBook={}
#phoneBook['Chris'] = '555-1111'
#phoneBook['Shelby'] = '555-2222'
#print(" ")
#print(phoneBook)
#print(" ")
# For Loop
#for key in phoneBook:
# print(key)
#Creating a Set
#mySet = set()
#mySet = set ('aabbcc')
#print(mySet)
#mySet = set (['one', 'two', 'three', 45])
#print(mySet)
#Can find the length of the set
#print(len(mySet))
#mySet.add(1)
#print(mySet)
#mySet.update([2,3,4])
#print(mySet)
#mySet.remove(1)
#print(mySet)
#mySet.discard(88)
#print(mySet)
#set1 = set ([1, 2, 3])
#set2 = set ([3, 4, 5])
#set3 = set1.union(set2)
#print (set3)
#set4 = set1.intersection(set2)
#print(set4)
#set5 = set2.difference(set1)
#print(set5)
#Symmetric Difference of Sets
#set1 = set ([1,2,3,4])
#set2 = set ([3,4,5,6])
#set3 = set1 - set2
#print(set3)
varTest = "Bill"
for char7 in varTest:
print(char7.isupper()) | UTF-8 | Python | false | false | 1,428 | py | 33 | DictionaryFun.py | 28 | 0.663165 | 0.597339 | 0 | 80 | 16.8625 | 78 |
rijuvenator/euler | 19,026,705,127,429 | 882e919957b6b4400e83dbd9626461e18e57a0c6 | bafde5e287a6c605fe964837140d13f279ef2101 | /problem030.py | ad03f59bbafdb62235540c17584736da6cfc115d | [] | no_license | https://github.com/rijuvenator/euler | a28efb839dfa6507e21a50f9872b601e90a5918e | d5803dfee6e59db6e0e3bf43b2effcd6d56f0e34 | refs/heads/master | 2021-01-21T04:50:45.145037 | 2018-07-19T12:31:58 | 2018-07-19T12:31:58 | 45,071,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Problem 30: Find the sum of all numbers that are equal to the sum of the fifth powers of their digits.
# Answer: 443839
print '** Problem 30 **'
total = 0
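# Upper bound: a d-digit number has digit fifth-power sum at most d*9**5;
# 6*9**5 = 354294, and every 7-digit number already exceeds 7*9**5 = 413343,
# so no number above 354294 can qualify.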
for i in range(2,354295):
if i == sum([int(j)**5 for j in list(str(i))]):
total += i
print 'Sum is',total
| UTF-8 | Python | false | false | 268 | py | 73 | problem030.py | 72 | 0.656716 | 0.585821 | 0 | 10 | 25.8 | 104 |
camerse/WebQueries | 11,321,533,826,829 | 52415f46784183380504b7540edef423115ddea4 | 55266a178575bc89ed30b5fdc0fb084c4c4907a1 | /URLGrab.py | 7ddf1667da87e4e358a37891da1d6ce20e085106 | [] | no_license | https://github.com/camerse/WebQueries | 8673ec45c8f8b2e844cae61f04a1867aa1754ac4 | d3a168f78599d65b5ca0feb87f8efd6bb46ee406 | refs/heads/master | 2020-08-05T05:34:02.303985 | 2017-05-17T17:56:31 | 2017-05-17T17:56:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#! /usr/bin/env python3
'''
This code is modified from pymbook.readthedocs.io
I'm using it as a jumping-off point
'''
import requests
import os
import sys
def download_url(url):
''' download URL, save to current directory '''
req = requests.get(url)
#uses string argument, returns requests.models.Response object
if req.status_code == 404:
print("No such file found at {}".format(url))
return
filename = url.split('/')[-1]
if filename in os.listdir():
print("Filename {} already exists!".format(filename))
return
with open(filename, 'w') as fobj:
fobj.write(req.text)
    # note: req.text is a str; req.content would be the raw bytes
print("Download over.")
if __name__ == '__main__':
if len(sys.argv) != 2:
print("One URL argument please.")
sys.exit(0)
download_url(sys.argv[1])
| UTF-8 | Python | false | false | 926 | py | 2 | URLGrab.py | 1 | 0.582073 | 0.572354 | 0 | 33 | 25.939394 | 66 |
MadanParth786/Codeshef-Problems | 3,221,225,484,723 | 7fcdb63ae33187fc36e03c10e1f03cde10150901 | eb8b2dffe055de423bc320e6ee949a0d0d0c401d | /Long June 2021 Contest/Bitwise tupple.py | 1502f32fec81d5eb04f1fed38839714757e3fe18 | [
"MIT"
] | permissive | https://github.com/MadanParth786/Codeshef-Problems | 7dafe59c1e326f3ad4491f1c2752bc5f821fec6f | 64ec3b9849992f3350dfe67f2dbc6332a665b471 | refs/heads/main | 2023-08-27T03:00:42.725312 | 2021-10-28T16:35:12 | 2021-10-28T16:35:12 | 376,236,369 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | for i in range(int(input())):
a, b = map(int,input().split())
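    # the answer is (2**a - 1)**b mod 10**9+7; reducing the base first with
    # pow(2, a, p) keeps the intermediate values small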
c = pow(2,a,1000000007)-1
print(pow(c,b,1000000007))
| UTF-8 | Python | false | false | 131 | py | 63 | Bitwise tupple.py | 62 | 0.564885 | 0.396947 | 0 | 4 | 31.5 | 36 |
lesliedlc/json | 4,166,118,317,898 | 1c806746a62dec07a6d6e80cd66c103e752703e8 | ea1bbef16d1ffaf1a6fdec4cf855a455c6726418 | /json_USfires_9_14.py | c26d729b7b9dec82226865c14f80ed3655553882 | [] | no_license | https://github.com/lesliedlc/json | 9aae810cc821e311bb86ccd0eec608f8b009f907 | 29037b56d6c0c5e05f64176838f5da02ab0d015e | refs/heads/master | 2022-12-19T05:10:56.209166 | 2020-10-01T02:40:34 | 2020-10-01T02:40:34 | 297,475,446 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
in_file = open("US_fires_9_14.json", "r")
out_file = open("readable_US2_data.json","w")
us2_data = json.load(in_file)
json.dump(us2_data, out_file,indent = 4) #dump contents into the file
fire_list_1 = us2_data[:]
print(type(fire_list_1)) #type
print(len(fire_list_1)) #how many
brightness,lons,lats = [],[],[]
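# keep only the strongest detections (brightness > 450 -- presumably the
# satellite brightness-temperature field of this dataset)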
for F in fire_list_1:
brg = F["brightness"]
lon = F["longitude"]
lat = F["latitude"]
if brg > 450:
brightness.append(brg)
lons.append(lon)
lats.append(lat)
print("Brightness:",brightness[:10])
print("Longitude",lons[:10])
print("Latitude",lats[:10])
#import plotly
from plotly.graph_objs import Scattergeo, Layout
from plotly import offline
data = [{
'type':'scattergeo',
'lon':lons,
'lat':lats,
'marker':{
'size':[brg/50 for brg in brightness],
'color':brightness,
'colorscale':'Viridis',
'reversescale':True,
'colorbar':{'title':'Brightness'}
}
}]
my_layout = Layout(title="US Fires - 9/14/2020 through 9/20/2020")
fig = {"data":data, "layout":my_layout}
offline.plot(fig,filename = "US_fires_9_14-20.html") | UTF-8 | Python | false | false | 1,145 | py | 4 | json_USfires_9_14.py | 3 | 0.623581 | 0.5869 | 0 | 51 | 21.470588 | 69 |
356255531/ADP_Programming_Assignment | 19,078,244,737,930 | b2f57ceee043748f9c6594c868eeeaa5b10e83bb | beb22b5d1584d9962aecb547692da8a2679bd172 | /code/Toolbox/Reward.py | a70a35e77b5b7d57676c9fc10cb7883de2987bea | [] | no_license | https://github.com/356255531/ADP_Programming_Assignment | 5fb16c8dff5a884b371a89a819cd3718c0b286d2 | e8d21b7943df806f5232e37795ae3a70a84fddd1 | refs/heads/master | 2020-06-14T10:12:35.390736 | 2017-01-13T16:47:09 | 2017-01-13T16:47:09 | 75,199,031 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __auther__ = "Zhiwei"
class Reward2(object):
"""
Reward rule 2:
when goal state reached, given reward 1,
otherwise punished by -1
Member function:
get_reward(current_state, action, next_state):
"""
def __init__(self, env):
super(Reward2, self).__init__()
self.__env = env
def get_reward(
self,
current_state,
action,
next_state
):
"""
        return the reward for the given states and action, int
"""
if self.__env.is_goal_state(next_state):
return 0
else:
return -1
class Reward1(object):
"""
Reward rule 1:
when goal state reached, given reward 1,
otherwise 0
Member function:
get_reward(current_state, action, next_state):
"""
def __init__(self, env):
super(Reward1, self).__init__()
self.__env = env
def get_reward(
self,
current_state,
action,
next_state
):
"""
        return the reward for the given states and action, int
"""
if self.__env.is_goal_state(next_state):
return 1
else:
return 0
if __name__ == "__main__":
print Reward1.__name__
print Reward2.__name__
| UTF-8 | Python | false | false | 1,335 | py | 12 | Reward.py | 9 | 0.485393 | 0.473408 | 0 | 64 | 19.859375 | 58 |
cherukurukavya/lab-programs2-r151458 | 15,101,105,053,404 | 1c6a49335a73097b5059b6a238952b0cafdc31a7 | 4bb97a99fbbb63f33586e85f371eb27d32768ffc | /DTFT_LIST.py | 8e67200b5979e60fae327ea5c2839ab101f00bac | [] | no_license | https://github.com/cherukurukavya/lab-programs2-r151458 | 3954d673978940fcdf4589623d0f892cfe899b4a | 13f391c3767096801de0171ce6f680ee80f0228c | refs/heads/master | 2020-05-16T10:46:39.259944 | 2019-12-28T06:33:17 | 2019-12-28T06:33:17 | 182,995,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cmath as c
import matplotlib.pyplot as plt
w=np.linspace(-1*np.pi,np.pi,1000)
j=c.sqrt(-1)
#t=np.arange(-400,400,1)
#fs=100
#x=np.sin(2*np.pi*40.0/fs*t)
x = eval(input('enter values'))  # expects a list literal, e.g. [1,2,3]
z=len(x)
y=[]
M=[]
P=[]
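# Evaluate the DTFT X(w) = sum_n x[n]*exp(-j*w*n) at 1000 points of w in [-pi, pi]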
for r in range(1000):
W=w[r]
X=0
for n in range(z):
X+=x[n]*np.exp(-j*W*n)
y.append(X)
M.append(abs(X))
P.append(np.angle(X))
#P.append(X)
plt.subplot(311)
plt.plot(w, np.real(y))  # real part of X(w); magnitude and phase follow below
plt.subplot(312)
plt.plot(w,M)
plt.subplot(313)
plt.plot(w,P)
plt.show() | UTF-8 | Python | false | false | 488 | py | 24 | DTFT_LIST.py | 23 | 0.639344 | 0.563525 | 0 | 29 | 15.862069 | 34 |
satire6/Anesidora | 13,408,887,920,271 | 2952d9baf3948fc1288fab36b60699c71e2d3130 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/launcher/LauncherTool.py | bd4c83934bf96c1e1f7cc06266ce246b9d288821 | [] | no_license | https://github.com/satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | null | null | null | null | null | null | null | null | null | from direct.showbase.TkGlobal import *
class LauncherTool:
def __init__(self, launcher):
self.launcher = launcher
self.toplevel = Toplevel()
self.frame = Frame(self.toplevel)
self.statusLabel = Label(self.frame, justify=LEFT,
anchor=W, text='Status: Ok')
self.goButton = Button(self.frame, text='Go', command=self.launcher.go)
self.frame.pack(side=LEFT)
self.statusLabel.pack(side=TOP, fill=X)
self.goButton.pack(side=TOP, fill=X)
def setStatus(self, action):
self.statusLabel['text'] = 'Status: ' + action
| UTF-8 | Python | false | false | 635 | py | 1,517 | LauncherTool.py | 1,360 | 0.598425 | 0.598425 | 0 | 19 | 31.894737 | 79 |
davedavedave636/Coding-Projects | 6,992,206,761,988 | 5ea190d25f8550e74137c941b8afe3b01853ddbc | 0642d8032c1e6b7972832a495bb7227e10bf9a09 | /PY4E_Exercises/c6/exercise_c6,3.py | bfa0bb06d427d3a908a001e1994ae7c635cbd7c0 | [] | no_license | https://github.com/davedavedave636/Coding-Projects | e0856f4146d2e0222b1e2a718fc76fc0607afe19 | 8951bd995426b1345c00119b49cb0cf0a0b01b12 | refs/heads/master | 2023-03-05T21:43:34.197400 | 2021-02-16T21:17:02 | 2021-02-16T21:17:02 | 339,529,999 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def count():
word = input('enter a word: ')
let = input('enter a letter: ')
countnum = 0
for letter in word:
if letter == let:
countnum += 1
print('There is/are', countnum, let + "'s")
count()
| UTF-8 | Python | false | false | 236 | py | 30 | exercise_c6,3.py | 29 | 0.521186 | 0.512712 | 0 | 9 | 24.888889 | 47 |
JCHasrouty/CSIS-151-Python | 13,546,326,855,364 | 4ac156c44fa258a1bc5f6bdbce286c37ec38e9bf | aaa1e49c9cc57d27e6e32b7e60df01adb60db565 | /Chapter 9 Homework/Part 3/Employee.py | 128436423149565d5b403a0a09984285569ee965 | [] | no_license | https://github.com/JCHasrouty/CSIS-151-Python | d9e2d472481274da0f62adc1c442ae09690873cf | b4032fac68e27c7291f61b03d7259fc5a9c888b0 | refs/heads/master | 2020-03-07T15:38:41.997555 | 2019-04-20T23:08:42 | 2019-04-20T23:08:42 | 127,560,550 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Employee():
## def __setitem__(self, key, item):
## self.__dict__[key] = item
##
## def __getitem__(self, key):
## return self.__dict__[key]
##
## def __delitem__(self, key):
## del self.__dict__[key]
##
## def clear(self):
## return self.__dict__.clear()
##
## def copy(self):
## return self.__dict__.copy()
##
## def has_key(self, k):
## return k in self.__dict__
##
## def update(self, *args, **kwargs):
## return self.__dict__.update(*args, **kwargs)
##
## def keys(self):
## return self.__dict__.keys()
##
## def values(self):
## return self.__dict__.values()
##
## def items(self):
## return self.__dict__.items()
##
## def pop(self, *args):
## return self.__dict__.pop(*args)
##
## def __cmp__(self, dict_):
## return self.__cmp__(self.__dict__, dict_)
##
## def __contains__(self, item):
## return item in self.__dict__
##
## def __iter__(self):
## return iter(self.__dict__)
##
    def __init__(self, emp_ID,department,job_title, salary):
        self.emp_ID = emp_ID
        self.department = department
        self.job_title = job_title
        self.salary = salary
        self.emp_name = ""  # not a constructor argument; set it via set_emp_name()
# Mutators
def set_emp_name(self,emp_name):
self.emp_name = emp_name
def set_emp_ID(self,emp_ID):
self.emp_ID = emp_ID
def set_department(self,department):
self.department = department
def set_job_title(self,job_title):
self.job_title = job_title
def set_salary(self,salary):
self.salary = salary
#Accessor Methods
def get_emp_name(self):
return self.emp_name
def get_emp_ID(self):
return self.emp_ID
def get_department(self):
return self.department
def get_job_title(self):
return self.job_title
def get_salary(self):
return self.salary
def get_data(self):
print(self.emp_ID,self.department,self.job_title, self.salary)
    def __str__(self):
        return ("ID# {} Department: {} Job Title: {} Salary: {}".format(
            self.emp_ID, self.department, self.job_title, self.salary))
| UTF-8 | Python | false | false | 2,166 | py | 35 | Employee.py | 30 | 0.538781 | 0.538781 | 0 | 88 | 23.613636 | 135 |
RiccardOtt/GOR | 10,814,727,669,193 | 947ee3dda1973873dc9be8dda5b58a850f5e9775 | f99221c054dd8c25c204d6be8741e7bee8839455 | /GOR_predicting.py | 43821ad93dfe331851a5cfdcf023b2fd12a4311e | [] | no_license | https://github.com/RiccardOtt/GOR | 90361db6ef0877f967f8c460183b08f138130ae5 | 6de0bbb89027b4e438677d57fe95c74a0c1151e6 | refs/heads/main | 2023-07-04T15:41:39.548836 | 2021-08-10T18:39:39 | 2021-08-10T18:39:39 | 394,749,246 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import numpy as np
import math #math.log(<of what>,<base>)
def matrix_pssm(filename):
seq_prof = []
for j in filename:
L = j.split()
# print(L)
try: L[0]
except: pass
else:
if L[0].isdigit():
seq_prof.append(L[22:-2])
# print(seq_prof)
x = np.array(seq_prof, dtype=np.float64)
x /= 100
# return(x)
# print(x)
if x.sum() != float(0):
return x, True
else:
return None, False
def information(helix, beta, coil, total, secstr):
Mh=np.array(total*secstr[0]) #denominator info function
Me=np.array(total*secstr[1])
Mc=np.array(total*secstr[2])
Mh1=helix/Mh #info function fraction
Me1=beta/Me
Mc1=coil/Mc
Ih_r=np.log(Mh1) #info function matrix
Ie_r=np.log(Me1)
Ic_r=np.log(Mc1)
return(Ih_r, Ie_r, Ic_r)
def cane(profile1, Ihr1, Ier1, Icr1):
pad = np.zeros((8,20))
padding = np.vstack((pad,profile1,pad))
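    # 8 rows of zero padding on each side give every residue a full
    # 17-residue window (8 + 1 + 8), matching the shape of the information
    # matrices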
Lp=len(padding)
dssp=''
ss = ['H', 'E', '-']
for e in range(8,Lp-8):
j=e-8
k=e+8
W=np.array(padding[j:k+1])
prediction=[]
        Ih=np.sum(W*Ihr1)
        Ie=np.sum(W*Ier1)
        Ic=np.sum(W*Icr1)
prediction=[Ih,Ie,Ic]
SS_pred=max(prediction)
dssp += ss[prediction.index(SS_pred)]
return dssp
if __name__ == '__main__':
HELIX=np.load('GOR_tr_H.npy')
BETA=np.load('GOR_tr_E.npy')
COIL=np.load('GOR_tr_C.npy')
TOTAL=np.load('GOR_tr_R.npy')
SECSTR=np.load('GOR_tr_ProbSS.npy')
fileid=sys.argv[1]
with open(fileid) as filein:
for id in filein:
id=id.rstrip()
profile_file = '/home/riccardo/Documents/Documents/LB2/Castrense/project/jpred4.pssm/' + id + '.pssm'
try:
prof=open(profile_file)
except: continue
else:
profile, ret = matrix_pssm(prof)
if ret:
Ih_r, Ie_r, Ic_r = information(HELIX, BETA, COIL, TOTAL, SECSTR)
dssp = cane(profile, Ih_r, Ie_r, Ic_r) #I(S;R)=out di information(h,e,c,tot,secstr)
print('>'+id+'\n'+dssp)
| UTF-8 | Python | false | false | 1,851 | py | 3 | GOR_predicting.py | 3 | 0.632631 | 0.613182 | 0 | 83 | 21.301205 | 104 |
aleglez22/MasterWorkshop | 18,889,266,182,429 | f8a1c578f8f80edbc20cc9b1d4f6d156b3317506 | 97befe66c292e397eb82736fb8b131a846a91cd8 | /index/views.py | 6937f97876467b2c1602811b85923840f7e9cff7 | [] | no_license | https://github.com/aleglez22/MasterWorkshop | af5299d4ae1363dc5222e5258fe4864119f3db48 | 432617171994aa3978f13cd002e8dc264ab8b4df | refs/heads/master | 2021-09-20T05:31:16.693764 | 2018-08-04T05:08:59 | 2018-08-04T05:08:59 | 84,784,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect , get_object_or_404
from .models import Cliente, Orden, Equipo, Tecnico
from django.views import generic
from django.core.urlresolvers import reverse_lazy
from django.core.urlresolvers import reverse
from django import forms
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.writer.excel import save_virtual_workbook
from django.views.generic.edit import CreateView, UpdateView, DeleteView
# Create your views here.
def redirect_by_id(request):
    # if request.method == "POST":
    if request.POST.get('id_orden'):
id = request.POST['id_orden']
return redirect('index:Detalle-Ordenes', pk=id)
return redirect('index:home-page')
def ImprimirOrden(request,pk):
import datetime
now = datetime.datetime.now().strftime('%H:%M:%S')
orden=Orden.objects.get(pk = pk)
context={'orden':orden, 'hora':now}
template = 'index/imprimir_orden.html'
return render(request, template,context )
def ExcelExport (request):
from django.db.models import Sum
f_ini = request.POST['fecha_inicial']
f_fin = request.POST['fecha_final']
    # workbook setup
wb = Workbook()
dest_filename = 'file.xlsx'
ws = wb.worksheets[0]
if f_ini and f_fin:
a = Orden.objects.filter(Fecha_creacion__range=(f_ini, f_fin))
else:
a = Orden.objects.all()
suma= a.aggregate(Sum('Costo_reparacion'))
num=0
ws.cell(row=1, column=1, value="desde "+ str(f_ini))
ws.cell(row=1, column=3, value="hasta "+ str(f_fin))
ft = Font(bold=True)
ws.cell(row=3, column=1, value="Id orden").font=ft
ws.cell(row=3, column=2, value="Nombre Cli").font=ft
ws.cell(row=3, column=3, value="Telefono").font=ft
ws.cell(row=3, column=4, value="Equipo").font=ft
ws.cell(row=3, column=5, value="Reparación").font=ft
ws.cell(row=3, column=6, value="Costo").font=ft
ws.cell(row=3, column=7, value="Estado").font=ft
for orden in a:
num= num + 1
valor= orden.Costo_reparacion
ws.cell(row=num+4, column=1, value=str(orden.pk))
ws.cell(row=num+4, column=2, value=str(orden.Cliente.Nombre))
ws.cell(row=num+4, column=3, value=str(orden.Cliente.Telefono1))
ws.cell(row=num+4, column=4, value=str(orden.Equipo))
ws.cell(row=num+4, column=5, value=str(orden.Informe_tecnico))
ws.cell(row=num+4, column=6, value=str(orden.Costo_reparacion))
ws.cell(row=num+4, column=7, value=str(orden.Estado))
ws.cell(row=num+4, column=9, value="Total: "+str(suma['Costo_reparacion__sum']))
return HttpResponse(save_virtual_workbook(wb), content_type='application/vnd.ms-excel')
class ListaOrdenes(generic.ListView):
    # template that will be used and that receives the context
    template_name = 'index/ordenes.html'
    # name the context variable
    context_object_name='all_ordenes'
    # @Override returns the objects that will be rendered
def get_queryset(self):
return Orden.objects.all().order_by('-Fecha_creacion')
def get_context_data(self, **kwargs):
context = super(ListaOrdenes, self).get_context_data(**kwargs)
context['pendientes'] = Orden.objects.filter(Estado='PROCESANDO')
context['espera'] = Orden.objects.filter(Estado='ESPERA')
return context
#not practical, because the parent class returns two context entries
#and one of them is unused here, but the practice was worth it
class TodasOrdenes(ListaOrdenes):
template_name = 'index/lista_ordenes.html'
class DetalleOrdenes(generic.DetailView):
model=Orden
template_name = 'index/detalle_orden.html'
#not used (kept for teaching purposes)
class CrearOrden (CreateView):
model= Orden
    #fields users are allowed to fill in
fields =['Fecha_entrega','Estado_inicial','Falla', 'Costo_reparacion', 'Costo_revision', 'Notas',
'Fecha_ofrecida','Accesorios','Limite_garantia','Informe_tecnico','Tecnico','Equipo','Cliente','Estado']
def get_initial(self):
if self.request.GET.get('txtcliente'):
a=self.request.GET.get('txtcliente')
return {
'Tecnico':4, 'Cliente':a,
}
else:
#C = get_object_or_404(Recipe, slug=self.kwargs.get('slug'))
return {
'Tecnico':4,
}
def agregarClienteOrden(request):
#if request.method == "POST":
if request.POST.get('txtcliente'):
cedula = request.POST.get('txtcliente')
try:
user = Cliente.objects.get(Cedula=cedula)
return redirect('index:Add-Orden2', user.pk)
except Cliente.DoesNotExist:
return redirect('/#no')
return redirect('index:Add-Orden2',0,)
class OrdenForm(forms.ModelForm):
class Meta:
model=Orden
fields =['Fecha_entrega','Estado_inicial','Falla', 'Costo_reparacion', 'Costo_revision', 'Notas',
'Fecha_ofrecida','Accesorios','Limite_garantia','Informe_tecnico','Tecnico','Equipo','Cliente','Estado']
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None) # pop the 'user' from kwargs dictionary
super(OrdenForm, self).__init__(*args, **kwargs)
self.fields['Equipo'] = forms.ModelChoiceField(queryset=Equipo.objects.filter(Cliente=user))
class CrearOrden2 (CreateView):
model= Orden
form_class = OrdenForm
    #fields users are allowed to fill in
# fields =['Fecha_entrega','Estado_inicial','Falla', 'Costo_reparacion', 'Costo_revision', 'Notas',
# 'Fecha_ofrecida','Accesorios','Limite_garantia','Informe_tecnico','Tecnico','Equipo','Cliente','Estado']
def get_initial(self):
if self.kwargs['cliente'] != 0:
a=self.kwargs['cliente']
return {
'Tecnico':4, 'Cliente':a,
}
else:
return {
'Tecnico':4,
}
def get_form_kwargs(self):
kwargs = super(CrearOrden2, self).get_form_kwargs()
kwargs['user'] = self.kwargs['cliente'] # pass the 'user' in kwargs
return kwargs
class EditarOrden (UpdateView):
model= Orden
    #fields users are allowed to edit
fields =['Falla', 'Costo_reparacion', 'Notas',
'Informe_tecnico','Estado']
class EliminarOrden (DeleteView):
model= Orden
success_url= reverse_lazy("index:home-page")
    #override the get method so it acts like post and no confirmation form is needed
#def get(self, request, *args, **kwargs):
# return self.post(request, *args, **kwargs)
#...............Client Views
class ListaCliente(generic.ListView):
    # template that will be used and that receives the context
    template_name = 'index/clientes.html'
    # name the context variable
    context_object_name='all_clientes'
    # @Override returns the objects that will be rendered
def get_queryset(self):
return Cliente.objects.all()
def get_context_data(self, **kwargs):
context = super(ListaCliente, self).get_context_data(**kwargs)
context['ultimos'] = Cliente.objects.order_by('-Fecha_ingreso')[:5]
return context
class DetalleCliente(generic.ListView):
    # template that will be used and that receives the context
    template_name = 'index/detalle_cliente.html'
    # name the context variable
    context_object_name='all_equipos'
    # @Override returns the objects that will be rendered
    def get_queryset(self):
        return Equipo.objects.filter(Cliente=self.kwargs['pk'])  # pk -> parameter passed in the URL
def get_context_data(self, **kwargs):
context = super(DetalleCliente, self).get_context_data(**kwargs)
        context['cliente']= Cliente.objects.get(pk=self.kwargs['pk'])  # get() because there is exactly one
context['ordenes']= Orden.objects.filter(Cliente=self.kwargs['pk'])
return context
class CrearCliente (CreateView):
model = Cliente
    #fields users are allowed to fill in
fields =['Nombre','Apellido','Cedula','Direccion','Email', 'Gasto_acumulado', 'Telefono1', 'Telefono2']
class CrearClienteFromOrden(CrearCliente):
success_url = reverse_lazy('index:Add-Orden')
#def get_success_url():
# return reverse('index:Add-Orden')
class EditarCliente (UpdateView):
model= Cliente
    #fields users are allowed to edit
fields =['Telefono1', 'Telefono2']
class EliminarCliente (DeleteView):
model= Cliente
success_url= reverse_lazy("index:Home-Cliente")
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
#...............Equipo Views
class ListaEquipo(generic.ListView):
    # template that will be used and that receives the context
    template_name = 'index/equipos.html'
    # name the context variable
    context_object_name='all_equipos'
    # @Override returns the objects that will be rendered
def get_queryset(self):
return Equipo.objects.all()
def get_context_data(self, **kwargs):
context = super(ListaEquipo, self).get_context_data(**kwargs)
context['ultimos'] = Equipo.objects.order_by('-Fecha_creacion')[:5]
return context
class CrearEquipo (CreateView):
model = Equipo
    #fields users are allowed to fill in
fields =['Tipo','Marca','Modelo','Serial', 'Cliente']
class CrearEquipoFromOrden(CrearEquipo):
success_url = reverse_lazy('index:Add-Orden')
class EliminarEquipo (DeleteView):
model= Equipo
success_url= reverse_lazy("index:Home-Equipo")
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
#............................TECNICO Views...........................
class ListaTecnico(generic.ListView):
    # template that will be used and that receives the context
    template_name = 'index/tecnicos.html'
    # name the context variable
    context_object_name='all_tecnicos'
    # @Override returns the objects that will be rendered
def get_queryset(self):
return Tecnico.objects.all()
class CrearTecnico (CreateView):
model = Tecnico
    #fields users are allowed to fill in
fields =['Nombre','Apellido']
class EliminarTecnico (DeleteView):
model= Tecnico
success_url= reverse_lazy("index:Home-Tecnico")
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
| UTF-8 | Python | false | false | 10,505 | py | 32 | views.py | 17 | 0.653223 | 0.647502 | 0 | 331 | 30.667674 | 112 |
kimiyuki/dotfiles | 14,285,061,250,876 | b55cc2388bb37d63876cf3da6b193ea0423927c9 | 4f97c41df3d9071de4d54cd0a8f252ed78d579cb | /00-ipython-startup.py | c6151b29f2272d2aa34fbf24a3f4c4baedcefd42 | [] | no_license | https://github.com/kimiyuki/dotfiles | d648983655700d59efbc7be3d743713dab5fd219 | ebc290b770f35f709216460504ba1e54e0f5a304 | refs/heads/master | 2022-08-22T03:44:12.969661 | 2022-07-25T02:56:16 | 2022-07-25T02:56:16 | 53,015,945 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | try:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
#get_ipython().run_line_magic('load_ext', 'memory_profiler')
get_ipython().run_line_magic('xmode', 'Plain')
print('autoreload loaded from startup ~/.ipython/...')
except NameError:
print('no ipython')
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas_datareader import data as pdr_data  # aliased: plain 'data' is re-bound by vega_datasets below
pd.set_option("display.max_rows", 6)
pd.set_option("display.max_colwidth", 70)
## itertools
from itertools import *
def pair_wise(iterable, tail=True):
a,b = tee(iterable)
next(b, None)
return zip_longest(a,b) if tail else zip(a,b)
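# e.g. pair_wise('abc') -> ('a','b'), ('b','c'), ('c', None); with tail=False
# the trailing ('c', None) pair is dropped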
def show_df_blk(df, nrow=10, ncol=4):
"""show dataframe with blocks. i: num rows, j: num cols"""
with pd.option_context("display.max_rows", nrow):
for g in pair_wise(range(0, df.shape[1], ncol)):
#print(g)
print(df.iloc[:nrow, slice(*g)], "\n")
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def show_more(df, n=10000):
""" print more because I restrict default print in pandas of display.max_rows"""
with pd.option_context("display.max_rows", n):
#display(df)
print(df)
#return df; if i can preventn to output to console, comment-out it
#get_ipython().run_line_magic('page')
from vega_datasets import data
iris = data('iris')
cars = data('cars')
movies = data('movies')
sp500 = data('sp500')
stock = data('stocks')
import seaborn as sns
#sns.set(style='darkgrid', font='TakaoGothic')
flights = sns.load_dataset("flights")
diamonds = sns.load_dataset("diamonds")
titanic = sns.load_dataset("titanic")
planets = sns.load_dataset("planets")
print('plt, np, pd, sns, altair are loaded')
import requests
from bs4 import BeautifulSoup
| UTF-8 | Python | false | false | 1,983 | py | 19 | 00-ipython-startup.py | 7 | 0.67171 | 0.660615 | 0 | 61 | 31.47541 | 84 |
Samarth-Tripathi/Neural-Network-Projects | 12,919,261,633,829 | 63d6f1f242b89f65d0d817ff378c2c45eb45d519 | 6a4bbb1491bcc17e759839d654f8286e46834fc6 | /Neural_Nets_HW/e4040_hw1_st3029/hw1a.py | 69468d187922132c35f4ee4047d2265fb99b697d | [] | no_license | https://github.com/Samarth-Tripathi/Neural-Network-Projects | fdc63ee4d8d62bca814ae1386b3e2d89dce34e36 | a032fdbb3e5fe6fca1302a983630f2fe7b0164e9 | refs/heads/master | 2021-01-12T02:48:14.457044 | 2017-01-05T17:41:37 | 2017-01-05T17:41:37 | 78,109,281 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from os import walk
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy.linalg as linalg
from PIL import Image
import theano
import theano.tensor as T
'''
Implement the functions that were not implemented and complete the
parts of main according to the instructions in comments.
'''
def reconstructed_image(D,c,num_coeffs,X_mean,n_blocks,im_num,sz):
'''
This function reconstructs an image X_recon_img given the number of
coefficients for each image specified by num_coeffs
'''
'''
Parameters
---------------
c: np.ndarray
a n x m matrix representing the coefficients of all the image blocks.
n represents the maximum dimension of the PCA space.
m is (number of images x n_blocks**2)
D: np.ndarray
an N x n matrix representing the basis vectors of the PCA space
N is the dimension of the original space (number of pixels in a block)
im_num: Integer
index of the image to visualize
X_mean: np.ndarray
a matrix representing the mean block.
num_coeffs: Integer
an integer that specifies the number of top components to be
considered while reconstructing
n_blocks: Integer
number of blocks comprising the image in each direction.
For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4
'''
c_im = c[:num_coeffs,n_blocks*n_blocks*im_num:n_blocks*n_blocks*(im_num+1)]
D_im = D[:,:num_coeffs]
#print c_im.shape
#print D_im.shape
R = np.dot(D_im,c_im) #+ X_mean
X_recon_img = R.T
#print 'X_recon shape = '
#print X_recon_img.shape
#print ''
#print X_mean.shape
X_dim = X_recon_img.size/X_recon_img[0].size
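    # PCA was computed on mean-centred blocks, so add the mean block back to
    # every reconstructed block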
for i in range(X_dim):
X_recon_img[i] += X_mean.flatten()
X_recon_img_2 = np.zeros((256,256))
'''for i in range(256):
for j in range(256):
xi_i = (int) (i/sz)
yi_i = (int) (j/sz)
xj = i - xi_i*sz
yj = j - yi_i*sz
X_recon_img_2[i][j] = X_recon_img[xi_i*n_blocks+yi_i][sz*xj+yj]
'''
#print '******************'
for i in range(X_dim):
p = X_recon_img[i]
p = p.reshape((sz,sz),)
p = p.T
xi_i = (int) (i/n_blocks)
yi_i = i - xi_i*n_blocks
#print i
#print xi_i
#print yi_i
#print ''
for x in range(sz):
for y in range(sz):
X_recon_img_2[(xi_i*sz)+x][(yi_i*sz)+y] = p[x][y]
X_recon_img_2 = X_recon_img_2.T
'''print X_recon_img.shape
print X_recon_img.size/X_recon_img[0].size
X_dim = X_recon_img.size/X_recon_img[0].size
B_dim = (256*256)/(n_blocks*n_blocks)
X_recon_img=X_recon_img.reshape((X_dim,B_dim),)
print X_recon_img.shape'''
'''
# Defining variables
images = T.tensor4('images')
neibs = T.nnet.neighbours.images2neibs(images, neib_shape=(256, 256))
# Constructing theano function
window_function = theano.function([images], neibs, allow_input_downcast=True)
# Input tensor (one image 10x10)
im_val = X_recon_img
# Function application
neibs_val = window_function(im_val)
print neibs_val.shape
print '******* \n'
'''
'''im_new = T.nnet.neighbours.neibs2images(neibs, (8, 8), (256,256))
# Theano function definition
inv_window = theano.function([neibs], im_new)
# Function application
im_new_val = inv_window(X_recon_img)'''
#X_recon_img=X_recon_img.reshape((256,256),)
#TODO: Enter code below for reconstructing the image X_recon_img
#......................
#......................
#X_recon_img = ........
return X_recon_img_2
def plot_reconstructions(D,c,num_coeff_array,X_mean,n_blocks,im_num,sz):
'''
    Plots 9 reconstructions of a particular image using D as the basis matrix and coefficient
vectors from c
Parameters
------------------------
num_coeff_array: Iterable
an iterable with 9 elements representing the number of coefficients
to use for reconstruction for each of the 9 plots
c: np.ndarray
a l x m matrix representing the coefficients of all blocks in a particular image
l represents the dimension of the PCA space used for reconstruction
m represents the number of blocks in an image
D: np.ndarray
an N x l matrix representing l basis vectors of the PCA space
N is the dimension of the original space (number of pixels in a block)
n_blocks: Integer
number of blocks comprising the image in each direction.
For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4
X_mean: basis vectors represent the divergence from the mean so this
matrix should be added to all reconstructed blocks
im_num: Integer
index of the image to visualize
'''
f, axarr = plt.subplots(3,3)
for i in range(3):
for j in range(3):
plt.axes(axarr[i,j])
plt.imshow(reconstructed_image(D,c,num_coeff_array[i*3+j],X_mean,n_blocks,im_num,sz),cmap='Greys_r')
f.savefig('output/hw1a_{0}_im{1}.png'.format(n_blocks, im_num))
plt.close(f)
def plot_top_16(D, sz, imname):
'''
Plots the top 16 components from the basis matrix D.
Each basis vector represents an image block of shape (sz, sz)
Parameters
-------------
D: np.ndarray
N x n matrix representing the basis vectors of the PCA space
N is the dimension of the original space (number of pixels in a block)
        n represents the maximum dimension of the PCA space (assumed to be at least 16)
sz: Integer
The height and width of each block
imname: string
name of file where image will be saved.
'''
#TODO: Obtain top 16 components of D and plot them
#print 'p_16'
d = D[:16]
#print d.shape
g, axarr2 = plt.subplots(4,4)
for i in range(4):
for j in range(4):
plt.axes(axarr2[i,j])
plt.imshow(d[i*4+j].reshape((sz,sz),),cmap='Greys_r')
g.savefig('output/hw1a_top16_{0}.png'.format(sz))
plt.close(g)
print 'complete'
def main():
'''
Read here all images(grayscale) from Fei_256 folder
into an numpy array Ims with size (no_images, height, width).
Make sure the images are read after sorting the filenames
'''
#TODO: Read all images into a numpy array of size (no_images, height, width)
print 'starting 1'
begFileName = "Fei_256/image"
endFileName = ".jpg"
numberOfImages = 200
imageLength, imageBreadth = 256, 256
npImages = np.zeros((numberOfImages,imageLength,imageBreadth))
i_sort = []
s_sort = []
for c in range(0,numberOfImages) :
i_sort.append(str(c))
i_sort.sort()
for c in i_sort:
s_sort.append(c)
#print s_sort
count=0
for c in s_sort :
im = Image.open(begFileName + c + endFileName)
im= im.convert('L') #convert greyscale
data = np.asarray( im )
npImages[count] = data
count+=1
#print npImages
#print ''
szs = [8, 32, 64]
num_coeffs = [range(1, 10, 1), range(3, 30, 3), range(5, 50, 5)]
#szs = [32]
#num_coeffs = [range(3, 30, 3)]
for sz, nc in zip(szs, num_coeffs):
'''
Divide here each image into non-overlapping blocks of shape (sz, sz).
Flatten each block and arrange all the blocks in a
(no_images*n_blocks_in_image) x (sz*sz) matrix called X
'''
#TODO: Write a code snippet that performs as indicated in the above comment
n_blocks_in_image = (imageLength * imageBreadth) / (sz * sz)
X = np.zeros((numberOfImages*n_blocks_in_image, sz * sz ))
X_iter =0
for c in range (0,numberOfImages) :
im = Image.fromarray(npImages[c])
block_iter = 0
for i in range(0,imageLength,sz):
for j in range(0,imageBreadth,sz):
box = (i, j, i+sz, j+sz)
a = im.crop(box)
data = np.asarray( a )
X[(c)*n_blocks_in_image + block_iter] = data.flatten()
block_iter +=1
#print X
#print ''
#print X.shape
#print ''
X_mean = np.mean(X, 0)
X = X - np.repeat(X_mean.reshape(1, -1), X.shape[0], 0)
'''
Perform eigendecomposition on X^T X and arrange the eigenvectors
in decreasing order of eigenvalues into a matrix D
'''
#TODO: Write a code snippet that performs as indicated in the above comment
xtx = np.dot(X.transpose(), X)
eig_val, eig_vec = np.linalg.eigh(xtx)
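        # eigh returns eigenvalues in ascending order; re-sort eigenvalues and
        # eigenvectors in descending order so the top principal components
        # come first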
idx = eig_val.argsort()[::-1]
eig_val = eig_val[idx]
eig_vec = eig_vec[:,idx]
eig_vecs_sorted = eig_vec
eig_vals_sorted = eig_val
#eig_vals_sorted = np.sort(eig_val)
#eig_vecs_sorted = eig_vec[:, eig_val.argsort()]
'''print ''
print 'eig_vecs'
print eig_vecs_sorted.shape
print ''
print 'eig_vals'
print eig_vals_sorted.shape
print '''
D = eig_vecs_sorted
c = np.dot(D.T, X.T)
#print 'D dims ' + str(D.shape)
#print 'c_dims ' + str(c.shape)
print sz, nc
for i in range(0, numberOfImages, 10):
print "Here " + str(i)
plot_reconstructions(D=D, c=c, num_coeff_array=nc, X_mean=X_mean.reshape((sz, sz)), n_blocks=int(256/sz), im_num=i,sz=sz)
plot_top_16(D, sz, imname='output/hw1a_top16_{0}.png'.format(sz))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 10,129 | py | 11 | hw1a.py | 5 | 0.566788 | 0.549215 | 0 | 333 | 29.399399 | 133 |
ddraa/Algorithm | 3,229,815,451,348 | 896b6b043f3f73b1e6c3318fcbdeadd1cc093446 | 69889d51e933b4e8a1d4c8397a317aa1d1365a5a | /Heap/1655.py | 2217c84c802234200bc5e5ced5708715951918cb | [] | no_license | https://github.com/ddraa/Algorithm | a35c87631420ceccec6f7094da6f2b22ddb66c8c | a97c6628d5389f7f93603a2e95ac3b569057f556 | refs/heads/master | 2023-06-25T17:12:39.925821 | 2021-07-18T05:53:28 | 2021-07-18T05:53:28 | 279,240,088 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from heapq import *
N = int(sys.stdin.readline())
min_h, max_h = [], []
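# Two-heap running median: max_h stores the smaller half negated (heapq is a
# min-heap), min_h stores the larger half; max_h keeps the extra element, so
# -max_h[0] is always the lower median.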
for _ in range(N):
n = int(sys.stdin.readline())
if len(max_h) == len(min_h):
heappush(max_h, -n)
else:
heappush(min_h, n)
if min_h and min_h[0] < -max_h[0]:
heappush(min_h, -heappop(max_h))
heappush(max_h, -heappop(min_h))
print(-max_h[0]) | UTF-8 | Python | false | false | 378 | py | 351 | 1655.py | 340 | 0.539683 | 0.531746 | 0 | 17 | 21.294118 | 40 |
KristofferFJ/PE | 13,391,708,054,315 | 630b5bb1cd7c993b555dfcde7d12e2a573d3f6d6 | 6c8f7f5c3e8a61d3d1c63e7c3f19953df496ae8f | /problems/unsolved_problems/test_116_red,_green_or_blue_tiles.py | f61921ad3f7668d71e08cb5607d8aea2f52fc944 | [] | no_license | https://github.com/KristofferFJ/PE | a5b5f8dabbd0ee6e61506899eabf070ae0cad619 | 4715da077fac3bb8f061f6d773166c575c9c7813 | refs/heads/master | 2022-08-07T03:57:54.712661 | 2022-07-24T20:28:24 | 2022-07-24T20:28:24 | 118,385,527 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
"""
Red, green or blue tiles
A row of five grey square tiles is to have a number of its tiles replaced with coloured oblong tiles chosen from red (length two), green (length three), or blue (length four).
If red tiles are chosen there are exactly seven ways this can be done.
If green tiles are chosen there are three ways.
And if blue tiles are chosen there are two ways.
Assuming that colours cannot be mixed there are 7 + 3 + 2 = 12 ways of replacing the grey tiles in a row measuring five units in length.
How many different ways can the grey tiles in a row measuring fifty units in length be replaced if colours cannot be mixed and at least one coloured tile must be used?
NOTE: This is related to Problem 117.
"""
class Test(unittest.TestCase):
def test(self):
pass
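
# Hedged sketch (not part of the original stub): with a single tile length m,
# the number of arrangements of a length-n row, including the all-grey one,
# satisfies f(k) = f(k-1) + f(k-m) with f(k) = 1 for k < m; subtracting the
# all-grey arrangement and summing over m in {2, 3, 4} gives the answer.
def ways(n, m):
    f = [1] * (n + 1)
    for k in range(m, n + 1):
        f[k] = f[k - 1] + f[k - m]
    return f[n] - 1  # at least one coloured tile must be used

# sanity check against the statement above: 7 + 3 + 2 = 12 for a row of five
assert sum(ways(5, m) for m in (2, 3, 4)) == 12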
| UTF-8 | Python | false | false | 804 | py | 625 | test_116_red,_green_or_blue_tiles.py | 624 | 0.756219 | 0.746269 | 0 | 18 | 43.666667 | 175 |
vendetta546/codewars | 13,108,240,187,998 | 59672421e2f6d517b2b92d204f2989c1ada7209c | 241a629d2d0f0435e0a25387ae061f0e62beca5a | /Python/6KYU/FizzBuzz.py | 350c06e608d5058950b3c5bdb1baedac40f65fed | [] | no_license | https://github.com/vendetta546/codewars | b5a341c129e671b41cc964f63cbf92b68a7f9b40 | 26cc8282be79c865a44a9b74350fa86ac0c3dfed | refs/heads/master | 2020-09-14T12:53:10.861919 | 2020-03-27T09:21:12 | 2020-03-27T09:21:12 | 223,132,784 | 2 | 0 | null | true | 2019-11-21T09:07:03 | 2019-11-21T09:07:02 | 2019-11-18T22:02:44 | 2018-07-11T21:08:11 | 5,249 | 0 | 0 | 0 | null | false | false | def solution(number):
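    # among 1..number-1: a = multiples of 3 only, b = multiples of 5 only,
    # c = multiples of both 3 and 5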
a=0;b=0;c=0
for x in range(1,number):
if x%15==0:
c += 1
elif x%5==0:
b += 1
elif x%3==0:
a += 1
return [a,b,c] | UTF-8 | Python | false | false | 206 | py | 614 | FizzBuzz.py | 149 | 0.368932 | 0.300971 | 0 | 10 | 19.7 | 29 |
jbathel/holbertonschool-higher_level_programming | 14,164,802,164,131 | cdf0388ae5529800da28d58df726e33f2bafaa64 | 87b12ec1bea4008b0f51ed2c2be632e3121a0d04 | /0x08-python-more_classes/0-rectangle.py | e27ebda8f86e4c6a936e9a6135161432f601c62c | [] | no_license | https://github.com/jbathel/holbertonschool-higher_level_programming | 181d24de8df362842ee3b3a632dd49c38966a662 | 10afef612d8544e55a7c295ba90b1b0e2f2171b8 | refs/heads/master | 2020-07-22T23:51:09.947518 | 2020-02-14T06:42:04 | 2020-02-14T06:42:04 | 207,372,715 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
class Rectangle:
"""Represents an empty class Rectangle"""
pass
| UTF-8 | Python | false | false | 91 | py | 37 | 0-rectangle.py | 31 | 0.681319 | 0.67033 | 0 | 4 | 21.75 | 45 |