Dataset schema:

| field | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 7-111 |
| __id__ | int64 | 16.6k - 19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5-151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k - 687M, nullable |
| star_events_count | int64 | 0 - 209k |
| fork_events_count | int64 | 0 - 110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0 - 10.2M, nullable |
| gha_stargazers_count | int32 | 0 - 178k, nullable |
| gha_forks_count | int32 | 0 - 88.9k, nullable |
| gha_open_issues_count | int32 | 0 - 2.72k, nullable |
| gha_language | string | lengths 1-16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10 - 2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10 - 2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1 - 202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1 - 202k |
| alphanum_fraction | float64 | 0.26 - 0.89 |
| alpha_fraction | float64 | 0.2 - 0.89 |
| hex_fraction | float64 | 0 - 0.09 |
| num_lines | int32 | 1 - 93.6k |
| avg_line_length | float64 | 4.57 - 103 |
| max_line_length | int64 | 7 - 931 |
repo_name: drcloud/junkyard | __id__: 16,844,861,744,204 | path: /drcloud/client/core.py
blob_id: 7f932366c755da66a60ad94fab2c299789ba37ea | directory_id: d7760487c64cdd531b20792a4ce231e7320a7d2e | content_id: e84ff4bdb9d4eaab6636d5c31fa6a9a70798a4f7
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/drcloud/junkyard
snapshot_id: 1fdbbffcbef9d7e9f2894becd9327067f347f325 | revision_id: 4f2ba6aa67a4fabb46ccd1da08914300f7b756f0 | branch_name: refs/heads/master
visit_date: 2021-01-10T12:18:43.207313 | revision_date: 2016-02-29T23:52:50 | committer_date: 2016-02-29T23:52:50
github_id: 52,833,340 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
class System(object):
def __init__(self):
raise NotImplementedError()
def configure(self, redo=False):
"""Initiate system startup and configuration.
Use ``.stabilize()`` to wait for startup to finish. Separating calls
to ``.configure()`` and ``.stabilize()`` allows many systems to be
started in parallel.
"""
raise NotImplementedError()
def retire(self, timeout=None):
"""Release system resources in an orderly manner.
If the system is in the middle of starting, it will finish startup,
stabilize and then shutdown.
"""
raise NotImplementedError()
def stabilize(self, timeout=None):
"""Wait for system state to catch up with specification.
"""
raise NotImplementedError()
def cancel(self):
"""Release system resources with all haste.
"""
raise NotImplementedError()
def status(self):
"""Describes system status with a short status code.
"""
raise NotImplementedError()
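# --- Illustrative sketch (not part of the original file): the configure()
# docstring above says that separating configure() from stabilize() lets
# many systems be started in parallel. A minimal demo with a toy subclass:
class NullSystem(System):
    """Toy System subclass that starts instantly, for illustration only."""
    def __init__(self):
        self.started = False
    def configure(self, redo=False):
        self.started = True  # kick off (here: trivially instant) startup
    def stabilize(self, timeout=None):
        assert self.started  # wait for state to catch up with specification
if __name__ == '__main__':
    # parallel startup pattern: configure every system first, then stabilize all
    systems = [NullSystem() for _ in range(3)]
    for s in systems:
        s.configure()
    for s in systems:
        s.stabilize(timeout=60)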
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,070 | extension: py
num_repo_files: 64 | filename: core.py | num_lang_files: 49
alphanum_fraction: 0.614953 | alpha_fraction: 0.614953 | hex_fraction: 0 | num_lines: 35 | avg_line_length: 29.542857 | max_line_length: 76
repo_name: shashankkmr34/LMS_PROJECT | __id__: 8,864,812,509,640 | path: /myapp/views.py
blob_id: 4e23348e75557431e35f2ee0db424873ef945ec6 | directory_id: de20ecc27ae2d5c4d5df6c92c326cc6bc835bbb6 | content_id: f497b04840e6c8405372adab4180dce1615bb34f
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/shashankkmr34/LMS_PROJECT
snapshot_id: 167cadac69472677686a8d1830d3f2ad629396ac | revision_id: ba8e19f4261ac54058aef5b06f2471b98cd2b792 | branch_name: refs/heads/master
visit_date: 2022-11-20T20:06:10.322404 | revision_date: 2020-07-09T20:38:43 | committer_date: 2020-07-09T20:38:43
github_id: 278,355,054 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
from django.shortcuts import render, redirect
from .models import Sales
from .models import Leads
from .forms import SalesForm
from .forms import LeadsForm
def welcome(request):
return render(request, "welcome.html")
def load_form_sales(request):
    form = SalesForm()  # instantiate the form rather than passing the class
    return render(request, "index.html", {'form': form})
def load_form_leads(request):
    form = LeadsForm()
    return render(request, "index_leads.html", {'form': form})
def addsales(request):
    form = SalesForm(request.POST)
    if form.is_valid():  # validate before saving to avoid an exception on bad input
        form.save()
    return redirect('/show')
def addleads(request):
    form = LeadsForm(request.POST)
    if form.is_valid():
        form.save()
    return redirect('/show_leads')
def show(request):
    sales = Sales.objects.all()  # call the manager method to get a queryset
    return render(request, 'show.html', {'sales': sales})
def show_leads(request):
    leads = Leads.objects.all()
    return render(request, 'show_leads.html', {'leads': leads})
def edit(request, id):
sales = Sales.objects.get(id=id)
return render(request, 'edit.html', {'sales': sales})
def edit_leads(request, id):
leads = Leads.objects.get(id=id)
return render(request, 'edit_leads.html', {'leads': leads})
def update(request, id):
    sales = Sales.objects.get(id=id)
    form = SalesForm(request.POST, instance=sales)
    if form.is_valid():  # guard the save, as in addsales
        form.save()
    return redirect('/show')
def update_leads(request, id):
    leads = Leads.objects.get(id=id)
    form = LeadsForm(request.POST, instance=leads)
    if form.is_valid():
        form.save()
    return redirect('/show_leads')
def delete(request, id):
sales = Sales.objects.get(id=id)
sales.delete()
return redirect('/show')
def delete_leads(request, id):
leads = Leads.objects.get(id=id)
leads.delete()
return redirect('/show_leads')
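# --- Hypothetical sketch (not from this repo): the redirects above imply
# URL patterns roughly like the following urls.py wiring; routes and names
# here are assumptions for illustration only.
# from django.urls import path
# from . import views
# urlpatterns = [
#     path('', views.welcome),
#     path('show', views.show),              # target of redirect('/show')
#     path('show_leads', views.show_leads),  # target of redirect('/show_leads')
#     path('edit/<int:id>', views.edit),
#     path('update/<int:id>', views.update),
#     path('delete/<int:id>', views.delete),
# ]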
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,713 | extension: py
num_repo_files: 13 | filename: views.py | num_lang_files: 5
alphanum_fraction: 0.673672 | alpha_fraction: 0.673672 | hex_fraction: 0 | num_lines: 75 | avg_line_length: 21.84 | max_line_length: 63
repo_name: kylepietz/nba_standings | __id__: 19,636,590,515,066 | path: /nba_rich-poor.py
blob_id: 8074cac52b5d5f422d0b2f86fd576464e642775a | directory_id: 6ae77810b9a252c9a4b12e1ae06dda3509dc4326 | content_id: 21079fd66b98f0b6fcfec915bdc1ccb84d261760
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/kylepietz/nba_standings
snapshot_id: ae590371702699ec1a7245ef85c9d64a7d4e6d72 | revision_id: 8572aba809c47701c0e7563d182a3de7723a5e3e | branch_name: refs/heads/master
visit_date: 2021-01-21T11:53:43.706066 | revision_date: 2017-05-19T03:05:36 | committer_date: 2017-05-19T03:05:36
github_id: 91,759,589 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
from bs4 import BeautifulSoup
import urllib.request
import html5lib
#import pandas as pd
#import time
import statistics
from matplotlib import pyplot as plt
D = {}
def getRecords(year):
url = \
"http://www.landofbasketball.com/yearbyyear/" + str(year) + "_" + str(year+1) + "_standings.htm"
req = urllib.request.Request(url, headers={'User-Agent': 'Safari/10.1'})
soup = BeautifulSoup(urllib.request.urlopen(req).read(), "html5lib")
#print(soup.find_all('div', 'rd-100-50'))
winsPerTeam = []
lossesPerTeam = []
    # NOTE: `standings` is computed here but never used; the loop below scans
    # every row, and the 'W'/'L' checks filter out header rows
    if year <= 1970:
        standings = soup.find_all('tr')
    else:
        standings = soup.find_all('tr')[:2]
    for row in soup.find_all('tr'):
if len(row) >= 7:
#print(row.contents[5].text)
if row.contents[5].text != 'W':
winsPerTeam += [int(row.contents[5].text)]
if row.contents[7].text != 'L':
lossesPerTeam += [int(row.contents[7].text)]
totalGames = winsPerTeam[0] + lossesPerTeam[0]
totalTeams = len(winsPerTeam)
if year == 1954:
#taking away exceptional case
totalTeams -= 1
winsPerTeam = winsPerTeam[:-1]
lossesPerTeam = lossesPerTeam[:-1]
D[year] = (winsPerTeam, lossesPerTeam, totalGames, totalTeams)
difList = []
for y in range(1946,2017):
getRecords(y)
dif = statistics.stdev(D[y][0])/D[y][2]
difList += [dif]
plt.plot(range(1946,2017),difList)
plt.xlabel('Year')
plt.ylabel('Win Gap')
plt.title('Rich/Poor Gap throughout NBA History')
plt.show()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,588 | extension: py
num_repo_files: 1 | filename: nba_rich-poor.py | num_lang_files: 1
alphanum_fraction: 0.602015 | alpha_fraction: 0.570529 | hex_fraction: 0 | num_lines: 57 | avg_line_length: 26.807018 | max_line_length: 104
repo_name: fga-eps-mds/2018.1-Dr-Down | __id__: 12,412,455,497,416 | path: /drdown/appointments/views/view_request.py
blob_id: 0f200321d9e9c0056d11e0ba8d3e88bdd7d44726 | directory_id: 102a9e14dc7d86c4b397101b426c6846a6949d5d | content_id: 7d84e5aa7810652c4bbee4f5bbcbdeb1b3ab6853
detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/fga-eps-mds/2018.1-Dr-Down
snapshot_id: 2371535227aed7c09bbae9fd8871b8eac8068c05 | revision_id: 3423374360105b06ac2c57a320bf2ee8deaa08a3 | branch_name: refs/heads/develop
visit_date: 2023-04-13T18:08:44.880516 | revision_date: 2018-06-25T23:36:27 | committer_date: 2018-06-25T23:36:27
github_id: 124,143,479 | star_events_count: 3 | fork_events_count: 13
gha_license_id: MIT | gha_fork: false | gha_event_created_at: 2021-03-29T17:31:49 | gha_created_at: 2018-03-06T21:55:37 | gha_updated_at: 2019-09-09T19:23:58 | gha_pushed_at: 2021-03-29T17:31:48
gha_size: 16,592 | gha_stargazers_count: 9 | gha_forks_count: 8 | gha_open_issues_count: 7 | gha_language: Python | gha_archived: false | gha_disabled: false
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView
from django.views.generic import UpdateView
from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from search_views.search import SearchListView
from drdown.users.models.model_patient import Patient
from drdown.users.models.model_health_team import HealthTeam
from search_views.filters import BaseFilter
from drdown.appointments.models import AppointmentRequest
from drdown.appointments.forms.requests_form import RequestSearchForm, \
RequestForm
class RequestFilter(LoginRequiredMixin, BaseFilter):
search_fields = {
'search_speciality': ['speciality'],
'search_name': ['doctor__user__name', 'patient__user__name'],
}
class RequestListView(LoginRequiredMixin, SearchListView):
model = AppointmentRequest
template_name = 'appointments/request_list.html'
form_class = RequestSearchForm
filter_class = RequestFilter
paginate_by = 10
def prepare_queryset(self, request):
user = request.user
if hasattr(user, 'patient'):
queryset = AppointmentRequest.objects.filter(
patient=user.patient
).order_by('id')
elif hasattr(user, 'responsible'):
queryset = AppointmentRequest.objects.filter(
patient__in=user.responsible.patient_set.all()
).order_by('id')
elif hasattr(user, 'employee'):
queryset = AppointmentRequest.objects.filter(
).order_by('risk', 'id')
else:
queryset = AppointmentRequest.objects.none()
return queryset
def get_queryset(self):
return self.prepare_queryset(self.request)
class RequestCreateView(LoginRequiredMixin, CreateView):
model = AppointmentRequest
template_name = 'appointments/request_form.html'
form_class = RequestForm
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
    def form_valid(self, form):
        # map each speciality to the corresponding risk priority on the patient
        risk_by_speciality = {
            AppointmentRequest.CARDIOLOGY: 'priority_cardiology',
            AppointmentRequest.NEUROLOGY: 'priority_neurology',
            AppointmentRequest.PEDIATRICS: 'priority_pediatrics',
            AppointmentRequest.SPEECH_THERAPHY: 'priority_speech_theraphy',
            AppointmentRequest.PHYSIOTHERAPY: 'priority_physiotherapy',
            AppointmentRequest.PSYCHOLOGY: 'priority_psychology',
            AppointmentRequest.GENERAL_PRACTITIONER: 'priority_general_practitioner',
        }
        attr = risk_by_speciality.get(form.instance.speciality)
        # risk defaults to 5 when the speciality has no mapped priority
        form.instance.risk = getattr(form.instance.patient.risk, attr) if attr else 5
        return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super(RequestCreateView, self).get_context_data(**kwargs)
context['health_team'] = HealthTeam.objects.all()
if hasattr(self.request.user, 'patient'):
context['patients'] = Patient.objects.filter(
user=self.request.user)
elif hasattr(self.request.user, 'responsible'):
context['patients'] = \
self.request.user.responsible.patient_set.all()
return context
def load_doctors(request):
speciality = request.GET.get('speciality')
doctors = HealthTeam.objects.filter(
speciality=speciality
).order_by('user__name')
return render(request,
'appointments/doctors_dropdown_list_options.html',
{'doctors': doctors}
)
class RequestUpdateView(LoginRequiredMixin, UpdateView):
model = AppointmentRequest
template_name = 'appointments/request_form.html'
fields = [
'speciality',
'doctor',
'patient',
'shift',
'day',
'motive',
]
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
class RequestDeleteView(LoginRequiredMixin, DeleteView):
model = AppointmentRequest
template_name = 'appointments/request_confirm_delete.html'
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
class RequestUpdateStatusView(LoginRequiredMixin, UpdateView):
model = AppointmentRequest
template_name = 'appointments/request_confirm_cancel.html'
fields = ['observation']
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
def form_valid(self, form):
form.instance.status = AppointmentRequest.DECLINED
form.save()
return super(RequestUpdateStatusView, self).form_valid(form)
class RequestAfterResultDeleteView(LoginRequiredMixin, DeleteView):
model = AppointmentRequest
template_name = 'appointments/request_after_result_confirm_delete.html'
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,462 | extension: py
num_repo_files: 312 | filename: view_request.py | num_lang_files: 154
alphanum_fraction: 0.674478 | alpha_fraction: 0.673929 | hex_fraction: 0 | num_lines: 160 | avg_line_length: 33.1375 | max_line_length: 75
repo_name: IntegrCiTy/DemoGA | __id__: 15,375,982,926,793 | path: /wrappers/power_network.py
blob_id: 41d01e053f4fd764f84be7edaa2a2f46bf24ab2e | directory_id: 643285a987e5490d9b6c6a37240ce8088aa6094a | content_id: 24a4495edd65ec4f689df857cc08dc66a3f926bb
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/IntegrCiTy/DemoGA
snapshot_id: 9a93ee957bd56aeca450d9461b886e23a1e24cef | revision_id: f00d7a5ad35c170b468ed1b5b78b4c3c1c2f474f | branch_name: refs/heads/master
visit_date: 2020-12-30T11:29:17.134667 | revision_date: 2017-08-29T08:22:48 | committer_date: 2017-08-29T08:22:48
github_id: 91,572,349 | star_events_count: 1 | fork_events_count: 0 | gha_* fields: null
import sys
import pyfmi
import redis
from obnl.client import ClientNode
import numpy as np
class PowerNetwork(ClientNode):
def __init__(self, host, name, input_attributes=None, output_attributes=None, is_first=False):
super(PowerNetwork, self).__init__(host, name, input_attributes, output_attributes, is_first)
self.redis = redis.StrictRedis(host=host, port=6379, db=0)
def step(self, current_time, time_step):
print('----- ' + self.name + ' -----')
print(self.name, 'time_step', time_step)
print(self.name, 'current_time', current_time)
print(self.name, 'inputs', self.input_values)
p = sum(self.input_values.values())
print(self.name, 'p elec tot', p)
self.redis.rpush('OUT_' + self.name + '_' + 'p_elec_tot', p)
self.redis.rpush('OUT_' + self.name + '_' + 'p_elec_tot' + '_time', current_time)
print('=============')
if __name__ == "__main__":
net = PowerNetwork(host=sys.argv[1],
name='PowerNetwork',
input_attributes=["p_elec_hp_central", "p_elec_hp_cooling", "p_elec_hp_heating"],
is_first=True)
print('Start power network node')
net.start()
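# --- Illustrative sketch (assumes a reachable Redis on localhost and a
# finished run): reading back the series that step() pushes under the
# 'OUT_<node>_<attribute>' key convention used above.
# r = redis.StrictRedis(host='localhost', port=6379, db=0)
# values = [float(v) for v in r.lrange('OUT_PowerNetwork_p_elec_tot', 0, -1)]
# times = [float(t) for t in r.lrange('OUT_PowerNetwork_p_elec_tot_time', 0, -1)]
# for t, v in zip(times, values):
#     print(t, v)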
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,236 | extension: py
num_repo_files: 16 | filename: power_network.py | num_lang_files: 9
alphanum_fraction: 0.580906 | alpha_fraction: 0.576052 | hex_fraction: 0 | num_lines: 36 | avg_line_length: 33.333333 | max_line_length: 104
repo_name: Uche-Clare/python-challenge-solutions | __id__: 6,536,940,228,299 | path: /Ekeopara_Praise/Phase 2/STRINGS/Day34 Tasks/Task7.py
blob_id: 51c449e0fdff295b79f1f01fa22a6f25bec10a95 | directory_id: 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | content_id: e7e2508f5ee0f0702bb6a95b592d8d5c2f1319cf
detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/Uche-Clare/python-challenge-solutions
snapshot_id: 17e53dbedbff2f33e242cf8011696b3059cd96e9 | revision_id: 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | branch_name: refs/heads/master
visit_date: 2022-11-13T15:06:52.846937 | revision_date: 2020-07-10T20:59:37 | committer_date: 2020-07-10T20:59:37
github_id: 266,404,840 | star_events_count: 1 | fork_events_count: 0
gha_license_id: MIT | gha_fork: true | gha_event_created_at: 2020-05-23T19:24:56 | gha_created_at: 2020-05-23T19:24:55 | gha_updated_at: 2020-05-23T18:42:07 | gha_pushed_at: 2020-05-23T18:42:04
gha_size: 5,035 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 0 | gha_language: null | gha_archived: false | gha_disabled: false
'''7.Write a Python program to remove spaces from a given string. '''
def remove_spaces(str1):
str1 = str1.replace(' ','')
return str1
print(remove_spaces("w 3 res ou r ce"))
print(remove_spaces("a b c"))
#Reference: w3resource
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 237 | extension: py
num_repo_files: 1,262 | filename: Task7.py | num_lang_files: 1,261
alphanum_fraction: 0.670886 | alpha_fraction: 0.64135 | hex_fraction: 0 | num_lines: 10 | avg_line_length: 22.8 | max_line_length: 69
repo_name: svboeing/CNN-Sentiment | __id__: 13,443,247,673,993 | path: /unjoint CNN.py
blob_id: e7afb259dd3809cdc4d0637b04e6d37384608c38 | directory_id: 82a2bf6dbd9dc9b5eccf6c426d321c1a922eb9a5 | content_id: 6d86310beb43fe39f15303a1fdb1bc755a695350
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/svboeing/CNN-Sentiment
snapshot_id: a612f50a84277f2e60d14ed2665a7447f85f4775 | revision_id: 905ceb7e2a46c07552552798ef69423c67540a85 | branch_name: refs/heads/master
visit_date: 2020-03-26T23:55:41.874074 | revision_date: 2019-02-11T07:49:06 | committer_date: 2019-02-11T07:49:06
github_id: 145,578,509 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
import numpy as np
import tensorflow as tf
from collections import Counter
import random
from keras.datasets import imdb
from keras.preprocessing import sequence
from sklearn.metrics import f1_score
n_embedding = 200
num_global_batches = 5000
epochs = 15
# Set CNN parameters:
sent_max_features = 5000
sent_maxlen = 400
# sent_batch_size = 32
# sent_embedding_dims = 50
n_filters = 250
sent_kernel_size = 3
sent_hidden_dims = 250
sent_learning_rate = 0.003
sent_training_steps = 2
sent_width = 3
unique_sorted_words = np.load("/home/boeing/PycharmProjects/CNN/JOINT_sorted_words.npy")
unique = set(unique_sorted_words)
vocab_to_int = {}
int_to_vocab = {}
for i, word in enumerate(unique_sorted_words):
    vocab_to_int[word] = i + 1  # index 0 is reserved, same convention as the tagger
    int_to_vocab[i + 1] = word
# CNN PART
(sent_x_train_raw, sent_y_train), (sent_x_test_raw, sent_y_test) = imdb.load_data(num_words=sent_max_features, index_from=3)
# print(len(x_train), 'train sequences')
# print(len(x_test), 'test sequences')
imdb_w_to_id = imdb.get_word_index()
imdb_w_to_id = {k:(v + 3) for k, v in imdb_w_to_id.items()}
imdb_w_to_id["<PAD>"] = 0
imdb_w_to_id["<START>"] = 1
imdb_w_to_id["<UNK>"] = 2
imdb_id_to_w = {value:key for key, value in imdb_w_to_id.items()}
sent_x_train, sent_x_test = [], []
for i in range(len(sent_x_train_raw)):
sent_x_train.append([vocab_to_int[imdb_id_to_w[id]] for id in sent_x_train_raw[i] if imdb_id_to_w[id] in unique])
sent_x_test.append([vocab_to_int[imdb_id_to_w[id]] for id in sent_x_test_raw[i] if imdb_id_to_w[id] in unique])
#now imdb dataset consists of correct ids of words that appear in text8 -lookup will work
# print('Pad sequences (samples x time)')
sent_x_train = sequence.pad_sequences(sent_x_train, maxlen=sent_maxlen)
sent_x_test = sequence.pad_sequences(sent_x_test, maxlen=sent_maxlen)
def sent_neural_net(x):
x = tf.nn.embedding_lookup(embedding, x)
x = tf.layers.dropout(inputs=x, rate=0.2)
x = tf.nn.conv1d(value=x, filters=filters, stride=1, padding='VALID')
x = tf.nn.relu(features=x)
x = tf.layers.max_pooling1d(inputs=x, pool_size=sent_maxlen - 2, strides=1,
padding='VALID')
x = tf.squeeze(x, [1])
    x = tf.layers.dense(inputs=x, units=sent_hidden_dims, activation=tf.nn.relu)  # tf.layers.dense expects a callable activation, not a string
x = tf.layers.dropout(inputs=x, rate=0.2)
x = tf.layers.dense(inputs=x, units=1)
return x
# COMMON EMBEDDINGS
train_graph = tf.Graph()
n_vocab = len(int_to_vocab) + 1
with train_graph.as_default():
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))  # n_vocab is len + 1 because index 0 is reserved
# CNN graph nodes
#sent_a = tf.Variable(tf.random_uniform([n_embedding,], -1, 1))
#sent_a_embedding = tf.multiply(embedding, sent_a)
sent_X = tf.placeholder("int32", [None, sent_maxlen])
sent_Y = tf.placeholder("float32", [None, ])
xavier_init = tf.contrib.layers.xavier_initializer()
# word_embs = tf.Variable(xavier_init([max_features, embedding_dims]))
filters = tf.Variable(xavier_init([sent_width, n_embedding, n_filters])) # embedding_dims
# COMMON
with train_graph.as_default():
sent_logits = tf.squeeze(sent_neural_net(sent_X), [1])
sent_batch_prediction = tf.nn.sigmoid(sent_logits)
# Define loss and optimizer
sent_loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=sent_logits, labels=sent_Y))
sent_optimizer = tf.train.AdamOptimizer(learning_rate=sent_learning_rate)
sent_train_op = sent_optimizer.minimize(sent_loss_op)
def get_joint_batches(sent_x_train, sent_y_train):
sent_batch_size = len(sent_x_train) // num_global_batches
#tagger_batch_size = len(tagger_train_words) // num_global_batches
# only full batches
sent_x_train = sent_x_train[:num_global_batches * sent_batch_size]
sent_y_train = sent_y_train[:num_global_batches * sent_batch_size]
for i in range(num_global_batches): # because of tagger: it looks forwards
# cnn part
sent_x = sent_x_train[i * sent_batch_size:(i + 1) * sent_batch_size]
sent_y = sent_y_train[i * sent_batch_size:(i + 1) * sent_batch_size]
if i % 100 == 0:
print("batch number",i)
yield sent_x, sent_y
sent_eval_batch_size = 64
#with train_graph.as_default():
# saver = tf.train.Saver()
#print(len(train_words), len(sent_x_train), len(sent_y_train), len(tagger_train_words), len(train_labels_id))
with tf.Session(graph=train_graph) as sess:
#iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
#train EVERYTHING
for e in range(1, epochs + 1):
BATCH = get_joint_batches(sent_x_train, sent_y_train)
for x_2, y_2 in BATCH:
sess.run(sent_train_op, feed_dict={sent_X: x_2, sent_Y: y_2})
#iteration += 1
#evaluate CNN
sent_prediction = np.array([])
i = 0
while i * sent_eval_batch_size < len(sent_x_test):
x_batch = sent_x_test[i * sent_eval_batch_size:(i + 1) * sent_eval_batch_size]
y_batch = sent_y_test[i * sent_eval_batch_size:(i + 1) * sent_eval_batch_size]
i += 1
a = sess.run(sent_batch_prediction, feed_dict={sent_X: x_batch, sent_Y: y_batch})
sent_prediction = np.append(sent_prediction, np.asarray(a))
# Obtain label predictions by rounding predictions to int
sent_prediction = [int(round(t)) for t in sent_prediction]
# Use F1 metric:
F1 = f1_score(y_true=sent_y_test, y_pred=sent_prediction, average=None)
print("SENTIMENT F1 score: ", F1)
sess.close()
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,939 | extension: py
num_repo_files: 16 | filename: unjoint CNN.py | num_lang_files: 12
alphanum_fraction: 0.648762 | alpha_fraction: 0.632935 | hex_fraction: 0 | num_lines: 172 | avg_line_length: 33.52907 | max_line_length: 132
repo_name: gzimin/web-service | __id__: 12,335,146,107,950 | path: /shop/migrations/0004_auto_20180715_1442.py
blob_id: 17cf9ed25ddf22cb7146a7854204ddb8b599f19f | directory_id: ac5ea77a3a65126862849105d0a99412cda2bbab | content_id: 278e5c08a9037c6953edd8f66322178a8f3d9f3e
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/gzimin/web-service
snapshot_id: c9e5cc0c645da4ab0cf8ba08ef4a10270600735a | revision_id: c3869ec1d501546a2d84c8b2df86c511ac6b9fce | branch_name: refs/heads/master
visit_date: 2018-10-22T18:31:46.212674 | revision_date: 2018-07-21T06:40:58 | committer_date: 2018-07-21T06:40:58
github_id: 140,919,623 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
# Generated by Django 2.0.7 on 2018-07-15 10:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20180715_1240'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='desription',
new_name='description',
),
]
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 370 | extension: py
num_repo_files: 14 | filename: 0004_auto_20180715_1442.py | num_lang_files: 7
alphanum_fraction: 0.575676 | alpha_fraction: 0.491892 | hex_fraction: 0 | num_lines: 18 | avg_line_length: 19.555556 | max_line_length: 47
repo_name: kyounginbaek/Openarena_website | __id__: 4,904,852,655,345 | path: /main/migrations/0036_auto_20161101_1503.py
blob_id: 02df004cab124a54a0b607fb032e340494b0ee25 | directory_id: 0b4f8d15b3c0d8e878ef8ec2d0e060499286714f | content_id: b101a87e4fdade3931f14b7fbf62e2a358851f07
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/kyounginbaek/Openarena_website
snapshot_id: 0f25da947add8a9119c3877b32470f75ba614da5 | revision_id: 818acce8c9def5f5673cd8dbc8e8d8fff6b1a1ce | branch_name: refs/heads/master
visit_date: 2022-10-01T18:45:16.123631 | revision_date: 2017-07-08T09:21:42 | committer_date: 2017-07-08T09:21:42
github_id: 62,291,671 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-01 06:03
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('main', '0035_auto_20161101_1442'),
]
operations = [
migrations.AlterField(
model_name='funding',
name='when',
field=models.CharField(default=datetime.datetime(2016, 11, 1, 6, 3, 40, 976263, tzinfo=utc), max_length=40),
),
migrations.AlterField(
model_name='making',
name='summary',
field=models.TextField(default='', max_length=400),
),
migrations.AlterField(
model_name='making',
name='when',
field=models.CharField(default=datetime.datetime(2016, 11, 1, 6, 3, 40, 977141, tzinfo=utc), max_length=40),
),
migrations.AlterField(
model_name='participant',
name='when',
field=models.CharField(default=datetime.datetime(2016, 11, 1, 6, 3, 40, 979723, tzinfo=utc), max_length=40),
),
migrations.AlterField(
model_name='reply',
name='when',
field=models.CharField(default=datetime.datetime(2016, 11, 1, 6, 3, 40, 978467, tzinfo=utc), max_length=40),
),
migrations.AlterField(
model_name='video',
name='when',
field=models.CharField(default=datetime.datetime(2016, 11, 1, 6, 3, 40, 979001, tzinfo=utc), max_length=40),
),
]
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,624 | extension: py
num_repo_files: 201 | filename: 0036_auto_20161101_1503.py | num_lang_files: 159
alphanum_fraction: 0.577586 | alpha_fraction: 0.497537 | hex_fraction: 0 | num_lines: 47 | avg_line_length: 33.553191 | max_line_length: 120
repo_name: cqann/PRGM | __id__: 1,331,439,891,090 | path: /Python/code_comp/Codeforces/200110_Round_613_d2/prob6.py
blob_id: 7eb9f3abf009cf92d14ecb72b374589583cf113f | directory_id: 89f0df65abe01e273fd7cf0606727c777352ba47 | content_id: a55e7141732fe17934f01f24947d2d8d576ace8c
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/cqann/PRGM
snapshot_id: 486122601b959cfbf7d9d2dc2a37caa858cf15a8 | revision_id: 7387dafb65895528c042a3f1ab605fa5325056ce | branch_name: refs/heads/master
visit_date: 2022-02-16T00:59:32.342327 | revision_date: 2022-01-27T16:55:46 | committer_date: 2022-01-27T16:55:46
github_id: 226,111,892 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_fork: false | gha_event_created_at: 2020-11-16T17:41:44 | gha_created_at: 2019-12-05T13:45:21 | gha_updated_at: 2020-11-16T13:04:40 | gha_pushed_at: 2020-11-16T17:41:43
gha_size: 129,795 | gha_stargazers_count: 0 | gha_forks_count: 1 | gha_open_issues_count: 0 | gha_language: Python | gha_archived: false | gha_disabled: false
def gcd(a,b):
if b == 0:
return a
return gcd(b, a%b)
def lcm(a,b):
return int((a*b)/gcd(a,b))
factors = {1: [1]}
def factors_of(n):
    if n in factors:
        return factors[n]
    ret = [n]
    for i in range(n - 1, 0, -1):  # start below n, otherwise the recursion never terminates
        if n % i == 0:  # first hit is the largest proper divisor of n
            factors[n] = factors_of(i) + ret
            return factors[n]
def is_prime(n):  # helper added: the code below calls is_prime but never defined it
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True
n = int(input())
prime_found = False
big1 = 0
big2 = 0
print(factors_of(56))
for a in input().split(" "):
    a = int(a)  # compare numbers, not strings
    if a > big1:
        if prime_found:
            if is_prime(a):
                big2 = big1
                big1 = a
        else:
            big2 = big1
            big1 = a
            if is_prime(big2):
                prime_found = True
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 690 | extension: py
num_repo_files: 467 | filename: prob6.py | num_lang_files: 383
alphanum_fraction: 0.455072 | alpha_fraction: 0.426087 | hex_fraction: 0 | num_lines: 34 | avg_line_length: 19.235294 | max_line_length: 44
repo_name: coveooss/json-schema-for-humans | __id__: 1,236,950,597,824 | path: /json_schema_for_humans/jinja_filters.py
blob_id: cb3d431d8571faa890cc927f389df08c06424581 | directory_id: 04667453a54bbefbbfc5b8cb5c2fb5392f9ca8c5 | content_id: 4c19e4ab9a1cea58b03e497324f7ad35f66dc427
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_url: https://github.com/coveooss/json-schema-for-humans
snapshot_id: 74c375d385d124c6736ea7fe510a9b7a5015c13c | revision_id: 6a467492d697c4a44a409409ab0391a4a4acd291 | branch_name: refs/heads/main
visit_date: 2023-08-09T22:06:21.609887 | revision_date: 2023-07-17T13:52:54 | committer_date: 2023-07-17T13:52:54
github_id: 202,809,346 | star_events_count: 371 | fork_events_count: 81
gha_license_id: NOASSERTION | gha_fork: false | gha_event_created_at: 2023-09-07T14:51:52 | gha_created_at: 2019-08-16T22:58:32 | gha_updated_at: 2023-09-06T15:41:07 | gha_pushed_at: 2023-09-07T14:51:50
gha_size: 2,169 | gha_stargazers_count: 371 | gha_forks_count: 76 | gha_open_issues_count: 38 | gha_language: Python | gha_archived: false | gha_disabled: false
import re
import json
import yaml
from datetime import datetime
from typing import List, Any
from jinja2 import pass_environment, Environment
from markdown2 import Markdown
from markupsafe import Markup
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.data import YamlLexer
from pytz import reference
from json_schema_for_humans import const
from json_schema_for_humans.generation_configuration import GenerationConfiguration
from json_schema_for_humans.schema.schema_node import SchemaNode
SHORT_DESCRIPTION_NUMBER_OF_LINES = 8
DEFAULT_PATTERN = r"(\[Default - `([^`]+)`\])"
DEPRECATED_MARKER = r"[Deprecated"
def is_combining(schema_node: SchemaNode) -> bool:
"""Test if a schema is one of the combining schema keyword"""
return bool({"anyOf", "allOf", "oneOf", "not"}.intersection(schema_node.keywords.keys()))
def is_text_short(text: str) -> bool:
"""Check if a string is short so that we can decide whether to make the section containing it expandable or not.
The heuristic is counting 1 for each line + 1 for each group of 80 characters a line has
"""
return (
sum((len(line) / const.LINE_WIDTH + 1) for line in str(text).splitlines()) < SHORT_DESCRIPTION_NUMBER_OF_LINES
)
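# --- Illustrative worked example of the heuristic above, assuming
# const.LINE_WIDTH == 80 (an assumption; check const for the real value).
# Each line scores len(line)/80 + 1, and text counts as "short" while the
# total stays below SHORT_DESCRIPTION_NUMBER_OF_LINES (8):
#   _score = lambda t: sum(len(l) / 80 + 1 for l in t.splitlines())
#   _score("short one-liner")           # 15/80 + 1 ~= 1.19 -> short
#   _score("\n".join(["x" * 100] * 7))  # 7 * (100/80 + 1) = 15.75 -> not short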
def is_deprecated(_schema: SchemaNode) -> bool:
"""Test. Check if a property is deprecated without looking in description"""
return False
def is_deprecated_look_in_description(schema_node: SchemaNode) -> bool:
"""Test. Check if a property is deprecated looking in description"""
if const.DESCRIPTION not in schema_node.keywords:
return False
return bool(DEPRECATED_MARKER in schema_node.keywords[const.DESCRIPTION].literal)
def get_required_properties(schema_node: SchemaNode) -> List[str]:
required_properties = schema_node.keywords.get("required") or []
if required_properties:
required_properties = [p.literal for p in required_properties.array_items]
return required_properties
def get_first_property(schema_node: SchemaNode) -> Any:
"""
Filter. get first property of given schema no matter the property key
Usage:
md template does not recurse on schema to render the if portion
instead it renders the if in the heading directly
"""
properties = schema_node.properties
if not properties:
return None
first_property_name = next(iter(properties))
return properties[first_property_name]
def get_undocumented_required_properties(schema_node: SchemaNode) -> List[str]:
"""Get the name of the properties that are required but not documented with their own node"""
return [prop for prop in get_required_properties(schema_node) if prop not in schema_node.properties.keys()]
def python_to_json(value: Any) -> Any:
"""Filter. Return the value as it needs to be displayed in JSON
    Used to display string literals more explicitly for default and const values.
"""
return json.dumps(value, indent=4, separators=(",", ": "), ensure_ascii=False)
@pass_environment
def get_description(env: Environment, schema_node: SchemaNode) -> str:
"""Filter. Get the description of a property or an empty string"""
description = schema_node.description
config: GenerationConfiguration = env.globals["jsfh_config"]
if config.default_from_description:
match = re.match(DEFAULT_PATTERN, description)
if match:
description = description[match.span(1)[1] :].lstrip()
if description and config.description_is_markdown and not config.result_extension == "md":
# Markdown templates are expected to already have Markdown descriptions
md: Markdown = env.globals["jsfh_md"]
description = Markup(md.convert(description))
return description
def get_default(schema_node: SchemaNode) -> str:
"""Filter. Return the default value for a property"""
return schema_node.default_value
def get_default_look_in_description(schema_node: SchemaNode) -> str:
"""Filter. Get the default value of a JSON Schema property. If not set, look for it in the description."""
default_value = schema_node.default_value
if default_value:
return default_value
description = schema_node.keywords.get(const.DESCRIPTION)
if not description:
return ""
description = description.literal
match = re.match(DEFAULT_PATTERN, description)
if not match:
return ""
return match.group(2)
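# --- Illustration of DEFAULT_PATTERN on a made-up description string (the
# string itself is an assumption for demonstration):
#   match = re.match(DEFAULT_PATTERN, "[Default - `10`] Retry count.")
#   match.group(2)                       # -> '10', what this function returns
#   # get_description() strips the whole "[Default - `10`]" marker via
#   # description[match.span(1)[1]:].lstrip()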
def get_numeric_restrictions_text(schema_node: SchemaNode, before_value: str = "", after_value: str = "") -> str:
"""Filter. Get the text to display about restrictions on a numeric type(integer or number)"""
multiple_of = schema_node.keywords.get(const.MULTIPLE_OF)
if multiple_of:
multiple_of = multiple_of.literal
maximum = schema_node.keywords.get(const.MAXIMUM)
if maximum:
maximum = maximum.literal
exclusive_maximum = schema_node.keywords.get(const.EXCLUSIVE_MAXIMUM)
if exclusive_maximum:
exclusive_maximum = exclusive_maximum.literal
minimum = schema_node.keywords.get(const.MINIMUM)
if minimum:
minimum = minimum.literal
exclusive_minimum = schema_node.keywords.get(const.EXCLUSIVE_MINIMUM)
if exclusive_minimum:
exclusive_minimum = exclusive_minimum.literal
# Fix minimum and exclusive_minimum both there
if minimum is not None and exclusive_minimum is not None:
if minimum <= exclusive_minimum:
exclusive_minimum = None
else:
minimum = None
minimum_fragment = ""
if minimum is not None:
minimum_fragment += f"greater or equal to {before_value}{minimum}{after_value}"
if exclusive_minimum is not None:
minimum_fragment += f"strictly greater than {before_value}{exclusive_minimum}{after_value}"
# Fix maximum and exclusive_maximum both there
if maximum is not None and exclusive_maximum is not None:
if maximum > exclusive_maximum:
exclusive_maximum = None
else:
maximum = None
maximum_fragment = ""
if maximum is not None:
maximum_fragment += f"lesser or equal to {before_value}{maximum}{after_value}"
if exclusive_maximum is not None:
maximum_fragment += f"strictly lesser than {before_value}{exclusive_maximum}{after_value}"
result = "Value must be "
touched = False
if minimum_fragment:
touched = True
result += minimum_fragment
if maximum_fragment:
if touched:
result += " and "
touched = True
result += maximum_fragment
if multiple_of:
if touched:
result += " and "
result += f"a multiple of {before_value}{multiple_of}{after_value}"
return result if touched else ""
def deprecated(config, schema: SchemaNode) -> bool:
return is_deprecated_look_in_description(schema) if config.deprecated_from_description else is_deprecated(schema)
def first_line(example_text: str, max_length: int = 0) -> str:
"""Filter. Retrieve first line of string + add ... at the end if text has multiple lines cut line at max_length"""
lines = example_text.splitlines()
result = lines[0]
etc = (max_length and len(result) > max_length) or len(lines) > 1
    return f"{result[:max_length] if max_length else result}{' ...' if etc else ''}"  # a max_length of 0 means "no truncation"
def get_local_time() -> str:
return datetime.now(tz=reference.LocalTimezone()).strftime("%Y-%m-%d at %H:%M:%S %z")
def highlight_json_example(example_text: str) -> str:
"""Filter. Return an highlighted version of the provided JSON text"""
return highlight(example_text, JavascriptLexer(), HtmlFormatter())
def yaml_example(example_text: str) -> str:
"""Filter. Return a YAML version of the provided JSON text"""
loaded_example = json.loads(example_text)
if not isinstance(loaded_example, dict):
        # YAML dump does not like things that are not objects
return str(loaded_example)
return yaml.dump(loaded_example, allow_unicode=True, sort_keys=False)
def highlight_yaml_example(example_text: str) -> str:
"""Filter. Return a highlighted YAML version of the provided JSON text"""
return highlight(yaml_example(example_text), YamlLexer(), HtmlFormatter())
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,320 | extension: py
num_repo_files: 241 | filename: jinja_filters.py | num_lang_files: 27
alphanum_fraction: 0.698077 | alpha_fraction: 0.696274 | hex_fraction: 0 | num_lines: 224 | avg_line_length: 36.142857 | max_line_length: 118
repo_name: Code-Institute-Submissions/snAPP | __id__: 3,676,492,009,601 | path: /bugtickets/views.py
blob_id: 7f7e7ba57fcf04599e999a326c493bd34abc53ce | directory_id: be3c0e5deab36d48ea71f83c85b12cdc80fe5ef9 | content_id: a8208863bdadf4d0561bff22978580b42e3f0d32
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/Code-Institute-Submissions/snAPP
snapshot_id: f548193a4c27a56bf3d6ee259ea7d3793d6dfaa9 | revision_id: bdea392764f5608d47055c1233dfc72811e4e4cd | branch_name: refs/heads/master
visit_date: 2020-03-27T04:51:32.650201 | revision_date: 2018-08-24T09:42:25 | committer_date: 2018-08-24T09:42:25
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
from django.shortcuts import render, reverse, redirect, get_object_or_404
from .models import BugTicket, BugUpvote, Comment
from .forms import ReportBugForm, CommentForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.messages import success, warning, error
from django.utils import timezone
import datetime
from bugtickets.charts import config_bugline_chart, config_bugpie_chart, config_bugbar_chart
import pygal
from pygal.style import Style
@login_required
def report_bug(request, pk=None):
"""
Create a Bug Report ticket
"""
if request.method == 'POST':
report_form = ReportBugForm(request.POST, request.FILES)
if report_form.is_valid():
submit = report_form.save(commit=False)
submit.created_by = request.user
submit.save()
return redirect('get_bug_listing')
else:
report_form = ReportBugForm()
return render(request, 'report_form.html', {'report_form': report_form})
@login_required
def upvote_bug(request, id=None):
"""
Enable user to upvote a bug and render line chart data
"""
bug = get_object_or_404(BugTicket, pk=id)
"""Prevent user upvoting own reports"""
if bug.created_by == request.user:
messages.error(request, "You cannot upvote your own bug report.")
else:
user = request.user
vote = bug.upvote(user)
"""Prevent double upvotes and validate upvotes"""
if vote == 'already_upvoted':
messages.success(request, "You have already upvoted this ticket.")
else:
messages.success(request, "Your upvote has been counted. Thanks")
bugs = BugTicket.objects.filter(pk=id)
return render(request, "bug_report.html", {'bugs': bugs})
@login_required
def add_comment_to_bug(request, pk):
"""
Enable user to add comments to bug reports
"""
post = BugTicket.objects.get(pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.bug_ticket = post
comment.author = request.user
comment.save()
return redirect('bug_report', pk=post.pk)
else:
form = CommentForm()
return render(request, "add_comments_to_bug_form.html", {'form': form, 'post': post})
@login_required
def bug_report(request, pk=None):  # default was the builtin `id`, surely unintended
    """
    Display a single bug report
    """
bugs = BugTicket.objects.filter(id=pk)
# return message if bug does not exist
if not bugs:
messages.success(request, "There is no bug with that identity. Please search again.")
return redirect('get_bug_listing')
else:
return render(request, "bug_report.html", {'bugs': bugs})
@login_required
def get_bug_listing(request):
"""
List bugs with most recent on top and render chart data
"""
"""order bugs by date reported"""
bugs = BugTicket.objects.filter(date_created__lte=timezone.now()).order_by('-date_created')
"""retrieve data on snAPP admin activity for charts"""
bug_line_data = config_bugline_chart()
bug_pie_data = config_bugpie_chart()
bug_bar_data = config_bugbar_chart()
return render(request, "bug_listing.html", {
'bugs': bugs,
'bug_line_data': bug_line_data,
'bug_pie_data': bug_pie_data,
'bug_bar_data': bug_bar_data,
})
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,554 | extension: py
num_repo_files: 92 | filename: views.py | num_lang_files: 49
alphanum_fraction: 0.642093 | alpha_fraction: 0.640405 | hex_fraction: 0 | num_lines: 110 | avg_line_length: 31.318182 | max_line_length: 97
repo_name: kg55555/pypractice | __id__: 6,021,544,157,650 | path: /Part 1/Chapter 2/exercise_2.5+6.py
blob_id: 2a55fcc8005986ab0c18555c5e3f4956685d40c5 | directory_id: 52177c5edd2a339580b8bd730f3141f4f06a9a3e | content_id: 69aba66b09b9c4456cf4a059158f831dc4871028
detected_licenses: ["MIT"] | license_type: permissive | repo_url: https://github.com/kg55555/pypractice
snapshot_id: 2cf317798afc6c2c1194a6817fd569a80610d753 | revision_id: 1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef | branch_name: refs/heads/master
visit_date: 2022-08-20T20:38:38.104618 | revision_date: 2020-05-30T00:33:05 | committer_date: 2020-05-30T00:33:05
github_id: 266,683,672 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
author = "Cooper"
quote = "We used to look up and wonder about our place in the stars. \nNow we just look down, and worry about our place in the dirt"
print(f"{author.title()} once said: '{quote}'")
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 198 | extension: py
num_repo_files: 102 | filename: exercise_2.5+6.py | num_lang_files: 101
alphanum_fraction: 0.712121 | alpha_fraction: 0.712121 | hex_fraction: 0 | num_lines: 3 | avg_line_length: 65.333333 | max_line_length: 132
repo_name: Prashu94/Learnings | __id__: 12,128,987,645,495 | path: /Python_1/MachineLearningModels.py
blob_id: eb6367f8a3fd28186013da0e69488149c2f06fba | directory_id: e533a9f2d1d2a17c32c074208396aa6f16bb81fc | content_id: fb43a9f5a51f370030b1e2584be8a172d88b23e6
detected_licenses: [] | license_type: no_license | repo_url: https://github.com/Prashu94/Learnings
snapshot_id: dd5b91800277c1283020b829751fc05b000d1bc1 | revision_id: b42b0278ac6ce0b39657e0c21532ba15540a3d46 | branch_name: refs/heads/master
visit_date: 2021-05-12T12:27:14.134831 | revision_date: 2018-12-25T14:58:31 | committer_date: 2018-12-25T14:58:31
github_id: 117,414,594 | star_events_count: 3 | fork_events_count: 0 | gha_* fields: null
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 13:52:37 2018
@author: user
"""
import csv
import pandas as pd
import numpy as np
import random as rnd
import os
import re
#Visualization Import
import seaborn as sns
import matplotlib.pyplot as plt
import scikitplot as skplt
# Supervised Machine Learning Models
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron, SGDClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import feature_selection
#import xgboost as xgb
#from xgboost.sklearn import XGBClassifier # <3
# Unsupervised Models
from sklearn.decomposition import PCA
# Evaluation
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, roc_curve, auc
# Grid
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.feature_selection import RFE
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import scipy.stats as st
# Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
# Esemble Voting
#from mlxtend.classifier import EnsembleVoteClassifier
#from sklearn import metrics
#from sklearn.metrics import classification_report, accuracy_score
# Stacking
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from matplotlib.colors import ListedColormap
# Warnings
import warnings
warnings.filterwarnings('ignore')
import time
import datetime
import platform
start = time.time()
print('Version :', platform.python_version())
print('Compiler :', platform.python_compiler())
print('Build :', platform.python_build())
print("\nCurrent date and time using isoformat:")
print(datetime.datetime.now().isoformat())
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (16, 8)
save = True  # NOTE: shadowed by the save() helper defined below; effectively unused
# Master Parameters:
n_splits = 5 # Cross Validation Splits
n_iter = 80 # Randomized Search Iterations
scoring = 'accuracy' # Model Selection during Cross-Validation
rstate = 27 # Random State used
testset_size = 0.30
# Trees Parameters
n_tree_range = st.randint(600, 1200)
# XGboost boosting rounds
num_rounds = 1000
"""
Loading and Preprocessing
"""
PATH = "G:\\extra things\\Knowledge\\Python_Practice\\"
train_df = pd.read_csv(PATH+'train.csv',index_col = 'PassengerId')
test_df = pd.read_csv(PATH+'test.csv',index_col = 'PassengerId')
train_df.describe()
test_df.describe()
"""
Pre-Processing
combine train/test data to simultaneously apply transformations
"""
Survived = train_df['Survived'].copy()
train_df = train_df.drop('Survived',axis=1).copy()
df = pd.concat([train_df,test_df])
df.describe()
df.info()
traindex = train_df.index
testdex = test_df.index
# train_df/test_df are still referenced by the illustrative block below,
# so they are released from memory at the end of that block instead
"""
To understand Feature Engineering
"""
#Feature Engineering
full_data = [train_df , test_df]
full_data
train_df.info()
#1.PClass
train_df[['Pclass','Survived']].groupby(['Pclass'],as_index = 'False').mean()
#2.Sex
train_df[['Sex','Survived']].groupby(['Sex'],as_index = 'False').mean()
#3.SibSp and Parch -Sibling/Spouse, Parent/Children
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize','Survived']].groupby(['FamilySize'],as_index = 'False').mean()
#4.Categorize people as per their lonliness
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1,'IsAlone'] = 1
train_df[['IsAlone','Survived']].groupby(['IsAlone'],as_index='False').mean()
"""
Feature Engineering
"""
df.info()
df.describe()
#Family Size
df['FamilySize'] = df ['SibSp']+df['Parch']+1
#Name Length
df['NameLength'] = df['Name'].apply(len)
#IsAlone?
df['IsAlone'] = 0
df.loc[df['FamilySize']==1,'IsAlone'] = 1
df
#Title
df['Title'] = 0
df['Title'] = df.Name.str.extract('([A-Za-z]+)\.',expand=True)
df['Title'].replace(['Mlle','Mme','Ms','Dr','Major','Lady','Countess','Jonkheer','Col','Rev','Capt','Sir','Dona'],['Miss','Miss','Miss','Mr','Mr','Mrs','Mrs','Other','Other','Other','Mr','Mr','Mr'],inplace=True)
df[['Title','Age']].groupby(['Title'], as_index=False).mean()
#Age
df.loc[(df.Age.isnull()) & (df.Title=='Mr'),'Age']=df.Age[df.Title=='Mr'].mean()
df.loc[(df.Age.isnull()) & (df.Title=='Mrs'),'Age']=df.Age[df.Title=='Mrs'].mean()
df.loc[(df.Age.isnull()) & (df.Title=='Miss'),'Age']=df.Age[df.Title=='Miss'].mean()
df.loc[(df.Age.isnull()) & (df.Title=='Master'),'Age']=df.Age[df.Title=='Master'].mean()
df.loc[(df.Age.isnull()) & (df.Title=='Other'),'Age']=df.Age[df.Title=='Other'].mean()
df =df.drop('Name',axis=1)
#Categorical Variable - Embarked (2 NA values)
df['Embarked'] = df['Embarked'].fillna(df['Embarked'].mode().iloc[0])
#Continuous Variable: Fare
df['Fare'] = df['Fare'].fillna(df['Fare'].mean())
#Assigning Binary to string (Sex)variable.
df['Sex'] =df['Sex'].map({'female' : 1, 'male' :0}).astype(int)
#Title
df['Title'] = df['Title'].map({'Mr':0,'Mrs':1,'Miss':2,'Master':3,'Other':4})
df['Title'] = df['Title'].fillna(df['Title'].mode().iloc[0])
df['Title'] = df['Title'].astype(int)
#Embarked
df['Embarked'] = df['Embarked'].map({'Q':0,'S':1,'C':2}).astype(int)
#We can get rid of Ticket and Cabin variable
df = df.drop(['Ticket','Cabin'],axis=1)
df.head()
"""
After doing feature Engineering we can visualize to see the state of variable, which is neccessary for good output of prediction through machine learning.(Clue:Lookout for Bell Curve for variables, containing min,max,25%,50%,75%,count)
Helps in finding the bias of variable in getting good predictive ability of the models
"""
"""
use traindex to inspect the state of the training variables
"""
#Histogram
pd.concat([df.loc[traindex,:],Survived],axis=1).hist()
plt.show()
#Correlation- we see closer to zero corelation for FamilySize
sns.heatmap(pd.concat([df.loc[traindex,:],Survived],axis=1).corr(),annot=True,fmt = ".2f")
#Scaling between -1 and 1, good practice for continuous variables
from sklearn import preprocessing
for col in ['Fare','Age','NameLength']:
    transf = df[col].values.reshape(-1, 1)  # .values: a Series has no .reshape in modern pandas
    scaler = preprocessing.StandardScaler().fit(transf)
    df[col] = scaler.transform(transf)
#After preprocessing,split the data into train/test data_again
train_df = df.loc[traindex,:]
train_df['Survived']=Survived
test_df = df.loc[testdex,:]
train_df.info()
test_df.info()
#Decide on the dependent and independent variable
X = train_df.drop(['Survived'],axis=1)
y=train_df['Survived']
print ("X,y Test Shape: ",X.shape,y.shape,test_df.shape)
#Storage for models and results
results = pd.DataFrame(columns=['Model','Para','Test_Score','CV Mean','CV Std_Dev'])
ensemble_methods ={}
#Imbalanced dependent variable
print("Dependent Variable Distribution")
print(y.value_counts(normalize = True)*100)
print("0 = Died \n1 = Survived")
#Dimensionality Reductions: Principal Components
print("Feature Count (With One Hot Encoding):",X.shape[1])
levels = [2,4,6,8,10,12]
for x in levels:
pca = PCA(n_components = x)
fit = pca.fit(train_df)
print(("{} Components \n Explained Variance: {}\n").format(x,fit.explained_variance_ratio_))
#Stratified Train/Test Split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=testset_size,stratify=y,random_state=rstate)
X_train.shape,y_train.shape,X_test.shape,y_test.shape
cv = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.2, random_state=rstate)
"""
Helper Functions
Compute,print and save models' Evaluation
"""
def save(model,modelname):
global results
#Once best_model is found,establish more evaluation metrics
model.best_estimator_.fit(X_train,y_train)
scores = cross_val_score(model.best_estimator_,X_train,y_train,cv = 5,scoring=scoring,verbose =0)
CV_scores = scores.mean()
STDev = scores.std()
Test_scores = model.score(X_test,y_test)
#CV and Save Scores
    results = results.append({'Model': modelname, 'Para': model.best_params_, 'Test_Score': Test_scores, 'CV Mean': CV_scores, 'CV Std_Dev': STDev}, ignore_index=True)
    ensemble_methods[modelname] = model.best_estimator_
    # Print Evaluation
    print("\nEvaluation Method: {}".format(scoring))
    print("Optimal Model Parameters: {}".format(model.best_params_))
    print("Train CV Accuracy: %0.2f (+/- %0.2f) [%s]" % (CV_scores, STDev, modelname))
    print("Test_Score: ", Test_scores)
    # Scikit-plot confusion matrix
model.best_estimator_.fit(X_train,y_train)
pred = model.predict(X_test)
skplt.metrics.plot_confusion_matrix(y_test,pred,title = "{} Confusion matrix".format(modelname),normalize=True,figsize=(6,6),text_fontsize='large')
plt.show()
df1 = pd.DataFrame(columns =['PassengerId','Survived'])
def norm_save(model, score, modelname):
    global results
    global df1
    model.fit(X, y)
    submission = model.predict(test_df)
    df1 = df1.append({'PassengerId': test_df.index, 'Survived': submission}, ignore_index=True)
    CV_scores = score.mean()
    STDev = score.std()
    Test_scores = model.score(X_test, y_test)
    # CV and save scores (plain estimator here: no best_params_/best_estimator_)
    results = results.append({'Model': modelname, 'Para': model.get_params(), 'Test_Score': Test_scores, 'CV Mean': CV_scores, 'CV Std_Dev': STDev}, ignore_index=True)
    ensemble_methods[modelname] = model
    print("\nEvaluation Method: {}".format(scoring))
    print("Train CV Accuracy: %0.2f (+/- %0.2f) [%s]" % (CV_scores, STDev, modelname))
    print("Test_Score: ", Test_scores)
    # Scikit-plot confusion matrix
    pred = model.predict(X_test)
    skplt.metrics.plot_confusion_matrix(y_test, pred, title="{} Confusion matrix".format(modelname), normalize=True, figsize=(6, 6), text_fontsize='large')
    plt.show()
def eval_plot(model):
skplt.metrics.plot_roc_curve(y_test,model.predict_proba(X_test))
plt.show()
#Non-parametric models
"""
#K-Nearest Neighbors
"""
param_grid = {'n_neighbors': st.randint(1, 40),
              # larger k increases bias and reduces variance; very small k tends to overfit
              'weights': ['uniform', 'distance']
              }
#Hyper-prameter Tuning with Cross-Validation
grid = RandomizedSearchCV(KNeighborsClassifier(),
param_grid,#HyperParmeters
cv = cv,#crossValidations splits
scoring = scoring,#Vest validation selection metric
verbose=1,#Frequency of model updates
n_iter=n_iter,#Number of hyperparameters combinations tried
random_state=rstate
)
#Execute Tuning on entire training set
grid.fit(X_train,y_train)
save(grid,"KNN")
results
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 11,705 | extension: py
num_repo_files: 228 | filename: MachineLearningModels.py | num_lang_files: 177
alphanum_fraction: 0.674669 | alpha_fraction: 0.664673 | hex_fraction: 0 | num_lines: 385 | avg_line_length: 28.407792 | max_line_length: 235
repo_name: sunlab-osu/CliniRC | __id__: 4,260,607,578,281 | path: /src/split_sections.py
blob_id: 864ad21c5b77b20ecae94cffacd27c9785f51d2f | directory_id: 9b013d971610b3f616e7d20d49dcdbedea77d828 | content_id: 81e6d501be2517aaeee6bb6db25e99e4a693bab1
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_url: https://github.com/sunlab-osu/CliniRC
snapshot_id: ce3ac57fe63d019734654ce5331ea46dfd46b880 | revision_id: d156306666272ee898a0b7f57ce63d519bf1f521 | branch_name: refs/heads/main
visit_date: 2023-07-03T05:44:16.504892 | revision_date: 2021-08-11T14:54:35 | committer_date: 2021-08-11T14:54:35
github_id: 395,024,654 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
from section_split_utils import *
#Change to emrQA json filename
emrqa_dir = 'data/datasets/'
emrqa_filename = 'data.json'
#Loading emrQA datasets from data directory
datasets = load_emrqa_datasets(emrqa_dir+emrqa_filename)
emrqa_json = datasets[emrqa_dir + emrqa_filename]
#Splitting emrQA questions by clinical record sections
emrqa_datasets, orig_num_answers, new_num_answers, errors = create_split_docs_emrqa(emrqa_json)
print("Number of errors from extracting correct sections for each answer: {}".format(errors))
print("Number of answers in original dataset: {}".format(orig_num_answers))
print("Number of answers in new dataset (Should be more): {}".format(new_num_answers))
#Transforming to Squad format, preprocessing the context/answers and filtering long questions
headerless_squad_emrqa, answers_checked, long_answers = transform_emrqa_to_squad_format(emrqa_datasets)
print("Number of removed answers due to length: {}".format(long_answers))
#Verifying QA Pair Counts
num_qas, num_contexts = count_squad_format_qas_and_contexts(headerless_squad_emrqa)
print("Number of QA pairs in new SQUAD format dataset: {}".format(num_qas))
print("Number of contexts in new SQUAD format dataset: {}".format(num_contexts))
#Adding Header to each sub emrQA dataset and create medication.json and relations.json
new_emrqa = add_header_and_save(headerless_squad_emrqa, emrqa_dir)
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,383 | extension: py
num_repo_files: 10 | filename: split_sections.py | num_lang_files: 6
alphanum_fraction: 0.778742 | alpha_fraction: 0.778742 | hex_fraction: 0 | num_lines: 30 | avg_line_length: 45.133333 | max_line_length: 103
repo_name: eudoxos/vprof | __id__: 19,499,151,564,274 | path: /vprof/base_profiler.py
blob_id: 6837d30d6ed04e8d9b2453445c755a49dcfd1cf2 | directory_id: 74d72efdb28d37a53957b2c7ab3fb438672e6b29 | content_id: 657d047b0a48e7cf17093e9af61dd4d674df21cf
detected_licenses: ["BSD-2-Clause", "BSD-3-Clause"] | license_type: permissive | repo_url: https://github.com/eudoxos/vprof
snapshot_id: 3d4d838d0144f7e65714c82110c1703e0a9ce19f | revision_id: 8c0b137377105db866ec1bd029faf5a6cf19814a | branch_name: refs/heads/master
visit_date: 2021-07-08T03:17:47.477886 | revision_date: 2017-10-05T13:00:34 | committer_date: 2017-10-05T13:00:34
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
"""Base class for a profile wrapper."""
import multiprocessing
import os
import pkgutil
import sys
import zlib
def get_pkg_module_names(package_path):
"""Returns module filenames from package.
Args:
package_path: Path to Python package.
Returns:
A set of module filenames.
"""
module_names = set()
for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]):
filename = os.path.join(fobj.path, '%s.py' % modname)
if os.path.exists(filename):
module_names.add(os.path.abspath(filename))
return module_names
def hash_name(name):
"""Computes hash of the name."""
return zlib.adler32(name.encode('utf-8'))
class ProcessWithException(multiprocessing.Process):
"""Process subclass that propagates exceptions to parent process.
Also handles sending function output to parent process.
Args:
parent_conn: Parent end of multiprocessing.Pipe.
child_conn: Child end of multiprocessing.Pipe.
result: Result of the child process.
"""
def __init__(self, result, *args, **kwargs):
super(ProcessWithException, self).__init__(*args, **kwargs)
self.parent_conn, self.child_conn = multiprocessing.Pipe()
self.result = result
def run(self):
try:
self.result.update(
self._target(*self._args, **self._kwargs))
self.child_conn.send(None)
except Exception as exc: # pylint: disable=broad-except
self.child_conn.send(exc)
@property
def exception(self):
"""Returns exception from child process."""
return self.parent_conn.recv()
@property
def output(self):
"""Returns target function output."""
return self.result._getvalue() # pylint: disable=protected-access
def run_in_separate_process(func, *args, **kwargs):
"""Runs function in separate process.
This function is used instead of a decorator, since Python multiprocessing
module can't serialize decorated function on all platforms.
"""
manager = multiprocessing.Manager()
manager_dict = manager.dict()
process = ProcessWithException(
manager_dict, target=func, args=args, kwargs=kwargs)
process.start()
process.join()
exc = process.exception
if exc:
raise exc
return process.output
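# --- Illustrative usage sketch (`compute` is a made-up target, not part of
# vprof): the target must be a picklable module-level function returning a
# mapping, since run() merges its result into the manager dict via update().
#   def compute(a, b):
#       return {'sum': a + b}
#   if __name__ == '__main__':
#       print(run_in_separate_process(compute, 2, 3))  # {'sum': 5}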
class BaseProfiler(object):
"""Base class for a profile wrapper."""
def __init__(self, run_object):
"""Initializes wrapper.
Args:
run_object: object that will be profiled.
"""
self._set_run_object_type(run_object)
if self._is_run_obj_module:
self._globs = {
'__file__': self._run_object,
'__name__': '__main__',
'__package__': None,
}
program_path = os.path.dirname(self._run_object)
if sys.path[0] != program_path:
sys.path.insert(0, program_path)
if not self._is_run_obj_function:
self._replace_sysargs()
self._object_name = None
def _set_run_object_type(self, run_object):
"""Sets type flags depending on run_object type."""
self._is_run_obj_function, self._is_run_obj_package = False, False
self._is_run_obj_module = False
if isinstance(run_object, tuple):
self._run_object, self._run_args, self._run_kwargs = run_object
self._is_run_obj_function = True
else:
self._run_object, _, self._run_args = run_object.partition(' ')
if os.path.isdir(self._run_object):
self._is_run_obj_package = True
elif os.path.isfile(self._run_object):
self._is_run_obj_module = True
def _replace_sysargs(self):
"""Replaces sys.argv with proper args to pass to script."""
if self._run_args:
sys.argv[:] = [self._run_object] + self._run_args.split()
else:
sys.argv[:] = [self._run_object]
def profile_package(self):
"""Profiles package specified by filesystem path.
Runs object specified by self._run_object as a package specified by
filesystem path. Must be overridden.
"""
raise NotImplementedError
def profile_module(self):
"""Profiles module.
Runs object specified by self._run_object as a Python module.
Must be overridden.
"""
raise NotImplementedError
def profile_function(self):
"""Profiles function.
Runs object specified by self._run_object as a Python function.
Must be overridden.
"""
raise NotImplementedError
def _get_dispatcher(self):
"""Returns dispatcher depending on self._run_object value."""
if self._is_run_obj_function:
self._object_name = '%s (function)' % self._run_object.__name__
return self.profile_function
elif self._is_run_obj_package:
self._object_name = '%s (package)' % self._run_object
return self.profile_package
self._object_name = '%s (module)' % self._run_object
return self.profile_module
def run(self):
"""Runs profiler and returns collected stats."""
dispatcher = self._get_dispatcher()
return dispatcher()
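# --- Usage sketch (illustrative; assumes a concrete subclass) ---
# The wrapper accepts three run-object shapes:
#   BaseProfiler((some_func, ('arg',), {}))   # (function, args, kwargs) tuple
#   BaseProfiler('script.py --flag')          # module path plus CLI args
#   BaseProfiler('package_dir')               # package directory
# A subclass implements profile_function/profile_module/profile_package,
# and .run() picks the right one via _get_dispatcher().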
|
UTF-8
|
Python
| false | false | 5,348 |
py
| 2 |
base_profiler.py
| 2 | 0.603029 | 0.602094 | 0 | 164 | 31.609756 | 78 |
DaHuO/Supergraph
| 6,038,724,049,363 |
5e385862f671488edc937727ae9bc3357ce4719e
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_RandomNickName_sheep.py
|
29f3ef3928b69710332d388757a36c8ff6b88929
|
[] |
no_license
|
https://github.com/DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | false | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | 2019-02-21T15:42:08 | 2021-03-19T21:55:45 | 38,414 | 0 | 0 | 2 |
Python
| false | false |
def solve(start):
if start == 0:
return "INSOMNIA"
remaining_digits = [x for x in range(0, 10)]
counter = 1
current = start
tmp = 0
while len(remaining_digits) > 0:
tmp = [int(i) for i in list(str(current))]
#print tmp
for item in tmp:
if item in remaining_digits:
remaining_digits.remove(item)
            # advance to the next multiple first, so `start` is not scanned twice
            counter = counter + 1
            current = int(start) * counter
return int(''.join(map(str, tmp)))
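# Worked example: solve(1692) scans the multiples 1692 -> 3384 -> 5076;
# after 5076 every digit 0-9 has appeared, so it returns 5076.
# solve(0) returns "INSOMNIA" since no new digits ever appear.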
def main():
exists = set()
trueExists = exists
create = set()
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(raw_input()) # read a line with a single integer
#l, d, n = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
#print l,d,n
for j in xrange(1, t+1):
start = int(raw_input())
#print start
counter = 0
answer = solve(start)
print "Case #{}: {}".format(j, answer)
if __name__ == "__main__" :
main()
|
UTF-8
|
Python
| false | false | 1,149 |
py
| 30,073 |
16_0_1_RandomNickName_sheep.py
| 16,513 | 0.5396 | 0.529156 | 0 | 38 | 29.105263 | 98 |
smchang/bracketracker
| 17,729,625,012,089 |
44c1c918591b2824e590fe82c06ad9f404de5673
|
0dfa8a67556d038fbe832131309df21640601a0a
|
/main.py
|
cbf4f129e563978cacf9af8a13266a5439372e69
|
[] |
no_license
|
https://github.com/smchang/bracketracker
|
bd9633147051ed1f33cc39fde85a5e76a7b4cdd2
|
6d4f393f8adb96afe2856a8d00a081067029bf10
|
refs/heads/master
| 2021-01-13T01:47:44.678392 | 2012-05-14T20:23:25 | 2012-05-14T20:23:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask import Flask, redirect, request, render_template, url_for, session, jsonify
from models import Member
from models import Tournament
from models import Notification
import os
import shelve
import uuid
tournamentDB = shelve.open('db/tournaments.dbm','w', writeback=True)
app = Flask(__name__)
app.secret_key = '\xe0e\xb1[\xae\xdb\xc6\xa6\xd5\xb0\xae\x87#\xeeM\xff\x17\xa7&9{-\xc7\x81'
@app.route('/home')
@app.route('/')
def home():
print session
if 'id' not in session.keys():
print "is null session"
seedSession()
else:
print "is not null session"
print session
print 'tournaments for home:', tournamentDB[session['id']]['your_tournaments']
return render_template('home.html',your_tournaments=tournamentDB[session['id']]['your_tournaments'],
notifications = tournamentDB[session['id']]['notifications'],
all_tournaments=tournamentDB[session['id']]['all_tournaments'])
@app.route('/create', methods=['GET','POST'])
def create():
print request.method
if request.method == 'POST':
name = request.form['name']
pwd = request.form['password']
desc = request.form['description']
type = request.form['type']
members = request.form['members']
print "creating tournament"
print " ",name
print " ",pwd
print " ",desc
print " ",type
print " ",members
newTournament = Tournament(name,name,type,desc)
tournamentDB[session['id']]['your_tournaments'][name] = newTournament
addMembersToTournament(name, members)
return render_template('createTournament.html')
@app.route('/addMembers/<tournament>',methods=['POST'])
def addMember(tournament):
print 'adding members'
if tournament in tournamentDB[session['id']]['your_tournaments'].keys():
print "adding members to tournament"
members = request.form['members']
addMembersToTournament(tournament, members)
print members
else:
print 'tournament does not exist'
return jsonify(msg='added members')
def addMembersToTournament(tournament, members):
members = members.split(',')
members = filter (lambda a: a!='', members)
print members
tournamentDB[session['id']]['your_tournaments'][tournament].players = members
def addAdminsToTournament(tournament, members):
members = members.split(',')
members = filter (lambda a: a!='', members)
print members
tournamentDB[session['id']]['your_tournaments'][tournament].admins = members
@app.route('/addTournament',methods=['POST'])
def addTournament():
name = request.form['name']
pwd = request.form['password']
desc = request.form['description']
type = request.form['type']
admins = request.form['admins']
players = request.form['players']
print "adding tournament"
print " ",name
print " ",pwd
print " ",desc
print " ",type
print " ",admins
print " ",players
newTournament = Tournament(name,name,type,desc, admins=admins, players=players)
tournamentDB[session['id']]['your_tournaments'][name] = newTournament
addMembersToTournament(name, players)
addAdminsToTournament(name, admins)
if name=="Office Ping Pong":
print "joining office ping pong"
tournamentDB[session['id']]['all_tournaments'].pop('pingPong')
if name=="Foosball":
tournamentDB[session['id']]['notifications'].pop('Tournament Invite')
return jsonify(msg="added tournament")
@app.route('/join')
def join():
return render_template('joinTournament.html')
@app.route('/join/foosball')
def join_foosball():
return render_template('joinFoosball.html')
@app.route('/staticRobin/<name>', methods=['GET','POST'])
def funfun(name):
return render_template('staticRobin.html',tournament = tournamentDB[session['id']]['your_tournaments'][name])
@app.route('/singleElim/<name>', methods=['GET','POST'])
def singleElim(name):
return render_template('singleElim.html',tournament=tournamentDB[session['id']]['your_tournaments'][name])
@app.route('/doubleElim/<name>', methods=['GET','POST'])
def doubleElim(name):
return render_template('doubleElim.html', tournament=tournamentDB[session['id']]['your_tournaments'][name])
@app.route('/roundrobin/<name>', methods=['GET','POST'])
def roundrobin(name):
if request.method == 'POST':
print "POSTING"
if 'promote' in request.form:
p = request.form['promote']
print "making admin:", p
if p in tournamentDB[session['id']]['your_tournaments'][name].players:
tournamentDB[session['id']]['your_tournaments'][name].players.remove(p)
if p in tournamentDB[session['id']]['your_tournaments'][name].booted:
tournamentDB[session['id']]['your_tournaments'][name].booted.remove(p)
tournamentDB[session['id']]['your_tournaments'][name].admins.append(p)
elif 'demote' in request.form:
d = request.form['demote']
print "booting:",d
if d in tournamentDB[session['id']]['your_tournaments'][name].players:
tournamentDB[session['id']]['your_tournaments'][name].players.remove(d)
if d in tournamentDB[session['id']]['your_tournaments'][name].admins:
tournamentDB[session['id']]['your_tournaments'][name].admins.remove(d)
tournamentDB[session['id']]['your_tournaments'][name].booted.append(d)
elif 'win' in request.form:
print 'adding win to tournament'
win = request.form['win']
s1 = request.form['s1']
s2 = request.form['s2']
if s1 == 'NaN':
s1 = '--'
else:
s1 = int(s1)
if s2 == 'NaN':
s2 = '--'
else:
s2 = int(s2)
print win,s1,s2
tournamentDB[session['id']]['your_tournaments'][name].wins.append(int(win))
tournamentDB[session['id']]['your_tournaments'][name].s1.append(s1)
tournamentDB[session['id']]['your_tournaments'][name].s2.append(s2)
print tournamentDB[session['id']]['your_tournaments'][name].wins,tournamentDB[session['id']]['your_tournaments'][name].s1,tournamentDB[session['id']]['your_tournaments'][name].s2
return render_template('roundrobin.html', tournament=tournamentDB[session['id']]['your_tournaments'][name])
@app.route('/friends')
def friends():
return render_template('comingSoon.html', page="Friends")
#using settings page as a site reset - clears session variable
@app.route('/settings')
def settings():
if 'id' in session.keys():
session.pop('id')
return render_template('comingSoon.html', page="Settings")
@app.route('/profile')
def profile():
return render_template('comingSoon.html', page="Profile")
@app.route('/removeNotification',methods=['POST'])
def removeNotification():
name = request.form['title']
type = request.form['type']
# print "removing notification", name
notification = tournamentDB[session['id']]['notifications'].pop(name)
if notification.type=="score":
print "notification is a score"
if type=="Accept":
tournamentDB[session['id']]['your_tournaments'][notification.tournament.id].wins.append(10)
tournamentDB[session['id']]['your_tournaments'][notification.tournament.id].s1.append(4)
tournamentDB[session['id']]['your_tournaments'][notification.tournament.id].s2.append(5)
else:
tournamentDB[session['id']]['your_tournaments'][notification.tournament.id] = notification.tournament
print "notification is tournament"
return jsonify(msg="removed notification")
def seedSession():
id = str(uuid.uuid4())
print id
session['id'] = id
tournamentDB[id] = {}
print 'seeding'
roundRobin = Tournament('roundRobin','Round Robin','roundrobin',"Description 1",
admins=['Larry'],
players = ['Moe','Curly','Adam','Billy','Carl','Dave'],
booted = ['Eric','Fred','George'],
icon="roundrobinIcon",
wins=[21,25,44,55],s1=[21,21,21,21],s2=[4,1,7,2],
state='active')
soccer = Tournament('soccer','Soccer','doubleElim',"Soccer Description",
admins = ['Moe'],
players = ['Curly','Billy','Carl','Larry (You)'],
booted=['Fred'],
icon="soccerIcon",
state='active')
chess = Tournament('chess','Chess','singleElim',"Chess Description",
admins = ['Moe'],
players = ['Dave', 'George','Larry (You)'],
icon="chessIcon",
state='active')
funfun = Tournament('funfun','FunFun','staticRobin',"FunFun Description",
admins = ['Moe'],
players = ['Larry (You)','Curly','Adam','Billy'],
icon="funfunIcon",
state='active')
pingPong = Tournament('pingPong','Office Ping Pong', 'staticRobin',"Come play in our little office ping pong\
tournament. It'll be lots of fun. Let's see who's the best.",
admins=['Moe'],
players=['Curly','Adam','Billy','Carl','Dave'],
invited=['Eric','Fred@bedrock.com','George@spacelysprockets.com','Harry@hogwarts.edu'],
state='join')
pingPong2 = Tournament('pingPong2','MIT Ping Pong','staticRobin',description="No description",\
admins=['President Hockfield'],password="password123",state='join')
basketball = Tournament('basketball','IM Basketball','singleElim',description="We pretend we can ball",\
admins=['The Committee'], password="password321",state='join')
foosball = Tournament('foosball','Foosball','staticRobin',"Just a small foosball tournament between friends",
admins=['Jeff'],
players=['Joe','James','Larry (You)','Jake','Jared'],
state='active')
scoreNotification = Notification('Game Completed','Round Robin Tournament: You vs. Moe','3:5',type="score", tournament=roundRobin)
invite = Notification('Tournament Invite','You received an invitation to join the tournament:','Foosball\
Tournament',type="invite",tournament=foosball)
tournamentDB[id]['your_tournaments'] = {}
tournamentDB[id]['your_tournaments']['roundRobin'] = roundRobin
tournamentDB[id]['your_tournaments']['soccer'] = soccer
tournamentDB[id]['your_tournaments']['chess'] = chess
tournamentDB[id]['your_tournaments']['funfun'] = funfun
tournamentDB[id]['notifications'] = {}
tournamentDB[id]['notifications'][scoreNotification.title] = scoreNotification
tournamentDB[id]['notifications'][invite.title] = invite
tournamentDB[id]['all_tournaments'] = {}
tournamentDB[id]['all_tournaments']['pingPong'] = pingPong
tournamentDB[id]['all_tournaments']['pingPong2'] = pingPong2
tournamentDB[id]['all_tournaments']['basketball'] = basketball
print "done seeding", tournamentDB[id]
if __name__ == '__main__':
port = int(os.environ.get('PORT',5000))
app.run(host='0.0.0.0', port=port,debug=True)
|
UTF-8
|
Python
| false | false | 11,613 |
py
| 25 |
main.py
| 9 | 0.608456 | 0.601137 | 0.001378 | 265 | 42.822642 | 190 |
shenaishiren/python-project
| 5,454,608,471,940 |
ec25e86ac9f811e02c6a5574f6b86c5fa52b97a4
|
2b0d897e517a8e7966b420c59d0ecc5871f45117
|
/ICQU/app/news/views.py
|
0a8b635e1d851489333e360d6fd6ab81598e9215
|
[] |
no_license
|
https://github.com/shenaishiren/python-project
|
37c229033d86bff1c60091d392d289fe6b914283
|
6768fa1fc2e5187932354a63024c73a8eb7e34a8
|
refs/heads/master
| 2020-12-24T16:25:20.749638 | 2016-04-05T05:12:34 | 2016-04-05T05:12:34 | 42,302,647 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding=utf-8
from config import MONGO_URI, MONGO_PORT, \
GENERIC_MONGO_DB, JOB_MONGO_DB, ACADEMIC_MONGO_DB, \
GENERIC_COLLECTION_NAME, JOB_COLLECTION_NAME, ACADEMIC_COLLECTION_NAME
from flask import Flask, render_template, current_app, g
from datetime import datetime
from pymongo import MongoClient
from functools import wraps
from . import news
import re
@news.before_request
def before_request():
    g.client = MongoClient(MONGO_URI, MONGO_PORT)  # global on the request context
g.generic_news_info = []
g.job_news_info = []
g.academic_news_info = []
g.regex = re.compile(r'<img[\s\S]+?src=\"(.*?)\"[\s\S]*>')
@news.teardown_request
def teardown_request(exception=None):
g.client.close()
def get_news_info(prefix):
def decorator(func):
@wraps(func)
def handler_args(*args, **kwargs):
db = g.client[eval(prefix.upper() + "_MONGO_DB")]
coll = db[eval(prefix.upper() + "_COLLECTION_NAME")]
for new in coll.find().sort('recruit_time', -1):
del new["_id"]
if (prefix == "generic" or prefix == "academic"):
headimg = g.regex.findall(new["body"])
if not headimg:
new["headerimg"] = "/static/academicnews/head.jpg"
else:
new["headerimg"] = "http://news.cqu.edu.cn" + headimg[0]
eval("g."+prefix+"_news_info").append(new)
key = "time_pub"
if prefix == "job":
key = "recruit_time"
eval("g."+prefix+"_news_info").sort(lambda y,x: cmp(datetime.strptime(x[key], "%Y-%m-%d"), datetime.strptime(y[key], "%Y-%m-%d")))
eval("g."+prefix+"_news_info").append(len(eval("g."+prefix+"_news_info")))
return func(*args, **kwargs)
return handler_args
return decorator
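# Illustrative expansion: with prefix "job" the eval-based lookups above are
# equivalent to
#   db = g.client[JOB_MONGO_DB]
#   coll = db[JOB_COLLECTION_NAME]
# and the results land in g.job_news_info, sorted by "recruit_time".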
@news.route("/generic/page/<int:index>", methods=["GET"])
@get_news_info("generic")
def generic_news(index):
# print len(g.generic_news_info)
INDEX = int(index)
START_PRE_PAGE = 0+10*INDEX
END_PRE_PAGE = 10+10*INDEX
count = g.generic_news_info[-1]
if count < END_PRE_PAGE:
END_PRE_PAGE = count
return render_template("generic_news/news.html", news=g.generic_news_info[:-1], page=INDEX,
start=START_PRE_PAGE, end=END_PRE_PAGE, count=count)
@news.route("/job/page/<int:index>", methods=["GET"])
@get_news_info("job")
def job_news(index):
INDEX = int(index)
START_PRE_PAGE = 0+10*INDEX
END_PRE_PAGE = 10+10*INDEX
# print count
count = g.job_news_info[-1]
if count<END_PRE_PAGE:
END_PRE_PAGE = count
return render_template("job_news/jobnews.html", news=g.job_news_info[:-1], page=INDEX,
start=START_PRE_PAGE, end=END_PRE_PAGE, count=count)
@news.route("/academic/page/<int:index>", methods=["GET"])
@get_news_info("academic")
def academic_news(index):
INDEX = int(index)
START_PRE_PAGE = 0+10*INDEX
END_PRE_PAGE = 10+10*INDEX
count = g.academic_news_info[-1]
if count<END_PRE_PAGE:
END_PRE_PAGE = count
return render_template("academic_news/academicnews.html", news=g.academic_news_info[:-1], page=INDEX,
start=START_PRE_PAGE, end=END_PRE_PAGE, count=count)
@news.route("/generic/page/info/<int:order>", methods=["GET"])
@get_news_info("generic")
def generic_info(order):
real_order = int(order)-1
return render_template("generic_news/careful_info.html", order=real_order, news=g.generic_news_info[:-1])
@news.route("/job/page/info/<int:order>", methods=["GET"])
@get_news_info("job")
def job_info(order):
real_order = int(order)-1
return render_template("job_news/jobinfo.html", order=real_order, news=g.job_news_info[:-1])
@news.route("/academic/page/info/<int:order>", methods=["GET"])
@get_news_info("academic")
def academic_info(order):
real_order = int(order)-1
return render_template("academic_news/careful_info.html", order=real_order, news=g.academic_news_info[:-1])
|
UTF-8
|
Python
| false | false | 4,052 |
py
| 70 |
views.py
| 42 | 0.613591 | 0.604663 | 0 | 112 | 35 | 142 |
riniguez91/empty-spain-back
| 8,048,768,724,030 |
a8b159d6bd4170e870eb1ba7727393420b11ab3c
|
da9b5ce27df43ed9735235eb6c67b66bdb7f806d
|
/api/scrapers/cope.py
|
67c30c343b02f8a7f8c2a6f6c0264944cc375536
|
[
"MIT"
] |
permissive
|
https://github.com/riniguez91/empty-spain-back
|
0647322f595a1e547fd22e36888eb9d954459373
|
7a48b1d01c9e90203a6b043b6f54a4f3600f8691
|
refs/heads/main
| 2023-06-14T03:08:45.011132 | 2021-07-05T17:25:05 | 2021-07-05T17:25:05 | 357,646,969 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium import webdriver
import time
import requests
from bs4 import BeautifulSoup
import re
import json
link = []
def cope_content(user_input):
    # Headless mode avoids launching the Chrome window, saving resources since no GUI is needed
options = webdriver.ChromeOptions()
user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
options.add_argument('user-agent={}'.format(user_agent))
options.add_argument('--incognito')
options.add_argument('--headless')
options.add_argument('--enable-javascript')
PATH = 'C:/WebDriver/bin/chromedriver.exe'
pagina = "https://www.cope.es/emisoras/" + user_input
driver = webdriver.Chrome(PATH, options=options)
    driver.get(pagina)  # Load the URL
time.sleep(1)
#driver.find_element_by_xpath('//*[@id="qc-cmp2-ui"]/div[2]/div/button[2]').click() #Aceptar Cookies
driver.find_element_by_xpath('//*[@id="didomi-notice-agree-button"]').click() #Aceptar Cookies
return driver.page_source #Recoger todo el html de la pagina
def text(user_input):
noticias = {}
noticias["COPE News in " + user_input] = []
    page_source = cope_content(user_input.lower())  # Call 'cope_content' to fetch the page that holds the news items
    soup = BeautifulSoup(page_source, 'lxml')
    contenedor = soup.find_all(class_="lateral right article")  # Div that holds the news items
for i in contenedor:
titulo = i.find(class_="title")
subtitulo = i.find(class_="subtitle")
link = i.find('a').attrs['href']
link_completo = "https://www.cope.es/" + str(link)
noticias["COPE News in " + user_input].append({
'Name': titulo.text,
'Subtitle':'' if not subtitulo else subtitulo.text,
'URL':link_completo
})
return json.dumps(noticias, indent=3)
###########
#user_input = str(input("Introduce el nombre del pueblo donde desea buscar noticias: "))
#print(text("Madrid"))
|
UTF-8
|
Python
| false | false | 2,157 |
py
| 31 |
cope.py
| 29 | 0.661874 | 0.647495 | 0 | 50 | 42.14 | 158 |
Niikcety/CinemaReservation
| 6,176,163,008,140 |
b590e9ca6cf8ac23164b4c8d13d653a4caede59e
|
2f3f6b18ef8b94f1b37e8c841808290f868ff0a1
|
/main.py
|
6910439bef6e51b3f8d7155992372938ecc28405
|
[] |
no_license
|
https://github.com/Niikcety/CinemaReservation
|
23f3a829c1d895db80bea54cea3ff22419b3c61d
|
b6ea0393dbc8fe91413a7213cc82aebd20c91d71
|
refs/heads/master
| 2022-07-16T14:58:20.958493 | 2020-05-13T15:39:49 | 2020-05-13T15:39:49 | 261,232,283 | 0 | 0 | null | false | 2020-05-13T15:38:12 | 2020-05-04T16:05:17 | 2020-05-08T13:10:53 | 2020-05-13T15:38:12 | 32 | 0 | 0 | 0 |
Python
| false | false |
import ipdb
from menu.model import MenuModel
menu = MenuModel()
menu.controller.db.create_tables()
menu.controller.db.fill_tables()
menu.start()
menu.main_menu()
|
UTF-8
|
Python
| false | false | 163 |
py
| 24 |
main.py
| 22 | 0.773006 | 0.773006 | 0 | 8 | 19.375 | 34 |
hamielHong/douban_spider
| 6,760,278,545,162 |
1a6549fbc1b3791890095bbf9ed919e4a0dc8ff4
|
e251d6a25d371506968aa9dc6620413b5103e8da
|
/html_parser.py
|
31b60bd1119315e410c295772567b570ca0782a5
|
[] |
no_license
|
https://github.com/hamielHong/douban_spider
|
e17b050bc04828e02a941696c1911e6b87a6da14
|
a9ebc2119d6b1af2cdca383b88acebf70a981bf9
|
refs/heads/master
| 2021-07-23T13:12:51.710067 | 2017-11-01T03:16:14 | 2017-11-01T03:16:14 | 109,080,024 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from bs4 import BeautifulSoup
import re
import urllib.parse
class HtmlParser(object):
def parse(self, html_cont):
if html_cont is None:
return
soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
new_data = self._get_new_data(soup)
return new_data
def _get_new_data(self, soup):
        eachCommentList = []
        comment_div_list = soup.find_all('div', class_='comment')
        if not comment_div_list:
            return eachCommentList
        for item in comment_div_list:
            p = item.find('p')
            if p is not None:  # skip comment blocks without a <p> tag
                eachCommentList.append(p.get_text())
        return eachCommentList
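# --- Usage sketch (illustrative, not part of the original module) ---
#   parser = HtmlParser()
#   parser.parse('<div class="comment"><p>Nice film!</p></div>')
#   # -> ['Nice film!']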
|
UTF-8
|
Python
| false | false | 702 |
py
| 5 |
html_parser.py
| 3 | 0.591168 | 0.588319 | 0 | 30 | 22.366667 | 77 |
pantsbuild/pants
| 18,932,215,852,934 |
7063273ebe50b95468f925530e738465e1a2dc9d
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/javascript/resolve.py
|
21b9a63b69c32d7cc6135b01229c4b65958d5815
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 |
Apache-2.0
| false | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | 2023-09-14T08:48:22 | 2023-09-14T19:33:33 | 157,747 | 2,809 | 572 | 966 |
Python
| false | false |
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Iterable
from pants.backend.javascript import nodejs_project
from pants.backend.javascript.nodejs_project import AllNodeJSProjects, NodeJSProject
from pants.backend.javascript.package_json import (
FirstPartyNodePackageTargets,
NodePackageNameField,
OwningNodePackage,
OwningNodePackageRequest,
PackageJsonSourceField,
)
from pants.backend.javascript.subsystems.nodejs import UserChosenNodeJSResolveAliases
from pants.build_graph.address import Address
from pants.engine.fs import PathGlobs
from pants.engine.internals.selectors import Get
from pants.engine.rules import Rule, collect_rules, rule
from pants.engine.target import Target, WrappedTarget, WrappedTargetRequest
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
@dataclass(frozen=True)
class RequestNodeResolve:
address: Address
@dataclass(frozen=True)
class ChosenNodeResolve:
project: NodeJSProject
@property
def resolve_name(self) -> str:
return self.project.default_resolve_name
@property
def file_path(self) -> str:
return os.path.join(self.project.root_dir, self.project.lockfile_name)
def get_lockfile_glob(self) -> PathGlobs:
return PathGlobs([self.file_path])
async def _get_node_package_json_directory(req: RequestNodeResolve) -> str:
wrapped = await Get(
WrappedTarget,
WrappedTargetRequest(req.address, description_of_origin="the `ChosenNodeResolve` rule"),
)
target: Target | None
if wrapped.target.has_field(PackageJsonSourceField) and wrapped.target.has_field(
NodePackageNameField
):
target = wrapped.target
else:
owning_pkg = await Get(OwningNodePackage, OwningNodePackageRequest(wrapped.target.address))
target = owning_pkg.ensure_owner()
return os.path.dirname(target[PackageJsonSourceField].file_path)
@rule
async def resolve_for_package(
req: RequestNodeResolve, all_projects: AllNodeJSProjects
) -> ChosenNodeResolve:
directory = await _get_node_package_json_directory(req)
project = all_projects.project_for_directory(directory)
return ChosenNodeResolve(project)
class NodeJSProjectResolves(FrozenDict[str, NodeJSProject]):
pass
@rule
async def resolve_to_projects(
all_projects: AllNodeJSProjects, user_chosen_resolves: UserChosenNodeJSResolveAliases
) -> NodeJSProjectResolves:
def get_name(project: NodeJSProject) -> str:
return user_chosen_resolves.get(
os.path.join(project.root_dir, project.lockfile_name), project.default_resolve_name
)
return NodeJSProjectResolves((get_name(project), project) for project in all_projects)
class FirstPartyNodePackageResolves(FrozenDict[str, Target]):
pass
@rule
async def resolve_to_first_party_node_package(
resolves: NodeJSProjectResolves, all_first_party: FirstPartyNodePackageTargets
) -> FirstPartyNodePackageResolves:
by_dir = {first_party.residence_dir: first_party for first_party in all_first_party}
return FirstPartyNodePackageResolves(
(resolve, by_dir[project.root_dir]) for resolve, project in resolves.items()
)
def rules() -> Iterable[Rule | UnionRule]:
return [*collect_rules(), *nodejs_project.rules()]
|
UTF-8
|
Python
| false | false | 3,476 |
py
| 1,910 |
resolve.py
| 1,494 | 0.756329 | 0.754603 | 0 | 105 | 32.104762 | 99 |
sivakrishnar/pythonplay
| 9,113,920,629,757 |
ff71f7789dc80b7a18ed94cca753ffa010b7ca48
|
4bce552bdabb0cea260e6999bc5e8acb3f496f5b
|
/insertion_sort.py
|
06e4dcade31cc2062bb7901907a92290d003e0f8
|
[
"MIT"
] |
permissive
|
https://github.com/sivakrishnar/pythonplay
|
82248d1c40c379f48d817ee9c035cf28fee61d9d
|
903afc3f0baa4107230af5487df3efaef6169412
|
refs/heads/master
| 2022-09-15T02:42:03.090793 | 2020-05-28T07:15:35 | 2020-05-28T07:15:35 | 266,028,924 | 0 | 0 |
MIT
| false | 2020-05-28T07:15:36 | 2020-05-22T05:48:10 | 2020-05-27T07:20:47 | 2020-05-28T07:15:36 | 6 | 0 | 0 | 0 |
Python
| false | false |
def insertion_sort(ls):
    # O(n^2) exchange variant of insertion sort: each new element is swapped
    # back past any larger elements, keeping ls[:index + 1] sorted.
    for index in range(1, len(ls)):
        for sub_index in range(0, index):
            if ls[index] < ls[sub_index]:
                ls[index], ls[sub_index] = ls[sub_index], ls[index]
    return ls
if __name__ == "__main__":
import random
print(insertion_sort([random.randint(-1000, 1000) for x in range(50)]))
|
UTF-8
|
Python
| false | false | 347 |
py
| 5 |
insertion_sort.py
| 4 | 0.570605 | 0.536023 | 0 | 10 | 33.8 | 75 |
iori422/51jkfw
| 19,000,935,319,536 |
0ccae886a1fd27cda411db1d7fa24a678f5b822d
|
46119f4175cc34772c070030341713dbe175c68c
|
/Script/soprt_combo.py
|
2875fd6231f6aa512456cfc3506706d20fa1030b
|
[] |
no_license
|
https://github.com/iori422/51jkfw
|
2c146a116a745383b7c5bd48eea5ef0ac34c33f0
|
f82f25b97d879938408ae96a4a3ec7a2db94c157
|
refs/heads/master
| 2021-08-23T23:15:37.787234 | 2017-12-07T01:52:02 | 2017-12-07T01:52:02 | 113,388,991 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from selenium import webdriver
import xlrd
import pandas as pd
fname =R"D:\2\20170418.xlsx"
bk = xlrd.open_workbook(fname)
sport = range(bk.nsheets)
try:
    sh = bk.sheet_by_name(u"成年人")
except:
    print "sheet not found in workbook: " + fname
nrows = sh.nrows
ncols = sh.ncols
print "nrows %d, ncols %d" % (nrows,ncols)
for i in range(347):  # hard-coded row count kept from the source file
    cell_value = sh.cell_value(i, 1)
|
UTF-8
|
Python
| false | false | 410 |
py
| 39 |
soprt_combo.py
| 37 | 0.668317 | 0.584158 | 0 | 20 | 19.05 | 42 |
gcman/project-euler
| 9,002,251,468,173 |
ae1f5c422429e4cdcdeb0095ff1523fe1b75721a
|
f6de30ffb0d9d0f70d4a73d519f9499f7572df29
|
/48-Self-Powers/main.py
|
37647b6b40d68eba795e8c94a1f7e9710e8beff5
|
[] |
no_license
|
https://github.com/gcman/project-euler
|
09a8a16a56535adf23444a7fe810728bc813d771
|
a49993c2c1e284a905dff0ee5bfe591409f68c24
|
refs/heads/master
| 2020-03-25T12:52:16.165138 | 2019-03-09T20:10:10 | 2019-03-09T20:10:10 | 143,798,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
N = int(input())
M = 10**10
S = 0
for n in range(1, N+1):
# Take powers mod 10^10
# Mod 10^10 at the end
S = (S + pow(n, n, M)) % M
print(S)
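# Worked check: for N=10 the full sum 1^1 + ... + 10^10 is 10405071317,
# so S ends up 405071317 (int printing drops the leading zero of the
# ten-digit tail 0405071317).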
|
UTF-8
|
Python
| false | false | 153 |
py
| 89 |
main.py
| 88 | 0.503268 | 0.405229 | 0 | 8 | 18.125 | 30 |
Aunsiels/smart_plans
| 9,775,345,607,860 |
0ce8b53a3b70e48a864acdc3fd61888318957709
|
1b4bcec4fe9f6ae4710b1311a12286af9b468c99
|
/code/experiments/reduced_rule.py
|
82bda6bd9d8abf0e27a794a5dc94806326620e93
|
[] |
no_license
|
https://github.com/Aunsiels/smart_plans
|
859345438d97d78af9bad6736810ae1aefa9ff81
|
f206d23c2c468c802c14df49dd97be69380578af
|
refs/heads/master
| 2021-03-27T20:36:19.323650 | 2018-06-15T17:19:09 | 2018-06-15T17:19:09 | 104,898,472 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class ReducedRule(object):
"""ReducedRule
Superobject of all possible reduced forms. They can be of
four types :
* Consumption
* Production
* End
* Duplication
"""
def isConsommation(self):
"""isConsommation Whether the rule is a consumption rule or not"""
return False
def isDuplication(self):
"""isDuplication Whether the rule is a duplication rule or not"""
return False
def isProduction(self):
"""isProduction Whether the rule is a production rule or not"""
return False
def isEndRule(self):
"""isEndRule Whether the rule is an end rule or not"""
return False
|
UTF-8
|
Python
| false | false | 696 |
py
| 76 |
reduced_rule.py
| 63 | 0.62069 | 0.62069 | 0 | 25 | 26.84 | 74 |
cocoslime/point-in-PC-polygon
| 4,466,766,022,830 |
be525c69ee099dea5cbcdc1dfe4551cb40832fbb
|
f698fa596c27b9873dc556194849cfb13bb5bc8f
|
/pointcloud-polygon-generator/problem3-extrude-solid-generator.py
|
29eb5f824bca5320f68bffcbf43f40767fe202e9
|
[] |
no_license
|
https://github.com/cocoslime/point-in-PC-polygon
|
2499760f21376fb3e67ccc15d90e85a7f29612b2
|
6191963e427746928c4a7ae2932432dda4f4c7bf
|
refs/heads/master
| 2020-03-09T16:10:40.772608 | 2018-11-22T02:02:59 | 2018-11-22T02:02:59 | 128,877,817 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
import numpy as np
import csv
MAX_HEIGHT = 100
MIN_HEIGHT = 10
CONVEX_OPT = 'convex'
polygons_csv = open("../data/problem2/" + CONVEX_OPT + "/polygon.csv", newline='')
polygons_reader = csv.reader(polygons_csv, quoting=csv.QUOTE_NONNUMERIC)
solids_csv = open("../data/problem3/extruded/" + CONVEX_OPT + "_solids.csv", 'w', encoding='utf-8', newline='')
solids_writer = csv.writer(solids_csv)
for rid, row in enumerate(polygons_reader):
    # output row layout: convex_ratio, numOfSides, height, [polygon_coords]
num_sides = row[1]
coords = row[2:]
new_row = row[0:2]
new_row.append(random.randrange(MIN_HEIGHT, MAX_HEIGHT))
new_row.extend(coords)
solids_writer.writerow(new_row)
solids_csv.close()
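# Example (illustrative): an input row [0.8, 4.0, x0, y0, ..., x3, y3]
# becomes [0.8, 4.0, h, x0, y0, ..., x3, y3] with h drawn from [10, 100).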
|
UTF-8
|
Python
| false | false | 745 |
py
| 41 |
problem3-extrude-solid-generator.py
| 37 | 0.667114 | 0.651007 | 0 | 27 | 26.296296 | 111 |
raphaelfv/busdata
| 10,479,720,214,497 |
82e45e2a1855062cd99ecaeefff865ffddfa835f
|
e0018032775a6ccf5955b5e6d3c1bcc0574e3e7b
|
/busdata/first_load.py
|
7a5c697f217fe420bc49321a2766b6c198e03d72
|
[] |
no_license
|
https://github.com/raphaelfv/busdata
|
47349a7911aea88995a631d358878097a69cb956
|
fbe91e222603caf78b68c17546dd7096e827953d
|
refs/heads/master
| 2021-01-23T04:09:05.845816 | 2019-04-26T21:57:11 | 2019-04-26T21:57:11 | 86,159,227 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from busdata.models import *
def criarEmpresas():
nomeEmpresasList = ['Redentor','Barra','Feital','Vera Cruz']
empresasInativasList = ['Feital']
for nome in nomeEmpresasList:
if not Empresa.objects.filter(nome=nome).exists():
novaEmpresa = Empresa(nome=nome)
if nome in empresasInativasList:
novaEmpresa.is_active = False
novaEmpresa.save()
print("[first_load @ criarEmpresas] Nova empresa criada: ",novaEmpresa)
def criarConsorcios():
dictConsorciosList = []
dictConsorciosList.append({'nome': 'Intermunicipal'})
dictConsorciosList.append({'nome': 'BRT', 'cor': '#0000ff'})
dictConsorciosList.append({'nome': 'Santa Cruz', 'cor': '#e31919'})
dictConsorciosList.append({'nome': 'Transcarioca', 'cor': '#0d6fa8'})
dictConsorciosList.append({'nome': 'Internorte', 'cor': '#a2b719'})
dictConsorciosList.append({'nome': 'Intersul', 'cor': '#fdc418'})
for d in dictConsorciosList:
if not Consorcio.objects.filter(nome=d['nome']).exists():
novoObj = Consorcio(nome=d['nome'])
if 'cor' in d:
novoObj.cor = d['cor']
novoObj.save()
print("[first_load @ criarConsorcios] Novo consorcio criado: ",novoObj)
def criarFabricantes():
nomesList = ['Neobus','Caio','Marcopolo','Comil','Mascarello',]
for nome in nomesList:
if not Fabricante.objects.filter(nome=nome).exists():
novoObj = Fabricante(nome=nome)
novoObj.save()
print("[first_load @ criarFabricantes] Nova fabricante criada: ", novoObj)
def criarCarrocerias():
dictCarroceriasList = []
dictCarroceriasList.append({'nome': 'Mega', 'fabricante': 'Neobus'})
dictCarroceriasList.append({'nome': 'Mega Plus', 'fabricante': 'Neobus'})
dictCarroceriasList.append({'nome': 'Spectrum City', 'fabricante': 'Neobus'})
dictCarroceriasList.append({'nome': 'Mega BRT', 'fabricante': 'Neobus'})
dictCarroceriasList.append({'nome': 'Mega BRS', 'fabricante': 'Neobus'})
dictCarroceriasList.append({'nome': 'Paradiso', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Torino', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Viaggio', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Viale', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Ideale', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Senior', 'fabricante': 'Marcopolo'})
dictCarroceriasList.append({'nome': 'Apache Vip', 'fabricante': 'Caio'})
dictCarroceriasList.append({'nome': 'Millennium', 'fabricante': 'Caio'})
dictCarroceriasList.append({'nome': 'Mondego', 'fabricante': 'Caio'})
dictCarroceriasList.append({'nome': 'Foz Super', 'fabricante': 'Caio'})
dictCarroceriasList.append({'nome': 'Foz', 'fabricante': 'Caio'})
dictCarroceriasList.append({'nome': 'Svelto', 'fabricante': 'Comil'})
dictCarroceriasList.append({'nome': 'Campione', 'fabricante': 'Comil'})
dictCarroceriasList.append({'nome': 'Svelto Midi', 'fabricante': 'Comil'})
dictCarroceriasList.append({'nome': 'GranVia Midi', 'fabricante': 'Mascarello'})
dictCarroceriasList.append({'nome': 'Roma', 'fabricante': 'Mascarello'})
for d in dictCarroceriasList:
if not Carroceria.objects.filter(nome=d['nome']).exists():
novoObj = Carroceria(nome=d['nome'])
fabricanteObj = Fabricante.objects.filter(nome=d['fabricante'])
if fabricanteObj:
novoObj.fabricante = fabricanteObj.first()
else:
print("- ! - [first_load @ criarCarrocerias] Erro: Carroceria não foi criada: ", d)
continue
novoObj.save()
print("[first_load @ criarCarrocerias] Nova carroceria criada: ",novoObj)
|
UTF-8
|
Python
| false | false | 3,897 |
py
| 12 |
first_load.py
| 9 | 0.643994 | 0.63886 | 0 | 72 | 53.111111 | 99 |
r12habh/djangoProject
| 4,707,284,195,021 |
4ca99dd20927812ed9daab38e4c314233c97943c
|
c7450b5faad0dd32247628e95c002fb750dc232e
|
/pages/views.py
|
46de9c0a92f046ce35e67021a94d65d4f1e6c9d5
|
[] |
no_license
|
https://github.com/r12habh/djangoProject
|
135829da13e38f68cfc2e5117cc3052d5c26a68f
|
eb42b7188bc1a9ed3ee36811efaa5c00e6af138d
|
refs/heads/main
| 2023-06-12T00:57:00.447978 | 2021-07-08T08:16:02 | 2021-07-08T08:16:02 | 384,019,193 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def home_view(request, *args, **kwargs):
print(args, kwargs)
print(request.user)
return render(request, 'home.html', {})
def about_view(request, *args, **kwargs):
context = {
'text': 'This is about us.',
'number': 432534264,
'list': [12, 23, 34, 'abc']
}
return render(request, 'about.html', context)
def contact_view(request, *args, **kwargs):
return render(request, 'contact.html', {})
def social_view(request, *args, **kwargs):
return render(request, 'social.html', {})
|
UTF-8
|
Python
| false | false | 636 |
py
| 3 |
views.py
| 1 | 0.63522 | 0.611635 | 0 | 26 | 23.461538 | 49 |
KomalBharadva/Analysis-of-Household-Power-Consumption-using-Clustering-and-MapReduce
| 8,083,128,501,516 |
dbbbbd0e0fcab9e026bba922d1497412bb432e1b
|
036bd2d0227a2ee58a2faebe9f27fc020af1104a
|
/DataPrep.py
|
de4926c91f52339f09acd8c11c6424f00df50c6f
|
[] |
no_license
|
https://github.com/KomalBharadva/Analysis-of-Household-Power-Consumption-using-Clustering-and-MapReduce
|
e5464d7ac4e1905e89e14efceebc590aa9a99dea
|
9ac0722a2002a309fcaf6f259cf3f841b4792e92
|
refs/heads/main
| 2023-07-08T04:18:09.751507 | 2021-08-04T17:45:49 | 2021-08-04T17:45:49 | 392,774,626 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Importing all the necessary libraries
import pandas as pd
import numpy as np
from sklearn.preprocessing import scale
# Loading the dataset
df = pd.read_csv('household_power_consumption.txt', parse_dates={'DateTime' : ['Date', 'Time']}, infer_datetime_format=True,
low_memory=False, na_values=['nan','?'], sep=';')
# Dropping the null values
df.dropna(inplace = True)
# Selecting all the columns except for datetime column
df1 = df.loc[:, 'Global_active_power':'Sub_metering_3']
# Scaling whole data and converting it into dataframe
X = scale(df1)
df1 = pd.DataFrame(X)
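# scale() standardizes each column to zero mean and unit variance,
# i.e. z = (x - column_mean) / column_std, computed column-wise.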
# Rounding the scaled values to three decimal places
final_df = df1.round(3)
# Saving the final data into text file
np.savetxt(r'dataset.txt', final_df, fmt = '%1.3f', delimiter = ', ')
|
UTF-8
|
Python
| false | false | 795 |
py
| 10 |
DataPrep.py
| 9 | 0.695597 | 0.685535 | 0 | 19 | 39.842105 | 125 |
pbhatt48/MachineLearningAZ
| 9,689,446,227,660 |
a5ed851bb4f2ab453b12659576a69b9b95f39f79
|
637d7735a172b8aa042daef50faa74d286ec433e
|
/DataPreprocessing/data_preprocessing_template.py
|
8b4d92976e75e2e4aca028b5f22fbd0f2c0c1f2f
|
[] |
no_license
|
https://github.com/pbhatt48/MachineLearningAZ
|
74e4dc3ba6d895af776e0cb22848357385d5bf19
|
847f4fbf2b06116a97dce92aaa0831ff163b3f29
|
refs/heads/master
| 2020-04-26T19:09:43.213683 | 2019-04-23T13:57:27 | 2019-04-23T13:57:27 | 173,765,337 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#dataset = pd.read_csv('/Users/sadichha/UdacityClasses/UdemyClass/ML_Practices/MachineLearningAZ/DataPreprocessing/Data.csv')
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
#Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
X[:, 1:] = imputer.fit_transform(X[:, 1:])
#lets add label encoder.
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
#creating labelencoder for Y
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
#create test and train data
# NOTE: the source file is truncated after "from sk"; the lines below are an
# assumed continuation using the standard scikit-learn train/test split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
|
UTF-8
|
Python
| false | false | 937 |
py
| 11 |
data_preprocessing_template.py
| 7 | 0.753469 | 0.743863 | 0 | 32 | 28.28125 | 125 |
polyeuct/eden
| 4,690,104,327,531 |
e9dd6601cfcfa5bd47f3a5a143b72a691004ab3f
|
efa4c0985144f96fd3f70868fb667ea1c72164e4
|
/Stepik/Lesson 10.py
|
9bb10e754835e29cb876241ea23d93282ea6c81d
|
[] |
no_license
|
https://github.com/polyeuct/eden
|
be19c199a3d333460f473bdbee1b5ea7bcc01ad3
|
468100057b56be375898b38a393a14c65173b359
|
refs/heads/master
| 2023-05-15T17:18:15.955449 | 2021-06-11T12:03:51 | 2021-06-11T12:03:51 | 301,976,194 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
lst = input().split()
x = input()
if lst.count(x) == 0:
    print("Absent")
else:
    # enumerate avoids the original bug where lst.index(x, ind) with a
    # once-per-match counter could report the same position twice
    for i, value in enumerate(lst):
        if value == x:
            print(i, end=" ")
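# Worked example: input "1 2 1" then "1" prints the indices "0 2".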
|
UTF-8
|
Python
| false | false | 210 |
py
| 17 |
Lesson 10.py
| 17 | 0.462312 | 0.447236 | 0 | 10 | 18.9 | 45 |
georsara1/Kaggle_projects
| 14,431,090,131,160 |
7aab919c59d9f466cbf4629bde7d1716959ad3cf
|
c3fa1b2984186c8103edc0b5dfd7d6e4fe7eb405
|
/Google_Analytics/future_not_set.py
|
2fdbd1af89f6b4fed6a6e3e8e2027ed98afb59aa
|
[] |
no_license
|
https://github.com/georsara1/Kaggle_projects
|
e8141cccd6625c035ca9277a8d33dbf12b72eabe
|
59abe9ec6a35e1b4b423eac47985b645861ae801
|
refs/heads/master
| 2022-01-05T13:41:27.543818 | 2019-07-20T14:57:27 | 2019-07-20T14:57:27 | 197,943,913 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
import gc
import time
from pandas.core.common import SettingWithCopyWarning
import warnings
import lightgbm as lgb
from sklearn.model_selection import GroupKFold
# I don't like SettingWithCopyWarnings ...
warnings.simplefilter('error', SettingWithCopyWarning)
gc.enable()
train = pd.read_csv('extracted_fields_train.gz',
dtype={'date': str, 'fullVisitorId': str, 'sessionId':str}, nrows=None)
test = pd.read_csv('extracted_fields_test.gz',
dtype={'date': str, 'fullVisitorId': str, 'sessionId':str}, nrows=None)
def get_folds(df=None, n_splits=5):
"""Returns dataframe indices corresponding to Visitors Group KFold"""
# Get sorted unique visitors
unique_vis = np.array(sorted(df['fullVisitorId'].unique()))
# Get folds
folds = GroupKFold(n_splits=n_splits)
fold_ids = []
ids = np.arange(df.shape[0])
for trn_vis, val_vis in folds.split(X=unique_vis, y=unique_vis, groups=unique_vis):
fold_ids.append(
[
ids[df['fullVisitorId'].isin(unique_vis[trn_vis])],
ids[df['fullVisitorId'].isin(unique_vis[val_vis])]
]
)
return fold_ids
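# Usage sketch (illustrative): GroupKFold keeps every session of a visitor in
# the same fold, e.g.
#   folds = get_folds(df=train, n_splits=5)
#   trn_idx, val_idx = folds[0]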
y_reg = train['totals.transactionRevenue'].fillna(0)
del train['totals.transactionRevenue']
if 'totals.transactionRevenue' in test.columns:
del test['totals.transactionRevenue']
train['target'] = y_reg
for df in [train, test]:
df['date'] = pd.to_datetime(df['visitStartTime'], unit='s')
df['sess_date_dow'] = df['date'].dt.dayofweek
df['sess_date_hours'] = df['date'].dt.hour
df['sess_date_dom'] = df['date'].dt.day
df.sort_values(['fullVisitorId', 'date'], ascending=True, inplace=True)
df['next_session_1'] = (
df['date'] - df[['fullVisitorId', 'date']].groupby('fullVisitorId')['date'].shift(1)
).astype(np.int64) // 1e9 // 60 // 60
df['next_session_2'] = (
df['date'] - df[['fullVisitorId', 'date']].groupby('fullVisitorId')['date'].shift(-1)
).astype(np.int64) // 1e9 // 60 // 60
y_reg = train['target']
del train['target']
# https://www.kaggle.com/prashantkikani/teach-lightgbm-to-sum-predictions-fe
def browser_mapping(x):
browsers = ['chrome', 'safari', 'firefox', 'internet explorer', 'edge', 'opera', 'coc coc', 'maxthon', 'iron']
if x in browsers:
return x.lower()
elif ('android' in x) or ('samsung' in x) or ('mini' in x) or ('iphone' in x) or ('in-app' in x) or (
'playstation' in x):
return 'mobile browser'
elif ('mozilla' in x) or ('chrome' in x) or ('blackberry' in x) or ('nokia' in x) or ('browser' in x) or (
'amazon' in x):
return 'mobile browser'
elif ('lunascape' in x) or ('netscape' in x) or ('blackberry' in x) or ('konqueror' in x) or ('puffin' in x) or (
'amazon' in x):
return 'mobile browser'
elif '(not set)' in x:
return x
else:
return 'others'
def adcontents_mapping(x):
if ('google' in x):
return 'google'
elif ('placement' in x) | ('placememnt' in x):
return 'placement'
elif '(not set)' in x or 'nan' in x:
return x
elif 'ad' in x:
return 'ad'
else:
return 'others'
def source_mapping(x):
if ('google' in x):
return 'google'
elif ('youtube' in x):
return 'youtube'
elif '(not set)' in x or 'nan' in x:
return x
elif 'yahoo' in x:
return 'yahoo'
elif 'facebook' in x:
return 'facebook'
elif 'reddit' in x:
return 'reddit'
elif 'bing' in x:
return 'bing'
elif 'quora' in x:
return 'quora'
elif 'outlook' in x:
return 'outlook'
elif 'linkedin' in x:
return 'linkedin'
elif 'pinterest' in x:
return 'pinterest'
elif 'ask' in x:
return 'ask'
elif 'siliconvalley' in x:
return 'siliconvalley'
elif 'lunametrics' in x:
return 'lunametrics'
elif 'amazon' in x:
return 'amazon'
elif 'mysearch' in x:
return 'mysearch'
elif 'qiita' in x:
return 'qiita'
elif 'messenger' in x:
return 'messenger'
elif 'twitter' in x:
return 'twitter'
elif 't.co' in x:
return 't.co'
elif 'vk.com' in x:
return 'vk.com'
elif 'search' in x:
return 'search'
elif 'edu' in x:
return 'edu'
elif 'mail' in x:
return 'mail'
elif 'ad' in x:
return 'ad'
elif 'golang' in x:
return 'golang'
elif 'direct' in x:
return 'direct'
elif 'dealspotr' in x:
return 'dealspotr'
elif 'sashihara' in x:
return 'sashihara'
elif 'phandroid' in x:
return 'phandroid'
elif 'baidu' in x:
return 'baidu'
elif 'mdn' in x:
return 'mdn'
elif 'duckduckgo' in x:
return 'duckduckgo'
elif 'seroundtable' in x:
return 'seroundtable'
elif 'metrics' in x:
return 'metrics'
elif 'sogou' in x:
return 'sogou'
elif 'businessinsider' in x:
return 'businessinsider'
elif 'github' in x:
return 'github'
elif 'gophergala' in x:
return 'gophergala'
elif 'yandex' in x:
return 'yandex'
elif 'msn' in x:
return 'msn'
elif 'dfa' in x:
return 'dfa'
elif '(not set)' in x:
return '(not set)'
elif 'feedly' in x:
return 'feedly'
elif 'arstechnica' in x:
return 'arstechnica'
elif 'squishable' in x:
return 'squishable'
elif 'flipboard' in x:
return 'flipboard'
elif 't-online.de' in x:
return 't-online.de'
elif 'sm.cn' in x:
return 'sm.cn'
elif 'wow' in x:
return 'wow'
elif 'partners' in x:
return 'partners'
else:
return 'others'
train['device.browser'] = train['device.browser'].map(lambda x: browser_mapping(str(x).lower())).astype('str')
train['trafficSource.adContent'] = train['trafficSource.adContent'].map(
lambda x: adcontents_mapping(str(x).lower())).astype('str')
train['trafficSource.source'] = train['trafficSource.source'].map(lambda x: source_mapping(str(x).lower())).astype(
'str')
test['device.browser'] = test['device.browser'].map(lambda x: browser_mapping(str(x).lower())).astype('str')
test['trafficSource.adContent'] = test['trafficSource.adContent'].map(
lambda x: adcontents_mapping(str(x).lower())).astype('str')
test['trafficSource.source'] = test['trafficSource.source'].map(lambda x: source_mapping(str(x).lower())).astype('str')
def process_device(data_df):
print("process device ...")
data_df['source.country'] = data_df['trafficSource.source'] + '_' + data_df['geoNetwork.country']
data_df['campaign.medium'] = data_df['trafficSource.campaign'] + '_' + data_df['trafficSource.medium']
data_df['browser.category'] = data_df['device.browser'] + '_' + data_df['device.deviceCategory']
data_df['browser.os'] = data_df['device.browser'] + '_' + data_df['device.operatingSystem']
return data_df
train = process_device(train)
test = process_device(test)
def custom(data):
print('custom..')
data['device_deviceCategory_channelGrouping'] = data['device.deviceCategory'] + "_" + data['channelGrouping']
data['channelGrouping_browser'] = data['device.browser'] + "_" + data['channelGrouping']
data['channelGrouping_OS'] = data['device.operatingSystem'] + "_" + data['channelGrouping']
for i in ['geoNetwork.city', 'geoNetwork.continent', 'geoNetwork.country', 'geoNetwork.metro',
'geoNetwork.networkDomain', 'geoNetwork.region', 'geoNetwork.subContinent']:
for j in ['device.browser', 'device.deviceCategory', 'device.operatingSystem', 'trafficSource.source']:
data[i + "_" + j] = data[i] + "_" + data[j]
data['content.source'] = data['trafficSource.adContent'] + "_" + data['source.country']
data['medium.source'] = data['trafficSource.medium'] + "_" + data['source.country']
return data
train = custom(train)
test = custom(test)
train['mean_hits_per_day'] = train.groupby(['sess_date_dom'])['totals.hits'].transform('mean')
test['mean_hits_per_day'] = test.groupby(['sess_date_dom'])['totals.hits'].transform('mean')
train['totals.pageviews'] = train['totals.pageviews'].fillna(0)
test['totals.pageviews'] = test['totals.pageviews'].fillna(0)
train['mean_pageviews_per_day'] = train.groupby(['sess_date_dom'])['totals.pageviews'].transform('mean')
test['mean_pageviews_per_day'] = test.groupby(['sess_date_dom'])['totals.pageviews'].transform('mean')
from itertools import combinations
def numeric_interaction_terms(df, columns):
for c in combinations(columns,2):
df['{} / {}'.format(c[0], c[1]) ] = df[c[0]] / df[c[1]]
df['{} * {}'.format(c[0], c[1]) ] = df[c[0]] * df[c[1]]
df['{} - {}'.format(c[0], c[1]) ] = df[c[0]] - df[c[1]]
return df
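# Example (illustrative): for the three LOG_NUMERIC_COLUMNS below this adds
# three columns per pair (nine in total), e.g. 'visitNumber / totals.hits',
# 'visitNumber * totals.hits' and 'visitNumber - totals.hits'.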
LOG_NUMERIC_COLUMNS = ['visitNumber', 'totals.hits', 'totals.pageviews']
train = numeric_interaction_terms(train,LOG_NUMERIC_COLUMNS)
test = numeric_interaction_terms(test,LOG_NUMERIC_COLUMNS)
excluded_features = [
'date', 'fullVisitorId', 'sessionId', 'totals.transactionRevenue',
'visitId', 'visitStartTime', 'vis_date', 'nb_sessions', 'max_visits'
]
categorical_features = [
_f for _f in train.columns
if (_f not in excluded_features) & (train[_f].dtype == 'object')
]
for f in categorical_features:
train[f], indexer = pd.factorize(train[f])
test[f] = indexer.get_indexer(test[f])
xgb_params = {
'objective': 'reg:linear',
'booster': 'gbtree',
'learning_rate': 0.02,
'max_depth': 22,
'min_child_weight': 57,
'gamma' : 1.45,
'alpha': 0.0,
'lambda': 0.0,
'subsample': 0.67,
'colsample_bytree': 0.054,
'colsample_bylevel': 0.50,
'n_jobs': -1,
'random_state': 456
}
folds = get_folds(df=train, n_splits=5)
train_features = [_f for _f in train.columns if _f not in excluded_features]
print(train_features)
importances = pd.DataFrame()
oof_reg_preds = np.zeros(train.shape[0])
sub_reg_preds = np.zeros(test.shape[0])
for fold_, (trn_, val_) in enumerate(folds):
trn_x, trn_y = train[train_features].iloc[trn_], y_reg.iloc[trn_]
val_x, val_y = train[train_features].iloc[val_], y_reg.iloc[val_]
reg = lgb.LGBMRegressor(
num_leaves=30,
learning_rate=0.01,
n_estimators=2000,
subsample=.9,
colsample_bytree=.9,
random_state=1
)
reg.fit(
trn_x, np.log1p(trn_y),
eval_set=[(val_x, np.log1p(val_y))],
early_stopping_rounds=50,
verbose=100,
eval_metric='rmse'
)
imp_df = pd.DataFrame()
imp_df['feature'] = train_features
imp_df['gain'] = reg.booster_.feature_importance(importance_type='gain')
imp_df['fold'] = fold_ + 1
importances = pd.concat([importances, imp_df], axis=0)
oof_reg_preds[val_] = reg.predict(val_x, num_iteration=reg.best_iteration_)
oof_reg_preds[oof_reg_preds < 0] = 0
_preds = reg.predict(test[train_features], num_iteration=reg.best_iteration_)
_preds[_preds < 0] = 0
sub_reg_preds += np.expm1(_preds) / len(folds)
# print the score (the original bare expression silently discarded it)
print('Session-level OOF RMSE:', mean_squared_error(np.log1p(y_reg), oof_reg_preds) ** .5)
import warnings
warnings.simplefilter('ignore', FutureWarning)
importances['gain_log'] = np.log1p(importances['gain'])
mean_gain = importances[['gain', 'feature']].groupby('feature').mean()
importances['mean_gain'] = importances['feature'].map(mean_gain['gain'])
plt.figure(figsize=(8, 12))
sns.barplot(x='gain_log', y='feature', data=importances.sort_values('mean_gain', ascending=False))
train['predictions'] = np.expm1(oof_reg_preds)
test['predictions'] = sub_reg_preds
# Aggregate data at User level
trn_data = train[train_features + ['fullVisitorId']].groupby('fullVisitorId').mean()
# Create a list of predictions for each Visitor
trn_pred_list = train[['fullVisitorId', 'predictions']].groupby('fullVisitorId')\
.apply(lambda df: list(df.predictions))\
.apply(lambda x: {'pred_'+str(i): pred for i, pred in enumerate(x)})
# Create a DataFrame with VisitorId as index
# trn_pred_list contains dict
# so creating a dataframe from it will expand dict values into columns
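# e.g. a visitor with two sessions contributes {'pred_0': p0, 'pred_1': p1},
# which expands into columns pred_0 and pred_1 (NaN-padded for visitors with
# fewer sessions than the widest one)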
trn_all_predictions = pd.DataFrame(list(trn_pred_list.values), index=trn_data.index)
trn_feats = trn_all_predictions.columns
trn_all_predictions['t_mean'] = np.log1p(trn_all_predictions[trn_feats].mean(axis=1))
trn_all_predictions['t_median'] = np.log1p(trn_all_predictions[trn_feats].median(axis=1))
trn_all_predictions['t_sum_log'] = np.log1p(trn_all_predictions[trn_feats]).sum(axis=1)
trn_all_predictions['t_sum_act'] = np.log1p(trn_all_predictions[trn_feats].fillna(0).sum(axis=1))
trn_all_predictions['t_nb_sess'] = trn_all_predictions[trn_feats].isnull().sum(axis=1)
full_data = pd.concat([trn_data, trn_all_predictions], axis=1)
del trn_data, trn_all_predictions
gc.collect()
sub_pred_list = test[['fullVisitorId', 'predictions']].groupby('fullVisitorId')\
.apply(lambda df: list(df.predictions))\
.apply(lambda x: {'pred_'+str(i): pred for i, pred in enumerate(x)})
sub_data = test[train_features + ['fullVisitorId']].groupby('fullVisitorId').mean()
sub_all_predictions = pd.DataFrame(list(sub_pred_list.values), index=sub_data.index)
for f in trn_feats:
if f not in sub_all_predictions.columns:
sub_all_predictions[f] = np.nan
sub_all_predictions['t_mean'] = np.log1p(sub_all_predictions[trn_feats].mean(axis=1))
sub_all_predictions['t_median'] = np.log1p(sub_all_predictions[trn_feats].median(axis=1))
sub_all_predictions['t_sum_log'] = np.log1p(sub_all_predictions[trn_feats]).sum(axis=1)
sub_all_predictions['t_sum_act'] = np.log1p(sub_all_predictions[trn_feats].fillna(0).sum(axis=1))
sub_all_predictions['t_nb_sess'] = sub_all_predictions[trn_feats].isnull().sum(axis=1)
sub_full_data = pd.concat([sub_data, sub_all_predictions], axis=1)
del sub_data, sub_all_predictions
gc.collect()
train['target'] = y_reg
trn_user_target = train[['fullVisitorId', 'target']].groupby('fullVisitorId').sum()
from xgboost import XGBRegressor
folds = get_folds(df=full_data[['totals.pageviews']].reset_index(), n_splits=5)
oof_preds = np.zeros(full_data.shape[0])
oof_preds1 = np.zeros(full_data.shape[0])
both_oof = np.zeros(full_data.shape[0])
sub_preds = np.zeros(sub_full_data.shape[0])
vis_importances = pd.DataFrame()
for fold_, (trn_, val_) in enumerate(folds):
print("-" * 20 + "Fold :" + str(fold_) + "-" * 20)
trn_x, trn_y = full_data.iloc[trn_], trn_user_target['target'].iloc[trn_]
val_x, val_y = full_data.iloc[val_], trn_user_target['target'].iloc[val_]
xg = XGBRegressor(**xgb_params, n_estimators=1000)
reg = lgb.LGBMRegressor(
num_leaves=31,
learning_rate=0.03,
n_estimators=1000,
subsample=.9,
colsample_bytree=.9,
random_state=1
)
print("-" * 20 + "LightGBM Training" + "-" * 20)
reg.fit(
trn_x, np.log1p(trn_y),
eval_set=[(trn_x, np.log1p(trn_y)), (val_x, np.log1p(val_y))],
eval_names=['TRAIN', 'VALID'],
early_stopping_rounds=50,
eval_metric='rmse',
verbose=100
)
print("-" * 20 + "Xgboost Training" + "-" * 20)
xg.fit(
trn_x, np.log1p(trn_y),
eval_set=[(trn_x, np.log1p(trn_y)), (val_x, np.log1p(val_y))],
early_stopping_rounds=50,
eval_metric='rmse',
verbose=100
)
imp_df = pd.DataFrame()
imp_df['feature'] = trn_x.columns
imp_df['gain'] = reg.booster_.feature_importance(importance_type='gain')
imp_df['fold'] = fold_ + 1
vis_importances = pd.concat([vis_importances, imp_df], axis=0)
oof_preds[val_] = reg.predict(val_x, num_iteration=reg.best_iteration_)
oof_preds1[val_] = xg.predict(val_x)
oof_preds[oof_preds < 0] = 0
oof_preds1[oof_preds1 < 0] = 0
both_oof[val_] = oof_preds[val_] * 0.6 + oof_preds1[val_] * 0.4
# Make sure features are in the same order
_preds = reg.predict(sub_full_data[full_data.columns], num_iteration=reg.best_iteration_)
_preds[_preds < 0] = 0
pre = xg.predict(sub_full_data[full_data.columns])
pre[pre < 0] = 0
sub_preds += (_preds / len(folds)) * 0.6 + (pre / len(folds)) * 0.4
print("LGB ", mean_squared_error(np.log1p(trn_user_target['target']), oof_preds) ** .5)
print("XGB ", mean_squared_error(np.log1p(trn_user_target['target']), oof_preds1) ** .5)
print("Combine ", mean_squared_error(np.log1p(trn_user_target['target']), both_oof) ** .5)
vis_importances['gain_log'] = np.log1p(vis_importances['gain'])
mean_gain = vis_importances[['gain', 'feature']].groupby('feature').mean()
vis_importances['mean_gain'] = vis_importances['feature'].map(mean_gain['gain'])
plt.figure(figsize=(8, 25))
sns.barplot(x='gain_log', y='feature', data=vis_importances.sort_values('mean_gain', ascending=False).iloc[:300])
sub_full_data['PredictedLogRevenue'] = sub_preds
sub_full_data[['PredictedLogRevenue']].to_csv('future.csv', index=True)
|
UTF-8
|
Python
| false | false | 17,252 |
py
| 31 |
future_not_set.py
| 31 | 0.627927 | 0.615755 | 0 | 481 | 34.864865 | 119 |
baibhab007/Database-operation
| 3,607,772,560,187 |
bfca9de088a0f1fd5578db938e934bd44d6ef09b
|
3579a43ce5ebeb13a02dd184dc3227d02560cc8f
|
/connectDB_createTB.py
|
7c109241ce9466adcd8e5304a86f6310972ebe42
|
[] |
no_license
|
https://github.com/baibhab007/Database-operation
|
5dc350737c56a470b369ea5363abf182503d3416
|
c75acd71b0f0b5f78dd8c43fcca699fa29040eff
|
refs/heads/master
| 2020-07-14T19:54:46.330689 | 2019-08-30T13:35:58 | 2019-08-30T13:35:58 | 205,388,523 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sqlite3
def register(NAME, AGE, SEX, INITIAL_AMOUNT):
con = sqlite3.connect('TEST.db')
cursor = con.cursor()
sql1 = 'DROP TABLE IF EXISTS CUSTOMER'
sql2 = '''
CREATE TABLE CUSTOMER (
NAME CHAR(20) NOT NULL,
AGE INT,
SEX CHAR(1),
INITIAL_AMOUNT FLOAT
)
'''
# cursor.execute(sql1)
# cursor.execute(sql2)
rec = (NAME, AGE, SEX, INITIAL_AMOUNT)
sql = '''
INSERT INTO CUSTOMER VALUES ( ?, ?, ?, ?)
'''
try:
cursor.execute(sql, rec)
con.commit()
print("Thanks for registering.")
except Exception as e:
print("Error Message :", str(e))
con.rollback()
con.close()
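# Example usage (illustrative values); assumes the CUSTOMER table already
# exists -- see the commented-out DDL above.
if __name__ == '__main__':
    register('Alice', 30, 'F', 1000.0)
    con = sqlite3.connect('TEST.db')
    for row in con.execute('SELECT * FROM CUSTOMER'):
        print(row)
    con.close()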
|
UTF-8
|
Python
| false | false | 756 |
py
| 5 |
connectDB_createTB.py
| 4 | 0.511905 | 0.5 | 0 | 39 | 18.384615 | 52 |
dream36va/plen-ControlServer
| 2,396,591,787,064 |
5ef983e864be26549ce8a30d93f2dca5965f7902
|
0c2867e3ff96090b67998dd7c27410e8e3e39baf
|
/control_server/drivers/__init__.py
|
643ac5e311e22f94bf787d7f9cba6f12df7342e6
|
[
"MIT"
] |
permissive
|
https://github.com/dream36va/plen-ControlServer
|
3bf49166b8607d3467b36e2de4800535e2e9a6c8
|
c6b0d884d8e7117e09cce9422c69556dd8901c7b
|
refs/heads/master
| 2020-06-25T02:32:24.287098 | 2018-03-30T05:30:58 | 2018-03-30T05:40:04 | 199,171,734 | 1 | 0 |
NOASSERTION
| true | 2019-07-27T13:59:38 | 2019-07-27T13:59:37 | 2018-12-14T14:19:13 | 2018-03-30T05:43:50 | 1,103 | 0 | 0 | 0 | null | false | false |
# -*- coding: utf-8 -*-
'''
@file __init__.py
@brief Provide data transfer driver mapping.
'''
__author__ = 'Kazuyuki TAKASE'
__copyright__ = 'PLEN Project Company, and all authors.'
__license__ = 'The MIT License'
from drivers.null.core import NullDriver
from drivers.usb.core import USBDriver
DRIVER_MAP = {
'null': NullDriver,
'usb' : USBDriver
}
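# Illustrative lookup -- callers resolve a driver class by name, e.g.:
#   driver_cls = DRIVER_MAP['usb']
#   driver = driver_cls()  # constructor arguments, if any, are driver-specific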
|
UTF-8
|
Python
| false | false | 369 |
py
| 54 |
__init__.py
| 36 | 0.650407 | 0.647696 | 0 | 20 | 17.5 | 56 |
YuneYune/python-project-lvl2
| 6,554,120,139,882 |
4206e726e76d3e0b6598a9bf0139f8ea5952d482
|
8a5e849c0f02eb5f64cec5f9668053935f655b81
|
/tests/test_gendiff.py
|
265ae2cc9c821a04462d64d2e1cf32a1888ce5fc
|
[] |
no_license
|
https://github.com/YuneYune/python-project-lvl2
|
971d9f0dc623efd2f9732161001922fa947aebd6
|
95e1e90247b0771eda86fa870c892e9b7bc2c87b
|
refs/heads/main
| 2023-06-29T22:16:09.577904 | 2022-01-22T20:11:19 | 2022-01-22T20:11:19 | 336,018,170 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
"""Tests."""
from gendiff.gendiff import generate_diff
def extract_exp_value(path):
"""Extract expected value which is store in txt file.
Args:
path (str): The path to file where expected value is store.
Returns:
(str): Expected value.
"""
with open(path) as expected_txt_diff:
return expected_txt_diff.read()
def test_stylish_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.json'
second_path = 'tests/fixtures/file2.json'
diff = generate_diff(first_path, second_path)
exp_diff = extract_exp_value('tests/fixtures/exp_stylish_diff.txt')
assert diff == exp_diff
def test_stylish_nested_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.json'
second_path = 'tests/fixtures/nested2.json'
diff = generate_diff(first_path, second_path)
exp_diff = extract_exp_value('tests/fixtures/exp_stylish_nested_diff.txt')
assert diff == exp_diff
def test_stylish_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.yml'
second_path = 'tests/fixtures/file2.yml'
diff = generate_diff(first_path, second_path)
exp_diff = extract_exp_value('tests/fixtures/exp_stylish_diff.txt')
assert diff == exp_diff
def test_stylish_nested_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.yml'
second_path = 'tests/fixtures/nested2.yml'
diff = generate_diff(first_path, second_path)
exp_diff = extract_exp_value('tests/fixtures/exp_stylish_nested_diff.txt')
assert diff == exp_diff
def test_plain_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.yml'
second_path = 'tests/fixtures/file2.yml'
diff = generate_diff(first_path, second_path, 'plain')
exp_diff = extract_exp_value('tests/fixtures/exp_plain_diff.txt')
assert diff == exp_diff
def test_plain_nested_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.yml'
second_path = 'tests/fixtures/nested2.yml'
diff = generate_diff(first_path, second_path, 'plain')
exp_diff = extract_exp_value('tests/fixtures/exp_plain_nested_diff.txt')
assert diff == exp_diff
def test_plain_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.json'
second_path = 'tests/fixtures/file2.json'
diff = generate_diff(first_path, second_path, 'plain')
exp_diff = extract_exp_value('tests/fixtures/exp_plain_diff.txt')
assert diff == exp_diff
def test_plain_nested_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.json'
second_path = 'tests/fixtures/nested2.json'
diff = generate_diff(first_path, second_path, 'plain')
exp_diff = extract_exp_value('tests/fixtures/exp_plain_nested_diff.txt')
assert diff == exp_diff
def test_json_formatter_nested_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.json'
second_path = 'tests/fixtures/nested2.json'
diff = generate_diff(first_path, second_path, 'json')
exp_diff = extract_exp_value('tests/fixtures/exp_json_nested_diff.txt')
assert diff == exp_diff
def test_json_formatter_json_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.json'
second_path = 'tests/fixtures/file2.json'
diff = generate_diff(first_path, second_path, 'json')
exp_diff = extract_exp_value('tests/fixtures/exp_json_diff.txt')
assert diff == exp_diff
def test_json_formatter_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/file1.yml'
second_path = 'tests/fixtures/file2.yml'
diff = generate_diff(first_path, second_path, 'json')
exp_diff = extract_exp_value('tests/fixtures/exp_json_diff.txt')
assert diff == exp_diff
def test_json_formatter_nested_yaml_diff():
"""Tests of generate_diff function.
Returns answer of assert.
"""
first_path = 'tests/fixtures/nested1.yml'
second_path = 'tests/fixtures/nested2.yml'
diff = generate_diff(first_path, second_path, 'json')
exp_diff = extract_exp_value('tests/fixtures/exp_json_nested_diff.txt')
assert diff == exp_diff
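# The twelve tests above repeat one pattern; pytest's parametrize can express
# the same matrix in a single test. Minimal sketch using cases from above:
import pytest
@pytest.mark.parametrize('first, second, fmt, expected', [
    ('tests/fixtures/file1.json', 'tests/fixtures/file2.json',
     'plain', 'tests/fixtures/exp_plain_diff.txt'),
    ('tests/fixtures/nested1.yml', 'tests/fixtures/nested2.yml',
     'json', 'tests/fixtures/exp_json_nested_diff.txt'),
])
def test_generate_diff_parametrized(first, second, fmt, expected):
    assert generate_diff(first, second, fmt) == extract_exp_value(expected)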
|
UTF-8
|
Python
| false | false | 4,771 |
py
| 18 |
test_gendiff.py
| 13 | 0.671977 | 0.666737 | 0 | 162 | 28.450617 | 78 |
TesterCC/Python2Scripts
| 6,657,199,309,851 |
a74c9c93f7c40c63cdccf8b3fe1470616fe7103f
|
2d9ec9278ede1dd086fc7629de96481160597a3f
|
/gloryroad/xiaoz/lesson14_4.py
|
cdba98960cfd2bfab0a53b873a409767f55a9100
|
[] |
no_license
|
https://github.com/TesterCC/Python2Scripts
|
226ac6d217e767c517b4b798ac67e493348cf316
|
69c2fded6835762c693c1e42a2cbac8ddd2d6e74
|
refs/heads/master
| 2021-01-25T04:09:36.864279 | 2019-04-24T09:59:09 | 2019-04-24T09:59:09 | 93,401,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
'''
Created on March 16, 2016
@author: PavilionLYX
'''
import MySQLdb
conn=MySQLdb.connect(host="127.0.0.1",user="root",
passwd="yanxi76543210",
db="test",port=3306,charset="utf8")
cur=conn.cursor()
print cur.execute("select * from user")
# print all rows in the table; execute() must be called first, otherwise an error is raised
print cur.fetchall()
print cur.fetchall()
# reset the cursor to the starting position (0)
cur.scroll(0,mode='absolute')
print cur.fetchmany(1) # fetch only one row
cur.scroll(0,mode='relative')
print cur.fetchmany(1)
cur.scroll(0,mode='absolute')
row = cur.fetchone()
while row:
print row[2] #gender
row = cur.fetchone()
# close the cursor
cur.close()
# close the database connection
conn.close()
|
UTF-8
|
Python
| false | false | 731 |
py
| 358 |
lesson14_4.py
| 335 | 0.659476 | 0.607088 | 0 | 41 | 14.853659 | 56 |
Jankus1994/Coreference
| 326,417,522,714 |
d6c742836157ef5fd8978a734cbf721262e0b23c
|
671a669cc862f68d736a98b3d95bedf96cd7b09e
|
/CoNLL/conll_prodrop_feature_printer.py
|
be1d325b622a238ec880d1bbfbd75f2adfa55819
|
[] |
no_license
|
https://github.com/Jankus1994/Coreference
|
e258b68c0a75ee3102614220f27c5d163e745c41
|
41b13ce6422ac6c3d139474641e75e502c446162
|
refs/heads/master
| 2021-01-23T01:55:56.732336 | 2018-05-03T18:06:40 | 2018-05-03T18:06:40 | 85,945,883 | 0 | 1 | null | false | 2017-03-23T13:08:14 | 2017-03-23T12:15:12 | 2017-03-23T12:26:51 | 2017-03-23T13:07:59 | 0 | 0 | 0 | 1 |
Python
| null | null |
# Jan Faryad
# 22. 3. 2018
""" inhereted feature printer for dropped personal pronouns """
from udapi.block.demo.Coreference.CoNLL.conll_feature_printer import Conll_feature_printer
from udapi.block.demo.Coreference.CoNLL.conll_specific_selectors import Conll_prodrop_training_selector
class Conll_prodrop_feature_printer( Conll_feature_printer):
def __init__( self, **kwargs):
super().__init__( **kwargs)
self.selector = Conll_prodrop_training_selector()
|
UTF-8
|
Python
| false | false | 479 |
py
| 59 |
conll_prodrop_feature_printer.py
| 59 | 0.741127 | 0.726514 | 0 | 12 | 38.916667 | 103 |
satyanarayan-rao/tf_nucleosome_dynamics
| 10,728,828,334,069 |
d4079c84bc1a6e2cb0b03d26e6412871f725e4ed
|
4ec101ac9e7fdc57510182243ace54747b5c404e
|
/snakemakes/tcga_atac_boxplot_analysis.smk
|
68939168171304c69c4d3e706e716943bb2449e4
|
[] |
no_license
|
https://github.com/satyanarayan-rao/tf_nucleosome_dynamics
|
e2b7ee560091b7a03fa16559096c1199d03362de
|
00bdaa23906460a3e5d95ac354830120c9dd108e
|
refs/heads/main
| 2023-04-07T17:29:40.644432 | 2021-04-12T14:16:20 | 2021-04-12T14:16:20 | 356,676,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
rule extract_disease_subtype_count_matrix:
input:
raw_count_file = "/beevol/home/satyanarr/data/data_from_papers/corces_mr_et_al_science_2018/cancer_type_specific_count_matrices/BRCA_raw_counts.txt",
patient_id_file = lambda wildcards: config["tcga_atac_params"][wildcards.disease_subtype]
output:
disease_subtype_count_matrix = "tcga_atac_seq_analysis/raw_count_matrix_{disease_subtype}.tsv"
shell:
"sh scripts/exctact_disease_subtype_columns.sh {input.raw_count_file} {input.patient_id_file} {output.disease_subtype_count_matrix}"
rule intesect_binding_sites_with_count_matrix:
input:
disease_subtype_count_matrix = "tcga_atac_seq_analysis/raw_count_matrix_{disease_subtype}.tsv",
bed_file = lambda wildcards: config["bedfile_annotation"][wildcards.bed]
params:
chunk_size = 10000
output:
tfbs_mapped_to_count_matrix = "tcga_atac_seq_analysis/tfbs_{bed}_mapped_to_{disease_subtype}.tsv"
shell:
"sh scripts/intersect_tfbs_with_count_matrix.sh {input.bed_file}"
" {input.disease_subtype_count_matrix} {output.tfbs_mapped_to_count_matrix}"
rule prepare_file_for_boxplot:
input:
tfbs_mapped_to_count_matrix = "tcga_atac_seq_analysis/tfbs_{bed}_mapped_to_{disease_subtype}.tsv"
output:
long_listed_tsv_with_class_label = "tcga_atac_seq_analysis/boxplot_data_for_{bed}_mapped_to_{disease_subtype}.tsv"
shell:
"python scripts/preapre_boxplot_data.py {input.tfbs_mapped_to_count_matrix}"
" {output.long_listed_tsv_with_class_label} "
|
UTF-8
|
Python
| false | false | 1,613 |
smk
| 155 |
tcga_atac_boxplot_analysis.smk
| 153 | 0.695598 | 0.690019 | 0 | 30 | 52.766667 | 158 |
nancyagrwal/Information-Retrieval
| 7,241,314,906,591 |
7b4ad883bc594bf91941b41d84428aae6c927273
|
664598daf3572b3860e4f8bc49e91c38dad55e17
|
/WebCrawler/Task1.py
|
1d4ad2ec925ccac39083e2fe90ee40311889e33e
|
[] |
no_license
|
https://github.com/nancyagrwal/Information-Retrieval
|
34ed84f38112adb44f096d90447fd9001beeecae
|
275c2d70c206f66aa82b3fcd58d2922041d83f92
|
refs/heads/master
| 2021-01-20T00:28:52.235989 | 2018-08-17T14:46:41 | 2018-08-17T14:46:41 | 89,137,614 | 3 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
from bs4 import BeautifulSoup
import time
def crawler(url, crawled):
# politeness policy of 1 sec between HTTP requests:
time.sleep(1)
source_txt = requests.get(url)
plain_txt = source_txt.text.encode('utf-8')
# implementing the beautiful soup library for parsing of data.
soup = BeautifulSoup(plain_txt, "lxml")
collection = []
for txt in soup.findAll('a'): # finding all the elements on the page
var = txt.get('href')
        if var is not None:
            # skip image links, namespace links containing ':', and the main page
            if '.jpg' not in var and 'JPG' not in var and '.jpeg' not in var and 'Main_Page' not in var and ':' not in var:
                if var.find('/wiki/') == 0:
if '#' in var:
# # is used as an anchor to jump to an element with the same name/id
var = var.split('#')
var = var[0]
else:
var = var
a = 'https://en.wikipedia.org' + var
if a not in collection + crawled:
# if the url is not in crawled and collection,append it to collection set.
collection.append(a)
return collection
def get_url_for_5levels():
count = 0
seed_url = 'https://en.wikipedia.org/wiki/Sustainable_energy'
url_to_process= [seed_url]
crawled = list()
while count < 5:
print count
processed_list = []
for item in url_to_process:
if item not in crawled:
crawled.append(item)
# the new list for crawling is the collection of already crawled and processed url's
new_crawled_list = crawled + processed_list + url_to_process
processed_list += crawler(item , new_crawled_list)
url_to_process = processed_list
if len(crawled) >= 1000:
# do not crawl for more than 1000 urls
break
if len(crawled) >= 1000:
# do not crawl for more than 1000 urls
break
count += 1
crawled_count = 1
for d in crawled:
# politeness policy of 1 sec
time.sleep(1)
pg = requests.get(d)
txt = pg.text.encode('utf-8')
        # write the page source to a numbered file
s = open("HTMLFile%s.txt" %crawled_count, "w")
s.write(txt)
crawled_count+=1
        s.close()
print len(crawled)
file = open('Task1.txt','w')
for text in crawled:
file.writelines(text+'\n') # writing to the file
get_url_for_5levels()
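# Note: membership tests against the growing lists above are O(n); for larger
# crawls a parallel set makes each check O(1). Illustrative sketch:
#   crawled, crawled_set = [], set()
#   if a not in crawled_set:
#       crawled.append(a)
#       crawled_set.add(a)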
|
UTF-8
|
Python
| false | false | 2,358 |
py
| 671 |
Task1.py
| 25 | 0.622561 | 0.608567 | 0 | 84 | 27.071429 | 117 |
wwwzxaaa/Python-data-processing
| 1,176,821,043,580 |
bcfea85e1357f1ab7a2ee00f0ebee9e374f3e974
|
679d03b46eea15d1c1c4e617126fdca58be4b0c0
|
/quyang.py
|
c2c075b678a6f8d2fcda73f6ea2f4451361087a6
|
[] |
no_license
|
https://github.com/wwwzxaaa/Python-data-processing
|
1180910e8a3a73783bbf706adea61c97821b5697
|
3c0aef24e7ee95e21f6b8179effe46d84e8a7941
|
refs/heads/master
| 2021-07-07T19:57:49.264794 | 2021-01-21T04:46:55 | 2021-01-21T04:46:55 | 226,469,309 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
f1 = open(r"E:\work\drug\drugs.txt","r")
list_d = f1.readlines()
len1 = len(list_d)
drugs = []
for d in range(len1):
drugs.append(list_d[d].strip("\n"))
len_d = len(drugs)
print(drugs)
print(len_d)
f2 = open(r"E:\work\drug\effect.txt","r")
list_s= f2.readlines()
len2 = len(list_s)
side_effect = []
for s in range(len2):
side_effect.append(list_s[s].strip("\n"))
len_s = len(side_effect)
print(len_s)
drug_side_effect = []
for i in range(len_d):
for j in range(len_s):
drug_side_effect.append(drugs[i]+'\t'+side_effect[j])
len_ds = len(drug_side_effect)
print (len_ds)
#
fw = open("sample1.txt","w")
for line in range(len_ds):
fw.writelines(drug_side_effect[line] + "\n")
fw.close()
print("save")
|
UTF-8
|
Python
| false | false | 754 |
py
| 59 |
quyang.py
| 56 | 0.603448 | 0.591512 | 0 | 30 | 23 | 61 |
jyleong/DailyCodingProblems
| 15,779,709,862,513 |
3df2e70ed545cc1fb7e91bf6089eaed0758cbadf
|
b53e25313d8afff95cb5510bfa9b3e4616123662
|
/daily_coding_problem_163.py
|
d4cdc019755d34130f8c344c258169d9ddd760fc
|
[] |
no_license
|
https://github.com/jyleong/DailyCodingProblems
|
9f3a640654c43b36e320118576cbe8535c858a70
|
7798b2597c686ff3e030eec600a208c8cd467983
|
refs/heads/master
| 2021-07-02T21:26:53.535762 | 2020-10-14T00:54:35 | 2020-10-14T00:54:35 | 181,254,199 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Given an arithmetic expression in Reverse Polish Notation, write a program to evaluate it.
The expression is given as a list of numbers and operands. For example: [5, 3, '+'] should return 5 + 3 = 8.
For example, [15, 7, 1, 1, '+', '-', '/', 3, '*', 2, 1, 1, '+', '+', '-'] should return 5,
since it is equivalent to ((15 / (7 - (1 + 1))) * 3) - (2 + (1 + 1)) = 5.
You can assume the given expression is always valid.
'''
import unittest
def calculate(a, b, op):
if op == '+':
return a + b
elif op == '-':
return a - b
elif op == '*':
return a * b
else: # op == '/'
return a / b
EXPRESSION_SET = set(['+', '/', '*', '-'])
def eval_expression(arr):
operand_stack = []
for item in arr:
if item in EXPRESSION_SET:
op_2 = operand_stack.pop()
op_1 = operand_stack.pop()
result = calculate(op_1, op_2, item)
operand_stack.append(result)
else:
operand_stack.append(item)
return operand_stack[0]
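# Equivalent dispatch via the operator module instead of the if/elif chain;
# an alternative sketch, not used by the tests below.
import operator
OPS = {'+': operator.add, '-': operator.sub,
       '*': operator.mul, '/': operator.truediv}
def eval_expression_ops(arr):
    stack = []
    for item in arr:
        if item in OPS:
            b, a = stack.pop(), stack.pop()
            stack.append(OPS[item](a, b))
        else:
            stack.append(item)
    return stack[0]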
class DailyCodingProblemTest(unittest.TestCase):
def test_case_1(self):
test = [5, 3, '+']
result = 8
self.assertEqual(eval_expression(test), result)
def test_case_2(self):
test = [15, 7, 1, 1, '+', '-', '/', 3, '*', 2, 1, 1, '+', '+', '-']
result = 5
self.assertEqual(eval_expression(test), result)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 1,445 |
py
| 117 |
daily_coding_problem_163.py
| 115 | 0.524567 | 0.493426 | 0 | 54 | 25.777778 | 108 |
kdheepak/psst
| 15,831,249,465,957 |
0798d5943575088dc4d28abaa74f206df8ba95fd
|
853c9bfad727fb08dbd533ddfb9406bffa7ab579
|
/tests/test_generator_view.py
|
38e9979aef1d081ef1e35135359801bf81e1499f
|
[
"MIT"
] |
permissive
|
https://github.com/kdheepak/psst
|
ad3a5987eba3fabc32c7219e98c72e5750597f2a
|
36d7abfe35d7841939205d6b7613735cb9f817db
|
refs/heads/master
| 2020-06-27T09:16:24.241389 | 2018-04-12T17:03:02 | 2018-04-12T17:03:31 | 94,248,671 | 8 | 1 |
MIT
| false | 2018-02-27T13:37:36 | 2017-06-13T19:14:54 | 2017-09-29T12:34:17 | 2018-02-27T13:37:36 | 1,614 | 0 | 0 | 0 |
Python
| false | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_psst
----------------------------------
Tests for `psst` module.
"""
import numpy as np
import pytest as pt
import traitlets as T
import psst
from psst.case.generator import Generator, GeneratorView, GeneratorCostView
from .test_generator import default_generator
@pt.fixture(scope="module")
def dg():
return default_generator()
@pt.fixture()
def default_generator_view(dg):
gv = GeneratorView(
model=dg
)
return gv
@pt.fixture()
def default_generator_cost_view(dg):
gv = GeneratorCostView(
model=dg
)
return gv
def test_generator_view(default_generator_view):
gv = default_generator_view
g = gv.model
assert isinstance(gv.model, Generator)
assert gv._title.value == 'Generator:'
assert gv._name.value == g.name
assert gv._maximum_real_power.value == gv._initial_real_power.max
assert gv._maximum_real_power.value == gv._minimum_real_power.max
assert gv._maximum_real_power.value == gv._ramp_up_rate.max
assert gv._maximum_real_power.value == gv._ramp_down_rate.max
assert g.maximum_real_power == gv._maximum_real_power.value
assert g.name == gv._name.value
assert g.generation_type == gv._generation_type.value
assert g.initial_status == gv._initial_status.value
assert g.minimum_real_power == gv._minimum_real_power.value
assert g.initial_real_power == gv._initial_real_power.value
assert g.minimum_up_time == gv._minimum_up_time.value
assert g.minimum_down_time == gv._minimum_down_time.value
assert g.nsegments == gv._nsegments.value
assert g.ramp_up_rate == gv._ramp_up_rate.value
assert g.ramp_down_rate == gv._ramp_down_rate.value
assert g.startup_time == gv._startup_time.value
assert g.shutdown_time == gv._shutdown_time.value
assert g.noload_cost == gv._noload_cost.value
assert g.startup_cost == gv._startup_cost.value
def test_generator_costview_generator_view(
default_generator_cost_view,
default_generator_view
):
gcv = default_generator_cost_view
gv = default_generator_view
assert gv.model == gcv.model
assert gcv._scale_x.max == gv._maximum_real_power.value
assert np.all(gcv._scatter.x == gv.model.cost_curve_points)
assert np.all(gcv._scatter.y == gv.model.cost_curve_values)
assert np.all(gcv._scatter.x == gcv._lines.x)
assert np.all(gcv._scatter.y == gcv._lines.y)
gcv._lines.x = [0, 10, 20, 30]
gcv._lines.y = [0, 10, 20, 30]
assert np.all(gcv._scatter.x == gv.model.cost_curve_points)
assert np.all(gcv._scatter.y == gv.model.cost_curve_values)
assert np.all(gcv._scatter.x == gcv._lines.x)
assert np.all(gcv._scatter.y == gcv._lines.y)
|
UTF-8
|
Python
| false | false | 2,763 |
py
| 74 |
test_generator_view.py
| 42 | 0.664857 | 0.659428 | 0 | 100 | 26.62 | 75 |
TechDomani/cows_and_bulls_python
| 15,668,040,713,783 |
f9740da29e4f0fdd14814f75b678dc20fa01d7c2
|
70c68e4f4b7491cf79d3e7b18e4d3b0aaa46db76
|
/favouriteSubject.py
|
145942e36a783f78d3d84a2fb184aa50726fb691
|
[] |
no_license
|
https://github.com/TechDomani/cows_and_bulls_python
|
344f3ca36a0d2f72130234131b5dc304317870a1
|
e144ce15e15ebb4aff399c22e13afb082f434079
|
refs/heads/main
| 2023-08-29T17:30:10.219099 | 2021-10-03T21:24:15 | 2021-10-03T21:24:15 | 408,854,022 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/python3
favouriteSubject = 'unknown'
while True:
favouriteSubject = input('What is your favourite subject? ')
if (favouriteSubject.lower() == 'computing'):
print('Well done. You got the answer right. ' +
'Computing is the best subject.')
break
print('Sorry ' + favouriteSubject +
' is not the right answer. Please try again.')
|
UTF-8
|
Python
| false | false | 358 |
py
| 5 |
favouriteSubject.py
| 4 | 0.678771 | 0.675978 | 0 | 12 | 28.916667 | 62 |
viranca/CS4240_Deep_Learning_Project
| 15,719,580,315,763 |
fdaef46ee6d816f8546a80209af4ad6c6592e590
|
b73c087a0d8f568832c9fbec5e726b57657bacf8
|
/influence-aware-memory_Original_Work/environments/warehouse/test.py
|
b4468e55674cc788a9d365b70ee3d3d4fd95b138
|
[] |
no_license
|
https://github.com/viranca/CS4240_Deep_Learning_Project
|
c19f77833959ca4616660d03da402b43b804c88a
|
1177c217657726c04071ec600d8aff8b762852da
|
refs/heads/main
| 2023-04-09T06:01:38.296177 | 2021-04-16T19:31:25 | 2021-04-16T19:31:25 | 339,204,257 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from warehouse import Warehouse
import numpy as np
warehouse = Warehouse()
warehouse.reset()
#################### Test _place_robots function ######################
for robot in warehouse.robots:
assert robot._robot_domain[0] <= robot.get_position[0] <= robot._robot_domain[2] and \
robot._robot_domain[1] <= robot.get_position[1] <= robot._robot_domain[3], \
'place robots test: failed. Robot {} is not within its designated domain'.format(robot.id)
print('place robots test: passed')
###################### Test _add_items function #######################
item_rows = np.arange(0, warehouse.n_rows, warehouse.distance_between_shelves)
for item in warehouse.items:
assert item.get_position[0] in item_rows, \
'add items test: failed. Item {} is not on a shelf'.format(item.id)
warehouse._add_items()
print('add items test: passed')
###################### Test remove_items function #####################
warehouse = Warehouse()
warehouse.reset()
pos = warehouse.items[0].get_position
warehouse.robots[0]._pos = pos
warehouse._remove_items()
state = warehouse._get_state()
assert state[pos[0],pos[1], 0] == 0, 'remove items test: failed'
print('remove items test: passed')
################### Test compute rewards function #####################
warehouse = Warehouse()
warehouse.reset()
learning_robot_id = warehouse.learning_robot_id
pos = warehouse.items[0].get_position
robot = warehouse.robots[learning_robot_id]
robot._pos = pos
n_items = robot.items_collected
reward = warehouse._compute_reward(robot)
assert reward == 1, 'compute rewards test: failed. Wrong reward'
assert robot.items_collected > n_items, \
'compute rewards: failed'
warehouse = Warehouse()
warehouse.reset()
robot = warehouse.robots[learning_robot_id]
pos = warehouse.items[0].get_position
robot._pos = pos
for i in range(warehouse.max_n_items):
warehouse._compute_reward(robot)
assert robot.done == True, \
'compute rewards test: failed. Agent is not done after max_n_items were collected'
print('compute rewards test: passed')
####################### Test action fucntion ##########################
warehouse = Warehouse()
warehouse.reset()
# action 0
initial_positions = []
for robot in warehouse.robots:
initial_positions.append(robot.get_position)
actions = dict(enumerate(np.zeros(len(warehouse.robots), dtype=np.int)))
warehouse.step(actions)
for robot, initial_position in zip(warehouse.robots, initial_positions):
assert robot.get_position[1] - 1 == initial_position[1], "action test: failed"
# action 1
initial_positions = []
for robot in warehouse.robots:
initial_positions.append(robot.get_position)
actions = dict(enumerate(np.ones(len(warehouse.robots), dtype=np.int)))
warehouse.step(actions)
for robot, initial_position in zip(warehouse.robots, initial_positions):
assert robot.get_position[1] + 1 == initial_position[1], "action test: failed"
# action 2
initial_positions = []
for robot in warehouse.robots:
initial_positions.append(robot.get_position)
actions = dict(enumerate(2*np.ones(len(warehouse.robots), dtype=np.int)))
warehouse.step(actions)
for robot, initial_position in zip(warehouse.robots, initial_positions):
assert robot.get_position[0] + 1 == initial_position[0], "action test: failed"
# action 3
initial_positions = []
for robot in warehouse.robots:
initial_positions.append(robot.get_position)
actions = dict(enumerate(3*np.ones(len(warehouse.robots), dtype=np.int)))
warehouse.step(actions)
for robot, initial_position in zip(warehouse.robots, initial_positions):
assert robot.get_position[0] - 1 == initial_position[0], "action test: failed"
print('action test: passed')
######################## Test action space #############################
warehouse = Warehouse()
warehouse.reset()
print(warehouse.action_space)
############################ Test graph ###############################
robot = warehouse.robots[1]
graph = warehouse._create_graph(robot)
breakpoint()
|
UTF-8
|
Python
| false | false | 3,942 |
py
| 137 |
test.py
| 17 | 0.682395 | 0.673262 | 0 | 92 | 41.847826 | 96 |
lfranz922/Raspberry-Pi-Iperf3
| 18,451,179,504,345 |
be926698af932470049bec7a5ee808d56e373d25
|
f7d77bf33834cc1cd43f7666e99fb3d14684f7cb
|
/iperfScript.py
|
39a2f82a112013148be80f46602c43b939fd2813
|
[] |
no_license
|
https://github.com/lfranz922/Raspberry-Pi-Iperf3
|
efa3ee5abf77ff771f63fff53f602d387e503d03
|
30738413dbcef20d4613dba6499a5df5d207c578
|
refs/heads/main
| 2023-08-16T07:34:31.231489 | 2021-10-15T17:25:52 | 2021-10-15T17:25:52 | 341,027,240 | 0 | 0 | null | false | 2021-03-12T03:50:01 | 2021-02-21T23:28:20 | 2021-03-04T04:57:33 | 2021-03-12T03:49:44 | 173 | 0 | 0 | 0 |
Python
| false | false |
import time
import re
import subprocess
from datetime import datetime
#TODO:
"""
Made by Lukas Franz
Things to add:
- a better output screen (I'm thinking the speeds with a green background if iperf has a good speed and red if it's slow/off)
- turn into a proper script
- when run from cmd line it throws an error
- fix the ping function to work for linux
Things that could be expanded on in the future:
- interface with automation (send logs/speed maybe as JSON)
"""
class Port:
"""
A port class that stores the ip (inet) of a port
"""
Ip = None
    def __init__(self, ip):
        """
        creates a new port
        """
        self.Ip = ip
    def getIp(self):
        """
        returns the value of Ip
        """
        return self.Ip
class Ports:
"""
An object that stores a list of all active ports
"""
ports = [] #stores the IPs of all ethernet Ports
def __init__(self):
"""
Creates a Ports object and fills it with all active IPs
"""
self.getIps()
def getIps(self):
"""
uses ifconfig to get all current ethernet ports and their IPs and places the IPs in the ports variable in Ports
returns a list of all active ethernet ports' IPs
"""
self.ports = []
eth = []
i = 0
searching = True
#cmd = f"ifconfig eth{i} | grep 'inet '| cut -d: -f2"
for i in range(2):
#print(i)
temp = subprocess.Popen(["ifconfig", f"eth{i}"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
#temp = subprocess.Popen(["ifconfig eth0"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
for line in temp.readlines():
if 'error' in line.decode('utf-8'):
searching = False
break
eth.append(line.decode('utf-8'))
#print(line.decode('utf-8'))
#print(eth)
for i in range(len(eth)):
if len(re.findall(r"eth\d", eth[i])) > 0:
print("ladies and gentlemen; we got him")
#print(eth[i])
inet = re.findall(r"inet \d+.\d+.\d+.\d+", eth[i+1])
#print(inet[0])
self.ports.append(inet[0][5:])
print("found ports:", self.ports)
return(self.ports)
def ping(ip1, ip2):
"""
Returns True if host (str) responds to a ping request.
"""
cmd = ['ping', '-c 1', '-I' + ip1, ip2]
try:
print("trying to connect")
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
lines = out.readlines()
print(lines[-1].decode("utf-8"))
if 'Cannot assign requested address' in lines[-1].decode("utf-8"): #TODO change this to deal with linux string
print("Ports could not connect")
return False
for line in lines:
print(line.decode("utf-8")[:-2])
except:
print("Ports could not connect: exception")
return False
return True
def areConnected(self):
"""
        tests if any of the ports in self are connected to each other
        """
        try:
            print(self.ports)
for p1 in self.ports:
for p2 in self.ports:
if p1 == p2:
print("ports are same")
continue
elif Ports.ping(p1, p2):
connected_ports = [p1, p2]
print("ports connected")
return True
except:
print("ports not connected")
pass
return False
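# Illustrative use of the helpers above:
#   ports = Ports()           # discovers eth0/eth1 inet addresses via ifconfig
#   if ports.areConnected():  # True once the two ports can ping each other
#       print("ready to start the iperf3 test")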
EXPECTED_MIN_SPEED = 900 #can be changed to whatever we want
class main:
threads = []
ports = []
    def __init__(self, run, labels):
        # 'run' arrives as a parameter; it cannot also be declared global
        self.labels = labels
main.clearFileContents("logs.txt")
subprocess.Popen(['killall iperf3'], shell = True)
time.sleep(1)
ips = Ports()
ips.getIps()
print("ips has ports:", ips.ports)
        # mode = getMode()  # getMode() is not defined anywhere in this script
print("Script told to run:", run)
while run:
while (not ips.areConnected() and run):
print("connecting Ports...")
time.sleep(0.5)
print("=================================\n")
print(" ports connected")
print("\n=================================")
while (not self.startTwoWayTCP(ips) and run):
time.sleep(0.5)
continue
time.sleep(0.5)
while (self.isTCPRunning() and run):
time.sleep(0.5)
print("Script Running: ", run)
try:
for i in range(4):
labels[i].configure(text=str(self.speeds[i]))
except:
print("exception happened. run: ", run)
pass
print("Script told to run:", run)
def startTwoWayTCP(self, ips):
"""
        initiates a 2-way TCP test with the first 2 ips from the ports list #can be changed
returns True if the test started running False otherwise
"""
self.threads = []
print("=================================\n")
print(" starting TCP test")
print("\n=================================")
self.threads.append(subprocess.Popen([f'iperf3 -s -B {ips.ports[0]} -f m --logfile Server1.txt'], shell = True, stdout = None))
self.threads.append(subprocess.Popen([f'iperf3 -s -B {ips.ports[1]} -f m --logfile Server2.txt'], shell = True, stdout = None))
self.threads.append(subprocess.Popen([f'iperf3 -c {ips.ports[0]} -B {ips.ports[1]} -f m -t 0 -V --logfile Client1.txt'],
shell = True, stdout = None))
self.threads.append(subprocess.Popen([f'iperf3 -c {ips.ports[1]} -B {ips.ports[0]} -f m -t 0 -V --logfile Client2.txt'],
shell = True, stdout = None))
time.sleep(2)
return self.isTCPRunning()
def get_speeds():
speeds = []
for file in LogTypes.getLogFileNames():
with open(file, 'r') as f:
try:
last_line = f.read().splitlines()[-1] #this could be traded out for reading from CMD line
#print(file, last_line)
except:
last_line = "iperf3: exiting"
print("file is empty")
try:
if "iperf3: exiting" not in last_line and last_line != "iperf3: error - unable to connect to server: Cannot assign requested address":
speed = re.findall(r"\d+.?\d+ [A-Z]?bits/sec", last_line)
print(speed)
number = re.findall(r"\d+.?\d+", speed[-1])
                    speeds.append(float(number[-1]))
else:
print(file[0:-4] + " 2-way TCP test is not running")
main.clearFileContents(file)
subprocess.Popen(['killall iperf3'], shell = True)
except:
print("file contains unexpected strings")
subprocess.Popen(['killall iperf3'], shell = True)
return speeds
def isTCPRunning(self):
"""
Returns True if a 2 way TCP test is currently running False otherwise
Prints the speeds of the test if it is running
"""
speeds = []
running = True
print("------------------------------------------------------------------------------------------------------------------")
for file in LogTypes.getLogFileNames():
with open(file, 'r') as f:
try:
last_line = f.read().splitlines()[-1] #this could be traded out for reading from CMD line
print(file, last_line)
except:
last_line = "iperf3: exiting"
print("file is empty")
try:
if "iperf3: exiting" not in last_line and "iperf3: error" not in last_line:
speed = re.findall(r"\d+.?\d+ [A-Z]?bits/sec", last_line)
print(speed)
number = re.findall(r"\d+.?\d+", speed[-1])
speeds.append(float(number[-1]))
if float(number[-1]) > EXPECTED_MIN_SPEED:
print(file[0:-4] + " 2-way TCP test is running")
elif float(number[-1]) > 1:
print(file[0:-4] + " 2-way TCP test is not running well")
else:
print("\n\n\nTCP TEST IS TOO LOW\n\n\n")
for t in self.threads:
t.kill()
subprocess.Popen(['killall iperf3'], shell = True) #could be turned into its own function
running = False
else:
print(file[0:-4] + " 2-way TCP test is not running")
main.clearFileContents(file)
subprocess.Popen(['killall iperf3'], shell = True)
running = False
except:
print("file contains unexpected strings")
subprocess.Popen(['killall iperf3'], shell = True)
main.clearFileContents(file)
for t in self.threads:
t.kill()
self.speeds = speeds
print("2 way TCP test has speeds: ", speeds)
print("------------------------------------------------------------------------------------------------------------------")
now = datetime.now()
timestamp = datetime.timestamp(now)
log_file = open("logs.txt", 'a+')
if not running:
log_file.write(str(timestamp) + ": Iperf went down\n")
time.sleep(0.25)
else:
log_file.write(str(timestamp) + ": " + str(speeds)+"\n")
log_file.close()
return running
def clearFileContents(fName):
"""
Empties a test file with the given name
"""
with open(fName, "w"):
pass
class LogTypes():
"""
An object that stores the arbitrary names of each output file for iperf to
write to
"""
def getLogFileNames():
"""
returns a list of the 4 log file names as strs with their extension/file type (.txt)
"""
return ["Server1.txt", "Server2.txt", "Client1.txt", "Client2.txt"]
def getNames():
"""
returns a list of the 4 log types as strs
"""
return ["Server1", "Server2", "Client1", "Client2"]
def start(GUI):
    # main() requires (run, labels); the GUI attribute names here are
    # illustrative assumptions, not part of the original API
    GUI.script = main(True, GUI.labels)
#main() #runs main
|
UTF-8
|
Python
| false | false | 11,143 |
py
| 6 |
iperfScript.py
| 3 | 0.47752 | 0.467558 | 0 | 312 | 34.714744 | 154 |
make-42/PFSA
| 2,199,023,307,882 |
4f03085e94495fe84360d567a285ba0b31b8467f
|
08dec8ca3cbfdeb54b32ba34bed16fddc4baa247
|
/imggenerate.py
|
a98f87077068adf183456268d0db790333dd2978
|
[] |
no_license
|
https://github.com/make-42/PFSA
|
70fb602116414a0f85047ef859715fbc5cae1e5d
|
884b507c3eb65bec5a6fbf114c4753b01ce0f77a
|
refs/heads/master
| 2022-10-14T02:56:20.591375 | 2022-10-05T09:57:55 | 2022-10-05T09:57:55 | 186,132,497 | 0 | 1 | null | false | 2022-10-05T09:57:57 | 2019-05-11T12:51:31 | 2019-05-26T10:29:26 | 2022-10-05T09:57:56 | 423,592 | 0 | 1 | 0 |
Python
| false | false |
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import pathlib
import os
import sys
def generate(path, textsize):
path = pathlib.Path(path)
fnt = ImageFont.truetype('C:\\tmp\\PFSA\\font.ttf', 100)
fntb = ImageFont.truetype('C:\\tmp\\PFSA\\font.ttf', textsize)
background = Image.open("C:\\tmp\\PFSA\\background.jpg").convert("RGBA")
paddingx = 300
paddingy = 50
lineseparation = textsize*2.5
blur = 1
print("Step 2 : Cropping")
background = background.crop((0,0,background.size[0],300+(len(os.listdir(str(path)))*lineseparation)))
print("Step 3 : Blur")
for blurcount in range(blur):
background = background.filter(ImageFilter.BLUR)
print("Step 4 : Drawing")
d = ImageDraw.Draw(background)
d.line((0,90,background.size[0],90), fill=(255,255,255))
if len(str(path)) >= 70:
fnt = ImageFont.truetype('C:\\tmp\\PFSA\\font.ttf', round(100/(len(str(path))/75)))
d.text((paddingy*2,125),str(path), font=fnt, fill=(255,255,255))
for x in range(len(os.listdir(str(path)))):
d.line((0,paddingx+((x)*lineseparation),background.size[0],paddingx+((x)*lineseparation)), fill=(255,255,255))
d.text((paddingy ,paddingx+((x)*lineseparation)), os.listdir(str(path))[x], font=fntb, fill=(255,255,255))
return background
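# Example call (path and text size are illustrative); generate() returns a
# PIL image, so the caller decides where to save it:
#   listing = generate("C:\\tmp\\PFSA\\somedir", 40)
#   listing.save("C:\\tmp\\PFSA\\listing.png")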
|
UTF-8
|
Python
| false | false | 1,358 |
py
| 6 |
imggenerate.py
| 4 | 0.656848 | 0.60162 | 0 | 29 | 45.758621 | 118 |
som1234567/final_project2
| 9,182,640,099,489 |
c19010ecd36ff4e8268ae0c2a2f7a1888440aca4
|
5d47b7077f1ed278d2c469767c227aae5e128a2b
|
/online_store/users/migrations/0004_profile_phone.py
|
4f3de39e6103a321dd3fc7402c4515d87de8f75b
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/som1234567/final_project2
|
3d78869a1060d8f93c55ddde7dd8ed739779505e
|
3d61404f2fe4b9e4503087e523ea16d6c411f57f
|
refs/heads/main
| 2023-02-02T19:12:09.075542 | 2020-12-17T09:25:51 | 2020-12-17T09:25:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.1.3 on 2020-11-18 00:53
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_profile_country'),
]
operations = [
migrations.AddField(
model_name='profile',
name='phone',
field=models.CharField(blank=True, max_length=17, null=True, validators=[django.core.validators.RegexValidator(message='Enter a valid international mobile phone number starting with +(country code)', regex='^[+]*[(]{0,1}[0-9]{1,4}[)]{0,1}[-\\s\\./0-9]*$')]),
),
]
|
UTF-8
|
Python
| false | false | 629 |
py
| 48 |
0004_profile_phone.py
| 28 | 0.626391 | 0.577107 | 0 | 19 | 32.105263 | 270 |
pmorris2012/AccordionLungs
| 13,769,665,195,250 |
26c7ef204bfa35779f1d0731015a5e66c59ce278
|
577581a985c995e5b1ff1081e3b10a4aa10ca9ed
|
/main.py
|
e67c8b041840d4949651f92ecb34be1600705047
|
[] |
no_license
|
https://github.com/pmorris2012/AccordionLungs
|
c6ef6957df4e867da63cf7f9fe3bf78e913913ac
|
d34d3b3b3e05f25776909bc701f3d995c341ac3a
|
refs/heads/master
| 2016-08-09T13:26:15.342817 | 2016-01-25T04:39:26 | 2016-01-25T04:39:26 | 50,325,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
if __name__ == "__main__":
lower_blue = np.array([90, 50, 50], dtype=np.uint8)
upper_blue = np.array([135,255,255], dtype=np.uint8)
lower_red1 = np.array([165, 100, 100], dtype=np.uint8)
upper_red1 = np.array([180,255,255], dtype=np.uint8)
cap = cv2.VideoCapture(0)
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',-1,20.0, (640, 480))
while True:
_, frame = cap.read()
#frame = cv2.GaussianBlur(frame,(9,9), 5)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_red1, upper_red1)
mask_inv = cv2.bitwise_not(mask)
blue = cv2.bitwise_and(frame, frame, mask=mask)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
img1 = cv2.bitwise_and(gray, gray, mask=mask_inv)
img2 = cv2.bitwise_and(blue, blue, mask=mask)
dst = cv2.add(img1, img2)
out.write(dst)
cv2.imshow("res2", dst)
if cv2.waitKey(1) & 0xFF == ord("c"):
break
cap.release()
out.release()
cv2.destroyAllWindows()
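    # Note: the second VideoWriter argument above is the codec; -1 pops a codec
    # picker on Windows. A portable explicit choice (sketch, mirrors the
    # commented-out line near the top):
    #   fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #   out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))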
|
UTF-8
|
Python
| false | false | 1,164 |
py
| 1 |
main.py
| 1 | 0.593643 | 0.520619 | 0 | 32 | 35.40625 | 59 |
hdantas/fuzzing-exercise
| 9,921,374,490,447 |
16613b9be8a503e16792be50a762593f12d3f7d8
|
e2ba16364cccba8130d9ecd9667724c6c8fe2ee6
|
/code/readfile.py
|
132654050fb434407ca1cc3d08c9eb01832c8ab9
|
[] |
no_license
|
https://github.com/hdantas/fuzzing-exercise
|
f28b931e7168cf27e3c0aa315f0984e390f9a16d
|
f9ec274e3601713725f1c5811d5b9bef7088b0eb
|
refs/heads/master
| 2021-01-18T18:25:31.374765 | 2014-06-20T13:11:59 | 2014-06-20T13:11:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import getopt
import hashlib
import os
import sys
import time
import webbrowser
import re
USAGE_STR = 'readfile.py -i <inputfile>'
INPUT_FILE = 'output.txt'
class ReadFile:
LINES_EASTEREGG = 42
TEXT_EASTEREGG = "PWN1337"
MAX_TEXT = 1337
PATTERN = '[A-Z]{2}[A-Za-z][0-9]{3}[A-Za-z0-9]'
LEN_PATTERN = 7
def readfile(self, inputfile, encoded=True):
if not os.path.isfile(inputfile):
print "Can't find file " + inputfile + '\n'
return
f = open(inputfile, 'r')
lines = f.readlines()
if encoded:
try:
lines = lines[0].decode("hex").splitlines()
except:
print "Failed to parse.''"
return
for line in lines:
if line.count('\t') != 3:
print "Failed to parse line '" + line[:-1] + "'"
return
tokens = line.strip().split('\t') #remove trailing & leading whitespaceand tokenize on \t
length = tokens[0]
text = tokens[1]
shasum = tokens[2]
utctime = tokens[3]
if len(length) >= ReadFile.MAX_TEXT or len(text) >= ReadFile.MAX_TEXT or len(shasum) >= ReadFile.MAX_TEXT or len(utctime) >= ReadFile.MAX_TEXT:
raise Exception('please help...')
if ReadFile.TEXT_EASTEREGG.find(text) != -1 and len(text) > 0:
print "Right on! You found an easter egg! You deserve a break."
webbrowser.open("https://xkcd.com/327/")
sys.exit(2)
if ReadFile.LINES_EASTEREGG == len(lines):
print "Nice! You just found an easter egg!"
webbrowser.open("https://xkcd.com/571/")
sys.exit(2)
length_test = length.isdigit() and length == str(len(text))
#make sure all the text (in sequences of 7 words) match the regex pattern
text_test = len(text) == len(re.findall(ReadFile.PATTERN, text)) * ReadFile.LEN_PATTERN
shasum_test = shasum == hashlib.sha1(text + utctime).hexdigest()
utctime_test = self.testtime(utctime)
if not(length_test and text_test and shasum_test and utctime_test):
print "Failed to parse on line '" + line[:-1] + "'"
return
print "Parsed " + inputfile + " correctly!"
def testtime(self, time_str):
try:
struct = time.strptime(time_str, "%d%m%Y:%H%M%S")
except:
return False
length = len(time_str) == 15
day = int(time_str[0:2]) == struct.tm_mday
month = int(time_str[2:4]) == struct.tm_mon
year = int(time_str[4:8]) == struct.tm_year
colon = time_str[8] == ':'
hour = int(time_str[9:11]) == struct.tm_hour
minute = int(time_str[11:13]) == struct.tm_min
second = int(time_str[13:15]) == struct.tm_sec
return length and year and month and day and colon and hour and minute and second
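    def make_line(self, text, utctime):
        """
        Illustrative helper: builds one line readfile() accepts -- length,
        text, sha1(text + utctime) and the timestamp, tab-separated. 'text'
        must still match PATTERN and 'utctime' the ddmmYYYY:HHMMSS format.
        """
        shasum = hashlib.sha1(text + utctime).hexdigest()
        return '\t'.join([str(len(text)), text, shasum, utctime])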
def main():
inputfile = INPUT_FILE
encoded = True
try:
opts, args = getopt.getopt(sys.argv[1:],"hei:",["help", "encoded", "ifile="])
except getopt.GetoptError as err:
print str(err)
print USAGE_STR
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', "--help"):
print USAGE_STR
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-e", "--encoded"):
encoded = not encoded
print "Reading from " + inputfile
newfile = ReadFile()
newfile.readfile(inputfile, encoded)
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 3,758 |
py
| 10 |
readfile.py
| 6 | 0.531932 | 0.516232 | 0 | 121 | 30.066116 | 155 |
werellel/Algorithm
| 12,859,132,133,359 |
13c9322c2300e88b0d78c9e5466d618d7739fbc8
|
01f97cd4c342a00bdfa97e3ebfd7fe97edbc5de5
|
/hackerrank/arrays/new_year_chaos.py
|
23d1b3f924edebeea9493b98f0ff06abd5ea10cb
|
[] |
no_license
|
https://github.com/werellel/Algorithm
|
722217935b417a87ea71f5af629e04cb209ba3ac
|
37472a174f838efab96dc9a8988749ceb78b1806
|
refs/heads/master
| 2022-12-17T22:12:12.172059 | 2020-09-07T06:24:10 | 2020-09-07T06:24:10 | 259,062,636 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
def minimumBribes(q):
move = 0
for idx, num in enumerate(q):
if num - (idx+1) > 2:
return 'Too chaotic'
for idx2 in range(max(num-2,0), idx):
if q[idx2] > num:
move += 1
return move
if __name__ == '__main__':
t = int(input())
for t_itr in range(t):
n = int(input())
q = list(map(int, input().rstrip().split()))
print(minimumBribes(q))
|
UTF-8
|
Python
| false | false | 1,042 |
py
| 22 |
new_year_chaos.py
| 22 | 0.508637 | 0.491363 | 0 | 54 | 18.296296 | 52 |
fgtorres/iHospital
| 12,816,182,445,527 |
cc5224fd06603cadca7663453f8a1a085ff01b88
|
d1c6ff25f0ad139883d5c313e188ec2f347dba07
|
/scripts/leitor.py
|
801aba14ac19f73fdc056e8677541222ad45450e
|
[] |
no_license
|
https://github.com/fgtorres/iHospital
|
e1a7800a865d2ed9e942b755a9086ef2e40b9592
|
8629b544fc31d9043192e940dc5cffbfceb357a8
|
refs/heads/master
| 2020-03-18T09:52:13.775351 | 2018-12-05T22:31:34 | 2018-12-05T22:31:34 | 134,584,194 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
with open('hospitais.json', 'r', encoding='utf8') as f:
    arquivo_json = json.loads(f.read())
with open('nome_hospitais.txt', 'w') as r:
    for linha in arquivo_json:
        nome_hospital = linha['nome']
        r.write(nome_hospital + '\n')
|
UTF-8
|
Python
| false | false | 235 |
py
| 58 |
leitor.py
| 28 | 0.659574 | 0.655319 | 0 | 10 | 22.5 | 55 |
KandyKad/Python-3rd-Sem
| 9,045,201,170,427 |
2c6876f2a864cd0ad8d4797e9430f080a1790a78
|
289e359b1c40a5b434c925267db30bc8d5299807
|
/Lab6/A6_2_py.py
|
b1bf65bdb12b77d166669333c8e5eabfe7bd120b
|
[] |
no_license
|
https://github.com/KandyKad/Python-3rd-Sem
|
fb960c8e018bb96d173759b10863d776d5574c8f
|
1c54cf903e466f86906828a239b008c4dbe946b0
|
refs/heads/master
| 2021-01-07T11:57:56.355322 | 2020-02-21T16:27:48 | 2020-02-21T16:27:48 | 241,684,095 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
a = input("Enter words: ")
mylist = a.split()
newlist = []
for i in mylist:
if i not in newlist:
newlist.append(i)
newlist.sort()
for i in newlist:
print(i, ":", mylist.count(i))
|
UTF-8
|
Python
| false | false | 204 |
py
| 84 |
A6_2_py.py
| 67 | 0.578431 | 0.578431 | 0 | 9 | 20.666667 | 34 |
richardbw/graph_event
| 1,554,778,196,270 |
f0ba5fafd437947a8f7c0b70621509a7db9d42b6
|
ca688be0060c94cbe93ca76d845a26e57369a9de
|
/src/pipegraph.py
|
fa7bcbaef0f2949f3a9e844b95aa6a7bf938f8b5
|
[] |
no_license
|
https://github.com/richardbw/graph_event
|
d72985e9945f3b681fd009a181cdbcfd7f764734
|
355a47cb8be719760e17493e21f8b1306465ec2a
|
refs/heads/master
| 2021-01-22T22:57:33.761860 | 2010-03-08T03:54:48 | 2010-03-08T03:54:48 | 551,951 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
pipegraph -r "reg(ex)" [other_options]
pipegraph -p "preconf_id"
pipegraph -h
Read input lines from <stdin> and graph numbers, extracted with a regex.
Note that the regex format must have only one 'capturing group' that can
evaluate to a number. The regex format is python, similar to perl.
See:
http://docs.python.org/library/re.html
http://www.regular-expressions.info/python.html
Examples
$ cat gig_be.log | pipegraph -r ".*numberOfEvents\(\):\s*(\d+)"
$ tail -f gig_be.log | pipegraph -r ".*VTUC_MS.*numberOfEvents.*(\d+)"
$ tail -f gig_be.log | pipegraph -p "evt"
"""
import os, sys,logging, gtk, pango, gobject, re, ConfigParser
from optparse import OptionParser
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
__author__ = "rbw"
__license__ = "GPL"
__version__ = "0.1.1a"
__maintainer__ = "rbw"
__email__ = "rbw@sla-mobile.com.my"
__status__ = "Alpha"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
_log = logging.getLogger('pipegraph')
loghndlr = logging.StreamHandler(sys.stdout)
loghndlr.setFormatter(logging.Formatter("%(asctime)s|%(levelname)s> %(message)s"))
_log.addHandler(loghndlr)
#window defaults:
WIN_HEIGHT = 300
WIN_WIDTH = 600
WIN_TITLE = "Data count"
HORIZ_SPACE = 1
LINE_COLOUR = "red"
CONFIG = ConfigParser.ConfigParser()
MAX_STDIN_LINES = 10000 #number of times to loop through stdin on startup, before giving up
MAX_EVT_COUNT = 0
MIN_EVT_COUNT = sys.maxint
#gridlines:
NO_H_BLOCKS = 3
NO_V_BLOCKS = 5
gobject.threads_init()
def expose_handler(drawingArea, event) : #{{{
window = drawingArea.window
#_log.debug("drawingArea: "+str(drawingArea)+ ", window: "+str(window))
w = window.get_size()[0] -1
h = window.get_size()[1] -1
xgc = window.new_gc()
xgc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
window.draw_rectangle(xgc, False, 0, 0, w, h)
attr = pango.AttrList()
attr.insert(pango.AttrForeground(0, 0, 0, 0, -1))
layout = drawingArea.create_pango_layout("Max: "+str(MAX_EVT_COUNT))
layout.set_alignment(pango.ALIGN_LEFT)
layout.set_font_description(pango.FontDescription("Courier New 8"))
layout.set_attributes(attr)
window.draw_layout(xgc, 1, 1, layout)
layout.set_text("Min: "+str(MIN_EVT_COUNT))
window.draw_layout(xgc, 1, h - layout.get_pixel_size()[1], layout)
# Horizontal lines:
for i in range(1, NO_H_BLOCKS):
h1 = (h/NO_H_BLOCKS)*i
window.draw_line(xgc, 0, h1, w, h1)
# Vertical lines:
for i in range(1, NO_V_BLOCKS):
v = (w/NO_V_BLOCKS)*i
window.draw_line(xgc, v, 0, v, h)
xgc.set_rgb_fg_color(gtk.gdk.color_parse(LINE_COLOUR))
global GRAPH_DATA_ARR, HORIZ_SPACE
for i in range(1, len(GRAPH_DATA_ARR)):
x = i * HORIZ_SPACE
window.draw_line(xgc,
x - HORIZ_SPACE, getY(i - 1, h),
x , getY(i , h),
);
    if len(GRAPH_DATA_ARR) * HORIZ_SPACE > w:
        drawingArea.set_size_request(len(GRAPH_DATA_ARR) * HORIZ_SPACE, h)
def getY(i, h):
global GRAPH_DATA_ARR
global MAX_EVT_COUNT
global MIN_EVT_COUNT
if MAX_EVT_COUNT == MIN_EVT_COUNT:
return int(h * .5)
y = h - (
int(h * ( float(GRAPH_DATA_ARR[i]-MIN_EVT_COUNT) / float(MAX_EVT_COUNT-MIN_EVT_COUNT) ) )
)
return y
#}}}
def save_drawingarea(widget, data=None):
chooser = gtk.FileChooserDialog(
title="Save graph as PNG",
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK
))
filter = gtk.FileFilter()
filter.set_name("PNG Image (*.png)")
filter.add_mime_type("image/png")
filter.add_pattern("*.png")
chooser.add_filter(filter)
#filter = gtk.FileFilter()
#filter.set_name("All files (*.*)")
#filter.add_pattern("*")
#chooser.add_filter(filter)
    response = chooser.run()
    filename = chooser.get_filename()
    chooser.destroy()
    if response != gtk.RESPONSE_OK or filename is None: return
    png_file = filename if filename.lower().endswith('.png') else (filename + '.png')
    _log.debug( "Will save to: "+ png_file )
#http://www.daa.com.au/pipermail/pygtk/2002-November/003841.html
_log.debug("drawingArea size: "+ str(DRAWING_AREA.size_request()))
w = DRAWING_AREA.size_request()[0] -1
h = DRAWING_AREA.size_request()[1] -1
pixbuf = gtk.gdk.Pixbuf (
gtk.gdk.COLORSPACE_RGB,
has_alpha=False,
bits_per_sample=8,
width=w, height=h)
pixbuf.get_from_drawable (DRAWING_AREA.window, DRAWING_AREA.window.get_colormap(), 0, 0, 0, 0, w, h)
pixbuf.save (png_file, "png")
DRAWING_AREA = gtk.DrawingArea()
def buildWin(): #{{{
w = gtk.Window()
w.set_title(WIN_TITLE)
w.set_default_size(WIN_WIDTH, WIN_HEIGHT)
w.set_icon(w.render_icon(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON))
w.connect('destroy', gtk.main_quit)
DRAWING_AREA.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("white"))
DRAWING_AREA.connect("expose-event", expose_handler)
DRAWING_AREA.show()
s = gtk.ScrolledWindow()
s.set_policy(gtk.POLICY_ALWAYS, gtk.POLICY_NEVER)
s.set_shadow_type(gtk.SHADOW_ETCHED_IN)
s.add_with_viewport(DRAWING_AREA)
b = gtk.Button("Quit")
b.connect_object("clicked", lambda w: w.destroy(), w)
b.show()
b1 = gtk.Button("Save snapshot to file...")
b1.connect_object("clicked", save_drawingarea, w)
b1.show()
h = gtk.HBox(homogeneous=False, spacing=5)
h.pack_start(b1)
h.pack_start(b)
v = gtk.VBox(False,spacing=1)
v.show()
v.pack_start(s, True, True, 0)
v.pack_start(h, False, False, 0)
w.add(v)
w.show_all()
return w
#}}}
#{{{ stdin_handler
GRAPH_DATA_ARR = []
current_line =""
def stdin_handler(stdin, condition):
global current_line
global GRAPH_DATA_ARR
global MAX_EVT_COUNT
global MIN_EVT_COUNT
byte = stdin.read(1)
#print byte,
if byte != '':
if byte != '\n':
current_line += byte
else:
print current_line
m = REGEX.search(current_line)
if m is not None:
datum = int(m.group(1))
if datum > MAX_EVT_COUNT: MAX_EVT_COUNT = datum
if datum < MIN_EVT_COUNT: MIN_EVT_COUNT = datum
GRAPH_DATA_ARR.append(datum)
current_line = ""
return True # run again
else:
current_line = ""
return False # stop looping (or else gtk+ goes CPU 100%)
#}}}
#{{{ List preset
CONFIG_PRESET_PREF="preset:"
def show_presets():
print ""
print "List of preset configurations in config file:"
print "---------------------------------------------"
for section in CONFIG.sections():
if section.startswith(CONFIG_PRESET_PREF):
print "-", section[len(CONFIG_PRESET_PREF):]
sys.exit(2)
#}}}
def parseCmdLine(): #{{{
global REGEX, HORIZ_SPACE, WIN_TITLE, WIN_HEIGHT, WIN_WIDTH, LINE_COLOUR
parser = OptionParser(usage=__doc__, version=__version__)
parser.add_option("-r", "--regex", dest="regex", help="Regex to extract number", default=REGEX)
parser.add_option("-t", "--title", dest="win_title", help="Window title", metavar="'title'", default=WIN_TITLE)
parser.add_option("-y", dest="win_height", help="Window height", metavar="nn", default=WIN_HEIGHT)
parser.add_option("-x", dest="win_width", help="Window width", metavar="nn", default=WIN_WIDTH)
parser.add_option("-s", dest="horiz_space", help="Horizonatal space increment", metavar="nn", default=HORIZ_SPACE)
parser.add_option("-c", dest="line_colour", help="Line colour ('green', 'black', etc.)", metavar="colour", default=LINE_COLOUR)
parser.add_option("-l", dest="list_preset", help="List preset configurations", default=False, action="store_true")
parser.add_option("-p", "--preset", dest="preset", help="Use named preset", metavar="'PresetName'" )
parser.add_option( "--debug", dest="debug", help="Enable debug mode", default=False, action="store_true")
(options, args) = parser.parse_args()
if options.debug:
_log.setLevel(logging.DEBUG)
_log.debug( "Debug enabled" )
else:
_log.setLevel(logging.INFO)
if options.list_preset:
show_presets()
elif options.preset is not None:
section = CONFIG_PRESET_PREF+options.preset
if not CONFIG.has_section(section):
_log.error("Preset section '"+section+"' not found in config file.")
sys.exit(217)
WIN_TITLE = CONFIG.get(section, "win_title")
WIN_HEIGHT = int(CONFIG.get(section, "win_height"))
WIN_WIDTH = int(CONFIG.get(section, "win_width"))
LINE_COLOUR = CONFIG.get(section, "line_colour")
HORIZ_SPACE = int(CONFIG.get(section, "horiz_space"))
REGEX = re.compile(CONFIG.get(section, "regex"))
else:
WIN_TITLE = options.win_title
WIN_HEIGHT = int(options.win_height)
WIN_WIDTH = int(options.win_width)
LINE_COLOUR = options.line_colour
HORIZ_SPACE = int(options.horiz_space)
if options.regex is None:
_log.error("No Regex parameter ('-r'). Use '-h' to see commandline options.")
sys.exit(209)
REGEX = re.compile(options.regex)
_log.debug("REGEX: "+ str(REGEX.pattern))
if REGEX.groups != 1:
_log.error("Regex("+options.regex+") must have only one group: "+str(REGEX.groups)+"")
#sys.stderr.write("\nERROR: Regex("+options.regex+") must have only one group: "+str(REGEX.groups)+"\n")
sys.exit(194)
#}}}
def getPresetConfig(): #{{{
basic_config_name = "pipegraph.ini"
config_in_home_dir = os.path.expanduser('~')+os.sep+'.'+basic_config_name
if os.path.isfile(config_in_home_dir):
_log.debug("Found config in HOME dir: %s"%config_in_home_dir)
CONFIG.read(config_in_home_dir)
return
config_in_curr_dir = sys.path[0]+os.sep+basic_config_name
if os.path.isfile(config_in_curr_dir):
_log.debug("Found config in current dir: %s"%config_in_curr_dir)
CONFIG.read(config_in_curr_dir)
return
_log.warning("Unable to read preset config file ("+basic_config_name+") in $HOME or current dir.")
#sys.exit(263)
#}}}
REGEX = None
def main(argv):
getPresetConfig()
parseCmdLine()
line_count = 0
while stdin_handler(sys.stdin, None):
line_count += 1
        if line_count == MAX_STDIN_LINES : break # prevent inadvertent endless loop
_log.debug("Finished reading pre-existing stdin input")
window = buildWin()
gobject.io_add_watch(sys.stdin, gobject.IO_IN, stdin_handler)
gtk.main()
if __name__ == "__main__":
main(sys.argv[1:])
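# Example preset section for ~/.pipegraph.ini (keys mirror parseCmdLine above;
# the name after 'preset:' is what -p expects). Values are illustrative:
#   [preset:evt]
#   win_title   = Event count
#   win_height  = 300
#   win_width   = 600
#   line_colour = red
#   horiz_space = 2
#   regex       = .*numberOfEvents\(\):\s*(\d+)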
|
UTF-8
|
Python
| false | false | 11,467 |
py
| 2 |
pipegraph.py
| 1 | 0.578704 | 0.570507 | 0 | 351 | 31.638177 | 164 |
darkismus/mooc-ohjelmointi-21
| 19,481,971,656,584 |
22c8cb9d4b89f932c7448d272d680ec0f6604b9a
|
361ac3fcf36d80c792b60b7e2284cb1dc8d77944
|
/osa05-07_sudoku_osa5/src/sudoku_lisays_ja_tulostus.py
|
938ace5d158587c9f4d96f5854bdb52af6c92966
|
[] |
no_license
|
https://github.com/darkismus/mooc-ohjelmointi-21
|
48cc20391db4240104549d4f3834a67c77976f6d
|
5f72dd9cff78704a2a0f5bc1cc18c7740ce50c51
|
refs/heads/main
| 2023-08-01T03:35:13.244978 | 2021-09-14T10:49:37 | 2021-09-14T10:49:37 | 368,469,947 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# write your solution here
def lisays(sudoku: list, rivi_nro: int, sarake_nro: int, luku:int):
sudoku[rivi_nro][sarake_nro] = luku
def tulosta(sudoku: list):
vali = 0
alekkain = 0
for i in sudoku:
# print(i)
for j in i:
if j != 0:
print(f"{j} ", end="")
else:
print("_ ", end="")
vali += 1
if vali % 3 == 0:
print(" ", end="")
alekkain += 1
if alekkain % 3 == 0:
print()
print()
if __name__ == "__main__":
sudoku = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
tulosta(sudoku)
lisays(sudoku, 0, 0, 2)
lisays(sudoku, 1, 2, 7)
lisays(sudoku, 5, 7, 3)
print()
print("Kolme numeroa lisätty:")
print()
tulosta(sudoku)
|
UTF-8
|
Python
| false | false | 1,109 |
py
| 212 |
sudoku_lisays_ja_tulostus.py
| 177 | 0.387534 | 0.298103 | 0 | 46 | 23.086957 | 67 |
LeiWong/zb_work
| 3,161,095,947,651 |
8840172a528690373d977603f43173ee93f04d60
|
dd7c4d8dac99ea2306d3248cbaab120169b8142a
|
/scripts/marketbox-medical-svr/django_server/apps/account/migrations/0042_auto_20170717_1130.py
|
dd21888169550fd2ab9370bb3dc6a36dfc4f57e3
|
[] |
no_license
|
https://github.com/LeiWong/zb_work
|
0842dececf9ab9912f6b6cd070e1a9ae23e6ec7a
|
02e295e7856834bf7406210a14f78153829db1d4
|
refs/heads/master
| 2019-12-18T02:14:41.413971 | 2019-08-23T10:08:07 | 2019-08-23T10:08:07 | 88,497,628 | 0 | 0 | null | false | 2019-10-22T21:20:20 | 2017-04-17T10:26:15 | 2019-08-23T10:08:09 | 2019-10-22T21:20:19 | 9,895 | 0 | 0 | 1 |
Python
| false | false |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-17 11:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0041_auto_20170714_1831'),
]
operations = [
migrations.AlterField(
model_name='userusage',
name='event_uuid',
field=models.CharField(max_length=36, unique=True),
),
]
|
UTF-8
|
Python
| false | false | 468 |
py
| 410 |
0042_auto_20170717_1130.py
| 390 | 0.602564 | 0.532051 | 0 | 20 | 22.4 | 63 |
adamcunnington/foodbank-southlondon
| 627,065,225,223 |
0189052485e358721942589379145dc29fc1ca0b
|
f237511b9c8d5d332fde5eb4808c2aef97935917
|
/backend/foodbank_southlondon/bff/models.py
|
ddb3f2abcc53f07accef39b2333be20e810cd600
|
[] |
no_license
|
https://github.com/adamcunnington/foodbank-southlondon
|
ad764f32590c02bdd492837d8b69a8fdf787f630
|
474e4941c74147a778113e5f4f97f75fb97873e6
|
refs/heads/master
| 2023-04-02T00:51:32.577370 | 2023-01-05T20:34:04 | 2023-01-05T20:34:04 | 258,849,120 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from typing import Dict
import copy
from flask_restx import fields # type: ignore
import flask_restx # type: ignore
from foodbank_southlondon.api import models
from foodbank_southlondon.api.requests import models as requests_models
from foodbank_southlondon.api.events import models as events_models
from foodbank_southlondon.bff import rest
_pagination = rest.model("ResultsPage", models.pagination_fields)
def _clone_field_without_attribute(field: fields.Raw) -> fields.Raw:
field_copy = copy.copy(field)
field_copy.attribute = None
return field_copy
def _clone_fields_without_attribute(model: flask_restx.Model) -> Dict:
return {k: _clone_field_without_attribute(v) for k, v in model.items()}
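# Illustrative effect (assumed): cloning requests_models.request["postcode"] via
# _clone_field_without_attribute yields an equivalent field whose `attribute` is
# None, so marshalling falls back to the field's own key ("postcode") rather
# than the upstream attribute mapping.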
action = rest.model("Action", {
"request_ids": fields.List(events_models.event["request_id"]),
"event_name": fields.String(required=True, description="The name of the action event",
example=events_models.Action.DELETE_REQUEST.value.event_name, enum=events_models.ACTION_NAMES),
"event_data": events_models.event["event_data"]
})
status = rest.model("Status", {
"request_ids": fields.List(events_models.event["request_id"]),
"event_name": fields.String(required=True, description="The name of the status event",
example=events_models.Action.DELETE_REQUEST.value.event_name, enum=events_models.STATUS_NAMES),
"event_data": events_models.event["event_data"]
})
_event = rest.model("EventSummary", {
"event_timestamp": events_models.event["event_timestamp"],
"event_name": events_models.event["event_name"],
"event_data": events_models.event["event_data"]
})
_summary = rest.inherit("Summary", _event, {
"request_id": requests_models.request["request_id"],
"client_full_name": _clone_field_without_attribute(requests_models.request["client_full_name"]),
"voucher_number": _clone_field_without_attribute(requests_models.request["voucher_number"]),
"postcode": _clone_field_without_attribute(requests_models.request["postcode"]),
"packing_date": _clone_field_without_attribute(requests_models.request["packing_date"]),
"time_of_day": _clone_field_without_attribute(requests_models.request["time_of_day"]),
"household_size": _clone_field_without_attribute(requests_models.request["household_size"]),
"congestion_zone": _clone_field_without_attribute(requests_models.request["congestion_zone"]),
"flag_for_attention": _clone_field_without_attribute(requests_models.request["flag_for_attention"]),
"signposting_call": _clone_field_without_attribute(requests_models.request["signposting_call"]),
"collection_centre": _clone_field_without_attribute(requests_models.request["collection_centre"]),
"collection_centre_abbr": fields.String(required=False, description="The short name for the collection centre",
example="VXH"),
"phone_number": _clone_field_without_attribute(requests_models.request["phone_number"])
})
page_of_summary = rest.inherit("SummaryPage", _pagination, {
"form_submit_url": fields.String(required=True, description="The URL that users can use to submit entries in the form.",
example="https://docs.google.com/forms/d/e/asdasdasd989123123lkf_skdjfasd/viewform"),
"items": fields.List(fields.Nested(_summary))
})
_request = rest.model("ClientRequest", _clone_fields_without_attribute(requests_models.request))
_similar_request_summary = rest.model("SimilarClientRequestSummary", {
"request_id": requests_models.request["request_id"],
"timestamp": _clone_field_without_attribute(requests_models.request["timestamp"]),
"client_full_name": _clone_field_without_attribute(requests_models.request["client_full_name"]),
"postcode": _clone_field_without_attribute(requests_models.request["postcode"]),
"voucher_number": _clone_field_without_attribute(requests_models.request["voucher_number"])
})
details = rest.model("ClientRequestDetails", {
"request": fields.Nested(_request),
"events": fields.List(fields.Nested(_event)),
"similar_request_ids": fields.List(fields.Nested(_similar_request_summary))
})
|
UTF-8
|
Python
| false | false | 4,211 |
py
| 114 |
models.py
| 75 | 0.712895 | 0.710758 | 0 | 84 | 49.130952 | 127 |
varshaa123/OpenQuiz-Portal
| 19,559,281,091,911 |
8a64bbcaca2f7b33fb1107282f399affef24a7e2
|
68637a9f2e66639a65898055fc233ca21736d88f
|
/OpenQuiz-Portal/OpenQuiz/create_tables.py
|
e30dd1c32cb2da33986feaff604550e2771715ae
|
[] |
no_license
|
https://github.com/varshaa123/OpenQuiz-Portal
|
6b20e275ace14dd6541a4e63337ee5e1c843cad2
|
b0025f2966ce15a87c056d8bfebeeb779c0d8cba
|
refs/heads/master
| 2023-03-19T20:21:37.199523 | 2019-04-24T07:26:41 | 2019-04-24T08:10:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pymysql.cursors
import time
def connect_db():
connection = pymysql.connect(host='sql12.freesqldatabase.com',
port=3306,
user='sql12288801',
password='IIRcqAD4VW',
db='sql12288801',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
return connection
def execute_query(query):
conn = connect_db()
with conn.cursor() as cursor:
try:
cursor.execute(query)
conn.commit()
conn.close()
except Exception as e:
print(e)
return str(e)
def createFacultyTable():
query = """
CREATE TABLE IF NOT EXISTS faculty (
fid INTEGER PRIMARY KEY AUTO_INCREMENT,
fname varchar(30),
email varchar(30) UNIQUE,
dept varchar(30)
);
"""
return execute_query(query)
def createCourseTable():
query = """
CREATE TABLE IF NOT EXISTS course (
cid varchar(30) PRIMARY KEY,
cname varchar(30),
ic_id INTEGER,
FOREIGN KEY (ic_id) REFERENCES faculty(fid)
);
"""
return execute_query(query)
def createQuizTable():
query = """
CREATE TABLE IF NOT EXISTS quiz (
qid INTEGER PRIMARY KEY AUTO_INCREMENT,
fid INTEGER,
cid varchar(30),
qname varchar(30),
start varchar(30),
end varchar(30),
FOREIGN KEY (fid) REFERENCES faculty(fid),
FOREIGN KEY (cid) REFERENCES course(cid)
);
"""
return execute_query(query)
def createProblemTable():
query = """
CREATE TABLE IF NOT EXISTS problem (
pid INTEGER PRIMARY KEY AUTO_INCREMENT,
qid INTEGER,
statement varchar(30),
option1 varchar(2),
option2 varchar(2),
option3 varchar(2),
option4 varchar(2),
ans varchar(2),
positive INTEGER,
negative INTEGER,
FOREIGN KEY (qid) REFERENCES quiz(qid)
);
"""
return execute_query(query)
def createStudentTable():
query = """
CREATE TABLE IF NOT EXISTS student (
sid varchar(30) PRIMARY KEY,
sname varchar(30)
);
"""
return execute_query(query)
def createFacultyCourseTable():
query = """
CREATE TABLE IF NOT EXISTS facultycourse (
fid INTEGER,
cid varchar(30),
FOREIGN KEY (fid) REFERENCES faculty(fid),
FOREIGN KEY (cid) REFERENCES course(cid)
);
"""
return execute_query(query)
def createStudentCourseTable():
query = """
CREATE TABLE IF NOT EXISTS studentcourse (
sid varchar(30),
cid varchar(30),
FOREIGN KEY (sid) REFERENCES student(sid),
FOREIGN KEY (cid) REFERENCES course(cid)
);
"""
return execute_query(query)
def createResponseTable():
query = """
CREATE TABLE IF NOT EXISTS response (
sid varchar(30),
pid INTEGER,
qid INTEGER,
option1 varchar(2),
FOREIGN KEY (sid) REFERENCES student(sid),
FOREIGN KEY (qid) REFERENCES quiz(qid),
FOREIGN KEY (pid) REFERENCES problem(pid)
);
"""
return execute_query(query)
def createMarklistTable():
query = """
CREATE TABLE IF NOT EXISTS marklist (
qid INTEGER,
sid varchar(30),
marks INTEGER,
FOREIGN KEY (sid) REFERENCES student(sid),
FOREIGN KEY (qid) REFERENCES quiz(qid)
);
"""
return execute_query(query)
def createLogsTable():
query = '''
CREATE TABLE IF NOT EXISTS logs (
query varchar(30),
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
);
'''
return execute_query(query)
createCourseTable()
createFacultyTable()
createFacultyCourseTable()
createMarklistTable()
createProblemTable()
createQuizTable()
createResponseTable()
createStudentCourseTable()
createStudentTable()
createLogsTable()
|
UTF-8
|
Python
| false | false | 3,998 |
py
| 22 |
create_tables.py
| 13 | 0.585543 | 0.567534 | 0 | 167 | 22.94012 | 68 |
TinDang97/hackerrank_solution
| 506,806,182,317 |
83177870e88b4d4503bd2d69c76426c8bfdc13f7
|
38c7216cc145d49ed2aaa03675e2050f8087f6f8
|
/sherlock_and_array.py
|
991c684066182234ba005ea7e3c9e2f3efc099ad
|
[
"MIT"
] |
permissive
|
https://github.com/TinDang97/hackerrank_solution
|
1f1ee9baff3030078e2d3888971067f094e522e3
|
c93c2d9090733b239d420ec9f1fdc0a176e18af6
|
refs/heads/main
| 2023-02-04T11:39:23.750372 | 2020-12-20T17:49:43 | 2020-12-20T17:49:43 | 322,764,033 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/bin/python3
# problem link: https://www.hackerrank.com/challenges/sherlock-and-array/problem
import math
import os
import random
import re
import sys
# Complete the balancedSums function below.
def balancedSums(arr):
left_sum = 0
right_sum = sum(arr)
for elm in arr:
if abs((right_sum - elm) - left_sum) < 0.001:
return "YES"
left_sum += elm
right_sum -= elm
return "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
T = int(input().strip())
for T_itr in range(T):
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = balancedSums(arr)
fptr.write(result + '\n')
fptr.close()
|
UTF-8
|
Python
| false | false | 742 |
py
| 4 |
sherlock_and_array.py
| 3 | 0.579515 | 0.571429 | 0 | 36 | 19.611111 | 80 |
hriks/geocoding
| 12,867,722,036,162 |
47b7509178da8ca9f1cfd4a05048cee615cb6269
|
dd753f01a4a7616e8efb01d8542e4f560d0b8fdf
|
/geocoding/urls.py
|
7a1abe4991d5fa9d8a3022b822a2a5409598c27e
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/hriks/geocoding
|
31d605aef01f5ee2924dbba0fb7e5ba5694e9a61
|
3a1c2365da60fd6c643ef23d422fa26d68594299
|
refs/heads/master
| 2020-06-30T02:16:15.231620 | 2019-08-06T12:05:26 | 2019-08-06T12:05:26 | 200,690,534 | 1 | 0 |
Apache-2.0
| false | 2019-12-04T23:56:48 | 2019-08-05T16:22:42 | 2019-08-06T12:05:29 | 2019-12-04T23:56:46 | 34 | 0 | 0 | 1 |
Python
| false | false |
from django.urls import path, include, re_path
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
) + [
path('admin/', admin.site.urls),
re_path('', include('location.urls'))
]
|
UTF-8
|
Python
| false | false | 324 |
py
| 11 |
urls.py
| 7 | 0.734568 | 0.734568 | 0 | 11 | 28.454545 | 57 |
perfsonar/pscheduler
| 12,403,865,571,708 |
30b8d20e63d8873de2d834486a76df478a589fdd
|
9110f6b1251e002ee7daf6e2c5c1d7ab5fedfcf8
|
/pscheduler-server/pscheduler-server/api-server/pschedulerapiserver/json.py
|
d65d46d3e83c17b368b7a4a73838dc13d0bb0d6c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
https://github.com/perfsonar/pscheduler
|
8a62c076ce8a6e4a51042ed294468616885819ad
|
f6d04c0455e5be4d490df16ec1acb377f9025d9f
|
refs/heads/master
| 2023-08-11T02:12:15.487628 | 2023-07-24T15:16:00 | 2023-07-24T15:16:00 | 49,273,408 | 53 | 41 |
Apache-2.0
| false | 2023-07-24T14:09:18 | 2016-01-08T13:25:33 | 2023-07-14T11:49:13 | 2023-07-24T14:09:17 | 299,608 | 48 | 32 | 122 |
Python
| false | false |
#
# JSON-Related Functions
#
import pscheduler
from flask import request
from .args import *
from .dbcursor import dbcursor_query
from .response import *
from .util import *
def json_dump(dump):
return pscheduler.json_dump(dump,
pretty=arg_boolean('pretty')
)
def json_query_simple(query, query_args=[], empty_ok=False, key=None):
"""Do a SQL query that selects one column and dump those values as
a JSON array"""
if request.method != 'GET':
return not_allowed()
cursor = dbcursor_query(query, query_args)
if cursor.rowcount == 0:
cursor.close()
if empty_ok:
# This is safe to return unsanitized
return ok_json([], sanitize=False)
else:
return not_found()
result = []
for row in cursor:
result.append(row[0])
cursor.close()
return ok_json_sanitize_checked(result, key)
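# Illustrative call (hypothetical query; real callers live in the API modules):
#   return json_query_simple("SELECT name FROM tool ORDER BY name", empty_ok=True)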
def json_query(query, query_args=[], name='name', single=False, key=None):
"""Do a SQL query that selects one column containing JSON and dump
the results, honoring the 'expanded' and 'pretty' arguments. If
the 'single' argument is True, the first-returned row will be
returned as a single item instead of an array."""
if request.method != 'GET':
return not_allowed()
cursor = dbcursor_query(query, query_args)
if single and cursor.rowcount == 0:
cursor.close()
return not_found()
result = []
for row in cursor:
this = base_url(None if single else row[0][name])
row[0]['href'] = this
result.append( row[0] if single or is_expanded() else this)
cursor.close()
return ok_json_sanitize_checked((result[0] if single else result), key)
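# Illustrative call (hypothetical query and column name): returns the single
# matching row's JSON with an 'href' field added:
#   return json_query("SELECT json_detail FROM task WHERE name = %s", [name], single=True)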
|
UTF-8
|
Python
| false | false | 1,788 |
py
| 799 |
json.py
| 514 | 0.61745 | 0.613535 | 0 | 65 | 26.507692 | 75 |
matan-h/mhyt
| 2,473,901,170,577 |
caa5914ccbf4e6a06d1e0772ff87cc9dd8ed1c64
|
5b63d27b793fa34185516febdd8aa719158a899e
|
/setup.py
|
86cc2e5ed1a07c93766b2b58370fa2796390ccfc
|
[
"MIT"
] |
permissive
|
https://github.com/matan-h/mhyt
|
0c04c692251920090559bf8a25478338b2164aed
|
c8c77cf810ee6cad69c61385cd143dcb48770c2a
|
refs/heads/master
| 2023-01-22T16:26:21.817116 | 2020-12-02T07:07:20 | 2020-12-02T07:07:20 | 317,636,999 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='mhyt', # How you named your package folder (MyLib)
version='3.5.4', # Start with a small number and increase it with every change you make
    license='MIT',  # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description='download files from youtube using simple code', # Give a short description about your library
author='matan h', # Type in your name
author_email='matan.honig2@gmail.com', # Type in your E-Mail
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/matan-h/mhyt",
packages=['mhyt'],
install_requires=["youtube-dl","imageio_ffmpeg"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.0',
)
|
UTF-8
|
Python
| false | false | 994 |
py
| 3 |
setup.py
| 2 | 0.672032 | 0.66499 | 0 | 22 | 44.181818 | 111 |
berlin-leaks/berlinleaks.com
| 8,031,588,864,699 |
66a0cf81c3a5f830defa8dcd65beab2c3da9b0aa
|
bd9cf09e67b5cf3e36989259854f89482bfffa7d
|
/website/setup.py
|
5d984950fa0187c47f68643e7cbba1d91633ddb5
|
[] |
no_license
|
https://github.com/berlin-leaks/berlinleaks.com
|
825200e2d1e4ceeef0212949b99675ce8b8be835
|
5b4fa0849a4a49ae2dd43d7c506990340dee44e6
|
refs/heads/master
| 2018-03-10T05:00:50.516777 | 2016-10-15T10:06:12 | 2016-10-15T10:06:12 | 50,425,462 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = [
'--doctest-modules',
'--strict',
# '--fulltrace', # useful for debugging
]
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
tests_require = [
'coverage==4.1',
'pytest==2.9.1',
]
install_requires = [
'PyYAML==3.11',
'Flask==0.11',
'Flask-Babel==0.11.1',
# transitive dependencies from Flask
'click==6.6',
'itsdangerous==0.24',
'Jinja2==2.8',
'MarkupSafe==0.23',
'py==1.4.31',
'Werkzeug==0.11.10',
# transitive dependencies from Flask-Babel
'pytz==2016.4',
]
setup(
name="berlin-leaks-website",
version="0.0.0",
author="Heartsucker",
author_email="berlinleaks@riseup.net",
description="Website for BerlinLeaks",
install_requires=install_requires,
tests_require=tests_require,
extras_require=dict(
tests=tests_require,
),
cmdclass={'test': PyTest},
)
|
UTF-8
|
Python
| false | false | 1,458 |
py
| 55 |
setup.py
| 11 | 0.602195 | 0.571331 | 0 | 63 | 22.142857 | 59 |
MCV-2020-M1-Project/Team2
| 15,685,220,594,905 |
e340f05c9c576b04ea1f822fe8b836d3cfdfe626
|
6005c91ce4982da9005ca82f1b68f179afe3c099
|
/week1/src/m1_w1.py
|
8cd1da428ac96969e135abd5a11e10cdb683ddc0
|
[] |
no_license
|
https://github.com/MCV-2020-M1-Project/Team2
|
c4838251b5f7aa83f6953f96ad57257413fa34a6
|
5c4daa9fe312359471d7fd2c06752bed4ee0b752
|
refs/heads/master
| 2023-01-01T14:43:02.094208 | 2020-10-26T15:40:46 | 2020-10-26T15:40:46 | 302,331,044 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
""" MCV - M1: Introduction to human and computer vision
Week 1 - Content Based Image Retrieval
Team 2 - Lali Bibilashvili
Victor Casales
Jaume Pregonas
Lena Tolstoy
m1_w1.py: main program
"""
""" Imports """
import argparse
import os
import numpy as np
import sys
import cv2
sys.path.append(os.getcwd()[:os.getcwd().index('src')])
import src.functions as functions
from pandas import Series
""" Constants """
DESCRIPTORS = ("1D_hist", "2D_hist", "3D_hist")
#COLOR_SPACE = ("CieLAB", "YCbCr", "RGB")
MEASURES = ("euclidean", "l1", "x2", "hist_intersection", "hellinger", "kl_divergence")
""" Global variables """
""" Classes """
""" Functions """
def build_arg_parser(ap): # here you can add all the flags you want our script to execute
# script execution example: python m1_w1.py -t 1 -src "path/to/files" -any_extra_flag
# python m1_w1.py --task 1 --source "path/to/files" -d "descriptor_name"
ap.add_argument("-t", "--task", required=True, dest="task", \
help="number of the task to execute: 1-6")
ap.add_argument("-src", "--source", required=True, dest="src", \
help="path to the folder with the images to analyse")
ap.add_argument("-d", "--descriptor", required=False, dest="descriptor", \
help="descriptor name, possible descriptors: " + str(DESCRIPTORS))
#ap.add_argument("-c", "--color", required=False, dest="color", \
# help="color space, possible color spaces: " + str(COLOR_SPACE))
ap.add_argument("-m", "--measure", required=False, dest="measure", \
help="measure name, possible measures: " + str(MEASURES))
ap.add_argument("-src2", "--source2", required=False, dest="src2", \
help="path to the bbdd for task 3")
ap.add_argument("-plot", "--plot", required=False, dest="plot",\
help="allows plotting the results from the tasks")
ap.add_argument("-store", "--store", required=False, dest="store",\
help="stores the results from the tasks in the results folder (see documentation)")
def load_images_from_folder(folder):
images = dict()
if not os.path.isdir(folder):
sys.exit('Src path doesn\'t exist')
for filename in os.listdir(folder):
img = functions.cv2.imread(os.path.join(folder,filename))
if img is not None:
images[filename] = img
else:
print("Image "+filename+" couldn't be open")
if len(images) == 0:
        sys.exit('The folder ' + folder + ' doesn\'t contain any images')
return images
""" Main """
def main():
ap = argparse.ArgumentParser()
build_arg_parser(ap)
args = ap.parse_args()
images = load_images_from_folder(args.src)
if args.task == "1": #generates image descriptors (histograms)
if args.descriptor is None or args.descriptor not in DESCRIPTORS:
ap.error('A correct descriptor must be provided for task 1, possible descriptors: ' + str(DESCRIPTORS))
#elif args.descriptor == "3D_hist" and (args.color is None or args.color not in COLOR_SPACE):
# ap.error('A correct color space must be provided for 3D histograms, possible color spaces: ' + str(COLOR_SPACE))
else:
functions.task1(images, args.descriptor, False, True)
elif args.task == "2":
print("Nothing to show here, execute task 3")
elif args.task == "3":
if args.src2 is None:
ap.error('A source path with the museum images must be provided in order to execute task 3')
#elif args.descriptor is None or args.descriptor not in DESCRIPTORS:
# ap.error('A correct descriptor must be provided for task 3, possible descriptors: ' + str(DESCRIPTORS))
elif args.measure is None or args.measure not in MEASURES:
ap.error('A correct measure must be provided for task 3, possible measures: ' + str(MEASURES))
else:
images_bbdd = load_images_from_folder(args.src2)
functions.task3(images_bbdd, images, args.measure)
elif args.task == "4":
        if args.src2 is None:
            ap.error('A source path with the museum images must be provided in order to execute task 4')
        elif args.measure is None or args.measure not in MEASURES:
            ap.error('A correct measure must be provided for task 4, possible measures: ' + str(MEASURES))
else:
images_bbdd = load_images_from_folder(args.src2)
functions.task4(images_bbdd, images, args.measure)
elif args.task == "5":
        if args.descriptor is None or args.descriptor not in DESCRIPTORS:
            ap.error('A correct descriptor must be provided for task 5, possible descriptors: ' + str(DESCRIPTORS))
else:
functions.task5(images, args.descriptor)
elif args.task == "6":
precision, recall, f1 = functions.task6(images, args.descriptor)
        avg_p = Series(list(precision.values())).mean()
        avg_r = Series(list(recall.values())).mean()
        avg_f1 = Series(list(f1.values())).mean()
        print(f"precision -> {avg_p}, recall -> {avg_r}, f1 -> {avg_f1}")
else:
ap.error("Task must be a number between 1 and 6")
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 5,427 |
py
| 10 |
m1_w1.py
| 9 | 0.609361 | 0.598858 | 0 | 129 | 41.077519 | 146 |
Mini-Proyectos/laboratorio2-jesus-cesar
| 7,292,854,514,591 |
c5780a1f4e356f4e1eb36b147d65a99bfd83639c
|
9d4c3cb3beaad142144e9b0f3b479eab39d7b54b
|
/Busquedas.py
|
8ab8e91685bbb5f1f4311284561a65ad473a00a2
|
[] |
no_license
|
https://github.com/Mini-Proyectos/laboratorio2-jesus-cesar
|
1c7ef212abffe45eca6b4831eef1c8237e2162c3
|
7cffac36336e7ccba0a23d75d8653ea7d380abf4
|
refs/heads/master
| 2020-03-31T14:43:30.433965 | 2018-10-09T21:13:18 | 2018-10-09T21:13:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def InsertionSort(a: [int], p: int, r: int):
    # insertion sort of the slice a[p..r], in place
    for j in range(p + 1, r + 1):
        key = a[j]
        i = j - 1
        while i >= p and a[i] > key:
            a[i + 1] = a[i]
            i -= 1
        a[i + 1] = key
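# Illustrative usage (hypothetical data): sorts the slice a[p..r] in place,
#   data = [5, 2, 9, 1]
#   InsertionSort(data, 0, len(data) - 1)  # data -> [1, 2, 5, 9]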
|
UTF-8
|
Python
| false | false | 266 |
py
| 3 |
Busquedas.py
| 3 | 0.304511 | 0.289474 | 0 | 9 | 28.444444 | 40 |
Mangul-Lab-USC/db.microbiome
| 18,253,611,032,088 |
ac7ecc63fa46a8b44f9cede8b2b301d56ddc648f
|
e3d2b88cc05e19204bc5144bece8df6917b111d5
|
/Fungi/code/fungi_stats_helper_functions.py
|
2c47ccee825b58677da34930724fe122efe5fab9
|
[] |
no_license
|
https://github.com/Mangul-Lab-USC/db.microbiome
|
085e32f5a495472f98c575fa5ded29f09b6ef811
|
18a8228596e373be789bf706f63ac8ca4b99b17f
|
refs/heads/master
| 2020-04-03T17:16:56.992288 | 2020-02-28T21:13:00 | 2020-02-28T21:13:00 | 155,438,676 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
def is_mitochnondria(category):
if category == 'mitochondrial' or category == 'mitochondrion' or category == 'Mt':
return True
else:
return False
def is_plasmid(category):
if category == 'plasmid':
return True
else:
return False
def is_contig(category):
if category == 'contig' or category == 'scaffold' or category == 'sca':
return True
else:
return False
def is_chromosome(category):
if category == 'chromosome' or category == 'chr':
return True
else:
return False
def determine_sequence_lengths(prev_dna_type, nucleotide_count, chrom_lengths, mt_lengths, plasmid_lengths, contig_lengths):
if prev_dna_type == "":
return
elif prev_dna_type == "chromosome":
chrom_lengths.append(nucleotide_count)
elif prev_dna_type == "mitochondria":
mt_lengths.append(nucleotide_count)
elif prev_dna_type == "plasmid":
plasmid_lengths.append(nucleotide_count)
elif prev_dna_type == "contig":
contig_lengths.append(nucleotide_count)
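# Illustrative call (hypothetical counts): mutates the caller's lists in place,
# e.g. determine_sequence_lengths("plasmid", 4500, chrom, mt, plas, contigs)
# appends 4500 to `plas` and leaves the other three lists untouched.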
|
UTF-8
|
Python
| false | false | 991 |
py
| 28 |
fungi_stats_helper_functions.py
| 15 | 0.711403 | 0.711403 | 0 | 40 | 23.8 | 124 |
midonet/python-neutron-plugin-midonet
| 6,519,760,397,634 |
1a036182879dae7ecdf2f2ea065d07c12a98d4fa
|
71811fe92c39606c3ba671848521c3b6e78f814e
|
/midonet/neutron/db/migration/alembic_migration/versions/4cedd30aadf6_add_task_type_flush.py
|
66e70c1f36ca1f9473d9620606c83bc8bad9c3a5
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/midonet/python-neutron-plugin-midonet
|
c14b74c3f861f41b604adfafb6342fb213e5ccbf
|
974a6afe45a252cfa5c7c875f389eb4d7a72987f
|
refs/heads/master
| 2021-01-21T22:58:17.727303 | 2015-07-16T11:32:30 | 2015-07-16T11:32:30 | 25,900,226 | 0 | 3 | null | false | 2015-02-20T04:28:14 | 2014-10-29T02:17:52 | 2015-02-19T14:13:15 | 2015-02-19T14:13:14 | 1,167 | 2 | 4 | 0 |
Python
| null | null |
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add task type FLUSH
Revision ID: 4cedd30aadf6
Revises: 25aeae45d4ad
Create Date: 2014-10-29 11:50:24.064368
"""
# revision identifiers, used by Alembic.
revision = '4cedd30aadf6'
down_revision = '25aeae45d4ad'
from alembic import op
def upgrade():
op.execute("INSERT INTO midonet_task_types (id, name) VALUES (4, 'flush')")
def downgrade():
op.execute("DELETE FROM midonet_task_types WHERE name='flush'")
|
UTF-8
|
Python
| false | false | 1,010 |
py
| 18 |
4cedd30aadf6_add_task_type_flush.py
| 17 | 0.743564 | 0.69703 | 0 | 36 | 27.055556 | 79 |
Amal-R-Jayakumar/Space-Invader
| 17,239,998,734,662 |
69b72b37935e4a8de50e2d5277e00e13cc9c31d4
|
0e922da73439c85a4a408eea1b762a3a67018063
|
/main.py
|
1f67b0d90a2771677eccefc0de7b4b68971909bf
|
[] |
no_license
|
https://github.com/Amal-R-Jayakumar/Space-Invader
|
e3806887f1f8326c7acec94db8aec505f5650911
|
9493725f65aedc9435aeec42680fe71bf1aa3380
|
refs/heads/master
| 2023-01-07T08:59:47.513544 | 2020-11-02T02:36:54 | 2020-11-02T02:36:54 | 308,503,946 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pygame
import random
import math
from pygame import mixer
# initialize pygame
pygame.init()
# VARIABLES
# Screen
width = 800
height = 600
# Player
playerX = 370
playerY = 480
change_to_playerX_position = 0
# Enemy
enemyX = []
enemyY = []
change_to_enemyX_position = []
change_to_enemyY_position = []
enemy_image = []
num_of_enimies = 6
# Bullet
# Ready = Bullet is invisible
# Fire = Bullet is fired
bulletX = 0
bulletY = 480
change_to_bulletX_position = 0
change_to_bulletY_position = 20
bullet_state = "ready"
# Score_printing
score_value = 0
font = pygame.font.Font('freesansbold.ttf', 32)
textX = 10
textY = 10
# Game Over Text
game_over_font = pygame.font.Font('freesansbold.ttf', 64)
def show_score(x, y):
score = font.render("Score: "+str(score_value), True, (255, 255, 255))
screen.blit(score, (x, y))
def game_over_text():
game_over = game_over_font.render("GAME OVER", True, (255, 255, 255))
screen.blit(game_over, (200, 250))
# create the screeen with 800x600 px resolution
screen = pygame.display.set_mode((width, height))
# Background
background = pygame.image.load("assets/background.png")
# Music
mixer.music.load('assets/background.wav')
mixer.music.play(-1)
#Title and Icon
pygame.display.set_caption("Space Invader")
icon = pygame.image.load("assets/ufo.png")
pygame.display.set_icon(icon)
# player
player_image = pygame.image.load("assets/player.png")
def player(x, y):
screen.blit(player_image, (x, y))
# Enemy
for _ in range(num_of_enimies):
enemy_image.append(pygame.image.load("assets/alien.png"))
enemyX.append(random.randint(0, 800))
enemyY.append(random.randint(50, 150))
change_to_enemyX_position.append(6)
change_to_enemyY_position.append(20)
def enemy(x, y, i):
screen.blit(enemy_image[i], (x, y))
# Bullet
bullet_image = pygame.image.load("assets/bullet.png")
def fire_bullet(x, y):
global bullet_state
bullet_state = "fire"
screen.blit(bullet_image, (x+20, y))
# Collition between bullet and enemy/alien
def isCollition(enemyX, enemyY, bulletX, bulletY):
distance = math.sqrt(pow(enemyX - bulletX, 2) + pow(enemyY - bulletY, 2))
if distance < 30:
return True
else:
return False
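# Illustrative check: points (100, 90) and (80, 70) lie sqrt(800) ~= 28.3 px
# apart, under the 30 px threshold above, so they would count as a hit.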
#######################################
# game loop (it is an infinite loop...)
game_is_running = True
while game_is_running:
# R G B
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_is_running = False
# checking for keystrokes...
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
change_to_playerX_position = -6
if event.key == pygame.K_RIGHT:
change_to_playerX_position = 6
if event.key == pygame.K_SPACE:
                if bullet_state == 'ready':
bullet_sound = mixer.Sound("assets/laser.wav")
bullet_sound.play()
# Get the current X- Coordinate of the bullet
bulletX = playerX
fire_bullet(bulletX, bulletY)
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                change_to_playerX_position = 0
playerX += change_to_playerX_position
############################################################
    # Restricting the player from going beyond the screen boundary
# For the SPACESHIP
if playerX <= 0:
playerX = 0
elif playerX >= width-64:
playerX = width-64
############################################
    # Enemy movement and boundary restriction
for enemy_list_var in range(num_of_enimies):
# GAME OVER text
if enemyY[enemy_list_var] > 440:
for j in range(num_of_enimies):
                enemyY[j] = 2000
game_over_text()
break
enemyX[enemy_list_var] += change_to_enemyX_position[enemy_list_var]
if enemyX[enemy_list_var] <= 0:
change_to_enemyX_position[enemy_list_var] = 5
enemyY[enemy_list_var] += change_to_enemyY_position[enemy_list_var]
elif enemyX[enemy_list_var] >= width-64:
change_to_enemyX_position[enemy_list_var] = -5
enemyY[enemy_list_var] += change_to_enemyY_position[enemy_list_var]
collision = isCollition(enemyX[enemy_list_var], enemyY[enemy_list_var],
bulletX, bulletY)
# Explosion
if collision:
collision_sound = mixer.Sound("assets/explosion.wav")
collision_sound.play()
bulletY = 480
bullet_state = "ready"
score_value += 1
enemyX[enemy_list_var] = random.randint(0, width)
enemyY[enemy_list_var] = random.randint(50, 150)
enemy(enemyX[enemy_list_var], enemyY[enemy_list_var], enemy_list_var)
player(playerX, playerY)
# Bullet Movement
if bulletY <= 0:
bulletY = 480
bullet_state = "ready"
if bullet_state is "fire":
fire_bullet(bulletX, bulletY)
bulletY -= change_to_bulletY_position
show_score(textX, textY)
pygame.display.update()
##############################################
|
UTF-8
|
Python
| false | false | 5,334 |
py
| 1 |
main.py
| 1 | 0.592426 | 0.570116 | 0 | 198 | 25.939394 | 79 |
constantinpape/elf
| 15,298,673,526,004 |
8016a3c742583a4946f4ea020a2cb6bb725d169f
|
25018f31d21722f92d0716d131e13c9e1e0a5582
|
/elf/evaluation/rand_index.py
|
b6d0ed24f70315dd93686957dc52bd1f53c3630d
|
[
"MIT"
] |
permissive
|
https://github.com/constantinpape/elf
|
20d43273d6d673b9ce649d9bdf210d78b49f3a58
|
636415993e8b2de3d916a22ebf8719aaf656d8c0
|
refs/heads/master
| 2023-08-05T12:19:49.769606 | 2023-07-25T08:00:14 | 2023-07-25T08:00:14 | 200,736,827 | 34 | 17 |
MIT
| false | 2023-09-13T19:14:01 | 2019-08-05T22:22:58 | 2023-08-01T10:38:43 | 2023-09-13T19:13:53 | 677 | 35 | 17 | 16 |
Python
| false | false |
from .util import contigency_table, compute_ignore_mask
def compute_rand_scores(a_dict, b_dict, p_counts, n_points):
# compute the rand-primitves
a_counts = a_dict.values()
sum_a = float(sum(c * c for c in a_counts))
b_counts = b_dict.values()
sum_b = float(sum(c * c for c in b_counts))
sum_ab = float(sum(c * c for c in p_counts))
prec = sum_ab / sum_b
rec = sum_ab / sum_a
# compute rand scores:
    # adapted rand error and rand index
ari = (2 * prec * rec) / (prec + rec)
ri = 1. - (sum_a + sum_b - 2 * sum_ab) / (n_points * n_points)
ari = 1. - ari
return ari, ri
def rand_index(segmentation, groundtruth, ignore_seg=None, ignore_gt=None):
""" Compute rand index derived scores between two segmentations.
Computes adapted rand error and rand index.
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth
ignore_seg [listlike] - ignore ids for segmentation (default: None)
ignore_gt [listlike] - ignore ids for groundtruth (default: None)
    Returns:
float - adapted rand error
float - rand index
"""
ignore_mask = compute_ignore_mask(segmentation, groundtruth,
ignore_seg, ignore_gt)
if ignore_mask is not None:
segmentation = segmentation[ignore_mask]
groundtruth = groundtruth[ignore_mask]
else:
        # if we don't have a mask, we need to make sure the segmentations are flattened
segmentation = segmentation.ravel()
groundtruth = groundtruth.ravel()
    # compute ids, counts and overlaps making up the contingency table
a_dict, b_dict, _, p_counts = contigency_table(groundtruth, segmentation)
n_points = segmentation.size
# compute and return rand scores
ari, ri = compute_rand_scores(a_dict, b_dict, p_counts, n_points)
return ari, ri
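# Illustrative usage (hypothetical toy labels, not from the test suite):
#   import numpy as np
#   seg = np.array([[1, 1], [2, 2]])
#   gt = np.array([[1, 1], [1, 2]])
#   ari, ri = rand_index(seg, gt)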
|
UTF-8
|
Python
| false | false | 1,928 |
py
| 173 |
rand_index.py
| 151 | 0.638485 | 0.636411 | 0 | 57 | 32.824561 | 77 |
SoullessStone/LowPowerExam
| 13,494,787,271,640 |
54f7dcc29dfd3c70384fb66d97cef0f734c4fa61
|
34ba1935665f8b4b5a42077c307476253c9895fb
|
/tests/test_twos_complement.py
|
9d3321235bacfb11104073071295203e1a944c5d
|
[] |
no_license
|
https://github.com/SoullessStone/LowPowerExam
|
5adaa0c13c67811925fad48769ddd148de7ae4e0
|
9a7212078f82a8ad8c081f688107bed16ae1e222
|
refs/heads/main
| 2023-06-05T00:55:29.370579 | 2021-06-20T16:12:52 | 2021-06-20T16:12:52 | 374,308,225 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from twos_complement import twos_complement
class test_twos_complement(unittest.TestCase):
def test_1(self):
result = twos_complement(1)
self.assertEqual("[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]", str(result))
def test_max(self):
result = twos_complement(2 ** 15 - 1)
self.assertEqual("[0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]", str(result))
def test_minus_1(self):
result = twos_complement(-1)
self.assertEqual("[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]", str(result))
def test_minus_30(self):
result = twos_complement(-30)
self.assertEqual("[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1. 0.]", str(result))
def test_minus_min(self):
result = twos_complement(-2 ** 15)
self.assertEqual("[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]", str(result))
if __name__ == '__main__':
unittest.main()
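# For reference, a minimal implementation consistent with the assertions above
# (an assumption -- the real `twos_complement` module is not shown here):
#
#   import numpy as np
#
#   def twos_complement(value, bits=16):
#       # MSB-first bit pattern of `value` in `bits`-bit two's complement,
#       # returned as a float array so str() matches the expected strings.
#       return np.array([(value >> (bits - 1 - i)) & 1 for i in range(bits)],
#                       dtype=float)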
|
UTF-8
|
Python
| false | false | 952 |
py
| 40 |
test_twos_complement.py
| 40 | 0.529412 | 0.429622 | 0 | 30 | 30.733333 | 90 |
envar/pycal
| 773,094,133,689 |
18f865b245fd92d3da6347370f1a1ea68221bedc
|
59b04e331d26a6d26907b0b71fb36d8a05580e0c
|
/pycal.py
|
223ebe867f3e7efa247cc602a78107a28e10de47
|
[] |
no_license
|
https://github.com/envar/pycal
|
68376eaa5df65d388e974de95d89f1fffe38df7d
|
d9b64cd02f7227e05f175d41d9cfce338612a4e0
|
refs/heads/master
| 2016-07-26T16:13:07.267132 | 2015-06-19T21:00:52 | 2015-06-19T21:00:52 | 37,625,772 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Python Google Calendar.
Usage:
pycal.py init
pycal.py getevents [-lacedot] [--date=DATE | --from=DATE --to=DATE]
[--status=STATUS...] [--calendars=CALENDARS...]
pycal.py addevent --from=DATE --to=DATE --summary=SUMMARY
[--attendees=ATTENDEES] [--location=LOCATION]
[--description=DESCRIPTION]
pycal.py getcalendars [--calendars=CALENDARS...]
pycal.py (-h | --help)
pycal.py --version
Options:
-a Show attendees, only in long listing format
-c Show colored output
-d Show description, only in long listing format
-e Output with header line, only in long listing format
-l Use a long listing format
-o Show location, only in long listing format
-t Print in table format, only in long listing format
--attendees=ATTENDEES Specify attendees, comma separated
--calendars=CALENDARS Filter results by CALENDARS, fuzzy matching supported [default: all]
--description=DESCRIPTION Specify DESCRIPTION
--location=LOCATION Specify LOCATION
--date=DATE Filter results by DATE or specify DATE in addevent
--status=STATUS Filter attendees where STATUS is needsAction, declined,
tentative, or accepted. Only when -a is specified
--summary=SUMMARY Specify SUMMARY
-h --help Show this screen.
--version Show version.
"""
import os
import sys
import shutil
import json
from datetime import datetime, timedelta
import time
import pytz
import rfc3339
from fuzzywuzzy import fuzz
import itertools
import googauth
from googleapiclient.errors import HttpError
# TODO implement schema validation example in docopt
from docopt import docopt
import tabulate
import termcolor
import textwrap
def handle_http_error(err):
if len(err.args) == 3:
(resp, content, uri) = err.args
else:
(resp, content) = err.args
content = json.loads(content.decode('utf-8'))
for e in content['error']['errors']:
print(e)
def myprint(*args, indent=0, bullet='', **kwargs):
"""Wrapper function to cprint
"""
text = ' '*indent + bullet + ' '.join(args)
termcolor.cprint(text, **kwargs)
def parse_datetime(date_time_str):
"""Parse date into datetime object.
"""
err = False
try:
date_time = datetime.strptime(date_time_str, '%Y-%m-%d')
return date_time
except ValueError:
err = True
try:
date_time = datetime.strptime(date_time_str, '%Y-%m-%dT%H:%M')
return date_time
except ValueError:
err = True
try:
date_time = datetime.strptime(date_time_str, '%H:%M')
return date_time
except ValueError:
err = True
if err:
print('Error parsing date time string:', date_time_str)
sys.exit(1)
def local_to_utc(date_time, localtz):
date_time = localtz.localize(date_time)
date_time = date_time.astimezone(pytz.utc)
date_time = date_time.replace(tzinfo=None)
return date_time
def linewrap_table(table, col_widths):
new_table = []
for line in table:
new_lines = []
for i, cell in enumerate(line):
wrapped_lines = textwrap.wrap(cell, col_widths[i])
new_lines.append(wrapped_lines)
# transpose list
new_lines = [list(x) for x in itertools.zip_longest(*new_lines)]
new_table.append(new_lines)
# join list of lists together
new_table = [j for i in new_table for j in i]
return new_table
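# Illustrative behaviour (hypothetical row): with col_widths=[5, 5],
# [["hello world", "ok"]] becomes [["hello", "ok"], ["world", None]],
# i.e. each logical row expands into as many physical rows as its tallest cell.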
class Cal:
"""Interact with google calendar
"""
def __init__(self):
self.service = googauth.get_service()
home_dir = os.path.expanduser('~')
config_path = os.path.join(home_dir, '.pycal', 'config')
with open(config_path, 'r') as f:
config = json.load(f)
self.localtz = pytz.timezone(config['tz'])
def get_calendars(self):
try:
calendars_result = self.service.calendarList().list().execute()
calendars = calendars_result.get('items', [])
calendars = self.sort_calendars(calendars)
return calendars
except HttpError as err:
handle_http_error(err)
return
def filter_calendars_by_summary(self, calendars, summaries):
"""filter calendars using summary. Summary can be a list
"""
# TODO consider using regex
calendars_result = []
for calendar in calendars:
for summary in summaries:
if fuzz.partial_ratio(summary, calendar['summary']) > 60:
calendars_result.append(calendar)
return calendars_result
def sort_calendars(self, calendars):
return sorted(calendars, key=lambda calendar: calendar['accessRole'])
def print_calendars(self, calendars):
calendars = self.sort_calendars(calendars)
        myprint('Calendars', attrs=['underline'])
for calendar in calendars:
myprint(calendar['summary'], bullet='+ ')
def get_events(self, start, end, calendars):
"""Get events in specified range from given calendars. Note that
calendars is a list
"""
# convert to utc string
start = local_to_utc(start, self.localtz).isoformat()+'Z'
end = local_to_utc(end, self.localtz).isoformat()+'Z'
events = []
for calendar in calendars:
try:
events_result = self.service.events().list(
calendarId=calendar['id'],
timeMin=start,
timeMax=end,
singleEvents=True).execute()
except HttpError as err:
handle_http_error(err)
events_result = events_result.get('items', [])
events_result = self.sort_events(events_result)
# add calendar information to events
for i, event in enumerate(events_result):
events_result[i]['calendarId'] = calendar['id']
events_result[i]['calendarSummary'] = calendar['summary']
events.extend(events_result)
return events
def sort_events(self, events):
return sorted(events, key=self.get_timestamp)
def filter_events_by_status(self, events, status):
"""Filter attendees in events by status
"""
# TODO consider using regex
events_result = []
for event in events:
attendees = event.get('attendees', [])
attendees_result = []
for attendee in attendees:
if attendee['responseStatus'] in status:
attendees_result.append(attendee)
event['attendees'] = attendees_result
events_result.append(event)
return events_result
def get_timestamp(self, event):
if event['start'].get('dateTime'):
date_time = rfc3339.parse_datetime(event['start']['dateTime'])
else:
date = rfc3339.parse_date(event['start']['date'])
date_time = datetime(date.year, date.month, date.day, 0, 0, 0)
#date_time = local_to_utc(date_time, self.localtz)
return time.mktime(date_time.utctimetuple())
def long_print_events(self, events, header=False, showAttendees=False, showLocation=False, showDescription=False, showTable=False, colored=False):
headers = ['calendar', 'date', 'time', 'summary']
tablefmt = 'plain'
datetimefmt = '%b %d %H:%M'
weights = []
table = []
for event in events:
if event.get('start').get('dateTime'):
start_date_time = rfc3339.parse_datetime(event['start']['dateTime'])
else:
start_date = rfc3339.parse_date(event['start']['date'])
start_date_time = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0)
start_time_str = start_date_time.strftime('%H:%M')
start_date_str = start_date_time.strftime('%b %d')
if event.get('end').get('dateTime'):
end_date_time = rfc3339.parse_datetime(event['end']['dateTime'])
else:
end_date = rfc3339.parse_date(event['end']['date'])
end_date_time = datetime(end_date.year, end_date.month, end_date.day, 0, 0, 0)
end_time_str = end_date_time.strftime('%H:%M')
end_date_str = end_date_time.strftime('%b %d')
time_str = start_time_str + '-' + end_time_str + ' '
calendar_id = event['calendarId']
calendar_summary = event['calendarSummary']
event_summary = event['summary'].strip()
line = [calendar_summary, start_date_str, time_str, event_summary]
if showAttendees:
headers.append('attendees')
attendees = []
for a in event.get('attendees', []):
if 'displayName' in a:
attendee = a['displayName']
else:
attendee = a['email']
if colored:
status = a['responseStatus']
if status == 'declined':
attendee = termcolor.colored(attendee, 'red')
elif status == 'tentative':
attendee = termcolor.colored(attendee, 'yellow')
elif status == 'accepted':
attendee = termcolor.colored(attendee, 'green')
attendees.append(attendee)
attendees = ','.join(attendees)
line.append(attendees)
if showLocation:
headers.append('location')
location = event.get('location', '')
line.append(location)
if showDescription:
headers.append('description')
description = event.get('description', '')
line.append(description)
table.append(line)
# clear headers if not required
if not header:
headers = []
# calculate widths of columns
if showAttendees:
weights.append(6)
if showLocation:
weights.append(2)
if showDescription:
weights.append(3)
maxx, maxy = shutil.get_terminal_size()
widths = [10, 6, 11, 10]
# if using grid change 2 to 4
remainder = maxx - sum(widths) - 2*len(weights)
extra_widths = [int(x*remainder/sum(weights)) for x in weights]
widths += extra_widths
if showTable:
table = linewrap_table(table, widths)
print(tabulate.tabulate(table, headers, tablefmt))
def print_events(self, events):
old_calendar_id = ''
old_date_str = ''
for event in events:
if event.get('start').get('dateTime'):
start_date_time = rfc3339.parse_datetime(event['start']['dateTime'])
else:
start_date = rfc3339.parse_date(event['start']['date'])
start_date_time = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0)
start_time_str = start_date_time.strftime('%H:%M')
start_date_str = start_date_time.strftime('%b %d')
if event.get('end').get('dateTime'):
end_date_time = rfc3339.parse_datetime(event['end']['dateTime'])
else:
end_date = rfc3339.parse_date(event['end']['date'])
end_date_time = datetime(end_date.year, end_date.month, end_date.day, 0, 0, 0)
end_time_str = end_date_time.strftime('%H:%M')
end_date_str = end_date_time.strftime('%b %d')
calendar_id = event['calendarId']
calendar_summary = event['calendarSummary']
event_summary = event['summary'].strip()
            # check if it's a new calendar
if not old_calendar_id == calendar_id:
print()
myprint(calendar_summary, attrs=['underline'])
old_calendar_id = calendar_id
# check if its a new day
if not start_date_str == old_date_str:
print()
myprint(start_date_str, indent=4)
old_date_str = start_date_str
time_str = start_time_str + '-' + end_time_str + ' '
myprint(time_str, event_summary, indent=8)
def add_event(self, start, end, summary, calendarId='primary', attendees=None, location=None, description=None):
start_str = rfc3339.datetimetostr(start)
end_str = rfc3339.datetimetostr(end)
body = {
'start': {'dateTime': start_str},
'end': {'dateTime': end_str},
'summary': summary
}
if attendees:
body['attendees'] = attendees
if location:
body['location'] = location
if description:
body['description'] = description
        self.service.events().insert(calendarId=calendarId, body=body).execute()
def init():
# paths
home_dir = os.path.expanduser('~')
config_dir = os.path.join(home_dir, '.pycal')
if not os.path.exists(config_dir):
os.makedirs(config_dir)
config_path = os.path.join(config_dir, 'config')
cred_path = os.path.join(config_dir, 'calendar-pycal.json')
# get configuration
config = {}
print("""To find your tz, visit:
[http://en.wikipedia.org/wiki/List_of_tz_database_time_zones]""")
config['tz'] = input('Enter a tz: ')
with open(config_path, 'w+') as f:
json.dump(config, f)
# authorize with google
service = googauth.get_service()
if service:
print('Successfully configured pycal.')
if __name__ == '__main__':
args = docopt(__doc__, version="Pycal 1.0")
print(args)
if args['init']:
init()
cal = Cal()
if args['--date']:
datemin = parse_datetime(args['--date'])
datemax = datemin + timedelta(1)
if args['--from']:
datemin = parse_datetime(args['--from'])
datemax = parse_datetime(args['--to'])
else:
datemin = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
datemax = datemin + timedelta(1)
if args['getevents']:
calendars = cal.get_calendars()
# filter calendars if not all
if not args['--calendars'] == ['all']:
calendars = cal.filter_calendars_by_summary(calendars, args['--calendars'])
events = cal.get_events(datemin, datemax, calendars)
if args['--status']:
events = cal.filter_events_by_status(events, args['--status'])
if args['-l']:
cal.long_print_events(events,
header=args['-e'],
showAttendees=args['-a'],
showDescription=args['-d'],
showLocation=args['-o'],
showTable=args['-t'],
colored=args['-c'])
else:
cal.print_events(events)
if args['addevent']:
cal.add_event(datemin,
datemax,
args['--summary'],
calendarId='primary',
attendees=args['--attendees'],
location=args['--location'],
description=args['--description']
)
if args['getcalendars']:
calendars = cal.get_calendars()
if args['--calendars']:
calendars = cal.filter_calendars_by_summary(calendars, args['--calendars'])
cal.print_calendars(calendars)
|
UTF-8
|
Python
| false | false | 15,967 |
py
| 4 |
pycal.py
| 2 | 0.552076 | 0.546064 | 0 | 479 | 32.331942 | 150 |
Shivani-01/Python-Assignment
| 3,238,405,350,552 |
6903930d7bf1c59ef82b45da16bd897525b5ca18
|
23dd82c8a821ca63a24bc2fb28fb69a30090d28d
|
/Python-Asignment/Module 1/exercise5/reverse_number.py
|
5744dbbd697ca45e9b502d336be5ed686c5a80be
|
[] |
no_license
|
https://github.com/Shivani-01/Python-Assignment
|
b5f42db549cb763fd4d825a949c25b3b620e9a9b
|
06cf05804817f770fde70202e14b0ea784473d03
|
refs/heads/master
| 2021-07-09T00:13:29.094881 | 2021-03-24T05:06:50 | 2021-03-24T05:06:50 | 235,023,067 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n=int(input("Enter 5 digit number"))
a=0
while(n!=0):
r=n%10
a=a*10+r
n//=10
print(a)
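# Example: entering 12345 prints 54321 -- each pass peels off the last digit
# with n % 10 and rebuilds the number most-significant-first via a = a*10 + r.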
|
UTF-8
|
Python
| false | false | 105 |
py
| 51 |
reverse_number.py
| 46 | 0.504762 | 0.419048 | 0 | 7 | 13 | 36 |
theNicelander/advent-of-code-2020
| 15,375,982,931,836 |
3bf58a30be4da76981dff80f582dbf45b7a59103
|
b46899e383993c960b2e13f5fc948d027cfc5886
|
/day08/day08.py
|
1dfc92ba3505c972f438f19156d349570d8afd16
|
[] |
no_license
|
https://github.com/theNicelander/advent-of-code-2020
|
d15c85ce16e6902bfa7c6b9c628f979886cbed0d
|
eeb979de0ba3bc17a8f30c531a16154994276681
|
refs/heads/main
| 2023-02-01T18:18:03.492242 | 2020-12-08T10:10:25 | 2020-12-08T10:10:25 | 317,574,814 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from utils.files import read_data_into_list
class Game:
def __init__(self, data):
self.instructions = self._process_into_dict(data)
self.no_instructions = len(self.instructions)
self.processed_instructions = []
self.accumulator = 0
self.index = 0
def run(self) -> int:
        while self.index < self.no_instructions:
if self.index in self.processed_instructions:
print("Already processed instruction")
print("REACHED END")
break
else:
self.processed_instructions.append(self.index)
self._process_instruction()
return self.accumulator
def _process_instruction(self):
instruction_dict = self.instructions[self.index]
for operation, amount in instruction_dict.items():
if operation == "nop":
self.index += 1
if operation == "acc":
self.accumulator += amount
self.index += 1
if operation == "jmp":
self.index += amount
@staticmethod
def _process_into_dict(data):
instructions = []
for d in data:
operation, amount = d.split(" ")
amount = int(amount.replace("+", ""))
instructions.append({operation: amount})
print(instructions)
return instructions
if __name__ == "__main__":
data = read_data_into_list("input.txt")
print("Solution 1", Game(data).run())
|
UTF-8
|
Python
| false | false | 1,524 |
py
| 8 |
day08.py
| 8 | 0.552493 | 0.549213 | 0 | 49 | 30.102041 | 62 |
gentinettagian/complexity_of_qsvms
| 17,102,559,802,476 |
e0d3b3423b5648935768f0264bcb99162decf303
|
6e7e7265db94f0f9a77f28c51602e5efc62d9665
|
/approx_qsvm/hyper_params_test.py
|
a9c931eb0c263db78f2a8cbaa14839f3c8b06a6b
|
[] |
no_license
|
https://github.com/gentinettagian/complexity_of_qsvms
|
f391372d2178726c9f985a5aab8fa9bbda6c6fb6
|
0fc00525819c224b317ecf9cfb85ee5bdbace3e3
|
refs/heads/main
| 2023-04-18T05:34:39.396900 | 2022-04-08T14:12:32 | 2022-04-08T14:12:32 | 464,407,353 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Necessary imports
import numpy as np
import pandas as pd
import pickle
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_machine_learning.algorithms.classifiers import VQC
from qiskit.algorithms.optimizers import SPSA
from qiskit_machine_learning.utils.loss_functions import CrossEntropyLoss
class HyperParamsTest():
"""
Running approx QSVM Tests
"""
def __init__(self, d=2, seed = 42, reps = 3, initial_weights = None, batch_size = 5, num_steps = 1000, tol = [1e-4], R = None) -> None:
"""
        d: feature dimension (number of qubits of the variational circuit)
seed: random seed used to sample shots and generate data
reps: repetitions of the variational form
initial_weights: initial weights for the trainable parameters
batch_size: batch size used in SGD
num_steps: number of maximal steps in optimization
tol: array of tolerances used as stopping criteria
R: number of shots used in the simulator. If None, statevector is used
"""
# QASM-simulator used for the SPSA optimization
if R == None:
self._backend = QuantumInstance(Aer.get_backend('statevector_simulator'))
else:
self._backend = QuantumInstance(Aer.get_backend('qasm_simulator'), shots=R)
self.d = d
self.seed = seed
self._reps = reps
self.batch_size = batch_size
# Seed for initial weights should be different from the generated data
np.random.seed(2*seed)
if initial_weights is None:
initial_weights = 0.1*(2*np.random.rand(self.d*(reps + 1)) - 1)
self._weight = initial_weights
print(self._weight)
# variational quantum circuit used to perform optimization
self._model = VQC(self.d,reps=self._reps,quantum_instance=self._backend,initial_point=self._weight, batch_size=self.batch_size)
self._x_test = None
self._y_test = None
self._x_train = None
self._y_train = None
self._true_theta = None
self._num_steps = num_steps
self._num_evals = 0
self._tol = tol
self._conv_steps = np.zeros(len(tol))
self._final_loss = np.zeros(len(tol))
self._final_acc = np.zeros(len(tol))
self._loss = CrossEntropyLoss()
# Dictionary containing data accumulated during training
self.history = {'accuracy' : [],
'loss' : [],
'accuracy_control' : [],
'loss_control' : [],
'params' : [],
'params_control' : [],
'h' : [],
'h_sv' : [],
'theta_true' : [],
'h_true' : []
}
def generate_data(self, M = 100, M_test = 10, margin = 0.1, seed=41):
"""
Generate artificial data
"""
X,y,_,theta = get_data_generated(self._model.neural_network,margin=margin,M=M+M_test,return_theta=True,seed=seed)
self._x_train = X[:M,:]
self._y_train = y[:M]
if M_test > 0:
self._x_test = X[M:,:]
self._y_test = y[M:]
self._true_theta = theta
self.history['theta_true'] = theta
self._true_h = self._model.neural_network.forward(self._x_train, self._true_theta)
self.history['h_true'] = self._true_h
print(X.shape, y.shape)
return X,y,theta
def fit_model(self):
"""
Perform SPSA optimization using QASM simulator
"""
if self._x_train is None:
RuntimeError('Data not generated')
self._model.fit(self._x_train,self._y_train)
h_fit = self._model.neural_network.forward(self._x_train,self._weight)
return h_fit
def run_experiment(self, M = 100, M_test = 10, margin = 0.1):
"""
Runs the experiment by generating data, fitting with SPSA and
        controlling with gradient descent and statevector
"""
# Callback used to save data on the fly
def callback(*args):
self.history["params"].append(args[1])
self._weight = args[1]
self._num_evals = args[0]
self.history["loss"].append(args[2])
n = len(self.history['loss'])
print(n, args[2])
if n < 2:
return False
error = np.linalg.norm(self.history['params'][-1] - self.history['params'][-2])/len(self._weight)
for i, t in enumerate(self._tol):
if self._conv_steps[i] == 0 and error < t:
h_pred = self._model.neural_network.forward(self._x_train, self._weight)
loss = np.mean(self._loss.evaluate(h_pred, self._y_train))
y_pred = [[0,1] if p[0] < p[1] else [1,0] for p in h_pred]
acc = np.sum(y_pred == self._y_train)/(2*len(y_pred))
self.history["accuracy"].append(acc)
print(f"{n}, Accuracy: {acc}, Loss: {loss}")
self._conv_steps[i] = n
self._final_acc[i] = acc
self._final_loss[i] = loss
print(f'Tolerance {t} reached.')
if np.all(self._conv_steps > 0):
return True
else:
return False
optimizer = SPSA(maxiter=self._num_steps,termination_checker=callback)
self._model = VQC(self.d,reps=self._reps,quantum_instance=self._backend,initial_point=self._weight,optimizer=optimizer, batch_size=self.batch_size)
if self._x_train is None:
self.generate_data(M, M_test, margin,seed=self.seed)
print('Starting qasm fit')
h_fit = self.fit_model()
self.history['h'] = h_fit
return h_fit, self._conv_steps, self._final_loss, self._final_acc
def save(self, filename):
"""
Saves the history dictionary to a pickle file.
"""
        with open(f'features={self.d}/d={int(self.d * (self._reps+1))}/dumps/{filename}.pkl', 'wb') as f:
            pickle.dump(self.history, f)
def get_data_generated(qnn, M=100, margin=0.1, bias=0, shuffle=True, seed=41, return_theta=False,one_hot=True):
"""returns a toy dataset (binary classification) generated with respect to a specific quantum neural network, such
that the QNN can obtain 100% classification accuracy on the train set
:param qnn: an instance of the QuantumNeuralNetwork class
:param M: int, the desired size of the generated dataset
:param margin: float in [-0.5, 0.5], the margin around 0.5 probability prediction where no data are included
:param shuffle: bool, whether the data is ordered by class or shuffled
:param seed: int, the random seed
"""
rng = np.random.default_rng(seed)
assert M % 2 == 0, 'M has to be even'
# fix the variational form in the given QNN
theta = rng.uniform(0, 2*np.pi, size=len(qnn.weight_params))
class_0 = []
class_1 = []
# loop until the two lists both contain M/2 elements
while len(class_0) < M//2 or len(class_1) < M//2:
# generate a random point
x = rng.uniform(0, 1, size=len(qnn.input_params))
y_prob = qnn.forward(np.array([x]),theta).flatten()
# strict class membership criteria if margin > 0
criterion_0 = y_prob[0] < y_prob[1] - margin/2 + bias
criterion_1 = y_prob[1] < y_prob[0] - margin/2 + bias
# can only be true for a negative margin. Then randomly choose the class membership
if criterion_0 and criterion_1:
if np.random.choice([True, False]) and len(class_0) < M//2:
class_0.append(x)
elif len(class_1) < M//2:
class_1.append(x)
# class 0
if criterion_0 and not criterion_1 and len(class_0) < M//2:
class_0.append(x)
# class 1
if criterion_1 and not criterion_0 and len(class_1) < M//2:
class_1.append(x)
# generate the sorted X and y arrays
y = np.zeros(M, dtype=int) - 1
y[M//2:] = 1
X = np.array(class_0 + class_1)
if shuffle:
inds = rng.choice(M, M, replace=False)
X = X[inds]
y = y[inds]
if one_hot:
y_one_hot = np.array([[1 if yi == -1 else 0, 1 if yi == 1 else 0] for yi in y])
y = y_one_hot
if return_theta:
return X, y, 'generated', theta
else:
return X, y, 'generated'
def M_test(margin):
np.random.seed(42)
seeds = np.random.randint(0,100000,10)
reps = 3
features = 2
sep = 'separable' if margin > 0 else 'overlap'
try:
df = pd.read_csv(f'features={features}/d={features*(reps+1)}/M_{sep}.csv')
    except FileNotFoundError:
df = pd.DataFrame(columns=['Seed','M','Tol','Convergence','Loss','Accuracy'])
n = 1000
#batches = [1,3,5,10,20]
Ms = 2**np.arange(6,12)
tol = [1e-2, 1e-3, 1e-4]
for s in seeds:
for M in Ms:
print(f'Seed {s}, {M} data points.')
if np.any((df['Seed'] == s) & (df['M'] == M)):
continue
test = HyperParamsTest(d=features,num_steps=n,seed=s,reps=reps, tol=tol)
h, convergences, losses, accuracies = test.run_experiment(margin=margin, M=M)
test.save(f'{sep}_M_seed_{s}_M_{M}_steps')
for i, t in enumerate(tol):
df = df.append({'Seed':s,'M': M,'Tol': t, 'Convergence': convergences[i],'Loss': losses[i],'Accuracy': accuracies[i]}, ignore_index=True)
df.to_csv(f'features={features}/d={features*(reps+1)}/M_{sep}.csv',index=False)
def d_test(margin):
np.random.seed(42)
seeds = np.random.randint(0,100000,10)
features = 2
M = 256
sep = 'separable' if margin > 0 else 'overlap'
try:
df = pd.read_csv(f'features={features}/d_{sep}.csv')
    except FileNotFoundError:
df = pd.DataFrame(columns=['Seed','d','Tol','Convergence','Loss','Accuracy'])
n = 1000
ds = [1,3,7,15,31]
tol = [1e-2, 1e-3, 1e-4]
for s in seeds:
for d in ds:
print(f'Seed {s}, {d} repetitions.')
if np.any((df['Seed'] == s) & (df['d'] == features*(d+1))):
continue
test = HyperParamsTest(d=features,num_steps=n,seed=s,reps=d, tol=tol)
h, convergences, losses, accuracies = test.run_experiment(margin=margin, M=M)
#test.save(f'{sep}_M_seed_{s}_M_{M}_steps')
for i, t in enumerate(tol):
df = df.append({'Seed':s,'d': features*(d+1),'Tol': t, 'Convergence': convergences[i],'Loss': losses[i],'Accuracy': accuracies[i]}, ignore_index=True)
df.to_csv(f'features={features}/d_{sep}.csv',index=False)
if __name__ == '__main__':
for margin in [0.1, -0.1]:
d_test(margin) # d-dependence
M_test(margin) # M-dependence
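
# Hedged usage sketch for a single run (not part of the original experiments;
# the sizes and tolerances below are arbitrary assumptions chosen to keep the
# run short, and it only re-uses calls already defined above):
#
#     test = HyperParamsTest(d=2, num_steps=100, seed=7, reps=3, tol=[1e-2])
#     h, conv_steps, losses, accs = test.run_experiment(margin=0.1, M=64, M_test=8)
#     test.save('example_run')  # assumes features=2/d=8/dumps/ already exists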
|
UTF-8
|
Python
| false | false | 11,070 |
py
| 35 |
hyper_params_test.py
| 11 | 0.552304 | 0.534327 | 0 | 317 | 33.921136 | 166 |
benhuckell/Mech325Design
| 10,161,892,632,067 |
c4ff3b2c032bc315492a1603cd4f2d8a651b3fc1
|
341280e4ce6e494f792d8c350b48754c185559d3
|
/Gears/GearBoxObject.py
|
867e24b2f7d74e9cb1bab6e2ed30aceef6119e8c
|
[] |
no_license
|
https://github.com/benhuckell/Mech325Design
|
af727fb0822a4467b74effb9446d3ca3c0e3138a
|
e0bb5d43f69302fdb2122ed1cba46962a75ef08c
|
refs/heads/master
| 2020-08-02T02:39:00.013683 | 2019-11-18T02:37:41 | 2019-11-18T02:37:41 | 211,210,895 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from conversions import *
import matplotlib.pyplot as plt
class gearBoxObject():
"""[Gearbox object for any configuration of gears]
Returns:
[Gearbox Object] -- [Object representing the properties of any gear configuration]
"""
def __init__(self, gearsList, indexCombination):
"""[Constructor]
Arguments:
gearsList {[list of json dictionaries]} -- [list of gears from the json file]
indexCombination {[list of ints]} -- [indices for this configuration of the gears]
"""
self.indexCombination = indexCombination
self.gearSet = []
for index in indexCombination:
gearsList[index]["material"] = gearsList[index]["material"].split(" ")[0]
self.gearSet.append(gearsList[index])
self.gearPairs = {}
pairIndex = 0
while pairIndex < len(self.gearSet):
self.gearPairs[pairIndex] = {}
self.gearPairs[pairIndex]["gears"] = [self.gearSet[pairIndex], self.gearSet[pairIndex + 1]]
pairIndex += 2
def validGearBoxPitch(self):
"""[Checks to see if the gearbox is valid, eg: having the same pitches]
Returns:
[boolean] -- [True or False about whether the gearbox is valid]
"""
for pairNumber, gearPair in self.gearPairs.items():
firstGear = gearPair["gears"][0]
secondGear = gearPair["gears"][1]
if firstGear["pitch"] != secondGear["pitch"]:
return False
return True
def calc(self, omega, torqueInput):
"""[Does all the calculations for the gear set given an omega and input torque]
Arguments:
omega {[double]} -- [input rotational rpm]
torqueInput {[double]} -- [input torque]
Returns:
[double] -- [the final output omega]
[double] -- [the final output torque]
[gear pair] -- [a dictionary of the gear pairs with updated value, eg: tangential velocity and force]
"""
omegaSoFar = omega
torqueSoFar = torqueNmToPoundFeet(torqueInput) * self.gearPairs[0]["gears"][0]["efficiency"]
for pairIndex, gearPair in self.gearPairs.items():
firstGear = gearPair["gears"][0]
secondGear = gearPair["gears"][1]
gearOmegaRatio = firstGear["teeth"] / secondGear["teeth"]
gearTorqueRatio = secondGear["teeth"] / firstGear["teeth"]
tangentialForce = torqueSoFar / (firstGear["pitch_diameter"] / 2)
tangentialVelocity = omegaSoFar * (firstGear["pitch_diameter"] / 2)
self.gearPairs[pairIndex]["tangential_force"] = tangentialForce
self.gearPairs[pairIndex]["tangential_velocity"] = tangentialVelocity
torqueSoFar = torqueSoFar * gearTorqueRatio * secondGear["efficiency"]
omegaSoFar = omegaSoFar * gearOmegaRatio
finalOmega = omegaSoFar
finalTorque = torqueSoFar
return finalOmega, torquePoundFeetToNm(finalTorque), self.gearPairs
def createOmegaTorqueGraph(self, torqueList, omegaList, showPlot = False):
"""[Creates the omegavs torque graph for the input motor values for this configuration of gears]
Arguments:
torqueList {[list of double]} -- [list of the possible input torque values of the motor]
omegaList {[list of double]} -- [list of the possible input rpm values of the motor]
Keyword Arguments:
            showPlot {bool} -- [must be True in order to show the plot] (default: {False})
Returns:
[list of double] -- [list of omega outputs]
[list of double] -- [list of torque outputs]
"""
omegaOutputList = []
torqueOutputList = []
for index in range(0, len(torqueList)):
omega = omegaList[index]
torque = torqueList[index]
outputOmega, outputTorque, gearPairs = self.calc(omega, torque)
# Here we will check the values returned by cailin
passStressChecks = True
# If we pass, we will add the values
# Otherwise we will simply add (0,0) to the set
if passStressChecks:
omegaOutputList.append(outputOmega)
torqueOutputList.append(outputTorque)
else:
omegaOutputList.append(0)
torqueOutputList.append(0)
if showPlot:
plt.plot(omegaOutputList, torqueOutputList)
plt.show()
plt.clf()
return omegaOutputList, torqueOutputList
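
# Worked example of the ratio arithmetic used in calc() above (a sketch with
# hypothetical tooth counts, not data from the project's json file):
# a 20-tooth pinion driving a 40-tooth gear gives
#     omega_out  = omega_in  * (20 / 40) = 0.5 * omega_in
#     torque_out = torque_in * (40 / 20) * efficiency
# so each pair trades speed for torque, scaled by the driven gear's efficiency.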
|
UTF-8
|
Python
| false | false | 4,728 |
py
| 17 |
GearBoxObject.py
| 10 | 0.592428 | 0.588832 | 0 | 124 | 37.129032 | 113 |
anotherpedro/aiscalator
| 8,349,416,459,285 |
45ec3e93db4abe27335a62a906ab3a4513ea32b7
|
76feb49d5a46d5a6b03b0de74918af817af1aa9b
|
/docs/conf.py
|
f69271959d7cf3a27d9275cf212f0a9985cf8510
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/anotherpedro/aiscalator
|
147775884ac22c7e559f24a10d3bf2f4eee3d4b0
|
7cd5a28913cddba9a2ff731839703628820996f4
|
refs/heads/master
| 2023-01-11T22:20:51.785925 | 2020-07-24T18:42:17 | 2020-07-24T18:42:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'numpydoc.numpydoc'
]
# Whether to create a Sphinx table of contents for the lists of class methods
# and attributes. If a table of contents is made, Sphinx expects each entry
# to have a separate page. True by default.
numpydoc_class_members_toctree = False
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = 'AIscalator'
year = '2018'
author = 'Christophe Duong'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.18'
pygments_style = 'trac'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
extlinks = {
'issue': ('https://github.com/Aiscalate/aiscalator/issues/%s', '#'),
'pr': ('https://github.com/Aiscalate/aiscalator/pull/%s', 'PR #'),
}
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/Aiscalate/aiscalator/'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
UTF-8
|
Python
| false | false | 2,017 |
py
| 52 |
conf.py
| 26 | 0.692613 | 0.685672 | 0 | 67 | 29.104478 | 77 |
suman-shruti/Python
| 7,086,696,087,530 |
194fecebfe62f8e5d471c80c22d69b869021bcd1
|
055d44b4d3fb40f191df1fe709811170473f4aa0
|
/khansole_academy.py
|
cba64d6b7b23f4c775ba92632b8850623fbe59db
|
[] |
no_license
|
https://github.com/suman-shruti/Python
|
020756e1509ecea199c81246f9a013511aa918a8
|
4725ab949e550b0c66c70a5b16951ba398d173b2
|
refs/heads/master
| 2022-07-11T13:20:46.894039 | 2020-05-18T04:11:52 | 2020-05-18T04:11:52 | 252,541,319 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
File: khansole_academy.py
-------------------------
Asks the user randomly generated two-digit addition questions until three in a row are answered correctly.
"""
import random
def main():
result = 1
while result <= 3:
num1 = random.randint(10, 99)
num2 = random.randint(10, 99)
num3 = num1 + num2
print("what is " + str(num1) + "+" + str(num2) + "?")
answer = int(input("Your answer: "))
if answer == num3:
print("Correct! You've gotten " + str(result) + " correct in a row.")
result += 1
        else:
            print("Incorrect. " + "The expected answer is " + str(num3))
            result = 1  # reset the streak; calling main() recursively would stack a new game on the old loop
else:
print("Congratulations! You mastered addition. ")
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 803 |
py
| 18 |
khansole_academy.py
| 17 | 0.520548 | 0.495641 | 0 | 33 | 23.333333 | 81 |
limbma/scikit-neuralnetwork
| 10,376,641,031,837 |
737ce4656970f28df6328640f9b83d526603f070
|
4b2c8b6698f9546c693fbf8deb2951185bc06698
|
/sknn/tests/test_ae.py
|
ebab1f4a3cd59cd127cbcedc916a9bdc95829cec
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/limbma/scikit-neuralnetwork
|
73f1289651af60473206930572cac6747787dcaa
|
d221c579a1d12c321713c19ea895988224a4c2c1
|
refs/heads/master
| 2021-01-22T13:51:47.709146 | 2015-05-19T07:31:09 | 2015-05-19T07:31:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from nose.tools import (assert_raises, assert_equals)
import numpy
from sknn.ae import AutoEncoder as AE, Layer as L
class TestAutoEncoder(unittest.TestCase):
def test_LifeCycle(self):
ae = AE(layers=[L("Sigmoid", units=8)])
del ae
def test_FitData(self):
X = numpy.zeros((8,4))
ae = AE(layers=[L("Sigmoid", units=8)], n_iter=1)
ae.fit(X)
class TestParameters(unittest.TestCase):
def test_CostFunctions(self):
X = numpy.zeros((8,12))
for t in ['msre', 'mbce']:
ae = AE(layers=[L("Sigmoid", units=4, cost=t)], n_iter=1)
y = ae.fit_transform(X)
assert_equals(type(y), numpy.ndarray)
assert_equals(y.shape, (8, 4))
def test_LayerTypes(self):
X = numpy.zeros((8,12))
for l in ['autoencoder', 'denoising']:
ae = AE(layers=[L("Sigmoid", type=l, units=4)])
y = ae.fit_transform(X)
assert_equals(type(y), numpy.ndarray)
assert_equals(y.shape, (8, 4))
def test_UnknownCostFunction(self):
assert_raises(NotImplementedError, L, "Sigmoid", cost="unknown")
def test_UnknownType(self):
assert_raises(NotImplementedError, L, "Sigmoid", type="unknown")
|
UTF-8
|
Python
| false | false | 1,283 |
py
| 4 |
test_ae.py
| 2 | 0.586906 | 0.572876 | 0 | 45 | 27.511111 | 72 |
zhaoalex/mri-superresolution
| 5,188,320,518,129 |
119d0d39a24470e0606bdd89ae097049bb448519
|
dd7339bb13dfa133331d13cb2f56f79fc038a6e0
|
/super-resolution/SRGAN/solver.py
|
b5bc4abb993b0a3464960170c1e3f4dd2afde0a4
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/zhaoalex/mri-superresolution
|
e9c084fc2a4a62941dda82d4c372f19b44dcbd80
|
61f3650c62a7b5e032b249380abf523ec2a95200
|
refs/heads/fix-bicubic
| 2022-10-18T16:53:03.504563 | 2020-06-13T00:05:54 | 2020-06-13T00:05:54 | 269,297,699 | 3 | 4 | null | false | 2020-06-07T22:37:39 | 2020-06-04T08:04:34 | 2020-06-07T22:30:55 | 2020-06-07T22:37:38 | 670,391 | 1 | 0 | 0 |
Python
| false | false |
from __future__ import print_function
from math import log10
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision.models.vgg import vgg16
from SRGAN.model import Generator, Discriminator
from progress_bar import progress_bar
from numpy import argmax
from shutil import copyfile
from os import makedirs
class SRGANTrainer(object):
def __init__(self, config, training_loader, testing_loader):
super(SRGANTrainer, self).__init__()
self.GPU_IN_USE = torch.cuda.is_available()
self.device = torch.device('cuda' if self.GPU_IN_USE else 'cpu')
self.netG = None
self.netD = None
self.lr = config.lr
self.nEpochs = config.nEpochs
self.epoch_pretrain = 10
self.criterionG = None
self.criterionD = None
self.optimizerG = None
self.optimizerD = None
self.feature_extractor = None
        self.schedulerG = None
        self.schedulerD = None
self.seed = config.seed
self.upscale_factor = config.upscale_factor
self.num_residuals = 16
self.training_loader = training_loader
self.testing_loader = testing_loader
self.load = config.load
self.model_path = 'models/SRGAN/' + str(self.upscale_factor)
makedirs(self.model_path, exist_ok=True)
def build_model(self):
self.netG = Generator(n_residual_blocks=self.num_residuals, upsample_factor=self.upscale_factor, base_filter=64, num_channel=1).to(self.device)
self.netD = Discriminator(base_filter=64, num_channel=1).to(self.device)
self.feature_extractor = vgg16(pretrained=True)
self.netG.weight_init(mean=0.0, std=0.2)
self.netD.weight_init(mean=0.0, std=0.2)
self.criterionG = nn.MSELoss()
self.criterionD = nn.BCELoss()
torch.manual_seed(self.seed)
if self.GPU_IN_USE:
torch.cuda.manual_seed(self.seed)
self.feature_extractor.cuda()
cudnn.benchmark = True
self.criterionG.cuda()
self.criterionD.cuda()
self.optimizerG = optim.Adam(self.netG.parameters(), lr=self.lr, betas=(0.9, 0.999))
self.optimizerD = optim.SGD(self.netD.parameters(), lr=self.lr / 100, momentum=0.9, nesterov=True)
        self.schedulerG = optim.lr_scheduler.MultiStepLR(self.optimizerG, milestones=[50, 75, 100], gamma=0.5)  # lr decay for G
        self.schedulerD = optim.lr_scheduler.MultiStepLR(self.optimizerD, milestones=[50, 75, 100], gamma=0.5)  # lr decay for D
@staticmethod
def to_data(x):
if torch.cuda.is_available():
x = x.cpu()
return x.data
def save(self, epoch):
g_model_out_path = self.model_path + "/g_model_{}.pth".format(epoch)
d_model_out_path = self.model_path + "/d_model_{}.pth".format(epoch)
torch.save(self.netG, g_model_out_path)
torch.save(self.netD, d_model_out_path)
print("Checkpoint saved to {}".format(g_model_out_path))
print("Checkpoint saved to {}".format(d_model_out_path))
def pretrain(self):
        self.netG.train()
        pretrain_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.netG.zero_grad()
            loss = self.criterionG(self.netG(data), target)
            loss.backward()
            self.optimizerG.step()
            pretrain_loss += loss.item()  # accumulate so the bar shows a running average, not a shrinking batch loss
            progress_bar(batch_num, len(self.training_loader), 'Loss: %.4f' % (pretrain_loss / (batch_num + 1)))
def train(self):
# models setup
self.netG.train()
self.netD.train()
g_train_loss = 0
d_train_loss = 0
for batch_num, (data, target) in enumerate(self.training_loader):
# setup noise
real_label = torch.ones(data.size(0), data.size(1)).to(self.device)
fake_label = torch.zeros(data.size(0), data.size(1)).to(self.device)
data, target = data.to(self.device), target.to(self.device)
# Train Discriminator
self.optimizerD.zero_grad()
d_real = self.netD(target)
d_real_loss = self.criterionD(d_real, real_label)
d_fake = self.netD(self.netG(data))
d_fake_loss = self.criterionD(d_fake, fake_label)
d_total = d_real_loss + d_fake_loss
d_train_loss += d_total.item()
d_total.backward()
self.optimizerD.step()
# Train generator
self.optimizerG.zero_grad()
g_real = self.netG(data)
g_fake = self.netD(g_real)
gan_loss = self.criterionD(g_fake, real_label)
mse_loss = self.criterionG(g_real, target)
g_total = mse_loss + 1e-3 * gan_loss
g_train_loss += g_total.item()
g_total.backward()
self.optimizerG.step()
progress_bar(batch_num, len(self.training_loader), 'G_Loss: %.4f | D_Loss: %.4f' % (g_train_loss / (batch_num + 1), d_train_loss / (batch_num + 1)))
print(" Average G_Loss: {:.4f}".format(g_train_loss / len(self.training_loader)))
def test(self):
self.netG.eval()
avg_psnr = 0
with torch.no_grad():
for batch_num, (data, target) in enumerate(self.testing_loader):
data, target = data.to(self.device), target.to(self.device)
prediction = self.netG(data)
mse = self.criterionG(prediction, target)
psnr = 10 * log10(1 / mse.item())
avg_psnr += psnr
progress_bar(batch_num, len(self.testing_loader), 'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))
print(" Average PSNR: {:.4f} dB".format(avg_psnr / len(self.testing_loader)))
return avg_psnr / len(self.testing_loader)
def run(self):
self.build_model()
all_epoch_psnrs = []
for epoch in range(1, self.epoch_pretrain + 1):
print("\n===> Pretrain epoch {} starts:".format(epoch))
self.pretrain()
print("{}/{} pretrained".format(epoch, self.epoch_pretrain))
for epoch in range(1, self.nEpochs + 1):
print("\n===> Epoch {} starts:".format(epoch))
self.train()
epoch_psnr = self.test()
all_epoch_psnrs.append(epoch_psnr)
            self.schedulerG.step()
            self.schedulerD.step()
# if epoch == self.nEpochs:
            self.save(epoch)
best_epoch = argmax(all_epoch_psnrs) + 1
print("Best epoch: model_{} with PSNR {}".format(best_epoch, all_epoch_psnrs[best_epoch - 1]))
        copyfile(self.model_path + "/g_model_{}.pth".format(best_epoch), self.model_path + "/best_g_model.pth")
        copyfile(self.model_path + "/d_model_{}.pth".format(best_epoch), self.model_path + "/best_d_model.pth")
with open(self.model_path + '/metrics.txt', 'w+') as metricsfile:
print("Saving metrics")
for i, psnr in enumerate(all_epoch_psnrs):
metricsfile.write("{},{}\n".format(i+1, psnr))
metricsfile.write("Best epoch: model_{} with PSNR {}\n".format(best_epoch, all_epoch_psnrs[best_epoch - 1]))
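
# Hedged usage sketch (assumption: `config` is an argparse-style namespace with
# the attributes read above -- lr, nEpochs, seed, upscale_factor, load -- and the
# two loaders are standard torch DataLoaders yielding (low_res, high_res) pairs):
#
#     trainer = SRGANTrainer(config, training_loader, testing_loader)
#     trainer.run()  # pretrains G, then alternates D/G updates while tracking PSNR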
|
UTF-8
|
Python
| false | false | 7,055 |
py
| 58 |
solver.py
| 19 | 0.591212 | 0.579164 | 0 | 170 | 40.5 | 160 |
GabrielNew/Python3-Basics
| 17,952,963,322,656 |
dfccd5e85dfcaf9ccffd042f91f30308fb5a7e5b
|
7f757f11dd91042e97dcc9415ac15819d01845b7
|
/World 2/ex060.py
|
84953181fd12f077ad66ee3f7e1ec28922067d3a
|
[] |
no_license
|
https://github.com/GabrielNew/Python3-Basics
|
5bd931d391dafd3b589cbd925a96cc28c447ad61
|
25afc6c511965ad7e629c40bc7da0727cb1130ad
|
refs/heads/master
| 2020-05-30T19:20:27.726693 | 2019-12-09T04:22:34 | 2019-12-09T04:22:34 | 189,922,425 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
num = int(input('Enter a number to compute its factorial: '))
fat = 1
print(f'{num}! = ',end = '')
while num:
fat *= num
if num > 1:
print(f'{num} x ', end='')
else:
print(f'{num} = ', end='')
num -= 1
print(f'{fat}')
|
UTF-8
|
Python
| false | false | 283 |
py
| 83 |
ex060.py
| 83 | 0.480427 | 0.466192 | 0 | 15 | 17.733333 | 65 |
cash2one/xai
| 15,281,493,684,047 |
ac50bd465abbcbe46ba6fec9c001583032bdca77
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_hydrofoils.py
|
c655751047f1027f8d641017a32e399f0076606a
|
[
"MIT"
] |
permissive
|
https://github.com/cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from xai.brain.wordbase.nouns._hydrofoil import _HYDROFOIL
# class header
class _HYDROFOILS(_HYDROFOIL, ):
def __init__(self,):
_HYDROFOIL.__init__(self)
self.name = "HYDROFOILS"
self.specie = 'nouns'
self.basic = "hydrofoil"
self.jsondata = {}
|
UTF-8
|
Python
| false | false | 259 |
py
| 37,275 |
_hydrofoils.py
| 37,266 | 0.675676 | 0.675676 | 0 | 10 | 24.7 | 58 |
webclinic017/pyTrade-ML
| 2,688,649,569,082 |
a4cd8bc3ae54ee5858ddf4e55e6ca21202303882
|
927a0ac9e17521f62cd8c3ad28b97b87705c759d
|
/src/pytrademl/utilities/key_utilities.py
|
bcc124a33c467bf20f90af85f26d6e3f619732e0
|
[
"MIT"
] |
permissive
|
https://github.com/webclinic017/pyTrade-ML
|
e130f045cfa52ae3638d96ff2c01af634e959707
|
7438baf402ac471f6da3686d7a01ed9eeb3fba88
|
refs/heads/master
| 2023-02-25T04:47:24.686831 | 2021-01-30T21:43:20 | 2021-01-30T21:43:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""!
All functions related to the generation and storing of AlphaVantage keys.
# https://www.alphavantage.co/documentation/
# https://github.com/RomelTorres/alpha_vantage
# https://alpha-vantage.readthedocs.io/en/latest/genindex.html
# https://alpha-vantage.readthedocs.io/en/latest/source/alpha_vantage.html#module-alpha_vantage.timeseries
"""
from pytrademl.utilities.object_utilities import import_object, export_object
from pathlib import Path
root_dir = Path(__file__).resolve().parent.parent
def add_key(key):
"""!
Add a new key to the list.
"""
key_list = import_object(root_dir / "KEYS")
if key_list:
if key in key_list:
print("Key", key, "already stored.")
flag = 0
else:
key_list.append(key)
flag = export_object(root_dir / "KEYS", key_list)
else:
print("Generating new key list.")
key_list = list()
key_list.append(key)
flag = export_object(root_dir / "KEYS", key_list)
return flag
def load_key(index=0):
"""!
Load a key in the list by index
"""
print(root_dir)
key_list = import_object(root_dir / "KEYS")
if key_list:
print("Found available keys:", key_list)
return key_list[index]
else:
return None
def remove_key(key):
key_list = import_object(root_dir / "KEYS")
if key_list:
if key in key_list:
key_list.remove(key)
flag = export_object(root_dir / "KEYS", key_list)
else:
print("Key", key, "not found.")
flag = 0
else:
flag = 1
return flag
def unittest():
add_key("test")
print(load_key())
remove_key('test')
if __name__ == "__main__":
# add_key("XXXXXXXXXXXXXXXXX")
print(load_key())
|
UTF-8
|
Python
| false | false | 1,791 |
py
| 15 |
key_utilities.py
| 13 | 0.597432 | 0.595198 | 0 | 65 | 26.553846 | 106 |
Exacte/CP114
| 8,658,654,115,532 |
b84818aecb93b088157bd0a8afdfb54f5fcb0603
|
138c622c317baa656e0e4f58da155a81cc4c1eec
|
/coop8200_a10/coop8200_a10/src/testing.py
|
62caf8d62b1ba8e82bd754131a72cf960888b388
|
[] |
no_license
|
https://github.com/Exacte/CP114
|
48ebefc80c467a1f618dec55ec09581fb5dbcc30
|
3d362d866503a44292ccab542e747afc9979d1e4
|
refs/heads/master
| 2020-05-15T08:01:16.868548 | 2019-04-18T20:29:13 | 2019-04-18T20:29:13 | 182,152,833 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
-------------------------------------------------------
testing.py
Builds a tree from the alphabet via array_to_pt, retrieves every character
of otoos610.txt from it, then prints a level-order traversal.
-------------------------------------------------------
Author: Mason Cooper
ID: 140328200
Email: coop8200@mylaurier.ca
Version: 2015-03-26
-------------------------------------------------------
"""
from utilities2 import array_to_pt
from pt_linked import PT
a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
pt = PT()
array_to_pt(a, pt)
f = open("otoos610.txt", "r", encoding="utf-8")
line = f.readline().strip()
while line != "":
for i in range(len(line)):
pt.retrieve(line[i])
line = f.readline()
f.close()
pt.levelorder()
|
UTF-8
|
Python
| false | false | 641 |
py
| 42 |
testing.py
| 36 | 0.4883 | 0.447738 | 0 | 28 | 21.928571 | 55 |
shohirose/flexlm-python-scripts
| 13,391,708,036,260 |
ad590104a0938b7f9079d8472da286748aa19b95
|
63c31d96b53cbd9ed28df89490fcd2d486c8d868
|
/setup.py
|
153471e3b829696a47462f3d9e75a68e702c1010
|
[
"Unlicense"
] |
permissive
|
https://github.com/shohirose/flexlm-python-scripts
|
09b8d6906b955b834f9b7b139656bc13dfa7e9c2
|
84725896dc316ceeb0d782318db61d9e45e440b5
|
refs/heads/main
| 2023-06-30T04:17:05.604070 | 2021-07-30T15:13:43 | 2021-07-30T15:13:43 | 386,241,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from setuptools import setup, find_packages
setup(
name="flexlmtools",
version="0.1.0",
install_requires=[],
extras_require={
"develop": ["pytest"]
},
author="Sho Hirose",
author_email="sho.hirose@gmail.com",
description="Package for Flexlm License Manager",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Unlicense",
"Operating System :: OS Independent"
],
python_requires='>=3.6'
)
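
# Hedged install sketch: with this setup.py, an editable install that includes
# the optional test dependency would typically be
#
#     pip install -e ".[develop]"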
|
UTF-8
|
Python
| false | false | 524 |
py
| 7 |
setup.py
| 5 | 0.610687 | 0.599237 | 0 | 20 | 25.25 | 53 |
kiza054/flask-rest-mongodb
| 2,937,757,640,184 |
9d9057036c67726c1f52e95d83d4916628d0040d
|
b913b6837ec34dba8d80b055c0a26a562b7ca49f
|
/app.py
|
50e9453c2546f6aac3d3f85621a919e431050241
|
[] |
no_license
|
https://github.com/kiza054/flask-rest-mongodb
|
e1d432214d5e5563bb5b39f526c0a7d0cfbffbf8
|
6db58ee1553c3055e25494e4511ea7c39a5394c6
|
refs/heads/master
| 2020-09-13T22:20:59.159215 | 2019-11-20T12:13:22 | 2019-11-20T12:13:22 | 222,921,587 | 0 | 0 | null | true | 2019-11-20T11:24:17 | 2019-11-20T11:24:16 | 2019-11-19T13:02:12 | 2019-11-19T13:02:06 | 7,492 | 0 | 0 | 0 | null | false | false |
from flask import Flask
from flask_pymongo import PyMongo
app = Flask(__name__)
app.secret_key = "thisisasecret"
app.config["MONGO_URI"] = "mongodb://localhost:27017/apidemo2019"
mongo = PyMongo(app)
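
# Hedged usage sketch (the collection name `items` is an assumption, not part of
# this repo): once `mongo` is configured, routes can query the database, e.g.
#
#     @app.route("/count")
#     def count():
#         return {"count": mongo.db.items.count_documents({})}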
|
UTF-8
|
Python
| false | false | 200 |
py
| 1 |
app.py
| 1 | 0.75 | 0.705 | 0 | 7 | 27.714286 | 65 |
towicode/Dynamic-Sig
| 7,885,559,981,546 |
84cf488c6eaa7decc71bf070ae4eb18f40e6e0f8
|
117a9cf9026a57894ac5872de3dcbb0c1cb54820
|
/dynamic_sigs/api.py
|
072c5926ecdc5c9b69925dead24cc39f6e7a7407
|
[] |
no_license
|
https://github.com/towicode/Dynamic-Sig
|
4e2d82c3f407761f0278ea7216398622d0773c8f
|
4c89cc7c8c5987bc2ad93332ade729c1831db721
|
refs/heads/master
| 2016-04-14T08:53:30.072881 | 2015-10-23T18:19:47 | 2015-10-23T18:19:47 | 44,413,689 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import generics, mixins
from rest_framework.response import Response
from .models import Signature
from .models import Triboter
from .models import UserFact
from .models import CachedImage
from .serializers import factHashSerializer
class Submit(mixins.CreateModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
serializer_class = factHashSerializer
def post(self, request, *args, **kwargs):
required_fields = ['name', 'value']
signature_string = request.data['related_sig']
tribot_string = request.data['owner']
try:
sig = Signature.objects.get(name=signature_string)
        except Signature.DoesNotExist:
return Response("Invalid Signature", status=404)
try:
tribot = Triboter.objects.get(username=tribot_string)
        except Triboter.DoesNotExist:
tribot = Triboter(username=tribot_string)
tribot.save()
try:
cached = CachedImage.objects.get(triboter=tribot, signature=sig)
cached.delete()
        except CachedImage.DoesNotExist:
pass
        # try to update each existing fact, creating it if it is missing
        x = request.POST.getlist('facts')
        print(x)
for req in x:
req = req.split()
req = {
"name": req[0],
"value": req[1],
}
            print(req['name'])
try:
fact = UserFact.objects.get(owner=tribot, related_sig=sig, name=req['name'])
            except UserFact.DoesNotExist:
fact = UserFact()
for field in required_fields:
if field not in req:
return Response('Please fill out "%s" required fields.' % field, status=400)
else:
setattr(fact, field, req[field])
fact.owner = tribot
fact.related_sig = sig
fact.save()
return Response(status=201)
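
# Hedged client sketch (the endpoint URL is an assumption; the field names match
# the view above, with each fact sent as a "name value" string):
#
#     import requests
#     requests.post("http://localhost:8000/api/submit/",
#                   data={"related_sig": "mysig", "owner": "towicode",
#                         "facts": ["level 99", "kills 123"]})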
|
UTF-8
|
Python
| false | false | 1,903 |
py
| 11 |
api.py
| 9 | 0.574356 | 0.568576 | 0 | 55 | 33.6 | 96 |
sislandavys11/Atividades_Python
| 11,768,210,439,875 |
f456ce7e9030c79a365cb36139154d0ccd677a5d
|
26c666df4cbafbccad52ac32f7b5add1483f518d
|
/exemploTurtle2.py
|
d3347af81e2fc5ca34b4a6989a51e2e785d7df7b
|
[] |
no_license
|
https://github.com/sislandavys11/Atividades_Python
|
b507030338de9e29b0cf9a99b068a15602b36555
|
138f61897e819dec380f5c90ac76f42c0193a603
|
refs/heads/master
| 2023-06-02T06:01:57.284679 | 2021-06-18T22:18:18 | 2021-06-18T22:18:18 | 376,972,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import turtle
janela = turtle.Screen()
janela.bgcolor("lightblue")
tata = turtle.Turtle()
tata.shape("turtle")
tata.speed(3)
tata.stamp()
tata.color("darkblue")
tata.forward(150)
tata.left(120)
tata.forward(150)
tata.left(120)
tata.forward(150)
janela.bgcolor("lightyellow")
tata.color("red")
tata.goto(0,0)
tata.forward(150)
tata.left(120)
tata.forward(150)
tata.left(120)
tata.forward(150)
janela.bgcolor("lightblue")
tata.color("yellow")
tata.forward(150)
tata.left(120)
tata.forward(150)
tata.left(120)
tata.forward(150)
janela.exitonclick()
|
UTF-8
|
Python
| false | false | 550 |
py
| 26 |
exemploTurtle2.py
| 26 | 0.747273 | 0.66 | 0 | 33 | 15.666667 | 29 |
lenstronomy/lenstronomy
| 15,805,479,662,392 |
0e1fe2d6c9ab942c1b7947c72e0651f7ff53990c
|
b2896af28db99c619dd6a0a30d602f77421d3d34
|
/lenstronomy/Util/simulation_util.py
|
e14cb6f380d34838851002c9bb6b662bbf2208da
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/lenstronomy/lenstronomy
|
f9c55ef8b2b08540f6a59add74bc0872c0ed5422
|
73c9645f26f6983fe7961104075ebe8bf7a4b54c
|
refs/heads/main
| 2023-08-19T07:48:12.889355 | 2023-08-16T01:24:16 | 2023-08-16T01:24:16 | 80,772,893 | 41 | 23 |
BSD-3-Clause
| false | 2023-09-14T19:23:34 | 2017-02-02T22:01:06 | 2023-09-04T16:27:05 | 2023-09-14T19:23:33 | 45,915 | 157 | 86 | 28 |
Python
| false | false |
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
import numpy as np
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
def data_configure_simple(numPix, deltaPix, exposure_time=None, background_rms=None, center_ra=0, center_dec=0,
inverse=False):
"""
configures the data keyword arguments with a coordinate grid centered at zero.
:param numPix: number of pixel (numPix x numPix)
:param deltaPix: pixel size (in angular units)
:param exposure_time: exposure time
:param background_rms: background noise (Gaussian sigma)
:param center_ra: RA at the center of the image
:param center_dec: DEC at the center of the image
:param inverse: if True, coordinate system is ra to the left, if False, to the right
:return: keyword arguments that can be used to construct a Data() class instance of lenstronomy
"""
# 1d list of coordinates (x,y) of a numPix x numPix square grid, centered to zero
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=numPix, deltapix=deltaPix, center_ra=center_ra, center_dec=center_dec, subgrid_res=1, inverse=inverse)
    # mask (1 = model this pixel, 0 = leave blank)
# exposure_map = np.ones((numPix, numPix)) * exposure_time # individual exposure time/weight per pixel
    kwargs_data = {'background_rms': background_rms,
                   'exposure_time': exposure_time,
                   'ra_at_xy_0': ra_at_xy_0,
                   'dec_at_xy_0': dec_at_xy_0,
                   'transform_pix2angle': Mpix2coord,
                   'image_data': np.zeros((numPix, numPix))}
return kwargs_data
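
# Hedged usage sketch (the parameter values are arbitrary assumptions):
#
#     kwargs_data = data_configure_simple(numPix=64, deltaPix=0.05,
#                                         exposure_time=100, background_rms=0.01)
#     # kwargs_data can then be used to construct a lenstronomy Data() instance,
#     # as the docstring above notes.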
@export
def simulate_simple(image_model_class, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None,
no_noise=False, source_add=True, lens_light_add=True, point_source_add=True):
"""
:param image_model_class:
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:param no_noise:
:param source_add:
:param lens_light_add:
:param point_source_add:
:return:
"""
image = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_add=source_add, lens_light_add=lens_light_add, point_source_add=point_source_add)
# add noise
if no_noise:
return image
else:
poisson = image_util.add_poisson(image, exp_time=image_model_class.Data.exposure_map)
bkg = image_util.add_background(image, sigma_bkd=image_model_class.Data.background_rms)
return image + bkg + poisson
|
UTF-8
|
Python
| false | false | 2,689 |
py
| 484 |
simulation_util.py
| 428 | 0.682782 | 0.676088 | 0 | 64 | 41.015625 | 242 |
ryanzicky/awesome-python-login-model
| 19,593,640,838,431 |
51acab3bc0cca30450eadb9a3716cfbeb63c5cdf
|
77d636b2f23b34e46ea42792ca2b99d64cc79690
|
/sina/sina.py
|
13983d873511484d28324ad416a7ac061127f346
|
[
"MIT"
] |
permissive
|
https://github.com/ryanzicky/awesome-python-login-model
|
906bc3db34d7276eb1902cc887e9c4806e672583
|
56f61fd992fd0e7bafc3116ee6c5455e2d148846
|
refs/heads/master
| 2020-07-07T12:06:44.071946 | 2019-08-16T07:22:30 | 2019-08-16T07:22:30 | 203,342,985 | 2 | 0 |
NOASSERTION
| true | 2019-08-20T09:23:21 | 2019-08-20T09:23:21 | 2019-08-20T09:23:19 | 2019-08-16T07:22:31 | 8,345 | 0 | 0 | 0 | null | false | false |
# getpass is used here so that the password input is not echoed to the screen
import getpass
import requests
import hashlib
import time
"""
info:
author:CriseLYJ
github:https://github.com/CriseLYJ/
update_time:2019-3-7
"""
def get_login(phone, pwd):
new_time = str(int(time.time()))
sign = new_time + '_' + hashlib.md5((phone + pwd + new_time).encode("utf-8")).hexdigest()
print(sign)
url = "https://appblog.sina.com.cn/api/passport/v3_1/login.php"
data = {
"cookie_format": "1",
"sign": sign,
"pin": "e3eb41c951f264a6daa16b6e4367e829",
"appver": "5.3.2",
"appkey": "2546563246",
"phone": phone,
"entry": "app_blog",
"pwd": pwd
}
headers = {
"User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; nxt-al10 Build/LYZ28N) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/39.0.0.0 Mobile Safari/537.36 sinablog-android/5.3.2 (Android 5.1.1; zh_CN; huawei nxt-al10/nxt-al10)",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"
}
r = requests.post(url=url, data=data, headers=headers)
print(r.json())
if __name__ == '__main__':
phone = input("你输入你的账号:")
# 这里输入密码不可见
pwd = getpass.getpass("password:")
get_login(phone, pwd)
|
UTF-8
|
Python
| false | false | 1,300 |
py
| 45 |
sina.py
| 38 | 0.613636 | 0.54789 | 0 | 44 | 27 | 242 |
casperboone/dltpy
| 3,745,211,531,189 |
fb11a87c9d2baddea81f9e4f17706035adb691ef
|
5e253222b9be2ac0171cb482edb8bb4853ca6c5f
|
/input-preparation/embedder.py
|
ea313e69315c154e203ed0d861b5b848f9fc5214
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
https://github.com/casperboone/dltpy
|
40c36ef8d50ed22891fd825407835b6460a3c97a
|
d61c6f1541d7f0cfdfbf4aeddf640a662a49ff5d
|
refs/heads/master
| 2022-10-15T19:32:06.315831 | 2019-12-27T11:52:55 | 2019-12-27T11:52:55 | 209,561,837 | 14 | 10 |
MIT
| false | 2022-09-30T18:42:37 | 2019-09-19T13:30:46 | 2022-06-07T01:23:57 | 2022-09-30T18:42:35 | 28,525 | 13 | 5 | 4 |
Jupyter Notebook
| false | false |
from gensim.models import Word2Vec
import pandas as pd
import multiprocessing
import os
from time import time
import config
class HelperIterator:
"""
    Base class used for type-hinting the iterators defined below.
"""
pass
class LanguageIterator(HelperIterator):
"""
Helper Iterator that iterates over the whole collection of descriptions language.
"""
def __init__(self, param_df: pd.DataFrame, return_df: pd.DataFrame) -> None:
self.param_df = param_df
self.return_df = return_df
def __iter__(self):
for func_descr_sentence in self.return_df['func_descr']:
yield func_descr_sentence.split()
for param_descr_sentence in self.param_df['arg_comment']:
yield param_descr_sentence.split()
for return_descr_sentence in self.return_df['return_descr']:
yield return_descr_sentence.split()
class CodeIterator(HelperIterator):
"""
Helper Iterator that iterates over the whole collection of the code expressions.
"""
def __init__(self, param_df: pd.DataFrame, return_df: pd.DataFrame) -> None:
self.param_df = param_df
self.return_df = return_df
def __iter__(self):
for return_expr_sentences in self.return_df['return_expr_str']:
yield return_expr_sentences.split()
for func_name_sentences in self.return_df['name']:
yield func_name_sentences.split()
for arg_names_sentences in self.return_df['arg_names_str']:
yield arg_names_sentences.split()
class Embedder:
"""
Create embeddings for the code names and docstring names using Word2Vec.
"""
def __init__(self, param_df: pd.DataFrame, return_df: pd.DataFrame) -> None:
self.param_df = param_df
self.return_df = return_df
def train_model(self, corpus_iterator: HelperIterator, model_path_name: str) -> None:
"""
Train a Word2Vec model and save the output to a file.
:param corpus_iterator: class that can provide an iterator that goes through the corpus
:param model_path_name: path name of the output file
"""
cores = multiprocessing.cpu_count()
w2v_model = Word2Vec(min_count=5,
window=5,
size=config.W2V_VEC_LENGTH,
workers=cores-1)
t = time()
w2v_model.build_vocab(sentences=corpus_iterator)
print('Time to build vocab: {} mins'.format(round((time() - t) / 60, 2)))
t = time()
w2v_model.train(sentences=corpus_iterator,
total_examples=w2v_model.corpus_count,
epochs=20,
report_delay=1)
print('Time to train model: {} mins'.format(round((time() - t) / 60, 2)))
w2v_model.save(model_path_name)
def train_language_model(self) -> None:
"""
Train a Word2Vec model for the descriptions and save to file.
"""
self.train_model(LanguageIterator(self.param_df, self.return_df), config.W2V_MODEL_LANGUAGE_DIR)
def train_code_model(self) -> None:
"""
Train a Word2Vec model for the code expressions and save to file.
"""
self.train_model(CodeIterator(self.param_df, self.return_df), config.W2V_MODEL_CODE_DIR)
if __name__ == '__main__':
param_df = pd.read_csv(config.ML_PARAM_DF_PATH)
param_df = param_df.dropna()
return_df = pd.read_csv(config.ML_RETURN_DF_PATH)
return_df = return_df.dropna()
if not os.path.isdir(config.OUTPUT_EMBEDDINGS_DIRECTORY):
os.mkdir(config.OUTPUT_EMBEDDINGS_DIRECTORY)
embedder = Embedder(param_df, return_df)
embedder.train_code_model()
embedder.train_language_model()
w2v_language_model = Word2Vec.load(config.W2V_MODEL_LANGUAGE_DIR)
w2v_code_model = Word2Vec.load(config.W2V_MODEL_CODE_DIR)
print("W2V statistics: ")
print("W2V language model total amount of words : " + str(w2v_language_model.corpus_total_words))
print("W2V code model total amount of words : " + str(w2v_code_model.corpus_total_words))
print(" ")
print("Top 20 words for language model:")
print(w2v_language_model.wv.index2entity[:20])
print("\n Top 20 words for code model:")
print(w2v_code_model.wv.index2entity[:20])
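
# Hedged follow-up sketch: gensim models expose similarity queries, e.g.
# (the query word is an assumption and must be in the trained vocabulary):
#
#     print(w2v_code_model.wv.most_similar("name", topn=5))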
|
UTF-8
|
Python
| false | false | 4,346 |
py
| 35 |
embedder.py
| 16 | 0.627243 | 0.615969 | 0 | 131 | 32.160305 | 104 |
weifanghuang/python_learn
| 1,176,821,076,948 |
73f9b1bb29cb15ab95ea585a60af367d4c78e18c
|
d218d3c2931724101cc3f80ac38632b7092c29eb
|
/a_new_learn/guess_number_game.py
|
a7d401ab2ddd96f3ea3ae745dc835f03f43c1dff
|
[] |
no_license
|
https://github.com/weifanghuang/python_learn
|
f77f912ce61747f6f292e540201a9de17d0110c9
|
92a505a23e75a01638af2f7a4f2a79de0f0865cb
|
refs/heads/master
| 2020-09-24T23:19:39.911136 | 2019-12-05T13:35:59 | 2019-12-05T13:35:59 | 225,867,266 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
times = 3
number = random.randint(1, 10)
guess = 0
print("May guess what number is in my mind now: ")
while (guess != number) and (times > 0):
temp = input()
while not temp.isdigit():
temp = input("sorry entered is incorrect,please enter an integer")
guess = int(temp)
times = times - 1
if guess == number:
print("Congratulations, you are my Ms Right")
print("Give me five")
else:
if guess > number:
print("Sorry the number you entered is too large")
else:
print("Sorry the number you entered is too small")
if times > 0:
print("Please try again: ")
else:
print("Chance is running out")
print("Game over")
|
UTF-8
|
Python
| false | false | 761 |
py
| 112 |
guess_number_game.py
| 109 | 0.588699 | 0.578187 | 0 | 27 | 27.185185 | 74 |
MRQiangZ/Dissertation_P6
| 7,181,185,348,192 |
8c94dd39c47549971dcbd0641a19aa11daf523f8
|
78c4ae7c20dfc4c2fe54769cfc317a9526c9ea6e
|
/marital_age.py
|
411e8b55d1b9ce6b71c116a104f07037a00b2819
|
[] |
no_license
|
https://github.com/MRQiangZ/Dissertation_P6
|
d42c68664f8854d300d4ea835c58f6675c30a7c7
|
cbdd3fbe06eaeca66aed9289c57f85d696648739
|
refs/heads/master
| 2020-07-04T17:57:38.633989 | 2019-08-14T14:10:22 | 2019-08-14T14:10:22 | 202,363,543 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 16:06:19 2019
@author: zhangqiang
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
data = pd.read_csv('../data/data.csv')
marital = []
age = []
marital_index = {1:'Never married',2:'Married',
3:'Widowed',8:'Other',9:'Not Known '}
marital_status = [1,2,3,8,9]
print(len(data))
for (index,cont) in data.iterrows():
#print(i)
marital.append(marital_index[cont.fake_marital])
age.append(cont.fake_age)
plt.rcParams['figure.figsize'] = (9, 5)
plt.xlabel('Marital status')
plt.ylabel('Age')
plt.scatter(marital,age)
pdf = PdfPages('../fig/fig_marital_age.pdf')
pdf.savefig()
plt.close()
pdf.close()
|
UTF-8
|
Python
| false | false | 785 |
py
| 16 |
marital_age.py
| 16 | 0.66242 | 0.630573 | 0 | 35 | 21.457143 | 54 |
mashikro/code-challenges
| 18,769,007,109,950 |
450d3748a97e333103d32bf1d2f24b1098bba622
|
c81380900c5b3542013d447c556021df0570c8b9
|
/missing_int.py
|
3712b3768a11f2e4d3243d533b440c94417167dd
|
[] |
no_license
|
https://github.com/mashikro/code-challenges
|
ca91dfeb4f44a029f6d3552710f793595ea9bdb6
|
4580ebbfa7330d96175f70a084765127c6c6371d
|
refs/heads/master
| 2021-07-18T07:37:59.729513 | 2021-02-14T00:51:39 | 2021-02-14T00:51:39 | 238,508,293 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Write a function:
# def solution(A)
# that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
# For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
# Given A = [1, 2, 3], the function should return 4.
# Given A = [-1, -3], the function should return 1.
# Write an efficient algorithm for the following assumptions:
# N is an integer within the range [1..100,000];
# each element of array A is an integer within the range [-1,000,000..1,000,000].
def find_missing_int(A):
if not A:
return 1
A.sort()
#case where all nums are negative
if A[-1] <= 0:
return 1
for num in range(1, A[-1]):
# print('num=', num)
if num>0 and num not in A:
return num
return A[-1]+1
# print(find_missing_int([-1, -3])) #1
# print(find_missing_int([-1, -3, 0])) #1
print(find_missing_int([-1, -3, 0, 1])) #2
print(find_missing_int([1, 3,2])) #4
print(find_missing_int([7,8,9,11,12]))#1
def find_missing_int_(A):
if not A:
return 1
#case where all nums are negative
elif max(A) <= 0:
return 1
else:
pos_nums = []
for num in A:
if num >0:
pos_nums.append(num)
if min(pos_nums) != 1:
return 1
else:
next_possible_smallest = 2 # the next smallest number
while next_possible_smallest in pos_nums:
next_possible_smallest+=1
return next_possible_smallest
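

# An alternative O(n) sketch (not part of the original solutions above): keep the
# positive values in a set and walk upwards from 1 until a gap is found.
def find_missing_int_set(A):
    seen = {num for num in A if num > 0}  # membership tests are O(1) on a set
    candidate = 1
    while candidate in seen:
        candidate += 1
    return candidate


print(find_missing_int_set([1, 3, 6, 4, 1, 2]))  # 5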
|
UTF-8
|
Python
| false | false | 1,428 |
py
| 128 |
missing_int.py
| 127 | 0.637834 | 0.582278 | 0 | 69 | 19.57971 | 120 |
knimini/python-szkolenia
| 9,345,848,846,852 |
6bf37dd7d6c725e8f636d765e7a21f11695e5b86
|
fba0b793bc0be417027913545b7617e7b531bb36
|
/tut2/zagadnienia.py
|
1c6c50090ea99c8b4e5cb12dc62caa6d1c5ce0e6
|
[] |
no_license
|
https://github.com/knimini/python-szkolenia
|
8355f3ac12da7e8d4bdbbca9b2419776a561d95e
|
4b7b011dc320b165269e5633cc006e8de45ccfa6
|
refs/heads/master
| 2020-07-19T23:09:42.178939 | 2016-12-12T19:26:49 | 2016-12-12T19:26:49 | 73,751,283 | 1 | 1 | null | false | 2016-12-05T22:08:11 | 2016-11-14T22:08:02 | 2016-11-14T22:15:00 | 2016-12-05T22:08:11 | 45 | 0 | 1 | 0 |
Python
| null | null |
'generators, exceptions, list comprehensions, classes'
'''
List comprehensions
'''
l = []
for i in range(5):
l.append(i**2)
l = [x**2 for x in range(5)]
zdanie = 'I would like this sentence to be in uppercase'
nowe_zdanie = ''
for slowo in zdanie.split():
nowe_zdanie += slowo.upper() + ' '
nowe_zdanie = ' '.join(slowo.upper() for slowo in zdanie.split())
'''
create a list of consecutive powers of 2
'''
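# one possible solution to the exercise above (a sketch; "first 10 powers" is an assumption):
powers_of_two = [2**n for n in range(10)]  # [1, 2, 4, 8, ..., 512]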
'''
Exceptions
'''
try:
10/0
except ZeroDivisionError:
    print('Cannot divide by zero')
def divide(a, b):
try:
print(a/b)
except TypeError as e:
print(e)
'''
input: [1, 2, 3]
output: [1, 3, 5]
input: [a, b, c]
output: ['', 'b', 'cc']
'''
def iterating_list(seq):
try:
return [int(item) + it for it, item in enumerate(seq)]
except ValueError:
return [item * it for it, item in enumerate(seq)]
'''
Generators
'''
def simple_gen(n):
for i in range(n):
yield i
my_gen = simple_gen(5)
print(next(my_gen))
for val in my_gen:
print(val)
def fib(n):
a, b = 0, 1
for _ in range(n):
yield a
a, b = b, a + b
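
# a quick check of the generator above:
#     list(fib(8))  # -> [0, 1, 1, 2, 3, 5, 8, 13]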
'''
Classes
override magic methods
inheritance
'''
class Vehicle:
def do_sound(self):
print(self.sound)
class Car(Vehicle):
sound = 'wrum wrum'
def __init__(self, color):
self.color = color
class Motorcycle(Vehicle):
sound = 'brum brum'
def __init__(self, color):
self.color = color
|
UTF-8
|
Python
| false | false | 1,448 |
py
| 32 |
zagadnienia.py
| 25 | 0.580847 | 0.568355 | 0 | 95 | 14.136842 | 65 |
em1382/kattis
| 3,951,369,961,247 |
f5f6b108448ae0b205e7bce7452ab74b6d7c26cc
|
ae0f090bc05de25fcab770114cf14ea9c0b54621
|
/solutions/areal.py
|
3d4dc2883bb569eb9136d2b1062a79e16f97f3a3
|
[] |
no_license
|
https://github.com/em1382/kattis
|
6401cf95ad210cfdd94dfe9bfdac639afff7b9b1
|
d6c50c2eee49a090ae57728f511310058bb77158
|
refs/heads/master
| 2021-01-17T06:50:41.176673 | 2017-07-11T19:15:27 | 2017-07-11T19:15:27 | 52,895,047 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import math

# read the area of a square and print its perimeter: 4 * sqrt(area)
print(math.sqrt(float(input())) * 4)
|
UTF-8
|
Python
| false | false | 51 |
py
| 57 |
areal.py
| 52 | 0.666667 | 0.647059 | 0 | 2 | 24.5 | 38 |
cloudcores/CuAssembler
| 14,001,593,429,619 |
60be8520a513bc24cfe60e1b5f03bb7e75b60e5d
|
38b09903e527fe12273eb6f93cf0ff9ca0a07af4
|
/CuAsm/CuAsmParser.py
|
850d17d843fca0800133884badc8507053e9aff0
|
[
"MIT"
] |
permissive
|
https://github.com/cloudcores/CuAssembler
|
ae297f538f2ecdc13b8565e78358c643fb558da6
|
96a9f72baf00f40b9b299653fcef8d3e2b4a3d49
|
refs/heads/master
| 2023-04-27T17:00:14.135871 | 2023-04-20T11:34:25 | 2023-04-20T11:34:25 | 194,510,421 | 258 | 47 |
MIT
| false | 2023-04-20T11:34:27 | 2019-06-30T11:54:25 | 2023-04-19T06:57:00 | 2023-04-20T11:34:25 | 3,050 | 211 | 46 | 2 |
Python
| false | false |
# -*- coding: utf-8 -*-
import re
import os
from io import BytesIO
from collections import OrderedDict, defaultdict
from elftools.elf.elffile import ELFFile
from CuAsm.CuKernelAssembler import CuKernelAssembler
from CuAsm.CuInsAssemblerRepos import CuInsAssemblerRepos
from CuAsm.CuSMVersion import CuSMVersion
from CuAsm.CuNVInfo import CuNVInfo
from CuAsm.CuAsmLogger import CuAsmLogger
from CuAsm.CubinFile import PROGRAM_HEADER_TAG
from CuAsm.config import Config
from CuAsm.common import splitAsmSection, alignTo, bytes2Asm
from CuAsm.CuControlCode import c_ControlCodesPattern
m_hex = re.compile(r'\b0x[a-fA-F0-9]+\b')
m_int = re.compile(r'\b[0-9]+\b')
m_intval = re.compile(r'\b(0x[a-fA-F0-9]+)|([0-9]+)\b')
def updateDictWithInput(din, dout, label='', kprefix=''):
''' Update a dict with input from another dict.
The key will be prefixed with kprefix.
the value will be converted to int if possible (for hex or dec int).
label is only used for error tracing.
'''
for k,v in din.items():
kp = kprefix + k
if kp not in dout:
# CuAsmLogger.logWarning('Unknown header attribute (%s) for %s!!!'%(k,label))
pass
if isinstance(v, str):
if m_hex.match(v):
vv = int(v, 16)
elif m_int.match(v):
vv = int(v)
else:
vv = v
else:
vv = v
dout[kp] = vv
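
# Worked example (hypothetical values): with din = {'size': '0x40', 'type': 'SHT_NULL'}
# and kprefix = 'sh_', the call leaves dout with dout['sh_size'] == 64 (hex string
# converted to int) and dout['sh_type'] == 'SHT_NULL' (non-numeric strings kept as-is).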
def buildStringDict(bytelist):
''' build strings dict from b'\x00' joined byte list.
The dict key/value is just the offset/value of the string.
'''
p = 0
counter = 0
sdict = OrderedDict()
while True:
counter += 1
pnext = bytelist.find(b'\x00', p)
if pnext<0:
break
s = bytelist[p:pnext] # not include the ending b'\x00'
sdict[p] = s.decode()
p = pnext+1
return sdict
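
# Worked example (hypothetical bytes): buildStringDict(b'abc\x00de\x00') returns
# OrderedDict([(0, 'abc'), (4, 'de')]) -- each key is the string's byte offset.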
class CuAsmSymbol(object):
'''
typedef struct
{
Elf64_Word st_name; /* Symbol name */
unsigned char st_info; /* Type and Binding attributes */
unsigned char st_other; /* Reserved */
Elf64_Half st_shndx; /* Section table index */
Elf64_Addr st_value; /* Symbol value */
Elf64_Xword st_size; /* Size of object (e.g., common) */
} Elf64_Sym;
//
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
typedef uint64_t Elf64_Off;
typedef int32_t Elf64_Sword;
typedef int64_t Elf64_Sxword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Lword;
typedef uint64_t Elf64_Xword;
All internal symbols should also be defined as labels.
The label offset is just the symbol value, and the section where the label
is defined will affect the behavior of jump/branch instructions.
FIXME: Currently some attributes in st_other (such as "STO_CUDA_ENTRY") cannot be
recognized by pyelftools, thus may be lost if parsed and built again.
'''
# TODO: Not implemented yet, just copied from cubin
SymbolTypes = {'@function' :0,
'@object' :1,
'@"STT_CUDA_TEXTURE"':2,
'@"STT_CUDA_SURFACE"':3}
def __init__(self, name):
self.name = name
self.type = None
self.value = None
self.size = None
self.sizeval = None
self.other = None
self.index = None #
self.entry = Config.defaultSymbol.copy()
def __str__(self):
s = 'name=%s, type=%s, value=%s, size(%s)=%s'%(
self.name, self.type, self.value, self.sizeval, self.size)
return s
def build(self):
''' Build symbol entry.
TODO: not implemented, symtab entries are copied from cubin
but value/size may be updated
'''
return Config.CubinELFStructs.Elf_Sym.build(self.entry)
@staticmethod
def buildSymbolDict(strtab, symbytes):
symdict = OrderedDict()
symsize = Config.CubinELFStructs.Elf_Sym.sizeof()
index = 0
for p in range(0, len(symbytes), symsize):
sym = Config.CubinELFStructs.Elf_Sym.parse(symbytes[p:p+symsize])
nameidx = sym['st_name']
if nameidx not in strtab:
raise Exception('Unknown symbol @%#x with name string index 0x%x!'%(p, nameidx))
name = strtab[nameidx]
if name in symdict:
raise Exception('Duplicate symbol @%#x with name %s!', p, name)
symdict[name] = index, sym
index += 1
return symdict
@staticmethod
def resetSymtabEntryValueSize(bio, base_offset, value, size):
''' reset Symbol entry value/size in symtab byte stream.
bio: BytesIO stream
base_offset: base offset of current entry
value/size: symbol value/size to be set
'''
p = bio.tell() # save current pos
bio.seek(base_offset + 8) # +8 is offset for the value
bio.write(int.to_bytes(value, 8, 'little'))
bio.write(int.to_bytes(size, 8, 'little'))
bio.seek(p) # restore pos
class CuAsmLabel(object):
''' A label is defined by "label:"
Every symbol (non-external) is also a label, the symbol value is just the label offset.
'''
def __init__(self, name, section, offset, lineno):
self.name = name
self.section = section
self.offset = offset
self.lineno = lineno
CuAsmLogger.logSubroutine('Line %6d: New Label "%s" at section "%s":%#x'%(lineno, name, section.name, offset))
def __str__(self):
s = 'Label @Line %4d in section %s : %-#7x(%6d) %s'%(self.lineno, self.section.name, self.offset, self.offset,
self.name)
return s
class CuAsmFixup(object):
''' Fixups are a set of undetermined values during the first scan.
Some fixups can be evaluated after first scan. Then the true values will be filled.
There are also some fixups cannot be determined during compile time, thus they will
go to relocations and the true values will be filled by the program loader.
'''
def __init__(self, section, offset, expr, dtype, lineno):
self.section = section
self.offset = offset
self.lineno = lineno
self.dtype = dtype
self.expr = expr
self.value = None
CuAsmLogger.logSubroutine('Line %6d: New Fixup "%s" at section "%s":%#x'%(lineno, expr, section.name, offset))
def __str__(self):
s = 'section=%s, offset=%d, lineno=%d, dtype=%s, expr=%s, value=%s'%(
self.section.name, self.offset, self.lineno, self.dtype, self.expr, self.value)
return s
class CuAsmSection(object):
'''
Section header struct (Only ELF64 supported):
typedef struct
{
Elf64_Word sh_name; /* Section name */
Elf64_Word sh_type; /* Section type */
Elf64_Xword sh_flags; /* Section attributes */
Elf64_Addr sh_addr; /* Virtual address in memory */
Elf64_Off sh_offset; /* Offset in file */
Elf64_Xword sh_size; /* Size of section */
Elf64_Word sh_link; /* Link to other section */
Elf64_Word sh_info; /* Miscellaneous information */
Elf64_Xword sh_addralign; /* Address alignment boundary */
Elf64_Xword sh_entsize; /* Size of entries, if section has table */
} Elf64_Shdr;
'''
def __init__(self, sname, stype, sflags):
'''Construct an ELF section.
Currently there are 3 systems for section headers.
1. self.name/type/flags/... work for predefined directives, such as .section/.sectioninfo
2. self.header['name']... work for supplementary directives, namely .section_*
3. self.__mSectionHeader is the struct form for building header bytes
Only 1 and 2 can be set in assembly, 1 has higher priority.
Information from 1 and 2 will be combined to form the final header.
        Surely there are redundancies here, but it's the safest way to keep some attributes
        set by ptxas, yet still give the user enough flexibility to modify them.
'''
self.name = sname
        self.type = stype       # "A" stands for SHF_ALLOC
                                # "W" for SHF_WRITE
                                # "X" for SHF_EXECINSTR
        self.flags = [sflags]   # some extra flags may be appended later
self.info = []
self.offset = None
self.size = None
self.addralign = None
self.entsize = 0
self.header = {}
self.extra = {} # barnum/regnum, only for update nvinfo
#
self.padsize = 0
self.padbytes = b''
self.__isTextSection = sname.startswith('.text')
self.__mSectionHeader = Config.defaultSectionHeader.copy()
self.__mData = BytesIO()
def updateHeader(self):
'''Update section header with user inputs.
TODO: currently only offset/size will be updated.
'''
updateDictWithInput(self.header, self.__mSectionHeader,
label='section %s'%self.name, kprefix = 'sh_')
# maybe we can just update self.header?
if self.header['type'] == 'SHT_NULL':
self.__mSectionHeader['sh_offset'] = 0
else:
self.__mSectionHeader['sh_offset'] = self.offset
self.__mSectionHeader['sh_size'] = self.getDataSize() #self.size
def getHeaderStruct(self):
return self.__mSectionHeader
def updateResourceInfo(self):
'''Update register/barrier number.
Examples:
.sectionflags @"SHF_BARRIERS=1"
.sectioninfo @"SHI_REGISTERS=12"
'''
#
p_regnum = re.compile(r'@"SHI_REGISTERS=(\d+)"')
p_barnum = re.compile(r'@"SHF_BARRIERS=(\d+)"')
regnum = None
barnum = 0 # There may be no barrier used in a kernel
for info in self.info:
res = p_regnum.match(info)
if res is not None:
regnum = int(res.groups()[0])
for flag in self.flags:
res = p_barnum.match(flag)
if res is not None:
barnum = int(res.groups()[0])
if regnum is None:
raise Exception("Unknown register number for section %s!"%self.name)
elif regnum > 255 or regnum<0: # TODO: use MAX_REG_COUNT instead?
raise Exception("Invalid register number %d for section %s!"%(regnum, self.name))
else:
rinfo = self.header['info']
self.header['info'] = (rinfo & 0x00ffffff) + (regnum<<24)
self.extra['regnum'] = regnum
if barnum>15: # always rewrite bar number~
raise Exception("Invalid barrier number %d for section %s!"%(barnum, self.name))
else:
rflag = self.header['flags']
self.header['flags'] = (rflag&0xff0fffff) + (barnum<<20)
self.extra['barnum'] = barnum
def buildHeader(self):
''' Build section header bytes with current header struct. '''
self.updateHeader()
# print(self.__mSectionHeader)
return Config.CubinELFStructs.Elf_Shdr.build(self.__mSectionHeader)
def emitBytes(self, bs):
self.__mData.write(bs)
def updateForFixup(self, offset, bs):
''' Update corresponding bytes for fixup.
Input:
                offset      the absolute offset w.r.t. the beginning of the section
                bs          bytes to be updated
'''
blen = len(bs)
if (offset+blen) > self.getDataSize():
raise Exception('Fixup out of boundary!')
# save original pos
opos = self.tell()
self.__mData.seek(offset)
# value is guaranteed within range during fixup evaluation.
self.__mData.write(bs)
self.__mData.seek(opos)
def emitAlign(self, align):
''' Set alignment of next bytes.
Note: When current position is section start, the alignment is the addralign of current section.
Then the padding is done to previous section.
'''
pos = self.tell()
if pos == 0:
self.addralign = align
self.header['addralign'] = align
else:
ppos, padsize = alignTo(pos, align)
if ppos > pos: # do padding with required 0-bytes/nops
self.emitBytes(b'\x00' * (ppos-pos))
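    # A worked example of the alignment arithmetic above (assuming alignTo(pos, align)
    # returns (aligned_pos, padsize), as its usage here implies):
    #     pos = 0x1234, align = 0x80  ->  ppos = 0x1280, padsize = 0x4c
    # so 0x4c zero bytes are emitted before the next aligned item.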
def emitPadding(self, bs):
''' This is only for .text sections.
Emitting padding here will change the size of current text section.
For non-text sections, the padding should be done without changing the size.
'''
pos = self.tell()
self.seek(0, 2) # seek to end
self.emitBytes(bs)
self.seek(pos) # restore original position
def seek(self, pos, whence=0):
return self.__mData.seek(pos, whence)
def tell(self):
return self.__mData.tell()
def getData(self):
return self.__mData.getvalue()
def writePaddedData(self, stream):
if self.header['type'] == 'SHT_NOBITS': # nobits sections will not write to file.
return
else:
stream.write(self.__mData.getvalue())
stream.write(self.padbytes)
def setData(self, bs):
''' Update section data with given bytes. '''
self.__mData = BytesIO(bs)
self.size = len(bs)
def getDataSize(self):
''' Get memory size of current section.
For section of type nobits, no actual file contents.
'''
return len(self.__mData.getvalue())
def getPaddedDataSize(self):
return self.getDataSize() + self.padsize
def getRegNum(self):
return self.extra['regnum']
def __str__(self):
s = 'Section:\n'
s += ' name : %s\n' % self.name
s += ' type : %s\n' % self.type
s += ' flags : %s\n' % str(self.flags)
s += ' info : %s\n' % self.info
s += ' offset : %s\n' % self.offset
s += ' addralign : %s\n' % self.addralign
return s
class CuAsmSegment(object):
def __init__(self, p_type, p_flags):
self.header = {'type':p_type, 'flags':p_flags}
self.__mSegmentHeader = Config.defaultSegmentHeader.copy()
def updateHeader(self):
''' Update header with inputs'''
updateDictWithInput(self.header, self.__mSegmentHeader,
label='segment', kprefix = 'p_')
def getHeaderStruct(self):
return self.__mSegmentHeader
def build(self):
return Config.CubinELFStructs.Elf_Phdr.build(self.__mSegmentHeader)
class CuAsmRelocation(object):
''' Relocation class.
Relocation is a special section that may modify some contents of its linked section.
    This procedure is generally done during loading; the modified contents are typically
    the real memory addresses of some symbols.
typedef struct
{
Elf64_Addr r_offset; /* Address of reference */
Elf64_Xword r_info; /* Symbol index and type of relocation */
} Elf64_Rel;
typedef struct
{
Elf64_Addr r_offset; /* Address of reference */
Elf64_Xword r_info; /* Symbol index and type of relocation */
Elf64_Sxword r_addend; /* Constant part of expression */
} Elf64_Rela;
Relocations are typically for some dynamic variables (symbols).
Sources of relocations:
1. .dword/.word defined values in normal sections
2. 32lo@* or 32hi@* kind of operands in text sections
such as :
/*0040*/ MOV R2, 32@lo(flist) ;
/*0060*/ MOV R3, 32@hi(flist) ;
RELA is a relocation section with extra offsets, such as:
/*00f0*/ MOV R20, 32@lo((_Z4testPiS_S_ + .L_6@srel)) ;
/*0100*/ MOV R21, 32@hi((_Z4testPiS_S_ + .L_6@srel)) ;
3. `(symbol) in text sections (for symbols not defined in current section)
'''
REL_TYPES = {
'R_CUDA_32' : 1,
'R_CUDA_64' : 2,
'R_CUDA_G64' : 4,
'R_CUDA_TEX_HEADER_INDEX' : 6,
'R_CUDA_SURF_HEADER_INDEX': 52,
'R_CUDA_ABS32_20' : 42,
'R_CUDA_ABS32_LO_20' : 43,
'R_CUDA_ABS32_HI_20' : 44,
'R_CUDA_ABS32_LO_32' : 56,
'R_CUDA_ABS32_HI_32' : 57,
'R_CUDA_ABS47_34' : 58}
def __init__(self, section, offset, relsymname, relsymid, reltype, reladd=None):
self.section = section
self.offset = offset
self.relsymname = relsymname
self.relsymid = relsymid
self.reltype = reltype
self.reladd = reladd # reladd=None means rel, otherwise rela
CuAsmLogger.logSubroutine('New Relocation "%s" at section "%s":%#x'%(relsymname, section.name, offset))
def isRELA(self):
return self.reladd is not None
def buildEntry(self):
''' Build binary entry of current relocation.
Examples:
_Z4testPiS_S_, Container({'r_offset': 528, 'r_info': 124554051586, 'r_info_sym': 29, 'r_info_type': 2})
_Z4testPiS_S_, Container({'r_offset': 2288, 'r_info': 124554051641, 'r_info_sym': 29, 'r_info_type': 57, 'r_addend': 2352})
'''
if self.isRELA(): # RELA
rela = Config.defaultRela.copy()
rela['r_offset'] = self.offset
rela['r_info_sym'] = self.relsymid
rela['r_info_type'] = self.REL_TYPES[self.reltype]
rela['r_info'] = (rela['r_info_sym']<<32) + rela['r_info_type']
rela['r_addend'] = self.reladd
# print(rela)
return Config.CubinELFStructs.Elf_Rela.build(rela)
else: # REL
rel = Config.defaultRel.copy()
rel['r_offset'] = self.offset
rel['r_info_sym'] = self.relsymid
rel['r_info_type'] = self.REL_TYPES[self.reltype]
rel['r_info'] = (rel['r_info_sym']<<32) + rel['r_info_type']
# print(rel)
return Config.CubinELFStructs.Elf_Rel.build(rel)
def __str__(self):
s = '@section %s: offset=%s, relsym=%d(%s), reltype=%s, reladd=%s'%(
self.section.name,
self.offset,
self.relsymid,
self.relsymname,
self.reltype,
self.reladd)
return s
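# Example of the r_info packing performed in buildEntry(), using the numbers
# from the docstring above: relsymid = 29 (0x1d) and reltype = 'R_CUDA_64' (2)
# give r_info = (29 << 32) + 2 = 0x1d00000002 = 124554051586, which follows the
# Elf64 convention r_info = (sym << 32) | type.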
class CuAsmFile(object):
def __init__(self):
self.mSMVersion = None # sm version
self.headerflags = None
self.elftype = None
self.fileHeader = {} # unprocessed elf file header
self.__mFileHeader = Config.defaultCubinFileHeader.copy()
self.__mSectionList = OrderedDict()
self.__mSegmentList = []
self.__mLastSection = None
self.__mCurrSection = None
self.__mBuf = BytesIO() # global buffer for whole elf file, but without current section
def buildFileHeader(self):
self.__mFileHeader['e_ident']['EI_OSABI'] = self.fileHeader['ident_osabi']
self.__mFileHeader['e_ident']['EI_ABIVERSION'] = self.fileHeader['ident_abiversion']
self.__mFileHeader['e_type'] = self.fileHeader['type']
self.__mFileHeader['e_machine'] = self.fileHeader['machine']
self.__mFileHeader['e_version'] = self.fileHeader['version']
self.__mFileHeader['e_entry'] = self.fileHeader['entry']
self.__mFileHeader['e_phoff'] = self.fileHeader['phoff']
self.__mFileHeader['e_shoff'] = self.fileHeader['shoff']
self.__mFileHeader['e_flags'] = self.fileHeader['flags']
self.__mFileHeader['e_ehsize'] = self.fileHeader['ehsize']
self.__mFileHeader['e_phentsize'] = self.fileHeader['phentsize']
self.__mFileHeader['e_phnum'] = self.fileHeader['phnum']
self.__mFileHeader['e_shentsize'] = self.fileHeader['shentsize']
self.__mFileHeader['e_shnum'] = self.fileHeader['shnum']
self.__mFileHeader['e_shstrndx'] = self.fileHeader['shstrndx']
return Config.CubinELFStructs.Elf_Ehdr.build(self.__mFileHeader)
def getFileHeaderStruct(self):
return self.__mFileHeader
def emitAlign(self, align):
''' padding last section to required alignments.
Return the padded length.
'''
pos = self.tell()
ppos = align * ((pos+align-1) // align)
if ppos > pos: # do padding with required 0-bytes/nops
if self.__mLastSection is not None:
padbytes = self.__mLastSection.genSectionPaddingBytes(ppos - pos)
else:
padbytes = b'\x00' * (ppos - pos)
self.__mBuf.write(padbytes)
return ppos-pos
def seek(self, offset):
self.__mBuf.seek(offset)
def tell(self):
return self.__mBuf.tell()
def saveAsCubin(self, cubinname):
with open(cubinname, 'wb') as fout:
fout.write(self.__mBuf.getvalue())
class CuAsmParser(object):
''' Parser for cuasm file.'''
#### static variables, mostly re patterns
m_cppcomment = re.compile(r'//.*$') # cpp style line comments
m_ccomment = re.compile(r'\/\*.*?\*\/') # c style line
m_bracomment = re.compile(r'\(\*.*\*\)') # notes for bra targets in sm_5x/6x
# such as (*"INDIRECT_CALL"*)
m_directive = re.compile(r'(\.[a-zA-Z0-9_]+)\s*(.*)')
m_label = re.compile(r'([a-zA-Z0-9._$@#]+?)\s*:\s*(.*)') # "#" for offset label auto rename
m_symbol = re.compile(r'[a-zA-Z0-9._$@]+') #???
m_byte = re.compile(r'\b0x[a-fA-F0-9]{2}\b')
m_short = re.compile(r'\b0x[a-fA-F0-9]{4}\b')
m_word = re.compile(r'\b0x[a-fA-F0-9]{8}\b')
m_dword = re.compile(r'\b0x[a-fA-F0-9]{16}\b') # arch dependent?
m_zero = re.compile(r'\b[1-9][0-9]*\b')
m_sufrel = re.compile(r'\[20@lo\(0x0\)=fun@R_CUDA_SURF_HEADER_INDEX\((\w+)\)\]')
m_texrel = re.compile(r'\[20@lo\(0x0\)=(\w+)\]')
# dtype that may take relocation arguments.
rel_dtypes = {'dword':0, 'word' :1}
dtype_pattern = {'byte' : (m_byte , 1),
'short' : (m_short, 2),
'word' : (m_word , 4),
'dword' : (m_dword, 8)}
#### constructors, and parsing entries
def __init__(self):
self.__mCuInsAsmRepos = None
# directive dict
self.__dirDict = {
# predefined directives in nvdisasm
'.headerflags' : self.__dir_headerflags, # set ELF header
'.elftype' : self.__dir_elftype, # set ELF type
'.section' : self.__dir_section, # declare a section
'.sectioninfo' : self.__dir_sectioninfo, # set section info
'.sectionflags' : self.__dir_sectionflags, # set section flags
'.sectionentsize' : self.__dir_sectionentsize, # set section entsize
'.align' : self.__dir_align, # set alignment
'.byte' : self.__dir_byte, # emit bytes
'.short' : self.__dir_short, # emit shorts
'.word' : self.__dir_word, # emit word (4B?)
'.dword' : self.__dir_dword, # emit dword (8B?)
'.type' : self.__dir_type, # set symbol type
'.size' : self.__dir_size, # set symbol size
'.global' : self.__dir_global, # declare a global symbol
'.weak' : self.__dir_weak, # declare a weak symbol
'.zero' : self.__dir_zero, # emit zero bytes
'.other' : self.__dir_other, # set symbol other
# supplementary directives defined by cuasm
# all for setting some ELF/Section/Segment header attributes
            # some may have the same functionality as predefined directives
# predefined directives of nvdisasm have higher priority
'.__elf_ident_osabi' : (lambda args: self.__dir_elfheader('ident_osabi' , args)),
'.__elf_ident_abiversion' : (lambda args: self.__dir_elfheader('ident_abiversion', args)),
'.__elf_type' : (lambda args: self.__dir_elfheader('type' , args)),
'.__elf_machine' : (lambda args: self.__dir_elfheader('machine' , args)),
'.__elf_version' : (lambda args: self.__dir_elfheader('version' , args)),
'.__elf_entry' : (lambda args: self.__dir_elfheader('entry' , args)),
'.__elf_phoff' : (lambda args: self.__dir_elfheader('phoff' , args)),
'.__elf_shoff' : (lambda args: self.__dir_elfheader('shoff' , args)),
'.__elf_flags' : (lambda args: self.__dir_elfheader('flags' , args)),
'.__elf_ehsize' : (lambda args: self.__dir_elfheader('ehsize' , args)),
'.__elf_phentsize' : (lambda args: self.__dir_elfheader('phentsize' , args)),
'.__elf_phnum' : (lambda args: self.__dir_elfheader('phnum' , args)),
'.__elf_shentsize' : (lambda args: self.__dir_elfheader('shentsize' , args)),
'.__elf_shnum' : (lambda args: self.__dir_elfheader('shnum' , args)),
'.__elf_shstrndx' : (lambda args: self.__dir_elfheader('shstrndx' , args)),
#
'.__section_name' : (lambda args: self.__dir_sectionheader('name' , args)),
'.__section_type' : (lambda args: self.__dir_sectionheader('type' , args)),
'.__section_flags' : (lambda args: self.__dir_sectionheader('flags' , args)),
'.__section_addr' : (lambda args: self.__dir_sectionheader('addr' , args)),
'.__section_offset' : (lambda args: self.__dir_sectionheader('offset' , args)),
'.__section_size' : (lambda args: self.__dir_sectionheader('size' , args)),
'.__section_link' : (lambda args: self.__dir_sectionheader('link' , args)),
'.__section_info' : (lambda args: self.__dir_sectionheader('info' , args)),
'.__section_entsize' : (lambda args: self.__dir_sectionheader('entsize' , args)),
#
'.__segment' : self.__dir_segment,
'.__segment_offset' : (lambda args: self.__dir_segmentheader('offset' , args)),
'.__segment_vaddr' : (lambda args: self.__dir_segmentheader('vaddr' , args)),
'.__segment_paddr' : (lambda args: self.__dir_segmentheader('paddr' , args)),
'.__segment_filesz' : (lambda args: self.__dir_segmentheader('filesz' , args)),
'.__segment_memsz' : (lambda args: self.__dir_segmentheader('memsz' , args)),
'.__segment_align' : (lambda args: self.__dir_segmentheader('align' , args)),
'.__segment_startsection' : (lambda args: self.__dir_segmentheader('startsection' , args)),
'.__segment_endsection' : (lambda args: self.__dir_segmentheader('endsection' , args))}
def reset(self):
self.__mLineNo = 0
self.__mInTextSection = False
self.__mCurrSection = None
self.__mCurrSegment = None
self.__mCuAsmFile = CuAsmFile()
self.__mSectionDict = OrderedDict()
self.__mSymbolDict = OrderedDict()
self.__mSegmentList = []
self.__mFixupList = [] # Fixup values that should be modified
self.__mLabelDict = OrderedDict() # labels
self.__mSecSizeLabel = OrderedDict() # labels that defined at last of one section
self.__mRelList = [] # relocations
self.__mNVInfoOffsetLabels = {} # key:sectionname, value: tuple(NVInfo_Attr, prefix)
self.__mInsIndex = 0 # Current instruction index
self.m_Arch = None
self.__mPadSizeBeforeSecHeader = 0 # number of padding bytes before section header
# TODO: not implemented yet
# current all the entries are copied from cubin
# self.__mStrList = [] # string may have identical entries
# self.__mShstrDict = OrderedDict() # entries
@CuAsmLogger.logTimeIt
def parse(self, fname):
''' Parsing input file
General parsing work flow:
- scan whole file, gathering file headers, section headers/contents, segment headers
build fixup lists, split kernel text sections for kernel assembler.
- build internal tables, such as .shstrtab, .strtab. .symtab (Currently just copied except symbol size)
- build kernel text sections, update .nv.info sections if necessary.
update relocations if there are any.
- evaluate fixups, patching the bytes of corresponding section data.
- build relocation sections
- layout sections, update file header, section header, segment header accordingly
- write to file/stream
'''
self.reset()
CuAsmLogger.logEntry('Parsing file %s'%fname)
self.__mFilename = fname
if not os.path.isfile(fname):
            raise FileNotFoundError("Cannot find input cuasm file %s!!!" % fname)
else:
with open(fname, 'r') as fin:
self.__mLines = fin.readlines()
self.__preScan()
self.__gatherTextSectionSizeLabel()
self.__buildInternalTables()
self.__evalFixups() #
self.__parseKernels()
#
self.__buildRelocationSections()
# Section layouting should be called when all sizes of sections are determined.
# But section contents can be modified (but not resized)
#
# The layout will also determine the size label of text sections
# which may affect the symbol size in symtab
self.__layoutSections()
self.__updateSymtab()
@CuAsmLogger.logTimeIt
def saveAsCubin(self, fstream):
if isinstance(fstream, str):
fout = open(fstream, 'wb')
needClose = True
CuAsmLogger.logEntry('Saving as cubin file %s...'%fstream)
else:
fout = fstream
needClose = False
CuAsmLogger.logEntry('Saving as cubin file to stream...')
disppos = lambda s: CuAsmLogger.logSubroutine("%#08x(%08d) : %s"%(fout.tell(), fout.tell(), s))
# write ELF file header
disppos('FileHeader')
fout.write(self.__mCuAsmFile.buildFileHeader())
# write section data
for sname,sec in self.__mSectionDict.items():
disppos('SectionData %s'%sname)
sec.writePaddedData(fout)
# write padding bytes before section header
if self.__mPadSizeBeforeSecHeader > 0:
disppos('Padding %d bytes before section header' % self.__mPadSizeBeforeSecHeader)
fout.write(b'\x00' * self.__mPadSizeBeforeSecHeader)
# write section headers
for sname,sec in self.__mSectionDict.items():
disppos('SectionHeader %s'%sname)
fout.write(sec.buildHeader())
# write segment headers
for seg in self.__mSegmentList:
disppos('Segment')
fout.write(seg.build())
if needClose:
fout.close()
def setInsAsmRepos(self, fname, arch):
self.__mCuInsAsmRepos = CuInsAssemblerRepos(fname, arch=arch)
    #### Procedures, every function is a separate parsing step.
@CuAsmLogger.logTraceIt
def __preScan(self):
        ''' First scan to gather sections/symbols,
            and build all entries for labels.
'''
for line in self.__mLines:
nline = CuAsmParser.stripComments(line).strip()
self.__mLineNo += 1
if len(nline)==0: # skip blank/all-comments lines
continue
ltype = self.__getLineType(nline)
if ltype is None:
self.__assert(False, "Unreconized line contents:\n %s"%line)
elif ltype == 'label':
res = self.m_label.match(nline)
rlabel = res.groups()[0]
pos = self.__tellLocal()
label = self.__checkNVInfoOffsetLabels(self.__mCurrSection, rlabel, pos)
if label not in self.__mLabelDict:
self.__mLabelDict[label] = CuAsmLabel(label, self.__mCurrSection,
pos, self.__mLineNo)
else:
v = self.__mLabelDict[label]
self.__assert(False, 'Redefinition of label %s! First occurrence in Line%d!'%
(v.name, v.lineno))
elif ltype == 'directive':
res = self.m_directive.match(nline)
cmd = res.groups()[0]
# print('Run directive %s @line %d.'%(cmd, self.__mLineNo))
self.__assert(cmd in self.__dirDict, 'Unknown directive %s!!!' %cmd)
farg = res.groups()[1].strip()
if len(farg) == 0:
args = []
else:
args = re.split(r'\s*,\s*', farg)
# run the directive
self.__dirDict[cmd](args)
elif ltype == 'code':
# During prescan, write all zeros for placeholder
pos = self.m_Arch.getInsOffsetFromIndex(self.__mInsIndex)
self.__mCurrSection.seek(pos)
# all contents of .text section will be re-written
self.__emitBytes(b'\x00'*self.m_Arch.getInstructionLength())
self.__mInsIndex += 1
elif ltype == 'blank':
continue
@CuAsmLogger.logTraceIt
def __gatherTextSectionSizeLabel(self):
self.__mSecSizeLabel = OrderedDict()
for label, labelobj in self.__mLabelDict.items():
secname = labelobj.section.name
if not secname.startswith('.text'):
continue
if labelobj.offset == self.__mSectionDict[secname].getDataSize():
# print(f'Size label {label} for {secname}!')
self.__mSecSizeLabel[secname] = labelobj
@CuAsmLogger.logTraceIt
def __parseKernels(self):
# scan text sections to assemble kernels
section_markers = splitAsmSection(self.__mLines)
regnumdict = {}
for secname in section_markers:
if secname.startswith('.text.'):
section = self.__mSectionDict[secname]
m0, m1 = section_markers[secname]
self.__mCurrSection = section
self.__parseKernelText(section, m0, m1)
section.updateResourceInfo()
kname = secname[6:] # strip ".text."
symidx = self.__getSymbolIdx(kname)
regnumdict[symidx] = section.extra['regnum']
sec = self.__mSectionDict['.nv.info']
# print(sec.getData().hex())
nvinfo = CuNVInfo(sec.getData(), self.m_Arch)
self.m_Arch.setRegCountInNVInfo(nvinfo, regnumdict)
sec.setData(nvinfo.serialize())
@CuAsmLogger.logTraceIt
def __buildInternalTables(self):
''' Build .shstrtab/.strtab/.symtab entries.
'''
self.__mShstrtabDict = buildStringDict(self.__mSectionDict['.shstrtab'].getData())
self.__mStrtabDict = buildStringDict(self.__mSectionDict['.strtab'].getData())
self.__mSymtabDict = CuAsmSymbol.buildSymbolDict(self.__mStrtabDict,
self.__mSectionDict['.symtab'].getData())
# @CuAsmLogger.logTraceIt
def __parseKernelText(self, section, line_start, line_end):
CuAsmLogger.logProcedure('Parsing kernel text of "%s"...'%section.name)
kasm = CuKernelAssembler(ins_asm_repos=self.__mCuInsAsmRepos, version=self.m_Arch)
p_textline = re.compile(r'\[([\w:-]+)\](.*)')
ins_idx = 0
for lineidx in range(line_start, line_end):
line = self.__mLines[lineidx]
nline = CuAsmParser.stripComments(line).strip()
self.__mLineNo = lineidx + 1
if len(nline)==0 or (self.m_label.match(nline) is not None) or (self.m_directive.match(nline) is not None):
continue
res = p_textline.match(nline)
if res is None:
self.__assert(False, 'Unrecognized code text!')
ccode_s = res.groups()[0]
icode_s = res.groups()[1]
if c_ControlCodesPattern.match(ccode_s) is None:
self.__assert(False, f'Illegal control code text "{ccode_s}"!')
addr = self.m_Arch.getInsOffsetFromIndex(ins_idx)
c_icode_s = self.__evalInstructionFixup(section, addr, icode_s)
#print("Parsing %s : %s"%(ccode_s, c_icode_s))
try:
kasm.push(addr, c_icode_s, ccode_s)
except Exception as e:
self.__assert(False, 'Error when assembling instruction "%s":\n %s'%(nline, e))
ins_idx += 1
# rewrite text sections
codebytes = kasm.genCode()
section.seek(0)
section.emitBytes(codebytes)
# update offsets in NVInfo
kname = section.name[6:] # strip '.text.'
info_sec = self.__mSectionDict['.nv.info.' + kname]
if kname in self.__mNVInfoOffsetLabels:
offset_label_dict = self.__mNVInfoOffsetLabels[kname]
offset_label_dict.update(kasm.m_ExtraInfo)
else:
offset_label_dict = kasm.m_ExtraInfo.copy()
nvinfo = CuNVInfo(info_sec.getData(), self.m_Arch)
nvinfo.updateNVInfoFromDict(offset_label_dict)
info_sec.setData(nvinfo.serialize())
@CuAsmLogger.logTraceIt
def __sortSections(self):
        ''' Sort the sections. (TODO: Not implemented yet, all sections are kept as is.)
            Some section orders may not matter, but the ELF segments may impose ordering requirements (TODO: check it).
This is a sample layout of sections:
Index Offset Size ES Align Type Flags Link Info Name
1 40 2d9 0 1 STRTAB 0 0 0 .shstrtab
2 319 416 0 1 STRTAB 0 0 0 .strtab
3 730 2e8 18 8 SYMTAB 0 2 10 .symtab
4 a18 2a0 0 1 PROGBITS 0 0 0 .debug_frame
5 cb8 b4 0 4 CUDA_INFO 0 3 0 .nv.info
6 d6c 6c 0 4 CUDA_INFO 0 3 17 .nv.info._Z4testPiS_S_
7 dd8 40 0 4 CUDA_INFO 0 3 1b .nv.info._Z5childPii
8 e18 40 0 4 CUDA_INFO 0 3 1c .nv.info._Z5stestfPf
9 e58 4 0 4 CUDA_INFO 0 3 1a .nv.info._Z2f3ii
a e5c 4 0 4 CUDA_INFO 0 3 18 .nv.info._Z2f1ii
b e60 4 0 4 CUDA_INFO 0 3 19 .nv.info._Z2f2ii
c e68 40 10 8 REL 0 3 14 .rel.nv.constant0._Z4testPiS_S_
d ea8 50 10 8 REL 0 3 17 .rel.text._Z4testPiS_S_
e ef8 60 18 8 RELA 0 3 17 .rela.text._Z4testPiS_S_
f f58 20 10 8 REL 0 3 1b .rel.text._Z5childPii
10 f78 30 10 8 REL 0 3 1d .rel.nv.global.init
11 fa8 60 10 8 REL 0 3 4 .rel.debug_frame
12 1008 118 0 4 PROGBITS 2 0 0 .nv.constant3
13 1120 8 0 8 PROGBITS 2 0 17 .nv.constant2._Z4testPiS_S_
14 1128 188 0 4 PROGBITS 2 0 17 .nv.constant0._Z4testPiS_S_
15 12b0 16c 0 4 PROGBITS 2 0 1b .nv.constant0._Z5childPii
16 141c 170 0 4 PROGBITS 2 0 1c .nv.constant0._Z5stestfPf
17 1600 900 0 80 PROGBITS 6 3 18000011 .text._Z4testPiS_S_
18 1f00 80 0 80 PROGBITS 6 3 18000012 .text._Z2f1ii
19 1f80 200 0 80 PROGBITS 6 3 18000013 .text._Z2f2ii
1a 2180 200 0 80 PROGBITS 6 3 18000014 .text._Z2f3ii
1b 2380 180 0 80 PROGBITS 6 3 a000016 .text._Z5childPii
1c 2500 100 0 80 PROGBITS 6 3 8000017 .text._Z5stestfPf
1d 2600 24 0 8 PROGBITS 3 0 0 .nv.global.init
1e 2624 40 0 4 NOBITS 3 0 0 .nv.global
'''
# TODO:
# section_weights = ['.shstrtab', '.strtab', '.symtab', '.debug_frame', '.nv.info']
pass
@CuAsmLogger.logTraceIt
def __buildRelocationSections(self):
relSecDict = defaultdict(lambda : [])
for rel in self.__mRelList:
if rel.isRELA():
sname = '.rela' + rel.section.name
else:
sname = '.rel' + rel.section.name
# FIXME: insert REL/RELA sections if necessary
relSecDict[sname].append(rel)
        # CHECK: The order of rel entries probably does not matter,
        #        but to reduce mismatches w.r.t. the original cubin,
        #        the order is reversed, as the official toolchain does.
for sname in relSecDict:
section = self.__mSectionDict[sname]
rellist = relSecDict[sname]
nrel = len(rellist)
for i in range(nrel):
                rel = rellist.pop()   # pop from the end (LIFO), reversing the emit order
section.emitBytes(rel.buildEntry())
@CuAsmLogger.logTraceIt
def __evalFixups(self):
for i,fixup in enumerate(self.__mFixupList):
try:
# check relocation
# Relocation rules for fixups (NOT include the text section):
# 1. dtype in dword/word
# 2. expr is non-literal (0x**)
# 3. expr not started with index@, no @srel present
#
# CHECK: what if "Symbol + label@srel ? "
# seems still a relocation, but the value is the label value instead of zero.
expr = fixup.expr
if fixup.dtype not in self.rel_dtypes or expr.startswith('index@'):
val, _ = self.__evalExpr(expr)
fixup.value = val
self.__updateSectionForFixup(fixup)
else: #
# TODO: check other types of relocations
# Check relocations for texture/surface references
if fixup.dtype == 'word':
res = self.m_texrel.match(expr)
if res is not None:
symname = res.groups()[0]
relsymid = self.__getSymbolIdx(symname)
reltype = 'R_CUDA_TEX_HEADER_INDEX'
rel = CuAsmRelocation(fixup.section, fixup.offset, symname, relsymid, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
continue # go process next fixup
res2 = self.m_sufrel.match(expr)
if res2 is not None:
symname = res2.groups()[0]
relsymid = self.__getSymbolIdx(symname)
reltype = 'R_CUDA_SURF_HEADER_INDEX'
rel = CuAsmRelocation(fixup.section, fixup.offset, symname, relsymid, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
continue # go process next fixup
# check explicit types of relocations
# Example : fun@R_CUDA_G64(C1)
# Seems only appear in debug version?
p_rel = re.compile(r'fun@(\w+)\(([^\)])\)')
res_rel = p_rel.match(expr)
if res_rel:
reltype = res_rel.groups()[0]
symname = res_rel.groups()[1]
symidx = self.__getSymbolIdx(symname)
rel = CuAsmRelocation(fixup.section, fixup.offset, symname, symidx, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
continue
# check other types of relocations
val, vs = self.__evalExpr(expr)
if isinstance(vs[0], str): # symbol name in vs[0]
symname = vs[0]
relsymid = self.__getSymbolIdx(symname) # index of symbol
if fixup.dtype=='word':
reltype='R_CUDA_32'
elif fixup.dtype=='dword':
reltype='R_CUDA_64'
else:
self.__assert(False, 'Unknown data type for relocation: %s'%fixup.dtype)
rel = CuAsmRelocation(fixup.section, fixup.offset, symname, relsymid, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
if val is not None: # symbol + label@srel, seems the label value is filled.
fixup.value = val
self.__updateSectionForFixup(fixup)
except Exception as e:
self.__assert(False, 'Error when evaluating fixup @line%d: expr=%s, msg=%s'
%(fixup.lineno, fixup.expr, e))
@CuAsmLogger.logTraceIt
def __updateSymtab(self):
bio = BytesIO(self.__mSectionDict['.symtab'].getData())
symsize = Config.CubinELFStructs.Elf_Sym.sizeof()
for i, s in enumerate(self.__mSymtabDict):
symid, syment = self.__mSymtabDict[s]
if s in self.__mLabelDict:
syment['st_value'] = self.__mLabelDict[s].offset
if s in self.__mSymbolDict: # symbols explicitly defined in assembly
symobj = self.__mSymbolDict[s]
symobj.value = self.__mLabelDict[s].offset
symobj.sizeval, _ = self.__evalExpr(symobj.size)
syment['st_size'] = symobj.sizeval
# print(syment)
CuAsmSymbol.resetSymtabEntryValueSize(bio, i*symsize, symobj.value, symobj.sizeval)
            else:   # some symbols do not have corresponding labels, such as vprintf
pass
self.__mSectionDict['.symtab'].setData(bio.getvalue())
@CuAsmLogger.logTraceIt
def __layoutSections(self):
''' Layout section data, do section padding if needed. Update section header.offset/size.
Update segment range accordingly.
Update ELF file header accordingly.
'''
# initialize the offset as the ELF header size
elfheadersize = Config.CubinELFStructs.Elf_Ehdr.sizeof()
file_offset = elfheadersize
mem_offset = elfheadersize
prev_sec = None
sh_edges = {} # key=secname, value = (file_start, file_end, mem_start, mem_end)
# First pass to get the size of every section
        # NOTE: the size of the current section depends on the padding, which is determined by the next section
# Seems only for text section? For other sections, padding will not count in size?
for secname, sec in self.__mSectionDict.items():
if secname == '':
continue
# print(secname)
align = sec.addralign
if prev_sec is not None and prev_sec.name.startswith('.text'):
align = 128
file_offset, mem_offset = self.__updateSectionPadding(prev_sec, file_offset, mem_offset, align)
sec.size = sec.getDataSize()
sec.offset = file_offset
sec.header['size'] = sec.size
sec.header['offset'] = sec.offset
prev_sec = sec
sh_edges[secname] = (file_offset, 0, mem_offset, 0)
mem_offset += sec.size
if sec.header['type'] != 'SHT_NOBITS':
file_offset += sec.size
# ???
if prev_sec is not None and prev_sec.name.startswith('.text'):
file_offset, mem_offset = self.__updateSectionPadding(prev_sec, file_offset, mem_offset, 128)
        # Second pass to build the section edges, for locating segment ranges
for secname, sec in self.__mSectionDict.items():
if secname == '':
continue
sec.size = sec.getDataSize()
sec.header['size'] = sec.size
if sec.header['type'] != 'SHT_NOBITS':
fsize = sec.size
msize = fsize
else:
fsize = 0
msize = sec.size
file_pos, _, mem_pos, _ = sh_edges[secname]
sh_edges[secname] = (file_pos, file_pos + fsize, mem_pos, mem_pos + msize)
# FIXME: better alignment for headers ?
file_offset, self.__mPadSizeBeforeSecHeader = alignTo(file_offset, 8)
        # Currently only the normal order is supported:
# ELFHeader -> SectionData -> SectionHeader -> SegmentHeader
# Other orders may be possible, but not supported yet.
SecHeaderLen = len(self.__mSectionDict) * Config.CubinELFStructs.Elf_Shdr.sizeof()
self.__mCuAsmFile.fileHeader['shoff'] = file_offset
phoff = file_offset + SecHeaderLen
phlen = self.__mCuAsmFile.fileHeader['phentsize'] * self.__mCuAsmFile.fileHeader['phnum']
self.__mCuAsmFile.fileHeader['phoff'] = phoff
sh_edges[PROGRAM_HEADER_TAG] = phoff, phoff+phlen, phoff, phoff+phlen
for seg in self.__mSegmentList:
if seg.header['type'] == 'PT_PHDR':
seg.header['offset'] = file_offset + SecHeaderLen
seg.header['filesz'] = Config.CubinELFStructs.Elf_Phdr.sizeof() * len(self.__mSegmentList)
seg.header['memsz'] = seg.header['filesz']
elif seg.header['type'] == 'PT_LOAD':
# if startsection is empty, this segment is empty
# Seems a convention of compiler?
if seg.header['startsection'] != '' and seg.header['endsection'] != '':
file_start0, file_end0, mem_start0, mem_end0 = sh_edges[seg.header['startsection']]
file_start1, file_end1, mem_start1, mem_end1 = sh_edges[seg.header['endsection']]
seg.header['offset'] = file_start0
seg.header['filesz'] = file_end1 - file_start0
seg.header['memsz'] = mem_end1 - mem_start0
else:
msg = 'Unknown segment type %s!'%seg.header['type']
CuAsmLogger.logError(msg)
raise Exception(msg)
# update header
seg.updateHeader()
#### Directives
def __dir_headerflags(self, args):
self.__assertArgc('.headerflags', args, 1, allowMore=False)
self.__mCuAsmFile.headerflags = args[0]
def __dir_elftype(self, args):
self.__assertArgc('.elftype', args, 1, allowMore=False)
self.__mCuAsmFile.elftype = args[0]
def __dir_section(self, args):
self.__assertArgc('.section', args, 3, allowMore=False)
        # For implicit sections, quotes are used to embrace the section name,
# mainly for the NULL section with empty name ""
# thus the quotes will be stripped
secname = args[0].strip('"')
self.__assert(secname not in self.__mSectionDict, 'Redefinition of section "%s"!'%secname)
self.__mCurrSection = CuAsmSection(secname, args[1], args[2])
CuAsmLogger.logSubroutine('Line %6d: New section "%s"'%(self.__mLineNo, secname))
self.__mSectionDict[secname] = self.__mCurrSection
if args[0].startswith('.text.'):
self.__mInTextSection = True
self.__mInsIndex = 0
else:
self.__mInTextSection = False
def __dir_sectionflags(self, args):
self.__assertArgc('.sectionflags', args, 1, allowMore=False)
self.__mCurrSection.flags.append(args[0])
def __dir_sectionentsize(self, args):
self.__assertArgc('.sectionentsize', args, 1, allowMore=False)
self.__mCurrSection.entsize = int(args[0])
def __dir_sectioninfo(self, args):
self.__assertArgc('.sectioninfo', args, 1, allowMore=False)
self.__assert(self.__mCurrSection is not None, "No active section!")
# TODO: parse info, check correctness
self.__mCurrSection.info.append(args[0])
def __dir_byte(self, args):
        self.__assertArgc('.byte', args, 1, allowMore=True)
self.__emitTypedBytes('byte', args)
def __dir_dword(self, args):
''' currently 1 dword = 8 bytes
NOTE: .dword may reference a relocation symbol.
'''
self.__assertArgc('.dword', args, 1, allowMore=True)
self.__emitTypedBytes('dword', args)
def __dir_align(self, args):
''' .align directive may have different operations, depending on the context.
Usually .align will pad current buffer with zeros/nops to required alignment.
But for the first .align directive of a section, it also sets the alignment
requirement of current section, which means the padding is done to last
section, thus will not affect the local offset of current section.
For `.align` inside a section, the padding counts to the local offset,
thus will affect all the local fixup values.
'''
self.__assertArgc('.align', args, 1, allowMore=False)
try:
align = int(args[0])
except:
self.__assert(False, ' unknown alignment (%s)!' % args[0])
self.__assert(align &(align-1) == 0, ' alignment(%d) should be power of 2!' % align)
self.__mCurrSection.emitAlign(align)
def __dir_short(self, args):
self.__assertArgc('.short', args, 1, allowMore=True)
self.__emitTypedBytes('short', args)
def __dir_word(self, args):
self.__assertArgc('.word', args, 1, allowMore=True)
self.__emitTypedBytes('word', args)
def __dir_type(self, args):
''' .type will define the symbol type.
Example: .type flist ,@object
.type $str ,@object
.type vprintf,@function
'''
self.__assertArgc('.type', args, 2, allowMore=False)
symbol = args[0]
if symbol not in self.__mSymbolDict:
self.__mSymbolDict[symbol] = CuAsmSymbol(symbol)
stype = args[1]
self.__assert(stype in CuAsmSymbol.SymbolTypes,
'Unknown symbol type %s! Available: %s.'%(stype, str(CuAsmSymbol.SymbolTypes)))
self.__mSymbolDict[symbol].type = stype
def __dir_size(self, args):
self.__assertArgc('.size', args, 2, allowMore=False)
symbol = args[0]
if symbol not in self.__mSymbolDict:
self.__mSymbolDict[symbol] = CuAsmSymbol(symbol)
# NOTE: the size of a symbol is probably an expression
        # this will be evaluated when generating symbol tables
self.__mSymbolDict[symbol].size = args[1]
def __dir_global(self, args):
'''.global defines a global symbol.
A global symbol is visible to linker. For a cubin, it can be accessed by
the driver api function `cuModuleGetGlobal`.
'''
self.__assertArgc('.global', args, 1, allowMore=False)
symbol = args[0]
if symbol not in self.__mSymbolDict:
self.__mSymbolDict[symbol] = CuAsmSymbol(symbol)
CuAsmLogger.logSubroutine('Line %6d global symbol %s'%(self.__mLineNo, symbol))
self.__mSymbolDict[symbol].isGlobal = True
def __dir_weak(self, args):
'''.weak defines a weak symbol.
        A weak symbol is declared in the current module, but may be overridden by strong symbols.
        Currently no scope is implemented, thus a weak symbol is simply treated as global here.
'''
self.__assertArgc('.weak', args, 1, allowMore=False)
symbol = args[0]
if symbol not in self.__mSymbolDict:
self.__mSymbolDict[symbol] = CuAsmSymbol(symbol)
CuAsmLogger.logWarning('Line %d: Weak symbol found! The implementation is not complete, please be cautious...'%self.__mLineNo)
CuAsmLogger.logSubroutine('Line %6d: New weak symbol "%s"'%(self.__mLineNo, symbol))
self.__mSymbolDict[symbol].isGlobal = True
def __dir_zero(self, args):
'''.zero emit zeros of specified length (in bytes).'''
self.__assertArgc('.zero', args, 1, allowMore=False)
try:
# .zero only accepts a literal, no fixup allowed
size = int(args[0])
self.__emitBytes(b'\x00'*size)
except:
self.__assert(False, 'Unknown arg (%s) for .zero!'% args[0])
def __dir_other(self, args):
'''.other defines some properties of a symbol.
Examples:
.other _Z4testPiS_S_, @"STO_CUDA_ENTRY STV_DEFAULT"
.other _Z5childPii , @"STO_CUDA_ENTRY STV_DEFAULT"
.other _Z5stestfPf , @"STO_CUDA_ENTRY STV_DEFAULT"
'''
self.__assertArgc('.other', args, 2, allowMore=False)
symbol = args[0]
if symbol not in self.__mSymbolDict:
#self.__mSymbolDict[symbol] = CuAsmSymbol()
self.__assert(False, 'Undefined symbol %s!!!'%symbol)
self.__mSymbolDict[symbol].other = args[1]
def __dir_elfheader(self, attrname, args):
self.__assertArgc('.__elf_'+attrname, args, 1, allowMore=False)
self.__mCuAsmFile.fileHeader[attrname] = self.__cvtValue(args[0])
if attrname == 'flags':
flags = int(args[0], 16)
smversion = flags & 0xff
self.m_Arch = CuSMVersion(smversion)
            if (not hasattr(self, '_CuAsmParser__mCuInsAsmRepos')   # note: name-mangled private attribute
                    or self.__mCuInsAsmRepos is None
                    or (self.__mCuInsAsmRepos.getSMVersion() != self.m_Arch) ):
CuAsmLogger.logSubroutine('Setting CuInsAsmRepos to default dict...')
self.__mCuInsAsmRepos = CuInsAssemblerRepos(arch=self.m_Arch)
self.__mCuInsAsmRepos.setToDefaultInsAsmDict()
def __dir_sectionheader(self, attrname, args):
self.__assertArgc('.__section_'+attrname, args, 1, allowMore=False)
self.__mCurrSection.header[attrname] = self.__cvtValue(args[0])
def __dir_segment(self, args):
self.__assertArgc('.__segment', args, 2, allowMore=False)
segment = CuAsmSegment(args[0].strip('"'), args[1])
self.__mSegmentList.append(segment)
self.__mCurrSegment = segment
self.__mCurrSection = None
def __dir_segmentheader(self, attrname, args):
self.__assertArgc('.__segment_'+attrname, args, 1, allowMore=False)
self.__mCurrSegment.header[attrname] = self.__cvtValue(args[0])
#### Subroutines
def __assert(self, flag, msg=''):
if not flag:
full_msg = 'Assertion failed in:\n'
full_msg += f' File {self.__mFilename}:{self.__mLineNo} :\n'
full_msg += f' {self.__mLines[self.__mLineNo-1].strip()}\n'
full_msg += f' {msg}'
CuAsmLogger.logError(full_msg)
raise Exception(full_msg)
def __assertArgc(self, cmd, args, argc, allowMore=True):
''' Check the number of arguments.'''
if allowMore:
flag = len(args)>=argc
es = 'at least '
else:
flag = len(args)==argc
es = ''
self.__assert(flag, '%s requires %s%d args! %d given: %s.'
%(cmd, es, argc, len(args), str(args)) )
def __tellLocal(self):
''' tell current pos inside current active section.'''
if self.__mCurrSection is not None:
return self.__mCurrSection.tell()
else:
raise Exception("Cannot tell local pos without active section!")
def __evalVar(self, var):
"""Evaluate a single variable
Args:
var ([string]): the variable expression
Returns:
(value, is_sym)
"""
# symbol
if var in self.__mSymtabDict:
is_sym = True
else:
is_sym = False
# int literal
if m_intval.match(var):
return eval(var), is_sym
if var.endswith('@srel'):
label = var.replace('@srel', '')
if label not in self.__mLabelDict:
raise Exception('Unknown expression %s'%var)
return self.__mLabelDict[label].offset, is_sym
if var in self.__mLabelDict:
return self.__mLabelDict[var].offset, is_sym
raise Exception('Unknown expression %s'%var)
def __evalExpr(self, expr):
''' Evaluate the expression.
value = value_a ((+|-) value_b)?
Return: Tuple(value, Tuple(value_a, op, value_b) )
For symbol at position a, the original symbol string will be returned as value a.
Examples:
Expr Value Section
index@(symbol) symbol index non-text
(.Label) label offset
(.L0-.L1)
        NOTE: This subroutine has no context info, making errors hard to interpret;
              thus all exceptions should be captured in __evalFixups, which shows the full context
'''
# For expr: index@(symbol)
if expr.startswith('index@'): # index of symbol
symname = expr[6:].strip(' ()')
index = self.__getSymbolIdx(symname)
if index is None:
raise Exception('Unknown symbol "%s"!!!'%symname)
return index, (index, None, None)
rexpr = expr.strip('`() ')
res = re.match(r'([.\w$@]+)\s*(\+|-)*\s*([.\w$@]+)*', rexpr) # FIXME: what if the imme is negative???
if res is None:
raise Exception('Unknown expr %s !!!'%expr)
else:
a = res.groups()[0]
op = res.groups()[1]
b = res.groups()[2]
aval, a_issym = self.__evalVar(a)
if op is None: # only one var
if a_issym: # one symbol, definitely a relocation
return aval, (a , None, None)
else: # one label
return aval, (aval, None, None)
else: #
bval, b_issym = self.__evalVar(b) # in general context, the second var should not be symbol?
# but it's possible in size expression
if a_issym:
a_realval = a
else:
a_realval = aval
if op == '+':
return aval + bval, (a_realval, '+', bval)
elif op == '-':
return aval - bval, (a_realval, '-', bval)
else: # never reach here, only +/- can be matched by re pattern.
raise Exception('Unknown expr.op "%s"'%op)
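    # Worked examples for __evalExpr (the label offsets are illustrative assumptions):
    #     with .L_0 at 0x100 and .L_1 at 0x120,
    #     __evalExpr('(.L_1 - .L_0)') -> (0x20, (0x120, '-', 0x100))
    # and for a leading symbol the name itself is kept in the first slot, so a
    # relocation can be generated later, e.g.
    #     __evalExpr('(_Z4testPiS_S_ + .L_8@srel)') -> (value, ('_Z4testPiS_S_', '+', offset_of_.L_8))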
def __getSymbolIdx(self, symname):
''' Get symbol index in symtab. '''
if symname in self.__mSymtabDict:
return self.__mSymtabDict[symname][0]
else:
return None
def __evalInstructionFixup(self, section, offset, s):
''' Check fixups inside an instruction.
Examples:
RET.REL.NODEC R20 `(_Z4testPiS_S_);
BRA `(.L_14);
Relocations:
32@hi($str) => REL
32@lo((_Z4testPiS_S_ + .L_8@srel)) => RELA
`(vprintf) => REL
TODO: How to determine the type of `(.LABEL) ???
For symbol or label defined in the same section, it's a fixup
Otherwise, it seems a relocation. (To be checked...)
'''
p_ins_rel32 = re.compile(r'(32@hi|32@lo)\(([^\)]+)\)+')
r1 = p_ins_rel32.search(s)
if r1:
expr = r1.groups()[1]
val, val_sep = self.__evalExpr(expr)
symname = val_sep[0]
symidx = self.__getSymbolIdx(val_sep[0])
relkey = r1.groups()[0]
reltype = self.m_Arch.getInsRelocationType(relkey)
if val_sep[1] is not None:
rela = CuAsmRelocation(section, offset, symname, symidx, reltype=reltype, reladd=val_sep[2])
self.__mRelList.append(rela)
else:
rel = CuAsmRelocation(section, offset, symname, symidx, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
ns = p_ins_rel32.sub('0x0', s)
return ns
p_ins_label = re.compile(r'`\(([^\)]+)\)')
r2 = p_ins_label.search(s)
if r2:
# print(s)
label = r2.groups()[0]
self.__assert((label in self.__mLabelDict) or (label in self.__mSymtabDict),
'Unknown label (%s) !!!'%label)
# global symbols, no corresponding label (such as vprintf)
if (label not in self.__mLabelDict) and (label in self.__mSymtabDict):
# print(s)
symname = label
symidx = self.__getSymbolIdx(symname)
reltype = self.m_Arch.getInsRelocationType('target')
rel = CuAsmRelocation(section, offset, symname, symidx, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
ns = p_ins_label.sub('0x0', s)
return ns
clabel = self.__mLabelDict[label]
if section.name == clabel.section.name: # hardcoded target in current section
val = clabel.offset
ns = p_ins_label.sub('%#x'%val, s)
return ns
else: # relocations, since the target is in another section
symname = label
symidx = self.__getSymbolIdx(symname)
reltype = self.m_Arch.getInsRelocationType('target')
rel = CuAsmRelocation(section, offset, symname, symidx, reltype=reltype, reladd=None)
self.__mRelList.append(rel)
ns = p_ins_label.sub('0x0', s)
return ns
# No fixup patterns found
return s
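    # Examples of the rewriting done above (instruction texts are assumptions
    # based on the docstring): "MOV R2, 32@lo(flist) ;" is pushed to the kernel
    # assembler as "MOV R2, 0x0 ;" plus a REL entry against symbol 'flist',
    # while "BRA `(.L_14);" with .L_14 at (say) 0x180 in the same section is
    # hard-coded to "BRA 0x180;".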
def __updateSectionForFixup(self, fixup):
''' Update the corresponding section location for fixup.'''
_, blen = self.dtype_pattern[fixup.dtype]
bs = int.to_bytes(fixup.value, blen, 'little')
fixup.section.updateForFixup(fixup.offset, bs)
CuAsmLogger.logSubroutine('Eval fixup "%s" @line%d to %#x'%(fixup.expr, fixup.lineno, fixup.value))
# print(fixup)
def __emitBytes(self, bs):
'''emit bytes to current section.'''
self.__mCurrSection.emitBytes(bs)
def __getLineType(self, line):
'''There can be three line types:
1. Directive: starts with ".\w+", but no following ":"
2. Label: label name followed by ":"
3. Instruction text: only in section with name prefix ".text",
and not a label line
(4. Blank lines, skipped)
        **NOTE**: usually all blank lines will be skipped by the parser
'''
if len(line)==0:
return 'blank'
elif self.m_label.match(line) is not None:
return 'label'
elif self.m_directive.match(line) is not None:
return 'directive'
elif self.__mInTextSection:
return 'code'
else:
return None
#raise Exception("Unrecognized line contents!")
def __emitTypedBytes(self, dtype, args):
dp, dsize = self.dtype_pattern[dtype]
for arg in args:
            # TODO: check whether the contents of arg are really a fixup/relocation (the symbol may not be defined yet)?
#if dp.match(arg):
# self.__emitBytes(bytes.fromhex(arg[2:]))
if arg.startswith('0x'):
argv = int(arg, 16)
arg_byte = argv.to_bytes(dsize, 'little')
self.__emitBytes(arg_byte)
else:
# NOTE: currently all unknowns go to fixup list,
# fixup will handle the relocations if needed.
# all fixup values will be updated by the assembler
fixup = CuAsmFixup(self.__mCurrSection, self.__tellLocal(),
arg, dtype, self.__mLineNo)
self.__mFixupList.append(fixup)
# emit zeros as placeholder
self.__emitBytes(b'\x00'*dsize)
def __cvtValue(self, s):
''' Convert input string to int if possible.'''
if m_intval.match(s):
return eval(s)
elif s.startswith('"') and s.endswith('"'):
return s.strip('"')
else:
return s
def __pushSectionSizeLabel(self):
'''Identify the last label that marks the end of a text section.
DEPRECATED !!!
The text section size label will be gathered in the procedure __gatherTextSectionSizeLabel()
'''
if self.__mCurrSection is not None and self.__mCurrSection.name.startswith('.text') and self.__mLabelDict is not None:
key, lastlabel = self.__mLabelDict.popitem()
if self.__mCurrSection.name == lastlabel.section.name and lastlabel.offset == self.__mCurrSection.tell():
self.__mSecSizeLabel[self.__mCurrSection.name] = lastlabel
self.__mLabelDict[key] = lastlabel # push it back
else:
self.__mLabelDict[key] = lastlabel # push it back
def __genSectionPaddingBytes(self, sec, size):
'''Generate padding bytes for section with given size.'''
if sec.name.startswith('.text'):
padbytes = self.m_Arch.getPadBytes()
else:
padbytes = b'\x00'
if size % len(padbytes) != 0:
raise Exception('Invalid padding size for section %s'%sec.name)
npad = size // len(padbytes)
return npad * padbytes
def __updateSectionPadding(self, sec, file_offset, mem_offset, align):
''' Update section padding with size.
For text sections: padding to the original section data, update size
            For other sections: padding to separate padbytes, keep size unchanged
For nobits sections: do nothing.
'''
if sec is None:
return file_offset, mem_offset
if sec.name.startswith('.text'):
align = max(align, sec.addralign)
file_offset, fpadsize = alignTo(file_offset, align)
mem_offset, mpadsize = alignTo(mem_offset, align)
sec.emitPadding(self.__genSectionPaddingBytes(sec, fpadsize))
            # FIXME: This treatment is weird, but the text sections seem to always be aligned,
            # and the last label of a .text section seems to be the padded offset.
#
# Update size label offset, it will be used in symbol size evaluation.
# I don't quite understand why it's this way, but let's just keep it as is.
if sec.name in self.__mSecSizeLabel:
sizelabel = self.__mSecSizeLabel[sec.name]
                # NOTE: do not use sec.size here
sizelabel.offset = sec.getDataSize()
CuAsmLogger.logSubroutine(f'Reset size label "{sizelabel.name}" of {sec.name} to {sec.getDataSize()}!')
elif sec.header['type'] == 'SHT_NOBITS':
mem_offset, mpadsize = alignTo(mem_offset, align)
sec.padsize = mpadsize
sec.padbytes = mpadsize * b'\x00'
else:
file_offset, fpadsize = alignTo(file_offset, align)
mem_offset, mpadsize = alignTo(mem_offset, align)
sec.padsize = fpadsize
sec.padbytes = fpadsize * b'\x00'
sec.updateHeader()
return file_offset, mem_offset
def __calcSegmentRange(self, sec_start, sec_end):
inRange = False
seg_off = 0
filesz = 0
memsz = 0
for sname, sec in self.__mSectionDict.items():
if sname == sec_start:
inRange = True
seg_off = sec.offset
f_off = seg_off
m_off = seg_off
if inRange:
psize = sec.getPaddedDataSize()
m_off += psize
if sec.header['type'] != 'SHT_NOBITS':
f_off += psize
if sname == sec_end:
inRange = False
break
filesz = f_off - seg_off
memsz = m_off - seg_off
return seg_off, filesz, memsz
def __checkNVInfoOffsetLabels(self, section, labelname, offset):
''' Check whether the label is a NVInfoOffsetLabel, push to label offset dict if necessary.
Valid offset label should be in form:
.CUASM_OFFSET_LABEL.{SectionName}.{NVInfoAttributeName}.{Identifier}
Identifier should be unique for every offset label (label cannot be defined twice).
        (A bit of syntactic sugar: "#" will be replaced by "L%08x" % lineno, such as "L000002f8".)
Example:
.CUASM_OFFSET_LABEL._Z4testPiS_S_.EIATTR_EXIT_INSTR_OFFSETS.0:
.CUASM_OFFSET_LABEL._Z4testPiS_S_.EIATTR_EXIT_INSTR_OFFSETS.#:
Return: real label name
'''
# TODO: some offset labels (such as EXIT, CTAID.Z) may be detected automatically
if not labelname.startswith('.CUASM_OFFSET_LABEL'):
return labelname
self.__assert(section.name.startswith('.text'), 'CUASM_OFFSET_LABEL should be defined in a text section!')
kname = section.name[6:]
vs = labelname[1:].split('.')
self.__assert(len(vs)==4, 'Offset label should be in form: .CUASM_OFFSET_LABEL.{SectionName}.{NVInfoAttributeName}.{Identifier}')
self.__assert(vs[1] == kname, 'CUASM_OFFSET_LABEL should include kernel name in second dot part!')
if kname not in self.__mNVInfoOffsetLabels:
self.__mNVInfoOffsetLabels[kname] = {}
# .CUASM_OFFSET_LABEL._Z4testPiS_S_.EIATTR_EXIT_INSTR_OFFSETS.0:
attr = vs[2]
if attr in self.__mNVInfoOffsetLabels[kname]:
self.__mNVInfoOffsetLabels[kname][attr].append(offset)
else:
self.__mNVInfoOffsetLabels[kname][attr] = [offset]
if vs[3] == '#':
lstr = 'L%08x'%self.__mLineNo
return labelname[:-1] + lstr
else:
return labelname
#### Help functions to display some internal states.
def dispFixupList(self):
print('Fixup list:')
if self.__mFixupList is None or len(self.__mFixupList)==0:
print(' ' + str(self.__mFixupList))
for i,f in enumerate(self.__mFixupList):
print("Fixup %3d: %s"%(i, str(f)))
print()
def dispRelocationList(self):
print('Relocation list:')
if self.__mRelList is None or len(self.__mRelList)==0:
print(' No relocations.')
for i,r in enumerate(self.__mRelList):
print('Relocation %3d: %s'%(i, r))
print()
def dispSectionList(self):
print('Section list:')
sdict = self.__mSectionDict
if sdict is None or len(sdict) == 0:
print(' No sections found.')
return
print(' Idx Offset Size ES AL Type Flags Link Info Name')
i = 0
for s in sdict:
sec = sdict[s]
ss = '%4x' % i
ss += ' {offset:6x} {size:6x} {entsize:4x}'.format(**sec.header)
ss += ' {:3x}'.format(sec.addralign)
if isinstance(sec.header['type'], str):
ss += ' {type:12s}'.format(**sec.header)
else:
ss += ' {type:<12x}'.format(**sec.header)
ss += ' {flags:6x}'.format(**sec.header)
ss += ' {link:6x} {info:8x}'.format(**sec.header)
ss += ' ' + sec.name
print(ss)
i += 1
print()
def dispSymbolDict(self):
print('\nSymbols:')
for i,s in enumerate(self.__mSymbolDict):
symbol = self.__mSymbolDict[s]
print('Symbol %3d: %s'%(i,symbol))
print()
def dispSymtabDict(self):
print('\nSymtab:')
for s in self.__mSymtabDict:
symid, syment = self.__mSymtabDict[s]
print('Symbol %3d (%s): %s'%(symid, s, syment))
if s in self.__mSymbolDict:
print(' %s'%self.__mSymbolDict[s])
print()
def dispLabelDict(self):
print('\nLabels: ')
for i,l in enumerate(self.__mLabelDict):
v = self.__mLabelDict[l]
print('Label %3d: %s'%(i, str(v)))
print()
def dispSegmentHeader(self):
print('Segment headers:')
for seg in self.__mSegmentList:
print(seg.header)
def dispFileHeader(self):
print('File header:')
print(self.__mCuAsmFile.fileHeader)
def dispTables(self):
# self.buildInternalTables()
print('.shstrtab:')
for i, idx in enumerate(self.__mShstrtabDict):
print('%3d \t0x%x \t%s'%(i, idx, self.__mShstrtabDict[idx]))
print('.strtab:')
for i, idx in enumerate(self.__mStrtabDict):
print('%3d \t0x%x \t%s'%(i, idx, self.__mStrtabDict[idx]))
print('.symtab')
for i, s in enumerate(self.__mSymtabDict):
print('%3d \t%s'%(i, s))
@CuAsmLogger.logTimeIt
def saveCubinCmp(self, cubinname, sav_prefix):
''' A simple helper function to display current contents vs cubin in bytes. '''
fasm = open(sav_prefix+'_asm.txt', 'w')
fbin = open(sav_prefix+'_bin.txt', 'w')
felf = open(cubinname, 'rb')
ef = ELFFile(felf)
fasm.write('FileHeader:\n' + str(self.__mCuAsmFile.getFileHeaderStruct()) + '\n')
fbin.write('FileHeader:\n' + str(ef.header) + '\n' )
# write section headers+data
for sname,sec in self.__mSectionDict.items():
fasm.write('# Section %s\n'%sname)
fasm.write(str(sec.getHeaderStruct()) + '\n')
if sec.getHeaderStruct()['sh_type'] != 'SHT_NOBITS':
fasm.write(bytes2Asm(sec.getData()) +'\n\n')
else:
fasm.write('\n')
# write segment headers
for seg in self.__mSegmentList:
fasm.write(str(seg.getHeaderStruct())+'\n')
# write section headers+data
for sec in ef.iter_sections():
fbin.write('# Section %s\n'%sec.name)
fbin.write(str(sec.header) + '\n')
if sec.header['sh_type'] != 'SHT_NOBITS':
fbin.write(bytes2Asm(sec.data()) + '\n\n')
else:
fbin.write('\n')
# write segment headers
for seg in ef.iter_segments():
fbin.write(str(seg.header) + '\n')
fasm.close()
fbin.close()
felf.close()
@staticmethod
def stripComments(s):
''' Strip comments of a line.
NOTE: cross line comments are not supported yet.
'''
        s = CuAsmParser.m_cppcomment.subn(' ', s)[0]   # replace comments with a single space, avoiding unwanted concatenation
s = CuAsmParser.m_ccomment.subn(' ', s)[0]
s = CuAsmParser.m_bracomment.subn(' ', s)[0]
s = re.subn(r'\s+', ' ', s)[0] # replace one or more spaces/tabs into one single space
return s.strip()
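    # Example for stripComments (an illustrative input line):
    #     stripComments('/*0040*/ MOV R2, 32@lo(flist) ; // load table address')
    # returns 'MOV R2, 32@lo(flist) ;' -- both comment styles are replaced by a
    # single space and runs of whitespace are collapsed.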
if __name__ == '__main__':
pass
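    # A minimal usage sketch (the file names and arch string below are
    # assumptions for illustration, not fixed conventions):
    #     cap = CuAsmParser()
    #     # cap.setInsAsmRepos('InsAsmRepos.txt', arch='sm_75')  # optional custom repos
    #     cap.parse('kernel.cuasm')
    #     cap.saveAsCubin('kernel.cubin')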
|
UTF-8
|
Python
| false | false | 84,235 |
py
| 63 |
CuAsmParser.py
| 37 | 0.522494 | 0.510276 | 0.000142 | 2,050 | 39.08439 | 137 |
sastatic/news_summarizer
| 13,245,679,163,346 |
59bca76992c5df1ff3e97410de33c67b2b201022
|
461f92ba380754cc35b883efe8aa4f30827a060e
|
/NewsSummarizer/populate.py
|
18b26111d2de6ce47f4896784a8b8caff31d7152
|
[] |
no_license
|
https://github.com/sastatic/news_summarizer
|
b66f7f60c472bebb47677d0c89d70b3733a6048b
|
9c07222f8b23e0914054453d58b03ce06728b988
|
refs/heads/master
| 2020-07-18T02:32:36.213599 | 2019-09-04T10:06:03 | 2019-09-04T10:06:03 | 206,155,771 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .dbconfig import dbconfig
import time
import requests
import scrapy
import json
import sumy
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
def Summarizer(arg):
parser = PlaintextParser.from_string(arg,Tokenizer("english"))
summarizer = LexRankSummarizer()
    # Summarize the document in 4 sentences
summary = summarizer(parser.document, 4)
string_summary = ""
for sentence in summary:
string_summary += str(sentence) + '.'
return string_summary
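# Example usage (an illustrative input; any plain-text article body works):
#     Summarizer("First sentence. Second one. Third. Fourth. Fifth.")
# returns the 4 highest-ranked sentences (per LexRank) concatenated with '.'.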
def getTag(arg):
    # Extract the path component between the first and second '/'
    s = ""
    f = 0
    for i in range(len(arg)):
        if f == 1 and arg[i] == '/':
            break
        if f == 1:
            s += arg[i]
        if arg[i] == '/' and f == 0:
            f = 1
    return s
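# Example: getTag("dmoz/Business/News") -> "Business" (the component between
# the first and second '/'); the category labels come from Event Registry's
# dmoz-style URIs requested in HeadLines() below.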


def HeadLines():
    main_url = "https://eventregistry.org/api/v1/article/getArticles?query=%7B%22%24query%22%3A%7B%22%24and%22%3A%5B%7B%22%24or%22%3A%5B%7B%22categoryUri%22%3A%22dmoz%2FBusiness%22%7D%2C%7B%22categoryUri%22%3A%22dmoz%2FHealth%22%7D%2C%7B%22categoryUri%22%3A%22dmoz%2FSociety%22%7D%2C%7B%22categoryUri%22%3A%22dmoz%2FScience%22%7D%2C%7B%22categoryUri%22%3A%22dmoz%2FSports%22%7D%5D%7D%2C%7B%22lang%22%3A%22eng%22%7D%5D%7D%7D&dataType=news&resultType=articles&articlesSortBy=date&articlesCount=50&includeArticleCategories=true&includeArticleLocation=true&includeArticleImage=true&articleBodyLen=-1&includeConceptImage=true&apiKey=7a0f2d98-d08b-4b08-b1f2-830bd7ae6883"
    fetchHeadlines = requests.get(main_url).json()
    article = fetchHeadlines["articles"]["results"]
    data = []
    for ar in article:
        val = {
            "head_line": ar["title"],
            "content": Summarizer(ar["body"]),
            "tag": getTag(ar["categories"][0]["label"]),
            "img": ar["image"],
            "dateTime": ar["dateTime"],
            "src": ar["source"]["uri"]
        }
        data.append(val)
    return data


def update_news():
    global db, dbconnected
    if not dbconnected:
        print("not connected to database, no caching of result")
        return
    db.News.drop()
    datas = HeadLines()
    for data in datas:
        db.News.insert_one(data)
    print("News successfully updated.")


def schedule():
    global db, dbconnected
    while True:
        time.sleep(60 * 60 * 24)  # refresh the cached headlines once every 24 hours
        update_news()


def retrieve_data():
    global db, dbconnected
    if not dbconnected:
        print("not connected to database, no caching of result")
        return None, False
    cursor = db.News.find({})
    collections = {}
    for document in cursor:
        pid = str(document['_id'])
        collection = {}
        for key, value in document.items():
            if key == '_id':
                continue
            collection[key] = str(value)
        collections[pid] = collection
    return collections, dbconnected

db, dbconnected = dbconfig('newsum', 'mongodb://sarwar:sarwar123@ds255577.mlab.com:55577/newsum?retryWrites=false', 55577, 'sarwar', 'sarwar123')
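
# Hypothetical usage sketch (illustration only; assumes the dbconfig call above
# connected successfully):
#
#   update_news()               # fetch, summarize and cache today's headlines
#   news, ok = retrieve_data()  # -> ({id: {'head_line': ..., 'tag': ...}}, True)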
|
UTF-8
|
Python
| false | false | 3,121 |
py
| 5 |
populate.py
| 3 | 0.634412 | 0.580904 | 0 | 93 | 32.55914 | 665 |
Vincennes-Technology/lab5-sensors-with-sockets-cbyrer
| 19,636,590,504,598 |
7037d41832901984c84dd13b168cc7a19a881de1
|
b83adf51712c364961781ac791895186ef24be95
|
/TempSensor.py
|
fe56563c5f817cdd180e273086af1a2d500ffa6d
|
[] |
no_license
|
https://github.com/Vincennes-Technology/lab5-sensors-with-sockets-cbyrer
|
a0b7b8166c31abfdee1cc40151386d75cb57f614
|
74fcf4bca55d5925bbcc4ceeb4bf181a39105013
|
refs/heads/master
| 2020-03-10T02:50:44.770953 | 2018-04-11T20:33:24 | 2018-04-11T20:33:24 | 129,147,718 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Original code https://www.sunfounder.com/learn/sensor-kit-v2-0-for-raspberry-
# pi-b-plus/lesson-26-ds18b20-temperature-sensor-sensor-kit-v2-0-for-b-plus.html
# Edited by Clayton Byrer to show Fahrenheit displayed on the LCD
# ----------------------------------------------------------------
# Note:
#   The ds18b20's data pin must be connected to pin 7.
#   Replace the 28-XXXXXXXXX with yours.
#   Connect the 3 pins as follows:
#   data pin connected to GPIO 4
# ----------------------------------------------------------------
import os
import Adafruit_CharLCD as LCD
import socket
import time

ds18b20 = ''
lcd = LCD.Adafruit_CharLCDPlate()
SERVERIP = '10.0.0.43'
n = 0


def setup():
    # Find the 1-Wire device directory created for the DS18B20 sensor
    global ds18b20
    for i in os.listdir('/sys/bus/w1/devices'):
        if i != 'w1_bus_master1':
            ds18b20 = i


def read():
    # global ds18b20
    location = '/sys/bus/w1/devices/' + ds18b20 + '/w1_slave'
    tfile = open(location)
    text = tfile.read()
    tfile.close()
    secondline = text.split("\n")[1]
    temperaturedata = secondline.split(" ")[9]
    temperature = float(temperaturedata[2:])
    temperature = temperature / 1000  # millidegrees Celsius -> degrees Celsius
    fahrenheit = temperature * 1.8 + 32
    return fahrenheit


def loop():
    n = 0
    while True:
        temp = read()
        if temp is not None:
            print("Current temp \n : %0.3f F" % temp)
            lcd.message("Current temp \n : %0.3f F" % temp)
            # original code from Python in a Nutshell 2nd Ed. page 527
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((SERVERIP, 8881))
            print("%d : Connected to server" % n)
            data = "'Temp Sensor','n', 'Current temp \n : %0.3f F'" % temp
            sock.sendall(data.encode())  # sockets carry bytes, not str
            print(" Sent:", data)
            sock.close()
            n += 1
        time.sleep(30)


def destroy():
    pass


if __name__ == '__main__':
    try:
        setup()
        loop()
    except KeyboardInterrupt:
        destroy()
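
# Minimal matching server sketch (hypothetical; the real server at SERVERIP is
# not part of this file). It accepts one connection at a time and prints each
# reading:
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(('', 8881))
#   srv.listen(1)
#   while True:
#       conn, addr = srv.accept()
#       print(addr, conn.recv(1024).decode())
#       conn.close()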
|
UTF-8
|
Python
| false | false | 2,003 |
py
| 2 |
TempSensor.py
| 1 | 0.551672 | 0.511732 | 0 | 69 | 28.043478 | 79 |
Vanclief/estafeta-wrapper
| 4,793,183,534,433 |
86b119b4c1fab555f76fe12280f6f6fb8fd18fa0
|
7310d00e051398d622ab5db892d70f5e609c07c4
|
/estafeta_wrapper/tracking.py
|
64a41205f1847bec4c1395c20fb3869dc02a0658
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
https://github.com/Vanclief/estafeta-wrapper
|
057702ac4c4dcb6b37d155dc92f3ec1a9954fa32
|
6402e0248d9aecda278d3126f901e949b8b766c5
|
refs/heads/master
| 2021-06-29T19:42:05.417569 | 2017-09-15T02:18:45 | 2017-09-15T02:18:45 | 100,381,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os

import zeep


def track(login,
          password,
          subscriber_id,
          waybill,
          production=False):
    """ Given a tracking number for a waybill, return its current
        status.
    """
    if production:
        wsdl = 'https://tracking.estafeta.com/Service.asmx?wsdl'
    else:
        base_dir = os.path.dirname(os.path.abspath(__file__)) + '/wsdl'
        wsdl = base_dir + '/Service.asmx.wsdl.xml'

    client = zeep.Client(wsdl=wsdl)
    factory0 = client.type_factory('ns0')

    # Waybill list data
    waybill_type = 'G'
    string = [waybill]
    array_of_string = factory0.ArrayOfString(string=string)
    waybill_list = factory0.WaybillList(waybillType=waybill_type,
                                        waybills=array_of_string)

    # Search data
    s_type = 'L'  # 'L' = list; could be replaced with an enum of the proper options
    search_type = factory0.SearchType(waybillList=waybill_list,
                                      type=s_type)

    # Search configuration
    history_configuration = factory0.HistoryConfiguration(includeHistory=1,
                                                          historyType='ALL')
    filter_type = factory0.Filter(filterInformation=0)
    search_configuration = factory0.SearchConfiguration(
        includeDimensions=True,
        includeWaybillReplaceData=False,
        includeReturnDocumentData=False,
        includeMultipleServiceData=False,
        includeInternationalData=False,
        includeSignature=False,
        includeCustomerInfo=True,
        historyConfiguration=history_configuration,
        filterType=filter_type)

    return client.service.ExecuteQuery(login=login,
                                       password=password,
                                       suscriberId=subscriber_id,
                                       searchType=search_type,
                                       searchConfiguration=search_configuration)
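
# Hypothetical usage sketch (credentials and waybill number are placeholders):
#
#   result = track(login='my_login', password='my_password',
#                  subscriber_id='123456', waybill='0123456789',
#                  production=False)
#   print(result)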
|
UTF-8
|
Python
| false | false | 2,471 |
py
| 6 |
tracking.py
| 3 | 0.515606 | 0.511552 | 0 | 55 | 43.854545 | 99 |