| column | dtype | lengths / range / classes |
|---|---|---|
| repo_name | string | lengths 7-111 |
| `__id__` | int64 | 16.6k-19,705B |
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 5-151 |
| content_id | string | lengths 40-40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k-687M ⌀ |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0-10.2M ⌀ |
| gha_stargazers_count | int32 | 0-178k ⌀ |
| gha_forks_count | int32 | 0-88.9k ⌀ |
| gha_open_issues_count | int32 | 0-2.72k ⌀ |
| gha_language | string | lengths 1-16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10-2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10-2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1-202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1-202k |
| alphanum_fraction | float64 | 0.26-0.89 |
| alpha_fraction | float64 | 0.2-0.89 |
| hex_fraction | float64 | 0-0.09 |
| num_lines | int32 | 1-93.6k |
| avg_line_length | float64 | 4.57-103 |
| max_line_length | int64 | 7-931 |
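Each row below is one raw record from this table: repository metadata, the full file text in the content column, and the derived per-file statistics. As a minimal sketch of how such a table could be inspected programmatically (assuming a Hugging Face `datasets`-style distribution; the dataset id used here is a placeholder, not taken from this document):

```python
# Sketch only: "some-user/python-files" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("some-user/python-files", split="train", streaming=True)
for record in ds.take(3):
    # each record holds repo metadata plus the raw source text in `content`
    print(record["repo_name"], record["path"], record["license_type"], record["num_lines"])
    print(record["content"][:200])
```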
JanaRankova/pyladies | 16,217,796,512,512 | 9c56992d10b6aeef854ae5854a83f784dc90a02f | f44a783125105e858843082a2b526d613ba0be01 | /06_ulohy/piskvorky1.py | c0bb4690257fe908ae8c14d872fd0f3744ca9f9c | []
| no_license | https://github.com/JanaRankova/pyladies | ab09f1392212f72f2952e73525a93be0ede2484d | ef034b63fbafc5fc76c580fb4999dcf500e1feb8 | refs/heads/master | 2020-08-15T15:07:46.326357 | 2019-12-30T18:04:58 | 2019-12-30T18:04:58 | 215,361,294 | 0 | 0 | null | false | 2019-12-30T18:05:00 | 2019-10-15T17:47:29 | 2019-12-30T18:02:58 | 2019-12-30T18:04:59 | 42 | 0 | 0 | 0 | Python | false | false | hracie_pole = '--------------------'
def vyhodnot(hracie_pole):
"""Vezme momentalny stav hracieho pola a podla podmienok vo funkcii, urci vyhercu."""
stav = ''
if 'xxx' in hracie_pole:
stav = 'x'
elif 'ooo' in hracie_pole:
stav = 'o'
elif '-' not in hracie_pole:
stav = '!'
else:
stav = '-'
return stav
print(vyhodnot(hracie_pole)) | UTF-8 | Python | false | false | 393 | py | 38 | piskvorky1.py | 33 | 0.536896 | 0.536896 | 0 | 16 | 23.625 | 89 |
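Illustrative calls (not part of the original exercise file) covering the four outcomes of vyhodnot:

```python
print(vyhodnot('xxo-oxxxo-'))  # 'x'  - three x's in a row
print(vyhodnot('oooxx-x---'))  # 'o'  - three o's in a row
print(vyhodnot('xoxoxooxox'))  # '!'  - no empty field left, a draw
print(vyhodnot('x-o-------'))  # '-'  - game still in progress
```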
emaballarin/RADLER | 14,439,680,063,489 | 6ea02a30b241e7e2f6833b124fe912327aa2d328 | a5b8bc5953b5205419554e0c7e8853cc4ba9a124 | /src/radler_ae_pretrain.py | b106376787f535942082637b04c5aef76002ca8d | [
"MIT"
]
| permissive | https://github.com/emaballarin/RADLER | c2d8595d72f6465418a34d0f8cacd1db22cdca22 | 5a059939a2ddb02c49689bd9d867aa9935589faa | refs/heads/master | 2020-06-28T18:26:01.567885 | 2020-06-08T03:28:56 | 2020-06-08T03:28:56 | 200,308,007 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ---------------------------------------------------------------------------- #
# #
# RADLER ~ (adversarially) Robust Adversarial Distributional LEaRner #
# #
# |> Pretraining through AutoEncoding <| #
# #
# (C) 2019-* Emanuele Ballarin <emanuele@ballarin.cc> #
# (C) 2019-* AI-CPS@UniTS Laboratory (a.k.a. Bortolussi Group) #
# #
# Distribution: MIT License #
# (Full text: https://github.com/emaballarin/RADLER/blob/master/LICENSE) #
# #
# Eventually-updated version: https://github.com/emaballarin/RADLER #
# #
# ---------------------------------------------------------------------------- #
# Adapted from W. Falcon:
# https://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09
# ------- #
# IMPORTS #
# ------- #
from __future__ import print_function
import torch
import torch as th
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import pytorch_lightning as pl
if __name__ == "__main__":
import architectures as myarchs
import weights_util as wutil
else:
from src import architectures as myarchs
from src import weights_util as wutil
# -------------------- #
# NETWORK ARCHITECTURE #
# -------------------- #
# Building blocks implemented in another module
# ------------------- #
# DATASET BOILERPLATE #
# ------------------- #
# A fake dataset composed all of the same in-out pair, which is the only example (to be autoencoded)
class MyStupidDataset(Dataset):
def __init__(self):
# Such example:
self.my_single_example = (
wutil.dictmodel_flatten(th.load("mnist_cnn_small.pt"), th_device="cuda")
.clone()
.detach()
)
def __getitem__(self, index):
return self.my_single_example, self.my_single_example
def __len__(self):
return 1000
# AUTOENCODER (PyTorch Lightning module; just by putting E & D scaffolds together)
class Autoencoder(pl.LightningModule):
def __init__(self, data_size=20522, code_size=2):
super(Autoencoder, self).__init__()
self.data_size = data_size
self.code_size = code_size
self.encoder = myarchs.AE_Encoder(data_size, code_size)
self.decoder = myarchs.AE_Decoder(code_size, data_size)
def forward(self, x):
# Encoding module
x = self.encoder(x)
# Decoding module
x = self.decoder(x)
return x
def encode(self, x):
return self.encoder(x)
def decode(self, code):
return self.decoder(code)
def MSE_loss(self, given_in, given_out):
return (torch.nn.MSELoss(reduction="sum"))(given_in, given_out)
def training_step(self, train_batch, batch_idx):
x, y = train_batch
copied_input = self.forward(x)
loss = self.MSE_loss(copied_input, y)
logs = {"train_loss": loss}
return {"loss": loss, "log": logs}
# def validation_step(self, val_batch, batch_idx):
# x, y = val_batch
# copied_input = self.forward(x)
# loss = self.MSE_loss(copied_input, y)
# return {"val_loss": loss}
#
# def validation_end(self, outputs):
# avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
# tensorboard_logs = {"val_loss": avg_loss}
# return {"avg_val_loss": avg_loss, "log": tensorboard_logs}
def train_dataloader(self):
my_dataset = MyStupidDataset()
my_train = DataLoader(my_dataset, shuffle=False, batch_size=64)
return my_train
# def val_dataloader(self):
# my_dataset = MyStupidDataset()
# my_val = DataLoader(my_dataset, shuffle=False, batch_size=64)
# return my_val
# def test_dataloader(self):
# my_dataset = MyStupidDataset()
# my_test = DataLoader(my_dataset, shuffle=False, batch_size=64)
# return my_test
def configure_optimizers(self):
# The lightningModule HAS the parameters
# (remember that we had the __init__ and forward method but we're just not showing it here)
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3, weight_decay=1e-5)
return optimizer
# train
model = Autoencoder()
trainer = pl.Trainer(
# We are forcing pure overfitting here; still
# we don't want too much of it.
max_epochs=15, # Autostopped, eventually (but it's balanced after all)
gpus=1,
)
trainer.fit(model)
# For some curious reason (a.k.a. purposeful overfitting), the decoding part is
# almost centered at [0.0, 0.0] to produce the best replication of the in-out
# Save model (decoding part)
th.save(model.decoder.state_dict(), "bottleneck.pt")
| UTF-8 | Python | false | false | 5,265 | py | 19 | radler_ae_pretrain.py | 17 | 0.535613 | 0.527825 | 0 | 155 | 32.967742 | 101 |
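A minimal sketch (not part of the repository) of how the decoder saved to bottleneck.pt above could be loaded back, assuming the same AE_Decoder(code_size=2, data_size=20522) layout used in Autoencoder.__init__:

```python
import torch as th
import architectures as myarchs  # assumption: run from the same src/ directory

decoder = myarchs.AE_Decoder(2, 20522)            # code_size=2 -> data_size=20522
decoder.load_state_dict(th.load("bottleneck.pt"))
decoder.eval()
with th.no_grad():
    flat_weights = decoder(th.zeros(1, 2))        # decode the code-space origin
print(flat_weights.shape)                         # expected: torch.Size([1, 20522])
```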
Tecplot/handyscripts | 5,720,896,458,849 | 6386954e4995350c85cf6cec422c709145b3531f | 4ad94b71e30883d6df07a3277265bd6fb7457ba7 | /python/examples/doc_examples/data/dataset_variable_names_tuple.py | aa42eef38f35393f479285754263ab374bdbd3a5 | [
"MIT"
]
| permissive | https://github.com/Tecplot/handyscripts | 7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c | 84a89bfecff5479a0319f08eb8aa9df465283830 | refs/heads/master | 2023-08-22T15:29:22.629644 | 2023-08-12T01:19:59 | 2023-08-12T01:19:59 | 149,826,165 | 89 | 64 | MIT | false | 2022-01-13T01:11:02 | 2018-09-21T22:47:23 | 2022-01-12T19:41:56 | 2022-01-13T01:11:01 | 12,260 | 58 | 47 | 1 | Jupyter Notebook | false | false | from os import path
import tecplot as tp
examples_dir = tp.session.tecplot_examples_directory()
datafile = path.join(examples_dir,'SimpleData','DownDraft.plt')
dataset = tp.data.load_tecplot(datafile)
result = tp.data.query.probe_at_position(0,0.1,0.3)
data = dataset.VariablesNamedTuple(*result.data)
# prints: (RHO, E) = (1.17, 252930.37)
msg = '(RHO, E) = ({:.2f}, {:.2f})'
print(msg.format(data.RHO, data.E))
| UTF-8 | Python | false | false | 415 | py | 314 | dataset_variable_names_tuple.py | 287 | 0.706024 | 0.662651 | 0 | 12 | 33.583333 | 63 |
qingshangithub/Smart-car-tracking-with-opencv | 19,026,705,149,551 | 458c9de713b120f8e0d9a237eea7de21fa71b2fb | c5e2ca3242cf86c4d6d9e5cff65763784aaaa708 | /NNCProject/Lower/motor_multiPro/speedsensor/test.py | 48db76f53a3cb325a45c095b83a8b459520f291c | []
| no_license | https://github.com/qingshangithub/Smart-car-tracking-with-opencv | bed8a3417102572963dc35bd6bdb80e226a93142 | 9d11d9b3f22acfc0f24002e6b420cbdc5d95f9cf | refs/heads/master | 2021-04-26T22:57:11.912021 | 2018-03-05T10:35:30 | 2018-03-05T10:35:30 | 123,901,874 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pigpio
from time import sleep
from multiprocessing import Process
p = pigpio.pi()
p.set_mode(17, pigpio.INPUT)
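# the callback below runs on every rising edge of GPIO 17; pigpio passes the edge timestamp in microseconds as 'tick'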
def cbf(g, l, tick):
print(tick)
cb = p.callback(17, pigpio.RISING_EDGE, cbf)
if __name__ == '__main__':
sleep(20)
| UTF-8 | Python | false | false | 244 | py | 40 | test.py | 23 | 0.668033 | 0.643443 | 0 | 13 | 17.769231 | 44 |
usyyy/python | 17,557,826,309,209 | 8a4bf4df04f08e0b9b6458f4ae30f9f027d76afb | 80cc3be9871b9d306dcebb1df7ddcbf9284a9524 | /fluent-python/my-code/01-python-data-model/01-a-pythonic-deck.py | dbcb61a4ef3eb3acadb3506cfc408f0ae2e4ecac | []
| no_license | https://github.com/usyyy/python | c4d79cb4211eaece960d64649a83db1060df30fa | 9532c5b95036342d9345c4ed2d55b16f507dade6 | refs/heads/master | 2020-01-23T22:01:02.662730 | 2017-12-06T22:37:02 | 2017-12-06T22:37:02 | 74,713,627 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections
from random import choice
from math import hypot
# Example 1.1 - A deck as a sequence of cards
Card = collections.namedtuple('Card', ['rank', 'suit'])
class FrenchDeck:
ranks = [str(n) for n in range(2, 11)] + list('JQKA')
suits = 'spades diamonds clubs hearts'.split()
def __init__(self):
self._cards = [Card(rank, suit) for suit in self.suits for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self, position):
return self._cards[position]
#
beer_card = Card('7', 'diamonds')
beer_card
#
deck = FrenchDeck()
len(deck)
#
deck[0]
deck[-1]
#
choice(deck)
choice(deck)
choice(deck)
#
deck[:3]
deck[12::13] # pick the aces by starting on 12 and skipping 13 cards at a time
#
for card in deck:
print(card)
for card in reversed(deck):
print(card)
#
Card('Q', 'hearts') in deck
Card('7', 'beasts') in deck
#
suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)
def spades_high(card):
rank_value = FrenchDeck.ranks.index(card.rank)
return rank_value * len(suit_values) + suit_values[card.suit]
for card in sorted(deck, key=spades_high):
print(card)
# Example 1.2 - A Simple 2d vector class
class Vector:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
# string representation of the object
# %r returns 1 not '1' since they are needed as integers
def __repr__(self):
return 'Vector (%r, %r)' % (self.x, self.y)
def __abs__(self):
return hypot(self.x, self.y)
def __bool__(self):
        # abs(self) is 0.0 for the default Vector(0, 0) because hypot(0, 0) == 0.0, so bool(Vector()) is False
return bool(abs(self))
def __add__(self, other):
        x = self.x + other.x
y = self.y + other.y
return Vector(x, y)
def __mul__(self, scalar):
return Vector(self.x * scalar, self.y * scalar)
| UTF-8 | Python | false | false | 1,892 | py | 59 | 01-a-pythonic-deck.py | 53 | 0.609937 | 0.593552 | 0 | 94 | 19.12766 | 86 |
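A quick illustrative check of the Vector class above (not part of the original notebook file):

```python
v1 = Vector(2, 4)
v2 = Vector(2, 1)
print(v1 + v2)            # Vector (4, 5)
print(abs(Vector(3, 4)))  # 5.0
print(Vector(3, 4) * 3)   # Vector (9, 12)
```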
detian08/mcl | 3,547,642,993,752 | ecf7afa8cc47355febbf1968a8783586d0099af2 | 99e57f00fcaf4469c1c1b79f2d17176aaef9a790 | /loan/wizard/__init__.py | 73594a5199bed8d87c64e42931c7ccf1dc9fa32c | []
| no_license | https://github.com/detian08/mcl | d007ffd0e869f3bd9a8c74bc8473119901f0de2a | 32d61148326c931aca0107c3894061773f287e33 | refs/heads/master | 2022-03-23T19:36:29.608645 | 2019-12-11T10:15:50 | 2019-12-11T10:15:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import hr_loan_wizard
| UTF-8 | Python | false | false | 29 | py | 287 | __init__.py | 188 | 0.758621 | 0.758621 | 0 | 1 | 28 | 28 |
akbarlintang/perumahan | 16,449,724,778,184 | e981ad2971bf95bb5ad26fb4b7a1eb93a1732c2b | 73f6ba42a793d18ad5b4c44cfdc278e51aa1b9b0 | /perum/views.py | e8828e9cea66ce406f19303c074bfd490595440b | []
| no_license | https://github.com/akbarlintang/perumahan | e14eb922a86c76581d8faae5700ff21e83ba13ee | 66c908a382bc32e9b9abc69b3a6f22eab12d8d2c | refs/heads/main | 2022-12-30T08:11:23.856824 | 2020-10-22T15:33:30 | 2020-10-22T15:33:30 | 306,377,878 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.db.models import Sum
from django.forms import inlineformset_factory
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib import messages
from .models import *
from .forms import *
from .filters import *
from .decorators import *
import datetime
# Create your views here.
@unauthenticated_user
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username atau Password salah!')  # "Incorrect username or password!"
context = {}
return render(request, 'perum/login.html', context)
def logoutUser(request):
logout(request)
return redirect('login')
@unauthenticated_user
def registerPage(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
group = Group.objects.get(name='customer')
user.groups.add(group)
Pelanggan.objects.create(
user=user,
)
messages.success(request, 'Akun berhasil dibuat untuk ' + username)  # "Account successfully created for <username>"
return redirect('login')
context = {'form':form}
return render(request, 'perum/register.html', context)
@login_required(login_url='login')
@admin_only
def home(request):
pelanggans = Pelanggan.objects.all()
administrasis = Administrasi.objects.all()
today = datetime.date.today()
bulan = Administrasi.objects.filter(tanggal__year=today.year, tanggal__month=today.month).aggregate(Sum('biaya_angsur'))['biaya_angsur__sum']
tahun = Administrasi.objects.filter(tanggal__year=today.year).aggregate(Sum('biaya_angsur'))['biaya_angsur__sum']
context = {'pelanggans':pelanggans, 'bulan':bulan, 'tahun':tahun, 'administrasis':administrasis}
return render(request, 'perum/dashboard.html', context)
@login_required(login_url='login')
def unit(request):
units = Unit.objects.all().order_by('no_unit')
unitFilter = UnitFilter(request.GET, queryset=units)
units = unitFilter.qs
context = {'units':units, 'unitFilter':unitFilter}
return render(request, 'perum/unit.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def pelanggan(request):
pelanggans = Pelanggan.objects.all()
pelangganFilter = PelangganFilter(request.GET, queryset=pelanggans)
pelanggans = pelangganFilter.qs
context = {'pelanggans':pelanggans, 'pelangganFilter':pelangganFilter}
return render(request, 'perum/pelanggan.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def administrasi(request):
administrasis = Administrasi.objects.all()
administrasiFilter = AdministrasiFilter(request.GET, queryset=administrasis)
administrasis = administrasiFilter.qs
context = {'administrasis':administrasis, 'administrasiFilter':administrasiFilter}
return render(request, 'perum/administrasi.html', context)
def pemesanan(request):
pemesanan = Booking.objects.all().order_by('tanggal')
context = {'pemesanan':pemesanan}
return render(request, 'perum/pemesanan.html', context)
def akun(request, pk):
akun = Pelanggan.objects.get(id=pk)
pelanggans = Pelanggan.objects.filter(id=pk)
adm = Administrasi.objects.filter(nama_id=pk)
context = {'pelanggans':pelanggans, 'akun':akun, 'adm':adm}
return render(request, 'perum/akun.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def profil(request):
nama = request.user.profile.nama
no_telp = request.user.profile.no_telp
email = request.user.profile.email
no_unit = request.user.profile.no_unit
context = {'nama':nama, 'no_telp':no_telp, 'email':email, 'no_unit':no_unit}
return render(request, 'perum/profil.html', context)
@login_required(login_url='login')
def angsuran(request, pk):
adm = Administrasi.objects.filter(nama_id=pk)
context = {'adm':adm}
return render(request, 'perum/angsuran.html', context)
def infoUnit(request, pk):
unit = Unit.objects.filter(id=pk)
context = {'unit':unit}
return render(request, 'perum/info_unit.html', context)
def createBooking(request, pk):
unit = Booking.objects.filter(id=pk)
form = BookingForm()
if request.method == 'POST':
form = BookingForm(request.POST)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form, 'unit':unit}
return render(request, 'perum/form_booking.html', context)
def createPelanggan(request):
form = PelangganForm()
if request.method == 'POST':
form = PelangganForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
context = {'form':form}
return render(request, 'perum/form_pelanggan.html', context)
def ubahPelanggan(request, pk):
pelanggan = Pelanggan.objects.get(id=pk)
form = PelangganForm(instance=pelanggan)
if request.method == 'POST':
form = PelangganForm(request.POST, instance=pelanggan)
if form.is_valid():
form.save()
return redirect('home')
context = {'form':form}
return render(request, 'perum/form_pelanggan.html', context)
def hapusPelanggan(request, pk):
pelanggan = Pelanggan.objects.get(id=pk)
if request.method == "POST":
pelanggan.delete()
return redirect('pelanggan')
context = {'pelanggan':pelanggan}
return render(request, 'perum/hapus_pelanggan.html', context)
def createUnit(request):
form = UnitForm()
if request.method == 'POST':
form = UnitForm(request.POST)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form}
return render(request, 'perum/form_unit.html', context)
def ubahUnit(request, pk):
unit = Unit.objects.get(id=pk)
form = UnitForm(instance=unit)
if request.method == 'POST':
form = UnitForm(request.POST, instance=unit)
if form.is_valid():
form.save()
return redirect('/unit')
context = {'form':form}
return render(request, 'perum/form_unit.html', context)
def hapusUnit(request, pk):
unit = Unit.objects.get(id=pk)
if request.method == "POST":
unit.delete()
return redirect('/unit')
context = {'unit':unit}
return render(request, 'perum/hapus_unit.html', context)
def hapusPemesanan(request, pk):
pemesanan = Booking.objects.get(id=pk)
if request.method == "POST":
pemesanan.delete()
return redirect('/pemesanan')
context = {'pemesanan':pemesanan}
return render(request, 'perum/hapus_pemesanan.html', context)
def createAdministrasi(request):
form = AdministrasiForm()
if request.method == 'POST':
form = AdministrasiForm(request.POST)
if form.is_valid():
form.save()
return redirect('/administrasi')
context = {'form':form}
return render(request, 'perum/form_administrasi.html', context)
def pembayaran(request, pk):
administrasis = Administrasi.objects.filter(id=pk)
context = {'administrasis':administrasis}
return render(request, 'perum/pembayaran.html', context)
def pemasukan(request):
today = datetime.datetime.today()
bulan = Administrasi.objects.filter(tanggal__year=today.year).filter(tanggal__month=today.month).aggregate(Sum('biaya_angsur'))
tahun = Administrasi.objects.filter(tanggal__year=today.year).aggregate(Sum('biaya_angsur'))
hari = Administrasi.objects.filter(tanggal__year=today.year, tanggal__month=today.month, tanggal__day=today.day).aggregate(Sum('biaya_angsur'))
| UTF-8 | Python | false | false | 7,587 | py | 52 | views.py | 36 | 0.734282 | 0.734282 | 0 | 261 | 28.068966 | 142 |
andrebola/main-recsys-cocoplaya | 9,191,230,041,151 | 7bab7a800ddfcf61447566dcc560e4cf4e55df45 | 245540330cbfe32b2431dd655632c6b0c6200f69 | /model_main.py | 0f76e4280c19f3e2488e9e0508b02a748bfc80e7 | [
"Apache-2.0"
]
| permissive | https://github.com/andrebola/main-recsys-cocoplaya | eec1f05186d7acaaebb72a40030126b95ccdb1ce | 02df2bebac5ea75006ddbc86404f3ff886af396e | refs/heads/master | 2022-12-14T23:42:16.614712 | 2018-06-30T13:32:09 | 2018-06-30T13:32:09 | 139,248,485 | 3 | 0 | Apache-2.0 | false | 2021-06-01T22:20:20 | 2018-06-30T12:40:58 | 2019-04-18T06:01:25 | 2021-06-01T22:20:18 | 8 | 3 | 0 | 1 | Python | false | false | import pickle
import os
import re
import json
import numpy as np
import datetime
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import MinMaxScaler, QuantileTransformer, LabelBinarizer
from sklearn.decomposition import TruncatedSVD
from scipy import sparse
from lightfm import LightFM
from collections import defaultdict
SEED = 10
def normalize_name(name):
name = name.lower()
name = re.sub(r"[.,\/#!$%\^\*;:{}=\_`~()@]", ' ', name)
name = re.sub(r'\s+', ' ', name).strip()
return name
def process_mpd(playlists_path, target_playlists, output_file, prev_songs_window):
max_prev_song = 0
previous_tracks = defaultdict(lambda: defaultdict(int))
playlists_tracks = []
playlists = []
playlists_extra = {'name': []}
filenames = os.listdir(playlists_path)
for filename in sorted(filenames):
if filename.startswith("mpd.slice.") and filename.endswith(".json"):
fullpath = os.sep.join((playlists_path, filename))
f = open(fullpath)
js = f.read()
f.close()
mpd_slice = json.loads(js)
for playlist in mpd_slice['playlists']:
nname = normalize_name(playlist['name'])
playlists_extra['name'].append(nname)
tracks = defaultdict(int)
sorted_tracks = sorted(playlist['tracks'], key=lambda k: k['pos'])
prev_track = []
for track in sorted_tracks:
tracks[track['track_uri']] += 1
curr_prev_tracks = len(prev_track)
for i, song_in_window in enumerate(prev_track):
previous_tracks[song_in_window][track['track_uri']] += (i+1)/curr_prev_tracks
previous_tracks[track['track_uri']][song_in_window] += (i+1)/curr_prev_tracks
#previous_tracks[song_in_window][track['track_uri']] += 1
#previous_tracks[track['track_uri']][song_in_window] += 1
max_prev_song = max(max_prev_song, previous_tracks[track['track_uri']][song_in_window])
max_prev_song = max(max_prev_song, previous_tracks[song_in_window][track['track_uri']])
if len(prev_track) == prev_songs_window:
prev_track.pop(0)
prev_track.append(track['track_uri'])
playlists_tracks.append(tracks)
playlists.append(str(playlist['pid']))
top_pop = []
for i in previous_tracks.keys():
top_pop.append((i, np.sum(list(previous_tracks[i].values()))))
top_pop = sorted(top_pop, key=lambda x:x[1], reverse=True)[:10000]
top_pop = [t[0] for t in top_pop]
# Add playlists on testing set
test_playlists = []
test_playlists_tracks = []
target = json.load(open(target_playlists))
train_playlists_count = len(playlists)
test_playlists_recommended_sum = []
for playlist in target["playlists"]:
nname = ""
if 'name' in playlist:
nname = normalize_name(playlist['name'])
playlists_extra['name'].append(nname)
playlists.append(str(playlist['pid']))
test_playlists.append(str(playlist['pid']))
if len(playlist['tracks']) == 0:
test_playlists_recommended_sum.append(top_pop)
test_playlists_tracks.append({})
continue
tracks = defaultdict(int)
for track in playlist['tracks']:
tracks[track['track_uri']] += 1
#playlists_tracks.append(tracks)
test_playlists_tracks.append(tracks)
recommended_pop = defaultdict(list)
for t in tracks.keys():
for pt in previous_tracks[t].keys():
if pt not in tracks:
recommended_pop[pt].append(previous_tracks[t][pt] /max_prev_song)
recommended_pop_sum = [(t, np.sum(recommended_pop[t])) for t in recommended_pop.keys()]
recommended_pop_sum = sorted(recommended_pop_sum, key=lambda x:x[1], reverse=True)
recommended_pop_sum = [t[0] for t in recommended_pop_sum]
test_playlists_recommended_sum.append(recommended_pop_sum)
print ("Data loaded. Creating features matrix")
dv = DictVectorizer()
interaction_matrix = dv.fit_transform(playlists_tracks+[{}]*10000)
lb = LabelBinarizer(sparse_output=True)
pfeat_train = lb.fit_transform(playlists_extra['name'][:1000000]+[""]*10000)
pfeat_test = lb.transform(playlists_extra['name'])
print ("pfeat_train", pfeat_train.shape)
print ("pfeat_test", pfeat_test.shape)
playlist_features = pfeat_train
# Need to hstack playlist_features
eye = sparse.eye(playlist_features.shape[0], playlist_features.shape[0]).tocsr()
playlist_features_concat = sparse.hstack((eye, playlist_features))
print ("Features matrix created. Training model")
model = LightFM(loss='warp', no_components=200, max_sampled=30, item_alpha=1e-06, user_alpha=1e-06, random_state=SEED)
model = model.fit(interaction_matrix, user_features=playlist_features_concat, epochs=150, num_threads=32)
# freeze the gradient and optimize held-out users
model.item_embedding_gradients = np.finfo(np.float32).max * np.ones_like(model.item_embedding_gradients)
model.item_bias_gradients = np.finfo(np.float32).max * np.ones_like(model.item_bias_gradients)
model.item_alpha = 0.0
model.user_alpha = 0.0
model.user_embedding_gradients[:1000000,:] = np.finfo(np.float32).max * np.ones_like(model.user_embedding_gradients[:1000000,:])
model.user_bias_gradients[:1000000] = np.finfo(np.float32).max * np.ones_like(model.user_bias_gradients[:1000000])
# Use the trained model to get a representation of the playlists on challenge set
interaction_matrix = dv.transform(playlists_tracks+test_playlists_tracks)
playlist_features = pfeat_test
playlist_features_concat = sparse.hstack((eye, playlist_features))
model.user_embeddings[-10000:] = ((model.random_state.rand(10000, model.no_components) - 0.5) / model.no_components).astype(np.float32)
model = model.fit_partial(interaction_matrix, user_features=playlist_features_concat, epochs=150, num_threads=32)
print ("Model Trained")
user_biases, user_embeddings = model.get_user_representations(playlist_features_concat)
item_biases, item_embeddings = model.get_item_representations()
fuse_perc = 0.7
with open(output_file, 'w') as fout:
print('team_info,cocoplaya,main,andres.ferraro@upf.edu', file=fout)
for i, playlist in enumerate(test_playlists):
playlist_pos = train_playlists_count+i
y_pred = user_embeddings[playlist_pos].dot(item_embeddings.T) + item_biases
topn = np.argsort(-y_pred)[:len(test_playlists_tracks[i])+4000]
rets = [(dv.feature_names_[t], float(y_pred[t])) for t in topn]
songids = [s for s, _ in rets if s not in test_playlists_tracks[i]]
songids_dict = {s:1 for s in songids}
max_score = max(len(songids), len(test_playlists_recommended_sum[i]))
pop_sum = {s:(max_score - p) for p,s in enumerate(test_playlists_recommended_sum[i])}
fuse_sum = []
for p, s in enumerate(songids):
pop_val_sum = 0
if s in pop_sum:
pop_val_sum = pop_sum[s]
fuse_sum.append((s,((max_score - p)*fuse_perc + pop_val_sum*(1-fuse_perc) ) / 2))
for s in pop_sum.keys():
if s not in songids_dict:
fuse_sum.append((s,(pop_sum[s]*(1-fuse_perc) ) / 2))
fuse_sum = sorted(fuse_sum, key=lambda x:x[1], reverse=True)
print(' , '.join([playlist] + [x[0] for x in fuse_sum[:500]]), file=fout)
if __name__ == '__main__':
playlists_file = './mpd/data/'
target_playlists = 'eval_data/challenge_set.json'
output_file = 'output/output_main_final_sum_window_10.csv'
process_mpd(playlists_file, target_playlists, output_file, 10)
| UTF-8 | Python | false | false | 8,148 | py | 2 | model_main.py | 1 | 0.624325 | 0.607757 | 0 | 173 | 46.098266 | 139 |
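For intuition, a small worked example of the rank-fusion rule used above, with made-up numbers:

```python
# Illustrative values only: max_score = 1000, fuse_perc = 0.7
# A track ranked p = 4 by the LightFM scores, with a co-occurrence score of 650:
fuse = ((1000 - 4) * 0.7 + 650 * (1 - 0.7)) / 2   # (697.2 + 195.0) / 2 = 446.1
```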
mwouts/jupytext | 6,150,393,177,714 | c3b973b70edb356c3f74bcbd0908401f4f680729 | 6950d17118b97259e181cfc1e6ba3becf6fab753 | /tests/notebooks/mirror/ipynb_to_sphinx/sample_rise_notebook_66.py | 7de7c1af489b6441c5ac9ea88e3bd9d5f01be008 | [
"MIT"
]
| permissive | https://github.com/mwouts/jupytext | 8f38d974320e17d9bfdc02a91707b5d7cba999cc | 28cc7de53d403838caf24c3470df95e94a82d132 | refs/heads/main | 2023-09-04T04:20:37.143750 | 2023-08-26T20:39:32 | 2023-08-26T21:01:59 | 137,444,487 | 6,292 | 451 | MIT | false | 2023-09-12T14:41:06 | 2018-06-15T05:25:36 | 2023-09-12T09:31:00 | 2023-09-12T14:41:05 | 24,266 | 6,135 | 381 | 115 | Python | false | false | # ---
# jupyter:
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
A markdown cell
"""
1+1
###############################################################################
# Markdown cell two
| UTF-8 | Python | false | false | 242 | py | 433 | sample_rise_notebook_66.py | 162 | 0.371901 | 0.355372 | 0 | 16 | 14.125 | 79 |
niryRemyNimbol/masterarbeit | 13,821,204,767,087 | 759f2070fbcb3e0d26074a24bde9e9152a8e9e33 | 94bc505d5c9117ab6cae344274dc3391f32856d7 | /mrf_lstm_run.py | ebe475508777077742d38835c0079c54a685e19a | []
| no_license | https://github.com/niryRemyNimbol/masterarbeit | 3bd6cffe2cf7bc47651c2e23dd2e8819cd541f09 | 31fe9821ae7957583f723773e09831716c59b254 | refs/heads/master | 2020-04-05T04:39:47.855887 | 2019-05-21T12:45:05 | 2019-05-21T12:45:05 | 156,560,515 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
import dic
import rnn_functions
import display_functions
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score as r2
data_path = '../recon_q_examples/data/Exam52004/Series5/recon_data'
mask_path = '../recon_q_examples/data/Exam52004/Series5/mask.dat'
map_path = '../recon_q_examples/data/Exam52004/Series5/dm_qmaps.dat'
dl_path = '../recon_q_examples/data/Exam52004/Series5/nn_qmaps.dat'
size = 200
Nreps = 1000
series = dic.read_mrf_data(data_path, Nreps, size)
mask = dic.load_relaxation_times_map(mask_path, size, method=2)
dm_map = dic.load_relaxation_times_map(map_path, size, method=0)
dl_map = dic.load_relaxation_times_map(dl_path, size, method=1)
num_in = 100
num_fc = 64
timesteps = 10
num_hidden = 8
num_out = 2
times_max = np.array([4., .6])
#times_max = np.array([4., 1.])
X = tf.placeholder("float", [None, timesteps, num_in])
net = rnn_functions.LSTM(X, timesteps, num_hidden, num_out, activation=tf.sigmoid, fc=True, tr=True)
#out = times_max * net
out = [times_max * net_ for net_ in net]
saver = tf.train.Saver()
epoch = 1940
with tf.Session() as sess:
rnn_functions.load_lstm(saver, sess, epoch, '_sigmoid_mape_tr')
times = sess.run(out, feed_dict={X: series[:timesteps*num_in, :].T.reshape((series.shape[1], timesteps, num_in))})
img = times[9].reshape((size, size, 2), order='C')
#img = times.reshape((size, size, 2), order='C')
img[:, :, 0] *= mask * 1e3
img[:, :, 1] *= mask * 1e3
img_dl = dl_map * 1e3
img_dl[:, :, 0] = mask * img_dl[:, :, 0].T
img_dl[:, :, 1] = mask * img_dl[:, :, 1].T
img_dm = dm_map * 1e3
img_dm[:, :, 0] = img_dm[:, :, 0].T
img_dm[:, :, 1] = img_dm[:, :, 1].T
true_t1 = np.array([604, 596,1448, 1262, 444, 754, 903, 1276, 1034, 745, 1160, 966])
true_t2 = np.array([95, 136, 390, 184, 154, 116, 137, 204, 167, 157, 214, 224])
corners = display_functions.detect_phantom_tubes(img, mask, 28, 2)
img_gt, _, _ = display_functions.compare_to_gt(img, mask, corners, 28, true_t1, true_t2)
fig_t1, ax_t1, fig_t2, ax_t2 = display_functions.plot_results(img, phantom=True, gt=img_gt)
fig_t1_dm, ax_t1_dm, fig_t2_dm, ax_t2_dm = display_functions.plot_comparison_method(img_dm, img, phantom=True, gt=img_gt)
fig_t1_dl, ax_t1_dl, fig_t2_dl, ax_t2_dl = display_functions.plot_comparison_method(img_dl, img, phantom=True, gt=img_gt, method=1)
#display_functions.draw_bounding_boxes(ax_t1, corners, 28)
#display_functions.draw_bounding_boxes(ax_t2, corners, 28)
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
imgs = []
for k in range(len(times)):
img = times[k].reshape((size, size, 2), order='C')
img[:, :, 0] *= mask * 1e3
img[:, :, 1] *= mask * 1e3
imgs.append(img)
#fig_tr, ax_tr = plt.subplots(10, 6, figsize=(18, 30))
#for k in range(len(imgs)):
# t1 = ax_tr[k][0].imshow(imgs[k][:, :, 0], cmap='hot', vmax=4000, vmin=0)
# t1_err = ax_tr[k][1].imshow(np.abs(imgs[k][:, :, 0] - img_dm[:, :, 0])/(img_dm[:, :, 0] + 1e-6) * 1e2, cmap='Reds', vmax=100, vmin=0)
# ax_tr[k][2].scatter(img_dm[:, :, 0], imgs[k][:, :, 0], c='b', marker='.', alpha=0.1)
# t2 = ax_tr[k][3].imshow(imgs[k][:, :, 1], cmap='copper', vmax=300, vmin=0)
# t2_err = ax_tr[k][4].imshow(np.abs(imgs[k][:, :, 1] - img_dm[:, :, 1])/(img_dm[:, :, 1] + 1e-6) * 1e2, cmap='Reds', vmax=100, vmin=0)
# ax_tr[k][5].scatter(img_dm[:, :, 1], imgs[k][:, :, 1], c='b', marker='.', alpha=0.1)
# r2_t1 = r2(img_dm[:, :, 0], imgs[k][:, :, 0])
# r2_t2 = r2(img_dm[:, :, 1], imgs[k][:, :, 1])
# ax_tr[k][0].text(-35, 100, r'\Huge {:d}'.format((k+1)))
# ax_tr[k][2].text(1, 3550, r'\Large R2 = {:5f}'.format(r2_t1))
# ax_tr[k][2].set_xlabel(r'\Large DM (ms)')
# ax_tr[k][2].set_ylabel(r'\Large LSTM (ms)')
# ax_tr[k][2].set_xbound(lower=0, upper=4000)
# ax_tr[k][2].set_ybound(lower=0, upper=4000)
# ax_tr[k][2].plot([x for x in range(4000)], [x for x in range(4000)], 'g--')
# asp = np.diff(ax_tr[k][2].get_xlim())[0] / np.diff(ax_tr[k][2].get_ylim())[0]
# ax_tr[k][2].set_aspect(asp)
# ax_tr[k][2].ticklabel_format(style='sci', axis='both', scilimits=(3, 3))
# ax_tr[k][0].set_axis_off()
# ax_tr[k][1].set_axis_off()
# ax_tr[k][5].text(1, 550, r'\Large R2 = {:5f}'.format(r2_t2))
# ax_tr[k][5].set_xlabel(r'\Large DM (ms)')
# ax_tr[k][5].set_ylabel(r'\Large LSTM (ms)')
# ax_tr[k][5].set_xbound(lower=0, upper=600)
# ax_tr[k][5].set_ybound(lower=0, upper=600)
# ax_tr[k][5].plot([x for x in range(600)], [x for x in range(600)], 'g--')
# asp = np.diff(ax_tr[k][5].get_xlim())[0] / np.diff(ax_tr[k][5].get_ylim())[0]
# ax_tr[k][5].set_aspect(asp)
# ax_tr[k][5].ticklabel_format(style='sci', axis='both', scilimits=(2, 2))
# ax_tr[k][5].set_xticks(ax_tr[k][5].get_yticks()[1:-1])
# ax_tr[k][3].set_axis_off()
# ax_tr[k][4].set_axis_off()
#fig_tr.colorbar(t1, fraction=0.05, pad=-0.05, ax=ax_tr[9][0], orientation='horizontal')
#fig_tr.colorbar(t1_err, fraction=0.05, pad=-0.05, ax=ax_tr[9][1], orientation='horizontal')
#fig_tr.colorbar(t2, fraction=0.05, pad=-0.05, ax=ax_tr[9][3], orientation='horizontal')
#fig_tr.colorbar(t2_err, fraction=0.05, pad=-0.05, ax=ax_tr[9][4], orientation='horizontal')
#ax_tr[0][0].set_title(r'\Huge \textbf{T1 (ms)}')
#ax_tr[0][3].set_title(r'\Huge \textbf{T2 (ms)}')
#ax_tr[0][1].set_title(r'\Huge \textbf{T1 Error (\%)')
#ax_tr[0][4].set_title(r'\Huge \textbf{T2 Error (\%)')
#fig_comp, ax_comp = plt.subplots(2, 4, figsize=(20, 10))
#t1 = ax_comp[0][0].imshow(imgs[9][:, :, 0], cmap='hot', vmax=4000, vmin=0)
#t1_err = ax_comp[0][2].imshow(np.abs(imgs[9][:, :, 0] - img_dm[:, :, 0])/(img_dm[:, :, 0] + 1e-6) * 1e2, cmap='Reds', vmax=100, vmin=0)
#t1_dm = ax_comp[0][1].imshow(img_dm[:, :, 0], cmap='hot', vmax=4000, vmin=0)
#ax_comp[0][3].scatter(img_dm[:, :, 0], imgs[9][:, :, 0], c='b', marker='.', alpha=0.1)
#t2 = ax_comp[1][0].imshow(imgs[9][:, :, 1], cmap='copper', vmax=300, vmin=0)
#t2_err = ax_comp[1][2].imshow(np.abs(imgs[9][:, :, 1] - img_dm[:, :, 1])/(img_dm[:, :, 1] + 1e-6) * 1e2, cmap='Reds', vmax=100, vmin=0)
#t2_dm = ax_comp[1][1].imshow(img_dm[:, :, 1], cmap='copper', vmax=300, vmin=0)
#ax_comp[1][3].scatter(img_dm[:, :, 1], imgs[9][:, :, 1], c='b', marker='.', alpha=0.1)
#r2_t1 = r2(img_dm[:, :, 0], imgs[9][:, :, 0])
#r2_t2 = r2(img_dm[:, :, 1], imgs[9][:, :, 1])
#ax_comp[0][3].text(1, 3550, r'\huge R2 = {:5f}'.format(r2_t1))
#ax_comp[0][3].set_xlabel(r'\huge DM (ms)')
#ax_comp[0][3].set_ylabel(r'\huge LSTM (ms)')
#ax_comp[0][3].set_xbound(lower=0, upper=4000)
#ax_comp[0][3].set_ybound(lower=0, upper=4000)
#ax_comp[0][3].plot([x for x in range(4000)], [x for x in range(4000)], 'g--')
#ax_comp[0][0].set_axis_off()
#ax_comp[0][1].set_axis_off()
#ax_comp[0][2].set_axis_off()
#ax_comp[1][3].text(1, 550, r'\huge R2 = {:5f}'.format(r2_t2))
#ax_comp[1][3].set_xlabel(r'\huge Dictionary matching (ms)')
#ax_comp[1][3].set_ylabel(r'\huge LSTM (ms)')
#ax_comp[1][3].set_xbound(lower=0, upper=600)
#ax_comp[1][3].set_ybound(lower=0, upper=600)
#ax_comp[1][3].plot([x for x in range(600)], [x for x in range(600)], 'g--')
#ax_comp[1][0].set_axis_off()
#ax_comp[1][1].set_axis_off()
#ax_comp[1][2].set_axis_off()
#fig_comp.colorbar(t1, fraction=0.05, pad=-0.05, ax=ax_comp[0][0], orientation='horizontal')
#fig_comp.colorbar(t1_dm, fraction=0.05, pad=-0.05, ax=ax_comp[0][1], orientation='horizontal')
#fig_comp.colorbar(t1_err, fraction=0.05, pad=-0.05, ax=ax_comp[0][2], orientation='horizontal')
#fig_comp.colorbar(t2, fraction=0.05, pad=-0.05, ax=ax_comp[1][0], orientation='horizontal')
#fig_comp.colorbar(t2_dm, fraction=0.05, pad=-0.05, ax=ax_comp[1][1], orientation='horizontal')
#fig_comp.colorbar(t2_err, fraction=0.05, pad=-0.05, ax=ax_comp[1][2], orientation='horizontal')
#ax_comp[0][0].text(-80, 100, r'\Huge \textbf{T1 (ms)}')
#ax_comp[1][0].text(-80, 100, r'\Huge \textbf{T2 (ms)}')
#ax_comp[0][0].set_title(r'\Huge \textbf{LSTM}')
#ax_comp[0][1].set_title(r'\Huge \textbf{DM}')
#ax_comp[0][2].set_title(r'\Huge \textbf{Absolute Percentage Error')
| UTF-8 | Python | false | false | 8,009 | py | 22 | mrf_lstm_run.py | 21 | 0.602697 | 0.520664 | 0 | 156 | 50.339744 | 138 |
Godys05/smartSlideshow | 3,624,952,447,062 | 102b441762f07e2ca2a8d64db28192bd7b10c082 | 548f0a265ddff2199a6a3de98218209bf348c504 | /Weather.py | 62cf01ff4435be296b4f107063ceddc2020cb583 | []
| no_license | https://github.com/Godys05/smartSlideshow | d611707ac5276ee4917ec3d5f37584f603d33a19 | 397220995a45f60f3cbae31653f943622b3ccb2f | refs/heads/master | 2022-04-22T18:13:33.256990 | 2020-04-23T18:09:26 | 2020-04-23T18:09:26 | 258,274,945 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
class Weather:
def __init__(self, main='s'):
self.main = main | UTF-8 | Python | false | false | 85 | py | 6 | Weather.py | 5 | 0.588235 | 0.588235 | 0 | 4 | 20.5 | 33 |
PGCodehub/Image_Classification_Project | 13,804,024,913,357 | d22ede76ed32e301286fa03ffdb7cf029b40ecf8 | a41dd194efad4407fae4ffb646b84e4222cd487d | /predict.py | 24ad1d3b01e3218f46950b614af4dc0b577a12af | []
| no_license | https://github.com/PGCodehub/Image_Classification_Project | f457c6eb7f88f953b9cf3f6409001abe3d0b5dd6 | af1646588abda01f1a43f999ae5c9db3a31ccc2a | refs/heads/master | 2020-04-25T12:05:41.235293 | 2019-02-26T19:04:58 | 2019-02-26T19:04:58 | 172,767,284 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Created by pramod G
#All imports
from time import time,sleep
import argparse
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import json
#for keeping workspace live during non activity
from workspace_utils import active_session
# Main program function defined below
def main():
start_time = time()
#taking the input
in_arg = get_input_args()
#print(in_arg.hidden_units, in_arg.dir)
#Set which device to run on
if in_arg.gpu:
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
print("Sorry but there is no gpu available" )
print("Program is terminated")
exit()
else:
device = torch.device("cpu")
#Load the desired model
loaded_model = loadCheckpoint(in_arg.checkpointpath,device)
#print("Accuracy of newly loaded model :")
#Acurracy(loaded_model,device)
#read catagories names
catnames = in_arg.category_names
with open(catnames, 'r') as f:
cat_to_name = json.load(f)
#preprocess the data
probs, classes = predict(in_arg.img, loaded_model, device , in_arg.top_k )
probs = probs.cpu()
probs = probs.numpy()
Topprob = probs.argmax()
print("The most likely image class is {} and it's associated probability is {}".format(classes[Topprob],probs[0][Topprob]))
print("The top {} classes along with associated probabilities are {}, {}".format(in_arg.top_k ,classes,probs))
TopKClasses = [cat_to_name[clas] for clas in classes]
for i,clas in enumerate(TopKClasses):
print(" The top {} class probably is {}".format(i+1,clas))
end_time = time()
ttime = end_time - start_time
hh = int(ttime // 3600)
mm = int((ttime % 3600) // 60)
ss = int((ttime % 3600) % 60)
tot_time = "{}:{}:{}".format(hh,mm,ss)
print("\n** Total Elapsed Runtime:", tot_time)
def get_input_args():
parser = argparse.ArgumentParser(description = " This program is for predicting when given a image by specifying path to Imagefile and a checkpoint to create model from ")
parser.add_argument( 'img',type = str, default = 'flowers/test/100/image_07899.jpg', metavar='' , help = 'This is to specify Path to the imagefile need to be predicted (default if running from workspace- \'paind-project/flowers/test/100/image_07899.jpg\')')
parser.add_argument( 'checkpointpath', type = str, default = 'CLcheckpoint.pth', metavar='' , help = 'This is to specify checkpoint from which tained model will be loaded (default-\'CLcheckpoint.pth\')')
parser.add_argument( '--category_names',type = str, default = 'cat_to_name.json', metavar='' , help = 'This is to specify to load a JSON file that maps the class values to other category names (default-\'cat_to_name.json\')')
parser.add_argument( '--top_k', type = int, default = 5, metavar='' , help = 'This is to specify no of top probabilities you want to predict (default-\'5\')')
#for flags like to be trained on gpu or not like that
group = parser.add_mutually_exclusive_group()
group.add_argument( '--gpu', action = 'store_true' , help = 'This is to set the flag if you want to use gpu or not(default if not specified-\'cpu\'')
arg = parser.parse_args()
return arg
def preprocess(dir):
#transforms for testing sets
Pre_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
image = Image.open(dir)
# Load the datasets with ImageFolder
PreImage = Pre_transforms(image)[:3,:,:]
return PreImage
def predict(image_path, model , device ,topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# TODO: Implement the code to predict the class from an image file
image = preprocess(image_path)
#print(image.shape)
image.unsqueeze_(0)
#print(image.shape)
image = image
image = image.to(device)
#model.type(torch.DoubleTensor)
model.to(device)
model.eval()
with torch.no_grad():
output = model.forward(image)
prob = torch.exp(output)
probs , indices = torch.topk(prob , topk)
indice = indices.cpu()
indice = indice.numpy()[0]
# Convert indices to classes
idx_to_class = {val: key for key, val in model.class_to_idx.items()}
#print(idx_to_class)
classes = [idx_to_class[index] for index in indice]
return probs , classes
#function that loads a checkpoint and rebuilds the model
def loadCheckpoint(filepath,device):
if device == 'cuda:0':
checkpoint = torch.load(filepath)
else:
checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)
arch = checkpoint['model']
model = models.__dict__[arch](pretrained=True)
if(arch == 'resnet50'):
model.fc = checkpoint['classifier']
if(arch == 'densenet121'):
model.classifier = checkpoint['classifier']
if(arch == 'vgg16'):
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_Dict'])
#optimizer = checkpoint['optimizer_state_dict']
epochs = checkpoint['epochs']
model.class_to_idx = checkpoint['class_to_idx']
for param in model.parameters():
param.requires_grad = False
#model.to()
print("The model is loaded from {} file..".format(filepath))
return model
#Defining Classifier with network class style
class Network(nn.Module):
def __init__(self,input_size, hiddenlayers, output_size, drop_p = 0.5):
super().__init__()
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hiddenlayers[0])])
layersizes = zip(hiddenlayers[:-1],hiddenlayers[1:])
#print(layersizes)
self.hidden_layers.extend([nn.Linear(h1,h2) for h1,h2 in layersizes])
self.output = nn.Linear(hiddenlayers[-1],output_size)
self.dropout = nn.Dropout(p = drop_p)
def forward(self , x):
for lin in self.hidden_layers:
x = F.relu(lin(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim= 1)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 6,961 | py | 3 | predict.py | 2 | 0.601063 | 0.588278 | 0 | 216 | 31.226852 | 262 |
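An illustrative invocation, using the script's own default paths and options: `python predict.py flowers/test/100/image_07899.jpg CLcheckpoint.pth --top_k 5 --gpu` (pass `--gpu` only when a CUDA device is available).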
ByteLorde/RSEnterprise | 13,958,643,741,172 | 044256da2e98d9101c671edc4e7b257061064509 | f4fcceabcadc8dfb4d06cad2e8a5a6535253c0b5 | /src/base/modules/Drawable/Label/Label.py | b2484e6d65d936cdc802b29239a121b3ca8e94ea | []
| no_license | https://github.com/ByteLorde/RSEnterprise | 7fa56ce9a78ed6e6609a1045cc2c805ab277000d | 7ceb1e46339dc6c8f0e212fcfc95edf25aac7fcf | refs/heads/master | 2020-03-23T14:15:37.691099 | 2019-09-04T18:15:16 | 2019-09-04T18:15:16 | 141,665,879 | 0 | 1 | null | false | 2018-08-08T02:14:01 | 2018-07-20T05:05:32 | 2018-07-25T01:05:42 | 2018-08-08T02:14:01 | 1,679 | 0 | 1 | 0 | Python | false | null | import cv2
from src.base.modules.Drawable.Color.Color import Color
class Label:
HERSHEY_SIMPLEX = 0
HERSHEY_PLAIN = 1
HERSHEY_DUPLEX = 2
HERSHEY_COMPLEX = 3
HERSHEY_TRIPLEX = 4
HERSHEY_COMPLEX_SMALL = 5
HERSHEY_SCRIPT_SIMPLEX = 6
HERSHEY_SCRIPT_COMPLEX = 7
FONT_ITALIC = 16
def __init__(self, text, color=Color.GREEN, style=HERSHEY_SIMPLEX, scale=1, thickness=1):
self.text = text
self.color = color
self.style = style
self.scale = scale
self.thickness = thickness
def getText(self):
return self.text
def setText(self, text):
self.text = text
def getTextSize(self):
return cv2.getTextSize(self.text, self.style, self.scale, self.thickness)
def drawComponent(self, image, x, y):
cv2.putText(image, self.text, (x, y), self.style, self.scale, self.color, self.thickness)
def getThickness(self):
return self.thickness
def setThickness(self, thickness):
self.thickness = thickness
def getScale(self):
return self.scale
def setScale(self, scale):
self.scale = scale
def getStyle(self):
return self.style
def setStyle(self, style):
self.style = style
def getColor(self):
return self.color
def setColor(self, color):
self.color = color
| UTF-8 | Python | false | false | 1,380 | py | 33 | Label.py | 32 | 0.621739 | 0.61087 | 0 | 59 | 22.389831 | 97 |
typemegan/Python | 18,124,762,005,692 | bfb6cdd76314a13dba63f9e91d7096ff7d8805fe | af4d2a0cefb304b5f38a4f0c170b799d7bb8f059 | /CorePythonProgramming_2nd/ch02_start/print_test.py | 839073f2c92b972766270b7df77ae28380b530b7 | []
| no_license | https://github.com/typemegan/Python | e577842447c81dcb980198b33f844b4fab888517 | 9bfc48ddc3b050e629a9b84b41df57f35f990d2e | refs/heads/master | 2020-12-13T18:25:13.379953 | 2019-06-07T04:01:24 | 2019-06-07T04:01:24 | 37,420,605 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf-8
num = 10
decorate = '*'*num + 'ex%d' + '*'*num
#-----------------ex8: the comma-----------------
title = decorate % 8
print title
print "Mary had a little lamb."
print "Its fleece was white as %s." % 'snow'
print "And everywhere that Mary went."
print "."*10
end1 = 'C'
end2 = 'h'
end3 = 'e'
end4 = 'e'
end5 = 's'
end6 = 'e'
end7 = 'B'
end8 = 'u'
end9 = 'r'
end10 = 'g'
end11 = 'e'
end12 = 'r'
#print without a trailing newline: end the print statement with a comma
#when there is too much to print, a trailing comma ',' lets the output continue in the next statement
print end1 + end2 + end3 + end4 + end5 + end6,
#the trailing comma suppresses the newline; the next print continues on the same line after a space
print end7 + end8 + end9 + end10 + end11 + end12
print "hello",
print "Python",
print "!"
print ""
#------------------ex9: triple quotes---------------
title = decorate % 9
print title
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "\nJan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days:",days
print "Here are the months:",months
print "Here are the months:%r" % months
#output: '\nJan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug'
#%r prints the raw repr, so escape sequences are shown literally instead of being interpreted
#multi-line input with multi-line output
#a matched pair of triple double-quotes """xxx""" encloses the whole block
print """
There's something on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want,or 5, or 6.
"""
#multi-line input, but the output is a single line (Python 2 prints the tuple)
print(
"test",
"example",
"is right?"
)
#even with '\n' added, it still prints as a single line
print(
"hello\n",
"Python\n",
"how are you today\n"
)
print ""
#-----------------ex10: escape characters---------------
title = decorate % 10
print title
#escaping single/double quotes
print "single quote 'i'" #‘i’
print "double quote -"u"." #u没有打印出来
print "single quote \'i\'" #‘i’
print "double quote -\"u\"." #-"u".
print ""
print "I am 6'2\" tall."
print 'I am 6\'2" tall.'
print ""
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\na line."
backslash_cat = "I'm \\ a \\ cat."
#inside triple quotes, quotes do not need to be escaped
#triple single quotes and triple double quotes behave the same
fat_cat = '''
I'll do a list:
\t* Cat food
\t* Fishies
\t* Ca[nip\n\t* Grass
\t* 'cake'
\t* 'dessert:"chocolate"'
\t* "drinks:'milk'"
\t* "..."
'''
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
quote_escape = "escape single:\'%s\'\rescape double:\"my little %s.\"\n"
print '%%r-:%r' % (quote_escape % ("cat","cat"))
print "%%s-:%s" % (quote_escape % ("dog",'dog'))
#while True:
# for i in ["/","-","|","\\","|"]:
# print "%s\r" % i,
| UTF-8 | Python | false | false | 2,511 | py | 274 | print_test.py | 263 | 0.591264 | 0.568276 | 0 | 122 | 16.819672 | 72 |
fossabot/Airflow-DAG | 7,679,401,533,495 | dbfc6c003c1468115b1a63beb9d9a4ba791f5b70 | 09eebd3cadc58f0765602d29f603db4528f2873e | /dags/bash_test_triggers.py | b49ffb5cc86d228aae535787c603cbac57da3517 | [
"Apache-2.0"
]
| permissive | https://github.com/fossabot/Airflow-DAG | ef52675b770870e36e579e8816a19626f00deac9 | feb015251093afbb2d51325e56f7764a1ad8cdc9 | refs/heads/master | 2020-06-17T07:02:04.223922 | 2019-07-08T15:22:39 | 2019-07-08T15:22:39 | 195,838,983 | 0 | 0 | Apache-2.0 | true | 2019-07-08T15:22:34 | 2019-07-08T15:22:34 | 2019-07-06T21:17:41 | 2019-07-06T21:17:40 | 45 | 0 | 0 | 0 | null | false | false | from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
# Define the DAG...
# Create the default arguments
from airflow.operators.dagrun_operator import TriggerDagRunOperator
default_args = {
'owner': 'hashmap-airflow',
'depends_on_past': False,
'start_date': datetime(2019, 7, 3),
'retries': 0,
'retry_delay': timedelta(minutes=1),
}
# create the DAG instance
dag_layer_1 = DAG(dag_id='hw_bash_layer_1',
default_args=default_args,
schedule_interval=timedelta(1))
dag_layer_2 = DAG(dag_id='hw_bash_layer_2',
default_args=default_args,
schedule_interval=None)
dag_layer_3 = DAG(dag_id='hw_bash_layer_3',
default_args=default_args,
schedule_interval=None)
# Set start dates
# These are passed in as args. Seems that they aren't sent that way is a bug.
dag_layer_1.start_date = default_args['start_date']
dag_layer_2.start_date = default_args['start_date']
dag_layer_3.start_date = default_args['start_date']
# This path is used in the code below. This should identify where the code is
# being executed from.
path = '/Users/johnaven/Sandbox/bash_dag_example'
# STDOUT 'Hello World' with redirect to out.txt
create_file= BashOperator(
task_id='save-bash',
bash_command='echo "Hello John" > {path}/out_tr.txt'.format(path=path)
)
# print the contents of out.txt to STDOUT
print_file=BashOperator(
task_id='print-file',
bash_command='cat {path}/out_tr.txt'.format(path=path)
)
# clone/copy the data into another file
copy_file=BashOperator(
task_id='copy-file',
bash_command='cp {path}/out_tr.txt {path}/out_tr_copy.txt'.format(path=path)
)
# delete the files that were created
delete_files = BashOperator(
task_id='delete-files',
bash_command='rm -f {path}/out_tr.txt && rm -f {path}/out_tr_copy.txt'.format(path=path)
)
# Create Triggers
trigger_layer_2 = TriggerDagRunOperator(
task_id='trigger-layer2',
trigger_dag_id='hw_bash_layer_2'
)
trigger_layer_3 = TriggerDagRunOperator(
task_id='trigger-layer-3',
trigger_dag_id='hw_bash_layer_3'
)
# Assign the operators to a DAG
create_file.dag = dag_layer_1
trigger_layer_2.dag = dag_layer_1
print_file.dag = dag_layer_2
copy_file.dag = dag_layer_2
trigger_layer_3.dag = dag_layer_2
delete_files.dag = dag_layer_3
# Set any upstream requirements - e.g. especially for the triggers
trigger_layer_2.set_upstream(task_or_task_list=[create_file])
trigger_layer_3.set_upstream(task_or_task_list=[print_file,
copy_file])
| UTF-8 | Python | false | false | 2,667 | py | 5 | bash_test_triggers.py | 2 | 0.67754 | 0.664792 | 0 | 90 | 28.633333 | 92 |
DaiJitao/web_spammer_detection | 15,693,810,537,107 | 2a9d6814c583645399bdfb8a9f75b5443eb5a202 | a04c62fe53387f41e893c86d3603f65ff1a70ae0 | /server/data_annotation/semantic_similarity.py | bf6650eda6d2ef46c5bf211ce9ba7427dc9d9551 | []
| no_license | https://github.com/DaiJitao/web_spammer_detection | 233f2b228cff6a1c71745179ee313f9daaf287cb | 5d6c72dbdabdbb97ae0bf05468838a435dc4b3a5 | refs/heads/master | 2020-05-25T23:29:21.309615 | 2019-07-22T01:01:07 | 2019-07-22T01:01:07 | 188,034,797 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from conf.config import stop_words_file, fanChengCheng_db, redis_host, redis_port, incsv_fanChengCheng, zhaiTianLin_db, \
incsv_haiTianLin, jueDiQiuSheng_db, incsv_jueDiQiuSheng, zhangDanFeng_db, incsv_zhangDanFeng
import jieba_fast as jieba
from tools.utils import list_all_users_text, RedisClient, list_all_users, Indicators
import redis
import pickle
''' Connect to Redis '''
client = redis.Redis(host=redis_host, port=redis_port, db=fanChengCheng_db, decode_responses=True)
''' Data annotation '''
class SemanticSimilarity(object):
def __init__(self):
pass
def cut(self, text):
'''
Tokenize the given text and remove stop words.
:param text:
:return:
'''
try:
d = [line.rstrip() for line in open(stop_words_file, mode='r', encoding='utf-8')] # stop words
stop_words = {}.fromkeys(d)
cut_words = jieba.cut(text)
except Exception as e:
print(e)
cut_words_clean = []
for seg in cut_words:
if seg not in stop_words:
cut_words_clean.append(seg) # keep only tokens that are not stop words
return cut_words_clean
def common_words(self, cutwds_lst1, cutwds_lst2):
'''
Find the words shared by the two token lists
:param cutwds_lst1:
:param cutwds_lst2:
:return: []
'''
cmn_words = []
tmp = {}.fromkeys(cutwds_lst2)
for words in cutwds_lst1:
if words in tmp:
cmn_words.append(words)
return cmn_words
def short_words_num(self, lst1, lst2):
return min(len(lst1), len(lst2))
def ratio(self, texts, thred=0.8):
'''
Compute pairwise semantic repetition ratios between the texts
:param texts: [ doc, doc ]
:return:
'''
cut_texts = []
# tokenize all texts
for text in texts:
seg = self.cut(text)
cut_texts.append(seg)
ratios = [] # ratios for this user
size = len(cut_texts)
for i in range(size):
for j in range(i + 1, size):
common_words_ = self.common_words(cut_texts[i], cut_texts[j])
n = self.short_words_num(cut_texts[i], cut_texts[j])
if n != 0:
ratio = len(common_words_) / n
else:
ratio = 0.0
if ratio > thred:
ratio = float("%.3f" % ratio)
ratios.append(ratio)
return ratios
class EventSemanticSim():
''' Compute the semantic repetition ratio for each event '''
def __init__(self, user_file, user_db):
self.user_file = user_file
self.user_client = RedisClient(host=redis_host, port=redis_port, db=user_db)
def save_redis(self):
ss = SemanticSimilarity() # initialize
all_users = list_all_users(self.user_file)
for key in all_users:
texts = list_all_users_text(self.user_file, key) # fetch all texts of this user
rs = ss.ratio(texts)
if len(rs) > 0:
rs = [str(i) for i in rs] # convert to strings
temp = ",".join(rs)
else:
temp = "低于80%"
uid = str(key) + "_" + Indicators.semantic
# print(uid, " rs:", rs, " temp:", temp)
self.user_client.set(uid, temp)
print("写入语义重复率redis成功!")
if __name__ == "__main__":
# event = EventSemanticSim(user_file=incsv_fanChengCheng, user_db=fanChengCheng_db)
# event.save_redis()
event = EventSemanticSim(user_file=incsv_zhangDanFeng, user_db=zhangDanFeng_db)
event.save_redis()
event = EventSemanticSim(user_file=incsv_haiTianLin, user_db=zhaiTianLin_db)
event.save_redis()
event = EventSemanticSim(user_file=incsv_jueDiQiuSheng, user_db=jueDiQiuSheng_db)
event.save_redis() | UTF-8 | Python | false | false | 3,808 | py | 27 | semantic_similarity.py | 18 | 0.546652 | 0.540889 | 0 | 117 | 30.153846 | 121 |
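For intuition (illustrative only): the ratio computed by SemanticSimilarity.ratio is the number of shared tokens divided by the length of the shorter token list, for example:

```python
a = ['fast', 'delivery', 'great', 'quality', 'recommend']   # made-up token lists,
b = ['great', 'quality', 'recommend', 'buy']                # stop words already removed
# shared tokens: 'great', 'quality', 'recommend' -> 3; the shorter list has 4 tokens
ratio = 3 / 4   # 0.75, below the 0.8 threshold, so it would not be recorded
```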
sanyamjain335/SNS-Assignment | 14,293,651,166,186 | 0bf98e82a9de440af428448c3e633aee30776ad0 | 5d2c6c4072510d1883ee30ddf7da2c263598c47c | /serverlib.py | 503371a426ed80babb4ebb5ea28fca97cfb24437 | []
| no_license | https://github.com/sanyamjain335/SNS-Assignment | 0ce1f3d2433872b9b4d97d0f3ca1e4684d826654 | 3e5d4b39eba7b8c6f46f5ce5676570bcd50487e8 | refs/heads/main | 2023-02-25T02:46:41.875400 | 2021-01-27T13:34:51 | 2021-01-27T13:34:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import clientlib
import socket,pickle
import json
import random
import crypto
import time
HOST = '127.0.0.1'
PORT = 54005
class group:
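    # A chat group: keeps a member-name -> port map and relays text/file messages to every member.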
def __init__(self,name):
self.grname = name
self.membercount=0
self.memberdic = {}
self.nounce=str(random.randint(0,655365))
def addmember(self,name,port):
self.memberdic[name]=port
self.membercount=self.membercount+1
return self.nounce
def getportlist(self):
return [self.memberdic[name] for name in self.memberdic.keys()]
def message(self,conn,data):
if data['type'] =='file':
chunk_array=[]
while(True):
msg=conn.recv(1024)
if not msg:
break
chunk_array.append(msg)
for port in self.getportlist():
if port==data['initiator-port']:
continue
client_sock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client_sock.connect(('127.0.0.1',port))
messageObj={}
messageObj['groupname'] = self.grname
if data['type'] =='text':
messageObj['type'] = 'text'
messageObj['encrypted'] = data['msg']
client_sock.sendall(pickle.dumps(messageObj))
else:
messageObj['type'] = 'file'
messageObj['filename']=data['filename']
client_sock.sendall(pickle.dumps(messageObj))
for item in chunk_array:
time.sleep(1)
client_sock.sendall(item)
            client_sock.close()
        if data['type']=='file':
            chunk_array.clear()  # clear only after the file has been relayed to every member
class server:
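    # Central chat server: registers clients, manages groups, and dispatches requests received over TCP.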
def __init__(self):
self.s= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
self.s.bind((HOST,PORT))
self.clientlist={}
self.grouplist={}
self.s.listen()
while True:
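            # Each accepted connection delivers one pickled request dict; its 'choice' field selects the action.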
conn,addr=self.s.accept()
data = pickle.loads(conn.recv(1024))
if data['choice']=='signin':
if data['name'] in self.clientlist.keys() and self.clientlist[data['name']]['pswd']==data['pswd'] and 'online' not in self.clientlist[data['name']].keys():
self.clientlist[data['name']]['online']=1
conn.send(b"1")
else:
conn.send(b"0")
if data['choice']=='signup':
if(data['name'] in self.clientlist.keys()):
conn.send(b'0')
else:
self.clientlist[data['name']]=data
print(data['name'])
conn.send(b"1")
if data['choice']=='get-client-port':
print(data['name'])
print(self.clientlist[data['name']])
data['port'] = self.clientlist[data['name']]['port']
conn.sendall(pickle.dumps(data))
if data['choice']=='join-group':
if data['groupname'] in self.grouplist.keys():
self.grouplist[data['groupname']].addmember(data['name'],data['port'])
conn.sendall(self.grouplist[data['groupname']].nounce.encode())
else:
g = group(data['groupname'])
g.addmember(data['name'],data['port'])
self.grouplist[data['groupname']]=g
conn.sendall(g.nounce.encode())
print([name for name in self.grouplist.keys()])
if data['choice']=='list-group':
print([name for name in self.grouplist.keys()])
groupstring = [[self.grouplist[name].grname,self.grouplist[name].membercount] for name in self.grouplist.keys()]
conn.sendall(str(groupstring).encode())
if data['choice']=='message-group':
print(data)
g = self.grouplist[data['groupname']]
if(data['name'] in g.memberdic.keys()):
g.message(conn,data)
conn.close()
| UTF-8 | Python | false | false | 4,170 | py | 11 | serverlib.py | 6 | 0.509353 | 0.49952 | 0 | 103 | 39.31068 | 171 |
GuilhermeVBeira/Aulas-Django | 5,076,651,351,419 | 6edfb7e032f7c49a38e8e464c7e6e7df6747afe3 | af3eae2d55e83622bdbeb2188079d2feb98eb617 | /aula11/apps.py | 3fe432c7a4d3917373e18b06eb82b338d9f30f86 | []
| no_license | https://github.com/GuilhermeVBeira/Aulas-Django | 16d1582040d83dd5b6b8f6194a8261be63a95569 | 80e784fdc2d8a6903b44463f8b4ade514bd809f1 | refs/heads/master | 2022-12-06T09:03:27.858413 | 2021-06-09T19:32:36 | 2021-06-09T19:32:36 | 240,728,073 | 2 | 2 | null | false | 2022-11-22T05:26:37 | 2020-02-15T14:36:02 | 2021-06-09T19:32:39 | 2022-11-22T05:26:34 | 981 | 2 | 0 | 2 | Python | false | false | from django.apps import AppConfig
class Aula11Config(AppConfig):
name = "aula11"
def ready(self):
import aula11.signals
| UTF-8 | Python | false | false | 139 | py | 87 | apps.py | 75 | 0.690647 | 0.647482 | 0 | 8 | 16.375 | 33 |
massimo-nocentini/microkanrenpy | 10,024,453,677,728 | 63eb372d08729adb11b5de4c71f5968728509e00 | 6960e497327f51d99e82acf899fa45b4bc38af62 | /src/sexp_test.py | afba8136c973807a09072b3644012801ad5ef273 | []
| no_license | https://github.com/massimo-nocentini/microkanrenpy | 6a635f02d6bb645e1d0eb0b2abc5282934a49589 | 62c71ae015c0ecc0330d70a2eeb6084430a5bb47 | refs/heads/master | 2021-06-07T06:02:06.137934 | 2020-12-04T10:47:31 | 2020-12-04T10:48:03 | 90,005,342 | 11 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from functools import partialmethod
from muk.sexp import *
class sexp_tests(unittest.TestCase):
def test_null_list(self):
self.isomorphism(l=[], c=[])
def test_singleton_proper_list_to_cons(self):
self.isomorphism(l=[1], c=cons(1, []))
def test_plain_proper_list_to_cons(self):
self.isomorphism(l=[1,2,3], c=cons(1, cons(2, cons(3, []))))
def test_plain_improper_list_to_cons(self):
self.isomorphism(l=(1,2,3), c=cons(1, cons(2, 3)))
def test_nested_improper_list_to_cons(self):
self.isomorphism(l=(1,[2,3], 4), c=cons(1, cons(cons(2, cons(3, [])), 4)))
def test_more_nested_improper_list_to_cons(self):
self.isomorphism(l=([3],(4,5), 6), c=cons(cons(3, []), cons(cons(4, 5), 6)))
def test_shadow_proper_list_using_improper_list_notation(self):
# pay attention, this is not an isomorphism, the next test shows the
# natural way of writing, without shadowing. The broken direction is
# represented by function `cons_to_list` which doesn't shadow objs it
# produces.
self.assertEqual(list_to_cons(([3],(4,5), [6])), cons(cons(3, []), cons(cons(4, 5), cons(6, []))))
def test_more_nested_improper_lists_into_proper_list_to_cons(self):
self.isomorphism(l=[[3],(4,5), 6], c=cons(cons(3, []), cons(cons(4, 5), cons(6, []))))
def test_invalid_improper_list(self):
with self.assertRaises(ImproperListError):
list_to_cons(l=(3,))
def test_invalid_improper_cons(self):
with self.assertRaises(ImproperListError):
cons_to_list(c=cons(3, ()))
def isomorphism(self, l, c):
self.assertEqual(c, list_to_cons(l))
self.assertEqual(l, cons_to_list(c))
def test_tuple_wrapping_and_ctor_call(self):
class A(tuple):
__int__ = partialmethod(sum)
a = (1,2,3,4) # vanilla tuple obj
self.assertEqual(tuple, type(a))
self.assertEqual(A, type(A(a)))
self.assertEqual(10, int(A(a)))
| UTF-8 | Python | false | false | 2,061 | py | 21 | sexp_test.py | 7 | 0.60165 | 0.575449 | 0 | 56 | 35.714286 | 106 |
yuexishuihan/yuexishuihan.github.io | 6,030,134,100,921 | 590f0e66f61cbc5d93a1d95342fe1e31b0a6c6fc | 0d437f1bcedbdf4e5e0e85d325062d6dc09825d6 | /Socket通信/udp聊天_1.py | 62b1fba846d6337957fd982ea6f082e5f2632ae3 | []
| no_license | https://github.com/yuexishuihan/yuexishuihan.github.io | dbe862cea471d756d7770168aab9b82794c63a96 | ad9af9cc9481e0fdd4695b8c872f5c2ddd9ea51b | refs/heads/master | 2020-04-09T06:31:27.285478 | 2019-02-27T08:31:31 | 2019-02-27T08:31:31 | 160,116,540 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
def send_msg(udp_socket):
    '''Send a message'''
    # get the peer's IP/PORT
    dest_ip = input("Enter the peer IP: ")
    dest_port = int(input("Enter the peer port: "))
    # the same socket can both send and receive
    # 2. send data with the socket
    # read the message from the keyboard
    send_date = input("Enter the message to send: ")
    udp_socket.sendto(send_date.encode("utf-8"),(dest_ip,dest_port))
def recv_msg(udp_socket):
    '''Receive a message'''
    # 3. receive data with the socket
    recv_data = udp_socket.recvfrom(1024)
    print("%s:%s" % (str(recv_data[1]),recv_data[0].decode("utf-8")))
def main():
    # 1. create a UDP socket
    udp_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    # bind the local address (bind() takes a single (host, port) tuple)
    udp_socket.bind(("",8080))
    # main loop
    while True:
        print("-----xx chat room------")
        print("1. send a message")
        print("2. receive a message")
        print("0. quit the chat room")
        op = input("Enter an option number: ")
        if op == "1":
            # send
            send_msg(udp_socket)
        elif op == "2":
            # receive and display
            recv_msg(udp_socket)
        elif op == "0":
            break
        else:
            print("Invalid input, please try again...")
    # 4. close the socket
    udp_socket.close()
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 1,382 | py | 57 | udp聊天_1.py | 55 | 0.506318 | 0.486462 | 0 | 55 | 19.163636 | 69 |
wlWarren/mail_classifizer | 3,530,463,141,284 | 207730e79a2c2a57bfa23dd338c0c3b309663b9a | 959983112b37db56c0ee7f0e3940f9aad99fd169 | /word_vector.py | 037ae9ef56a1b2ee2667adb855c53c546f2fbea5 | []
| no_license | https://github.com/wlWarren/mail_classifizer | 3e102d89e02e241faf2be78fb84cc3e274ee6716 | 68202f7b88d229e22a309cfe24501fa81a5006be | refs/heads/master | 2021-08-30T05:36:09.306868 | 2017-12-16T06:28:00 | 2017-12-16T06:28:00 | 114,438,000 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding:utf-8
import jieba
import jieba.posseg as pseg
import sklearn.feature_extraction.text
import json
from scipy import sparse, io
from sklearn.externals import joblib
# Count-based (non tf-idf) word vectors
class Counter_Vectorizer(sklearn.feature_extraction.text.CountVectorizer):
def build_analyzer(self):
def analyzer(doc):
            # strip punctuation
words = pseg.cut(doc)
new_doc = ''.join(w.word for w in words if w.flag != 'x')
words = jieba.cut(new_doc)
return words
return analyzer
# Build word vectors with tf-idf
class TfidfVectorizer(sklearn.feature_extraction.text.TfidfVectorizer):
    def build_analyzer(self):
        # texts must be tokenized before the word vectors are built
        def analyzer(doc):
            # strip punctuation
words = pseg.cut(doc)
new_doc = ''.join(w.word for w in words if w.flag != 'x')
words = jieba.cut(new_doc)
return words
return analyzer
# Build the word vectors and persist them
def vector_word():
with open('RawData/train_content_5000.json', 'r') as f:
content = json.load(f)
with open('RawData/train_label_5000.json', 'r') as f:
label = json.load(f)
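    # keep at most 2000 features; ignore terms in fewer than 2 texts or in more than 80% of them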
vec_tfidf = TfidfVectorizer(min_df=2, max_df=0.8,max_features=2000)
tfidf = vec_tfidf.fit(content)
    # persist the fitted vectorizer model
# joblib.dump(tfidf,'model/word_vector_model_60w.pkl')
data_tfidf = tfidf.transform(content)
data_tfidf_dense = data_tfidf.todense()
name_tfidf_feature = vec_tfidf.get_feature_names()
io.mmwrite('XGBoost/word_vector/word_vector.mtx', data_tfidf)
    '''
    # store the sparse matrix
    io.mmwrite('word_vector/word_vector.mtx', data_tfidf)
    with open('word_vector/train_label.json', 'w') as f:
        json.dump(label, f)
    # save the feature words
    with open('word_vector/vector_type.json', 'w') as f:
        json.dump(name_tfidf_feature, f)
    '''
def dispose_new_doc():
tfidf = joblib.load('word_vector_model.pkl')
    doc = '您好!紫荆x号本周日x日妇女节有活动,女士到场都有花送,小孩有礼物,下午x:xx还会有抽奖活动哦,有兴趣可过来玩噢!联系人:黄秀秀。x'  # sample Chinese SMS text kept verbatim as test data
transform_document = [doc]
new_data_tfidf = (tfidf.transform(transform_document)).todense()
print (new_data_tfidf)
if '__main__' == __name__:
vector_word()
print ('word_vector Finish')
# dispose_new_doc()
| UTF-8 | Python | false | false | 2,415 | py | 13 | word_vector.py | 8 | 0.633318 | 0.625057 | 0 | 68 | 31.044118 | 79 |
dellielo/katatest | 3,221,225,484,093 | 272dd0edf4b41ad62cb7879172ffc613beb7ed51 | b882ffe25d7aadd0286bb9f00d8d7df0b3ad29db | /tests/test_katatest.py | b53c014cb0fdbbfb13fde097d480c2874c57b8f3 | [
"MIT"
]
| permissive | https://github.com/dellielo/katatest | 36a503a0fa5790fa11f567bb11c9fd939f210aaa | 83f4499a0e3f9d950ebc6c8bd02604181836d23a | refs/heads/master | 2020-09-09T15:07:34.091054 | 2019-11-18T13:09:46 | 2019-11-18T13:09:46 | 221,474,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from katatest.cli import main
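# NOTE: `assert_equal` and `price` are used below but never imported here; the tests assume
# they are available (e.g. `from nose.tools import assert_equal` and the kata's own `price`
# function -- the exact module path is an assumption and depends on this repo's layout).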
def test_basics():
assert_equal(0, price([]))
assert_equal(8, price([1]))
assert_equal(8, price([2]))
assert_equal(8, price([3]))
assert_equal(8, price([4]))
assert_equal(8 * 3, price([1, 1, 1]))
def test_simple_discount():
assert_equal(8 * 2 * 0.95, price([0, 1]))
assert_equal(8 * 3 * 0.9, price([0, 2, 4]))
assert_equal(8 * 4 * 0.8, price([0, 1, 2, 4]))
assert_equal(8 * 5 * 0.75, price([0, 1, 2, 3, 4]))
def test_several_discount():
assert_equal(8 + (8 * 2 * 0.95), price([0, 0, 1]))
assert_equal(2 * (8 * 2 * 0.95), price([0, 0, 1, 1]))
assert_equal((8 * 4 * 0.8) + (8 * 2 * 0.95), price([0, 0, 1, 2, 2, 3]))
assert_equal(8 + (8 * 5 * 0.75), price([0, 1, 1, 2, 3, 4]))
def test_edge_cases():
assert_equal(2 * (8 * 4 * 0.8), price([0, 0, 1, 1, 2, 2, 3, 4]))
assert_equal(3 * (8 * 5 * 0.75) + 2 * (8 * 4 * 0.8),
price([0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3, 3,
4, 4, 4, 4]))
def test_main():
main([])
| UTF-8 | Python | false | false | 1,048 | py | 5 | test_katatest.py | 1 | 0.483779 | 0.351145 | 0 | 37 | 27.297297 | 73 |
caiwjohn/embedded_pred | 8,615,704,428,635 | 7a955cc141a044f71276acf6f740b228647d0cc6 | ec2dd6cb985439bd8f70be558900a8e2f119f691 | /get_embeddings.py | 141cea5b62e6ec098b67801c32f265d16e9ecfa3 | [
"MIT"
]
| permissive | https://github.com/caiwjohn/embedded_pred | 5512c758fe5715ea14fce2131c563c85bab2879d | 89c2d2bfb9db4589afc26dbb74faf5926fe36b84 | refs/heads/master | 2022-07-17T22:46:51.432284 | 2020-05-18T19:09:29 | 2020-05-18T19:09:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from keras.models import Model, load_model
from load_data import load_csv, get_onehot
import numpy as np
import csv
#Saves <ex_per_class> sample embeddings per class
#EDIT THESE PARAMETERS (see README)-------------------------------------
model_name = 'my_model'
num_classes = 100
ex_per_class = 100
data_file = 'my_dir/test.csv'
name_file = 'my_dir/dna_100class_names.csv' #a csv of (class number, name) for each class
out_file = 'my_dir/embed_'+model_name+'.csv'
is_dna_data = True
mask = True
mask_len = 113
seq_len = 4500
model_file = 'model_dir/'+model_name+'.h5'
#----------------------------------------------------------------------
model = load_model(model_file)
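# Truncate the network at the "lstm_2" layer: its activations are used as the embeddings.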
embed_model = Model(inputs=model.input, outputs=model.get_layer("lstm_2").output)
embed_model.summary()
counts = np.zeros(num_classes)
data = load_csv(data_file)
chosen_data = []
for (x, y) in data:
if counts[y] < ex_per_class:
chosen_data.append((x,y))
counts[y] += 1
x, y, m = get_onehot(chosen_data, None, is_dna_data=is_dna_data, seq_len=seq_len, mask_len=mask_len if mask else None)
embed = embed_model.predict([x,m] if mask else x)
print(embed.shape)
names = dict()
with open(name_file, 'r') as infile:
r = csv.reader(infile)
for row in r:
y = int(row[0])
names[y] = row[1]
with open(out_file, 'w') as outfile:
w = csv.writer(outfile)
for (i, (x, y)) in enumerate(chosen_data):
w.writerow([y,names[y]]+embed[i].tolist())
| UTF-8 | Python | false | false | 1,425 | py | 10 | get_embeddings.py | 9 | 0.633684 | 0.618947 | 0 | 52 | 26.403846 | 118 |
ten2net/Leetcode-solution | 13,365,938,256,520 | 9bc64a4a24b73344f6206f6ba0ffc58b8e991bb8 | 51f7752df6a6e2b4dcee7ea585bacf7b9cb5ea14 | /116. Populating Next Right Pointers in Each Node.py | 7a45110922629443243db1374ea90bb47eb2f6ff | [
"MIT"
]
| permissive | https://github.com/ten2net/Leetcode-solution | a9ba7235987c0fdd1860d88ae461a4ea1fb979e4 | 97e84daa2926a9cd2036e0dee36dfe5773114b15 | refs/heads/master | 2021-01-21T20:29:42.570931 | 2016-12-06T10:29:18 | 2016-12-06T10:29:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
level_order = []
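        # BFS the perfect binary tree into a flat level-order list, then link each node to the
        # next one in its level; the last node of every level gets next = None.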
from collections import deque
queue = deque()
if root is not None:
queue.append(root)
while len(queue) > 0:
top = queue.popleft()
level_order.append(top)
if top.left is not None:
queue.append(top.left)
if top.right is not None:
queue.append(top.right)
cnt = 1
level = 1
for i in range(len(level_order)):
node = level_order[i]
cnt -= 1
if cnt == 0:
node.next = None
level *= 2
cnt = level
else:
node.next = level_order[i+1] | UTF-8 | Python | false | false | 992 | py | 223 | 116. Populating Next Right Pointers in Each Node.py | 222 | 0.497984 | 0.490927 | 0 | 37 | 25.837838 | 47 |
TKhyarn/flappy_unity_api | 2,920,577,769,163 | 8dd117641619382947a46c962849336b3359c824 | f0dad14660be9c32208cbeaeae287504cad11b68 | /run.py | 3de91072667b5c1a96f16d85a74c00e1303bd17c | []
| no_license | https://github.com/TKhyarn/flappy_unity_api | 477d68f075eb6f4e1144fb09506a4718d201b1aa | 3c786b315fe09a3e2a9329a67f6287086165d0cc | refs/heads/master | 2021-08-16T22:53:34.155948 | 2017-11-20T13:27:58 | 2017-11-20T13:27:58 | 110,249,040 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!flask/bin/python
from app import app
# not suitable for prod environment
app.run(host='0.0.0.0', debug=True) | UTF-8 | Python | false | false | 110 | py | 5 | run.py | 4 | 0.745455 | 0.709091 | 0 | 4 | 26.75 | 35 |
we333/python_study | 11,381,663,368,173 | 7edc62dd1e5049e3842667052381353ed2050d3e | cf5c2fde7dc38e33457f063b807a728d137bd92e | /sql/__init__.py | 7a2f90013ec1c083a73cbae13ff989b9a2dcc909 | []
| no_license | https://github.com/we333/python_study | bde9b5f5f6953c49fba2588ceafdb31401e612de | a1c4a87c28f545b205d7d3a64393649637ab1f18 | refs/heads/master | 2018-09-10T03:33:54.635543 | 2016-07-17T10:53:57 | 2016-07-17T10:53:57 | 62,631,352 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!usr/bin/env python
#_*_ coding:utf-8 _*_
'''
Created on 2016-07-12
@author: wantone
'''
| UTF-8 | Python | false | false | 108 | py | 39 | __init__.py | 32 | 0.558824 | 0.480392 | 0 | 8 | 10.5 | 21 |
archit-dwevedi/parking_lot | 13,907,104,130,496 | 3f3e328804aff3eb5ee034e8fa65260baca9627f | e33c8af70e79622b0f4818854c13e5e55aaab437 | /parking/managers.py | a91f0d05a24d78e084a31797a37db0e7f2241e17 | []
| no_license | https://github.com/archit-dwevedi/parking_lot | a4f275b147e5cdba2a7cc364e0537815582d2737 | e6b5244ec6740b0b32c67ec2434713dcf0312931 | refs/heads/main | 2023-07-17T05:13:47.097863 | 2021-08-09T13:50:59 | 2021-08-09T13:50:59 | 394,254,205 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
class ParkingQuerySet(models.QuerySet):
def get_empty_parking_space(self, parking, vechile_type):
from .models import ParkingSpace
return ParkingSpace.objects.filter(
parking=parking,
vechile_type=vechile_type,
parked_car__isnull=True
).first()
def get_parked_car_space(self, parking, vechile):
from .models import ParkingSpace
return ParkingSpace.objects.filter(parking=parking, parked_car=vechile).first()
class ParkingManager(models.Manager):
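    # Handles park/exit operations: finds a free space of the matching vehicle type and tracks occupancy.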
def get_queryset(self):
return ParkingQuerySet(self.model, using=self._db)
def park_car(self, parking, vechile):
if not parking:
raise Exception("Parking not found")
if not vechile:
raise Exception("Vechile is not found")
parked_space = self.all().get_parked_car_space(parking, vechile)
if parked_space:
raise Exception("Car is already parked")
space = self.all().get_empty_parking_space(parking, vechile.vechile_type)
if not space:
raise Exception("No Parking spots are empty")
space.parked_car = vechile
space.save()
return space
def exit_car(self, parking, vechile):
if not parking:
raise Exception("Parking not found")
if not vechile:
raise Exception("Vechile is not found")
parked_space = self.all().get_parked_car_space(parking, vechile)
if not parked_space:
raise Exception("Car is not parked")
parked_space.parked_car = None
parked_space.save()
return parked_space
class RateManager(models.Manager):
def get_price_for_parked_car(self, parking, vechile_type, hours):
rate = self.filter(
parking=parking,
vechile_type=vechile_type,
start_hours__lte=hours,
end_hours__gte=hours
).first()
if not rate:
return 100
return rate.price
| UTF-8 | Python | false | false | 2,037 | py | 22 | managers.py | 20 | 0.617084 | 0.615611 | 0 | 66 | 29.863636 | 87 |
tzyl/hackerrank-python | 9,783,935,517,607 | 2d483fb7e5ec427defd4b0112eaa2737113c7a59 | 8862efe34f13477aafe388d30e3b8a5fc8bab253 | /week_of_code/30/candy_replenishing_robot.py | a505f80439fd9717a6e06f4686f09ffe22b588f2 | []
| no_license | https://github.com/tzyl/hackerrank-python | 71a80cbd1a0c931e5112e7c0cdb2e339c4e50eb3 | f3f2990de738403bccaedd4005368aa3456b94d3 | refs/heads/master | 2021-05-14T11:32:44.268500 | 2018-01-05T12:49:59 | 2018-01-05T12:49:59 | 116,384,579 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n, t = input().strip().split(' ')
n, t = [int(n), int(t)]
c = [int(s) for s in input().strip().split()]
candies = n
added = 0
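# Simulate the first t-1 minutes: hand out c[i] candies each minute; whenever fewer than
# 5 remain, refill the bowl back to n and count how many candies had to be added.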
for i in range(t - 1):
candies -= c[i]
if candies < 5:
added += n - candies
candies = n
print(added)
| UTF-8 | Python | false | false | 262 | py | 44 | candy_replenishing_robot.py | 44 | 0.496183 | 0.484733 | 0 | 11 | 21.818182 | 45 |
leocvml/chatBot | 9,594,956,951,913 | fdf963d0fcadf4c018a8720e7e819ac0ef2a9721 | be45b42dae8f1654051fed2187f1dabffaf132cb | /chatbotFinalProject/discriminator.py | 1975fd8724a8b23fae44ac0ff26c4e577ca1b476 | []
| no_license | https://github.com/leocvml/chatBot | a97198e83ecdb59bacd7481d56119ecc6bf548fc | 38863f7fa737e4dfb762d186d215dda75c921955 | refs/heads/master | 2021-07-05T06:04:58.287319 | 2019-01-24T03:12:10 | 2019-01-24T03:12:10 | 131,238,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append('..')
import collections
import mxnet as mx
from mxnet import autograd, gluon, init, metric, nd
from mxnet.gluon import loss as gloss, nn, rnn
from mxnet.contrib import text
import os
import random
import zipfile
demo = True
if demo:
with zipfile.ZipFile('data/aclImdb_tiny.zip', 'r') as zin:
zin.extractall('data/')
def readIMDB(dir_url, seg='train'):
pos_or_neg = ['pos', 'neg']
data = []
for label in pos_or_neg:
files = os.listdir(
'data/' + dir_url + '/' + seg + '/' + label + '/')
for file in files:
with open('data/' + dir_url + '/' + seg + '/' + label + '/'
+ file, 'r', encoding='utf8') as rf:
review = rf.read().replace('\n', '')
if label == 'pos':
data.append([review, 1])
elif label == 'neg':
data.append([review, 0])
return data
if demo:
train_data = readIMDB('aclImdb_tiny/', 'train')
test_data = readIMDB('aclImdb_tiny/', 'test')
else:
train_data = readIMDB('aclImdb/', 'train')
test_data = readIMDB('aclImdb/', 'test')
random.shuffle(train_data)
random.shuffle(test_data)
def tokenizer(text):
return [tok.lower() for tok in text.split(' ')]
train_tokenized = []
for review, score in train_data:
train_tokenized.append(tokenizer(review))
test_tokenized = []
for review, score in test_data:
test_tokenized.append(tokenizer(review))
token_counter = collections.Counter()
def count_token(train_tokenized):
for sample in train_tokenized:
for token in sample:
if token not in token_counter:
token_counter[token] = 1
else:
token_counter[token] += 1
count_token(train_tokenized)
vocab = text.vocab.Vocabulary(token_counter, unknown_token='<unk>',
reserved_tokens=None)
def encode_samples(tokenized_samples, vocab):
features = []
for sample in tokenized_samples:
feature = []
for token in sample:
if token in vocab.token_to_idx:
feature.append(vocab.token_to_idx[token])
else:
feature.append(0)
features.append(feature)
return features
def pad_samples(features, maxlen=500, padding=0):
padded_features = []
for feature in features:
if len(feature) > maxlen:
padded_feature = feature[:maxlen]
else:
padded_feature = feature
        # append PAD symbols so every sequence has the same length (maxlen).
while len(padded_feature) < maxlen:
padded_feature.append(padding)
padded_features.append(padded_feature)
return padded_features
ctx = mx.gpu()
train_features = encode_samples(train_tokenized, vocab)
test_features = encode_samples(test_tokenized, vocab)
train_features = nd.array(pad_samples(train_features, 500, 0), ctx=ctx)
test_features = nd.array(pad_samples(test_features, 500, 0), ctx=ctx)
train_labels = nd.array([score for _, score in train_data], ctx=ctx)
test_labels = nd.array([score for _, score in test_data], ctx=ctx)
# print(train_features[0])
# print(train_labels[0])
glove_embedding = text.embedding.create(
'glove', pretrained_file_name='glove.6B.100d.txt', vocabulary=vocab)
class SentimentNet(nn.Block):
def __init__(self, vocab, embed_size, num_hiddens, num_layers,
bidirectional, **kwargs):
super(SentimentNet, self).__init__(**kwargs)
with self.name_scope():
self.embedding = nn.Embedding(len(vocab), embed_size)
self.encoder = rnn.LSTM(num_hiddens, num_layers=num_layers,
bidirectional=bidirectional,
input_size=embed_size)
self.decoder = nn.Dense(num_outputs, flatten=False)
def forward(self, inputs, begin_state=None):
embeddings = self.embedding(inputs)
states = self.encoder(embeddings)
        # concatenate the hidden states of the first and the last time step.
encoding = nd.concat(states[0], states[-1])
outputs = self.decoder(encoding)
return outputs
num_outputs = 2
lr = 0.1
num_epochs = 100
batch_size = 10
embed_size = 100
num_hiddens = 100
num_layers = 2
bidirectional = True
net = SentimentNet(vocab, embed_size, num_hiddens, num_layers, bidirectional)
net.initialize(init.Xavier(), ctx=ctx)
# initialise the embedding layer weights with the pre-trained word vectors.
net.embedding.weight.set_data(glove_embedding.idx_to_vec.as_in_context(ctx))
# do not update the word vectors (the parameters of net.embedding) during training.
net.embedding.collect_params().setattr('grad_req', 'null')
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
loss = gloss.SoftmaxCrossEntropyLoss()
# def eval_model(features, labels):
# l_sum = 0
# l_n = 0
# accuracy = metric.Accuracy()
# for i in range(features.shape[0] // batch_size):
# X = features[i*batch_size : (i+1)*batch_size].as_in_context(ctx).T
# y = labels[i*batch_size :(i+1)*batch_size].as_in_context(ctx).T
# output = net(X)
# l = loss(output, y)
# l_sum += l.sum().asscalar()
# l_n += l.size
# accuracy.update(preds=nd.argmax(output, axis=1), labels=y)
# return l_sum / l_n, accuracy.get()[1]
# net.load_params('classfication.params')
# for epoch in range(1, num_epochs + 1):
# for i in range(train_features.shape[0] // batch_size):
# X = train_features[i*batch_size : (i+1)*batch_size].as_in_context(
# ctx).T
# y = train_labels[i*batch_size : (i+1)*batch_size].as_in_context(
# ctx).T
# with autograd.record():
# l = loss(net(X), y)
# l.backward()
# trainer.step(batch_size)
# train_loss, train_acc = eval_model(train_features, train_labels)
# test_loss, test_acc = eval_model(test_features, test_labels)
# print('epoch %d, train loss %.6f, acc %.2f; test loss %.6f, acc %.2f'
# % (epoch, train_loss, train_acc, test_loss, test_acc))
# net.save_params('classfication.params')
# def inference(set):
# feature = []
# for token in set:
# if token in vocab.token_to_idx:
# feature.append(vocab.token_to_idx[token])
# else:
# feature.append(0)
# return feature
#
#
# review = ['i', 'think', 'it', 'is','low']
# inf_data = inference(review)
#
#
# print(nd.argmax(net(nd.reshape(
# nd.array( inf_data, ctx=ctx),
# shape=(-1, 1))), axis=1).asscalar()) | UTF-8 | Python | false | false | 6,820 | py | 5 | discriminator.py | 3 | 0.58271 | 0.573587 | 0 | 197 | 31.949239 | 77 |
atlpatchin/bookstore | 8,366,596,340,691 | 60aa8196c85ed44311b592a47e30552a63ac0ace | 50e60e5343561cc660624b6966e52c5d78480a71 | /bookstores/comments/urls.py | 62fb6bb1773bdc9a469dc173b7bdcb4779cbcf98 | []
| no_license | https://github.com/atlpatchin/bookstore | 25917c4b70c706a4d5a2230ec9ed6a0e93a43248 | 7ef2c10af63197a1cf0d66fb1533bbf431a48066 | refs/heads/master | 2020-04-23T21:16:19.343309 | 2019-02-20T06:01:35 | 2019-02-20T06:01:35 | 171,464,859 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url
from cart import views
urlpatterns = [
]
| UTF-8 | Python | false | false | 77 | py | 10 | urls.py | 7 | 0.74026 | 0.74026 | 0 | 6 | 11.666667 | 32 |
Perf-Org-5KRepos/aerial_wildlife_detection | 1,752,346,666,487 | 3757784b54d55727e45c921df668a8e44aa14091 | a5ca5b46535d3db0a9490c9f476db4da755540c7 | /projectCreation/migrate_aide.py | 882d485b92c2d3297fb0c9d4f92604e3653b0b15 | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | https://github.com/Perf-Org-5KRepos/aerial_wildlife_detection | a6f33d3c64878373dbee7b305ad735e1ec829a78 | 7f9fc9236dd89b9ff73902e7001b5fd40d428971 | refs/heads/master | 2022-10-19T16:38:20.584294 | 2020-06-08T17:41:29 | 2020-06-08T17:41:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Run this file whenever you update AIDE to bring your existing project setup up-to-date
with respect to changes due to newer versions.
2019-20 Benjamin Kellenberger
'''
import os
import argparse
MODIFICATIONS_sql = [
'ALTER TABLE {schema}.annotation ADD COLUMN IF NOT EXISTS meta VARCHAR; ALTER TABLE {schema}.image_user ADD COLUMN IF NOT EXISTS meta VARCHAR;',
'ALTER TABLE {schema}.labelclass ADD COLUMN IF NOT EXISTS keystroke SMALLINT UNIQUE;',
'ALTER TABLE {schema}.image ADD COLUMN IF NOT EXISTS last_requested TIMESTAMPTZ;',
'ALTER TABLE {schema}.image_user ADD COLUMN IF NOT EXISTS num_interactions INTEGER NOT NULL DEFAULT 0;',
'ALTER TABLE {schema}.annotation ADD COLUMN IF NOT EXISTS autoConverted boolean;',
'ALTER TABLE {schema}.image_user ADD COLUMN IF NOT EXISTS first_checked TIMESTAMPTZ;',
'ALTER TABLE {schema}.image_user ADD COLUMN IF NOT EXISTS total_time_required BIGINT;'
]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update AIDE database structure.')
parser.add_argument('--settings_filepath', type=str, default='config/settings.ini', const=1, nargs='?',
help='Manual specification of the directory of the settings.ini file; only considered if environment variable unset (default: "config/settings.ini").')
args = parser.parse_args()
if not 'AIDE_CONFIG_PATH' in os.environ:
os.environ['AIDE_CONFIG_PATH'] = str(args.settings_filepath)
from util.configDef import Config
from modules import Database
config = Config()
dbConn = Database(config)
if dbConn.connectionPool is None:
raise Exception('Error connecting to database.')
dbSchema = config.getProperty('Database', 'schema')
# make modifications one at a time
for mod in MODIFICATIONS_sql:
dbConn.execute(mod.format(schema=dbSchema), None, None)
print('Project {} is now up-to-date for the latest changes in AIDE.'.format(config.getProperty('Project', 'projectName'))) | UTF-8 | Python | false | false | 2,030 | py | 55 | migrate_aide.py | 39 | 0.713793 | 0.709852 | 0 | 48 | 41.3125 | 171 |
StanfordAHA/lake | 7,610,682,053,653 | 2a85bfdce47b70abdd79d002247bcbd94a1c99e7 | c648f20f35a37f0330cf2cbb0cb58ff99e2dc77f | /run_lake.py | c449dc4f5971558dff857f9163c61faefb339b35 | [
"BSD-3-Clause"
]
| permissive | https://github.com/StanfordAHA/lake | 8aeef4d7d627137de96e70fa5dc2c9e27c1158da | 7ba7e047af450545636b669bb368e0cfaf3a2eb4 | refs/heads/master | 2023-09-01T02:39:32.676499 | 2023-08-07T18:25:36 | 2023-08-07T18:25:36 | 199,787,393 | 15 | 2 | BSD-3-Clause | false | 2023-08-29T18:21:26 | 2019-07-31T05:45:32 | 2022-10-13T13:09:43 | 2023-08-29T18:21:26 | 4,235 | 14 | 2 | 18 | Python | false | false | import kratos
from lake.modules.passthru import *
from lake.top.lake_top import *
from lake.modules.sram_stub import *
#lake_top = LakeTop(16)
#lake_top_src = kratos.verilog(lake_top, optimize_passthrough=False)
#print(lake_top_src["LakeTop"])
sramstub = SRAMStub(16, 512)
sramstub_src = kratos.verilog(sramstub, optimize_passthrough=False)
print(sramstub_src["SRAMStub"])
| UTF-8 | Python | false | false | 375 | py | 209 | run_lake.py | 202 | 0.765333 | 0.746667 | 0 | 12 | 30.25 | 68 |
echo9527git/jpress_selenium | 10,333,691,361,336 | 0cf61cda33a3a0f7dcb2ab2a66bcdb64634d7383 | fdd70d7c0b293d66ca88d47ba2a3d2437db051d5 | /util/my_js_utils.py | d6d5c7f1a5dc429c429b1bfb03094486b6e76da0 | []
| no_license | https://github.com/echo9527git/jpress_selenium | aa88c2cf72c606fe65cc697859c96f2ec46987a8 | a7bc4f71520de3fa8754a686f16d0e975b4cf3df | refs/heads/master | 2022-12-08T07:36:56.767594 | 2020-08-31T00:03:56 | 2020-08-31T00:03:56 | 284,938,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
from time import sleep,strftime,localtime,time
import os
def _set_index(index=None):
"""
    Normalise index: default to 0 when it is None
:param index:
:return:
"""
if index is None:
index = 0
else:
index = index
return index
def clear(driver: webdriver, css,index=None, describe=None):
"""
    Purpose: clear the contents of an input box
    :param driver: webdriver
    :param css: CSS selector
    :param index: index into the list of matched elements
    :param describe: description text
:return:
"""
js = """var elm = document.querySelectorAll("{css}")[{index}];
elm.style.border="2px solid red";
elm.value = "";""".format(css=css, index=_set_index(index))
driver.execute_script(js)
def input(value,driver: webdriver, css,index=None, describe=None):
"""
    Purpose: type a value into an input box
    :param self:
    :param value: the data to type
    :param driver: webdriver
    :param css: CSS selector
    :param index: index into the list of matched elements
    :param describe: description text
:return:
"""
js = """var elm = document.querySelectorAll("{css}")[{index}];
elm.style.border="2px solid red";
elm.value = "{value}";""".format(css=css, index=_set_index(index), value=value)
driver.execute_script(js)
def click(driver: webdriver, css,index=None, describe=None):
"""
    Purpose: web automation is often flaky and selenium sometimes cannot click an element,
    so perform the click through JavaScript instead
    :param self:
    :param driver: webdriver
    :param css: CSS selector
    :param index: index into the list of matched elements
    :param describe: description text
:return:
"""
js = """var elm = document.querySelectorAll("{css}")[{index}];
elm.style.border="2px solid red";
elm.click();""".format(css=css, index=_set_index(index))
driver.execute_script(js)
def remove_attribute(attribute,driver: webdriver, css,index=None, describe=None):
"""
    Purpose: remove any attribute from an element; mainly used to strip the readonly
    attribute from date-picker widgets
    :param attribute: an attribute of the element, e.g. readonly, value, name, ...
    :param driver: webdriver
    :param css: CSS selector
    :param index: index into the list of matched elements
    :param describe: description text
:return:
"""
# _index_(index)
js = """
var elm = document.querySelectorAll("{css}")[{index}];
elm.removeAttribute("{attr}");
""".format(css=css, index=_set_index(index), attr=attribute)
driver.execute_script(js)
def remove_attr(element, attribute,driver: webdriver):
"""
    Purpose: remove any attribute from an element
    :param element: the element whose attribute should be removed
    :param attribute: the attribute to remove
:param driver: webdriver
:return:
"""
js = """
arguments[0].removeAttribute("{attr}");
""".format(attr=attribute)
driver.execute_script(js, element)
def scroll_to_xy(driver: webdriver,x, y):
'''
    Purpose: scroll the web page by specifying x/y coordinates
    :param driver: webdriver
    :param x: distance to scroll to the right
    :param y: distance to scroll down
    :return:
    1. Scroll to a given coordinate in the document
    window.scrollTo(x-coord,y-coord )
    window.scrollTo(options)
    - x-coord is the horizontal coordinate in the document.
    - y-coord is the vertical coordinate in the document.
    - options is an object with three properties:
      - top, equivalent to y-coord
      - left, equivalent to x-coord
      - behavior, a String describing the scroll behaviour: smooth (smooth scrolling) or
        instant (jump immediately); default auto, which in practice behaves like instant
    Example:
    window.scrollTo( 0, 1000 );
    // switch the scroll behaviour to smooth scrolling
window.scrollTo({
top: 1000,
behavior: "smooth"
});
'''
js = """
window.scrollTo("{}", "{}")
""".format(x, y)
driver.execute_script(js)
# TODO: still not clear exactly how this scrolling behaves
def window_scroll(driver: webdriver,element, x, y):
"""
    Purpose: scroll inside the given element to a position
    :param driver: webdriver
    :param element: the target element
    :param x: distance to scroll to the right
    :param y: distance to scroll down
:return:
"""
js = """
arguments[0].scrollTo("{}", "{}")
""".format(x, y)
driver.execute_script(js, element)
def scroll_to_element(driver: webdriver,element):
"""
    Purpose: scroll the page so the element is centred both vertically and horizontally
:param driver:
:param element:
:return:
"""
js = "arguments[0].scrollIntoView({behavior: 'smooth', block: 'center', inline: 'center'});"
driver.execute_script(js, element)
def height_light(driver: webdriver, css,index=0):
"""
    Purpose: highlight the element currently being operated on, so users can see it
    and testers can locate problems more easily
    :param driver: webdriver
    :param css: CSS selector
    :param index: index into the list of matched elements
:return:
"""
js = """
var element = document.querySelectorAll("{css}")[{index}];
element.style.border="4px solid red";
""".format(css=css, index=index)
driver.execute_script(js)
def height_lig(driver: webdriver,element):
"""
    Purpose: highlight the given element
    :param driver: webdriver
    :param element: the element to highlight
:return:
"""
js = """
arguments[0].style.border="2px solid red";
"""
driver.execute_script(js, element)
def save_screenshot(driver: webdriver,sub_filename,element,describe=None):
"""
    Purpose: take a screenshot of the current page, save it to the screenshot directory,
    and highlight the element being operated on; the file name is current time + sub_filename + describe + .png
    :param driver: webdriver
    :param sub_filename: part of the file name; naming it after the selector is recommended
    :param element: the element to highlight
    :param describe: description of the action, also used as part of the png file name
:return:
"""
name = os.path.abspath('screenshot')+'/'
# name = "C:/Users/Administrator/PycharmProjects/haige_selenium/screenshot/"
st = strftime("%Y-%m-%d-%H-%M-%S",localtime(time()))
file_name = name+st+'-'+sub_filename+describe+".png"
height_lig(driver,element)
driver.save_screenshot(file_name); | UTF-8 | Python | false | false | 6,338 | py | 11 | my_js_utils.py | 9 | 0.615517 | 0.611494 | 0 | 195 | 25.774359 | 96 |
miruts-xz/BeXAI | 472,446,420,844 | f98af63fa28b95cc3790a5f7142cfadc77aa96bf | 48e73339e7c99b452dcdeef901ece52be3a7ac57 | /src/method.py | 4bdfb2ecbf1c3439bc3a6b97a3922abdc825decd | []
| no_license | https://github.com/miruts-xz/BeXAI | d154b035384b93a913cc963f4f15650cdccfa5b3 | 8ee51a2befc8b1d65cad17fa006d3014a03ffcaa | refs/heads/master | 2023-06-19T18:27:17.563681 | 2021-07-21T14:00:32 | 2021-07-21T14:00:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Created by mire on 7/21/21. Copyright 2021, All rights reserved.
import shap
import methods
supported_methods = {
'shap': methods.Shape,
'lime': methods.Lime,
'kernelshap': methods.KernelShap
}
class Explainer:
def __init__(self, name):
        # assert the condition itself (asserting a tuple is always truthy) against the dict of supported methods
        assert name in supported_methods, \
            f'This Method is not supported at the moment. Methods supported are {list(supported_methods.keys())}'
self.name = name
self.method = lambda clf, data: supported_methods[name](clf, data)
| UTF-8 | Python | false | false | 532 | py | 9 | method.py | 9 | 0.659774 | 0.642857 | 0 | 18 | 28.555556 | 116 |
Meet2147/yt_download | 17,428,977,288,062 | 8665f739effb25f94d6a7ca36a6da44cb0228227 | 8973cce5e27e44c13505f08b69e57dc906d2a0b9 | /app.py | bd62b2eb11659b4a8ee772a676b2583ddac96a78 | [
"MIT"
]
| permissive | https://github.com/Meet2147/yt_download | 236557468f0200c454c394193eb8b1c9ded08a1d | 3277a1d682dcb57ca30f268b0e6486762f643f09 | refs/heads/main | 2023-04-30T08:48:23.971366 | 2021-05-09T03:30:30 | 2021-05-09T03:30:30 | 365,654,704 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template, request, session, redirect, send_file, url_for
from pytube import YouTube
app = Flask(__name__)
app.config['SECRET_KEY'] = "AlmastYogi"
def download(url):
video = url.streams.first()
filepath = video.download()
return filepath
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'POST':
session['link'] = request.form.get('url')
url = YouTube(session['link'])
return render_template("see_video.html",url=url)
return render_template("index.html")
@app.route("/see_video", methods=['GET','POST'])
def see_video():
if request.method == 'POST':
url = YouTube(session['link'])
itag = request.form.get('itag')
video = url.streams.get_by_itag(itag)
filename = video.download()
return send_file(filename, as_attachment=True)
return redirect(url_for('index'))
| UTF-8 | Python | false | false | 922 | py | 2 | app.py | 1 | 0.631236 | 0.631236 | 0 | 30 | 29.733333 | 88 |
cliftonpalmer/python-diff | 11,647,951,328,977 | aeb6403fb6424bc725671b1a2407adc5583744b2 | c4d3f09408c7bbd46e8e72e063239edc0f0d9c22 | /diff.py | bff8f17954c3013729ff36b7cc9e2690b59e9866 | []
| no_license | https://github.com/cliftonpalmer/python-diff | da6fabd2499f4c70d3b7012b088b5a69bdc0bf01 | 445c4c36b15b35553988c1a8c99790e330511ffc | refs/heads/master | 2020-04-12T12:28:31.423480 | 2019-10-18T01:43:36 | 2019-10-18T01:43:36 | 162,492,760 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def index_by_newlines(a, sign):
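    # Tag every line of the text with its line number and a diff sign ('+' for right, '-' for left).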
return [ [i, v, sign] for i,v in enumerate(a.split("\n")) ]
def diff(a, b):
# separate left and right into arrays indexed by line
a = index_by_newlines(a, '-')
b = index_by_newlines(b, '+')
# remove matching lines
for va in a[:]:
for vb in b:
if va[1] == vb[1]:
a.remove(va)
b.remove(vb)
                break  # this line is matched; stop scanning b so va is not removed twice
# print in line index order with right or left symbols, + or -
c = a + b
c.sort(key=lambda v: v[0])
return c
a = """a
b
c"""
b = """a
a
x
b"""
for v in diff(a,b):
print(v[2] + v[1])
| UTF-8 | Python | false | false | 630 | py | 2 | diff.py | 1 | 0.5 | 0.492063 | 0 | 33 | 18.090909 | 66 |
JayjeetAtGithub/spack | 523,986,051,252 | 4669b07c5b5df14e9a0812549dc24c2162cc2826 | fb2cc597f319380d228fc15c4008760a82203687 | /var/spack/repos/builtin/packages/py-geocube/package.py | 123a8149e46cb1582260f56f466cffcc25657712 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
]
| permissive | https://github.com/JayjeetAtGithub/spack | c41b5debcbe139abb2eab626210505b7f930d637 | 6c2df00443a2cd092446c7d84431ae37e64e4296 | refs/heads/develop | 2023-03-21T02:35:58.391230 | 2022-10-08T22:57:45 | 2022-10-08T22:57:45 | 205,764,532 | 0 | 0 | MIT | true | 2019-09-02T02:44:48 | 2019-09-02T02:44:47 | 2019-07-12T08:48:51 | 2019-07-12T08:48:49 | 15 | 0 | 0 | 0 | null | false | false | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyGeocube(PythonPackage):
"""Tool to convert geopandas vector data into rasterized xarray data."""
homepage = "https://github.com/corteva/geocube"
pypi = "geocube/geocube-0.0.17.tar.gz"
maintainers = ["adamjstewart"]
version("0.3.2", sha256="71ff0228f1ef44e3a649d29a045ff7e2a2094a5cfca30fadab8f88f4ec23a41d")
version("0.3.1", sha256="5c97131010cd8d556a5fad2a3824452120640ac33a6a45b6ca9ee3c28f2e266f")
version("0.0.17", sha256="bf8da0fa96d772ebaea0b98bafa0ba5b8639669d5feb07465d4255af177bddc0")
depends_on("python@3.7:", type=("build", "run"))
depends_on("python@3.8:", when="@0.1.1:", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-appdirs", type=("build", "run"))
depends_on("py-click@6.0:", type=("build", "run"))
depends_on("py-datacube", when="@:0.1", type=("build", "run"))
depends_on("py-geopandas@0.7:", type=("build", "run"))
depends_on("py-odc-geo", when="@0.2:", type=("build", "run"))
depends_on("py-rasterio", type=("build", "run"))
depends_on("py-rioxarray@0.4:", type=("build", "run"))
depends_on("py-scipy", when="@0.0.18:", type=("build", "run"))
depends_on("py-xarray@0.17:", type=("build", "run"))
depends_on("py-pyproj@2:", type=("build", "run"))
depends_on("py-numpy@1.20:", when="@0.3:", type=("build", "run"))
| UTF-8 | Python | false | false | 1,587 | py | 8,218 | package.py | 6,591 | 0.659735 | 0.550725 | 0 | 34 | 45.676471 | 96 |
creedasaurus/stoopclub_Chat | 7,310,034,383,448 | be14609a6755c502f1b7798f4ede942b793dff64 | 98d7ba89d515f3abd8ad0812d61baf3063a166ba | /hello_stoopers.py | f13c6206a2aaf34ae4ebaf1d01ff03826e990be7 | []
| no_license | https://github.com/creedasaurus/stoopclub_Chat | ff5b6c57d61bb05c59e4bcc360d1c3c272c0a4e9 | 042011b5523170a8d732f4c6e42357acf9cefa07 | refs/heads/master | 2016-08-11T14:56:34.252603 | 2015-11-04T08:16:55 | 2015-11-04T08:16:55 | 45,523,127 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | newNumber = 26
print "Hello Stoop Troopers" | UTF-8 | Python | false | false | 43 | py | 2 | hello_stoopers.py | 1 | 0.790698 | 0.744186 | 0 | 2 | 21 | 28 |
dimonrtm/ml_ | 17,308,718,214,466 | 4629581944a644b721714440da8b32d62a774bc9 | 9f6b2cf2a8a12c3cfa97dabf548973837eaf9043 | /Программирование на Python/Погружение в Python/Задания/Неделя1/Задание1/solution.py | d9c47ef1c8b312b76f425b3f4e98c74bef5688e9 | []
| no_license | https://github.com/dimonrtm/ml_ | 56b242caed4d60d40befb3f5235e43ac40f6a139 | 49e211ec83115bfc90ad44854b7759df31d3de33 | refs/heads/master | 2020-08-31T05:20:43.477108 | 2020-07-04T18:22:21 | 2020-07-04T18:22:21 | 218,602,334 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
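# Sum the decimal digits of the number passed as the first command-line argument.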
digit_string=sys.argv[1]
sum=0
for number in digit_string:
sum+=int(number)
print(sum) | UTF-8 | Python | false | false | 101 | py | 88 | solution.py | 24 | 0.732673 | 0.712871 | 0 | 6 | 16 | 27 |
Plavit/cs50ai | 3,075,196,620,010 | 53f373ae815bff538628576a6b3b332c4ec599ea | 5053e4440b8c31ab65a35497da279399f03cb359 | /Project 2a - Pagerank/pagerank.py | 86ee864b3f2de49b511946ebd9bbdc24bc06a2ba | []
| no_license | https://github.com/Plavit/cs50ai | b8770b86e25774bf9b45abd85a60e90476ca9cf0 | ef1024ee0b36bbceaaebe443915fb0498752221b | refs/heads/master | 2023-02-10T12:12:37.347996 | 2021-01-03T19:12:17 | 2021-01-03T19:12:17 | 323,460,918 | 8 | 1 | null | false | 2021-01-02T11:09:22 | 2020-12-21T22:21:31 | 2021-01-02T11:09:11 | 2021-01-02T11:09:22 | 23,804 | 0 | 0 | 3 | Python | false | false | import copy
import os
import random
import re
import sys
DAMPING = 0.85
SAMPLES = 10000
def main():
if len(sys.argv) != 2:
sys.exit("Usage: python pagerank.py corpus")
corpus = crawl(sys.argv[1])
ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
print(f"PageRank Results from Sampling (n = {SAMPLES})")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
ranks = iterate_pagerank(corpus, DAMPING)
print(f"PageRank Results from Iteration")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
"""
Parse a directory of HTML pages and check for links to other pages.
Return a dictionary where each key is a page, and values are
a list of all other pages in the corpus that are linked to by the page.
"""
pages = dict()
# Extract all links from HTML files
for filename in os.listdir(directory):
if not filename.endswith(".html"):
continue
with open(os.path.join(directory, filename)) as f:
contents = f.read()
links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
pages[filename] = set(links) - {filename}
# Only include links to other pages in the corpus
for filename in pages:
pages[filename] = set(
link for link in pages[filename]
if link in pages
)
return pages
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
# Variable setup
target = {}
links = corpus[page]
# Without links, each page has same probability
if not links:
for x in corpus:
target[x] = 1.0 / len(corpus)
else:
# Otherwise, with probability `1 - damping_factor`, choose
# a link at random chosen from all pages in the corpus.
for x in corpus:
target[x] = (1 - damping_factor) / len(corpus)
# plus, add another probability per link: with probability `damping_factor`, choose a link at random
# linked to by `page`
for x in links:
target[x] += damping_factor / len(links)
# Debug - total sum
# total=0
# for y in corpus:
# total+=target[y]
# print(total)
# Return a probability distribution over which page to visit next
return target
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Variable setup
page_ranks = {}.fromkeys(corpus.keys(), 0)
target_page = random.choices(list(corpus.keys()))[0]
# For n pages as defined by input
for i in range(1, n):
# Sample transition model, starting with random target page initialized before
current_dist = transition_model(corpus, target_page, damping_factor)
for page in page_ranks:
# For each page, add pagerank value
page_ranks[page] = (((i - 1) * page_ranks[page]) + current_dist[page]) / i
target_page = random.choices(list(page_ranks.keys()), weights=list(page_ranks.values()), k=1)[0]
# Debug - total sum
total = 0
for page in page_ranks:
total += page_ranks[page]
print("Debug 1, total sum: ", total)
# Return a dictionary where keys are page names, and values are
# their estimated PageRank value (a value between 0 and 1). All
# PageRank values should sum to 1.
return page_ranks
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Variable setup
total_pages = len(corpus)
page_ranks = {}.fromkeys(corpus.keys(), 1.0 / total_pages)
too_rough = True
# Until reaching set precision threshold, iterate
while too_rough:
old_distribution = copy.deepcopy(page_ranks)
# PageRank values for each page by iteratively updating
# PageRank values until convergence, with same rules as previous function
for page in corpus:
# Determine link weight for page
link_weight = 0
for p in corpus:
if page in corpus[p]:
link_weight += page_ranks[p] / len(corpus[p])
# Iteratively add pagerank to pages
page_ranks[page] = ((1 - damping_factor) / total_pages) + (damping_factor * link_weight)
        # Keep iterating until no page's estimate changed by more than the threshold
        # (checking only the last page visited would let unconverged pages slip through)
        too_rough = any(abs(old_distribution[p] - page_ranks[p]) > 0.0001 for p in corpus)
# Debug - total sum
total = 0
for page in page_ranks:
total += page_ranks[page]
print("Debug 2, total sum: ", total)
# Return a dictionary where keys are page names, and values are
# their estimated PageRank value (a value between 0 and 1). All
# PageRank values should sum to 1. (should be very similar in results to previous function)
return page_ranks
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 5,690 | py | 7 | pagerank.py | 6 | 0.627592 | 0.618981 | 0 | 173 | 31.890173 | 108 |
yomhub/Tensorflow_research | 4,174,708,258,760 | baf840af748c2230fef4231bff5f47bedaa51311 | de86f9f9dd620212c96fc3bbc28bdbc7432aa237 | /lib/tflib/log_tools.py | 7fc6066e1c69ff10ecfe8c364bac1338fc3ec9fa | []
| no_license | https://github.com/yomhub/Tensorflow_research | 235fa5513abeea64e44291e6705fb136cf108af4 | 2f8102039168ade5481745e4aa59c7e6a0cba59b | refs/heads/master | 2022-11-24T05:08:11.009741 | 2020-06-14T01:21:54 | 2020-06-14T01:21:54 | 280,804,212 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from matplotlib.collections import PolyCollection
from datetime import datetime
from mpl_toolkits.mplot3d import Axes3D
__DEF_LINE_STY = [
'solid', # _____
'dotted', # .......
'dashdot', # __.__.__.
'dashed', # __ __ __
]
__DEF_COLORS = [
'r','b','g','c','y','m']
def str2time(instr):
ymd,hms=instr.split('-')
return datetime(int(ymd[:4]), int(ymd[4:6]), int(ymd[6:]), int(hms[:2]), int(hms[2:4]), int(hms[4:6]))
def str2num(instr):
return [int(s) for s in instr.split() if s.isdigit()]
def auto_scalar(dic_data, step=0, logname=None):
if(type(dic_data)==list):
if(logname==None):
logname="auto_scalar"
cont = 0
for itm in dic_data:
tf.summary.scalar(logname+"_list_{}".format(cont),itm,step=step)
cont += 1
elif(type(dic_data)==dict):
for itname in dic_data:
tf.summary.scalar(itname,dic_data[itname],step=step)
else:
if(logname==None):
logname="auto_scalar"
tf.summary.scalar(logname,dic_data,step=step)
def auto_image(img_data, name=None, step=0, max_outputs=None, description=None):
"""
Args:
img_data: tensor with shape (h,w,3 or 1) or (N,h,w,3 or 1)
name: log name
step: int of step
max_outputs: int of max_outputs
"""
if(len(img_data)==3):
img_data = tf.reshape(img_data,[1,]+img_data.shape)
if(tf.reduce_max(img_data)>1.0):
img_data = img_data / 256.0
max_outputs = img_data.shape[0] if max_outputs==None else max_outputs
name = "auto_image" if name==None else name
tf.summary.image(name,img_data,step,max_outputs,description)
def auto_histogram(dic_data, step=0, logname=None):
if(type(dic_data)==list):
if(logname==None):
logname="auto_scalar"
cont = 0
for itm in dic_data:
tf.summary.histogram(logname+"_list_{}".format(cont),itm,step=step)
cont += 1
elif(type(dic_data)==dict):
for itname in dic_data:
tf.summary.histogram(itname,dic_data[itname],step=step)
else:
if(logname==None):
logname="auto_scalar"
tf.summary.histogram(logname,dic_data,step=step)
def save_image(img, savedir):
if(len(img.shape)==4):
img = tf.reshape(img,img.shape[1:])
tf.io.write_file(savedir,tf.io.encode_jpeg(tf.cast(img,tf.uint8)))
def plt_func_lines(funcs,xarry=None,cols=None,figure=None,fig_size=None,save_name=None):
"""
Draw plt by function
Args:
fig_size: width, height
"""
fg = plt.figure(figsize=fig_size) if(figure==None)else figure
if(xarry==None):
xarry=np.range(-5.0,5.0,100)
if(type(funcs)!=list):
funcs = [funcs]
for i in range(len(funcs)):
fg.plot(
xarry,funcs[i](xarry),
color=__DEF_COLORS[i%len(__DEF_COLORS)],
linewidth=1.0,
linestyle=__DEF_LINE_STY[i%len(__DEF_LINE_STY)])
if(save_name!=None):fg.savefig(save_name)
return fg
def plt_points_lines(points,xarry=None,xcfg=None,figure=None,fig_size=None,save_name=None):
"""
Draw plt by function
Args:
points:
1D array (Ny,) with [y0,y1...]:
if xarry is [x0,x1...], use xarry.
Or calculate xarry by xcfg
2D array (2,Nyx) with [[y0,y1...],[x0,x1...]]
List of 1D/2D arrays:
draw multi lines in figure
xarry:
1D array (Nx,) with [x0,x1...]
xcfg:
if points is 1D and xarry==None, (start,end) will be use
"""
fg = plt.figure(figsize=fig_size) if(figure==None)else figure
if(type(points)!=list):
points = [points]
for i in range(len(points)):
if(len(points[i].shape)==1):
if(xarry!=None and xarry.shape[0]==points[i].shape[0]): xs = xarry
elif(xcfg!=None): xs = np.linspace(xcfg[0],xcfg[1],points[i].shape[0])
ys = points[i]
else:
xs = points[i][1]
ys = points[i][0]
fg.plot(xs,ys,
color=__DEF_COLORS[i%len(__DEF_COLORS)],
linewidth=1.0,
linestyle=__DEF_LINE_STY[i%len(__DEF_LINE_STY)])
if(save_name!=None):fg.savefig(save_name)
return fg
def rf_helper(net_list,ord_len,panding=True):
"""
Args:
net_list: list pramaters of network
[kernel size, stride size]
ord_num: int, coordinate range
panding: True or False
Print coordinate in each layer
"""
if(type(net_list)!=list or len(net_list)<2):return
panding = bool(panding)
ord_len = int(ord_len)
rf_st = [[1,1]]
cod_table = np.arange(ord_len,dtype=np.int)
cod_table = np.stack((cod_table,cod_table),axis=-1)
cod_table = [cod_table.tolist()]
for i in range(len(net_list)):
rf,st = rf_st[i]
ksize,strsize = net_list[i]
crf = rf + (ksize-1)*st
cst = st*strsize
rf_st.append([crf,cst])
p_harf_k = int(ksize/2) if((ksize-int(ksize/2)*2)!=0)else int(ksize/2)-1
harf_k = ksize - 1 - p_harf_k
max_cod = len(cod_table[i])-1
stp = 0 if panding else p_harf_k
edp = max_cod if panding else max_cod - harf_k
tmp = []
while(stp<edp):
c_ctp = max(0,stp-p_harf_k)
c_edp = min(max_cod,stp + harf_k)
tmp.append([cod_table[i][c_ctp][0],cod_table[i][c_edp][1]])
stp+=strsize
cod_table.append(tmp)
return rf_st,cod_table
def resize_visualize_helper(img,model,gtbox=None,mask=None):
"""
Helper for feature part in unet in resize task.
Args:
img: input image
gtbox: (N,4) with [y1,x1,y2,x2] in [0,1]
mask: pixel mask
module: model with output
{}
"""
linewidth = 1.3
plt.figure(figsize=(8,4))
# fg = plt.figure(figsize=(8,4))
plt.subplot(3,3,1,
# figure=fg
)
divnum = 3*3-1
base_scale = 32 # vgg net based scale
if(type(img)!=list):img=[img]
if(gtbox!=None and type(gtbox)!=list):gtbox=[gtbox]
if(mask!=None and type(mask)!=list):mask=[mask]
for j in range(divnum):
plt.subplot(3,3,j+1,
# figure=fg
)
plt.xlabel('layer',
# figure=fg
)
plt.ylabel('energy',
# figure=fg
)
plt.title('Scaler {}/{}'.format(j+1,divnum),
# figure=fg
)
dx=np.arange(5)
for i in range(len(img)):
coe_x = int(max(int(img[i].shape[-2]/base_scale),divnum)/divnum)
coe_y = int(max(int(img[i].shape[-3]/base_scale),divnum)/divnum)
for j in range(divnum):
img_size = [coe_y*base_scale,coe_x*base_scale]
tmp = tf.image.resize(img[i],img_size)
rt = model(tmp)
mp = tf.cast(rt['scr'][:,:,:,1]>rt['scr'][:,:,:,0],tf.float32)
mp = tf.reshape(mp,mp.shape+[1])
mp = tf.broadcast_to(mp,mp.shape[:-1]+[3])
mp = tf.concat([mp,tf.image.resize(tmp,mp.shape[-3:-1])/255.0],axis=2)
tf.summary.image(
name = 'Score|Img image size {}.'.format(img_size),
data = mp,step=0)
# if(dx==None):dx=np.arange(len(rt['ftlist']))
dmin = []
dmean = []
dmax = []
for o in range(len(rt['ftlist'])):
dmin += [tf.reduce_min(rt['ftlist'][o]).numpy()]
dmax += [tf.reduce_max(rt['ftlist'][o]).numpy()]
dmean += [tf.reduce_mean(rt['ftlist'][o]).numpy()]
dmin = np.asarray(dmin)
dmean = np.asarray(dmean)
dmax = np.asarray(dmax)
plt.subplot(3,3,j+1,
# figure=fg
)
plt.plot(dx,dmean,
color=__DEF_COLORS[i%len(__DEF_COLORS)],
linewidth=linewidth,
linestyle=__DEF_LINE_STY[0],
label='mean',
# figure=fg
)
plt.plot(dx,dmin,
color=__DEF_COLORS[i%len(__DEF_COLORS)],
linewidth=linewidth,
linestyle=__DEF_LINE_STY[1],
label='min',
# figure=fg
)
plt.plot(dx,dmax,
color=__DEF_COLORS[i%len(__DEF_COLORS)],
linewidth=linewidth,
linestyle=__DEF_LINE_STY[2],
label='max',
# figure=fg
)
plt.show()
plt.savefig('logfig.png')
# fg.show()
print("")
def sequence_visualize_helper(img,model,gtbox=None,mask=None):
"""
Helper for feature part in unet in sequence task.
Args:
img: list of image sequence
gtbox: (N,4) with [y1,x1,y2,x2] in [0,1]
mask: pixel mask
module: model with output
{}
"""
def polygon_under_graph(xlist, ylist):
'''
Construct the vertex list which defines the polygon filling the space under
the (xlist, ylist) line graph. Assumes the xs are in ascending order.
'''
return [(xlist[0], 0.)] + list(zip(xlist, ylist)) + [(xlist[-1], 0.)]
def cc(arg):
'''
Shorthand to convert 'named' colors to rgba format at 60% opacity.
'''
return mcolors.to_rgba(arg, alpha=0.6)
fig_min = plt.figure(num='min')
fig_max = plt.figure(num='max')
fig_mean = plt.figure(num='mean')
# mean, min, max
ax_min = fig_min.gca(projection='3d')
ax_max = fig_max.gca(projection='3d')
ax_mean = fig_mean.gca(projection='3d')
# ax1 = fig.add_subplot(1,3,1,projection='3d')
# ax2 = fig.add_subplot(1,3,2,projection='3d')
# ax3 = fig.add_subplot(1,3,3,projection='3d')
linewidth = 1.3
if(type(img)!=list):img=[img]
    if(gtbox is not None and type(gtbox)!=list):gtbox=[gtbox]
    if(mask is not None and type(mask)!=list):mask=[mask]
dmin = []
dmean = []
dmax = []
for i in range(len(img)):
rt = model(img[i])
mp = tf.cast(rt['scr'][:,:,:,1]>rt['scr'][:,:,:,0],tf.float32)
mp = tf.reshape(mp,mp.shape+[1])
mp = tf.broadcast_to(mp,mp.shape[:-1]+[3])
mp = tf.concat([
mp,
# tf.broadcast_to(rt['mask'],rt['mask'].shape[:-1]+[3]),
tf.image.resize(img[i],mp.shape[-3:-1])/255.0],
axis=2)
tf.summary.image(
name = 'Score|Edg|Img image.',
data = mp,step = i,
max_outputs=20
)
tf.keras.preprocessing.image.save_img(
path='fg{}.jpg'.format(i),
x=tf.reshape(mp,mp.shape[1:]).numpy(),
# scale=False,
)
tmp_dmin = []
tmp_dmax = []
tmp_dmean = []
for j in range(len(rt['ftlist'])):
tmp_dmin += [tf.reduce_min(rt['ftlist'][j]).numpy()]
tmp_dmax += [tf.reduce_max(rt['ftlist'][j]).numpy()]
tmp_dmean += [tf.reduce_mean(rt['ftlist'][j]).numpy()]
dmin += [tmp_dmin]
dmax += [tmp_dmax]
dmean += [tmp_dmean]
# convert d[img][layer] to d[layer][img]
dmin = np.asarray(dmin).transpose((1,0))
dmax = np.asarray(dmax).transpose((1,0))
dmean = np.asarray(dmean).transpose((1,0))
zs = range(dmin.shape[0]) # layers num
xs = np.arange(len(img))
verts_min = []
verts_max = []
verts_mean = []
cols = []
for i in zs:
verts_min.append(polygon_under_graph(xs,dmin[i]))
verts_max.append(polygon_under_graph(xs,dmax[i]))
verts_mean.append(polygon_under_graph(xs,dmean[i]))
cols += [cc(__DEF_COLORS[i%len(__DEF_COLORS)])]
poly_min = PolyCollection(verts_min, facecolors=cols)
poly_max = PolyCollection(verts_max, facecolors=cols)
poly_mean = PolyCollection(verts_mean, facecolors=cols)
ax_min.add_collection3d(poly_min, zs=zs, zdir='y')
ax_min.set_xlabel('Images')
ax_min.set_xlim(0, len(img))
ax_min.set_ylabel('Layers')
ax_min.set_ylim(0, dmin.shape[0])
    ax_min.set_zlabel('Min')
ax_min.set_zlim(dmin.min()-1.0, dmin.max()+1.0)
ax_max.add_collection3d(poly_max, zs=zs, zdir='y')
ax_max.set_xlabel('Images')
ax_max.set_xlim(0, len(img))
ax_max.set_ylabel('Layers')
ax_max.set_ylim(0, dmin.shape[0])
ax_max.set_zlabel('Max')
ax_max.set_zlim(dmax.min()-1.0, dmax.max()+1.0)
ax_mean.add_collection3d(poly_mean, zs=zs, zdir='y')
ax_mean.set_xlabel('Images')
ax_mean.set_xlim(0, len(img))
ax_mean.set_ylabel('Layers')
ax_mean.set_ylim(0, dmin.shape[0])
ax_mean.set_zlabel('Mean')
ax_mean.set_zlim(dmean.min()-1.0, dmean.max()+1.0)
# plt.show()
# plt.savefig('logfig.png')
# fg.show()
print("") | UTF-8 | Python | false | false | 11,644 | py | 31 | log_tools.py | 27 | 0.594297 | 0.575146 | 0 | 384 | 29.325521 | 104 |
luoxuwei/Compiler | 13,572,096,656,598 | e798e369bd133ee0e0a105ffe477eccbf6551d0c | 7588f030cb6f5c2692c14ec3caedf9f11a312cb1 | /pythonvm/test_case/27_test_allocate_instance.py | e44cfaa1d69dcdaaa1380f617f85f94bb2905ad8 | []
| no_license | https://github.com/luoxuwei/Compiler | cceee6af426ba9d9b2f13d22fcf6e06a692953ba | d11da97ab9ef2f1a61d0244d0080c6b3f90fd475 | refs/heads/master | 2022-12-12T12:20:27.988873 | 2022-11-26T16:29:00 | 2022-11-26T16:29:00 | 252,653,543 | 4 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = int()
print(a)
b = str()
print(b)
c = list()
print(c)
d = dict()
print(d)
| UTF-8 | Python | false | false | 78 | py | 271 | 27_test_allocate_instance.py | 217 | 0.538462 | 0.538462 | 0 | 8 | 8.75 | 10 |
ajevnisek/FederatedLearning | 10,144,712,802,245 | 857d19b5a0240b13be940fe037d5d78f1c15582f | 5e6895248c79c6f5860e371edb9d325aa26f92f2 | /models/influence.py | 8f0427a4c3780fd17d901050b4d16f2634a05511 | []
| no_license | https://github.com/ajevnisek/FederatedLearning | c613e743ef1368b75e77d0cd69419745e3e40e8e | 6330befb1629a5f652bc1c1fea80f1830aec00a0 | refs/heads/master | 2022-04-13T16:29:10.895522 | 2019-12-21T08:12:05 | 2019-12-21T08:12:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import six
import torch
from torch.autograd import grad
from torch.autograd import Variable
import torch.nn.functional as F
from models import utility
import argparse
import copy
import numpy as np
from utils.options import args_parser
import pickle
from torch.utils import data
from torch.utils.data import Dataset
import random
def hvp(y, w, v):
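    """Hessian-vector product (Pearlmutter trick): differentiates (dy/dw . v) w.r.t. w to get H v without forming H."""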
first_grads = grad(y, w, retain_graph=True, create_graph=True)
grad_v = 0
for g, v in six.moves.zip(first_grads, v):
grad_v += torch.sum(g * v)
    return grad(grad_v, w, create_graph=True)
def grad_z(z, t, model, gpu=-1, create_graph=True):
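    """Gradient of the NLL loss at a single example (z, t) with respect to all model parameters."""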
device = torch.device('cuda:{}'.format(gpu) if torch.cuda.is_available() and gpu != -1 else 'cpu')
model.eval()
z, t = Variable(z, volatile=False).to(device), Variable(t, volatile=False).to(device)
y = model(z)
loss = F.nll_loss(y, t, weight=None, reduction='mean')
return list(grad(loss, list(model.parameters()), create_graph=create_graph))
def stest(v,model,z_loader,gpu,damp=0.01,scale=25.0,repeat=5):
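    """Iteratively estimate the inverse-Hessian-vector product H^-1 v (the s_test vector of the influence-function method) by sampling training points."""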
h_estimate=v.copy()
train_set=z_loader
device = torch.device('cuda:{}'.format(gpu) if torch.cuda.is_available() and gpu != -1 else 'cpu')
for i in utility.create_progressbar(repeat, desc='s_test'):
        j=random.randint(0,len(train_set.dataset)-1)
data, target= train_set.dataset[j]
data = train_set.collate_fn([data])
target= train_set.collate_fn([target])
x, t = Variable(data, volatile=False).to(device), Variable(target, volatile=False).to(device)
y = model(x)
loss = F.nll_loss(y, t, weight=None, reduction='mean')
hv = hvp(loss, list(model.parameters()), h_estimate)
h_estimate = [_v + (1 - damp) * h_estimate - _hv / scale for _v, h_estimate, _hv in six.moves.zip(v, h_estimate, hv)]
return h_estimate
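
# A minimal usage sketch (hedged: `model`, `train_loader`, `z_test`, `t_test` are placeholders,
# not objects defined in this module):
#   v = grad_z(z_test, t_test, model, gpu=0)       # gradient of the test loss
#   s_test = stest(v, model, train_loader, gpu=0)  # ~ H^-1 v
#   # the influence of a training point (z, t) on the test loss is then proportional to
#   # -sum(torch.sum(g * s) for g, s in zip(grad_z(z, t, model, gpu=0), s_test))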
| UTF-8 | Python | false | false | 1,987 | py | 13 | influence.py | 12 | 0.65224 | 0.6462 | 0 | 48 | 39.354167 | 125 |
codingpurush/class11python | 6,021,544,187,161 | a2103a25913f074a8715c7f50b2ad8ef97d51033 | 48eb7ce8a065e4b851bd0eefa70d14a91af2d800 | /fibonnacci_Nth.py | d5bd0b8fee5516792da25fb3758d5c0627f14282 | []
| no_license | https://github.com/codingpurush/class11python | c67d03048684e1cf4e28e4d7c0708abe31bf0f5d | 66255e7ba597eb369bea920deb901875b19a8613 | refs/heads/master | 2020-12-29T06:10:17.134618 | 2020-02-06T13:15:24 | 2020-02-06T13:15:24 | 238,485,561 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #PRINT Nth FIBONACCI NUMBER
x=int(input())
a=-1
b=1
c=a+b
if(x==1):
    print(c)
for i in range(2,x+1):
#print(c)
a=b
b=c
c=a+b
if(i==x):
print(c)
| UTF-8 | Python | false | false | 129 | py | 10 | fibonnacci_Nth.py | 10 | 0.589147 | 0.55814 | 0 | 12 | 9.75 | 27 |
marikapartyka/lungs | 3,083,786,523,044 | 77ff5618a1d32611022a7c2d0cc659d3b34d89e4 | 1c4c31e86e4b9f92d558722d04d9f436e4b078ef | /explain.py | fff593ead9d3586c5e0b2aa59c022f8ce22999a8 | []
| no_license | https://github.com/marikapartyka/lungs | fc2b2213cb6033c23327ae686e65a3e12735f609 | 6589b1a606d0a0b5ec0966e1bef87dec0a15f7fc | refs/heads/master | 2023-01-09T14:31:12.279023 | 2020-11-11T19:17:07 | 2020-11-11T19:17:07 | 312,063,687 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
from _train import data, X, y, pipeline, pipeline_for_encoded_data,encoded_X
import pandas as pd
import sklearn
import numpy as np
from lime import lime_tabular
import dalex as dx
model = pipeline
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, train_size=0.80)
model.fit(X_train, y_train)
# create an explainer for the model:
exp = dx.Explainer(model, X, y, label = "Lung's Cancer MLP Pipeline")
# BreakDown and BreakDownInt methods
def BreakDown(number_of_observation):
bd = exp.predict_parts(pd.DataFrame(X_test.iloc[number_of_observation,:]).T, type='break_down')
bd.plot()
def BreakDownI(number_of_observation):
bd_interactions = exp.predict_parts(pd.DataFrame(X_test.iloc[number_of_observation,:]).T,
type='break_down_interactions')
bd_interactions.plot()
#SHAP
def Shap(number_of_observation):
sh = exp.predict_parts(pd.DataFrame(X_test.iloc[number_of_observation,:]).T, type='shap', B = 10)
sh.plot(bar_width = 16)
Shap(4)
# sh.result.loc[sh.result.B == 0, ]
# #lime
# # preparing categorical_features for lime method
# categorical_features = [3,7,9,10]
# categorical_names = {}
# categorical_names = {}
# for feature in categorical_features:
# Y = X.copy()
# le = sklearn.preprocessing.LabelEncoder()
# Y.iloc[:, feature] = Y.iloc[:, feature].astype(str)
# le.fit(Y.iloc[:, feature])
# # Y.iloc[:, feature] = le.transform(Y.iloc[:, feature])
# categorical_names[feature] = le.classes_
# stadia = [float('nan'), 'IA1', 'IA2', 'IA3', 'IB', 'IIA', 'IIB', 'IIIA', 'IIIB', 'IVA', 'IVB']
# categorical_names.update({11:np.array(stadia, dtype=object)})
# categorical_features2 = [3,7,9,10,11]
# encoder = lambda x: model.named_steps["encoder"].transform(x)
# scaler = lambda x: model.named_steps["scaler"].transform(x)
# predict_fn = lambda x: model.named_steps["nn"].predict_proba(x)
# X_train_enc = encoder(X_train)
# X_test_enc = encoder(X_test)
# X_train_sc = scaler(X_train_enc)
# X_test_sc = scaler(X_test_enc)
# explainer_lime = lime_tabular.LimeTabularExplainer(X_train_sc,class_names=["NO", "YES"],
# feature_names=X_train.columns,
# categorical_features=categorical_features2,
# categorical_names=categorical_names,
# verbose=False)
# def Lime(number_of_observation):
# exp_lime = explainer_lime.explain_instance(X_test_sc[number_of_observation],predict_fn)
# exp_lime.show_in_notebook(show_table=True, show_all=False)
# Lime(4)
def CeterisParibus(number_of_observation):
cp = exp.predict_profile(pd.DataFrame(X_test.iloc[number_of_observation,:]).T)
cp.plot()
CeterisParibus(4)
def VariableImp():
vi = exp.model_parts()
vi.plot(max_vars=10)
VariableImp()
def PartialDp():
pdp_num = exp.model_profile(type = 'partial')
pdp_num.result["_label_"] = 'pdp'
pdp_num.plot()
PartialDp()
| UTF-8 | Python | false | false | 3,170 | py | 9 | explain.py | 4 | 0.623659 | 0.613249 | 0 | 111 | 27.495495 | 101 |
Alpine-DAV/ascent | 17,669,495,464,138 | a3aed1e909ae3a9ef53150b14bb9af503f9c5445 | 5de7df0be411b4bad61f927cae845bdb8223308f | /src/examples/tutorial/ascent_intro/python/conduit_example2.py | cfd0910e805659a8284114c8ba8d540e08153dec | [
"BSD-3-Clause",
"Zlib"
]
| permissive | https://github.com/Alpine-DAV/ascent | cb40429167a93c62f78fe650a0121258be279162 | e52b7bb8c9fd131f2fd49edf58037cc5ef77a166 | refs/heads/develop | 2023-09-06T07:57:11.558238 | 2023-08-25T16:05:31 | 2023-08-25T16:05:31 | 81,366,855 | 151 | 61 | NOASSERTION | false | 2023-09-13T19:31:09 | 2017-02-08T19:21:22 | 2023-09-10T23:54:29 | 2023-09-13T19:31:09 | 111,927 | 155 | 58 | 166 | C++ | false | false | ###############################################################################
# Copyright (c) Lawrence Livermore National Security, LLC and other Ascent
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Ascent.
###############################################################################
import conduit
import numpy as np
#
# Using hierarchical paths imposes a tree structure
#
n = conduit.Node()
n["dir1/dir2/val1"] = 100.5;
print(n.to_yaml())
| UTF-8 | Python | false | false | 551 | py | 1,345 | conduit_example2.py | 1,035 | 0.540835 | 0.528131 | 0 | 15 | 35.666667 | 79 |
enverygtlr/simple-Ray-tracer-in-webgl | 12,756,052,870,415 | c85bb8a7144c6d363bdadc2d288269339a40cee1 | d5c10a5e222e5e6bee50d6d296e129986384ea02 | /new/untitled folder/untitled folder/pythonscript.py | 13e65853240c0a99cbbd02df3d18bfde1c97b337 | []
| no_license | https://github.com/enverygtlr/simple-Ray-tracer-in-webgl | 37c48210e3a7e4e6de52d3bc25c62836ec2e06b6 | 5604e3910954d91c1cf2c9db22e9bda1c8162fbc | refs/heads/master | 2023-02-24T15:58:39.122964 | 2021-01-27T11:49:07 | 2021-01-27T11:49:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def genPairs(n):
for i in range(0, n):
for j in range(i+1, n):
yield (i, j)
for p in genPairs(5):
print(p) | UTF-8 | Python | false | false | 135 | py | 8 | pythonscript.py | 5 | 0.496296 | 0.474074 | 0 | 6 | 21.5 | 31 |
williamy1996/AutoExpression | 6,777,458,418,199 | 022def6b9f8db5e3774a404aab43580159ac681f | aa0bdafb61b8ea16e21daf4f9bfc2d87044eb57e | /solnml/utils/saveloadmodel.py | 6344380279173b2bbbc26da96dfa9e85fa5d2018 | [
"MIT"
]
| permissive | https://github.com/williamy1996/AutoExpression | c32b5105aa9ba71d14269d04745b5fbf8e75c646 | b470d9ff67074c8b076abbc1dce359db9a36f921 | refs/heads/master | 2022-12-14T20:22:37.529136 | 2020-09-03T10:04:10 | 2020-09-03T10:04:10 | 292,527,971 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import os
import sys
import time
import pickle
import numpy as np
import pandas as pd
import json
import pickle as pkl
from sklearn.datasets import load_iris
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import train_test_split
from solnml.utils.data_manager import DataManager
from solnml.estimators import Classifier
class Ensemble_models:
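    """Serves predict/predict_proba for a saved ensemble; each entry of mdl_list is a [feature-engineering path, model path] pair loaded on demand."""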
def __init__(self,ensemble_info,mdl_list):
self.ensemble_info = ensemble_info
self.model_list = mdl_list
def predict_proba(self,test_x):
#ONLY FOR CLF MODEL
if(self.ensemble_info['task_type'] == 'RGS'):
print('Regression model does not have \'predict_proba\'.')
return 'Regression model does not have \'predict_proba\'.'
if(self.ensemble_info['ensemble_method']=='none'):
femdl = self.model_list[0]
y_predict = predict_proba_from_path(femdl,test_x)
return y_predict
if(self.ensemble_info['ensemble_method']=='bagging'):
y_predict = []
for femdl in self.model_list:
y_predict.append(predict_proba_from_path(femdl,test_x))
y_predict = np.array(y_predict)
return np.average(y_predict,axis=0)
if(self.ensemble_info['ensemble_method']=='ensemble_selection'):
y_predict = []
weights = np.array(pd.read_json(self.ensemble_info['ensemble_weights']))[:,0]
i = 0
for femdl in self.model_list:
y_predict.append(predict_proba_from_path(femdl,test_x)*weights[i])
i+=1
y_predict = np.array(y_predict)
return np.sum(y_predict,axis=0)
if(self.ensemble_info['ensemble_method']=='stacking'):
meta_learner = pickle.load(open(self.ensemble_info['meta_learner_path'],'rb'))
kfold = self.ensemble_info['kfold']
femdl = self.model_list[0]
y_predict = predict_proba_from_path(femdl,test_x)
n_dim = y_predict.shape[1]
sample_dim = y_predict.shape[0]
y_predict = []
if(n_dim==2):
n_dim = 1
i=0
for femdl in self.model_list:
if(i == 0):
new_sumpredict = np.zeros([sample_dim,n_dim])
new_predict = predict_proba_from_path(femdl,test_x)
if(n_dim==1):
new_predict = new_predict[:,1:]
new_sumpredict = new_sumpredict + new_predict/kfold
i+=1
if(i==kfold):
i=0
y_predict.append(new_sumpredict)
y_predict = np.hstack(y_predict)
y_pred = meta_learner.predict_proba(y_predict)
return y_pred
if(self.ensemble_info['ensemble_method']=='blending'):
meta_learner = pickle.load(open(self.ensemble_info['meta_learner_path'],'rb'))
femdl = self.model_list[0]
y_predict = predict_proba_from_path(femdl,test_x)
n_dim = y_predict.shape[1]
if(n_dim==2):
n_dim = 1
y_predict = []
for femdl in self.model_list:
new_predict = predict_proba_from_path(femdl,test_x)
if(n_dim==1):
new_predict = new_predict[:,1:]
y_predict.append(new_predict)
y_predict = np.hstack(y_predict)
y_pred = meta_learner.predict_proba(y_predict)
return y_pred
def predict(self,test_x):
if(self.ensemble_info['task_type'] == 'CLF'):
return np.argmax(self.predict_proba(test_x),axis=1)
if(self.ensemble_info['ensemble_method']=='none'):
femdl = self.model_list[0]
y_predict = predict_from_path(femdl,test_x)
return y_predict
if(self.ensemble_info['ensemble_method']=='bagging'):
y_predict = []
for femdl in self.model_list:
y_predict.append(predict_from_path(femdl,test_x))
y_predict = np.array(y_predict)
return np.average(y_predict,axis=0)
if(self.ensemble_info['ensemble_method']=='ensemble_selection'):
y_predict = []
weights = np.array(pd.read_json(self.ensemble_info['ensemble_weights']))[:,0]
i = 0
for femdl in self.model_list:
y_predict.append(predict_from_path(femdl,test_x)*weights[i])
i+=1
y_predict = np.array(y_predict)
return np.sum(y_predict,axis=0)
if(self.ensemble_info['ensemble_method']=='stacking'):
meta_learner = pickle.load(open(self.ensemble_info['meta_learner_path'],'rb'))
kfold = self.ensemble_info['kfold']
femdl = self.model_list[0]
y_predict = predict_from_path(femdl,test_x)
n_dim = y_predict.shape[1]
sample_dim = y_predict.shape[0]
y_predict = []
if(n_dim==2):
n_dim = 1
i=0
for femdl in self.model_list:
if(i == 0):
new_sumpredict = np.zeros([sample_dim,n_dim])
new_predict = predict_from_path(femdl,test_x)
if(n_dim==1):
new_predict = new_predict[:,1:]
new_sumpredict = new_sumpredict + new_predict/kfold
i+=1
if(i==kfold):
i=0
y_predict.append(new_sumpredict)
y_predict = np.hstack(y_predict)
y_pred = meta_learner.predict(y_predict)
return y_pred
if(self.ensemble_info['ensemble_method']=='blending'):
meta_learner = pickle.load(open(self.ensemble_info['meta_learner_path'],'rb'))
femdl = self.model_list[0]
y_predict = predict_from_path(femdl,test_x)
n_dim = y_predict.shape[1]
if(n_dim==2):
n_dim = 1
y_predict = []
for femdl in self.model_list:
new_predict = predict_from_path(femdl,test_x)
if(n_dim==1):
new_predict = new_predict[:,1:]
y_predict.append(new_predict)
y_predict = np.hstack(y_predict)
y_pred = meta_learner.predict(y_predict)
return y_pred
def save_model(mdl,save_dir):
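    """Copy an ensemble's feature-engineering/model pickles into save_dir and write its ens_info / model_list / fe_list index files."""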
mdl_list = ''
fe_list = ''
if not os.path.exists(save_dir):
os.makedirs(save_dir)
info = mdl.get_ens_model_info()
if(info is None):
f_ens_info = open(save_dir +'/ens_info','w')
ens_dict = {}
ens_dict['ensemble_method'] = 'none'
f_ens_info.write(json.dumps(ens_dict))
f_ens_info.close()
os.system('cp '+ mdl.best_algo_path + ' '+save_dir +'/')
os.system('cp '+ mdl.best_fe_path + ' '+save_dir +'/')
f_mdl_list = open(save_dir +'/model_list','w')
f_mdl_list.write(os.path.basename(mdl.best_algo_path))
f_mdl_list.close()
f_fe_list = open(save_dir +'/fe_list','w')
f_fe_list.write(os.path.basename(mdl.best_fe_path))
f_fe_list.close()
return
f_ens_info = open(save_dir +'/ens_info','w')
ens_dict = {}
if(mdl.task_type == 4):
ens_dict['task_type'] = 'RGS'
else:
ens_dict['task_type'] = 'CLF'
ens_met = info['ensemble_method']
ens_dict['ensemble_method'] = ens_met
if(ens_met=='bagging'):
f_ens_info.write(json.dumps(ens_dict))
if(ens_met=='ensemble_selection'):
ens_dict['ensemble_weights'] = pd.DataFrame(info['ensemble_weights']).to_json()
f_ens_info.write(json.dumps(ens_dict))
if(ens_met=='stacking'):
meta_learner_path = save_dir +'/'+os.path.basename(info['meta_learner_path'])
os.system('cp '+ info['meta_learner_path'] + ' '+save_dir +'/')
ens_dict['meta_learner_path'] = meta_learner_path
ens_dict['kfold'] = info['kfold']
f_ens_info.write(json.dumps(ens_dict))
if(ens_met=='blending'):
meta_learner_path = save_dir +'/'+os.path.basename(info['meta_learner_path'])
os.system('cp '+ info['meta_learner_path'] + ' '+save_dir +'/')
ens_dict['meta_learner_path'] = meta_learner_path
f_ens_info.write(json.dumps(ens_dict))
f_ens_info.close()
if(ens_met=='stacking'):
for conf in info['config']:
for partpath in conf[-2]:
os.system('cp '+ partpath + ' '+save_dir +'/')
mdl_list += (os.path.basename(partpath)+'\n')
for partpath in conf[-1]:
os.system('cp '+ partpath + ' '+save_dir +'/')
fe_list += (os.path.basename(partpath)+'\n')
else:
for conf in info['config']:
os.system('cp '+ conf[-2] + ' '+save_dir +'/')
os.system('cp '+ conf[-1] + ' '+save_dir +'/')
mdl_list += (os.path.basename(conf[-2])+'\n')
fe_list += (os.path.basename(conf[-1])+'\n')
f_mdl_list = open(save_dir +'/model_list','w')
f_mdl_list.write(mdl_list)
f_mdl_list.close()
f_fe_list = open(save_dir +'/fe_list','w')
f_fe_list.write(fe_list)
f_fe_list.close()
def predict_proba_from_path(femdl,test_x):
fe = femdl[0].replace('\n','')
base_fe = pickle.load(open(fe,'rb'))
test_x_tf = base_fe.operate(test_x)
mdl = femdl[1].replace('\n','')
base_model = pickle.load(open(mdl,'rb'))
return base_model.predict_proba(test_x_tf)
def predict_from_path(femdl,test_x):
fe = femdl[0].replace('\n','')
base_fe = pickle.load(open(fe,'rb'))
test_x_tf = base_fe.operate(test_x)
mdl = femdl[1].replace('\n','')
base_model = pickle.load(open(mdl,'rb'))
return base_model.predict(test_x_tf)
def load_model(save_dir):
f_ens_info = open(save_dir +'/ens_info','r')
ens_info = json.loads(f_ens_info.read())
f_ens_info.close()
mdl_list = []
f_mdl_list = open(save_dir +'/model_list','r')
for mdl in f_mdl_list:
mdl.replace('\n','')
mdl_list.append(save_dir +'/'+mdl)
f_mdl_list.close()
fe_list = []
f_fe_list = open(save_dir +'/fe_list','r')
for fe in f_fe_list:
fe.replace('\n','')
fe_list.append(save_dir +'/'+fe)
f_fe_list.close()
mdl_list = [[fe_list[i],mdl_list[i]] for i in range(len(mdl_list))]
return Ensemble_models(ens_info,mdl_list)
class bio_models():
def __init__(self,ensemble_info, mdl_list, imp_ope_list, fb):
self.ensemble_info = ensemble_info
self.mdl_list = mdl_list
self.fb = fb
self.imp_ope_list = imp_ope_list
def predict_proba(self,test_x):
#ONLY FOR CLF MODEL
if self.ensemble_info['task_type'] == 'RGS':
print('Regression model does not have \'predict_proba\'.')
return 'Regression model does not have \'predict_proba\'.'
if self.fb is not None:
test_x = self.fb.transform(test_x)
y_pred = None
for key in self.mdl_list:
if(np.sum(np.isnan(test_x)) > 0):
test_x_filled = self.imp_ope_list[key].fit_transform(test_x)
else:
test_x_filled = test_x
if y_pred is None:
y_pred = self.mdl_list[key].predict_proba(test_x_filled)
else:
y_pred += self.mdl_list[key].predict_proba(test_x_filled)
y_pred = y_pred/len(self.mdl_list)
return y_pred
def predict(self,test_x):
if self.ensemble_info['task_type'] == 'CLF':
return np.argmax(self.predict_proba(test_x),axis=1)
if self.fb is not None:
test_x = self.fb.transform(test_x)
y_pred = None
for key in self.mdl_list:
if(np.sum(np.isnan(test_x)) > 0):
test_x_filled = self.imp_ope_list[key].fit_transform(test_x)
else:
test_x_filled = test_x
if y_pred is None:
y_pred = self.mdl_list[key].predict(test_x_filled)
else:
y_pred += self.mdl_list[key].predict(test_x_filled)
y_pred = y_pred/len(self.mdl_list)
return y_pred
def save(biomdl, save_dir, task_type):
print("PLEASE SAVE THE MODEL IN A NEW FOLDER OR AN EMPTY FOLDER")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
f_ens_info = open(save_dir +'/ens_info','w')
ens_dict = {}
ens_dict['task_type'] = task_type
ens_dict['impute_method'] = biomdl.impute_method
f_ens_info.write(json.dumps(ens_dict))
    f_ens_info.close()
if biomdl.fb_operator is not None:
pkl.dump(biomdl.fb_operator,open(save_dir+'/fb.pkl','wb'))
for key in biomdl.impute_method:
pkl.dump(biomdl.impute_operator[key],open(save_dir+'/'+key+'.pkl','wb'))
save_model(biomdl.mdl[key],save_dir+'/'+key+'_models')
def load(save_dir):
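    """Load a bio_models bundle written by save(): ens_info, an optional fb.pkl, plus one imputer and one model set per impute method."""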
f_ens_info = open(save_dir +'/ens_info','r')
ens_info = json.loads(f_ens_info.read())
f_ens_info.close()
mdl_list = {}
imp_ope_list = {}
if os.path.exists(save_dir+'/fb.pkl'):
fb = pkl.load(open(save_dir+'/fb.pkl','rb'))
else:
fb = None
for method in ens_info['impute_method']:
mdl_list[method] = load_model(save_dir+'/'+method+'_models')
imp_ope_list[method] = pkl.load(open(save_dir+'/'+method+'.pkl','rb'))
return bio_models(ens_info,mdl_list,imp_ope_list,fb)
| UTF-8 | Python | false | false | 13,740 | py | 70 | saveloadmodel.py | 33 | 0.537918 | 0.533479 | 0 | 364 | 36.747253 | 90 |
feifeizhuge/data-analysis-with-pandas | 11,312,943,899,243 | 9b85704fc7c624610cf97ca1a5b987e935cf05b8 | d8e1421132868213806326dba7001f3aea6e8a51 | /data-cleaning/data_load_bus.py | 0cb760e05747c08c6b79808c17c617806a75b479 | []
| no_license | https://github.com/feifeizhuge/data-analysis-with-pandas | 28d0eacf9ab249e7a24fec148ccbc3de7e5e7257 | 952fb7946a4525c40016b27c6ad013a30c3458bd | refs/heads/master | 2020-06-14T18:57:31.014431 | 2019-09-22T18:49:29 | 2019-09-22T18:49:29 | 195,094,370 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 20:18:30 2019
@author: hechen
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
current_path = os.path.dirname(__file__)
csv_path = current_path + '/2016_02_10_0000_1687_export.csv'
Data = pd.read_csv(csv_path, ';')
"""
['Unix Timestamp [ms]',
'Time [Europe/Amsterdam]',
'Battery - Avg cell voltage [mV]',
'Battery - Avg cell temp [°C]',
'Battery - Max. charge current [A]',
'Battery - Max. discharge current [A]',
'Battery - Cell nr. max volt',
'Battery - Max cell voltage [mV]',
'Battery - Cell nr. min volt',
'Battery - Min cell voltage [mV]',
'Battery - Max. charge voltage [V]',
'Battery - Current [A]',
'Battery - Battery power [kW]',
'Battery - Voltage [V]',
'Battery - State of Charge [%]',
'Battery - State of Health [%]',
'Primove - Charger wayside present',
'Primove - Charging state',
'Primove - Charging current [A]',
'Primove - Pick-up temp [°C]',
'Primove - Rectrifier temp [°C]',
'Primove - Charging voltage [V]',
'Primove - Pick-up position [mm]',
'Primove - Pick-up position control',
'Battery - TCU compressor status',
'Battery - TCU heater status',
'Vehicle - Accelerator pedal position [%]',
'Vehicle - Accelerator pedal switch',
'Engine - Actual Engine Torque [%]',
'Vehicle - Ambient Air Temperature [°C]',
'Air pressure - Pressure front axle left [bar]',
'Air pressure - Pressure front axle right [bar]',
'Air pressure - Pressure rear axle left [bar]',
'Air pressure - Pressure rear axle right [bar]',
'Air pressure - Pressure brake circuit 1 [bar]',
'Air pressure - Pressure brake circuit 2 [bar]',
'Vehicle - Brake pedal switch', 'Vehicle - Current Gear',
'Engine - Driver Engine Torque demand [%]',
'Doors - Enable status Doors 1', 'Doors - Enable status Doors 2',
'Doors - Enable status Doors 3', 'Doors - Enable status Doors 4',
'Doors - Enable status Doors 5', 'Engine - Motor Speed RPM [RPM]',
'Vehicle - High resolution vehicle distance [km]',
'Doors - Lock status Doors 1', 'Doors - Lock status Doors 2',
'Doors - Lock status Doors 3', 'Doors - Lock status Doors 4',
'Doors - Lock status Doors 5', 'Doors - Open status Doors 1',
'Doors - Open status Doors 2', 'Doors - Open status Doors 3',
'Doors - Open status Doors 4', 'Doors - Open status Doors 5',
'Doors - Positions Doors', 'Doors - Status Doors',
'Vehicle - Tachograph Speed [km/h]',
'Vehicle - Wheel based Speed [km/h]', 'IVH - Altitude [m]',
'IVH - GPS Course [°]', 'IVH - GPS position', 'IVH - GPS speed [km/h]',
'IVH - Satellites', 'IVH - 24V Battery [V]', 'IVH - Online status',
'HVAC - AC compressor state [%]', 'Auxiliary - Air compressor state',
'Vehicle - Brake pedal position [%]', 'HVAC - Cabin air temp [°C]',
'HVAC - Cabin air temp setpoint [°C]', 'HVAC - Condenser fan state',
'HVAC - Evaporator fan state', 'HVAC - Floor air heater state',
'Vehicle - Ignition State',
'HVAC - Outlet air temperature floor unit [°C]',
'HVAC - Outlet air temperature roof unit [°C]',
'HVAC - Recirculation Air flap position [%]',
'Auxiliary - Steering pump state',
'HVAC - Water heater outlet temp [°C]',
'Auxiliary - Total Power 24V [kW]', 'Auxiliary - Total Power HV [kW]',
'Powertrain - Inverter temperature [°C]',
'Powertrain - Motor temperatur status',
'Powertrain - Traction Power [kW]', 'Vehicle - Vehicle state'],
dtype='object')
"""
#%% observation
# Modifications to the data or indices of the copy will
# not be reflected in the original object
X = Data.copy(deep=True)
# determine the sampling frequency
print(X['Time [Europe/Amsterdam]'][0:40])
#%%
# slice operation, sample every 20 points
X = X[0::20]
'''
print(X['Time [Europe/Amsterdam]'][0:40])
X = X[0::20]
print(X['Time [Europe/Amsterdam]'][0:50])
X['Unix Timestamp [ms]'][0:20]
sjdada
X['Unix Timestamp [ms]'][0:20]
pd.to_datetime(1455072686227)
pd.to_datetime(1455072686)
pd.to_datetime(1455072686227,unit='s')
pd.to_datetime(1455072686227,unit='ms')
X['Unix Timestamp [ms]'][0:20]
pd.to_datetime(1455072686227,unit='ms')
print(X['Time [Europe/Amsterdam]'][0:50])
pd.to_datetime(1455072686227,unit='ms')
pd.to_datetime(1455072686227/1000,unit='s')
1455072686227/1000
int(1455072686227/1000)
pd.to_datetime(1455072686227//1000,unit='s')
pd.to_datetime(X['Unix Timestamp [ms]'][0:30]//1000,unit='s')
'''
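#%% convert the Unix timestamp column to real datetimes (a sketch distilled from the scratch notes above;
# the new column name 'Timestamp' is an illustrative choice)
# X['Timestamp'] = pd.to_datetime(X['Unix Timestamp [ms]'], unit='ms')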
#%%
#time = Data['seconds']
#time = (time-time.iloc[0]) / 60
#my_x_ticks = np.arange(time.iloc[0], time.iloc[-1], 1)
#
#plt.plot(time, Data['GPS Speed'])
#plt.grid(axis='x')
#plt.xticks(my_x_ticks)
| UTF-8 | Python | false | false | 4,958 | py | 3 | data_load_bus.py | 2 | 0.614918 | 0.567415 | 0 | 131 | 36.732824 | 78 |
huyanhai/spider-music | 6,906,307,414,796 | ba43709f4da2e19307b3ba604caaeb92b52b38ba | 42fb81f32a776809079aa343e4dd7d521b1998b0 | /sider_scrapy/spiders/music.py | c8ceb5fc3d5d4892b9da50491bb83772320901fc | []
| no_license | https://github.com/huyanhai/spider-music | 8849d1ebbc4060219ad70fde2268d00c55330a4b | cfc87b6daadfc9ecc238e835e1f1079fa866d94d | refs/heads/master | 2020-06-28T09:28:09.290872 | 2019-08-05T08:57:16 | 2019-08-05T08:57:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import requests
import json
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from sider_scrapy.items import QqmusicItem
class MusicSpider(CrawlSpider):
name = 'music'
allowed_domains = ['y.qq.com']
start_urls = ['https://y.qq.com/n/yqq/album/003L8BAv3UNXWx.html']
# def __init__(self):
# self.song_data = []
rules = (
Rule(LinkExtractor(allow=r'album/.+\.html'),callback='parse_item', follow=False),
)
def start_requests(self):
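        """Page through QQ Music's new-album API (musicu.fcg) and queue each album's detail page for crawling."""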
URL = 'https://u.y.qq.com/cgi-bin/musicu.fcg'
formdata = {
'-':'getUCGI12417882050721563',
'g_tk':'155229945',
'format':'json',
'inCharset':'utf8',
'outCharset':'utf-8',
'notice':'0',
'platform':'yqq.json',
'needNewCode':'0',
}
for item in range(1,5):
item = item*20
for ids in range(1,7):
formdata['data'] = '{"new_album":{"module":"newalbum.NewAlbumServer","method":"get_new_album_info","param":{"area":%d,"start":0,"num":%d}},"new_album_tag":{"module":"newalbum.NewAlbumServer","method":"get_new_album_area","param":{}},"comm":{"ct":24,"cv":0}}' %(ids,item)
result = requests.get(URL,params=formdata)
datas = json.loads(result.text)
for id in datas['new_album']['data']['albums']:
url = 'https://y.qq.com/n/yqq/album/%s.html' %id['mid']
self.start_urls.append(url)
yield self.make_requests_from_url(url)
def parse_item(self, response):
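        """Scrape one album page for song/album metadata, then call the vkey and lyric endpoints to get the play URL and lyrics."""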
URL_LINK = "https://u.y.qq.com/cgi-bin/musicu.fcg"
URL_LYR = "https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_new.fcg"
result = response
song_mid = result.xpath("//span[@class='songlist__songname_txt']/a/@href").get().split('/')[-1].split('.')[0]
singer_name = result.xpath("//div[@class='data__cont']/div[@class='data__singer']/a/text()").get()
song_name = result.xpath("//div[@class='data__cont']//h1/text()").get()
more_data = result.xpath("//div[@class='data__cont']//li/text()").getall()
company = result.xpath("//div[@class='data__cont']//li/a/text()").get()
post = response.urljoin(result.xpath("//span[@class='data__cover']/img/@src").get())
more_data = list(map(lambda x:x.split(':')[1],more_data))
schools = more_data[0]
language = more_data[1]
send_time = more_data[2]
song_type = None
qq_pages = 'https://y.qq.com/n/yqq/album/%s.html' %song_mid
formdata_link = {
'-':'getplaysongvkey',
'g_tk':'155229945',
'loginUin': '810839700',
'hostUin':'0',
'format':'json',
'inCharset':'utf8',
'outCharset':'utf-8',
'notice':'0',
'platform':'yqq.json',
'needNewCode':'0',
'data':'{"req":{"module":"CDN.SrfCdnDispatchServer","method":"GetCdnDispatch","param":{"guid":"986239112","calltype":0,"userip":""}},"req_0":{"module":"vkey.GetVkeyServer","method":"CgiGetVkey","param":{"guid":"986239112","songmid":["%s"],"songtype":[0],"uin":"810839700","loginflag":1,"platform":"20"}},"comm":{"uin":810839700,"format":"json","ct":24,"cv":0}}' %song_mid
}
formdata_lry = {
'-':'MusicJsonCallback_lrc',
'g_tk':'155229945',
'songmid':song_mid,
'format':'json',
'inCharset':'utf8',
'outCharset':'utf-8',
'notice':'0',
'platform':'yqq.json',
'needNewCode':'0',
}
headers = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
'Referer':'https://y.qq.com/portal/player.html'
}
if len(more_data) > 4:
song_type = more_data[4]
print('正在爬取歌曲%s' %song_name)
res_link = requests.get(URL_LINK,params=formdata_link) # 获取歌曲地址
res_lyr = requests.get(URL_LYR,params=formdata_lry,headers=headers) # 获取歌词
datas_link = json.loads(res_link.text)['req_0']['data']
datas_lyr = json.loads(res_lyr.text)['lyric']
song_link = 'http://isure.stream.qqmusic.qq.com/' + datas_link['midurlinfo'][0]['purl']
item = QqmusicItem(singer_name=singer_name,song_name=song_name,schools=schools,language=language,send_time=send_time,song_type=song_type,company=company,post=post,song_mid=song_mid,song_link=song_link,qq_pages=qq_pages,lyric=datas_lyr)
yield item | UTF-8 | Python | false | false | 4,734 | py | 8 | music.py | 6 | 0.556809 | 0.521702 | 0 | 99 | 46.484848 | 383 |
Dituohgasirre/python | 9,698,036,202,454 | 346616db94cff25f16ee05ffd974b5924403425e | 8e7a2b9efbc0d25111f01f4cddb781961032685a | /python-1025/python/1_base_syntax/homework.py | 56368582436dac9d44afcfe1e4ce98f4cc01c885 | []
| no_license | https://github.com/Dituohgasirre/python | e044aa2e1fb2233b6ccd59701b834ab01e4e24c2 | 05f036d2723f75cd89e4412aaed7ee0ba5d3a502 | refs/heads/master | 2023-06-03T13:50:18.641433 | 2021-06-17T10:23:40 | 2021-06-17T10:23:40 | 366,942,423 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import kyo
import menu
def ball(index, args):
"""
    A bag holds 3 red balls, 3 green balls and 6 yellow balls; 6 balls are drawn at once.
    List all possible colour combinations.
"""
r = 0
y = 0
while r < 4:
g = 0
while g < 4:
y = 6 - r - g
print("红球: %d, 绿球: %d, 黄球: %d" % (r, g, y))
g += 1
r += 1
def mul9x9(index, args):
"""
    9x9 multiplication table
"""
i = 1
while i < 10:
j = 1
while j <= i:
if i % 2 == 0:
print("%d X %d = %2d " % (i, j, i * j), end='')
else:
print("\033[32;40m%d X %d = %2d\033[0m " % (i, j, i * j), end='')
j += 1
print()
i += 1
def blackFri(index, args):
"""
    Given that January 1st, 1900 was a Monday,
    find the year's Black Fridays (a Black Friday is a month whose 13th falls on a Friday).
"""
year = int(input("请输入年: "))
days = kyo.countYearDays(1900, year) + 13
m = 1
while m <= 12:
if (days + kyo.countMonthDays(year, m)) % 7 == 5:
print("%d 年 %d 月 13号是黑色星期五!" % (year, m))
m += 1
def fish(index, args):
"""
    Starting from January 1st, 2000, someone lives by "fish for 3 days, dry the nets for 2 days".
    Given a year-month-day, decide whether that person is fishing or drying the nets on that day.
"""
def core(year, month, day):
day += kyo.countYearDays(2000, year) + kyo.countMonthDays(year, month)
return "打鱼" if 1 <= day % 5 <= 3 else "晒网"
try:
print(core(*kyo.checkDate(input("请输入年月日: ").split('-'))))
# print(core(*kyo.checkDate(input("请输入年月日: "))))
except:
print("日期不合法....")
# print(core(*[int(x) for x in input("请输入年月日:").split()]))
# year, month, day = [int(x) for x in input("请输入年月日: ").split()]
# print(core(year, month, day))
    # automated test code
# for i in range(1, 16):
# print("%d-%d-%d: " % (year, month, i), core(year, month, i))
def main():
def end(index, args):
print("--------------------------------------------")
print("本题测试完成, 回车继续, q为退出: ", end='')
return True if input() == 'q' else False
menuList = menu.add("拿球", ball)
menu.add("买鸡", items=menuList)
menu.add("9x9乘法表", mul9x9, items=menuList)
menu.add("黑色星期五", blackFri, items=menuList)
menu.add("打鱼晒网", fish, items=menuList)
menu.add("最大公约数", items=menuList)
menu.add("4, 5, 6, 7组合数", items=menuList)
menu.add("输出1-1/2+1/3-1/4+1/5...+1/99-1/100结果", items=menuList)
menu.add("日历", items=menuList)
menu.add("回文数", items=menuList)
menu.add("完数", items=menuList)
menu.add("水仙花数", items=menuList)
menu.add("八进制转十进制", items=menuList)
menu.add("菜单打印图形", items=menuList)
menu.add("把八进制数转化为十进制数输出", items=menuList)
menu.add("实现菜单打印图形", items=menuList)
menu.add("退出", items=menuList)
menu.run(menuList, "====== 第一天练习题 ======", end)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,271 | py | 347 | homework.py | 244 | 0.501279 | 0.465839 | 0 | 108 | 24.333333 | 81 |
iluoxw/OurServer | 9,938,554,340,682 | a511da40c36be7c0eff00be83b3c64be0c43be17 | 2ced9f032b61792d81aced126dfeb1d40eeb065e | /User/migrations/0001_initial.py | bdf95fc8d9e65dcad2418f1dafc2004765c6de32 | []
| no_license | https://github.com/iluoxw/OurServer | c35e2d334df86f278156a2f32479caf353ad1646 | a3a76f1c8b61bd81c9a64774d7b0c3717755c9cb | refs/heads/master | 2020-05-21T03:48:50.868345 | 2017-03-10T14:28:10 | 2017-03-10T14:28:10 | 84,567,482 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('delete_flag', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='GroupPrivileges',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('groupId', models.IntegerField()),
('privilegesId', models.IntegerField()),
('delete_flag', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='Privileges',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('description', models.TextField()),
('delete_flag', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(max_length=32)),
('name', models.CharField(max_length=32)),
('password', models.CharField(max_length=32)),
('delete_flag', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='UserGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('userId', models.IntegerField()),
('groupId', models.IntegerField()),
('delete_flag', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='UserPrivileges',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('userId', models.IntegerField()),
('privilegesId', models.IntegerField()),
('delete_flag', models.CharField(max_length=4)),
],
),
]
| UTF-8 | Python | false | false | 2,614 | py | 4 | 0001_initial.py | 3 | 0.521423 | 0.51492 | 0 | 67 | 38.014925 | 114 |
ygzylmz/theNextTrace | 9,105,330,692,341 | d6f7ae3fbb5de7652b1d1a67ef23609644c0b94c | 7447ddef42bb3dc859a48d8a05e7b4f0832ecb55 | /blog/migrations/0025_auto_20201014_1845.py | 306cd6472ae4cc678c76a93f84e27e1063f8aa4d | []
| no_license | https://github.com/ygzylmz/theNextTrace | f72feca5402b203afea91a3fe8d719d2e1ed9744 | f46efcb9575c83d865ee6b841943ea0292a1f094 | refs/heads/master | 2022-12-31T11:36:35.116518 | 2020-10-22T14:20:44 | 2020-10-22T14:20:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2020-10-14 18:45
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0024_auto_20200914_1043'),
]
operations = [
migrations.AlterField(
model_name='blogdizi',
name='content',
field=ckeditor.fields.RichTextField(max_length=10000, null=True, verbose_name='İcerik'),
),
migrations.AlterField(
model_name='blogfilm',
name='content',
field=ckeditor.fields.RichTextField(max_length=10000, null=True, verbose_name='İcerik'),
),
migrations.AlterField(
model_name='bloggezi',
name='content',
field=ckeditor.fields.RichTextField(max_length=10000, null=True, verbose_name='İcerik'),
),
migrations.AlterField(
model_name='bloghayat',
name='content',
field=ckeditor.fields.RichTextField(max_length=10000, null=True, verbose_name='İcerik'),
),
]
| UTF-8 | Python | false | false | 1,137 | py | 22 | 0025_auto_20201014_1845.py | 17 | 0.599294 | 0.553398 | 0 | 36 | 30.472222 | 100 |
arnabs542/Leetcode-38 | 16,080,357,596,918 | dcf8bd6b88034b5eb09ab3480fa86aaddcdd933d | c6ec292a52ea54499a35a7ec7bc042a9fd56b1aa | /Python/90.py | 06ec0bed0222cb95586ce2e133a8a11258bb8a3d | []
| no_license | https://github.com/arnabs542/Leetcode-38 | ad585353d569d863613e90edb82ea80097e9ca6c | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | refs/heads/master | 2023-02-01T01:18:45.851097 | 2020-12-19T03:46:26 | 2020-12-19T03:46:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
nums.sort()
self.results = []
visited = [False] * len(nums)
self.dfs(nums, visited, [], 0)
return self.results
def dfs(self, nums, visited, prev, start):
self.results.append(prev)
if start == len(nums):
return
for i in range(start, len(nums)):
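            # skip a duplicate value when its earlier equal element was not taken at this level,
            # so each multiset of values is generated only once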
if i > 0 and nums[i - 1] == nums[i] and not visited[i - 1]:
continue
visited[i] = True
self.dfs(nums, visited, prev + [nums[i]], i + 1)
visited[i] = False
| UTF-8 | Python | false | false | 658 | py | 364 | 90.py | 362 | 0.484802 | 0.477204 | 0 | 19 | 32.947368 | 71 |
nusaibsqli/TCPShell | 13,176,959,697,811 | a73239e6da0227a91cf3eff6380aa0454122ad18 | c31ba668f35095075186b3bfd9250c97a6b2d80c | /generate_password.py | d0cc6b91394f2b210eabb78f2a849429109b1e54 | []
| no_license | https://github.com/nusaibsqli/TCPShell | 1e765b04cc740e92483784c0c6d4d9e659cc6047 | 9a15b36d0cceac6d46580058003b7093842ca705 | refs/heads/master | 2021-09-15T12:50:22.202323 | 2018-06-02T01:47:50 | 2018-06-02T01:47:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
clrPasswd = raw_input("Enter Password: ")
encPasswd = ''
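# shift each character code left by one bit (OR-ing in its top bit) as a light obfuscation;
# for ASCII input this simply doubles the byte value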
for char in clrPasswd:
_tmp = (ord(char) << 1) | (ord(char) >> 7)
#print _tmp
encPasswd += chr(_tmp)
encPasswd = '\\x' + '\\x'.join(x.encode('hex') for x in encPasswd)
print encPasswd
| UTF-8 | Python | false | false | 248 | py | 5 | generate_password.py | 1 | 0.616935 | 0.608871 | 0.008065 | 11 | 21.454545 | 66 |
shuuki4/MachineLearningInAction | 5,291,399,734,393 | 45f5f8dba6fe2d64b122628bc792e21bdfd6773a | 661399426fed233f508430a211cc952339faa7da | /ID3DecisionTree+TreePlot.py | b189a01efc1334020afbd111c5b3ebcf889d750f | []
| no_license | https://github.com/shuuki4/MachineLearningInAction | 1dd1776ae962c0a8e28bb0115e72d369e30f0bbd | 6d539aa099593d555fd704f9707b9bdb24274fd5 | refs/heads/master | 2021-03-12T22:55:02.952965 | 2015-09-13T16:13:44 | 2015-09-13T16:13:44 | 42,063,251 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
## This code generates an ID3 Decision Tree and plot this tree by matplotlib ##
## TreePlotting Codes are from text, rest by YuKiSa ##
from numpy import *
import matplotlib.pyplot as plt
from math import log
## Tree Plot Section ##
## tree plot codes are from MLinA text ##
## @author : Peter Harrington ##
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictonaires, if not they are leaf nodes
numLeafs += getNumLeafs(secondDict[key])
else: numLeafs +=1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictonaires, if not they are leaf nodes
thisDepth = 1 + getTreeDepth(secondDict[key])
else: thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):#if the first key tells you what feat was split on
numLeafs = getNumLeafs(myTree) #this determines the x width of this tree
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0] #the text label for this node should be this
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictonaires, if not they are leaf nodes
plotTree(secondDict[key],cntrPt,str(key)) #recursion
else: #it's a leaf node print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
#if you do get a dictonary you know it's a tree, and the first element will be another dict
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
#createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo puropses
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
## ID3 Decision Tree Section ##
## dataSet = [[feature1, feature2, ..., featureN, label], ... [feature1, ..., label]]
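## e.g. [[1, 1, 'yes'], [1, 0, 'no']] : each row is one sample, the last column is the class label (see myData() below)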
def calcShannonEnt(dataSet) :
m = len(dataSet)
labelCount = {};
for featureVec in dataSet :
labelCount[featureVec[-1]] = labelCount.get(featureVec[-1], 0.0)+1.0;
totalEnt = 0.0;
for label in labelCount.keys() :
p = labelCount[label]/float(m)
totalEnt -= p*log(p, 2)
return totalEnt
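## e.g. for myData() below (2 'yes' vs 3 'no') : -(2/5)*log2(2/5) - (3/5)*log2(3/5) ~= 0.971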
def dataSplit(dataSet, divFeatureIndex, value) :
returnList = []
for featureVec in dataSet :
if featureVec[divFeatureIndex]==value :
newfeatVec = featureVec[:divFeatureIndex]
newfeatVec.extend(featureVec[divFeatureIndex+1:])
returnList.append(newfeatVec)
return returnList
def chooseBestFeature(dataSet) :
bestEntropy = calcShannonEnt(dataSet)
bestFeatureIndex = -1
for i in range(len(dataSet[0])-1) :
featList = [example[i] for example in dataSet] # fetch all available featureList
featList = set(featList)
        nowEntropy = 0.0
        for feature in featList :
            subSet = dataSplit(dataSet, i, feature)
            # weight each subset's entropy by its proportion (standard ID3 information gain)
            nowEntropy += len(subSet)/float(len(dataSet))*calcShannonEnt(subSet)
if (bestEntropy > nowEntropy) :
bestEntropy = nowEntropy
bestFeatureIndex = i
return bestFeatureIndex
def majorityVote(dataSet) :
labelCount = {}
bestCount = -1
bestLabel = dataSet[0][-1]
for vec in dataSet :
label = vec[-1]
labelCount[label] = labelCount.get(label, 0)+1
if bestCount < labelCount[label] :
bestCount = labelCount[label]
bestLabel = label
return bestLabel
def generateTree(dataSet, labels) :
## stop condition ##
# condition 1 : no more benefit on dividing
bestFeatureIndex = chooseBestFeature(dataSet)
if bestFeatureIndex==-1 : return majorityVote(dataSet)
# condition 2 : only one kind left
kindList = [vec[-1] for vec in dataSet]
kindSet = set(kindList)
    if (len(kindSet)==1) : return kindList[0]
# condition 3 : no more labels
if (len(labels)==0) : return majorityVote(dataSet)
returnTree = {}
returnTreeElement = {}
availableFeatureValSet = [vec[bestFeatureIndex] for vec in dataSet]
availableFeatureValSet = set(availableFeatureValSet)
newLabels = labels[:bestFeatureIndex]
newLabels.extend(labels[bestFeatureIndex+1:])
for val in availableFeatureValSet :
returnTreeElement[val] = generateTree(dataSplit(dataSet, bestFeatureIndex, val), newLabels)
returnTree[labels[bestFeatureIndex]] = returnTreeElement
return returnTree
def myData() :
return [[1,1,'yes'], [1,1,'yes'], [1,0,'no'], [0,1,'no'], [0,1,'no']]
print createPlot( generateTree(myData(), ['feature1', 'feature2'])) | UTF-8 | Python | false | false | 5,965 | py | 4 | ID3DecisionTree+TreePlot.py | 4 | 0.692037 | 0.675105 | 0 | 160 | 36.28125 | 122 |
enadol/bundesliga-visual | 8,237,747,287,291 | c3ea216b82df004e44e69c4443d47682009dc68b | d13b8d97fee668ecfbeab20c446f319c6bad9266 | /provisionaales/bljson-puntos.py | 3f0125ebfdb095491bef33725ad202b7ebad3bd3 | []
| no_license | https://github.com/enadol/bundesliga-visual | 6fb3e8ad6c62ddb1e34679c773151ba28e4977bd | 2bfbd397b1139ed2b7cd7bcefb22d3fc65c1fb9f | refs/heads/master | 2016-09-14T04:51:14.652863 | 2016-05-07T18:49:00 | 2016-05-07T18:49:00 | 58,158,705 | 0 | 0 | null | false | 2016-05-07T18:49:01 | 2016-05-05T20:30:29 | 2016-05-05T20:30:42 | 2016-05-07T18:49:00 | 406 | 0 | 0 | 0 | Python | null | null | import urllib
import json
url="2015-2016/bl.json"
uh=urllib.urlopen(url)
data=uh.read()
js=json.loads(data)
i=0
ganador=None
perdedor=None
puntoslocal=0
puntosvisitante=0
sumavisitante=0
puntos=0
sumalocal=0
def getPuntos(equipo, afavor, encontra):
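    """Return the league points `equipo` earns from one match: 3 for a win, 1 for a draw, 0 for a loss."""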
puntos=0
if afavor > encontra:
puntos=3
ganador=equipo
perdedor=contrario
elif afavor==encontra:
puntos=1
ganador="Empate"
perdedor=ganador
else:
puntos=0
ganador=contrario
perdedor=equipo
return puntos
def getPuntosAcumulados(puntoslocal, puntos):
puntoslocal=puntoslocal+puntos
return puntoslocal
jornadainput=raw_input("Ingrese la jornada: ")
jornada=int(jornadainput)-1
equipoinput=raw_input("Ingrese el equipo: ")
if jornada <0 or jornada >=34:
print "No se jugó la jornada "+jornadainput+" en ese torneo. Verifique y vuelva a ingresar."
jornada=None
else:
for fecha in range(0,jornada+1):
jornadascompletas=js['rounds'][fecha]['name']
for i in range(0,9):
equipo=js['rounds'][fecha]['matches'][i]['team1']['name']
afavor=js['rounds'][fecha]['matches'][i]['score1']
contrario=js['rounds'][fecha]['matches'][i]['team2']['name']
encontra=js['rounds'][fecha]['matches'][i]['score2']
if equipo>0 and equipo==equipoinput:
puntoslocal=getPuntos(equipo, afavor, encontra)
sumalocal=getPuntosAcumulados(sumalocal, puntoslocal)
if equipo>0 and contrario==equipoinput:
puntosvisitante=getPuntos(contrario, encontra, afavor)
sumavisitante=getPuntosAcumulados(sumavisitante, puntosvisitante)
sumapuntos=sumalocal+sumavisitante
print "El equipo "+equipoinput+" sumaba "+str(sumalocal)+" como local a la "+jornadascompletas
print "El equipo "+equipoinput+" sumaba "+str(sumavisitante)+" como visitante a la "+jornadascompletas
print "El equipo "+equipoinput+" sumaba "+str(sumapuntos)+" puntos en total a la "+jornadascompletas
uh.close()
| UTF-8 | Python | false | false | 1,894 | py | 21 | bljson-puntos.py | 15 | 0.73083 | 0.713908 | 0 | 75 | 24.213333 | 104 |
fooofei/py_pieces | 13,340,168,465,165 | 170095026f051f3389e0ae50f053e64eedf17158 | e132739824d2e13eeed79975603133e58e5f4027 | /iterator_itertools_tee.py | 557b460597c56a8232667d3483020064068fe866 | []
| no_license | https://github.com/fooofei/py_pieces | 929274ef5415b89d175c38b8cf92ab13db6cbc6a | 264b2fdb203132d09398250b2b509f23285e4add | refs/heads/master | 2021-01-23T19:12:16.586391 | 2021-01-11T03:10:57 | 2021-01-11T03:10:57 | 83,008,648 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
'''
use itertools.tee to copy a generator
best suited when the resulting iterators stay close together (within ~10 items of each other)
1 tee() produces two generators, but no additional element instances are created
2 both generators hold references to the same instances: mutating an element through one generator is also visible through the other
3 the official docs warn that once itertools.tee has split an iterator, the parent iterator should not be used again https://docs.python.org/2/library/itertools.html
4 the official docs suggest that if one iterator consumes most of the data before the others start, list() is faster than tee()
'''
import itertools
import sys
import unittest
import copy
count = 0
class Instance(object):
def __init__(self, v):
global count
print('instance init {}'.format(count))
count += 1
self._v = v
@property
def v(self):
return self._v
def __del__(self):
print('instance del')
def __str__(self):
return super(Instance, self).__str__()
def __repr__(self):
return '{}'.format(self.v)
class MyTestCase(unittest.TestCase):
def test_fetch_iterator_value(self):
a = [Instance(i) for i in range(10)]
ai = iter(a)
self.assertEqual(a, list(ai))
self.assertEqual([], list(ai))
def test_tee_iterator1(self):
a = [Instance(i) for i in range(10)]
ai = iter(a)
ai1, ai2 = itertools.tee(ai)
self.assertEqual(a, list(ai1))
self.assertEqual([], list(ai))
def test_tee_iterator2(self):
a = [Instance(i) for i in range(10)]
ai = iter(a)
ai1, ai2 = itertools.tee(ai)
self.assertEqual(a, list(ai1))
self.assertEqual(a, list(ai2))
def test_tee_iterator3(self):
a = [Instance(i) for i in range(10)]
ai = iter(a)
ai1, ai2 = itertools.tee(ai)
self.assertEqual(a, list(ai))
self.assertEqual([], list(ai1))
self.assertEqual([], list(ai2))
def test_tee_iterator4(self):
a = [Instance(i) for i in range(10)]
ai = iter(a)
ai1, ai2 = itertools.tee(ai)
        # this will affect a and list(ai2)
for v in ai1:
if v.v == 5:
v._v = 55
self.assertEqual(a, list(ai2))
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 2,300 | py | 77 | iterator_itertools_tee.py | 70 | 0.565403 | 0.54455 | 0 | 96 | 20.979167 | 97 |
kopok2/PythonCourse | 16,381,005,306,576 | ccc7cce39e26b05570e878851b951747a8aae00f | cdf2e3236e93ab33660cd7dc48e10ca8978ae9c3 | /Lecture9/python_dynamics.py | 58f42539fab0365fc8a7fc31ec09f40bc39ad01d | [
"MIT"
]
| permissive | https://github.com/kopok2/PythonCourse | ef163250a31e23c4815d6073c2059db994ddd35a | f741b17adaa7290380e83408ed24100127837ad7 | refs/heads/master | 2021-07-08T05:12:48.356642 | 2020-11-30T09:24:01 | 2020-11-30T09:24:01 | 215,096,810 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
"""
Module aimed to demonstrate dynamic nature of Python's code.
"""
import datetime
class Lecture:
def __init__(self, no):
self.no = no
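    # the class body runs once at import time, so this if/else decides which __str__ the class ends up with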
if datetime.datetime.now().hour > 14:
def __str__(self):
return f"Lecture {self.no} has finished."
else:
def __str__(self):
return f"Lecture {self.no} has started."
if __name__ == '__main__':
l9 = Lecture(9)
print(l9)
| UTF-8 | Python | false | false | 444 | py | 33 | python_dynamics.py | 14 | 0.554054 | 0.540541 | 0 | 23 | 18.304348 | 60 |
inverseTrig/leet_code | 4,879,082,853,432 | 16e79eda9fde0b41ee55610ed21aac070c5db946 | eba1bc6c853cf92a002eb3cf7a4ff71507fd1072 | /202_happy_number.py | 47181a54a3d22469308f66df3c740b190867d87f | []
| no_license | https://github.com/inverseTrig/leet_code | ab3b155d4ec581684be5ae0413c49d94492cbe1b | 213a221f1577dd3346da794ef73e802af803d58b | refs/heads/master | 2021-12-21T11:30:09.755104 | 2021-12-16T04:46:10 | 2021-12-16T04:46:10 | 86,788,182 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def isHappy(self, n: int) -> bool:
seen = set()
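        # remember every digit-square sum produced; seeing one again means the sequence is cycling, so n is not happy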
while n != 1:
nstr = str(n)
nxt = 0
for each in nstr:
nxt += int(each) ** 2
n = nxt
if n in seen:
return False
else:
seen.add(n)
return True
sol = Solution()
print(sol.isHappy(n=19))
print(sol.isHappy(n=2))
| UTF-8 | Python | false | false | 423 | py | 168 | 202_happy_number.py | 168 | 0.41844 | 0.404255 | 0 | 21 | 19.142857 | 38 |
coralhalperin/slides | 14,388,140,491,208 | 2dbc17fbe59e00939c708c65ab0e6c73f7e9655f | 2120db28dbe55b8a9185ae8aa42a769985491996 | /python/examples/lists/master_mind.py | 7286bcf73ed3ebe72aa9feed42b8c8b66d710cf2 | []
| no_license | https://github.com/coralhalperin/slides | 7b149cb61840eb1724cb1571bba2503b19278780 | 0c83e1d1e771c9d4b110542b2ad64bbc6bd7b81a | refs/heads/main | 2023-01-04T14:02:11.406990 | 2020-10-25T06:20:07 | 2020-10-25T06:20:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
width = 4
USED = '_'
hidden = random.sample(range(10), width)
# print(hidden)
while True:
# print(hidden)
inp = input("your guess ({} digits):".format(width))
if inp == 'x':
print("Bye")
exit()
if len(inp) != width:
print("We need exactly {} characters".format(width))
continue
guess = []
for cr in inp:
guess.append(int(cr))
# guess = list(map(int, inp))
# print(guess)
if hidden == guess:
print("Match!")
break
my_hidden = hidden[:]
my_guess = guess[:]
result = ''
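    # First pass: mark exact matches with '*' and blank out those positions so
    # the second pass only counts misplaced digits (marked '+') once each.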
for i in range(width):
if my_hidden[i] == my_guess[i]:
result += '*'
my_hidden[i] = USED
my_guess[i] = USED
for i in range(width):
if my_guess[i] == USED:
continue
if my_guess[i] in my_hidden:
loc = my_hidden.index(my_guess[i])
my_hidden[loc] = USED
guess[i] = USED
result += '+'
print(''.join(result))
| UTF-8 | Python | false | false | 1,032 | py | 67 | master_mind.py | 23 | 0.492248 | 0.489341 | 0 | 48 | 20.5 | 60 |
ethz-asl/libpointmatcher | 25,769,818,495 | cca6a31e8b02ebcb47f04d09058d5b8c95ad29a6 | e7d1531b4b0bcecb4fb436cafbb8a3fe363c02c7 | /examples/python/icp_simple.py | b68367dd3c6c06bcd351b02d1a07d5b2ad014775 | [
"BSD-3-Clause"
]
| permissive | https://github.com/ethz-asl/libpointmatcher | 91d9c63f285964da00a9d95bf906adc832ffa44c | 3ace94a6959137d8d6a9a7278bf2644ffdcf73d3 | refs/heads/master | 2023-09-02T20:24:16.436314 | 2023-08-11T21:52:32 | 2023-08-11T21:52:32 | 1,266,625 | 1,446 | 567 | BSD-3-Clause | false | 2023-08-13T21:00:25 | 2011-01-18T10:00:28 | 2023-08-11T11:22:40 | 2023-08-13T21:00:23 | 19,865 | 1,413 | 522 | 84 | C++ | false | false | # Code example for ICP taking 2 points clouds (2D or 3D) relatively close
# and computing the transformation between them.
from pypointmatcher import pointmatcher as pm
PM = pm.PointMatcher
DP = PM.DataPoints
# Path of output directory (default: tests/icp_simple/)
# The output directory must already exist
# Leave empty to save in the current directory
output_base_directory = "tests/icp_simple/"
# Name of output files (default: test)
output_base_file = "test"
# Toggle to switch between 2D and 3D clouds
is_3D = True
if is_3D:
# Load 3D point clouds
ref = DP(DP.load('../data/car_cloud400.csv'))
data = DP(DP.load('../data/car_cloud401.csv'))
test_base = "3D"
else:
# Load 2D point clouds
ref = DP(DP.load('../data/2D_twoBoxes.csv'))
data = DP(DP.load('../data/2D_oneBox.csv'))
test_base = "2D"
# Create the default ICP algorithm
icp = PM.ICP()
# See the implementation of setDefault() to create a custom ICP algorithm
icp.setDefault()
# Compute the transformation to express data in ref
T = icp(data, ref)
# Transform data to express it in ref
data_out = DP(data)
icp.transformations.apply(data_out, T)
# Save files to see the results
ref.save(f"{output_base_directory + test_base}_{output_base_file}_ref.vtk")
data.save(f"{output_base_directory + test_base}_{output_base_file}_data_in.vtk")
data_out.save(f"{output_base_directory + test_base}_{output_base_file}_data_out.vtk")
print(f"Final {test_base} transformations:\n{T}\n".replace("[", " ").replace("]", " "))
| UTF-8 | Python | false | false | 1,514 | py | 341 | icp_simple.py | 276 | 0.703435 | 0.690885 | 0 | 49 | 29.897959 | 87 |
rajesh305k/python_programs_9.26.2021 | 6,356,551,600,791 | a188758b8312f16373e6a2a307a85a76918d9d60 | d97e1acb6450c6303b44c0c3a4b8142eb0c79cae | /Ram/Assignment/Day4/assignment1.py | a556229f06ed4e85e3557eb912caa41e2b89593c | []
| no_license | https://github.com/rajesh305k/python_programs_9.26.2021 | 58e63e6ff5b5a83436e75f5e2ee52c8570ee9df0 | 61a69cd1e9ec0d91a36d1cb5501db11c0d017778 | refs/heads/main | 2023-08-24T08:11:14.789908 | 2021-10-30T04:47:44 | 2021-10-30T04:47:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | amount=int(input("Enter amount :"))
# 1.Write a program to find credited amount,balance using **if else and operators.**
# Note:variables:**amount,balance,credited_amount**,
# Formula: **credited_amount=amount**,**balance+=credited_amount**,
# **amount** should be greater than zero
balance=10000
if(amount!=0):
if(amount>0):
credited_amount=amount
balance+=credited_amount
print("Balance : ",balance)
print("credited_amount : ",credited_amount)
elif(abs(amount)<balance):
debited_amount=abs(amount)
balance-=debited_amount
print("Debited Amount :",debited_amount)
print("balance Amount :",balance)
else:
print("you have eceeded your limit")
else:
print("Please enter a valid amount")
| UTF-8 | Python | false | false | 781 | py | 66 | assignment1.py | 63 | 0.654289 | 0.644046 | 0 | 22 | 34.5 | 84 |
angkoonhian/hacknroll2021 | 11,364,483,487,053 | 04dbcd3eaf50637768a9fb6058d2c5a79c13cc87 | d27f3f6340e4124d9949d7d53b41e9e02296e75b | /word_generator.py | b8f45d921f64bf2671d82f671c211d86025f7f39 | []
| no_license | https://github.com/angkoonhian/hacknroll2021 | 60f683b52c1478215a6a613e32abb6b8fbd36b7c | 9f1da6503cd7b5872f11e8e4cbc3c4d7f001c20c | refs/heads/main | 2023-02-17T07:13:27.781828 | 2021-01-09T03:01:10 | 2021-01-09T03:01:10 | 325,207,862 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from transformers import GPT2Tokenizer
from transformers import TFGPT2Model, TFGPT2LMHeadModel
import tensorflow as tf
import numpy as np
import os
# Load the GPT-2 model/tokenizer into memory
def load_model_tokenizer_GPT2():
"""
    Loads the GPT-2 model and tokenizer from a local directory. Replace the
    path with 'gpt2' to download the pretrained weights instead.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
tokenizer = GPT2Tokenizer.from_pretrained(f'{dir_path}\\gpt2_model')
model = TFGPT2LMHeadModel.from_pretrained(f'{dir_path}\\gpt2_model')
return tokenizer, model
# Tokenize the given text and return the encoded tensor
def tokenize_text(tokenizer, text):
#Using tensorflow backend
#Removing space
if text[-1] == " ":
text = text[:-1]
tokenized = tokenizer.encode(text, return_tensors='tf')
return tokenized
#Next Word algorithm
def next_word_prediction(tokenizer, model, text, num_results = 3):
tokens = tokenize_text(tokenizer, text)
output = model(tokens)
#Returns the logits of predictions for the last word in the sequence
next_word_logits = output.logits[:, -1, :]
softmaxed_next_word = tf.nn.softmax(next_word_logits)
most_likely_words = tf.math.top_k(softmaxed_next_word, num_results)
prob_most_likely_words = np.array(most_likely_words.values).squeeze()
index_most_likely_words = np.array(most_likely_words.indices).squeeze()
prob_word_dic = {}
for i in range(num_results):
prob = prob_most_likely_words[i]
word = tokenizer.decode(int(index_most_likely_words[i]))
prob_word_dic["word" + str(i)] = word
return prob_word_dic
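# Example usage (a sketch; assumes a gpt2_model directory with the pretrained
# weights next to this script, as loaded above):
#   tokenizer, model = load_model_tokenizer_GPT2()
#   print(next_word_prediction(tokenizer, model, "The weather today is"))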
| UTF-8 | Python | false | false | 1,601 | py | 7 | word_generator.py | 5 | 0.697064 | 0.687695 | 0 | 41 | 38.04878 | 76 |
taketakeyyy/atcoder | 575,525,663,496 | e37ac520ca730519e598c9c6f44b622a8e8b2558 | 66213c48da0b752dc6c350789935fe2b2b9ef5ca | /abc/170/b.py | 07d4688555b7b28318e0c248936b34546fadd460 | []
| no_license | https://github.com/taketakeyyy/atcoder | 28c58ae52606ba85852687f9e726581ab2539b91 | a57067be27b27db3fee008cbcfe639f5309103cc | refs/heads/master | 2023-09-04T16:53:55.172945 | 2023-09-04T07:25:59 | 2023-09-04T07:25:59 | 123,848,306 | 0 | 0 | null | false | 2019-04-21T07:39:45 | 2018-03-05T01:37:20 | 2019-04-20T14:48:39 | 2019-04-21T07:39:44 | 193 | 0 | 0 | 0 | Python | false | false | # -*- coding:utf-8 -*-
def solve():
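    # Classic crane-and-turtle problem: X animals have Y legs in total. Try
    # every possible number of turtles (kame, 4 legs) and check whether the
    # remaining cranes (turu, 2 legs) bring the leg count to exactly Y.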
X, Y = list(map(int, input().split()))
for kame in range(0, X+1):
turu = X - kame
if kame*4 + turu*2 == Y:
print("Yes")
return
print("No")
if __name__ == "__main__":
solve()
| UTF-8 | Python | false | false | 271 | py | 1,957 | b.py | 1,170 | 0.424354 | 0.405904 | 0 | 16 | 15.9375 | 42 |
gustcomer/pynnystock_analyser | 15,779,709,874,271 | 369ce2c1c917a98a99ef9ade3c47ad2e98691d81 | c68506d19ffdc22032a17dd97c5aff23bc7e1301 | /pynnystock/strategies/stophighpre/ParametersSHP.py | d290a92660f363b3ffa56034fd0288f106b00982 | []
| no_license | https://github.com/gustcomer/pynnystock_analyser | a31c0cfb3f75fdffa8cf4fa98c717f20e4525b89 | 5ffacae8db1ee21dcc491317554ae469ad82233a | refs/heads/master | 2023-03-22T01:39:35.920679 | 2021-03-12T20:54:03 | 2021-03-12T20:54:03 | 324,470,954 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ...Parameters import Parameters
class ParametersSHP(Parameters):
def __init__(self):
		super().__init__() # initializes everything, but some of the superclass variables are left out. Bad software engineering practice
		# we repeat the declaration of some parameters below just for emphasis. Bad practice.
self.short_after = 0.1
self.exit_target = 0.3
		self.exit_stop_margin = 0.1 # if the pre-market high is $5.00, the stop will sit at $5.50
def setAlgoParameters( self,
short_after = 0.1,
exit_target = 0.3,
exit_stop_margin = 0.1):
self.short_after = short_after
self.exit_target = exit_target
self.exit_stop_margin = exit_stop_margin
def __repr__(self):
		s='PARAMETERS FOR THE STOP-AT-HIGH-OF-PRE-MARKET ALGORITHM\n'
s = s + 'FILTERING PARAMETERS\n'
s = s + f"prevol_threshold: {self.prevol_threshold}\n"
s = s + f"open_dolar_threshold: {self.open_dolar_threshold}\n"
s = s + f"gap_threshold: {self.gap_threshold}\n"
s = s + f"F_low_threshold: {self.F_low_threshold}\n"
s = s + f"F_high_threshold: {self.F_high_threshold}\n"
s = s + f"\n"
s = s + f'TRADING PARAMETERS\n'
s = s + f"short_after: {self.short_after}\n"
s = s + f"exit_target: {self.exit_target}\n"
s = s + f"exit_stop_margin: {self.exit_stop_margin}\n"
s = s + f"\n"
s = s + f'SIMULATION PARAMETERS\n'
s = s + f"start_money: {self.start_money}\n"
s = s + f"allocation: {self.allocation}\n"
s = s + f"locate_fee: {self.locate_fee}\n"
s = s + f"commission: {self.commission}\n"
return s | UTF-8 | Python | false | false | 1,527 | py | 49 | ParametersSHP.py | 32 | 0.648258 | 0.636423 | 0 | 46 | 32.086957 | 122 |
chitakeo/Nagayama_2019 | 3,238,405,389,670 | 88e51b77d5fd511bc52c2f9fb042fe57b1be7ade | cc9a8298c4ce507d092c4bc83fd3659ea37a519d | /cat/cat4.py | d16914357d74f76b970d3de42e04a22b76dd8b0e | []
| no_license | https://github.com/chitakeo/Nagayama_2019 | 2d86eed47aeceb536aebc893b1bc0f14feab9799 | 29bd6316a73ed1da2c3967db689d81beec6bcbae | refs/heads/master | 2020-12-28T01:46:48.511229 | 2020-02-16T13:58:16 | 2020-02-16T13:58:16 | 238,141,590 | 0 | 0 | null | false | 2020-02-16T13:58:17 | 2020-02-04T06:46:53 | 2020-02-16T09:38:13 | 2020-02-16T13:58:16 | 31,374 | 0 | 0 | 0 | Python | false | false | import numpy as np
import cv2
import os
from matplotlib import pyplot as plt
kan_file_path = '../save/fe.jpg' # file path & name for saving the completed image
mach_file_path = '../save/zikken.jpg' # file path & name for saving the matching result
pazu_file_path = '../pazu/kousiki.png' # file path & name of the puzzle image
pice_file_path = '../pazu' # directory holding the piece images
pice = ['/rt.png', '/rrb.png', '/lt.png', '/lb.png'] # file paths of the four corner pieces
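# Overview: each of the four corner pieces is matched against the
# corresponding corner region of the full puzzle image using SIFT features,
# rescaled to the estimated scale and pasted into the output canvas.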
#SIFT
def sif(gray1, gray2):
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(gray1,None)
kp2, des2 = sift.detectAndCompute(gray2,None)
bf = cv2.BFMatcher()
matches =bf.knnMatch(des1,des2, k=2)
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
good = sorted(good, key = lambda x:x[0].distance)
return good, kp1, kp2
# assemble the output image after matching
def create(img1t,img1,img2,gray1,kp1,kp2, n):
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good[:5],None,flags=2)
cv2.imwrite(mach_file_path, img3)
img1_pt = [list(map(int, kp1[m[0].queryIdx].pt)) for m in good]
img2_pt = [list(map(int, kp2[m[0].trainIdx].pt)) for m in good]
    # from here on we assume the matching succeeded
    # compute the vertical and horizontal distances between keypoints in the original image
img1_hei = img1_pt[0][1] - img1_pt[1][1]
img1_wei = img1_pt[0][0] - img1_pt[1][0]
    # compute the vertical and horizontal distances between keypoints in the piece
img2_hei = img2_pt[0][1] - img2_pt[1][1]
img2_wei = img2_pt[0][0] - img2_pt[1][0]
    # compute the height/width scaling ratio between the piece and the original image
height2, weight2 = img2.shape[:2]
if img1_hei == 0 and img2_hei == 0:
if img1_wei == 0 and img2_wei == 0:
pass
else:
wei = abs(img2_wei) / abs(img1_wei)
img2 = cv2.resize(img2, (int(weight2/wei), int(height2)))
elif img1_wei == 0 and img2_wei == 0:
hei = abs(img2_hei) / abs(img1_hei)
img2 = cv2.resize(img2, (int(weight2), int(height2/hei)))
elif img1_pt[0][1] != img2_pt[0][1] and img1_pt[0][0] != img2_pt[0][0]:
wei = abs(img2_wei) / abs(img1_wei)
hei = abs(img2_hei) / abs(img1_hei)
img2 = cv2.resize(img2, (int(weight2/wei), int(height2/hei)))
height, weight = img1t.shape[:2]
    # create a canvas the same size as the whole image; reuse it if it already exists
if os.path.isfile(kan_file_path) :
imageArray = cv2.imread(kan_file_path)
else:
imageArray = np.ones((height, weight, 3), np.uint8)*255
height2,weight2 = img2.shape[:2]
hei41 = int(height / 4)
hei43 = int(hei41 * 3)
wei41 = int(weight / 4)
wei43 = int(wei41 * 3)
    # n == 0: top-left, 1: bottom-left, 2: top-right, 3: bottom-right
if n == 0:
h1 = 0
h2 = height2
w1 = 0
w2 = weight2
elif n == 1:
h1 = height - height2
h2 = height
w1 = 0
w2 = weight2
elif n == 2:
h1 = 0
h2 = height2
w1 = weight - weight2
w2 = weight
elif n == 3:
h1 = height - height2
h2 = height
w1 = weight - weight2
w2 = weight
imageArray[h1 : h2 , w1 : w2 ] = img2
cv2.imwrite(kan_file_path, imageArray)
return imageArray
# find which of the four corner pieces matches the given corner region
def find4(gray1):
good = []
max_len = 0
for i in range(4):
f = pice_file_path + pice[i]
img2_1 = cv2.imread(f)
gray2_1 = cv2.cvtColor(img2_1, cv2.COLOR_BGR2GRAY)
check = []
check, kp3, kp4 = sif(gray1, gray2_1)
print(len(check))
if max_len < len(check):
max_len = len(check)
img2 = img2_1
good = check
kp1 = kp3
kp2 = kp4
return good, img2, kp1, kp2
if __name__ == '__main__':
    img1 = cv2.imread(pazu_file_path) # original puzzle image
height, weight = img1.shape[:2]
hei41 = int(height / 4)
hei43 = int(hei41 * 3)
wei41 = int(weight / 4)
wei43 = int(wei41 * 3)
img1_lt = img1[0 : hei41 , 0 : wei41]
img1_lb = img1[hei43 : height , 0 : wei41]
img1_rt = img1[0 : hei41 , wei43 : weight]
img1_rb = img1[hei43 : height , wei43 : weight]
    coner = [img1_lt, img1_lb, img1_rt, img1_rb] # ordered: top-left, bottom-left, top-right, bottom-right
for n in range(4):
img1_tk = coner[n]
gray1 = cv2.cvtColor(img1_tk, cv2.COLOR_BGR2GRAY)
good = []
good, img2, kp1, kp2 = find4(gray1)
dst = create(img1,img1_tk, img2, gray1, kp1, kp2, n)
while(1):
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image',dst)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows() | UTF-8 | Python | false | false | 4,345 | py | 7 | cat4.py | 6 | 0.63109 | 0.55272 | 0 | 158 | 23.556962 | 72 |
WillNye/duolingo_x | 8,761,733,285,529 | 004d1ccd7375c327d089998c19e25b353ba201c9 | 7ea6d43d98813d9839f3a32e136ca04933922737 | /training/models.py | f6c449e19cd157838fb18952520d0f1452f6a4a8 | []
| no_license | https://github.com/WillNye/duolingo_x | d5e60749acc7e75fa0aca9994cc9ad83695dd211 | 923f38de89c60e0f8c945df4a13b360c1369f960 | refs/heads/master | 2021-01-13T13:04:21.456410 | 2017-01-14T03:01:05 | 2017-01-14T03:01:05 | 78,687,778 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from datetime import datetime as dt
class Language(models.Model):
name = models.CharField(max_length=250, null=False)
audio_base = models.CharField(max_length=250, null=False)
def __str__(self):
return self.name
class Phrase(models.Model):
english_translation = models.CharField(max_length=750, null=False)
foreign_translation = models.CharField(max_length=750, null=False)
language = models.ForeignKey(Language, null=False)
audio_id = models.CharField(max_length=250, null=False)
def __str__(self):
return self.english_translation
class PhraseStats(models.Model):
user = models.ForeignKey(User, null=False)
phrase = models.ForeignKey(Phrase, null=False)
is_make_streak = models.BooleanField(default=False, null=False)
streak_number = models.IntegerField(default=0, null=False)
    # Pass the callable (not its result) so the default is evaluated on each
    # save instead of once at import time.
    last_heard = models.DateTimeField(default=dt.now, null=False)
def __str__(self):
if self.is_make_streak:
stat_str = "Made {} in a row"
else:
stat_str = "Missed {} in a row"
return stat_str.format(str(self.streak_number))
| UTF-8 | Python | false | false | 1,205 | py | 7 | models.py | 5 | 0.685477 | 0.672199 | 0 | 37 | 31.540541 | 70 |
daniel-l-merhi/Python-and-ROS-Coursework | 14,259,291,435,794 | b4f792aad8b88d7cd9b93f0b552da2b2bc259752 | 5508e79fb43398f0622d0df986354f530fe6032c | /Python for Robotics/robot_control/get_laser_method.py | 1e4af28b341ce1452c0b3f24c7c2b8ee7d520468 | []
| no_license | https://github.com/daniel-l-merhi/Python-and-ROS-Coursework | 14ac226379da6e8a9a0319244166e7a712df7f88 | bc9956b264f5388184c0388d7106d89c162d718b | refs/heads/main | 2023-08-06T12:51:35.236840 | 2021-10-09T02:25:00 | 2021-10-09T02:25:00 | 415,173,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from robot_control_class import RobotControl
rc = RobotControl(robot_name="summit")
def return_laser(a, b, c):
laser_list = [rc.get_laser_summit(a), rc.get_laser_summit(b), rc.get_laser_summit(c)]
return laser_list
l = return_laser(0, 360, 400)
print (l) | UTF-8 | Python | false | false | 267 | py | 43 | get_laser_method.py | 31 | 0.692884 | 0.666667 | 0 | 12 | 21.333333 | 89 |
vigman-xxxw/python-code | 936,302,905,518 | bd629e12fc3d65e0111493f1f9b86c728db02632 | 16d33ca26751e50d518f79bcb8f1c05dd7e79f7b | /二分法.py | 20af27a85f4cb0833f444355126f9c70e5b7a8a2 | []
| no_license | https://github.com/vigman-xxxw/python-code | 6017fac6c7c019384957c63b38ef76b6919726d3 | 004d18576065ba94044659d4b83a361675a0f178 | refs/heads/master | 2023-08-19T01:53:52.624302 | 2021-10-11T09:03:53 | 2021-10-11T09:03:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 给定数组A 目标值T 查找T在A中位置
def binary_search(arr, key):
start = 0
end = len(arr) - 1
while start <= end:
mid = int((start + end) / 2)
if arr[mid] < key:
start = mid + 1
elif arr[mid] > key:
end = mid - 1
else:
return mid
return -1
list1 = [i for i in range(100)]
print(list1)
b = binary_search(list1, 5)
| UTF-8 | Python | false | false | 413 | py | 66 | 二分法.py | 66 | 0.49354 | 0.459948 | 0 | 19 | 19.315789 | 36 |
MarcoBorsato/AOC2020 | 13,795,434,976,011 | 8c9334e962500e0e8f12851fdd6e0af220e892dc | ec56e52fdb18c80c2a6eda74a66a8e6e6229c6f5 | /AOCday6.py | e958cbfd2d7d764ca9ed32d4b73abbb15a120ee8 | []
| no_license | https://github.com/MarcoBorsato/AOC2020 | 7d3a8896a60e737b574b3a8b0cffe7820228f95f | f813048f233b7833aedd1ebe126b0bd1cd50502c | refs/heads/master | 2023-03-19T16:02:15.871020 | 2021-03-10T22:27:29 | 2021-03-10T22:27:29 | 344,621,179 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | def readInput(input):
data = []
person = []
line = ""
for i in input.readlines():
i = i.strip()
if i == "":
data.append(person)
person = []
else:
person.append(i)
data.append(person)
return data
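# Part 1: for each group, count the distinct questions answered by anyone
# (the union of characters over the group's lines).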
def countAnswers1(input) -> int:
data = readInput(input)
answers = []
stripped = ""
count = 0
for i in data:
print(i)
for s in "".join(i):
if s not in stripped:
stripped += s
count += len(stripped)
stripped = ""
return count
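# Part 2: for each group, count the questions answered by everyone
# (the intersection of the answer sets of the group's members).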
def countAnswers2(data) -> int:
data = readInput(data)
count = 0
result = 0
for i in data:
for j in range(len(i)):
if j == 0:
answer = set(i[j])
else:
answer = answer.intersection(i[j])
count += len(answer)
return count
if __name__ == '__main__':
input4 = open('inputday6.txt')
print(countAnswers2(input4))
| UTF-8 | Python | false | false | 1,023 | py | 7 | AOCday6.py | 7 | 0.482893 | 0.473118 | 0 | 47 | 20.765957 | 50 |
nasawz/wiki.catke | 5,841,155,567,762 | 9cefe78c2e5477a86cc64fbe92e11912f89e6104 | 9933cb7f69b8e13a970901012fa3672120d944a3 | /diy/syncdb.py | 261cc82eba45f63affc579aae1d3ec5f2a9f3fef | []
| no_license | https://github.com/nasawz/wiki.catke | a78689f1e67fd48c134e95276552012f2d4c61dc | 35d43fe9df92d9413f564b40e18028a931a6e652 | refs/heads/master | 2020-02-03T15:21:18.574729 | 2013-02-04T08:36:56 | 2013-02-04T08:36:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
'''
Synchronize the database tables
@author vfasky@gmail.com
'''
import os
import sys
# set the default system encoding to utf8
reload(sys)
sys.setdefaultencoding('utf8')
# add the third-party library search path
sys.path.append(os.path.join(os.path.dirname(__file__), 'modules'))
# load the application configuration
from wiki import config
# import Database
from xcat import Database
# load the database configuration
Database.load_config(
config.settings['database'].get(config.settings['run_mode'], False)
)
Database.connect()
import wiki.models
import wiki.models.wiki
for m in dir(wiki.models):
model = getattr(wiki.models, m)
if m != 'Model' and str(type(model)) == "<class 'peewee.BaseModel'>":
if model.table_exists() == False:
try:
model.create_table()
except Exception, e:
pass
elif str(type(model)) == "<type 'module'>":
for m2 in dir(model):
model2 = getattr(model, m2)
if m2 not in ('Model') and str(type(model2)) == "<class 'peewee.BaseModel'>":
if model2.table_exists() == False:
try:
model2.create_table()
except Exception, e:
pass
# initial setup: create the default roles if none exist
if 0 == wiki.models.Role.select().count():
ar = wiki.models.Role()
ar.code = 'admin'
ar.name = '管理者'
ar.save()
ar = wiki.models.Role()
ar.code = 'user'
ar.name = '用户'
ar.save()
Database.close()
| UTF-8 | Python | false | false | 1,487 | py | 35 | syncdb.py | 24 | 0.566403 | 0.558507 | 0 | 66 | 20.106061 | 89 |
SuhashisAcharya/automation | 7,524,782,746,848 | 24ab24e059f116b78bc9da4dd37522d3be4ba4a0 | 8bb594bed8e458019d828864cbcd548c8d538e5a | /Infrastructure/validation_matrix_server/media_stream_validator/media_stream_validator | 562257f35b581e879f25745a0b2d9b544f58e55a | []
| no_license | https://github.com/SuhashisAcharya/automation | ff1b8e6ffbd573a2c4edd95575627333fefd0128 | db312f4fc168d297fc027298c865aa3e462d5d25 | refs/heads/master | 2020-03-03T00:38:22.010621 | 2016-06-06T05:31:00 | 2016-06-06T05:31:00 | 65,850,723 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys
import pexpect
import json
import re
from time import gmtime, strftime
#print len(sys.argv)
#print str(sys.argv)
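# Expected command line arguments:
#   1: manifest URL   2: Apple host IP   3: SSH user   4: SSH password
#   5: automation user   6: directory for the status/log file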
if len(sys.argv) < 7:  # the script needs the 6 arguments argv[1]..argv[6]
print 'Insufficient number of commands'
sys.exit(1)
manifest_url=sys.argv[1]
apple_ip=sys.argv[2]
apple_user=sys.argv[3]
apple_pwd=sys.argv[4]
automation_user=sys.argv[5]
status_file_path=sys.argv[6]
def media_stream_validator(manifest_url, apple_ip, apple_user, apple_pwd, status_file_path):
#mediastreamvalidator -O /Users/rebaca/ http://192.168.4.128/AZ_HLS_FILE_BASED_TC2/master/master.m3u8
#hlsreport.py validation_data.json
cmd = '/usr/local/bin/mediastreamvalidator -O /var/log/validation_data.json ' + manifest_url
(command_output1, exitstatus) = \
pexpect.run("ssh " + apple_user + "@" + apple_ip + " '" + cmd + "'", \
events={'Password':'' + apple_pwd + '\n'}, \
timeout=660, withexitstatus=1)
cmd = '/Users/rebaca/hlsreport_updated.py /var/log/validation_data.json'
(command_output2, exitstatus) = \
pexpect.run("ssh " + apple_user + "@" + apple_ip + " '" + cmd + "'", \
events={'Password':'' + apple_pwd + '\n'}, \
timeout=660, withexitstatus=1)
cmd = 'cat /var/log/validation_data_1.json'
(command_output, exitstatus) = \
pexpect.run("ssh " + apple_user + "@" + apple_ip + " '" + cmd + "'", \
events={'Password':'' + apple_pwd + '\n'}, \
timeout=660, withexitstatus=1)
status_file_and_path = status_file_path + '/' + 'MSV_' + automation_user;
status_file_and_path = status_file_and_path + strftime("_%m-%d-%H-%M-%S", gmtime())
status_file_and_path = status_file_and_path + '.log'
file_des = open(status_file_and_path,'w')
file_des.write(command_output)
hls_report_json_data=command_output.splitlines()[1]
must_fix_errors_present = re.search(r'"errorStatusCode": -1', command_output, re.I)
should_fix_errors_present = re.search(r'"errorStatusCode": 0', command_output, re.I)
hls_report_json_object=json.loads(hls_report_json_data)
if must_fix_errors_present:
hls_report_json_object["must_fix_errors_present"] = "YES"
else:
hls_report_json_object["must_fix_errors_present"] = "NO"
if should_fix_errors_present:
hls_report_json_object["should_fix_errors_present"] = "YES"
else:
hls_report_json_object["should_fix_errors_present"] = "NO"
mustfix_shouldfix_msg_json_obj=json.loads("{}")
'''
message_list=hls_report_json_object["messages"]
shouldfix_msg_list_obj = []
mustfix_msg_list_obj = []
if message_list == NONE:
pass
else:
for read_message in message_list
message_type=read_message["errorStatusCode"]
if message_type == "-1"
mustfix_msg_list_obj.append(read_message["errorComment"])
elif message_type == "0"
shouldfix_msg_list_obj.append(read_message["errorComment"])
mustfix_shouldfix_msg_json_obj["mustfix_messages"]=mustfix_msg_list_obj
mustfix_shouldfix_msg_json_obj["shouldfix_messages"]=shouldfix_msg_list_obj
'''
#hls_report_json_object=json.loads(hls_report_json_data)
mustfix_shouldfix_msg_json_obj=json.loads("{}")
# Getting messages from top level hierarchy
message_list=hls_report_json_object["messages"]
shouldfix_msg_list_obj = []
mustfix_msg_list_obj = []
if message_list == None:
pass
else:
for read_message in message_list:
message_type=read_message["errorStatusCode"]
if message_type == -1:
mustfix_msg_list_obj.append(read_message["errorComment"])
elif message_type == 0:
shouldfix_msg_list_obj.append(read_message["errorComment"])
message_object=json.loads("{}")
message_object["mustfix_messages"]=mustfix_msg_list_obj
message_object["shouldfix_messages"]=shouldfix_msg_list_obj
mustfix_shouldfix_msg_json_obj["messages"]=message_object
# Getting messages from each varient and creating json object
varients_list=hls_report_json_object["variants"]
varients_list_created=[]
#print json.dumps(varients_list)
if varients_list == None:
pass
else:
for read_varient in varients_list:
message_list=read_varient["messages"]
bitrate=read_varient["measuredMaxBitrate"]
shouldfix_msg_list_obj = []
mustfix_msg_list_obj = []
message_object=json.loads("{}")
if message_list == None:
pass
else:
for read_message in message_list:
message_type=read_message["errorStatusCode"]
if message_type == -1:
mustfix_msg_list_obj.append(read_message["errorComment"])
elif message_type == 0:
shouldfix_msg_list_obj.append(read_message["errorComment"])
message_object["playlistMaxBitrate"]=bitrate
message_object["mustfix_messages"]=mustfix_msg_list_obj
message_object["shouldfix_messages"]=shouldfix_msg_list_obj
varients_list_created.append(message_object)
mustfix_shouldfix_msg_json_obj["variants"]=varients_list_created
if must_fix_errors_present:
mustfix_shouldfix_msg_json_obj["must_fix_errors_present"] = "YES"
else:
mustfix_shouldfix_msg_json_obj["must_fix_errors_present"] = "NO"
if should_fix_errors_present:
mustfix_shouldfix_msg_json_obj["should_fix_errors_present"] = "YES"
else:
mustfix_shouldfix_msg_json_obj["should_fix_errors_present"] = "NO"
print json.dumps(mustfix_shouldfix_msg_json_obj)
#end of creating json object contains list of all the messages
if must_fix_errors_present or should_fix_errors_present:
print 'FAIL'
else:
print 'PASS'
#print json.dumps(hls_report_json_object)
'''
wrong_playlist = re.search(r'Failed to download playlist.*', command_output, re.I)
valid_playlist = re.search(r'Playlist.*OK', command_output, re.I)
not_run = re.search(r': command not found', command_output, re.I)
if wrong_playlist:
#print 'Invalid Playlist Provided: %s' %(wrong_playlist.group())
print 'FAIL'
elif valid_playlist:
fatal_msg = re.search(r'fatal.*', command_output, re.I)
error_msg = re.search(r'error.*', command_output, re.I)
bfr_error = re.search(r'error.*Decreasing DTS were detected in track 0.*', command_output, re.I)
fail_msg = re.search(r'fail.*', command_output, re.I)
warn_msg = re.search(r'warning.*', command_output, re.I)
#print 'bfr_error: %s' %(bfr_error.group()))
#print 'error_msg: %s' %(error_msg.group()))
if error_msg and bfr_error:
if error_msg.group() == bfr_error.group():
bframe_input_error = True
else:
bframe_input_error = False
if fatal_msg:
pass
#print 'Fatal Message: %s' %(fatal_msg.group())
if error_msg:
pass
#print 'Error Message: %s' %(error_msg.group())
if fail_msg:
pass
#print 'Failure Message: %s' %(fail_msg.group())
if warn_msg:
pass
#print 'Warning Message: %s' %(warn_msg.group())
if fatal_msg or (error_msg and bframe_input_error == False) or fail_msg:
print 'FAIL'
else:
print 'PASS'
else:
print 'NOT_PERFORMED'
'''
#media_stream_validator('http://192.168.3.222/automation/main/mbnebfr/5/master/dummy.m3u8', '192.168.4.113', 'rebaca', 'rebaca')
media_stream_validator(manifest_url, apple_ip, apple_user, apple_pwd, status_file_path)
| UTF-8 | Python | false | false | 8,037 | 134 | media_stream_validator | 79 | 0.59898 | 0.59027 | 0 | 232 | 33.642241 | 128 |
|
devesh37/HackerRankProblems | 10,067,403,355,660 | 363e8330029df04f82b127159adf0093172ff0ef | d65f58cb47b308dc7130a4033c5ec1de61d9814f | /Datastructure_HackerRank/Array/arrayRotation.py | 5dd164efb2caa0a39fa9a4d00aa6c25fb74120db | []
| no_license | https://github.com/devesh37/HackerRankProblems | 1b1fee329d2fae31dea524d272d5c1b596e107ed | 8f7cdd1d96c169ea9071ac8e6ea7dff0788d046f | refs/heads/master | 2022-12-22T18:26:46.688790 | 2020-09-23T06:02:44 | 2020-09-23T06:02:44 | 263,381,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
def left_rotation(a,n,r):
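    # After r left rotations the element at index i ends up at index
    # (i - r) mod n; writing it as (i + n - r) % n keeps the intermediate
    # value non-negative.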
array=[0]*n
i=0
while(i<n):
array[(i+n-r)%n]=a[i]
i+=1
for i in array:
print(i,end=' ')
if __name__ == '__main__':
nd = input().split()
n = int(nd[0])
r = int(nd[1])
a = list(map(int, input().rstrip().split()))
left_rotation(a,n,r) | UTF-8 | Python | false | false | 396 | py | 25 | arrayRotation.py | 25 | 0.510101 | 0.494949 | 0 | 27 | 13.703704 | 48 |
kss2153/hubspot-mr | 18,262,200,954,009 | 1cd1182d243fc86bb66b4d26f0bdab9fe4c0dfd6 | 441543345b718b43d52704200a7a87d98804bb93 | /src/app/db.py | 1c16b40bf0ff9b37296aaa7bf6e747044491238d | []
| no_license | https://github.com/kss2153/hubspot-mr | eed0a25c936bdca5d4b0c23e5f471e441224d604 | b0b85bff42bf6e6a95c56f85090c5bbb1ff4e15d | refs/heads/master | 2023-05-28T20:42:21.141819 | 2019-09-24T22:19:20 | 2019-09-24T22:19:20 | 210,515,808 | 0 | 0 | null | false | 2023-05-01T20:36:40 | 2019-09-24T05:02:12 | 2020-01-21T15:09:38 | 2023-05-01T20:36:39 | 20,778 | 0 | 0 | 1 | Python | false | false | import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
db_user = os.environ.get('CLOUD_SQL_USERNAME')
db_password = os.environ.get('CLOUD_SQL_PASSWORD')
db_name = os.environ.get('CLOUD_SQL_DATABASE_NAME')
db_host = os.environ.get('CLOUD_SQL_HOST')
engine_url = 'mysql+pymysql://{}:{}@{}/{}'.format(db_user, db_password, db_host, db_name)
engine = create_engine(engine_url, pool_size=3)
SqlSession = sessionmaker(bind=engine)
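# Usage sketch (the caller is responsible for committing and closing):
#   session = SqlSession()
#   try:
#       ...  # query / add ORM objects here
#       session.commit()
#   finally:
#       session.close()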
| UTF-8 | Python | false | false | 460 | py | 9 | db.py | 6 | 0.734783 | 0.732609 | 0 | 13 | 34.384615 | 89 |
mennthor/tdepps | 13,125,420,065,650 | ccd34e57f587e6658f7ea61f5529d79ca90d2248 | 490cd642d522ab512ab435580ea92bbae789414c | /tdepps/grb/llh.py | c98fafaed6ebb69255406621a5d9adb62ca897a1 | []
| no_license | https://github.com/mennthor/tdepps | c4f86d11e61b99c1e27d6df2a8f013ea8f1c0a78 | 100035f95321506057677806d25f1695cfea58a9 | refs/heads/master | 2021-09-22T21:05:21.801613 | 2018-08-06T09:38:26 | 2018-08-06T09:38:26 | 90,803,720 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from __future__ import division, absolute_import
import math
import numpy as np
import scipy.optimize as sco
from ..base import BaseLLH, BaseMultiLLH
from ..utils import fill_dict_defaults, all_equal, dict_map
class GRBLLH(BaseLLH):
"""
Stacking GRB LLH
Stacking weights are a-priori fixed with w_theo * w_dec and only a single
signal strength parameter ns is fitted.
"""
def __init__(self, llh_model, llh_opts=None):
"""
Parameters
----------
llh_model : BaseModel instance
Model providing LLH args and signal over background ratio.
llh_opts : dict, optional
LLH options:
- 'sob_rel_eps', optional: Relative threshold under which a single
signal over background ratio is considered zero for speed reasons.
- 'sob_abs_eps', optional: Absolute threshold under which a single
signal over background ratio is considered zero for speed reasons.
- 'ns_bounds', optional: ``[lo, hi]`` bounds for the ``ns`` fit
parameter.
            - 'minimizer', optional: String selecting a scipy minimizer.
- 'minimizer_opts', optional: Options dict for the scipy minimizer.
"""
self._needed_args = ["src_w_dec", "src_w_theo", "nb"]
self.model = llh_model
self.llh_opts = llh_opts
@property
def needed_args(self):
return self._needed_args
@property
def model(self):
return self._model
@model.setter
def model(self, model):
if not all_equal(self._needed_args, model.provided_args):
raise(KeyError("Model `provided_args` don't match `needed_args`."))
# Cache fixed src weights over background estimation, shape (nsrcs, 1)
args = model.get_args()
src_w = args["src_w_dec"] * args["src_w_theo"]
        # Shape is (nsrcs, 1) for the stacking GRB LLH
self._src_w_over_nb = ((src_w / np.sum(src_w)) / args["nb"])[:, None]
self._model = model
@property
def llh_opts(self):
return self._llh_opts.copy()
@llh_opts.setter
def llh_opts(self, llh_opts):
required_keys = []
opt_keys = {
"sob_rel_eps": 0,
"sob_abs_eps": 1e-3,
"ns_bounds": [0., None],
"minimizer": "L-BFGS-B",
"minimizer_opts": {
"ftol": 1e-15, "gtol": 1e-10, "maxiter": int(1e3)
},
}
llh_opts = fill_dict_defaults(llh_opts, required_keys, opt_keys)
if (llh_opts["sob_rel_eps"] < 0 or llh_opts["sob_rel_eps"] > 1):
raise ValueError("'sob_rel_eps' must be in [0, 1]")
if llh_opts["sob_abs_eps"] < 0:
raise ValueError("'sob_abs_eps' must be >= 0.")
if len(llh_opts["ns_bounds"]) != 2:
raise ValueError("'ns_bounds' must be `[lo, hi]`.")
if type(llh_opts["minimizer_opts"]) is not dict:
raise ValueError("'minimizer_opts' must be a dictionary.")
self._llh_opts = llh_opts
def lnllh_ratio(self, ns, X, band_select=True):
""" Public method wrapper """
sob = self._soverb(X, band_select=band_select)
return self._lnllh_ratio(ns, sob)
def fit_lnllh_ratio(self, ns0, X, band_select=True):
""" Fit TS with optimized analytic cases """
def _neglnllh(ns, sob):
""" Wrapper for minimizing the negative lnLLH ratio """
lnllh, lnllh_grad = self._lnllh_ratio(ns, sob)
return -lnllh, -lnllh_grad
if len(X) == 0: # Fit is always 0 if no events are given
return 0., 0.
# Get the best fit parameter and TS. Analytic cases are handled:
# For nevts = [1 | 2] we get a [linear | quadratic] equation to solve.
sob = self._soverb(X, band_select=band_select)
nevts = len(sob)
# Test again, because we applied some threshold cuts
if nevts == 0:
return 0., 0.
elif nevts == 1:
sob = sob[0]
if sob <= 1.: # sob <= 1 => ns <= 0, so fit will be 0
return 0., 0.
else:
ns = 1. - (1. / sob)
ts = 2. * (-ns + math.log(sob))
return ns, ts
elif nevts == 2:
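            # Setting d/dns [-ns + ln(1 + ns*s0) + ln(1 + ns*s1)] = 0 yields a
            # quadratic in ns; the expression below picks its physical
            # (larger) root.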
sum_sob = sob[0] + sob[1]
if sum_sob <= 1.: # More complicated to show but same as above
return 0., 0.
else:
a = 1. / (sob[0] * sob[1])
c = sum_sob * a
ns = 1. - 0.5 * c + math.sqrt(c * c / 4. - a + 1.)
ts = 2. * (-ns + np.sum(np.log1p(ns * sob)))
return ns, ts
else: # Fit other cases
res = sco.minimize(fun=_neglnllh, x0=[ns0],
jac=True, args=(sob,),
bounds=[self._llh_opts["ns_bounds"]],
method=self._llh_opts["minimizer"],
options=self._llh_opts["minimizer_opts"])
ns, ts = res.x[0], -res.fun[0]
if ts < 0.:
                # Sometimes the minimizer doesn't go all the way to 0., so
# TS vals might end up negative for a truly zero fit result
ts = 0.
return ns, ts
def _soverb(self, X, band_select=True):
""" Make an additional cut on small sob values to save time """
if len(X) == 0: # With no events given, we can skip this step
            return np.empty(0, dtype=float)
# Stacking case: Weighted signal sum per source
sob = self._model.get_soverb(X, band_select=band_select)
sob = np.sum(sob * self._src_w_over_nb, axis=0)
if len(sob) < 1:
return np.empty(0)
# Apply a SoB ratio cut, to save computation time on events that don't
# contribute anyway. We have a relative and an absolute threshold
sob_max = np.amax(sob)
if sob_max > 0:
sob_rel_mask = (sob / sob_max) < self._llh_opts["sob_rel_eps"]
else:
sob_rel_mask = np.zeros_like(sob, dtype=bool)
sob_abs_mask = sob < self._llh_opts["sob_abs_eps"]
return sob[~(sob_rel_mask | sob_abs_mask)]
def _lnllh_ratio(self, ns, sob):
""" Calculate TS = 2 * ln(L1 / L0) """
x = ns * sob
ts = 2. * (-ns + np.sum(np.log1p(x)))
# Gradient in ns (chain rule: ln(ns * a + 1)' = 1 / (ns * a + 1) * a)
ns_grad = 2. * (-1. + np.sum(sob / (x + 1.)))
return ts, np.array([ns_grad])
class MultiGRBLLH(BaseMultiLLH):
"""
Class holding multiple GRBLLH objects, implementing the combined GRBLLH
from all single GRBLLHs.
"""
def __init__(self, llh_opts=None):
self._ns_weights = None
self.llh_opts = llh_opts
@property
def names(self):
return list(self._llhs.keys())
@property
def llhs(self):
return self._llhs
@property
def model(self):
return dict_map(lambda key, llh: llh.model, self._llhs)
@property
def needed_args(self):
return dict_map(lambda key, llh: llh.needed_args, self._llhs)
@property
def llh_opts(self):
return self._llh_opts.copy()
@llh_opts.setter
def llh_opts(self, llh_opts):
required_keys = []
opt_keys = {
"ns_bounds": [0., None],
"minimizer": "L-BFGS-B",
"minimizer_opts": {
"ftol": 1e-15, "gtol": 1e-10, "maxiter": int(1e3)
},
}
llh_opts = fill_dict_defaults(llh_opts, required_keys, opt_keys)
if len(llh_opts["ns_bounds"]) != 2:
raise ValueError("'ns_bounds' must be `[lo, hi]`.")
if type(llh_opts["minimizer_opts"]) is not dict:
raise ValueError("'minimizer_opts' must be a dictionary.")
self._llh_opts = llh_opts
def fit(self, llhs):
"""
Takes multiple single GRBLLHs in a dict and manages them.
Parameters
----------
llhs : dict
LLHs to be managed by this multi LLH class. Names must match with
dict keys of provided multi-injector data.
"""
for name, llh in llhs.items():
if not isinstance(llh, GRBLLH):
raise ValueError("LLH object " +
"`{}` is not of type `GRBLLH`.".format(name))
# Cache ns plit weights used in combined LLH evaluation
self._ns_weights = self._ns_split_weights(llhs)
self._llhs = llhs
return
def lnllh_ratio(self, ns, X):
"""
Combine LLH contribution from fitted single LLH instances.
Parameters
----------
ns : float
Total expected signal events ``ns``.
X : dict of recarrays
            Fixed data to evaluate the LLH at
"""
# Loop over ln-LLHs and add their contribution
ts = 0.
ns_grad = 0.
# Add up LLHs for each single LLH
for key, llh in self._llhs.items():
ns_w = self._ns_weights[key]
ts_i, ns_grad_i = llh.lnllh_ratio(ns=ns * ns_w, X=X[key])
ts += ts_i
ns_grad += ns_grad_i * ns_w # Chain rule
return ts, ns_grad
def fit_lnllh_ratio(self, ns0, X):
"""
Fit single ns parameter simultaneously for all LLHs.
TODO: This relies on calls into private LLH methods directly using sob
for speed reasons. Maybe we can change that.
"""
def _neglnllh(ns, sob_dict):
""" Multi LLH wrapper directly using a dict of sob values """
ts = 0.
ns_grad = 0.
for key, sob in sob_dict.items():
ts_i, ns_grad_i = self._llhs[key]._lnllh_ratio(
ns * self._ns_weights[key], sob)
ts -= ts_i
ns_grad -= ns_grad_i * self._ns_weights[key] # Chain rule
return ts, ns_grad
# No events given for any LLH, fit is zero
if sum(map(len, X.values())) == 0:
return 0., 0.
# Get soverb separately for all LLHs
sob = []
# sob_dict is only used if we fit, because we need sob unweighted there
sob_dict = {}
for key, llh in self._llhs.items():
sob_i = llh._soverb(X[key])
sob.append(self._ns_weights[key] * sob_i)
if len(sob_i) > 0:
# If sob is empty for a LLH, it would return (0, [0]) anyway,
# so just add the existing ones. ns_weights are added in
# correctly in the fit function later
sob_dict[key] = sob_i
sob = np.concatenate(sob)
nevts = len(sob)
# Test again, because we may have applied sob threshold cuts per LLH
if nevts == 0:
return 0., 0.
elif nevts == 1:
# Same case as in single LLH because sob is multi year weighted
sob = sob[0]
if sob <= 1.: # sob <= 1 => ns <= 0, so fit will be 0
return 0., 0.
else:
ns = 1. - (1. / sob)
ts = 2. * (-ns + math.log(sob))
return ns, ts
elif nevts == 2:
# Same case as in single LLH because sob is multi year weighted
sum_sob = sob[0] + sob[1]
if sum_sob <= 1.: # More complicated to show but same as above
return 0., 0.
else:
a = 1. / (sob[0] * sob[1])
c = sum_sob * a
ns = 1. - 0.5 * c + math.sqrt(c * c / 4. - a + 1.)
ts = 2. * (-ns + np.sum(np.log1p(ns * sob)))
return ns, ts
else: # Fit other cases
res = sco.minimize(fun=_neglnllh, x0=[ns0],
jac=True, args=(sob_dict,),
bounds=[self._llh_opts["ns_bounds"]],
method=self._llh_opts["minimizer"],
options=self._llh_opts["minimizer_opts"])
if not res.success:
def _neglnllh_numgrad(ns, sob_dict):
""" Use numerical gradient if LINESRCH problem arises. """
return _neglnllh(ns, sob_dict)[0]
res = sco.minimize(fun=_neglnllh_numgrad, x0=[ns0],
jac=False, args=(sob_dict,),
bounds=[self._llh_opts["ns_bounds"]],
method=self._llh_opts["minimizer"],
options=self._llh_opts["minimizer_opts"])
ns, ts = res.x[0], -res.fun[0]
if ts < 0.:
                # Sometimes the minimizer doesn't go all the way to 0., so
# TS vals might end up negative for a truly zero fit result
ts = 0.
return ns, ts
def _ns_split_weights(self, llhs):
"""
Set up the ``ns`` splitting weights: The weights simply renormalize the
source weights for all single LLHs over all samples.
Parameters
----------
llhs : dict of LLH instances
Single LLH instances that shall be combined.
Returns
-------
ns_weigths : dict of array-like
Weight per LLH to split up ``ns`` among different samples.
"""
ns_weights = {}
ns_w_sum = 0
for key, llh in llhs.items():
args = llh.model.get_args()
ns_weights[key] = np.sum(args["src_w_dec"] * args["src_w_theo"])
ns_w_sum += ns_weights[key]
# Normalize weights over all sample source weights
return dict_map(lambda key, nsw: nsw / ns_w_sum, ns_weights)
| UTF-8 | Python | false | false | 13,802 | py | 31 | llh.py | 21 | 0.511882 | 0.501377 | 0 | 367 | 36.607629 | 80 |
wikimedia/revscoring | 6,253,472,425,015 | 14ee62824c45cdb8434fd97ba4b477677918e0cc | 496b5270078183b3ffc3e9a2eda76e95e18061f1 | /revscoring/features/bytes/revision_oriented.py | 4f426400d21df25c44b62c85dd7c9cafef68fd49 | [
"MIT"
]
| permissive | https://github.com/wikimedia/revscoring | 87d0e9e856150b017b87f43e110b2062b4c90f07 | 5a3618e4ce6f93f1e571abf8b3600726d7295138 | refs/heads/master | 2023-08-23T03:48:07.172353 | 2023-01-24T10:58:33 | 2023-01-24T10:58:33 | 23,771,350 | 52 | 34 | MIT | false | 2023-04-17T06:03:24 | 2014-09-07T21:20:37 | 2023-03-02T18:19:49 | 2023-04-17T06:03:20 | 2,424 | 84 | 51 | 19 | Python | false | false | from revscoring.datasources import revision_oriented
from revscoring.dependencies import DependentSet
from ..meta import aggregators
from . import datasources
name = "bytes.revision"
class Revision(DependentSet):
def __init__(self, name, revision_datasources):
super().__init__(name)
self.length = aggregators.len(
revision_datasources.bytes,
name=name + ".length"
)
"`int` : The length of the revision content in bytes"
if hasattr(revision_datasources, "parent"):
self.parent = Revision(
name + ".parent",
revision_datasources.parent
)
"""
:class:`revscoring.features.bytes.Revision` : The
parent (aka "previous") revision of the page.
"""
revision = Revision(name,
datasources.Revision(name, revision_oriented.revision))
"""
Represents the base revision of interest. Implements this a basic structure:
* revision: :class:`~revscoring.features.bytes.Revision`
* parent: :class:`~revscoring.features.bytes.Revision`
"""
| UTF-8 | Python | false | false | 1,126 | py | 329 | revision_oriented.py | 283 | 0.625222 | 0.625222 | 0 | 38 | 28.631579 | 77 |
diweiqiang/PRIS | 7,464,653,160,914 | 436a03719bf2ba13659b101b5783418a1d3a3b9f | 7576879de06944ab501ef8875ec02aaab15d2e47 | /peris_model.py | b4a7c8d019c2d149cc37f4c173f6fd933584a2c9 | []
| no_license | https://github.com/diweiqiang/PRIS | 4ed89540c4731a4e6b3591410796885b5287c9af | 2e691ac82e6f91ae118c0cbb80aba6d2f337c052 | refs/heads/master | 2022-04-21T10:01:00.071643 | 2020-04-24T04:22:21 | 2020-04-24T04:22:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import time
from utils import Eval
import numpy as np
from sampler import *
from mylayers import RCEncoding, EuclideanDistillation
samplers = [SamplerModel, PopularSamplerModel, ClusterSamplerModel, ClusterPopularSamplerModel, ExactSamplerModel]
class EvaluateCallback(tf.keras.callbacks.Callback):
def __init__(self, round_):
self.round = round_
super(EvaluateCallback, self).__init__()
def on_epoch_begin(self, epoch, logs=None):
self.starttime = time.time()
def on_epoch_end(self, epoch, logs=None):
elapsed = time.time() - self.starttime
print('Epoch={} - {}s - loss={:.4f}'.format(self.round + epoch + 1, int(elapsed), logs['loss']))
def compute_loss(pred, prob, weighted):
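    # Self-normalized importance weights over the sampled negatives: the
    # proposal probability `prob` of each negative is corrected via
    # softmax(-pred - log(prob)); with weighted=False all negatives share
    # equal weight.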
if weighted:
importance = tf.nn.softmax(tf.negative(pred) - tf.log(prob))
else:
importance = tf.nn.softmax(tf.ones_like(pred))
weight_loss = tf.multiply(importance, tf.negative(tf.log_sigmoid(pred)))
loss = tf.reduce_sum(weight_loss, -1, keepdims=True)
return loss
def identity_loss(y_true, y_pred):
return tf.reduce_mean(y_pred - 0 * y_true)
class PerisModel:
def __init__(self, config):
user_id = tf.keras.Input(shape=(1,), name='user_id')
pos_id = tf.keras.Input(shape=(1,), name='pos_id')
neg_id = tf.keras.Input(shape=(config.neg_num,), name='neg_id')
neg_prob = tf.keras.Input(shape=(config.neg_num,), name='neg_prob', dtype='float32')
item_embed_layer = tf.keras.layers.Embedding(config.num_item, config.d, name='item_embedding',
embeddings_initializer=tf.keras.initializers.glorot_normal(),
activity_regularizer=tf.keras.regularizers.l2(config.coef / config.batch_size))
user_embed = tf.keras.layers.Embedding(config.num_user, config.d, name='user_embedding',
embeddings_initializer=tf.keras.initializers.glorot_normal(),
activity_regularizer=tf.keras.regularizers.l2(config.coef / config.batch_size))(user_id)
pos_item_embed = item_embed_layer(pos_id)
neg_item_embed = item_embed_layer(neg_id)
pos_score = tf.keras.layers.dot([user_embed, pos_item_embed], axes=-1)
neg_score = tf.keras.layers.dot([user_embed, neg_item_embed], axes=-1)
ruij = tf.keras.layers.Flatten()(tf.keras.layers.subtract([pos_score, neg_score]))
loss = tf.keras.layers.Lambda(lambda x: compute_loss(*x, config.weighted))([ruij, neg_prob])
self.model = tf.keras.Model(inputs=[user_id, pos_id, neg_id, neg_prob], outputs=loss)
self.model.compile(loss=identity_loss, optimizer=tf.keras.optimizers.Adam(lr=config.learning_rate))
self.config = config
def get_uv(self):
user_embed = self.model.get_layer('user_embedding')
item_embed = self.model.get_layer('item_embedding')
u = user_embed.get_weights()[0]
v = item_embed.get_weights()[0]
return u, v
def fit(self, train):
steps_per_epoch = int((train.nnz + self.config.batch_size - 1) / self.config.batch_size)
opt_para = [{}, {'mode': self.config.mode}, {'num_clusters': self.config.num_clusters},
{'num_clusters': self.config.num_clusters, 'mode': self.config.mode}, {}]
if self.config.sampler in {0, 1}:
sampler = samplers[self.config.sampler](train, **opt_para[self.config.sampler])\
.negative_sampler(neg=self.config.neg_num)
dataset = IO.construct_dataset(sampler, self.config.neg_num).shuffle(50000)\
.batch(self.config.batch_size).repeat(self.config.epochs)
self.model.fit(dataset, epochs=self.config.epochs, steps_per_epoch=steps_per_epoch, verbose=0,
callbacks=[EvaluateCallback(0)])
elif self.config.sampler in {2, 3, 4}:
sampler = samplers[self.config.sampler].__bases__[0](train, **opt_para[self.config.sampler % 2])\
.negative_sampler(neg=self.config.neg_num)
for i in range(int(self.config.epochs / self.config.epochs_)):
dataset = IO.construct_dataset(sampler, self.config.neg_num).shuffle(50000)\
.batch(self.config.batch_size).repeat(self.config.epochs_)
self.model.fit(dataset, epochs=self.config.epochs_, steps_per_epoch=steps_per_epoch, verbose=0,
callbacks=[EvaluateCallback(i * self.config.epochs_)])
u, v = self.get_uv()
sampler = samplers[self.config.sampler](train, {'U': u, 'V': v}, **opt_para[self.config.sampler])\
.negative_sampler(self.config.neg_num)
def evaluate(self, train, test):
m, n = train.shape
u, v = self.get_uv()
users = np.random.choice(m, min(m, 50000), False)
m = Eval.evaluate_item(train[users, :], test[users, :], u[users, :], v, topk=-1)
return m
class PerisJointModel:
def __init__(self, config):
user_id = tf.keras.Input(shape=(1,), name='user_id')
pos_id = tf.keras.Input(shape=(1,), name='pos_id')
neg_id = tf.keras.Input(shape=(config.neg_num,), name='neg_id')
neg_prob = tf.keras.Input(shape=(config.neg_num,), name='neg_prob', dtype='float32')
item_embed_layer = tf.keras.layers.Embedding(config.num_item, config.d, name='item_embedding',
embeddings_initializer=tf.keras.initializers.glorot_normal(),
activity_regularizer=tf.keras.regularizers.l2(config.coef / config.batch_size))
user_embed = tf.keras.layers.Embedding(config.num_user, config.d, name='user_embedding',
embeddings_initializer=tf.keras.initializers.glorot_normal(),
activity_regularizer=tf.keras.regularizers.l2(config.coef / config.batch_size))(user_id)
pos_item_embed = item_embed_layer(pos_id)
neg_item_embed = item_embed_layer(neg_id)
pos_score = tf.keras.layers.dot([user_embed, pos_item_embed], axes=-1)
neg_score = tf.keras.layers.dot([user_embed, neg_item_embed], axes=-1)
ruij = tf.keras.layers.Flatten()(tf.keras.layers.subtract([pos_score, neg_score]))
loss = tf.keras.layers.Lambda(lambda x: compute_loss(*x, config.weighted))([ruij, neg_prob])
num_clusters = config.num_clusters
reg = tf.keras.layers.ActivityRegularization(l2=config.coef2 / config.batch_size)
dist = EuclideanDistillation(coef=config.coef_kd)
def transform(x):
return reg(dist(x))
stop_grad = tf.keras.layers.Lambda(lambda x: tf.stop_gradient(x))
num_codewords = [num_clusters]
item_rce_layer = RCEncoding(num_codewords, att_mode='bilinear', rnn_mode='none', name='rcencoding')
pos_item_embed_stop = stop_grad(pos_item_embed)
neg_item_embed_stop = stop_grad(neg_item_embed)
pos_item_embed_, pos_item_cluster_idx = item_rce_layer(pos_item_embed_stop)
neg_item_embed_, _ = item_rce_layer(neg_item_embed_stop)
user_embed_ = tf.keras.layers.Dense(config.d, use_bias=False, name='user_dense',
activity_regularizer=tf.keras.regularizers.l2(config.coef2 / config.batch_size))(stop_grad(user_embed))
pos_score_ = tf.keras.layers.dot([user_embed_, transform([pos_item_embed_stop, pos_item_embed_])], axes=-1)
neg_score_ = tf.keras.layers.dot([user_embed_, transform([neg_item_embed_stop, neg_item_embed_])], axes=-1)
ruij_ = tf.keras.layers.Flatten()(tf.keras.layers.subtract([pos_score_, neg_score_]))
loss_ = tf.keras.layers.Lambda(lambda x: compute_loss(*x, config.weighted))([ruij_, neg_prob])
loss2 = tf.keras.layers.Lambda(lambda x: x[0] + x[1])([loss, loss_])
self.model = tf.keras.Model(inputs=[user_id, pos_id, neg_id, neg_prob], outputs=loss2)
self.model.compile(loss=identity_loss, optimizer=tf.keras.optimizers.Adam())
self.config = config
def get_cluster(self, m, n):
cluster = self.model.get_layer('rcencoding')
model_pred_item_cluster = tf.keras.Model(inputs=self.model.input[1], outputs=cluster.output[1])
user_embed_layer = self.model.get_layer('user_dense')
model_pred_user = tf.keras.Model(inputs=self.model.input[0], outputs=user_embed_layer.output)
item_code = np.squeeze(model_pred_item_cluster.predict(np.arange(n)))
item_center = cluster.get_weights()[0]
U = np.squeeze(model_pred_user.predict(np.arange(m)))
return U, item_code, item_center
def get_uv(self):
user_embed = self.model.get_layer('user_embedding')
item_embed = self.model.get_layer('item_embedding')
U = user_embed.get_weights()[0]
V = item_embed.get_weights()[0]
return U, V
def fit(self, train):
steps_per_epoch = int((train.nnz + self.config.batch_size - 1) / self.config.batch_size)
opt_para = {} if self.config.sampler == 2 else {'mode': self.config.mode}
if self.config.sampler in {2, 3}:
sampler = samplers[self.config.sampler].__bases__[0](train, **opt_para)\
.negative_sampler(neg=self.config.neg_num)
for i in range(int(self.config.epochs / self.config.epochs_)):
dataset = IO.construct_dataset(sampler, self.config.neg_num).shuffle(50000)\
.batch(self.config.batch_size).repeat(self.config.epochs_)
self.model.fit(dataset, epochs=self.config.epochs_, steps_per_epoch=steps_per_epoch, verbose=0,
callbacks=[EvaluateCallback(i * self.config.epochs_)])
u, code, center = self.get_cluster(self.config.num_user, self.config.num_item)
sampler = ClusterPopularSamplerModel(train, {'U': u, 'code': code, 'center': center}, **opt_para)\
.negative_sampler(self.config.neg_num)
def evaluate(self, train, test):
m, n = train.shape
u, v = self.get_uv()
users = np.random.choice(m, min(m, 50000), False)
m = Eval.evaluate_item(train[users, :], test[users, :], u[users, :], v, topk=-1)
return m | UTF-8 | Python | false | false | 10,439 | py | 9 | peris_model.py | 9 | 0.61117 | 0.603314 | 0 | 187 | 54.828877 | 147 |
ChenBooming/python_pycharm | 15,556,371,563,351 | f592685de3f60acacab1a31453122980c02c750e | f620c09675fc77c91c9c33dcd5533c4c06829da3 | /Python/itchat_test/test.py | cf437d77d5203838e91df5b7cd9b8ce972130d23 | [
"MIT"
]
| permissive | https://github.com/ChenBooming/python_pycharm | f47564b987792ba81ad6c7caaa6e1b58416cfd65 | aec59a5411166bc0ac04c872181b2162bcdb6fb8 | refs/heads/master | 2021-01-24T18:05:35.233449 | 2017-03-17T07:43:47 | 2017-03-17T07:43:47 | 84,391,710 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf8
import itchat
# tuling plugin can be get here:
# https://github.com/littlecodersh/EasierLife/tree/master/Plugins/Tuling
from tuling import get_response
@itchat.msg_register('Text')
def text_reply(msg):
if u'作者' in msg['Text'] or u'主人' in msg['Text']:
return u'找陈蒙 421235586@qq.com'
elif u'同盟会群' in msg['Text'] or u'同盟会' in msg['Text']:
itchat.send('@img@/Users/koudai232/PycharmProjects/python_pycharm/Python/itchat_test/img/tongmenghui_group.png', msg['FromUserName'])
return u'长按扫码进群,有问题请@陈蒙或者群主'
elif u'白领活动' in msg['Text']:
itchat.send('@img@/Users/koudai232/PycharmProjects/python_pycharm/Python/itchat_test/img/funbailing.jpg', msg['FromUserName']) # there should be a picture
return u'长按扫码关注,有问题联系@陈蒙'
elif u'亲子活动' in msg['Text']:
itchat.send('@img@/Users/koudai232/PycharmProjects/python_pycharm/Python/itchat_test/img/funmili.jpg', msg['FromUserName']) # there should be a picture
return u'长按扫码关注,有问题联系@陈蒙'
else:
return get_response(msg['Text'])
@itchat.msg_register(['Picture', 'Recording', 'Attachment', 'Video'])
def atta_reply(msg):
return (u'很好,'+{ 'Picture': u'图片', 'Recording': u'录音',
'Attachment': u'附件', 'Video': u'视频', }.get(msg['Type']) +
u'已转发给王珂') # download function is: msg['Text'](msg['FileName'])
@itchat.msg_register(['Map', 'Card', 'Note', 'Sharing'])
def mm_reply(msg):
if msg['Type'] == 'Map':
return u'收到位置分享'
elif msg['Type'] == 'Sharing':
return u'收到分享' + msg['Text']
elif msg['Type'] == 'Note':
return u'收到:' + msg['Text']
elif msg['Type'] == 'Card':
return u'收到好友信息:' + msg['Text']['Alias']
@itchat.msg_register('Text', isGroupChat = True)
def group_reply(msg):
if msg['isAt']:
return u'@%s\u2005%s' % (msg['ActualNickName'],
get_response(msg['Text']) or u'收到:' + msg['Text'])
@itchat.msg_register('Friends')
def add_friend(msg):
itchat.add_friend(**msg['Text'])
itchat.send_msg(u'可设置自定义回复消息、图片甚至文件,比如:\n'
+ u'同盟会入群:回复 同盟会群\n' + u'亲子活动:回复 亲子活动\n' + u'白领活动:回复 白领活动\n'
+ u'有问题反馈:回复 有问题 然后发送到邮箱即可', msg['RecommendInfo']['UserName'])
itchat.auto_login(True, enableCmdQR=False)
itchat.run()
| UTF-8 | Python | false | false | 2,607 | py | 9 | test.py | 6 | 0.633527 | 0.623272 | 0 | 55 | 39.763636 | 162 |
prathipc/Financials | 14,972,256,043,247 | b5b8fced447b989b185fd4113d1a8d4f7141ccdf | 705502038a7774fb378e289b83070fc54ab71653 | /lograthamic_RateofReturn.py | f2d421e605957938555b888b9a6dd0ee02f5f2c7 | [
"CC0-1.0"
]
| permissive | https://github.com/prathipc/Financials | b1d6516da24399b30057d1cfc5e863c7252a65c2 | 1814e08e2cccec65f60559109d2e0305175cba51 | refs/heads/master | 2020-12-05T02:58:20.573138 | 2020-01-06T01:21:35 | 2020-01-06T01:21:35 | 231,989,742 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from pandas_datareader import data as wb
STCK = wb.DataReader('VFINX', data_source='yahoo', start='2019-1-1')
STCK['log_return'] = np.log(STCK['Adj Close'] / STCK['Adj Close'].shift(1))
print (STCK)
#print(STCK['log_return'])
STCK['log_return'].plot(figsize = (8,5))
plt.show() # use this to show in a graph.
# multiplied by 250 becasue there is an average of 250 days on a calendar year. So just a small trick..
avg_returns_a = STCK['log_return'].mean() * 250
print ('log return is : ' + str(round(avg_returns_a, 2) * 100) + '%') | UTF-8 | Python | false | false | 586 | py | 4 | lograthamic_RateofReturn.py | 4 | 0.6843 | 0.646758 | 0 | 15 | 38.133333 | 104 |
jodal/pyspotify | 8,658,654,085,848 | 0a1332c776b3a9d9b6e0ccd56def198022ff0219 | 66b7a2fb1c573d8c26bcccd7b54b950dc13bd69c | /spotify/session.py | 7b7478b1dd61308f23adb898b40983647382e211 | [
"Apache-2.0"
]
| permissive | https://github.com/jodal/pyspotify | 020b12e486239414c25cdf7408471a4bcbadb182 | 575e0cbd6dfeee0b2e22a6c1d8b4ff3a14bb129a | refs/heads/master | 2023-08-25T05:36:09.958002 | 2022-06-15T14:30:36 | 2022-06-15T14:30:36 | 1,103,479 | 62 | 13 | Apache-2.0 | false | 2022-06-14T21:53:00 | 2010-11-22T20:31:34 | 2022-06-10T19:54:00 | 2022-06-14T21:53:00 | 14,517 | 518 | 94 | 11 | Python | false | false | from __future__ import unicode_literals
import logging
import warnings
import weakref
import spotify
import spotify.connection
import spotify.player
import spotify.social
from spotify import ffi, lib, serialized, utils
__all__ = ["Session", "SessionEvent"]
logger = logging.getLogger(__name__)
class Session(utils.EventEmitter):
"""The Spotify session.
If no ``config`` is provided, the default config is used.
The session object will emit a number of events. See :class:`SessionEvent`
for a list of all available events and how to connect your own listener
functions up to get called when the events happens.
.. warning::
You can only have one :class:`Session` instance per process. This is a
libspotify limitation. If you create a second :class:`Session` instance
in the same process pyspotify will raise a :exc:`RuntimeError` with the
message "Session has already been initialized".
:param config: the session config
:type config: :class:`Config` or :class:`None`
"""
@serialized
def __init__(self, config=None):
super(Session, self).__init__()
if spotify._session_instance is not None:
raise RuntimeError("Session has already been initialized")
if config is not None:
self.config = config
else:
self.config = spotify.Config()
if self.config.application_key is None:
self.config.load_application_key_file()
sp_session_ptr = ffi.new("sp_session **")
spotify.Error.maybe_raise(
lib.sp_session_create(self.config._sp_session_config, sp_session_ptr)
)
self._sp_session = ffi.gc(sp_session_ptr[0], lib.sp_session_release)
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
self.connection = spotify.connection.Connection(self)
self.offline = spotify.offline.Offline(self)
self.player = spotify.player.Player(self)
self.social = spotify.social.Social(self)
spotify._session_instance = self
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
config = None
"""A :class:`Config` instance with the current configuration.
Once the session has been created, changing the attributes of this object
will generally have no effect.
"""
connection = None
"""An :class:`~spotify.connection.Connection` instance for controlling the
connection to the Spotify servers."""
offline = None
"""An :class:`~spotify.offline.Offline` instance for controlling offline
sync."""
player = None
"""A :class:`~spotify.player.Player` instance for controlling playback."""
social = None
"""A :class:`~spotify.social.Social` instance for controlling social
sharing."""
def login(self, username, password=None, remember_me=False, blob=None):
"""Authenticate to Spotify's servers.
You can login with one of two combinations:
- ``username`` and ``password``
- ``username`` and ``blob``
To get the ``blob`` string, you must once log in with ``username`` and
``password``. You'll then get the ``blob`` string passed to the
:attr:`~SessionCallbacks.credentials_blob_updated` callback.
If you set ``remember_me`` to :class:`True`, you can later login to the
same account without providing any ``username`` or credentials by
calling :meth:`relogin`.
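        Example (illustrative)::
            >>> session.login('alice', 's3cret', remember_me=True)
            >>> # ...and in a later run, without supplying the password again:
            >>> session.relogin()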
"""
username = utils.to_char(username)
if password is not None:
password = utils.to_char(password)
blob = ffi.NULL
elif blob is not None:
password = ffi.NULL
blob = utils.to_char(blob)
else:
raise AttributeError("password or blob is required to login")
spotify.Error.maybe_raise(
lib.sp_session_login(
self._sp_session, username, password, bool(remember_me), blob
)
)
def logout(self):
"""Log out the current user.
If you logged in with the ``remember_me`` argument set to
:class:`True`, you will also need to call :meth:`forget_me` to
completely remove all credentials of the user that was logged in.
"""
spotify.Error.maybe_raise(lib.sp_session_logout(self._sp_session))
@property
def remembered_user_name(self):
"""The username of the remembered user from a previous :meth:`login`
call."""
return utils.get_with_growing_buffer(
lib.sp_session_remembered_user, self._sp_session
)
def relogin(self):
"""Relogin as the remembered user.
To be able do this, you must previously have logged in with
:meth:`login` with the ``remember_me`` argument set to :class:`True`.
To check what user you'll be logged in as if you call this method, see
:attr:`remembered_user_name`.
"""
spotify.Error.maybe_raise(lib.sp_session_relogin(self._sp_session))
def forget_me(self):
"""Forget the remembered user from a previous :meth:`login` call."""
spotify.Error.maybe_raise(lib.sp_session_forget_me(self._sp_session))
@property
@serialized
def user(self):
"""The logged in :class:`User`."""
sp_user = lib.sp_session_user(self._sp_session)
if sp_user == ffi.NULL:
return None
return spotify.User(self, sp_user=sp_user, add_ref=True)
@property
@serialized
def user_name(self):
"""The username of the logged in user."""
return utils.to_unicode(lib.sp_session_user_name(self._sp_session))
@property
@serialized
def user_country(self):
"""The country of the currently logged in user.
The :attr:`~SessionEvent.OFFLINE_STATUS_UPDATED` event is emitted on
the session object when this changes.
"""
return utils.to_country(lib.sp_session_user_country(self._sp_session))
@property
@serialized
def playlist_container(self):
"""The :class:`PlaylistContainer` for the currently logged in user.
.. warning::
The playlists API was broken at 2018-05-24 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to work with playlists.
"""
warnings.warn(
"Spotify broke the libspotify playlists API 2018-05-24 "
"and never restored it. "
"Please use the Spotify Web API to work with playlists."
)
sp_playlistcontainer = lib.sp_session_playlistcontainer(self._sp_session)
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=True
)
@property
def inbox(self):
"""The inbox :class:`Playlist` for the currently logged in user.
.. warning::
The playlists API was broken at 2018-05-24 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to work with playlists.
"""
warnings.warn(
"Spotify broke the libspotify playlists API 2018-05-24 "
"and never restored it. "
"Please use the Spotify Web API to work with playlists."
)
sp_playlist = lib.sp_session_inbox_create(self._sp_session)
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(self, sp_playlist=sp_playlist, add_ref=False)
def set_cache_size(self, size):
"""Set maximum size in MB for libspotify's cache.
If set to 0 (the default), up to 10% of the free disk space will be
used."""
spotify.Error.maybe_raise(lib.sp_session_set_cache_size(self._sp_session, size))
def flush_caches(self):
"""Write all cached data to disk.
libspotify does this regularly and on logout, so you should never need
to call this method yourself.
"""
spotify.Error.maybe_raise(lib.sp_session_flush_caches(self._sp_session))
def preferred_bitrate(self, bitrate):
"""Set preferred :class:`Bitrate` for music streaming."""
spotify.Error.maybe_raise(
lib.sp_session_preferred_bitrate(self._sp_session, bitrate)
)
def preferred_offline_bitrate(self, bitrate, allow_resync=False):
"""Set preferred :class:`Bitrate` for offline sync.
If ``allow_resync`` is :class:`True` libspotify may resynchronize
already synced tracks.
"""
spotify.Error.maybe_raise(
lib.sp_session_preferred_offline_bitrate(
self._sp_session, bitrate, allow_resync
)
)
@property
def volume_normalization(self):
"""Whether volume normalization is active or not.
Set to :class:`True` or :class:`False` to change.
"""
return bool(lib.sp_session_get_volume_normalization(self._sp_session))
@volume_normalization.setter
def volume_normalization(self, value):
spotify.Error.maybe_raise(
lib.sp_session_set_volume_normalization(self._sp_session, value)
)
def process_events(self):
"""Process pending events in libspotify.
This method must be called for most callbacks to be called. Without
calling this method, you'll only get the callbacks that are called from
internal libspotify threads. When the
:attr:`~SessionEvent.NOTIFY_MAIN_THREAD` event is emitted (from an
internal libspotify thread), it's your job to make sure this method is
called (from the thread you use for accessing Spotify), so that further
callbacks can be triggered (from the same thread).
pyspotify provides an :class:`~spotify.EventLoop` that you can use for
processing events when needed.
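        A minimal manual loop (illustrative; most applications should simply use
        :class:`~spotify.EventLoop`)::
            >>> import time
            >>> while True:
            ...     timeout = session.process_events()
            ...     time.sleep(timeout / 1000.0)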
"""
next_timeout = ffi.new("int *")
spotify.Error.maybe_raise(
lib.sp_session_process_events(self._sp_session, next_timeout)
)
return next_timeout[0]
def inbox_post_tracks(self, canonical_username, tracks, message, callback=None):
"""Post a ``message`` and one or more ``tracks`` to the inbox of the
user with the given ``canonical_username``.
``tracks`` can be a single :class:`~spotify.Track` or a list of
:class:`~spotify.Track` objects.
Returns an :class:`InboxPostResult` that can be used to check if the
request completed successfully.
If callback isn't :class:`None`, it is called with an
:class:`InboxPostResult` instance when the request has completed.
"""
return spotify.InboxPostResult(
self, canonical_username, tracks, message, callback
)
def get_starred(self, canonical_username=None):
"""Get the starred :class:`Playlist` for the user with
``canonical_username``.
.. warning::
The playlists API was broken at 2018-05-24 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to work with playlists.
If ``canonical_username`` isn't specified, the starred playlist for
the currently logged in user is returned.
"""
warnings.warn(
"Spotify broke the libspotify playlists API 2018-05-24 "
"and never restored it. "
"Please use the Spotify Web API to work with playlists."
)
if canonical_username is None:
sp_playlist = lib.sp_session_starred_create(self._sp_session)
else:
sp_playlist = lib.sp_session_starred_for_user_create(
self._sp_session, utils.to_bytes(canonical_username)
)
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(self, sp_playlist, add_ref=False)
def get_published_playlists(self, canonical_username=None):
"""Get the :class:`PlaylistContainer` of published playlists for the
user with ``canonical_username``.
.. warning::
The playlists API was broken at 2018-05-24 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to work with playlists.
If ``canonical_username`` isn't specified, the published container for
the currently logged in user is returned.
"""
warnings.warn(
"Spotify broke the libspotify playlists API 2018-05-24 "
"and never restored it. "
"Please use the Spotify Web API to work with playlists."
)
if canonical_username is None:
canonical_username = ffi.NULL
else:
canonical_username = utils.to_bytes(canonical_username)
sp_playlistcontainer = lib.sp_session_publishedcontainer_for_user_create(
self._sp_session, canonical_username
)
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=False
)
def get_link(self, uri):
"""
Get :class:`Link` from any Spotify URI.
A link can be created from a string containing a Spotify URI on the
form ``spotify:...``.
Example::
>>> session = spotify.Session()
# ...
>>> session.get_link(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
Link('spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> session.get_link(
... 'http://open.spotify.com/track/4wl1dK5dHGp3Ig51stvxb0')
Link('spotify:track:4wl1dK5dHGp3Ig51stvxb0')
"""
return spotify.Link(self, uri=uri)
def get_track(self, uri):
"""
Get :class:`Track` from a Spotify track URI.
Example::
>>> session = spotify.Session()
# ...
>>> track = session.get_track(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> track.load().name
u'Get Lucky'
"""
return spotify.Track(self, uri=uri)
def get_local_track(self, artist=None, title=None, album=None, length=None):
"""
Get :class:`Track` for a local track.
        Spotify's official clients support adding your local music files to
        Spotify so they can be played in the Spotify client. These are not
        synced with Spotify's servers or between your devices and there is no
        trace of them in your Spotify user account. The exception is when you
add one of these local tracks to a playlist or mark them as starred.
This creates a "local track" which pyspotify also will be able to
observe.
"Local tracks" can be recognized in several ways:
- The track's URI will be of the form
``spotify:local:ARTIST:ALBUM:TITLE:LENGTH_IN_SECONDS``. Any of the
parts in all caps can be left out if there is no information
available. That is, ``spotify:local::::`` is a valid local track URI.
- :attr:`Link.type` will be :class:`LinkType.LOCALTRACK` for the
track's link.
- :attr:`Track.is_local` will be :class:`True` for the track.
This method can be used to create local tracks that can be starred or
added to playlists.
``artist`` may be an artist name. ``title`` may be a track name.
``album`` may be an album name. ``length`` may be a track length in
milliseconds.
Note that when creating a local track you provide the length in
milliseconds, while the local track URI contains the length in seconds.
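        Example (illustrative)::
            >>> track = session.get_local_track(
            ...     artist='Nina Simone', title='Sinnerman', length=612000)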
"""
if artist is None:
artist = ""
if title is None:
title = ""
if album is None:
album = ""
if length is None:
length = -1
artist = utils.to_char(artist)
title = utils.to_char(title)
album = utils.to_char(album)
sp_track = lib.sp_localtrack_create(artist, title, album, length)
return spotify.Track(self, sp_track=sp_track, add_ref=False)
def get_album(self, uri):
"""
Get :class:`Album` from a Spotify album URI.
Example::
>>> session = spotify.Session()
# ...
>>> album = session.get_album(
... 'spotify:album:6wXDbHLesy6zWqQawAa91d')
>>> album.load().name
u'Forward / Return'
"""
return spotify.Album(self, uri=uri)
def get_artist(self, uri):
"""
Get :class:`Artist` from a Spotify artist URI.
Example::
>>> session = spotify.Session()
# ...
>>> artist = session.get_artist(
... 'spotify:artist:22xRIphSN7IkPVbErICu7s')
>>> artist.load().name
u'Rob Dougan'
"""
return spotify.Artist(self, uri=uri)
def get_playlist(self, uri):
"""
Get :class:`Playlist` from a Spotify playlist URI.
.. warning::
The playlists API was broken at 2018-05-24 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to work with playlists.
Example::
>>> session = spotify.Session()
# ...
>>> playlist = session.get_playlist(
... 'spotify:user:fiat500c:playlist:54k50VZdvtnIPt4d8RBCmZ')
>>> playlist.load().name
u'500C feelgood playlist'
"""
warnings.warn(
"Spotify broke the libspotify playlists API 2018-05-24 "
"and never restored it. "
"Please use the Spotify Web API to work with playlists."
)
return spotify.Playlist(self, uri=uri)
def get_user(self, uri):
"""
Get :class:`User` from a Spotify user URI.
Example::
>>> session = spotify.Session()
# ...
>>> user = session.get_user('spotify:user:jodal')
>>> user.load().display_name
u'jodal'
"""
return spotify.User(self, uri=uri)
def get_image(self, uri, callback=None):
"""
Get :class:`Image` from a Spotify image URI.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, an :class:`Image` instance, when
the image is done loading.
Example::
>>> session = spotify.Session()
# ...
>>> image = session.get_image(
... 'spotify:image:a0bdcbe11b5cd126968e519b5ed1050b0e8183d0')
>>> image.load().data_uri[:50]
u'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD'
"""
return spotify.Image(self, uri=uri, callback=callback)
def search(
self,
query,
callback=None,
track_offset=0,
track_count=20,
album_offset=0,
album_count=20,
artist_offset=0,
artist_count=20,
playlist_offset=0,
playlist_count=20,
search_type=None,
):
"""
Search Spotify for tracks, albums, artists, and playlists matching
``query``.
.. warning::
The search API was broken at 2016-02-03 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to perform searches.
The ``query`` string can be free format, or use some prefixes like
        ``title:`` and ``artist:`` to limit what to match on. There are no
official docs on the search query format, but there's a `Spotify blog
post
<https://www.spotify.com/blog/archives/2008/01/22/searching-spotify/>`_
from 2008 with some examples.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Search` instance, when
the search completes.
The ``*_offset`` and ``*_count`` arguments can be used to retrieve more
search results. libspotify will currently not respect ``*_count``
values higher than 200, though this may change at any time as the limit
isn't documented in any official docs. If you want to retrieve more
than 200 results, you'll have to search multiple times with different
``*_offset`` values. See the ``*_total`` attributes on the
:class:`Search` to see how many results exists, and to figure out
how many searches you'll need to make to retrieve everything.
``search_type`` is a :class:`SearchType` value. It defaults to
:attr:`SearchType.STANDARD`.
Returns a :class:`Search` instance.
"""
raise Exception(
"Spotify broke the libspotify search API 2016-02-03 "
"and never restored it."
)
def get_toplist(
self, type=None, region=None, canonical_username=None, callback=None
):
"""Get a :class:`Toplist` of artists, albums, or tracks that are the
currently most popular worldwide or in a specific region.
``type`` is a :class:`ToplistType` instance that specifies the type of
toplist to create.
``region`` is either a :class:`ToplistRegion` instance, or a 2-letter
ISO 3166-1 country code as a unicode string, that specifies the
geographical region to create a toplist for.
If ``region`` is :attr:`ToplistRegion.USER` and ``canonical_username``
isn't specified, the region of the current user will be used. If
``canonical_username`` is specified, the region of the specified user
will be used instead.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Toplist` instance, when the
toplist request completes.
Example::
>>> import spotify
>>> session = spotify.Session()
# ...
>>> toplist = session.get_toplist(
... type=spotify.ToplistType.TRACKS, region='US')
>>> toplist.load()
>>> len(toplist.tracks)
100
>>> len(toplist.artists)
0
>>> toplist.tracks[0]
Track(u'spotify:track:2dLLR6qlu5UJ5gk0dKz0h3')
"""
return spotify.Toplist(
self,
type=type,
region=region,
canonical_username=canonical_username,
callback=callback,
)
class SessionEvent(object):
"""Session events.
Using the :class:`Session` object, you can register listener functions to
be called when various session related events occurs. This class enumerates
the available events and the arguments your listener functions will be
called with.
Example usage::
import spotify
def logged_in(session, error_type):
if error_type is spotify.ErrorType.OK:
print('Logged in as %s' % session.user)
else:
print('Login failed: %s' % error_type)
session = spotify.Session()
session.on(spotify.SessionEvent.LOGGED_IN, logged_in)
session.login('alice', 's3cret')
All events will cause debug log statements to be emitted, even if no
listeners are registered. Thus, there is no need to register listener
functions just to log that they're called.
"""
LOGGED_IN = "logged_in"
"""Called when login has completed.
Note that even if login has succeeded, that does not mean that you're
online yet as libspotify may have cached enough information to let you
authenticate with Spotify while offline.
This event should be used to get notified about login errors. To get
notified about the authentication and connection state, refer to the
:attr:`SessionEvent.CONNECTION_STATE_UPDATED` event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the login error type
:type error_type: :class:`ErrorType`
"""
LOGGED_OUT = "logged_out"
"""Called when logout has completed or there is a permanent connection
error.
:param session: the current session
:type session: :class:`Session`
"""
METADATA_UPDATED = "metadata_updated"
"""Called when some metadata has been updated.
There is no way to know what metadata was updated, so you'll have to
    refresh all your metadata caches.
:param session: the current session
:type session: :class:`Session`
"""
CONNECTION_ERROR = "connection_error"
"""Called when there is a connection error and libspotify has problems
reconnecting to the Spotify service.
May be called repeatedly as long as the problem persists. Will be called
with an :attr:`ErrorType.OK` error when the problem is resolved.
:param session: the current session
:type session: :class:`Session`
:param error_type: the connection error type
:type error_type: :class:`ErrorType`
"""
MESSAGE_TO_USER = "message_to_user"
"""Called when libspotify wants to show a message to the end user.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
NOTIFY_MAIN_THREAD = "notify_main_thread"
"""Called when processing on the main thread is needed.
When this is called, you should call :meth:`~Session.process_events` from
your main thread. Failure to do so may cause request timeouts, or a lost
connection.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
MUSIC_DELIVERY = "music_delivery"
"""Called when there is decompressed audio data available.
If the function returns a lower number of frames consumed than
``num_frames``, libspotify will retry delivery of the unconsumed frames in
about 100ms. This can be used for rate limiting if libspotify is giving you
audio data too fast.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:param audio_format: the audio format
:type audio_format: :class:`AudioFormat`
:param frames: the audio frames
:type frames: bytestring
:param num_frames: the number of frames
:type num_frames: int
:returns: the number of frames consumed
"""
PLAY_TOKEN_LOST = "play_token_lost"
"""Music has been paused because an account only allows music to be played
from one location simultaneously.
When this event is emitted, you should pause playback.
:param session: the current session
:type session: :class:`Session`
"""
LOG_MESSAGE = "log_message"
"""Called when libspotify have something to log.
Note that pyspotify logs this for you, so you'll probably never need to
register a listener for this event.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
END_OF_TRACK = "end_of_track"
"""Called when all audio data for the current track has been delivered.
:param session: the current session
:type session: :class:`Session`
"""
STREAMING_ERROR = "streaming_error"
"""Called when audio streaming cannot start or continue.
:param session: the current session
:type session: :class:`Session`
:param error_type: the streaming error type
:type error_type: :class:`ErrorType`
"""
USER_INFO_UPDATED = "user_info_updated"
"""Called when anything related to :class:`User` objects is updated.
:param session: the current session
:type session: :class:`Session`
"""
START_PLAYBACK = "start_playback"
"""Called when audio playback should start.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`START_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
STOP_PLAYBACK = "stop_playback"
"""Called when audio playback should stop.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`STOP_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
GET_AUDIO_BUFFER_STATS = "get_audio_buffer_stats"
"""Called to query the application about its audio buffer.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:returns: an :class:`AudioBufferStats` instance
"""
OFFLINE_STATUS_UPDATED = "offline_status_updated"
"""Called when offline sync status is updated.
:param session: the current session
:type session: :class:`Session`
"""
CREDENTIALS_BLOB_UPDATED = "credentials_blob_updated"
"""Called when storable credentials have been updated, typically right
after login.
The ``blob`` argument can be stored and later passed to
:meth:`~Session.login` to login without storing the user's password.
:param session: the current session
:type session: :class:`Session`
:param blob: the authentication blob
:type blob: bytestring
"""
CONNECTION_STATE_UPDATED = "connection_state_updated"
"""Called when the connection state is updated.
The connection state includes login, logout, offline mode, etc.
:param session: the current session
:type session: :class:`Session`
"""
SCROBBLE_ERROR = "scrobble_error"
"""Called when there is a scrobble error event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the scrobble error type
:type error_type: :class:`ErrorType`
"""
PRIVATE_SESSION_MODE_CHANGED = "private_session_mode_changed"
"""Called when there is a change in the private session mode.
:param session: the current session
:type session: :class:`Session`
:param is_private: whether the session is private
:type is_private: bool
"""
class _SessionCallbacks(object):
"""Internal class."""
@classmethod
def get_struct(cls):
return ffi.new(
"sp_session_callbacks *",
{
"logged_in": cls.logged_in,
"logged_out": cls.logged_out,
"metadata_updated": cls.metadata_updated,
"connection_error": cls.connection_error,
"message_to_user": cls.message_to_user,
"notify_main_thread": cls.notify_main_thread,
"music_delivery": cls.music_delivery,
"play_token_lost": cls.play_token_lost,
"log_message": cls.log_message,
"end_of_track": cls.end_of_track,
"streaming_error": cls.streaming_error,
"userinfo_updated": cls.user_info_updated,
"start_playback": cls.start_playback,
"stop_playback": cls.stop_playback,
"get_audio_buffer_stats": cls.get_audio_buffer_stats,
"offline_status_updated": cls.offline_status_updated,
"credentials_blob_updated": cls.credentials_blob_updated,
"connectionstate_updated": cls.connection_state_updated,
"scrobble_error": cls.scrobble_error,
"private_session_mode_changed": cls.private_session_mode_changed,
},
)
# XXX Avoid use of the spotify._session_instance global in the following
# callbacks.
@staticmethod
@ffi.callback("void(sp_session *, sp_error)")
def logged_in(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
if error_type == spotify.ErrorType.OK:
logger.info("Spotify logged in")
else:
logger.error("Spotify login error: %r", error_type)
spotify._session_instance.emit(
SessionEvent.LOGGED_IN, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback("void(sp_session *)")
def logged_out(sp_session):
if not spotify._session_instance:
return
logger.info("Spotify logged out")
spotify._session_instance.emit(
SessionEvent.LOGGED_OUT, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *)")
def metadata_updated(sp_session):
if not spotify._session_instance:
return
logger.debug("Metadata updated")
spotify._session_instance.emit(
SessionEvent.METADATA_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, sp_error)")
def connection_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error("Spotify connection error: %r", error_type)
spotify._session_instance.emit(
SessionEvent.CONNECTION_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback("void(sp_session *, const char *)")
def message_to_user(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug("Message to user: %s", data)
spotify._session_instance.emit(
SessionEvent.MESSAGE_TO_USER, spotify._session_instance, data
)
@staticmethod
@ffi.callback("void(sp_session *)")
def notify_main_thread(sp_session):
if not spotify._session_instance:
return
logger.debug("Notify main thread")
spotify._session_instance.emit(
SessionEvent.NOTIFY_MAIN_THREAD, spotify._session_instance
)
@staticmethod
@ffi.callback("int(sp_session *, const sp_audioformat *, const void *, int)")
def music_delivery(sp_session, sp_audioformat, frames, num_frames):
if not spotify._session_instance:
return 0
if spotify._session_instance.num_listeners(SessionEvent.MUSIC_DELIVERY) == 0:
logger.debug("Music delivery, but no listener")
return 0
audio_format = spotify.AudioFormat(sp_audioformat)
frames_buffer = ffi.buffer(frames, audio_format.frame_size() * num_frames)
frames_bytes = frames_buffer[:]
num_frames_consumed = spotify._session_instance.call(
SessionEvent.MUSIC_DELIVERY,
spotify._session_instance,
audio_format,
frames_bytes,
num_frames,
)
logger.debug(
"Music delivery of %d frames, %d consumed",
num_frames,
num_frames_consumed,
)
return num_frames_consumed
@staticmethod
@ffi.callback("void(sp_session *)")
def play_token_lost(sp_session):
if not spotify._session_instance:
return
logger.debug("Play token lost")
spotify._session_instance.emit(
SessionEvent.PLAY_TOKEN_LOST, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, const char *)")
def log_message(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug("libspotify log message: %s", data)
spotify._session_instance.emit(
SessionEvent.LOG_MESSAGE, spotify._session_instance, data
)
@staticmethod
@ffi.callback("void(sp_session *)")
def end_of_track(sp_session):
if not spotify._session_instance:
return
logger.debug("End of track")
spotify._session_instance.emit(
SessionEvent.END_OF_TRACK, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, sp_error)")
def streaming_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error("Spotify streaming error: %r", error_type)
spotify._session_instance.emit(
SessionEvent.STREAMING_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback("void(sp_session *)")
def user_info_updated(sp_session):
if not spotify._session_instance:
return
logger.debug("User info updated")
spotify._session_instance.emit(
SessionEvent.USER_INFO_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *)")
def start_playback(sp_session):
if not spotify._session_instance:
return
logger.debug("Start playback called")
spotify._session_instance.emit(
SessionEvent.START_PLAYBACK, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *)")
def stop_playback(sp_session):
if not spotify._session_instance:
return
logger.debug("Stop playback called")
spotify._session_instance.emit(
SessionEvent.STOP_PLAYBACK, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, sp_audio_buffer_stats *)")
def get_audio_buffer_stats(sp_session, sp_audio_buffer_stats):
if not spotify._session_instance:
return
if (
spotify._session_instance.num_listeners(SessionEvent.GET_AUDIO_BUFFER_STATS)
== 0
):
logger.debug("Audio buffer stats requested, but no listener")
return
logger.debug("Audio buffer stats requested")
stats = spotify._session_instance.call(
SessionEvent.GET_AUDIO_BUFFER_STATS, spotify._session_instance
)
sp_audio_buffer_stats.samples = stats.samples
sp_audio_buffer_stats.stutter = stats.stutter
@staticmethod
@ffi.callback("void(sp_session *)")
def offline_status_updated(sp_session):
if not spotify._session_instance:
return
logger.debug("Offline status updated")
spotify._session_instance.emit(
SessionEvent.OFFLINE_STATUS_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, const char *)")
def credentials_blob_updated(sp_session, data):
if not spotify._session_instance:
return
data = ffi.string(data)
logger.debug("Credentials blob updated: %r", data)
spotify._session_instance.emit(
SessionEvent.CREDENTIALS_BLOB_UPDATED,
spotify._session_instance,
data,
)
@staticmethod
@ffi.callback("void(sp_session *)")
def connection_state_updated(sp_session):
if not spotify._session_instance:
return
logger.debug("Connection state updated")
spotify._session_instance.emit(
SessionEvent.CONNECTION_STATE_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback("void(sp_session *, sp_error)")
def scrobble_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error("Spotify scrobble error: %r", error_type)
spotify._session_instance.emit(
SessionEvent.SCROBBLE_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback("void(sp_session *, bool)")
def private_session_mode_changed(sp_session, is_private):
if not spotify._session_instance:
return
is_private = bool(is_private)
status = "private" if is_private else "public"
logger.debug("Private session mode changed: %s", status)
spotify._session_instance.emit(
SessionEvent.PRIVATE_SESSION_MODE_CHANGED,
spotify._session_instance,
is_private,
)
| UTF-8 | Python | false | false | 42,852 | py | 105 | session.py | 66 | 0.623798 | 0.618408 | 0 | 1,238 | 33.613893 | 88 |
luyaochi/mycrawlerlib | 11,029,476,030,572 | 6c63b735a9e89da448ea2960869c6f1eaae08bb1 | 9164d01a4482936816be74f3fc87c49612f6c303 | /frontier.py | 5403d01af3d4941f9e23d7a586281d16c2c54574 | []
| no_license | https://github.com/luyaochi/mycrawlerlib | e70a0ab2b2398f18039698412959a70cd7ecd395 | 11438dc2985fe76f6dab3b0d59e8baf919c86da4 | refs/heads/master | 2016-08-04T16:29:30.311285 | 2014-10-09T16:49:40 | 2014-10-09T16:49:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*-coding:utf-8-*-
class frontier:
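    """A simple FIFO frontier of seed URLs, kept in a plain Python list."""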
def __init__(self,seed = None):
self.list_frontier = []
self.init_frontier(seed)
def init_frontier(self,seed):
if seed == None:
self.load_frontierFromDb()
else:
self.add_frontier(seed)
return self.list_frontier
def show_frontier(self):
return self.list_frontier
def add_frontier(self,seed):
if seed not in self.list_frontier:
self.list_frontier.append(seed)
def del_frontier(self):
return self.list_frontier.pop(0)
def show_first_frontier(self):
if len(self.list_frontier) > 0:
return self.list_frontier[0]
return ''
def len_frontier(self):
return len(self.list_frontier)
| UTF-8 | Python | false | false | 677 | py | 10 | frontier.py | 10 | 0.691285 | 0.685377 | 0 | 32 | 20.125 | 36 |
jamesjarlathlong/beeview_gateway | 1,666,447,354,649 | e6437cb1b8c4b504aec46732d07817deff893c61 | 13ffe2e92484d3a3283905733afaf17b1b5f9e7b | /algorithms/compressive.py | 839b5a9a33ab48ea2ffbbd7e7d87e939f849ce22 | []
| no_license | https://github.com/jamesjarlathlong/beeview_gateway | 7f89218cb06a29cae5720c6d57fe39c2438ac949 | f1a4576aaded9e0265d832d6f41dde157d5220d9 | refs/heads/master | 2021-01-01T18:51:15.260795 | 2018-04-12T04:55:05 | 2018-04-12T04:55:05 | 98,449,510 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scipy.fftpack as spfft
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import fourier_basis as ft
import cvxpy as cvx
import itertools
import math
import numpy as np
import random
import sys
import fourier_basis as ft
def downsample(data,ri):
return data[ri]
def reconstruct(downsampled, basis, ri):
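    # basis pursuit: find the sparsest coefficient vector x (minimum L1 norm)
    # that exactly reproduces the observed samples, i.e.
    #     minimize ||x||_1   subject to   A x = y
    # where A is the Fourier basis restricted to the sampled rows ri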
A = basis[ri]
n =len(basis)
vx = cvx.Variable(n)
objective = cvx.Minimize(cvx.norm(vx, 1))
constraints = [A*vx == downsampled]
prob = cvx.Problem(objective, constraints)
result = prob.solve(verbose=False)
return vx
def fourier_denoising(lamda,downsampled, basis, ri):
A = basis[ri]
n =len(basis)
w = cvx.Variable(n)
loss = cvx.sum_squares(A*w-downsampled)/2 + lamda * cvx.norm(w,1)
problem = cvx.Problem(cvx.Minimize(loss))
result = problem.solve(verbose=True)
return w
def freq_and_time(A, ri, signal, method):
vx = method(signal[ri], A, ri)
x = np.array(vx.value)
x = np.squeeze(x)
sig = np.dot(A,x)
return x, sig
def fourier_cs(signal, downsample_factor, method=reconstruct):
n = len(signal)
m =int(n//downsample_factor)
A = np.array(ft.t(ft.zmean_real_dft(n)))
ri = np.random.choice(n, m, replace=False) # random sample of indices
f, t = freq_and_time(A, ri, signal, method=method)
return f,t
if __name__ == "__main__":
data = np.loadtxt(sys.argv[1])
    factor = int(sys.argv[2])
    # run the compressed-sensing reconstruction on the loaded signal
    f, t = fourier_cs(data, factor) | UTF-8 | Python | false | false | 1,405 | py | 33 | compressive.py | 18 | 0.688968 | 0.685409 | 0 | 51 | 26.568627 | 70
xflows/cf_weka | 3,461,743,658,359 | 6c8a25a26a406d504056234dd36531e68b889ebf | db7373b959ccb2eceb67179bea8fff5d7c8b25c0 | /cf_weka/library.py | e7c38ce36434f6e935027bb1112b94e6920b17d2 | [
"MIT"
]
| permissive | https://github.com/xflows/cf_weka | b6520b249fa24760067de03a5249b30711e933ea | 9e76d9ef101a001bbcbc2a6fe67296619a7d0bb0 | refs/heads/master | 2021-01-10T01:17:28.015025 | 2017-10-18T12:18:32 | 2017-10-18T12:18:32 | 49,005,637 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = 'vid, daleksovski'
import classification
import evaluation
import utilities
import preprocessing
#
# CLASSIFICATION ALGORITHMS
#
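# Convention used by every widget function in this module: it receives a single
# `input_dict` (learner options under the 'params' key) and returns a dict of
# outputs.  For example (illustrative):
#     out = decision_tree_j48({'params': '-C 0.25 -M 2'})
#     learner = out['learner']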
def decision_tree_j48(input_dict):
"""Decision Tree learner J48"""
p = input_dict['params']
return {'learner': classification.j48(p)} # '-C 0.25 -M 2'
def naive_bayes(input_dict):
"""Naive Bayes learner"""
p = input_dict['params']
return {'learner': classification.naive_bayes(p)}
def random_forest(input_dict):
"""Random Forest learner"""
p = input_dict['params']
return {'learner': classification.random_forest(p)}
def multilayer_perceptron(input_dict):
"""MLP Neural-network learner"""
p = input_dict['params']
return {'learner': classification.multilayer_perceptron(p)}
def smo(input_dict):
"""SVM learner"""
p = input_dict['params']
return {'learner': classification.smo(p)}
def logistic_regression_weka(input_dict):
"""Logistic Regression learner"""
p = input_dict['params']
return {'learner': classification.logistic(p)}
def rules_zeror(input_dict):
"""rulesZeroR Rule learner"""
p = input_dict['params']
return {'learner': classification.rules_zeror(p)}
def rules_jripper(input_dict):
"""Rule learner JRipper"""
p = input_dict['params']
return {'learner': classification.rules_jrip(p)}
def knn(input_dict):
"""K-Nearest-Neighbours learner IBk"""
p = input_dict['params']
return {'learner': classification.ibk(p)}
def random_tree(input_dict):
"""Random Tree learner"""
p = input_dict['params']
return {'learner': classification.random_tree(p)}
def rep_tree(input_dict):
"""Reduced Error Pruning tree"""
p = input_dict['params']
return {'learner': classification.rep_tree(p)}
def k_star(input_dict):
"""K* is an instance-based classifier, that is the class of a test instance is based upon the class of those training instances similar to it, as determined by some similarity function"""
p = input_dict['params']
return {'learner': classification.k_star(p)}
#
# PREPROCESSING
#
def feature_selection(input_dict):
"""Correlation-based Feature Subset Selection"""
instances = input_dict['instances']
output_dict = {}
output_dict['selected'] = preprocessing.correlation_basedfeat_sel(instances)
return output_dict
def normalize(input_dict):
"""Normalizes all numeric values in the given dataset"""
instances = input_dict['instances']
output_dict = {}
# 1,0 -> normalize to [0,1]; 2,-1 then to [-1,1]
output_dict['normalized'] = preprocessing.normalize(instances, '-S 2.0 -T -1.0')
return output_dict
#
# EVALUATION
#
def apply_mapped_classifier_get_instances(input_dict):
"""An advanced version of the Apply Classifier method"""
sclassifier = input_dict['classifier']
soriginalInstances = input_dict['original_training_instances']
sinstances = input_dict['instances']
instances, report = evaluation.apply_mapped_classifier_get_instances(sclassifier, soriginalInstances, sinstances)
output_dict = {'instances': instances, 'mapping_report': report}
return output_dict
#
# UTILITIES
#
def export_dataset_to_arff(input_dict):
"""Export Dataset to an ARFF Textual Format"""
arff_file_contents = utilities.export_dataset_to_arff(input_dict['dataset'])
file_out = open('myfile', 'w')
file_out.write(arff_file_contents)
file_out.close()
output_dict = {}
output_dict['file_out'] = file_out
return output_dict
def import_dataset_from_arff(input_dict):
"""Imports Dataset From an ARFF Textual Format"""
arff = input_dict['arff']
output_dict = {}
output_dict['instances'] = utilities.import_dataset_from_arff(arff)
return output_dict
def load_uci(input_dict):
"""Loads a UCI dataset"""
arff_file = input_dict['filename']
output_dict = {}
output_dict['data'] = utilities.load_uci_dataset_weka(arff_file)
return output_dict
| UTF-8 | Python | false | false | 4,032 | py | 28 | library.py | 10 | 0.675099 | 0.669395 | 0 | 150 | 25.88 | 191 |
4ar0n/fifthtutorial | 395,137,005,226 | 1f2d9fc24beb5175ac21e4efcc44547fbf06c6c2 | d22cb36b926c23e15cf223288dc54f2218980e9d | /homework2.0_q.py | 55c8f963ae18a46f82b7d558848cf9c3f33f4dec | []
| no_license | https://github.com/4ar0n/fifthtutorial | 395c749848d69734856fcae406b94b91a75f36f0 | d520e3100f99df6537ace1eac54001a873bd9cdb | refs/heads/master | 2023-02-18T23:04:56.162587 | 2021-01-16T14:31:47 | 2021-01-16T14:31:47 | 312,524,107 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pprint import pprint
from random import *
from datetime import datetime
person_mapping_dict = { 1:"Aaron",
2:"Vincent",
3:"Sir Ying",
4:"Jason",
5:"Billy" }
good_mapping_dict = { 45:"banana",
46:"apple",
47:"melon",
48:"orange" }
price_mapping_table = { 45:4,
46:6,
47:20,
48:3, }
data=[[datetime(2020, 1, 6), 3, 45],
[datetime(2020, 1, 10), 1, 48],
[datetime(2020, 1, 28), 3, 45],
[datetime(2020, 2, 2), 4, 48],
[datetime(2020, 2, 15), 3, 45],
[datetime(2020, 2, 23), 4, 45],
[datetime(2020, 3, 5), 4, 45],
[datetime(2020, 3, 6), 2, 45],
[datetime(2020, 3, 8), 2, 48],
[datetime(2020, 3, 20), 1, 45],
[datetime(2020, 4, 26), 4, 47],
[datetime(2020, 4, 28), 3, 46],
[datetime(2020, 5, 17), 1, 47],
[datetime(2020, 5, 18), 1, 47],
[datetime(2020, 5, 28), 3, 47],
[datetime(2020, 6, 9), 2, 46],
[datetime(2020, 6, 12), 2, 46],
[datetime(2020, 6, 16), 2, 47],
[datetime(2020, 6, 22), 5, 48],
[datetime(2020, 6, 24), 5, 48],
[datetime(2020, 7, 4), 4, 46],
[datetime(2020, 7, 9), 4, 45],
[datetime(2020, 7, 20), 5, 48],
[datetime(2020, 7, 22), 4, 45],
[datetime(2020, 7, 25), 2, 47],
[datetime(2020, 7, 26), 5, 45],
[datetime(2020, 7, 28), 3, 46],
[datetime(2020, 7, 28), 4, 48],
[datetime(2020, 8, 2), 5, 46],
[datetime(2020, 8, 3), 4, 46],
[datetime(2020, 8, 6), 1, 48],
[datetime(2020, 8, 7), 3, 48],
[datetime(2020, 8, 28), 1, 48],
[datetime(2020, 9, 3), 4, 48],
[datetime(2020, 9, 26), 1, 47],
[datetime(2020, 9, 28), 3, 47],
[datetime(2020, 10, 7), 3, 47],
[datetime(2020, 10, 9), 1, 46],
[datetime(2020, 10, 20), 3, 47],
[datetime(2020, 10, 22), 1, 46],
[datetime(2020, 10, 22), 2, 48],
[datetime(2020, 10, 23), 3, 48],
[datetime(2020, 10, 27), 3, 48],
[datetime(2020, 11, 1), 2, 47],
[datetime(2020, 11, 17), 4, 46],
[datetime(2020, 11, 24), 2, 48],
[datetime(2020, 12, 5), 1, 45],
[datetime(2020, 12, 10), 4, 45],
[datetime(2020, 12, 28), 5, 46]]
# Question 1a
# Write a script to show the transactions in the data above, e.g.:
# 'Aaron has bought a/an orange at a price of 3 on 2020-01-10.'
# Give the total revenue at the end as a summary: "Total Revenue: 40"
# Question 1b
# Write a FUNCTION to show filtered transactions by name_id in the above, e.g.:
#     def transaction_filter(1):
#         ...
#         print and return transactions-of-name_id=1
# Question 1c (*advanced)
# Write a FUNCTION to show filtered transactions by name_id, good_id and month.
# *PLUS* your function needs to be capable of showing all transactions, or of
# filtering by just good_id or just name_id etc.
# (An illustrative sketch for 1a/1b follows below.)
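# --- Illustrative sketch (not part of the assignment text) -------------------
# One possible answer to Questions 1a/1b; the names used here are suggestions.
def show_transactions(name_id=None):
    total = 0
    for when, person_id, good_id in data:
        if name_id is not None and person_id != name_id:
            continue
        price = price_mapping_table[good_id]
        total += price
        print("%s has bought a/an %s at a price of %d on %s." % (
            person_mapping_dict[person_id], good_mapping_dict[good_id],
            price, when.strftime("%Y-%m-%d")))
    print("Total Revenue: %d" % total)
# show_transactions()       # all transactions plus the total revenue (Question 1a)
# show_transactions(1)      # only Aaron's transactions (Question 1b)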
| UTF-8 | Python | false | false | 2,770 | py | 34 | homework2.0_q.py | 29 | 0.54657 | 0.355957 | 0 | 87 | 30.804598 | 188 |
P-R-McWhirter/augment_height | 18,674,517,808,094 | a4861f902bb92d46b429a49f736fa3af8f1f74c0 | 0d91e95505e3ee60e058837d6d24d3e93a3eec65 | /augment_height.py | 34ed2587d77e76da21717d1d307684a6a9e4c587 | []
| no_license | https://github.com/P-R-McWhirter/augment_height | 0513b469d4feb3cef84e0186f446480d28b191f9 | cdec23ee7c93b820a518735149c33fdd056a441f | refs/heads/master | 2021-02-18T19:33:14.877719 | 2020-03-05T17:36:03 | 2020-03-05T17:36:03 | 245,227,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Imports
from __future__ import print_function
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import gc
import itertools
import cv2
import argparse
gc.enable()
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--start", type=float, required=True, help="starting height of data")
ap.add_argument("-e", "--end", type=float, required=True, help="ending height of data")
ap.add_argument("-t", "--type", type=str, required=True, help="file extension of data")
args = vars(ap.parse_args())
start = args["start"]
end = args["end"]
filetype = args["type"]
ratio = start/end
color = [0, 0, 0]
imgs = []
cwd = os.getcwd()
for file in os.listdir(cwd):
if file.endswith(filetype):
imgs.append(file)
new_folder = 'data_' + str(int(start)) + '_' + str(int(end))
if not os.path.exists(new_folder):
os.makedirs(new_folder)
for file in imgs:
img = cv2.imread(file)
newsize_x = int(img.shape[0] * ratio)
newsize_y = int(img.shape[1] * ratio)
resize = cv2.resize(img, dsize=(newsize_y, newsize_x), interpolation=cv2.INTER_CUBIC)
delta_w = img.shape[1] - resize.shape[1]
delta_h = img.shape[0] - resize.shape[0]
top, bottom = delta_h//2, delta_h-(delta_h//2)
left, right = delta_w//2, delta_w-(delta_w//2)
new_im = cv2.copyMakeBorder(resize, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
cv2.imwrite(new_folder + '/' + file, new_im)
labels = pd.read_csv(cwd + "/" + file[:-4] + ".txt", sep = " ", header = None).values
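    # labels are assumed to be normalized [class, cx, cy, w, h] rows; box centres
    # are rescaled about the image centre and box sizes by the same ratio applied
    # to the pixels, so the boxes keep matching the resized objects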
labels[:,1] = (labels[:,1] - 0.5) * ratio + 0.5
labels[:,2] = (labels[:,2] - 0.5) * ratio + 0.5
labels[:,3] = labels[:,3] * ratio
labels[:,4] = labels[:,4] * ratio
np.savetxt(new_folder + '/' + file[:-4] + ".txt", labels, delimiter = ' ', fmt='%i %1.6f %1.6f %1.6f %1.6f')
| UTF-8 | Python | false | false | 1,957 | py | 2 | augment_height.py | 1 | 0.625447 | 0.601942 | 0 | 67 | 28.208955 | 112 |
manhcntt21/TextNormSeq2Seq | 7,121,055,811,919 | a2840cdc8c280ce92c46d31aae8c3e5d2648cb9b | 6f2d9408e0074ccc29fdcf65f55f55fa7ef53ee6 | /check.py | 08509afc3a3baa8b07200a9fa2fc59fc056165c9 | [
"MIT"
]
| permissive | https://github.com/manhcntt21/TextNormSeq2Seq | a3a4fb770b42594a5fa6837664013a496b067132 | 440b252bddc0c735f083acd51271f2056d088a0a | refs/heads/master | 2020-09-13T13:54:19.390847 | 2020-02-03T02:09:01 | 2020-02-03T02:09:01 | 222,806,593 | 0 | 0 | NOASSERTION | true | 2019-11-19T23:02:02 | 2019-11-19T23:02:01 | 2019-11-14T15:46:25 | 2019-09-10T15:13:32 | 45 | 0 | 0 | 0 | null | false | false |
import random
def get_repleace_character():
repleace_character = {}
# repleace_character['ch'] = ['tr']
# repleace_character['tr'] = ['ch']
repleace_character['l'] = ['n']
repleace_character['n'] = ['l']
repleace_character['x'] = ['s']
repleace_character['s'] = ['x']
repleace_character['r'] = ['d', 'gi']
repleace_character['d'] = ['r', 'gi']
# repleace_character['gi'] = ['d', 'r']
repleace_character['c'] = ['q', 'k']
repleace_character['k'] = ['q', 'c']
repleace_character['q'] = ['c', 'k']
repleace_character['i'] = ['y']
repleace_character['y'] = ['i']
repleace_character['_'] = ['_']
return repleace_character
def get_prox_keys():
array_prox = {}
array_prox['a'] = ['q', 'w', 'z', 'x', 's']
array_prox['b'] = ['v', 'f', 'g', 'h', 'n', ' ']
array_prox['c'] = ['x', 's', 'd', 'f', 'v']
array_prox['d'] = ['x', 's', 'w', 'e', 'r', 'f', 'v', 'c']
array_prox['e'] = ['w', 's', 'd', 'f', 'r']
array_prox['f'] = ['c', 'd', 'e', 'r', 't', 'g', 'b', 'v']
array_prox['g'] = ['r', 'f', 'v', 't', 'b', 'y', 'h', 'n']
array_prox['h'] = ['b', 'g', 't', 'y', 'u', 'j', 'm', 'n']
array_prox['i'] = ['u', 'j', 'k', 'l', 'o']
array_prox['j'] = ['n', 'h', 'y', 'u', 'i', 'k', 'm']
array_prox['k'] = ['u', 'j', 'm', 'l', 'o']
array_prox['l'] = ['p', 'o', 'i', 'k', 'm']
array_prox['m'] = ['n', 'h', 'j', 'k', 'l']
array_prox['n'] = ['b', 'g', 'h', 'j', 'm']
array_prox['o'] = ['i', 'k', 'l', 'p']
array_prox['p'] = ['o', 'l']
array_prox['q'] = ['w', 'a']
array_prox['r'] = ['e', 'd', 'f', 'g', 't']
array_prox['s'] = ['q', 'w', 'e', 'z', 'x', 'c']
array_prox['t'] = ['r', 'f', 'g', 'h', 'y']
array_prox['u'] = ['y', 'h', 'j', 'k', 'i']
array_prox['v'] = ['', 'c', 'd', 'f', 'g', 'b']
array_prox['w'] = ['q', 'a', 's', 'd', 'e']
array_prox['x'] = ['z', 'a', 's', 'd', 'c']
array_prox['y'] = ['t', 'g', 'h', 'j', 'u']
array_prox['z'] = ['x', 's', 'a']
array_prox['1'] = ['q', 'w']
array_prox['2'] = ['q', 'w', 'e']
array_prox['3'] = ['w', 'e', 'r']
array_prox['4'] = ['e', 'r', 't']
array_prox['5'] = ['r', 't', 'y']
array_prox['6'] = ['t', 'y', 'u']
array_prox['7'] = ['y', 'u', 'i']
array_prox['8'] = ['u', 'i', 'o']
array_prox['9'] = ['i', 'o', 'p']
array_prox['0'] = ['o', 'p']
array_prox['_'] = ['_']
return array_prox
def add_noise(word):
# i = random.randint(0,len(word)-1)
# op = random.randint(0, 30)
i = 0
op = 4
if op == 0:
return word[:i] + word[i+1:]
if op == 1:
i += 1
return word[:i-1] + word[i:i+1] + word[i-1:i] + word[i+1:]
if op == 2 or op == 3:
try:
# print(op)
            repleace_character = get_repleace_character()
            replacement = random.choice(repleace_character[word[i]])
            print(replacement)
            return word[:i] + replacement + word[i+1:]  # swap in a commonly confused character pair
except:
return word
# print(random.choice(get_prox_keys[word[i]]))
# return word[:i] + random.choice(get_prox_keys[word[i]]) + word[i+1:]
try:
tmp = get_prox_keys()
tmp1 = random.choice(tmp[word[i]])
print(tmp1)
return word[:i] + tmp1 + word[i+1:] #default is keyboard errors
except :
# print(word)
return word
if __name__ == '__main__':
# a = '[anh'
# b = add_noise(a)
# print(b)
# b = get_prox_keys()
a = 10
b = 'string'
c = str(a)
print(type(a))
print(type(c))
| UTF-8 | Python | false | false | 3,524 | py | 66 | check.py | 9 | 0.418275 | 0.408343 | 0 | 104 | 32.836538 | 100 |
g2thend/proxypool | 2,405,181,701,292 | 2e19cb52db6edb83275d2ba77f24c43fc683850d | 5f98c9ee03fe4e6c36eab76d819d2ab868b837c8 | /imorter.py | eec48591e818ec08f1de146f8b0fd9a4f8ded6eb | []
| no_license | https://github.com/g2thend/proxypool | 604fe8f6153b57e70b392ad3023786e98da1043f | fc30541131c095c7d4d845d7b884e78c6f7e9674 | refs/heads/master | 2020-12-14T17:43:12.544796 | 2020-01-19T02:42:25 | 2020-01-19T02:42:25 | 234,827,201 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/20/19 2:19 PM
# @Author : yon
# @Email : @qq.com
# @File : imorter
# Manually enter proxies into the pool
# Not yet tested
from proxypool.db import sqlitedb
importer = sqlitedb()
def importproxy(proxy):
result = importer.add(proxy)
print(proxy)
print('录入成功' if result else '录入失败')
def scan():
print('请输入代理, 输入exit退出读入')
    while True:
        proxy = input("代理ip,格式为 ip:port")
        if proxy == 'exit':
            break
        protocol = input("代理协议:http或https")
        temp = (proxy, protocol)
        # assumption: sqlitedb.add() accepts the (ip:port, protocol) tuple built here
        importproxy(temp)
if __name__ == '__main__':
scan()
| UTF-8 | Python | false | false | 691 | py | 11 | imorter.py | 10 | 0.55935 | 0.543089 | 0 | 32 | 18.1875 | 43 |
ezeutno/PycharmProject | 6,717,328,868,756 | 94ed949972b741443f798b3378624901ba37f47c | c4a0669126f2fbf757ac3b33a8279ef32305bbd7 | /Data Project/Jude_Assignment/Alien_Invasion(New)/earth.py | 738b0735ec6040ee00969c85928b6670fe303be6 | []
| no_license | https://github.com/ezeutno/PycharmProject | 822b5a7da05729c5241a03b7413548a34b12e4a5 | bdb87599885287d2d7cd5cd703b62197563722b8 | refs/heads/master | 2021-07-18T20:55:08.605486 | 2017-10-24T03:14:10 | 2017-10-24T03:14:10 | 105,782,136 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
class Earth():
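    """Sprite for the earth image, drawn at the bottom centre of the screen."""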
def __init__ (self,ai_setting,screen):
self.screen = screen
self.ai_setting = ai_setting
self.image = pygame.image.load('Data_base\\earth.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
def blitme(self):
self.screen.blit(self.image, self.rect) | UTF-8 | Python | false | false | 491 | py | 135 | earth.py | 125 | 0.621181 | 0.621181 | 0 | 15 | 31.8 | 62 |
JohnLZeller/F310_Gamepad_Parser | 19,224,273,630,627 | 9e013c5208450692eb8d8375a6a6663fcaa95b04 | e01376a824b8e9cc0e6910115e33365824de857c | /gui_main.py | 79d4e29ceb66c34e43e64159aef435ccc6f1309f | []
| no_license | https://github.com/JohnLZeller/F310_Gamepad_Parser | 7863fe383cad51b20703ba22dc3bff4aed311c07 | 04c47aac51a2697ad0288fbdad382ace754484c9 | refs/heads/master | 2021-01-21T05:06:02.317722 | 2013-02-12T23:22:49 | 2013-02-12T23:22:49 | 12,443,874 | 4 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | ############ Logitech F310 Gamepad Controller GUI - gui_main.py ##########
# Original Author: John Zeller
# Description: GUI_Main displays the values being stored in the states
# python dictionary in a very simple labeled Tkinter GUI
# to make them easily viewable.
import Tkinter as tk # Native Python GUI Framework
import time
class GUI():
def __init__(self, states):
self.states = states # Save the states in self.states
self.root = tk.Tk()
self.root.title('Logitech F310 Gamepad Controller Output Display')
self.root.geometry('600x750')
# Create and Position Title Labels
self.label_Time = tk.Label(self.root, text="GUI Running for... ").grid(row=0, padx=10, pady=20)
self.label_Time_s = tk.Label(self.root, text="seconds").grid(row=0, column=2)
self.label_Buttons_Title = tk.Label(self.root, text="-- Buttons --").grid(columnspan=2, pady=10, sticky="E")
self.label_A = tk.Label(self.root, text="A").grid(row=2, padx=5, sticky="E")
self.label_B = tk.Label(self.root, text="B").grid(row=3, padx=5, sticky="E")
self.label_X = tk.Label(self.root, text="X").grid(row=4, padx=5, sticky="E")
self.label_Y = tk.Label(self.root, text="Y").grid(row=5, padx=5, sticky="E")
self.label_Back = tk.Label(self.root, text="Back").grid(row=6, padx=5, sticky="E")
self.label_Start = tk.Label(self.root, text="Start").grid(row=7, padx=5, sticky="E")
self.label_Middle = tk.Label(self.root, text="Middle").grid(row=8, padx=5, sticky="E")
self.label_Left = tk.Label(self.root, text="Left").grid(row=9, padx=5, sticky="E")
self.label_Right = tk.Label(self.root, text="Right").grid(row=10, padx=5, sticky="E")
self.label_Up = tk.Label(self.root, text="Up").grid(row=11, padx=5, sticky="E")
self.label_Down = tk.Label(self.root, text="Down").grid(row=12, padx=5, sticky="E")
self.label_LB = tk.Label(self.root, text="LB").grid(row=13, padx=5, sticky="E")
self.label_RB = tk.Label(self.root, text="RB").grid(row=14, padx=5, sticky="E")
self.label_LJButton = tk.Label(self.root, text="LJ/Button").grid(row=15, padx=5, sticky="E")
self.label_RJButton = tk.Label(self.root, text="RJ/Button").grid(row=16, padx=5, sticky="E")
self.label_Joys_Title = tk.Label(self.root, text="-- Joys --").grid(columnspan=2, pady=10, sticky="E")
self.label_LT = tk.Label(self.root, text="LT").grid(row=18, padx=5, sticky="E")
self.label_RT = tk.Label(self.root, text="RT").grid(row=19, padx=5, sticky="E")
self.label_LJLeft = tk.Label(self.root, text="LJ/Left").grid(row=20, padx=5, sticky="E")
self.label_LJRight = tk.Label(self.root, text="LJ/Right").grid(row=21, padx=5, sticky="E")
self.label_LJUp = tk.Label(self.root, text="LJ/Up").grid(row=22, padx=5, sticky="E")
self.label_LJDown = tk.Label(self.root, text="LJ/Down").grid(row=23, padx=5, sticky="E")
self.label_RJLeft = tk.Label(self.root, text="RJ/Left").grid(row=24, padx=5, sticky="E")
self.label_RJRight = tk.Label(self.root, text="RJ/Right").grid(row=25, padx=5, sticky="E")
self.label_RJUp = tk.Label(self.root, text="RJ/Up").grid(row=26, padx=5, sticky="E")
self.label_RJDown = tk.Label(self.root, text="RJ/Down").grid(row=27, padx=5, sticky="E")
self.label_Packet = tk.Label(self.root, text="Packets coming from controller (Bytes 0-7)").grid(row=29, \
column=2, columnspan=8, pady=10, sticky="W")
self.label_Packet0 = tk.Label(self.root, text="Byte 0").grid(row=30, column=1, padx=5, sticky="E")
self.label_Packet1 = tk.Label(self.root, text="Byte 1").grid(row=30, column=2, padx=5, sticky="E")
self.label_Packet2 = tk.Label(self.root, text="Byte 2").grid(row=30, column=3, padx=5, sticky="E")
self.label_Packet3 = tk.Label(self.root, text="Byte 3").grid(row=30, column=4, padx=5, sticky="E")
self.label_Packet4 = tk.Label(self.root, text="Byte 4").grid(row=30, column=5, padx=5, sticky="E")
self.label_Packet5 = tk.Label(self.root, text="Byte 5").grid(row=30, column=6, padx=5, sticky="E")
self.label_Packet6 = tk.Label(self.root, text="Byte 6").grid(row=30, column=7, padx=5, sticky="E")
self.label_Packet7 = tk.Label(self.root, text="Byte 7").grid(row=30, column=8, padx=5, sticky="E")
self.label_HEX = tk.Label(self.root, text="HEX --->").grid(row=31, column=0, padx=5, sticky="E")
self.label_DECIMAL = tk.Label(self.root, text="DECIMAL --->").grid(row=32, column=0, padx=5, sticky="E")
# Create Dynamic Variable Labels
self.variable_Time = tk.Label(text="")
self.variable_Blank = tk.Label(text="")
self.variable_A = tk.Label(text="")
self.variable_B = tk.Label(text="")
self.variable_X = tk.Label(text="")
self.variable_Y = tk.Label(text="")
self.variable_Back = tk.Label(text="")
self.variable_Start = tk.Label(text="")
self.variable_Middle = tk.Label(text="")
self.variable_Left = tk.Label(text="")
self.variable_Right = tk.Label(text="")
self.variable_Up = tk.Label(text="")
self.variable_Down = tk.Label(text="")
self.variable_LB = tk.Label(text="")
self.variable_RB = tk.Label(text="")
self.variable_LJButton = tk.Label(text="")
self.variable_RJButton = tk.Label(text="")
self.variable_LT = tk.Label(text="")
self.variable_RT = tk.Label(text="")
self.variable_LJLeft = tk.Label(text="")
self.variable_LJRight = tk.Label(text="")
self.variable_LJUp = tk.Label(text="")
self.variable_LJDown = tk.Label(text="")
self.variable_RJLeft = tk.Label(text="")
self.variable_RJRight = tk.Label(text="")
self.variable_RJUp = tk.Label(text="")
self.variable_RJDown = tk.Label(text="")
self.variable_Packet0 = tk.Label(text="")
self.variable_Packet1 = tk.Label(text="")
self.variable_Packet2 = tk.Label(text="")
self.variable_Packet3 = tk.Label(text="")
self.variable_Packet4 = tk.Label(text="")
self.variable_Packet5 = tk.Label(text="")
self.variable_Packet6 = tk.Label(text="")
self.variable_Packet7 = tk.Label(text="")
self.variable_Packet0_INT = tk.Label(text="")
self.variable_Packet1_INT = tk.Label(text="")
self.variable_Packet2_INT = tk.Label(text="")
self.variable_Packet3_INT = tk.Label(text="")
self.variable_Packet4_INT = tk.Label(text="")
self.variable_Packet5_INT = tk.Label(text="")
self.variable_Packet6_INT = tk.Label(text="")
self.variable_Packet7_INT = tk.Label(text="")
# Position Dynamic Variable Labels
self.variable_Time.grid(row=0, column=1)
self.variable_A.grid(row=2, column=1)
self.variable_B.grid(row=3, column=1)
self.variable_X.grid(row=4, column=1)
self.variable_Y.grid(row=5, column=1)
self.variable_Back.grid(row=6, column=1)
self.variable_Start.grid(row=7, column=1)
self.variable_Middle.grid(row=8, column=1)
self.variable_Left.grid(row=9, column=1)
self.variable_Right.grid(row=10, column=1)
self.variable_Up.grid(row=11, column=1)
self.variable_Down.grid(row=12, column=1)
self.variable_LB.grid(row=13, column=1)
self.variable_RB.grid(row=14, column=1)
self.variable_LJButton.grid(row=15, column=1)
self.variable_RJButton.grid(row=16, column=1)
self.variable_LT.grid(row=18, column=1)
self.variable_RT.grid(row=19, column=1)
self.variable_LJLeft.grid(row=20, column=1)
self.variable_LJRight.grid(row=21, column=1)
self.variable_LJUp.grid(row=22, column=1)
self.variable_LJDown.grid(row=23, column=1)
self.variable_RJLeft.grid(row=24, column=1)
self.variable_RJRight.grid(row=25, column=1)
self.variable_RJUp.grid(row=26, column=1)
self.variable_RJDown.grid(row=27, column=1)
self.variable_Packet0.grid(row=31, column=1, sticky="E")
self.variable_Packet1.grid(row=31, column=2, sticky="E")
self.variable_Packet2.grid(row=31, column=3, sticky="E")
self.variable_Packet3.grid(row=31, column=4, sticky="E")
self.variable_Packet4.grid(row=31, column=5, sticky="E")
self.variable_Packet5.grid(row=31, column=6, sticky="E")
self.variable_Packet6.grid(row=31, column=7, sticky="E")
self.variable_Packet7.grid(row=31, column=8, sticky="E")
self.variable_Packet0_INT.grid(row=32, column=1, sticky="E")
self.variable_Packet1_INT.grid(row=32, column=2, sticky="E")
self.variable_Packet2_INT.grid(row=32, column=3, sticky="E")
self.variable_Packet3_INT.grid(row=32, column=4, sticky="E")
self.variable_Packet4_INT.grid(row=32, column=5, sticky="E")
self.variable_Packet5_INT.grid(row=32, column=6, sticky="E")
self.variable_Packet6_INT.grid(row=32, column=7, sticky="E")
self.variable_Packet7_INT.grid(row=32, column=8, sticky="E")
# Start tracking time
self.start_time = time.time()
# Run update_label()
self.update_labels()
self.root.mainloop() # Once here, begin main loop again
def update_labels(self):
time_counter = time.time() - self.start_time
time_counter = round(time_counter, 0)
# Configure Labels with Updates
self.variable_Time.configure(text=str(time_counter))
self.variable_A.configure(text=str(self.states['A']))
self.variable_B.configure(text=str(self.states['B']))
self.variable_X.configure(text=str(self.states['X']))
self.variable_Y.configure(text=str(self.states['Y']))
self.variable_Back.configure(text=str(self.states['Back']))
self.variable_Start.configure(text=str(self.states['Start']))
self.variable_Middle.configure(text=str(self.states['Middle']))
self.variable_Left.configure(text=str(self.states['Left']))
self.variable_Right.configure(text=str(self.states['Right']))
self.variable_Up.configure(text=str(self.states['Up']))
self.variable_Down.configure(text=str(self.states['Down']))
self.variable_LB.configure(text=str(self.states['LB']))
self.variable_RB.configure(text=str(self.states['RB']))
self.variable_LJButton.configure(text=str(self.states['LJ/Button']))
self.variable_RJButton.configure(text=str(self.states['RJ/Button']))
self.variable_LT.configure(text=str(self.states['LT']))
self.variable_RT.configure(text=str(self.states['RT']))
self.variable_LJLeft.configure(text=str(self.states['LJ/Left']))
self.variable_LJRight.configure(text=str(self.states['LJ/Right']))
self.variable_LJUp.configure(text=str(self.states['LJ/Up']))
self.variable_LJDown.configure(text=str(self.states['LJ/Down']))
self.variable_RJLeft.configure(text=str(self.states['RJ/Left']))
self.variable_RJRight.configure(text=str(self.states['RJ/Right']))
self.variable_RJUp.configure(text=str(self.states['RJ/Up']))
self.variable_RJDown.configure(text=str(self.states['RJ/Down']))
self.variable_Packet0.configure(text=self.states['Byte0'])
self.variable_Packet1.configure(text=self.states['Byte1'])
self.variable_Packet2.configure(text=self.states['Byte2'])
self.variable_Packet3.configure(text=self.states['Byte3'])
self.variable_Packet4.configure(text=self.states['Byte4'])
self.variable_Packet5.configure(text=self.states['Byte5'])
self.variable_Packet6.configure(text=self.states['Byte6'])
self.variable_Packet7.configure(text=self.states['Byte7'])
self.variable_Packet0_INT.configure(text=str(self.states['Byte0/INT']))
self.variable_Packet1_INT.configure(text=str(self.states['Byte1/INT']))
self.variable_Packet2_INT.configure(text=str(self.states['Byte2/INT']))
self.variable_Packet3_INT.configure(text=str(self.states['Byte3/INT']))
self.variable_Packet4_INT.configure(text=str(self.states['Byte4/INT']))
self.variable_Packet5_INT.configure(text=str(self.states['Byte5/INT']))
self.variable_Packet6_INT.configure(text=str(self.states['Byte6/INT']))
self.variable_Packet7_INT.configure(text=str(self.states['Byte7/INT']))
self.root.after(10, self.update_labels) | UTF-8 | Python | false | false | 12,513 | py | 2 | gui_main.py | 2 | 0.638376 | 0.611204 | 0 | 197 | 62.522843 | 116 |
rinoshinme/detection_yolo | 16,329,465,678,604 | 747623a8ea175bf2e5caeb8bcf165ba9dfb92be8 | 42245303c0914e9923a67f7e5763f354627e1d7a | /yolo/yolov1.py | c39617f182d1eae1b8988db4b408ca628d3c094f | []
| no_license | https://github.com/rinoshinme/detection_yolo | 0d68170136da908d6b7e384c2b9538f0017411e2 | 8c367d31cca4361aff1eacd6c96e544d5497ccbd | refs/heads/master | 2020-07-02T05:04:44.489451 | 2019-08-09T09:55:15 | 2019-08-09T09:55:15 | 201,423,587 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
from utils.layers import leaky_relu
from config.yolov1_config import cfg
slim = tf.contrib.slim
class YOLOv1(object):
def __init__(self, is_training=True):
self.classes = cfg.CLASSES
self.num_classes = len(self.classes)
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.output_size = (self.cell_size * self.cell_size) * \
(self.num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
        # Offsets that split the flattened prediction vector into
        # class probabilities, box confidences and box coordinates
        self.boundary1 = self.cell_size * self.cell_size * self.num_classes
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell
self.object_scale = cfg.OBJECT_SCALE
self.noobject_scale = cfg.NOOBJECT_SCALE
self.class_scale = cfg.CLASS_SCALE
self.coord_scale = cfg.COORD_SCALE
self.learning_rate = cfg.LEARNING_RATE
self.batch_size = cfg.BATCH_SIZE
self.alpha = cfg.ALPHA
self.is_training = is_training
def build_network(self, images, num_output, keep_prob=0.5):
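        # NOTE: only the first padded 3x3, stride-2 convolution of the backbone is
        # implemented below; the rest of the YOLOv1 network is still missing.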
with tf.variable_scope('yolo'):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name='pad_1')
net = tf.layers.conv2d(net, 64, [3, 3], [2, 2], padding='valid',
activation=leaky_relu(self.alpha),
name='conv_2')
return net
def loss_layer(self):
pass
if __name__ == '__main__':
model = YOLOv1()
| UTF-8 | Python | false | false | 1,507 | py | 22 | yolov1.py | 17 | 0.569343 | 0.5501 | 0 | 44 | 33.25 | 90 |
javerdejo/UMACamp_sensors | 2,860,448,223,835 | c448651a88c86378291100a365e153a7d13bfd8a | a00fc0006530b637f1439982b85a8f0b2fa12cc4 | /sensors/bluetooth/settings.py | c2f6e0c67e41426ee864f4edbedca1b8d61510e7 | []
| no_license | https://github.com/javerdejo/UMACamp_sensors | c272aa240a1f02f4b4e2aefa12218d3de8442996 | 05d6315ca085bbf2e77688a02eef18ea2ec10c23 | refs/heads/master | 2021-01-11T18:57:20.384878 | 2017-05-09T08:44:57 | 2017-05-09T08:44:57 | 79,280,292 | 0 | 0 | null | false | 2017-03-03T13:34:02 | 2017-01-17T22:46:44 | 2017-02-24T10:26:42 | 2017-03-03T13:32:59 | 47 | 0 | 0 | 2 | Python | null | null | BT_SCANNER_DURATION = 10 #seconds
BT_SCANNER_LOGGING = "/var/log/bluetooth.log"
BT_SCANNER_DATAHTML = "/var/log/bluetooth_data.log"
BT_SCANNER_LOGPATH = "/bluetooht_sensed"
| UTF-8 | Python | false | false | 173 | py | 12 | settings.py | 11 | 0.751445 | 0.739884 | 0 | 4 | 42.25 | 51 |
vtheno/python-monad | 1,176,821,040,834 | 1da93c0cef33f802d0edcc0b358370fffddb1848 | e8e36611267126c6add47d8a66175ec5a30961e9 | /infix.py | 2aea38426855a4c12725ba23b3cd8d102807f023 | [
"MIT"
]
| permissive | https://github.com/vtheno/python-monad | 666a044e5bc011e44b74440b725ba7dfabe83c20 | 781407a1b3eca7fb0beb0899136f4859f5e77540 | refs/heads/master | 2021-05-26T19:56:16.386606 | 2014-01-24T15:18:43 | 2014-01-24T15:18:43 | 106,650,047 | 1 | 0 | null | true | 2017-10-12T05:47:57 | 2017-10-12T05:47:57 | 2017-10-12T05:45:52 | 2014-01-24T15:18:49 | 152 | 0 | 0 | 0 | null | null | null | """
Infix hack from:
http://code.activestate.com/recipes/384122/
"""
# pylint: disable=R0903
class Infix(object):
"""
Infix decorator. For instance:
>>> @Infix
    ... def mult(x, y):
... return x * y
>>> 2 |mult| 4
8
>>> 2 <<mult>> 4
8
"""
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return self.__or__(other)
def __rshift__(self, other):
return self.__ror__(other)
def __call__(self, left, right):
return self.function(left, right)
| UTF-8 | Python | false | false | 749 | py | 7 | infix.py | 7 | 0.543391 | 0.522029 | 0 | 36 | 19.805556 | 79 |
rimvydaszilinskas/printer | 1,906,965,484,100 | befe3f1fc2053a6cb2cfc2b279ed089ea285068e | 786a05b62e0a6bded2568194d80570e2c1f58df1 | /lib/templates.py | fa1b98e6018f3c3ad7b1a34193944e0a97a6fcec | []
| no_license | https://github.com/rimvydaszilinskas/printer | ac31d05bd4c747dd793c5e3c5182573f922e7940 | 4a443c049dfc2fdb690f3c079393a7f2aad28058 | refs/heads/master | 2022-05-24T04:22:19.652929 | 2020-04-16T11:40:19 | 2020-04-16T11:40:19 | 171,291,657 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import urllib.request
def load_image(template):
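    # Return an open file handle for the given template stored under /home/pi/printer/templates/.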
print(template)
return open("/home/pi/printer/templates/" + template)
def cleanup_templates():
for root, dirs, files in os.walk("/home/pi/printer/templates"):
for filename in files:
if filename != "default.png" and filename != "template.bmp":
os.remove("/home/pi/printer/templates/" + filename)
def download_template(url, event_id):
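    # Fetch the template image for this event and return its local path.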
urllib.request.urlretrieve(url, "/home/pi/printer/templates/" + event_id + ".png")
return "/home/pi/printer/templates/" + event_id + ".png"
def template_exist(event_id):
return os.path.isfile("/home/pi/printer/templates/" + event_id + ".png") | UTF-8 | Python | false | false | 697 | py | 14 | templates.py | 9 | 0.662841 | 0.662841 | 0 | 19 | 35.736842 | 86 |
wkdewey/learnpythonthehardway | 13,812,614,850,581 | e62e78f693e95d5dd4ae8971cebc808c10cc6f90 | 2ff2cb79b2ea556514ceb9f41c55734f30f4b5fe | /ex17.py | d0cb3a3c4481920873d422c7617e7874a178acce | []
| no_license | https://github.com/wkdewey/learnpythonthehardway | 4ccc7dc02aeac09d42d36344961f539703cc008a | 284643d0ad52abfc826492c6f9a3cdbd9dcd8b50 | refs/heads/master | 2020-03-23T02:31:25.530269 | 2018-07-14T21:06:54 | 2018-07-14T21:06:54 | 140,978,281 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import argv; from os.path import exists; script, from_file, to_file = argv;print "Copying from %s to %s" % (from_file, to_file); indata = open(from_file).read(); out_file = open(to_file, 'w').write(indata)
x = len("12345")
print "%d" % x
| UTF-8 | Python | false | false | 247 | py | 27 | ex17.py | 27 | 0.663968 | 0.643725 | 0 | 3 | 81.333333 | 214 |
rdghosal/WaveNative | 16,329,465,659,863 | 551c9608375bf82c17d00c2df5bb24e15885a4bc | 4c71c8a38429a39da78509c617923f7911b07bab | /api/services/user.py | 9168764d00abc470547274c229d3a57ec5be182b | []
| no_license | https://github.com/rdghosal/WaveNative | 4b4e184070b9e992c9379df647e66a64cfb19e4b | 340fdb8e354daf87915d8e4725e510dd222cc4cc | refs/heads/master | 2023-01-12T05:49:39.878089 | 2020-02-16T02:16:31 | 2020-02-16T02:16:31 | 196,894,015 | 0 | 0 | null | false | 2023-01-07T14:39:10 | 2019-07-14T23:40:55 | 2020-02-16T02:16:37 | 2023-01-07T14:39:09 | 7,756 | 0 | 0 | 25 | JavaScript | false | false | from api import db
from api.models.user import User
from werkzeug.security import check_password_hash, generate_password_hash
def check_form_data(form):
"""Takes data used for registration
and evaluates whether info is original"""
if form["password"] != form["confirmation"] or \
db.session.query(User).filter(User.username == form["username"]).count() > 0:
return False
return True
def add_user(form):
"""Adds user to database"""
# Add new user data
db.session.add(User(
username=form["username"],
hashed=generate_password_hash(form["password"]),
age=form["age"],
country=form["country"]
))
# Commit change to db
db.session.commit()
def get_user_info(form):
"""Checks login credentials against database"""
result = db.session.query(User).filter(User.username == form["username"]).first()
if not result or not check_password_hash(result.hashed, form["password"]):
return None
return {
"id": result.id,
"username": result.username
} | UTF-8 | Python | false | false | 1,071 | py | 41 | user.py | 29 | 0.645191 | 0.644258 | 0 | 37 | 27.972973 | 85 |
mhyttsten/GeneralMLRepoPython | 11,596,411,703,049 | 52be2f8821d9fa77f71892998fb48654208274e6 | c790d516ae4fcb5a83010a08f9756313afd170a4 | /TF_Old/Ref_TFFileIO.py | 99032f755f0b4f9e56c2268ec7855c949877969c | []
| no_license | https://github.com/mhyttsten/GeneralMLRepoPython | 1ccbf0c7fbd85ec981b8d4d417ab673dcaa2e8ec | c5d3bfb115601a3f162e8b089b0a1527ac0ba139 | refs/heads/master | 2021-09-06T05:20:07.597283 | 2018-02-02T18:16:06 | 2018-02-02T18:16:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
def input_pipeline(filenames, batch_size, num_epochs=None):
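    # TF 1.x queue-based pipeline: read CSV lines from a filename queue, decode three
    # string features plus a label, and emit shuffled batches of the given size.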
filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
reader = tf.TextLineReader()
key, record_string = reader.read(filename_queue)
f1, f2, f3, label = tf.decode_csv(record_string, record_defaults=[["a"], ["b"], ["c"], ["d"]])
features = tf.stack([f1, f2, f3])
min_after_dequeue = 10000
capacity = min_after_dequeue + 3*batch_size
feature_batch, label_batch = tf.train.shuffle_batch(
[features, label],
num_threads=16,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
allow_smaller_final_batch=True)
return feature_batch, label_batch
get_batch = input_pipeline(
filenames=[("csv_file%d.csv" % i) for i in range(4)],
batch_size=3,
num_epochs=2)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
coord = tf.train.Coordinator()
sess = tf.Session()
writer = tf.summary.FileWriter('tb_report', sess.graph)
sess.run(init_op)
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
(features, labels) = sess.run(get_batch)
for i in range(features.shape[0]):
print "f: " + str(features[i]) + ", l: " + labels[i]
print 'features: {}, labels: {}'.format(str(features.shape), str(labels.shape))
except tf.errors.OutOfRangeError, e:
print 'Done training -- epoch limit reached'
finally:
coord.request_stop()
coord.join(threads)
writer.close() | UTF-8 | Python | false | false | 1,621 | py | 45 | Ref_TFFileIO.py | 45 | 0.660703 | 0.649599 | 0 | 45 | 35.044444 | 99 |
sneha8412/back-end-inspiration-board | 18,657,337,937,933 | efdb86bfd1b5c23c64eacf5538a0d0b5921d26f2 | c59b47da153036a20be3152cd0b9498c4333f1d8 | /app/card_routes.py | e57e419c4d97e6a19b0c6f537ee634f422a0754a | []
| no_license | https://github.com/sneha8412/back-end-inspiration-board | f4f18555ec9a2914de69a93196f12c17378c2bfb | cf752d0f8d6798933bdb1a3d94a1760687994efd | refs/heads/main | 2023-06-18T03:16:18.209195 | 2021-06-29T02:59:24 | 2021-06-29T02:59:24 | 381,177,181 | 0 | 0 | null | true | 2021-06-28T22:36:06 | 2021-06-28T22:36:05 | 2021-06-23T23:37:41 | 2021-06-23T23:04:21 | 0 | 0 | 0 | 0 | null | false | false | from flask import Blueprint, request, jsonify, make_response
from app import db
from .models.board import Board
from .models.card import Card
import os
# example_bp = Blueprint('example_bp', __name__)
card_bp = Blueprint("cards", __name__, url_prefix="/cards")
@card_bp.route("", methods=["POST"], strict_slashes=False)
def create_a_card():
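    # Create a card from the JSON request body; a "message" field is required.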
    request_body = request.get_json()
if "message" not in request_body:
return jsonify(details="invalid data"), 400
    new_card = Card.from_json(request_body)
db.session.add(new_card)
db.session.commit()
return new_card.to_json_card(), 201
@card_bp.route("", methods=["DELETE"], strict_slashes=False)
def delete_a_card():
pass
#update a card
# @card_bp.route("", methods=["PUT"], strict_slashes=False)
# def like_a_card():
# pass
@card_bp.route("", methods=["GET"], strict_slashes=False)
def get_all_cards():
pass
| UTF-8 | Python | false | false | 908 | py | 5 | card_routes.py | 5 | 0.664097 | 0.657489 | 0 | 37 | 23.513514 | 60 |
OverLordGoldDragon/StackExchangeAnswers | 16,733,192,597,947 | 856b67ec64e3567bf89bb9645b342ef4a20a0009 | 88d98cc49954c8bf7f8076eedaeb2336282de4ce | /SignalProcessing/Q76636 - filters - Why is x(n) - x(n - 1) + x(n + 2) lowpass/main.py | 07941c8928d54daef0e8de186616be9172bc8f6a | [
"MIT"
]
| permissive | https://github.com/OverLordGoldDragon/StackExchangeAnswers | 1312601f1a1b5970536171600e4317791c5e4953 | 47a5fd462e506cd417c7112a9fff3300b489f0de | refs/heads/main | 2023-08-08T09:51:52.304487 | 2023-08-02T09:20:03 | 2023-08-02T09:20:03 | 387,701,666 | 13 | 3 | MIT | false | 2021-10-02T01:48:50 | 2021-07-20T06:57:55 | 2021-08-18T01:23:43 | 2021-10-02T01:48:49 | 154,626 | 2 | 0 | 1 | Python | false | false | import numpy as np
import matplotlib.pyplot as plt
def plot(x, title):
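    # Plot |x| as a line with sample markers and return the figure handle.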
fig = plt.figure()
plt.plot(np.abs(x))
plt.scatter(np.arange(len(x)), np.abs(x), s=10)
plt.title(title, weight='bold', fontsize=18, loc='left')
return fig
def plot_T(x, Tmax):
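    # Title, plot and save |H(w)| for the alternating-sign filter of order Tmax.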
if Tmax == 0:
title = "|H(w)|: x(n)"
elif Tmax == 1:
title = "|H(w)|: x(n) - x(n - 1)"
elif Tmax == 2:
title = "|H(w)|: x(n) - x(n - 1) + x(n - 2)"
else:
title = "|H(w)|: x(n) - x(n - 1) + x(n - 2) - ... x(n - %s)" % Tmax
    fig = plot(x, title)
plt.ylim(-.05, 1.05)
plt.savefig(f'im{Tmax}.png', bbox_inches='tight')
plt.close(fig)
def csoid(f):
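    # Complex sinusoid exp(-j*2*pi*f*t) sampled on the global time grid t.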
return (np.cos(2*np.pi* f * t) -
np.sin(2*np.pi* f * t) * 1j)
#%%# Direct frequency response ###############################################
N = 32
t = np.linspace(0, 1, N, 0)
for Tmax in range(N):
x = np.sum([(-1)**T * csoid(T) for T in range(Tmax + 1)], axis=0)
x /= np.abs(x).max()
plot_T(x, Tmax)
#%%# WGN example #############################################################
def plot_and_save(x, title, savepath):
fig = plot(x, title)
plt.savefig(savepath, bbox_inches='tight')
plt.close(fig)
np.random.seed(69)
x = np.random.randn(32)
xf0 = np.fft.fft(x)
x = x - np.roll(x, 1) + np.roll(x, 2)
xf1 = np.fft.fft(x)
plot_and_save(xf0, "|X(w)|: x(n)", "WGN0.png")
plot_and_save(xf1, "|X(w)|: x(n) - x(n - 1) + x(n - 2)", "WGN1.png")
| UTF-8 | Python | false | false | 1,471 | py | 54 | main.py | 48 | 0.485384 | 0.456152 | 0 | 54 | 26.240741 | 78 |
YannisYao/12306Plus | 1,812,476,234,058 | bf3d92b212563f0f8e02a4c201a62542a169f7f1 | 1212291e28543032194a500a3c508cc44c71e238 | /business/check_captcha.py | 05e6955a6e5dfce8d5f0fa0e981e352c2518c221 | []
| no_license | https://github.com/YannisYao/12306Plus | 1d72102c6ca7b362ac077a12c069af8527a4bd4c | dbec0c8260fd94d5a7392330a1c3c692a6bbd1d5 | refs/heads/master | 2021-05-11T06:13:25.597472 | 2018-01-19T07:58:07 | 2018-01-19T07:58:07 | 117,982,002 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #校验验证码
from business.damatuWeb import DamatuApi
from business.captcha_download import download_captcha
import json
from business.urlcontants import UrlContants
from business.middleproxy import MiddleProxy
from bs4 import BeautifulSoup
# Damatu type code for coordinate-click (point selection) CAPTCHAs
CAPTCHA_TYPE = 310
# Offsets subtracted from the coordinates returned by Damatu
OFFSET_X = 0
OFFSET_Y = 30
def get_result_points(ret,result):
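    # Convert Damatu's "x1,y1|x2,y2|..." answer into the offset-adjusted
    # "x,y,x,y,..." string that the 12306 check request expects.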
result_point = ''
if ret == 0:
if result is not None:
captcha_point = result.split('|')
for point in captcha_point:
if point is not None:
pointX = point.split(',')[0]
pointY = point.split(',')[1]
pointX = int(pointX) - OFFSET_X
pointY = int(pointY) - OFFSET_Y
result_point = result_point + str(pointX) + ',' + str(pointY)+','
return result_point.rstrip(',')
else:
        print('Captcha solving failed! Error code: %s' % ret)
return None
def check_captcha_request(param,url=UrlContants.CAPTCHA_CHECK):
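    # POST the click coordinates to the captcha-check endpoint and return a
    # (passed, message) tuple.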
params={'answer':param,
'login_site':'E',
'rand':'sjrand'}
s = MiddleProxy.getSession().post(url, params=params,headers=MiddleProxy.headers_xhr)
if s.status_code == 200 and s.headers['Content-Type'] == 'application/json;charset=UTF-8':
content = str(s.content, encoding='utf-8')
jres = json.loads(content)
if jres['result_code'] == '4':
return (True,jres['result_message'])
else:
return (False,jres['result_message'])
elif s.status_code == 200 and s.headers['Content-Type'] == 'text/html':
bsObj = BeautifulSoup(s.content,'html.parser')
err_msg = bsObj.find('div', {'class': 'err_text'}).find('li', {'id': 'err_bot'}).get_text().strip()
err_msg = err_msg[:err_msg.index('!')+2]
return (False,err_msg)
def check_captcha():
    # Download and save the CAPTCHA image
image_path = download_captcha()
if image_path is not None:
damatu = DamatuApi(MiddleProxy.getDm2User(),MiddleProxy.getDm2Pwd())
        # Use the Damatu (dama2) captcha-solving service to get click coordinates
        print(damatu.getBalance())
        ret, result, captcha_id = damatu.decode(image_path, CAPTCHA_TYPE)
        print('Damatu answer ---> ' + result)
        # Convert to offset-adjusted coordinates
        result_points = get_result_points(ret, result)
        print('Converted coordinates ---> ' + result_points)
        # Submit the answer to 12306 for verification
check_result = check_captcha_request(result_points)
        # check_captcha_request returns a (passed, message) tuple
        if check_result[0]:
            # The image/answer pair could later be cached in a database: 12306 CAPTCHA
            # images are not generated dynamically, so stored answers would reduce
            # reliance on the captcha-solving service
return True
else:
return False | UTF-8 | Python | false | false | 2,752 | py | 21 | check_captcha.py | 19 | 0.594821 | 0.58247 | 0 | 70 | 34.871429 | 107 |