Dataset schema (one row per source file):

| Column | Type | Range / classes |
|---|---|---|
| repo_name | string | lengths 7-111 |
| __id__ | int64 | 16.6k-19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5-151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26-130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4-42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k-687M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0-10.2M, nullable |
| gha_stargazers_count | int32 | 0-178k, nullable |
| gha_forks_count | int32 | 0-88.9k, nullable |
| gha_open_issues_count | int32 | 0-2.72k, nullable |
| gha_language | string | lengths 1-16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10-2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10-2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1-202k |
| filename | string | lengths 4-112 |
| num_lang_files | int64 | 1-202k |
| alphanum_fraction | float64 | 0.26-0.89 |
| alpha_fraction | float64 | 0.2-0.89 |
| hex_fraction | float64 | 0-0.09 |
| num_lines | int32 | 1-93.6k |
| avg_line_length | float64 | 4.57-103 |
| max_line_length | int64 | 7-931 |

repo_name: vaneoooo/SailYX | path: /sailyx/apps/weixin/tohtml.py | license_type: no_license | repo_url: https://github.com/vaneoooo/SailYX

from uliweb import settings


def menu(path):
    menu_head = '<div id="sidebar-nav" class="hidden-phone"><ul id="dashboard-menu">'
    menu_end = '</ul></div>'
    menu_li = '<li%s <a href="%s"> <i class="%s"></i> <span>%s</span> </a> </li>'
    active = ' class="active"><div class="pointer"><div class="arrow"></div><div class="arrow_border"></div></div>'
    html = ''
    html += menu_head
    menus = settings.get_var('MENU/MenuList')
    for url, style, name in menus:
        if url == path:
            html += (menu_li % (active, url, style, name))
        else:
            html += (menu_li % ('>', url, style, name))
    html += menu_end
    return html
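
# A minimal sketch of the MENU/MenuList setting that menu() expects: an
# iterable of (url, icon_class, label) triples. The values below are
# illustrative, not taken from the original project.
EXAMPLE_MENU_LIST = [
    ('/index', 'icon-home', 'Home'),
    ('/stats', 'icon-signal', 'Stats'),
]
# menu('/index') would then render the 'Home' entry with the active markers.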

repo_name: pythonfixer/PyQtPractice | path: /04/connections.pyw | license_type: no_license | repo_url: https://github.com/pythonfixer/PyQtPractice

import functools
import sys

from PyQt4.QtCore import *
from PyQt4.QtGui import *


class Form(QDialog):

    def __init__(self, parent=None):
        super().__init__(parent)
        button1 = QPushButton("One")
        button2 = QPushButton("Two")
        button3 = QPushButton("Three")
        button4 = QPushButton("Four")
        button5 = QPushButton("Five")
        self.label = QLabel("Click a button...")
        layout = QHBoxLayout()
        layout.addWidget(button1)
        layout.addWidget(button2)
        layout.addWidget(button3)
        layout.addWidget(button4)
        layout.addWidget(button5)
        layout.addStretch()
        layout.addWidget(self.label)
        self.setLayout(layout)
        # Four ways to hook up old-style PyQt4 signals:
        self.connect(button1, SIGNAL("clicked()"), self.one)             # plain slot
        self.button2callback = functools.partial(self.anyButton, "Two")  # functools.partial
        self.connect(button2, SIGNAL("clicked()"), self.button2callback)
        self.button3callback = lambda who="Three": self.anyButton(who)   # lambda with default arg
        self.connect(button3, SIGNAL("clicked()"), self.button3callback)
        self.connect(button4, SIGNAL("clicked()"), self.clicked)         # shared slot using sender()
        self.connect(button5, SIGNAL("clicked()"), self.clicked)
        self.setWindowTitle("Connections")

    def one(self):
        self.label.setText("You clicked button 'One'")

    def anyButton(self, who):
        self.label.setText("You clicked button '{}'".format(who))

    def clicked(self):
        button = self.sender()
        if button is None or not isinstance(button, QPushButton):
            return
        self.label.setText("You clicked button '{}'".format(button.text()))


app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()

repo_name: vlipinska/Shared_with_Fibip | path: /test_laptop_2.py | license_type: no_license | repo_url: https://github.com/vlipinska/Shared_with_Fibip

This should be here.
And maybe that too.
I'm adding stuff -V
A Christian dances, dances, dances, dances

repo_name: jegooCN/hlsyw | path: /app/wechat/model/__init__.py | license_type: no_license | repo_url: https://github.com/jegooCN/hlsyw

# coding=utf-8
"""
Created by Jegoo on 2019-02-12 13:25
"""
from app import db
class BaseModel(db.Model):
__abstract__ = True
from .exercise import *
from .grade import *
from .record import *
from .user import *

repo_name: lornajane/nexmo-messages-handler | path: /worker.py | license_type: no_license | repo_url: https://github.com/lornajane/nexmo-messages-handler

import os
import json
from nexmo_jwt import JWTokenGenerator
import redis
import requests
import time
from pprint import pprint
from dotenv import load_dotenv
load_dotenv()
r = redis.Redis.from_url(os.getenv("REDIS_URL"))
gen = JWTokenGenerator(os.getenv('NEXMO_APP_ID'),os.getenv('PRIVATE_KEY_PATH'))
while True:
message = r.lpop('queue:messages')
if message:
JWT = gen.generate_token()
data = json.loads(message.decode('utf8'))
if data["type"] == "whatsapp":
# use the sandbox to send whatsapp
api_url = os.getenv("SANDBOX_API_URL")
msg = {'message':
{'content':
{
'type':'text',
'text': data["message"]}},
'from': {'type': 'whatsapp', 'number': os.getenv("NEXMO_SANDBOX_NUMBER")},
'to': {'type': 'whatsapp', 'number': data["to"]}}
else:
# assume SMS, use live messages API
api_url = os.getenv("MAIN_API_URL")
msg = {'message':
{'content':
{
'type':'text',
'text': data["message"]}},
'from': {'type': 'sms', 'number': os.getenv("NEXMO_NUMBER")},
'to': {'type': 'sms', 'number': data["to"]}}
headers = {'Accept': 'application/json', 'Accept-Encoding':'identity', 'Authorization': 'Bearer ' + JWT.decode('utf8')}
response = requests.post(api_url, json=msg, headers=headers)
print(response.text)
response_data = json.loads(response.text)
        if response_data.get("message_uuid"):
r.set('messages:' + response_data["message_uuid"], 'attempted')
time.sleep(0.1)
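
# Producer-side sketch, assuming the same Redis list and payload keys that the
# loop above consumes (the phone number is a placeholder):
#
#     payload = {"type": "sms", "to": "447700900000", "message": "hello"}
#     r.rpush('queue:messages', json.dumps(payload))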

repo_name: JeschkeLab/DeerLab | path: /deerlab/bg_models.py | license_type: permissive (MIT) | repo_url: https://github.com/JeschkeLab/DeerLab

# bg_models.py - Background parametric models
# ---------------------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2023: Luis Fabregas, Stefan Stoll and other contributors.
import numpy as np
import math as m
from numpy import pi
import inspect
from deerlab.dipolarkernel import dipolarkernel
from deerlab.utils import formatted_table
from deerlab.model import Model
from scipy.special import gamma, hyp2f1, sici
from deerlab.constants import *
#---------------------------------------------------------------------------------------
def hyp2f1_repro(a,b,c,z):
"""
Gauss Hypergeometric function 2F1 for |z|>1 and non-integer (a-b) based on its "reciprocation" form
Reference: https://functions.wolfram.com/07.23.17.0057.01
"""
return gamma(b - a)*gamma(c)/(gamma(b)*gamma(c - a)*(-z)**a)*hyp2f1(a, a - c + 1, a - b + 1, 1/z) + \
(gamma(a - b)*gamma(c))/(gamma(a)*gamma(c - b)*(-z)**b)*hyp2f1(b, b - c + 1, b - a + 1, 1/z)
#---------------------------------------------------------------------------------------
def _docstring(model,notes):
#---------------------------------------------------------------------------------------
args = model._parameter_list(order='vector')
args.insert(model._constantsInfo[0]['argidx'],model._constantsInfo[0]['argkey'])
parameters = ''
for arg in args:
if arg==model._constantsInfo[0]['argkey']:
type = 'array_like'
parameters += f'\n {arg} : {type} \n Time vector, in microseconds.'
elif len(np.atleast_1d(getattr(model,arg).idx))>1:
type = 'array_like'
parameters += f'\n {arg} : {type} \n {str(getattr(model,arg).description):s}'
else:
type = 'scalar'
parameters += f'\n {arg} : {type} \n {str(getattr(model,arg).description):s}'
string = inspect.cleandoc(f"""
{model.description}
Parameters
----------
{parameters}
Returns
-------
B : ndarray
Dipolar background vector.
Notes
-----
**Parameter Table**
""")
string += '\n'
string += '\n'
table = []
table.append(['Name','Lower','Upper','Type','Frozen','Unit','Description'])
for n, paramname in enumerate(model._parameter_list(order='vector')):
param_str = f'``{paramname}``'
lb_str = f'{np.atleast_1d(getattr(model,paramname).lb)[0]:5.3g}'
ub_str = f'{np.atleast_1d(getattr(model,paramname).ub)[0]:5.3g}'
linear_str = "linear" if np.all(getattr(model,paramname).linear) else "nonlin"
frozen_str = "Yes" if np.all(getattr(model,paramname).frozen) else "No"
unit_str = str(getattr(model,paramname).unit)
desc_str = str(getattr(model,paramname).description)
table.append([param_str,lb_str,ub_str,linear_str,frozen_str,unit_str,desc_str])
string += formatted_table(table)
string += f'\n{notes}'
return string
#---------------------------------------------------------------------------------------
#=======================================================================================
# bg_hom3d
#=======================================================================================
notes = r"""
**Model**
This model describes the inter-molecular interaction of one observer spin with a 3D
homogenous distribution of spins of concentration `c_s`
.. image:: ../images/model_scheme_bg_hom3d.png
:width: 350px
The expression for this model is
.. math::
B(t) = \mathrm{exp}\left(-\frac{8\pi^2}{9\sqrt{3}}\lambda c_s D |t|\right)`
where `c_s` is the spin concentration (entered in spins/m\ :sup:`3` into this expression) and D is the dipolar constant
.. math::
D = \frac{\mu_0}{4\pi}\frac{(g_\mathrm{e}\mu_\mathrm{B})^2}{\hbar}
"""
def _hom3d(t,conc,lam):
# Unit conversion
conc = conc*1e-6*1e3*Nav # umol/L -> mol/L -> mol/m^3 -> spins/m^3
# Compute background function
κ = 8*pi**2/9/m.sqrt(3)
B = np.exp(-κ*lam*conc*D*np.abs(t*1e-6))
return B
# Create model
bg_hom3d = Model(_hom3d,constants='t')
bg_hom3d.description = 'Background from a homogeneous distribution of spins in a 3D medium'
# Add parameters
bg_hom3d.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
bg_hom3d.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_hom3d.__doc__ = _docstring(bg_hom3d,notes)
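
# A minimal usage sketch, assuming the DeerLab convention that a Model object
# is callable as model(t, *parameters) with t in microseconds; the values are
# illustrative:
def _example_bg_hom3d():
    t = np.linspace(0, 5, 251)   # time axis, 0-5 us
    return bg_hom3d(t, 50, 1)    # 50 uM spin concentration, pathway amplitude 1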
#=======================================================================================
# bg_hom3d_phase
#=======================================================================================
notes = r"""
**Model**
This model describes the phase shift due to inter-molecular interactions between one observer spin with a 3D homogenous distribution of spins of concentration `c_s`
The expression for this model is
.. math::
B(t) = \mathrm{exp}\left(\mathrm{i}\frac{8\pi}{9\sqrt{3}}(\sqrt{3} + \mathrm{ln}(2-\sqrt{3}))\lambda c_s D t\right)
where `c_s` is the spin concentration (entered in spins/m\ :sup:`3` into this expression) and D is the dipolar constant
.. math::
D = \frac{\mu_0}{4\pi}\frac{(g_\mathrm{e}\mu_\mathrm{B})^2}{\hbar}
"""
def _hom3dphase(t,conc,lam):
# Unit conversion
conc = conc*1e-6*1e3*Nav # umol/L -> mol/L -> mol/m^3 -> spins/m^3
# Compute background function
ξ = 8*pi/9/np.sqrt(3)*(np.sqrt(3)+np.log(2-np.sqrt(3)))*D
B = np.exp(1j*ξ*lam*conc*(t*1e-6))
return B
# Create model
bg_hom3d_phase = Model(_hom3dphase,constants='t')
bg_hom3d_phase.description = 'Phase shift from a homogeneous distribution of spins in a 3D medium'
# Add parameters
bg_hom3d_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
bg_hom3d_phase.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_hom3d_phase.__doc__ = _docstring(bg_hom3d_phase,notes)
#=======================================================================================
# bg_hom3dex
#=======================================================================================
notes = r"""
**Model**
.. image:: ../images/model_scheme_bg_hom3dex.png
:width: 350px
This implements a hard-shell excluded-volume model, with spin concentration `c_s` (in μM) and the radius of the spherical excluded volume `R_\mathrm{ex}` (in nm).
The expression for this model is
.. math:: B(t) = \exp \Bigg(- c_\mathrm{s}\lambda_k \bigg( V_\mathrm{ex} K_0(t, R_\mathrm{ex}) + \mathcal{I}_\mathrm{S}(t) \bigg)
where `\mathcal{I}_\mathrm{S}(t)` is an integral without analytical form given by
.. math:: \mathcal{I}_\mathrm{S}(t) = \frac{4\pi}{3} D\,t \int_0^1 \mathrm{d}z~(1 - 3z^2) ~ \mathrm{S_i}\left( \frac{D\,t (1 - 3z^2)}{R_\mathrm{ex}^3 } \right)
where `\mathrm{S_i}` is the sine integral function and `D` is the dipolar constant
.. math:: D = \frac{\mu_0}{4\pi}\frac{(g_\mathrm{e}\mu_\mathrm{B})^2}{\hbar}
"""
def _hom3dex(t,conc,rex,lam):
# Conversion: µmol/L -> mol/L -> mol/m^3 -> spins/m^3
conc = conc*1e-6*1e3*Nav
# Excluded volume
Vex = 4*np.pi/3*(rex*1e-9)**3
# Averaging integral
z = np.linspace(0,1,1000)[np.newaxis,:]
Dt = D*t[:,np.newaxis]*1e-6
Is = 4*np.pi/3*np.trapz(Dt*(1-3*z**2)*sici((Dt*(1-3*z**2))/((rex*1e-9)**3))[0],z,axis=1)
# Background function
C_k = -Vex + Is + np.squeeze(Vex*(dipolarkernel(t,rex,integralop=False)))
B = np.exp(-lam*conc*C_k)
return B
# Create model
bg_hom3dex = Model(_hom3dex,constants='t')
bg_hom3dex.description = 'Background from a homogeneous distribution of spins with excluded volume'
# Add parameters
bg_hom3dex.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
bg_hom3dex.rex.set(description='Exclusion radius', lb=0.01, ub=20, par0=1, unit='nm')
bg_hom3dex.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_hom3dex.__doc__ = _docstring(bg_hom3dex,notes)
#=======================================================================================
# bg_hom3dex_phase
#=======================================================================================
notes = r"""
**Model**
.. image:: ../images/model_scheme_bg_hom3dex.png
:width: 350px
This implements the phase-shift arising from a hard-shell excluded-volume model, with spin concentration `c_s` (in μM) and the radius of the spherical excluded volume `R_\mathrm{ex}` (in nm).
The expression for this model is
.. math:: B(t) = \exp \Bigg(- i c_\mathrm{s}\lambda_k \bigg( V_\mathrm{ex} \mathrm{Im}\{\mathcal{K}_0(t, R_\mathrm{ex})\} + \mathcal{I}_\mathrm{C}(t) \bigg)
where `\mathcal{I}_\mathrm{C}(t)` is an integral without analytical form given by
.. math:: \mathcal{I}_\mathrm{C}(t) = \frac{4\pi}{3} D\,t \int_0^1 \mathrm{d}z~(1 - 3z^2) ~ \mathrm{C_i}\left( \frac{D\,t (1 - 3z^2)}{R_\mathrm{ex}^3 } \right)
where `\mathrm{C_i}` is the cosine integral function and `D` is the dipolar constant
.. math:: D = \frac{\mu_0}{4\pi}\frac{(g_\mathrm{e}\mu_\mathrm{B})^2}{\hbar}
"""
def _hom3dex_phase(t,conc,rex,lam):
# Conversion: µmol/L -> mol/L -> mol/m^3 -> spins/m^3
conc = conc*1e-6*1e3*Nav
# Excluded volume
Vex = 4*np.pi/3*(rex*1e-9)**3
# Averaging integral
ξ = 8*pi**2/9/np.sqrt(3)*(np.sqrt(3)+np.log(2-np.sqrt(3)))/np.pi*D
z = np.linspace(0,1,1000)[np.newaxis,:]
Dt = D*t[:,np.newaxis]*1e-6
Ic = -ξ*(t*1e-6) + 4*np.pi/3*np.trapz(Dt*(1-3*z**2)*sici((Dt*np.abs(1-3*z**2))/((rex*1e-9)**3))[1],z,axis=1)
# Background function
C_k = - Ic - np.squeeze(Vex*(dipolarkernel(t,rex,integralop=False,complex=True)).imag)
B = np.exp(1j*lam*conc*C_k)
return B
# Create model
bg_hom3dex_phase = Model(_hom3dex_phase,constants='t')
bg_hom3dex_phase.description = 'Phase shift from a homogeneous distribution of spins with excluded volume'
# Add parameters
bg_hom3dex_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
bg_hom3dex_phase.rex.set(description='Exclusion radius', lb=0.01, ub=20, par0=1, unit='nm')
bg_hom3dex_phase.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_hom3dex_phase.__doc__ = _docstring(bg_hom3dex_phase,notes)
#=======================================================================================
# bg_homfractal
#=======================================================================================
notes = r"""
**Model**
This implements the background due to a homogeneous distribution of spins in a `fdim`-dimensional
space, with the `fdim`-dimensional spin concentration ``fconc``.
"""
def _homfractal(t,fconc,fdim,lam):
# Fractal dimension (not defined for d=[0, 1.5, 3, 4.5, 6])
d = float(fdim)
# Unit conversion of concentration
conc = fconc*1e-6*(np.power(10,d))*Nav # µmol/dm^d -> mol/m^d -> spins/m^d
# Compute prefactor
if d==3:
κd = 8*np.pi**2/9/np.sqrt(3) # d->3 limit of general expression
elif d==1.5:
κd = 8.71135 # d->1.5 limit of general expression
elif d==4.5:
κd = 5.35506 # d->4.5 limit of general expression
else:
κd = 2/9*(-1)**(-d/3+1)*pi*np.cos(d*pi/6)*gamma(-d/3)*(
(-1 + (-1)**(d/3))*np.sqrt(3*np.pi)*gamma(1+d/3)/gamma(3/2+d/3)
+ 6*hyp2f1_repro(1/2, -d/3, 3/2, 3)
)
κd = κd.real # Imaginary part is always negligible
# Compute background function
B = np.exp(-κd*lam*conc*abs(D*t*1e-6)**(d/3))
return B
# ======================================================================
# Create model
bg_homfractal = Model(_homfractal, constants='t')
bg_homfractal.description = 'Background from homogeneous spin distribution in a space of fractal dimension'
# Add parameters
bg_homfractal.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ')
bg_homfractal.fdim.set(description='Fractal dimensionality', lb=0.01, ub=5.99, par0=2.2, unit='')
bg_homfractal.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_homfractal.__doc__ = _docstring(bg_homfractal, notes)
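
# A minimal usage sketch, same calling convention as bg_hom3d but with the
# fractal dimension as an extra parameter; the values are illustrative:
def _example_bg_homfractal():
    t = np.linspace(0, 5, 251)             # time axis, us
    return bg_homfractal(t, 1e-6, 2.2, 1)  # fconc, fdim, lam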
#=======================================================================================
# bg_homfractal_phase
#=======================================================================================
notes = r"""
**Model**
This implements the phase shift due to a homogeneous distribution of spins in a `d`-dimensional space, with `d`-dimensional spin concentration ``c_d``.
"""
def _homfractal_phase(t,fconc,fdim,lam):
# Fractal dimension (not defined for d=[0, 1.5, 3, 4.5, 6])
d = float(fdim)
# Unit conversion of concentration
fconc = fconc*1e-6*(np.power(10,d))*Nav # umol/dm^d -> mol/m^d -> spins/m^d
# Compute constant
if d==3:
ξd = 0.33462*D**(d/3) # Limit of d->3 of equation below
elif d==1.5:
ξd = 1j*np.inf # Limit of d->1.5 of equation below
elif d==4.5:
ξd = 1j*np.inf # Limit of d->4.5 of equation below
else:
ξd = 2*D**(d/3)*pi**(3/2)/9/gamma(3/2 + d/3) * (
np.sqrt(3)*pi*np.cos(d*pi/6)/np.cos(d*pi/3)
- 2**(2+2*d/3)*3**(1 + d/3)*gamma(-1-2*d/3)*np.sin(d*pi/6)*gamma(3/2+d/3)*hyp2f1((-3-2*d)/6, -d/3, (3-2*d)/6, 1/3)/gamma((3-2*d)/6)
)
# Compute background function
B = np.exp(1j*ξd*fconc*lam*np.sign(t)*abs(t*1e-6)**(d/3))
return B
# ======================================================================
# Create model
bg_homfractal_phase = Model(_homfractal_phase,constants='t')
bg_homfractal_phase.description = 'Phase shift from a homogeneous distribution of spins in a fractal medium'
# Add parameters
bg_homfractal_phase.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ')
bg_homfractal_phase.fdim.set(description='Fractal dimensionality', lb=0.01, ub=5.99, par0=2.2, unit='')
bg_homfractal_phase.lam.set(description='Pathway amplitude', lb=0, ub=1, par0=1, unit='')
# Add documentation
bg_homfractal_phase.__doc__ = _docstring(bg_homfractal_phase,notes)
#=======================================================================================
# bg_exp
#=======================================================================================
notes= r"""
**Model**
.. math::
B(t) = \exp\left(-\kappa \vert t \vert\right)
Although the ``bg_exp`` model has the same functional form as ``bg_hom3d``, it is distinct since its
parameter is a decay rate constant and not a spin concentration like for ``bg_hom3d``.
"""
def _exp(t,decay):
return np.exp(-decay*np.abs(t))
# Create model
bg_exp = Model(_exp,constants='t')
bg_exp.description = 'Exponential background model'
# Add parameters
bg_exp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.35, unit='μs⁻¹')
# Add documentation
bg_exp.__doc__ = _docstring(bg_exp,notes)
#=======================================================================================
# bg_strexp
#=======================================================================================
notes = r"""
**Model**
.. math::
B(t) = \exp\left(-\kappa \vert t\vert^{d}\right)
Although the ``bg_strexp`` model has the same functional form as ``bg_homfractal``, it is distinct since its
first parameter is a decay rate constant and not a spin concentration like for ``bg_homfractal``.
"""
def _strexp(t,decay,stretch):
return np.exp(-decay*abs(t)**stretch)
# Create model
bg_strexp = Model(_strexp,constants='t')
bg_strexp.description = 'Stretched exponential background model'
# Add parameters
bg_strexp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
bg_strexp.stretch.set(description='Stretch factor', lb=0, ub=6, par0=1, unit='')
# Add documentation
bg_strexp.__doc__ = _docstring(bg_strexp,notes)
#=======================================================================================
# bg_prodstrexp
#=======================================================================================
notes = r"""
**Model**
:math:`B(t) = \exp\left(-\kappa_1 \vert t \vert^{d_1}\right) \exp\left(-\kappa_2 \vert t\vert^{d_2}\right)`
"""
def _prodstrexp(t,decay1,stretch1,decay2,stretch2):
strexp1 = np.exp(-decay1*abs(t)**stretch1)
strexp2 = np.exp(-decay2*abs(t)**stretch2)
return strexp1*strexp2
# Create model
bg_prodstrexp = Model(_prodstrexp,constants='t')
bg_prodstrexp.description = 'Product of two stretched exponentials background model'
# Add parameters
bg_prodstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
bg_prodstrexp.decay2.set(description='Decay rate of 2nd component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
bg_prodstrexp.stretch1.set(description='Stretch factor of 1st component', lb=0, ub=6, par0=1, unit='')
bg_prodstrexp.stretch2.set(description='Stretch factor of 2nd component', lb=0, ub=6, par0=1, unit='')
# Add documentation
bg_prodstrexp.__doc__ = _docstring(bg_prodstrexp,notes)
#=======================================================================================
# bg_sumstrexp
#=======================================================================================
notes = r"""
**Model**
:math:`B(t) = A_1\exp \left(-\kappa_1 \vert t \vert^{d_1}\right) + (1-A_1)\exp\left(-\kappa_2 \vert t \vert^{d_2}\right)`
"""
def _sumstrexp(t,decay1,stretch1,weight1,decay2,stretch2):
strexp1 = np.exp(-decay1*abs(t)**stretch1)
strexp2 = np.exp(-decay2*abs(t)**stretch2)
return weight1*strexp1 + (1-weight1)*strexp2
# Create model
bg_sumstrexp = Model(_sumstrexp,constants='t')
bg_sumstrexp.description = 'Sum of two stretched exponentials background model'
# Add parameters
bg_sumstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
bg_sumstrexp.decay2.set(description='Decay rate of 2nd component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
bg_sumstrexp.weight1.set(description='Weight of the 1st component', lb=0, ub=1, par0=0.5, unit='')
bg_sumstrexp.stretch1.set(description='Stretch factor of 1st component', lb=0, ub=6, par0=1, unit='')
bg_sumstrexp.stretch2.set(description='Stretch factor of 2nd component', lb=0, ub=6, par0=1, unit='')
# Add documentation
bg_sumstrexp.__doc__ = _docstring(bg_sumstrexp,notes)
#=======================================================================================
# bg_poly1
#=======================================================================================
notes = r"""
**Model**
:math:`B(t) = p_0 + p_1 t`
"""
def _poly1(t,p0,p1):
return np.polyval([p1,p0],abs(t))
# Create model
bg_poly1 = Model(_poly1,constants='t')
bg_poly1.description = 'Polynomial 1st-order background model'
# Add parameters
bg_poly1.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
bg_poly1.p1.set(description='1st order weight', lb=-200, ub=200, par0=-1, unit='μs⁻¹')
# Add documentation
bg_poly1.__doc__ = _docstring(bg_poly1,notes)
#=======================================================================================
# bg_poly2
#=======================================================================================
notes = r"""
**Model**
:math:`B(t) = p_0 + p_1 t + p_2 t^2`
"""
def _poly2(t,p0,p1,p2):
return np.polyval([p2,p1,p0],abs(t))
# Create model
bg_poly2 = Model(_poly2,constants='t')
bg_poly2.description = 'Polynomial 2nd-order background model'
# Add parameters
bg_poly2.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
bg_poly2.p1.set(description='1st order weight', lb=-200, ub=200, par0=-1, unit=r'μs\ :sup:`-1`')
bg_poly2.p2.set(description='2nd order weight', lb=-200, ub=200, par0=-1, unit=r'μs\ :sup:`-2`')
# Add documentation
bg_poly2.__doc__ = _docstring(bg_poly2,notes)
#=======================================================================================
# bg_poly2
#=======================================================================================
notes = r"""
**Model**
:math:`B(t) = p_0 + p_1 t + p_2 t^2 + p_3 t^3`
"""
def _poly3(t,p0,p1,p2,p3):
return np.polyval([p3,p2,p1,p0],abs(t))
# Create model
bg_poly3 = Model(_poly3,constants='t')
bg_poly3.description = 'Polynomial 3rd-order background model'
# Add parameters
bg_poly3.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
bg_poly3.p1.set(description='1st order weight', lb=-200, ub=200, par0=-1, unit=r'μs\ :sup:`-1`')
bg_poly3.p2.set(description='2nd order weight', lb=-200, ub=200, par0=-1, unit=r'μs\ :sup:`-2`')
bg_poly3.p3.set(description='3rd order weight', lb=-200, ub=200, par0=-1, unit=r'μs\ :sup:`-3`')
# Add documentation
bg_poly3.__doc__ = _docstring(bg_poly3,notes)

repo_name: Pang17/CISC367-Homework7 | path: /driver.py | license_type: no_license | repo_url: https://github.com/Pang17/CISC367-Homework7

import csv
from analysis import thread_characteristic
header = ['Conversation ID', 'Characteristic Value']
f = open('results.csv', 'w')
writer = csv.writer(f)
writer.writerow(header)
for row in thread_characteristic:
writer.writerow(row)
f.close()

repo_name: routerhan/thesis-ner-co-tri-training | path: /run_cotrain.py | license_type: no_license | repo_url: https://github.com/routerhan/thesis-ner-co-tri-training

import os
import logging
import argparse
from co_training import CoTraining
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def main():
# python run_cotrain.py --ext_output_dir ext_data --modelA_dir baseline_model --modelB_dir onto_model --de_unlabel_dir machine_translation/2017_de_sents.txt --en_unlabel_dir machine_translation/2017_en_sents.txt --k 10 --u 10 --top_n 3 --save_preds --save_agree
# python run_cotrain.py --ext_output_dir ext_data_1000 --modelA_dir baseline_model --modelB_dir onto_model --de_unlabel_dir machine_translation/2017_de_sents.txt --en_unlabel_dir machine_translation/2017_en_sents.txt --k 1000 --u 100 --top_n 10 --save_preds --save_agree
    # python run_ner.py --data_dir data/full-isw-release.tsv --bert_model bert-base-german-cased --output_dir baseline_model/ --max_seq_length 128 --do_train --extend_L --ext_data_dir ext_data_1000 --ext_output_dir ext_isw_model
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--ext_output_dir",
default='ext_data/',
type=str,
required=True,
help="The dir that you save the extended L set.")
parser.add_argument("--modelA_dir",
default='baseline_model/',
type=str,
required=True,
help="The dir of pre-trained model that will be used in the cotraining algorithm on the X1 feature set, e.g. German.")
parser.add_argument("--modelB_dir",
default='onto_model/',
type=str,
required=True,
help="The dir of another pre-trained model can be specified to be used on the X2 feature set, e.g. English.")
parser.add_argument("--de_unlabel_dir",
default='machine_translation/2017_de_sents.txt',
type=str,
required=True,
help="The dir of unlabeled sentences in German.")
parser.add_argument("--en_unlabel_dir",
default='machine_translation/2017_en_sents.txt',
type=str,
required=True,
help="The dir of unlabeled sentences in English.")
parser.add_argument("--save_preds",
action='store_true',
help="Whether to save the confident predictions.")
parser.add_argument("--save_agree",
action='store_true',
help="Whether to save the agree predictions, aka. the predictions that will be added to L set.")
parser.add_argument("--top_n",
default=5,
type=int,
help="The number of the most confident examples that will be 'labeled' by each classifier during each iteration")
parser.add_argument("--k",
default=30,
type=int,
help="The number of iterations. The default is 30")
parser.add_argument("--u",
default=75,
type=int,
help="The size of the pool of unlabeled samples from which the classifier can choose. Default - 75")
args = parser.parse_args()
# Initialize co-training class
if os.path.exists(args.ext_output_dir) and os.listdir(args.ext_output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.ext_output_dir))
if not os.path.exists(args.ext_output_dir):
os.makedirs(args.ext_output_dir)
co_train = CoTraining(modelA_dir=args.modelA_dir, modelB_dir=args.modelB_dir, save_preds=args.save_preds, top_n=args.top_n, k=args.k, u=args.u)
compare_agree_list = co_train.fit(ext_output_dir=args.ext_output_dir, de_unlabel_dir=args.de_unlabel_dir, en_unlabel_dir=args.en_unlabel_dir, save_agree=args.save_agree, save_preds=args.save_preds)
logger.info(" ***** Running Co-Training ***** ")
logger.info(" Model A = {}".format(args.modelA_dir))
logger.info(" Model B = {}".format(args.modelB_dir))
logger.info("Top_n: {}, iteration_k: {}, sample_pool_u: {}".format(args.top_n, args.k, args.u))
logger.info(" ***** Loading Agree Set ***** ")
logger.info(" Num of agree samples: {}".format(len(compare_agree_list)))
if __name__ == '__main__':
main()

repo_name: Di0niz/praktikum_contest | path: /18337_12_2_Базовые_структуры_данных/case_k.py | license_type: no_license | repo_url: https://github.com/Di0niz/praktikum_contest

class UniqStack:
    def __init__(self):
        self.stack = []
        self.uniq = set()
        self.count = 0

    def push(self, val):
        if val not in self.uniq:
            self.uniq.add(val)
            self.stack.append(val)
            self.count += 1

    def peek(self):
        if self.count > 0:
            return self.stack[-1]
        return None

    def pop(self):
        v = None
        if self.count > 0:
            v = self.stack.pop()
            self.uniq.remove(v)
            self.count -= 1
        return v

    def size(self):
        return self.count
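
# A minimal usage sketch: push() ignores duplicates, so only two values are
# kept here and pop() returns them in LIFO order.
def demo_uniq_stack():
    st = UniqStack()
    st.push('a')
    st.push('b')
    st.push('a')  # ignored: 'a' is already on the stack
    assert st.size() == 2
    assert st.pop() == 'b'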
def main():
    n = int(input())
    st = UniqStack()
    for _ in range(n):
        command = input()
        cc = command[:2]
        if cc == "pu":    # push <value>
            _, val = command.split(' ')
            st.push(val)
        elif cc == "po":  # pop
            res = st.pop()
            if not res:
                print("error")
        elif cc == "pe":  # peek
            res = st.peek()
            if res:
                print(res)
            else:
                print("error")
        elif cc == "si":  # size
            print(st.size())


if __name__ == "__main__":
    main()

repo_name: Saikat2019/Python3 | path: /learning/files/writingToFile.py | license_type: no_license | repo_url: https://github.com/Saikat2019/Python3

myfile = open("employee.txt", "a")
myfile.write("\narnab")
myfile.close()

repo_name: brunotakazono/sistema | path: /source/produtoscad.py | license_type: no_license | repo_url: https://github.com/brunotakazono/sistema

import sqlite3
from time import sleep
conn = sqlite3.connect('sistemas.db')
cursor = conn.cursor()
produto = input('produto:')
categoria = input('categoria:')
marca = input('marca:')
estoqueMinimo = input('estoqueMinimo:')
estoqueMaximo = input('estoqueMaximo:')
qtdeProduto = input('qtdeProduto:')
valorCompra = input('valorCompra:')
valorUnitario = input('valorUnitario:')
valorAtacado = input('valorAtacado:')
qtdeAtacado = input('qtdeAtacado:')
obsProduto = input('obsProduto:')
# Insert the product only if it is not registered yet.
cursor.execute("SELECT count(*) FROM produto WHERE produto = ?", (produto,))
data = cursor.fetchone()[0]
if data == 0:
    cursor.execute('''
    INSERT INTO produto (produto, categoria, marca, estoque_minimo, estoque_maximo, qtde,
    valor_compra, valor_unitario, valor_atacado, qtde_atacado, obs) VALUES (?,?,?,?,?,?,?,?,?,?,?)''',
                   (produto, categoria, marca, estoqueMinimo, estoqueMaximo, qtdeProduto, valorCompra,
                    valorUnitario, valorAtacado, qtdeAtacado, obsProduto))
    conn.commit()
    print('Dados inseridos com sucesso.')
else:
    print("Error: Produto ja Cadastrado.\n")
    sleep(5)
conn.close()

repo_name: eblade/radiant | path: /radiant/grid/backend/definition.py | license_type: permissive (MIT) | repo_url: https://github.com/eblade/radiant

#!/usr/bin/env python
"""
The *Definition* module contains object wrappers of the various
*data-types* found in Radiant Grids. Those are:
- :class:`ViewDefinition`
- :class:`ItemDefinition`
- :class:`DocumentDefinition`
- :class:`ColumnDefinition`
- :class:`VariableDefinition`
- :class:`DataDefinition`
- :class:`Feed`
.. note::
Each entry in "Variables" list for each class has a name
[*emphasized within brackets*]. This is the key in the *dict*-
representation of it's data, which would be used when
serializing to JSON.
"""
class ViewDefinition(object):
"""
Defines a View within a Workspace. A View describes a graphical representation
of the data in the Workspace using *items*. Items have various properties
depending on what type of item they are. See :class:`ItemDefinition`.
:cvar str name: [*name*] Workspace-unique name of this View
:cvar list items: [*items*] List of :class:`ItemDefinition`
"""
data_type = "grid/view/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.name = None
self.items = {}
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"name": self.name,
"items": [item.to_dict() for item in self.items.values()],
}
def from_dict(self, d):
assert d.get("data-type") == self.data_type
self.name = d["name"]
items = [ItemDefinition(item) for item in d.get("items", [])]
self.items = {item.name: item for item in items}
class ItemDefinition(object):
"""
Defines a graphical *Item* in a *View* (:class:`ViewDefinition`). The ``item_type`` can be one of:
- ``Label`` - A label with an optional title
- ``Table`` - A table bound to a document
:cvar str item_type: [*item-type*] The type of item this is
:cvar str name: [*name*] View-unique name of this Item
:cvar dict properties: [*properties*] Various properties for this item
:cvar list position: [*position*] Two or four values describing the position, either as
(x, y) or (x1, y1, x2, y2) depending on item type
"""
data_type = "grid/item/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.item_type = None
self.name = None
self.properties = {}
self.position = None
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"item-type": self.item_type,
"name": self.name,
"properties": self.properties,
"position": self.position,
}
def from_dict(self, d):
assert d.get("data-type") == self.data_type
self.item_type = d['item-type']
self.name = d['name']
self.properties = d['properties']
self.position = d['position']
class DocumentDefinition(object):
"""
Defines a Document within a Workspace. A Document describes a
table in a database that can hold data. The columns are defined
by :class:`ColumnDefinition` objects.
:cvar str name: [*name*] Workspace-unique name of this View
:cvar dict columns: [*columns*] List of :class:`ColumnDefinition`
"""
data_type = "grid/document/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.name = None
self.columns = {}
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"name": self.name,
"columns": [column.to_dict() for column in self.columns.values()]
}
def from_dict(self, d):
assert d.get("data-type") == self.data_type
self.name = d["name"]
columns = [ColumnDefinition(column) for column in d.get("columns", [])]
self.columns = {column.name: column for column in columns}
class ColumnDefinition(object):
"""
Defines a Column within a Document. The properties of the
ColumnDefinition are meant to describe a database column.
:cvar str name: [*name*] Document-unique name of this Column
:cvar str type_name: [*type-name*] Database type (``INTEGER``, ``TEXT``, ``REAL``, ``BLOB``)
:cvar int type_size: [*type-size*] Database type size (if applicable, else ``None``)
:cvar bool primary_key: [*primary-key*] Column is the *primary key* (default ``False``)
:cvar str default: [*default*] Use this default value (default ``None``)
:cvar bool auto_increment: [*auto-increment*] Column is the *auto incrementing* (default ``False``)
:cvar bool unique: [*unique*] Column has a *unique constraint* (default ``False``)
"""
data_type = "grid/column/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.name = None
self.type_name = None
self.type_size = None
self.primary_key = False
self.default = None
self.auto_increment = False
self.unique = False
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"name": self.name,
"type-name": self.type_name,
"type-size": self.type_size,
"primary-key": self.primary_key,
"default": self.default,
"auto-increment": self.auto_increment,
"unique": self.unique
}
def from_dict(self, d):
assert d.get('data-type') == self.data_type
self.name = d['name']
self.type_name = d['type-name']
self.type_size = d.get('type-size')
self.primary_key = d.get('primary-key', False)
self.default = d.get('default')
        self.auto_increment = d.get('auto-increment', False)
self.unique = d.get('unique', False)
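
# A minimal usage sketch: an INTEGER primary-key column in the dict form that
# from_dict()/to_dict() round-trip; the values are illustrative.
def _example_id_column():
    return ColumnDefinition({
        "data-type": "grid/column/entry",
        "name": "id",
        "type-name": "INTEGER",
        "primary-key": True,
        "auto-increment": True,
    }).to_dict()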
class VariableDefinition(object):
"""
Defines a Variable within a Workspace. Variables typically
have dot-separated names.
:cvar str name: [*name*] Workspace-unique name of this Variable
:cvar str type: [*type*] Type of the variable (:class:`str` (default), :class:`int`,
:class:`float`, :class:`bool`)
:cvar str value: [*value*] The value of the Variable
"""
data_type = "grid/variable/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.name = None
self.type = 'str'
self.value = None
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"name": self.name,
"type": self.type,
"value": self.value
}
def from_dict(self, d):
assert d.get('data-type') == self.data_type
self.name = d['name']
self.type = d.get('type', 'str')
self.value = d.get('value')
class DataDefinition(object):
"""
Defines a Row of Data within a Document.
:cvar dict data: [*data*] A dict with the values of the columns
"""
data_type = "grid/data/entry"
""" [*data-type*] """
def __init__(self, d=None):
"""
Constructor.
:param dict d: Optional dictionary to import
"""
self.data = None
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"data": self.data
}
def from_dict(self, d):
self.data = dict(d)
class Feed(object):
"""
Defines a Feed of any given entry data type.
:cvar class entry_class: The class used for the entries in the feed
:cvar str data_type: [*data-type*] The feed *data-type* to use
:cvar str workspace: [*workspace*] The workspace of this Feed's origin
:cvar int start: [*start*] Paging start index
    :cvar int count: [*count*] Paging total count
:cvar int page_size: [*page-size*] Paging page size
:cvar list entries: [*entries*] Entry of type ``entry_class``
"""
def __init__(self, entry_class, d=None, data_type=None):
"""
Constructor.
:param class entry_class: The class used for the entries of this feed
:param dict d: Optional dictionary to import
:param str data_type: Optional *data-type* to use, overriding ``entry_class.data_type``
"""
self.entry_class = entry_class
self.data_type = data_type or entry_class.data_type.replace('/entry', '/feed')
self.workspace = None
self.start = 0
self.count = 0
self.page_size = 0
self.entries = []
if d: self.from_dict(d)
def to_dict(self):
return {
"data-type": self.data_type,
"workspace": self.workspace,
"start": self.start,
"count": self.count,
"page-size": self.page_size,
"entries": [entry.to_dict() for entry in self.entries]
}
def from_dict(self, d):
assert d.get('data-type') == self.data_type
self.workspace = d.get('workspace')
self.start = d.get('start', 0)
self.count = d.get('count', 0)
self.page_size = d.get('page-size', 0)
self.entries = [self.entry_class(entry) for entry in d.get('entries', [])]
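
# A minimal usage sketch: a Feed wraps any entry class that implements the
# to_dict()/from_dict() protocol above; the values are illustrative.
def _example_feed():
    var = VariableDefinition({"data-type": "grid/variable/entry",
                              "name": "answer", "type": "int", "value": "42"})
    feed = Feed(VariableDefinition)
    feed.entries = [var]
    feed.count = 1
    return feed.to_dict()  # "data-type" becomes "grid/variable/feed"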

repo_name: judoshka/metro | path: /scrape.py | license_type: no_license | repo_url: https://github.com/judoshka/metro

import requests
from bs4 import BeautifulSoup
from datetime import date
from models import DB, Post
BASE_URL = "https://mosmetro.ru"
def get_html(url):
response = requests.get(url)
if response.status_code == 200:
return response.content
return None
def get_date(url):
content = get_html(url)
soup = BeautifulSoup(content, 'html.parser')
date_string = soup.select_one("div[class='pagetitle__content-date']")
if date_string:
return date_string.text
else:
return None
def parse(markup, last_news_number):
date_values = {
'Января': 1,
'Февраля': 2,
'Марта': 3,
'Апреля': 4,
'Мая': 5,
'Июня': 6,
'Июля': 7,
'Августа': 8,
'Сентября': 9,
'Октября': 10,
'Ноября': 11,
'Декабря': 12
}
data = markup.find_all(class_="newslist__list-item")
for i in data:
href = i.find("a")["href"]
href = BASE_URL + href
news_number = int(href.split("/")[-2])
news = Post.query.filter(Post.news_number == news_number).first()
if news:
break
        if last_news_number >= news_number:  # no more new news items
break
image_url = i.find("img")["src"]
image_url = BASE_URL + image_url
title = i.select_one("span[class='newslist__text-title']").text
published_date = get_date(href)
        if not published_date:  # the news item https://mosmetro.ru/press/news/4068/ is not clickable
continue
day, month, year = published_date.split()
day = int(day)
month = date_values.get(month)
year = int(year)
published_date = date(year, month, day)
scraped_date = date.today()
DB.session.add(Post(news_number, title, image_url, href, published_date, scraped_date))
DB.session.commit()
def scrape_data(app):
with app.app_context():
records = Post.query.all()
last_record_id = max([i.news_number for i in records]) if records else 0
search_url = BASE_URL + '/press/news/'
content = get_html(search_url)
soup = BeautifulSoup(content, 'html.parser')
parse(soup, last_news_number=last_record_id)
if __name__ == '__main__':
scrape_data()

repo_name: undp/open.undp.org | path: /scripts/generators/generator.py | license_type: no_license | repo_url: https://github.com/undp/open.undp.org

from __future__ import print_function
import urllib2
from collections import defaultdict
from lxml import etree
import copy
import json
import mimetypes
import zipfile
from controller import Controller
import config as settings
from models import (Project, Output, Subnational, Unit, UnitProject, Crs, Donor, CountryDonor, ProjectSummary,
TopDonor, TopDonorLocal, Region, CoreDonor, OperatingUnit)
from _collection import (Projects, Outputs, Subnationals, Units, CrsIndex, DonorIndex, CountryDonorIndex,
ProjectSummaries, ReportDonors, DonorIDs, TopDonorGrossIndex, TopDonorLocalIndex,
RegionIndex, FocusAreaIndex, CoreDonors, OperatingUnitIndex)
from _collection import ObjectExists
class ProjectsController(Controller):
"""Main Process class that includes all the functions needed for processing UNDP xml data
Main methods:
run - Runs the whole class and generate everything
"""
def __init__(self):
self.undp_export = settings.UNDP_EXPORT
self.projects = Projects()
self.projectsummaries = ProjectSummaries()
self.outputs = Outputs()
self.subnationals = Subnationals()
self.units = Units()
self.crsindex = CrsIndex()
self.donorindex = DonorIndex()
self.countrydonorindex = CountryDonorIndex()
self.topdonor_gross = TopDonorGrossIndex()
self.topdonor_local = TopDonorLocalIndex()
self.donor_ids = DonorIDs()
self.region_index = RegionIndex()
self.core_donors = CoreDonors()
self.operating_unit_index = OperatingUnitIndex()
self.api_path = settings.API_PATH
self._years = set()
self.geo = None
# Adding 2010 because the xmls files are starting from 2011 but the legacy site expect to see 2010
self.years = 2010
self.country_donors = None
@property
def years(self):
return self._years
@years.setter
def years(self, value):
self._years.add(value)
def generate(self):
""" Main method. Execute necessary functions and generate json files """
for files in reversed(sorted(self.get_filenames(settings.IATI_XML_ANNUAL, 'xml'))):
self._prepare(files, 'iati-activity', 'projects')
self._prepare(files, 'iati-activity', 'outputs')
# Generating useful info for console
counter = 0
for i in self.outputs.collection.values():
counter += len(i)
self.log('Total outputs processed: %s' % counter)
self.log('Total projects processed: %s' % len(self.projects.pks))
self.log('Total Donor Index processed: %s' % len(self.donorindex.pks))
self.log('Total Country Donor Index processed: %s' % len(self.countrydonorindex.pks))
# Save Project Json files
self.projects.save_json(self.outputs, self.subnationals, self.api_path)
# Save Unit Json files
self.units.save_json(self.subnationals, self.api_path)
# Generate Core Donors
self._populate_core_donors()
self.core_donors.save_json(self.api_path, 'core-donors.json')
# Save Summary files
self._generate_project_summary(self.projects)
self.projectsummaries.save_json(self.api_path)
# Save Other Jsons
self.crsindex.save_json(self.api_path, 'crs-index.json')
self.donorindex.save_json(self.api_path, 'donor-index.json')
self.countrydonorindex.save_json(self.api_path, 'donor-country-index.json')
self._generate_year_index()
# Top Donor Gross Index
self._populate_top_donor_gross_index()
self.topdonor_gross.save_json(self.api_path, 'top-donor-gross-index.json')
# Top Donor Local Index
# self._populate_top_donor_local_index()
# self.topdonor_local.save_json(self.api_path, 'top-donor-local-index.json')
# Region Index
self._populate_region_index()
self.region_index.save_json(self.api_path, 'region-index.json')
# Focus Area Index
focus = FocusAreaIndex()
focus.save_json(self.api_path, 'focus-area-index.json')
# Generating HDI
self._generate_hdi()
# Save Operating Unit Index
self._populate_operating_unit_index()
self.operating_unit_index.save_json(self.api_path, 'operating-unit-index.json')
# generate zipped version of the files
#self.zipdata(settings.UNDP_EXPORT, settings.BASE_DIR + '/download', 'undp-project-data.zip')
def _prepare(self, xml_file, tag, op_type):
"""Prepares and executes other methods to prepare the data.
Arguments:
xml_file - full path to the xml file
tag -- one choice is available: iati-activity
op_type -- only two choices available: outputs - projects
"""
# Identify version number of XML file
tree = etree.parse(xml_file)
root = tree.getroot()
version = round(float(root.attrib.get('version', '1')), 2)
# Get IATI activities XML
iter_obj = iter(etree.iterparse(xml_file, tag=tag))
#iter_obj = root.iter(tag)
# Extract year
try:
year = int(self.extract_years([xml_file])[0])
self.years = year
except ValueError:
return
func = getattr(self, '_populate_%s' % op_type)
func(iter_obj, year, version)
def _populate_operating_unit_index(self):
current_year = sorted(list(self.years), reverse=True)[0]
country_isos = self.get_and_sort('%s/country_iso.csv' % settings.UNDP_EXPORT, 'iso3')
units = self.get_and_sort(self.undp_export + '/report_units.csv', 'operating_unit')
iso3 = dict([(i['iso3'].decode('utf-8').encode('ascii', 'ignore'),
i['iso_num'].decode('utf-8').encode('ascii', 'ignore')) for i in country_isos])
units_index = dict([(i['operating_unit'], i['fund_type']) for i in units])
for country in self.geo:
if country['iso3'] in self.units.pks:
obj = OperatingUnit()
obj.id.value = country['iso3']
obj.fund_type.value = units_index[obj.id.value]
obj.name.value = country[obj.name.key]
if country[obj.lat.key] != '':
obj.lat.value = country[obj.lat.key]
obj.lon.value = country[obj.lon.key]
if obj.id.value in iso3:
obj.iso_num.value = iso3[obj.id.value]
# Looping through project summaries to get total budgets
funding_source = set()
for project in self.projectsummaries.collection[current_year]:
if project.operating_unit.value == obj.id.value:
obj.project_count.value += 1
obj.budget_sum.value += round(project.budget.value, 2)
obj.expenditure_sum.value += round(project.expenditure.value, 2)
#obj.disbursement_sum.value += round(project.disbursement.value, 2)
for item in project.donors.value:
funding_source.add(item)
project_obj = self.projects.collection[project.id.value]
obj.email.value = project_obj.operating_unit_email.value
obj.web.value = project_obj.operating_unit_website.value
obj.funding_sources_count.value = len(funding_source)
self.operating_unit_index.add(obj.id.value, obj)
def _populate_core_donors(self):
cores = self.get_and_sort(settings.DONOR_DATA + '/core_fund.csv', 'Donor')
for core in cores:
obj = CoreDonor()
obj.donor_id.value = core['Donor']
obj.description.value = core['Donor Desc']
obj.short_description.value = core['Donor Level 3']
# Adding extra zeros to the begining of donor ids to make them 5 characters
additional_zeros = 5 - len(obj.donor_id.value)
obj.donor_id.value = '%s%s' % (('0' * additional_zeros), obj.donor_id.value)
self.core_donors.add(obj.donor_id.value, obj)
def _populate_region_index(self):
units = self.get_and_sort(self.undp_export + '/report_units.csv', 'bureau')
choices = ['PAPP', 'RBA', 'RBAP', 'RBAS', 'RBEC', 'RBLAC']
for unit in units:
if (unit['bureau'] in choices and unit['hq_co'] == 'HQ') or unit['bureau'] == 'PAPP':
if unit['ou_descr'] != 'Regional Center - Addis Ababa':
obj = Region()
obj.name.value = unit['ou_descr']
obj.id.value = unit['bureau']
try:
self.region_index.add(obj.id.value, obj)
except ObjectExists:
pass
obj = Region()
obj.name.value = 'Global'
obj.id.value = 'global'
self.region_index.add(obj.id.value, obj)
def _populate_top_donor_local_index(self):
local = self.get_and_sort(self.undp_export + '/donor_local.csv', 'donor')
for item in local:
obj = TopDonorLocal()
obj.name.value = item[obj.name.key]
obj.country.value = item[obj.country.key]
obj.amount.value = item[obj.amount.key]
obj.donor_id.value = self.donor_ids.collection.get(item['donor'], None)
self.topdonor_local.add(obj.donor_id.value, obj)
def _populate_top_donor_gross_index(self):
gross = self.get_and_sort(self.undp_export + '/donor_gross.csv', 'donor')
for item in gross:
obj = TopDonor()
obj.name.value = item[obj.name.key]
obj.country.value = item[obj.country.key]
obj.regular.value = item[obj.regular.key]
obj.other.value = item[obj.other.key]
obj.total.value = item[obj.total.key]
obj.donor_id.value = self.donor_ids.collection.get(item['donor'], None)
self.topdonor_gross.add(obj.donor_id.value, obj)
def _generate_project_summary(self, projects):
donors = self.get_and_sort(self.undp_export + '/report_donors.csv', 'awardID')
report_donors = ReportDonors()
# Create an index of donors based on awardID
for item in donors:
report_donors.add_update_list(item['awardID'], item)
try:
self.donor_ids.add(item['donor_type_lvl3_descr'], item['donorID'])
except ObjectExists:
pass
regionsList = ['PAPP', 'RBA', 'RBAP', 'RBAS', 'RBEC', 'RBLAC']
# Looping through years of projects
counter = 0
for project in projects.collection.values():
for year in project.fiscal_year.value:
# Should create a new model instance for each year of the project as they are stored in separate
# summary files
obj = ProjectSummary()
# set region
if project.region_id.value not in regionsList:
obj.region.value = 'global'
else:
obj.region.value = project.region_id.value
obj.operating_unit.value = project.operating_unit_id.value
obj.name.value = project.project_title.value
obj.id.value = project.project_id.value
obj.fiscal_year.value = year
# Fill out fields from report donors list
try:
country = defaultdict(lambda: defaultdict(float))
for item in report_donors.collection[project.project_id.value]:
if int(item['fiscal_year']) == int(year) and item['donorID']:
country[item['donorID']]['budget'] += float(item['budget'])
country[item['donorID']]['expenditure'] += float(item['expenditure'])
#country[item['donorID']]['disbursement'] += float(item['disbursement'])
country[item['donorID']]['type'] = item['donor_type_lvl1'].replace(" ", "")
if item['donor_type_lvl1'] == 'PROG CTY' or item['donor_type_lvl1'] == 'NON_PROG CTY':
country[item['donorID']]['name'] = item['donor_type_lvl3'].replace(" ", "")
elif item['donor_type_lvl1'] == 'MULTI_AGY' or item['donor_type_lvl1'] == 'NON_GOVERNMENT':
country[item['donorID']]['name'] = 'MULTI_AGY'
else:
country[item['donorID']]['name'] = 'OTH'
# country[item['donorID']]['name'] = item['donor_type_lvl3']
if item['donorID'] == '00012':
obj.core.value = True
for key, value in country.iteritems():
obj.donor_countries.value.append(value['name'])
obj.donor_budget.value.append(value['budget'])
obj.donor_expend.value.append(value['expenditure'])
#obj.donor_disbur.value.append(value['disbursement'])
obj.donor_types.value.append(value['type'])
obj.donors.value.append(key)
except KeyError:
                    # A few project ids do not appear in the donor list; this catch resolves them
pass
obj.expenditure.value = sum(obj.donor_expend.value)
#obj.disbursement.value = sum(obj.donor_disbur.value)
obj.budget.value = sum(obj.donor_budget.value)
# Get other information from outputs
for output in project.outputs.value:
obj.crs.value.add(output['crs'])
obj.focus_area.value.add(output['focus_area'])
self.projectsummaries.add_update_list(year, obj)
counter += 1
self.log('%s summary projects processed' % counter)
def _generate_year_index(self):
""" Generates year-index.js """
writeout = 'var FISCALYEARS = %s' % sorted(map(str, list(self.years)), reverse=True)
f_out = open('%s/year-index.js' % self.api_path, 'wb')
f_out.writelines(writeout)
f_out.close()
self.log('Year Index Generated')
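    # For example, with self.years == {2014, 2015, 2016} the file contains:
    #   var FISCALYEARS = ['2016', '2015', '2014']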
def _populate_units(self, project_obj):
""" Fill Units collections """
unit_project = UnitProject()
unit_project.title.value = project_obj.project_title.value
unit_project.id.value = project_obj.project_id.value
if project_obj.operating_unit_id.value in self.units.pks:
self.units.collection[project_obj.operating_unit_id.value].projects.value.append(unit_project.to_dict())
else:
unit = Unit()
unit.op_unit.value = project_obj.operating_unit_id.value
unit.projects.value.append(unit_project.to_dict())
self.units.add(project_obj.operating_unit_id.value, unit)
def _populate_projects(self, iter_obj, yr, version):
"""Loop through the iter_obj to and sort/clean data based project_id
Produced a list of dictionaries. Sample:
{'end': '2012-12-31', 'operating_unit_email': 'registry.lt@undp.org',
'inst_id': '', 'operating_unit': 'Lithuania, Republic of',
'iati_op_id': 'LT', 'inst_descr': '', 'start': '2005-01-01',
'operating_unit_id': 'LTU',
'operating_unit_website': 'http://www.undp.lt/',
'project_id': '00038726', 'inst_type_id': '',
'document_name': u'http://www.undp.org/content/dam/undp/documents/projects/LTU/00038726/RC fund.pdf'}
Arguments:
        iter_obj -- an iterable etree object
version - IATI format version
"""
counter = 0
# Get sorted units
report_units = self.get_and_sort(self.undp_export + '/report_units.csv', 'operating_unit')
        # sorting table for documents by importance
docs_sort = ['A02','A03','A04','A05','A01','A07','A08','A09','A06','A11','A10']
# Loop through each IATI activity in the XML
for event, p in iter_obj:
            # IATI hierarchy: '1' marks a project, '2' an output
hierarchy = p.attrib['hierarchy']
# Check for projects
if hierarchy == '1':
obj = Project()
obj.project_id.value = self._grab_award_id(p[1].text) if (version < 2) else self._grab_award_id(p[0].text)
# Check if the project_id is unique
if obj.project_id.value in self.projects.pks:
continue
obj.fiscal_year.value.append(yr)
obj.project_title.value = p.find(obj.project_title.xml_key).text.lower() if (version < 2) else p.find(obj.project_title.xml_key).find('narrative').text.lower()
obj.project_descr.value = p.find(obj.project_descr.xml_key).text if (version < 2) else p.find(obj.project_descr.xml_key).find('narrative').text
documents = p.findall('./document-link')
if documents:
names = []
links = []
format = []
places = []
for doc in documents:
#exclude self-links
if (doc.get('url') != "http://open.undp.org/#project/" + obj.project_id.value):
try:
links.append(urllib2.unquote(doc.get('url')).encode('utf-8').decode('utf-8'))
except UnicodeDecodeError:
links.append(urllib2.unquote(doc.get('url')).decode('utf-8'))
#links.append(doc.get('url'))
if 'application/' in doc.get('format'):
ft = mimetypes.guess_extension(doc.get('format'), False)
if ft is None:
format.append('')
else:
format.append(ft.lstrip('.'))
else:
format.append('')
#doc_tag = doc.find(obj.document_name.key) if (version < 2) else doc.find(obj.document_name.key).find('narrative')
#if doc_tag is not None:
# doc_name = doc_tag.text
#else:
# doc_name = ''
#doc.find(obj.document_name.key).text if (version < 2) else doc.find(obj.document_name.key).find('narrative').text
#names.append(doc_name)
for d in doc.iterchildren(tag=obj.document_name.key):
if (version < 2):
names.append(d.text)
else:
names.append(d.find('narrative').text)
# default place is last
place = 100
for t in doc.iterchildren(tag='category'):
try:
tp = docs_sort.index(t.get('code'))
except ValueError:
tp = 100
if (tp < place):
place = tp
places.append(place)
obj.document_name.value.extend([names, links, format, places])
# Find start and end dates
obj.start.value = p.find(obj.start.xml_key).text if (version < 2) else p.find('activity-date[@type="2"]').attrib.get('iso-date')
obj.end.value = p.find(obj.end.xml_key).text if (version < 2) else p.find('activity-date[@type="3"]').attrib.get('iso-date')
contact = p.findall('./contact-info')
obj.operating_unit_email.value = [e.text for email in contact
for e in email.iterchildren(tag=obj.operating_unit_email.key)][0]
# Find operating_unit
# If recipient country didn't exist look for recipient region
try:
obj.iati_op_id.value = (p.find(obj.iati_op_id.xml_key).attrib.get('code'))
obj.operating_unit.value = p.find(obj.operating_unit.xml_key).text if (version < 2) else p.find(obj.operating_unit.xml_key).find('narrative').text
for r in report_units:
if (obj.iati_op_id.value == r['iati_operating_unit']
or obj.iati_op_id.value == r['operating_unit']):
obj.operating_unit_id.value = r['operating_unit']
obj.region_id.value = r[obj.region_id.key]
except:
region_unit = p.findall("./recipient-region")
for ru in region_unit:
for r in report_units:
ru_text = ru.text if (version < 2) else ru.find('narrative').text
if type(ru_text) == type(r['ou_descr']) and ru_text == r['ou_descr']:
obj.operating_unit_id.value = r['operating_unit']
obj.operating_unit.value = r['ou_descr']
obj.iati_op_id.value = '998'
# find contact info
try:
for email in contact:
for e in email.iterchildren(tag=obj.operating_unit_email.key):
obj.operating_unit_email.value = e.text
obj.operating_unit_website.value = p.find(obj.operating_unit_website.xml_key).text if (version < 2) else p.find(obj.operating_unit_website.xml_key).find('narrative').text
except:
pass
# Check for implementing organization
try:
inst = p.find("./participating-org[@role='Implementing']") if (version < 2) else p.find("./participating-org[@role='4']")
obj.inst_id.value = inst.attrib.get(obj.inst_id.key)
obj.inst_type_id.value = inst.attrib.get(obj.inst_type_id.key)
obj.inst_descr.value = inst.text if (version < 2) else inst.find('narrative').text
except:
pass
# Populate the Unit Collection
self._populate_units(obj)
counter += 1
self.log('Processing: %s' % counter, True)
self.projects.add(obj.project_id.value, obj)
self.log('%s - Project Annuals: %s rows processed' % (yr, counter))
def _populate_outputs(self, iter_obj, yr, version):
counter = 0
        # Get sorted country donors
sorted_donors = self.get_and_sort(self.undp_export + '/country_donors_updated.csv', 'id')
# Get South-South projects
#ss_list = self.get_and_list(self.undp_export + '/SSCprojects_IDlist.csv', 'projectid')
for event, o in iter_obj:
hierarchy = o.attrib['hierarchy']
if hierarchy == '2':
obj = Output()
crs = Crs()
obj.output_id.value = self._grab_award_id(o[1].text) if (version < 2) else self._grab_award_id(o[0].text)
# Check if the project_id is unique
if obj.output_id.value in self.outputs.output_ids:
continue
obj.output_title.value = o.find(obj.output_title.xml_key).text if (version < 2) else o.find(obj.output_title.xml_key).find('narrative').text
obj.output_descr.value = o.find(obj.output_descr.xml_key).text if (version < 2) else o.find(obj.output_descr.xml_key).find('narrative').text
try:
obj.gender_id.value = o.find(obj.gender_descr.xml_key).attrib.get(obj.gender_id.key)
obj.gender_descr.value = o.find(obj.gender_descr.xml_key).text if (version < 2) else "Gender Equality"
except:
obj.gender_id.value = "0"
obj.gender_descr.value = "None"
obj_crs_descr = obj.crs_descr.xml_key if (version < 2) else "sector[@vocabulary='1']"
try:
obj.crs.value = o.find(obj_crs_descr).get(obj.crs.key)
crs.name.value = obj.crs.value
except AttributeError:
pass
try:
obj.crs_descr.value = o.find(obj_crs_descr).text if (version < 2) else o.find(obj_crs_descr).find('narrative').text
crs.id.value = obj.crs_descr.value
except AttributeError:
pass
try:
self.crsindex.add(crs.id.value, crs)
except ObjectExists:
pass
try:
obj.award_id.value = self._grab_award_id(o.find(obj.award_id.xml_key).get('ref'))
except:
obj.award_id.value = self._grab_award_id(o.find("./related-activity[@type='2']").get('ref'))
try:
#if obj.award_id.value in ss_list:
# obj.focus_area.value = '8'
# obj.focus_area_descr.value = 'South-South'
#else:
obj_focus_area_descr = obj.focus_area_descr.xml_key if (version < 2) else "sector[@vocabulary='99']"
obj.focus_area.value = o.find(obj_focus_area_descr).get(obj.focus_area.key)
obj.focus_area_descr.value = o.find(obj_focus_area_descr).text if (version < 2) else o.find(obj_focus_area_descr).find('narrative').text
if not obj.focus_area_descr.value:
obj.focus_area_descr.value = "-"
except:
obj.focus_area.value = "-"
obj.focus_area_descr.value = "-"
donorCol = "./participating-org[@role='Funding']" if (version < 2) else "./participating-org[@role='1']"
for donor in o.findall(donorCol):
ref = donor.get('ref')
obj.donor_id.value.add(ref)
if ref == '00012':
obj.donor_name.value.append('Voluntary Contributions')
else:
obj.donor_name.value.append(donor.text if (version < 2) else donor.find('narrative').text)
for d in sorted_donors:
# Check IDs from the CSV against the cntry_donors_sort.
# This provides funding country names not in XML
if d['id'] == ref:
# for outputs
obj.donor_short.value.append(d[obj.donor_short.key])
# Find budget information to later append to projectFY array
budget_expend = defaultdict(lambda: defaultdict(float))
obj.budget.temp = o.findall(obj.budget.xml_key)
for budget in obj.budget.temp:
for b in budget.iterchildren(tag='value'):
year = int(b.get('value-date').split('-', 3)[0])
budget_expend[year]['budget'] = float(b.text)
# Use transaction data to get expenditure
for tx in o.findall('transaction'):
expenditureCol = obj.expenditure.xml_key if (version < 2) else "transaction-type[@code='4']"
disbursementCol = obj.disbursement.xml_key if (version < 2) else "transaction-type[@code='3']"
for expen in tx.findall(expenditureCol):
for sib in expen.itersiblings():
if sib.tag == 'value':
year = int(sib.get('value-date').split('-', 3)[0])
budget_expend[year]['expenditure'] = float(sib.text)
for disb in tx.findall(disbursementCol):
for sib in disb.itersiblings():
if sib.tag == 'value':
year = int(sib.get('value-date').split('-', 3)[0])
budget_expend[year]['disbursement'] = float(sib.text)
for key, value in budget_expend.iteritems():
obj.fiscal_year.value.append(key)
obj.budget.value.append(value['budget'])
obj.expenditure.value.append(value['expenditure']+value['disbursement'])
#obj.disbursement.value.append(value['disbursement'])
# Run subnationals
locations = o.findall('location')
if locations:
self._populate_subnationals(obj.award_id.value, obj, o, locations, version)
# Populate Donor Index
self._populate_donor_index(o, version)
counter += 1
self.log('Processing: %s' % counter, True)
self.outputs.add_update_list(obj.award_id.value, obj)
self.log('%s - output Annuals: %s rows processed' % (yr, counter))
def _populate_subnationals(self, project_id, output_obj, node, locations, version):
""" Populate subnational object. This is dependant on _populate_outputs and cannot be executed separately
project_id - the related project_id
output_id - output model object
node - output xml object
Returns:
Populatess subnationals property
"""
counter = 0
for location in locations:
obj = Subnational()
counter += 1
obj.awardID.value = project_id
obj.outputID.value = output_obj.output_id.value
obj.output_locID.value = "%s-%d" % (obj.outputID.value, counter)
# Focus areas
obj.focus_area.value = output_obj.focus_area.value
obj.focus_area_descr.value = output_obj.focus_area_descr.value
for item in location.iterchildren():
if item.tag == 'coordinates':
obj.lat.value = item.get(obj.lat.key)
obj.lon.value = item.get(obj.lon.key)
obj.precision.value = item.get(obj.precision.key)
if item.tag == 'name':
obj.name.value = item.text if (version < 2) else item.find('narrative').text
if item.tag == 'location-type':
obj.type.value = item.get(obj.type.key)
# IATI 1.04
if item.tag == 'point':
pos = item.getchildren()
lat_lon = pos[0].text.split(' ')
obj.lat.value = lat_lon[0]
obj.lon.value = lat_lon[1]
# IATI 1.04
if item.tag == 'exactness':
obj.precision.value = item.get('code')
# IATI 1.04
if item.tag == 'feature-designation':
obj.type.value = item.get(obj.type.key)
self.subnationals.add_update_list(project_id, obj)
def _populate_donor_index(self, output_obj, version):
""" Populates both donor-index and donor-country-index """
if not self.country_donors:
self.country_donors = self.get_and_sort(self.undp_export + '/country_donors_updated.csv', 'id')
donorCol = "./participating-org[@role='Funding']" if (version < 2) else "./participating-org[@role='1']"
for donor in output_obj.findall(donorCol):
obj = Donor()
country_obj = CountryDonor()
ref = donor.get(obj.id.key)
if ref:
for item in self.country_donors:
if ref == item['id']:
# Skip the loop if the ref already is added
if ref not in self.donorindex.pks:
obj.id.value = ref
obj.name.value = donor.text if (version < 2) else donor.find('narrative').text or "Unknown"
if item['donor_type_lvl1'] == 'PROG CTY' or item['donor_type_lvl1'] == 'NON_PROG CTY':
obj.country.value = item['donor_type_lvl3'].replace(" ", "")
elif item['donor_type_lvl1'] == 'MULTI_AGY':
obj.country.value = item['donor_type_lvl1'].replace(" ", "")
else:
obj.country.value = 'OTH'
self.donorindex.add(obj.id.value, obj)
if item['donor_type_lvl3'] not in self.countrydonorindex.pks:
country_obj.id.value = item['donor_type_lvl3']
country_obj.name.value = item['donor_type_lvl3_descr']
self.countrydonorindex.add(item['donor_type_lvl3'], country_obj)
    @staticmethod
    def _search_list_dict(_list, key, search):
result = [item for item in _list if item[key] == search]
if len(result) > 0:
return result
else:
return False
def _generate_hdi(self):
hdi = self.get_and_sort('%s/hdi-csv-clean.csv' % settings.HDI, 'hdi2013')
self.geo = self.get_and_sort('%s/country-centroids.csv' % settings.PROCESS_FILES, 'iso3')
        # Years available in the HDI dataset
years = [1980, 1985, 1990, 1995, 2000, 2005, 2006, 2007, 2008, 2011, 2012, 2013]
# Set current year to the latest year of HDI Data
current_year = 2013
row_count = 0
rank = 0
hdi_index = []
hdi_dict = {}
for val in iter(hdi):
row_count = row_count + 1
hdi_total = []
hdi_health = []
hdi_ed = []
hdi_inc = []
change = []
change_year = {}
for y in years:
if val['hdi%d' % y] != '':
if val['ed%d' % y] != "" and val['health%d' % y] != "" and val['income%d' % y] != "":
hdi_total.append([y, round(float(val['hdi%d' % y]), 3)])
hdi_health.append([y, round(float(val['health%d' % y]), 3)])
hdi_ed.append([y, round(float(val['ed%d' % y]), 3)])
hdi_inc.append([y, round(float(val['income%d' % y]), 3)])
if y != current_year:
change_year = round(float(val['hdi%d' % current_year]),
3) - round(float(val['hdi%d' % y]), 3)
if len(change) == 0:
change.append(change_year)
if len(change) == 0:
change.append("")
for ctry in self.geo:
if ctry['name'] == val['country']:
if val['hdi%d' % current_year] == "":
g = {
"id": ctry['iso3'],
"name": val['country'],
"hdi": "",
"health": "",
"income": "",
"education": "",
"change": change[0],
"rank": "n.a."
}
else:
if ctry['iso3'].rfind("A-", 0, 2) == 0:
g = {
"id": ctry['iso3'],
"name": val['country'],
"hdi": hdi_total,
"health": hdi_health,
"income": hdi_inc,
"education": hdi_ed,
"change": change[0],
"rank": "n.a."
}
else:
rank = rank + 1
g = {
"id": ctry['iso3'],
"name": val['country'],
"hdi": hdi_total,
"health": hdi_health,
"income": hdi_inc,
"education": hdi_ed,
"change": change[0],
"rank": rank
}
hdi_index.append(g)
uid = ctry['iso3']
hdi_dict[uid] = copy.deepcopy(g)
hdi_dict[uid].pop('id')
hdi_dict[uid].pop('name')
hdi_dict['total'] = rank
hdi_index_sort = sorted(hdi_index, key=lambda x: x['rank'])
hdi_writeout = json.dumps(hdi_index_sort, sort_keys=True, separators=(',', ':'))
hdi_out = open('%s/hdi.json' % self.api_path, 'wb')
hdi_out.writelines(hdi_writeout)
hdi_out.close()
jsvalue = "var HDI = "
jsondump = json.dumps(hdi_dict, sort_keys=True, separators=(',', ':'))
writeout = jsvalue + jsondump
f_out = open('%s/hdi.js' % self.api_path, 'wb')
f_out.writelines(writeout)
f_out.close()
self.log('HDI json generated')
def extract_years(self, filenames):
"""Extract years from filenames
filenames must be in this format: atlas_projects_2011.xml
Arguments:
filenames -- an array of filenames
"""
return [f[-8:-4] for f in filenames]
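    # e.g. self.extract_years(['atlas_projects_2011.xml']) -> ['2011']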
def _grab_award_id(self, text):
""" grabs award id from the xml text
@example
Text: XM-DAC-41114-PROJECT-00068618
Return: 00068618
"""
return text.split('-')[-1]
| UTF-8 | Python | false | false | 38,659 | py | 8,402 | generator.py | 72 | 0.508368 | 0.500142 | 0 | 878 | 43.030752 | 192 |
Penqor/HOMEGROWNGILL | 13,718,125,567,901 | 389d9fdcc0e45885f1e9640f1d547408540de11c | 9bb7a8327a594d388f0f934c1b7a78e337d3f7dc | /HGG_loopfunction.py | 8e166b6fa9c9b17cb1f452feae01276a5842aaa8 | []
| no_license | https://github.com/Penqor/HOMEGROWNGILL | 1ee1c1ec3c4a9ac6ff20b8ce316476c85325ae46 | f8238c98258a2cc2d6f6b04471a880d1938a473d | refs/heads/master | 2020-07-17T03:50:20.785211 | 2019-09-02T21:03:03 | 2019-09-02T21:03:03 | 205,935,970 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Do:
make a loop function
"""
# old code commented out
'''
def loopFunction(function, question):
Loop = 'y'
# loop when answer is yes
while Loop == 'y' or Loop == 'yes':
# call the function you want to loop
function
Loop2 = True
# validation loop
while Loop2:
Loop = input("{} y/n".format(question))
# if yes or no break validation loop and it will either break or repeat orginal loop
if Loop == 'y' or Loop == 'yes':
Loop2 = False
elif Loop == 'n' or Loop == 'no':
Loop2 = False
# if response is not yes or no try again
else:
print("Please enter y or n")
def printDict(dictionary):
for category, a in dictionary.items():
print(category)
for fruit, info in a.items():
print(fruit, info)
print()
'''
# create a dictionary for the menu
menu = {
'fruit:': {
'apples': {
'price': 4
},
'oranges':{
'price': 2
}
},
'vegetables:':{
'carrots':{
'price': 2
}
},
'milk products:': {
'milk': {
'price': 5
}
},
'nuts:': {
'peanuts': {
'price':0.5
}
},
'jams:': {
'jelly': {
'price': 4
}
},
'juices:': {
'orange': {
'price': 5
}
}
}
# order will be filled via a function
order = {
}
# order = {'apple juice': {'price': 5, 'quantity': 10}}
def order_items(menu_dict, order_dict):
order_loop = True
while order_loop:
try:
try:
item = input("What food item would you like to purchase")
quantity = int(input("How many of this item would you like"))
if item in order_dict:
order_dict[item]['quantity'] = order_dict[item]['quantity'] + quantity
order_loop = False
else:
for category, value in menu_dict.items():
if item in list(value.keys()):
order_dict[item] = {'price': value[item]['price'], 'quantity': quantity}
break
if item not in order_dict:
print("Please enter an item on the menu.")
order_loop = True
else:
order_loop = False
except ValueError:
print("Please enter a valid quantity.")
except KeyError:
print("Please enter a valid name.")
# set this up as a function - output order
def printDict(statement, dictionary):
    # print a heading, then each ordered item with its price/quantity details
    print(statement)
    for item_name, details in dictionary.items():
        print(item_name)
        for field, info in details.items():
            print(field, info)
        print()
# set up the mechanics of the whole order as a function!!
def loop_function(function, param1, param2, question):
Loop = 'y'
# loop when answer is yes
while Loop == 'y' or Loop == 'yes':
# call the function you want to loop
print("hi")
function(param1, param2)
Loop2 = True
# validation loop
while Loop2:
Loop = input("{} y/n".format(question))
# if yes or no break validation loop and it will either break or repeat original loop
if Loop == 'y' or Loop == 'yes' or Loop == 'n' or Loop == 'no':
break
# if response is not yes or no try again
else:
print("Please enter y or n")
# call the function and print order
loop_function(order_items, menu, order, "Would you like to order another item.")
printDict("Your Order:", order) | UTF-8 | Python | false | false | 3,975 | py | 12 | HGG_loopfunction.py | 12 | 0.473711 | 0.468428 | 0 | 142 | 26.007042 | 100 |
scasasso/pl_tools | 11,020,886,091,492 | efafe63ca2aa546246aad02ce856b708234884ca | 312c0d2f4aff9fa02b91eae033c571d8616afedc | /ml_clf/skmodel.py | 734203d332966c1a9ae6a1980a40c2e85f524eed | []
| no_license | https://github.com/scasasso/pl_tools | 5dcfcf6ca1021ae3d8879d846ab50f01a789649e | 3e5e24282442433c3d99a0ae065503aec7cde327 | refs/heads/master | 2021-04-12T04:29:27.614274 | 2019-05-13T08:31:02 | 2019-05-13T08:31:02 | 125,884,803 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
################################################################################
#
# File: skmodel.py
#
# Product: Predictive Layer Genius Classif Library
# Author: Momo
# Date: 03 April 2015
#
# Scope: The file contains the representation of scikit learn model.
#
# Copyright (c) 2015, Predictive Layer Limited. All Rights Reserved.
#
# The contents of this software are proprietary and confidential to the author.
# No part of this program may be photocopied, reproduced, or translated into
# another programming language without prior written consent of the author.
#
#
# $Id$
#
################################################################################
import json
import logging
import numpy as np
import pickle
from sklearn.externals import joblib
from sklearn.base import clone as skclone
from sklearn.metrics import mean_squared_error, roc_auc_score
from plmodel import PLModel
logger = logging.getLogger(__file__)
class SKModel(PLModel):
def __init__(self, model, scaler='default'):
PLModel.__init__(self, model, scaler)
return
def _fit_and_eval(self, X_train_val, y_train_val, **kwargs):
# Split train/validation dataset
i_val = int(np.floor(0.8 * len(X_train_val)))
X_val, y_val = X_train_val[i_val: ], y_train_val[i_val: ]
X_train, y_train = X_train_val[: i_val], y_train_val[: i_val]
        # Fit the scaler (if not fitted yet) and scale the training data
        if self._is_scaler_fitted is False:
            self.fit_scaler(X_train)
        X_train = self.scaler.transform(X_train)
        # Apply the same scaling to the validation set
        X_val = self.scaler.transform(X_val)
        # Fit the model
        self.model.fit(X_train, y_train)
        # Validate the model
        if 'regressor' in self.model.__class__.__name__.lower():
            preds = self.model.predict(X_val)
            logger.info('Validation RMSE = {0:.4f}'.format(np.sqrt(mean_squared_error(y_val, preds))))
        elif 'classifier' in self.model.__class__.__name__.lower():
            # Use the positive-class probabilities for a binary ROC AUC
            preds = self.model.predict_proba(X_val)[:, 1]
            logger.info('Validation ROC AUC = {0:.4f}'.format(roc_auc_score(y_val, preds)))
        else:
            msg = 'Cannot understand if model %s is a regressor or a classifier: will skip validation' % self.model.__class__.__name__
            logger.warning(msg)
        # Re-fit with all the data, scaled consistently with training
        self.model.fit(self.scaler.transform(X_train_val), y_train_val)
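# Hypothetical usage sketch (any scikit-learn estimator should fit this pattern):
#   from sklearn.ensemble import RandomForestRegressor
#   skm = SKModel(RandomForestRegressor(n_estimators=100))
#   skm._fit_and_eval(X_train_val, y_train_val)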
| UTF-8 | Python | false | false | 2,357 | py | 24 | skmodel.py | 24 | 0.5944 | 0.587187 | 0 | 67 | 34.149254 | 134 |
nxyexiong/Outernet-windows | 790,274,018,707 | 8cba46d71bb268c4810defaa0dd467ff9b6c0a71 | 6ca932e06cb93518e64c09767b6ffc594780593c | /tap_control.py | c8a3f5ce040c07d8ebee84c40f2597887dbfc15b | []
| no_license | https://github.com/nxyexiong/Outernet-windows | 169ed8f3157084be39b231bd9a34d5bbc3158fb6 | b7b4d3168abb9ef2651ccc85ccb8ed4df5aea95c | refs/heads/master | 2021-06-21T09:38:07.189579 | 2021-06-20T18:24:51 | 2021-06-20T18:31:31 | 211,439,474 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import winreg as reg
import win32file
import win32event
import winerror
import pywintypes
import threading
import time
from queue import Queue
from constants import REG_CONTROL_CLASS, TAP_COMPONENT_ID
from logger import LOGGER
def get_tuntap_ComponentId():
with reg.OpenKey(reg.HKEY_LOCAL_MACHINE, REG_CONTROL_CLASS) as adapters:
try:
for i in range(10000):
key_name = reg.EnumKey(adapters, i)
with reg.OpenKey(adapters, key_name) as adapter:
try:
component_id = reg.QueryValueEx(adapter, 'ComponentId')[0]
if component_id == TAP_COMPONENT_ID:
return reg.QueryValueEx(adapter, 'NetCfgInstanceId')[0]
except WindowsError:
pass
except WindowsError:
pass
def CTL_CODE(device_type, function, method, access):
return (device_type << 16) | (access << 14) | (function << 2) | method
def TAP_CONTROL_CODE(request, method):
return CTL_CODE(34, request, method, 0)
TAP_IOCTL_SET_MEDIA_STATUS = TAP_CONTROL_CODE( 6, 0)
TAP_IOCTL_CONFIG_TUN = TAP_CONTROL_CODE(10, 0)
def open_tun_tap(ipv4_addr, ipv4_network, ipv4_netmask):
'''
\brief Open a TUN/TAP interface and switch it to TUN mode.
\return The handler of the interface, which can be used for later
read/write operations.
'''
LOGGER.debug("open_tun_tap")
# retrieve the ComponentId from the TUN/TAP interface
componentId = get_tuntap_ComponentId()
# create a win32file for manipulating the TUN/TAP interface
tuntap = win32file.CreateFile(
r'\\.\Global\%s.tap' % componentId,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
None,
win32file.OPEN_EXISTING,
win32file.FILE_ATTRIBUTE_SYSTEM | win32file.FILE_FLAG_OVERLAPPED,
None
)
# have Windows consider the interface now connected
win32file.DeviceIoControl(
tuntap,
TAP_IOCTL_SET_MEDIA_STATUS,
b'\x01\x00\x00\x00',
1
)
    # prepare the parameter passed to the TAP_IOCTL_CONFIG_TUN command.
# This needs to be a 12-character long string representing
# - the tun interface's IPv4 address (4 characters)
# - the tun interface's IPv4 network address (4 characters)
# - the tun interface's IPv4 network mask (4 characters)
configTunParam = []
configTunParam += ipv4_addr
configTunParam += ipv4_network
configTunParam += ipv4_netmask
configTunParam = bytes(configTunParam)
# switch to TUN mode (by default the interface runs in TAP mode)
win32file.DeviceIoControl(
tuntap,
TAP_IOCTL_CONFIG_TUN,
configTunParam,
1
)
# return the handler of the TUN interface
return tuntap
def close_tun_tap(tuntap):
LOGGER.info("close_tun_tap")
win32file.CloseHandle(tuntap)
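# Hypothetical usage sketch: open the adapter as 10.0.0.2/24 and pump packets.
#   tuntap = open_tun_tap([10, 0, 0, 2], [10, 0, 0, 0], [255, 255, 255, 0])
#   ctl = TAPControl(tuntap)
#   ctl.read_callback = lambda pkt: LOGGER.info(pkt)
#   ctl.run()
#   ... later: ctl.close(); close_tun_tap(tuntap)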
class TAPControl:
def __init__(self, tuntap):
LOGGER.debug("TAPControl init")
# store params
self.tuntap = tuntap
# local variables
self.mtu = 1300
self.overlappedRx = pywintypes.OVERLAPPED()
self.overlappedRx.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.rxOffset = self.overlappedRx.Offset
self.overlappedTx = pywintypes.OVERLAPPED()
self.overlappedTx.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.txOffset = self.overlappedTx.Offset
self.read_callback = None
self.write_queue = Queue()
self.timeout = 100 # 0.1s
self.goOn = False
self.read_thread = None
self.write_thread = None
def run(self):
LOGGER.debug("TAPControl run")
self.goOn = True
self.read_thread = threading.Thread(target=self.handle_read)
self.read_thread.start()
self.write_thread = threading.Thread(target=self.handle_write)
self.write_thread.start()
def handle_read(self):
LOGGER.debug("TAPControl handle_read")
rxbuffer = win32file.AllocateReadBuffer(self.mtu)
# read
ret = None
p = None
data = None
while self.goOn:
try:
# wait for data
ret, p = win32file.ReadFile(self.tuntap, rxbuffer, self.overlappedRx)
while win32event.WaitForSingleObject(self.overlappedRx.hEvent, self.timeout) == win32event.WAIT_TIMEOUT:
if not self.goOn:
return
self.rxOffset = self.rxOffset + len(p)
self.overlappedRx.Offset = self.rxOffset & 0xffffffff
self.overlappedRx.OffsetHigh = self.rxOffset >> 32
data = bytes(p.obj)
except Exception:
continue
LOGGER.debug("TAPControl read packet %s" % data)
send_data = None
if data[0] & 0xf0 == 0x40: # ipv4
# get length
total_length = 256 * data[2] + data[3]
# ready to handle
send_data = data[:total_length]
data = data[total_length:]
elif data[0] & 0xf0 == 0x60: # todo: ipv6
# get length
total_length = 256 * data[4] + data[5] + 40
# ready to handle
data = data[total_length:]
if send_data and self.read_callback:
self.read_callback(send_data)
def write(self, data):
LOGGER.debug("TAPControl write packet %s" % data)
if not self.goOn:
return
self.write_queue.put(data)
def handle_write(self):
while self.goOn:
try:
data = self.write_queue.get(timeout=0.001)
except Exception:
continue
try:
# write over tuntap interface
win32file.WriteFile(self.tuntap, data, self.overlappedTx)
while win32event.WaitForSingleObject(self.overlappedTx.hEvent, self.timeout) == win32event.WAIT_TIMEOUT:
if not self.goOn:
return
self.txOffset = self.txOffset + len(data)
self.overlappedTx.Offset = self.txOffset & 0xffffffff
self.overlappedTx.OffsetHigh = self.txOffset >> 32
except Exception:
continue
def close(self):
LOGGER.info("TAPControl close")
self.goOn = False
if self.read_thread is not None:
while self.read_thread.is_alive():
time.sleep(0.1)
if self.write_thread is not None:
while self.write_thread.is_alive():
time.sleep(0.1)
| UTF-8 | Python | false | false | 6,809 | py | 19 | tap_control.py | 15 | 0.588486 | 0.567484 | 0.000587 | 203 | 32.541872 | 120 |
franksotogithub/ArcPy | 4,741,643,914,775 | cb0503026e35a52a3a8da4540398ebddcbda7562 | f6d772bb63850fd38a2154cdb0a2d6020a3be778 | /CalculoFuncionObjetivo.py | 218d374f4373a62ea587c03198688b4db3398ffc | []
| no_license | https://github.com/franksotogithub/ArcPy | b98857e1a82fc61891117de435d2972086754feb | e3b7864affbd144588f4fc54127aa7d627adefa7 | refs/heads/master | 2020-04-06T03:37:05.894579 | 2016-11-11T18:52:49 | 2016-11-11T18:52:49 | 61,165,191 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import arcpy
arcpy.env.workspace ="D:/ArcGisShapesPruebas"
# First, make a layer from the feature class
def Calculo(zona,circulos):
arcpy.env.overwriteOutput = True
arcpy.MakeFeatureLayer_management(zona, "temporal")
arcpy.MakeFeatureLayer_management(circulos,"temporal_circulos")
arcpy.MakeFeatureLayer_management(circulos, "temporal_circulo")
#arcpy.MakeFeatureLayer_management("D:/ArcGisShapesPruebas/Zones/Shape15013300200.shp", "temporal")
#arcpy.MakeFeatureLayer_management("D:/ArcGisShapesPruebas/EnvolvesCircles/pruebacirclebuffers.shp", "temporal_circulos")
#arcpy.MakeFeatureLayer_management("D:/ArcGisShapesPruebas/EnvolvesCircles/pruebacirclebuffers.shp", "temporal_circulo")
    Z = 0  # objective function value
p=0.35
q=0.65
cant_max_viv=40
suma_homogeneidad=0
suma_compacidad=0
homogeneidad=0
compacidad=0
#fields = ["GRUPO"]
n=0
with arcpy.da.SearchCursor("temporal_circulos",["GRUPO"]) as cursor3:
for row3 in cursor3:
Suma_areas = 0
where_expression = " GRUPO="+str(row3[0])
arcpy.SelectLayerByAttribute_management("temporal_circulo", "NEW_SELECTION", where_expression)
#print row3[0]
# with arcpy.da.SearchCursor("temporal_circulo" ,"*") as cursor2:
# for row2 in cursor2:
# print row2
#arcpy.SelectLayerByAttribute ("temporal", "", ' "GRUPO" = 1 ')
#select arcpy.da.SearchCursor
            # Compute the group's total block area and dwelling count
arcpy.SelectLayerByAttribute_management("temporal", "NEW_SELECTION")
V = 0
with arcpy.da.SearchCursor("temporal", ["IDMANZANA","Shape_area","TOT_VIV"], where_expression) as cursor1:
Suma_areas = 0
for row1 in cursor1:
Suma_areas = row1[1] + Suma_areas
V = row1[2] + V
#print "Area en el grupo:" + str(Suma_areas)
del cursor1
#print "otros"
# Then add a selection to the layer based on location to features in another feature class
            # compute the sum of the block areas that fall inside the circle
arcpy.SelectLayerByLocation_management("temporal", "WITHIN",
"temporal_circulo","","NEW_SELECTION")
with arcpy.da.SearchCursor("temporal", ["IDMANZANA", "Shape_area"]) as cursor2:
Suma_areas_circulo = 0
for row2 in cursor2:
Suma_areas_circulo = Suma_areas_circulo + row2[1]
#print "Suma de areas dentro del circulo:" + str(Suma_areas_circulo)
del cursor2
Ar = Suma_areas_circulo - Suma_areas
A = Suma_areas_circulo
        homogeneidad = pow((float(V) / cant_max_viv) - 1, 2)  # force float division (Python 2)
        compacidad = Ar / A
        suma_homogeneidad = homogeneidad + suma_homogeneidad
        suma_compacidad = compacidad + suma_compacidad
del row3
n=1+n
del cursor3
    # compute Z with the formula
Z=p*(1.0/float(n))*suma_homogeneidad+ q*(1.0/float(n))*suma_compacidad
return Z
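# In symbols: Z = p * (1/n) * sum_g ((V_g / 40) - 1)^2 + q * (1/n) * sum_g (Ar_g / A_g),
# where V_g is the group's dwelling count, A_g the enclosing-circle area and
# Ar_g the part of the circle not covered by the group's blocks.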
#print Calculo("D:/ArcGisShapesPruebas/Zones/Shape15013300200.shp","D:/ArcGisShapesPruebas/EnvolvesCirclesBuffers/Shape15013300200.shp") | UTF-8 | Python | false | false | 3,353 | py | 122 | CalculoFuncionObjetivo.py | 122 | 0.619445 | 0.593797 | 0 | 105 | 30.942857 | 136 |
HoratiusTang/oslo.messaging | 1,821,066,179,526 | d0e7e5a2d9d052d6cbfb174404fc90b0783670dc | 9ec5fd5dd7d91df752576fdf231f87de442fa72e | /oslo_messaging/_cmd/zmq_proxy.py | bb75f8c9ae8d93bbd01b683950dd01db434c7f1f | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | https://github.com/HoratiusTang/oslo.messaging | dc0589d02898596e839d7ca20dfc47299e40f09b | 5708d751039df4595d737c2211ed46dd93de2ba4 | refs/heads/master | 2021-01-18T01:30:40.347413 | 2016-04-23T09:48:16 | 2016-04-23T09:48:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
from oslo_config import cfg
from oslo_messaging._drivers import impl_zmq
from oslo_messaging._drivers.zmq_driver.broker import zmq_proxy
from oslo_messaging._drivers.zmq_driver.broker import zmq_queue_proxy
from oslo_messaging import server
CONF = cfg.CONF
CONF.register_opts(impl_zmq.zmq_opts)
CONF.register_opts(server._pool_opts)
CONF.rpc_zmq_native = True
USAGE = """ Usage: ./zmq-proxy.py --type {PUBLISHER,ROUTER} [-h] [] ...
Usage example:
python oslo_messaging/_cmd/zmq-proxy.py\
--type PUBLISHER"""
PUBLISHER = 'PUBLISHER'
ROUTER = 'ROUTER'
PROXY_TYPES = (PUBLISHER, ROUTER)
def main():
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(
description='ZeroMQ proxy service',
usage=USAGE
)
parser.add_argument('--type', dest='proxy_type', type=str,
default=PUBLISHER,
help='Proxy type PUBLISHER or ROUTER')
parser.add_argument('--config-file', dest='config_file', type=str,
help='Path to configuration file')
args = parser.parse_args()
if args.config_file:
cfg.CONF(["--config-file", args.config_file])
if args.proxy_type not in PROXY_TYPES:
raise Exception("Bad proxy type %s, should be one of %s" %
(args.proxy_type, PROXY_TYPES))
reactor = zmq_proxy.ZmqProxy(CONF, zmq_queue_proxy.PublisherProxy) \
if args.proxy_type == PUBLISHER \
else zmq_proxy.ZmqProxy(CONF, zmq_queue_proxy.RouterProxy)
try:
while True:
reactor.run()
except KeyboardInterrupt:
reactor.close()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 2,313 | py | 10 | zmq_proxy.py | 10 | 0.664937 | 0.661479 | 0 | 76 | 29.434211 | 78 |
Tomas-Lawton/fullproof | 3,281,355,029,631 | 2c92b24d29b2d47d16ef0451ea179f393d99ec66 | 4003be38a55d76334db8a6069f1d3ec3dbab7229 | /experimental_scrapers/tineye_scraper.py | 1b7345ebfe7105408c118cf6e116b9ae7decb66a | []
| no_license | https://github.com/Tomas-Lawton/fullproof | 76e2e8dd0fb1f4ec7e74783e13768c19c92ce944 | fb72094d02028beb2d3d5ba76618d5103e3cfb76 | refs/heads/master | 2022-11-16T00:26:54.254246 | 2020-07-06T04:43:42 | 2020-07-06T04:43:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Navid-S-B
# 3-07-2020
# Webscraping script to access TinEye
# UPDATE: Same problem, the full HTML cannot be downloaded to scrape the page.
# The site likely has some protective measure so people use their API instead.
# Import webscraping libraries
from bs4 import BeautifulSoup
import requests
import re
class scraper:
def __init__(self, image_url):
self.image_url = image_url
p = re.compile('/')
self.new_url = p.sub("%2F", self.image_url)
p = re.compile(':')
self.new_url = p.sub("%3A", self.new_url)
self.new_url = "http://www.tineye.com/search/?url{}".format(self.new_url)
def get_no_results(self):
response = requests.get(self.new_url)
soup = BeautifulSoup(response.text,'html.parser')
print(soup)
webpage = scraper("https://cdn.spacetelescope.org/archives/images/wallpaper2/heic2007a.jpg")
webpage.get_no_results() | UTF-8 | Python | false | false | 919 | py | 6 | tineye_scraper.py | 6 | 0.660501 | 0.644178 | 0 | 31 | 28.677419 | 92 |
lmokto/mltrading | 11,948,599,025,252 | 9b494557fc57e3e902c38c6f64de2dbf02906327 | b5ce195d6ea0d93e080db4844b979c7ae67eec06 | /get_max_column.py | f177a86acd6ba5d299e958d8a083684eb995ba5c | []
| no_license | https://github.com/lmokto/mltrading | b28daa87b1747e667d78bee87516260dbb962bf3 | f991285d8d94767ff4e1434c009ff041d0615077 | refs/heads/master | 2021-05-08T11:42:58.146659 | 2018-02-01T23:54:54 | 2018-02-01T23:54:54 | 119,908,066 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
def get_max_column(symbol, column):
"""
    :param symbol: ticker symbol; data is read from data/<symbol>.csv
    :param column: name of the CSV column to scan
    :return: the maximum value in that column
"""
df = pd.read_csv('data/{name}.csv'.format(name=symbol))
return df[column].max()
def test_run():
column = 'Close'
symbol = 'HCP'
print(30 * '--')
print('Max column {clm_name} in data/{sym_name}.csv.'.format(clm_name=column, sym_name=symbol))
print(get_max_column(symbol, column))
if __name__ == '__main__':
test_run()
| UTF-8 | Python | false | false | 481 | py | 20 | get_max_column.py | 16 | 0.577963 | 0.573805 | 0 | 23 | 19.913043 | 99 |
bonevb/Cisco-and-Python | 10,883,447,169,009 | 5c1954f8f5e797ba4092a7a006543dfc5ad803c4 | 93eeaf8f08cbd663381384e14644208f1872d278 | /dns_apple.py | f63ee43c799c153e8e4eadd93abaaa40ba67b2bb | []
| no_license | https://github.com/bonevb/Cisco-and-Python | 239892f14ac56f42f4f9f760aefb950b63a24664 | 8fb4806aefb49b6399ec5f54e14454a074b70490 | refs/heads/master | 2023-05-05T15:11:15.276874 | 2021-05-28T19:26:49 | 2021-05-28T19:26:49 | 371,796,255 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #The script will check if ip address behind b2bftp.apple.com is changed and
#if it has, the script adds the new address to a Cisco ASA object-group
import socket
import filecmp
import os
import sqlite3
DB_NAME = 'ip.db'
db = sqlite3.connect(DB_NAME)
c = db.cursor()
def check_dns():
host = 'b2bftp.apple.com'
result = socket.gethostbyname(host)
return result
ip = check_dns()
print(ip)
def check_ip(ip):
c.execute('SELECT * FROM IP WHERE ADDR = ?', (ip,))
try:
for i in c.fetchone():
return i
except:
return None
def save_to_db(ip):
c.execute('INSERT INTO IP VALUES(?)', (ip,))
db.commit()
    print(ip + ' saved')
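# Assumed table layout (hypothetical): CREATE TABLE IF NOT EXISTS IP (ADDR TEXT);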
#print('the ip address is: ', check_ip(ip))
if check_ip(ip) is None:
#print('ip is not into DB')
save_to_db(ip)
with open('dns', 'w') as file:
line = ip
file.write('network-object host '+ line)
os.system('ansible-playbook lines_asa_apple.yaml')
| UTF-8 | Python | false | false | 961 | py | 3 | dns_apple.py | 1 | 0.619147 | 0.614984 | 0 | 48 | 18.979167 | 75 |
mch/python-ant | 17,025,250,362,229 | 0fe912c363cbf634ce6c96143df764f36327ec90 | 7ebd6061a5152f537b9d1838ecfd3a326089ee70 | /demos/ant.core/10-weight.py | 919da64ceff33f5fbca8e983839fecd68f1678a4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/mch/python-ant | 13e629de185ecd5bb4f6ffd5520d5034a37d0ef7 | 02e045825434a17ffe113a82cc8191683223ea5f | refs/heads/master | 2022-02-17T07:00:35.360513 | 2022-02-01T04:02:16 | 2022-02-01T04:02:16 | 89,795,395 | 21 | 14 | MIT | true | 2022-02-01T04:02:17 | 2017-04-29T15:46:59 | 2022-01-24T17:09:58 | 2022-02-01T04:02:16 | 339 | 15 | 8 | 0 | Python | false | false | """
Extending on demo-03, implements an event callback we can use to process the
incoming data.
"""
from __future__ import print_function
import sys
import time
from ant.core import driver
from ant.core import node
from ant.core import event
from ant.core import message
from ant.core.constants import *
from config import *
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
command_id = 0x46
send_times = 2
pg_num = 1
DP_PAYLOAD = bytearray([command_id, 0xFF, 0xFF, 0, 0, send_times, pg_num, 1])
#DP_PAYLOAD = bytearray([255, 255, 0, 0, send_times, pg_num, 1])
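# Payload layout for the request above (ANT+ common page 0x46, "Request Data Page" -- assumed):
#   [page id, reserved 0xFF, reserved 0xFF, descriptor lo, descriptor hi,
#    requested transmission count, requested page number, command type]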
CHANNEL = 1 #TODO: not really, channel is set much later
pay = DP_PAYLOAD
p1 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 2
p2 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 3
p3 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 4
p4 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
RSP = bytearray([0xFF, 0x3A])
class RsMessage(message.ChannelMessage):
type = 0x63
def __init__(self, number=0x00):
super(RsMessage, self).__init__(number=number, payload=RSP)
rs = RsMessage(0)
RECV = 0
class WeightListener(event.EventCallback):
def process(self, msg, _channel):
global RECV
if isinstance(msg, message.ChannelBroadcastDataMessage):
# print('R%04X: ' % RECV, *('%02X' % ord(byte) for byte in msg.payload))
data = str(msg.payload)
print('%04X' % RECV, *('%02X' % ord(byte) for byte in data))
# print [map(ord, msg.payload)]
page_number = msg.payload[1]
RECV += 1
if page_number == 1:
pass
elif page_number == 2:
pass
elif page_number == 3:
pass
elif page_number == 4:
pass
def delete_channel(channel):
channel.close()
channel.unassign()
def reset_channel(antnode, channel=None):
if channel:
delete_channel(channel)
channel = antnode.getFreeChannel()
channel.name = 'C:WGT'
channel.assign(net, CHANNEL_TYPE_TWOWAY_RECEIVE)
channel.setID(119, 0, 0)
    channel.period = 0x2000  # or 0x0020 ???
channel.frequency = 0x39
rs.channelNumber = channel.deviceNumber
channel.node.evm.writeMessage(rs)
channel.searchTimeout = TIMEOUT_NEVER
channel.open()
channel.registerCallback(WeightListener())
return channel
# Initialize
#LOG=None
#DEBUG=False
stick = driver.USB1Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Setup channel
net = node.Network(name='N:ANT+', key=NETKEY)
antnode.setNetworkKey(0, net)
channel = reset_channel(antnode)
restart = int(time.time())
# Wait
print("Listening for weight scale events ...")
while True:
time.sleep(0.1)
# Restart channel every 3 seconds
now = int(time.time())
if (now % 3 == 0) and (now != restart):
channel = reset_channel(antnode, channel)
RECV = 0
restart = now
# Shutdown
delete_channel(channel)
antnode.stop()
| UTF-8 | Python | false | false | 3,088 | py | 24 | 10-weight.py | 23 | 0.656088 | 0.626295 | 0.002591 | 120 | 24.733333 | 83 |
SamuelIvan99/Python-exercises | 9,672,266,363,834 | d3935ae124bd1a04399ae39952ce2f518c4a4556 | 08086a474b662db4bf7ad24e979368944ea189a8 | /KSI_18-19/Uloha - 10B/binary_search_4.0.py | e3e9b5b2ae127ace0445373c2cc0aa9c40c0fa90 | []
| no_license | https://github.com/SamuelIvan99/Python-exercises | a81f208776b78a5b4c19af419fd9ecf185160279 | 210c6543caf43ad74f319e056a6e81cc7ee887c7 | refs/heads/master | 2020-05-30T05:31:44.617812 | 2019-10-18T09:53:21 | 2019-10-18T09:53:21 | 189,562,496 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def find(a, v):
c = len(a) - 1
r = 0
while c > -1 and r < len(a):
value = a[r][c]
if value == v:
return (r, c)
elif value > v:
c -= 1
elif value < v:
r += 1
return None
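# Saddleback search: start at the top-right corner, step left on larger values
# and down on smaller ones -- O(rows + cols) on a row- and column-sorted matrix.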
A = [[19, 30, 31, 45, 57], [25, 32, 32, 51, 69], [33, 35, 38, 58, 78], [34, 49, 67, 84, 102], [44, 54, 73, 90, 115]]
print(find(A, 19)) # (0, 0)
print(find(A, 80)) # None
print(find(A, 30)) # (0, 1)
print(find(A, 54)) # (4, 1)
print(find(A, 75)) # None
print(find(A, 32)) # (1, 1)
print(find(A, 115)) # (4, 4)
| UTF-8 | Python | false | false | 577 | py | 150 | binary_search_4.0.py | 108 | 0.426343 | 0.284229 | 0 | 22 | 25.227273 | 116 |
bgtron/nxtoolkit | 3,298,534,907,446 | 2a16eda8fcb74ca5cc79b9953f99faaf92d0b661 | 6a33091266e24d18628e91913f37ecb6ef9e7f11 | /samples/nx-copy-running-startup.py | 35d78af1619504b88a04e6040256655c6ef5bba1 | [
"Apache-2.0"
]
| permissive | https://github.com/bgtron/nxtoolkit | 095b1d498d2ee33b66ad7de8b71a52f4bbf3bcde | b24f613eb427a1df924bb28d295b96916124739f | refs/heads/master | 2021-04-27T22:22:22.209659 | 2019-06-27T15:28:00 | 2019-06-27T15:28:00 | 122,418,627 | 0 | 0 | null | true | 2018-02-22T02:03:21 | 2018-02-22T02:03:21 | 2018-02-22T02:00:53 | 2018-02-22T01:54:41 | 238 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Simple application that logs on to the Switch and copy the
running config to startup config
"""
import sys
import nxtoolkit.nxtoolkit as NX
import time
def main():
"""
Main execution routine
:return: None
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = 'copy running config to startup config'
creds = NX.Credentials('switch', description)
args = creds.get()
# Login to Switch
session = NX.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to Switch')
sys.exit(0)
copy = NX.Copy()
run_to_start = NX.RunningToStartUp()
copy.add(run_to_start)
resp = session.push_to_switch(copy.get_url(), copy.get_json())
if not resp.ok:
        print(resp.text)
print('%% Could not push to the switch')
exit(0)
# Get the status of copy
    time.sleep(5)  # wait 5 seconds until the copy process completes
copy = NX.Copy.get(session)
print "Copy status: ", copy.run_to_start.status
# Uncomment below lines to delete the copy task
'''
resp = session.delete(run_to_start.get_url())
if not resp.ok:
print resp.text
print('%% Could not delete from the switch')
exit(0)
'''
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,903 | py | 43 | nx-copy-running-startup.py | 40 | 0.471926 | 0.467447 | 0 | 75 | 37.706667 | 80 |
yuliashishko/PyQtViselitsa | 2,723,009,272,333 | af8fed3559e385b0391f979428ba43604d1f8637 | 63b3b6d53924e04256acad95bcd7df0e6b738222 | /Utils.py | 25718f5a1cf8165ad1ee6b3c44c9a001c074a0e7 | []
| no_license | https://github.com/yuliashishko/PyQtViselitsa | 48e0d7d65fe93cd24b9996bbe94c7b4d3bf8df51 | 2117ab8b6b74faf018413794a2e2cec900b7e174 | refs/heads/master | 2023-07-17T16:15:23.108273 | 2021-09-04T18:10:46 | 2021-09-04T18:10:46 | 403,125,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import sqlite3
con = sqlite3.connect("users.sqlite")
cur = con.cursor()
class UserNotFoundError(Exception):
pass
class WordNotFoundError(Exception):
pass
class EmptyWordListError(Exception):
pass
def get_user(login):
user = cur.execute(
f"""SELECT * FROM users WHERE login = '{login}'""").fetchone()
if not user:
raise UserNotFoundError
return user
def check_login(login):
user = cur.execute(
f"""SELECT * FROM users WHERE login = '{login}'""").fetchone()
return user
def delete_user(login):
cur.execute(f"DELETE FROM users WHERE login = '{login}'")
con.commit()
def get_word(word):
result = cur.execute(
f"""SELECT * FROM words WHERE word = '{word}'""").fetchone()
if not result:
raise WordNotFoundError
return result
def create_game(login, word_text, exp):
cur.execute(f"""INSERT INTO games VALUES('{login}', '{word_text}', {exp})""")
con.commit()
def get_random_word(level, user):
    # UI level labels (Russian): "Simple words (50 exp)", "Normal words (100 exp)", "Hard words (150 exp)"
    leveler = {"Простые слова (50 опыта)": 1, "Нормальные слова (100 опыта)": 2, "Сложны слова (150 опыта)": 3}
result = cur.execute(f"""SELECT word FROM words WHERE words.level = '{leveler[level]}' AND words.word NOT IN
(SELECT word FROM games WHERE login = '{user}')""").fetchall()
if not len(result):
raise EmptyWordListError
word = random.choice(result)
return word
def get_profile(user):
result = get_user(user)
cur_exp = result[3]
max_exp = result[7] * 100 + 150
result = {
'name': result[1],
'username': result[6],
'level': str(result[7]),
'totalExp': str(result[3]),
'gamesLost': str(result[5]),
'gamesWon': str(result[4]),
'currentExp': str(cur_exp),
'maxExp': str(max_exp),
'levelProgress': int(cur_exp / max_exp * 100)
}
return result
def update_user_loose(login, word_text):
user = get_user(login)
curr_looses = user[5]
cur.execute(
f"""UPDATE users SET looses = {curr_looses + 1} WHERE login = '{login}'""")
create_game(login, word_text, 0)
con.commit()
def update_user_win(login, word_text):
user = get_user(login)
word = get_word(word_text)
curr_wins = user[4]
curr_exp = user[3]
exp_for_game = word[1] * 50
lvl = user[7]
cur.execute(
f"""UPDATE users SET wins = {curr_wins + 1} WHERE login = '{login}'""")
if curr_exp + exp_for_game >= 50 + lvl * 100:
cur.execute(f"""UPDATE users SET level = {lvl + 1} WHERE login = '{login}'""")
curr_exp = curr_exp - 50 - 100 * lvl
cur.execute(
f"""UPDATE users SET exp = {curr_exp + exp_for_game} WHERE login = '{login}'""")
create_game(login, word_text, exp_for_game)
con.commit()
def get_word_state(word):
result_games = cur.execute(
f"""SELECT * FROM games WHERE word = '{word}'""").fetchall()
count_looses = 0
for elem in result_games:
if elem[2] == 0:
count_looses += 1
word_state = {
'won': str(len(result_games) - count_looses),
'persent': str(100 - count_looses / len(result_games) * 100),
'players': str(len(result_games))
}
return word_state
def create_account(name, login, password):
cur.execute(
"INSERT INTO users('name', login, password, exp, wins, looses, 'level') VALUES(?, ?, ?, ?, ?, ?, ?);",
(name, login, password, 0, 0, 0, 0)
)
con.commit()
| UTF-8 | Python | false | false | 3,534 | py | 14 | Utils.py | 8 | 0.590348 | 0.571675 | 0 | 128 | 26.195313 | 113 |
awagner83/doctools | 15,272,903,736,302 | 017849705959aa2e299dc534dc952f8ad97bba42 | d88d5beff275a2e9ffdf457ca446a319471c0d14 | /doctools.py | b6bcb91f28251e283dee25731604b16b3f037ca4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/awagner83/doctools | fae30ccd6b86ec8fbc1204ea9da5d0206c4eff29 | e2133f5674e5089b9cb2634a89e6fd55ae618bf4 | refs/heads/master | 2022-02-23T09:33:49.586949 | 2022-02-13T01:58:08 | 2022-02-13T01:58:08 | 2,308,583 | 0 | 1 | NOASSERTION | false | 2022-02-13T01:58:09 | 2011-09-01T15:56:00 | 2013-10-03T12:51:57 | 2022-02-13T01:58:08 | 99 | 2 | 2 | 0 | Python | false | false | """Docblock manipulation utilities."""
from pprint import pformat
def append_to_docs(fn, text):
"""Append text to a functions existing docblock."""
if not text:
return
if fn.__doc__:
min_indent = _getindent(fn.__doc__)
fn.__doc__ = '%s\n\n%s' % (fn.__doc__, _indent(text, min_indent))
else:
fn.__doc__ = text
def append_var_to_docs(fn, label, value):
"""Append text & pformatted value to docblock."""
value_width = 76 - _getindent(fn.__doc__)
append_to_docs(
fn,
"%s:\n%s" % (
label,
_indent(pformat(value, width=value_width))
)
)
def include_docs_from(source_function):
"""Decorator copying documentation from one function onto another."""
def decorator(dest_function):
append_to_docs(dest_function, source_function.__doc__)
return dest_function
return decorator
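# Usage sketch:
#   def base(): "Shared details."
#   @include_docs_from(base)
#   def derived(): "Own summary."
#   # derived.__doc__ == 'Own summary.\n\nShared details.'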
def _indent(string, indent_level=4):
"""Indent each line by `indent_level` of spaces."""
return '\n'.join('%s%s' % (' '*indent_level, x) for x in
string.splitlines())
def _getindent(string):
try:
lines = string.splitlines()
# drop first line if it has no indent level
if _nspaces(lines[0]) == 0:
lines.pop(0)
indent_levels = (_nspaces(x) for x in lines if x)
return min(indent_levels) or 0
except (AttributeError, ValueError):
# Things that don't look like strings and strings with no
# indentation should report indentation of 0
return 0
def _nspaces(line):
for idx, char in enumerate(line):
if char != ' ':
return idx
| UTF-8 | Python | false | false | 1,692 | py | 5 | doctools.py | 3 | 0.575059 | 0.56974 | 0 | 62 | 26.274194 | 73 |
enigmawxy/TensorGraph | 10,539,849,750,576 | 2fdf9c6b0156ea254590cef2177c93945453041a | ab659d02c4daaf0f3794d927ee2c6015e2352e23 | /tensorgraph/__init__.py | a9dd1361c4052e3af257257aca02a49a318feee6 | [
"MIT"
]
| permissive | https://github.com/enigmawxy/TensorGraph | 1456cfa237f4e2f94653aa04c6f15a5ad2661239 | 6ba18d5fe4ac6004062f805f9457b0fdc5c42cfd | refs/heads/master | 2020-05-16T01:37:15.814395 | 2019-05-03T03:28:35 | 2019-05-03T03:28:35 | 182,606,886 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tensorgraph.graph import *
from tensorgraph.operations import *
from tensorgraph.gradients import RegisterGradient
from tensorgraph.session import Session
import tensorgraph.train
# Create a default graph.
# import builtins
# DEFAULT_GRAPH = builtins.DEFAULT_GRAPH = Graph()
| UTF-8 | Python | false | false | 328 | py | 15 | __init__.py | 12 | 0.777439 | 0.77439 | 0 | 12 | 26.333333 | 50 |
isabella232/pygate-gRPC | 6,511,170,465,063 | 3c3d18b2f5b48169a3bf119152fc420ee6537a41 | 64b90d33916cdff62ea3116dd306717c9abc8fa4 | /pygate_grpc/health.py | 2629685cbeca07753654a01bbc3a4ecbb20a4a2f | [
"MIT"
]
| permissive | https://github.com/isabella232/pygate-gRPC | 336347d866d791b69b3cd0ac69853118491fb07f | 429967fd3c6f56c5f787e54a1d02e0b377640d6f | refs/heads/main | 2023-01-09T21:16:38.373922 | 2020-10-30T11:03:07 | 2020-10-30T11:03:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from proto import health_rpc_pb2, health_rpc_pb2_grpc
from pygate_grpc.errors import ErrorHandlerMeta
logger = logging.getLogger(__name__)
class HealthClient(object, metaclass=ErrorHandlerMeta):
def __init__(self, channel):
self.client = health_rpc_pb2_grpc.RPCServiceStub(channel)
def check(self):
req = health_rpc_pb2.CheckRequest()
return self.client.Check(req)
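# Hypothetical usage sketch (the channel target depends on your Powergate setup):
#
#     import grpc
#     client = HealthClient(grpc.insecure_channel("localhost:5002"))
#     print(client.check())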
| UTF-8 | Python | false | false | 417 | py | 35 | health.py | 32 | 0.719424 | 0.709832 | 0 | 15 | 26.8 | 65 |
vald-phoenix/psga | 15,616,501,108,097 | 8df7398d49fb7b8777decb794b68a9e39cc54555 | 90cf1710b57194cae9956ced0199f12e97c7b996 | /app/api/tests.py | 2100b0953240ea553a3cf6a0ff83e1a5bcbc35c3 | []
| no_license | https://github.com/vald-phoenix/psga | 215ba268e4f6f7e0cd83e99413ce913dfe3a5a02 | 785e018f2ed10a3cb8a47ce8e01f036d2561f19e | refs/heads/master | 2022-04-28T08:41:54.439391 | 2019-10-15T12:44:21 | 2019-10-15T12:44:21 | 215,274,507 | 0 | 0 | null | false | 2022-04-22T22:32:58 | 2019-10-15T10:48:24 | 2019-10-15T12:45:20 | 2022-04-22T22:32:56 | 689 | 0 | 0 | 4 | Python | false | false | import pytest
from app.models import Position, Ship
@pytest.fixture
def ship():
"""A dummy ship fixture."""
ship = Ship(imo=9595321, name='MSC Preziosa')
ship.save()
return ship
@pytest.fixture
def position(ship):
"""A dummy position fixture."""
position = Position(
latitude='17.9850006103516',
longitude='-63.1359672546387',
ship=ship,
timestamp='2019-01-15 09:43:13+00'
)
position.save()
return position
@pytest.mark.django_db
def test_ships_endpoint(client, ship):
# Given: a ship entry with data exists in the database.
result = [{'imo': ship.imo, 'name': ship.name}]
# When: a request is being made to the endpoint.
response = client.get('/api/ships/')
# Then: the endpoint returns the JSON data that corresponds to the result.
assert response.json() == result
@pytest.mark.django_db
def test_ship_positions_endpoint(client, position):
# Given: a position entry with data exists in the database.
result = [
{'latitude': position.latitude, 'longitude': position.longitude}
]
# When: a request is being made to the endpoint.
response = client.get('/api/positions/9595321/')
# Then: the endpoint returns the JSON data that corresponds to the result.
assert response.json() == result
| UTF-8 | Python | false | false | 1,329 | py | 18 | tests.py | 10 | 0.662904 | 0.617758 | 0 | 52 | 24.557692 | 78 |
koverman47/EGEN_310 | 5,325,759,490,173 | 8cf9d345ca972cecbd9874090e2871325a639c5e | dbb0a4d452ac0faf00411a09b7e32f13ffdb31e8 | /tests/key_test.py | 6f714b366fad9253140ad18bfc109a1a6adc5247 | []
| no_license | https://github.com/koverman47/EGEN_310 | 3ef66b7fb773b4e5fb833c250c87c7cf4fc84d49 | f69e292baa48bca441dd0f7d9ba7789db417d42a | refs/heads/master | 2020-04-18T00:39:47.999960 | 2019-04-24T20:14:44 | 2019-04-24T20:14:44 | 167,086,003 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import sys
import tty
import termios
tty.setcbreak(sys.stdin)
key = ord(sys.stdin.read(1))
print(key)
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
old[3] = old[3] | termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, old)
sys.exit(0)
| UTF-8 | Python | false | false | 270 | py | 23 | key_test.py | 22 | 0.722222 | 0.703704 | 0 | 16 | 15.875 | 45 |
Nik618/DjangoProject_v02 | 19,516,331,419,712 | 2b24ba38d65a995ebe00b8fdfe53b100248bf5dd | 0932de21a8d5d3d6002b6f4fdd0f19b5689d7454 | /main/views.py | fcd1a9e9687d44097bf5f297efaa776731b4f0d4 | []
| no_license | https://github.com/Nik618/DjangoProject_v02 | 08d7f9d48e2207154e98fdc8cfb47d599613b422 | 749c4fac0788f6ce17a110bf145eb7a57b43ef2d | refs/heads/master | 2023-05-13T01:27:31.940581 | 2021-06-03T19:00:23 | 2021-06-03T19:00:23 | 373,611,644 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.gis.geos import MultiPolygon, Polygon
from rest_framework.decorators import api_view, permission_classes
from rest_framework.pagination import PageNumberPagination
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from rest_framework.permissions import AllowAny
from rest_framework_gis.pagination import GeoJsonPagination
from .models import Country, Town, Capital
from django.views.generic.edit import CreateView
from django.core.serializers import serialize
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework import generics
from .serializers import *
# Create your views here.
# def countryDatasets(request):
# red = serialize('geojson',
# Country.objects.all(),
# geometry_field='location')
# return Response(red)
class GetCountry(generics.ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
class SetCountry(generics.RetrieveUpdateDestroyAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
class GetTown(generics.ListAPIView):
queryset = Town.objects.all()
serializer_class = TownSerializer
class SetTown(generics.RetrieveUpdateDestroyAPIView):
queryset = Town.objects.all()
serializer_class = TownSerializer
class GetCapital(generics.ListAPIView):
queryset = Capital.objects.all()
serializer_class = CapitalSerializer
class SetCapital(generics.RetrieveUpdateDestroyAPIView):
queryset = Capital.objects.all()
serializer_class = CapitalSerializer
# def get_queryset(self):
# queryset = Town.objects.all()
# country = self.kwargs['Russia']
# if country is not None:
# queryset = Town.objects.filter(country=country)
# return queryset
class getTowns(generics.ListAPIView):
serializer_class = TownSerializer
def get_queryset(self):
param = self.kwargs['tittle']
return Town.objects.filter(country__tittle=param)
class getTownInArea(generics.ListAPIView):  # return the objects located inside the given area
queryset = Town.objects.all()
serializer_class = TownSerializer
def getCoordinates(self, c: str) -> tuple:
return tuple(float(i) for i in c.split(','))
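    # e.g. getCoordinates("55.75,37.61") -> (55.75, 37.61); each URL segment
    # encodes one corner of the rectangular search area.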
def get(self, request, c1, c2, c3, c4):
c1 = self.getCoordinates(c1)
c2 = self.getCoordinates(c2)
c3 = self.getCoordinates(c3)
c4 = self.getCoordinates(c4)
search_area = MultiPolygon(Polygon((c1, c2, c3, c4, c1,)))
paginator = GeoJsonPagination()
paginator.page_size = 3
        red = Town.objects.filter(location__contained=search_area)  # the key step: spatial containment filter
result_page = paginator.paginate_queryset(red, request)
serializer = TownSerializer(result_page, many=True)
return paginator.get_paginated_response(serializer.data)
class getCountryInArea(generics.ListAPIView):  # return the objects located inside the given area
queryset = Country.objects.all()
serializer_class = CountrySerializer
def getCoordinates(self, c: str) -> tuple:
return tuple(float(i) for i in c.split(','))
def get(self, request, c1, c2, c3, c4):
c1 = self.getCoordinates(c1)
c2 = self.getCoordinates(c2)
c3 = self.getCoordinates(c3)
c4 = self.getCoordinates(c4)
search_area = MultiPolygon(Polygon((c1, c2, c3, c4, c1,)))
paginator = GeoJsonPagination()
paginator.page_size = 3
        red = Country.objects.filter(location__contained=search_area)  # the key step: spatial containment filter
result_page = paginator.paginate_queryset(red, request)
serializer = CountrySerializer(result_page, many=True)
return paginator.get_paginated_response(serializer.data)
class getCapitalInArea(generics.ListAPIView):  # return the objects located inside the given area
queryset = Capital.objects.all()
serializer_class = CapitalSerializer
def getCoordinates(self, c: str) -> tuple:
return tuple(float(i) for i in c.split(','))
def get(self, request, c1, c2, c3, c4):
c1 = self.getCoordinates(c1)
c2 = self.getCoordinates(c2)
c3 = self.getCoordinates(c3)
c4 = self.getCoordinates(c4)
search_area = MultiPolygon(Polygon((c1, c2, c3, c4, c1,)))
paginator = GeoJsonPagination()
paginator.page_size = 3
        red = Capital.objects.filter(location__contained=search_area)  # the key step: spatial containment filter
result_page = paginator.paginate_queryset(red, request)
serializer = CapitalSerializer(result_page, many=True)
return paginator.get_paginated_response(serializer.data)
class getSquare(generics.ListAPIView):  # return the total area of the matching objects
queryset = Town.objects.all()
serializer_class = TownSerializer
def getCoordinates(self, c: str) -> tuple:
return tuple(float(i) for i in c.split(','))
def get(self, request, c1, c2, c3, c4):
c1 = self.getCoordinates(c1)
c2 = self.getCoordinates(c2)
c3 = self.getCoordinates(c3)
c4 = self.getCoordinates(c4)
search_area = MultiPolygon(Polygon((c1, c2, c3, c4, c1,)))
paginator = GeoJsonPagination()
paginator.page_size = 3
        red = Town.objects.filter(location__contained=search_area)  # the key step: spatial containment filter
sumS = 0
for town in red:
sumS += town.location.area
return HttpResponse(content=sumS)
| UTF-8 | Python | false | false | 5,780 | py | 9 | views.py | 6 | 0.692638 | 0.679056 | 0 | 157 | 34.624204 | 92 |
mostafaelhoushi/tensor-decompositions | 1,563,368,111,088 | 1c0fbf04842e876c4d9975747fc357923437a369 | 82dafd9b89abdf334420e50f9d7562984aed8a7d | /reconstructions.py | d635aef8302f713f5c9f238feb50110411a1d280 | []
| no_license | https://github.com/mostafaelhoushi/tensor-decompositions | 844aaed58abeb1e17923860a5e9aebed64465030 | 8c3186dfc4d5d2eb22b0a673e3eaf1bcaa872feb | refs/heads/master | 2020-07-09T03:51:30.214582 | 2020-05-02T12:46:00 | 2020-05-02T12:46:00 | 203,867,675 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorly as tl
from tensorly.decomposition import parafac, partial_tucker
import numpy as np
import torch
import torch.nn as nn
def reconstruct_model(model, cp=False):
# TODO: Find a better way to avoid having to convert model from CPU to CUDA and back
model.cpu()
iterator = iter(model._modules.items())
item = next(iterator, None)
while item is not None:
name, module = item
if len(list(module.children())) > 0:
# recurse
model._modules[name] = reconstruct_model(model=module, cp=cp)
item = next(iterator, None)
elif type(module) == nn.Linear:
linear_layers_list = [module]
linear_names_list = [name]
# add all consecutive conv layers to list
item = next(iterator, None)
while item is not None:
name, module = item
if type(module) == nn.Linear:
linear_layers_list.append(module)
linear_names_list.append(name)
item = next(iterator, None)
else:
break
# reconstruct
if len(linear_layers_list) > 1:
combined_weight = None
for i, (layer, name) in enumerate(zip(linear_layers_list, linear_names_list)):
if i == 0:
combined_weight = layer.weight.data
else:
combined_weight = torch.matmul(layer.weight.data, combined_weight)
if i < len(linear_layers_list) - 1:
assert(layer.bias is None)
model._modules[name] = torch.nn.Identity()
else:
assert(layer.bias is not None)
model._modules[name] = torch.nn.Linear(in_features = linear_layers_list[0].in_features, out_features = linear_layers_list[-1].out_features, bias = True)
model._modules[name].weight.data = combined_weight
model._modules[name].bias.data = linear_layers_list[-1].bias.data
elif type(module) == nn.Conv2d:
conv_layers_list = [module]
conv_names_list = [name]
# add all consecutive conv layers to list
item = next(iterator, None)
while item is not None:
name, module = item
if type(module) == nn.Conv2d:
conv_layers_list.append(module)
conv_names_list.append(name)
item = next(iterator, None)
else:
break
# reconstruct
if len(conv_layers_list) > 1:
if cp:
raise("cp reconstruction not yet supported")
else: # tucker reconstruction
if(len(conv_layers_list) == 3):
[last_layer, core_layer, first_layer] = conv_layers_list
[last_name, core_name, first_name] = conv_names_list
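                        # Assumed layout: three consecutive convs produced by a
                        # Tucker-2 decomposition (1x1 input projection, core conv,
                        # 1x1 output projection); folding the factor matrices back
                        # into the core recovers the original full kernel.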
first_weight = first_layer.weight.data.squeeze(-1).squeeze(-1)
core_weight = core_layer.weight.data
last_weight = torch.transpose(last_layer.weight.data, 1, 0).squeeze(-1).squeeze(-1)
reconstructed_weight = tl.tucker_to_tensor(core_weight, [first_weight, last_weight])
assert(first_layer.bias is not None)
assert(core_layer.bias is None)
assert(last_layer.bias is None)
reconstructed_bias = first_layer.bias.data
reconstructed_layer = torch.nn.Conv2d(in_channels=first_layer.in_channels, \
out_channels=last_layer.out_channels, kernel_size=core_layer.kernel_size, stride=core_layer.stride,
padding=core_layer.padding, dilation=core_layer.dilation, bias=True)
reconstructed_layer.weight.data = reconstructed_weight
reconstructed_layer.bias.data = reconstructed_bias
model._modules[first_name] = reconstructed_layer
model._modules[core_name] = torch.nn.Identity()
model._modules[last_name] = torch.nn.Identity()
elif(len(conv_layers_list) == 2):
[core_layer, last_layer] = conv_layers_list
[core_name, last_name] = conv_names_list
core_weight = core_layer.weight.data
last_weight = last_layer.weight.data.squeeze(-1).squeeze(-1)
reconstructed_weight = tl.tucker_to_tensor(core_weight, [last_weight])
assert(core_layer.bias is None)
assert(last_layer.bias is not None)
reconstructed_bias = last_layer.bias.data
reconstructed_layer = torch.nn.Conv2d(in_channels=core_layer.in_channels, \
out_channels=last_layer.out_channels, kernel_size=core_layer.kernel_size, stride=core_layer.stride,
padding=core_layer.padding, dilation=core_layer.dilation, bias=True)
reconstructed_layer.weight.data = reconstructed_weight
reconstructed_layer.bias.data = reconstructed_bias
model._modules[core_name] = torch.nn.Identity()
model._modules[last_name] = reconstructed_layer
else:
item = next(iterator, None)
model.cuda()
return model
| UTF-8 | Python | false | false | 5,940 | py | 7 | reconstructions.py | 5 | 0.505724 | 0.50202 | 0 | 122 | 46.688525 | 176 |
dzmitrybutar/test_drawing | 19,189,913,885,143 | 8ec11d24d7166d5a8707a7fe0e413a59ca301c8a | a1961acfbcd0cdc0c8c225499f94c69ae4c700c7 | /config.py | 05dd5bd21f15fad04545b5458818fa0f5437da08 | []
| no_license | https://github.com/dzmitrybutar/test_drawing | 01469bf333333faede77e0eb56569c2067e02af0 | 01cac1a3b609fb75230273ac43a376a891a843ae | refs/heads/master | 2020-07-08T05:51:18.860984 | 2019-08-21T13:17:19 | 2019-08-21T13:17:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser(description='Drawing')
parser.add_argument('infile', type=str, help='Input filename and dir for initial conditions')
parser.add_argument('outfile', type=str, help='Output filename and dir for the result')
args = parser.parse_args()
infile_path = BASEDIR + '/' + args.infile
outfile_path = BASEDIR + '/' + args.outfile
| UTF-8 | Python | false | false | 433 | py | 13 | config.py | 8 | 0.73903 | 0.73903 | 0 | 12 | 35.083333 | 93 |
simon-andrews/hat-appraiser | 16,200,616,649,414 | 2a79571274535a7c965d1b85e753a3f8fe381c30 | 0a0dd15977f701b36462cf10ae1b7a15c1b464b9 | /server.py | ef55b11c1cf501431d11967d3c73dcbdab703915 | [
"MIT"
]
| permissive | https://github.com/simon-andrews/hat-appraiser | 2bbbbca74d80bc76bfae5cbd93c23c204e20c104 | f42d4a4ece005a345faebcd4140472586e6c47ab | refs/heads/master | 2021-01-01T03:34:32.112764 | 2016-04-24T16:37:49 | 2016-04-24T16:37:49 | 56,982,274 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template, request
from appraisal import get_effect_averages, get_price
app = Flask(__name__)
app.debug = True
@app.route('/', methods=('GET', 'POST'))
def index():
if request.method == "GET":
return render_template("index.html")
elif request.method == "POST":
return str(get_price(request.form["hatname"], request.form["hateffect"])) + " keys"
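# Illustrative request (field values made up): POSTing form data hatname=<hat>
# and hateffect=<effect> to "/" returns the appraised price, e.g. "12.5 keys".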
@app.route('/effect_averages')
def effect_averages():
return str(app.config["effect_averages"])
if __name__ == '__main__':
print('downloading')
app.config["effect_averages"] = get_effect_averages()
app.run()
| UTF-8 | Python | false | false | 631 | py | 4 | server.py | 2 | 0.648177 | 0.648177 | 0 | 24 | 25.291667 | 91 |
dilanfd/dynamics-of-springs | 2,740,189,179,099 | b9b3786c63357fa51a5e89dca34ed576d91f4f3a | b4159e7c2e569498db8a8e37efb952609622aeb1 | /elastica-problem/thesis3/lib/python3.6/random.py | 08b35f88155282a3d4c367cf623383f2a3d4bf0c | [
"MIT"
]
| permissive | https://github.com/dilanfd/dynamics-of-springs | 0433f8fe003f63e5b9d877bed48e223754a96bec | 99ae5ea20437f5e43ecc8b54bf27d468afea6a23 | refs/heads/master | 2020-03-23T15:56:55.058023 | 2018-12-06T02:15:46 | 2018-12-06T02:15:46 | 141,542,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /Users/Dilan/anaconda/lib/python3.6/random.py | UTF-8 | Python | false | false | 45 | py | 102 | random.py | 66 | 0.822222 | 0.777778 | 0 | 1 | 45 | 45 |
DavidBaug/AA | 16,217,796,541,486 | ae60a163aef1e4edb141e6cf195669fc426eefb0 | a101c155cade8437cfb84dafce7cdde38b1f1f5d | /Practicas/practica0/Ejercicio_clase.py | 8d7e33e147093ce937f6b95d420d4f3cb5f36491 | []
| no_license | https://github.com/DavidBaug/AA | 02a1c366bbb9947cde823c2b691612160adfb355 | 889236fbfad88840e691e8b812ea531ca4137958 | refs/heads/master | 2020-03-23T07:34:57.955712 | 2019-10-01T08:41:42 | 2019-10-01T08:41:42 | 141,280,624 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Ejercicio de clase. En este ejercicio se nos pide que:
Leamos la base de datos de iris que hay en scikit-learn.
Obtengamos de ella las características (datos de entrada X) y la clase (y).
Nos quedemos con las dos primeras características (2 primeras columnas
de X).
Separar en train 80% y test 20% aleatoriamente conservando la proporción de
elementos en cada clase tanto en train como en test.
"""
#Importamos paquetes necesarios.
import numpy as np
from sklearn import datasets
#Leemos el dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
#Nos quedamos con las 2 primeras características.
X = X[:, :2]
#Aleatorizamos los datos
#Voy a crear un vector de índices, aleatorizarlo y usarlo para indexar X e y.
idx = np.arange(0, X.shape[0], dtype=np.int32)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
#Averiguamos que clases hay en el dataset.
clases = np.unique(y)
#Separamos los datos según su clase
#Estoy usando una forma de crear una lista con un bucle que permite hacer
#python, puede sustituirse por un bucle for con un append a la lista.
X_class = [X[y==c_i] for c_i in clases]
#Separamos en train y test.
trainX_class = [Xi[:int(Xi.shape[0]*0.8)] for Xi in X_class]
testX_class = [Xi[int(Xi.shape[0]*0.8):] for Xi in X_class]
#Calculamos el nuevo tamaño.
sizes_train = [tX.shape[0] for tX in trainX_class]
sizes_test = [tX.shape[0] for tX in testX_class]
#Concatenamos
trainX = np.concatenate(trainX_class, axis=0)
testX = np.concatenate(testX_class, axis=0)
#Creamos trainY y testY
trainY = np.zeros(trainX.shape[0], y.dtype)
testY = np.zeros(testX.shape[0], y.dtype)
pos_train = pos_test = 0
#El comando zip permite empaquetar listas de la misma longitud para recorrerlas
#a la vez.
for c_i, size_train, size_test in zip(clases, sizes_train, sizes_test):
end_train = pos_train+size_train
end_test = pos_test+size_test
trainY[pos_train:end_train] = c_i
testY[pos_test:end_test] = c_i
pos_train = end_train
pos_test = end_test
#Eliminamos lo que sobra (no es necesario).
del X
del y
del sizes_train
del sizes_test
del pos_train
del pos_test
print('Done!')
| UTF-8 | Python | false | false | 2,210 | py | 34 | Ejercicio_clase.py | 20 | 0.711757 | 0.699955 | 0 | 78 | 27.217949 | 80 |
midas-research/calling-out-bluff | 6,493,990,589,612 | 89e7d30ae24a8e6442a1a8cde97eaf1b35d782f0 | 5ffdef59c244f719c43ee24d23de7201bf42eab5 | /Model2-EASE/src/nltk/emacs/pycomplete.py | 09b40d85df12f92534ca1b6f87a4ecf98f483275 | [
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-ND-3.0"
]
| permissive | https://github.com/midas-research/calling-out-bluff | 8db408efe1c211a8685bfc1b2553117770689639 | 4de3c56b64edeeef9968288679c4e5b261e9949c | refs/heads/models_test | 2022-12-13T02:36:24.054646 | 2020-08-19T07:05:55 | 2020-08-19T07:05:55 | 280,080,456 | 10 | 9 | MIT | false | 2020-08-09T18:57:22 | 2020-07-16T07:07:19 | 2020-07-20T08:17:43 | 2020-08-09T18:57:21 | 565,670 | 0 | 2 | 0 | null | false | false |
"""
Python dot expression completion using Pymacs.
This almost certainly needs work, but if you add
(require 'pycomplete)
to your .xemacs/init.el file (untried w/ GNU Emacs so far) and have Pymacs
installed, when you hit M-TAB it will try to complete the dot expression
before point. For example, given this import at the top of the file:
import time
typing "time.cl" then hitting M-TAB should complete "time.clock".
This is unlikely to be done the way Emacs completion ought to be done, but
it's a start. Perhaps someone with more Emacs mojo can take this stuff and
do it right.
See pycomplete.el for the Emacs Lisp side of things.
"""
import sys
import os.path
try:
x = set
except NameError:
from sets import Set as set
else:
del x
def get_all_completions(s, imports=None):
"""Return contextual completion of s (string of >= zero chars).
If given, imports is a list of import statements to be executed first.
"""
locald = {}
if imports is not None:
for stmt in imports:
try:
exec stmt in globals(), locald
except TypeError:
raise TypeError, "invalid type: %s" % stmt
dots = s.split(".")
if not s or len(dots) == 1:
keys = set()
keys.update(locald.keys())
keys.update(globals().keys())
import __builtin__
keys.update(dir(__builtin__))
keys = list(keys)
keys.sort()
if s:
return [k for k in keys if k.startswith(s)]
else:
return keys
sym = None
for i in range(1, len(dots)):
s = ".".join(dots[:i])
try:
sym = eval(s, globals(), locald)
except NameError:
try:
sym = __import__(s, globals(), locald, [])
except ImportError:
return []
if sym is not None:
s = dots[-1]
return [k for k in dir(sym) if k.startswith(s)]
def pycomplete(s, imports=None):
completions = get_all_completions(s, imports)
dots = s.split(".")
return os.path.commonprefix([k[len(dots[-1]):] for k in completions])
if __name__ == "__main__":
print "<empty> ->", pycomplete("")
print "sys.get ->", pycomplete("sys.get")
print "sy ->", pycomplete("sy")
print "sy (sys in context) ->", pycomplete("sy", imports=["import sys"])
print "foo. ->", pycomplete("foo.")
print "Enc (email * imported) ->",
print pycomplete("Enc", imports=["from email import *"])
print "E (email * imported) ->",
print pycomplete("E", imports=["from email import *"])
print "Enc ->", pycomplete("Enc")
print "E ->", pycomplete("E")
# Local Variables :
# pymacs-auto-reload : t
# End :
| UTF-8 | Python | false | false | 2,728 | py | 296 | pycomplete.py | 80 | 0.592742 | 0.591276 | 0 | 95 | 27.705263 | 76 |
Lawrr/mal-utilities | 2,843,268,381,138 | 9445586e4da895770e64ebb4c7e5a884155e28b5 | 79dca79d559ddf6a8f22aa0dd2dfaea6bb897f33 | /listsorter/migrations/0001_initial.py | 978a59a429bd795d9da3459c79373dd4221a2998 | []
| no_license | https://github.com/Lawrr/mal-utilities | 88450b657699b14ae2a7d65432ffc0f0e5222543 | 1996d6f77666c860738b5467ba1169dd97270c48 | refs/heads/master | 2018-01-08T02:30:48.001924 | 2016-04-10T03:07:52 | 2016-04-10T03:07:52 | 50,173,373 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Anime',
fields=[
('title_id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.TextField()),
('score', models.FloatField()),
],
),
migrations.CreateModel(
name='Manga',
fields=[
('title_id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.TextField()),
('score', models.FloatField()),
],
),
]
| UTF-8 | Python | false | false | 771 | py | 16 | 0001_initial.py | 9 | 0.503243 | 0.501946 | 0 | 29 | 25.586207 | 85 |
lly123999/Transport-Mode-GPS-CNN | 17,368,847,776,867 | b0e107b17b145549ad841c33060feafe2ee47c4e | 933dca697a09e0f6ee3f34d9cebe0194a8053ef1 | /Instance_Creation.py | 83ade972fb496b65fb46cf54deb2724cb810abc6 | [
"MIT"
]
| permissive | https://github.com/lly123999/Transport-Mode-GPS-CNN | 0017630ebaa6c18fbf337f276f2050504f630422 | 8db5f83f593a004a7af280bfd6668cc6032e8338 | refs/heads/master | 2020-04-26T22:45:10.619674 | 2018-08-28T13:13:51 | 2018-08-28T13:13:51 | 173,882,113 | 1 | 0 | MIT | true | 2019-03-05T05:45:13 | 2019-03-05T05:45:12 | 2019-02-21T15:04:18 | 2018-08-28T13:13:51 | 41 | 0 | 0 | 0 | null | false | null | import numpy as np
import pickle
from geopy.distance import vincenty
import os
import math
A = math.degrees(-math.pi)
# Change the current working directory to the location of 'Combined Trajectory_Label_Geolife' folder.
filename = '../Combined Trajectory_Label_Geolife/Revised_Trajectory_Label_Array.pickle'
with open('Revised_Trajectory_Label_Array.pickle', 'rb') as f:
Trajectory_Label_Array = pickle.load(f)
# Identify the Speed and Acceleration limit
SpeedLimit = {0: 7, 1: 12, 2: 120./3.6, 3: 180./3.6, 4: 120/3.6}
# Online sources for Acc: walk: 1.5 Train 1.15, bus. 1.25 (.2), bike: 2.6, train:1.5
AccLimit = {0: 3, 1: 3, 2: 2, 3: 10, 4: 3}
# Choose based on figure visualization for JerkP:{0: 4, 1: 4, 2: 4, 3: 11, 4: 6}
JerkLimitP = {0: 40, 1: 40, 2: 40, 3: 110, 4: 60}
# Choose based on figure visualization for JerkN:{0: -4, 1: -4, 2: -2.5, 3: -11, 4: -4}
JerkLimitN = {0: -40, 1: -40, 2: -200.5, 3: -110, 4: -40}
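# The numeric labels are assumed to follow the repo's mode order:
# 0 = walk, 1 = bike, 2 = bus, 3 = driving, 4 = train.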
# Total_Instance_InSequence checks the number of GPS points for each instance in all users
Total_Instance_InSequence = []
# Total_Motion_Instance: each element is an array include the four channels for each instance
Total_Motion_Instance = []
# Save the 4 channels for each user separately
Total_RelativeDistance = []
Total_Speed = []
Total_Acceleration = []
Total_Jerk = []
Total_BearingRate = []
Total_Label = []
Total_InstanceNumber = []
Total_Outlier = []
Total_Descriptive_Stat = []
Total_Delta_Time = []
Total_Velocity_Change = []
# Count the number of times that NoOfOutlier happens
NoOfOutlier = 0
for z in range(len(Trajectory_Label_Array)):
Descriptive_Stat = []
Data = Trajectory_Label_Array[z]
if len(Data) == 0:
continue
Shape = np.shape(Trajectory_Label_Array[z])
# InstanceNumber: Break a user's trajectory to instances. Count number of GPS points for each instance
delta_time = []
tempSpeed = []
for i in range(len(Data) - 1):
delta_time.append((Data[i+1, 2] - Data[i, 2]) * 24. * 3600)
if delta_time[i] == 0:
# Prevent to generate infinite speed. So use a very short time = 0.1 seconds.
delta_time[i] = 0.1
A = (Data[i, 0], Data[i, 1])
B = (Data[i + 1, 0], Data[i + 1, 1])
tempSpeed.append(vincenty(A, B).meters/delta_time[i])
    # The last point has no successor, so assume delta_time equals the typical
    # sampling interval from the user guide (3 s) and repeat the last computed speed.
delta_time.append(3)
tempSpeed.append(tempSpeed[len(tempSpeed) - 1])
# InstanceNumber: indicate the length of each instance
InstanceNumber = []
# Label: For each created instance, we need only one mode to be assigned to.
# Remove the instance with less than 10 GPS points. Break the whole user's trajectory into trips with min_trip
# Also break the instance with more than threshold GPS points into more instances
Data_All_Instance = [] # Each of its element is a list that shows the data for each instance (lat, long, time)
Label = []
min_trip_time = 20 * 60 # 20 minutes equal to 1200 seconds
    threshold = 200  # fixed number of GPS points per instance
i = 0
while i <= (len(Data) - 1):
No = 0
ModeType = Data[i, 3]
Counter = 0
        # index: collects the row indices of the instance being built; used later to slice out its data
index = []
# First, we always have an instance with one GPS point.
while i <= (len(Data) - 1) and Data[i, 3] == ModeType and Counter < threshold:
if delta_time[i] <= min_trip_time:
Counter += 1
index.append(i)
i += 1
else:
Counter += 1
index.append(i)
i += 1
break
        if Counter >= 10:  # keep only instances that have at least 10 GPS points
InstanceNumber.append(Counter)
Data_For_Instance = [Data[i, 0:3] for i in index]
Data_For_Instance = np.array(Data_For_Instance, dtype=float)
Data_All_Instance.append(Data_For_Instance)
Label.append(ModeType)
if len(InstanceNumber) == 0:
continue
Label = [int(i) for i in Label]
RelativeDistance = [[] for _ in range(len(InstanceNumber))]
Speed = [[] for _ in range(len(InstanceNumber))]
Acceleration = [[] for _ in range(len(InstanceNumber))]
Jerk = [[] for _ in range(len(InstanceNumber))]
Bearing = [[] for _ in range(len(InstanceNumber))]
BearingRate = [[] for _ in range(len(InstanceNumber))]
Delta_Time = [[] for _ in range(len(InstanceNumber))]
Velocity_Change = [[] for _ in range(len(InstanceNumber))]
User_outlier = []
# Create channels for every instance (k) of the current user
for k in range(len(InstanceNumber)):
Data = Data_All_Instance[k]
# Temp_RD, Temp_SP are temporary relative distance and speed before checking for their length
Temp_Speed = []
Temp_RD = []
outlier = []
for i in range(len(Data) - 1):
A = (Data[i, 0], Data[i, 1])
B = (Data[i+1, 0], Data[i+1, 1])
Temp_RD.append(vincenty(A, B).meters)
Delta_Time[k].append((Data[i + 1, 2] - Data[i, 2]) * 24. * 3600 + 1) # Add one second to prevent zero time
S = Temp_RD[i] / Delta_Time[k][i]
if S > SpeedLimit[Label[k]] or S < 0:
outlier.append(i)
Temp_Speed.append(S)
            # Standard forward-bearing formula: convert lat/lon to radians first,
            # then form the great-circle bearing components.
            lat1, lat2 = math.radians(Data[i, 0]), math.radians(Data[i + 1, 0])
            lon1, lon2 = math.radians(Data[i, 1]), math.radians(Data[i + 1, 1])
            y = math.sin(lon2 - lon1) * math.cos(lat2)
            x = math.cos(lat1) * math.sin(lat2) - \
                math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
# Convert radian from -pi to pi to [0, 360] degree
b = (math.atan2(y, x) * 180. / math.pi + 360) % 360
Bearing[k].append(b)
# End of operation of relative distance, speed, and bearing for one instance
# Now remove all outliers (exceeding max speed) in the current instance
Temp_Speed = [i for j, i in enumerate(Temp_Speed) if j not in outlier]
if len(Temp_Speed) < 10:
InstanceNumber[k] = 0
NoOfOutlier += 1
continue
Speed[k] = Temp_Speed
Speed[k].append(Speed[k][-1])
# Now remove all outlier instances, where their speed exceeds the max speed.
# Then, remove their corresponding points from other channels.
RelativeDistance[k] = Temp_RD
RelativeDistance[k] = [i for j, i in enumerate(RelativeDistance[k]) if j not in outlier]
RelativeDistance[k].append(RelativeDistance[k][-1])
Bearing[k] = [i for j, i in enumerate(Bearing[k]) if j not in outlier]
Bearing[k].append(Bearing[k][-1])
Delta_Time[k] = [i for j, i in enumerate(Delta_Time[k]) if j not in outlier]
InstanceNumber[k] = InstanceNumber[k] - len(outlier)
# Now remove all outlier instances, where their acceleration exceeds the max acceleration.
# Then, remove their corresponding points from other channels.
Temp_ACC = []
outlier = []
for i in range(len(Speed[k]) - 1):
DeltaSpeed = Speed[k][i+1] - Speed[k][i]
ACC = DeltaSpeed/Delta_Time[k][i]
if abs(ACC) > AccLimit[Label[k]]:
outlier.append(i)
Temp_ACC.append(ACC)
Temp_ACC = [i for j, i in enumerate(Temp_ACC) if j not in outlier]
if len(Temp_ACC) < 10:
InstanceNumber[k] = 0
NoOfOutlier += 1
continue
Acceleration[k] = Temp_ACC
Acceleration[k].append(Acceleration[k][-1])
Speed[k] = [i for j, i in enumerate(Speed[k]) if j not in outlier]
RelativeDistance[k] = [i for j, i in enumerate(RelativeDistance[k]) if j not in outlier]
Bearing[k] = [i for j, i in enumerate(Bearing[k]) if j not in outlier]
Delta_Time[k] = [i for j, i in enumerate(Delta_Time[k]) if j not in outlier]
InstanceNumber[k] = InstanceNumber[k] - len(outlier)
        # Now remove all outlier instances, where their jerk exceeds the max jerk.
# Then, remove their corresponding points from other channels.
Temp_J = []
outlier = []
for i in range(len(Acceleration[k]) - 1):
Diff = Acceleration[k][i+1] - Acceleration[k][i]
J = Diff/Delta_Time[k][i]
Temp_J.append(J)
Temp_J = [i for j, i in enumerate(Temp_J) if j not in outlier]
if len(Temp_J) < 10:
InstanceNumber[k] = 0
NoOfOutlier += 1
continue
Jerk[k] = Temp_J
Jerk[k].append(Jerk[k][-1])
Speed[k] = [i for j, i in enumerate(Speed[k]) if j not in outlier]
Acceleration[k] = [i for j, i in enumerate(Acceleration[k]) if j not in outlier]
RelativeDistance[k] = [i for j, i in enumerate(RelativeDistance[k]) if j not in outlier]
Bearing[k] = [i for j, i in enumerate(Bearing[k]) if j not in outlier]
Delta_Time[k] = [i for j, i in enumerate(Delta_Time[k]) if j not in outlier]
InstanceNumber[k] = InstanceNumber[k] - len(outlier)
# End of Jerk outlier detection.
        # Compute Bearing Rate from Bearing, and Velocity Change from Speed
for i in range(len(Bearing[k]) - 1):
Diff = abs(Bearing[k][i+1] - Bearing[k][i])
BearingRate[k].append(Diff)
BearingRate[k].append(BearingRate[k][-1])
for i in range(len(Speed[k]) - 1):
Diff = abs(Speed[k][i+1] - Speed[k][i])
if Speed[k][i] != 0:
Velocity_Change[k].append(Diff/Speed[k][i])
else:
Velocity_Change[k].append(1)
Velocity_Change[k].append(Velocity_Change[k][-1])
# Now we apply the smoothing filter on each instance:
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
# Smoothing process
RelativeDistance[k] = savitzky_golay(np.array(RelativeDistance[k]), 9, 3)
Speed[k] = savitzky_golay(np.array(Speed[k]), 9, 3)
Acceleration[k] = savitzky_golay(np.array(Acceleration[k]), 9, 3)
Jerk[k] = savitzky_golay(np.array(Jerk[k]), 9, 3)
BearingRate[k] = savitzky_golay(np.array(BearingRate[k]), 9, 3)
Total_RelativeDistance.append(RelativeDistance)
Total_Speed.append(Speed)
Total_Acceleration.append(Acceleration)
Total_Jerk.append(Jerk)
Total_BearingRate.append(BearingRate)
Total_Delta_Time.append(Delta_Time)
Total_Velocity_Change.append(Velocity_Change)
Total_Label.append(Label)
Total_InstanceNumber.append(InstanceNumber)
Total_Outlier.append(User_outlier)
Total_Instance_InSequence = Total_Instance_InSequence + InstanceNumber
with open('Revised_InstanceCreation+NoJerkOutlier+Smoothing.pickle', 'wb') as f:
pickle.dump([Total_RelativeDistance, Total_Speed, Total_Acceleration, Total_Jerk, Total_BearingRate, Total_Label,
Total_InstanceNumber, Total_Instance_InSequence, Total_Delta_Time, Total_Velocity_Change], f)
| UTF-8 | Python | false | false | 15,186 | py | 9 | Instance_Creation.py | 8 | 0.58172 | 0.560582 | 0 | 318 | 45.754717 | 119 |
mohamed-aziz/cryptopals | 884,763,310,640 | 4c943d3116286bbe1e114b5da43a054fa5c60c7d | 4b653379f3d9a3493004605df2ccf05df188c6c2 | /set4/__init__.py | b63e1c589e435a53cb63efe4759c48c8bab0fe28 | []
| no_license | https://github.com/mohamed-aziz/cryptopals | 076755cc75afbe61ade9b76e98dc47b923ebf4ce | 71a340c1645a1a3466391fb997982f9cfd7437bf | refs/heads/master | 2021-05-07T08:56:18.964338 | 2019-12-07T20:09:59 | 2019-12-07T20:09:59 | 109,444,673 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .ch25 import edit_ciphertext, break_edit_ciphertext
from .ch26 import aes_ctr_break_bitflipping, aes_ctr_encryption_oracle
from .ch27 import encryption_oracle, Not7BitAscii, decryption_oracle, break_cbc_oracle
from .ch28 import sha1, sha1_mac | UTF-8 | Python | false | false | 248 | py | 39 | __init__.py | 37 | 0.814516 | 0.770161 | 0 | 4 | 61 | 86 |
muhammed-salman/dynamicdropdown-scrapper | 13,322,988,555,203 | d35c166b2702fd17d6121250316aaf9d4faaa8af | cd873e072e2418050205e637d92562411cebc12f | /scrapper2.py | b94769e60818930bdc7a69e2b77b4d4c0382fd28 | []
| no_license | https://github.com/muhammed-salman/dynamicdropdown-scrapper | a5ca8abbcb082ac5ff256f062fde8f427c098360 | ec3f29ce7880ea37ee5abd482342c64ad67bffdd | refs/heads/master | 2020-03-25T00:34:52.436334 | 2018-08-02T18:13:30 | 2018-08-02T18:13:30 | 143,193,445 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import signal
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import StaleElementReferenceException
def sigint(signal, frame):
sys.exit(0)
def make_waitfor_elem_updated_predicate(driver, waitfor_elem_xpath):
elem = driver.find_element_by_xpath(waitfor_elem_xpath)
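    # Once the page refreshes the dependent dropdown, this element reference goes
    # stale; the StaleElementReferenceException caught below is the update signal.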
def elem_updated(driver):
try:
elem.text
except StaleElementReferenceException:
return True
except:
pass
return False
return lambda driver: elem_updated(driver)
class Scraper(object):
def __init__(self):
self.url = "https://panchayatelection.maharashtra.gov.in/MasterSearch.aspx"
self.driver = webdriver.Firefox(executable_path = '/usr/local/bin/geckodriver')
self.driver.set_window_size(1120, 550)
def get_select(self, xpath):
select_elem = self.driver.find_element_by_xpath(xpath)
select = Select(select_elem)
return select
def select_option(self, xpath, value, waitfor_elem_xpath=None):
if waitfor_elem_xpath:
func = make_waitfor_elem_updated_predicate(
self.driver,
waitfor_elem_xpath
)
select = self.get_select(xpath)
select.select_by_value(value)
if waitfor_elem_xpath:
wait = WebDriverWait(self.driver, 10)
wait.until(func)
return self.get_select(xpath)
def make_select_option_iterator(self, xpath, waitfor_elem_xpath):
def next_option(xpath, waitfor_elem_xpath):
select = self.get_select(xpath)
select_option_values = [
'%s' % o.get_attribute('value')
for o
in select.options
if o.text != '-Select-'
]
for v in select_option_values:
select = self.select_option(xpath, v, waitfor_elem_xpath)
yield select.first_selected_option.text
return lambda: next_option(xpath, waitfor_elem_xpath)
def load_page(self):
self.driver.get(self.url)
def page_loaded(driver):
path = '//select[@id="ContentPlaceHolder1_SearchControl1_DDLLocalBody"]'
return driver.find_element_by_xpath(path)
wait = WebDriverWait(self.driver, 10)
wait.until(page_loaded)
def scrape(self):
states = self.make_select_option_iterator(
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLLocalBody"]',
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLDivision"]'
)
districts = self.make_select_option_iterator(
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLDivision"]',
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLDistrict"]'
)
projects = self.make_select_option_iterator(
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLDistrict"]',
'//select[@id="ContentPlaceHolder1_SearchControl1_DDLMunicipalcorporation"]'
)
self.load_page()
for state in states():
print(state)
for district in districts():
print(2*' ', district)
for project in projects():
print(4*' ', project)
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint)
scraper = Scraper()
scraper.scrape()
| UTF-8 | Python | false | false | 3,514 | py | 2 | scrapper2.py | 2 | 0.608993 | 0.601024 | 0 | 108 | 31.527778 | 88 |
jsphweid/fialkaFlicker | 8,744,553,453,417 | cd33330c9b138124fe08b132d8aa16da1ad4449f | 9991a55b947ae9fe4011c3320e8963d94bcc34f3 | /python/noise.py | c8d61521dd6c3b62e557e17cb2648a7bb98530f7 | [
"MIT"
]
| permissive | https://github.com/jsphweid/fialkaFlicker | dcb463a88c9019c41ee9a0d95ea6b2951dced51e | b8623120e945588d36dffb334d7bf74b1e4fbd38 | refs/heads/master | 2020-07-02T09:51:12.637397 | 2018-12-11T23:53:25 | 2018-12-11T23:53:25 | 74,312,488 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from cv2 import VideoWriter, VideoWriter_fourcc
width = 1280
height = 720
FPS = 24
seconds = 10
# self._cap = VideoCapture(0)
# self._out = VideoWriter(self._name, self._fourcc, 20.0, (640, 480))
def make_colored_image(color):
img = np.zeros((height, width, 3), np.uint8)
img[:] = color
return img
# B G R
blue = make_colored_image((255, 0, 0))
red = make_colored_image((0, 0, 255))
frames = []
for i in range(60):
img = blue if i % 2 == 0 else red
frames.append(img)
fourcc = VideoWriter_fourcc(*'MP4V')
video = VideoWriter('./noise.mp4', fourcc, float(FPS), (width, height))
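# 60 frames at 24 FPS yield a 2.5-second clip of alternating blue/red frames.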
# for _ in range(FPS*seconds):
# frame = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
# frame = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
# frame[:] = (255, 0, 0)
# video.write(frame)
for frame in frames:
video.write(frame)
video.release()
| UTF-8 | Python | false | false | 908 | py | 26 | noise.py | 10 | 0.647577 | 0.584802 | 0 | 40 | 21.7 | 71 |
raspibo/Livello1 | 5,471,788,343,352 | e806190243a1a0c43a6ff71b1e5c8062403e8b3e | 5b01e4e8133012333a62ebb0e1a18490d62ed819 | /var/www/cgi-bin/setsVals2csv_search_date.py | 18cb72719955836bae5bc7aec13eaa02bcab5d96 | [
"MIT"
]
| permissive | https://github.com/raspibo/Livello1 | cf33c68c3b8d5496b78d562a57d84ab2745656a5 | 9f1ba10f2496eb0d4c40685336cc7b8768f4a767 | refs/heads/master | 2022-03-16T01:52:46.588937 | 2022-02-27T14:48:12 | 2022-02-27T14:48:12 | 56,574,092 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
The MIT License (MIT)
Copyright (c) 2018 davide
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
""" setsVals2csv_search_date.py
Prende i dati dalla chiave Redis (sets:*:Valori) passata come argomento all'avvio,
cerca fra i parametri si start e stop (data minore, data maggiore),
elabora e ricrea il file .csv
"""
import os,time,json,redis,sys
import mjl, mhl, flt # Not all of them are needed; by now I import them out of habit ;)
DirBase="/var/www" # Better to specify the absolute path
ConfigFile=DirBase+"/conf/config.json"
# Open the Redis database with the helper from my library
MyDB = flt.OpenDBFile(ConfigFile)
# Check whether more than one argument was given or help was requested
if len(sys.argv) != 4 or sys.argv[1] == "-h":
    print ("\n\tUsage: %s <RedisKey> <Start> <Stop>" % sys.argv[0])
    print ("""
    This program takes a Redis group key (sets:*), processes it,
    and creates the corresponding .csv file
    """)
exit()
if len(sys.argv) == 4 and MyDB.exists(sys.argv[1]):
    # Set the variables for convenience and program clarity
    KeyVal=sys.argv[1]
    Key=KeyVal[:-7] # Strips ":Valori"
    KeySort=flt.DecodeList(MyDB.sort(Key,alpha=1)) # Must always be kept sorted, otherwise the data will not line up; it is a string, hence "alpha=1"
print ("Input sets: \t\t", KeyVal)
print ("Key: \t\t\t", Key)
print ("Key contents: \t\t", KeySort)
    # I used the second and third fields (sets:NAME:ID) because there could be duplicates among alarms, charts and... other things (if any)
FileName=DirBase+"/"+Key.split(":")[1]+Key.split(":")[2]+".csv"
if os.path.isfile(FileName):
print ("Deleting: \t\t\"%s\"" % FileName)
        os.remove(FileName) # Delete the file if it exists
    # Build the header: Date, Description1, Description2, .., DescriptionN
IntestazioneCSV="Data"
for i in range (len(KeySort)):
Descrizione="none" # Metto qualcosa nel caso mancasse la descrizione
if MyDB.hexists(KeySort[i],"Descrizione"):
Descrizione=flt.Decode(MyDB.hget(KeySort[i],"Descrizione"))
IntestazioneCSV=IntestazioneCSV+","+Descrizione
FileTemp = open(FileName,"w")
    FileTemp.write(IntestazioneCSV+"\n") # Write the header
    # For the whole length of the "Valori" list, read each value and write it to the file
for i in range (MyDB.llen(KeyVal)):
ValoreCSV=flt.Decode(MyDB.lindex(KeyVal,i))
if sys.argv[2] < ValoreCSV < sys.argv[3] :
FileTemp.write(ValoreCSV+"\n")
FileTemp.close()
print ("[re]Generated file: \t\"{}\"".format(FileName))
elif not MyDB.exists(sys.argv[1]):
print ("Chiave inesistente", sys.argv[1])
| UTF-8 | Python | false | false | 3,780 | py | 31 | setsVals2csv_search_date.py | 28 | 0.689418 | 0.683333 | 0 | 80 | 46.25 | 153 |
tanay0nSpark/evolveML | 9,543,417,364,179 | 3643acd350596222294319d721e09464afa0da65 | 08c251243a166da41cf91f198bc744ee25f96352 | /kaggle/facebookrecruit/facebookAnalysis.py | bf797b7df317dc34198dc58ddc047ca7cad48954 | []
| no_license | https://github.com/tanay0nSpark/evolveML | afe22e09ecf2668a42c68e3947c72c81f48a30eb | d7b7f0e13f4d1ba95148af94461cb180d8a10043 | refs/heads/master | 2021-06-01T14:39:16.116459 | 2016-06-19T18:16:14 | 2016-06-19T18:16:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import graphlab
import threading
__author__ = 'abhishekchoudhary'
graphlab.canvas.set_target('browser')
sf = graphlab.SFrame.read_csv('/Users/abhishekchoudhary/Work/python/facebook/trainingset.csv', header=True)
sf.show()
| UTF-8 | Python | false | false | 223 | py | 66 | facebookAnalysis.py | 50 | 0.784753 | 0.784753 | 0 | 7 | 30.857143 | 107 |
bhamburg/CIS_626 | 17,609,365,941,752 | f381e3f9529c6ab52f9640ab5b3dcc2be251dc03 | d31b951902843af0a719fe291c70ec3a5741a96b | /Week2/exercise5_14.py | 22f5aca95c65db22530e6cbab51e8049a750f916 | []
| no_license | https://github.com/bhamburg/CIS_626 | ff3298dabb46fc13bb0fbad831c8b3a6f2644208 | b4d84a664a2228d07036c3d119fa94cd894bb241 | refs/heads/master | 2020-03-29T20:07:01.143791 | 2014-03-06T01:36:29 | 2014-03-06T01:36:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Find the smallest n such that n^2 > 12,000
# Author: Brian Hamburg
#define variables
n = 0
result = 0
# loop!
while result <= 12000:
result = n ** 2
n += 1
# print result
print("n = " + str(n - 1))
| UTF-8 | Python | false | false | 225 | py | 29 | exercise5_14.py | 29 | 0.555556 | 0.484444 | 0 | 14 | 14.071429 | 44 |
snigdha-rao/Competitive-Programming | 15,264,313,796,415 | 867527d2376c8672383ad42141db24dd4744f9e5 | 5907605a52a770783d1bdbe836c93d2bbf8649a5 | /week 3/Day-5/urlshortner.py | 196e41fef9c960fc3cdda8354765d4f2140fc4a7 | []
| no_license | https://github.com/snigdha-rao/Competitive-Programming | 23e2bd99712029f526d11f6b65a95e01d190a895 | 4dc4ea8c83626b913ac17d61736b04b58b67ed44 | refs/heads/master | 2020-03-21T19:47:04.928513 | 2018-07-21T09:28:17 | 2018-07-21T09:28:17 | 138,969,704 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def shortlink(request):
    if request['method'] != 'POST':
return Response(501) # HTTP 501 NOT IMPLEMENTED
destination = request['data']['destination']
if 'slug' in request['data']:
# If they included a slug, use that
slug = request['data']['slug']
else:
# Else, make them one
slug = generate_random_slug()
DB.insert({'slug': slug, 'destination': destination})
response_body = { 'slug': slug }
return Response(200, json.dumps(response_body)) # HTTP 200 OK
def redirect(request):
destination = DB.get({'slug': request['path']})['destination']
return Response(302, destination)
def generate_random_slug():
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
num_chars = 7
return ''.join([random.choice(alphabet) for _ in xrange(num_chars)]) | UTF-8 | Python | false | false | 882 | py | 7 | urlshortner.py | 7 | 0.636054 | 0.606576 | 0 | 25 | 33.36 | 79 |
stepik/SimplePyScripts | 6,141,803,280,997 | 0f8938f44b39e90051e5f915c00a92ca5dd0c323 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /telegram_bot_examples/reminder/main.py | 843b4abdd1cd53fc4203ee3835f2e37f62b36294 | [
"CC-BY-4.0"
]
| permissive | https://github.com/stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
from logging import Logger
import os
from threading import Thread
import time
import sys
import re
# pip install python-telegram-bot
from telegram import Update, Bot
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters, CallbackContext
from telegram.ext.dispatcher import run_async
sys.path.append('..')
import config
from common import get_logger, log_func, reply_error
from db import Reminder, User, Chat
from utils import parse_command, get_pretty_datetime
def do_checking_reminders(log: Logger, bot: Bot):
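    # Poll the DB once per second; every unsent reminder whose finish_time has
    # passed gets an hourglass reply in its original chat and is marked as sent.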
while True:
try:
expected_time = DT.datetime.now() - DT.timedelta(seconds=1)
query = (
Reminder
.select()
.where(
(Reminder.is_sent == False)
& (Reminder.finish_time <= expected_time)
)
.order_by(Reminder.finish_time)
)
for reminder in query:
log.info('Send reminder: %s', reminder)
bot.send_message(
chat_id=reminder.chat_id, text='⌛',
reply_to_message_id=reminder.message_id
)
reminder.is_sent = True
reminder.save()
except:
log.exception('')
finally:
time.sleep(1)
log = get_logger(__file__)
@run_async
@log_func(log)
def on_start(update: Update, context: CallbackContext):
    update.message.reply_text(
        'Type something like: "напомни через 1 час" ("remind me in 1 hour").\n'
        'To get the list of your reminders, type: "список" ("list")'
    )
@run_async
@log_func(log)
def on_request(update: Update, context: CallbackContext):
message = update.message
command = message.text
log.debug(f'Command: {command!r}')
finish_time = parse_command(command)
if not finish_time:
        message.reply_text('Could not parse the command!')
return
Reminder.create(
message_id=message.message_id,
command=command,
finish_time=finish_time,
user=User.get_from(update.effective_user),
chat=Chat.get_from(update.effective_chat),
)
    message.reply_text(f'Reminder set for {get_pretty_datetime(finish_time)}')
@run_async
@log_func(log)
def on_get_reminders(update: Update, context: CallbackContext):
message = update.message
chat = update.effective_chat
user = update.effective_user
query = (
Reminder
.select()
.where(
(Reminder.chat_id == chat.id)
& (Reminder.user_id == user.id)
& (Reminder.is_sent == False)
)
.order_by(Reminder.finish_time)
)
number = query.count()
if number:
        text = f'Reminders ({number}):\n'
for x in query:
text += ' ' + get_pretty_datetime(x.finish_time) + '\n'
else:
        text = 'No reminders'
message.reply_text(text)
def on_error(update: Update, context: CallbackContext):
reply_error(log, update, context)
def main():
cpu_count = os.cpu_count()
workers = cpu_count
log.debug('System: CPU_COUNT=%s, WORKERS=%s', cpu_count, workers)
log.debug('Start')
# Create the EventHandler and pass it your bot's token.
updater = Updater(
config.TOKEN,
workers=workers,
use_context=True
)
    # TODO: if the bot restarts after a crash, a duplicate checker thread may be created
thread = Thread(target=do_checking_reminders, args=[log, updater.bot])
thread.start()
# Get the dispatcher to register handlers
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', on_start))
dp.add_handler(MessageHandler(Filters.regex('(?i)^список$'), on_get_reminders))
dp.add_handler(MessageHandler(Filters.text, on_request))
# Handle all errors
dp.add_error_handler(on_error)
# Start the Bot
updater.start_polling()
# Run the bot until the you presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
log.debug('Finish')
if __name__ == '__main__':
while True:
try:
main()
except:
log.exception('')
timeout = 15
log.info(f'Restarting the bot after {timeout} seconds')
time.sleep(timeout)
| UTF-8 | Python | false | false | 4,717 | py | 276 | main.py | 269 | 0.606813 | 0.605275 | 0 | 178 | 24.561798 | 90 |
steindevos/project-final-django | 13,383,118,116,613 | 27a3802877ea32c442cb623fd1905e011aa45cc8 | e4fbb8940e145924ebb1f9b3412ff278c6c85968 | /checkout/migrations/0005_auto_20180829_1251.py | efe73404bd62af84d48fdcb66df2d34d8df23a66 | []
| no_license | https://github.com/steindevos/project-final-django | 72ecf8df58606e45b4251a949c9b7a572d263851 | 9b2f93b28284e10b654fc9cc07c49213b040921f | refs/heads/master | 2018-11-14T11:55:20.390707 | 2018-09-18T18:52:47 | 2018-09-18T18:52:47 | 145,727,075 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.6 on 2018-08-29 12:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('checkout', '0004_auto_20180829_0731'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='country',
),
migrations.RemoveField(
model_name='order',
name='county',
),
migrations.RemoveField(
model_name='order',
name='full_name',
),
migrations.RemoveField(
model_name='order',
name='phone_number',
),
migrations.RemoveField(
model_name='order',
name='postcode',
),
migrations.RemoveField(
model_name='order',
name='street_address_1',
),
migrations.RemoveField(
model_name='order',
name='street_address_2',
),
migrations.RemoveField(
model_name='order',
name='town_or_city',
),
]
| UTF-8 | Python | false | false | 1,080 | py | 46 | 0005_auto_20180829_1251.py | 31 | 0.49537 | 0.464815 | 0 | 45 | 23 | 48 |
kevinjcliao/clubslist | 9,251,359,566,914 | 404921c813279170752734a810c6d02a3c4663d3 | 2357b6d564b9c6f0ed02d9140f01ff85f5f65037 | /clubs/views.py | f5f286d2b69575b209425d528ecbfc3fe5a1da49 | []
| no_license | https://github.com/kevinjcliao/clubslist | db5e3d4a0b66a3ac5f68571d4330d673d3512cab | 0188b192105062cd32683cc7c9d73477f844cec0 | refs/heads/master | 2021-01-10T08:22:53.948566 | 2016-03-10T18:16:04 | 2016-03-10T18:16:04 | 52,030,086 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.template import loader
from .models import Club
def club_detail(request, club_name, club_id):
try:
club = Club.objects.get(pk=club_id)
except Club.DoesNotExist:
raise Http404("Club does not exist.")
return HttpResponse("You're looking at club %s" % club.description)
def hello_world(request):
clubs_categories = Club.CATEGORY_CHOICES
categorized_club_list = []
clubs_size = 0
there_are_clubs = False
    for x in range(0, len(clubs_categories)):
category_id = clubs_categories[x][0]
clubs_in_category = Club.objects.all().filter(category=category_id).order_by('name')
categorized_club_list.append([clubs_categories[x][1], clubs_in_category])
clubs_size += len(clubs_in_category)
there_are_clubs = clubs_size > 0
template = loader.get_template('clubs/index.html')
context = {
'clubs_list': categorized_club_list,
'clubs_size': clubs_size,
'there_are_clubs': there_are_clubs
}
return HttpResponse(template.render(context, request))
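# Illustrative only (an assumption, not part of this app): a matching urls.py
# for these views might look like the following, in Django 1.x-era syntax.
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^$', views.hello_world, name='index'),
#       url(r'^club/(?P<club_name>[\w-]+)/(?P<club_id>[0-9]+)/$',
#           views.club_detail, name='club_detail'),
#   ]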
| UTF-8 | Python | false | false | 1,202 | py | 9 | views.py | 7 | 0.673877 | 0.667221 | 0 | 36 | 32.388889 | 92 |
nikitastasik/test | 4,372,276,751,152 | e0db11da9cf9ef606b25cd83800eb0de45b2784a | cbf4bdff3f9522e7aa4a8f95c0cdc6675e8cc531 | /NOD.py | f3c2a07bed8d6f8f31708fe084a6344a459042e5 | []
| no_license | https://github.com/nikitastasik/test | a4fa8b71262ce988b2d30e0e3001cfce1b4b27b3 | 7bdbd7d1d5232ff2f360e0cc69649bb52d4c5075 | refs/heads/main | 2023-07-13T02:16:50.083801 | 2021-08-25T09:48:40 | 2021-08-25T09:48:40 | 399,754,032 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # def fib():
# n, m = int(input()), int(input())
# if n == 1:
# return 1
# for i in range(2, n+1):
# a, b = 0, 1
# a, b = b, (a+b)
# if i == m:
# f_m = b
# f_n = b
#
# if f_n // f_m == 1:
# return 1
# elif f_n // f_m > 1:
# ost = f_n // f_m
# return f_n - (ost * f_m)
# print(fib())
import random
def test(gcd, n_iter=100):
    for i in range(n_iter):
c = random.randint(0, 1024)
a = c * random.randint(0, 128)
b = c * random.randint(0, 128)
assert gcd(a, a) == gcd(a, 0) == a
assert gcd(b, b) == gcd(b, 0) == b
assert gcd(a, 1) == gcd(b, 1) == 1
d = gcd(a, b)
assert a % d == b % d == 0
def gcd1(a, b):
assert a >= 0 and b >= 0
for d in reversed(range(1, max(1, b) + 1)):
        if a % d == b % d == 0:
return d
def gcd2(a, b):
while a and b:
if a >= b:
a %= b
else:
b %= a
return max(a, b)
def gcd3(a, b):
assert a >= 0 and b >= 0
if a == 0 or b == 0:
return max(a, b)
elif a >= b:
return gcd3(a % b, b)
else:
return gcd3(a, b % a)
def gcd4(a, b):
assert a >= 0 and b >= 0
if a == 0 or b == 0:
return max(a, b)
print(f'a = {a}, b = {b}')
return gcd4(b % a, a)
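# Worked trace (illustrative): gcd4(24, 9) recurses as
#   gcd4(24, 9) -> gcd4(9 % 24, 24) = gcd4(9, 24)
#   gcd4(9, 24) -> gcd4(24 % 9, 9)  = gcd4(6, 9)
#   gcd4(6, 9)  -> gcd4(9 % 6, 6)   = gcd4(3, 6)
#   gcd4(3, 6)  -> gcd4(6 % 3, 3)   = gcd4(0, 3) -> max(0, 3) = 3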
print(gcd4(24, 9)) | UTF-8 | Python | false | false | 1,433 | py | 22 | NOD.py | 22 | 0.38381 | 0.344033 | 0 | 75 | 18.12 | 47 |
phreak1990/dom_xss | 12,000,138,636,747 | f7c126479b8380e9cdd31984740add10552e756f | de6ca0daa302569f464b0a7897f15c8b8516d32b | /lib/file_functions.py | 4efb79ae8d9f12005c7a04d821bd57b5b68d6f80 | []
| no_license | https://github.com/phreak1990/dom_xss | 93f58fe277cff790e92e0a0164ab0adefe7a9709 | 86980c15832c51ad8ce15616db27d46f5cf9c57b | refs/heads/master | 2021-01-10T06:17:45.079680 | 2015-11-17T06:28:22 | 2015-11-17T06:28:22 | 46,326,544 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from os import path
from sys import argv
import sys
class FileFunctions:
def __init__(self):
pass
####################################################################################
def readFile(self, file_path):
if path.exists (file_path):
fo = open(file_path,"r")
data = fo.read()
fo.close()
return data
else:
return None
####################################################################################
def writeFile(self, data, file_path):
fo = open(file_path,"w")
try:
fo.write(data)
except UnicodeEncodeError:
fo.write(data.encode('utf-8'))
fo.close()
####################################################################################
def appendFile(self,data, file_path):
if path.exists (file_path):
fo = open(file_path,"a+")
fo.write(data)
fo.close()
####################################################################################
def currentPath(self):
pathname = path.dirname(argv[0])
full_path = path.abspath(pathname)
return full_path
####################################################################################
def writeArrayToFile(self, array, file_path):
if not path.exists (file_path):
fo = open(file_path ,"w")
save_stdout = sys.stdout
sys.stdout = fo
for line in array:
print line
fo.close()
sys.stdout = save_stdout
####################################################################################
def writeArrayToFileReplaceOld(self, array, file_path):
fo = open(file_path ,"w")
save_stdout = sys.stdout
sys.stdout = fo
for line in array:
print line
fo.close()
sys.stdout = save_stdout
####################################################################################
def appendArrayToFile(self, array, file_path):
fo = open(file_path ,"a+")
save_stdout = sys.stdout
sys.stdout = fo
for line in array:
print line
fo.close()
sys.stdout= save_stdout
####################################################################################
def readFileIntoArray(self, file_path):
array = []
if path.exists (file_path):
with open(file_path) as fo:
for line in fo:
line = line.replace("\n","")
array.append(line)
fo.close()
return array
else:
return None
####################################################################################
def appendFileWithHashes(self,data, file_path):
if path.exists (file_path):
fo = open(file_path,"a+")
save_stdout = sys.stdout
sys.stdout = fo
print ""
print "############################################################################"
print data
sys.stdout = save_stdout
fo.close()
####################################################################################
| UTF-8 | Python | false | false | 3,320 | py | 9 | file_functions.py | 9 | 0.35753 | 0.356928 | 0 | 99 | 32.535354 | 96 |
ianramzy/old-code | 9,448,928,091,223 | 9e09e23310ae3036609b4580da64d3479a2ee6d0 | 610069be6dff8a673c1352771477197c3a2a998e | /Snakes/snakes.py | 4a07fd2f78127bd0611e9743ba8faf99fc2955db | [
"MIT"
]
| permissive | https://github.com/ianramzy/old-code | e4d638d8880e6ab379c3c9dbbe8bda2e732ec5ba | 6d3ca52de5c6b80a1f0678ca73b78a3024e95f05 | refs/heads/master | 2020-06-11T15:23:20.829779 | 2019-06-30T14:00:28 | 2019-06-30T14:00:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame, sys, random
# pygame 1.9.4
pygame.init()
size = width, height = 1320, 720
screen = pygame.display.set_mode(size)
# pygame.mixer.pre_init()
# white = [0, 0, 0]
white = [255, 255, 255]
orange = [255, 140, 0]
red = [255, 0, 0]
gray = [35, 35, 35]
# white = [0, 0, 255]
font = pygame.font.SysFont("BankGothic", 45)
font2 = pygame.font.SysFont("BankGothic", 80)
font3 = pygame.font.SysFont("BankGothic", 222)
font4 = pygame.font.SysFont("BankGothic", 15)
font5 = pygame.font.SysFont("BankGothic", 95)
font6 = pygame.font.SysFont("BankGothic", 65)
# snakeyum = pygame.mixer.Sound('snakeyum.wav')
# alarm = pygame.mixer.Sound('alarm.wav')
# ticktick = pygame.mixer.Sound('ticktock.wav')
backround = pygame.image.load("snakeback.jpg")
timebon = pygame.image.load("clock.png")
# pygame.mixer.music.load('tronmusic.wav')
def gamep1():
isticking = False
# pygame.mixer.music.play(-1)
    # starting speed is going down:
speed = [0, 30]
# head is where the new snake segment will be created:
head = [90, 90]
# snake is a list of Rectangles, representing segments of the snake:
snake = [pygame.Rect(head, [30, 30])]
# starting length is 5:
length = 5
# set random position for food:
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
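    # food is the orange score pellet; food2 is the time-bonus pickup that is
    # drawn with the clock image in the render loop below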
eleven = 11
counter = eleven
    wt = 100  # per-frame delay in ms (was 0, which drained the timer almost instantly; gamep2 uses 100)
score = 0
backrect = pygame.Rect(0, 0, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
if speed[1] != 30:
speed = [0, -30]
elif event.key == pygame.K_DOWN:
if speed[1] != -30:
speed = [0, 30]
if event.key == pygame.K_LEFT:
if speed[0] != 30:
speed = [-30, 0]
elif event.key == pygame.K_RIGHT:
if speed[0] != -30:
speed = [30, 0]
# move the head:
head[0] = head[0] + speed[0]
head[1] = head[1] + speed[1]
# check for collision with self:
for segment in snake:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
# check for collision with walls:
if head[0] >= width or head[0] < 0 or head[1] >= height or head[1] < 0:
losequit(score)
# check for collision with food:
if head == food:
length = length + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
# check for collision with time bonus:
if head == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + 1
counter = eleven
# snakeyum.play()
# add new segment to snake at head:
snake.append(pygame.Rect(head, [30, 30]))
# remove tail segments if necessary:
while len(snake) > length:
snake.pop(0)
# draw your game elements here:
screen.blit(backround, backrect)
# draw all the snake segments:
for segment in snake:
pygame.draw.rect(screen, red, segment, 0)
## timer
counter = counter - 0.1
## render title
renderedText = font5.render("SNAKE TRIALS", 1, white)
screen.blit(renderedText, (300, 10))
## render timer
renderedText = font.render("Time Remaining: " + str(int(counter)), 1, white)
screen.blit(renderedText, (5, height - 155))
## render score
renderedText = font.render("Score: " + str(int(score)), 1, white)
screen.blit(renderedText, (5, height - 195))
if counter <= 4:
if not isticking:
# ticktick.play(0)
isticking = True
# running out of time:
if counter <= 0:
losequit(score)
# draw the food:
pygame.draw.rect(screen, orange, pygame.Rect(food, [30, 30]), 0)
screen.blit(timebon, food2)
pygame.display.flip()
pygame.time.wait(wt)
def gamep2():
isticking = False
# backround = pygame.image.load("snakeback.jpg")
# pygame.mixer.music.load('tronmusic.wav')
# pygame.mixer.music.play(-1)
    # starting speeds: player 1 moves down, player 2 moves right:
speed = [0, 30]
speed2 = [30, 0]
# head is where the new snake segment will be created:
head = [90, 90]
head2 = [270, 270]
# snake is a list of Rectangles, representing segments of the snake:
snake = [pygame.Rect(head, [30, 30])]
snake2 = [pygame.Rect(head2, [30, 30])]
# starting length is 5:
length = 5
length2 = 5
# set random position for food:
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = 11
counter = eleven
wt = 100
score = 0
backrect = pygame.Rect(0, 0, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
if speed[1] != 30:
speed = [0, -30]
elif event.key == pygame.K_s:
if speed[1] != -30:
speed = [0, 30]
if event.key == pygame.K_a:
if speed[0] != 30:
speed = [-30, 0]
elif event.key == pygame.K_d:
if speed[0] != -30:
speed = [30, 0]
# snake2 controls
if event.key == pygame.K_UP:
if speed2[1] != 30:
speed2 = [0, -30]
elif event.key == pygame.K_DOWN:
if speed2[1] != -30:
speed2 = [0, 30]
if event.key == pygame.K_LEFT:
if speed2[0] != 30:
speed2 = [-30, 0]
elif event.key == pygame.K_RIGHT:
if speed2[0] != -30:
speed2 = [30, 0]
# move the head:
head[0] = head[0] + speed[0]
head[1] = head[1] + speed[1]
head2[0] = head2[0] + speed2[0]
head2[1] = head2[1] + speed2[1]
# check for collision with self:
for segment in snake:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
for segment in snake2:
if segment == pygame.Rect(head2, [30, 30]):
losequit(score)
for segment in snake:
if segment == pygame.Rect(head2, [30, 30]):
losequit(score)
for segment in snake2:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
# check for collision with walls:
if head[0] >= width or head[0] < 0 or head[1] >= height or head[1] < 0:
losequit(score)
if head2[0] >= width or head2[0] < 0 or head2[1] >= height or head2[1] < 0:
losequit(score)
# check for collision with food:
if head == food:
length = length + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
if head2 == food:
length2 = length2 + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
# check for collision with time bonus:
if head == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + .5
counter = eleven
# snakeyum.play()
if head2 == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + .5
counter = eleven
# snakeyum.play()
# add new segment to snake at head:
snake.append(pygame.Rect(head, [30, 30]))
snake2.append(pygame.Rect(head2, [30, 30]))
# remove tail segments if necessary:
while len(snake) > length:
snake.pop(0)
    while len(snake2) > length2:
snake2.pop(0)
# draw your game elements here:
screen.blit(backround, backrect)
# draw all the snake segments:
for segment in snake:
pygame.draw.rect(screen, white, segment, 0)
for segment in snake2:
pygame.draw.rect(screen, red, segment, 0)
## timer
counter = counter - 0.1
if counter <= 4:
if not isticking:
# ticktick.play(0)
isticking = True
## render title
renderedText = font5.render("P2 SNAKE TRIALS ", 1, white)
screen.blit(renderedText, (233, 5))
## render timer
renderedText = font.render("Time Remaining: " + str(int(counter)), 1, white)
screen.blit(renderedText, (5, height - 55))
## render score
renderedText = font.render("Score: " + str(int(score)), 1, white)
screen.blit(renderedText, (5, height - 95))
# running out of time:
if counter <= 0:
losequit(score)
# draw the food:
pygame.draw.rect(screen, orange, pygame.Rect(food, [30, 30]), 0)
screen.blit(timebon, food2)
pygame.display.flip()
pygame.time.wait(wt)
def startscreen():
backround = pygame.image.load("snakeback.jpg")
backrect = pygame.Rect(0, 0, 0, 0)
screen.blit(backround, backrect)
renderedText = font5.render('Welcome to Snake Trials', 1, white)
screen.blit(renderedText, (11, 1))
renderedText = font6.render("Press Space to Start", 1, white)
screen.blit(renderedText, (11, height - 195))
renderedText = font6.render("Press '2' for Two Player Co-Op", 1, white)
screen.blit(renderedText, (11, height - 95))
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gamep1()
if event.key == pygame.K_2:
gamep2()
def prestart():
time = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
startscreen()
if time == 10:
white = [0, 0, 0]
renderedText = font4.render(
'Traceback most recent call last:File C:Users Wood Word is2o snakes snakes.py, line 307, in <module>',
0, white)
screen.blit(renderedText, (0, 0))
renderedText = font4.render('Press space to initiate the new world order and virus.exe', 0, white)
screen.blit(renderedText, (0, 15))
if time == 20:
white = [255, 255, 255]
renderedText = font4.render(
'Traceback most recent call last:File C:Users Wood Word is2o snakes snakes.py, line 307, in <module>',
0, white)
screen.blit(renderedText, (0, 0))
renderedText = font4.render('Press space to initiate the new world order and virus.exe', 0, white)
screen.blit(renderedText, (0, 15))
time = 0
time = time + 1
pygame.display.flip()
pygame.time.wait(100)
def losequit(score):
# pygame.mixer.music.load('tronmusic.wav')
# pygame.mixer.music.stop
# alarm.play()
backround = pygame.image.load("snakeback.jpg")
backrect = pygame.Rect(0, 0, 0, 0)
screen.blit(backround, backrect)
renderedText = font3.render('You Lose!', 1, white)
screen.blit(renderedText, (85, 100))
renderedText = font.render("You scored: " + str(int(score)), 1, white)
screen.blit(renderedText, (4, height - 95))
renderedText = font.render("Press Space to Play Single Player Again", 1, white)
screen.blit(renderedText, (4, height - 195))
renderedText = font.render("Press '2' to Play Two Player", 1, white)
screen.blit(renderedText, (4, height - 155))
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gamep1()
if event.key == pygame.K_2:
gamep2()
prestart()
| UTF-8 | Python | false | false | 14,107 | py | 80 | snakes.py | 60 | 0.503934 | 0.460481 | 0 | 405 | 32.832099 | 118 |
tjafs/ESP32_Node | 824,633,725,211 | afca7627e94c9c16011e9f9293aaaca6b90ea18b | d540a8d7e345a22e6ba299f94ec0a1c31ad16759 | /lora.py | ec2df8418b2131bc704bfcab61eadfdbfddc3f9a | []
| no_license | https://github.com/tjafs/ESP32_Node | b7703252da6e709fcb780d7903d7f64f64ac63ae | 1e146fe9e30b1ccdac8f5aa0d04d05b2aa03ae24 | refs/heads/master | 2020-03-08T20:13:19.559011 | 2018-04-09T18:42:43 | 2018-04-09T18:42:43 | 128,376,535 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Denne filen inneholder metoder/funksjoner tilknyttet lora
#Funksjon for å skrive data til lora
lora_write = lambda data: print(data+"\r\n")
#Funksjon for å lese data fra lora uten \r\n bak og foran
def lora_read():
inndata = str(lora.readln())
utdata = inndata.replace("\r\n", "")
return utdata
| UTF-8 | Python | false | false | 315 | py | 8 | lora.py | 6 | 0.696486 | 0.696486 | 0 | 12 | 25.083333 | 59 |
CognitiveComputationLab/cogmods | 7,533,372,652,897 | 42365aa6e3727db8e8bd24dd580f13a76fe8400d | 77c471124fb4ac4a7fe0a19cf47b666ed0eccb79 | /wason_analyse/quantitative_optimal_data_selection.py | 9809b4ebf4d1a355a028c5281b8f2ebd633f50e3 | [
"MIT"
]
| permissive | https://github.com/CognitiveComputationLab/cogmods | f8286d7aa7917a87fd4df27d0c6db666aec88c92 | ac73fb60387aad37d3b3fb823f9b2c205c6cb458 | refs/heads/master | 2023-07-26T10:15:48.647877 | 2023-07-14T08:38:23 | 2023-07-14T08:38:23 | 178,379,369 | 1 | 12 | MIT | false | 2021-09-27T10:30:47 | 2019-03-29T09:55:02 | 2021-08-23T14:18:06 | 2021-09-27T10:30:46 | 11,633 | 0 | 11 | 2 | Python | false | false | import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from matplotlib import cm
from scipy.optimize import minimize
def qods(x=0.15, y=0.16, r=0.5, eps=0.1):
# x = 0.15
# y = 0.26
# r = 0.5
not_x = 1 - x
not_y = 1 - y
not_r = 1 - r
# eps = 0.17
"""
Turn q card
"""
p_q_r = x * (1 - eps) * y * r
p_q_not_r = x * not_r
p_not_q_r = x * eps * not_y * r
p_not_q_not_r = x * not_r
not_p_q_r = 1 - x * (1 - eps) * y * r
not_p_q_not_r = not_x * not_r
not_p_not_q_r = 1 - x * eps * not_y * r
not_p_not_q_not_r = not_x * not_r
"""
Turn p card
"""
q_p_r = (1 - eps) * x * r
q_p_not_r = y * not_r
q_not_p_r = y * not_x * r
q_not_p_not_r = y * not_r
not_q_p_r = eps * x * r
not_q_p_not_r = not_y * not_r
not_q_not_p_r = not_y * not_x * r
not_q_not_p_not_r = not_y * not_r
"""
Turn p and not p card
Probabilities for both hypothesis
"""
p_q = p_q_r + p_q_not_r
not_p_q = not_p_q_r + not_p_q_not_r
p_not_q = p_not_q_r + p_not_q_not_r
not_p_not_q = not_p_not_q_r + not_p_not_q_not_r
"""
Turn q and not q card
Probabilities for both hypothesis
"""
q_p = q_p_r + q_p_not_r
not_q_p = not_q_p_r + not_q_p_not_r
q_not_p = q_not_p_r + q_not_p_not_r
not_q_not_p = not_q_not_p_r + not_q_not_p_not_r
p_r = x * r
p_not_r = x * not_r
q_r = y * r
q_not_r = y * not_r
not_p_r = not_x * r
not_p_not_r = not_x * not_r
not_q_r = not_y * r
not_q_not_r = not_y * not_r
# information P
a = p_q_r * np.log2((p_q_r * x) / (p_q * p_r))
b = p_q_not_r * np.log2((p_q_not_r * x) / (p_q * p_not_r))
c = p_not_q_r * np.log2((p_not_q_r * x) / (p_not_q * p_r))
d = p_not_q_not_r * np.log2((p_not_q_not_r * x) / (p_not_q * p_not_r))
information_p = a + b + c + d
a = not_p_q_r * np.log2((not_p_q_r * not_x) / (not_p_q * not_p_r))
b = not_p_q_not_r * np.log2((not_p_q_not_r * not_x) / (not_p_q * not_p_not_r))
c = not_p_not_q_r * np.log2((not_p_not_q_r * not_x) / (not_p_not_q * not_p_r))
d = not_p_not_q_not_r * np.log2((not_p_not_q_not_r * not_x) / (not_p_not_q * not_p_not_r))
information_not_p = a + b + c + d
a = q_p_r * np.log2((q_p_r * y) / (q_p * q_r))
b = q_p_not_r * np.log2((q_p_not_r * y) / (q_p * q_not_r))
c = q_not_p_r * np.log2((q_not_p_r * y) / (q_not_p * q_r))
d = q_not_p_not_r * np.log2((q_not_p_not_r * y) / (q_not_p * q_not_r))
information_q = a + b + c + d
a = not_q_p_r * np.log2((not_q_p_r * not_y) / (not_q_p * not_q_r))
b = not_q_p_not_r * np.log2((not_q_p_not_r * not_y) / (not_q_p * not_q_not_r))
c = not_q_not_p_r * np.log2((not_q_not_p_r * not_y) / (not_q_not_p * not_q_r))
d = not_q_not_p_not_r * np.log2((not_q_not_p_not_r * not_y) / (not_q_not_p * not_q_not_r))
information_not_q = a + b + c + d
sum_all = [information_p, information_q, information_not_p, information_not_q]
scaled_inf_p = information_p / np.sum(sum_all)
scaled_inf_q = information_q / np.sum(sum_all)
scaled_inf_not_p = information_not_p / np.sum(sum_all)
scaled_inf_not_q = information_not_q / np.sum(sum_all)
return scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q
def stf(card):
x = -2.37 + 9.06 * card
return 1 / (1 + np.exp(x))
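# stf is a logistic link (a "selection tendency function"): it maps a card's
# scaled information value onto a selection probability; the two constants
# appear to be fitted values.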
"""
optimize RMSE of QODS pred
"""
def optimize_inf_model(params, *args):
x, y, r, eps = params
obs_p, obs_not_p, obs_q, obs_not_q = args
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(x, y, r, eps)
error = (obs_p - stf(scaled_inf_p)) ** 2 + (obs_not_p - stf(scaled_inf_not_p)) ** 2 + (
obs_q - stf(scaled_inf_q)) ** 2 + (obs_not_q - stf(
scaled_inf_not_q)) ** 2
return np.sqrt(error) / 4
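# Note: the value returned above is sqrt(SSE)/4 rather than sqrt(SSE/4), so it
# is proportional to, but not literally, the RMSE over the four cards.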
def testP():
for i in range(10, 100000000, 100):
tmp = 1 / (i)
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(tmp, .20, .50, .1)
print("P(p): ", tmp)
print("prob p:", stf(scaled_inf_p))
print("prob not p:", stf(scaled_inf_not_p))
print("prob q:", stf(scaled_inf_q))
print("prob not q:", stf(scaled_inf_not_q))
def gen_data():
data_p = [[[] for j in range(100)] for i in range(100)]
data_not_p = [[[] for j in range(100)] for i in range(100)]
data_q = [[[] for j in range(100)] for i in range(100)]
data_not_q = [[[] for j in range(100)] for i in range(100)]
for i in range(1, 100):
for j in range(1, 100):
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(i * 0.01, j * 0.01, .5, .1)
data_p[i - 1][j - 1] = scaled_inf_p
data_not_p[i - 1][j - 1] = scaled_inf_not_p
data_q[i - 1][j - 1] = scaled_inf_q
data_not_q[i - 1][j - 1] = scaled_inf_not_q
df_p = pd.DataFrame(data_p)
df_not_p = pd.DataFrame(data_not_p)
df_q = pd.DataFrame(data_q)
df_not_q = pd.DataFrame(data_not_q)
df_p.to_csv('odsP.csv') # , index=False)
df_not_p.to_csv('odsNotP.csv') # , index=False)
df_q.to_csv('odsQ.csv') # , index=False)
df_not_q.to_csv('odsNotQ.csv') # , index=False)
def gen_RAST_data():
data = []
for i in range(1, 99):
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(0.01, 0.9, .5, .01) # (i*0.01)-0.001
data.append((i, stf(scaled_inf_q), stf(scaled_inf_not_q)))
df = pd.DataFrame(data)
return df
def test_gen_data():
data = []
for i in range(1, 100):
for j in range(1, 100):
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(i * 0.01, j * 0.01, .5, .1)
data.append((i, j, stf(scaled_inf_not_q)))
df = pd.DataFrame(data)
# df.to_csv('test.csv', index=False)
return df
def generate_colormap_Plot(df):
cmap = cm.get_cmap('Greys')
fig, ax = plt.subplots(1)
# Now here's the plot. range(len(df)) just makes the x values 1, 2, 3...
# df[0] is then the y values. c sets the colours (same as y values in this
# case). s is the marker size.
ax.scatter(df[0], df[1], c=(df[2] * 100), s=120, cmap=cmap, edgecolor='None')
plt.show()
def generate_RAST_Plot(df):
plt.plot(df[0], df[1], 'r', label='q')
plt.plot(df[0], df[2], 'g', label='not q')
plt.xlabel("P(q)")
plt.ylabel("Probability of selecting a card")
plt.legend(loc='right')
plt.show()
def fmt(x, pos):
a, b = '{:.2e}'.format(x).split('e')
b = int(b)
return r'${} \times 10^{{{}}}$'.format(a, b)
def plotdata(df):
contour = plt.tricontourf(df[0], df[1], df[2], 100, cmap="Greys")
cbar = plt.colorbar(contour, format=ticker.FuncFormatter(fmt))
cbar.set_label('')
plt.xlabel("P(p)")
plt.ylabel("P(q)")
plt.show()
"""
Optimize Data from Excelfile and save it in csv
"""
def optimizeQODS(data_file_source, data_file_output):
results = []
initial_values = np.array([0.01, 0.02, 0.5, 0.1])
prob_bounds = [(0, 1), (0, 1), (0, 1), (0, 1)]
df = pd.read_csv(data_file_source, header=None, sep=";")
df = df.apply(lambda x: x.str.replace(',', '.'))
df = df.apply(pd.to_numeric)
for index, row in df.iterrows():
data = (row.iloc[0], row.iloc[1], row.iloc[2], row.iloc[3])
tmp = minimize(optimize_inf_model, x0=initial_values, args=data, method='SLSQP', bounds=prob_bounds,
constraints=({'type': 'ineq', 'fun': lambda x: x[1] - x[0] - 0.001}))
results.append(tmp.x)
df1 = pd.DataFrame(results)
df1 = df1.round(5)
df1.to_csv(data_file_output, index=None, header=None)
return df1
"""
Calc predition from data
"""
def calcPred(data_file_source, data_file_output):
df = pd.read_csv(data_file_source, header=None, sep=",")
pred = []
for index, row in df.iterrows():
tmp = []
if row.iloc[0] == 1:
row.iloc[0] = row.iloc[0] - 0.0000001
elif row.iloc[0] == 0:
row.iloc[0] = row.iloc[0] + 0.0000001
if row.iloc[1] == 1:
row.iloc[1] = row.iloc[1] - 0.0000001
elif row.iloc[1] == 0:
row.iloc[1] = row.iloc[1] + 0.0000001
if row.iloc[2] == 1:
row.iloc[2] = row.iloc[2] - 0.0000001
elif row.iloc[2] == 0:
row.iloc[2] = row.iloc[2] + 0.0000001
if row.iloc[3] == 1:
row.iloc[3] = row.iloc[3] - 0.0000001
elif row.iloc[3] == 0:
row.iloc[3] = row.iloc[3] + 0.0000001
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(row.iloc[0], row.iloc[1], row.iloc[2],
row.iloc[3])
tmp.append(stf(scaled_inf_p))
tmp.append(stf(scaled_inf_not_p))
tmp.append(stf(scaled_inf_q))
tmp.append(stf(scaled_inf_not_q))
pred.append(tmp)
df2 = pd.DataFrame(pred)
df2.to_csv(data_file_output, header=None, index=None)
return pred
"""
quick test of params
"""
def showValues(p, q, r, eps):
scaled_inf_p, scaled_inf_not_p, scaled_inf_q, scaled_inf_not_q = qods(p, q, r, eps)
print(stf(scaled_inf_p))
print(stf(scaled_inf_not_p))
print(stf(scaled_inf_q))
print(stf(scaled_inf_not_q))
"""
Calc RMSE
"""
def calcError(data_file_source1, data_file_source2, data_file_output):
df1 = pd.read_csv(data_file_source1, header=None, sep=";")
df1 = df1.apply(lambda x: x.str.replace(',', '.'))
df1 = df1.apply(pd.to_numeric)
df2 = pd.read_csv(data_file_source2, header=None ,sep=",")
p = []
notP = []
q = []
notQ = []
rmse = []
for index, row in df1.iterrows():
p.append(row.iloc[0])
notP.append(row.iloc[1])
q.append(row.iloc[2])
notQ.append(row.iloc[3])
for index, row in df2.iterrows():
tmp = []
tmp.append(np.sqrt((p[index] - row.iloc[0])**2))
tmp.append(np.sqrt((notP[index] - row.iloc[1]) ** 2))
tmp.append(np.sqrt((q[index] - row.iloc[2]) ** 2))
tmp.append(np.sqrt((notQ[index] - row.iloc[3]) ** 2))
rmse.append(tmp)
df3 = pd.DataFrame(rmse)
df3.to_csv(data_file_output, header=None, index=None)
return rmse
# ragni aggregated
optimizeQODS('../qods_data/qods_ragni_aggregated_obs.csv', '../qods_data/qods_ragni_aggregated_opt_params.csv')
calcPred('../qods_data/qods_ragni_aggregated_opt_params.csv', '../qods_data/qods_ragni_aggregated_pred.csv')
calcError('../qods_data/qods_ragni_aggregated_obs.csv', '../qods_data/qods_ragni_aggregated_pred.csv', '../qods_data/qods_ragni_aggregated_rmse.csv')
#ragni individual
optimizeQODS('../qods_data/qods_ragni_individual_obs.csv', '../qods_data/qods_ragni_individual_opt_params.csv')
calcPred('../qods_data/qods_ragni_individual_opt_params.csv', '../qods_data/qods_ragni_individual_pred.csv')
calcError('../qods_data/qods_ragni_individual_obs.csv', '../qods_data/qods_ragni_individual_pred.csv', '../qods_data/qods_ragni_individual_rmse.csv')
# negation data
optimizeQODS('../qods_data/qods_neg_obs.csv', '../qods_data/qods_neg_opt_params.csv')
calcPred('../qods_data/qods_neg_opt_params.csv', '../qods_data/qods_neg_pred.csv')
calcError('../qods_data/qods_neg_obs.csv', '../qods_data/qods_neg_pred.csv', '../qods_data/qods_neg_rmse.csv')
# repeated
optimizeQODS('../qods_data/qods_rep_obs.csv', '../qods_data/qods_rep_opt_params.csv')
calcPred('../qods_data/qods_rep_opt_params.csv', '../qods_data/qods_rep_pred.csv')
calcError('../qods_data/qods_rep_obs.csv', '../qods_data/qods_rep_pred.csv', '../qods_data/qods_rep_rmse.csv')
| UTF-8 | Python | false | false | 11,667 | py | 377 | quantitative_optimal_data_selection.py | 203 | 0.554213 | 0.524214 | 0 | 333 | 34.036036 | 149 |
polatbilek/Turkce-cinsiyet-tahminlemesi | 12,498,354,831,791 | 3a34a91b35c3618a67852809a4f62e9eb868c6f7 | f3af6f601ec443ec2674a4f1fc8a30292755a779 | /model.py | 8c521aec4639c4f6c47cbb055c358efa50ffdc40 | [
"MIT"
]
| permissive | https://github.com/polatbilek/Turkce-cinsiyet-tahminlemesi | d22a6507612c59ced5f6fd397509ad7109d59a38 | c58a8161a228c37026291c88e7fdb1d33092ee05 | refs/heads/master | 2020-04-19T09:10:57.189418 | 2019-01-28T07:30:08 | 2019-01-28T07:30:08 | 168,102,457 | 2 | 0 | MIT | true | 2019-01-29T06:31:41 | 2019-01-29T06:31:41 | 2019-01-28T07:31:26 | 2019-01-28T07:30:09 | 776 | 0 | 0 | 0 | null | false | null | import tensorflow as tf
from parameters import FLAGS
import numpy as np
class network(object):
############################################################################################################################
def __init__(self, embeddings):
with tf.device('/device:GPU:0'):
self.prediction = []
# create GRU cells
with tf.variable_scope("tweet"):
self.cell_fw = tf.nn.rnn_cell.GRUCell(num_units=FLAGS.rnn_cell_size, activation=tf.sigmoid)
self.cell_bw = tf.nn.rnn_cell.GRUCell(num_units=FLAGS.rnn_cell_size, activation=tf.sigmoid)
# RNN placeholders
self.reg_param = tf.placeholder(tf.float32, shape=[])
num_of_total_filters = len(FLAGS.filter_sizes.split(",")) * FLAGS.num_filters
total_tweets = FLAGS.batch_size * FLAGS.tweet_per_user
            # weights
self.weights = {'fc1': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size, FLAGS.num_classes]), name="fc1-weights"),
'fc1-cnn': tf.Variable(tf.random_normal([num_of_total_filters, FLAGS.num_classes]),name="fc1-weights"),
'att1-w': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size, 2 * FLAGS.rnn_cell_size]), name="att1-weights"),
'att1-v': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size]), name="att1-vector"),
'att2-w': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size, 2 * FLAGS.rnn_cell_size]), name="att2-weights"),
'att2-v': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size]), name="att2-vector"),
'att2-cnn-w': tf.Variable(tf.random_normal([num_of_total_filters, num_of_total_filters]), name="att2-weights"),
'att2-cnn-v': tf.Variable(tf.random_normal([num_of_total_filters]), name="att2-vector"),
}
# biases
self.bias = {'fc1': tf.Variable(tf.random_normal([FLAGS.num_classes]), name="fc1-bias-noreg"),
'fc1-cnn': tf.Variable(tf.random_normal([FLAGS.num_classes]), name="fc1-bias-noreg"),
'att1-w': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size]), name="att1-bias-noreg"),
'att2-w': tf.Variable(tf.random_normal([2 * FLAGS.rnn_cell_size]), name="att2-bias-noreg"),
'att1-cnn-w': tf.Variable(tf.random_normal([num_of_total_filters]), name="att1-bias-noreg"),
'att2-cnn-w': tf.Variable(tf.random_normal([num_of_total_filters]), name="att2-bias-noreg")
}
# initialize the computation graph for the neural network
# self.rnn()
#self.rnn_with_attention()
self.cnn(embeddings.shape[0])
self.architecture()
self.backward_pass()
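            # Only the CNN path is wired up above; the RNN variants below are
            # kept for reference and can be swapped in via the commented calls.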
############################################################################################################################
def architecture(self):
with tf.device('/device:GPU:0'):
#user level attention
self.att_context_vector_char = tf.tanh(tf.tensordot(self.cnn_output, self.weights["att2-cnn-w"], axes=1) + self.bias["att2-cnn-w"])
self.attentions_char = tf.nn.softmax(tf.tensordot(self.att_context_vector_char, self.weights["att2-cnn-v"], axes=1))
self.attention_output_char = tf.reduce_sum(self.cnn_output * tf.expand_dims(self.attentions_char, -1), 1)
# FC layer for reducing the dimension to 2(# of classes)
self.logits = tf.tensordot(self.attention_output_char, self.weights["fc1-cnn"], axes=1) + self.bias["fc1-cnn"]
# predictions
self.prediction = tf.nn.softmax(self.logits)
# calculate accuracy
self.correct_pred = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
return self.prediction
############################################################################################################################
def backward_pass(self):
with tf.device('/device:GPU:0'):
# calculate loss
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y))
# add L2 regularization
self.l2 = self.reg_param * sum(
tf.nn.l2_loss(tf_var)
for tf_var in tf.trainable_variables()
if not ("noreg" in tf_var.name or "bias" in tf_var.name)
)
self.loss += self.l2
# optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.train = self.optimizer.minimize(self.loss)
return self.accuracy, self.loss, self.train
############################################################################################################################
def rnn(self):
# embedding layer
self.rnn_input = tf.nn.embedding_lookup(self.tf_embeddings, self.X)
# rnn layer
(self.outputs, self.output_states) = tf.nn.bidirectional_dynamic_rnn(self.cell_fw, self.cell_bw, self.rnn_input, self.sequence_length, dtype=tf.float32,scope="tweet")
# concatenate the backward and forward cells
self.rnn_output_raw = tf.concat([self.output_states[0], self.output_states[1]], 1)
#reshape the output for the next layers
self.rnn_output = tf.reshape(self.rnn_output_raw, [FLAGS.batch_size, FLAGS.tweet_per_user, 2*FLAGS.rnn_cell_size])
return self.rnn_output
############################################################################################################################
def rnn_with_attention(self):
# embedding layer
self.rnn_input = tf.nn.embedding_lookup(self.tf_embeddings, self.X)
# rnn layer
(self.outputs, self.output_states) = tf.nn.bidirectional_dynamic_rnn(self.cell_fw, self.cell_bw, self.rnn_input, self.sequence_length, dtype=tf.float32,scope="tweet")
# concatenate the backward and forward cells
self.concat_outputs = tf.concat(self.outputs, 2)
# attention layer
self.att_context_vector = tf.tanh(tf.tensordot(self.concat_outputs, self.weights["att1-w"], axes=1) + self.bias["att1-w"])
self.attentions = tf.nn.softmax(tf.tensordot(self.att_context_vector, self.weights["att1-v"], axes=1))
self.attention_output_raw = tf.reduce_sum(self.concat_outputs * tf.expand_dims(self.attentions, -1), 1)
#reshape the output for the next layers
self.attention_output = tf.reshape(self.attention_output_raw, [FLAGS.batch_size, FLAGS.tweet_per_user, 2*FLAGS.rnn_cell_size])
return self.attention_output
############################################################################################################################
def captioning(self):
pass
############################################################################################################################
def cnn(self, vocab_size):
with tf.device('/device:GPU:0'):
# CNN placeholders
self.input_x = tf.placeholder(tf.int32, [FLAGS.batch_size*FLAGS.tweet_per_user, FLAGS.sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.num_classes], name="input_y")
filter_sizes = [int(size) for size in FLAGS.filter_sizes.split(",")]
# Embedding layer
with tf.name_scope("embedding"):
W = tf.Variable(tf.random_uniform([vocab_size, FLAGS.char_embedding_size], -1.0, 1.0), name="W")
self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, FLAGS.char_embedding_size, 1, FLAGS.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[FLAGS.num_filters]), name="b-noreg")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, FLAGS.sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = FLAGS.num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_flat_pool = tf.reshape(self.h_pool, [-1, num_filters_total])
self.cnn_output = tf.reshape(self.h_flat_pool, [FLAGS.batch_size, FLAGS.tweet_per_user, num_filters_total])
return self.cnn_output
| UTF-8 | Python | false | false | 8,612 | py | 10 | model.py | 9 | 0.59301 | 0.580237 | 0 | 223 | 36.591928 | 168 |
EasyToy11/8Queen | 8,787,503,117,541 | db83d735a3799e7e9e1444e3a2e6ff969560c317 | ae451567c5ebcf11e1d696bf7dd8fc40c7615469 | /Queen.py | 54ea9c9a9c3bd66132a0b269d62f9d5f7c1520ca | []
| no_license | https://github.com/EasyToy11/8Queen | cd0667b1663e3e22a39ba9af35c11b3f3f6646f0 | 03f346e2f1102c6494e576ac5d75450c3d0a123c | refs/heads/master | 2020-06-24T09:48:38.688082 | 2019-07-26T02:28:53 | 2019-07-26T02:28:53 | 198,932,206 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Create a 2D array
A variable for the number of queens
Everything starts as True
Repeat 8 times
Squares within a queen's range become False
Rewrite the array to shorten the search
Process in the order: x, y, then diagonals
Display the result
"""
queen = 0
field = []
for i in range(8):
    row = []
    for j in range(8):
        row.append([i, j, True])
    field.append(row)  # build a true 8x8 grid so field[x][y] indexing works
is_queen = []
for i in range(8):
    # accept input only while a queen can still be placed on that square
while True:
        place = input('Coordinates to place a queen (e.g. x,y)')
x = int(place[0])
y = int(place[2])
if len(field[x][y]) != 3:
            print('A queen cannot be placed there; enter a different square')
else:
break
is_queen.append([x, y])
print(field)
    # clear column x
    for j in range(8):
        # keep an empty element so the array indices do not shift
        field[j][x] = []
    # clear row y
    for j in range(8):
        field[y][j] = []
    # process the diagonals
    for j in range(8):
        j += 1  # offsets 1..8 from the queen's square
        # down-right diagonal
try:
if len(field[x+j][y+j]) != 0:
field[x+j][y+j] = []
except IndexError:
pass
        # up-right diagonal
        # print(field[x+j][y-j])
        try:
            # guard y - j >= 0: a negative index would wrap to the other side
            if y - j >= 0 and len(field[x+j][y-j]) != 0:
                field[x+j][y-j] = []
        except IndexError:
            pass
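        # Note: only the two right-hand diagonals are cleared here; the
        # down-left and up-left diagonals are never blocked.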
for j in range(8):
print()
for k in range(8):
if len(field[j][k]) != 0:
print("○", end='')
else:
print("×", end='')
print()
| UTF-8 | Python | false | false | 1,675 | py | 3 | Queen.py | 3 | 0.443268 | 0.428896 | 0 | 67 | 18.567164 | 45 |
aliyesilli/codesignal | 10,058,813,419,681 | dab89c3dacdd25b1fe809de1c5dde5cb78c3c18e | 49635f4841b71dbc2754b58afe923d273dd85d9f | /Arcade/Intro/4_adjacentElementsProduct.py | fcbeefa32f03f705816735e69bcc2c6902cf26f5 | []
| no_license | https://github.com/aliyesilli/codesignal | b52d0cee28461f4c4ddc50ed12845ec7392e5264 | 87a72cd06989aac157d89429f889cbf90cfdac01 | refs/heads/master | 2020-03-23T17:56:49.589981 | 2018-07-23T14:38:54 | 2018-07-23T14:38:54 | 141,882,391 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def adjacentElementsProduct(inputArray):
if len(inputArray) == 0: return 0
    if len(inputArray) == 1: return inputArray[0]
b = inputArray[0]*inputArray[1]
for i in range(0,len(inputArray)-1):
if inputArray[i]*inputArray[i+1] > b: b=inputArray[i]*inputArray[i+1]
return b; | UTF-8 | Python | false | false | 270 | py | 33 | 4_adjacentElementsProduct.py | 33 | 0.711111 | 0.674074 | 0 | 7 | 37.714286 | 71 |
uvsq22005562/PROJET_TAPATAN_BITD02 | 9,328,668,981,134 | 85de6dd71ce232b115c2616b8f85ee21261a4687 | e63eda2edd018d903fdc8cdc05b3f5b06734fada | /TAPATAN.py | 45a82de477dc2aef6e135d4ae98a03855d65c3d0 | []
| no_license | https://github.com/uvsq22005562/PROJET_TAPATAN_BITD02 | e81ce02bba5f9fa29b30dc345269809a7d08819a | 11e16f838784ad84920f251c47dcc28221af319c | refs/heads/main | 2023-05-04T03:39:56.813220 | 2021-05-24T12:29:56 | 2021-05-24T12:29:56 | 359,737,901 | 0 | 2 | null | false | 2021-05-24T09:55:21 | 2021-04-20T08:18:30 | 2021-05-23T18:20:29 | 2021-05-24T09:55:21 | 7 | 0 | 2 | 1 | Python | false | false | ###########################
# PROJET TAPATAN
# Jules Marty
# jihad Djiar
# Sophie Wu
# Adam Bouchaour
# Thibault Astier
###########################
# IMPORTS
import tkinter as tk
from tkinter import messagebox
###########################
# GLOBALS VAR
MAP = [
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
] # 0 = empty; 1 or 2 = player 1 / player 2
JETONS = [3, 3] # tokens left to place (p1, p2)
ETAT_PARTIE = 0 # 0 = placement phase, 1 = movement phase
TOUR_JEU = 0 # turn counter; its parity tells whose turn it is
POINTS_JOUEURS = [0, 0] # scores: player 1 / player 2
MEMORY = []
REPETITION = []
rond = [] # canvas ids of the board points in the PvP window
COORD_PTS = [
    # row 1
    [80, 80, 120, 120, [0, 0]],
    [380, 80, 420, 120, [0, 1]],
    [680, 80, 720, 120, [0, 2]],
    # row 2
    [80, 380, 120, 420, [1, 0]],
    [380, 380, 420, 420, [1, 1]],
    [680, 380, 720, 420, [1, 2]],
    # row 3
    [80, 680, 120, 720, [2, 0]],
    [380, 680, 420, 720, [2, 1]],
    [680, 680, 720, 720, [2, 2]]
]
COORD_LINES = [
    # line 1
    [130, 80, 370, 120, [[0, 0], [0, 1]]],
    [430, 80, 670, 120, [[0, 1], [0, 2]]],
    # line 2
    [80, 130, 120, 370, [[0, 0], [1, 0]]],
    [380, 130, 420, 370, [[0, 1], [1, 1]]],
    [680, 130, 720, 370, [[0, 2], [1, 2]]],
    # line 3
    [130, 380, 370, 420, [[1, 0], [1, 1]]],
    [430, 380, 670, 420, [[1, 1], [1, 2]]],
    # line 4
    [80, 430, 120, 670, [[1, 0], [2, 0]]],
    [380, 430, 420, 670, [[1, 1], [2, 1]]],
    [680, 430, 720, 670, [[1, 2], [2, 2]]],
    # line 5
    [130, 680, 370, 720, [[2, 0], [2, 1]]],
    [430, 680, 670, 720, [[2, 1], [2, 2]]]
]
COORD_DIAG = [
    # top left
    [130, 130, 370, 370, [[0, 0], [1, 1]]],
    # top right
    [430, 130, 670, 370, [[1, 1], [0, 2]]],
    # bottom left
    [130, 430, 370, 670, [[2, 0], [1, 1]]],
    # bottom right
    [430, 430, 670, 670, [[2, 2], [1, 1]]]
]
def window_transition(id): # jules
    ''' closes the currently open window and opens the menu '''
if id == 1:
# pvp -> menu
racine1.destroy()
menu()
if id == 2:
# pv ia -> menu
racine2.destroy()
menu()
if id == 3:
# ia v ia -> menu
racine3.destroy()
menu()
if id == 4:
racine.destroy()
game_window_1()
if id == 5:
racine.destroy()
game_window_2()
if id == 6:
racine.destroy()
game_window_3()
if id == 11:
racine1.destroy()
game_window_1()
def menu(): # thibault
global racine, POINTS_JOUEURS
""" Fonction qui crée:
-une fenetre de taille 400*400
- 4 boutons: PvP, PvIA, IAvIA et Exit
-Exit détruit la fenetre
"""
POINTS_JOUEURS = [0, 0]
racine = tk.Tk()
racine.title("Menu")
btn_PVP = tk.Button(racine, command=lambda: window_transition(4),
text="Player vs Player")
btn_PVP.pack()
btn_PVIA = tk.Button(racine, command=lambda: window_transition(5),
text="Player vs IA")
btn_PVIA.pack()
btn_IAVIA = tk.Button(racine, command=lambda: window_transition(6),
text="IA vs IA")
btn_IAVIA.pack()
btn_quit = tk.Button(racine, command=racine.destroy, text="Quitter")
btn_quit.pack()
racine.mainloop()
def game_window_1(): # thibault
global racine1, canvas, racine
"""Fonction qui creer:
-une nouvelle fenetre avec un canvas 800*800
-avec le plateau (carre centré) 600*600
-lignes et ronds au intersections
-Utilisé pour le PVP
"""
racine1 = tk.Tk()
racine1.title("TAPANTA")
canvas = tk.Canvas(racine1, bg="pale goldenrod", height=800, width=1000)
canvas.grid(row=0, rowspan=5, column=0, columnspan=3)
canvas.create_rectangle(100, 700, 700, 100, width=4, fill="pale goldenrod")
# LIGNES
canvas.create_line(100, 100, 700, 700, width=4, fill="black")
canvas.create_line(100, 700, 700, 100, width=4, fill="black")
canvas.create_line(400, 100, 400, 700, width=4, fill="black")
canvas.create_line(100, 400, 700, 400, width=4, fill="black")
# ROND SUPERIEUR
rond.append(canvas.create_oval(90, 90, 110, 110, fill="black"))
rond.append(canvas.create_oval(390, 90, 410, 110, fill="black"))
rond.append(canvas.create_oval(690, 90, 710, 110, fill="black"))
# ROND MILLIEU
rond.append(canvas.create_oval(90, 390, 110, 410, fill="black"))
rond.append(canvas.create_oval(390, 390, 410, 410, fill="black"))
rond.append(canvas.create_oval(690, 390, 710, 410, fill="black"))
# ROND BAS
rond.append(canvas.create_oval(90, 690, 110, 710, fill="black"))
rond.append(canvas.create_oval(390, 690, 410, 710, fill="black"))
rond.append(canvas.create_oval(690, 690, 710, 710, fill="black"))
# LABEL SCORE
label_J1 = tk.Label(racine1, bg="pale goldenrod",
text="Score Joueur 1 :" + str(POINTS_JOUEURS[0]))
label_J1.grid(row=4, column=0)
label_J2 = tk.Label(racine1, bg="pale goldenrod",
text="Score Joueur 2 :" + str(POINTS_JOUEURS[1]))
label_J2.grid(row=4, column=1)
# BOUTON
btn_SAVE = tk.Button(racine1, bg="pale goldenrod",
command=sauvegarder, text="Sauvegarder")
btn_SAVE.grid(row=1, column=2)
btn_LOAD = tk.Button(racine1, bg="pale goldenrod",
command=charger, text="Charger")
btn_LOAD.grid(row=2, column=2)
btn_MENU = tk.Button(racine1, bg="pale goldenrod",
command=lambda: window_transition(1), text="Menu")
btn_MENU.grid(row=3, column=2)
# PROGRAMME :
canvas.bind('<Button-1>', mouseover_item)
racine1.mainloop()
def game_window_2(): # thibault
global racine2
"""Fonction qui creer:
-une nouvelle fenetre avec un canvas 800*1000
-avec le plateau (carre centré) 600*600
-lignes et ronds au intersections
-4 boutons
-Utilisé pour le IA V IA
"""
racine.destroy() # ferme le menu
racine2 = tk.Tk()
racine2.title("TAPANTA")
canvas = tk.Canvas(racine2, bg="pale goldenrod", height=800, width=1000)
canvas.grid(row=0, rowspan=5, column=0, columnspan=3)
canvas.create_rectangle(100, 700, 700, 100, width=4, fill="pale goldenrod")
# LIGNES
canvas.create_line(100, 100, 700, 700, width=4, fill="black")
canvas.create_line(100, 700, 700, 100, width=4, fill="black")
canvas.create_line(400, 100, 400, 700, width=4, fill="black")
canvas.create_line(100, 400, 700, 400, width=4, fill="black")
# ROND SUPERIEUR
canvas.create_oval(90, 90, 110, 110, fill="black")
canvas.create_oval(390, 90, 410, 110, fill="black")
canvas.create_oval(690, 90, 710, 110, fill="black")
# ROND MILLIEU
canvas.create_oval(90, 390, 110, 410, fill="black")
canvas.create_oval(390, 390, 410, 410, fill="black")
canvas.create_oval(690, 390, 710, 410, fill="black")
# ROND BAS
canvas.create_oval(90, 690, 110, 710, fill="black")
canvas.create_oval(390, 690, 410, 710, fill="black")
canvas.create_oval(690, 690, 710, 710, fill="black")
# LABEL SCORE
label_J1 = tk.Label(racine2, bg="pale goldenrod",
text="Score Joueur :" + "......")
label_J1.grid(row=4, column=0)
label_J2 = tk.Label(racine2, bg="pale goldenrod",
text="Score Ordinateur :" + "......")
label_J2.grid(row=4, column=1)
# BOUTON
btn_SAVE = tk.Button(racine2, bg="pale goldenrod",
command=None, text="Sauvegarder")
btn_SAVE.grid(row=1, column=2)
btn_LOAD = tk.Button(racine2, bg="pale goldenrod",
command=None, text="Charger")
btn_LOAD.grid(row=2, column=2)
btn_MENU = tk.Button(racine2, bg="pale goldenrod",
command=lambda: window_transition(2), text="Menu")
btn_MENU.grid(row=3, column=2)
btn_PAUSE = tk.Button(racine2, bg="pale goldenrod",
command=None, text="PAUSE")
btn_PAUSE.grid(row=4, column=2)
canvas.bind('<Button-1>', mouseover_item)
racine2.mainloop()
def game_window_3(): # thibault
global racine3
"""Fonction qui creer:
-une nouvelle fenetre avec un canvas 800*1000
-avec le plateau (carre centré) 600*600
-lignes et ronds au intersections
-4 boutons
-Utilisé pour le IA V IA
"""
racine.destroy() # ferme le menu
racine3 = tk.Tk()
racine3.title("TAPANTA")
canvas = tk.Canvas(racine3, bg="pale goldenrod", height=800, width=1000)
canvas.grid(row=0, rowspan=5, column=0, columnspan=3)
canvas.create_rectangle(100, 700, 700, 100, width=4, fill="pale goldenrod")
# LIGNES
canvas.create_line(100, 100, 700, 700, width=4, fill="black")
canvas.create_line(100, 700, 700, 100, width=4, fill="black")
canvas.create_line(400, 100, 400, 700, width=4, fill="black")
canvas.create_line(100, 400, 700, 400, width=4, fill="black")
# ROND SUPERIEUR
canvas.create_oval(90, 90, 110, 110, fill="black")
canvas.create_oval(390, 90, 410, 110, fill="black")
canvas.create_oval(690, 90, 710, 110, fill="black")
# ROND MILLIEU
canvas.create_oval(90, 390, 110, 410, fill="black")
canvas.create_oval(390, 390, 410, 410, fill="black")
canvas.create_oval(690, 390, 710, 410, fill="black")
# ROND BAS
canvas.create_oval(90, 690, 110, 710, fill="black")
canvas.create_oval(390, 690, 410, 710, fill="black")
canvas.create_oval(690, 690, 710, 710, fill="black")
# LABEL SCORE
label_J1 = tk.Label(racine3, bg="pale goldenrod",
text="Score Ordinateur 1 :" + "......")
label_J1.grid(row=4, column=0)
label_J2 = tk.Label(racine3, bg="pale goldenrod",
text="Score Ordinateur 2 :" + "......")
label_J2.grid(row=4, column=1)
# BOUTON
btn_SAVE = tk.Button(racine3, bg="pale goldenrod", command=None,
text="Sauvegarder")
btn_SAVE.grid(row=1, column=2)
btn_LOAD = tk.Button(racine3, bg="pale goldenrod", command=None,
text="Charger")
btn_LOAD.grid(row=2, column=2)
btn_MENU = tk.Button(racine3, bg="pale goldenrod",
command=lambda: window_transition(3), text="Menu")
btn_MENU.grid(row=3, column=2)
btn_PAUSE = tk.Button(racine3, bg="pale goldenrod", command=None,
text="PAUSE")
btn_PAUSE.grid(row=4, column=2)
canvas.bind('<Button-1>', mouseover_item)
    racine3.mainloop()
def mouseover_item(event): # jules
    ''' depending on where the player clicks, dispatches:
    - [x, y] if the click hits a point
    - [[x, y], [x, y]] if the click hits a line
    x, y are integers giving the positions of the
    points inside MAP '''
x, y = event.x, event.y
    # check the points (intersections)
for elm in COORD_PTS:
if (x > elm[0] and x < elm[2]) and (y > elm[1] and y < elm[3]):
if JETONS[alterner_joueur()-1] > 0 and ETAT_PARTIE == 0:
placer(elm[4])
    # check the lines
for elm in COORD_LINES:
if (x > elm[0] and x < elm[2]) and (y > elm[1] and y < elm[3]):
if ETAT_PARTIE == 1:
deplacer(elm[4])
    # check the diagonals
for elm in COORD_DIAG:
if (x > elm[0] and x < elm[2]) and (y > elm[1] and y < elm[3]):
if x > (y-20) and x < (y+20):
                # diagonal with equation x = y
if ETAT_PARTIE == 1:
deplacer(elm[4])
if x > (800-y-20) and x < (800-y+20):
                # diagonal with equation x = size - y
if ETAT_PARTIE == 1:
deplacer(elm[4])
def alterner_joueur(): # sophie
    ''' tells whose turn it is:
    1 = red to play, 2 = blue to play '''
if TOUR_JEU % 2 == 0:
return 1
else:
return 2
def alterner_tour(): # jules
    ''' switches the game state from the placement phase
    to the movement phase once all six tokens are placed '''
global TOUR_JEU, ETAT_PARTIE
if TOUR_JEU == 6:
ETAT_PARTIE += 1
def placer(point): # sophie
    ''' places a token on the board '''
global TOUR_JEU, ETAT_PARTIE
x, y = point[0], point[1]
if MAP[x][y] == 0:
MAP[x][y] = alterner_joueur()
JETONS[alterner_joueur()-1] -= 1
TOUR_JEU += 1
alterner_tour()
actualisation_graphique()
victory_check()
def deplacer(points): # sophie
    ''' moves a token along a line of the board '''
global pions_selectionner, TOUR_JEU, canvas, ETAT_PARTIE
x1, y1 = points[0][0], points[0][1]
x2, y2 = points[1][0], points[1][1]
if (MAP[x1][y1] == 0 or MAP[x2][y2] == 0) and MAP[x1][y1] != MAP[x2][y2]:
if max([MAP[x1][y1], MAP[x2][y2]]) == alterner_joueur():
MAP[x1][y1], MAP[x2][y2] = MAP[x2][y2], MAP[x1][y1]
TOUR_JEU += 1
alterner_tour()
actualisation_graphique()
victory_check()
match_nul_check()
def actualisation_graphique(): # sophie
    ''' recolours each point according to MAP:
    a "1" is drawn in red, a "2" in blue '''
global canvas
for i in range(0, 3):
for j in range(0, 3):
if MAP[i][j] == 1:
canvas.itemconfig(rond[i*3 + j], fill="red")
elif MAP[i][j] == 2:
canvas.itemconfig(rond[i*3 + j], fill="blue")
else:
canvas.itemconfig(rond[i*3 + j], fill="black")
def affichage_messages(id): # Jihad
    ''' opens the information pop-ups shown to the
    players and picks the message to display '''
    liste_message = ['draw, nobody scores a point',
                     'point for player 1, well done!',
                     'point for player 2, well done!',
                     'player 1 has won the game!',
                     'player 2 has won the game!']
messagebox.showinfo('information', liste_message[id])
def victory_check(): # Adam
    ''' checks whether one of the players has won the point '''
win = 0
    # rows
for i in range(len(MAP)):
if MAP[i][0] == MAP[i][1] == MAP[i][2] and MAP[i][0] != 0:
win = MAP[i][0]
    # columns
for i in range(len(MAP)):
if MAP[0][i] == MAP[1][i] == MAP[2][i] and MAP[0][i] != 0:
win += MAP[0][i]
    # diagonals
if (MAP[0][0] == MAP[1][1] == MAP[2][2] and MAP[1][1] != 0) or\
(MAP[2][0] == MAP[1][1] == MAP[0][2] and MAP[1][1] != 0):
win += MAP[1][1]
if win != 0:
POINTS_JOUEURS[win-1] += 1
affichage_messages(win)
fin_de_partie()
nouveau_tableau()
def nouveau_tableau(): # Adam
    ''' resets the game and refreshes the score display '''
global MAP
global TOUR_JEU
global ETAT_PARTIE
global JETONS
if 3 not in POINTS_JOUEURS:
MAP = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
TOUR_JEU = 0
ETAT_PARTIE = 0
JETONS = [3, 3]
window_transition(11)
def match_nul_check(): # Adam
    ''' declares a draw when the same board has appeared 3 times
    once the players have started moving their tokens '''
if str(MAP) in MEMORY:
REPETITION[MEMORY.index(str(MAP))] += 1
else:
MEMORY.append(str(MAP))
REPETITION.append(1)
if 3 in REPETITION:
affichage_messages(0)
nouveau_tableau()
def fin_de_partie(): # jihad
    ''' ends the game once a player reaches 3 points '''
if POINTS_JOUEURS[0] == 3:
affichage_messages(3)
window_transition(1)
if POINTS_JOUEURS[1] == 3:
affichage_messages(4)
window_transition(1)
def sauvegarder(): # jihad
    ''' saves the game in progress '''
fichier_sauvegarde = open('save', 'w')
temp = ''
for elm in MAP:
for s_elm in elm:
temp += str(s_elm)
temp += '|'
temp += str(POINTS_JOUEURS[0]) + str(POINTS_JOUEURS[1])
temp += '|'
temp += str(JETONS[0]) + str(JETONS[1])
temp += '|'
temp += str(TOUR_JEU)
temp += '|'
temp += str(ETAT_PARTIE)
    fichier_sauvegarde.write(temp)
    fichier_sauvegarde.close()  # close so the save is flushed to disk
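    # Resulting one-line save format: nine board digits, then the scores,
    # remaining tokens, turn counter and game phase, separated by '|'.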
def charger(): # jules
    ''' loads the most recently saved game '''
global MAP, POINTS_JOUEURS, JETONS, TOUR_JEU, ETAT_PARTIE
fichier = open('save', 'r')
    chaine = fichier.read()
    fichier.close()
liste = []
temp = []
for elm in chaine:
if elm != '|':
temp.append(elm)
else:
liste.append(temp)
temp = []
liste.append(temp)
MAP = [[int(liste[0][0]), int(liste[0][1]), int(liste[0][2])],
[int(liste[0][3]), int(liste[0][4]), int(liste[0][5])],
[int(liste[0][6]), int(liste[0][7]), int(liste[0][8])]]
POINTS_JOUEURS = [int(liste[1][0]), int(liste[1][1])]
JETONS = [int(liste[2][0]), int(liste[2][1])]
TOUR_JEU = int(liste[3][0])
ETAT_PARTIE = int(liste[4][0])
actualisation_graphique()
menu()
| UTF-8 | Python | false | false | 17,045 | py | 2 | TAPATAN.py | 1 | 0.551134 | 0.474668 | 0 | 531 | 31.041431 | 79 |
Aasthaengg/IBMdataset | 12,300,786,351,958 | a8d8059352b170b6d158eeca29301650ae608c19 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03221/s255221883.py | 7c371de9c4496c612c56bf71d1d38505c3449585 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n, m = map(int, input().split())
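# Approach: group each city's founding year by prefecture, sort every group,
# and a city's in-prefecture index is its bisect position + 1; print both
# numbers zero-padded to six digits.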
arr = [[] for _ in range(n)]  # birth years of the cities, grouped by prefecture
p = [0] * m
y = [0] * m
for i in range(m):
    p[i], y[i] = map(int, input().split())
    arr[p[i]-1].append(y[i])
for i in range(n):
    arr[i].sort()
import bisect
for i in range(m):
    # rank of city i within its prefecture when ordered by birth year
    num = bisect.bisect_left(arr[p[i]-1], y[i]) + 1
    a = str(p[i])
    b = str(num)
    print(a.zfill(6) + b.zfill(6))  # 6-digit prefecture id + 6-digit rank
| UTF-8 | Python | false | false | 379 | py | 202,060 | s255221883.py | 202,055 | 0.496042 | 0.477573 | 0 | 20 | 17.95 | 51 |
naveenkambham/HiddenMarkovModel_FromScratch | 9,835,475,110,320 | fe7781d3c041f1cf2275b22ab801f72d571fed5f | 0410a2f21c3627e19446addf0319603e7ae4aa5e | /HMM.py | 8a85d8e9c7472af77b00951bb9348a93844c9a51 | []
| no_license | https://github.com/naveenkambham/HiddenMarkovModel_FromScratch | d329c8be7ee157b325ce9dcb81097284ad87fea2 | 27c671c84067877df0fcafdd634842176dad5f72 | refs/heads/master | 2021-05-02T17:55:26.861849 | 2020-02-06T20:43:02 | 2020-02-06T20:43:02 | 120,655,320 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Title:Implement HMM run various work flows to understand the system.
Developer : Naveen Kambham
Description: This is a simple two state HMM model implemented using matrices. It has various workflows and methods to
Caliculate Transition, observation matrices.
"""
"""
Importing the required libraries.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
"""
PlotProabbilityDistributions(yvals,zvals,Title,Xlabel,Xlabel2): Method to plot out put distributions for each casino and each state
[in]: Yvalues - Loose count, zvals- win count, Labels for Cheat and fair states
[out]: Plot
"""
def PlotProabbilityDistributions(yvals,zvals,Title,Xlabel,Xlabel2):
N = 2
ind = np.arange(N) # the x locations for the groups
width = 0.27 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
#Use Y values and colour red for lose
rects1 = ax.bar(ind, yvals, width, color='r')
#Use Z values and colour gree for win
rects2 = ax.bar(ind+width, zvals, width, color='g')
#Setting the lables for X and Y axis
ax.set_ylabel('Probability')
ax.set_xticks(ind+width)
ax.set_xticklabels( (Xlabel, Xlabel2) )
ax.legend( (rects1[0], rects2[0]), ('Lose', 'Win') )
#Adding Title
fig.suptitle(Title)
plt.show()
"""
PlotProabbilityDistributions(yvals,zvals): Method to plot out put distributions for no of times a casino is in each state
[in]: Yvalues - Cheat count, zvals- Fair count
[out]: Plot
"""
def PlotOutputDistributions(yvals,zvals,):
N = 3
ind = np.arange(N) # the x locations for the groups
width = 0.27 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
#Use Y values and colour red for cheat state
rects1 = ax.bar(ind, yvals, width, color='r')
#Use Z values and colour green for fair state
rects2 = ax.bar(ind+width, zvals, width, color='g')
#Add labels
ax.set_ylabel('Count')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Lion', 'Dragon', 'Pyramid') )
ax.legend( (rects1[0], rects2[0]), ('Cheat', 'Fair') )
plt.show()
"""
Method to read the training data and add state columns in memory
[in]: Input file
[out]: data
"""
def readTrainingData(filepath):
#Read CSV
data = pd.read_csv(filepath, sep="\t", header=None)
#Adding Columns
data.columns = ["state", "outcome"]
return data
"""
Method to read the testing data and replace win with one and lose with zero to perform the calculations easily
[in]: Input file
[out]: data
"""
def readTestData(filepath):
f = open(filepath,'r')
filedata = f.read()
f.close()
#Replace win and loses
newdata = filedata.replace("win","1")
newdata2= newdata.replace("lose",'0')
f = open(filepath,'w')
f.write(newdata2)
f.close()
#Loading the data
data= np.loadtxt(filepath)
# print(data)
return data
"""
ComputeTransitionMatrix: Method to compute the Transition and observation matrices.
"""
def computeTransitionMatrix(data):
stateData = data['state']
#Dictionary to hold the states count
b ={}
    #Counter to count the state pairs. Here the zip function is used to create tuples of all consecutive state pairs
trans_counter=Counter(zip(stateData, stateData[1:]))
#Iterating counter to add the values to dictionaries
for (x,y), c in trans_counter.most_common():
b[x,y] = c
#creating transition matrix
temptransMatrix= np.array([[(b['cheat','cheat'])/(b['cheat','fair']+b['cheat','cheat']),(b['cheat','fair'])/(b['cheat','fair']+b['cheat','cheat'])],
[(b['fair','cheat'])/(b['fair','fair']+b['fair','cheat']),(b['fair','fair'])/(b['fair','fair']+b['fair','cheat'])]])
#using a data frame to add columns and indexes
transitionMatrixDf = pd.DataFrame(temptransMatrix,index=['cheat','fair'])
transitionMatrixDf.columns=['cheat','fair']
print("Transition Matrix:")
print(transitionMatrixDf)
#Counting States and Win Losses
    #Here the zip function is also used to create (state, outcome) tuples
obs_counter=Counter(zip(data['state'],data['outcome']))
# print(obs_counter)
#Dictionary to hold the observation counts
obs ={}
for (x,y), c in obs_counter.most_common():
obs[x,y] = c
# Creating Observation matrix
obs_matrix= np.array([[(obs['cheat','lose'])/(obs['cheat','win']+obs['cheat','lose']),(obs['cheat','win'])/(obs['cheat','win']+obs['cheat','lose'])],
[(obs['fair','lose'])/(obs['fair','win']+obs['fair','lose']),(obs['fair','win'])/(obs['fair','win']+obs['fair','lose'])]
])
obs_matrixdf = pd.DataFrame(obs_matrix,index=['cheat','fair'])
obs_matrixdf.columns=['lose','win']
print("Emission Matrix:")
print(obs_matrixdf)
return temptransMatrix,obs_matrix
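# Minimal usage sketch of computeTransitionMatrix on a tiny hand-made table.
# The state/outcome values below are hypothetical, not taken from the data sets.
def demo_transition_matrix():
    df = pd.DataFrame({'state':   ['cheat', 'cheat', 'fair', 'fair', 'cheat'],
                       'outcome': ['win',   'lose',  'lose', 'win',  'win']})
    return computeTransitionMatrix(df)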
"""
This method is to compute alpha and beta values using transition and observation matrices and then preditcing the states at each possible observation.
[in]:Transtion, Observation matrices, Observations
"""
def forward_backward_alg(A_mat, O_mat, observ):
k = observ.size
(n,m) = O_mat.shape
#initializing forward and backward place holders to store compute probabilities
prob_mat = np.zeros( (n,k) )
fw = np.zeros( (n,k+1) )
bw = np.zeros( (n,k+1) )
print(observ)
# Forward step
fw[:, 0] = 1.0/n
#Iterating all observations
for obs_ind in range(k):
#Taking current row
pi_row_vec = np.matrix(fw[:,obs_ind])
#updating the next row given the current values
fw[:, obs_ind+1] = pi_row_vec *(np.diag(O_mat[:,observ[obs_ind]]))* np.matrix(A_mat).transpose()
#Normalizing the prob values
fw[:,obs_ind+1] = fw[:,obs_ind+1]/np.sum(fw[:,obs_ind+1])
# backward step
bw[:,-1] = 1.0
#Iterating all observations from back
for obs_ind in range(k, 0, -1):
b_col_vec = np.matrix(bw[:,obs_ind]).transpose()
#Updating row based on next observation rows
bw[:, obs_ind-1] = (np.matrix(A_mat) * np.matrix(np.diag(O_mat[:,observ[obs_ind-1]])) * b_col_vec).transpose()
#Normalizing proababilities
bw[:,obs_ind-1] = bw[:,obs_ind-1]/np.sum(bw[:,obs_ind-1])
# combine Step
prob_mat = np.array(fw)*np.array(bw)
prob_mat = prob_mat/np.sum(prob_mat, 0)
    #Counter to calculate the number of times the system is in each state
cnt= Counter(prob_mat.argmax(axis=0))
#Converting from zero and ones to Cheat and Fair
for key,val in cnt.most_common(len(cnt)):
if (key == 0):
cheat= val
else:
fair= val
return prob_mat, fw, bw
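# Minimal usage sketch of the forward-backward step with toy matrices. All the
# numbers below are hypothetical; real matrices come from computeTransitionMatrix.
def demo_forward_backward():
    A_toy = np.array([[0.9, 0.1], [0.2, 0.8]])   # rows/cols: 0 = cheat, 1 = fair
    O_toy = np.array([[0.7, 0.3], [0.5, 0.5]])   # columns: 0 = lose, 1 = win
    obs = np.array([0, 0, 1, 0])                 # observed lose/win sequence
    probs, fw, bw = forward_backward_alg(A_toy, O_toy, obs)
    return probs.argmax(axis=0)                  # most likely state per step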
def main():
#input training files
inputfiles=[r'/home/naveen/Downloads/DataSets/training_Lion_1000.data.txt',
]
#Observation Files
testingfiles=[r'/home/naveen/Downloads/DataSets/testing_Dragon_1000.data.txt',
]
for i in range(0,1):
print("Transition and Emission matrices for:",inputfiles[i])
data = readTrainingData(inputfiles[i])
A,B = computeTransitionMatrix(data)
print(A)
print(B)
forward_backward_alg(A,B,readTestData(testingfiles[i]))
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 7,325 | py | 2 | HMM.py | 1 | 0.635358 | 0.626348 | 0 | 220 | 32.281818 | 154 |
idushie/Animation-school | 6,605,659,750,383 | 0e571adf285ce35a940532bbaa4a41c86ef0996b | dcef9b4da7ac67b210a14c17a565e7277c23e398 | /W_10/normal_vertex.py | 87095d23850d55620fb6037fb4ebeabd00d1c2b3 | []
| no_license | https://github.com/idushie/Animation-school | 6002ee3f79ee995ff712c09db04116f5053e28d0 | 20a9530b373aa1aaa8fc330dd59e09faedd3749f | refs/heads/master | 2020-12-01T19:37:56.400689 | 2020-02-11T16:03:58 | 2020-02-11T16:03:58 | 230,744,742 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import maya.cmds as cmds
import random
import maya.OpenMaya as OpenMaya
# Displace every vertex of the selected mesh along its normal by a random offset.
selected_mesh = cmds.ls(sl=1, l=1)[0]
sel_list = OpenMaya.MSelectionList()
sel_list.add(selected_mesh)
dp = OpenMaya.MDagPath()
sel_list.getDagPath(0, dp)
it = OpenMaya.MItMeshVertex(dp)
while not it.isDone():  # isDone() -> bool
normalVector = OpenMaya.MVector()
it.getNormal(normalVector, OpenMaya.MSpace.kWorld)
normalVector.normalize()
pos = it.position(OpenMaya.MSpace.kWorld)
new_pos = pos + normalVector * random.uniform(-1.0 , 1.0)
it.setPosition(new_pos, OpenMaya.MSpace.kWorld)
it.next()
| UTF-8 | Python | false | false | 619 | py | 51 | normal_vertex.py | 50 | 0.696284 | 0.68336 | 0 | 31 | 18.935484 | 61 |
Ulan9304/djtest | 12,987,981,146,647 | f88675f080118ea44c2fb0e183b21793e86aa9aa | 873ea03199e127fc759b580115c442f8b517349e | /landing/views.py | ee3ca55f32b136b9c50db1736b61a2e2b08f288c | []
| no_license | https://github.com/Ulan9304/djtest | 09509377d6d311240ac5cb6547ed6006bbf87773 | 94e637c231736bb1da1164af67db37c264f25291 | refs/heads/master | 2021-07-17T13:26:30.659860 | 2017-10-25T10:09:25 | 2017-10-25T10:09:25 | 107,991,450 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from products.models import *
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
# Create your views here.
def home(request):
products_images = ProductImage.objects.filter(is_active=True, is_main=True, product__is_active=True)
products_images_phones = products_images.filter(product__category__id=2)
products_images_laptop = products_images.filter(product__category__id=3)
products_images_personal = products_images.filter(product__category__id=4)
return render(request, 'home.html', locals()) | UTF-8 | Python | false | false | 596 | py | 15 | views.py | 7 | 0.736577 | 0.731544 | 0 | 16 | 36.3125 | 104 |
Sovianum/questions-answers | 17,806,934,428,714 | f41bfb28cc6a8d66db320b1667ad6f08e9e22b29 | d39d21aceb0e22f3cbe265c15654a3cecd89ccb4 | /ask-klyukvin/views.py | 2244142edb632ac64d948bf238edaff7e232ade6 | []
| no_license | https://github.com/Sovianum/questions-answers | eefa0ca4675af37ef68f148c4c8af6021014e3e3 | 01b6ce6ec175ec09194463ccf748c074877c4170 | refs/heads/master | 2017-05-27T12:02:33.113427 | 2016-12-16T11:27:36 | 2016-12-16T11:27:36 | 68,749,910 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import django.views.generic
class GetNewQuestions(django.views.generic.TemplateView):
template_name = 'pages/new_questions_page.html'
class GetNewQuestionList(django.views.generic.TemplateView):
template_name = 'pages/new_question_page.html'
class GetQuestionDetailList(django.views.generic.TemplateView):
template_name = 'pages/question_detail_page.html'
class GetTagQuestions(django.views.generic.TemplateView):
template_name = 'pages/tag_question_page.html'
class GetUserSettings(django.views.generic.TemplateView):
template_name = 'pages/user_settings_page.html'
class GetLoginPage(django.views.generic.TemplateView):
template_name = 'pages/login_page.html'
class GetRegistrationPage(django.views.generic.TemplateView):
template_name = 'pages/registration_page.html'
class GetNotLogged(django.views.generic.TemplateView):
template_name = 'pages/not_logged_new_questions_page.html'
| UTF-8 | Python | false | false | 933 | py | 43 | views.py | 16 | 0.78135 | 0.78135 | 0 | 33 | 27.272727 | 63 |
sysdeep/fsys | 5,961,414,642,976 | 7913dd64f28a867d7498eb816b95a3a00764d185 | d9804258f176b3e8f84d811cf0c388c295e16693 | /blog/models.py | 7a9875f9db8a875a2acae0d13328e687bc48c69a | []
| no_license | https://github.com/sysdeep/fsys | 7a8800ee53bff502e98f4704f86b7d5ef9a77184 | 5a0b48ff2dd7bdb5698af8de3b52223758f9b013 | refs/heads/master | 2021-01-10T20:43:56.144536 | 2012-03-27T14:57:54 | 2012-03-27T14:57:54 | 996,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#blog/models
from django.db import models
#-----------------------------------------------------------------------
#class Link(models.Model):
# """Ссылка"""
# url = models.URLField(unique=True)
# def __unicode__(self):
# return self.url
#-----------------------------------------------------------------------
from django.contrib.auth.models import User  # use Django's standard user mechanism
#-----------------------------------------------------------------------
class Note(models.Model):
"""Запись"""
title = models.CharField(max_length=200) #Имя
desc = models.TextField() #тело записи
user = models.ForeignKey(User) #кто добавил
time_c = models.DateTimeField(auto_now_add=True) #время создания
#link = models.ForeignKey(Link) #ссылка
def __unicode__(self):
return self.title
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
class Tag(models.Model):
"""Тэги"""
name = models.CharField(max_length=64, unique=True) #имя
notes = models.ManyToManyField(Note) #ссылки на запись
def __unicode__(self):
return self.name
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
class SharedNote(models.Model):
"""Расшаривание и рейтинг"""
note = models.ForeignKey(Note, unique=True)
date = models.DateTimeField(auto_now_add=True)
votes = models.IntegerField(default=1)
users_voted = models.ManyToManyField(User)
def __unicode__(self):
return u'%s, %s' % (self.note, self.votes)
#-----------------------------------------------------------------------
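# Usage sketch for these models (hypothetical data, assuming migrations are applied):
#   user = User.objects.get(username='demo')
#   note = Note.objects.create(title='First note', desc='body text', user=user)
#   tag, _ = Tag.objects.get_or_create(name='python')
#   tag.notes.add(note)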
| UTF-8 | Python | false | false | 2,045 | py | 53 | models.py | 11 | 0.413793 | 0.410136 | 0 | 54 | 34.166667 | 97 |
yunusemrex/Python-OOP | 11,304,353,944,600 | 9b6a28cc7dd8efa57135e8ff01c0c98e3de56689 | ad4fc4e21f630c634be718f3eab0955701bed183 | /Askerler.py | f6598b6f591d0456093dfe4ad64728c6f3457bd4 | []
| no_license | https://github.com/yunusemrex/Python-OOP | 9356f40ee185fda1049243d288cb449fd4db0c63 | 9b321da34033d0780f4c1cab24bbd02eb7806e3b | refs/heads/main | 2023-06-14T11:31:18.087355 | 2021-07-10T12:28:04 | 2021-07-10T12:28:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Asker:
def __init__(self,isim,guc,can,dayanlıklılık,hız,kudret):
self.isim = isim
self.guc = guc
self.can = can
self.dayanıklılık = dayanlıklılık
self.hız = hız
self.kudret = kudret
def __str__(self):
return(f"Asker Adı: {self.isim}")
def Ultimate(self):
print(f"{self.isim}, Ultimate Hazır!!!")
class Piyade(Asker):
def __init__(self, isim, guc, can, dayanlıklılık, hız, kudret, kaba_kuvvet):
super().__init__(isim, guc, can, dayanlıklılık, hız, kudret)
self.kaba_kuvvet = kaba_kuvvet
print(f"Piyadenin Adı: {self.isim}, Gücü: {self.guc}, Canı: {self.can}, Dayanıklılığı: {self.dayanıklılık}, Hızı: {self.hız}, Kudret Puanı: {self.kudret}")
print(f"Ultimate {self.kaba_kuvvet}!")
class Suvari(Asker):
def __init__(self, isim, guc, can, dayanlıklılık, hız, kudret, kudretli_saldiri):
super().__init__(isim, guc, can, dayanlıklılık, hız, kudret)
self.kudretli_saldiri = kudretli_saldiri
print(f"Suvarinin Adı: {self.isim}, Gücü: {self.guc}, Canı: {self.can}, Dayanıklılığı: {self.dayanıklılık}, Hızı: {self.hız}, Kudret Puanı: {self.kudret}")
print(f"Ultimate {self.kudretli_saldiri}!")
class Okcu(Asker):
def __init__(self, isim, guc, can, dayanlıklılık, hız, kudret,zehirli_ok):
super().__init__(isim, guc, can, dayanlıklılık, hız, kudret)
self.zehirli_ok = zehirli_ok
print(f"Okcunun Adı: {self.isim}, Gücü: {self.guc}, Canı: {self.can}, Dayanıklılığı: {self.dayanıklılık}, Hızı: {self.hız}, Kudret Puanı: {self.kudret}")
print(f"Ultimate {self.zehirli_ok}!")
class Gozcu(Asker):
def __init__(self, isim, guc, can, dayanlıklılık, hız, kudret, kılık_degistirme):
super().__init__(isim, guc, can, dayanlıklılık, hız, kudret)
self.kılık_degistirme = kılık_degistirme
print(f"Gozcunun Adı: {self.isim}, Gücü: {self.guc}, Canı: {self.can}, Dayanıklılığı: {self.dayanıklılık}, Hızı: {self.hız}, Kudret Puanı: {self.kudret}")
print(f"Ultimate {self.kılık_degistirme}!")
class Bombaci(Asker):
def __init__(self, isim, guc, can, dayanlıklılık, hız, kudret,dinamit):
super().__init__(isim, guc, can, dayanlıklılık, hız, kudret)
self.dinamit = dinamit
print(f"Bombacının Adı: {self.isim}, Gücü: {self.guc}, Canı: {self.can}, Dayanıklılığı: {self.dayanıklılık}, Hızı: {self.hız}, Kudret Puanı: {self.kudret}")
print(f"Ultimate {self.dinamit}!")
Soldier1 = Asker("Soldier",120,120,130,150,20)
#Piyade = Piyade("Mighty King",150,220,100,150,50,'Active')
#Suvari = Suvari("Mounted Cavalry",130,200,120,300,50,'Active')
#Okcu = Okcu("Archer Queen",110,160,80,120,50,'Active')
#Gözcü = Gozcu("Intelligence Officer",30,100,40,400,10,'Active')
#Bombacı = Bombaci("Demolition Master",50,150,35,200,20,'Active')
#Piyade.Ultimate()
#Soldier1.Ultimate()
| UTF-8 | Python | false | false | 3,048 | py | 4 | Askerler.py | 3 | 0.641203 | 0.613204 | 0 | 61 | 46.409836 | 164 |
arjun180/Kaggle-Titanic-Machine-Learning | 17,892,833,774,626 | 2ecf74ad65dedf2e115d8b36c30a54d72a63f565 | a5701eb2169db2dfac3bfa44af8a0f169885026e | /Models/titanic.py | cad48632fb24db5555a89c19eda165f2d1ec904f | []
| no_license | https://github.com/arjun180/Kaggle-Titanic-Machine-Learning | 0be7a75408d2d3749a307ef362808327326e3eec | 85fa7ca4931557daa1bfaf03eb9d578bb52e0843 | refs/heads/master | 2021-01-10T13:55:08.311476 | 2015-11-12T01:05:56 | 2015-11-12T01:05:56 | 46,019,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
# matplotlib inline
import numpy as np
import pandas as pd
# import statsmodels.api as sm
# from statsmodels.nonparametric.kde import KDEUnivariate
# from statsmodels.nonparametric import smoothers_lowess
from pandas import Series, DataFrame
# from patsy import dmatrices
from sklearn import datasets, svm
df = pd.read_csv('/Users/arjun/Documents/Titanic/train.csv', header=0)
# To see the age ,sex and ticket columns
df[['Sex','Age','Ticket']]
# Observing the values which are greater than 60
df[df['Age'] > 60][['Sex', 'Pclass', 'Age', 'Survived']]
# Printing the rows whose age values are null
df[df['Age'].isnull()][['Sex', 'Pclass', 'Age']]
# Plotting the survival counts.
plt.figure(figsize=(6,4))
# fig, ax = plt.subplots()
df.Survived.value_counts().plot(kind='barh', color="blue", alpha=.65)
# set_ylim(-1, len(df.Survived.value_counts()))
plt.title("Survival Breakdown (1 = Survived, 0 = Died)")
# Plotting the number of passengers per boarding location
plt.figure(figsize=(6,4))
# fig, ax = plt.subplots()
df.Embarked.value_counts().plot(kind='bar', alpha=0.55)
# set_xlim(-1, len(df.Embarked.value_counts()))
# specifies the parameters of our graphs
plt.title("Passengers per boarding location")
# A scatter plot of survival against age
plt.scatter(df.Survived, df.Age, alpha=0.55)
# sets the y axis label
plt.ylabel("Age")
# formats the grid line style of our graphs
plt.grid(b=True, which='major', axis='y')
plt.title("Survival by Age, (1 = Survived)")
plt.show()
# Bar plots of who survived, split by gender (raw counts and proportions)
fig = plt.figure(figsize =(18,6))
ax1 = fig.add_subplot(121)
df.Survived[df.Sex == 'male'].value_counts().plot(kind='barh',label='Male')
df.Survived[df.Sex == 'female'].value_counts().plot(kind='barh', color='#FA2379',label='Female')
plt.title("Who Survived? with respect to Gender, (raw value counts) "); plt.legend(loc='best')
ax2 = fig.add_subplot(122)
(df.Survived[df.Sex == 'male'].value_counts()/float(df.Sex[df.Sex == 'male'].size)).plot(kind='barh',label='Male')
(df.Survived[df.Sex == 'female'].value_counts()/float(df.Sex[df.Sex == 'female'].size)).plot(kind='barh', color='#FA2379',label='Female')
plt.title("Who Survived? with respect to Gender, (proportions) "); plt.legend(loc='best')
plt.show()
| UTF-8 | Python | false | false | 2,388 | py | 2 | titanic.py | 2 | 0.694724 | 0.677554 | 0 | 70 | 32.957143 | 137 |
anyuanay/MOFtextminer | 3,624,952,403,526 | 6a2a05b92687d7bd770c8d2addfb008cedb1c1ff | f92a9a6a271b69c65ffa1b8fb4c2e178c9816260 | /doc/storage/abbreviation_storage.py | bf64755ad1c788a4d3ddcae22b0596313bb04c73 | []
| no_license | https://github.com/anyuanay/MOFtextminer | cf6c34928fbcc34fa211e69ed445564c4a718872 | 2f056e2ac0e41f5fc927cadd67b14679b95f03d1 | refs/heads/main | 2023-07-11T15:21:26.173701 | 2021-08-24T17:07:21 | 2021-08-24T17:07:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from functools import reduce
import numpy as np
import regex
from pathlib import Path
import json
from chemdataextractor.doc import Paragraph
from fuzzywuzzy import fuzz
from doc.utils import _change_to_regex_string
from error import DatabaseError
class CounterABB(object):
    def __init__(self, abb_type=None, trainable=True):
        self.count = np.zeros(2, dtype='int16')
        self.Trainable = True
        self.ABB_type = None
        # update() computes ABB_type; initialise it first so it is not clobbered
        self.update(abb_type, trainable)
def __repr__(self):
return "({}/{})".format(self.count[0], self.count[1])
def update(self, abb_type=None, trainable=True):
if not trainable:
self.Trainable = False
self.ABB_type = abb_type
if abb_type:
self.count = np.array([0, -1], dtype='int8')
else:
self.count = np.array([-1, 0], dtype='int8')
if not self.Trainable:
return self.ABB_type
        if isinstance(abb_type, list):
            for types in abb_type:
                self.update(types)
            return self.ABB_type
if self.checking(abb_type):
self.count[0] += 1
else:
self.count[1] += 1
self.ABB_type = self.abb_type_checking()
return self.ABB_type
def abb_type_checking(self):
label = ["CM", None]
index = int(np.argmax(self.count))
return label[index]
@staticmethod
def checking(types=None):
if types == "CM":
return True
else:
return False
class Abbreviation(object):
def __init__(self, abb_name, abb_def, abb_type_original=None, trainable=True):
self.ABB_def = abb_def
self.ABB_name = abb_name
self.ABB_type = None
self.ABB_class, self.ABB_class_type = [], []
self.update(abb_def, abb_type_original, trainable=trainable)
@staticmethod
def _check_abb_type(abb_def):
checking_string = abb_def
checking_string = regex.sub(r"\b-\b", " - ", checking_string)
checking = Paragraph(checking_string).cems
if checking:
abb_type = 'CM'
else:
abb_type = None
return abb_type
def _check_validation(self, abb_def, abb_type):
abb_name = self.ABB_name
abb_front_char = reduce(lambda x, y: x + "".join(regex.findall(r"^\S|[A-Z]", y)),
regex.split(r",|\s|-", abb_def), "")
if abb_name[-1] == 's' and abb_def[-1] == 's':
abb_front_char += 's'
ratio = fuzz.ratio(abb_name.lower(), abb_front_char.lower())
return ratio > 70 or abb_type == 'CM'
def __repr__(self):
return "(" + ") / (".join(self.ABB_class) + ")"
def __eq__(self, other):
if isinstance(other, str):
return self.ABB_name == other
elif isinstance(other, Abbreviation):
return self.ABB_name == other.ABB_name
else:
return False
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.ABB_class)
def __getitem__(self, key):
return self.ABB_class[key]
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def change_abb_type(self, abb_def, abb_type):
self.update(abb_def, abb_type, trainable=False)
def update(self, abb_def, abb_type_original=None, trainable=True):
abb_type = abb_type_original
if isinstance(abb_def, list):
for def_ in abb_def:
self.update(def_, abb_type)
return
for i, classification in enumerate(self.ABB_class):
result = self.compare(abb_def, classification)
if result > 70:
self.ABB_class_type[i].update(abb_type, trainable=trainable)
self.ABB_def = classification
self.ABB_type = self.ABB_class_type[i].ABB_type
return None
self.ABB_class.append(abb_def)
self.ABB_class_type.append(CounterABB(abb_type, trainable))
self.ABB_def = abb_def
self.ABB_type = abb_type
@staticmethod
def compare(text1, text2):
return fuzz.ratio(text1.lower(), text2.lower())
class AbbStorage(object):
def __init__(self):
self.name_to_abb = {}
self.def_to_name = {}
self.name = "abbreviation"
def __repr__(self):
return self.name
def __len__(self):
return len(self.name_to_abb)
def __getitem__(self, item):
if item in self.name_to_abb:
return self.name_to_abb[item]
elif item in self.def_to_name:
return self.def_to_name[item]
else:
raise DatabaseError(f'{item} not in {self.name} storage')
def __contains__(self, item):
return item in self.def_to_name or item in self.name_to_abb
def get(self, k, d=None):
try:
return self[k]
        except (KeyError, DatabaseError):
return d
def get_abbreviation(self, item, d=None):
try:
abb_name = self.def_to_name[item]
return self.name_to_abb[abb_name]
        except KeyError:
return d
def get_name(self, item, d=None):
try:
return self.def_to_name[item]
        except KeyError:
return d
def keys(self):
return self.name_to_abb.keys()
def values(self):
return self.name_to_abb.values()
def items(self):
return self.name_to_abb.items()
def append(self, abb_name, abb_def, abb_type, trainable=True):
if abb_name in self.name_to_abb:
new_abb = self.name_to_abb[abb_name]
new_abb.update(abb_def, abb_type, trainable)
self.def_to_name[abb_def] = abb_name
else:
new_abb = Abbreviation(abb_name, abb_def, abb_type, trainable)
if not len(new_abb):
return None
self.name_to_abb[abb_name] = new_abb
self.def_to_name[abb_name] = abb_name
self.def_to_name[abb_def] = abb_name
return new_abb
@property
def abb_regex(self):
regex_pattern = _change_to_regex_string(self.def_to_name.keys(), return_as_str=True)
return regex_pattern
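# Minimal usage sketch of AbbStorage (hypothetical abbreviation, illustration
# only; 'CM' is the type tag recognised by CounterABB.checking).
def _demo_abb_storage():
    storage = AbbStorage()
    storage.append('DMF', 'dimethylformamide', 'CM')
    name = storage.get_name('dimethylformamide')         # -> 'DMF'
    abb = storage.get_abbreviation('dimethylformamide')  # Abbreviation object
    return name, abb.ABB_type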
def read_abbreviation_from_json(path, trainable=False):
"""
abb_database = read_abbreviation_from_json(file_path)
json file must be list of tuple -> [(ABB_name, ABB_definition, ABB_type), .. ]
:param path: path of json
:param trainable: If True, type of abbreviation can be changed. (False is recommended)
:return: <class MOFDICT.doc.storage.AbbStorage>
"""
path_ = Path(str(path))
if not path_.exists():
raise FileNotFoundError
elif path_.suffix not in ['.json']:
raise TypeError(f'expected json, but {path_.suffix}')
with open(str(path_), 'r') as f:
list_of_abbreviation = json.load(f)
return read_abbreviation_from_list(list_of_abbreviation, trainable)
def read_abbreviation_from_list(list_of_abbreviation, trainable=False):
"""
abb_database = read_abbreviation_from_file([('ASAP', 'as soon as possible', None),
('DKL', 'depolymerised Kraft lignin', 'CM')])
:param list_of_abbreviation: (json) list of tuple (ABB_name, ABB_definition, ABB_type).
ABB_type must be None or 'CM'
:param trainable: If True, type of abbreviation can be changed. (False is recommended)
:return: <class MOFDICT.doc.storage.AbbStorage>
"""
storage = AbbStorage()
for abb_tuple in list_of_abbreviation:
if len(abb_tuple) != 3:
raise TypeError('input must be list of tuple : (ABB_name, ABB_definition, ABB_type)')
abb_name, abb_def, abb_type = abb_tuple
if isinstance(abb_name, str) and isinstance(abb_def, str):
storage.append(abb_name, abb_def, abb_type, trainable)
else:
raise TypeError('input must be list of tuple : (ABB_name, ABB_definition, ABB_type)')
return storage
| UTF-8 | Python | false | false | 8,142 | py | 39 | abbreviation_storage.py | 27 | 0.571604 | 0.568411 | 0 | 268 | 29.380597 | 97 |
suhanree/tweet-hashtag-graph-analysis | 13,932,873,921,556 | e658a1cd5b8e08588551d14701719a9a9f7bcbce | 52e9b103f5e6fca4cb1a39c64828d06ddfe2cdc1 | /src/average_degree.py | d984ecc03f7825c37f5aee9e24cc530713311700 | []
| no_license | https://github.com/suhanree/tweet-hashtag-graph-analysis | 28bf04478792e2345396dcb3512cc0eb0c618316 | b0200e746be0ec0efd1a53fc0436ce07be844383 | refs/heads/master | 2021-01-10T03:08:24.972861 | 2016-04-14T16:46:42 | 2016-04-14T16:46:42 | 55,037,612 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Python codes to run this average degree problem.
import sys
import json
import time
from graph import TimeWindowGraph
def extract_data(json_data):
"""
To extract timestamp (from 'created_at') and a list of hashtags for each tweet.
Input:
        json_data (dict): parsed JSON object for a single tweet
Output:
timestamp (int): timestamp for the tweet
hashtags (list of str): a list of hashtags (sorted)
"""
try:
str_time = json_data['created_at']
timestamp = int(time.mktime(time.strptime(str_time, \
"%a %b %d %H:%M:%S +0000 %Y")) + 0.5)
# 0.5 is added to make sure it has the correct integer value.
except KeyError:
# If 'created_at' field does not exist, set timestamp as 0.
#print "KeyError for the key, created_at : timestamp will be 0."
timestamp = 0
try:
hashtags = sorted([data['text'] for data in
json_data['entities']['hashtags']])
except KeyError as key:
# If there is no hashtag-related field, there is no hashtag for this tweet
print "KeyError for the key,", key, ": no hashtag will be used."
hashtags = []
return timestamp, hashtags
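# Shape of the minimal tweet dict that extract_data expects (hypothetical tweet,
# with 'created_at' in the strptime format used above):
def demo_extract_data():
    tweet = {"created_at": "Thu Oct 29 17:51:01 +0000 2015",
             "entities": {"hashtags": [{"text": "Apache"}, {"text": "Hadoop"}]}}
    return extract_data(tweet)  # -> (epoch_seconds, ['Apache', 'Hadoop'])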
def main(input_filename, output_filename):
"""
Main function to run the program
"""
# Size of the window
window_size = 60
# Creating the graph for hashtag object
gr = TimeWindowGraph(window_size=window_size)
time_threshold = gr.current_time - window_size
with open(input_filename, 'r') as f_in: # Opening input file to get tweets
with open(output_filename, 'w') as f_out: # Opening output file
for line in f_in: # For every tweet
json_data = json.loads(line) # dict representing tweet
                # Checking for control data (if there are fewer than 3 fields).
# In those cases, we will skip the data.
if len(json_data) < 3:
continue
# Extract timestamp (int) and a list of hashtags (str, case
# sensitive)
(timestamp, hashtags) = extract_data(json_data)
# Check the timestamp first.
if timestamp <= time_threshold: # too old for our graph.
continue # do nothing for this tweet.
elif timestamp > gr.current_time: # becomes the most recent tweet.
# Set current_time for the graph
# (it will remove old links older than threshold also)
gr.set_current_time(timestamp)
# New links (for all possible pairs of hashtags) are added here.
num_hashtags = len(hashtags)
for i in range(num_hashtags):
for j in range(i+1, num_hashtags):
# First, check for duplicate hashtags
if hashtags[i] == hashtags[j]:
continue
# Second, try to add both nodes (this method will do nothing
# if the given node already exists)
gr.add_node(hashtags[i])
gr.add_node(hashtags[j])
# Third, (1) try to find if the link already exists;
# and (2) if so, what the timestamp of that link is.
# Here timestamp (epoch time) is a non-negative
# integer, so -1 indicates there is no link.
timestamp_link = gr.check_link(hashtags[i], hashtags[j])
if timestamp_link < 0: # No link exists.
gr.add_link(hashtags[i], hashtags[j], timestamp)
elif timestamp_link < timestamp: # old link exists.
gr.update_link(hashtags[i], hashtags[j], timestamp)
# Now writes the degree information to the output file
f_out.write("%.2f\n"% gr.average_degree())
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: python src/average_degree.py", \
"./tweet_input/tweets.txt ./tweet_output/output.txt"
sys.exit()
main(sys.argv[1], sys.argv[2])
| UTF-8 | Python | false | false | 4,267 | py | 6 | average_degree.py | 3 | 0.544411 | 0.538786 | 0 | 103 | 40.427184 | 84 |
mumichae/drop | 10,823,317,590,062 | 880df7376c3a6a6a434f8f1b7ae7c7f58ba507ed | b9d94a109d2f42fd2f9d02eeb387615e8af963ce | /tests/pipeline/test_AS.py | f26dcea414730bc6ee4a318771ed17c9774907be | [
"MIT"
]
| permissive | https://github.com/mumichae/drop | 2701c642691140a44a36ca63ee8b1e2ec10bbf42 | 394f0e28e8b49a9be55ab0e46c4b75552babd4a0 | refs/heads/master | 2021-07-13T23:11:37.285898 | 2020-10-21T06:59:44 | 2020-10-21T06:59:44 | 230,656,194 | 1 | 1 | MIT | true | 2020-09-30T19:06:34 | 2019-12-28T19:34:23 | 2020-09-27T06:40:44 | 2020-09-30T19:06:33 | 100,574 | 1 | 1 | 0 | R | false | false | from tests.common import *
class Test_AS_Pipeline:
@pytest.fixture(scope="class")
def pipeline_run(self, demo_dir):
LOGGER.info("run aberrant splicing pipeline")
pipeline_run = run(["snakemake", "aberrantSplicing", "-j", CORES], demo_dir)
assert "Finished job 0." in pipeline_run.stderr
return pipeline_run
@pytest.mark.usefixtures("pipeline_run")
def test_counts(self, demo_dir):
cnt_file = "Output/processed_data/aberrant_splicing/datasets/savedObjects/raw-fraser/fds-object.RDS"
r_cmd = """
library(FRASER)
fds <- loadFraserDataSet(file="{}")
print(fds)
""".format(cnt_file)
r = runR(r_cmd, demo_dir)
assert "Number of samples: 10" in r.stdout
assert "Number of junctions: 81" in r.stdout
assert "Number of splice sites: 9" in r.stdout
@pytest.mark.usefixtures("pipeline_run")
def test_results(self, demo_dir):
results_dir = "Output/processed_data/aberrant_splicing/results"
r = run(f"wc -l {results_dir}/fraser_results_per_junction.tsv", demo_dir)
assert "88 " in r.stdout
r = run(f"wc -l {results_dir}/fraser_results.tsv", demo_dir)
assert "1 " in r.stdout
| UTF-8 | Python | false | false | 1,268 | py | 49 | test_AS.py | 41 | 0.618297 | 0.611199 | 0 | 32 | 38.625 | 108 |
araghukas/nwlattice | 11,441,792,885,222 | 4c5e413ae85045217032767d70db02cccd514e6b | 4ed0c60aa1df2877df9c2ae3a829fea65f8706e8 | /nwlattice/sizes.py | 4e0f0a8c9002570abff0e4b5f30264357e1267c3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | https://github.com/araghukas/nwlattice | 2c5554153b32061c9090b99f8c3a24d7709140da | 443d0a000c2b68cd99070245eede032e912c1a40 | refs/heads/master | 2023-08-22T05:49:43.920452 | 2021-10-25T05:21:56 | 2021-10-25T05:21:56 | 314,936,613 | 1 | 0 | null | false | 2020-11-22T02:33:23 | 2020-11-22T01:26:48 | 2020-11-22T02:26:56 | 2020-11-22T02:33:22 | 6,868 | 0 | 0 | 0 | Python | false | false | SIZE_PROPERTIES = [
"scale",
"width",
"length",
"unit_dz",
"period",
"fraction",
"area",
"n_xy",
"nz",
"q",
"indexer"
]
class NanowireSizeCompound:
"""
A size container that combines one or more NanowireSize objects
"""
def __init__(self, **kwargs):
for k in kwargs:
if k in SIZE_PROPERTIES:
self.__setattr__(k, kwargs[k])
def __str__(self):
s = "<NanowireSize instance>\n"
s_args = []
props = self.props()
for prop, val in props.items():
try:
if int(val) == val:
s_args.append("<\t{:<10}: {:<15,d}>".format(prop, int(val)))
else:
s_args.append("<\t{:<10}: {:<15,.2f}>".format(prop, val))
except TypeError:
s_args.append("<\t{:<10}: {}>".format(prop, val))
s += "\n".join(s_args)
return s
def props(self):
p_dict = {}
for prop in SIZE_PROPERTIES:
if hasattr(self, prop):
p_dict[prop] = self.__getattribute__(prop)
return p_dict
class PlaneSize(object):
"""
A size information handler for planar lattices
"""
def __init__(self, scale, n_xy=None, width=None):
"""
:param scale: lattice scale (ex: lattice constant)
:param n_xy: structure specific integer thickness index indicating width
        :param width: width in the same units as `scale`
"""
if not (width or n_xy):
raise ValueError("must specify either `n_xy` or `width`")
if scale <= 0:
raise ValueError("`scale` must be a positive number")
self._scale = scale
if n_xy is not None and n_xy <= 0:
raise ValueError("`n_xy` must be a positive integer")
self._n_xy = n_xy
if width is not None and width <= 0:
raise ValueError("`width` must be a positive number")
self._width = width
# size calculator functions
self._n_xy_func = None
self._width_func = None
self._area_func = None
def __str__(self):
return (self.__repr__() + "\n"
"scale: {:<20}\n"
"n_xy : {:<20}\n"
"width: {:<20}\n"
"area : {:<20}"
).format(self.scale, self.n_xy, self.width, self.area)
@property
def n_xy(self):
if self._n_xy is None:
self._n_xy = self._n_xy_func(self.scale, self._width)
return self._n_xy
@property
def width(self):
return self._width_func(self.scale, self.n_xy)
@property
def scale(self):
return self._scale
@property
def area(self):
return self._area_func(self.scale, self.n_xy)
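# Minimal usage sketch of PlaneSize. The *_func calculator attributes are
# normally assigned by the lattice classes that own the size object; the toy
# lambdas below are hypothetical stand-ins just to show the flow.
def _demo_plane_size():
    ps = PlaneSize(scale=1.0, width=10.0)
    ps._n_xy_func = lambda scale, width: int(round(width / scale))  # toy rule
    ps._width_func = lambda scale, n_xy: scale * n_xy
    ps._area_func = lambda scale, n_xy: (scale * n_xy) ** 2         # toy rule
    return ps.n_xy, ps.width, ps.area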
class NanowireSize(PlaneSize):
"""
A size information handler for nanowire lattices
"""
def __init__(self, scale, unit_dz, n_xy=None, nz=None,
width=None, length=None):
"""
:param scale: lattice scale (ex: lattice constant)
:param nz: number of planes stacked along z-axis
:param n_xy: structure specific integer thickness index indicating width
        :param length: length in the same units as `scale`
        :param width: width in the same units as `scale`
"""
super().__init__(scale, n_xy, width)
if not (nz or length):
raise ValueError("must specify either `nz` or `length`")
self._unit_dz = unit_dz
self._nz = nz
self._length = length
# size calculator functions
self._nz_func = None
self._length_func = None
def __str__(self):
s = "<NanowireSize instance>\n"
s_args = []
props = self.props()
for prop, val in props.items():
try:
if int(val) == val:
s_args.append("<\t{:<10}: {:<15,d}>".format(prop, val))
else:
s_args.append("<\t{:<10}: {:<15,.2f}>".format(prop, val))
except TypeError:
s_args.append("<\t{:<10}: {}>".format(prop, val))
s += "\n".join(s_args)
return s
def props(self):
p_dict = {}
for prop in SIZE_PROPERTIES:
if hasattr(self, prop):
p_dict[prop] = self.__getattribute__(prop)
return p_dict
@property
def area(self):
return self._area_func()
@property
def unit_dz(self):
return self._unit_dz
@property
def nz(self):
if self._nz is None:
self._nz = self._nz_func(self.scale, self._length, self.unit_dz)
return self._nz
@property
def length(self):
return self._length_func(self.scale, self.nz, self.unit_dz)
def fix_nz(self, nz):
self._nz = nz
class NanowireSizeRandom(NanowireSize):
def __init__(self, scale, unit_dz, fraction, n_xy=None, nz=None,
width=None, length=None):
super().__init__(scale, unit_dz, n_xy, nz, width, length)
self._fraction = fraction
@property
def fraction(self):
return self._fraction
class NanowireSizePeriodic(NanowireSize):
"""
A size information handler for periodic nanowire lattices
"""
def __init__(self, scale, unit_dz, n_xy=None, nz=None, q=None,
width=None, length=None, period=None):
super().__init__(scale, unit_dz, n_xy, nz, width, length)
if q is None and period is None:
raise ValueError("must specify either `q` or `period`")
elif q == 0:
raise ValueError("`q` set to zero")
elif period == 0:
raise ValueError("`period` set to zero")
self._q = q
self._q_func = None
self._period = period
self._period_func = None
@property
def q(self):
if self._q is None:
self._q = self._q_func(self.scale, self._period)
return self._q
@property
def period(self):
return self._period_func(self.scale, self.q)
class NanowireSizeArbitrary(NanowireSize):
"""
A size information handler for arbitrary nanowire lattices
"""
def __init__(self, scale, unit_dz, n_xy=None, nz=None,
width=None, length=None):
super().__init__(scale, unit_dz, n_xy, nz, width, length)
self._index = None
self._indexer = None
@property
def index(self):
if self._index is None:
new_nz, self._index = self._indexer(self.nz)
if new_nz: # option to bypass forcing nz change
self._nz = new_nz
return self._index
@property
def indexer(self):
return self._indexer
def invert_index(self):
self._index = [self.nz - idx for idx in self._index][::-1]
| UTF-8 | Python | false | false | 6,910 | py | 16 | sizes.py | 14 | 0.52026 | 0.514616 | 0 | 245 | 27.204082 | 80 |
LaurentMT/pybargain_protocol | 13,314,398,652,281 | 232583caa9c483c4ecff9354638e7e6f60d97c43 | 15b7a21cabcde179622a7fdbdbe2f5a8c9e9bf43 | /pybargain_protocol/helpers/build_check_tx.py | 7616715fe7866d450bd4d52626a5fee353b50d0c | [
"MIT"
]
| permissive | https://github.com/LaurentMT/pybargain_protocol | da1ac5f1cf70260b3a91f96aa70610de7c751ba8 | 3b4c6040ec3562ce6921f917c97a9931d5c6e5de | refs/heads/master | 2020-05-17T18:00:35.674117 | 2015-04-26T03:54:43 | 2015-04-26T03:54:43 | 23,262,899 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
'''
Version: 0.0.1
Python library for the bargaining protocol
'''
import copy
import math
from bitcoin.transaction import *
from pybargain_protocol.constants import MAINNET, MAGIC_BYTES_TESTNET, TESTNET, MAGIC_BYTES_MAINNET
# BITCOIN CONSTANTS
MIN_TX_SIZE = 100
MAX_BLOCK_SIZE = 1048576
MAX_MONEY_RANGE = 2100000000000000
SATOSHIS_TO_BITCOIN = 100000000
def build_tx_with_change(inputs,
outputs,
amount = 0,
fees = 0,
change_addr = ''):
'''
Builds a transaction with an additional change output if necessary
if amount + fees < sum(inputs['amount']) then adds an output with:
* output['amount'] = sum(inputs['amount']) - amount - fees
* output['script'] = script(change_addr)
Parameters:
inputs = list of inputs ([{'output': u'txhash:vindex', 'value': ..., 'privkey': ...}])
outputs = list of outputs ([{'amount': ..., 'script': ...}])
amount = amount proposed by the buyer
fees = fees for this transaction
change_addr = change address used if necessary
'''
outputs_cp = copy.deepcopy(outputs)
# Computes the sum of inputs
sum_inp = sum([i['value'] for i in inputs])
# Creates a change output if necessary (and if we have a change address)
if (amount + fees < sum_inp) and change_addr:
change = sum_inp - amount - fees
script = address_to_script(change_addr)
outputs_cp.append({'amount': change, 'script': script})
# Builds the tx
tx = {'locktime': 0, 'version': 1, 'ins': [], 'outs': []}
for i in inputs:
i = i['output']
tx['ins'].append({'outpoint': {'hash': i[:64], 'index': int(i[65:])},
'script': '',
'sequence': 4294967295})
for o in outputs_cp:
tx['outs'].append({'script': o['script'], 'value': o['amount']})
tx = serialize(tx)
# Signs the tx
for i in range(len(inputs)):
tx = sign(tx, i, inputs[i]['privkey'])
return tx
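# Sketch of the expected argument shapes for build_tx_with_change. The txid, key
# and amounts are hypothetical placeholders, and it is assumed that pybitcointools'
# sha256/privtoaddr helpers are re-exported by the wildcard import above (as
# pubtoaddr is).
def _demo_build_tx_with_change():
    priv = sha256('demo key')  # throwaway deterministic key
    addr = privtoaddr(priv)
    inputs = [{'output': 'aa' * 32 + ':0', 'value': 150000, 'privkey': priv}]
    outputs = [{'amount': 100000, 'script': address_to_script(addr)}]
    # 150000 in - 100000 out - 10000 fees => 40000 change back to addr
    return build_tx_with_change(inputs, outputs, amount=100000, fees=10000,
                                change_addr=addr)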
def check_tx(tx):
'''
Checks validity of a transaction
according to some of the rules defined in https://en.bitcoin.it/wiki/Protocol_rules#.22tx.22_messages
Parameters:
tx = transaction to be checked
'''
if (not tx) or (tx is None): return False
# Deserializes the tx
if type(tx) == dict:
txjson = tx
txser = serialize(tx)
else:
txjson = deserialize(tx)
txser = tx
    # 2. Make sure neither the in nor the out list is empty
if txjson['ins'] is None or len(txjson['ins']) == 0: return False
if txjson['outs'] is None or len(txjson['outs']) == 0: return False
# 3. Size in bytes < MAX_BLOCK_SIZE
if len(txser) >= MAX_BLOCK_SIZE: return False
# 4. Each output value, as well as the total, must be in legal money range
sum_outs = 0
for o in txjson['outs']:
if (o['value'] < 0) or (o['value'] > MAX_MONEY_RANGE): return False
else: sum_outs += o['value']
if sum_outs > MAX_MONEY_RANGE: return False
    # 5. Make sure none of the inputs have hash=0, n=-1 (coinbase transactions)
    # (after deserialization the hash is a hex string, so an all-zero hash is truthy)
    for i in txjson['ins']:
        if i['outpoint']['hash'] == '00'*32 and i['outpoint']['index'] in (-1, 0xFFFFFFFF):
            return False
# 6. Check that nLockTime <= INT_MAX[1], size in bytes >= 100[2]
if txjson['locktime'] >= math.pow(2,32): return False
if len(txser) < MIN_TX_SIZE: return False
return True
def check_tx_signatures(tx, network = MAINNET):
'''
Checks validity of tx signatures
Supports P2PH and P2SH (n-of-m signatures)
Returns True if valid, False otherwise
Parameters:
tx = transaction
network = network used
'''
magicbytes = MAGIC_BYTES_TESTNET if network == TESTNET else MAGIC_BYTES_MAINNET
# Gets the tx in serialized/deserialized forms
if type(tx) == dict:
txjson = tx
txser = serialize(tx)
else:
txjson = deserialize(tx)
txser = tx
# Checks each tx input
for i in range(len(txjson['ins'])):
try:
# Deserializes the input scriptsig
scr_sig = deserialize_script(txjson['ins'][i]['script'])
if len(scr_sig) == 2:
# P2PH script
# Computes script pubkey associated to input
scr_pubkey = address_to_script(pubtoaddr(scr_sig[1], magicbytes))
# Verifies input signature
if not verify_tx_input(txser, i, scr_pubkey, scr_sig[0], scr_sig[1]): return False
elif len(scr_sig) >= 3:
# P2SH script
# Extract signatures
# (first item is 0; subsequent are sigs; filter out empty placeholder sigs)
sigs = [s for s in scr_sig[1:-1] if s]
# Extracts scriptpubkey (last item)
scr_pubkey_hex = scr_sig[-1]
scr_pubkey = deserialize_script(scr_pubkey_hex)
# Extracts n (required number of signatures)
n = scr_pubkey[0]
# Extracts pubkeys
# (first item is n, -2 is m, -1 is multisig op; we get everything else (the pubkeys))
pubkeys = scr_pubkey[1:-2]
# Checks signatures and number of valid signatures
nbsig = 0
for pubkey in pubkeys:
for sig in sigs:
if verify_tx_input(txser, i, scr_pubkey_hex, sig, pubkey):
nbsig += 1
break
if nbsig < n: return False
else:
# Not implemented or invalid scriptsig
return False
except:
return False
return True
def check_inputs_unicity(txs):
'''
Checks that inputs are unique among the given transactions
Parameters:
txs = list of transactions
'''
txos = set()
for tx in txs:
txjson = tx if (type(tx) == dict) else deserialize(tx)
for i in range(len(txjson['ins'])):
inp_hash = txjson['ins'][i]['outpoint']['hash']
inp_idx = txjson['ins'][i]['outpoint']['index']
txo = inp_hash + ':' + str(inp_idx)
if txo in txos: return False
else: txos.add(txo)
return True
def check_outputs_exist(txs, outputs):
'''
Checks occurences of a list of outputs among a list of transactions
Returns True if all outputs appear in a transaction of the given list, False otherwise
Parameters:
txs = list of transactions
outputs = list of outputs [{'amount': ..., 'script': ...}]
'''
outp_set = set([o['script'] + ':' + str(o['amount']) for o in outputs])
for tx in txs:
txjson = tx if (type(tx) == dict) else deserialize(tx)
for o in txjson['outs']:
outp = o['script'] + ':' + str(o['value'])
if outp in outp_set:
outp_set.remove(outp)
return True if len(outp_set) == 0 else False
def scriptsig_to_addr(scr_sig, network = MAINNET):
'''
Returns the address corresponding to a given scriptsig
Parameters:
scr_sig = script sig
network = network used
'''
magicbytes = MAGIC_BYTES_TESTNET if network == TESTNET else MAGIC_BYTES_MAINNET
    if not isinstance(scr_sig, list):
        scr_sig = deserialize_script(scr_sig)
if len(scr_sig) == 2:
# P2PH script
# Computes script pubkey associated to input
return pubtoaddr(scr_sig[1], magicbytes)
    elif len(scr_sig) >= 3:
        scr_pubkey_hex = scr_sig[-1]
        # P2SH version byte: 196 on testnet, 5 on mainnet
        p2sh_magicbyte = 196 if network == TESTNET else 5
        return p2sh_scriptaddr(scr_pubkey_hex, p2sh_magicbyte)
else:
return ''
| UTF-8 | Python | false | false | 7,880 | py | 24 | build_check_tx.py | 23 | 0.554949 | 0.54099 | 0 | 217 | 35.271889 | 105 |
natefoo/galaxy-beta2 | 8,830,452,769,066 | 8deb358c38f12785f013154ab9cbbc9ac9438063 | 7aafdda6794652ddb86ee777950b0a717b673c4b | /lib/galaxy/model/migrate/versions/0069_rename_sequencer_form_type.py | 337b3cd17426cc7372dd3bbbf30f5c40ab294ee1 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/natefoo/galaxy-beta2 | 1cd15219abbf7418301a74820a0c7b5257a3fc78 | 3af3bf5742fbf0f7d301a2a8c548a3e153544448 | refs/heads/dev | 2021-07-07T10:32:42.823976 | 2015-02-19T22:53:12 | 2015-02-21T18:29:58 | 31,084,940 | 0 | 3 | NOASSERTION | false | 2020-10-01T01:43:16 | 2015-02-20T20:58:32 | 2015-02-23T15:43:23 | 2015-02-21T18:32:44 | 56,633 | 0 | 2 | 1 | Python | false | false | """
Migration script to rename the sequencer information form type to external service information form
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
current_form_type = 'Sequencer Information Form'
new_form_type = "External Service Information Form"
cmd = "update form_definition set type='%s' where type='%s'" % ( new_form_type, current_form_type )
migrate_engine.execute( cmd )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
new_form_type = 'Sequencer Information Form'
current_form_type = "External Service Information Form"
cmd = "update form_definition set type='%s' where type='%s'" % ( new_form_type, current_form_type )
migrate_engine.execute( cmd )
| UTF-8 | Python | false | false | 1,102 | py | 296 | 0069_rename_sequencer_form_type.py | 198 | 0.720508 | 0.720508 | 0 | 38 | 28 | 103 |
markafarrell/ran-load-generator | 1,649,267,457,089 | aa9eb3df083ebc33f222cca8f49f38b7274bebea | 32cf8120ccea8eb36bdb724747ddb8ceff871cf1 | /session/sessionManagement.py | 47bb1251d709c414b3ba85ce4ca8bf3cbf127540 | []
| no_license | https://github.com/markafarrell/ran-load-generator | 7a5aae824aa7d114ee814816f4f7399bdf5e34d1 | 4d48903ead094a9ff1b263c247ee118dafb2768d | refs/heads/master | 2021-01-12T01:23:13.461845 | 2018-04-30T05:21:38 | 2018-04-30T05:21:38 | 78,379,265 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/bin/python
import sys
import getopt
import time
from subprocess import Popen, PIPE, check_output, STDOUT
import json
import random
import os
import signal
from datetime import datetime
import sqlite3
iperf_process=None
filteredcsv_process=None
csv2sqlite_process=None
session = -1
environment = ""
with open('config/servers.conf') as data_file:
config = json.load(data_file)
def get_cursor():
conn = sqlite3.connect(config['database_path'])
c = conn.cursor()
return (c, conn)
def getEnvironments():
return config['servers'].keys()
def kill_test():
print "Killing Test"
if csv2sqlite_process != None:
print "Killing csv2sqlite"
csv2sqlite_process.terminate()
if filteredcsv_process != None:
print "Killing csv2filteredcsv"
filteredcsv_process.terminate()
if iperf_process != None:
print "Killing iperf"
iperf_process.terminate()
def runiPerfRemote(direction, bandwidth, duration, interface, environment, datagram_size, remote_port, local_port, sql, tos=False):
if direction == 'b':
test_flag = "-d"
else:
test_flag = ""
if os.name == "posix":
ssh_path = "ssh"
else:
ssh_path = "ssh\ssh"
if(direction == 'd' or direction == 'b'):
iperf_command = "iperf-2.0.5 -c $SSH_CLIENT -u -i1 -fm -t" + str(duration) + " -b " + str(bandwidth) + "M" + " -l" + str(datagram_size) + " -p" + str(local_port) + " " + str(test_flag) + " -L" + str(remote_port)
if tos != False:
iperf_command += " -S " + str(tos)
iperf_command += " -yC > iperf_logs/" + str(session) + " & echo $!"
ssh_cmd = [ ssh_path, "-q", "-o", "StrictHostKeyChecking=no", "-b", interface, "-o", "BindAddress=" + interface, environment['username'] + "@" + environment['hostname'], "-p", str(environment['ssh_port']), "-i", environment['ssh_key'], iperf_command ]
print ' '.join(ssh_cmd)
remote_pid = check_output(ssh_cmd)
#print remote_pid
elif(direction == 'u'):
iperf_command = "iperf-2.0.5 -s -u -i1 -fm -t" + str(duration) + " -b " + str(bandwidth) + "M" + " -l" + str(datagram_size) + " -p" + str(remote_port) + " " + str(test_flag)
if tos != False:
iperf_command += " -S " + str(tos)
iperf_command += " -yC > iperf_logs/" + str(session) + " & echo $!"
ssh_cmd = [ ssh_path, "-q", "-o", "StrictHostKeyChecking=no", "-b", interface, "-o", "BindAddress=" + interface, environment['username'] + "@" + environment['hostname'], "-p", str(environment['ssh_port']), "-i", environment['ssh_key'], iperf_command ]
remote_pid = check_output(ssh_cmd)
#print remote_pid
else:
#TODO: handle incorrect direction
pass
return remote_pid
def updateLocalPID(session, pid):
(c, conn) = get_cursor()
c.execute('''UPDATE SESSIONS SET LOCAL_PID = ? WHERE SESSION_ID = ?''', [pid, session])
conn.commit()
def insertSessionRecord(session, environment, remote_ip, remote_port, local_ip, local_port, bandwidth, direction, start_time, duration, local_pid, remote_pid):
(c, conn) = get_cursor()
c.execute('''INSERT INTO SESSIONS (SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)''', (session, remote_ip, remote_port, local_ip, local_port, bandwidth, direction, start_time, duration, local_pid, remote_pid, environment, 0))
conn.commit()
def runiPerfLocal(direction, bandwidth, duration, interface, environment, datagram_size, remote_port, local_port, sql, session, tos=False):
global iperf_process
global filteredcsv_process
global csv2sqlite_process
if direction == 'b':
test_flag = "-d"
else:
test_flag = ""
if os.name == "posix":
iperf_path = "iperf"
else:
iperf_path = "iperf\iperf"
if(direction == 'd' or direction == 'b'):
# bufsize=1 means line buffered
command_array =[iperf_path, "-s", "-u", "-i", "1", "-l", str(datagram_size), "-p", str(local_port), "-y", "C", "-f", "m"]
if tos != False:
command_array += ["-S", str(tos)]
print ' '.join(command_array)
iperf_process = Popen(command_array, stdout=PIPE, bufsize=1)
filteredcsv_process = Popen(["python", "-u", "../csv2filteredcsv/csv2filteredcsv.py", "-d"], stdin=iperf_process.stdout, stdout=PIPE, bufsize=1)
iperf_process.stdout.close()
elif(direction == 'u'):
command_array = [iperf_path, "-c", environment['hostname'], "-u", "-i", "1", "-l", str(datagram_size), "-p", str(remote_port), "-L", str(local_port), "-y", "C", "-t", str(duration), "-f", "m", "-b", str(bandwidth) + "M", "-L", str(local_port), test_flag]
if tos != False:
command_array += ["-S", str(tos)]
iperf_process = Popen(command_array, stdout=PIPE, bufsize=1)
filteredcsv_process = Popen(["python", "-u", "../csv2filteredcsv/csv2filteredcsv.py", "-d"], stdin=iperf_process.stdout, stdout=PIPE, bufsize=1)
iperf_process.stdout.close()
else:
#TODO: handle incorrect direction
pass
updateLocalPID(session, iperf_process.pid)
if sql:
csv2sqlite_process = Popen(["python", "-u", "../csv2sqlite/csv2sqlite.py", "-s", str(session), "-o", config['database_path']], stdin=filteredcsv_process.stdout, stdout=PIPE, bufsize=1)
filteredcsv_process.stdout.close()
while csv2sqlite_process.poll() is None:
try:
line = csv2sqlite_process.stdout.readline()
print line,
except KeyboardInterrupt:
kill_test()
except:
kill_test()
else:
while filteredcsv_process.poll() is None:
try:
line = filteredcsv_process.stdout.readline()
print line,
except KeyboardInterrupt:
kill_test()
except:
kill_test()
def killRemoteSession(session):
if os.name == "posix":
ssh_path = "ssh"
else:
ssh_path = "ssh\ssh"
try:
environment = config['servers'][session['ENVIRONMENT']]
kill_cmd = "kill -9 " + str(session['REMOTE_PID'])
ssh_cmd = [ ssh_path, "-q", "-o", "StrictHostKeyChecking=no", "-b", session['LOCAL_IP'], "-o", "BindAddress=" + session['LOCAL_IP'], environment['username'] + "@" + environment['hostname'], "-p", str(environment['ssh_port']), "-i", environment['ssh_key'], kill_cmd ]
res = check_output(ssh_cmd)
except:
res = True
return res
def killLocalSession(session):
try:
os.kill(session['LOCAL_PID'], signal.SIGKILL)
return 1
except:
return 0
def killSession(session):
remote_status = killRemoteSession(session)
local_status = killLocalSession(session)
completeSession(session)
def completeSession(session):
(c, conn) = get_cursor()
c.execute('''UPDATE SESSIONS SET COMPLETE = 1 WHERE SESSION_ID = ?''', [session['SESSION_ID']])
conn.commit()
def getSession(session):
(c, conn) = get_cursor()
c.execute('''SELECT SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT FROM
SESSIONS
WHERE SESSION_ID = ?''', [session])
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
sessions.append(session)
return sessions[0]
def getSessions():
(c, conn) = get_cursor()
c.execute('''SELECT SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE FROM
SESSIONS''')
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
sessions.append(session)
return sessions
def getSession(session_id):
(c, conn) = get_cursor()
c.execute('''SELECT SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE FROM
SESSIONS WHERE SESSION_ID = ?''', [session_id])
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
sessions.append(session)
if len(sessions) > 0:
return sessions[0]
else:
return []
def getSessionsComplete():
(c, conn) = get_cursor()
c.execute('''SELECT SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE FROM
SESSIONS WHERE COMPLETE = 1''')
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
sessions.append(session)
return sessions
def getSessionsActive():
(c, conn) = get_cursor()
c.execute('''SELECT SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE FROM
SESSIONS WHERE COMPLETE != 1 AND julianday('now','localtime')<julianday(start_time)+duration/(24.0*60*60)''')
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
sessions.append(session)
return sessions
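# Return sessions that have produced data newer than the given timestamp.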
def getSessionsAfter(timestamp):
(c, conn) = get_cursor()
c.execute('''SELECT SESSIONS.SESSION_ID, MAX(TIMESTAMP) AS TIMESTAMP , REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION, LOCAL_PID, REMOTE_PID, ENVIRONMENT, COMPLETE FROM
SESSION_DATA
INNER JOIN
SESSIONS
ON
SESSION_DATA.SESSION_ID = SESSIONS.SESSION_ID
WHERE TIMESTAMP > ?
    GROUP BY SESSIONS.SESSION_ID, REMOTE_IP, REMOTE_PORT, LOCAL_IP, LOCAL_PORT, BANDWIDTH, DIRECTION, START_TIME, DURATION''', [timestamp])
sessions = []
for row in c:
session = {}
for i in range(0,len(row)):
# Construct a dictionary using the column headers and results
session[c.description[i][0]] = row[i]
        sessions.append(session)
    return sessions
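# Spawn startSession.py as a subprocess to launch a new test session.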
def createSession(session, direction, bandwidth, duration, interface, environment, datagram_size, remote_port, local_port, tos):
command_array = ["python", "-u", "startSession.py", "-d", direction, "-b", str(bandwidth), "-t", str(duration), "-i", interface, "-e", environment, "-s", str(session), "-o", "sql"]
if tos != False:
command_array += ["-T", str(tos)]
start_session_process = Popen(command_array)
| UTF-8 | Python | false | false | 10,297 | py | 41 | sessionManagement.py | 19 | 0.665534 | 0.659415 | 0 | 309 | 32.323625 | 268 |
vktemel/CarND-capstone | 8,890,582,348,108 | e8f23658fb2ae389644edd7e611f04fa24e03ffb | 15d9d0b7a5c2011759a8c6192b57038dd89d686a | /ros/src/twist_controller/twist_controller.py | 2dbf7f4d72edb566463529cf2bb92d493c3a23ec | [
"MIT"
]
| permissive | https://github.com/vktemel/CarND-capstone | fd972585d984c9bb55b35c42f930a76d77d57dc9 | d44fb76b059e8fd5700bcf8fd97d390810232d13 | refs/heads/master | 2023-03-14T20:33:36.403078 | 2021-03-15T03:42:12 | 2021-03-15T03:42:12 | 335,111,813 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
from pid import PID
from yaw_controller import YawController
import rospy
class Controller(object):
def __init__(self, wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle, decel_limit, wheel_radius, vehicle_mass):
self.throttle = 0
self.pid_speed = PID(0.2, 0.001, 0.1)
self.yawCtrl = YawController(wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle)
self.timestamp = rospy.get_time()
self.decel_limit = decel_limit
self.wheel_radius = wheel_radius
self.vehicle_mass = vehicle_mass
def reset(self):
self.pid_speed.reset()
def control(self, target_linear_vel, target_angular_vel, current_linear_vel, current_angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
steer = self.yawCtrl.get_steering(target_linear_vel.x, target_angular_vel.z, current_linear_vel.x)
# rospy.logwarn("target vel x: %s", target_linear_vel.x)
vel_err = target_linear_vel.x - current_linear_vel.x
current_timestamp = rospy.get_time()
dt = current_timestamp - self.timestamp
self.timestamp = current_timestamp
throttle = self.pid_speed.step(vel_err, dt)
if throttle > 1.0:
throttle = 1.0
elif throttle < 0:
throttle = 0
if((current_linear_vel.x < 0.1) & (target_linear_vel.x == 0.0)):
throttle = 0
brake = 700
elif((vel_err < 0.0) & (throttle < 0.02)):
throttle = 0
decel = max(vel_err, self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius
else:
brake = 0.0
return throttle, brake, steer
| UTF-8 | Python | false | false | 1,856 | py | 3 | twist_controller.py | 2 | 0.592672 | 0.570582 | 0 | 59 | 30.440678 | 132 |
GeorgeZ1917/Python | 3,822,520,920,905 | 437d0a589583b6c822a7750f29f681e6fe0e3200 | 1f58673da4698ac6458a593550c82f8754d11792 | /MainDictionary.py | 166e9428e4765e5ea904dad7964f6e31643dce09 | []
| no_license | https://github.com/GeorgeZ1917/Python | c2904e757e58c61b312e994345cbfb9263e615ee | 391c3a27f3950bd1f1661a39356d90f097905293 | refs/heads/main | 2023-03-26T15:55:27.065085 | 2021-03-25T14:52:55 | 2021-03-25T14:52:55 | 333,175,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #MainDictionary.py
from Dictionary import LinkedList, BinarySearchTree, insertList, insertTree, deleteList, middleNode
from Dictionary import printList, printTree, searchList, searchTree, deleteLeaf, identicalTrees, convert, root
from random import randint
from time import time
insertList( 30, "Hello hello" )
insertList( 14, "My ace man" )
insertList( 70, "My mellow" )
insertList( 31, "John Wayne" )
insertList( 47, "Ain't got" )
insertList( 17, "Anything" )
#start = time()
#data = 0
#while data < 10 ** 3 :
# insertList ( randint ( 0, 10 ** 3 ), str ( randint ( 0, 10 ** 3 ) ** 2 ) )
# data += 1
#printList()
#print ( "Middle node key:", middleNode().key )
#end = time()
#print ( end - start )
print ( "There are", LinkedList.nodesCount, "nodes.\n\n\n" )
insertTree ( 100, "Die Erfindung des Rades" )
insertTree ( 150, "Lösch das Internet" )
insertTree ( 120, "Achterbahn" )
insertTree ( 96, " Wenn ich gross bin" )
insertTree ( 145, "Wie ich" )
insertTree ( 200, "Cybercrime" )
insertTree ( 80, " Lila Wolken" )
insertTree ( 99, " Hätte hätte Fahrradkette" )
insertTree ( 130, "El Presidente" )
insertTree ( 110, "Wenn jeder an sich denkt" )
#start = time()
data = 0
while data < 10 ** 2 :
insertTree ( randint ( 0, 10 ** 2 ), str ( randint ( 0, 10 ** 2 ) ** 2 ) )
data += 1
deleteLeaf ( root, 110 )
#printTree ( root )
#end = time()
#print ( end - start )
convert()
printTree ( root )
print ( "There are", BinarySearchTree.leavesCount, "leaves." )
| UTF-8 | Python | false | false | 1,521 | py | 13 | MainDictionary.py | 10 | 0.64361 | 0.597497 | 0 | 46 | 31 | 110 |
shanqing-cai/MRI_analysis | 8,589,938,851 | 9ec8f12752d4d1343a0c0e0e1937e8237149c858 | d16b5cafcfd18ceb6a24f5c30b247fb2f31509a9 | /aparc12_surface_stats.py | 880f00e67c01938aba9cd6a614f51a6920722b66 | [
"BSD-2-Clause"
]
| permissive | https://github.com/shanqing-cai/MRI_analysis | 669eb77bb16efd506cf06f2bf40abecd80b2b2d6 | 39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9 | refs/heads/master | 2021-01-10T22:11:51.175311 | 2014-02-19T04:32:57 | 2014-02-19T04:32:57 | 7,264,919 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import os
import sys
import glob
import argparse
import tempfile
import numpy as np
import matplotlib.pyplot as plt
import pickle
import scipy.stats as stats
from copy import deepcopy
from subprocess import Popen, PIPE
from get_qdec_info import get_qdec_info
from fs_load_stats import fs_load_stats
from aparc12 import *
from scai_utils import *
from scai_stats import cohens_d
BASE_DIR = "/users/cais/STUT/analysis/aparc12_tracts"
DATA_DIR = "/users/cais/STUT/DATA"
FSDATA_DIR = "/users/cais/STUT/FSDATA"
CTAB = "/users/cais/STUT/slaparc_550.ctab"
SEGSTATS_SUM_WC = "aparc12_wm%dmm.segstats.txt"
P_THRESH_UNC = 0.05
hemis = ["lh", "rh"]
grps = ["PFS", "PWS"]
grpColors = {"PFS": [0, 0, 0], "PWS": [1, 0, 0]}
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="Analyze aparc12 surface annotation: Surface area and average thickness")
ap.add_argument("-r", dest="bReload", action="store_true", \
help="Reload data (time-consuming)")
# ap.add_argument("hemi", help="Hemisphere {lh, rh}")
# if len(sys.argv) == 1:
# ap.print_help()
# sys.exit(0)
# === Args input arguments === #
args = ap.parse_args()
bReload = args.bReload
# hemi = args.hemi
# assert(hemis.count(hemi) == 1)
# === Determine the subject list and their group memberships === #
check_dir(BASE_DIR)
ds = glob.glob(os.path.join(BASE_DIR, "S??"))
ds.sort()
sIDs = []
isPWS = []
SSI4 = []
for (i0, t_path) in enumerate(ds):
(t_path_0, t_sID) = os.path.split(t_path)
sIDs.append(t_sID)
SSI4.append(get_qdec_info(t_sID, "SSI"))
if get_qdec_info(t_sID, "diagnosis") == "PWS":
isPWS.append(1)
else:
isPWS.append(0)
isPWS = np.array(isPWS)
SSI4 = np.array(SSI4)
assert(len(sIDs) > 0)
assert(len(sIDs) == len(isPWS))
# === Get the list of cortical ROIs (Speech network only) ===
rois0 = get_aparc12_cort_rois(bSpeech=True)
check_file(CTAB)
(ctab_roi_nums, ctab_roi_names) = read_ctab(CTAB)
# Duplex into both hemispheres
roi_names = []
roi_nums = []
for (i0, hemi) in enumerate(hemis):
for (i1, roi) in enumerate(rois0):
t_roi_name = "%s_%s" % (hemi, roi)
assert(ctab_roi_names.count(t_roi_name) == 1)
idx = ctab_roi_names.index(t_roi_name)
roi_names.append(t_roi_name)
roi_nums.append(ctab_roi_nums[idx])
assert(len(roi_names) == len(roi_nums))
# === Load data: Loop through all subjects === #
cachePklFN = "aparc12_surface_stats_dset.pkl"
nROIs = len(roi_names)
ns = len(sIDs)
if bReload:
print("INFO: bReload = True: Reloading data (time-consuming)\n")
labArea = np.zeros([ns, nROIs])
labAvgThick = np.zeros([ns, nROIs])
# Label area normalized by hemisphere surface area
labAreaNorm = np.zeros([ns, nROIs])
for (i0, sID) in enumerate(sIDs):
t_rois = []
t_roi_nums = []
t_area = []
t_area_norm = []
t_thick = []
for (i1, hemi) in enumerate(hemis):
# == Load hemisphere total surface area == #
hemiStatsFN = os.path.join(FSDATA_DIR, sID, \
"stats", "%s.aparc.stats" % hemi)
check_file(hemiStatsFN)
t_hemiSurfArea = fs_load_stats(hemiStatsFN, "SurfArea")
tmpfn = tempfile.mktemp()
hthick = os.path.join(FSDATA_DIR, sID, "surf", \
"%s.thickness" % hemi)
check_file(hthick)
print("Loading data from subject %s, hemisphere %s" \
% (sID, hemi))
(sout, serr) = Popen(["mri_segstats", "--annot", \
sID, hemi, "aparc12", \
"--in", hthick, \
"--sum", tmpfn], \
stdout=PIPE, stderr=PIPE).communicate()
sout = read_text_file(tmpfn)
os.system("rm -rf %s" % tmpfn)
k0 = 0
while (sout[k0].startswith("# ")):
k0 = k0 + 1
sout = sout[k0 :]
for tline in sout:
if len(tline) == 0:
break;
t_items = remove_empty_strings(\
tline.replace('\t', ' ').split(' '))
if len(t_items) == 10:
t_rois.append(hemi + "_" + t_items[4])
if hemi == "lh":
t_roi_nums.append(1000 + int(t_items[1]))
else:
t_roi_nums.append(2000 + int(t_items[1]))
t_area.append(float(t_items[3]))
t_area_norm.append(float(t_items[3]) / t_hemiSurfArea)
t_thick.append(float(t_items[5]))
# == Matching and filling values == #
for (i2, t_rn) in enumerate(roi_nums):
if t_roi_nums.count(t_rn) > 0:
idx = t_roi_nums.index(t_rn)
labArea[i0][i2] = t_area[idx]
labAreaNorm[i0][i2] = t_area_norm[idx]
labAvgThick[i0][i2] = t_thick[idx]
# === Save to pkl file === #
dset = {"labArea": labArea, \
"labAreaNorm": labAreaNorm, \
"labAvgThick": labAvgThick}
os.system("rm -rf %s" % cachePklFN)
cachePklF = open(cachePklFN, "wb")
pickle.dump(dset, cachePklF)
cachePklF.close()
check_file(cachePklFN)
print("INFO: Saved loaded data to file: %s\n" % os.path.abspath(cachePklFN))
else:
print("INFO: Loading saved data from file: %s\n" % os.path.abspath(cachePklFN))
cachePklF = open(cachePklFN, "rb")
dset = pickle.load(cachePklF)
cachePklF.close()
labArea = dset["labArea"]
labAreaNorm = dset["labAreaNorm"]
labAvgThick = dset["labAvgThick"]
# === Check data validity === #
assert(len(labArea) == ns)
assert(len(labAreaNorm) == ns)
assert(len(labAvgThick) == ns)
# === Statistical comparison === #
mean_area = {}
std_area = {}
nsg = {}
for (i0, grp) in enumerate(grps):
nsg[grp] = len(np.nonzero(isPWS == i0))
mean_area[grp] = np.mean(labArea[isPWS == i0], axis=0)
# std_area[grp] = np.std(labArea[isPWS == i0], axis=0) / np.sqrt(nsg[grp])
std_area[grp] = np.std(labArea[isPWS == i0], axis=0)
cmprItems = ["labArea", "labAreaNorm", "labAvgThick"]
for (h0, cmprItem) in enumerate(cmprItems):
print("--- List of significant differences in %s (p < %f uncorrected) ---" \
% (cmprItem, P_THRESH_UNC))
p_tt_val = np.zeros([nROIs])
t_tt_val = np.zeros([nROIs])
for (i0, t_roi) in enumerate(roi_names):
if h0 == 0:
dat_PWS = labArea[isPWS == 1, i0]
dat_PFS = labArea[isPWS == 0, i0]
elif h0 == 1:
dat_PWS = labAreaNorm[isPWS == 1, i0]
dat_PFS = labAreaNorm[isPWS == 0, i0]
elif h0 == 2:
dat_PWS = labAvgThick[isPWS == 1, i0]
dat_PFS = labAvgThick[isPWS == 0, i0]
(t_tt, p_tt) = stats.ttest_ind(dat_PWS, dat_PFS)
p_tt_val[i0] = p_tt
t_tt_val[i0] = t_tt
if p_tt_val[i0] < P_THRESH_UNC:
if t_tt_val[i0] < 0:
dirString = "PWS < PFS"
else:
dirString = "PWS > PFS"
print("%s: p = %f; t = %f (%s)" \
% (t_roi, p_tt_val[i0], t_tt_val[i0], dirString))
print("\tMean +/- SD: PWS: %.5f +/- %.5f; PFS: %.5f +/- %.5f" \
% (np.mean(dat_PWS), np.std(dat_PWS), \
np.mean(dat_PFS), np.std(dat_PFS)))
print("\tCohens_d = %.3f" % cohens_d(dat_PWS, dat_PFS))
print("\n")
# === Spearman correlation === #
for (h0, cmprItem) in enumerate(cmprItems):
print("--- Spearman correlations with SSI4 in %s (p < %f uncorrected) ---" \
% (cmprItem, P_THRESH_UNC))
p_spc_val = np.zeros([nROIs])
rho_spc_val = np.zeros([nROIs])
for (i0, t_roi) in enumerate(roi_names):
if h0 == 0:
(r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
labArea[isPWS == 1, i0])
elif h0 == 1:
(r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
labAreaNorm[isPWS == 1, i0])
elif h0 == 2:
(r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], \
labAvgThick[isPWS == 1, i0])
p_spc_val[i0] = p_spc
rho_spc_val[i0] = r_spc
if p_spc_val[i0] < P_THRESH_UNC:
if rho_spc_val[i0] < 0:
dirString = "-"
else:
dirString = "+"
print("%s: p = %f; rho = %f (%s)" \
% (t_roi, p_spc_val[i0], rho_spc_val[i0], dirString))
print("\n")
# === Compare combined dIFo and vIFo === #
lh_IFo_area = {}
lh_IFo_areaNorm = {}
for (i0, grp) in enumerate(grps):
lh_IFo_area[grp] = labArea[isPWS == i0, roi_names.index("lh_vIFo")] + \
labArea[isPWS == i0, roi_names.index("lh_dIFo")]
lh_IFo_areaNorm[grp] = labAreaNorm[isPWS == i0, roi_names.index("lh_vIFo")] + \
labAreaNorm[isPWS == i0, roi_names.index("lh_dIFo")]
(t_tt, p_tt) = stats.ttest_ind(lh_IFo_area["PWS"], \
lh_IFo_area["PFS"])
print("-- Comparing lh_IFo area: --")
print("\tp = %f; t = %f" % (p_tt, t_tt))
print("\tPWS: %.1f +/- %.1f; PFS: %.1f +/- %.1f" \
% (np.mean(lh_IFo_area["PWS"]), np.std(lh_IFo_area["PWS"]), \
np.mean(lh_IFo_area["PFS"]), np.std(lh_IFo_area["PFS"])))
print("\n")
(t_tt, p_tt) = stats.ttest_ind(lh_IFo_areaNorm["PWS"], \
lh_IFo_areaNorm["PFS"])
print("-- Comparing lh_IFo areaNorm: --")
print("\tp = %f; t = %f" % (p_tt, t_tt))
print("\tPWS: %.1e +/- %.1e; PFS: %.1e +/- %.1e" \
% (np.mean(lh_IFo_areaNorm["PWS"]), np.std(lh_IFo_areaNorm["PWS"]), \
np.mean(lh_IFo_areaNorm["PFS"]), np.std(lh_IFo_areaNorm["PFS"])))
# === Correlating combined IFo with SSI4 === #
(r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], lh_IFo_area["PWS"])
print("-- Correlating SSI4 with lh_IFo area: --")
print("\tp = %f; rho = %f" % (p_spc, r_spc))
print("\n")
(r_spc, p_spc) = stats.spearmanr(SSI4[isPWS == 1], lh_IFo_areaNorm["PWS"])
print("-- Correlating SSI4 with lh_IFo areaNorm: --")
print("\tp = %f; rho = %f" % (p_spc, r_spc))
print("\n")
# === Visualiation === #
"""
for (i0, grp) in enumerate(grps):
plt.errorbar(range(nROIs), mean_area[grp], yerr=std_area[grp], \
color=grpColors[grp])
plt.xticks(range(nROIs), roi_names, rotation=90.0)
plt.show()
"""
| UTF-8 | Python | false | false | 11,734 | py | 70 | aparc12_surface_stats.py | 39 | 0.476138 | 0.461735 | 0 | 333 | 34.231231 | 118 |
CoLRev-Ecosystem/colrev | 3,444,563,800,387 | 0ef650fd2a6c5e81f21e4d52168e5060b6d6a10a | c7b00162595001b5fca76d63ab84143ab776b52e | /colrev/ops/built_in/data/github_pages.py | a46369e539a83214f849c7ff4c9b310983c835df | [
"MIT",
"CC0-1.0"
]
| permissive | https://github.com/CoLRev-Ecosystem/colrev | 9756398b6cdf46eeffabebf38e880455eb15d402 | 19fb6883fa2445e1119aa11cb1a011997f285e4f | refs/heads/main | 2023-03-23T13:37:09.298982 | 2023-03-23T10:06:57 | 2023-03-23T10:06:57 | 363,073,613 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
"""Creation of a github-page for the review as part of the data operations"""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
import zope.interface
from dataclasses_jsonschema import JsonSchemaMixin
import colrev.env.package_manager
import colrev.env.utils
import colrev.record
if False: # pylint: disable=using-constant-test
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import colrev.ops.data
import git
@zope.interface.implementer(colrev.env.package_manager.DataPackageEndpointInterface)
@dataclass
class GithubPages(JsonSchemaMixin):
"""Export the literature review into a Github Page"""
ci_supported: bool = False
@dataclass
class GHPagesSettings(colrev.env.package_manager.DefaultSettings, JsonSchemaMixin):
"""Settings for GithubPages"""
endpoint: str
version: str
auto_push: bool
_details = {
"auto_push": {
"tooltip": "Indicates whether the Github Pages branch "
"should be pushed automatically"
},
}
GH_PAGES_BRANCH_NAME = "gh-pages"
settings_class = GHPagesSettings
def __init__(
self,
*,
data_operation: colrev.ops.data.Data, # pylint: disable=unused-argument
settings: dict,
) -> None:
# Set default values (if necessary)
if "version" not in settings:
settings["version"] = "0.1.0"
if "auto_push" not in settings:
settings["auto_push"] = True
self.settings = self.settings_class.load_settings(data=settings)
def get_default_setup(self) -> dict:
"""Get the default setup"""
github_pages_endpoint_details = {
"endpoint": "colrev_built_in.github_pages",
"version": "0.1",
"auto_push": True,
}
return github_pages_endpoint_details
def __setup_github_pages_branch(
self, *, data_operation: colrev.ops.data.Data, git_repo: git.Repo
) -> None:
# if branch does not exist: create and add index.html
data_operation.review_manager.logger.info("Setup github pages")
git_repo.create_head(self.GH_PAGES_BRANCH_NAME)
git_repo.git.checkout(self.GH_PAGES_BRANCH_NAME)
title = "Manuscript template"
readme_file = data_operation.review_manager.readme
if readme_file.is_file():
with open(readme_file, encoding="utf-8") as file:
title = file.readline()
title = title.replace("# ", "").replace("\n", "")
title = '"' + title + '"'
git_repo.git.rm("-rf", Path("."))
gitignore_file = Path(".gitignore")
git_repo.git.checkout("HEAD", "--", gitignore_file)
with gitignore_file.open("a", encoding="utf-8") as file:
file.write("status.yaml\n")
data_operation.review_manager.dataset.add_changes(path=gitignore_file)
colrev.env.utils.retrieve_package_file(
template_file=Path("template/github_pages/index.html"),
target=Path("index.html"),
)
data_operation.review_manager.dataset.add_changes(path=Path("index.html"))
colrev.env.utils.retrieve_package_file(
template_file=Path("template/github_pages/_config.yml"),
target=Path("_config.yml"),
)
colrev.env.utils.inplace_change(
filename=Path("_config.yml"),
old_string="{{project_title}}",
new_string=title,
)
data_operation.review_manager.dataset.add_changes(path=Path("_config.yml"))
colrev.env.utils.retrieve_package_file(
template_file=Path("template/github_pages/about.md"),
target=Path("about.md"),
)
data_operation.review_manager.dataset.add_changes(path=Path("about.md"))
def __update_data(
self, *, data_operation: colrev.ops.data.Data, silent_mode: bool
) -> None:
if not silent_mode:
data_operation.review_manager.logger.info("Update data on github pages")
records = data_operation.review_manager.dataset.load_records_dict()
# pylint: disable=duplicate-code
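        # Keep only the records that were included or synthesized in the review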
included_records = {
r["ID"]: r
for r in records.values()
if r["colrev_status"]
in [
colrev.record.RecordState.rev_synthesized,
colrev.record.RecordState.rev_included,
]
}
data_file = Path("data.bib")
data_operation.review_manager.dataset.save_records_dict_to_file(
records=included_records, save_path=data_file
)
data_operation.review_manager.dataset.add_changes(path=data_file)
data_operation.review_manager.create_commit(msg="Update sample")
def __push_branch(
self,
*,
data_operation: colrev.ops.data.Data,
git_repo: git.Repo,
silent_mode: bool,
) -> None:
if not silent_mode:
data_operation.review_manager.logger.info("Push to github pages")
if "origin" in git_repo.remotes:
if "origin/gh-pages" in [r.name for r in git_repo.remotes.origin.refs]:
git_repo.git.push("origin", self.GH_PAGES_BRANCH_NAME, "--no-verify")
else:
git_repo.git.push(
"--set-upstream",
"origin",
self.GH_PAGES_BRANCH_NAME,
"--no-verify",
)
username, project = (
git_repo.remotes.origin.url.replace("https://github.com/", "")
.replace(".git", "")
.split("/")
)
if not silent_mode:
data_operation.review_manager.logger.info(
f"Data available at: https://{username}.github.io/{project}/"
)
else:
if not silent_mode:
data_operation.review_manager.logger.info("No remotes specified")
def update_data(
self,
data_operation: colrev.ops.data.Data,
records: dict, # pylint: disable=unused-argument
synthesized_record_status_matrix: dict, # pylint: disable=unused-argument
silent_mode: bool,
) -> None:
"""Update the data/github pages"""
if data_operation.review_manager.in_ci_environment():
data_operation.review_manager.logger.error(
"Running in CI environment. Skipping github-pages generation."
)
return
if data_operation.review_manager.dataset.has_changes():
data_operation.review_manager.logger.error(
"Cannot update github pages because there are uncommited changes."
)
return
git_repo = data_operation.review_manager.dataset.get_repo()
active_branch = git_repo.active_branch
if self.GH_PAGES_BRANCH_NAME not in [h.name for h in git_repo.heads]:
self.__setup_github_pages_branch(
data_operation=data_operation, git_repo=git_repo
)
git_repo.git.checkout(self.GH_PAGES_BRANCH_NAME)
self.__update_data(data_operation=data_operation, silent_mode=silent_mode)
if self.settings.auto_push:
self.__push_branch(
data_operation=data_operation,
git_repo=git_repo,
silent_mode=silent_mode,
)
git_repo.git.checkout(active_branch)
def update_record_status_matrix(
self,
data_operation: colrev.ops.data.Data, # pylint: disable=unused-argument
synthesized_record_status_matrix: dict,
endpoint_identifier: str,
) -> None:
"""Update the record_status_matrix"""
# Note : automatically set all to True / synthesized
for syn_id in synthesized_record_status_matrix:
synthesized_record_status_matrix[syn_id][endpoint_identifier] = True
def get_advice(
self,
review_manager: colrev.review_manager.ReviewManager,
) -> dict:
"""Get advice on the next steps (for display in the colrev status)"""
data_endpoint = "Data operation [github pages data endpoint]: "
advice = {"msg": f"{data_endpoint}", "detailed_msg": "TODO"}
if "NA" == review_manager.dataset.get_remote_url():
advice["msg"] += (
"\n - To make the repository available on Github pages, "
+ "push it to a Github repository\nhttps://github.com/new"
)
else:
            advice["msg"] += "\n - The page is updated automatically (gh-pages branch)"
return advice
if __name__ == "__main__":
pass
| UTF-8 | Python | false | false | 8,823 | py | 170 | github_pages.py | 81 | 0.584042 | 0.583248 | 0 | 252 | 34.011905 | 87 |
tkj5008/Luminar_Python_Programs | 8,950,711,881,205 | 3fc7efc28a82c585b476fb805d192f2df991fc3d | 86f860eab66ce0681cda293ee063e225747d113c | /Object_Oriented_Programming/Demo9.py | eaaf7fbd9866c9a63b8e92c211f7987326375c47 | []
| no_license | https://github.com/tkj5008/Luminar_Python_Programs | a1e7b85ad2b7537081bdedad3a5a4a2d9f6c1f07 | d47ef0c44d5811e7039e62938a90a1de0fe7977b | refs/heads/master | 2023-08-03T05:15:04.963129 | 2021-09-23T14:33:21 | 2021-09-23T14:33:21 | 403,316,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Employee:
company="AT&T"
def __init__(self,name,empid,salary):
self.name=name
self.empid=empid
self.salary=salary
def printvalue(self):
print(Employee.company,self.name,self.empid,self.salary,)
obj = Employee("Thomas", "TJ618N", 25000)
obj.printvalue()
| UTF-8 | Python | false | false | 299 | py | 135 | Demo9.py | 118 | 0.655518 | 0.628763 | 0 | 10 | 28.9 | 65 |
dopexxx/FacePeeper | 18,769,007,085,937 | 6521e93ba2ae5d87284eb3fd384c6e9a6225d4c2 | 5b4afa1364dbec5c154be8fb3fe8fa382c3039b5 | /MNIST_Classifier/mnistTester.py | 30783263c2254e9ba6fcf5ae0e8d0bd428487c30 | []
| no_license | https://github.com/dopexxx/FacePeeper | 3957ea1155a7a93752117cc036587c053d14a372 | ff3d1a8e735b0318eaee47066c264e87afc8c111 | refs/heads/master | 2021-01-09T06:07:52.813261 | 2017-11-22T09:48:11 | 2017-11-22T09:48:11 | 80,897,773 | 3 | 1 | null | false | 2017-02-28T22:31:06 | 2017-02-04T06:05:19 | 2017-02-27T21:55:21 | 2017-02-28T22:31:05 | 493,332 | 0 | 1 | 0 | Python | null | null | # Test MNIST task
# We used this file to test our network on the MNIST task
# Execute this file while being in the MNIST directory (not from parent directory e.g.)
# Import modules
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import numpy as np
# Import data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Import the network class from .py file in parent directory
import os,sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from residualCNN import RESNET
# Specify task and build network
net = RESNET(task='MNIST',direc='below')
net.network()
### ONLY FOR TESTING, NOT FOR RETRAINING (variables not stored)
with tf.Session() as session:
saver = tf.train.Saver(tf.trainable_variables(),write_version = saver_pb2.SaverDef.V1)
saver.restore(session, "./weightsMNIST.ckpt")
    # Evaluate training accuracy. A single batch of size 55000 causes a memory error, so we use 11 batches of size 5000.
    # If that is still too much for your system, set the splitter variable accordingly (55000 % splitter == 0 must hold)
splitter = 11
size = mnist.train.images.shape[0]
step = size // splitter
# Read in images and extend to 3D (we work with color images)
trainImgs = np.empty([size,28,28,3])
for k in range(3):
trainImgs[:,:,:,k] = mnist.train.images.reshape([size,28,28])
# Now check performance on train set
p = []
for k in range(splitter):
p.append(net.accuracy.eval(feed_dict = {net.x: trainImgs[k*step:(k+1)*step],
net.y_:mnist.train.labels[k*step:(k+1)*step], net.keep_prob:1.0}))
print()
print('Train Accuracy MNIST = ', np.mean(p))
# Same for evaluation set
size = mnist.validation.images.shape[0]
testImgs = np.empty([size,28,28,3])
for i in range(3):
testImgs[:,:,:,i] = mnist.validation.images.reshape([size,28,28])
print('Validation Accuracy MNIST ', net.accuracy.eval(feed_dict = {net.x: testImgs,
net.y_: mnist.validation.labels, net.keep_prob:1.0}))
# Same for test set
size = mnist.test.images.shape[0]
testImgs = np.empty([size,28,28,3])
for i in range(3):
testImgs[:,:,:,i] = mnist.test.images.reshape([size,28,28])
print('Test Accuracy MNIST ', net.accuracy.eval(feed_dict = {net.x: testImgs,
net.y_: mnist.test.labels, net.keep_prob:1.0}))
print()
| UTF-8 | Python | false | false | 2,493 | py | 28 | mnistTester.py | 14 | 0.671881 | 0.646209 | 0 | 73 | 33.013699 | 122 |
ShashwatMishra/Mini-Facebook | 19,636,590,508,862 | 573213ce2e895bb0d7501214b8b150a8d8bbca0f | 8583a7e7a7179f6160e56fa6c856cee87098bac4 | /Mini Facebook/app/routes.py | 14bf46218a05861a49f2c630aabed041f9b5a04c | [
"MIT"
]
| permissive | https://github.com/ShashwatMishra/Mini-Facebook | 1a8604d27a6290d95ae232f544192aee2aa9f191 | b78b0e23be31529b5026c7db3329320be93b7f53 | refs/heads/master | 2020-05-20T07:00:45.344275 | 2019-05-07T16:49:38 | 2019-05-07T16:49:38 | 185,441,920 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import LoginForm, RegistrationForm, EditProfileForm, StatusUpdate, MessageForm
from app.models import User, Post , Message
from datetime import datetime
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
form = StatusUpdate()
if form.validate_on_submit():
post = Post(body=form.post.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Status is updated')
return redirect(url_for('index'))
page = request.args.get('page', 1, type=int)
posts = current_user.followed_posts().paginate(page, app.config['POST_PER_PAGE'], False)
if posts.next_num :
next_url = url_for('index', page=posts.next_num)
else :
next_url = None
if posts.prev_num :
prev_url = url_for('index', page=posts.prev_num)
else :
prev_url = None
return render_template('index.html', title='Timeline', posts=posts.items, form=form, next_url=next_url, prev_url=prev_url)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form= form)
@app.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
posts = Post.query.filter_by(user_id=current_user.id).order_by(
Post.timestamp.desc()).paginate(page,app.config['POST_PER_PAGE'],False)
if posts.next_num:
next_url = url_for('user',username = current_user.username, page=posts.next_num)
else :
next_url = None
if posts.prev_num :
prev_url = url_for('user',username = current_user.username, page=posts.prev_num)
else :
prev_url = None
return render_template('user.html', user=user, posts=posts.items, next_url=next_url, prev_url=prev_url)
@app.route('/edit_profile',methods = ['GET','POST'])
@login_required
def edit_profile():
form = EditProfileForm()
    if form.validate_on_submit():
current_user.about_me = form.about_me.data
current_user.relationship_status = form.relationship_status.data
current_user.gender = form.gender.data
current_user.country = form.country.data
db.session.commit()
return redirect(url_for('user',username= current_user.username))
return render_template('edit_profile.html',title = 'Editing Profile',form = form)
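# Runs before every request: record when an authenticated user was last seen.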
@app.before_request
def before_request():
if current_user.is_authenticated :
current_user.last_seen = datetime.utcnow()
db.session.commit()
@app.route('/follow/<username>',methods = ['POST','GET'])
@login_required
def follow(username):
    user = User.query.filter_by(username=username).first()
if user is None:
flash('User does not exist')
return redirect(url_for('user', username=username))
if user.username == current_user.username :
flash('You can not follow yourself')
return redirect(url_for('user', username=username))
current_user.follow(user)
db.session.commit()
flash('You are following {0}'.format(user.username))
return redirect(url_for('user', username=username))
@app.route('/unfollow/<username>',methods = ['POST','GET'])
@login_required
def unfollow(username):
    user = User.query.filter_by(username=username).first()
if user is None:
flash('User does not exist')
return redirect(url_for('user', username=username))
if user.username == current_user.username :
flash('You can not unfollow yourself')
return redirect(url_for('user', username=username))
current_user.unfollow(user)
db.session.commit()
flash('You are unfollowing {0}'.format(user.username))
return redirect(url_for('user', username=username))
@app.route('/send_message/<receiver>',methods = ['POST','GET'])
@login_required
def send_message(receiver):
user = User.query.filter_by(username = receiver).first_or_404()
form = MessageForm()
if form.validate_on_submit():
message = Message(text= form.message.data,reader=user, author =current_user)
db.session.add(message)
db.session.commit()
flash('Your Message is delivered')
return redirect(url_for('user', username=current_user.username))
return render_template('send_message.html',receiver=receiver, form=form)
@app.route('/messages')
@login_required
def messages():
current_user.last_message_read_time = datetime.utcnow()
db.session.commit()
page = request.args.get('page', 1, type=int)
messages = current_user.message_received.order_by(
Message.timestamp.desc()).paginate(
page, app.config['MESSAGE_PER_PAGE'], False)
next_url = url_for('messages', page=messages.next_num) \
if messages.has_next else None
prev_url = url_for('messages', page=messages.prev_num) \
if messages.has_prev else None
return render_template('messages.html', messages=messages.items,
next_url=next_url, prev_url=prev_url) | UTF-8 | Python | false | false | 6,651 | py | 9 | routes.py | 8 | 0.661705 | 0.660051 | 0 | 164 | 39.560976 | 126 |
andrely/sublexical-features | 10,952,166,607,494 | 98cf45b26e377bd937efe64ea95acbca5b18d75b | 785e6e41b16ab7c702987d0dcd01793668da6f98 | /SublexicalSemantics/bin/sublexicalize.py | 685f990ede3faa7d15dad7c9ef3d25d5d3a84c2c | []
| no_license | https://github.com/andrely/sublexical-features | 748c18419405a8184c81253a16ed0bd4445a6ffd | 4191ec5ea3f95dfa1741c441da90cbbd1a1c2a02 | refs/heads/master | 2021-01-17T15:09:53.766421 | 2017-05-03T18:05:08 | 2017-05-03T18:05:08 | 16,731,407 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from optparse import OptionParser
import os
import re
import sys
cur_path, _ = os.path.split(__file__)
sys.path.append(os.path.join(cur_path, '..', 'Experiments'))
from experiment_support.preprocessing import sublexicalize
BUF_SIZE = 8192
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-n", "--ngram-order", default=3)
opts, args = parser.parse_args()
order = int(opts.ngram_order)
in_str = sys.stdin.read(BUF_SIZE)
rest_str = ""
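    # Stream stdin in fixed-size chunks; carry the last (order-1) characters of the
    # output over to the next chunk so n-grams spanning chunk boundaries are not lost.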
while len(in_str) > 0:
out_str = sublexicalize(rest_str + in_str.rstrip('\n'), order=order)
rest_str = re.sub('_', ' ', out_str[-(order-1):])
sys.stdout.write(out_str + " ")
in_str = sys.stdin.read(BUF_SIZE)
| UTF-8 | Python | false | false | 730 | py | 70 | sublexicalize.py | 67 | 0.616438 | 0.606849 | 0 | 29 | 24.172414 | 76 |
Aasthaengg/IBMdataset | 13,872,744,414,992 | 094468854be5a8e52e2fb3afeae7566e1daa1c74 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03407/s755755148.py | de96295b9fb6e9f0a46cf4632a66617331a13e8f | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | A, B, C = list(map(int, input().split()))
ans = "Yes"
if (A + B) < C:
ans = "No"
print(ans) | UTF-8 | Python | false | false | 100 | py | 202,060 | s755755148.py | 202,055 | 0.47 | 0.47 | 0 | 8 | 11.625 | 41 |
gurudurairaj/gp | 5,111,011,115,872 | 0a1d8d61fc8433cee347aead45f53071aa1dbfd2 | c117f7064b7132778bead5a8b77b67e2429a2b7a | /zermat.py | eb726a3c64adba60293b5e5ba07be16f22ebd87f | []
| no_license | https://github.com/gurudurairaj/gp | 664306f41f73f8b620ba74b048372e1c94e59bc7 | 2fce98f7428103b54b9edd075d4a83dc434c2926 | refs/heads/master | 2020-04-15T05:00:45.934019 | 2019-05-26T17:54:54 | 2019-05-26T17:54:54 | 164,405,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=int(input())
m=list(map(int,input().split()))
c=0
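# Look for the first pair (i <= j) whose sum is 0 or 1; the c flag
# is used to break out of both loops once a pair has been printed.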
for i in range(len(m)):
for j in range(i,len(m)):
if m[i]+m[j]==0 or m[i]+m[j]==1:
print(m[i],m[j])
c=c+1
break
if c==1:
break
| UTF-8 | Python | false | false | 239 | py | 165 | zermat.py | 165 | 0.430962 | 0.410042 | 0 | 11 | 20.727273 | 40 |
AlbertaSat/cube_sat_comm | 15,358,803,087,540 | 9c4acdfa00b47d4a12d2480e0242958d986b55fe | 3c2f73cdd489cf44e7a1a844b6cf449f6d8c58fe | /cube_sat_comm/drawing.py | f07c6a79b552fe88a2223e4b33a9a3e49cf8f2ab | []
| no_license | https://github.com/AlbertaSat/cube_sat_comm | a55f7a6694ae792dae7df8d5e9e9e56f2c528596 | b842a71c835710b873d0d8f5cabc0014dfd1f07b | refs/heads/master | 2020-04-14T14:07:11.747736 | 2019-01-02T21:17:35 | 2019-01-02T21:21:03 | 163,887,345 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from queue import Queue
from threading import Thread, Event
from cube_sat_comm.curses_state import curses_print
_MAX_QUEUE_WAIT = 1.0
_queued_tasks = Queue()
_should_exit = Event()
_new_tasks_event = Event()
def init_drawing_thread():
handle = Thread(target=_draw_thread_loop)
handle.start()
return handle
def queue_message(mess):
_queued_tasks.put(lambda: _print_to_output(mess), True, _MAX_QUEUE_WAIT)
_new_tasks_event.set()
def queue_task(task):
_queued_tasks.put(task, True, _MAX_QUEUE_WAIT)
_new_tasks_event.set()
def stop_drawing_thread():
_should_exit.set()
_new_tasks_event.set() # To get the thread to exit nicely
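# Worker loop: drain all queued tasks, then block until new tasks arrive
# (or until stop_drawing_thread() sets the exit flag).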
def _draw_thread_loop():
while not _should_exit.is_set():
while not _queued_tasks.empty():
_queued_tasks.get()()
_new_tasks_event.wait()
_new_tasks_event.clear()
def _print_to_output(mess):
curses_print(mess)
| UTF-8 | Python | false | false | 929 | py | 11 | drawing.py | 8 | 0.654467 | 0.652314 | 0 | 44 | 20.113636 | 76 |
ArthurKVasque07/PythonGEEK | 7,052,336,305,895 | 878ef561fe34b2fad03184bd65c8d5e0190f0f6b | dbdc002660adf3f633c4d5d4eb890ff43ba229a7 | /estruturas_logicas_and_or_not_is.py | cd82a28f9c640bd3f75b5ff5224291395d7b1bb5 | []
| no_license | https://github.com/ArthurKVasque07/PythonGEEK | df1f184435a863ce872df1e366463b4fec9a6c64 | bd8b86608fd854643d3f81f02b48db88f4e6f832 | refs/heads/master | 2022-10-06T18:49:04.441047 | 2020-06-10T20:54:18 | 2020-06-10T20:54:18 | 271,382,829 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Unary operators:
- not, is
Binary operators:
- and, or
For 'and', both values need to be True
For 'or', only one of the values needs to be True
For 'not', the boolean value is inverted, i.e. True becomes False
"""
ativo = True
logado = False
if ativo and logado:
    print('Welcome')
else:
    print('You need to activate your account!')
##############
if ativo or logado:
    print('Welcome')
else:
    print('You need to activate your account!')
##############
# If the account is not active
if not ativo:
    print('You need to activate your account')
else:
    print('Welcome')
#############
if ativo is logado:
    print('Welcome')
else:
    print('You need to activate your account')
| UTF-8 | Python | false | false | 722 | py | 22 | estruturas_logicas_and_or_not_is.py | 19 | 0.619777 | 0.619777 | 0 | 39 | 17.358974 | 77 |
pf4d/issm_python | 10,067,403,358,550 | dbef9245519371df05c8bed045b30afef76db972 | 93022749a35320a0c5d6dad4db476b1e1795e318 | /issm/cyclone.py | 7f07dbe4c48e787920c81df1e7151386a3e24aa8 | [
"BSD-3-Clause"
]
| permissive | https://github.com/pf4d/issm_python | 78cd88e9ef525bc74e040c1484aaf02e46c97a5b | 6bf36016cb0c55aee9bf3f7cf59694cc5ce77091 | refs/heads/master | 2022-01-17T16:20:20.257966 | 2019-07-10T17:46:31 | 2019-07-10T17:46:31 | 105,887,661 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
from issm.fielddisplay import fielddisplay
from issm.pairoptions import pairoptions
from issm.issmssh import issmssh
from issm.issmscpin import issmscpin
from issm.issmscpout import issmscpout
from issm.QueueRequirements import QueueRequirements
import datetime
try:
from issm.cyclone_settings import cyclone_settings
except ImportError:
print 'You need cyclone_settings.py to proceed, check presence and sys.path'
class cyclone(object):
"""
    Be aware that this is not a cluster as we usually know them. There is no scheduling and resources are pretty low.
    The computer has 20 CPUs and 512 GB of memory shared by a number of people, so be respectful with your usage.
    I put some restrictive upper limits in place to avoid over-use. (Basile)
Usage:
cluster=cyclone();
"""
def __init__(self,*args):
# {{{
self.name = 'cyclone'
self.login = ''
self.np = 2
self.time = 100
self.codepath = ''
self.executionpath = ''
self.port = ''
self.interactive = 0
#use provided options to change fields
options=pairoptions(*args)
#initialize cluster using user settings if provided
self=cyclone_settings(self)
#OK get other fields
self=options.AssignObjectFields(self)
# }}}
def __repr__(self):
# {{{
# display the object
s = "class cyclone object:"
s = "%s\n%s"%(s,fielddisplay(self,'name','name of the cluster'))
s = "%s\n%s"%(s,fielddisplay(self,'login','login'))
s = "%s\n%s"%(s,fielddisplay(self,'np','number of processes'))
s = "%s\n%s"%(s,fielddisplay(self,'time','walltime requested in minutes'))
s = "%s\n%s"%(s,fielddisplay(self,'codepath','code path on the cluster'))
s = "%s\n%s"%(s,fielddisplay(self,'executionpath','execution path on the cluster'))
return s
# }}}
def checkconsistency(self,md,solution,analyses):
# {{{
#Miscelaneous
if not self.login:
md = md.checkmessage('login empty')
if not self.codepath:
md = md.checkmessage('codepath empty')
if not self.executionpath:
md = md.checkmessage('executionpath empty')
        if self.time > 72 * 60:
            md = md.checkmessage('walltime exceeds 72h; for niceness this is not allowed, if you need more time consider shifting to one of the Notur systems')
        if self.np > 10:
            md = md.checkmessage('number of processes exceeds 10, if you need more processing power consider shifting to one of the Notur systems')
return self
# }}}
def BuildQueueScript(self,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling):
# {{{
executable='issm.exe'
#write queuing script
shortname=modelname[0:min(12,len(modelname))]
fid=open(modelname+'.queue','w')
fid.write('export ISSM_DIR="%s/../"\n' % self.codepath)
fid.write('source $ISSM_DIR/etc/environment.sh\n')
fid.write('INTELLIBS="/opt/intel/intelcompiler-12.04/composerxe-2011.4.191/compiler/lib/intel64"\n')
fid.write('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/:$INTELLIBS\n')
fid.write('export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/x86_64-linux-gnu/c++/4.8\n')
fid.write('cd %s/%s/\n\n' % (self.executionpath,dirname))
rundir=self.executionpath+'/'+dirname
runfile=self.executionpath+'/'+dirname+'/'+modelname
fid.write('mpiexec -np %i %s/%s %s %s %s >%s.outlog 2>%s.errlog\n' % (self.np,self.codepath,executable,str(solution),rundir,modelname,runfile,runfile))
fid.close()
# }}}
def UploadQueueJob(self,modelname,dirname,filelist):
# {{{
#compress the files into one zip.
compressstring='tar -zcf %s.tar.gz ' % dirname
for file in filelist:
compressstring += ' %s' % file
subprocess.call(compressstring,shell=True)
print 'uploading input file and queueing script'
issmscpout(self.name,self.executionpath,self.login,self.port,[dirname+'.tar.gz'])
# }}}
def LaunchQueueJob(self,modelname,dirname,filelist,restart,batch):
# {{{
print 'launching solution sequence on remote cluster'
if restart:
launchcommand='cd %s && cd %s && qsub %s.queue' % (self.executionpath,dirname,modelname)
else:
launchcommand='cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && chmod +x ./%s.queue && ./%s.queue' % (self.executionpath,dirname,dirname,dirname,dirname,dirname,modelname,modelname)
issmssh(self.name,self.login,self.port,launchcommand)
# }}}
def Download(self,dirname,filelist):
# {{{
#copy files from cluster to current directory
directory='%s/%s/' % (self.executionpath,dirname)
issmscpin(self.name,self.login,self.port,directory,filelist)
# }}}
| UTF-8 | Python | false | false | 4,597 | py | 335 | cyclone.py | 214 | 0.690885 | 0.681749 | 0 | 124 | 36.072581 | 225 |
ddc899/cmpt145 | 17,523,466,587,998 | ff9078cfb333e718f654dfe78194d1f4368ca166 | 043d91547df1c9824cdff5386c74083b234803c2 | /assignments/assignment6/a6q1_testing.py | ab48e7ae3126c901ba742d7ba85671aa77056993 | []
| no_license | https://github.com/ddc899/cmpt145 | 9824b7caad98f78075dd42c5ecb1c1617f4628cf | 2a8c2f36d42082dffdc6e79a9822aa2d4ad925a9 | refs/heads/master | 2022-01-26T22:44:02.647310 | 2018-07-27T22:51:07 | 2018-07-27T22:51:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import LList as llist
import node as node
chain = llist.create()
twochain = llist.create()
emptychain = llist.create()
llist.add_to_front(chain, 5)
llist.add_to_back(chain, 7)
llist.add_to_back(chain, 12)
llist.add_to_back(chain, 3)
llist.add_to_back(chain, 6)
llist.add_to_back(chain, 2)
llist.add_to_back(chain, 11)
print(node.to_string(chain['head']))
llist.sorted(chain)
# llist.ya(cain)
print(node.to_string(chain['head']))
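# Slice out a sub-list of the chain between positions 2 and 6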
slice_list = llist.slice(chain, 2,6)
print(node.to_string(slice_list['head']))
# print(node.to_string(threechain['head']))
# yachain = threechain
# print(node.to_string(twochain['head']))
# llist.extend(threechain, twochain)
# print(node.to_string(threechain['head']))
# print(node.to_string(threechain['head']))
#
#
# # print(node.to_string(chain['head']))
# # print(node.to_string(chain['head']))
# # print(node.to_string(twochain['head']))
# # print(node.to_string(threechain['head']))
# print(node.to_string(threechain['head']))
# print(threechain['tail'])
| UTF-8 | Python | false | false | 1,000 | py | 120 | a6q1_testing.py | 98 | 0.698 | 0.687 | 0 | 42 | 22.809524 | 45 |
junecong/bond | 14,972,256,042,908 | 7e689e5c4af2eadd6cdc11ff008131a4e8d44f85 | 9af6e89143358a50b62445adf4716ff34fdb2bc8 | /pybond/build/lib/bond/bond.py | 7a942f7ea10cab83fb6c4e571987ab31ae9ad400 | []
| no_license | https://github.com/junecong/bond | 9eab194b922793698f9017f452039ebc405d8bc0 | c88030faa5ff4b75fa92f3a7558fa905a5c4661a | refs/heads/master | 2021-01-15T19:35:37.585392 | 2015-10-29T16:55:49 | 2015-10-29T16:56:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Hello
"""
class Bond:
"""
Bond class
"""
pass
| UTF-8 | Python | false | false | 67 | py | 34 | bond.py | 8 | 0.402985 | 0.402985 | 0 | 9 | 6.444444 | 14 |
figueiredorodrigo/Exercicios-Guanabara | 6,665,789,268,833 | ff99c985b36208bc4c74d8a1c5231b2ebf0ea99d | 6eb097cccbc0e040eb940663f85ce7eacb2be95b | /Desafio004.py | 6f41c6ba91c936b05b412f85b49aaba4d2ac55db | []
| no_license | https://github.com/figueiredorodrigo/Exercicios-Guanabara | c7cdb534b3f7c2db0e2bffc2b4376af035213b3a | 621000882ab3aa080415bb04336fd1713ab85b5d | refs/heads/main | 2023-06-02T07:10:22.555624 | 2021-06-15T16:33:26 | 2021-06-15T16:33:26 | 376,381,603 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n1 = int(input('Enter a number: '))
pt = 1
ant = n1 - pt
suc = n1 + pt
print(f'The number is: {n1}, its predecessor is: {ant} and its successor is: {suc}') | UTF-8 | Python | false | false | 161 | py | 68 | Desafio004.py | 67 | 0.589744 | 0.557692 | 0 | 5 | 29.6 | 79 |
CristianWulfing/PySCHC | 7,705,171,370,497 | ed6c82436b92ade94f54fe592723177f8d808bbe | 4ec675a77327d98b93c1c1c1be00ca99d8afdcaf | /fragmentation_layer/tests/test_base/test_bitmap.py | ac8305c4d182c6d8a3cbe4ab7d2f7fe98ae807dd | [
"LicenseRef-scancode-ietf-trust",
"BSD-2-Clause",
"MIT"
]
| permissive | https://github.com/CristianWulfing/PySCHC | 7d4cf02b155cc4b92711a52faf893ed99b92852e | 2b1d9ed7d7c9857cbb362bdee5c77f7234838ddd | refs/heads/master | 2023-07-09T07:22:57.665826 | 2021-07-02T04:25:13 | 2021-07-02T04:25:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ test_bitmap: Unit test for Bitmap class """
from unittest import TestCase, main
from schc_base import Bitmap
from schc_protocols import LoRaWAN
class TestBitmap(TestCase):
def test_constructor(self):
bitmap = Bitmap(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR))
self.assertEqual([False] * LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE, bitmap.__bitmap__,
"Wrong bitmap generated")
self.assertEqual(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE, len(bitmap.__bitmap__), "Wrong length of bitmap")
bitmap = Bitmap(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR), short_size=10)
self.assertEqual([False] * 10, bitmap.__bitmap__, "Wrong bitmap generated (short)")
self.assertEqual(10, len(bitmap.__bitmap__), "Wrong length of bitmap (short)")
def test_register_tile(self):
bitmap = Bitmap(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR))
bitmap.tile_received(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE - 1)
self.assertTrue(bitmap.__bitmap__[0], "Wrong first tile registered")
fcn = 30
bitmap.tile_received(fcn)
self.assertTrue(bitmap.__bitmap__[LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE - fcn - 1],
"Wrong tile registered {}".format(fcn))
bitmap.tile_received(0)
self.assertTrue(bitmap.__bitmap__[LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE - 1],
"Wrong last tile registered")
self.assertEqual(LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR).WINDOW_SIZE, len(bitmap.__bitmap__),
"Length changed")
self.assertEqual(3, sum(bitmap.__bitmap__), "Wrong registration")
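    # An all-ones bitmap should compress down to just the bits needed to pad the ACK header to an L2 word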
def test_compression_all_one(self):
protocol_to_use = LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR)
bitmap = Bitmap(protocol_to_use)
bitmap.__bitmap__ = [True] * protocol_to_use.WINDOW_SIZE
compressed_bitmap = bitmap.generate_compress()
self.assertEqual(
protocol_to_use.L2_WORD - (sum([
protocol_to_use.RULE_SIZE, protocol_to_use.T, protocol_to_use.M, 1
]) % protocol_to_use.L2_WORD),
len(compressed_bitmap), "Wrong compression")
self.assertEqual(
protocol_to_use.L2_WORD - (sum([
protocol_to_use.RULE_SIZE, protocol_to_use.T, protocol_to_use.M, 1
]) % protocol_to_use.L2_WORD),
sum(compressed_bitmap), "Wrong compression")
def test_compression_uncompressed(self):
protocol_to_use = LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR)
bitmap = Bitmap(protocol_to_use)
compressed_bitmap = bitmap.generate_compress()
self.assertEqual(protocol_to_use.WINDOW_SIZE, len(compressed_bitmap), "Wrong compression")
self.assertEqual(0, sum(compressed_bitmap), "Wrong compression")
def test_compression(self):
protocol_to_use = LoRaWAN(rule_id=LoRaWAN.ACK_ON_ERROR)
bitmap = Bitmap(protocol_to_use)
bitmap.__bitmap__ = [True] * protocol_to_use.WINDOW_SIZE
bitmap.__bitmap__[protocol_to_use.L2_WORD] = False
compressed_bitmap = bitmap.generate_compress()
self.assertEqual(
protocol_to_use.L2_WORD - (sum([
protocol_to_use.RULE_SIZE, protocol_to_use.T,
protocol_to_use.M, 1
]) % protocol_to_use.L2_WORD) + protocol_to_use.L2_WORD,
len(compressed_bitmap), "Wrong compression")
self.assertEqual(
protocol_to_use.L2_WORD - (sum([
protocol_to_use.RULE_SIZE, protocol_to_use.T, protocol_to_use.M, 1
]) % protocol_to_use.L2_WORD) + protocol_to_use.L2_WORD - 1,
sum(compressed_bitmap), "Wrong compression")
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,859 | py | 79 | test_bitmap.py | 73 | 0.608448 | 0.600415 | 0 | 77 | 48.116883 | 125 |
kokkondaspandana/sample_project | 4,131,758,581,242 | 24d1912fbe4894ade24c48eac552c24afe98f76b | 99567393ed78b97dc14f29e947b8a2d92495a1a6 | /Hackerrank/collections/orded_dic.py | 2bddc3656301cb6089ea1fab2d3a6fe3b41ab12e | []
| no_license | https://github.com/kokkondaspandana/sample_project | 9b926bc99db89d4aa09478d01752292a0a19fba5 | 0ad29a7971ff03c2a7e715c0516108731648b6bf | refs/heads/master | 2021-01-22T17:58:14.773114 | 2017-03-24T13:04:50 | 2017-03-24T13:04:50 | 85,049,019 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import OrderedDict
d = OrderedDict()
for i in range(int(raw_input())):
item, space, quantity = raw_input().rpartition(' ')
d[item] = d.get(item, 0) + int(quantity)
for item, quantity in d.items():
print item, quantity
"""
Sample Input
9
BANANA FRIES 12
POTATO CHIPS 30
APPLE JUICE 10
CANDY 5
APPLE JUICE 10
CANDY 5
CANDY 5
CANDY 5
POTATO CHIPS 30
Sample Output
BANANA FRIES 12
POTATO CHIPS 60
APPLE JUICE 20
CANDY 20
"""
| UTF-8 | Python | false | false | 447 | py | 50 | orded_dic.py | 50 | 0.720358 | 0.666667 | 0 | 27 | 15.518519 | 55 |
anuragkumarbioinfo/MaeParser | 3,925,600,114,562 | 8323de8e1e3089eb7047903ae20edcda003bd276 | ba4ace278e839500fc7a5abbb57b9fe96d49391c | /dev3.py | e9b0cab2b244ab89dce3f6820b17079bcac295e8 | []
| no_license | https://github.com/anuragkumarbioinfo/MaeParser | 4027457a0d855d3118859ec9905f9f1c670cd9c0 | e2a8843217eba0aca6dbd25c0da7d372a669efb7 | refs/heads/master | 2020-03-22T08:45:21.751163 | 2018-07-05T03:06:25 | 2018-07-05T03:06:25 | 139,788,577 | 0 | 0 | null | true | 2018-07-05T03:07:51 | 2018-07-05T03:07:51 | 2018-07-05T03:06:27 | 2018-07-05T03:06:25 | 0 | 0 | 0 | 0 | null | false | null | import sys
mae=sys.argv[1]
atom=sys.argv[2]
from StringIO import StringIO
import csv
class MAEPARSER:
def __init__(self, mae):
self.mae = mae
self.read = ['atom_index','i_m_mmod_type', 'r_m_x_coord', 'r_m_y_coord', 'r_m_z_coord', 'i_m_residue_number', 's_m_mmod_res', 's_m_chain_name', 'i_m_color', 'r_m_charge1',
'r_m_charge2', 's_m_pdb_residue_name', 's_m_pdb_atom_name', 'i_m_atomic_number', 'i_m_formal_charge', 'i_m_representation', 'i_m_visibility', 's_m_color_rgb',
's_m_atom_name', 'i_m_secondary_structure', 's_m_label_format', 'i_m_label_color', 's_m_label_user_text', 'r_m_pdb_occupancy', 'i_i_constraint', 'i_i_internal_atom_index',
'i_m_pdb_convert_problem', 'i_pdb_PDB_serial', 'i_ppw_het', 's_ppw_CCD_assignment_status', 'r_m_pdb_tfactor', 'i_m_minimize_atom_index', 'i_pa_atomindex',
'i_pdb_seqres_index', 's_pa_state', 'i_ppw_water', 'x1', 'x2', 'x3', 'x4', 'x5']
self.read2 = {i:n for n,i in enumerate(self.read)}
self.prop = {}
self.atomvaluesTAB = {}
self.atomvalues = {}
self.res2atm = {}
self.start()
def start(self):
read = False
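        # Atom records sit between the 5th and 6th ":::" separators of the .mae file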
with open(self.mae) as filemae:
counter = 0
for lines in filemae:
if ":::" in lines:
counter += 1
if counter == 5:
read = True
if counter == 6:
read = False
if read and ":::" not in lines:
if len(lines) > 4:
self.load(lines)
def add(self, key, value):
name = self.read[key]
if name not in self.prop:self.prop[name] = []
self.prop[name].append(value)
def load(self, line):
data = StringIO(line)
reader = csv.reader(data, delimiter=' ')
line = list(reader)[0][2:]
#print line
#print len(line), len(self.read), line
#raise SystemExit
for n, i in enumerate(self.read):
if n >= len(line):
x = "@"
else:
x = line[n]
#print self.read[n], i
self.add(n,x)
def getkey(self,key):
name = self.read[key]
return self.prop[name]
def getnkey(self,key, n):
name = self.read[key]
return self.prop[name][n+1]
def getAll(self):
#print self.read[1], self.prop.keys()
atoms = len(self.prop[self.read[1]])
for each in range(atoms):
#print [len(self.prop[r]) for r in self.read]
xline = "\t".join([self.prop[r][each] for r in self.read])
self.atomvaluesTAB[self.prop['atom_index'][each]] = xline
self.atomvalues[self.prop['atom_index'][each]] = xline.split("\t")
#print self.prop['atom_index'][each]
#return self.atomvalues
def runAll(self):
if self.atomvalues == {}:
self.getAll()
if self.atomvalues == {}:
raise ValueError("No Values found in MAE")
def printAtom(self,atomnum):
self.getAll()
w = self.atomvalues[atomnum]
for i in range(len(w)):
            print self.read[i], "=>", w[i]
def setres2atm(self):
self.runAll()
#load residue to atom number:
for atom in self.atomvalues:
name = self.atomvalues[atom]
resid = name[self.read2['i_m_residue_number']].strip()
elem = name[self.read2['s_m_pdb_atom_name']].strip()
if resid not in self.res2atm: self.res2atm[resid] = {}
self.res2atm[resid][elem] = name[self.read2['atom_index']].strip()
def search(self, resid, elem):
resid = str(resid)
if self.res2atm == {}: self.setres2atm()
        if resid in self.res2atm:
            if elem in self.res2atm[resid]:
                return self.res2atm[resid][elem]
            else:
                print "Not Found"
                return self.res2atm[resid]
def qsite(self):
self.result = []
tosearch = [[114,"N"],
[113,"C"],
[114,"C"],
[115,"N"],
[144,"N"],
[143,"C"],
[144,"C"],
[145,"N"],
[147,"N"],
[146,"C"],
[147,"C"],
[148,"N"],
[209,"N"],
[208,"C"],
[209,"C"],
[210,"N"],
[243,"N"],
[242,"C"],
[243,"C"],
[244,"N"],
[246,"N"],
[245,"C"],
[246,"C"],
[247,"N"]]
for resid, elem in tosearch:
self.result.append(self.search(resid,elem))
return "\n".join(["qsitehcap {} {}".format(self.result[i],self.result[i+1]) for i in range(0, len(self.result), 2)])
a = MAEPARSER(mae)
a.printAtom(atom)
#print a.qsite()
#print "\nFrozen Atoms\n" + "#" *25 + """
#not ((res.num 114,144,147,209,243,246) OR ((res.ptype " FE ") OR (res.ptype "HOH ") OR (res.ptype "UNK ")) )
#""" + "#" *25
| UTF-8 | Python | false | false | 5,337 | py | 1 | dev3.py | 1 | 0.465805 | 0.441259 | 0 | 154 | 33.649351 | 192 |
intelivix/pyne-workshop-scraping-web | 2,078,764,189,936 | d15064bee9f2c42818f92ccddef1f007740bb25a | e21c2049e8a0d1ed34eb0850be4dd5d50759b15e | /nordestao/apps/campeonatos/admin.py | 55d5b05f74a006347d2bec3ea4bc229cc6406196 | [
"MIT"
]
| permissive | https://github.com/intelivix/pyne-workshop-scraping-web | 26a8f399307a746fda82ed3494d7a6720b950176 | c0696b669934eef2dbda81da3b7c058810041fa5 | refs/heads/master | 2020-04-28T12:26:11.113112 | 2019-05-10T23:18:28 | 2019-05-10T23:18:28 | 175,276,006 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from campeonatos.models import Team
from campeonatos.models import Championship
from campeonatos.models import Game
from campeonatos.models import Player
from campeonatos.models import Event
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
pass
@admin.register(Championship)
class ChampionshipAdmin(admin.ModelAdmin):
pass
@admin.register(Game)
class GameAdmin(admin.ModelAdmin):
pass
@admin.register(Player)
class PlayerAdmin(admin.ModelAdmin):
pass
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
pass
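# The empty ModelAdmin subclasses above just use Django's defaults; an
# equivalent, shorter registration (behaviour-preserving) would be:
#
#   for model in (Team, Championship, Game, Player, Event):
#       admin.site.register(model)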
| UTF-8 | Python | false | false | 586 | py | 39 | admin.py | 14 | 0.795222 | 0.795222 | 0 | 31 | 17.903226 | 43 |
chaeonee/Programmers | 137,438,960,351 | 570e271be05d9f629d1115750d222812cdc38722 | b204cddc90c19ad8d4587581b4d7ec0b0fed0f45 | /level4/[카카오인턴]동굴탐험.py | d556c233c9e8c1057f306dd6182fb33053fe7eff | []
| no_license | https://github.com/chaeonee/Programmers | db741c7c17b933ff2a42521d5bc1077532375021 | f582ca16ec351f1f4678847949cb66e7544b9162 | refs/heads/main | 2023-04-20T13:50:46.722283 | 2021-05-06T09:44:16 | 2021-05-06T09:44:16 | 348,239,956 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
def solution(n, path_list, order_list):
    # adjacency list for the bidirectional cave paths
    path = {}
    for s, e in path_list:
        path.setdefault(s, []).append(e)
        path.setdefault(e, []).append(s)
    # order[s] = e  means s must be visited before e;
    # r_order[e] = s means e is blocked until s has been visited.
    order = {}
    r_order = {}
    for s, e in order_list:
        order[s] = e
        r_order[e] = s
visit = [False]*n
visit[0] = True
q = deque()
q.append(0)
    while q:
        n -= 1  # one more room actually entered
        room = q.popleft()
        if room not in path:
            continue
        for r in path[room]:
            if visit[r]:
                continue
            visit[r] = True
            # Enter r only if it has no prerequisite, or its prerequisite
            # has already been visited; otherwise it stays parked here.
            if r not in r_order or visit[r_order[r]]:
                q.append(r)
            # If r unblocks a room that was reached (and parked) earlier,
            # release that room now.
            if r in order and visit[order[r]]:
                q.append(order[r])
    # Every room was entered exactly when the countdown reached zero.
    return n == 0
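# Two tiny hand-checked cases (constructed for illustration; they are not
# the examples from the original problem statement):
if __name__ == '__main__':
    # Room 1 lies on the only path from the entrance to room 2, so the
    # order "visit 2 before 1" is impossible.
    assert not solution(3, [[0, 1], [1, 2]], [[2, 1]])
    # Both rooms hang directly off the entrance, so either order works.
    assert solution(3, [[0, 1], [0, 2]], [[2, 1]])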
| UTF-8 | Python | false | false | 1,043 | py | 184 | [카카오인턴]동굴탐험.py | 184 | 0.41419 | 0.411314 | 0 | 46 | 21.673913 | 60 |
bweems23/airqo-monitoring | 3,959,959,882,342 | dfaeaa526aaa10b0e6f8a43db0b948075e2118f7 | a763161a33def4a2024182da6361416338f64610 | /airqo_monitor/external/thingspeak.py | 67eb92adb0310d41ee0d85a83350edf0bc85c4d1 | []
| no_license | https://github.com/bweems23/airqo-monitoring | cdab9ab9c498ef70e805b5880ecc402c7eedc8f0 | e18ab3a96cfb201174aba600434f21d5071e2504 | refs/heads/master | 2022-12-12T21:10:31.241135 | 2019-02-05T01:45:50 | 2019-02-05T01:45:50 | 154,288,408 | 0 | 1 | null | false | 2021-06-10T20:56:00 | 2018-10-23T08:09:36 | 2019-02-05T01:45:53 | 2021-06-10T20:55:58 | 279 | 0 | 1 | 4 | Python | false | false | import json, requests
import os
from collections import defaultdict
from datetime import datetime, timedelta
from werkzeug.contrib.cache import SimpleCache
from airqo_monitor.constants import (
AIR_QUALITY_MONITOR_KEYWORD,
API_KEY_CONFIG_VAR_NAME,
DEFAULT_THINGSPEAK_FEEDS_INTERVAL_DAYS,
INACTIVE_MONITOR_KEYWORD,
THINGSPEAK_FEEDS_LIST_MAX_NUM_RESULTS,
THINGSPEAK_CHANNELS_LIST_URL,
THINGSPEAK_FEEDS_LIST_URL,
)
cache = SimpleCache()
def get_api_key_for_channel(channel_id):
"""
Get API key for channel from environment variables. They are stored as
'CHANNEL_<Thingspeak Channel ID>_API_KEY'.
Returns: string API key for channel
"""
var_name = API_KEY_CONFIG_VAR_NAME.format(str(channel_id))
api_key = os.environ.get(var_name)
return api_key
def get_data_for_channel(channel, start_time=None, end_time=None):
"""
Get all channel data for a single channel between start_time and end_time. By default,
the window goes back 1 week.
    This API returns a maximum of 8000 results, so we keep requesting data until we have fewer
    than 8000 results returned, or until we get a -1 response from the API (which means that there are no more results).
Returns: A list of data point dicts, from oldest to newest
"""
if not start_time:
start_time = datetime.now() - timedelta(days=DEFAULT_THINGSPEAK_FEEDS_INTERVAL_DAYS)
if not end_time:
end_time = datetime.now()
# convert to string before the loop because this never changes
start_time_string = datetime.strftime(start_time,'%Y-%m-%dT%H:%M:%SZ')
api_url = THINGSPEAK_FEEDS_LIST_URL.format(channel)
all_data = []
while start_time <= end_time:
full_url = '{}/feeds/?start={}&end={}'.format(
api_url,
start_time_string,
datetime.strftime(end_time,'%Y-%m-%dT%H:%M:%SZ'),
)
api_key = get_api_key_for_channel(channel)
if api_key:
full_url += '&api_key={}'.format(api_key)
result = make_post_call(full_url)
# This means we got an empty result set and are done
if result == -1:
break
feeds = result['feeds']
all_data = feeds + all_data
# If we aren't hitting the max number of results then we
# have all of them for the time range and can stop iterating
if len(feeds) < THINGSPEAK_FEEDS_LIST_MAX_NUM_RESULTS:
break
first_result = feeds[0]
end_time = datetime.strptime(first_result['created_at'],'%Y-%m-%dT%H:%M:%SZ') - timedelta(seconds=1)
return all_data
def get_all_channels():
"""
Get all channels from Thingspeak that are associated with our THINGSPEAK_USER_API_KEY
Returns: List of channel data dicts
"""
api_key = os.environ.get('THINGSPEAK_USER_API_KEY')
full_url = '{}/?api_key={}'.format(THINGSPEAK_CHANNELS_LIST_URL, api_key)
channels = make_get_call(full_url)
return channels
def get_all_channels_cached():
"""
Wrapper around get_all_channels to allow caching. This data shouldn't change often.
Returns: List of channel data dicts
"""
cached_value = cache.get('get-all-channels')
if cached_value is None:
cached_value = get_all_channels()
cache.set('get-all-channels', cached_value, timeout=30 * 60)
return cached_value
def get_all_channels_by_type(channel_type):
"""
Get all channels from Thingspeak that are associated with our THINGSPEAK_USER_API_KEY and
that have a tag that matches the channel_type param (current types are 'airqo' or 'soil')
Returns: List of channel data dicts
"""
api_key = os.environ.get('THINGSPEAK_USER_API_KEY')
full_url = '{}/?api_key={}&tag={}'.format(THINGSPEAK_CHANNELS_LIST_URL, api_key, channel_type)
channels = make_get_call(full_url)
# For some reason the API returns a list on success and a dict when there's an error
status = channels.get('status') if isinstance(channels, dict) else None
if status and status != '200':
print('[get_all_channels_by_type] Problem reaching Thingspeak API with status: {}'.format(status))
return channels
def make_post_call(url):
"""
Make a post call to any URL and parse the json
Returns: Parsed json response (can be dict or list depending on the expected API response)
"""
return json.loads(requests.post(url).content)
def make_get_call(url):
"""
Make a get call to any URL and parse the json
Returns: Parsed json response (can be dict or list depending on the expected API response)
"""
return json.loads(requests.get(url).content)
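if __name__ == '__main__':
    # Smoke test with a hypothetical channel id; running it requires network
    # access, a valid THINGSPEAK_USER_API_KEY and (for private channels) a
    # CHANNEL_<id>_API_KEY variable in the environment.
    feeds = get_data_for_channel(123456, start_time=datetime.now() - timedelta(days=1))
    print('{} data points in the last 24 hours'.format(len(feeds)))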
| UTF-8 | Python | true | false | 4,668 | py | 47 | thingspeak.py | 37 | 0.664953 | 0.660668 | 0 | 142 | 31.873239 | 119 |
silvarogerioeduardo/PIM | 17,695,265,281,876 | 9c02a7720d601373e6ea703ebec29c2c5a794a93 | 74d7f58ff079daa7cc069afa0ae90b9b577cd9ce | /Python/RGB_grayscale.py | 0ed8e55d21ebbe91212c248ed8820f4f2565068c | []
| no_license | https://github.com/silvarogerioeduardo/PIM | c9b04dde3bcd8bdf375552d8113099ee201af2f4 | 4a63f80cf84e629de17823cea4cf354fd0f30a1c | refs/heads/master | 2021-01-01T20:44:10.031642 | 2017-10-31T02:02:04 | 2017-10-31T02:02:04 | 98,922,340 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from PIL import Image
path = "Imagens/"
img = Image.open(path+'nature.jpg').convert('L')
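# Pillow's 'L' (8-bit grayscale) mode uses the ITU-R 601-2 luma transform:
#   L = R * 299/1000 + G * 587/1000 + B * 114/1000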
img.save(path+'nature-PB.jpg') | UTF-8 | Python | false | false | 119 | py | 24 | RGB_grayscale.py | 24 | 0.705882 | 0.705882 | 0 | 4 | 29 | 48 |
TissueMAPS/gc3pie | 15,582,141,368,358 | 1fdf1c79f9569ed1a4dd371ef4835f76b1a000f2 | 7de47aee3c33562dbc2154bd71b7ca37ea68ffd0 | /gc3apps/lacal.epfl.ch/gcrypto.py | 849719327d59da8617cd2489c744dff1ebad3ac8 | []
| no_license | https://github.com/TissueMAPS/gc3pie | c20af8080076a4200856ccd43281353cfd612b34 | 8d2bc69aa8f2b2dd8e4dc0b306cf484551a20caf | refs/heads/master | 2021-01-17T09:04:03.118013 | 2017-04-04T17:35:14 | 2017-04-04T17:35:14 | 59,831,862 | 1 | 2 | null | true | 2016-05-27T12:09:50 | 2016-05-27T12:09:50 | 2016-04-30T23:23:14 | 2016-05-27T11:20:35 | 161,424 | 0 | 0 | 0 | null | null | null | #! /usr/bin/env python
#
# gcrypto.py -- Front-end script for submitting multiple Crypto jobs to SMSCG.
"""
Front-end script for submitting multiple ``gnfs-cmd`` jobs to SMSCG.
It uses the generic `gc3libs.cmdline.SessionBasedScript` framework.
See the output of ``gcrypto --help`` for program usage instructions.
"""
# summary of user-visible changes
__changelog__ = """
2012-01-29:
* Moved CryptoApplication from gc3libs.application
* Restructured main script due to excessive size of initial
jobs. SessionBaseScript generate a single SequentialTask.
SequentialTask generated as many ParallelTasks as the whole range divided
by the number of simultaneous active jobs.
* Each ParallelTask lauches 'max_running' CryptoApplications
"""
__author__ = 'sergio.maffiolett@gc3.uzh.ch'
__docformat__ = 'reStructuredText'
# `SessionBasedScript` below needs a version string; the original file did
# not define one, so the value here is an assumption.
__version__ = '1.0'
# run script, but allow GC3Pie persistence module to access classes defined here;
# for details, see: https://github.com/uzh/gc3pie/issues/95
if __name__ == "__main__":
import gcrypto
gcrypto.GCryptoScript().run()
# stdlib imports
import fnmatch
import logging
import os
import os.path
import sys
from pkg_resources import Requirement, resource_filename
# GC3Pie interface
import gc3libs
from gc3libs.cmdline import SessionBasedScript, existing_file, positive_int, nonnegative_int
from gc3libs import Application, Run, Task
import gc3libs.exceptions
import gc3libs.application
from gc3libs.quantity import Memory, kB, MB, GB, Duration, hours, minutes, seconds
from gc3libs.workflow import SequentialTaskCollection, ParallelTaskCollection, ChunkedParameterSweep, RetryableTask
DEFAULT_INPUTFILE_LOCATION="srm://dpm.lhep.unibe.ch/dpm/lhep.unibe.ch/home/crypto/lacal_input_files.tgz"
DEFAULT_GNFS_LOCATION="srm://dpm.lhep.unibe.ch/dpm/lhep.unibe.ch/home/crypto/gnfs-cmd_20120406"
class CryptoApplication(gc3libs.Application):
"""
Represent a ``gnfs-cmd`` job that examines the range `start` to `start+extent`.
LACAL's ``gnfs-cmd`` invocation::
$ gnfs-cmd begin length nth
performs computations for a range: *begin* to *begin+length*,
and *nth* is the number of threads spwaned.
The following ranges are of interest: 800M-1200M and 2100M-2400M.
CryptoApplication(param, step, input_files_archive, output_folder, **extra_args)
"""
def __init__(self, start, extent, gnfs_location, input_files_archive, output, **extra_args):
gnfs_executable_name = os.path.basename(gnfs_location)
# # set some execution defaults...
extra_args.setdefault('requested_cores', 4)
extra_args.setdefault('requested_architecture', Run.Arch.X86_64)
extra_args['jobname'] = "LACAL_%s" % str(start + extent)
extra_args['output_dir'] = os.path.join(extra_args['output_dir'], str(start + extent))
extra_args['tags'] = [ 'APPS/CRYPTO/LACAL-1.0' ]
extra_args['executables'] = ['./gnfs-cmd']
extra_args['requested_memory'] = Memory(
int(extra_args['requested_memory'].amount() / float(extra_args['requested_cores'])),
unit=extra_args['requested_memory'].unit)
gc3libs.Application.__init__(
self,
arguments = [ "./gnfs-cmd", start, extent, extra_args['requested_cores'], "input.tgz" ],
inputs = {
input_files_archive:"input.tgz",
gnfs_location:"./gnfs-cmd",
},
outputs = [ '@output.list' ],
# outputs = gc3libs.ANY_OUTPUT,
stdout = 'gcrypto.log',
join=True,
**extra_args
)
def terminated(self):
"""
Checks whether the ``M*.gz`` files have been created.
The exit status of the whole job is set to one of these values:
* 0 -- all files processed successfully
* 1 -- some files were *not* processed successfully
* 2 -- no files processed successfully
* 127 -- the ``gnfs-cmd`` application did not run at all.
"""
# XXX: need to gather more info on how to post-process.
# for the moment do nothing and report job's exit status
if self.execution.exitcode:
            gc3libs.log.debug(
                'Application terminated. Postprocessing with execution.exitcode %d',
                self.execution.exitcode)
else:
gc3libs.log.debug(
'Application terminated. No exitcode available')
if self.execution.signal == 123:
# XXX: this is fragile as it does not really applies to all
# DataStaging errors.
# Assume Data staging problem at the beginning of the job
# resubmit
self.execution.returncode = (0, 99)
class CryptoTask(RetryableTask):
"""
Run ``gnfs-cmd`` on a given range
"""
def __init__(self, start, extent, gnfs_location, input_files_archive, output, **extra_args):
RetryableTask.__init__(
self,
# actual computational job
CryptoApplication(start, extent, gnfs_location, input_files_archive, output, **extra_args),
# XXX: should decide which policy to use here for max_retries
max_retries = 2,
# keyword arguments
**extra_args)
def retry(self):
"""
        Resubmit a crypto application instance iff it exited with code 99.
*Note:* There is currently no upper limit on the number of
resubmissions!
"""
if self.task.execution.exitcode == 99:
return True
else:
return False
class CryptoChunkedParameterSweep(ChunkedParameterSweep):
"""
Provided the beginning of the range `range_start`, the end of the
range `range_end`, the slice size of each job `slice`,
`CryptoChunkedParameterSweep` creates `chunk_size`
`CryptoApplication`s to be executed in parallel.
Every update cycle it will check how many new CryptoApplication
will have to be created (each of the launching in parallel
DEFAULT_PARALLEL_RANGE_INCREMENT CryptoApplications) as the
following rule: [ (end-range - begin_range) / step ] /
DEFAULT_PARALLEL_RANGE_INCREMENT
"""
def __init__(self, range_start, range_end, slice, chunk_size,
input_files_archive, gnfs_location, output_folder, **extra_args):
# remember for later
self.range_start = range_start
self.range_end = range_end
self.parameter_count_increment = slice * chunk_size
self.input_files_archive = input_files_archive
self.gnfs_location = gnfs_location
self.output_folder = output_folder
self.extra_args = extra_args
ChunkedParameterSweep.__init__(
self, range_start, range_end, slice, chunk_size, **self.extra_args)
def new_task(self, param, **extra_args):
"""
Create a new `CryptoApplication` for computing the range
`param` to `param+self.parameter_count_increment`.
"""
return CryptoTask(param, self.step, self.gnfs_location, self.input_files_archive, self.output_folder, **self.extra_args.copy())
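# Worked example for the chunking above (numbers chosen for illustration):
# with range_start=0, range_end=100000, slice=1000 and chunk_size=10, every
# update cycle adds up to 10 CryptoTasks of 1000 values each, i.e.
# slice * chunk_size = 10000 values per cycle, until all 100 tasks exist.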
## the script itself
class GCryptoScript(SessionBasedScript):
# this will be display as the scripts' `--help` text
"""
Like a `for`-loop, the ``gcrypto`` driver script takes as input
three mandatory arguments:
1. RANGE_START: initial value of the range (e.g., 800000000)
2. RANGE_END: final value of the range (e.g., 1200000000)
3. SLICE: extent of the range that will be examined by a single job (e.g., 1000)
For example::
gcrypto 800000000 1200000000 1000
will produce 400000 jobs; the first job will perform calculations
on the range 800000000 to 800000000+1000, the 2nd one will do the
range 800001000 to 800002000, and so on.
    The input file archive location (e.g. lfc://lfc.smscg.ch/crypto/lacal/input.tgz)
    can be specified with the '-i' option. Otherwise a default filename
    'input.tgz' will be searched for in the current directory.
Job progress is monitored and, when a job is done,
output is retrieved back to submitting host in folders named:
'range_start + (slice * actual step)'
The `gcrypto` command keeps a record of jobs (submitted, executed and
pending) in a session file (set name with the '-s' option); at each
invocation of the command, the status of all recorded jobs is updated,
output from finished jobs is collected, and a summary table of all
known jobs is printed. New jobs are added to the session if new input
files are added to the command line.
Options can specify a maximum number of jobs that should be in
'SUBMITTED' or 'RUNNING' state; `gcrypto` will delay submission
of newly-created jobs so that this limit is never exceeded.
"""
def __init__(self):
SessionBasedScript.__init__(
self,
version = __version__, # module version == script version
stats_only_for = CryptoApplication,
)
def setup_args(self):
"""
Set up command-line argument parsing.
The default command line parsing considers every argument as
an (input) path name; processing of the given path names is
done in `parse_args`:meth:
"""
# self.add_param('args',
# nargs='*',
# metavar=
# """
# [range_start] [range_end] [slice],
# help=[range_start]: Positive integer value of the range start.
# [range_end]: Positive integer value of the range end.
# [slice]: Positive integer value of the increment.
# """
# )
self.add_param('range_start', type=nonnegative_int,
help="Non-negative integer value of the range start.")
self.add_param('range_end', type=positive_int,
help="Positive integer value of the range end.")
self.add_param('slice', type=positive_int,
help="Positive integer value of the increment.")
def parse_args(self):
# XXX: why is this necessary ? shouldn't add_params of 'args' handle this ?
# check on the use of nargs and type.
# if len(self.params.args) != 3:
# raise ValueError("gcrypto takes exaclty 3 arguments (%d are given)" % len(self.params.args))
# self.params.range_start = int(self.params.args[0])
# self.params.range_end = int(self.params.args[1])
# self.params.slice = int(self.params.args[2])
        if self.params.range_end <= self.params.range_start:
            raise ValueError(
                "End range must be greater than start range"
                " (start: %d, end: %d)."
                % (self.params.range_start, self.params.range_end))
def setup_options(self):
self.add_param("-i", "--input-files", metavar="PATH",
action="store", dest="input_files_archive",
default=DEFAULT_INPUTFILE_LOCATION,
help="Path to the input files archive."
" By default, the preloaded input archive available on"
" SMSCG Storage Element will be used: "
" %s" % DEFAULT_INPUTFILE_LOCATION)
self.add_param("-g", "--gnfs-cmd", metavar="PATH",
action="store", dest="gnfs_location",
default=DEFAULT_GNFS_LOCATION,
help="Path to the executable script (gnfs-cmd)"
" By default, the preloaded gnfs-cmd available on"
" SMSCG Storage Element will be used: "
" %s" % DEFAULT_GNFS_LOCATION)
def new_tasks(self, extra):
yield (
"%s-%s" % (str(self.params.range_start),str(self.params.range_end)), # jobname
CryptoChunkedParameterSweep,
[ # parameters passed to the constructor, see `CryptoSequence.__init__`
self.params.range_start,
self.params.range_end,
self.params.slice,
self.params.max_running, # increment of each ParallelTask
self.params.input_files_archive, # path to input.tgz
self.params.gnfs_location, # path to gnfs-cmd
self.params.output, # output folder
],
extra.copy()
)
def before_main_loop(self):
"""
Ensure each instance of `ChunkedParameterSweep` has
`chunk_size` set to the maximum allowed number of jobs.
"""
for task in self.session:
assert isinstance(task, CryptoChunkedParameterSweep)
task.chunk_size = self.params.max_running
| UTF-8 | Python | false | false | 12,748 | py | 274 | gcrypto.py | 242 | 0.62802 | 0.614214 | 0 | 328 | 37.865854 | 157 |
ashutoshvt/autogen | 4,982,162,071,799 | dc6b61c126c97730efa443de02c69ef33c6ae2f8 | 3e5f47d87d4baa4eaeec588abbcb35f3db5e9761 | /backup/special_conditions.py | 59abfa128e63c275d46ed1a32eab53f7eb2c9c80 | []
| no_license | https://github.com/ashutoshvt/autogen | 187f8fe416344c9dcfc6bcb214153129ee4b4bf2 | 47ff3010ead822e207f61b28382d72d5b3149808 | refs/heads/master | 2023-03-19T16:39:55.188003 | 2020-05-28T20:59:52 | 2020-05-28T20:59:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def startequiv_cond(list_terms):
    # For every term, double the prefactor once for each "equivalent"
    # operator (a T1/T2/D1/D2 that occurs more than once) that has no
    # contraction with V2/F1, provided one copy of that operator is the
    # first contraction (its name carries a 'Z').
    for term in list_terms:
        # print term.map_org
        # Count how many copies of each equivalent operator the term holds.
        counts = {'T1': 0, 'T2': 0, 'D1': 0, 'D2': 0}
        for op in term.large_op_list:
            prefix = op.name[:2]
            if prefix in counts:
                counts[prefix] += 1
        for equivop in ('T2', 'D1', 'D2', 'T1'):
            if counts[equivop] < 2:
                continue
            # At least two equivalent operators are present; require that
            # one of them is the first contraction and another one is not.
            first = any(equivop in op.name and 'Z' in op.name
                        for op in term.map_org)
            second = any(equivop in op.name and 'Z' not in op.name
                         for op in term.map_org)
            if not (first and second):
                continue
            # Collect the equivalent operators, and the names of all indices
            # contracted with V2/F1.  (The original used
            # large_op_list.index(), which assumes V2/F1 occurs once per
            # term; enumerate() keeps that behaviour.)
            map_org = []
            mapping = []
            for ind, item in enumerate(term.large_op_list):
                if equivop in item.name:
                    map_org.append(item)
                if 'V2' in item.name or 'F1' in item.name:
                    for coeff in term.coeff_list[ind]:
                        mapping.append(term.dict_ind[coeff])
            # Each equivalent operator with no connection to V2/F1
            # contributes a factor of two.
            for item in map_org:
                if item.name not in mapping:
                    term.fac = term.fac * 2.0
    return list_terms
| UTF-8 | Python | false | false | 4,738 | py | 58 | special_conditions.py | 46 | 0.454411 | 0.436682 | 0 | 116 | 39.844828 | 94 |
pphowakande/bakround-applicant | 13,855,564,548,097 | 83815b9ad628da42f19616bd88c9cacf26af7663 | 964d79bf9b2ab5b5389514f8cd730f1fefe1ffc8 | /bakround_applicant/forms.py | 8a2b539f4eff0ac4630241f19e50f4c2ad495c45 | []
| no_license | https://github.com/pphowakande/bakround-applicant | d216368231d3a998ba12a3c4210d5508e3eb9beb | 6cf5081fe4fd7b4ee7a9b458043ad2513a90560e | refs/heads/master | 2022-01-18T23:03:37.240329 | 2020-02-13T18:24:05 | 2020-02-13T18:24:05 | 240,319,316 | 0 | 0 | null | false | 2022-01-05T08:14:38 | 2020-02-13T17:23:57 | 2020-02-13T18:25:02 | 2022-01-05T08:14:35 | 58,233 | 0 | 0 | 22 | JavaScript | false | false | __author__ = "tplick"
__date__ = "December 22, 2016"
from collections import OrderedDict, defaultdict
from django.forms import Form, FileField, ValidationError, ModelChoiceField, CharField
from django.forms.fields import ChoiceField
from django.db.models import Q
from django.template.defaultfilters import filesizeformat
from django.conf import settings
from allauth.account.forms import SignupForm
from allauth.socialaccount.forms import SignupForm as SocialSignupForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit
from .all_models.db import LookupState, LookupIndustry, Job, JobFamily
from .utilities.functions import make_job_structure_for_dropdown, make_choice_set_for_state_codes
class JobModelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.job_name
class StateChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.state_code
def make_state_choice_field(required):
return StateChoiceField(queryset=LookupState.objects.all().order_by('state_code'),
required=required)
class RestrictedFileField(FileField):
def __init__(self, *args, **kwargs):
content_types = kwargs.pop('content_types', None)
if content_types is not None:
self.content_types = content_types
self.max_upload_size = kwargs.pop('max_upload_size', None)
if not self.max_upload_size:
self.max_upload_size = settings.MAX_UPLOAD_SIZE
super().__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
try:
if data.content_type in self.content_types:
if data.size > self.max_upload_size:
raise ValidationError('File size must be under {}. Current file size is {}.'.format(filesizeformat(self.max_upload_size), filesizeformat(data.size)))
else:
raise ValidationError('File type is not supported.')
except AttributeError:
pass
return data
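# Illustrative use of RestrictedFileField (hypothetical form, content types
# and size limit; not part of the original application):
class ExampleUploadForm(Form):
    attachment = RestrictedFileField(
        content_types=['application/pdf'],
        max_upload_size=2 * 1024 * 1024,  # 2 MB
    )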
# http://stackoverflow.com/questions/12303478/how-to-customize-user-profile-when-using-django-allauth
class BakroundSignupForm(SignupForm):
primary_occupation = ChoiceField([])
    password2 = None  # drop allauth's password-confirmation field
city = CharField(label='City', required=False)
state = ChoiceField([], required=False)
first_name = CharField(label='First Name', required=False)
last_name = CharField(label='Last Name', required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = make_form_helper()
fields = self.fields
job_structure = make_job_structure_for_dropdown(True)
fields['primary_occupation'] = ChoiceField(change_unselected_display(job_structure, 'Primary Occupation'))
fields['state'].choices = make_choice_set_for_state_codes('State')
set_placeholder(fields, 'first_name', 'First Name')
set_placeholder(fields, 'last_name', 'Last Name')
set_placeholder(fields, 'city', 'City')
set_placeholder(fields, 'email', 'Email Address')
set_placeholder(fields, 'password1', 'Set a Password')
# def signup(self, request, user):
#
# super().signup(request, user)
#
# user.save()
def set_placeholder(fields, key, value):
fields[key].widget.attrs['placeholder'] = value
def change_unselected_display(structure, value):
structure = list(structure) # make a copy
structure[0] = ('', [('', value)])
return structure
def make_form_helper():
helper = FormHelper()
helper.layout = Layout(
Fieldset(
'first arg is the legend of the fieldset',
'email',
'first_name',
'last_name',
'city',
'state',
'password1',
            'primary_occupation'
),
)
return helper
class BakroundSocialSignupForm(SocialSignupForm):
primary_occupation = ChoiceField([])
city = CharField(label='City', required=False)
state = ChoiceField([], required=False)
first_name = CharField(label='First Name', required=False)
last_name = CharField(label='Last Name', required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = make_social_form_helper()
fields = self.fields
job_structure = make_job_structure_for_dropdown(True)
fields['primary_occupation'] = ChoiceField(change_unselected_display(job_structure, 'Primary Occupation'))
fields['state'].choices = make_choice_set_for_state_codes('State')
set_placeholder(fields, 'first_name', 'First Name')
set_placeholder(fields, 'last_name', 'Last Name')
set_placeholder(fields, 'city', 'City')
# def signup(self, request, user):
# super().signup(request, user)
# user.save()
def make_social_form_helper():
helper = FormHelper()
helper.layout = Layout(
Fieldset(
'first arg is the legend of the fieldset',
'email',
            'first_name',
            'last_name',
            'city',
            'state',
            'primary_occupation'
),
)
return helper
def make_employer_form_helper():
helper = FormHelper()
helper.layout = Layout(
Fieldset(
'first arg is the legend of the fieldset',
'email',
            'first_name',
            'last_name',
            'city',
            'state',
            'password1',
            'company',
            'phone'
),
)
return helper
class JobFamilyChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.family_name
class IndustryChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.industry_name
class EmployerSignupForm(BakroundSignupForm):
industry = IndustryChoiceField(queryset=LookupIndustry.objects.order_by('id'),
required=True)
company = CharField(max_length=100, required=True)
phone = CharField(max_length=100, required=False)
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.helper = make_employer_form_helper()
# self.fields.pop('primary_occupation')
# self.fields['first_name'].required = True
# self.fields['last_name'].required = True
# self.fields['city'].required = True
# self.fields['state'].required = True
#
# original_fields = self.fields
# new_order = OrderedDict()
# for key in ['email', 'password1', 'first_name', 'last_name', 'company', 'city', 'state', 'phone']:
# new_order[key] = original_fields[key]
# self.fields = new_order
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
fields = self.fields
set_placeholder(fields, 'company', 'Company')
set_placeholder(fields, 'phone', 'Phone Number')
class IndeedTestForm(Form):
file = FileField()
| UTF-8 | Python | false | false | 7,143 | py | 422 | forms.py | 261 | 0.625087 | 0.621448 | 0 | 215 | 32.223256 | 169 |