Dataset schema (each data row below follows these columns):

| column | dtype | range / values |
|---|---|---|
| repo_name | string | lengths 7..111 |
| __id__ | int64 | 16.6k..19,705B |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 5..151 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26..130 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4..42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k..687M, nullable |
| star_events_count | int64 | 0..209k |
| fork_events_count | int64 | 0..110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0..10.2M, nullable |
| gha_stargazers_count | int32 | 0..178k, nullable |
| gha_forks_count | int32 | 0..88.9k, nullable |
| gha_open_issues_count | int32 | 0..2.72k, nullable |
| gha_language | string | lengths 1..16, nullable |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10..2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10..2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1..202k |
| filename | string | lengths 4..112 |
| num_lang_files | int64 | 1..202k |
| alphanum_fraction | float64 | 0.26..0.89 |
| alpha_fraction | float64 | 0.2..0.89 |
| hex_fraction | float64 | 0..0.09 |
| num_lines | int32 | 1..93.6k |
| avg_line_length | float64 | 4.57..103 |
| max_line_length | int64 | 7..931 |
szollosova/webstrom | 16,492,674,421,140 | 7a86ebce0cc05d831421c0af1bac7a7ed47cb528 | adb6f583accf1ab7409253210136e0e9a51e2cbf | /user/forms.py | cc94d1b58bdc6b4611515915f093a4bd351c5e7d | []
| no_license | https://github.com/szollosova/webstrom | b06b86a2798705a8f4765e4b583166f552e30387 | 10b5e433b62291b4e3fc1d4169011db9fb2f7f76 | refs/heads/master | 2022-07-16T15:16:51.556206 | 2020-05-17T07:36:32 | 2020-05-17T07:36:32 | 263,088,587 | 0 | 0 | null | true | 2020-05-11T15:52:32 | 2020-05-11T15:52:31 | 2020-05-11T14:47:53 | 2020-05-11T14:47:50 | 2,292 | 0 | 0 | 0 | null | false | false |
from django import forms
from django.contrib.auth.password_validation import validate_password
from competition.models import Grade
from user.models import County, District, Profile, User
class UserCreationForm(forms.ModelForm):
class Meta:
model = User
fields = ('email',)
password1 = forms.CharField(label='Heslo', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Potvrdenie hesla', widget=forms.PasswordInput)
def clean_password1(self):
password1 = self.cleaned_data['password1']
validate_password(password1)
return password1
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Heslá sa nezhodujú')
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class ProfileCreationForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['first_name', 'last_name', 'nickname',
'school_not', 'county', 'district', 'school',
'school_name', 'school_not_found', 'school_info',
'grade', 'phone', 'parent_phone', 'gdpr', ]
grade = forms.ModelChoiceField(
queryset=Grade.objects.filter(is_active=True),
label='Ročník',
help_text='V prípade, že je leto, zadaj ročník, '
'ktorý končíš (školský rok začína septembrom).')
school_not = forms.BooleanField(
required=False,
label='Už nie som študent základnej ani strednej školy.')
school_name = forms.CharField(
required=False,
label='Škola*')
school_not_found = forms.BooleanField(
required=False,
label='Moja škola sa v zozname nenachádza.')
school_info = forms.CharField(
required=False,
widget=forms.Textarea,
label='povedz nám, kam chodíš na školu, aby sme ti ju mohli dodatočne pridať')
county = forms.ModelChoiceField(
required=False,
queryset=County.objects,
label='Kraj školy')
district = forms.ModelChoiceField(
required=False,
queryset=District.objects,
label='Okres školy')
def __init__(self, *args, **kwargs):
super(ProfileCreationForm, self).__init__(*args, **kwargs)
self.fields['county'].queryset = County.objects.all_except_unspecified()
self.fields['school'].widget = forms.HiddenInput()
def clean_gdpr(self):
gdpr = self.cleaned_data['gdpr']
if not gdpr:
raise forms.ValidationError(
'Súhlas so spracovaním osobných údajov je nutnou podmienkou registrácie')
return gdpr
def save(self, commit=True):
profile = super(ProfileCreationForm, self).save(commit=False)
profile.year_of_graduation = \
self.cleaned_data['grade'].get_year_of_graduation_by_date()
if commit:
profile.save()
return profile
| UTF-8 | Python | false | false | 3,266 | py | 14 | forms.py | 9 | 0.632508 | 0.626935 | 0 | 99 | 31.626263 | 89 |
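A minimal usage sketch of the form above (editorial example; the field values are hypothetical, and a configured Django project with user.models.User as the active user model is assumed):

```python
form = UserCreationForm(data={
    'email': 'someone@example.com',
    'password1': 'S3cure-pass!',
    'password2': 'S3cure-pass!',
})
if form.is_valid():     # runs clean_password1()/clean_password2()
    user = form.save()  # hashes the password via set_password() before saving
else:
    print(form.errors)
```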
MatthewBenjamin/fsnd-blog | 858,993,489,370 | 9fad8a566ce658bea1cf71b77ccc46e6e4800fa7 | 19247cef8d4464a005969239a5510382e06e5487 | /models.py | c4b554e5672c3bd2729f5fea1ed57776ae521fc2 | []
| no_license | https://github.com/MatthewBenjamin/fsnd-blog | b7373a33080efbea407d427620ad8ba230076647 | 8ab0b4ed159f8ca96c6a8c82415b2e339d0ae936 | refs/heads/master | 2020-04-06T06:58:10.158739 | 2016-09-08T19:09:44 | 2016-09-08T19:09:44 | 60,494,946 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# models.py - appengine datastore models
from google.appengine.ext import ndb
from string import letters
import hashlib
import random
def make_salt(length=5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt=None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
class User(ndb.Model):
username = ndb.StringProperty(required=True)
pw_hash = ndb.StringProperty(required=True)
email = ndb.StringProperty()
liked_posts = ndb.KeyProperty(repeated=True)
liked_comments = ndb.KeyProperty(repeated=True)
@classmethod
def register_user(cls, username, password, email):
pw_hash = make_pw_hash(username, password)
return User(username=username,
pw_hash=pw_hash,
email=email)
@classmethod
def user_by_name(cls, name):
user = User.query(User.username == name).get()
return user
@classmethod
def login(cls, name, password):
user = cls.user_by_name(name)
if user and valid_pw(name, password, user.pw_hash):
return user
class Post(ndb.Model):
"""Blog post"""
subject = ndb.StringProperty(required=True)
content = ndb.TextProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
likes = ndb.IntegerProperty(default=0, required=True)
# TODO: tags = repeated string property
@property
def serialize(self):
"""Return object data in an easily serializable format."""
return {
'subject': self.subject,
'content': self.content,
'created:': str(self.created),
'modified': str(self.modified),
'likes': self.likes
}
class Comment(ndb.Model):
content = ndb.TextProperty(required=True)
author = ndb.StringProperty(required=True)
likes = ndb.IntegerProperty(default=0)
| UTF-8 | Python | false | false | 2,139 | py | 14 | models.py | 3 | 0.637214 | 0.633941 | 0 | 75 | 27.52 | 66 |
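A quick round trip of the salted-hash helpers above (a sketch; runs under Python 2, matching the module's xrange usage, and needs no App Engine):

```python
h = make_pw_hash('alice', 'hunter2')      # returns 'salt,sha256-hexdigest'
assert valid_pw('alice', 'hunter2', h)    # the stored salt reproduces the digest
assert not valid_pw('alice', 'wrong', h)
```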
NGG-kang/PythonPractice | 1,425,929,183,258 | 4137f48c134d48fec09061ba77c6fc3f3e706097 | 5136c61a230de741830c2971c704967360d8911d | /For.py | f38fcc306c25e53b527ecf66145d53be82724e60 | []
| no_license | https://github.com/NGG-kang/PythonPractice | e301af7a5abb7848faab5bdfd15f682dbbf53926 | b91d487ba764f7ed7dc5527871691dc93c2eb0ce | refs/heads/main | 2023-03-02T11:35:41.989193 | 2021-01-29T23:29:32 | 2021-01-29T23:29:32 | 332,351,023 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# for statement
# for <variable> in <list (or tuple, string)>:
#     when a list is given, the variable takes each element of the list in turn
for i in range(5):          # 0, 1, 2, 3, 4
    pass
for i in range(1, 5):       # 1, 2, 3, 4
    pass
for i in range(5, 0, -1):   # 5, 4, 3, 2, 1, counting down by 1
    pass
# Using a list comprehension
# This is a bit long, so shown with an example
numbers = [1, 2, 3, 4, 5]
result = []
for n in numbers:
    if n % 2 == 1:
        result.append(n*2)
# the for loop above can be replaced with
result = [n*2 for n in numbers if n%2 == 1]
| UTF-8 | Python | false | false | 525 | py | 13 | For.py | 12 | 0.546419 | 0.485411 | 0 | 19 | 18.842105 | 43 |
uwcirg/true_nth_usa_portal | 7,645,041,804,047 | 113228410bcbe04ec304ffee03a4048f75122e67 | 152856254c7c236dfba0fd1a5d1e96b0de5e300e | /tests/test_next_step.py | ed2b7d393655bb5c3d097e5bee395a9ce46de867 | ["BSD-3-Clause"]
| permissive | https://github.com/uwcirg/true_nth_usa_portal | 7299c592a7e5539d8e052ea09bff16043244fb2e | 34c95244fdc0aa40ff4b133b7af90ef4551c3191 | refs/heads/master | 2021-04-03T01:12:01.841170 | 2021-04-02T02:09:18 | 2021-04-02T02:09:18 | 37,150,258 | 3 | 9 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from flask_webtest import SessionScope
import pytest
from werkzeug.exceptions import BadRequest
from portal.database import db
from portal.models.intervention import Intervention
from portal.models.next_step import NextStep
from tests import TestCase
def test_validate():
assert NextStep.validate('decision_support')
def test_invalid():
with pytest.raises(BadRequest):
NextStep.validate('bogus')
class TestNextStep(TestCase):
def test_decision_support(self):
# w/o strategy setup, will get back indeterminate ds match
test_link = 'http://test.me'
ds_p3p = Intervention.query.filter_by(
name='decision_support_p3p').one()
ds_wc = Intervention.query.filter_by(
name='decision_support_wisercare').one()
for ds in ds_p3p, ds_wc:
ds.link_url = test_link
with SessionScope(db):
db.session.commit()
assert test_link == NextStep.decision_support(self.test_user)
| UTF-8 | Python | false | false | 987 | py | 421 | test_next_step.py | 272 | 0.683891 | 0.680851 | 0 | 34 | 28.029412 | 69 |
ngotest/ebmr_proj | 9,698,036,163,974 | 98d68ac87f3b5851fc35f89cd1a2040f52f66059 | 0119ac95e0e08e946c92e9eb6e828cc412fd6fbc | /music_player/playlist/urls.py | b5c6051ee711fcdaa1a836174fe88069b7e2599d | []
| no_license | https://github.com/ngotest/ebmr_proj | 58f351ef0590634cc6de92433cb93ae5d93ea353 | 59504ba8d1eb1acecec6e72024b903b2322991bf | refs/heads/master | 2023-03-01T02:19:25.297879 | 2021-02-04T05:28:53 | 2021-02-04T05:28:53 | 323,348,594 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from django.urls import path,include
from . import views
app_name='playlist'
urlpatterns = [
path('',views.general,name='general'),
path('<int:type>/',views.emotion,name='emotion'),
path('songupload/<int:type>/',views.song_upload,name='songupload'),
path('up-song/',views.up_song,name='song'),
path('fav/<int:id>',views.fav,name="fav"),
path('delete/<int:type>/<int:id>/',views.delete,name='delete'),
path('playsong/<int:sid>/',views.playsong,name='playsong'),
]
| UTF-8 | Python | false | false | 553 | py | 31 | urls.py | 18 | 0.638336 | 0.638336 | 0 | 14 | 38.5 | 75 |
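Because the URLconf above sets app_name = 'playlist', the routes can be resolved by namespaced name; a small sketch (assumes this module is included in the project's root URLconf):

```python
from django.urls import reverse

reverse('playlist:general')                      # -> '/'
reverse('playlist:emotion', args=[2])            # -> '/2/'
reverse('playlist:playsong', kwargs={'sid': 7})  # -> '/playsong/7/'
```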
annijanora/meiginajums1102-1 | 16,947,940,956,879 | dd9fb1183313cbd0b36bd13ca9a1979e48bb147b | fffb3bff816bc34358c29d82fd27bc32711000aa | /main.py | ec1c42abcf98eee352bdfcde7a240c7846a8021c | []
| no_license | https://github.com/annijanora/meiginajums1102-1 | c3cb5258f0f1dca36bd5b1ebd936a8c24b1d2c04 | 7f3be8cb3f1fab209f379b268f437f8b15fd6491 | refs/heads/master | 2023-05-11T10:48:34.463438 | 2020-02-18T20:17:27 | 2020-02-18T20:17:27 | 241,458,829 | 0 | 0 | null | false | 2023-05-02T22:47:00 | 2020-02-18T20:19:43 | 2020-02-18T20:19:51 | 2023-05-02T22:47:00 | 4 | 0 | 0 | 1 | HTML | false | false |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
return render_template('sakums.html')
@app.route('/viens')
def viens():
return render_template('viens.html')
@app.route('/divi')
def divi():
return render_template('divi.html')
@app.route('/tris')
def tris():
return render_template('tris.html')
@app.route('/sakums')
def sakums():
return render_template('sakums.html')
app.run(host='0.0.0.0',port=8020)
| UTF-8 | Python | false | false | 470 | py | 5 | main.py | 1 | 0.674468 | 0.657447 | 0 | 24 | 18.625 | 49 |
victorsemenov1980/WordCound_RabbitMQ_SQLalchemyORM | 9,869,834,890,713 | e5c4266bdd61bc35ef69278a7efd86f96ab94af3 | 36420c31e002b47faaa344afaac580c583e7b0b5 | /producer.py | e55fbb9439d19328e78054148139243219edb46b | ["MIT"]
| permissive | https://github.com/victorsemenov1980/WordCound_RabbitMQ_SQLalchemyORM | b53dc0145929a37e6e96ccf4cfb4b92a6e76c488 | e62a07e7b72f82e45c03964649b79be74f4c0f93 | refs/heads/main | 2023-05-04T13:47:53.879247 | 2021-05-28T06:56:40 | 2021-05-28T06:56:40 | 371,608,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 20 19:32:19 2021
@author: Viktor Semenov
"""
from os import listdir
from os.path import isfile, join
import mimetypes
from datetime import datetime
import pika
params=pika.URLParameters('amqps://____your link here')
#txt_dir = ('TXTfiles')
def publish_good(file_name):
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='new_files') # Declare a queue
message = file_name
channel.basic_publish(exchange='', routing_key='new_files', body=message)
print(" [x] Sent %r" % message)
connection.close()
def publish_bad(file_name):
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='errors') # Declare a queue
message = file_name
channel.basic_publish(exchange='', routing_key='errors', body=message)
print(" [x] Sent %r" % message)
connection.close()
def file_check(dir_):
file_list=[]
with open('filelist.txt','r+') as file:
for line in file:
file_list.append(line.rstrip('\n'))
file.write(str(datetime.now()))
file.write('\n')
onlyfiles = [f for f in listdir(dir_) if isfile(join(dir_, f))]
for i in onlyfiles:
if i not in file_list:
file_type=mimetypes.guess_type(i)
if file_type[0] != 'text/plain':
print('ALERT',' filename: ', i)
publish_bad(i)
else:
print(i,'-->',file_type[0])
publish_good(i)
with open('filelist.txt','a') as file:
file.write(i)
file.write('\n')
else:
print('Found already processed file ',i)
| UTF-8 | Python | false | false | 1,852 | py | 24 | producer.py | 9 | 0.575054 | 0.566415 | 0 | 70 | 24.957143 | 77 |
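A minimal matching consumer sketch for the 'new_files' queue published to above (hypothetical code; assumes the same URL parameters and the pika 1.x API):

```python
def consume():
    connection = pika.BlockingConnection(params)
    channel = connection.channel()
    channel.queue_declare(queue='new_files')

    def on_message(ch, method, properties, body):
        # body is the file name sent by publish_good()
        print(" [x] Received %r" % body)

    channel.basic_consume(queue='new_files',
                          on_message_callback=on_message,
                          auto_ack=True)
    channel.start_consuming()
```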
nishizumi-lab/sample | 5,257,039,998,525 | e2107b99ed8120cf6d22154f018d4fdfd2ce124b | f8bb2d5287f73944d0ae4a8ddb85a18b420ce288 | /python/opencv/basic/ex10.py | 1f8d23bf7002aa1256c4a17db7fd16f41c7ed4af | []
| no_license | https://github.com/nishizumi-lab/sample | 1a2eb3baf0139e9db99b0c515ac618eb2ed65ad2 | fcdf07eb6d5c9ad9c6f5ea539046c334afffe8d2 | refs/heads/master | 2023-08-22T15:52:04.998574 | 2023-08-20T04:09:08 | 2023-08-20T04:09:08 | 248,222,555 | 8 | 20 | null | false | 2023-02-02T09:03:50 | 2020-03-18T12:14:34 | 2023-01-18T21:29:33 | 2023-02-02T09:03:45 | 616,689 | 8 | 13 | 5 | C | false | false |
#-*- coding:utf-8 -*-
import cv2
import numpy as np
width = 200
height = 100
img = np.zeros((height, width, 3), np.uint8)
# Write the image to disk
cv2.imwrite("/Users/github/sample/python/opencv/basic/ex10.png", img)
| UTF-8 | Python | false | false | 219 | py | 1,222 | ex10.py | 814 | 0.687805 | 0.62439 | 0 | 11 | 17.636364 | 69 |
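A small variant sketch of the script above: cv2 draws in place on the same NumPy canvas, so shapes can be added before writing (the output path is hypothetical):

```python
cv2.rectangle(img, (50, 25), (150, 75), (255, 255, 255), -1)  # filled white block
cv2.imwrite("ex10_variant.png", img)
```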
Chris35Wills/python_functions | 4,612,794,913,315 | f1f243c94d665905d6deeefbffe0740d64339ad4 | 22d3b8993c20f4189a78d3337b37ea098859cc1d | /dem_point_operations.py | 70811fb99ae7d32c2af776a5493e59929874a02b | []
| no_license | https://github.com/Chris35Wills/python_functions | b603ebfb51e56f157ffb84c5d28e8c28b5048c81 | cb1782e524577de465b4fb2cca2da4ed6e63e566 | refs/heads/master | 2020-04-16T02:11:09.535516 | 2017-07-19T15:40:53 | 2017-07-19T15:40:53 | 54,633,206 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import numpy as np
import pandas as pd
sys.path.append('./georaster')
import georaster
"""
DEM and point operations
"""
def extract_values(dem, points, extracted_col_header='extracted_z'):
"""
Extracts points from a dem at point locations - binds them to a pandas dataframe
Variables:
dem : a georaster object
points : a pandas dataframe with headers of "x" and "y"
Returns:
points (pandas dataframe)
"""
dem_tl_x=dem.extent[0] *-1 #-800000 #*-1 << must do this
dem_tl_y=dem.extent[3] #-599500.
post=dem.xres
pnt_x=points['x'].values
pnt_y=points['y'].values
ix=np.floor((pnt_x+dem_tl_x)/post) # origin is top left
iy=np.floor(((dem_tl_y)-pnt_y)/post) # origin is top left
dem_values=dem.r[list(iy),list(ix)]
points[extracted_col_header]=pd.Series(dem_values, index=points.index)
return points
| UTF-8 | Python | false | false | 862 | py | 25 | dem_point_operations.py | 24 | 0.676334 | 0.657773 | 0 | 37 | 22.297297 | 82 |
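A hypothetical call of extract_values (assumes 'dem.tif' is a projected single-band raster readable by the georaster package and that the points share its coordinate system):

```python
dem = georaster.SingleBandRaster('dem.tif')   # hypothetical input file
points = pd.DataFrame({'x': [1000.0, 2000.0], 'y': [500.0, 750.0]})
points = extract_values(dem, points)
print(points['extracted_z'])
```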
amandasystems/msc-polonius-fact-study | 8,555,574,857,235 | 200aec7539a8672e1db3e84dda5f5c077463857f | da17c47ba292e7e46686b81fd70d7b2597a9201f | /benchmark-solving.py | ca3e1561f6fe6a3bf62d6c57dcb214313ebd784a | []
| no_license | https://github.com/amandasystems/msc-polonius-fact-study | c9402e067730340d889e508cad2d7d51bca72f69 | b0e0fe45602e6f1208e2b4f0806f67126f9384d5 | refs/heads/master | 2022-02-12T17:03:37.219351 | 2019-09-10T15:16:30 | 2019-09-10T15:16:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# benchmark a release version of Polonius on a number of directories containing
# nll-facts.
# benchmark-solving <my-crate> <my-other-crate>
import csv
import os
import sys
import timeit
from pathlib import Path
from benchmark import inputs_or_workdir, run_command
POLONIUS_OPTIONS = ["--skip-timing"]
POLONIUS_PATH = "../polonius/target/release/polonius"
POLONIUS_COMMAND = [POLONIUS_PATH, *POLONIUS_OPTIONS]
NR_REPEATS = 2
HARD_TIMEOUT = "10m"
SOFT_TIMEOUT = "5m"
ALGORITHMS = ["Naive", "Hybrid", "DatafrogOpt"]
def run_with_timeout(command):
return run_command(
["timeout", f"--kill-after={HARD_TIMEOUT}", SOFT_TIMEOUT, *command])
def benchmark_crate_fn(p, algorithm):
"""
Perform benchmarks on a function's input data, located in p
"""
benchmark_timer = timeit.Timer(
lambda: run_with_timeout([*POLONIUS_COMMAND, "-a", algorithm, "--", str(p)]))
try:
return min(benchmark_timer.repeat(NR_REPEATS, number=1))
except RuntimeError:
return None
def benchmark_crate_fns(facts_path):
return ([p.stem, *[benchmark_crate_fn(p, a) for a in ALGORITHMS]]
for p in facts_path.iterdir()
if p.is_dir() and not p.stem[0] == ".")
def benchmark_crate_folder(p):
assert isinstance(p, Path)
assert p.is_dir(), f"{p} must be a directory!"
facts_path = p / "nll-facts"
if not facts_path.is_dir():
facts_path = p
program_name = p.stem
for fn_name_and_runtimes in benchmark_crate_fns(facts_path):
yield [program_name, *fn_name_and_runtimes]
def benchmark_crates_to_csv(dirs, out_fp):
writer = csv.writer(out_fp)
writer.writerow([
"program", "function",
*[f"min({NR_REPEATS}) {a} runtime" for a in ALGORITHMS]
])
for crate_count, c in enumerate(dirs, start=1):
print(
f"processing crate #{crate_count}/{len(dirs)}: {c.stem}"\
.ljust(os.get_terminal_size(0).columns),
file=sys.stderr,
end="\r")
writer.writerows(benchmark_crate_folder(c))
if __name__ == '__main__':
crate_fact_list = inputs_or_workdir()
benchmark_crates_to_csv(crate_fact_list, sys.stdout)
| UTF-8 | Python | false | false | 2,210 | py | 16 | benchmark-solving.py | 9 | 0.638914 | 0.634842 | 0 | 78 | 27.333333 | 85 |
JohnWestonNull/RaspberryPiWithScreen | 7,172,595,399,138 | c36cd56dd2b1244725f3f468c6c384e20b4d7035 | 239d4895fb24d501cca3cdc773debfac6a2266bc | /screen_off.py | 8e801322458871007fdeda379f309833a3c17b45 | []
| no_license | https://github.com/JohnWestonNull/RaspberryPiWithScreen | 017329416c84fbaebd3110803418f7cbf986d776 | 0453f6e8a9f0a5762e4aa2d4fae7045555fb3a37 | refs/heads/master | 2020-12-13T00:33:13.134745 | 2020-01-16T08:33:53 | 2020-01-16T08:33:53 | 234,267,775 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from App import App
class ScreenOff(App):
def __init__(self):
super().__init__(name="Screen Off")
def render(self):
self.draw.rectangle(((0, 0),(127, 63)), fill=1)
if self.driver is not None:
            self.driver.refresh()
| UTF-8 | Python | false | false | 259 | py | 6 | screen_off.py | 6 | 0.563707 | 0.532819 | 0 | 10 | 25 | 55 |
yashpupneja/Coding-Problems | 8,297,876,835,193 | 6123ad1f72fc833efd80ba5c89c14c39c2c44e6d | b49b049643f956a09d9205bfe151e4211aeab3b3 | /Leetcode Problems/02. Number of consecutive ones.py | f2531432d59b500e7a69190e0fd8602bd038197f | []
| no_license | https://github.com/yashpupneja/Coding-Problems | b20337c5a52e47cef5af487ccb32cc1aebb167d4 | 04804cb5be12779bb2018d3550c1d506d6c373b3 | refs/heads/master | 2023-02-10T15:48:23.098878 | 2021-01-06T18:41:07 | 2021-01-06T18:41:07 | 281,583,764 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Given a binary array, find the maximum number of consecutive 1s in this array.
Example 1:
Input: [1,1,0,1,1,1]
Output: 3
Explanation: The first two digits or the last three digits are consecutive 1s.
The maximum number of consecutive 1s is 3.
Note:
The input array will only contain 0 and 1.
The length of input array is a positive integer and will not exceed 10,000
"""
def findMaxConsecutiveOnes(nums):
count=0
res=0
for i in range(len(nums)):
if nums[i]==0:
count=0
else:
count=count+1
res=max(res,count)
return res
def main():
num=list(map(int,input().split()))
print(findMaxConsecutiveOnes(num))
if __name__=='__main__':
main()
| UTF-8 | Python | false | false | 833 | py | 66 | 02. Number of consecutive ones.py | 65 | 0.572629 | 0.542617 | 0 | 33 | 23.242424 | 78 |
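An equivalent one-liner sketch using itertools.groupby, which groups consecutive equal values (not part of the original solution):

```python
from itertools import groupby

def find_max_consecutive_ones_v2(nums):
    return max((len(list(g)) for k, g in groupby(nums) if k == 1), default=0)

print(find_max_consecutive_ones_v2([1, 1, 0, 1, 1, 1]))  # 3
```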
srikanth3006/Python | 13,305,808,700,994 | 96da991425931bf5116e179e12f2a32adab1bb88 | 054b8db82269c51a3ab4e60ea49c77b75c762c43 | /DataStructures/Graphs/DFS_in_Graphs.py | b00b9973db029dc29483af233d1b6b41ff9753de | []
| no_license | https://github.com/srikanth3006/Python | 8c001f9ae48a5e5681f3c3a4730399b63ebece96 | 56601df8a179a7526cb69e95cf485efe806ecdc6 | refs/heads/master | 2020-06-19T14:08:19.393909 | 2019-07-28T18:47:12 | 2019-07-28T18:47:12 | 196,737,796 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Node(object):
def __init__(self, name):
self.name = name
self.adjacencyList = list()
self.visited = None
class DepthFirstSearch(object):
def dfs(self, starting_node):
print("%s" % starting_node.name)
starting_node.visited = True
for node in starting_node.adjacencyList:
if not node.visited:
self.dfs(node)
    def dfs_using_Stack(self, starting_node):
        stack = list()
        starting_node.visited = True  # mark the start node too, so a cycle cannot revisit it
        stack.append(starting_node)
        while stack:
            node = stack.pop()
            print("%s" % node.name)
            for n in node.adjacencyList:
                if not n.visited:
                    n.visited = True
                    stack.append(n)
node1 = Node('A')
node2 = Node('B')
node3 = Node('C')
node4 = Node('D')
node5 = Node('E')
node1.adjacencyList.append(node2)
node1.adjacencyList.append(node3)
node2.adjacencyList.append(node4)
node4.adjacencyList.append(node5)
dfs = DepthFirstSearch()
dfs.dfs(node1)
| UTF-8 | Python | false | false | 1,009 | py | 47 | DFS_in_Graphs.py | 46 | 0.583746 | 0.569871 | 0 | 40 | 24.25 | 48 |
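Both traversals share the same visited flags, so the graph has to be reset before running the stack-based version on it again; a sketch:

```python
for node in (node1, node2, node3, node4, node5):
    node.visited = None        # clear the marks left by the recursive run

dfs.dfs_using_Stack(node1)     # prints A, C, B, D, E (LIFO order)
```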
gubjanos/gravity-test | 16,870,631,549,254 | 035ac55494613480ca7fb5a95063195ef28ee84b | 6fef56670700919113f15505c25cd1d95df187d6 | /load_data.py | d15459998a52b0b164e36554ad3f01e74fc9ac7e | []
| no_license | https://github.com/gubjanos/gravity-test | 5caab9c955343f5c734491ae90f14958f69b27fd | 2dedf62056704614c7e8506d0b3f882b36f5f8a9 | refs/heads/master | 2016-09-08T00:41:13.718894 | 2014-06-22T12:06:18 | 2014-06-22T12:06:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def load_data(path):
from numpy import loadtxt
instances = loadtxt(path)
y = []
x = []
for j in xrange(len(instances)):
y.append(instances[j][0])
x.append(instances[j][1:])
return x, y
| UTF-8 | Python | false | false | 225 | py | 5 | load_data.py | 5 | 0.564444 | 0.555556 | 0 | 9 | 24 | 36 |
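An equivalent vectorised sketch (assumes a 2-D whitespace-delimited file where column 0 is the label and the remaining columns are features):

```python
import numpy as np

def load_data_np(path):
    data = np.loadtxt(path)
    return data[:, 1:], data[:, 0]   # x (features), y (labels)
```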
oumkale/test-python | 3,736,621,597,087 | 9f9b206c84972a23f62ef73d92914756cf6257fc | 9a52dd6961c033d06174921de2955be5f9035f03 | /pkg/types/types.py | 50a1b48fb83f0db26b6149b81a9f8d8c3b03fd79 | ["Apache-2.0"]
| permissive | https://github.com/oumkale/test-python | f4ef58e3751b1ef1626e77c7c02833e4b1b9d237 | 1f3d3e42ffbe1bf5ed9df8a0c6038e50129b2c4d | refs/heads/main | 2023-08-13T01:30:40.624522 | 2021-10-12T06:14:08 | 2021-10-12T06:14:08 | 416,193,482 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# PreChaosCheck initial stage of experiment check for health before chaos injection
PreChaosCheck = "PreChaosCheck"
# PostChaosCheck pre-final stage of experiment check for health after chaos injection
PostChaosCheck = "PostChaosCheck"
# Summary final stage of experiment update the verdict
Summary = "Summary"
# ChaosInject this stage refer to the main chaos injection
ChaosInject = "ChaosInject"
# AwaitedVerdict marked the start of test
AwaitedVerdict = "Awaited"
# PassVerdict marked the verdict as passed in the end of experiment
PassVerdict = "Pass"
# FailVerdict marked the verdict as failed in the end of experiment
FailVerdict = "Fail"
# StoppedVerdict marked the verdict as stopped in the end of experiment
StoppedVerdict = "Stopped"
#ResultDetails is for collecting all the chaos-result-related details
class ResultDetails(object):
def __init__(self, Name=None, Verdict=None, FailStep=None, Phase=None,
PassedProbeCount=None, UID=None):
self.Name = Name
self.Verdict = Verdict
self.FailStep = FailStep
self.Phase = Phase
self.ResultUID = UID
self.PassedProbeCount = PassedProbeCount
# EventDetails is for collecting all the events-related details
class EventDetails(object):
def __init__(self, Message=None, Reason=None, ResourceName=None, ResourceUID=None, Type=None, UID=None):
self.Message = Message
self.Reason = Reason
self.ResourceName = ResourceName
self.ResourceUID = UID
self.Type = Type
# AppDetails contains all the application related envs
class AppDetails(object):
def __init__(self, Namespace=None, Label=None, Kind=None, AnnotationCheck=None, AnnotationKey=None, AnnotationValue=None):
self.Namespace = Namespace
self.Label = Label
self.Kind = Kind
self.AnnotationCheck = AnnotationCheck
self.AnnotationKey = AnnotationKey
self.AnnotationValue = AnnotationValue
# ChaosDetails is for collecting all the global variables
class ChaosDetails(object):
def __init__(self, ChaosPodName=None, ChaosNamespace=None, EngineName=None, InstanceID=None, ExperimentName=None, Timeout=None,
Delay=None, ChaosDuration=None, JobCleanupPolicy=None, Randomness=None, ParentsResources=None,
Namespace=None, Label=None, Kind=None, AnnotationCheck=None, AnnotationKey=None, AnnotationValue=None, UID=None
):
self.ChaosUID = UID
self.ChaosNamespace = ChaosNamespace
self.ChaosPodName = ChaosPodName
self.EngineName = EngineName
self.InstanceID = InstanceID
self.ExperimentName = ExperimentName
self.Timeout = Timeout
self.Delay = Delay
self.AppDetail = AppDetails(Namespace, Label, Kind, AnnotationCheck, AnnotationKey, AnnotationValue)
self.ChaosDuration = ChaosDuration
self.JobCleanupPolicy = JobCleanupPolicy
self.Randomness = Randomness
self.ParentsResources = []
def append(self, value):
self.ParentsResources.append(value)
#SetResultAttributes initialise all the chaos result ENV
def SetResultAttributes(ResultDetails , ChaosDetails):
ResultDetails.Verdict = "Awaited"
ResultDetails.Phase = "Running"
ResultDetails.FailStep = "N/A"
ResultDetails.PassedProbeCount = 0
if ChaosDetails.EngineName != "":
ResultDetails.Name = ChaosDetails.EngineName + "-" + ChaosDetails.ExperimentName
else:
ResultDetails.Name = ChaosDetails.ExperimentName
if ChaosDetails.InstanceID != "":
ResultDetails.Name = ResultDetails.Name + "-" + ChaosDetails.InstanceID
#SetResultAfterCompletion set all the chaos result ENV in the EOT
def SetResultAfterCompletion(ResultDetails, verdict, phase, failStep):
ResultDetails.Verdict = verdict
ResultDetails.Phase = phase
ResultDetails.FailStep = failStep
#SetEngineEventAttributes initialise attributes for event generation in chaos engine
def SetEngineEventAttributes(EventDetails, Reason, Message, Type , ChaosDetails):
EventDetails.Reason = Reason
EventDetails.Message = Message
EventDetails.ResourceName = ChaosDetails.EngineName
EventDetails.ResourceUID = ChaosDetails.ChaosUID
EventDetails.Type = Type
#SetResultEventAttributes initialise attributes for event generation in chaos result
def SetResultEventAttributes(EventDetails, Reason, Message, Type, ResultDetails):
EventDetails.Reason = Reason
EventDetails.Message = Message
EventDetails.ResourceName = ResultDetails.Name
EventDetails.ResourceUID = ResultDetails.ResultUID
EventDetails.Type = Type
| UTF-8 | Python | false | false | 4,522 | py | 35 | types.py | 23 | 0.756524 | 0.756303 | 0 | 105 | 42.057143 | 129 |
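A hypothetical end-to-end use of the helpers above (all field values are invented for illustration):

```python
chaos = ChaosDetails(ExperimentName='pod-delete', EngineName='engine-1',
                     InstanceID='', Timeout=180, Delay=2)
result = ResultDetails()

SetResultAttributes(result, chaos)   # Name -> 'engine-1-pod-delete', Verdict -> 'Awaited'
SetResultAfterCompletion(result, PassVerdict, 'Completed', 'N/A')
print(result.Name, result.Verdict)   # engine-1-pod-delete Pass
```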
cryzed/Feedbuffer | 7,258,494,758,895 | f339bcfba12855a9ce9c276f0cfec355b8a1db43 | 63f50c6742d1cdcc1b64a69828b3acbf493f2ce1 | /feedbuffer/database.py | f9a6f31c3d029e4cf431a0b19c481311db1f1006 | ["MIT"]
| permissive | https://github.com/cryzed/Feedbuffer | bc3faedae83d6c7ffe65e70e8ffdc81c5f87aedf | 0f258bbc7fbcdb39e19fd7d26192a45098b1fc68 | refs/heads/master | 2021-01-15T15:25:39.979230 | 2016-07-02T19:12:38 | 2016-07-02T19:12:38 | 52,744,909 | 20 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import concurrent.futures
import functools
import peewee
from feedbuffer import settings, log
_database = peewee.SqliteDatabase(settings.DATABASE_PATH)
_logger = log.get_logger(__name__)
# Easy way to queue function calls and execute them in a single thread, without having to manually write
# producer-consumer logic.
_database_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
class Model(peewee.Model):
class Meta:
database = _database
class Feed(Model):
url = peewee.TextField(unique=True)
update_interval = peewee.IntegerField(default=settings.DEFAULT_UPDATE_INTERVAL)
data = peewee.TextField()
class FeedItem(Model):
id_ = peewee.TextField(unique=True)
data = peewee.TextField()
feed = peewee.ForeignKeyField(Feed, related_name='entries')
_database.create_tables([Feed, FeedItem], safe=True)
def _execute_in(executor):
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
future = executor.submit(function, *args, **kwargs)
return future.result()
return wrapper
return decorator
def _get_feed_query(url):
return Feed.select().where(Feed.url == url)
def _feed_item_exists(feed, id_):
return FeedItem.select().where(FeedItem.feed == feed and FeedItem.id_ == id_).exists()
def _feed_exists(url):
return _get_feed_query(url).exists()
def _get_feed(url):
return _get_feed_query(url).get()
@_execute_in(_database_executor)
def feed_exists(url):
return _get_feed_query(url).exists()
@_execute_in(_database_executor)
def get_feed(url):
return _get_feed(url)
@_execute_in(_database_executor)
def update_feed(url, feed_data, entries):
if _feed_exists(url):
feed = _get_feed(url)
else:
feed = Feed(url=url, data=feed_data)
feed.save()
data_source = [
{'id_': id_, 'data': entry, 'feed': feed} for (id_, entry) in entries if not _feed_item_exists(feed, id_)
]
_logger.info('Updating feed: %s with %d new entries...', url, len(data_source))
with _database.atomic():
FeedItem.insert_many(data_source).execute()
feed.data = feed_data
feed.save()
@_execute_in(_database_executor)
def flush_feed(feed):
query = FeedItem.delete().where(FeedItem.feed == feed)
query.execute()
# Generic way to update data in a model instance using the write executor
@_execute_in(_database_executor)
def update_model_data(model, **kwargs):
for key, value in kwargs.items():
setattr(model, key, value)
model.save()
| UTF-8 | Python | false | false | 2,585 | py | 8 | database.py | 6 | 0.670793 | 0.670406 | 0 | 106 | 23.386792 | 113 |
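The single-worker executor serialises every decorated call through one thread, so SQLite writes never race even when triggered from many request threads. A hypothetical extra operation written in the same style (delete_feed is not part of the original module):

```python
@_execute_in(_database_executor)
def delete_feed(url):
    # remove a feed and, via recursive=True, its dependent FeedItem rows
    if _feed_exists(url):
        _get_feed(url).delete_instance(recursive=True)
```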
rqdzs/EasyTutoring | 10,471,130,278,814 | a6979b894b8e41f6c32fcf062d884e4d53c3eceb | feab6c3e0dc419d1a22e01c8cdab1d6ae2dc4a44 | /turmas/migrations/0004_turma_disciplina.py | 46eeeffd1ca5886d3c0808a985a26d2194164827 | []
| no_license | https://github.com/rqdzs/EasyTutoring | 3c35b6547c22f86ec03d955f057daeee1e146d26 | 53fcec8e17bb950049ad3379ee63658c23aa3f38 | refs/heads/master | 2021-08-16T16:42:34.532149 | 2017-11-20T02:55:32 | 2017-11-20T02:55:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-26 13:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('disciplinas', '0001_initial'),
('turmas', '0003_remove_turma_disciplina'),
]
operations = [
migrations.AddField(
model_name='turma',
name='disciplina',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='disciplinas', to='disciplinas.Disciplina'),
),
]
| UTF-8 | Python | false | false | 629 | py | 98 | 0004_turma_disciplina.py | 69 | 0.6407 | 0.600954 | 0 | 22 | 27.590909 | 149 |
heimagithub/par | 6,691,559,075,498 | 88b4def2f8e22ac8d58cb5493169a13ec8e06aab | 0da45639cca5b4631ef8f167cb457eee0da29cab | /180227_6_sqlite_del.py | a9dfb2ea6e08c29e25facb46f5440f9e67da8e3a | []
| no_license | https://github.com/heimagithub/par | 9450af8f7ce87cf13bd58608a6e81813e95b359c | 9e5efc1904ec32f337e3f1ae7fe9ea7330fe4f39 | refs/heads/master | 2021-01-25T14:33:11.459379 | 2018-03-03T03:04:31 | 2018-03-03T03:04:31 | 123,708,172 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sqlite3
dbpath = '/home/heima/Databases/NewDB.db'
conn = sqlite3.connect(dbpath)
c = conn.cursor()
c.execute('DELETE from LAND where ID = 4838674')
conn.commit()
conn.close()
| UTF-8 | Python | false | false | 196 | py | 25 | 180227_6_sqlite_del.py | 23 | 0.72449 | 0.678571 | 0 | 13 | 14.153846 | 48 |
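The same DELETE written as a parameterised query, the safer pattern whenever the id comes from outside the program (a sketch against the same cursor, before the connection is closed):

```python
c.execute('DELETE FROM LAND WHERE ID = ?', (4838674,))
```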
gratcliff/Happy_Cup_Server | 18,966,575,604,375 | 9b310b5e888872484e4a4c596f52b738c63d9516 | d050ed3ce3742a3de82606d8eb5a20ffe5173e0b | /apps/products/migrations/0001_initial.py | fafcf726f5d7bcaa1a9fe05386e5753a3a67e2da | []
| no_license | https://github.com/gratcliff/Happy_Cup_Server | 59f5871cd47516c9f7f73b3d4c649aacdebc29b6 | bd93c58e0015f55142fad14b7fba25abc639c626 | refs/heads/master | 2021-01-11T02:38:45.701336 | 2016-12-02T02:43:18 | 2016-12-02T02:43:18 | 70,946,723 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 17:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('product_options', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Coffee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=24)),
('description', models.TextField()),
('image_url', models.URLField()),
('price_factor', models.SmallIntegerField(default=0, verbose_name='Increase or decrease the base price by the following percentage. Use negative values to decrease price.')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Merchandise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=24)),
('description', models.TextField()),
('image_url', models.URLField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
options={
'verbose_name_plural': 'Merchandise',
},
),
migrations.CreateModel(
name='ProductPromotion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('description', models.CharField(max_length=64)),
('discount', models.PositiveSmallIntegerField(default=15, help_text='Positive, whole numbers only', verbose_name='Percent discount')),
('expiration_date', models.DateTimeField(help_text='Server timezone is UTC (Coordinated Universal Time)', verbose_name='Date and time that promotion ends')),
('expired', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('frequency', models.PositiveSmallIntegerField(verbose_name='Number of weeks between each shipment')),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('coffees', models.ManyToManyField(to='products.Coffee')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='VarietyPack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=24)),
('description', models.TextField()),
('image_url', models.URLField()),
('coffee_qty', models.PositiveSmallIntegerField(default=0, verbose_name='Number of bags of coffee in variety pack (if applicable)')),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('coffees', models.ManyToManyField(blank=True, to='products.Coffee', verbose_name='Coffees in variety pack (if applicable)')),
('featured', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.ProductPromotion', verbose_name='To feature this product, select a promotional deal.')),
('merchandise', models.ManyToManyField(blank=True, to='products.Merchandise', verbose_name='Merchandise in variety pack (if applicable)')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='merchandise',
name='featured',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.ProductPromotion', verbose_name='To feature this product, select a promotional deal.'),
),
migrations.AddField(
model_name='merchandise',
name='sizes',
field=models.ManyToManyField(blank=True, to='product_options.ShirtSize', verbose_name='Shirt Sizes available (if applicable)'),
),
migrations.AddField(
model_name='coffee',
name='featured',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.ProductPromotion', verbose_name='To feature this product, select a promotional deal.'),
),
migrations.AddField(
model_name='coffee',
name='grinds',
field=models.ManyToManyField(to='product_options.CoffeeGrind'),
),
migrations.AddField(
model_name='coffee',
name='roast',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product_options.CoffeeRoast'),
),
migrations.AddField(
model_name='coffee',
name='sizes',
field=models.ManyToManyField(to='product_options.CoffeeVolume'),
),
]
| UTF-8 | Python | false | false | 6,235 | py | 126 | 0001_initial.py | 103 | 0.578348 | 0.572093 | 0 | 126 | 48.484127 | 217 |
Al3x-BB/Unidad3-Ejercicio1 | 2,860,448,257,172 | 74b2582d064907147b8bd3965e1d369de5139c46 | 533d68e3993c246b159abd50eae7be1e1e5e43be | /ManejaLibros.py | 1e8bfff49f4d36966fecb3db9b114fedea8fa7bc | []
| no_license | https://github.com/Al3x-BB/Unidad3-Ejercicio1 | 0aad51a2bee955154b2a874759afb6a96639568f | eef9d10a09b1e149c9fb4e04d47987b11ec27cfc | refs/heads/main | 2023-05-03T00:38:21.373599 | 2021-05-24T14:35:51 | 2021-05-24T14:35:51 | 370,382,547 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Libro import claseLibro
import re
import csv
class claseManejaLibros:
__lista = []
def __init__(self, lista = []):
self.__lista = lista
def crearLista(self):
band = False
archi = open('ArchivoLibros.csv')
reader = csv.reader(archi, delimiter = ';')
for fila in reader:
if(band == False):
fila[0] = fila[0][3:]
if(re.match('^[0-9]', fila[0].lower())):
unLibro =claseLibro(fila[0], fila[1], fila[2], fila[3], fila[4], fila[5])
unLibro.crearListaCap(reader)
self.__lista.append(unLibro)
band = True
else:
if(re.match('^[0-9]', fila[0].lower())):
unLibro = claseLibro(fila[0], fila[1], fila[2], fila[3], fila[4], fila[5])
unLibro.crearListaCap(reader)
self.__lista.append(unLibro)
    def punto1(self, id):
        acum = 0
        i = 0
        # find the book with the given id in the list
        while(i < len(self.__lista)):
            if (id == self.__lista[i].getId()):
                break
            i += 1
        if i == len(self.__lista):  # guard: id not found, avoids an IndexError below
            print('ERROR: libro no encontrado')
            return
        # print the book's chapters and its total page count
        print('|----{}----|'.format(self.__lista[i].getTitulo()))
        for x in range(len(self.__lista[i].getCaps())):
            print('-> {}'.format(self.__lista[i].getCaps()[x].getTitulo()))
            acum += int(self.__lista[i].getCaps()[x].getPags())
        print('Contiene {} páginas'.format(acum))
    def punto2(self, palabra):
        band = [False, False]
        for i in range(len(self.__lista)):  # search for the word in each book title
            band[0] = False
            for j in range(len(self.__lista[i].getCaps())):  # and in its chapter titles
                if(palabra in self.__lista[i].getTitulo() and band[0] == False):
                    print('|----{}----|\nAutor: {}'.format(self.__lista[i].getTitulo(), self.__lista[i].getAutor()))
                    band = [True, True]  # the word was found, and it was found in the book title
                if(palabra in self.__lista[i].getCaps()[j].getTitulo()):
                    print('|----{}----|\nAutor: {}'.format(self.__lista[i].getCaps()[j].getTitulo(),
                                                           self.__lista[i].getAutor()))
                    band[1] = True  # the word was found
        if(band[1] == False):
            print('ERROR: palabra no encontrada')
def mostrar(self):
for i in range(len(self.__lista)):
self.__lista[i].mostrar() | UTF-8 | Python | false | false | 2,623 | py | 6 | ManejaLibros.py | 5 | 0.479557 | 0.468093 | 0 | 55 | 45.618182 | 116 |
KarthikMAM/AVSR | 19,533,511,265,860 | d08645eba043a023eae15863c9dcd079ac09068a | 56682fb5b73a2b4c22fb8c3ea75d05a919d998ad | /align_file_indexer.py | 9575d7436718ed49dd0fdd4fa2c6901cf72de5af | []
| no_license | https://github.com/KarthikMAM/AVSR | 7aeefb80a197aa189846846f6a5cf8fe8cd93033 | 2ad53ef3dc7a32104a28827cc4faa6128ba0f095 | refs/heads/master | 2023-07-09T22:36:49.891147 | 2023-06-30T21:02:28 | 2023-06-30T21:02:28 | 87,597,032 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from os import path, listdir, makedirs
import numpy as np
from config import ALIGN_RAW, ALIGN_TEXT, ALIGN_INDEXED
def map_letters(letters):
return np.array(list(map(
lambda x: 0 if x == " " else ord(x) - ord("a") + 1,
list(letters)
)))
print("\n\n", "INDEXING ALIGN FILES:START".center(100, "-"), sep="", end="\n\n\n")
for speaker in listdir(ALIGN_RAW):
for align_file in listdir(path.join(ALIGN_RAW, speaker)):
if align_file.endswith(".align"):
with open(path.join(ALIGN_RAW, speaker, align_file)) as inp:
contents = " ".join(list(map(lambda line: line.split()[-1], inp.readlines()[1:-1])))
makedirs(path.join(ALIGN_TEXT, speaker), exist_ok=True)
makedirs(path.join(ALIGN_INDEXED, speaker), exist_ok=True)
print(contents, end="", file=open(path.join(ALIGN_TEXT, speaker, align_file.split(".")[0] + ".txt"), "w"))
np.save(path.join(ALIGN_INDEXED, speaker, align_file.split(".")[0]), map_letters(contents))
print("\n\n", "INDEXING ALIGN FILES:SUCCESS".center(100, "-"), sep="", end="\n\n\n") | UTF-8 | Python | false | false | 1,151 | py | 16 | align_file_indexer.py | 15 | 0.596872 | 0.585578 | 0 | 27 | 41.666667 | 122 |
JulesBelveze/wikipedia-pages-suggestion | 13,675,175,889,773 | 66cf9adedaf72ea51c8435f3babc0811c80f7acc | 5f329b39a638d88a3c046570f0f2e79926c25c19 | /testers/PageRankTest.py | 430f1bdd607498a85c3de6b7f5f72841cc10a610 | []
| no_license | https://github.com/JulesBelveze/wikipedia-pages-suggestion | 3abc3963a38cd6090ba99929c913cabf08dba7ea | ed9b4af493a257fca2d440027b4647f9d442b350 | refs/heads/master | 2020-03-31T18:35:09.466225 | 2019-05-06T06:28:19 | 2019-05-06T06:28:19 | 152,464,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import networkx as nx
import sys
sys.path.append('..')
import PageRank
G = nx.DiGraph()
G.add_nodes_from([1,2,3,4,5,6,7,8,9,10,11])
G.add_edge(1,2)
G.add_edge(1,4)
G.add_edge(1,3)
G.add_edge(2,3)
G.add_edge(4,3)
G.add_edge(1,5)
G.add_edge(2,1)
G.add_edge(5,2)
G.add_edge(3,4)
G.add_edge(2,4)
PR = PageRank.PageRank(G)
PR.constructDispersionMatrix(G)
print(PR.getPageRank())
print(sum(PR.getPageRank()))
| UTF-8 | Python | false | false | 406 | py | 16 | PageRankTest.py | 13 | 0.679803 | 0.598522 | 0 | 24 | 15.958333 | 43 |
SeoDongMyeong/tbot | 8,366,596,342,653 | 820d55f7d3dd07ad3d69b6fc110818e2d80cf59e | 3928ac6c2253928c27a4814eaf2fee92973e80cd | /run.py | 610b5c168fcc46c67224136ecb71be2e9c5b207d | []
| no_license | https://github.com/SeoDongMyeong/tbot | 77ee8095a1e76614cdb0ed67d65551ad0b842480 | c4c67a0912b183fcbdee06a7d39f4e3ab62bc3e6 | refs/heads/master | 2021-05-20T17:56:05.111025 | 2016-06-20T06:41:26 | 2016-06-20T06:41:26 | 61,489,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import requests
import logging
import re
import os
from uuid import uuid4
from urllib2 import quote
from bs4 import BeautifulSoup
from telegram.ext import Updater, CommandHandler
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from config import get_config
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def get_image_from_url(url):
r = requests.get(url)
img = Image.open(BytesIO(r.content))
img = img.resize((150, 150), Image.NEAREST)
return img
def get_font(mode='key'):
if not mode in ['key', 'value'] :
raise ValueError('NOT MODE')
size = 35
if mode == 'key' :
size = 40
font = ImageFont.truetype('static/nanum.ttf', size)
return font
def get_overlog_uid(query):
url = 'https://overlog.net/leaderboards/global/score?q=%s' % quote(query.encode('utf-8'))
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
t = soup.find('table', class_='table-striped').find('tbody')
tr = t.find('tr')
if not tr.has_attr('data-uid'):
return False
return tr['data-uid']
def overlog_renew(uid):
headers = {'content-type' : 'application/x-www-form-urlencoded; charset=UTF-8', 'origin' : 'https://overlog.net', 'referer' : 'https://overlog.net/detail/overview/' + uid, 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
url = 'https://overlog.net/detail/renew'
body = {'uid' : uid}
rv = requests.post(url, headers=headers, data=body).json()
def get_overlog_hero_detail(uid, hero_id):
entry = {}
headers = {'content-type' : 'application/x-www-form-urlencoded; charset=UTF-8', 'origin' : 'https://overlog.net', 'referer' : 'https://overlog.net/detail/overview/' + uid, 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
url = 'https://overlog.net/detail/hero/%s' % hero_id
body = {'uid' : uid}
rv = requests.post(url, headers=headers, data=body).json()
if rv['error'] != 0:
return False
html = rv['html']
soup = BeautifulSoup(html, 'html.parser')
verts = soup.find_all(text=re.compile(u'영웅 기술'))
entry['skill'] = []
for vert in verts:
vert = vert.parent.parent
skill_name = vert.find('h4').get_text().split(u'영웅 기술:')[1]
tmp_skill = vert.find('dl')
entry['skill'].append({
'name' : skill_name,
'key' : tmp_skill.find('dt').get_text(),
'value' : tmp_skill.find('dd').get_text().strip()
})
tmp_kill = soup.find('h4', text=re.compile(u'처치')).parent.find('dl')
entry['kill'] = {
'key' : tmp_kill.find('dt').get_text(),
'value' : tmp_kill.find('dd').get_text().strip()
}
tmp_deal = soup.find(text=re.compile(u'딜량')).parent.parent.find_all('dl')[2]
entry['deal'] = {
'key' : tmp_deal.find('dt').get_text().replace(u'게임당 평균 ', ''),
'value' : tmp_deal.find('dd').get_text().strip()
}
return entry
def get_overlog_data(uid):
entries = {'data' : []}
url = 'https://overlog.net/detail/overview/%s' % uid
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
character = soup.find('div', class_='character')
entries['playerIcon'] = character.find('img', class_='portrait')['src']
entries['displayName'] = character.find('p', class_='displayName').contents[0].strip()
entries['level'] = character.find(text=re.compile('Level'))
heros = soup.find_all(class_='heroList')[1]
basics = heros.find_all(class_='heroBasic')
details = heros.find_all(class_='heroDetail')
idx = 0
for basic in basics:
entry = {}
idx += 1
entry['idx'] = idx
entry['name'] = basic.find('span', class_='name').get_text()
check_point = basic.find('td', class_='not_available')
if check_point :
entry['text'] = check_point.get_text()
else :
entry['playTime'] = basic.find('td', class_='timePlayed').get_text().strip()
entry['kda'] = basic.find('span', class_='rate').get_text()
entry['ratio'] = basic.find('span', class_='ratio').get_text()
entry['objective'] = basic.find(class_='objective').contents[2].strip()
entry['img'] = basic.find('img')['src']
hero_id = basic['data-hero']
entry['info'] = get_overlog_hero_detail(uid, hero_id)
entries['data'].append(entry)
return entries
def overlog(bot, update):
chat_id = update.message.chat_id
query = update.message.text.split('/w')[1].strip()
uid = get_overlog_uid(query)
if not uid:
bot.sendMessage(update.message.chat_id, text='등록된 프로필이 없습니다. "Overwatch#1234"와 같이 뒤에 숫자까지 함께 입력하여 검색하시기 바랍니다. 대소문자를 구분합니다')
return
overlog_renew(uid)
bot.sendMessage(update.message.chat_id, text=u'%s님 데이터가 갱신되었습니다.' % (query))
entries = get_overlog_data(uid)
text = '%s[%s]' % (entries['displayName'], entries['level'])
text += '\n\n'
for entry in entries['data'][:3] :
text += '%d. %s(%s)\n' % (entry['idx'], entry['name'], entry['playTime'])
text += 'kda : %s %s : %s %s : %s\n' % (entry['kda'], '승률'.decode('utf-8'), entry['ratio'], '평균 임무기여'.decode('utf-8'), entry['objective'])
text += '%s : %s\n' % (entry['info']['kill']['key'], entry['info']['kill']['value'])
text += '%s : %s\n' % (entry['info']['deal']['key'], entry['info']['deal']['value'])
for skill in entry['info']['skill'] :
text += '%s(%s) : %s\n' % (skill['name'], skill['key'], skill['value'])
text += '\n'
bot.sendMessage(update.message.chat_id, text=text)
def add_text(draw, key, value, bg_w=50, bg_h=50):
key_font = get_font('key')
val_font = get_font('value')
k_w, k_h = key_font.getsize(key) # keyword width, keyword height
v_w, v_h = val_font.getsize(value) # value width, value height
draw.text((bg_w, bg_h), key,fill='white', font=key_font)
draw.text((bg_w, bg_h + k_h), value, fill='white', font=val_font)
bg_w += k_w + 20 if k_w > v_w else v_w + 20 # margin width
return bg_w
def overlog_img(bot, update):
chat_id = update.message.chat_id
query = update.message.text.split('/wi')[1].strip()
uid = get_overlog_uid(query)
if not uid:
bot.sendMessage(update.message.chat_id, text='등록된 프로필이 없습니다. "Overwatch#1234"와 같이 뒤에 숫자까지 함께 입력하여 검색하시기 바랍니다. 대소문자를 구분합니다')
return
overlog_renew(uid)
bot.sendMessage(update.message.chat_id, text=u'%s님 데이터가 갱신되었습니다.' % (query))
bot.sendMessage(update.message.chat_id, text=u'%s님 데이터 수집을 시작합니다.' % query)
entries = get_overlog_data(uid)
bg_img = Image.open('static/background-1.jpg')
draw = ImageDraw.Draw(bg_img)
img = get_image_from_url(entries['playerIcon'])
bg_img.paste(img, (90, 90))
add_text(draw, entries['displayName'], entries['level'], 260, 90)
bg_w, bg_h = 90, 310
for entry in entries['data'][:5]:
if 'text' in entry.keys():
continue
img = get_image_from_url(entry['img'])
bg_img.paste(img, (bg_w, bg_h))
bg_w += 170
bg_w = add_text(draw, entry['name'], entry['playTime'], bg_w, bg_h)
bg_w = add_text(draw, 'KDA', entry['kda'], bg_w, bg_h)
bg_w = add_text(draw, u'승률', entry['ratio'], bg_w, bg_h)
bg_w = add_text(draw, entry['info']['deal']['key'], entry['info']['deal']['value'], bg_w, bg_h)
for skill in entry['info']['skill'] :
key = '%s(%s)' % (skill['name'], skill['key'])
bg_w = add_text(draw, key, skill['value'], bg_w, bg_h)
# initialize
bg_w = 90
# bg_h += 170
bg_h += 220
filename = '%s.jpeg' % str(uuid4().hex)
bg_img.save(filename, quality=60, optimize=True, progressive=True)
bot.sendMessage(update.message.chat_id, text=u'데이터 수집이 완료되었습니다. 이미지 업로드를 시작합니다.')
bot.send_photo(update.message.chat_id, photo=open(filename, 'rb'))
os.remove(filename)
def echo(bot, update):
bot.sendMessage(update.message.chat_id, text='Hi!')
def main() :
updater = Updater(get_config('token'))
dp = updater.dispatcher
dp.add_handler(CommandHandler('echo', echo))
dp.add_handler(CommandHandler('w', overlog))
dp.add_handler(CommandHandler('wi', overlog_img))
updater.start_polling()
updater.idle()
if __name__ == '__main__' :
main()
| UTF-8 | Python | false | false | 8,980 | py | 3 | run.py | 2 | 0.594968 | 0.579661 | 0 | 202 | 41.693069 | 315 |
syurskyi/Python_Topics | 6,141,803,254,235 | 23527fe77fc1b5a33982a3819bdad38f29508896 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /105_network/002_requests_library/_exercises/templates/Python HTTP Guide. Requests library/003_.py | f1210ab0b92fbaec2e35cde3ff03b3412821485d | []
| no_license | https://github.com/syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | false | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | 2022-11-03T01:22:28 | 2023-02-16T03:08:09 | 198,671 | 2 | 2 | 33 | Python | false | false |
# # For simple request methods there is no significant difference between them. But let's look at working with Basic Auth:
#
# ______ u__.r__
# password_mgr _ ?.r__.H_PMWDR..
# top_level_url _ 'https://httpbin.org/basic-auth/user/passwd'
# password_mgr.a_p.. N.. ? 'user', 'passwd'
# handler _ ?.r__.H_BAH.. ?
# opener _ ?.r__.b_o.. ?
# response _ ?.o.. t_..
# print(?.g_c..
# # 200
# print ?.r..
# # b'{\n "authenticated": true, \n "user": "user"\n}\n'
#
#
# ______ r__
# response _ ?.g.. 'https://httpbin.org/basic-auth/user/passwd' a.._('user', 'passwd'
# print ?.c..
# # b'{\n "authenticated": true, \n "user": "user"\n}\n'
# print ?.j..
# # {'user': 'user', 'authenticated': True}
#
# # Now do you feel the difference between pythonic and non-pythonic? I think the difference is plain to see.
# # And despite the fact that requests is nothing more than a wrapper around urllib3,
# # which in turn is built on top of Python's standard library tools,
# # convenience of writing code is in most cases priority number one.
# #
# # requests provides:
# #
# # Many HTTP authentication methods
# # Sessions with cookies
# # Full SSL support
# # Various convenience methods, such as .json(), that return the data in the format you need
# # Proxying
# # Sensible, logical exception handling
| UTF-8 | Python | false | false | 1,777 | py | 15,362 | 003_.py | 14,734 | 0.639508 | 0.636434 | 0 | 35 | 36.171429 | 113 |
hpham-broadsoft/Cloudqa | 4,956,392,300,365 | 192c736fa0d9d9b92b1bef26c50b519573440823 | 68d7f806a2d49093c92b34150f81f4b03198d6c8 | /utils/RialtoQA/RialtoAutomationTestReport/Engineers.py | 469662c901a2fb6da230c4e9d7c1481ac861164a | []
| no_license | https://github.com/hpham-broadsoft/Cloudqa | c1344166baf9382f90e11c39605e71c3636965ae | 5d1fd3934972924de9b04c49958ec8d34bf01486 | refs/heads/master | 2019-04-29T04:28:50.231114 | 2017-06-20T23:25:46 | 2017-06-20T23:25:46 | 94,146,466 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#from requests.auth import HTTPBasicAuth
import requests
from xml.etree import ElementTree as ET
import getpass
import csv
import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os.path
import sys
import sqlite3 as db
#username=sys.argv[1]
#password=sys.argv[2]
#username=raw_input("Enter Username:")
#password=getpass.getpass("Enter Password:")
List_Files=['Rialto-PartyManager','Rialto-MyPhone','Rialto-MyReports','Rialto-SP Portal','Rialto-SP Portal2','Rialto-MySite']
Final_Last_Build=[]
Build_Data=[]
for i in List_Files:
print i
url="http://10.242.148.135:9090/job/"+i+"/lastBuild/"
headers = {'Authorization':'Basic Z3ZlbmthdGVzd2FyYW46cWFzdm5yZXBv'}
resp = requests.get(url,headers=headers,verify = False)
string1=resp.content
Last_Build=string1.split('</title>')
Last_Build=Last_Build[0].split('<title>')
Last_Build=Last_Build[1].split('#')
Last_Build=Last_Build[1].split('[Jenkins]')
Last_Build=Last_Build[0].split(' ')
Final_Last_Build.append(int(Last_Build[0]))
#print Last_Build
print Final_Last_Build
url="http://10.242.148.135:9090/api/xml"
headers = {'Authorization':'Basic Z3ZlbmthdGVzd2FyYW46cWFzdm5yZXBv'}
r = requests.get(url,headers=headers,verify = False)
#print r.content
with open("colour.xml","w") as f:
f.write(r.content)
print "Successfully Created Color.xml"
doc = ET.parse("colour.xml").getroot()
jobname=doc.findall("job")
for filename in jobname:
color=filename.find("color").text
name=filename.find("name").text
if 'anime' in color:
if name in List_Files:
l=List_Files.index(name)
print Final_Last_Build[l]
Final_Last_Build[l]=Final_Last_Build[l]-1
print Final_Last_Build[l]
for files, build in zip(List_Files, Final_Last_Build):
try:
build=str(build)
url="http://10.242.148.135:9090/job/"+files+"/"+build+"/robot/report/output.xml"
headers = {'Authorization':'Basic Z3ZlbmthdGVzd2FyYW46cWFzdm5yZXBv'}
r = requests.get(url,headers=headers,verify = False)
#print r.content
Filename=files+".xml"
with open(Filename,"w") as f:
f.write(r.content)
print "Successfully Created" + files
except Exception as e:
print e
now = datetime.datetime.now()
print str(now)
#from bs4 import BeautifulSoup
FList = []
RLTO = []
Only_RLTO=[]
FINAL_LIST=[]
Person = []
DictList = []
FatalError=[]
Not_Present=[]
File_list=[]
String1=[]
class Testnew1():
doc = ET.parse("Finaloutput.xml").getroot()
filename=doc.findall("Filename")
for filename in filename:
fname=filename.get('key')
#print fname
doc1 = ET.parse(fname).getroot()
logentries = doc1.findall("suite")
for logentry in logentries:
parentsuite=logentry.get('name')
print parentsuite
suites=logentry.findall("suite")
for suite in suites:
childsuite=suite.get('name')
#print childsuite
test=suite.findall("test")
for test in test:
name=test.attrib["name"]
#print name
status = test.find("status")
#print status
s=status.get('status')
if s=="PASS":
msg=test.find("status").text
string1=parentsuite,msg,"PASS"
FINAL_LIST.append(string1)
RLTO.append(name)
if s=="FAIL":
msg=test.find("status").text
string1=parentsuite,msg,"FAIL"
FINAL_LIST.append(string1)
RLTO.append(name)
#print FINAL_LIST
print "Total Testcase Passed:" +str(len(FINAL_LIST))
for i in range(len(RLTO)):
if "RLTOQA" in RLTO[i].split(":")[0]:
RLTO[i]=RLTO[i].split(":")[0]
else:
RLTO[i]=RLTO[i].split(":")[1]
for i in range(len(RLTO)):
if "-" in RLTO[i]:
Only_RLTO1=RLTO[i]=RLTO[i].split("-")[1]
if "_" in RLTO[i]:
Only_RLTO1=RLTO[i]=RLTO[i].split("_")[1]
if "RLTOQA"in Only_RLTO1:
Only_RLTO1=RLTO[i]=RLTO[i].split(",")[0]
#print Only_RLTO1
Only_RLTO.append(Only_RLTO1)
continue
if " " in Only_RLTO1:
Only_RLTO1=RLTO[i]=RLTO[i].split(" ")[0]
#print Only_RLTO1
Only_RLTO.append(Only_RLTO1)
else:
Only_RLTO.append(Only_RLTO1)
####print Only_RLTO
####print len(Only_RLTO)
for i in range(len(Only_RLTO)):
string2=Only_RLTO[i],FINAL_LIST[i]
FList.append(string2)
#print FList
with open("RIALTO-AUTOMATION STATUS - REGRESSION.csv", 'rb') as f:
reader = csv.reader(f)
row2 = list(reader)
length=len(row2)
for row in row2:
for rlto in Only_RLTO:
if rlto in row[0]:
if rlto==row[0]:
####print row[0] +'Present in '+ rlto + "Owner" + row[3]
String2=row[0],row[3]
Person.append(String2)
Not_Present.append(row[0])
print Person
print len(Person)
for i in Only_RLTO:
if i not in Not_Present:
String2=i,"Unassigned"
Person.append(String2)
#print Person
#print len(Person)
dict_tuples2 = dict(FList)
    string = [(rid, owner, dict_tuples2.get(rid, 0)) for rid, owner in Person]
#print string
#print len(string)
conn = db.connect("TestAnalysisTool.db")
c=conn.cursor()
#c.execute("CREATE TABLE AnalysisEngineerTable(Id INTEGER PRIMARY KEY AUTOINCREMENT,RLTOQAID INTEGER NOT NULL UNIQUE,FileName Varchar(60),Engineer Varchar(20),Date2 Date,Status Varchar(15));")
for ids,name,msg in string:
parentname=msg[0]
Fmsg=msg[1]
Status=msg[2]
print ids,name,parentname,Fmsg,Status
#c.execute("SELECT * FROM AnalysisEngineerTable WHERE RLTOQAID = ?;",(int(ids)))
#row=len(c.fetchall())
#if row==0:
try:
c.execute("INSERT INTO AnalysisEngineerTable(RLTOQAID,FileName,Engineer,Date2,Status) VALUES(?,?,?,?,?)",(ids,parentname,name,now,Status))
except Exception as e:
print e
conn.commit()
conn.close()
| UTF-8 | Python | false | false | 6,742 | py | 1,234 | Engineers.py | 273 | 0.561999 | 0.542717 | 0 | 192 | 33.114583 | 196 |
SahibSingh1311/Session_3 | 12,575,664,288,174 | d545fec502d3b094eabf0db098d85688c4da4e38 | d122fa13031f6f087e91bf89c2133e19c3e3a816 | /session12a.py | d1773146dbafbc61de3a712810800c13938269b5 | []
| no_license | https://github.com/SahibSingh1311/Session_3 | 2b5574d6ba9c352aee8d602ae7aa8e6ec11076f1 | 41ab1cbca13e251c318bd657cbe3aa87f284f52e | refs/heads/master | 2022-01-12T10:18:54.134755 | 2019-06-24T05:23:32 | 2019-06-24T05:23:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Database: mySQL
prog Lang: SQL -> Structured Query Language
1. Create Database
Database is collection of Tables
Tables can be related to each other : 1 to 1 relation or 1 to many relationship
2. Create Table
Table is collection of rows and columns eg: Excel Sheet
ORM: Object relation mapping
Your object's attribute should be your table column name
but in tables we have 1 additional column and we call it as Primary Key
1 2 3 4.......
code: CREATE TABLE customer(
cid int PRIMARY KEY AUTO_INCREMENT,
name varchar (256),
phone varchar (20),
email varchar (256)
)
3. Insert Data in Table
cref = customer("John","9999988888","john@example.com")
insert into customer(Null, "John","9999988888","john@example.com")
4. Install Library mysql-connector
5. Create a DBHelper
6. Update data in table
"""
import mysql.connector
class DBHelper:
def saveCustomerInDB(self, customer):
#1. Create SQL Statement
sqlInsert = "insert into customer values (Null, '{}', '{}','{}') ".format(customer.name,customer.phone, customer.email)
#2 Create Connection
con = mysql.connector.connect(user = "root", password = "", host ="localhost", database = "customer")
#3 Obtain cursor or execute SQL statements {ACID}
cursor = con.cursor()
cursor.execute(sqlInsert)
con.commit()
print(customer.name, "SAVED!!")
def updateCustomerInDB(self, customer):
sql = "update customer set name = '{}', phone = '{}', email = '{}' where cid = '{}'".format(customer.name, customer.phone, customer.email, customer.cid )
con = mysql.connector.connect(user = "root", password ="", host = "localhost", database = "customer")
cursor = con.cursor()
cursor.execute(sql)
con.commit()
print("Customer Updated")
def deleteCustomerDetails(self,customer):
sql = "delete from customer where cid = {}".format(customer.cid)
con = mysql.connector.connect(user="root", password="", host="localhost", database="customer")
cursor = con.cursor()
cursor.execute(sql)
con.commit()
def fetchAllCustomer(self):
sql = "select * from customer"
con = mysql.connector.connect(user="root", password="", host="localhost", database="customer")
cursor = con.cursor()
cursor.execute(sql)
# row = cursor.fetchone()
# print(row)
# row = cursor.fetchone()
# print(row)
rows = cursor.fetchall()
#print(rows) #Rows is a List of Tuples, 1 Tuple Represent 1 Row
for row in rows:
print(row)
def fetchCustomer(self, cid):
sql = "select * from customer where cid = {}".format(cid)
con = mysql.connector.connect(user="root", password="", host="localhost", database="customer")
cursor = con.cursor()
cursor.execute(sql)
row = cursor.fetchone()
print(row)
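    # Illustrative variant (not part of the original lesson): mysql.connector
    # also accepts %s placeholders, letting the driver escape values instead of
    # the string formatting used in the methods above.
    def saveCustomerParameterized(self, customer):
        sql = "insert into customer values (Null, %s, %s, %s)"
        con = mysql.connector.connect(user="root", password="", host="localhost", database="customer")
        cursor = con.cursor()
        cursor.execute(sql, (customer.name, customer.phone, customer.email))
        con.commit()
        print(customer.name, "SAVED!!")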
class customer:
def __init__(self, name, phone, email):
self.name = name
self.phone = phone
self.email = email
def showCustomerDetails(self):
print(">>Name: {} Phone: {} Email: {}".format(self.name, self.phone, self.email ))
"""
print("Options: ")
print("1. Create New Customer")
print("2. Update Customer")
print("3. Delete Customer")
print("4. Show All Customers")
print("5. Show Particular Customers")
choice = int(input("Enter Choice: "))
if choice == 1:
cRef = customer(None, None, None)
cRef.name = input("Enter Customer Name : ")
cRef.phone = input("Enter Customer Phone : ")
cRef.email = input("Enter Customer Email : ")
cRef.showCustomerDetails()
save = input("Do you want to Save Customer:(yes / no) ")
if save == "yes":
db = DBHelper()
db.saveCustomerInDB(cRef)
elif choice == 2:
cRef = customer(None, None, None)
cRef.cid = int(input("Enter Customer ID : ")) #You NEED TO KNOW THE CUSTOMER ID
cRef.name = input("Enter Customer Name : ")
cRef.phone = input("Enter Customer Phone : ")
cRef.email = input("Enter Customer Email : ")
cRef.showCustomerDetails()
save = input("Do you want to Save Customer:(yes / no) ")
if save == "yes":
db = DBHelper()
db.updateCustomerInDB(cRef)
elif choice == 3:
cRef = customer(None, None, None)
cRef.cid = int(input("Enter Customer ID : ")) # You NEED TO KNOW THE CUSTOMER ID
save = input("Do you want to delete Customer:(yes / no) ")
if save == "yes":
db = DBHelper()
db.deleteCustomerDetails(cRef)
print("Customer Deleted")
elif choice == 4:
db = DBHelper()
db.fetchAllCustomer()
elif choice == 5:
cRef = customer(None, None, None)
cRef.cid = int(input("Enter Customer ID: "))
db = DBHelper()
db.fetchCustomer(cRef.cid)
""" | UTF-8 | Python | false | false | 5,072 | py | 39 | session12a.py | 39 | 0.597989 | 0.586751 | 0 | 150 | 32.82 | 161 |
Gillepool/Coursera | 1,013,612,296,257 | 5472a94e37de8459ef01072d97d5b3d6e9f8f33c | 9c64f63b9ca4519b07cfca2f6b8af1cdca6a7a33 | /movieReccomendations.py | 1c7fa4aac3aa0eea637d961acb1158d112bc7469 | []
| no_license | https://github.com/Gillepool/Coursera | 274401e07287760bec1b9cc70306a46540ed350e | 3268148469f5f8d37078d266eee0b22b5fa5b92d | refs/heads/master | 2020-09-11T08:18:00.440936 | 2016-09-12T20:28:29 | 2016-09-12T20:28:29 | 65,901,843 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scipy.io
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
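# Comment added for clarity: cofiCostFunc implements the regularized
# collaborative-filtering objective
#   J = 1/2 * sum_{(i,j): R(i,j)=1} (X.Theta^T - Y)_{ij}^2
#       + lambda/2 * ||Theta||_F^2 + lambda/2 * ||X||_F^2
# together with its gradients with respect to X and Theta.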
def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, Lambda):
    X = params[0:num_movies*num_features].reshape(num_movies, num_features)
    Theta = params[num_movies*num_features:].reshape(num_users, num_features)
    J = 0
    X_grad = np.zeros((np.shape(X)))
    Theta_grad = np.zeros((np.shape(Theta)))
    J_temporary = (X.dot(Theta.T) - Y)**2
    J = np.sum(np.sum(J_temporary[R == 1]))/2 + Lambda/2 * np.sum(np.sum(Theta**2)) + Lambda/2 * np.sum(np.sum(X**2))
    X_grad = ((X.dot(Theta.T) - Y) * R).dot(Theta) + Lambda*X
    Theta_grad = ((X.dot(Theta.T) - Y) * R).T.dot(X) + Lambda*Theta
grad = np.append(X_grad.flatten(), Theta_grad.flatten())
return J, grad
def computeNumericalGradient(J, theta):
numgrad = np.zeros((np.shape(theta)))
perturb = np.zeros((np.shape(theta)))
e = 1e-4
for p in range(len(theta.flatten())):
perturb[p] = e
loss1, grad1 = J(theta - perturb)
loss2, grad2 = J(theta + perturb)
numgrad[p] = (loss2 - loss1)/(2*e)
perturb[p] = 0
return numgrad
def checkCostFunction(Lambda = None):
if Lambda == None:
Lambda = 0
X_t = np.random.rand(4,3)
theta_t = np.random.rand(5,3)
Y = X_t.dot(theta_t.T)
    Y[np.random.rand(*np.shape(Y)) > 0.5] = 0
R = np.zeros((np.shape(Y)))
R[Y != 0] = 1
m, n = np.shape(X_t)
X = np.random.randn(m,n)
a, b = np.shape(theta_t)
theta = np.random.randn(a,b)
num_users = np.shape(Y)[1]
num_movies = np.shape(Y)[0]
num_features = np.shape(theta_t)[1]
def J(t):
return cofiCostFunc(t, Y, R, num_users, num_movies, \
num_features, Lambda)
numgrad = computeNumericalGradient(J, \
np.append(X.flatten(), theta.flatten()))
cost, grad = cofiCostFunc(np.append(X.flatten(), \
theta.flatten()), Y, R, num_users, \
num_movies, num_features, Lambda)
#print numgrad, grad
#print 'The above two columns you get should be very similar.'
#print '(Left-Your Numerical Gradient, Right-Analytical Gradient)'
#diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)
#print 'If your backpropagation implementation is correct, then \
#the relative difference will be small (less than 1e-9).\
#Relative Difference: ', diff
def LoadMovieList():
counter = 0
movielist = {}
with open('data/movie_ids.txt', 'rb') as fid:
lines = fid.readlines()
for line in lines:
movielist[counter] = line.split(' ', 1)[1]
counter += 1
return movielist
def normalizeRatings(Y, R):
[m, n] = np.shape(Y)
Ymean = np.zeros((m, 1))
YNorm = np.zeros(np.shape(Y))
for i in range(m):
idx = np.where(R[i, :] == 1)
Ymean[i] = np.mean(Y[i, idx])
YNorm[i, idx] = Y[i, idx] - Ymean[i]
return YNorm, Ymean
print 'Loading movie ratings dataset.'
data = scipy.io.loadmat('data/ex8_movies.mat')
R = data['R'] # R is a 1682x943 matrix. R[i, j] where i is movie and j is user. R(i, j) = 1 id user j gave a rating to movie i
Y = data['Y'] # A matrix 1682x943 with 1682 moies annd 943 users
#R[0, :] movie number 0, which is Toy story, checking for all users
print 'Average rating for movie 1 (Toy Story): %8.8f/5 ' \
      % np.mean(Y[0, np.where(R[0, :] == 1)])
plt.figure(figsize=(5, 5))
plt.imshow(Y)
#plt.show()
#Debugging the collaborative filtering cost function
data1 = scipy.io.loadmat('data/ex8_movieParams.mat')
X = data1['X']
theta = data1['Theta']
# Reduce the data set size so that this runs faster
num_users = 4
num_movies = 5
num_features = 3
X = X[0:num_movies, 0:num_features]
theta = theta[0:num_users, 0:num_features]
Y = Y[0:num_movies, 0:num_users]
R = R[0:num_movies, 0:num_users]
#Evaluate the costfunction
J, grad = cofiCostFunc(np.append(X.flatten(), theta.flatten()), Y, R, num_users, num_movies, num_features, 0)
print(J)
checkCostFunction(0)
J, grad = cofiCostFunc(np.append(X.flatten(), theta.flatten()), Y, R, num_users, num_movies, num_features, 1.5)
print(J)
checkCostFunction(1.5)
#Main code for learning movie recommendations and categories goes here......
movieList = LoadMovieList()
my_ratings = np.zeros((1682,1))
#Ratings of a few random movies
my_ratings[1] = 4 # Toy story, rating 4...
my_ratings[98] = 2
my_ratings[7] = 3
my_ratings[12]= 5
my_ratings[54] = 4
my_ratings[64] = 5
my_ratings[66] = 3
my_ratings[69] = 5
my_ratings[183] = 4
my_ratings[226] = 5
my_ratings[335] = 5
my_ratings[2] = 5
my_ratings[3] = 1
my_ratings[99] = 5
my_ratings[123]= 5
my_ratings[333] = 4
my_ratings[76] = 5
my_ratings[97] = 3
my_ratings[212] = 5
my_ratings[200] = 4
my_ratings[90] = 1
my_ratings[330] = 1
print("\n\nNew User Ratings:\n")
for i in range(len(my_ratings)):
if my_ratings[i] > 0:
print("Rated %d for %s\n", my_ratings[i], movieList[i-1])
print 'Training collaborative filtering...'
data = scipy.io.loadmat('data/ex8_movies.mat')
R = data['R']
Y = data['Y']
#Add my own ratings to the data matrices
Y = np.append(my_ratings, Y, 1)
R = np.append((my_ratings != 0), R, 1)
#Normalize ratings
[Ynorm, Ymean] = normalizeRatings(Y, R)
#Useful values
num_users = np.shape(Y)[1]
num_movies = np.shape(Y)[0]
num_features = 10 # number of "unknown" categories (features)
#set init params (Theta, X)
X = np.random.randn(num_movies, num_features)
Theta = np.random.randn(num_users, num_features)
initital_parameters = np.append(X.flatten(), Theta.flatten())
Lambda = 10
cost = lambda params: cofiCostFunc(params, Y, R, num_users, num_movies, num_features, Lambda)[0]
grad = lambda params: cofiCostFunc(params, Y, R, num_users, num_movies, num_features, Lambda)[1]
theta = op.minimize(cost, np.append(X.flatten(), Theta.flatten()), method='CG', jac=grad, options={'disp':True, 'maxiter':50})
theta = theta.x
print(theta)
#Unfold the theta
X = theta[:num_movies*num_features].reshape(num_movies, num_features)
Theta = theta[num_movies*num_features:].reshape(num_users, num_features)
p = X.dot(Theta.T)
my_predictions = p[:, 0]+Ymean.flatten()
movieList = LoadMovieList()
ix = my_predictions.argsort()[::-1]
print(R)
for i in range(len(movieList)):
j = ix[i]
print("Predicting rating %.1f for movie %s" % (my_predictions[j]/2, movieList[j]))
| UTF-8 | Python | false | false | 6,427 | py | 4 | movieReccomendations.py | 2 | 0.634511 | 0.604481 | 0 | 221 | 28.081448 | 127 |
MostafaOkasha/orc | 13,374,528,180,442 | 3adcc5aa06d3682cc97bf41eee3ba0634f0033e2 | 9e0607d3cc71c1e890b558742305f2669777b8b4 | /database.py | 7894a89d83dfdc09d9ec00da53d98b169559b829 | []
| no_license | https://github.com/MostafaOkasha/orc | 9858111d841ee052b57a69becd3812257ec11d02 | d4829cbcb698e037cd741a61a0eaf501aa2da708 | refs/heads/master | 2020-04-09T15:11:01.261873 | 2016-11-04T20:05:58 | 2016-11-04T20:05:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sqlite3
import datetime
# TODO catch DB errors
# Setup sql Instructions
'''
To setup sqlite3 to make it more readable when running queries run the following
.header on
.mode column
.timer on
To setup the databases
USER TABLE
CREATE TABLE IF NOT EXISTS `users` (
`id` float NOT NULL,
`name` varchar(30) NOT NULL,
`pw_hash` varchar(60) NOT NULL,
`bad_pw_count` varchar(60) NOT NULL,
`game_wins` int(11) NOT NULL,
`game_total` int(11) NOT NULL,
`game_inprogress` tinyint(1) NOT NULL,
`date_joined` date NOT NULL,
`last_connected` date NOT NULL,
`account_locked` tinyint(1) NOT NULL,
`locked_date` date NOT NULL,
`require_pw_change` tinyint(1) NOT NULL,
PRIMARY KEY (`id`)
);
GAME TABLE
CREATE TABLE IF NOT EXISTS `game` (
`id` float NOT NULL,
`player_1` float NOT NULL,
`player_2` float NOT NULL,
`date_started` date NOT NULL,
`date_ended` date NOT NULL,
`player_move` float NOT NULL,
`game_winner` float NOT NULL,
PRIMARY KEY (`id`,`date_started`)
);
MOVES TABLE
CREATE TABLE IF NOT EXISTS `moves` (
`move_id` int(11) NOT NULL,
`id` int(11) NOT NULL,
`from` int(11) NOT NULL,
`to` int(11) NOT NULL,
`date` int(11) NOT NULL,
`time` int(11) NOT NULL,
PRIMARY KEY (`move_id`)
);
INSERT DATA into Users table
INSERT INTO `users` (`id`, `name`, `pw_hash`,`bad_pw_count`, `game_wins`, `game_total`, `game_inprogress`, `date_joined`, `last_connected`, `account_locked`, `locked_date`,`require_pw_change`) VALUES
(1, 'shane', '1',0, 0, 0, 0, '2016-04-11', '2016-04-11', 0, 0, 0),
(2, '2', 'c4ca4238a0b9',0, 0, 0, 0, '2016-04-18', '2016-04-18', 0, 0, 0);
'''
# Instructions for using the SQLite shell
'''
.tables - Will list out the tables
.schema [tablename] - Will show the CREATE statement(s) for a table or tables
'''
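# Illustrative sketch (not part of the original notes): the schema above can
# also be applied from Python instead of the sqlite3 shell, e.g.:
#
#   con = sqlite3.connect('orc.db')
#   con.executescript('CREATE TABLE IF NOT EXISTS users (...);')  # paste the full DDL here
#   con.commit()
#   con.close()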
def authuser(uname, pw):
print("authing user")
toreturn = False
sql = 'SELECT * from users where name = "'+uname+'" AND pw_hash = "'+pw+'";'
data = (runquery(sql))
if len(data) != 0:
if data[9] != 1: # account not locked?
sql = "UPDATE users SET last_connected = " + '"' + datetime.datetime.now().strftime("%Y-%m-%d") + '"' + ' WHERE name = "' + uname + '";'
updatedatabase(sql)
toreturn = True
else:
toreturn = "Account locked"
if toreturn != True:
print("Username/Pasword didn't match - looking for username")
sql = 'SELECT * from users where name = "' + uname + '";'
data = (runquery(sql))
if len(data) != 0: # Found username
print("found username")
badpwcount = int(data[3]) + 1
if badpwcount <= 5:
sql = "UPDATE users SET bad_pw_count = " + str(badpwcount) + ' WHERE name = "' + str(uname) + ";"
toreturn = "Incorrect username password combination"
else:
sql = "UPDATE users SET account_locked = 1, locked_date = " + '"' + datetime.datetime.now().strftime("%Y-%m-%d") + '"' + ' WHERE name = "' + str(uname) +'";'
toreturn = "Account locked"
updatedatabase(sql)
return toreturn
def updatedatabase(sqlstatement):
try:
con = sqlite3.connect('orc.db')
cur = con.cursor()
cur.execute(sqlstatement)
con.commit()
except sqlite3.Error:
if con:
con.rollback()
finally:
if con:
            con.close()
def runquery(sqlstatement):
con = sqlite3.connect('orc.db')
cur = con.cursor()
cur.execute(sqlstatement)
data = cur.fetchall()
if len(data) != 0:
data = data[0] # convert to a Tuple
else:
data = ""
con.close()
return data
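# Illustrative sketch (not in the original file): the same lookup with '?'
# placeholders, which lets sqlite3 escape the values and avoids building SQL
# by string concatenation as the helpers above do.
def authuser_parameterized(uname, pw):
    con = sqlite3.connect('orc.db')
    cur = con.cursor()
    # the driver substitutes the tuple values safely for each '?'
    cur.execute('SELECT * FROM users WHERE name = ? AND pw_hash = ?;', (uname, pw))
    row = cur.fetchone()
    con.close()
    return row is not None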
| UTF-8 | Python | false | false | 3,903 | py | 6 | database.py | 2 | 0.560594 | 0.535229 | 0 | 135 | 27.911111 | 203 |
trucker-su/Django-Blog-With-Quiz-Game | 10,909,216,942,518 | f38e8fc5cfc19a7356708a87fc66e4707ae9a77d | 06d8496178d209b2c7914ff1b2c2cc3aecf7829b | /blog/migrations/0016_article_slug.py | 097b5245319ad72223e027c77c1eef6786f54e2f | []
| no_license | https://github.com/trucker-su/Django-Blog-With-Quiz-Game | 111d5f87d0f573e15e1c1baec9f23fa4029b380b | e45d692414829e20722b400d53fa129312b4918f | refs/heads/master | 2023-04-03T22:24:52.821470 | 2021-03-19T06:28:02 | 2021-03-19T06:28:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.5 on 2020-06-01 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0015_article_article_text2'),
]
operations = [
migrations.AddField(
model_name='article',
name='slug',
field=models.SlugField(blank=True, max_length=250, null=True),
),
]
| UTF-8 | Python | false | false | 407 | py | 35 | 0016_article_slug.py | 22 | 0.592138 | 0.535627 | 0 | 18 | 21.611111 | 74 |
ioppermann/munin-contrib | 14,766,097,586,745 | ff7e541c9dccf7d0e949e12d9000c214a4209663 | c4c5ee3612c37ba495fcc41836635fd10191b926 | /plugins/weather/weather_ | d91580b7b7ec3db6eb848695278c9ea2ce0a38af | []
| no_license | https://github.com/ioppermann/munin-contrib | 9ef935303baab10c3c453a1887d718a0b9d4af68 | c9157be3fe0bdcc3fb54855056e1cebe94e4dcd5 | refs/heads/master | 2021-01-24T03:37:46.738509 | 2018-02-24T21:58:04 | 2018-02-24T21:58:50 | 21,696,221 | 7 | 3 | null | true | 2015-01-07T18:02:47 | 2014-07-10T14:01:37 | 2014-09-10T05:39:59 | 2015-01-07T18:02:45 | 4,938 | 3 | 1 | 0 | Perl | null | null | #!/usr/bin/python
import os
import re
import sys
import urllib
url = 'http://www.weather.com/weather/today/%s'
re_tmp = re.compile('realTemp: "(\d+)"')
re_hum = re.compile('relativeHumidity: "(\d+)"')
re_loc = re.compile('locName: "([\w ]+)"')
#code = sys.argv[0][(sys.argv[0].rfind('_') + 1):]
code = os.environ.get('code', sys.argv[0][(sys.argv[0].rfind('_') + 1):])
if code == None: sys.exit(1)
if len(sys.argv) == 2 and sys.argv[1] == "autoconf":
print "yes"
elif len(sys.argv) == 2 and sys.argv[1] == "config":
u = urllib.urlopen(url % code)
txt = u.read()
u.close()
LOC_list = re_loc.findall(txt)
if len(LOC_list):
LOC = LOC_list[0]
else:
LOC = "Unknown"
print 'graph_title Weather in %s' % LOC
print 'graph_vlabel Temperature and Humidity'
print 'graph_category sensors'
print 'temperature.label Temperature'
print 'humidity.label Humidity'
print 'graph_args --base 1000 -l 0'
else:
u = urllib.urlopen(url % code)
txt = u.read()
u.close()
TMP_F_list = re_tmp.findall(txt)
HUM_list = re_hum.findall(txt)
if len(HUM_list):
HUM = HUM_list[0]
else:
sys.exit(1)
if len(TMP_F_list):
TMP_F = TMP_F_list[0]
        TMP_C = (int(TMP_F) - 32) * 5.0 / 9
else:
sys.exit(1)
print 'temperature.value %s' % TMP_C
print 'humidity.value %s' % HUM
| UTF-8 | Python | false | false | 1,279 | 806 | weather_ | 499 | 0.627834 | 0.608288 | 0 | 61 | 19.967213 | 73 |
TheKinshu/100-Days-Python | 8,744,553,426,205 | e12e6aa86ada66e9b77a3702eceed1cf100e2ad9 | 0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52 | /Day45/MovieToWatch.py | 5be19e2e5e47ed622655b39619f209ed9e9a8c9e | []
| no_license | https://github.com/TheKinshu/100-Days-Python | 15cbacc608ee349cc9733a7032e10a359bebb731 | 293ad6b3e5f5208da84efbc5b2d2d395a5a53421 | refs/heads/master | 2023-04-18T08:21:30.361800 | 2021-05-02T18:48:39 | 2021-05-02T18:48:39 | 351,582,416 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup
import requests
URL = "https://www.empireonline.com/movies/features/best-movies-2/"
response = requests.get(URL)
website_html = response.text
soup = BeautifulSoup(website_html, "html.parser")
content = soup.find(name="div", class_="jsx-3821216435 block-item listicle-container")
movies = content.find_all(name='a')
reviewsMovies = [movie.getText() for movie in movies if "Read Empire's" in (movie.getText())]
movieList = []
for movie in reviewsMovies:
temp = movie.split()
movieList.append(' '.join(temp[4:]))
movieL = (movieList[::-1])
with open("./Day45/movie.txt", "w") as file:
for movie in movieL:
file.write(f"{movie}\n") | UTF-8 | Python | false | false | 687 | py | 110 | MovieToWatch.py | 90 | 0.697234 | 0.673945 | 0 | 27 | 24.481481 | 93 |
yanbinbi/leetcode | 463,856,489,633 | 74029cb32b649d0f9a08c0294195485ca1cde5a3 | 5afd733a5c1f753601c69b8b4eae1b49edfbae7c | /101-200/119.py | 93cf5d2bee16f95dc9da01e38c24bd119d1fb339 | []
| no_license | https://github.com/yanbinbi/leetcode | 9dcd4a0160be915006455b83d6b7cd39e9819811 | 616a868bfa7bdd00195067b0477b0236a72d23e0 | refs/heads/master | 2021-05-13T19:34:17.222576 | 2017-11-12T02:04:31 | 2017-11-12T02:04:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
if rowIndex < 0:
return []
ret = [1]
for i in range(1, rowIndex+1):
for j in range(i-1):
ret.append(ret.pop(0)+ret[0])
ret.append(1)
return ret
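# Comment added for clarity: the inner loop rotates the previous row in place;
# popping the head while reading the new head produces each pairwise sum, so
# row k is built with O(1) extra space beyond the output itself.
# Usage sketch: Solution().getRow(3) -> [1, 3, 3, 1]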
| UTF-8 | Python | false | false | 362 | py | 219 | 119.py | 218 | 0.444751 | 0.422652 | 0 | 14 | 24.857143 | 45 |
bianzheng123/NN_as_Classification | 7,198,365,221,981 | 2443726509aad27c37d791600d25d25a03de6133 | fafecccce95eb60bebbb3fb9a12dc0517bd0e8ff | /config_batch_run.py | 6ab5e9209207f99f98428d57ba87840752e96d9c | []
| no_license | https://github.com/bianzheng123/NN_as_Classification | 92d41a9845e7d6ab1bf7c129a4ae6341b0d535cc | 57c56d196c54d41b9f3a5fece5664c68c5f07faf | refs/heads/master | 2023-05-02T05:06:50.856257 | 2021-05-19T04:38:54 | 2021-05-19T04:38:54 | 314,740,560 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from util import send_email
def run_nohup(long_config_dir, short_config_dir, dataset_name, fname, _type, k):
os.system('nohup python3 -u run.py --long_term_config_dir %s --short_term_config_dir %s --type %s --k %d > '
'./log/%s/%s_%s.log 2>&1 &' % (
long_config_dir, short_config_dir, _type, k, _type, dataset_name, fname))
def run_frontend(long_config_dir, short_config_dir, _type, k):
os.system('python3 -u run.py --long_term_config_dir %s --short_term_config_dir %s --type %s --k %d' % (
long_config_dir, short_config_dir, _type, k))
'''
nn_classification
short_config_fname_arr = ['1_hnsw_16', '1_kmeans_independent_16', '1_kmeans_multiple_16', '1_knn_16',
'1_lsh_16', '1_random_hash_16']
pq_nn
short_config_fname_arr = ['1_hnsw_16', '1_kmeans_independent_16', '1_knn_16',
'1_lsh_16', '1_random_hash_16']
counting_index
short_config_fname_arr = ['1_kmeans_independent_16', '1_kmeans_multiple_16', '1_e2lsh_16']
'''
if __name__ == '__main__':
ds_fname = 'deep'
k = 10
_type = 'nn_classification' # pq_nn nn_classification counting_index
base_config_dir = '/home/zhengbian/NN_as_Classification/config/%s/big_ds/' % _type
long_config_dir = base_config_dir + ds_fname + '.json'
# para_l = ['two_block_512_dim', 'two_block_1024_dim', 'one_block_2048_dim', 'one_block_512_dim',
# 'two_block_512_dim_no_bn_dropout', 'res_net'] # cnn two_block_8192_dim_no_bn_dropout
para_l = ['kaffpa'] # cnn two_block_8192_dim_no_bn_dropout
# para_l = [1, 2, 3]
method_l = ['knn']
para_name = 'partition_type'
n_classifier = 4
for method in method_l:
for para in para_l:
fname = '{}_{}_256_{}_{}.json'.format(n_classifier, method, para_name, para)
short_config_dir = base_config_dir + fname
# run_nohup(long_config_dir, short_config_dir, ds_fname, fname, _type, k)
run_frontend(long_config_dir, short_config_dir, _type, k)
# send_email.send("glove increase weight complete")
| UTF-8 | Python | false | false | 2,148 | py | 77 | config_batch_run.py | 64 | 0.58473 | 0.54702 | 0 | 48 | 42.75 | 112 |
SalatskySal/osbs-client | 10,728,828,353,923 | 6c59b869ae6bf0899d2d58f5dff96b19a56735b8 | 5e100133ec454150f20e97ed9e47df7f08468c3d | /tests/utils/test_yaml.py | 5385d667f5fea9c4f005ae42d665cfade0e136a1 | [
"BSD-3-Clause"
]
| permissive | https://github.com/SalatskySal/osbs-client | 460774e3cac5552edb26c5fdfcd421ee3c916cd7 | b66e19eb5ffe0e6cf97cb5c0c2f57403a862a0cf | refs/heads/master | 2021-02-12T17:11:29.599698 | 2020-04-27T19:21:45 | 2020-04-29T13:55:09 | 244,610,612 | 0 | 0 | BSD-3-Clause | true | 2020-03-03T10:47:08 | 2020-03-03T10:47:07 | 2020-02-26T16:22:06 | 2020-03-02T20:35:30 | 3,926 | 0 | 0 | 0 | null | false | false | """
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from flexmock import flexmock
from osbs.utils.yaml import read_yaml, read_yaml_from_file_path
import json
import os
import pkg_resources
import pytest
import yaml
def test_read_yaml_file_ioerrors(tmpdir):
config_path = os.path.join(str(tmpdir), 'nosuchfile.yaml')
with pytest.raises(IOError):
read_yaml_from_file_path(config_path, 'schemas/nosuchfile.json')
@pytest.mark.parametrize('from_file', [True, False])
@pytest.mark.parametrize('config', [
("""\
compose:
modules:
- mod_name:mod_stream:mod_version
"""),
])
def test_read_yaml_file_or_yaml(tmpdir, from_file, config):
expected = yaml.safe_load(config)
if from_file:
config_path = os.path.join(str(tmpdir), 'config.yaml')
with open(config_path, 'w') as fp:
fp.write(config)
output = read_yaml_from_file_path(config_path, 'schemas/container.json')
else:
output = read_yaml(config, 'schemas/container.json')
assert output == expected
def test_read_yaml_bad_package(caplog):
with pytest.raises(ImportError):
read_yaml("", 'schemas/container.json', package='bad_package')
assert 'Unable to find package bad_package' in caplog.text
def test_read_yaml_file_bad_extract(tmpdir, caplog):
class FakeProvider(object):
def get_resource_stream(self, pkg, rsc):
raise IOError
# pkg_resources.resource_stream() cannot be mocked directly
# Instead mock the module-level function it calls.
(flexmock(pkg_resources)
.should_receive('get_provider')
.and_return(FakeProvider()))
config_path = os.path.join(str(tmpdir), 'config.yaml')
with open(config_path, 'w'):
pass
with pytest.raises(IOError):
read_yaml_from_file_path(config_path, 'schemas/container.json')
assert "unable to extract JSON schema, cannot validate" in caplog.text
def test_read_yaml_file_bad_decode(tmpdir, caplog):
(flexmock(json)
.should_receive('load')
.and_raise(ValueError))
config_path = os.path.join(str(tmpdir), 'config.yaml')
with open(config_path, 'w'):
pass
with pytest.raises(ValueError):
read_yaml_from_file_path(config_path, 'schemas/container.json')
assert "unable to decode JSON schema, cannot validate" in caplog.text
| UTF-8 | Python | false | false | 2,539 | py | 5 | test_yaml.py | 3 | 0.674281 | 0.672706 | 0 | 86 | 28.523256 | 80 |
thomas536/alltheplaces | 14,963,666,099,195 | 3b81b7cb024a0f04a65ac1f52cdebfc9b90fa475 | 251f5c092d4b7760cec8c2b6324e5290b917721f | /locations/spiders/chrome_industries.py | 05d92544eb10d5ec370da9e9f6832e93cf406db3 | [
"MIT",
"CC0-1.0"
]
| permissive | https://github.com/thomas536/alltheplaces | 663a2441054ba62df6d6e070c19b1ba91f2f4f1f | ac4d4783572d55c0799fe6aeb5f6c0e72fad55fb | refs/heads/master | 2021-11-27T12:21:46.387422 | 2021-09-08T18:33:46 | 2021-09-08T18:33:46 | 242,420,362 | 0 | 0 | NOASSERTION | true | 2021-09-09T05:00:22 | 2020-02-22T22:25:57 | 2021-08-24T04:15:25 | 2021-09-09T05:00:20 | 7,437 | 0 | 0 | 0 | Python | false | false | import scrapy
import re
import json
import lxml
from locations.items import GeojsonPointItem
class ChromeIndustriesSpider(scrapy.Spider):
name = "chrome_industries"
item_attributes = { 'brand': "Chrome Industries" }
allowed_domains = ["www.chromeindustries.com"]
start_urls = (
'https://www.chromeindustries.com/on/demandware.store/Sites-chrome_na-Site/en_US/Stores-Search?latitude=32.7269669&longitude=-117.16470939999999&maxDistance=50000000',
)
def parse(self, response):
json_data = json.loads(response.body_as_unicode().replace('null' , '""'))
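        # Comment added for clarity: the endpoint embeds bare `null` tokens;
        # swapping them for empty strings before json.loads keeps the string
        # concatenation below (address + address2) from failing on None values.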
for item in json_data['locations']:
properties = {
'addr_full': item['address'] + ' '+item['address2'],
'phone': item['phone'],
'name' : item['name'],
'city': item['city'],
'state': item['state'],
'postcode': item['zipcode'],
'ref': item['id'],
'website': "https://www.chromeindustries.com/stores/",
'lat': float(item['latitude']),
'lon': float(item['longitude'])
}
            yield GeojsonPointItem(**properties)
| UTF-8 | Python | false | false | 1,201 | py | 429 | chrome_industries.py | 418 | 0.571191 | 0.542048 | 0 | 29 | 40.448276 | 175
YanghaoZYH/GaAN | 18,073,222,418,629 | 43a3fb5b6eb8b5f823e25d6cb68f794234a69abe | 7d5205682e904a9ffac3b6e04946efaa3fd86283 | /mxgraph/graph.py | 2e65d7432cc96f74ebd5f156b9f05440425c213a | []
| no_license | https://github.com/YanghaoZYH/GaAN | 690bae05e91c3acfc8cb8d6f074d11fe7ec9dd34 | 98010378215dfdd0b4c66485dc70d2f384311ba2 | refs/heads/master | 2020-07-06T14:02:55.025690 | 2019-08-18T16:20:45 | 2019-08-18T16:20:45 | 203,041,405 | 1 | 0 | null | true | 2019-08-18T18:24:32 | 2019-08-18T18:24:31 | 2019-08-18T16:20:56 | 2019-08-18T16:20:54 | 106 | 0 | 0 | 0 | null | false | false | import numpy as np
import scipy.sparse as ss
import logging
import mxgraph._graph_sampler as _graph_sampler
def set_seed(seed):
"""Set the random seed of the inner sampling handler
Parameters
----------
seed : int
Returns
-------
ret : bool
"""
return _graph_sampler.set_seed(seed)
class CSRMat(object):
"""A simple wrapper of the CSR Matrix
Apart from the traditoinal CSR format, we use two additional arrays: row_ids and col_ids
to track the original ids of the row/col indices
We use the C++ API to accelerate the speed if possible
"""
def __init__(self, end_points, ind_ptr, row_ids, col_ids, values=None, force_contiguous=False):
self.end_points = end_points
self.ind_ptr = np.ascontiguousarray(ind_ptr, dtype=np.int32)
self.values = None if values is None else values.astype(np.float32)
self.row_ids = row_ids
self.col_ids = col_ids
assert self.ind_ptr.size == len(self.row_ids) + 1
if force_contiguous:
self.end_points = np.ascontiguousarray(self.end_points, dtype=np.int32)
self.ind_ptr = np.ascontiguousarray(self.ind_ptr, dtype=np.int32)
if self.values is not None:
self.values = np.ascontiguousarray(self.values, dtype=np.float32)
self.row_ids = np.ascontiguousarray(self.row_ids, dtype=np.int32)
self.col_ids = np.ascontiguousarray(self.col_ids, dtype=np.int32)
self._row_id_reverse_mapping = -1 * np.ones(self.row_ids.max() + 1, dtype=np.int32)
self._col_id_reverse_mapping = -1 * np.ones(self.col_ids.max() + 1, dtype=np.int32)
self._row_id_reverse_mapping[self.row_ids] = np.arange(self.row_ids.size, dtype=np.int32)
self._col_id_reverse_mapping[self.col_ids] = np.arange(self.col_ids.size, dtype=np.int32)
# self._row_id_reverse_mapping = dict()
# self._col_id_reverse_mapping = dict()
# for (i, ele) in enumerate(self.row_ids):
# self._row_id_reverse_mapping[ele] = i
# for (i, ele) in enumerate(self.col_ids):
# self._col_id_reverse_mapping[ele] = i
def to_spy(self):
"""Convert to the scipy csr matrix
Returns
-------
ret : ss.csr_matrix
"""
if self.values is None:
values = np.ones(shape=self.end_points.shape, dtype=np.float32)
else:
values = self.values
return ss.csr_matrix((values, self.end_points, self.ind_ptr), shape=(self.row_ids.size, self.col_ids.size))
@staticmethod
def from_spy(mat):
"""
Parameters
----------
mat : ss.csr_matrix
Returns
-------
ret : CSRMat
"""
return CSRMat(end_points=mat.indices,
ind_ptr=mat.indptr,
row_ids=np.arange(mat.shape[0], dtype=np.int32),
col_ids=np.arange(mat.shape[1], dtype=np.int32),
values=mat.data,
force_contiguous=True)
@property
def nnz(self):
return self.end_points.size
def reverse_row_map(self, node_ids):
"""Maps node ids back to row indices in the CSRMat
Parameters
----------
node_ids : np.ndarray or list or tuple or int
Returns
-------
ret : np.ndarray
"""
# if isinstance(node_ids, (np.ndarray, list, tuple)):
# return np.array(list(map(lambda ele: self._row_id_reverse_mapping[ele], node_ids)),
# dtype=np.int32)
# else:
return self._row_id_reverse_mapping[node_ids]
def reverse_col_map(self, node_ids):
"""Maps node ids back to col indices in the CSRMat
Parameters
----------
node_ids : np.ndarray or list or tuple or int
Returns
-------
ret : np.ndarray
"""
# if isinstance(node_ids, (np.ndarray, list, tuple)):
# return np.array(list(map(lambda ele: self._col_id_reverse_mapping[ele], node_ids)),
# dtype=np.int32)
# else:
return self._col_id_reverse_mapping[node_ids]
def submat(self, row_indices=None, col_indices=None):
"""Get the submatrix of the corresponding row/col indices
Parameters
----------
row_indices : np.ndarray or None
col_indices : np.ndarray or None
Returns
-------
ret : CSRMat
"""
row_indices = None if row_indices is None else row_indices.astype(np.int32)
col_indices = None if col_indices is None else col_indices.astype(np.int32)
dst_end_points, dst_values, dst_ind_ptr, dst_row_ids, dst_col_ids\
= _graph_sampler.csr_submat(self.end_points,
self.values,
self.ind_ptr,
self.row_ids,
self.col_ids,
row_indices,
col_indices)
return CSRMat(end_points=dst_end_points,
ind_ptr=dst_ind_ptr,
row_ids=dst_row_ids,
col_ids=dst_col_ids,
values=dst_values)
def summary(self):
print(self.info())
def info(self):
info_str = "Summary" + \
"\n Row={}, Col={}, NNZ={}".format(self.row_ids.size,
self.col_ids.size,
self.end_points.size)
return info_str
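# Usage sketch (illustrative, not part of the original module): CSRMat
# round-trips with scipy and supports positional submatrices, e.g.
#
#   sp = ss.random(5, 4, density=0.5, format='csr', dtype=np.float32)
#   mat = CSRMat.from_spy(sp)
#   sub = mat.submat(row_indices=np.array([0, 2], dtype=np.int32),
#                    col_indices=None)   # rows 0 and 2, all columns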
class SimpleGraph(object):
"""A simple graph container
We use the CSR format to store the adjacency matrix
"""
def __init__(self, node_ids, node_types, undirected=True,
end_points=None, ind_ptr=None, edge_weight=None, adj=None, edge_features=None):
"""Initialize a SimpleGraph
Parameters
----------
node_ids : np.ndarray
Maps the indices to the real node_ids
node_types : np.ndarray
Types of the nodes, 1 --> Train, 2 --> Valid, 3 --> Test
undirected : bool
end_points : np.ndarray
Indices of the end-points of the connections
ind_ptr : np.ndarray
Pointer to the beginning of end_points for a specific array
adj : CSRMat
The CSR matrix that stores the value
edge_features : None or np.ndarray
The edge features, should be None or have shape (node_num, edge_feature_dim)
"""
if adj is not None:
self.adj = adj
else:
assert end_points is not None and ind_ptr is not None
self.adj = CSRMat(end_points=end_points,
ind_ptr=ind_ptr,
row_ids=node_ids,
col_ids=node_ids,
values=edge_weight)
self.degrees = np.floor(np.array(self.adj.to_spy().sum(axis=1))).astype(np.int32).reshape((-1,))
self.node_ids = node_ids.astype(np.int32)
self.node_types = node_types.astype(np.int32)
self.undirected = undirected
self.edge_features = edge_features
self._node_id_reverse_mapping = -1 * np.ones(shape=self.node_ids.max() + 1, dtype=np.int32)
self._node_id_reverse_mapping[self.node_ids] = np.arange(self.node_ids.size, dtype=np.int32)
# self._node_id_reverse_mapping = dict()
# for (i, ele) in enumerate(self.node_ids):
# self._node_id_reverse_mapping[ele] = i
@property
def end_points(self):
return self.adj.end_points.astype(np.int32)
@property
def ind_ptr(self):
return self.adj.ind_ptr.astype(np.int32)
@property
def node_num(self):
return self.node_ids.size
@property
def edge_num(self):
        return self.adj.end_points.size if not self.undirected else self.adj.end_points.size // 2
@property
def avg_degree(self):
return self.adj.end_points.size / self.node_ids.size
def to_networkx(self):
"""Convert to a networkx graph
Returns
-------
ret: networkx.Graph
"""
raise NotImplementedError
def reverse_map(self, node_ids):
"""Maps node ids back to indices in the graph
Parameters
----------
node_ids : np.ndarray or list or tuple or int
Returns
-------
ret : np.ndarray
"""
return self._node_id_reverse_mapping[node_ids]
# if isinstance(node_ids, (np.ndarray, list, tuple)):
# return np.array(list(map(lambda ele: self._node_id_reverse_mapping[ele], node_ids)),
# dtype=np.int32)
# else:
# return self._node_id_reverse_mapping[node_ids]
def subgraph_by_indices(self, indices):
"""Obtain subgraph by index values, i.e., 0, 1, 2, ...
Parameters
----------
indices
Returns
-------
"""
subgraph_adj = self.adj.submat(row_indices=indices, col_indices=indices)
subgraph_node_ids = self.node_ids[indices]
subgraph_node_types = self.node_types[indices]
if self.edge_features is not None:
new_edge_features = self.edge_features[indices, :]
else:
new_edge_features = None
return SimpleGraph(node_ids=subgraph_node_ids,
node_types=subgraph_node_types,
undirected=self.undirected,
adj=subgraph_adj,
edge_features=new_edge_features)
def subgraph_by_node_ids(self, node_ids):
"""Obtain subgraph by the node_ids
For example, the original graph has node ids: (2, 7, 9, 11) and you decide to take a look at the
subgraph that contains nodes (2, 9), call G.subgraph_by_node_ids([2, 9])
Parameters
----------
real_ids
Returns
-------
"""
return self.subgraph_by_indices(self.reverse_map(node_ids))
def save(self, fname):
return np.savez_compressed(fname,
node_ids=self.node_ids,
node_types=self.node_types,
end_points=self.adj.end_points,
ind_ptr=self.adj.ind_ptr,
undirected=np.array((self.undirected,), dtype=np.bool))
@staticmethod
def load(fname):
G_data = np.load(fname)
if 'undirected' in G_data.keys():
if isinstance(G_data['undirected'], list):
undirected = G_data['undirected'][0]
else:
undirected = G_data['undirected']
else:
undirected = True # default value of undirected is true
return SimpleGraph(node_ids=G_data['node_ids'],
node_types=G_data['node_types'],
undirected=undirected,
end_points=G_data['end_points'],
ind_ptr=G_data['ind_ptr'])
@property
def train_indices(self):
all_indices = np.arange(0, self.node_num, dtype=np.int32)
return all_indices[self.node_types == 1]
@property
def train_node_ids(self):
return self.node_ids[self.train_indices]
def fetch_train(self):
return self.subgraph_by_indices(indices=self.train_indices)
@property
def valid_indices(self):
all_indices = np.arange(0, self.node_num, dtype=np.int32)
return all_indices[self.node_types == 2]
@property
def valid_node_ids(self):
return self.node_ids[self.valid_indices]
def fetch_valid(self):
return self.subgraph_by_indices(indices=self.valid_indices)
@property
def test_indices(self):
all_indices = np.arange(0, self.node_num, dtype=np.int32)
return all_indices[self.node_types == 3]
@property
def test_node_ids(self):
return self.node_ids[self.test_indices]
def fetch_test(self):
return self.subgraph_by_indices(indices=self.test_indices)
@property
def train_valid_indices(self):
all_indices = np.arange(0, self.node_num, dtype=np.int32)
return all_indices[self.node_types <= 2]
@property
def train_valid_node_ids(self):
return self.node_ids[self.train_valid_indices]
def fetch_train_valid(self):
return self.subgraph_by_indices(indices=self.train_valid_indices)
def summary(self, graph_name="Graph"):
logging.info(self.info(graph_name))
def info(self, graph_name="Graph", indent_token="\t"):
info_str = indent_token + "Summary of {}\n".format(graph_name) + \
indent_token + indent_token + "Undirected=%s\n" %str(self.undirected) + \
indent_token + indent_token + "Node Number={}, Train={}, Valid={}, Test={}\n".format(self.node_num,
(self.node_types == 1).sum(),
(self.node_types == 2).sum(),
(self.node_types == 3).sum()) + \
indent_token + indent_token + "Edge Number={}\n".format(self.edge_num) + \
indent_token + indent_token + "Avg Degree={}".format(self.avg_degree)
if self.edge_features is not None:
info_str += indent_token + indent_token + "Edge Features Shape={}".format(self.edge_features.shape)
return info_str
def random_walk(self,
initial_node=-1,
walk_length=10000,
return_prob=None,
max_node_num=-1,
max_edge_num=-1):
"""Random Walk
At every step, we will return to the initial node with return_p.
Otherwise, we will jump randomly to a conneted node.
Ref: [KDD06] Sampling from Large Graphs
Parameters
----------
initial_node : int or None
walk_length : int
return_prob : float or None
max_node_num : int or None
max_edge_num : int or None
Returns
-------
G_sampled : SimpleGraph
"""
if initial_node is None:
initial_node = -1
if max_node_num is None:
max_node_num = -1
if max_edge_num is None:
max_edge_num = -1
if return_prob is None:
return_prob = 0.15
subgraph_end_points, subgraph_ind_ptr, subgraph_node_ids =\
_graph_sampler.random_walk(self.end_points,
self.ind_ptr,
self.node_ids,
int(self.undirected),
initial_node,
walk_length,
return_prob,
max_node_num,
max_edge_num)
indices = self.reverse_map(subgraph_node_ids)
subgraph_node_types = self.node_types[indices]
return SimpleGraph(node_ids=subgraph_node_ids,
node_types=subgraph_node_types,
undirected=self.undirected,
end_points=subgraph_end_points,
ind_ptr=subgraph_ind_ptr)
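# Illustrative sketch (added for clarity, not part of the original module): a
# pure-NumPy reference for the restart-walk rule described in the docstring of
# SimpleGraph.random_walk. The production implementation is the C++ extension
# _graph_sampler.random_walk; the function and variable names below are
# assumptions for illustration only.
def _random_walk_reference(end_points, ind_ptr, start, walk_length, return_prob):
    """Return the sorted indices of nodes visited by a walk with restarts."""
    visited = {start}
    cur = start
    for _ in range(walk_length):
        if np.random.rand() < return_prob:
            cur = start  # return to the initial node with probability return_prob
            continue
        begin, end = ind_ptr[cur], ind_ptr[cur + 1]
        if begin == end:
            cur = start  # dangling node: nothing to jump to, restart
            continue
        cur = end_points[np.random.randint(begin, end)]  # jump to a random neighbor
        visited.add(cur)
    return np.array(sorted(visited), dtype=np.int32)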
class HeterGraph(SimpleGraph):
def __init__(self, node_ids, node_types, num_set, node_sets, node_indices_in_set, undirected=True,
end_points=None, ind_ptr=None, adj=None, edge_weight=None, edge_features=None, edge_types=None):
super(HeterGraph, self).__init__(node_ids=node_ids, node_types=node_types, undirected=undirected,
end_points=end_points, ind_ptr=ind_ptr, edge_weight=edge_weight, adj=adj,
edge_features=edge_features)
self._num_set = int(num_set)
self.node_sets = node_sets ## which node set the node belongs to
self.node_indices_in_set = node_indices_in_set ## the position where the node in its set
#self.edge_types = edge_types
@property
def num_set(self):
return self._num_set
def subgraph_by_indices(self, indices):
subgraph_adj = self.adj.submat(row_indices=indices, col_indices=indices)
subgraph_node_ids = self.node_ids[indices]
subgraph_node_types = self.node_types[indices]
subgraph_node_sets = self.node_sets[indices]
subgraph_node_indices_in_set = self.node_indices_in_set[indices]
if self.edge_features is not None:
new_edge_features = self.edge_features[indices, :]
else:
new_edge_features = None
return HeterGraph(node_ids=subgraph_node_ids,
node_types=subgraph_node_types,
num_set=self._num_set,
node_sets=subgraph_node_sets,
node_indices_in_set=subgraph_node_indices_in_set,
adj=subgraph_adj,
undirected=self.undirected,
edge_features=new_edge_features)
def subgraph_by_node_ids(self, node_ids):
return self.subgraph_by_indices(self.reverse_map(node_ids))
def node_sets_by_indices(self, indices):
return self.node_sets[indices]
def node_sets_by_node_ids(self, node_ids):
return self.node_sets_by_indices(self.reverse_map(node_ids))
def node_indices_in_set_by_indices(self, indices):
return self.node_indices_in_set[indices]
def node_indices_in_set_by_node_ids(self, node_ids):
return self.node_indices_in_set_by_indices(self.reverse_map(node_ids))
@staticmethod
def load(fname):
print("Loading a Heterogeneous Graph ...")
G_data = np.load(fname)
if 'undirected' in G_data.keys():
if isinstance(G_data['undirected'], list):
undirected = G_data['undirected'][0]
else:
undirected = G_data['undirected']
else:
undirected = True # default value of undirected is true
if "adj" in G_data and isinstance(G_data["adj"], CSRMat):
return HeterGraph(node_ids=G_data['node_ids'],
node_types=G_data['node_types'],
num_set=G_data['num_set'],
node_sets=G_data['node_sets'],
node_indices_in_set=G_data['node_indices_in_set'],
adj=G_data['adj'],
undirected=undirected)
else:
return HeterGraph(node_ids=G_data['node_ids'],
node_types=G_data['node_types'],
num_set=G_data['num_set'],
node_sets = G_data['node_sets'],
node_indices_in_set=G_data['node_indices_in_set'],
end_points=G_data['end_points'],
ind_ptr=G_data['ind_ptr'],
edge_weight=G_data['edge_weight'],
undirected=undirected)
def save(self, fname):
return np.savez_compressed(fname,
node_ids=self.node_ids,
node_types=self.node_types,
set_num=self._num_set,
node_sets=self.node_sets,
node_indices_in_set=self.node_indices_in_set,
adj=self.adj,
undirected=np.array((self.undirected,), dtype=np.bool))
class BiGraph(SimpleGraph):
def __init__(self, node_ids, node_types, num_node_set, num_edge_set, node_sets, node_indices_in_set, undirected=True,
end_points=None, ind_ptr=None, adj=None, edge_weight=None, edge_features=None):
super(BiGraph, self).__init__(node_ids=node_ids, node_types=node_types, undirected=undirected,
end_points=end_points, ind_ptr=ind_ptr, edge_weight=edge_weight, adj=adj,
edge_features=edge_features)
self._num_node_set = int(num_node_set)
self._num_edge_set = int(num_edge_set)
self.node_sets = node_sets ## which node set the node belongs to SHAPE np.array(num_node, )
self.node_indices_in_set = node_indices_in_set ## the position where the node in its set SHAPE np.array(num_node, )
#self.edge_types = edge_types
@property
def num_node_set(self):
return self._num_node_set
@property
def num_edge_set(self):
return self._num_edge_set
def subgraph_by_indices(self, indices):
subgraph_adj = self.adj.submat(row_indices=indices, col_indices=indices)
subgraph_node_ids = self.node_ids[indices]
subgraph_node_types = self.node_types[indices]
subgraph_node_sets = self.node_sets[indices]
subgraph_node_indices_in_set = self.node_indices_in_set[indices]
new_edge_features = self.edge_features[indices, :] if self.edge_features is not None else None
return BiGraph(node_ids=subgraph_node_ids,
node_types=subgraph_node_types,
num_node_set=self._num_node_set,
num_edge_set=self._num_edge_set,
node_sets=subgraph_node_sets,
node_indices_in_set=subgraph_node_indices_in_set,
adj=subgraph_adj,
undirected=self.undirected,
edge_features=new_edge_features)
def subgraph_by_node_ids(self, node_ids):
return self.subgraph_by_indices(self.reverse_map(node_ids))
def node_sets_by_indices(self, indices):
return self.node_sets[indices]
def node_sets_by_node_ids(self, node_ids):
return self.node_sets_by_indices(self.reverse_map(node_ids))
def node_indices_in_set_by_indices(self, indices):
return self.node_indices_in_set[indices]
def node_indices_in_set_by_node_ids(self, node_ids):
return self.node_indices_in_set_by_indices(self.reverse_map(node_ids))
@staticmethod
def load(fname):
print("Loading a Bipartite Graph ...")
G_data = np.load(fname)
if 'undirected' in G_data.keys():
if isinstance(G_data['undirected'], list):
undirected = G_data['undirected'][0]
else:
undirected = G_data['undirected']
else:
undirected = True # default value of undirected is true
if "adj" in G_data and isinstance(G_data["adj"], CSRMat):
return BiGraph(node_ids=G_data['node_ids'],
node_types=G_data['node_types'],
num_node_set=G_data['num_node_set'],
num_edge_set=G_data['num_edge_set'],
node_sets=G_data['node_sets'],
node_indices_in_set=G_data['node_indices_in_set'],
adj=G_data['adj'],
undirected=undirected)
else:
return BiGraph(node_ids=G_data['node_ids'],
node_types=G_data['node_types'],
num_node_set=G_data['num_node_set'][0],
num_edge_set=G_data['num_edge_set'][0],
node_sets = G_data['node_sets'],
node_indices_in_set=G_data['node_indices_in_set'],
end_points=G_data['end_points'],
ind_ptr=G_data['ind_ptr'],
edge_weight=G_data['edge_weight'],
undirected=undirected)
def save(self, fname):
return np.savez_compressed(fname,
node_ids=self.node_ids,
node_types=self.node_types,
num_node_set=self._num_node_set,
num_edge_set=self._num_edge_set,
node_sets=self.node_sets,
node_indices_in_set=self.node_indices_in_set,
adj=self.adj,
undirected=np.array((self.undirected,), dtype=np.bool))
def check_subgraph(G_sampled, G_all):
"""Check whether G_sampled is a subgraph of G_all
Parameters
----------
G_sampled : SimpleGraph
G_all : SimpleGraph
Returns
-------
correct : bool
"""
correct = True
for id_index in range(G_sampled.node_ids.size):
sampled_node_id = G_sampled.node_ids[id_index]
sampled_node_neighbor_id = G_sampled.node_ids[G_sampled.end_points[G_sampled.ind_ptr[id_index]:
G_sampled.ind_ptr[id_index + 1]]]
# print("sampled_node_id: {}".format(sampled_node_id),"\n",
# "\t sampled_neighbor_ids: {}".format(sampled_node_neighbor_id))
G_all_idx = G_all.reverse_map(sampled_node_id)
node_neighbor_id = frozenset(G_all.node_ids[G_all.end_points[G_all.ind_ptr[G_all_idx]:
G_all.ind_ptr[G_all_idx + 1]]].tolist())
# print("node_id: {}".format(node_id), "\n",
# "\t neighbor_ids: {}".format(node_neighbor_id))
for end_id in sampled_node_neighbor_id:
if end_id not in node_neighbor_id:
print("Wrong edge:", G_sampled.node_ids[id_index], end_id)
return False
return correct
if __name__ == '__main__':
from mxgraph.config import cfg
import cProfile, pstats
cfg.DATA_NAME = 'ppi'
from mxgraph.iterators import cfg_data_loader
import time
set_seed(100)
G_all, features, labels, num_class = cfg_data_loader()
########################################################
############# Run Graph Sampling Algorithm #############
########################################################
G_train = G_all.fetch_train()
# pr = cProfile.Profile()
# pr.enable()
start = time.time()
G_sampled_small = G_train.random_walk(initial_node=None,
return_prob=0.0,
walk_length=10000,
max_node_num=2000,
max_edge_num=None)
end = time.time()
print('Time spent for random walk sampling, sample %d nodes: %g'%(2000, end - start))
G_sampled_sub_set = G_train.subgraph_by_node_ids(G_sampled_small.node_ids)
G_sampled_sub_set.summary("Random Walk Node Subgraph")
G_sampled_random_node = G_train.subgraph_by_node_ids(np.random.choice(G_train.node_ids, size=2000, replace=False))
G_sampled_random_node.summary("Random Node Subgraph")
start = time.time()
G_sampled = G_train.random_walk(initial_node=None,
return_prob=0.15,
walk_length=2000,
max_node_num=None,
max_edge_num=None)
end = time.time()
# pr.disable()
# ps = pstats.Stats(pr).sort_stats('cumulative')
# ps.print_stats(10)
print('Time spent for random walk:', end - start)
G_sampled.summary('Random Walk Subgraph')
print("==================================== Graph Sampling Finished ============================================\n")
########################################################
### testing the correctness of the samping algorithm ###
########################################################
print("Testing the correctness of the samping algorithm...")
correct = check_subgraph(G_sampled=G_sampled, G_all=G_train)
if correct:
print("Correctness Test Passed, G_sampled is a subgraph of G_train!")
else:
raise RuntimeError("Fail Test!")
correct = check_subgraph(G_sampled=G_sampled, G_all=G_all)
if correct:
print("Correctness Test Passed, G_sampled is a subgraph of G_all!")
else:
raise RuntimeError("Fail Test!")
print("==================================== Sampling Correctness Test Finished! ============================================\n")
| UTF-8 | Python | false | false | 28,928 | py | 23 | graph.py | 19 | 0.524302 | 0.519047 | 0 | 708 | 39.857345 | 132 |
cm0ore/ensem_cctbx | 13,013,750,954,008 | d02f9ccdbada04340b06be348dee10216dc84d3b | 810c43b318501dbd319d21a1d68c95d7f13329e4 | /ensem_cctbx_mapmask.py | 15a12ff8aaee6da7c609a8bea300e93473a12c7f | []
| no_license | https://github.com/cm0ore/ensem_cctbx | 7aef3079acdb8387597f23b41d7b863a1fe1ea63 | 71441a3d99439f97463c90d17b64c43e01862304 | refs/heads/main | 2023-02-24T12:30:42.131955 | 2021-01-28T22:26:05 | 2021-01-28T22:26:05 | 332,935,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Generates a shifted waterless_model, map, and mask and also shifts the original model by the same amount
#inputs are waterless_pdb_file and original_pdb_file
import sys
import os
from iotbx.data_manager import DataManager # load in DataManager
dm = DataManager() # Get an initialized version as dm
dm.set_overwrite(True)
from iotbx.map_model_manager import map_model_manager # load in the map_model_manager
mmm=map_model_manager() # get an initialized instance of the map_model_manager
no_water_pdb = sys.argv[1]
model_filename = sys.argv[2]
#create map from waterless model and write new files
file_basename = os.path.basename(model_filename).split('.pdb')[0]
mmm.generate_map(file_name=no_water_pdb, map_id='new_map', box_cushion=1)
mmm.write_model("%s_shifted.pdb" % file_basename)
mmm.write_map("%s_shifted_map.mrc" % file_basename)
mmm.create_mask_around_atoms() # create binary mask around atoms in the model
mmm.apply_mask_to_maps()
dm.write_real_map_file(mmm.map_manager() ,filename="%s_shifted_masked.mrc" % file_basename) # masked
#shift coords of model_file with solvent
model = dm.get_model("%s_shifted.pdb" % file_basename) #shifted and waterless
with_water_model = dm.get_model(model_filename)
sites_cart = with_water_model.get_sites_cart() # get coordinates of atoms in Angstroms
from scitbx.matrix import col # import a tool that handles vectors
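# Comment added for clarity: the origin shift that generate_map applied to the
# waterless model is recovered by differencing the first atom's coordinates of
# the shifted and original models, then applied to every atom of the solvated
# model below.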
shift = tuple(i - j for i,j in zip(model.get_sites_cart()[0], with_water_model.get_sites_cart()[0]))
sites_cart += col(shift)
with_water_model.set_sites_cart(sites_cart)
print(with_water_model.get_sites_cart()[0]) # print coordinate of first atom
dm.write_model_file(with_water_model, "shifted_with_water.pdb", overwrite=True)
| UTF-8 | Python | false | false | 1,799 | py | 2 | ensem_cctbx_mapmask.py | 1 | 0.714286 | 0.710395 | 0 | 35 | 50.314286 | 106 |
zilani-09/Task_String_with_Python | 12,661,563,630,206 | 856c768da05df02d988187f5d3c85a885ab55f62 | dd34a06366df03da881452b10836727d72474a35 | /listComprehension.py | 7d04d5f297f3ee1fa7208f475cb486da28eb883e | []
| no_license | https://github.com/zilani-09/Task_String_with_Python | ebcbae36d3bc1c43fc563602afd8c0a38b595c92 | 33e9dc75ec37822bf4c8b744202e9e4c69d2b16b | refs/heads/master | 2023-02-21T02:50:06.999599 | 2020-01-29T08:24:51 | 2020-01-29T08:24:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = [2,3,4]
d = [i*i for i in a]
print(d)
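# For comparison, the same result computed with an explicit loop
# (added illustration; `a` is the list defined above):
d2 = []
for i in a:
    d2.append(i * i)
print(d2)  # prints [4, 9, 16], same as the comprehension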
| UTF-8 | Python | false | false | 53 | py | 40 | listComprehension.py | 40 | 0.377358 | 0.320755 | 0 | 4 | 9.75 | 20 |
Anova07/Competitive-programing | 11,862,699,717,639 | d0b304377bd437df2988978c2632e973b05c8265 | 6d03a69bbee0325d6c37c616aec126c07cd9cd85 | /HackerRank/HalloweenSales.py | 565f9a59a46a0bdf7c0fc423ba93f16655a91b2d | [
"Unlicense"
]
| permissive | https://github.com/Anova07/Competitive-programing | 7863fe64fbd533eef99e944f11e8bd0d0aa575d9 | 3e28cd1d852e620ee1edbef0c3c4da739ef7fd9b | refs/heads/main | 2023-08-12T20:00:37.924824 | 2021-10-15T06:32:19 | 2021-10-15T06:32:19 | 417,391,339 | 0 | 0 | Unlicense | true | 2021-10-15T06:29:57 | 2021-10-15T06:29:56 | 2021-10-10T10:38:36 | 2021-10-14T14:32:18 | 458 | 0 | 0 | 0 | null | false | false | import sys
import math
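# HackerRank "Halloween Sale": game prices form the arithmetic sequence
# p, p-d, p-2*d, ... floored at m per game. howManyGames computes how many
# games s units of currency buy, using the series sum S_n = n*(first+last)/2
# and the quadratic formula to locate the cutoff index.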
def howManyGames(p, d, m, s):
t_l = 0
if p % d != 0:
t_l = p % d
else:
t_l = d
d = -d
t_n = ((t_l-p)/d) + 1
t_sum = (t_n*(p+t_l))/2
if s > t_sum:
n = ((m-p)/d) + 1
l = p+((n-1)*d)
sn = (n*(p+l))/2
return n+((s-sn)/m)
else:
x1 = abs(int(((-((2*p)-d))+math.sqrt((((2*p)-d)**2)-((4*d)*(-(2*s)))))/(2*d)))
x2 = abs(int(((-((2*p)-d))-math.sqrt((((2*p)-d)**2)-((4*d)*(-(2*s)))))/(2*d)))
l = p+((x1-1)*d)
s1 = (x1*(p+l))/2
l = p+((x2-1)*d)
s2 = (x2*(p+l))/2
if s1 <= s:
return x1
else:
return x2
if __name__ == "__main__":
p, d, m, s = raw_input().strip().split(' ')
p, d, m, s = [int(p), int(d), int(m), int(s)]
answer = howManyGames(p, d, m, s)
print answer
| UTF-8 | Python | false | false | 867 | py | 21 | HalloweenSales.py | 21 | 0.355248 | 0.316032 | 0 | 35 | 23.771429 | 86 |
winksaville/fuchsia | 4,827,543,250,562 | ccc51e4c0c91ce8853d84c6cb32382eddb916476 | 5499e8b91353ef910d2514c8a57a80565ba6f05b | /tools/fidl/difl/comparator.py | edc2dd8eb3b8ed3ce10e7a168444951ca62e1b14 | [
"BSD-3-Clause"
]
| permissive | https://github.com/winksaville/fuchsia | 410f451b8dfc671f6372cb3de6ff0165a2ef30ec | a0ec86f1d51ae8d2538ff3404dad46eb302f9b4f | refs/heads/master | 2022-11-01T11:57:38.343655 | 2019-11-01T17:06:19 | 2019-11-01T17:06:19 | 223,695,500 | 3 | 2 | BSD-3-Clause | false | 2022-10-13T13:47:02 | 2019-11-24T05:08:59 | 2020-08-07T10:10:53 | 2022-10-13T13:47:02 | 428,325 | 1 | 2 | 6 | C++ | false | false | # Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from difl.ir import *
import typing
__all__ = ['Comparator']
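# In FIDL terms, a "shape" change alters a type's wire layout (its size or the
# placement of its members), while a "constraint" change alters validation
# rules without changing the layout. Comparator memoizes per-identifier
# results and keeps match stacks to notice cycles between recursive types.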
class Comparator:
def __init__(self):
self.identifier_shapes_match: typing.Dict[str, bool] = {}
self.identifier_constraints_match: typing.Dict[str, bool] = {}
# notice cycles when comparing shapes & contraints
self.shape_match_stack: typing.List[str] = []
self.constraint_match_stack: typing.List[str] = []
def shapes_match(self, before: Type, after: Type) -> bool:
'''
Compares two types for shape
'''
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType) and \
before.identifier == after.identifier and \
                not before.is_nullable and not after.is_nullable:
if before.identifier not in self.identifier_shapes_match:
assert before.identifier not in self.shape_match_stack
self.shape_match_stack.append(before.identifier)
self.identifier_shapes_match[
before.identifier] = self._shapes_match(before, after)
assert before.identifier == self.shape_match_stack.pop()
return self.identifier_shapes_match[before.identifier]
return self._shapes_match(before, after)
def _shapes_match(self, before: Type, after: Type) -> bool:
# identical types are identical
if before == after and not isinstance(before, IdentifierType):
return True
# different sized types are incompatible
if before.inline_size != after.inline_size:
return False
########## Handles, Protocols and Requests
# handles are compatible with handles
if isinstance(before, (ProtocolIdentifierType, RequestType, HandleType)) and \
isinstance(after, (ProtocolIdentifierType, RequestType, HandleType)):
return True
########## Primitives
# compare primitives
if isinstance(before, PrimitiveType) and \
isinstance(after, PrimitiveType):
return before.inline_size == after.inline_size and before.is_float == after.is_float
########## Enums and Bits
# compare enums, bits and integer primitives
if isinstance(before, (PrimitiveType, EnumIdentifierType, BitsIdentifierType)) and \
isinstance(after, (PrimitiveType, EnumIdentifierType, BitsIdentifierType)):
# get the primitive or underlying type
b_prim = before if isinstance(before,
PrimitiveType) else before.primitive
a_prim = after if isinstance(after,
PrimitiveType) else after.primitive
assert b_prim.inline_size == a_prim.inline_size
return b_prim.is_float == a_prim.is_float
########## Arrays
if isinstance(before, ArrayType) != isinstance(after, ArrayType):
# arrays and not-arrays are incompatible
return False
if isinstance(before, ArrayType) and isinstance(after, ArrayType):
if before.count != after.count:
# changing the size is incompatible
return False
# compatibility is based on the member types
return self.shapes_match(before.element_type, after.element_type)
########## Vectors and Strings
if isinstance(before, (VectorType, StringType)) and \
isinstance(after, (VectorType, StringType)):
return self.shapes_match(before.element_type, after.element_type)
########## Identifiers
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType):
if type(before) != type(after):
# identifier types changing is a different shape
return False
if before.identifier != after.identifier:
# TODO: deal with renames?
return False
if isinstance(before, (XUnionIdentifierType, TableIdentifierType)):
# never a shape change
return True
if before.is_nullable or after.is_nullable:
if before.is_nullable != after.is_nullable:
if isinstance(before, XUnionIdentifierType):
# Nullability is soft change for xunions
return True
else:
# No other types should have nullability
assert isinstance(
before,
(StructIdentifierType, UnionIdentifierType))
# Nullability changes layout for structs and unions
return False
else:
# both nullable, no layout change
return True
# both not-nullable
if isinstance(before, StructIdentifierType) and \
isinstance(after, StructIdentifierType):
# TODO: support shape-compatible struct member changes here? like joins & splits?
b_members = before.declaration.members
a_members = after.declaration.members
if len(b_members) != len(a_members):
return False
if len(b_members) == 0:
# all empty structs are the same
return True
return all(
self.shapes_match(b.type, a.type)
for b, a in zip(b_members, a_members))
if isinstance(before, UnionIdentifierType) and \
isinstance(after, UnionIdentifierType):
b_union_members = before.declaration.members
a_union_members = after.declaration.members
if len(b_union_members) != len(a_union_members):
return False
return all(
self.shapes_match(b.type, a.type)
for b, a in zip(b_union_members, a_union_members))
raise NotImplementedError(
"Don't know how to compare shape for %r (%r) and %r (%r)" %
(type(before), before, type(after), after))
def constraints_match(self, before: Type, after: Type) -> bool:
'''
Compares two types for constraints
'''
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType) and \
before.identifier == after.identifier:
if before.identifier not in self.identifier_constraints_match:
if before.identifier in self.constraint_match_stack:
# hit a cycle
return True
self.constraint_match_stack.append(before.identifier)
self.identifier_constraints_match[before.identifier] = \
self._constraints_match(before, after)
assert before.identifier == self.constraint_match_stack.pop()
return self.identifier_constraints_match[before.identifier]
return self._constraints_match(before, after)
def _constraints_match(self, before: Type, after: Type) -> bool:
if not self.shapes_match(before, after):
# shape is the ultimate constraint
return False
if type(before) != type(after):
# changing the type of the type breaks constraints
return False
########## Primitives
if isinstance(before, PrimitiveType) and \
isinstance(after, PrimitiveType):
return before.subtype == after.subtype
########## Strings
if isinstance(before, StringType) and isinstance(after, StringType):
return before.limit == after.limit and \
before.is_nullable == after.is_nullable
########## Vectors
if isinstance(before, VectorType) and isinstance(after, VectorType):
return before.limit == after.limit and \
before.is_nullable == after.is_nullable and \
self.constraints_match(before.element_type, after.element_type)
########## Arrays
if isinstance(before, ArrayType) and isinstance(after, ArrayType):
assert before.count == after.count
return self.constraints_match(before.element_type,
after.element_type)
########## Handles
if isinstance(before, HandleType) and isinstance(after, HandleType):
return before.handle_type == after.handle_type and \
before.is_nullable == after.is_nullable
if isinstance(before, NullableType) and \
isinstance(after, NullableType):
# nullability changes are constraints changes
if before.is_nullable != after.is_nullable:
return False
if isinstance(before, RequestType) and isinstance(after, RequestType):
return before.protocol == after.protocol
if isinstance(before, ProtocolIdentifierType) and \
isinstance(after, ProtocolIdentifierType):
return before.identifier == after.identifier
if isinstance(before, StructIdentifierType) and \
isinstance(after, StructIdentifierType):
b_struct_members = before.declaration.members
a_struct_members = after.declaration.members
assert len(b_struct_members) == len(a_struct_members)
if len(b_struct_members) == 0:
# all empty structs are the same
return True
return all(
self.constraints_match(b.type, a.type)
for b, a in zip(b_struct_members, a_struct_members))
if isinstance(before, TableIdentifierType) and \
isinstance(after, TableIdentifierType):
b_table_members: typing.Dict[int, TableMember] = {
m.ordinal: m
for m in before.declaration.members
}
a_table_members: typing.Dict[int, TableMember] = {
m.ordinal: m
for m in after.declaration.members
}
for ordinal, b_member in b_table_members.items():
a_member = a_table_members.get(ordinal)
if a_member is None:
# leaving out an ordinal breaks constraints
return False
if b_member.reserved or a_member.reserved:
# changing to/from reserved is fine
continue
if not self.constraints_match(b_member.type, a_member.type):
return False
# it's fine if more members were added to after
return True
if isinstance(before, UnionIdentifierType) and \
isinstance(after, UnionIdentifierType):
b_union_members = before.declaration.members
a_union_members = after.declaration.members
if len(b_union_members) != len(a_union_members):
return False
# empty unions are illegal
assert len(b_union_members) != 0
return all(
self.constraints_match(b.type, a.type)
for b, a in zip(b_union_members, a_union_members))
if isinstance(before, XUnionIdentifierType) and \
isinstance(after, XUnionIdentifierType):
# Note: this is applying a strict-mode interpretation
b_xunion_members = before.declaration.members
a_xunion_members = after.declaration.members
if len(b_xunion_members) != len(a_xunion_members):
return False
# empty xunions are illegal
assert len(b_xunion_members) > 0
# members by ordinal
b_members = {m.ordinal: m for m in b_xunion_members}
a_members = {m.ordinal: m for m in a_xunion_members}
# they both have the same set of ordinals
if frozenset(b_members.keys()) != frozenset(a_members.keys()):
return False
return all(
self.constraints_match(b_members[o].type, a_members[o].type)
for o in b_members.keys())
if isinstance(before, EnumIdentifierType) and \
isinstance(after, EnumIdentifierType):
# this is the strict-mode interpretation of enums
assert len(before.declaration.members) == \
len(after.declaration.members)
before_member_values = set(
m.value for m in before.declaration.members)
after_member_values = set(
m.value for m in after.declaration.members)
return before_member_values == after_member_values
if isinstance(before, BitsIdentifierType) and \
isinstance(after, BitsIdentifierType):
# this is the strict-mode interpretation of bits
return before.declaration.mask == after.declaration.mask
raise NotImplementedError(
"Don't know how to compare constraints for %r (%r) and %r (%r)" %
(type(before), before, type(after), after))
| UTF-8 | Python | false | false | 13,431 | py | 8,203 | comparator.py | 5,625 | 0.575162 | 0.574566 | 0 | 299 | 43.919732 | 97 |
topatlant/AdventOfCode-python | 16,518,444,248,356 | dff728a11ae49ca508ef750518a3dd4c70d50faf | ea8fce92f032af8920b47309c1881adacadb09a8 | /y2020/test/test_day05.py | 238642e19b5d5f4d8c90504ae4f1c866e9664e58 | [
"MIT"
]
| permissive | https://github.com/topatlant/AdventOfCode-python | 6ec361967dd571cbdb9b5687e0a39295ef51a503 | 0e6db6c844f99d7ddf009862165646d83c6c6474 | refs/heads/master | 2023-01-20T23:48:36.331972 | 2022-12-22T11:02:48 | 2022-12-22T11:02:48 | 160,153,655 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from y2020.day05 import *
def test_get_row_col():
assert extract_row_col("FBFBBFFRLR") == (44, 5)
assert extract_row_col("BFFFBBFRRR") == (70, 7)
assert extract_row_col("FFFBBBFRRR") == (14, 7)
assert extract_row_col("BBFFBBFRLL") == (102, 4)
def test_get_seat_id():
assert get_seat_id("FBFBBFFRLR") == 357
assert get_seat_id("BFFFBBFRRR") == 567
assert get_seat_id("FFFBBBFRRR") == 119
assert get_seat_id("BBFFBBFRLL") == 820
| UTF-8 | Python | false | false | 463 | py | 82 | test_day05.py | 77 | 0.632829 | 0.565875 | 0 | 15 | 29.866667 | 52 |
JabezThian/System_Security | 5,111,011,086,699 | c0cb1dbe0789282622744876e9b771f09d34abc7 | 365a3bb8cbbaf38fd204bdb06a22336bc1ba0ae6 | /Resend.py | eae7e097ee80b48abd69ed4716ad09173fefe9bd | []
| no_license | https://github.com/JabezThian/System_Security | 19484df27556b31f33a3f0d041442035299bbb52 | dbcf9210583ee79d06495655e678584dc3bbab64 | refs/heads/main | 2023-07-14T09:29:20.053712 | 2021-08-21T06:42:48 | 2021-08-21T06:42:48 | 378,038,704 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Resend:
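    """Plain data holder for a user's resend/registration details
    (NRIC, contact and background fields), exposed through getters and setters."""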
def __init__(self, nric, email, age, gender, nationality, language, phoneno, quali, industry):
self.__nric = nric
self.__email = email
self.__age = age
self.__gender = gender
self.__nationality = nationality
self.__language = language
self.__phoneno = phoneno
self.__quali = quali
self.__industry = industry
def get_nric(self):
return self.__nric
def get_email(self):
return self.__email
def get_age(self):
return self.__age
def get_gender(self):
return self.__gender
def get_nationality(self):
return self.__nationality
def get_language(self):
return self.__language
def get_phoneno(self):
return self.__phoneno
def get_quali(self):
return self.__quali
def get_industry(self):
return self.__industry
def set_nric(self, nric):
self.__nric = nric
def set_email(self, email):
self.__email = email
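    def set_age(self, age):
        # Added for symmetry with get_age (assumption: age should be settable too).
        self.__age = age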
def set_gender(self, gender):
self.__gender = gender
def set_nationality(self, nationality):
self.__nationality = nationality
    def set_language(self, language):
self.__language = language
def set_phoneno(self, phoneno):
self.__phoneno = phoneno
def set_quali(self, quali):
self.__quali = quali
def set_industry(self, industry):
self.__industry = industry
| UTF-8 | Python | false | false | 1,460 | py | 43 | Resend.py | 12 | 0.582877 | 0.582877 | 0 | 62 | 22.548387 | 98 |
SaileshShahri/ecommerce-site | 19,138,374,310,482 | 1fe6c2f988d6b1b4854faf6cf79ebadde10fa58f | 55c94b4dd1ead177b037939df4f8ff13ee1eff8f | /order/models.py | eee2420d1524116693537215163a249515445bfc | []
| no_license | https://github.com/SaileshShahri/ecommerce-site | 2062205b54a4e340021e097989b74dfcdf15b6bc | f293af9525b2025a90855aa3098b9f88c66b2a38 | refs/heads/master | 2022-07-18T23:58:20.764796 | 2020-05-18T11:55:39 | 2020-05-18T11:55:39 | 264,920,445 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.db.models.signals import pre_save, post_save
from product.models import Product, ProductVariant
from address.models import Address
# from payment.models import Payment
from main.utils import unique_order_id_generator, unique_key_generator

User = settings.AUTH_USER_MODEL
PAYMENT_CHOICES = (
("Pay on Delivery", "Pay on Delivery"),
("Debit Card", "Debit Card"),
("Credit Card", "Credit Card"),
("Net Banking", "Net Banking"),
)
class Order(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
address = models.ForeignKey(Address, on_delete=models.CASCADE)
payment = models.CharField(max_length=50, choices=PAYMENT_CHOICES)
order_date = models.DateTimeField(auto_now_add=True)
order_id = models.SlugField(null=True, blank=True, unique=True)
confirmed = models.BooleanField(default=False) # Seller confirmation
class Meta:
ordering = ["-order_date"]
def get_absolute_url(self):
return reverse('order-detail', kwargs={"order_id": self.order_id})
def order_pre_save_receiver(sender, instance, *args, **kwargs):
if not instance.order_id:
instance.order_id = unique_order_id_generator(instance)
pre_save.connect(order_pre_save_receiver, sender=Order)
QUANTITY_CHOICES = (
("1", "1"),
("2", "2"),
("3", "3"),
("4", "4"),
("5", "5"),
("6", "6"),
("7", "7"),
("8", "8"),
("9", "9"),
("10", "10"),
)
class OrderProduct(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
variant = models.ForeignKey(ProductVariant, on_delete=models.CASCADE, null=True, blank=True)
quantity = models.CharField(default="1", choices=QUANTITY_CHOICES, max_length=2)
slug = models.SlugField(null=True, blank=True, unique=True)
def get_seller_absolute_url(self):
return reverse('seller-order-detail', kwargs={"slug": self.slug})
def order_product_pre_save_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_key_generator(instance)
pre_save.connect(order_product_pre_save_receiver, sender=OrderProduct)
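# Example usage (hypothetical shell session; user/address/product instances
# are assumed to exist):
#   order = Order.objects.create(user=user, address=address, payment="Debit Card")
#   OrderProduct.objects.create(order=order, product=product, quantity="2")
#   order.get_absolute_url()  # resolves 'order-detail' with the generated order_id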
| UTF-8 | Python | false | false | 2,296 | py | 67 | models.py | 40 | 0.6973 | 0.685976 | 0 | 71 | 30.338028 | 93 |
mhgharieb/MILP_Division_Property | 1,434,519,083,741 | 138542da5d7929d76e1e52fa45bab02c7f22a019 | 3b13edf19c244501fc1d0718aecec69c9019ec4a | /algorithm2/main.py | fcb6a7700ce6947de8fcf42ad537924185c71457 | []
| no_license | https://github.com/mhgharieb/MILP_Division_Property | 5a780093c972d9b07e1358588914cd3d9aaa4fdc | d1eeebb04e4f2c1959615c15dc683a08786a86e2 | refs/heads/master | 2021-04-29T15:09:06.137941 | 2018-02-16T20:37:16 | 2018-02-16T20:37:16 | 121,791,898 | 0 | 1 | null | true | 2018-02-16T19:31:38 | 2018-02-16T19:31:38 | 2017-08-08T01:10:04 | 2016-12-13T01:55:21 | 163 | 0 | 0 | 0 | null | false | null | # Algorithm 2 presented in paper "Applying MILP Method to Searching Integral
# Distinguishers based on Division Property for 6 Lightweight Block Ciphers"
# For the paper itself, please refer to https://eprint.iacr.org/2016/857
# For more information, feedback or questions, please contact xiangzejun@iie.ac.cn
# Implemented by Xiang Zejun, State Key Laboratory of Information Security,
# Institute Of Information Engineering, CAS
from sbox import Sbox
if __name__ == "__main__":
# PRESENT Sbox
cipher = "PRESENT"
sbox = [0xc, 0x5, 0x6, 0xb, 0x9, 0x0, 0xa, 0xd, 0x3, 0xe, 0xf, 0x8, 0x4, 0x7, 0x1, 0x2]
present = Sbox(sbox)
filename = cipher + "_DivisionTrails.txt"
present.PrintfDivisionTrails(filename)
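# Running this module writes every division trail of the PRESENT S-box to
# PRESENT_DivisionTrails.txt; the Sbox class and its PrintfDivisionTrails
# method are defined in sbox.py, which is not shown here.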
| UTF-8 | Python | false | false | 725 | py | 16 | main.py | 11 | 0.73931 | 0.691034 | 0 | 23 | 30.521739 | 88 |
SeanZicari/sfxmanager | 10,883,447,149,515 | b1e275c7c19eb5612930f308f0c6a4d4703ce43e | 75635a3621ff9e14ad4e2162e3597d497d19197d | /src/soundserver/server.py | 7df2aec68c71ed6ea12cce40e6cf42f8e65f41ef | []
| no_license | https://github.com/SeanZicari/sfxmanager | 143e4adfc290fc647a5c9c517010450396846575 | 6074a1a8c2879127645e0558142729bc87583d5a | refs/heads/master | 2016-09-06T12:54:15.698338 | 2015-04-18T21:48:21 | 2015-04-18T21:48:21 | 34,175,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import zmq
from zmq.eventloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
CONTROL_PORT = 5600
class SoundServer(object):
def __init__(self):
self._context = zmq.Context()
self._socket = self._context.socket(zmq.REP)
self._socket.bind('tcp://127.0.0.1:{0}'.format(CONTROL_PORT))
self._stream = ZMQStream(self._socket)
self._stream.on_recv(self._handle_msg)
def start(self):
IOLoop.instance().start()
def _handle_msg(self, msg):
method = '_handler_{0}'.format(msg[0].decode("utf-8"))
try:
print("Trying method {0}".format(method))
getattr(self, method)()
except AttributeError:
sys.exit(1)
def _handler_hi(self):
self._socket.send_string('sup?')
    def _handler_exit(self):
        # Stub completed (assumption): reply first so the REQ peer is not left
        # blocked, then shut down the event loop.
        self._socket.send_string('bye')
        IOLoop.instance().stop()
if __name__ == '__main__':
SoundServer().start()
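# Example client for manual testing (hypothetical, not part of this module):
#   import zmq
#   sock = zmq.Context().socket(zmq.REQ)
#   sock.connect('tcp://127.0.0.1:5600')
#   sock.send(b'hi')
#   print(sock.recv())  # -> b'sup?'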
| UTF-8 | Python | false | false | 911 | py | 10 | server.py | 4 | 0.588364 | 0.570801 | 0 | 40 | 21.775 | 69 |
PatrickDRusk/pdrtools | 10,514,079,980,833 | e8948b7f67e72e26f23360a301916bf8dc25803e | 27982e0aabcd947bc7f26171c1447b16a149bd53 | /pdrtools/slim_price_data_reader.py | 41e87db2838b92576644d386ffc09267da04c0a4 | []
| no_license | https://github.com/PatrickDRusk/pdrtools | 9db3c904b2cf4d0e1ef2981204191e27c0fc69ad | 958d42fce4d788b2c97611155880001d7755f237 | refs/heads/master | 2021-01-10T16:03:13.796790 | 2020-02-26T20:50:16 | 2020-02-26T20:50:16 | 55,615,425 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
"""
Lambda functions for the slim_price_data_reader.
To deploy:
zip /tmp/slim_price_data_reader.zip slim_price_data_reader.py
aws lambda create-function --function-name pdr_price_data --runtime python2.7 \
--role arn:aws:iam::528461152743:role/TradingOpsLambda --handler slim_price_data_reader.main \
--zip-file fileb:///tmp/slim_price_data_reader.zip --publish
To delete the function:
    aws lambda delete-function --function-name pdr_price_data
"""
import cPickle
import os
import time
import boto3
current_millis = lambda: int(round(time.time() * 1000))
COMDTYS = (
"CO_COMDTY",
)
MAQUILAS = (
"CO_COMDTY:FRONT:V1",
)
BUCKET = None
PREFIX = 'pdr/blobz'
DO_WRITES = False
WRITE_CONTRACT_DATA = True
RETURN_STRS = []
def read_blob(sec_name, contract_name, category, blob_name):
path = os.path.join(*filter(None, [PREFIX, sec_name, category, contract_name, blob_name]))
pstr = BUCKET.Object(path).get().get('Body').read()
data = cPickle.loads(pstr)
return data
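# e.g. read_blob("CO_COMDTY", "0000", "DAILY", "close") fetches and unpickles
# s3://cm-engineers/pdr/blobz/CO_COMDTY/DAILY/0000/close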
def log_millis(millis, pattern):
delta = current_millis() - millis
print(pattern + str(delta))
RETURN_STRS.append(pattern + str(delta))
return current_millis()
def read_symbol(sec_name):
millis = current_millis()
md_dict = read_blob(sec_name, None, None, 'metadata')
millis = log_millis(millis, "Time to read %s metadata: " % sec_name)
contract_names = sorted(md_dict['expiry_map'].keys())
for contract_name in contract_names:
# noinspection PyUnusedLocal
contract_df = read_blob(sec_name, contract_name, "DAILY", 'close')
log_millis(millis, "Time to read all %s contract prices: " % sec_name)
def read_symbol_big(sec_name):
millis = current_millis()
md_dict = read_blob(sec_name, None, None, 'metadata')
millis = log_millis(millis, "Time to read %s metadata: " % sec_name)
contract_names = sorted(md_dict['expiry_map'].keys())
for contract_name in contract_names:
# noinspection PyUnusedLocal
contract_df = read_blob(sec_name, contract_name, "DAILY", 'all')
log_millis(millis, "Time to read all %s contract prices (big): " % sec_name)
def read_maquila(sec_name, contract_name=None, col='return'):
millis = current_millis()
data = read_blob(sec_name, contract_name, "DAILY", col)
log_millis(millis, "Time to read %s returns: " % sec_name)
return data
def main():
global BUCKET
s3 = boto3.resource('s3')
BUCKET = s3.Bucket('cm-engineers')
for sec_name in COMDTYS:
read_blob(sec_name, None, None, 'metadata') # to prime the timings
read_maquila(sec_name, "0000", col='all')
read_symbol(sec_name)
read_symbol_big(sec_name)
for maquila_key in MAQUILAS:
read_maquila(maquila_key)
return RETURN_STRS
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,927 | py | 18 | slim_price_data_reader.py | 14 | 0.661428 | 0.651862 | 0 | 112 | 25.133929 | 98 |
matt-gardner/pnp | 16,063,177,692,535 | 6721bd40211d46d78d2e3e2318e9ffd39c08ad27 | a897833e9f71bb03f7117fc5826eb0d48f1a2a52 | /experiments/dqa/scripts/generate_diagram_feats.py | 412c9fc07e353120568e60da6e31b46529a97f42 | [
"Apache-2.0"
]
| permissive | https://github.com/matt-gardner/pnp | 95824d0dd577ad4a703d9aa70a936263199552de | c6f52697763ec26b614e5d3ca3fad140b4ccad53 | refs/heads/master | 2021-01-23T06:35:15.006410 | 2017-03-21T23:39:20 | 2017-03-21T23:39:20 | 86,379,969 | 0 | 0 | null | true | 2017-03-27T20:17:13 | 2017-03-27T20:17:13 | 2017-03-19T13:56:17 | 2017-03-27T17:05:13 | 5,887 | 0 | 0 | 0 | null | null | null | #!/usr/bin/python
# Generate random feature vectors for each diagram part
import sys
import ujson as json
import random
diagram_label_file = sys.argv[1]
out_file = sys.argv[2]
def label_to_feature_vector(label, xy, width, height):
DIMS = 2
vec = [0.0] * DIMS
vec[0] = float(xy[0]) / width
vec[1] = float(xy[1]) / height
return vec
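    # The triple-quoted blocks below are alternative featurizations kept for
    # reference; sitting after the return, they are never executed.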
# Random with a high-scoring element in a label-specific index.
'''
h = label.__hash__() % (DIMS / 2)
vec[h] = 3.0
for i in xrange(len(vec)):
vec[i] += random.gauss(0.0, 1.0)
return vec
'''
# One-hot at a label-specific index.
'''
h = label.__hash__() % DIMS
vec[h] = 1.0
return vec
'''
# Random around a mean per label
'''
for i in xrange(len(vec)):
mean_random = random.Random()
mean_random.seed(label.__hash__() * i)
mean = mean_random.uniform(-1, 1)
vec[i] = random.gauss(mean, 1.0)
return vec
'''
# Completely random
'''
for i in xrange(len(vec)):
vec[i] = random.gauss(0.0, 1.0)
return vec
'''
image_points = {}
with open(diagram_label_file, 'r') as f:
for line in f:
j = json.loads(line)
image_id = j["imageId"]
width = j["width"]
height = j["height"]
if not image_id in image_points:
image_points[image_id] = {}
# print image_id
for p in j["points"]:
xy = tuple(p["xy"])
label = p["label"]
vec = label_to_feature_vector(label, xy, width, height)
# print " ", xy, label
# print " ", vec
image_points[image_id][xy] = vec
# Convert dict format to something jsonable
image_points_json = []
for image_id in image_points.iterkeys():
point_vectors = []
for point in image_points[image_id]:
point_dict = {}
point_dict["xy"] = list(point)
point_dict["vec"] = image_points[image_id][point]
point_vectors.append(point_dict)
image_json = {"imageId" : image_id, "points" : point_vectors}
image_points_json.append(image_json)
with open(out_file, 'wb') as f:
for j in image_points_json:
print >> f, json.dumps(j)
| UTF-8 | Python | false | false | 2,249 | py | 10 | generate_diagram_feats.py | 4 | 0.545576 | 0.534015 | 0 | 91 | 23.714286 | 67 |
Greeshma-Prakasan/Manage_Docker | 1,760,936,592,107 | 73cedd1720903ef906d60687cc4e9d9783d3b879 | 4e076520ea85693c168758625d8a9617624e7a00 | /manage_docker.py | 567239a1c7ae4d1fa19ba7ac128e9c0697533751 | []
| no_license | https://github.com/Greeshma-Prakasan/Manage_Docker | 61f64038e1f9617550ddef7c31c0e04dda9a4c1d | 23da875555b56e83e11758e1699fd300d3878a7c | refs/heads/main | 2023-08-04T07:48:17.768828 | 2021-09-20T17:54:49 | 2021-09-20T17:54:49 | 408,501,175 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from rich.console import Console
console = Console()
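# Simple interactive wrapper around the docker CLI; rich's Console supplies
# colored terminal output for the menu and status messages.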
def stat_container():
os.system("docker container stats")
def download_new_image():
img = input("\tEnter the name of Image : ")
os.system(f'docker pull {img}')
def run_container():
img = input("\tEnter the image name : ")
container = input("\tEnter the container name : ")
os.system(f'docker run --name {container} {img}')
console.print("\tContainer is running.........",style="bold blue")
def del_container():
container = input("\tEnter the container name : ")
os.system(f'docker rm {container}')
console.print("\tContainer successfully deleted.........",style="bold blue")
def nw_details():
res = os.popen("docker network inspect bridge").read()
console.print(res,style="bold blue")
def modify_nw():
res = os.popen("docker network ls").read()
console.print(f"Available networks\n\n{res}",style="bold blue")
container = input("Enter the container name : ")
os.system(f"docker network disconnect bridge {container}")
console.print(f"{container} disconnected...........",style="bold blue")
nw = input("Enter the new network name : ")
ip = input("Enter the ip : ")
os.system(f"sudo docker network create -d bridge --subnet={ip} {nw}")
console.print("Network Created...................")
os.system(f"docker network connect {nw} {container}")
console.print(f"Container connected to new network {nw}")
def menu():
console.print("1. Status of containers\n2. Download new Image\n3. Run container\n4. Delete Container\n5. Network details of container\n6. Modify Network details of contaniner\n7. Exit",style="bold yellow")
while True:
menu()
c = int(input("Enter the choice : "))
if c==1:
stat_container()
elif c==2:
download_new_image()
elif c==3:
run_container()
elif c==4:
del_container()
elif c==5:
nw_details()
elif c==6:
modify_nw()
elif c==7:
break
else:
console.print("\tInvalid Choice",style="bold blue") | UTF-8 | Python | false | false | 2,083 | py | 1 | manage_docker.py | 1 | 0.631301 | 0.62458 | 0 | 63 | 32.079365 | 209 |
oksana-yashan/MOPE-5 | 9,062,381,015,748 | 8135944ea1a238b22a2465fc63114df9c79e46a8 | 9cad2261b2f4a2109db13ee8ff680b20ff1a7159 | /Lab5.py | c29374b99715553c3be355ab344f6ab4c179a4bb | []
| no_license | https://github.com/oksana-yashan/MOPE-5 | 1e49e3d475a3104bbf89c4bf66a9a7ab17894a76 | a6bfb01c4614dc81c317684aea02f84cf135254d | refs/heads/master | 2022-04-25T19:03:30.586509 | 2020-04-24T15:12:18 | 2020-04-24T15:12:18 | 257,075,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import numpy as np
import math
from functools import reduce
from itertools import compress
import scipy
from scipy.stats import f,t
x1min, x2min, x3min = -5, -9, -3
x1max, x2max, x3max = 5, 3, 5
x_min = (x1min + x2min + x3min) / 3 # mean of x1min, x2min, x3min
x_max = (x1max + x2max + x3max) / 3
x0_i = [np.average(x1min+x1max),np.average(x2min+x2max),np.average(x3min+x3max)]
y_min = round(200 + x_min)
y_max = round(200 + x_max)
m = 3
N = 8
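# Experiment-design lab: fit a linear model, then one with interaction terms,
# then one with quadratic terms, checking variance homogeneity (Cochran),
# coefficient significance (Student) and model adequacy (Fisher) at each stage.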
# from Lab3 without interaction
counter = 0
with_interaction = False
check = True
while (check): # from Lab3
matrix = np.ndarray(shape=(4, 7), dtype=float)
matrix[0][0], matrix[1][0], matrix[2][0], matrix[3][0] = 1, 1, 1, 1
matrix[0][1], matrix[1][1], matrix[2][1], matrix[3][1] = -1, -1, 1, 1
matrix[0][2], matrix[1][2], matrix[2][2], matrix[3][2] = -1, 1, -1, 1
matrix[0][3], matrix[1][3], matrix[2][3], matrix[3][3] = -1, 1, 1, -1
matrix_n = np.ndarray(shape=(4, 6), dtype=float)
matrix_n[0][0], matrix_n[1][0], matrix_n[2][0], matrix_n[3][0] = x1min, x1min, x1max, x1max
matrix_n[0][1], matrix_n[1][1], matrix_n[2][1], matrix_n[3][1] = x2min, x2max, x1min, x2max
matrix_n[0][2], matrix_n[1][2], matrix_n[2][2], matrix_n[3][2] = x3min, x3max, x3max, x3min
mY_list = []
for i in range(4):
for j in range(3, 6):
r = np.random.randint(y_min, y_max)
matrix_n[i][j], matrix[i][j + 1] = r, r
mY_list.append(((matrix_n[i][3] + matrix_n[i][4] + matrix_n[i][5]) / m).__round__(4))
mx1 = np.sum(matrix_n, axis=0)[0] / 4
mx2 = np.sum(matrix_n, axis=0)[1] / 4
mx3 = np.sum(matrix_n, axis=0)[2] / 4
my = (sum(mY_list) / len(mY_list)).__round__(2)
a1 = (matrix_n[0][0] * mY_list[0] + matrix_n[1][0] * mY_list[1] + matrix_n[2][0] * mY_list[2] + matrix_n[3][0] *
mY_list[3]) / 4
a2 = (matrix_n[0][1] * mY_list[0] + matrix_n[1][1] * mY_list[1] + matrix_n[2][1] * mY_list[2] + matrix_n[3][1] *
mY_list[3]) / 4
a3 = (matrix_n[0][2] * mY_list[0] + matrix_n[1][2] * mY_list[1] + matrix_n[2][2] * mY_list[2] + matrix_n[3][2] *
mY_list[3]) / 4
a11 = (matrix_n[0][0] ** 2 + matrix_n[1][0] ** 2 + matrix_n[2][0] ** 2 + matrix_n[3][0] ** 2) / 4
a22 = (matrix_n[0][1] ** 2 + matrix_n[1][1] ** 2 + matrix_n[2][1] ** 2 + matrix_n[3][1] ** 2) / 4
a33 = (matrix_n[0][2] ** 2 + matrix_n[1][2] ** 2 + matrix_n[2][2] ** 2 + matrix_n[3][2] ** 2) / 4
a12 = a21 = (matrix_n[0][0] * matrix_n[0][1] + matrix_n[1][0] * matrix_n[1][1] + matrix_n[2][0] * matrix_n[2][1] +
matrix_n[3][0] * matrix_n[3][1]) / 4
a13 = a31 = (matrix_n[0][0] * matrix_n[0][2] + matrix_n[1][0] * matrix_n[1][2] + matrix_n[2][0] * matrix_n[2][2] +
matrix_n[3][0] * matrix_n[3][2]) / 4
a23 = a32 = (matrix_n[0][1] * matrix_n[0][2] + matrix_n[1][1] * matrix_n[1][2] + matrix_n[2][1] * matrix_n[2][2] +
matrix_n[3][1] * matrix_n[3][2]) / 4
b0 = np.linalg.det(
np.array([[my, mx1, mx2, mx3], [a1, a11, a12, a13], [a2, a12, a22, a32], [a3, a13, a23, a33]])) / np.linalg.det(
np.array([[1, mx1, mx2, mx3], [mx1, a11, a12, a13], [mx2, a12, a22, a32], [mx3, a13, a23, a33]]))
b1 = np.linalg.det(
np.array([[1, my, mx2, mx3], [mx1, a1, a12, a13], [mx2, a2, a22, a32], [mx3, a3, a23, a33]])) / np.linalg.det(
np.array([[1, mx1, mx2, mx3], [mx1, a11, a12, a13], [mx2, a12, a22, a32], [mx3, a13, a23, a33]]))
b2 = np.linalg.det(
np.array([[1, mx1, my, mx3], [mx1, a11, a1, a13], [mx2, a12, a2, a32], [mx3, a13, a3, a33]])) / np.linalg.det(
np.array([[1, mx1, mx2, mx3], [mx1, a11, a12, a13], [mx2, a12, a22, a32], [mx3, a13, a23, a33]]))
b3 = np.linalg.det(
np.array([[1, mx1, mx2, my], [mx1, a11, a12, a1], [mx2, a12, a22, a2], [mx3, a13, a23, a3]])) / np.linalg.det(
np.array([[1, mx1, mx2, mx3], [mx1, a11, a12, a13], [mx2, a12, a22, a32], [mx3, a13, a23, a33]]))
print(" Матриця планування")
print(" x1 x2 x3 y1 y2 y3 ")
for i in range(3):
for j in range(6):
print("{:>6.1f}".format(matrix_n[i][j]), end=" ")
print("\t")
print("\n", "y = %.2f + %.2f * x1 + %.2f * x2+ %.2f * x3" % (b0, b1, b2, b3))
print("\nПеревірка:")
print((b0 + b1 * matrix_n[0][0] + b2 * matrix_n[0][1] + b3 * matrix_n[0][2]).__round__(3), " ",
(b0 + b1 * matrix_n[1][0] + b2 * matrix_n[1][1] + b3 * matrix_n[1][2]).__round__(3), " ",
(b0 + b1 * matrix_n[2][0] + b2 * matrix_n[2][1] + b3 * matrix_n[2][2]).__round__(3), " ",
(b0 + b1 * matrix_n[3][0] + b2 * matrix_n[3][1] + b3 * +matrix_n[3][2]).__round__(3))
print(mY_list)
    # Cochran's criterion check:
s2_y1 = ((matrix[0][4] - mY_list[0]) ** 2 + (matrix[0][5] - mY_list[0]) ** 2 + (matrix[0][6] - mY_list[0]) ** 2) / 3
s2_y2 = ((matrix[1][4] - mY_list[1]) ** 2 + (matrix[1][5] - mY_list[1]) ** 2 + (matrix[1][6] - mY_list[1]) ** 2) / 3
s2_y3 = ((matrix[2][4] - mY_list[2]) ** 2 + (matrix[2][5] - mY_list[2]) ** 2 + (matrix[2][6] - mY_list[2]) ** 2) / 3
s2_y4 = ((matrix[3][4] - mY_list[3]) ** 2 + (matrix[3][5] - mY_list[3]) ** 2 + (matrix[3][6] - mY_list[3]) ** 2) / 3
f1 = m - 1
f2 = N
p = 0.95
q = 1 - p
Gp = max(s2_y1, s2_y2, s2_y3, s2_y4) / (s2_y1 + s2_y2 + s2_y3 + s2_y4)
def cohren_value(f2, f1, q):
f2 += 1
partResult1 = q / (f2 - 1)
params = [partResult1, f1, (f2 - 1 - 1) * f2]
fisher = scipy.stats.f.isf(*params)
result = fisher / (fisher + (f2 - 1 - 1))
return result.__round__(3)
Gt = cohren_value(f2, f1, q)
if (Gp < Gt):
print(" Отже, дисперсія однорідна")
check = False
else:
print(" Дисперсія неоднорідна --> m+1")
m += 1
# Student's criterion (significance of coefficients)
s2_b = (s2_y1 + s2_y2 + s2_y3 + s2_y4) / 4
s2_bb = s2_b / (4 * m)
s_bb = np.sqrt(s2_bb)
bb0 = (mY_list[0] * matrix[0][0] + mY_list[1] * matrix[1][0] + mY_list[2] * matrix[2][0] + mY_list[3] * matrix[3][
0]) / N
bb1 = (mY_list[0] * matrix[0][1] + mY_list[1] * matrix[1][1] + mY_list[2] * matrix[2][1] + mY_list[3] * matrix[3][
1]) / N
bb2 = (mY_list[0] * matrix[0][2] + mY_list[1] * matrix[1][2] + mY_list[2] * matrix[2][2] + mY_list[3] * matrix[3][
2]) / N
bb3 = (mY_list[0] * matrix[0][3] + mY_list[1] * matrix[1][3] + mY_list[2] * matrix[2][3] + mY_list[3] * matrix[3][
3]) / N
t = [abs(bb0) / s_bb, abs(bb1) / s_bb, abs(bb2) / s_bb, abs(bb3) / s_bb]
f3 = (m - 1) * N  # for f3 = 8 the tabulated t value is 2.306
t_t = scipy.stats.t.ppf((1 + (1 - q)) / 2, f3)
print("\nt табличне:", t_t)
if t[0] < t_t:
b0 = 0
print("t0<t_t; b0=0")
if t[1] < t_t:
b1 = 0
print("t1<t_t; b1=0")
if t[2] < t_t:
b2 = 0
print("t2<t_t; b2=0")
if t[3] < t_t:
b3 = 0
print("t3<t_t; b3=0")
print("\n", "y = %.2f + %.2f * x1 + %.2f * x2+ %.2f * x3" % (b0, b1, b2, b3))
y1_exp = b0 + b1 * matrix_n[0][0] + b2 * matrix_n[0][1] + b3 * matrix_n[0][2]
y2_exp = b0 + b1 * matrix_n[1][0] + b2 * matrix_n[1][1] + b3 * matrix_n[1][2]
y3_exp = b0 + b1 * matrix_n[2][0] + b2 * matrix_n[2][1] + b3 * matrix_n[2][2]
y4_exp = b0 + b1 * matrix_n[3][0] + b2 * matrix_n[3][1] + b3 * matrix_n[3][2]
print(f"y1_exp = {b0:.2f}{b1:+.2f}*x11{b2:+.2f}*x12{b3:+.2f}*x13 "
f"= {y1_exp:.2f}")
print(f"y2_exp = {b0:.2f}{b1:+.2f}*x21{b2:+.2f}*x22{b3:+.2f}*x23"
f" = {y2_exp:.2f}")
print(f"y3_exp = {b0:.2f}{b1:+.2f}*x31{b2:+.2f}*x32{b3:+.2f}*x33 "
f"= {y3_exp:.2f}")
print(f"y4_exp = {b0:.2f}{b1:+.2f}*x41{b2:+.2f}*x42{b3:+.2f}*x43"
f" = {y4_exp:.2f}")
# Fisher's criterion (model adequacy)
d = 2
f4 = N - d
s2_ad = ((y1_exp - mY_list[0]) ** 2 + (y2_exp - mY_list[1]) ** 2 + (y3_exp - mY_list[2]) ** 2 + (
        y4_exp - mY_list[3]) ** 2) * m / (N - d)
Fp = s2_ad / s2_b
Ft = scipy.stats.f.ppf(1 - q, f4, f3)
print("\nFp:", Fp)
print("Ft:", Ft)
if Fp < Ft:
print("Рівняння регресії не адекватно оригіналу при q = 0,05",'\n\n')
with_interaction = True
print("Рівняння регресії з врахуванням ефекту взаємодії:")
else:
print("Рівняння регресії адекватно оригіналу при q = 0,05")
# Equation with intersection(Lab4)
while (with_interaction):
m = 3
N = 8
x0 = [1 for i in range(N)]
# x1 x2 x3 x12 x13 x23 x123
norm_x_table = [[-1, -1, -1, +1, +1, +1, -1],
[-1, +1, +1, -1, -1, +1, -1],
[+1, -1, +1, -1, +1, -1, -1],
[+1, +1, -1, +1, -1, -1, -1],
[-1, -1, +1, +1, -1, -1, +1],
[-1, +1, -1, -1, +1, -1, +1],
[+1, -1, -1, -1, -1, +1, +1],
[+1, +1, +1, +1, +1, +1, +1]]
# 1 2 3 12 13 23 123
x_table = [[x1min, x2min, x3min, x1min * x2min, x1min * x3min, x2min * x3min, x1min * x2min * x3min],
[x1min, x2max, x3max, x1min * x2max, x1min * x3max, x2max * x3max, x1min * x2max * x3max],
[x1max, x2min, x3max, x1max * x2min, x1max * x3max, x2min * x3max, x1max * x2min * x3max],
[x1max, x2max, x3min, x1max * x2max, x1max * x3min, x2max * x3min, x1max * x2max * x3min],
[x1min, x2min, x3max, x1min * x2min, x1min * x3max, x2min * x3max, x1min * x2min * x3max],
[x1min, x2max, x3min, x1min * x2max, x1min * x3min, x2max * x3min, x1min * x2max * x3min],
[x1max, x2min, x3min, x1max * x2min, x1max * x3min, x2min * x3min, x1max * x2min * x3min],
[x1max, x2max, x3max, x1max * x2max, x1max * x3max, x2max * x3max, x1max * x2max * x3max]]
y_arr = [[random.randint(y_min, y_max) for j in range(m)] for i in range(N)] # i rows and j columns
print(y_arr)
# arrays with x1(i), x2(i),x3(i)
x1i = np.array([x_table[i][0] for i in range(8)])
x2i = np.array([x_table[i][1] for i in range(8)])
x3i = np.array([x_table[i][2] for i in range(8)])
yi = np.array([np.average(i) for i in y_arr]) # average for each i row in y_arr
def m_ij(*arrays):
return np.average(reduce(lambda el_1, el_2: el_1 + el_2, arrays)) # reduce: sums all el in given arrays
coefs = [[N, m_ij(x1i), m_ij(x2i), m_ij(x3i), m_ij(x1i * x2i), m_ij(x1i * x3i), m_ij(x2i * x3i), m_ij(x1i * x2i * x3i)],
[m_ij(x1i), m_ij(x1i ** 2), m_ij(x1i * x2i), m_ij(x1i * x3i), m_ij(x1i ** 2 * x2i), m_ij(x1i ** 2 * x3i),
m_ij(x1i * x2i * x3i), m_ij(x1i ** 2 * x2i * x3i)],
[m_ij(x2i), m_ij(x1i * x2i), m_ij(x2i ** 2), m_ij(x2i * x3i), m_ij(x1i * x2i ** 2), m_ij(x1i * x2i * x3i),
m_ij(x2i ** 2 * x3i), m_ij(x1i * x2i ** 2 * x3i)],
[m_ij(x3i), m_ij(x1i * x3i), m_ij(x2i * x3i), m_ij(x3i ** 2), m_ij(x1i * x2i * x3i), m_ij(x1i * x3i ** 2),
m_ij(x2i * x3i ** 2), m_ij(x1i * x2i * x3i ** 2)],
[m_ij(x1i * x2i), m_ij(x1i ** 2 * x2i), m_ij(x1i * x2i ** 2), m_ij(x1i * x2i * x3i), m_ij(x1i ** 2 * x2i ** 2),
m_ij(x1i ** 2 * x2i * x3i), m_ij(x1i * x2i ** 2 * x3i), m_ij(x1i ** 2 * x2i ** 2 * x3i)],
[m_ij(x1i * x3i), m_ij(x1i ** 2 * x3i), m_ij(x1i * x2i * x3i), m_ij(x1i * x3i ** 2),
m_ij(x1i ** 2 * x2i * x3i), m_ij(x1i ** 2 * x3i ** 2), m_ij(x1i * x2i * x3i ** 2),
m_ij(x1i ** 2 * x2i * x3i ** 2)],
[m_ij(x2i * x3i), m_ij(x1i * x2i * x3i), m_ij(x2i ** 2 * x3i), m_ij(x2i * x3i ** 2),
m_ij(x1i * x2i ** 2 * x3i), m_ij(x1i * x2i * x3i ** 2), m_ij(x2i ** 2 * x3i ** 2),
m_ij(x1i * x2i ** 2 * x3i ** 2)],
[m_ij(x1i * x2i * x3i), m_ij(x1i ** 2 * x2i * x3i), m_ij(x1i * x2i ** 2 * x3i), m_ij(x1i * x2i * x3i ** 2),
m_ij(x1i ** 2 * x2i ** 2 * x3i), m_ij(x1i ** 2 * x2i * x3i ** 2), m_ij(x1i * x2i ** 2 * x3i ** 2),
m_ij(x1i ** 2 * x2i ** 2 * x3i ** 2)]]
free_vals = [m_ij(yi), m_ij(yi * x1i), m_ij(yi * x2i), m_ij(yi * x3i), m_ij(yi * x1i * x2i), m_ij(yi * x1i * x3i),
m_ij(yi * x2i * x3i), m_ij(yi * x1i * x2i * x3i)]
# solution of system of equations(array)
b_i = np.linalg.solve(coefs, free_vals)
# just arrays from table
nat_x1 = np.array([norm_x_table[i][0] for i in range(8)])
nat_x2 = np.array([norm_x_table[i][1] for i in range(8)])
nat_x3 = np.array([norm_x_table[i][2] for i in range(8)])
norm_b_i = [m_ij(yi * 1), m_ij(yi * nat_x1), m_ij(yi * nat_x2), m_ij(yi * nat_x3),
m_ij(yi * nat_x1 * nat_x2), m_ij(yi * nat_x1 * nat_x3), m_ij(yi * nat_x2 * nat_x3),
m_ij(yi * nat_x1 * nat_x2 * nat_x3)]
# main functions
def theor_y(x_table, b_coef, importance):
x_table = [list(compress(row, importance)) for row in x_table] # update: if importance 0 - get rid of x(ij)
b_coef = list(compress(b_coef, importance)) # update: if importance 0 - get rid of b
print("X_table :\n", x_table)
print("b-coeffs :\n",b_coef)
y_vals = np.array([sum(map(lambda x, b: x * b, row, b_coef)) for row in x_table])
return y_vals
def student_criteria1(m, N, y_table, norm_x_table):
print("\nЗа критерієм Стьюдента: m = {}, N = {} ".format(m, N))
avg_variation = np.average(list(map(np.var, y_table))) # var = mean(abs(y - y.mean())**2) in numpy
y_avrg = np.array(list(map(np.average, y_table)))
variation_beta_s = avg_variation / N / m
deviation_beta_s = math.sqrt(variation_beta_s)
x_i = np.array([[el[i] for el in norm_x_table] for i in range(len(norm_x_table))])
coef_beta_s = np.array([round(np.average(y_avrg * x_i[i]), 3) for i in range(len(x_i))])
print("Оцінки коефіцієнтів β(s): " + ", ".join(list(map(str, coef_beta_s))))
t_i = np.array([abs(coef_beta_s[i]) / deviation_beta_s for i in range(len(coef_beta_s))])
print("Коефіцієнти t: " + ", ".join(list(map(lambda i: "{:.2f}".format(i), t_i))))
f3 = (m - 1) * N
q = 0.05
t = get_student(f3, q)
importance = [True if el>t else False for el in list(t_i)]
print("f3 = {}; q = {}; tтабл = {}".format(f3, q, t))
beta_i = ["β0", "β1", "β2", "β3", "β12", "β13", "β23", "β123"]
updated_importance = [" - значимий" if i else " - незначимий" for i in importance]
to_print = map(lambda x: x[0] + " " + x[1], zip(beta_i, updated_importance))
x_i_names = list(compress(["", " x1", " x2", " x3", " x12", " x13", " x23", " x123"],
importance)) # if importance 0 - get rid of it
betas_to_print = list(compress(coef_beta_s, importance))
print(*to_print, sep="; ")
equation = " ".join(["".join(i) for i in zip(list(map(lambda x: "{:+.2f}".format(x), betas_to_print)), x_i_names)])
print("Рівняння регресії без незначимих членів: y = " + equation)
return importance
def get_student(f3, q):
return (abs(scipy.stats.t.ppf(q / 2, f3))).__round__(3)
def get_fisher(f3, f4, q):
return (abs(f.isf(q, f4, f3))).__round__(3)
def cochran_criteria(m, N, y_table):
print("За критерієм Кохрена: m = {}, N = {} ".format(m, N))
y_variations = [np.var(i) for i in y_table]
max_y_variation = max(y_variations)
gp = max_y_variation / sum(y_variations)
f1 = m - 1
f2 = N
p = 0.95
q = 1 - p
        gt = cohren_value(f2, f1, q)
print("Gp = {}; Gt = {}; f1 = {}; f2 = {}; q = {:.2f}".format(gp, gt, f1, f2, q))
if gp < gt:
print("Gp < Gt => дисперсії рівномірні")
return True
else:
print("Gp > Gt => дисперсії нерівномірні")
return False
while not cochran_criteria(m, 4, y_arr):
m += 1
y_table = [[random.randint(y_min, y_max) for column in range(m)] for row in range(N)]
print("Матриця планування:")
labels = " x1 x2 x3 x12 x13 x23 x123 y1 y2 y3"
rows_table = [list(x_table[i]) + list(y_arr[i]) for i in range(N)]
print(labels)
print("\n".join([" ".join(map(lambda j: "{:<6}".format(j), rows_table[i])) for i in range(len(rows_table))]), "\n")
norm_x_table_with_x0 = [[+1] + row for row in norm_x_table]
importance = student_criteria1(m, N, y_arr, norm_x_table_with_x0) # shows should each b(ij)*x(i) be in our main equation
def fisher_criteria(m, N, d, nat_x_table, y_table, b_coefficients, importance):
print("\nЗа критерієм Фішера: m = {}, N = {} ".format(m, N))
f3 = (m - 1) * N
f4 = N - d
q = 0.05
theoret_y = theor_y(nat_x_table, b_coefficients, importance)
print(theoret_y)
theor_values_to_print = list(zip(map(lambda x: "x1 = {0[0]}, x2 = {0[1]}, x3 = {0[2]}".format(x), nat_x_table), theoret_y))
print("Теоретичні y:")
print("\n".join(["{val[0]}: y = {val[1]}".format(val=el) for el in theor_values_to_print]))
y_averages = np.array(list(map(np.average, y_table)))
s_ad = m / (N - d) * (sum((theoret_y - y_averages) ** 2))
y_variations = np.array(list(map(np.var, y_table)))
s_v = np.average(y_variations)
f_p = round(float(s_ad / s_v), 3)
f_t = get_fisher(f3, f4, q)
print("Fp = {}, Ft = {}".format(f_p, f_t))
print("Fp < Ft --> модель адекватна" if f_p < f_t else "Fp > Ft --> неадекватна")
return True if f_p < f_t else False
x_table_with_x0 = [[+1] + row for row in x_table]
print("rhgjdr",x_table_with_x0)
fisher_with_interaction = fisher_criteria(m, N, 1, x_table_with_x0, y_arr, b_i, importance)
print(" (при врахуванні взаємодії)\n\n")
#with quadratic terms(LAB5)
if fisher_with_interaction == False:
m = 3
N = 15
x0_i = [(x1max + x1min) / 2, (x2max + x2min) / 2, (x3max + x3min) / 2]
y_arr = [[random.randint(y_min, y_max) for _ in range(m)] for _ in range(N)]
delta_x_i = [(x1min - x0_i[0]), (x2min - x0_i[1]), (x3min - x0_i[2])]
l = 1.215
nat_x_table = [[x1min, x2min, x3min],
[x1min, x2max, x3max],
[x1max, x2min, x3max],
[x1max, x2max, x3min],
[x1min, x2min, x3max],
[x1min, x2max, x3min],
[x1max, x2min, x3min],
[x1max, x2max, x3max],
[-l * delta_x_i[0] + x0_i[0], x0_i[1], x0_i[2]],
[l * delta_x_i[0] + x0_i[0], x0_i[1], x0_i[2]],
[x0_i[0], -l * delta_x_i[1] + x0_i[1], x0_i[2]],
[x0_i[0], l * delta_x_i[1] + x0_i[1], x0_i[2]],
[x0_i[0], x0_i[1], -l * delta_x_i[2] + x0_i[2]],
[x0_i[0], x0_i[1], l * delta_x_i[2] + x0_i[2]],
[x0_i[0], x0_i[1], x0_i[2]]]
x_table = [[-1, -1, -1],
[-1, +1, +1],
[+1, -1, +1],
[+1, +1, -1],
[-1, -1, +1],
[-1, +1, -1],
[+1, -1, -1],
[+1, +1, +1],
[-l, 0, 0],
[+l, 0, 0],
[0, -l, 0],
[0, +l, 0],
[0, 0, -l],
[0, 0, +l],
[0, 0, 0]]
while not cochran_criteria(m, N, y_arr):
m += 1
y_arr = [[random.randint(y_min, y_max) for column in range(m)] for row in range(N)]
yi = np.array([np.average(i) for i in y_arr]) # average for each i_th row in y_arr
mat = np.array(nat_x_table)
mat = np.append(mat, [[mat[i][0] * mat[i][1]] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][0] * mat[i][2]] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][1] * mat[i][2]] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][0] * mat[i][1] * mat[i][2]] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][0] ** 2] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][1] ** 2] for i in range(N)], 1)
mat = np.append(mat, [[mat[i][2] ** 2] for i in range(N)], 1)
np.set_printoptions(linewidth=150)
x = np.transpose(mat)
print(x)
mx1 = sum(x[0]) / len(x[0])
mx2 = sum(x[1]) / len(x[1])
mx3 = sum(x[2]) / len(x[2])
mx4 = sum(x[3]) / len(x[3])
mx5 = sum(x[4]) / len(x[4])
mx6 = sum(x[5]) / len(x[5])
mx7 = sum(x[6]) / len(x[6])
mx8 = sum(x[7]) / len(x[7])
mx9 = sum(x[8]) / len(x[8])
mx10 = sum(x[9]) / len(x[9])
a11 = sum([x[0][i] * x[0][i] for i in range(N)]) / len(x[0])
a12 = a21 = sum([x[0][i] * x[1][i] for i in range(N)]) / len(x[0])
a13 = a31 = sum([x[0][i] * x[2][i] for i in range(N)]) / len(x[0])
a14 = a41 = sum([x[0][i] * x[3][i] for i in range(N)]) / len(x[0])
a15 = a51 = sum([x[0][i] * x[4][i] for i in range(N)]) / len(x[0])
a16 = a61 = sum([x[0][i] * x[5][i] for i in range(N)]) / len(x[0])
a17 = a71 = sum([x[0][i] * x[6][i] for i in range(N)]) / len(x[0])
a18 = a81 = sum([x[0][i] * x[7][i] for i in range(N)]) / len(x[0])
a19 = a91 = sum([x[0][i] * x[8][i] for i in range(N)]) / len(x[0])
a110 = a101 = sum([x[0][i] * x[9][i] for i in range(N)]) / len(x[0])
a22 = sum([x[1][i] * x[1][i] for i in range(N)]) / len(x[0])
a23 = a32 = sum([x[1][i] * x[2][i] for i in range(N)]) / len(x[0])
a24 = a42 = sum([x[1][i] * x[3][i] for i in range(N)]) / len(x[0])
a25 = a52 = sum([x[1][i] * x[4][i] for i in range(N)]) / len(x[0])
a26 = a62 = sum([x[1][i] * x[5][i] for i in range(N)]) / len(x[0])
a27 = a72 = sum([x[1][i] * x[6][i] for i in range(N)]) / len(x[0])
a28 = a82 = sum([x[1][i] * x[7][i] for i in range(N)]) / len(x[0])
a29 = a92 = sum([x[1][i] * x[8][i] for i in range(N)]) / len(x[0])
a210 = a102 = sum([x[1][i] * x[9][i] for i in range(N)]) / len(x[0])
a33 = sum([x[2][i] * x[2][i] for i in range(N)]) / len(x[0])
a34 = a43 = sum([x[2][i] * x[3][i] for i in range(N)]) / len(x[0])
a35 = a53 = sum([x[2][i] * x[4][i] for i in range(N)]) / len(x[0])
a36 = a63 = sum([x[2][i] * x[5][i] for i in range(N)]) / len(x[0])
a37 = a73 = sum([x[2][i] * x[6][i] for i in range(N)]) / len(x[0])
a38 = a83 = sum([x[2][i] * x[7][i] for i in range(N)]) / len(x[0])
a39 = a93 = sum([x[2][i] * x[8][i] for i in range(N)]) / len(x[0])
a310 = a103 = sum([x[2][i] * x[9][i] for i in range(N)]) / len(x[0])
a44 = sum([x[3][i] * x[3][i] for i in range(N)]) / len(x[0])
a45 = a54 = sum([x[3][i] * x[4][i] for i in range(N)]) / len(x[0])
a46 = a64 = sum([x[3][i] * x[5][i] for i in range(N)]) / len(x[0])
a47 = a74 = sum([x[3][i] * x[6][i] for i in range(N)]) / len(x[0])
a48 = a84 = sum([x[3][i] * x[7][i] for i in range(N)]) / len(x[0])
a49 = a94 = sum([x[3][i] * x[8][i] for i in range(N)]) / len(x[0])
a410 = a104 = sum([x[3][i] * x[9][i] for i in range(N)]) / len(x[0])
a55 = sum([x[4][i] * x[4][i] for i in range(N)]) / len(x[0])
a56 = a65 = sum([x[4][i] * x[5][i] for i in range(N)]) / len(x[0])
a57 = a75 = sum([x[4][i] * x[6][i] for i in range(N)]) / len(x[0])
a58 = a85 = sum([x[4][i] * x[7][i] for i in range(N)]) / len(x[0])
a59 = a95 = sum([x[4][i] * x[8][i] for i in range(N)]) / len(x[0])
a510 = a105 = sum([x[4][i] * x[9][i] for i in range(N)]) / len(x[0])
a66 = sum([x[5][i] * x[5][i] for i in range(N)]) / len(x[0])
a67 = a76 = sum([x[5][i] * x[6][i] for i in range(N)]) / len(x[0])
a68 = a86 = sum([x[5][i] * x[7][i] for i in range(N)]) / len(x[0])
a69 = a96 = sum([x[5][i] * x[8][i] for i in range(N)]) / len(x[0])
a610 = a106 = sum([x[5][i] * x[9][i] for i in range(N)]) / len(x[0])
a77 = sum([x[6][i] * x[6][i] for i in range(N)]) / len(x[0])
a78 = a87 = sum([x[6][i] * x[7][i] for i in range(N)]) / len(x[0])
a79 = a97 = sum([x[6][i] * x[8][i] for i in range(N)]) / len(x[0])
a710 = a107 = sum([x[6][i] * x[9][i] for i in range(N)]) / len(x[0])
a88 = sum([x[7][i] * x[7][i] for i in range(N)]) / len(x[0])
a89 = a98 = sum([x[7][i] * x[8][i] for i in range(N)]) / len(x[0])
a810 = a108 = sum([x[7][i] * x[9][i] for i in range(N)]) / len(x[0])
a99 = sum([x[8][i] * x[8][i] for i in range(N)]) / len(x[0])
a910 = a109 = sum([x[8][i] * x[9][i] for i in range(N)]) / len(x[0])
a1010 = sum([x[9][i] * x[9][i] for i in range(N)]) / len(x[0])
coefficients = [[1, mx1, mx2, mx3, mx4, mx5, mx6, mx7, mx8, mx9, mx10],
[mx1, a11, a21, a31, a41, a51, a61, a71, a81, a91, a101],
[mx2, a12, a22, a32, a42, a52, a62, a72, a82, a92, a102],
[mx3, a13, a23, a33, a43, a53, a63, a73, a83, a93, a103],
[mx4, a14, a24, a34, a44, a54, a64, a74, a84, a94, a104],
[mx5, a15, a25, a35, a45, a55, a65, a75, a85, a95, a105],
[mx6, a16, a26, a36, a46, a56, a66, a76, a86, a96, a106],
[mx7, a17, a27, a37, a47, a57, a67, a77, a87, a97, a107],
[mx8, a18, a28, a38, a48, a58, a68, a78, a88, a98, a108],
[mx9, a19, a29, a39, a49, a59, a69, a79, a89, a99, a109],
[mx10, a110, a210, a310, a410, a510, a610, a710, a810, a910, a1010]]
free_values = [0 for i in range(11)]
free_values[0] = sum(yi) / len(yi)
for i in range(1, 11):
free_values[i] = sum([x[i - 1][j] * yi[j] for j in range(N)]) / N
beta_coefficients = np.linalg.solve(coefficients, free_values)
np.set_printoptions(suppress=True, linewidth=200)
print(beta_coefficients)
def student_criteria2(m, N, y_table, beta_coefficients):
print("\nПеревірка значимості коефіцієнтів регресії за критерієм Стьюдента: ".format(m, N))
average_variation = np.average(list(map(np.var, y_table)))
print(average_variation)
variation_beta_s = average_variation/N/m
print(variation_beta_s)
standard_deviation_beta_s = math.sqrt(variation_beta_s)
t_i = np.array([(abs(beta_coefficients[i])/standard_deviation_beta_s) for i in range(len(beta_coefficients))])
f3 = (m - 1) * N
q = 0.05
t = get_student(f3, q)
importance = [True if el>t else False for el in list(t_i)]
print("\n"+"Оцінки коефіцієнтів βs: " + ", ".join(list(map(lambda x: str(round(float(x), 3)), beta_coefficients))))
print("Коефіцієнти ts: " + ", ".join(list(map(lambda i: "{:.2f}".format(i), t_i))))
print("f3 = {}; q = {}; tтабл = {}".format(f3, q, t))
beta_i = ["β0", "β1", "β2", "β3", "β12", "β13", "β23", "β123", "β11", "β22", "β33"]
            importance_to_print = ["significant" if i else "insignificant" for i in importance]
to_print = map(lambda x: x[0] + " " + x[1], zip(beta_i, importance_to_print))
x_i_names = list(compress(["", "x1", "x2", "x3", "x12", "x13", "x23", "x123", "x1^2", "x2^2", "x3^2"], importance))
betas_to_print = list(compress(beta_coefficients, importance))
print(*to_print, sep="; ")
equation = " ".join(["".join(i) for i in zip(list(map(lambda x: "{:+.2f}".format(x), betas_to_print)), x_i_names)])
print("Рівняння регресії без незначимих членів: y = " + equation)
return importance
importance = student_criteria2(m, N, y_arr, beta_coefficients)
nat_x_table_x0 = [[+1] + row for row in nat_x_table]
fisher_last = fisher_criteria(m, N, len(list(filter(None, importance))), nat_x_table_x0, y_arr, beta_coefficients, importance)
print(" (при врахуванні квадратичних членів)")
with_interaction = False
| UTF-8 | Python | false | false | 29,486 | py | 1 | Lab5.py | 1 | 0.467204 | 0.385693 | 0 | 588 | 46.920068 | 134 |
yingxingtianxia/python | 18,957,985,667,812 | ca2c74a968c8ced1c0a1474ba1ab939c72c7643d | 6df0d7a677129e9b325d4fdb4bbf72d512dd08b2 | /PycharmProjects/nsd_python_v02/day08/python_code_web/logs_engine/cals/data_save.py | cad8fa5346f8be4fbd628000acaaf4765bea136c | []
| no_license | https://github.com/yingxingtianxia/python | 01265a37136f2ad73fdd142f72d70f7c962e0241 | 3e1a7617a4b6552bce4a7e15a182f30e1bae221e | refs/heads/master | 2021-06-14T15:48:00.939472 | 2019-12-13T05:57:36 | 2019-12-13T05:57:36 | 152,200,507 | 0 | 0 | null | false | 2021-06-10T20:54:26 | 2018-10-09T06:40:10 | 2019-12-13T06:01:21 | 2021-06-10T20:54:24 | 324,414 | 0 | 0 | 3 | Python | false | false | #!/usr/bin/python
# coding=utf-8
"""
Operating Redis from Python is straightforward; just install this package.
Note: restarting Redis inside Docker wipes all of its stored data.
"""
import redis
r = redis.Redis(host='192.168.99.100', port=46379)
def addOne(key):
add(key, 1)
def add(key, num):
val = r.get(key)
    if val is None:
val = 0
else:
val = int(r.get(key))
r.set(key, str(val + num))
def getkeys(pattern=""):
return r.keys(pattern)
def get(key=""):
return r.get(key)
def addSet(key="", uid=""):
r.sadd(key, uid)
def getSet(key=""):
return r.scard(key)
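# Example usage (assumes the Redis instance above is reachable):
#   addOne('pv')             # increment a page-view counter
#   addSet('uv', 'user42')   # record a unique visitor id
#   print(get('pv'), getSet('uv'))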
| UTF-8 | Python | false | false | 593 | py | 879 | data_save.py | 608 | 0.58256 | 0.54731 | 0 | 40 | 12.475 | 50 |
zzqzzqzzq0816/FontRecognition | 9,466,107,969,661 | 2f4cc5bec074b10a45721e0227fe485567b94e08 | cb1373bfb1d057ab6cc7135ea6e72c599d2ffaee | /rear/FontRecognition/font/api/views.py | 230d5bdb82cccce936b1b0e36bc3ab716c40559d | []
| no_license | https://github.com/zzqzzqzzq0816/FontRecognition | 116f83947cde0a184ae0c3d5ec9cf358f5404c38 | 1e43a09993d2c4c73ef28de599c6cc658f5227e3 | refs/heads/master | 2022-12-20T01:53:43.650481 | 2020-10-02T03:12:05 | 2020-10-02T03:12:05 | 300,487,336 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework.decorators import api_view
def tran_name(image):
curr_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
suffix = image.name.split('.')[-1].replace('"', '')
image.name = curr_time + '.' + suffix
def save_image(image):
image_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'media')
full_path = image_path + '/' + image.name
with open(full_path, 'wb') as f:
for c in image.chunks():
f.write(c)
return full_path
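# Note: the view below currently only renames and stores the upload; an actual
# font-recognition step would consume full_path before building the response.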
@csrf_exempt
@api_view(['POST'])
def get_recognition_result(request):
response = {}
try:
image = request.data['image']
tran_name(image)
full_path = save_image(image)
        response['msg'] = 'Success!'
response['error_num'] = 0
except Exception as e:
response['msg'] = str(e)
response['error_num'] = 1
return Response(response) | UTF-8 | Python | false | false | 1,033 | py | 2 | views.py | 2 | 0.620253 | 0.617332 | 0 | 36 | 27.555556 | 99 |
snigdharao28/nltk_sentdex | 2,138,893,748,992 | 16504e6c0ae8ff30cd0063c43246bcceae7405b0 | a178f9ae84aab9eddd50cd82557a8493914a64a0 | /newtextclf.py | 9792c116c2707ca7adb2973abc63c781b52da19e | []
| no_license | https://github.com/snigdharao28/nltk_sentdex | 4b495fd3fa96f22ad482aa21adb144ff3d45e15a | 5b285260caa1ce3cb8c88381513c6fcd4c02cf45 | refs/heads/master | 2020-04-18T01:39:14.512064 | 2019-01-24T04:57:32 | 2019-01-24T04:57:32 | 167,127,261 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 14:24:41 2019
@author: snigdharao
"""
import random
import pickle
import nltk
#from nltk.corpus import movie_reviews
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
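    """Ensemble wrapper: classify by majority vote of the wrapped classifiers."""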
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes)
return conf
short_pos = open("positive.txt","r").read()
short_neg = open("negative.txt","r").read()
all_words = []
documents = []
allowed_word_types = ["J"]
for r in short_pos.split('\n'):
documents.append( (r, "pos") )
words = word_tokenize(r)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
for r in short_neg.split('\n'):
documents.append( (r, "neg") )
words = word_tokenize(r)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
save_documents = open("pickled_algos/documents.pickle", "wb")
pickle.dump(documents, save_documents)
save_documents.close()
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())[:5000]
save_word_features = open("pickled_algos/word_features5k.pickle", "wb")
pickle.dump(word_features, save_word_features)
save_word_features.close()
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(featuresets)
print(len(featuresets))
training_set = featuresets[:10000]
test_set = featuresets[10000:]
#original naive bayes
classifier = nltk.NaiveBayesClassifier.train(training_set)
classifier.show_most_informative_features(15)
print("Original Classifier accuracy percent:", (nltk.classify.accuracy(classifier, test_set))*100)
#saving originalnaivebayes
save_classifier = open("pickled_algos/originalnaivebayes5k.pickle","wb")
pickle.dump(classifier, save_classifier)
save_classifier.close()
#Multinomial NB
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MultinomialNB accuracy percent:",nltk.classify.accuracy(MNB_classifier,test_set)*100)
#saving multinomial NB
save_classifier = open("pickled_algos/MNB_classifier5k.pickle","wb")
pickle.dump(MNB_classifier, save_classifier)
save_classifier.close()
#Bernoulli NB
BNB_classifier = SklearnClassifier(BernoulliNB())
BNB_classifier.train(training_set)
print("BernoulliNB accuracy percent:",nltk.classify.accuracy(BNB_classifier, test_set)*100)
#saving Bernoulli NB
save_classifier = open("pickled_algos/BNB_classifier5k.pickle","wb")
pickle.dump(BNB_classifier, save_classifier)
save_classifier.close()
#Logistic Regression
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier accuracy percent:",nltk.classify.accuracy(LogisticRegression_classifier, test_set)*100)
#saving LogisticRegression
save_classifier = open("pickled_algos/LogisticRegression5k.pickle","wb")
pickle.dump(LogisticRegression_classifier, save_classifier)
save_classifier.close()
#SGDClassifier ( stochastic gradient descent)
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
print("SGDClassifier_classifier accuracy percent:",nltk.classify.accuracy(SGDClassifier_classifier, test_set)*100)
#saving SGDClassifier
save_classifier = open("pickled_algos/SGDClassifier5k.pickle", "wb")
pickle.dump(SGDClassifier_classifier, save_classifier)
save_classifier.close()
#Support Vector classifier
# =============================================================================
# SVC_classifier = SklearnClassifier(SVC())
# SVC_classifier.train(training_set)
# print("SVC_classifier accuracy percent:",nltk.classify.accuracy(SVC_classifier, test_set)*100)
# =============================================================================
#Linear SVC
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier accuracy percent:",nltk.classify.accuracy(LinearSVC_classifier, test_set)*100)
#saving LinearSVC
save_classifier = open("pickled_algos/LinearSVClassifier5k.pickle","wb")
pickle.dump(LinearSVC_classifier, save_classifier)
save_classifier.close()
#NuSVC (number of units)
#NuSVC_classifier = SklearnClassifier(NuSVC())
#NuSVC_classifier.train(training_set)
#print("NuSVC_classifier accuracy percent:",nltk.classify.accuracy(NuSVC_classifier, test_set)*100)
#new voted classifier
#voted_classifier = VoteClassifier(NuSVC_classifier,
# LinearSVC_classifier,
# MNB_classifier,
# BNB_classifier,
# LogisticRegression_classifier)
#
#print("voted_classifier accuracy percent:", (nltk.classify.accuracy(voted_classifier, test_set))*100)
#print("Classification:", voted_classifier.classify(test_set[0][0]), "Confidence %:", voted_classifier.confidence(test_set[0][0])*100)
#print("Classification:", voted_classifier.classify(test_set[1][0]), "Confidence %:", voted_classifier.confidence(test_set[1][0])*100)
#print("Classification:", voted_classifier.classify(test_set[2][0]), "Confidence %:", voted_classifier.confidence(test_set[2][0])*100)
#print("Classification:", voted_classifier.classify(test_set[3][0]), "Confidence %:", voted_classifier.confidence(test_set[3][0])*100)
#print("Classification:", voted_classifier.classify(test_set[4][0]), "Confidence %:", voted_classifier.confidence(test_set[4][0])*100)
#print("Classification:", voted_classifier.classify(test_set[5][0]), "Confidence %:", voted_classifier.confidence(test_set[5][0])*100)
| UTF-8 | Python | false | false | 6,793 | py | 4 | newtextclf.py | 4 | 0.70028 | 0.681731 | 0 | 212 | 31.042453 | 134 |
MadhavKMadhu/C4-SMP-ML | 10,299,331,599,347 | 6b897331b0ddf09ccb9d34e6e9d1d509298b2319 | 95951b2999d655ddd1438e76cc8dd5678fc6ef2c | /Week_3_tasks/costFunction.py | 677e8927a8fc565672bfa6a6614f143a19f928e0 | []
| no_license | https://github.com/MadhavKMadhu/C4-SMP-ML | 5541b36aab63f735502f8e09780e2831da4248a0 | 353588db847e6250404399bd621bc1a05167bed1 | refs/heads/master | 2022-09-07T13:34:21.804191 | 2020-06-02T04:36:13 | 2020-06-02T04:36:13 | 255,570,131 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from sigmoid import sigmoid
def costFunction(theta, X, y):
'''Returns Cost for theta, X, y'''
m = y.size
h = sigmoid(np.dot(X,theta))
J = [-(1/m) * (np.sum((y.T)*np.log(h) + (1-(y.T))*np.log(1-h)))]
if np.isnan(J[0]):
return(np.inf)
return(J[0])
def gradient(theta, X, y):
    '''Return the gradient of the logistic-regression cost w.r.t. theta'''
m = y.size
theta = theta.reshape(-1,1)
h = sigmoid(np.dot(X,theta))
grad = ((1/m) * np.dot(X.T, (h-y)) )
return(grad.flatten())
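# A tiny self-check sketch (not in the original file); the bias column in
# X_demo is an assumption about how the feature matrix is prepared upstream.
if __name__ == "__main__":
    X_demo = np.array([[1., 0.5], [1., 1.5], [1., 3.0]])  # intercept + one feature
    y_demo = np.array([[0.], [0.], [1.]])
    theta_demo = np.zeros(2)
    print("cost:", costFunction(theta_demo, X_demo, y_demo))   # ~0.693 at theta = 0
    print("gradient:", gradient(theta_demo, X_demo, y_demo))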
| UTF-8 | Python | false | false | 537 | py | 13 | costFunction.py | 9 | 0.562384 | 0.547486 | 0 | 21 | 24.571429 | 68 |
stlk/django-toolbox | 9,972,914,078,870 | b3f882fa74abb4a77288efef32859a9373dcfb9a | 6773e3ee5045a26b065c0c3c4bc86670499306c7 | /django_toolbox/timing_middleware.py | b75420251eb4a1b8ab17c4e4d2fbabaf1f01c044 | [
"MIT"
]
| permissive | https://github.com/stlk/django-toolbox | 079bb051d4e2d1a03ace86cf4a347e92b597f96b | 146abdae59f7b27be7aaddce611faea91a0d4b69 | refs/heads/master | 2022-12-14T10:59:05.232092 | 2020-10-27T13:17:50 | 2020-10-27T13:17:50 | 156,200,470 | 2 | 0 | MIT | false | 2022-12-08T10:54:35 | 2018-11-05T10:34:03 | 2020-10-27T13:17:54 | 2022-12-08T10:54:34 | 135 | 2 | 0 | 4 | Python | false | false | import logging
from time import time
from django.conf import settings
timing_logger = logging.getLogger("django.request")
SETTINGS = {"EXCLUDED_PATHS": set(), "LONG_REQUEST_THRESHOLD": 1000}
SETTINGS.update(getattr(settings, "METRICS", {}))
SETTINGS["EXCLUDED_PATHS"] = {path.strip("/") for path in SETTINGS["EXCLUDED_PATHS"]}
class TimingMiddleware:
def __init__(self, get_response=None):
self.get_response = get_response
def process_request(self, request):
if request.path.strip("/") in SETTINGS["EXCLUDED_PATHS"]:
return request
setattr(request, "_metrics_start_time", time())
def process_response(self, request, response):
if hasattr(request, "_metrics_start_time"):
duration = time() - request._metrics_start_time
duration_ms = int(round(duration * 1000))
if duration_ms > SETTINGS["LONG_REQUEST_THRESHOLD"]:
timing_logger.warning(f"Long request. duration_ms: {duration_ms}")
else:
timing_logger.info(f"duration_ms: {duration_ms}")
return response
def __call__(self, request):
self.process_request(request)
response = self.get_response(request)
self.process_response(request, response)
return response
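# Example Django settings for this middleware (a sketch; the values and the
# dotted path are invented):
#
#   METRICS = {
#       "EXCLUDED_PATHS": {"health", "metrics"},
#       "LONG_REQUEST_THRESHOLD": 500,  # ms before a warning is logged
#   }
#
# and add "yourapp.timing_middleware.TimingMiddleware" to MIDDLEWARE.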
| UTF-8 | Python | false | false | 1,297 | py | 42 | timing_middleware.py | 32 | 0.643022 | 0.636854 | 0 | 38 | 33.131579 | 85 |
jukim-greenventory/python-practice | 6,923,487,311,827 | 23c6bfa0d5d455c337fba84d02dad25aeb1c5a87 | 2c0e1786044c2818be20062a1c8f75990c61ae26 | /argparse/6_example.py | b7376246bb9b011b5c909556861f002bc77de897 | []
| no_license | https://github.com/jukim-greenventory/python-practice | 51802bf354c36049f41f539778f576d7e9560305 | 4a1bb17c08204edf6954196c29f6e40a88274ef6 | refs/heads/master | 2023-05-31T21:15:37.770161 | 2021-06-10T12:38:57 | 2021-06-10T12:38:57 | 375,691,460 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
base form:
- parse_args(['option','value'])
other forms:
For long options (options with names longer than a single character),
the option and value can also be passed as a single command-line argument, using = to separate them:
- parse_args(['--option=value'])
For short options (options only one character long),
the option and its value can be concatenated:
- parse_args(['-optionvalue'])
About dest:
- dest allows a custom attribute name to be provided (here "accumulate" for the --sum option below)
About metavar:
- metavar only changes the displayed name - the name of the attribute on the parse_args() object is still
determined by the dest value.
About const:
- const holds a value that is not read from the command line; with
  action='store_const' (used below) that value is stored when the flag is seen.
About nargs:
N (an integer).
- N arguments from the command line will be gathered together into a list. For example:
'+'
- Just like '*', all command-line args present are gathered into a list.
Additionally, an error message will be generated if there wasn’t at least one command-line argument present.
'*'
- All command-line args present are gathered into a list; if the argument is
  absent, the value from default is produced.
'?'
- One argument will be consumed from the command line if possible, and produced as a single item.
If no command-line argument is present, the value from default will be produced.
"""
import argparse
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"integers", metavar="N", type=int, nargs="+", help="an integer for the accumulator"
)
parser.add_argument(
"--sum",
dest="accumulate",
action="store_const",
const=sum,
default=max,
help="sum the integers (default: find the max)",
)
args = parser.parse_args()
print(args)
print(args.accumulate(args.integers))
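# Example runs (assumed script name):
#   $ python 6_example.py 1 2 3 4
#   Namespace(accumulate=<built-in function max>, integers=[1, 2, 3, 4])
#   4
#   $ python 6_example.py 1 2 3 4 --sum
#   Namespace(accumulate=<built-in function sum>, integers=[1, 2, 3, 4])
#   10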
| UTF-8 | Python | false | false | 1,721 | py | 12 | 6_example.py | 11 | 0.680628 | 0.680628 | 0 | 48 | 34.8125 | 116 |
kevinkraydich/showerthoughts | 13,400,297,971,255 | 9d6ea8e73311b7fa51a370a5496771b7abbf45b1 | 5ea34937c95be4ee828b180ca2c753e2db80db45 | /bot/bot.py | 728552356c58c5fe88e099874f95e8d4ac36a873 | [
"MIT"
]
| permissive | https://github.com/kevinkraydich/showerthoughts | 9a551091ac0fa44ac8d44415a9df6d015a8acfbb | 7f312a7dc2670076282d62b20f655d1fb69698e0 | refs/heads/master | 2020-07-10T23:11:22.934416 | 2020-05-06T04:08:19 | 2020-05-06T04:08:19 | 204,393,095 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Python module for Reddit bot.
Kevin Kraydich <kraydich@umich.edu>
"""
# Import standard library modules
import os
import configparser
import sys
path = os.path.join(os.getcwd(), 'bot/', 'utils/')
sys.path.append(path)
# Import local modules
import scraper
import text
config = configparser.ConfigParser()
settings_path = os.path.join(os.getcwd(), 'bot/', 'settings.ini')
config.read(settings_path)
class Bot:
def __init__(self):
subreddit = config.get('default', 'subreddit_name')
num_posts = config.getint('default', 'max_posts')
print("Loading scraper")
self._scraper = scraper.Scraper(subreddit, num_posts)
self._numbers = []
def add_number(self, num):
self._numbers.append(num)
def send_texts(self):
print("Grabbing random showerthought")
contents = self._scraper.get_random_post()
for num in self._numbers:
msg = text.Text(num)
msg.send(contents)
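# Minimal usage sketch (not in the original file): the phone number is a
# placeholder, and Bot() expects bot/settings.ini to exist.
if __name__ == '__main__':
    bot = Bot()
    bot.add_number('+15555550100')  # hypothetical recipient
    bot.send_texts()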
| UTF-8 | Python | false | false | 928 | py | 6 | bot.py | 4 | 0.674569 | 0.674569 | 0 | 39 | 21.794872 | 65 |
FranciszekPin/gravity-field-simulator | 1,082,331,760,861 | 5359dd5aa0acc1d2b6ec6543fdf9244010df2ee9 | 5acc2960ce32fac00f55531e2d7ab0063dbd770c | /BallsManager.py | 740191eb80e03d1012b078f2ed0d26e3c3de968c | []
| no_license | https://github.com/FranciszekPin/gravity-field-simulator | ea1dabcd2dc2cb564e7a69d1f7fa2563ab7a0a8a | 603b2c16fa16bdfb5e16112639e736124b07e718 | refs/heads/main | 2023-01-24T18:00:50.312297 | 2020-12-10T19:07:34 | 2020-12-10T19:07:34 | 314,677,977 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy
from panda3d.core import LVector3
from physics.Ball import Ball
from physics.PhysicsManager import PhysicsManager
class BallsManager:
    speed = 100000  # simulation time-scale factor
    def __init__(self, showBase):
        self.showBase = showBase
        self.balls = []  # per-instance list, so separate managers don't share state
        self.add_balls_to_render()
self.showBase.taskMgr.add(self.update_balls_task, "updateBallsTask")
self.lastDeltaTime = 0
def add_balls_to_render(self):
""" Adds balls to simulator """
self.add_ball(numpy.array([0., 0., 0.]), numpy.array([0, 0.0, 0.0]), False, 333000, "sun")
self.add_ball(numpy.array([0., 0., 100]), numpy.array([0.0004713, 0, 0]), False, 1, "earth")
self.add_ball(numpy.array([0., 0., 50]), numpy.array([0.0006665, 0, 0]), False, 1, "mars")
self.add_ball(numpy.array([0., 0., 25]), numpy.array([0.0009426, 0, 0]), False, 1, "mercury")
def add_ball(self, position, velocity, static, mass, texName):
""" Adds chosen ball to simulator """
ball_model = self.showBase.loader.loadModel("models/ball")
ball_model.setScale(5)
if texName == "sun":
tex = self.showBase.loader.loadTexture("models/sun_1k_tex.jpg")
ball_model.setScale(15)
elif texName == "earth":
tex = self.showBase.loader.loadTexture("models/earth_1k_tex.jpg")
elif texName == "mercury":
tex = self.showBase.loader.loadTexture("models/mercury_1k_tex.jpg")
elif texName == "mars":
tex = self.showBase.loader.loadTexture("models/mars_1k_tex.jpg")
elif texName == "moon":
tex = self.showBase.loader.loadTexture("models/moon_1k_tex.jpg")
ball_model.setScale(2)
        else:
            tex = None
        if tex is not None:
            ball_model.setTexture(tex)
ball_model.reparentTo(self.showBase.render)
ball_model.setPos(LVector3(position[0], position[1], position[2]))
self.balls.append(Ball(position, ball_model, static, velocity, mass))
def add_planet_square(self, position, radius, distance_between_balls):
""" Colors the area with small balls """
for x in numpy.arange(-radius, radius + 1e-9, distance_between_balls):
for y in numpy.arange(-radius, radius + 1e-9, distance_between_balls):
self.add_ball(numpy.array([x+position[0], 0.0, y+position[2]]), numpy.array([0.00001, 0.0, 0.0]), False, 1000, "earth")
def update_balls_task(self, task):
""" Takes care of changing phsyics values of all objects """
delta_time = (task.time - self.lastDeltaTime) * self.speed
PhysicsManager().update(self.balls, delta_time)
self.update_balls_positions(delta_time)
self.lastDeltaTime = task.time
return task.cont
def update_balls_positions(self, delta_time):
""" Moves all balls by their velocities """
for ball in self.balls:
ball.move(LVector3(ball.velocity[0], ball.velocity[1], ball.velocity[2]) * delta_time)
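# Usage sketch (not part of the original file; assumes the physics package
# and the models/ assets are available):
#
#   from direct.showbase.ShowBase import ShowBase
#   base = ShowBase()
#   manager = BallsManager(base)
#   base.run()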
| UTF-8 | Python | false | false | 2,995 | py | 7 | BallsManager.py | 5 | 0.620033 | 0.583306 | 0 | 72 | 40.597222 | 135 |
giorgiberia/gitlearn | 16,423,954,974,636 | f20b1b4fc502b3788ef41b7ad8af13860ea65ab8 | 71c6a5dcf59a9388e23c468ad716ab807454bb8a | /app/OrbiUser/managers.py | 46092bf856f6ee3cb4561fb73306274866bf6c0d | [
"MIT"
]
| permissive | https://github.com/giorgiberia/gitlearn | 0b63c21d93ae7b7313b2634294d579e75cc772f7 | 447b1ccd6279bd5accf92336c446e3340d8e8181 | refs/heads/master | 2020-06-21T07:26:20.586610 | 2019-07-18T11:49:52 | 2019-07-18T11:49:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
    def create_user(self, username, fullname, date_of_birth, password=None):
user = self.model(
username=username,
date_of_birth=date_of_birth,
fullname=fullname,
)
user.set_password(password)
user.save(using=self._db)
return user
    def create_staffuser(self, username, fullname, date_of_birth, password):
        user = self.create_user(
            username=username,
            password=password,
            date_of_birth=date_of_birth,
            fullname=fullname,
        )
user.staff = True
user.save(using=self._db)
return user
    def create_superuser(self, username, fullname, date_of_birth, password):
user = self.create_user(
username=username,
password=password,
date_of_birth=date_of_birth,
fullname=fullname,
)
user.staff = True
user.admin = True
user.save(using=self._db)
return user
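# Typical wiring (a sketch; the custom User model below is assumed, not shown):
#
#   class User(AbstractBaseUser):
#       ...
#       objects = UserManager()
#
#   User.objects.create_superuser('admin', 'Site Admin', '1990-01-01', 's3cret')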
| UTF-8 | Python | false | false | 1,085 | py | 7 | managers.py | 6 | 0.588018 | 0.588018 | 0 | 37 | 28.324324 | 75 |
NeerajaLanka/100daysofcode | 8,083,128,464,577 | f2a4ca0811ea330f58ee483e495a6a5dade187c7 | 103d5c372371ceb8235becd0b66e504594b61e4b | /Day15coffee_Machine.py | 5913a299df8cb792ea98207f1dd87093581e2151 | []
| no_license | https://github.com/NeerajaLanka/100daysofcode | 29dc194742a9b14565e50f8674724aed85380f18 | 5a42fa02c773f4949ad1acc07c88cef9c702f088 | refs/heads/master | 2023-07-09T04:45:02.757056 | 2021-08-19T19:56:06 | 2021-08-19T19:56:06 | 327,458,893 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #flavours = input("what would you like?(espresso/latte/cappuccino:")
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
#print(MENU["cappuccino"]["ingredients"]["milk"])
resources = {
"water": 25,
"milk": 200,
"coffee": 100,
}
for name, item in MENU.items():
    print(name, item)
c_type = input("which type do you want?")
def compare(drink):
    """Report whether the resources cover the chosen drink's ingredients."""
    if drink not in MENU:
        print("not possible")
        return
    for item, amount in MENU[drink]["ingredients"].items():
        if resources.get(item, 0) < amount:
            print("sorry, not enough " + item)
            return
    print("enjoy your coffee")
compare(c_type)
| UTF-8 | Python | false | false | 1,384 | py | 92 | Day15coffee_Machine.py | 92 | 0.427023 | 0.402457 | 0 | 59 | 22.457627 | 68 |
Aasthaengg/IBMdataset | 16,466,904,660,324 | e8ef5e615f58b78dd2d64dde919b0c2670160012 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02665/s564559024.py | 0cbbd9ba9d053be1d913e709cc91d4285f6558f2 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=int(input())
l=list(map(int,input().split()))
k=l[n]
count=1
sumleaf=1
flag=True
i0=0
suml=[]
for i in l:
i0+=i
suml.append(i0)
if l[0]>2 or l[0]==1 and n>=1:
print(-1)
flag=False
elif n==0 and l[0]!=1:
print(-1)
flag=False
else:
for i in range(1,n+1):
sumleaf*=2
if i==n and sumleaf<k:
flag=False
print(-1)
break
elif i==n:
count+=k
else:
if sumleaf<=l[i]:
flag=False
print(-1)
break
else:
sumlnew=suml[n]-suml[i-1]
count+=min(sumlnew,sumleaf)
sumleaf=min(sumlnew,sumleaf)-l[i]
if flag:
print(count) | UTF-8 | Python | false | false | 851 | py | 202,060 | s564559024.py | 202,055 | 0.398355 | 0.372503 | 0 | 44 | 18.363636 | 49 |
imfede/py-project-euler | 7,112,465,883,775 | 152c206c9634cb556ac378c3d9b0d1a45fa7bc87 | cceeebcfe1df8a849216917217ee413d1ed715b2 | /003.py | 0508dec0e94bb45a4a52b572d406fd9614021d6c | []
| no_license | https://github.com/imfede/py-project-euler | d97861bde90a0bde74041aa29beb071cec6921ac | 6aaf5daf7be962db35bb1284915da76b396bb672 | refs/heads/master | 2021-01-01T19:24:48.072243 | 2015-07-25T13:53:05 | 2015-07-25T13:53:05 | 35,602,711 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# what is the largest prime factor of 600851475143
import math
def isPrime(n):
x = 2
    while x * x <= n:
if( n%x == 0):
return False
x += 1
return True
def number_from_factors(factors):
p = 1
for i in range( len(factors) ):
p *= factors[i]
return p
n = 600851475143
n2 = n
factors = []
i = 2
while( number_from_factors(factors) != n2 ):
if( isPrime(i) and n%i==0 ):
factors.append(i)
n = n // i
i = 2
else:
i += 1
print( max( factors ) )
| UTF-8 | Python | false | false | 580 | py | 39 | 003.py | 39 | 0.525862 | 0.467241 | 0 | 33 | 16.575758 | 50 |
Jorsorokin/JAGA_software | 6,064,493,823,488 | e571566f43818b29c269cf50979b38f23c887c73 | e713cdc626f2d1bc97cbc8744a071bca0b66b823 | /Python/packet_buffer.py | b196729cfaa9258a72e3d1573f589769a681b58c | []
| no_license | https://github.com/Jorsorokin/JAGA_software | bc605f48ae08a1f9c1954e64fbd88bfa25a4f481 | b9b6efd6aaef5a163af0e1c1197655dd439c9f64 | refs/heads/master | 2021-01-19T15:14:35.674545 | 2017-04-16T19:43:16 | 2017-04-16T19:43:16 | 88,206,238 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Markus De Shon'
import copy
import packet
import sys
class PacketBuffer():
MIN_FILL = 100
MAX_FILL = 1000
SKIP_COUNT = 10 # Skip initial packets when calculating start time because their timing is odd.
LATENCY = 0.003 # Latency of packet delivery on the network.
def __init__(self, filename, debug=False):
self.fh = open(filename, 'rb')
self.channels = None
self.first_seconds = None
self.last_seconds = None
self.first_sample_count = None
self.last_sample_count = None
self.start_time = None
self.total_lost = 0
self.crc_received = 0
self.samples_per_packet = None
self.samples_per_second = None
self.packets_received = 0
self.packets_recovered = 0
self.packet_array = []
self.packet_array_index = 0
self.lost_since_last_crc = 0
self.debug = debug
def __del__(self):
self.fh.close()
def __iter__(self):
return self
def next(self):
if len(self.packet_array) < PacketBuffer.MIN_FILL:
self._fill_array()
try:
p = self.packet_array.pop(0) # FIFO
if (self.first_seconds is not None and self.first_sample_count is not None
and self.start_time is not None):
p.set_start_time(start_time=self.start_time, first_sequence=self.first_sample_count,
first_seconds=self.first_seconds)
return p
except IndexError:
# No packets left.
raise StopIteration
def _read_next(self):
try:
p = packet.Packet(self.fh, channels=self.channels)
if p.crc:
self.crc_received += 1
return p, 0
if self.channels is None and p.channels:
self.channels = p.channels
self.samples_per_packet = p.samples_per_packet
self.samples_per_second = p.samples_per_second
self.packets_received += 1
lost_packets = 0
if p.V0:
if self.last_seconds:
lost_packets = self._count_lost_packets(p)
self.last_seconds = p.seconds
self.last_sample_count = p.sample_count
else: # V3 packets
if self.last_sample_count:
lost_packets = self._count_lost_packets(p)
self.last_sample_count = p.sample_count
return p, lost_packets
except ValueError: # Ran out of data.
return None, 0
except AttributeError:
# The first packet in the file is a CRC packet, so self.channels is not set yet.
sys.stderr.write("WARNING: First packet was a CRC, seeking next packet to continue.\n")
self._seek_channels()
assert(self.channels is not None)
# We've set self.channels from the next packet, so try again to read the leading CRC packet.
return self._read_next()
def _seek_channels(self):
# The first packet was a CRC, so we need to find the channel count in the next packet.
for length in packet.Packet.possible_packet_lengths_v3():
self.fh.seek(length)
try:
sys.stderr.write("Trying offset " + str(length) + "\n")
p = packet.Packet(self.fh)
sys.stderr.write("Next packet header was found... continuing.\n")
self.channels = p.channels
# We found the right offset and set the self.channels value, so go back to the beginning.
self.fh.seek(0)
return
except Exception as e:
sys.stderr("WARNING: Exception received: " + e.message + "\n")
# Try the next offset.
continue
sys.stderr.write("ERROR: Next packet header could not be found. Exiting.\n")
sys.exit(1)
def _count_lost_packets(self, p):
assert (not p.crc)
if p.V0:
increment = self.samples_per_second * (p.seconds - self.last_seconds)
increment += p.sample_count - self.last_sample_count
else:
increment = p.sample_count - self.last_sample_count
lost = (increment / self.samples_per_packet) - 1
if self.debug and lost > 0:
sys.stderr.write(str(lost) + " packets lost.\n")
        return lost
def _fill_array(self):
p, lost_packets = self._read_next()
timestamps = []
if self.first_sample_count is None:
if p.V0:
self.first_seconds = 0
self.first_sample_count = (p.seconds * p.samples_per_second) + p.sample_count
else:
self.first_seconds = 0
self.first_sample_count = p.sample_count
while p and len(self.packet_array) < PacketBuffer.MAX_FILL - 1:
array_length = len(self.packet_array)
timestamps.append(p.timestamp)
if lost_packets > 0:
self.lost_since_last_crc += lost_packets
self.total_lost += lost_packets
if p.crc:
if (array_length > p.crc_interval) and self.lost_since_last_crc > 0:
# Reconstruct a single missing packet. Need at least 1 packet of history before window.
if self.debug:
sys.stderr.write("Attempting to reconstruct packet using CRC.\n")
start_window = array_length - p.crc_interval + 1 # Only works for one lost packet.
end_window = array_length
if self.debug:
sys.stderr.write("start_window " + str(start_window) + " end_window "
+ str(end_window) + "\n")
(missing_index, missing_count, xor) = self._xor_over_window(start_window, end_window)
if self.debug:
sys.stderr.write("missing_index " + str(missing_index) + " missing_count "
+ str(missing_count) + "\n")
if missing_count == 1 and missing_index and start_window <= missing_index <= end_window:
if self.debug:
sys.stderr.write("Single missing packet found before sample_count "
+ str(self.packet_array[missing_index].sample_count) + "\n")
self._reconstruct_packet(p, missing_index, xor=xor)
else:
if self.debug:
sys.stderr.write("Could not reconstruct packet, missing " + str(missing_count)
+ " packets.\n")
self.lost_since_last_crc = 0
else:
self.packet_array.append(p)
p, lost_packets = self._read_next()
if p and not p.crc:
self.packet_array.append(p) # The last valid packet that was read.
if not self.start_time:
if len(timestamps) > PacketBuffer.MIN_FILL * 2:
timestamps = timestamps[PacketBuffer.MIN_FILL:] # Drop the first MIN_FILL if we can.
self.start_time = (sum(timestamps) / float(len(timestamps))) - PacketBuffer.LATENCY
def _xor_over_window(self, start_window, end_window):
xor = None
missing_index = None
missing_count = 0
for i in range(start_window, end_window):
            interval = (self.packet_array[i].sample_count
                        - self.packet_array[i - 1].sample_count)
            if (interval / self.samples_per_packet) - 1 == 1:
missing_index = i
missing_count += 1
if xor:
self._xor_arrays(xor, self.packet_array[i].all_samples)
else:
# xor not defined yet, set to the samples from the first packet.
xor = copy.deepcopy(self.packet_array[i].all_samples)
return missing_index, missing_count, xor
def _reconstruct_packet(self, crc_packet, missing_index=None, xor=None, start_window=None, end_window=None):
# Reconstruct packet that would have appeared before missing_index within window
# Two usage modes:
# 1) set xor, missing_index and optionally start_window, end_window OR
# 2) only set start_window, end_window (need to calculate xor and find missing_index).
if not xor:
# Need to calculate xor over the window.
assert (start_window is not None and start_window >= 1)
assert (end_window is not None and end_window >= start_window)
(missing_index, missing_count, xor) = self._xor_over_window(start_window, end_window)
new_packet = copy.deepcopy(self.packet_array[missing_index - 1]) # Start with an existing packet.
new_packet.timestamp = (self.packet_array[missing_index].timestamp + self.packet_array[
missing_index - 1].timestamp) / 2.0
new_packet.sample_count = new_packet.sample_count + self.samples_per_packet
self._xor_arrays(xor, crc_packet.all_samples)
new_packet.all_samples = xor
self.packet_array.insert(missing_index, new_packet)
self.packets_recovered += 1
def _xor_arrays(self, a, b):
assert (len(a) == len(b))
for i in range(len(b)):
for j in range(len(b[i])):
a[i][j] ^= b[i][j]
| UTF-8 | Python | false | false | 9,654 | py | 7 | packet_buffer.py | 6 | 0.553967 | 0.547856 | 0 | 206 | 45.864078 | 112 |
j5int/pyslet | 18,098,992,196,401 | f7e5e2626116048929ddd21136f9b4c7f88b2bc8 | c2b35211fab6b1f8a4a51b3245f540512b76cc6c | /pyslet/streams.py | 65badddf89dac569bf46f0a56455a1b6520ff38a | [
"BSD-3-Clause"
]
| permissive | https://github.com/j5int/pyslet | 08ed591f8ca6347225d9404c4fe16f8afc03d476 | ef27dd6bb6fbd6d47687a349508cd4ab2989a0ad | refs/heads/master | 2021-05-23T11:30:31.713515 | 2016-07-25T09:10:32 | 2016-07-25T09:10:32 | 68,193,782 | 2 | 2 | null | true | 2021-04-06T09:14:05 | 2016-09-14T09:40:27 | 2016-10-19T11:15:54 | 2021-04-06T09:14:05 | 7,646 | 0 | 2 | 0 | Python | false | false | #! /usr/bin/env python
"""This module add some useful stream classes"""
import errno
import io
import logging
import os
import threading
import time
from .py26 import memoryview
if hasattr(errno, 'WSAEWOULDBLOCK'):
_blockers = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK))
else:
_blockers = set((errno.EAGAIN, errno.EWOULDBLOCK))
def io_blocked(err):
"""Returns True if IO operation is blocked
err
An IOError exception (or similar object with errno attribute).
Bear in mind that EAGAIN and EWOULDBLOCK are not necessarily the
same value and that when running under windows WSAEWOULDBLOCK may be
raised instead. This function removes this complexity making it
easier to write cross platform non-blocking IO code."""
return err.errno in _blockers
if hasattr(errno, 'WSAETIMEDOUT'):
_timeouts = set((errno.ETIMEDOUT, errno.WSAETIMEDOUT))
else:
_timeouts = set((errno.ETIMEDOUT, ))
def io_timedout(err):
"""Returns True if an IO operation timed out
err
An IOError exception (or similar object with errno attribute).
Tests for ETIMEDOUT and when running under windows WSAETIMEDOUT
too."""
return err.errno in _timeouts
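# Usage sketch for the two helpers above (the socket and handler names are
# illustrative only):
#
#   try:
#       data = sock.recv(4096)
#   except IOError as e:
#       if io_blocked(e):
#           pass            # no data yet; retry later
#       elif io_timedout(e):
#           handle_timeout()
#       else:
#           raise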
class BufferedStreamWrapper(io.RawIOBase):
"""A buffered wrapper for file-like objects.
src
A file-like object, we only require a read method
buffsize
The maximum size of the internal buffer
On construction the src is read until an end of file condition is
encountered or until buffsize bytes have been read. EOF is signaled
by an empty string returned by src's read method. Instances then
behave like readable streams transparently reading from the buffer
or from the remainder of the src as applicable.
Instances behave differently depending on whether or not the entire
src is buffered. If it is they become seekable and set a value for
the length attribute. Otherwise they are not seekable and the
length attribute is None.
If src is a non-blocking data source and it becomes blocked,
indicated by read returning None rather than an empty string, then
the instance reverts to non-seekable behaviour."""
def __init__(self, src, buffsize=io.DEFAULT_BUFFER_SIZE):
self.src = src
self.buff = io.BytesIO()
self.bsize = 0
self.overflow = False
self.length = None
while True:
nbytes = buffsize - self.bsize
if nbytes <= 0:
# we've run out of buffer space
self.overflow = True
break
data = src.read(nbytes)
if data is None:
# blocked, treat as overflow
self.overflow = True
break
elif data:
self.buff.write(data)
self.bsize += len(data)
else:
# EOF
break
self.pos = 0
self.buff.seek(0)
if not self.overflow:
self.length = self.bsize
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return not self.overflow
def tell(self):
if self.overflow:
raise io.UnsupportedOperation
else:
return self.pos
def seek(self, offset, whence=io.SEEK_SET):
if self.overflow:
raise io.UnsupportedOperation
elif whence == io.SEEK_SET:
self.pos = offset
elif whence == io.SEEK_CUR:
self.pos += offset
elif whence == io.SEEK_END:
self.pos = self.length + offset
else:
raise ValueError("unrecognized whence value in seek: %s" %
repr(whence))
self.buff.seek(self.pos)
def readinto(self, b):
if self.pos < self.bsize:
# read from the buffer
data = self.buff.read(len(b))
elif self.overflow:
# read from the original source
data = self.src.read(len(b))
if data is None:
# handle blocked read
return None
else:
# end of file
data = b''
self.pos += len(data)
b[0:len(data)] = data
return len(data)
def peek(self, nbytes):
"""Read up to nbytes without advancing the position
If the stream is not seekable and we have read past the end of
the internal buffer then an empty string will be returned."""
if self.pos < self.bsize:
data = self.buff.read(nbytes)
# reset the position of the buffer
self.buff.seek(self.pos)
return data
else:
return b''
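# Quick illustration (not part of the module): a source small enough to be
# fully buffered becomes seekable and reports its length.
#
#   src = io.BytesIO(b"0123456789")
#   w = BufferedStreamWrapper(src, buffsize=64)
#   assert w.seekable() and w.length == 10
#   head = w.peek(4)    # b'0123', position unchanged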
class Pipe(io.RawIOBase):
"""Buffered pipe for inter-thread communication
The purpose of this class is to provide a thread-safe buffer to use
for communicating between two parts of an application that support
non-blocking io while reducing to a minimum the amount of
byte-copying that takes place.
Essentially, write calls with immutable byte strings are simply
cached without copying (and always succeed) enabling them to be
passed directly through to the corresponding read operation in
streaming situations. However, to improve flow control a canwrite
method is provided to help writers moderate the amount of data that
has to be held in the buffer::
# data producer thread
while busy:
wmax = p.canwrite()
if wmax:
data = get_at_most_max_bytes(wmax)
p.write(data)
else:
# do something else while the pipe is blocked
spin_the_beach_ball()
bsize
The buffer size, this is used as a guide only. When writing
immutable bytes objects to the pipe the buffer size may be
exceeded as these can simply be cached and returned directly to
the reader more efficiently than slicing them up just to adhere
to the buffer size. However, if the buffer already contains
bsize bytes all calls to write will block or return None.
Defaults to io.DEFAULT_BUFFER_SIZE.
rblocking
Controls the blocking behaviour of the read end of this pipe.
True indicates reads may block waiting for data, False that they
will not and read may return None. Defaults to True.
wblocking
Controls the blocking behaviour of the write end of the this
pipe. True indicates writes may block waiting for data, False
that they will not and write may return None. Defaults to True.
timeout
The number of seconds before a blocked read or write operation
will timeout. Defaults to None, which indicates 'wait forever'.
A value of 0 is not the same as placing both ends of the pipe in
non-blocking mode (though the effect may be similar).
name
An optional name to use for this pipe, the name is used when
raising errors and when logging"""
def __init__(self, bsize=io.DEFAULT_BUFFER_SIZE,
rblocking=True, wblocking=True, timeout=None,
name=None):
#: the name of the pipe
self.name = name
# the maximum buffer size, used for flow control, this
# is not a hard limit
self.max = bsize
# buffered strings of bytes
self.buffer = []
# the total size of all strings in the buffer
self.bsize = 0
# offset into self.buffer[0]
self.rpos = 0
# eof indicator
self._eof = False
self.rblocking = rblocking
# an Event that flags the arrival of a reader
self.rflag = None
self.wblocking = wblocking
# timeout duration
self.timeout = timeout
# lock for multi-threading
self.lock = threading.Condition()
# state values used for monitoring changes
self.wstate = 0
self.rstate = 0
super(Pipe, self).__init__()
def __repr__(self):
if self.name:
return self.name
else:
return super(Pipe, self).__repr__()
def close(self):
"""closed the Pipe
This implementation works on a 'reader closes' principle. The
writer should simply write the EOF marker to the Pipe (see
:meth:`write_eof`.
If the buffer still contains data when it is closed a warning is
logged."""
# throw away all data
logging.debug("Pipe.close %s", repr(self))
with self.lock:
if self.buffer:
logging.warning("Pipe.close for %s discarded non-empty buffer",
repr(self))
self.buffer = []
self.bsize = 0
self.rpos = 0
self._eof = True
# kill anyone waiting
self.rstate += 1
self.wstate += 1
self.lock.notify_all()
super(Pipe, self).close()
# if someone is waiting for a reader - wake them as the reader
# will never come
if self.rflag is not None:
self.rflag.set()
def readable(self):
"""Pipe's are always readable"""
return True
def writable(self):
"""Pipe's are always writeable"""
return True
def readblocking(self):
"""Returns True if reads may block"""
return self.rblocking
def set_readblocking(self, blocking=True):
"""Sets the readblocking mode of the Pipe.
blocking
A boolean, defaults to True indicating that reads may
block."""
with self.lock:
self.rblocking = blocking
def writeblocking(self):
"""Returns True if writes may block"""
return self.wblocking
def set_writeblocking(self, blocking=True):
"""Sets the writeblocking mode of the Pipe.
blocking
A boolean, defaults to True indicating that writes may
block."""
with self.lock:
self.wblocking = blocking
def wait(self, timeout, method):
if timeout is not None:
tstart = time.time()
with self.lock:
while not method():
if timeout is None:
twait = None
else:
twait = (tstart + timeout) - time.time()
if twait < 0:
logging.warning("Pipe.wait timedout on %s", repr(self))
raise IOError(errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.server.Pipe.wait")
logging.debug("Pipe.wait waiting for %s", repr(self))
self.lock.wait(twait)
def empty(self):
"""Returns True if the buffer is currently empty"""
with self.lock:
if self.buffer:
return False
else:
return True
def buffered(self):
"""Returns the number of buffered bytes in the Pipe"""
with self.lock:
return self.bsize - self.rpos
def canwrite(self):
"""Returns the number of bytes that can be written.
This value is the number of bytes that can be written in a
single non-blocking call to write. 0 indicates that the pipe's
buffer is full. A call to write may accept more than this but
*the next* call to write will always accept at least this many.
This class is fully multithreaded so in situations where there
are multiple threads writing this call is of limited use.
If called on a pipe that has had the EOF mark written then
IOError is raised."""
with self.lock:
if self.closed or self._eof:
raise IOError(
errno.EPIPE,
"canwrite: can't write past EOF on Pipe object")
wlen = self.max - self.bsize + self.rpos
if wlen <= 0:
wlen = 0
if self.rflag is not None:
self.rflag.clear()
return wlen
def set_rflag(self, rflag):
"""Sets an Event triggered when a reader is detected.
rflag
An Event instance from the threading module.
The event will be set each time the Pipe is read. The flag may
be cleared at any time by the caller but as a convenience it
will always be cleared when :py:meth:`canwrite` returns 0.
The purpose of this flag is to allow a writer to use a custom
event to monitor whether or not the Pipe is ready to be written.
If the Pipe is full then the writer will want to wait on this
flag until a reader appears before attempting to write again.
Therefore, when canwrite indicates that the buffer is full it
makes sense that the flag is also cleared.
If the pipe is closed then the event is set as a warning that
the pipe will never be read. (The next call to write will
then fail.)"""
with self.lock:
self.rflag = rflag
if self.closed:
self.rflag.set()
def write_wait(self, timeout=None):
"""Waits for the pipe to become writable or raises IOError
timeout
Defaults to None: wait forever. Otherwise the maximum
number of seconds to wait for."""
self.wait(timeout, self.canwrite)
def flush_wait(self, timeout=None):
"""Waits for the pipe to become empty or raises IOError
timeout
Defaults to None: wait forever. Otherwise the maximum
number of seconds to wait for."""
self.wait(timeout, self.empty)
def canread(self):
"""Returns True if the next call to read will *not* block.
False indicates that the pipe's buffer is empty and that a call
to read will block.
Note that if the buffer is empty but the EOF signal has been
given with :py:meth:`write_eof` then canread returns True! The
next call to read will not block but return an empty string
indicating the EOF."""
with self.lock:
if self.closed:
raise IOError(
errno.EPIPE, "can't read from a closed Pipe object")
if self.buffer or self._eof:
return True
else:
return False
def read_wait(self, timeout=None):
"""Waits for the pipe to become readable or raises IOError
timeout
Defaults to None: wait forever. Otherwise the maximum
number of seconds to wait for."""
self.wait(timeout, self.canread)
def write(self, b):
"""writes data to the pipe
The implementation varies depending on the type of b. If b is
an immutable bytes object then it is accepted even if this
overfills the internal buffer (as it is not actually copied).
If b is a bytearray then data is copied, up to the maximum
buffer size."""
if self.timeout is not None:
tstart = time.time()
with self.lock:
if self.closed or self._eof:
raise IOError(errno.EPIPE,
"write: can't write past EOF on Pipe object")
if isinstance(b, memoryview):
# catch memory view objects here
b = b.tobytes()
wlen = self.max - self.bsize + self.rpos
while wlen <= 0:
# block on write or return None
if self.wblocking:
if self.timeout is None:
twait = None
else:
twait = (tstart + self.timeout) - time.time()
if twait < 0:
logging.warning("Pipe.write timed out for %s",
repr(self))
raise IOError(errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.server.Pipe.write")
logging.debug("Pipe.write waiting for %s", repr(self))
self.lock.wait(twait)
# check for eof again!
if self.closed or self._eof:
raise IOError(errno.EPIPE,
"write: EOF or pipe closed after wait")
# recalculate the writable space
wlen = self.max - self.bsize + self.rpos
else:
return None
if isinstance(b, bytes):
nbytes = len(b)
if nbytes:
self.buffer.append(b)
self.bsize += nbytes
self.wstate += 1
self.lock.notify_all()
return nbytes
elif isinstance(b, bytearray):
nbytes = len(b)
if nbytes > wlen:
nbytes = wlen
# partial copy, creates transient bytearray :(
self.buffer.append(bytes(b[:nbytes]))
else:
self.buffer.append(bytes(b))
self.bsize += nbytes
self.wstate += 1
self.lock.notify_all()
return nbytes
else:
raise TypeError(repr(type(b)))
def write_eof(self):
"""Writes the EOF flag to the Pipe
Any waiting readers are notified and will wake to process the
Pipe. After this call the Pipe will not accept any more data."""
with self.lock:
self._eof = True
self.wstate += 1
self.lock.notify_all()
def flush(self):
"""flushes the Pipe
        The intention of flush is to push any written data out to the
destination, in this case the thread that is reading the data.
In write-blocking mode this call will wait until the buffer is
empty, though if the reader is idle for more than
:attr:`timeout` seconds then it will raise IOError.
In non-blocking mode it simple raises IOError with EWOULDBLOCK
if the buffer is not empty.
Given that flush is called automatically by :meth:`close` for
classes that inherit from the base io classes our implementation
of close discards the buffer rather than risk an exception."""
if self.timeout is not None:
tstart = time.time()
with self.lock:
blen = self.bsize - self.rpos
while self.buffer:
if self.wblocking:
if self.timeout is None:
twait = None
else:
new_blen = self.bsize - self.rpos
if new_blen < blen:
# making progress, restart the clock
blen = new_blen
tstart = time.time()
twait = (tstart + self.timeout) - time.time()
if twait < 0:
logging.warning("Pipe.flush timed out for %s",
repr(self))
logging.debug("Pipe.flush found stuck data: %s",
repr(self.buffer))
raise IOError(errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.server.Pipe.flush")
logging.debug("Pipe.flush waiting for %s", repr(self))
self.lock.wait(twait)
else:
raise io.BlockingIOError(
errno.EWOULDBLOCK,
"Pipe.flush write blocked on %s" % repr(self))
def readall(self):
"""Overridden to take care of non-blocking behaviour.
Warning: readall always blocks until it has read EOF, regardless
of the rblocking status of the Pipe.
The problem is that, if the Pipe is set for non-blocking reads
then we seem to have the choice of returning a partial read (and
failing to signal that some of the data is still in the pipe) or
raising an error and losing the partially read data.
Perhaps ideally we'd return None indicating that we are blocked
from reading the entire stream but this isn't listed as a
possible return result for io.RawIOBase.readall and it would be
tricky to implement anyway as we still need to deal with
partially read data.
Ultimately the safe choices are raise an error if called on a
non-blocking Pipe or simply block. We do the latter on the
basis that anyone calling readall clearly intends to wait.
For a deep discussion of the issues around non-blocking behaviour
see http://bugs.python.org/issue13322"""
data = []
with self.lock:
save_rblocking = self.rblocking
try:
self.rblocking = True
while True:
part = self.read(io.DEFAULT_BUFFER_SIZE)
if not part:
# end of stream
return b''.join(data)
else:
data.append(part)
finally:
                self.rblocking = save_rblocking
def _consolidate_buffer(self):
with self.lock:
if self.buffer:
if self.rpos:
self.buffer[0] = self.buffer[0][self.rpos:]
self.rpos = 0
self.buffer = [b''.join(self.buffer)]
def readmatch(self, match=b'\r\n'):
"""Read until a byte string is matched
match
A binary string, defaults to CRLF.
This operation will block if the string is not matched unless
the buffer becomes full without a match, in which case IOError
is raised with code ENOBUFS."""
with self.lock:
pos = -1
while pos < 0:
if self.buffer:
# take care of a special case first
pos = self.buffer[0].find(match, self.rpos)
if pos < 0:
# otherwise consolidate the buffer
self._consolidate_buffer()
pos = self.buffer[0].find(match) # rpos is now 0
if pos >= 0:
src = self.buffer[0]
result = src[self.rpos:pos + len(match)]
self.rpos += len(result)
if self.rpos >= len(src):
# discard src
self.buffer = self.buffer[1:]
self.bsize = self.bsize - len(src)
self.rpos = 0
self.rstate += 1
# success, set the reader flag
if self.rflag is not None:
self.rflag.set()
self.lock.notify_all()
return result
else:
if self._eof:
return b''
# not found, should we block?
if self.canwrite():
# no match, but the buffer is not full so
# set the reader flag to indicate that we
# are now waiting to accept data.
if self.rflag is not None:
self.rflag.set()
if self.rblocking:
# we wait for something to happen on the
# Pipe hopefully a write operation!
cstate = self.wstate
logging.debug("Pipe.readmatch waiting for %s",
repr(self))
self.lock.wait(self.timeout)
if self.wstate == cstate:
logging.warning(
"Pipe.readmatch timed out for %s",
repr(self))
raise IOError(
errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.server.Pipe.readmatch")
# go round the loop again
else:
# non-blocking readmatch returns None
return None
else:
# we can't write so no point in waiting
raise IOError(errno.ENOBUFS,
os.strerror(errno.ENOBUFS),
"pyslet.http.server.Pipe.readmatch")
def read(self, nbytes=-1):
"""read data from the pipe
May return fewer than nbytes if the result can be returned
without copying data. Otherwise :meth:`readinto` is used."""
if nbytes < 0:
return self.readall()
else:
with self.lock:
if self.buffer and self.rpos == 0:
# take care of one special case
src = self.buffer[0]
if len(src) <= nbytes:
self.buffer = self.buffer[1:]
self.bsize = self.bsize - len(src)
self.rstate += 1
# successful read
if self.rflag is not None:
self.rflag.set()
self.lock.notify_all()
return src
b = bytearray(nbytes)
nbytes = self.readinto(b)
if nbytes is None:
return None
else:
return bytes(b[:nbytes])
def readinto(self, b):
"""Reads data from the Pipe into a bytearray.
Returns the number of bytes read. 0 indicates EOF, None
indicates an operation that would block in a Pipe that is
non-blocking for read operations. May return fewer bytes than
would fit into the bytearray as it returns as soon as it has at
least some data."""
if self.timeout is not None:
tstart = time.time()
with self.lock:
nbytes = len(b)
# we're now reading
if self.rflag is not None:
self.rflag.set()
while not self.buffer:
if self._eof:
return 0
elif self.rblocking:
if self.timeout is None:
twait = None
else:
twait = (tstart + self.timeout) - time.time()
if twait < 0:
logging.warning("Pipe.read timed out for %s",
repr(self))
raise IOError(errno.ETIMEDOUT,
os.strerror(errno.ETIMEDOUT),
"pyslet.http.server.Pipe.read")
logging.debug("Pipe.read waiting for %s", repr(self))
self.lock.wait(twait)
else:
return None
src = self.buffer[0]
rlen = len(src) - self.rpos
if rlen < nbytes:
nbytes = rlen
if nbytes:
b[:nbytes] = src[self.rpos:self.rpos + nbytes]
self.rpos += nbytes
if self.rpos >= len(src):
# discard src
self.buffer = self.buffer[1:]
self.bsize = self.bsize - len(src)
self.rpos = 0
if nbytes:
self.rstate += 1
self.lock.notify_all()
return nbytes
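# End-to-end sketch (not part of the module): one thread writes, another
# reads; the names below are illustrative only.
#
#   p = Pipe(timeout=5)
#   def produce():
#       p.write(b"hello")
#       p.write_eof()
#   threading.Thread(target=produce).start()
#   data = p.readall()   # -> b"hello"
#   p.close()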
| UTF-8 | Python | false | false | 28,375 | py | 132 | streams.py | 101 | 0.530044 | 0.52793 | 0 | 750 | 36.833333 | 79 |
rrsilaya/springer_books | 11,854,109,750,593 | 0158b56aace7fda6db770cab22e3486d94f2b7d1 | cf865985f3513142725904ec4297ef189563ce74 | /downloader.py | c0e411ec2dcdf28bee76ae9214606e76eda6eafa | []
| no_license | https://github.com/rrsilaya/springer_books | 95ed77605e43f09fb0e223154888d76c20d77163 | 26d83f3f2696d0d1eaf57171a439bfd33ae0ac21 | refs/heads/master | 2022-05-29T04:34:30.269377 | 2020-04-27T12:05:36 | 2020-04-27T12:05:36 | 259,287,721 | 0 | 0 | null | true | 2020-04-27T11:01:11 | 2020-04-27T11:01:10 | 2020-04-27T08:54:36 | 2020-04-27T03:32:16 | 87 | 0 | 0 | 0 | null | false | false | import csv
import os, sys
if not os.path.exists('Books'):
os.mkdir('Books')
elif not os.path.isdir('Books'):
print('Error: a file named "Books" cannot be in the execution directory.')
    sys.exit(1)
links = 'downloads.csv'
with open(links) as file:
reader = csv.reader(file, delimiter=',')
for idx, row in enumerate(reader):
print('%i: [%s] Downloading %s' % (idx + 1, row[0], row[1]))
os.system('wget %s -O "Books/[%s] %s.pdf"' % (row[2], row[0], row[1]))
| UTF-8 | Python | false | false | 516 | py | 4 | downloader.py | 1 | 0.612403 | 0.598837 | 0 | 18 | 27.666667 | 78 |
GinormousSalmon/SC2020 | 18,760,417,176,001 | 4bae90ea50eb9787d74c35107f87437152af7a7d | 42e332e06748752ef38514ee30f16221533fcca7 | /examples/encoding_test.py | d4e0bcaf6f937886360daec236d8a3726980b34c | []
| no_license | https://github.com/GinormousSalmon/SC2020 | a082baece81dd65bd41b45dd33967ebbcc493aa7 | 6ac3a1807e35dc673654dd97d7bf5a7601d12b7b | refs/heads/master | 2022-10-18T17:34:51.251938 | 2020-06-08T15:45:23 | 2020-06-08T15:45:23 | 259,367,410 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | string = "кок"
enc = string.encode('utf-8')
print(enc)
dec = enc.decode('utf-8')
print(dec)
| UTF-8 | Python | false | false | 102 | py | 27 | encoding_test.py | 18 | 0.636364 | 0.616162 | 0 | 5 | 18.8 | 33 |
justanhduc/AugmentedCycleGAN | 7,017,976,583,936 | 0f8ec0809d50d8c69f7fb9051ac75b99be3444b6 | 632d82d996d8b0d3dc14317ee6e7b3db6712dc78 | /train.py | 35921d02fed385e3f998ca6e0ca0ae8b90ef2590 | [
"Unlicense"
]
| permissive | https://github.com/justanhduc/AugmentedCycleGAN | 1df2c97d5cb267c7a4e60bcd6ab7bdd9223e3977 | 4a83c2a370842a713ec1d7ebf20cbd71f4039093 | refs/heads/master | 2020-04-11T06:45:11.512754 | 2018-12-13T06:23:25 | 2018-12-13T06:23:25 | 161,590,717 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
parser = argparse.ArgumentParser('Augmented CycleGAN')
parser.add_argument('--test', action='store_true', default=False, help='Train or test')
parser.add_argument('--latent_dim', type=int, default=16, help='Latent dimension')
parser.add_argument('--n_gen_filters', type=int, default=32, help='Number of initial filters in generators')
parser.add_argument('--n_dis_filters', type=int, default=64, help='Number of initial filters in discriminators')
parser.add_argument('--n_enc_filters', type=int, default=32, help='Number of initial filters in encoders')
parser.add_argument('--use_dropout', action='store_true', default=False, help='Whether to use dropout in conditional resblock')
parser.add_argument('--use_sigmoid', action='store_true', default=False, help='Whether to use original sigmoid GAN')
parser.add_argument('--use_latent_gan', action='store_true', default=False, help='Whether to use GAN on latent codes')
parser.add_argument('--bs', type=int, default=80, help='Batchsize')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
parser.add_argument('--lambda_A', type=float, default=1., help='Weight for cycle loss of domain A')
parser.add_argument('--lambda_B', type=float, default=1., help='Weight for cycle loss of domain B')
parser.add_argument('--lambda_z_B', type=float, default=.025, help='Weight for cycle loss of latent of B')
parser.add_argument('--max_norm', type=float, default=500., help='Maximum gradient norm')
parser.add_argument('--beta1', type=float, default=.5, help='Momentum coefficient')
parser.add_argument('--n_epochs', type=int, default=25, help='Number of training epochs without lr decay')
parser.add_argument('--n_epochs_decay', type=int, default=25, help='Number of training epochs with lr decay')
parser.add_argument('--print_freq', type=int, default=200, help='Logging frequency')
parser.add_argument('--valid_freq', type=int, default=600, help='Validation frequency')
parser.add_argument('--n_multi', type=int, default=10, help='Number of noise samples to generate multiple images given one image')
parser.add_argument('--n_imgs_to_save', type=int, default=20, help='Number of images to save in each iteration')
parser.add_argument('--gpu', type=int, default=0, help='Which GPU to be used')
parser.add_argument('--param_file_version', type=int, default=-1, help='Weight file version to use to testing')
args = parser.parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
import neuralnet as nn
from theano import tensor as T
import numpy as np
from networks import AugmentedCycleGAN
from data_loader import Edges2Shoes, image_size
testing = args.test
latent_dim = args.latent_dim
n_gen_filters = args.n_gen_filters
n_dis_filters = args.n_dis_filters
n_enc_filters = args.n_enc_filters
use_dropout = args.use_dropout
use_sigmoid = args.use_sigmoid
use_latent_gan = args.use_latent_gan
bs = args.bs
lr = args.lr
lambda_A = args.lambda_A
lambda_B = args.lambda_B
lambda_z_B = args.lambda_z_B
max_norm = args.max_norm
beta1 = args.beta1
n_epochs = args.n_epochs
n_epochs_decay = args.n_epochs_decay
print_freq = args.print_freq
valid_freq = args.valid_freq
n_multi = args.n_multi
n_imgs_to_save = args.n_imgs_to_save
# for testing
param_file_version = args.param_file_version
def unnormalize(x):
return x / 2. + .5
def pre_process(x):
downsample = nn.DownsamplingLayer((None, 3, image_size * 4, image_size * 4), 4)
return downsample(x.dimshuffle(0, 3, 1, 2)) / 255. * 2. - 1.
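# Note: pre_process converts channel-last image batches to NCHW, 4x-downsamples
# them to image_size, and rescales pixel values from [0, 255] to [-1, 1];
# unnormalize maps generated images from [-1, 1] back to [0, 1] for saving.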
def train():
X_A_full = T.tensor4('A')
X_B_full = T.tensor4('B')
X_A = pre_process(X_A_full)
X_B = pre_process(X_B_full)
z = nn.utils.srng.normal((bs, latent_dim))
idx = T.scalar('iter')
X_A_ = nn.placeholder((bs, 3, image_size*4, image_size*4), name='A_plhd')
X_B_ = nn.placeholder((bs, 3, image_size*4, image_size*4), name='B_plhd')
lr_ = nn.placeholder(value=lr, name='lr_plhd')
net = AugmentedCycleGAN((None, 3, image_size, image_size), latent_dim, n_gen_filters, n_dis_filters, n_enc_filters, 3,
use_dropout, use_sigmoid, use_latent_gan)
nn.set_training_on()
updates_dis, updates_gen, dis_losses, dis_preds, gen_losses, grad_norms = net.learn(X_A, X_B, z, lambda_A, lambda_B,
lambda_z_B, lr=lr_, beta1=beta1,
max_norm=max_norm)
train_dis = nn.function([], list(dis_losses.values()), updates=updates_dis, givens={X_A_full: X_A_, X_B_full: X_B_},
name='train discriminators')
train_gen = nn.function([], list(gen_losses.values()), updates=updates_gen, givens={X_A_full: X_A_, X_B_full: X_B_},
name='train generators')
discriminate = nn.function([], list(dis_preds.values()), givens={X_A_full: X_A_, X_B_full: X_B_}, name='discriminate')
compute_grad_norms = nn.function([], list(grad_norms.values()), givens={X_A_full: X_A_, X_B_full: X_B_},
name='compute grad norms')
nn.anneal_learning_rate(lr_, idx, 'linear', num_iters=n_epochs_decay)
train_dis_decay = nn.function([idx], list(dis_losses.values()), updates=updates_dis, givens={X_A_full: X_A_, X_B_full: X_B_},
name='train discriminators with decay')
nn.set_training_off()
fixed_z = T.constant(np.random.normal(size=(bs, latent_dim)), dtype='float32')
fixed_multi_z = T.constant(np.repeat(np.random.normal(size=(n_multi, latent_dim)), bs, 0), dtype='float32')
visuals = net.generate_cycle(X_A, X_B, fixed_z)
multi_fake_B = net.generate_multi(X_A, fixed_multi_z)
visualize_single = nn.function([], list(visuals.values()), givens={X_A_full: X_A_, X_B_full: X_B_}, name='visualize single')
visualize_multi = nn.function([], multi_fake_B, givens={X_A_full: X_A_}, name='visualize multi')
train_data = Edges2Shoes((X_A_, X_B_), bs, n_epochs + n_epochs_decay + 1, 'train', True)
val_data = Edges2Shoes((X_A_, X_B_), bs, 1, 'val', False, num_data=bs)
mon = nn.Monitor(model_name='Augmented_CycleGAN', print_freq=print_freq)
print('Training...')
for it in train_data:
epoch = 1 + it // (len(train_data) // bs)
with mon:
res_dis = train_dis() if epoch <= n_epochs else train_dis_decay(epoch - n_epochs)
res_gen = train_gen()
preds = discriminate()
grads_ = compute_grad_norms()
mon.plot('lr', lr_.get_value())
for j, k in enumerate(dis_losses.keys()):
mon.plot(k, res_dis[j])
for j, k in enumerate(gen_losses.keys()):
mon.plot(k, res_gen[j])
for j, k in enumerate(dis_preds.keys()):
mon.hist(k, preds[j])
for j, k in enumerate(grad_norms.keys()):
mon.plot(k, grads_[j])
if it % valid_freq == 0:
for _ in val_data:
vis_single = visualize_single()
vis_multi = visualize_multi()
for j, k in enumerate(visuals.keys()):
mon.imwrite(k, vis_single[j][:n_imgs_to_save], callback=unnormalize)
for j, fake_B in enumerate(vis_multi):
mon.imwrite('fake_B_multi_%d.jpg' % j, fake_B, callback=unnormalize)
mon.dump(nn.utils.shared2numpy(net.netG_A_B.params), 'gen_A_B.npy', 5)
mon.dump(nn.utils.shared2numpy(net.netG_B_A.params), 'gen_B_A.npy', 5)
mon.dump(nn.utils.shared2numpy(net.netD_A.params), 'dis_A.npy', 5)
mon.dump(nn.utils.shared2numpy(net.netD_B.params), 'dis_B.npy', 5)
mon.dump(nn.utils.shared2numpy(net.netE_B.params), 'enc_B.npy', 5)
if use_latent_gan:
mon.dump(nn.utils.shared2numpy(net.netD_z_B.params), 'dis_z_B.npy', 5)
mon.flush()
mon.dump(nn.utils.shared2numpy(net.netG_A_B.params), 'gen_A_B.npy')
mon.dump(nn.utils.shared2numpy(net.netG_B_A.params), 'gen_B_A.npy')
mon.dump(nn.utils.shared2numpy(net.netD_A.params), 'dis_A.npy')
mon.dump(nn.utils.shared2numpy(net.netD_B.params), 'dis_B.npy')
mon.dump(nn.utils.shared2numpy(net.netE_B.params), 'enc_B.npy')
if use_latent_gan:
mon.dump(nn.utils.shared2numpy(net.netD_z_B.params), 'dis_z_B.npy')
print('Training finished!')
if __name__ == '__main__':
if testing:
if args.param_file_version < 0:
raise ValueError('A checkpoint version should be provided.')
from test import test
test()
else:
train()
| UTF-8 | Python | false | false | 8,710 | py | 5 | train.py | 4 | 0.626406 | 0.615155 | 0 | 181 | 47.121547 | 130 |
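A minimal launch sketch (my illustration, not part of the repository): the flags below come from the argparse definitions in train.py above, and the values shown are simply the script's defaults; passing --test together with a non-negative --param_file_version would run inference instead.
import subprocess
# Train Augmented CycleGAN on GPU 0 with the default batch size and learning rate.
subprocess.run([
    "python", "train.py",
    "--gpu", "0",
    "--bs", "80",
    "--lr", "2e-4",
], check=True)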
UmashankarTriforce/CUDAImageManipulation | 12,558,484,396,884 | ab5c19bfd289ffff0ff61ce9ff49d2e8875d101e | 2bb157ac4f3b5e53cdf2a06c9a0b858d5c3ec938 | /gpu/server.py | fb2b28ba48a83e804c7b3b3968ce5d7a21043f27 | []
| no_license | https://github.com/UmashankarTriforce/CUDAImageManipulation | 983a3eb1988a70e6ec062a422078bb84cc9b134a | f281c09fa1845331b6f66c1505d120e819eecc78 | refs/heads/master | 2023-01-09T18:41:05.904684 | 2019-12-08T04:11:21 | 2019-12-08T04:11:21 | 194,909,442 | 0 | 1 | null | false | 2023-01-04T05:04:12 | 2019-07-02T17:45:44 | 2019-12-08T04:11:24 | 2023-01-04T05:04:12 | 8,325 | 0 | 1 | 32 | TypeScript | false | false | from flask import Flask, request, jsonify, Response
from flops import work
import pycuda.driver as cuda
app = Flask(__name__)
def initialize():
cuda.init()
DeviceID = 0
device = cuda.Device(DeviceID)
ctx = device.make_context()
return ctx
def destroy(ctx):
ctx.pop()
@app.route('/bench', methods = ["GET", "POST"])
def gauss():
content = request.json
m = int(content['m'])
k = int(content['k'])
n = int(content['n'])
    ctx = initialize()
    try:
        out = work(m, k, n)
    finally:
        # Release the CUDA context even if the benchmark raises.
        destroy(ctx)
    output = {
        "Single": out[0],
        "Double": out[1]
    }
    return jsonify(output)
app.run('0.0.0.0', debug=True, port=80) | UTF-8 | Python | false | false | 670 | py | 15 | server.py | 5 | 0.583582 | 0.570149 | 0 | 35 | 18.171429 | 51
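A hypothetical client for the /bench endpoint above, assuming the server is reachable on localhost port 80 and that the requests package is installed; the JSON keys m, k, n and the Single/Double response fields mirror the handler's code.
import requests
# Ask the server to benchmark a 1024 x 1024 x 1024 matrix workload on its GPU.
resp = requests.post("http://localhost:80/bench", json={"m": 1024, "k": 1024, "n": 1024})
print(resp.json())  # e.g. {"Single": ..., "Double": ...}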
paepcke/tableauNetworkLayout | 10,694,468,592,374 | d8145ac5a0fd81483ef4a568e95b4904b909c313 | 1e9401f97f00b6b8b1c1532b8dd0b9cfbc1b2938 | /setup.py | ac33a2adfe2ba1577d62ca6a426165aee4624ec7 | []
| no_license | https://github.com/paepcke/tableauNetworkLayout | 94df998b1574345a10505cde92b99f9aa2735a90 | 72623953cc2ece4a6f3e674130946bbaa674f223 | refs/heads/master | 2021-08-17T06:28:05.024486 | 2017-11-20T21:38:26 | 2017-11-20T21:38:26 | 111,446,713 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import multiprocessing
from setuptools import setup, find_packages
setup(
name = "tableauNetworkLayout",
version = "0.01",
packages = find_packages(),
# Dependencies on other packages:
# Couldn't get numpy install to work without
# an out-of-band: sudo apt-get install python-dev
setup_requires = ['nose>=1.1.2'],
install_requires = ['networkx>=2.0',
'numpy>=1.13.3',
'configparser>=3.3.0r2',
'argparse>=1.2.1',
],
tests_require = ['sentinels>=0.0.6', 'nose>=1.0'],
# Unit tests; they are initiated via 'python setup.py test'
#test_suite = 'json_to_relation/test',
test_suite = 'nose.collector',
package_data = {
# If any package contains *.txt or *.rst files, include them:
# '': ['*.txt', '*.rst'],
# And include any *.msg files found in the 'hello' package, too:
# 'hello': ['*.msg'],
},
# metadata for upload to PyPI
author = "Andreas Paepcke",
#author_email = "me@example.com",
description = "Lays out nodes of a small network for visualization in Tableau.",
license = "BSD",
keywords = "network, nodes, layout",
url = "git@github.com/tableauNetworkLayout", # project home page, if any
)
| UTF-8 | Python | false | false | 1,329 | py | 3 | setup.py | 2 | 0.569601 | 0.551543 | 0 | 37 | 34.918919 | 84 |
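Since the package wires its tests through nose (test_suite = 'nose.collector'), a sketch of installing and exercising it could look like the following; this is my illustration, not part of the repository.
import subprocess
# Install the package, then run its nose-based test suite via setup.py.
subprocess.run(["pip", "install", "."], check=True)
subprocess.run(["python", "setup.py", "test"], check=True)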
dwaipayanbiswas2017/photo_uploader | 4,355,096,849,499 | a053af7154558c353297b4f20a33c0b99d8d4e43 | bdfb9ede57cf273d92e4ab5c0d483907a3c2a6a9 | /index.py | 832c4aaf4b660bfd232951c220aa7bb5f083ca21 | []
| no_license | https://github.com/dwaipayanbiswas2017/photo_uploader | 98548a10fdbd3938f87f03e10997ac9fd39d5638 | 5a1eb006fff9551e0354badfb4968d4a5bb805cd | refs/heads/master | 2020-08-10T02:57:56.234829 | 2019-10-10T15:30:00 | 2019-10-10T15:30:00 | 214,239,815 | 1 | 0 | null | true | 2019-10-10T17:05:22 | 2019-10-10T17:05:21 | 2019-10-10T17:03:07 | 2019-10-10T15:32:06 | 8 | 0 | 0 | 0 | null | false | false | from PyQt5 import uic
from PyQt5.QtWidgets import *
import sys
UI_file = "ui.ui"
class PhotoUploader(QMainWindow):
def __init__(self, ui_file):
super(PhotoUploader, self).__init__()
        uic.loadUi(ui_file, self)
self.setFixedSize(self.size())
self.console_data = ""
self.tabs = self.findChild(QTabWidget, "tabWidget")
self.browse_btn = self.findChild(QPushButton, "BrowseButton")
self.select_tab_next_btn = self.findChild(QPushButton, "Select_tab_next_btn")
self.list_view = self.findChild(QTextEdit, "textEdit_3")
self.console = self.findChild(QTextEdit, "console")
self.browse_btn.clicked.connect(self.file_browse_event)
self.select_tab_next_btn.clicked.connect(lambda: self.tab_next_event(1))
self.add_console_data("Photo Uploader By Python", "white", 'c')
self.show()
def file_browse_event(self):
files = QFileDialog.getOpenFileNames()
print(files[0])
file_list = ""
index = 1
for file in files[0]:
f = file.split("/")[-1]
print(f)
file_list += str(index) + " : " + str(f) + "\n"
index += 1
self.list_view.setText(file_list)
    def tab_next_event(self, tab_num):
        self.tabs.setCurrentIndex(tab_num)
def add_console_data(self, text, color='white', alignment='l'):
f_alignment = ""
if alignment == 'c' or alignment == 'C':
f_alignment = "align = 'center'"
elif alignment == 'r' or alignment == 'R':
f_alignment = "align = 'right'"
elif alignment == 'j' or alignment == 'J':
f_alignment = "align = 'justify'"
else:
f_alignment = "align = 'left'"
f_color = 'color:' + color
prefix = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.0//EN' 'http://www.w3.org/TR/REC-html40/strict.dtd'>" \
"<html><head><meta name='qrichtext' content='1' /><style type='text/css'>" \
"p, li { white-space: pre-wrap; }</style></head>" \
"<body style='font-size:11pt;font-weight:400; font-style:normal;'bgcolor='#04242F'>"
suffix = "</body></html>"
self.console_data += "<p " + f_alignment + " style='margin-top:12px; margin-bottom:12px; " \
"margin-left:0px; margin-right:0px; -qt-block-indent:0;" \
"text-indent:0px;'><span style='font-weight:600; " + f_color + ";'>"
self.console_data += str(text)
self.console_data += "</span></p>"
self.console.setText(prefix + self.console_data + suffix)
scroll = self.console.verticalScrollBar()
scroll.setValue(scroll.maximum())
if __name__ == '__main__':
app = QApplication(sys.argv)
biometric = PhotoUploader(UI_file)
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 2,927 | py | 2 | index.py | 1 | 0.555518 | 0.542877 | 0 | 79 | 36.050633 | 119 |
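A usage sketch for the GUI above (my illustration, not in the repository): it mirrors the file's own __main__ block, requires the Qt Designer file ui.ui next to the script, and shows the colored, aligned console output produced by add_console_data.
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
uploader = PhotoUploader("ui.ui")  # PhotoUploader is the class defined above
uploader.add_console_data("3 files selected", color="yellow", alignment="l")
uploader.add_console_data("Upload complete", color="green", alignment="c")
sys.exit(app.exec_())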
smartxworks/cloudtower-python-sdk | 7,971,459,340,868 | 5c506a67366362ac1adc92cecfc6535bf15628b5 | 873551a415af1ed5270e3281910b07beb15afbdf | /cloudtower/models/content_library_vm_template_where_input.py | 5a3a6fce13513df5beb476f22261ada686e9f4d5 | [
"LicenseRef-scancode-unknown",
"ISC"
]
| permissive | https://github.com/smartxworks/cloudtower-python-sdk | 290ffb4677f247a9a1edfde0133015f221860da6 | da8cdca0e869278f83d33207b1fc77c6d785ac15 | refs/heads/master | 2023-08-08T07:49:10.503674 | 2023-08-01T11:38:37 | 2023-08-01T11:39:41 | 438,103,300 | 4 | 0 | ISC | false | 2023-09-01T03:36:45 | 2021-12-14T03:22:32 | 2023-02-02T19:41:28 | 2023-09-01T03:36:44 | 17,186 | 5 | 0 | 0 | Python | false | false | # coding: utf-8
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from cloudtower.configuration import Configuration
class ContentLibraryVmTemplateWhereInput(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_and': 'list[ContentLibraryVmTemplateWhereInput]',
'architecture': 'Architecture',
'architecture_in': 'list[Architecture]',
'architecture_not': 'Architecture',
'architecture_not_in': 'list[Architecture]',
'cloud_init_supported': 'bool',
'cloud_init_supported_not': 'bool',
'clusters_every': 'ClusterWhereInput',
'clusters_none': 'ClusterWhereInput',
'clusters_some': 'ClusterWhereInput',
'created_at': 'str',
'created_at_gt': 'str',
'created_at_gte': 'str',
'created_at_in': 'list[str]',
'created_at_lt': 'str',
'created_at_lte': 'str',
'created_at_not': 'str',
'created_at_not_in': 'list[str]',
'description': 'str',
'description_contains': 'str',
'description_ends_with': 'str',
'description_gt': 'str',
'description_gte': 'str',
'description_in': 'list[str]',
'description_lt': 'str',
'description_lte': 'str',
'description_not': 'str',
'description_not_contains': 'str',
'description_not_ends_with': 'str',
'description_not_in': 'list[str]',
'description_not_starts_with': 'str',
'description_starts_with': 'str',
'entity_async_status': 'EntityAsyncStatus',
'entity_async_status_in': 'list[EntityAsyncStatus]',
'entity_async_status_not': 'EntityAsyncStatus',
'entity_async_status_not_in': 'list[EntityAsyncStatus]',
'id': 'str',
'id_contains': 'str',
'id_ends_with': 'str',
'id_gt': 'str',
'id_gte': 'str',
'id_in': 'list[str]',
'id_lt': 'str',
'id_lte': 'str',
'id_not': 'str',
'id_not_contains': 'str',
'id_not_ends_with': 'str',
'id_not_in': 'list[str]',
'id_not_starts_with': 'str',
'id_starts_with': 'str',
'labels_every': 'LabelWhereInput',
'labels_none': 'LabelWhereInput',
'labels_some': 'LabelWhereInput',
'memory': 'int',
'memory_gt': 'int',
'memory_gte': 'int',
'memory_in': 'list[int]',
'memory_lt': 'int',
'memory_lte': 'int',
'memory_not': 'int',
'memory_not_in': 'list[int]',
'name': 'str',
'name_contains': 'str',
'name_ends_with': 'str',
'name_gt': 'str',
'name_gte': 'str',
'name_in': 'list[str]',
'name_lt': 'str',
'name_lte': 'str',
'name_not': 'str',
'name_not_contains': 'str',
'name_not_ends_with': 'str',
'name_not_in': 'list[str]',
'name_not_starts_with': 'str',
'name_starts_with': 'str',
'_not': 'list[ContentLibraryVmTemplateWhereInput]',
'_or': 'list[ContentLibraryVmTemplateWhereInput]',
'os': 'str',
'os_contains': 'str',
'os_ends_with': 'str',
'os_gt': 'str',
'os_gte': 'str',
'os_in': 'list[str]',
'os_lt': 'str',
'os_lte': 'str',
'os_not': 'str',
'os_not_contains': 'str',
'os_not_ends_with': 'str',
'os_not_in': 'list[str]',
'os_not_starts_with': 'str',
'os_starts_with': 'str',
'size': 'int',
'size_gt': 'int',
'size_gte': 'int',
'size_in': 'list[int]',
'size_lt': 'int',
'size_lte': 'int',
'size_not': 'int',
'size_not_in': 'list[int]',
'vcpu': 'int',
'vcpu_gt': 'int',
'vcpu_gte': 'int',
'vcpu_in': 'list[int]',
'vcpu_lt': 'int',
'vcpu_lte': 'int',
'vcpu_not': 'int',
'vcpu_not_in': 'list[int]',
'vm_templates_every': 'VmTemplateWhereInput',
'vm_templates_none': 'VmTemplateWhereInput',
'vm_templates_some': 'VmTemplateWhereInput'
}
attribute_map = {
'_and': 'AND',
'architecture': 'architecture',
'architecture_in': 'architecture_in',
'architecture_not': 'architecture_not',
'architecture_not_in': 'architecture_not_in',
'cloud_init_supported': 'cloud_init_supported',
'cloud_init_supported_not': 'cloud_init_supported_not',
'clusters_every': 'clusters_every',
'clusters_none': 'clusters_none',
'clusters_some': 'clusters_some',
'created_at': 'createdAt',
'created_at_gt': 'createdAt_gt',
'created_at_gte': 'createdAt_gte',
'created_at_in': 'createdAt_in',
'created_at_lt': 'createdAt_lt',
'created_at_lte': 'createdAt_lte',
'created_at_not': 'createdAt_not',
'created_at_not_in': 'createdAt_not_in',
'description': 'description',
'description_contains': 'description_contains',
'description_ends_with': 'description_ends_with',
'description_gt': 'description_gt',
'description_gte': 'description_gte',
'description_in': 'description_in',
'description_lt': 'description_lt',
'description_lte': 'description_lte',
'description_not': 'description_not',
'description_not_contains': 'description_not_contains',
'description_not_ends_with': 'description_not_ends_with',
'description_not_in': 'description_not_in',
'description_not_starts_with': 'description_not_starts_with',
'description_starts_with': 'description_starts_with',
'entity_async_status': 'entityAsyncStatus',
'entity_async_status_in': 'entityAsyncStatus_in',
'entity_async_status_not': 'entityAsyncStatus_not',
'entity_async_status_not_in': 'entityAsyncStatus_not_in',
'id': 'id',
'id_contains': 'id_contains',
'id_ends_with': 'id_ends_with',
'id_gt': 'id_gt',
'id_gte': 'id_gte',
'id_in': 'id_in',
'id_lt': 'id_lt',
'id_lte': 'id_lte',
'id_not': 'id_not',
'id_not_contains': 'id_not_contains',
'id_not_ends_with': 'id_not_ends_with',
'id_not_in': 'id_not_in',
'id_not_starts_with': 'id_not_starts_with',
'id_starts_with': 'id_starts_with',
'labels_every': 'labels_every',
'labels_none': 'labels_none',
'labels_some': 'labels_some',
'memory': 'memory',
'memory_gt': 'memory_gt',
'memory_gte': 'memory_gte',
'memory_in': 'memory_in',
'memory_lt': 'memory_lt',
'memory_lte': 'memory_lte',
'memory_not': 'memory_not',
'memory_not_in': 'memory_not_in',
'name': 'name',
'name_contains': 'name_contains',
'name_ends_with': 'name_ends_with',
'name_gt': 'name_gt',
'name_gte': 'name_gte',
'name_in': 'name_in',
'name_lt': 'name_lt',
'name_lte': 'name_lte',
'name_not': 'name_not',
'name_not_contains': 'name_not_contains',
'name_not_ends_with': 'name_not_ends_with',
'name_not_in': 'name_not_in',
'name_not_starts_with': 'name_not_starts_with',
'name_starts_with': 'name_starts_with',
'_not': 'NOT',
'_or': 'OR',
'os': 'os',
'os_contains': 'os_contains',
'os_ends_with': 'os_ends_with',
'os_gt': 'os_gt',
'os_gte': 'os_gte',
'os_in': 'os_in',
'os_lt': 'os_lt',
'os_lte': 'os_lte',
'os_not': 'os_not',
'os_not_contains': 'os_not_contains',
'os_not_ends_with': 'os_not_ends_with',
'os_not_in': 'os_not_in',
'os_not_starts_with': 'os_not_starts_with',
'os_starts_with': 'os_starts_with',
'size': 'size',
'size_gt': 'size_gt',
'size_gte': 'size_gte',
'size_in': 'size_in',
'size_lt': 'size_lt',
'size_lte': 'size_lte',
'size_not': 'size_not',
'size_not_in': 'size_not_in',
'vcpu': 'vcpu',
'vcpu_gt': 'vcpu_gt',
'vcpu_gte': 'vcpu_gte',
'vcpu_in': 'vcpu_in',
'vcpu_lt': 'vcpu_lt',
'vcpu_lte': 'vcpu_lte',
'vcpu_not': 'vcpu_not',
'vcpu_not_in': 'vcpu_not_in',
'vm_templates_every': 'vm_templates_every',
'vm_templates_none': 'vm_templates_none',
'vm_templates_some': 'vm_templates_some'
}
def __init__(self, **kwargs): # noqa: E501
"""ContentLibraryVmTemplateWhereInput - a model defined in OpenAPI""" # noqa: E501
self.local_vars_configuration = kwargs.get("local_vars_configuration", Configuration.get_default_copy())
self.__and = None
self._architecture = None
self._architecture_in = None
self._architecture_not = None
self._architecture_not_in = None
self._cloud_init_supported = None
self._cloud_init_supported_not = None
self._clusters_every = None
self._clusters_none = None
self._clusters_some = None
self._created_at = None
self._created_at_gt = None
self._created_at_gte = None
self._created_at_in = None
self._created_at_lt = None
self._created_at_lte = None
self._created_at_not = None
self._created_at_not_in = None
self._description = None
self._description_contains = None
self._description_ends_with = None
self._description_gt = None
self._description_gte = None
self._description_in = None
self._description_lt = None
self._description_lte = None
self._description_not = None
self._description_not_contains = None
self._description_not_ends_with = None
self._description_not_in = None
self._description_not_starts_with = None
self._description_starts_with = None
self._entity_async_status = None
self._entity_async_status_in = None
self._entity_async_status_not = None
self._entity_async_status_not_in = None
self._id = None
self._id_contains = None
self._id_ends_with = None
self._id_gt = None
self._id_gte = None
self._id_in = None
self._id_lt = None
self._id_lte = None
self._id_not = None
self._id_not_contains = None
self._id_not_ends_with = None
self._id_not_in = None
self._id_not_starts_with = None
self._id_starts_with = None
self._labels_every = None
self._labels_none = None
self._labels_some = None
self._memory = None
self._memory_gt = None
self._memory_gte = None
self._memory_in = None
self._memory_lt = None
self._memory_lte = None
self._memory_not = None
self._memory_not_in = None
self._name = None
self._name_contains = None
self._name_ends_with = None
self._name_gt = None
self._name_gte = None
self._name_in = None
self._name_lt = None
self._name_lte = None
self._name_not = None
self._name_not_contains = None
self._name_not_ends_with = None
self._name_not_in = None
self._name_not_starts_with = None
self._name_starts_with = None
self.__not = None
self.__or = None
self._os = None
self._os_contains = None
self._os_ends_with = None
self._os_gt = None
self._os_gte = None
self._os_in = None
self._os_lt = None
self._os_lte = None
self._os_not = None
self._os_not_contains = None
self._os_not_ends_with = None
self._os_not_in = None
self._os_not_starts_with = None
self._os_starts_with = None
self._size = None
self._size_gt = None
self._size_gte = None
self._size_in = None
self._size_lt = None
self._size_lte = None
self._size_not = None
self._size_not_in = None
self._vcpu = None
self._vcpu_gt = None
self._vcpu_gte = None
self._vcpu_in = None
self._vcpu_lt = None
self._vcpu_lte = None
self._vcpu_not = None
self._vcpu_not_in = None
self._vm_templates_every = None
self._vm_templates_none = None
self._vm_templates_some = None
self.discriminator = None
self._and = kwargs.get("_and", None)
self.architecture = kwargs.get("architecture", None)
self.architecture_in = kwargs.get("architecture_in", None)
self.architecture_not = kwargs.get("architecture_not", None)
self.architecture_not_in = kwargs.get("architecture_not_in", None)
self.cloud_init_supported = kwargs.get("cloud_init_supported", None)
self.cloud_init_supported_not = kwargs.get("cloud_init_supported_not", None)
self.clusters_every = kwargs.get("clusters_every", None)
self.clusters_none = kwargs.get("clusters_none", None)
self.clusters_some = kwargs.get("clusters_some", None)
self.created_at = kwargs.get("created_at", None)
self.created_at_gt = kwargs.get("created_at_gt", None)
self.created_at_gte = kwargs.get("created_at_gte", None)
self.created_at_in = kwargs.get("created_at_in", None)
self.created_at_lt = kwargs.get("created_at_lt", None)
self.created_at_lte = kwargs.get("created_at_lte", None)
self.created_at_not = kwargs.get("created_at_not", None)
self.created_at_not_in = kwargs.get("created_at_not_in", None)
self.description = kwargs.get("description", None)
self.description_contains = kwargs.get("description_contains", None)
self.description_ends_with = kwargs.get("description_ends_with", None)
self.description_gt = kwargs.get("description_gt", None)
self.description_gte = kwargs.get("description_gte", None)
self.description_in = kwargs.get("description_in", None)
self.description_lt = kwargs.get("description_lt", None)
self.description_lte = kwargs.get("description_lte", None)
self.description_not = kwargs.get("description_not", None)
self.description_not_contains = kwargs.get("description_not_contains", None)
self.description_not_ends_with = kwargs.get("description_not_ends_with", None)
self.description_not_in = kwargs.get("description_not_in", None)
self.description_not_starts_with = kwargs.get("description_not_starts_with", None)
self.description_starts_with = kwargs.get("description_starts_with", None)
self.entity_async_status = kwargs.get("entity_async_status", None)
self.entity_async_status_in = kwargs.get("entity_async_status_in", None)
self.entity_async_status_not = kwargs.get("entity_async_status_not", None)
self.entity_async_status_not_in = kwargs.get("entity_async_status_not_in", None)
self.id = kwargs.get("id", None)
self.id_contains = kwargs.get("id_contains", None)
self.id_ends_with = kwargs.get("id_ends_with", None)
self.id_gt = kwargs.get("id_gt", None)
self.id_gte = kwargs.get("id_gte", None)
self.id_in = kwargs.get("id_in", None)
self.id_lt = kwargs.get("id_lt", None)
self.id_lte = kwargs.get("id_lte", None)
self.id_not = kwargs.get("id_not", None)
self.id_not_contains = kwargs.get("id_not_contains", None)
self.id_not_ends_with = kwargs.get("id_not_ends_with", None)
self.id_not_in = kwargs.get("id_not_in", None)
self.id_not_starts_with = kwargs.get("id_not_starts_with", None)
self.id_starts_with = kwargs.get("id_starts_with", None)
self.labels_every = kwargs.get("labels_every", None)
self.labels_none = kwargs.get("labels_none", None)
self.labels_some = kwargs.get("labels_some", None)
self.memory = kwargs.get("memory", None)
self.memory_gt = kwargs.get("memory_gt", None)
self.memory_gte = kwargs.get("memory_gte", None)
self.memory_in = kwargs.get("memory_in", None)
self.memory_lt = kwargs.get("memory_lt", None)
self.memory_lte = kwargs.get("memory_lte", None)
self.memory_not = kwargs.get("memory_not", None)
self.memory_not_in = kwargs.get("memory_not_in", None)
self.name = kwargs.get("name", None)
self.name_contains = kwargs.get("name_contains", None)
self.name_ends_with = kwargs.get("name_ends_with", None)
self.name_gt = kwargs.get("name_gt", None)
self.name_gte = kwargs.get("name_gte", None)
self.name_in = kwargs.get("name_in", None)
self.name_lt = kwargs.get("name_lt", None)
self.name_lte = kwargs.get("name_lte", None)
self.name_not = kwargs.get("name_not", None)
self.name_not_contains = kwargs.get("name_not_contains", None)
self.name_not_ends_with = kwargs.get("name_not_ends_with", None)
self.name_not_in = kwargs.get("name_not_in", None)
self.name_not_starts_with = kwargs.get("name_not_starts_with", None)
self.name_starts_with = kwargs.get("name_starts_with", None)
self._not = kwargs.get("_not", None)
self._or = kwargs.get("_or", None)
self.os = kwargs.get("os", None)
self.os_contains = kwargs.get("os_contains", None)
self.os_ends_with = kwargs.get("os_ends_with", None)
self.os_gt = kwargs.get("os_gt", None)
self.os_gte = kwargs.get("os_gte", None)
self.os_in = kwargs.get("os_in", None)
self.os_lt = kwargs.get("os_lt", None)
self.os_lte = kwargs.get("os_lte", None)
self.os_not = kwargs.get("os_not", None)
self.os_not_contains = kwargs.get("os_not_contains", None)
self.os_not_ends_with = kwargs.get("os_not_ends_with", None)
self.os_not_in = kwargs.get("os_not_in", None)
self.os_not_starts_with = kwargs.get("os_not_starts_with", None)
self.os_starts_with = kwargs.get("os_starts_with", None)
self.size = kwargs.get("size", None)
self.size_gt = kwargs.get("size_gt", None)
self.size_gte = kwargs.get("size_gte", None)
self.size_in = kwargs.get("size_in", None)
self.size_lt = kwargs.get("size_lt", None)
self.size_lte = kwargs.get("size_lte", None)
self.size_not = kwargs.get("size_not", None)
self.size_not_in = kwargs.get("size_not_in", None)
self.vcpu = kwargs.get("vcpu", None)
self.vcpu_gt = kwargs.get("vcpu_gt", None)
self.vcpu_gte = kwargs.get("vcpu_gte", None)
self.vcpu_in = kwargs.get("vcpu_in", None)
self.vcpu_lt = kwargs.get("vcpu_lt", None)
self.vcpu_lte = kwargs.get("vcpu_lte", None)
self.vcpu_not = kwargs.get("vcpu_not", None)
self.vcpu_not_in = kwargs.get("vcpu_not_in", None)
self.vm_templates_every = kwargs.get("vm_templates_every", None)
self.vm_templates_none = kwargs.get("vm_templates_none", None)
self.vm_templates_some = kwargs.get("vm_templates_some", None)
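    # Illustrative usage (an assumption for documentation, not part of the
    # generated file): a filter for templates whose name contains "centos"
    # and whose memory is at least 4 GiB could be built as
    #   ContentLibraryVmTemplateWhereInput(name_contains="centos",
    #                                      memory_gte=4 * 1024 ** 3)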
@property
def _and(self):
"""Gets the _and of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The _and of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[ContentLibraryVmTemplateWhereInput]
"""
return self.__and
@_and.setter
def _and(self, _and):
"""Sets the _and of this ContentLibraryVmTemplateWhereInput.
:param _and: The _and of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type _and: list[ContentLibraryVmTemplateWhereInput]
"""
self.__and = _and
@property
def architecture(self):
"""Gets the architecture of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The architecture of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: Architecture
"""
return self._architecture
@architecture.setter
def architecture(self, architecture):
"""Sets the architecture of this ContentLibraryVmTemplateWhereInput.
:param architecture: The architecture of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type architecture: Architecture
"""
self._architecture = architecture
@property
def architecture_in(self):
"""Gets the architecture_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The architecture_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[Architecture]
"""
return self._architecture_in
@architecture_in.setter
def architecture_in(self, architecture_in):
"""Sets the architecture_in of this ContentLibraryVmTemplateWhereInput.
:param architecture_in: The architecture_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type architecture_in: list[Architecture]
"""
self._architecture_in = architecture_in
@property
def architecture_not(self):
"""Gets the architecture_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The architecture_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: Architecture
"""
return self._architecture_not
@architecture_not.setter
def architecture_not(self, architecture_not):
"""Sets the architecture_not of this ContentLibraryVmTemplateWhereInput.
:param architecture_not: The architecture_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type architecture_not: Architecture
"""
self._architecture_not = architecture_not
@property
def architecture_not_in(self):
"""Gets the architecture_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The architecture_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[Architecture]
"""
return self._architecture_not_in
@architecture_not_in.setter
def architecture_not_in(self, architecture_not_in):
"""Sets the architecture_not_in of this ContentLibraryVmTemplateWhereInput.
:param architecture_not_in: The architecture_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type architecture_not_in: list[Architecture]
"""
self._architecture_not_in = architecture_not_in
@property
def cloud_init_supported(self):
"""Gets the cloud_init_supported of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The cloud_init_supported of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: bool
"""
return self._cloud_init_supported
@cloud_init_supported.setter
def cloud_init_supported(self, cloud_init_supported):
"""Sets the cloud_init_supported of this ContentLibraryVmTemplateWhereInput.
:param cloud_init_supported: The cloud_init_supported of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type cloud_init_supported: bool
"""
self._cloud_init_supported = cloud_init_supported
@property
def cloud_init_supported_not(self):
"""Gets the cloud_init_supported_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The cloud_init_supported_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: bool
"""
return self._cloud_init_supported_not
@cloud_init_supported_not.setter
def cloud_init_supported_not(self, cloud_init_supported_not):
"""Sets the cloud_init_supported_not of this ContentLibraryVmTemplateWhereInput.
:param cloud_init_supported_not: The cloud_init_supported_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type cloud_init_supported_not: bool
"""
self._cloud_init_supported_not = cloud_init_supported_not
@property
def clusters_every(self):
"""Gets the clusters_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The clusters_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: ClusterWhereInput
"""
return self._clusters_every
@clusters_every.setter
def clusters_every(self, clusters_every):
"""Sets the clusters_every of this ContentLibraryVmTemplateWhereInput.
:param clusters_every: The clusters_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type clusters_every: ClusterWhereInput
"""
self._clusters_every = clusters_every
@property
def clusters_none(self):
"""Gets the clusters_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The clusters_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: ClusterWhereInput
"""
return self._clusters_none
@clusters_none.setter
def clusters_none(self, clusters_none):
"""Sets the clusters_none of this ContentLibraryVmTemplateWhereInput.
:param clusters_none: The clusters_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type clusters_none: ClusterWhereInput
"""
self._clusters_none = clusters_none
@property
def clusters_some(self):
"""Gets the clusters_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The clusters_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: ClusterWhereInput
"""
return self._clusters_some
@clusters_some.setter
def clusters_some(self, clusters_some):
"""Sets the clusters_some of this ContentLibraryVmTemplateWhereInput.
:param clusters_some: The clusters_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type clusters_some: ClusterWhereInput
"""
self._clusters_some = clusters_some
@property
def created_at(self):
"""Gets the created_at of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ContentLibraryVmTemplateWhereInput.
:param created_at: The created_at of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at: str
"""
self._created_at = created_at
@property
def created_at_gt(self):
"""Gets the created_at_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at_gt
@created_at_gt.setter
def created_at_gt(self, created_at_gt):
"""Sets the created_at_gt of this ContentLibraryVmTemplateWhereInput.
:param created_at_gt: The created_at_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_gt: str
"""
self._created_at_gt = created_at_gt
@property
def created_at_gte(self):
"""Gets the created_at_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at_gte
@created_at_gte.setter
def created_at_gte(self, created_at_gte):
"""Sets the created_at_gte of this ContentLibraryVmTemplateWhereInput.
:param created_at_gte: The created_at_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_gte: str
"""
self._created_at_gte = created_at_gte
@property
def created_at_in(self):
"""Gets the created_at_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._created_at_in
@created_at_in.setter
def created_at_in(self, created_at_in):
"""Sets the created_at_in of this ContentLibraryVmTemplateWhereInput.
:param created_at_in: The created_at_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_in: list[str]
"""
self._created_at_in = created_at_in
@property
def created_at_lt(self):
"""Gets the created_at_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at_lt
@created_at_lt.setter
def created_at_lt(self, created_at_lt):
"""Sets the created_at_lt of this ContentLibraryVmTemplateWhereInput.
:param created_at_lt: The created_at_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_lt: str
"""
self._created_at_lt = created_at_lt
@property
def created_at_lte(self):
"""Gets the created_at_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at_lte
@created_at_lte.setter
def created_at_lte(self, created_at_lte):
"""Sets the created_at_lte of this ContentLibraryVmTemplateWhereInput.
:param created_at_lte: The created_at_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_lte: str
"""
self._created_at_lte = created_at_lte
@property
def created_at_not(self):
"""Gets the created_at_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._created_at_not
@created_at_not.setter
def created_at_not(self, created_at_not):
"""Sets the created_at_not of this ContentLibraryVmTemplateWhereInput.
:param created_at_not: The created_at_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_not: str
"""
self._created_at_not = created_at_not
@property
def created_at_not_in(self):
"""Gets the created_at_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The created_at_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._created_at_not_in
@created_at_not_in.setter
def created_at_not_in(self, created_at_not_in):
"""Sets the created_at_not_in of this ContentLibraryVmTemplateWhereInput.
:param created_at_not_in: The created_at_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type created_at_not_in: list[str]
"""
self._created_at_not_in = created_at_not_in
@property
def description(self):
"""Gets the description of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ContentLibraryVmTemplateWhereInput.
:param description: The description of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description: str
"""
self._description = description
@property
def description_contains(self):
"""Gets the description_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_contains
@description_contains.setter
def description_contains(self, description_contains):
"""Sets the description_contains of this ContentLibraryVmTemplateWhereInput.
:param description_contains: The description_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_contains: str
"""
self._description_contains = description_contains
@property
def description_ends_with(self):
"""Gets the description_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_ends_with
@description_ends_with.setter
def description_ends_with(self, description_ends_with):
"""Sets the description_ends_with of this ContentLibraryVmTemplateWhereInput.
:param description_ends_with: The description_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_ends_with: str
"""
self._description_ends_with = description_ends_with
@property
def description_gt(self):
"""Gets the description_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_gt
@description_gt.setter
def description_gt(self, description_gt):
"""Sets the description_gt of this ContentLibraryVmTemplateWhereInput.
:param description_gt: The description_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_gt: str
"""
self._description_gt = description_gt
@property
def description_gte(self):
"""Gets the description_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_gte
@description_gte.setter
def description_gte(self, description_gte):
"""Sets the description_gte of this ContentLibraryVmTemplateWhereInput.
:param description_gte: The description_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_gte: str
"""
self._description_gte = description_gte
@property
def description_in(self):
"""Gets the description_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._description_in
@description_in.setter
def description_in(self, description_in):
"""Sets the description_in of this ContentLibraryVmTemplateWhereInput.
:param description_in: The description_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_in: list[str]
"""
self._description_in = description_in
@property
def description_lt(self):
"""Gets the description_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_lt
@description_lt.setter
def description_lt(self, description_lt):
"""Sets the description_lt of this ContentLibraryVmTemplateWhereInput.
:param description_lt: The description_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_lt: str
"""
self._description_lt = description_lt
@property
def description_lte(self):
"""Gets the description_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_lte
@description_lte.setter
def description_lte(self, description_lte):
"""Sets the description_lte of this ContentLibraryVmTemplateWhereInput.
:param description_lte: The description_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_lte: str
"""
self._description_lte = description_lte
@property
def description_not(self):
"""Gets the description_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_not
@description_not.setter
def description_not(self, description_not):
"""Sets the description_not of this ContentLibraryVmTemplateWhereInput.
:param description_not: The description_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_not: str
"""
self._description_not = description_not
@property
def description_not_contains(self):
"""Gets the description_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_not_contains
@description_not_contains.setter
def description_not_contains(self, description_not_contains):
"""Sets the description_not_contains of this ContentLibraryVmTemplateWhereInput.
:param description_not_contains: The description_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_not_contains: str
"""
self._description_not_contains = description_not_contains
@property
def description_not_ends_with(self):
"""Gets the description_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_not_ends_with
@description_not_ends_with.setter
def description_not_ends_with(self, description_not_ends_with):
"""Sets the description_not_ends_with of this ContentLibraryVmTemplateWhereInput.
:param description_not_ends_with: The description_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_not_ends_with: str
"""
self._description_not_ends_with = description_not_ends_with
@property
def description_not_in(self):
"""Gets the description_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._description_not_in
@description_not_in.setter
def description_not_in(self, description_not_in):
"""Sets the description_not_in of this ContentLibraryVmTemplateWhereInput.
:param description_not_in: The description_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_not_in: list[str]
"""
self._description_not_in = description_not_in
@property
def description_not_starts_with(self):
"""Gets the description_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_not_starts_with
@description_not_starts_with.setter
def description_not_starts_with(self, description_not_starts_with):
"""Sets the description_not_starts_with of this ContentLibraryVmTemplateWhereInput.
:param description_not_starts_with: The description_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_not_starts_with: str
"""
self._description_not_starts_with = description_not_starts_with
@property
def description_starts_with(self):
"""Gets the description_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The description_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._description_starts_with
@description_starts_with.setter
def description_starts_with(self, description_starts_with):
"""Sets the description_starts_with of this ContentLibraryVmTemplateWhereInput.
:param description_starts_with: The description_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type description_starts_with: str
"""
self._description_starts_with = description_starts_with
@property
def entity_async_status(self):
"""Gets the entity_async_status of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The entity_async_status of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: EntityAsyncStatus
"""
return self._entity_async_status
@entity_async_status.setter
def entity_async_status(self, entity_async_status):
"""Sets the entity_async_status of this ContentLibraryVmTemplateWhereInput.
:param entity_async_status: The entity_async_status of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type entity_async_status: EntityAsyncStatus
"""
self._entity_async_status = entity_async_status
@property
def entity_async_status_in(self):
"""Gets the entity_async_status_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The entity_async_status_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[EntityAsyncStatus]
"""
return self._entity_async_status_in
@entity_async_status_in.setter
def entity_async_status_in(self, entity_async_status_in):
"""Sets the entity_async_status_in of this ContentLibraryVmTemplateWhereInput.
:param entity_async_status_in: The entity_async_status_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type entity_async_status_in: list[EntityAsyncStatus]
"""
self._entity_async_status_in = entity_async_status_in
@property
def entity_async_status_not(self):
"""Gets the entity_async_status_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The entity_async_status_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: EntityAsyncStatus
"""
return self._entity_async_status_not
@entity_async_status_not.setter
def entity_async_status_not(self, entity_async_status_not):
"""Sets the entity_async_status_not of this ContentLibraryVmTemplateWhereInput.
:param entity_async_status_not: The entity_async_status_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type entity_async_status_not: EntityAsyncStatus
"""
self._entity_async_status_not = entity_async_status_not
@property
def entity_async_status_not_in(self):
"""Gets the entity_async_status_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The entity_async_status_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[EntityAsyncStatus]
"""
return self._entity_async_status_not_in
@entity_async_status_not_in.setter
def entity_async_status_not_in(self, entity_async_status_not_in):
"""Sets the entity_async_status_not_in of this ContentLibraryVmTemplateWhereInput.
:param entity_async_status_not_in: The entity_async_status_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type entity_async_status_not_in: list[EntityAsyncStatus]
"""
self._entity_async_status_not_in = entity_async_status_not_in
@property
def id(self):
"""Gets the id of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ContentLibraryVmTemplateWhereInput.
:param id: The id of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id: str
"""
self._id = id
@property
def id_contains(self):
"""Gets the id_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_contains
@id_contains.setter
def id_contains(self, id_contains):
"""Sets the id_contains of this ContentLibraryVmTemplateWhereInput.
:param id_contains: The id_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_contains: str
"""
self._id_contains = id_contains
@property
def id_ends_with(self):
"""Gets the id_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_ends_with
@id_ends_with.setter
def id_ends_with(self, id_ends_with):
"""Sets the id_ends_with of this ContentLibraryVmTemplateWhereInput.
:param id_ends_with: The id_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_ends_with: str
"""
self._id_ends_with = id_ends_with
@property
def id_gt(self):
"""Gets the id_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_gt
@id_gt.setter
def id_gt(self, id_gt):
"""Sets the id_gt of this ContentLibraryVmTemplateWhereInput.
:param id_gt: The id_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_gt: str
"""
self._id_gt = id_gt
@property
def id_gte(self):
"""Gets the id_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_gte
@id_gte.setter
def id_gte(self, id_gte):
"""Sets the id_gte of this ContentLibraryVmTemplateWhereInput.
:param id_gte: The id_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_gte: str
"""
self._id_gte = id_gte
@property
def id_in(self):
"""Gets the id_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._id_in
@id_in.setter
def id_in(self, id_in):
"""Sets the id_in of this ContentLibraryVmTemplateWhereInput.
:param id_in: The id_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_in: list[str]
"""
self._id_in = id_in
@property
def id_lt(self):
"""Gets the id_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_lt
@id_lt.setter
def id_lt(self, id_lt):
"""Sets the id_lt of this ContentLibraryVmTemplateWhereInput.
:param id_lt: The id_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_lt: str
"""
self._id_lt = id_lt
@property
def id_lte(self):
"""Gets the id_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_lte
@id_lte.setter
def id_lte(self, id_lte):
"""Sets the id_lte of this ContentLibraryVmTemplateWhereInput.
:param id_lte: The id_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_lte: str
"""
self._id_lte = id_lte
@property
def id_not(self):
"""Gets the id_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_not
@id_not.setter
def id_not(self, id_not):
"""Sets the id_not of this ContentLibraryVmTemplateWhereInput.
:param id_not: The id_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_not: str
"""
self._id_not = id_not
@property
def id_not_contains(self):
"""Gets the id_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_not_contains
@id_not_contains.setter
def id_not_contains(self, id_not_contains):
"""Sets the id_not_contains of this ContentLibraryVmTemplateWhereInput.
:param id_not_contains: The id_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_not_contains: str
"""
self._id_not_contains = id_not_contains
@property
def id_not_ends_with(self):
"""Gets the id_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_not_ends_with
@id_not_ends_with.setter
def id_not_ends_with(self, id_not_ends_with):
"""Sets the id_not_ends_with of this ContentLibraryVmTemplateWhereInput.
:param id_not_ends_with: The id_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_not_ends_with: str
"""
self._id_not_ends_with = id_not_ends_with
@property
def id_not_in(self):
"""Gets the id_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._id_not_in
@id_not_in.setter
def id_not_in(self, id_not_in):
"""Sets the id_not_in of this ContentLibraryVmTemplateWhereInput.
:param id_not_in: The id_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_not_in: list[str]
"""
self._id_not_in = id_not_in
@property
def id_not_starts_with(self):
"""Gets the id_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_not_starts_with
@id_not_starts_with.setter
def id_not_starts_with(self, id_not_starts_with):
"""Sets the id_not_starts_with of this ContentLibraryVmTemplateWhereInput.
:param id_not_starts_with: The id_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_not_starts_with: str
"""
self._id_not_starts_with = id_not_starts_with
@property
def id_starts_with(self):
"""Gets the id_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The id_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._id_starts_with
@id_starts_with.setter
def id_starts_with(self, id_starts_with):
"""Sets the id_starts_with of this ContentLibraryVmTemplateWhereInput.
:param id_starts_with: The id_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type id_starts_with: str
"""
self._id_starts_with = id_starts_with
@property
def labels_every(self):
"""Gets the labels_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The labels_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: LabelWhereInput
"""
return self._labels_every
@labels_every.setter
def labels_every(self, labels_every):
"""Sets the labels_every of this ContentLibraryVmTemplateWhereInput.
:param labels_every: The labels_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type labels_every: LabelWhereInput
"""
self._labels_every = labels_every
@property
def labels_none(self):
"""Gets the labels_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The labels_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: LabelWhereInput
"""
return self._labels_none
@labels_none.setter
def labels_none(self, labels_none):
"""Sets the labels_none of this ContentLibraryVmTemplateWhereInput.
:param labels_none: The labels_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type labels_none: LabelWhereInput
"""
self._labels_none = labels_none
@property
def labels_some(self):
"""Gets the labels_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The labels_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: LabelWhereInput
"""
return self._labels_some
@labels_some.setter
def labels_some(self, labels_some):
"""Sets the labels_some of this ContentLibraryVmTemplateWhereInput.
:param labels_some: The labels_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type labels_some: LabelWhereInput
"""
self._labels_some = labels_some
@property
def memory(self):
"""Gets the memory of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory
@memory.setter
def memory(self, memory):
"""Sets the memory of this ContentLibraryVmTemplateWhereInput.
:param memory: The memory of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory: int
"""
self._memory = memory
@property
def memory_gt(self):
"""Gets the memory_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory_gt
@memory_gt.setter
def memory_gt(self, memory_gt):
"""Sets the memory_gt of this ContentLibraryVmTemplateWhereInput.
:param memory_gt: The memory_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_gt: int
"""
self._memory_gt = memory_gt
@property
def memory_gte(self):
"""Gets the memory_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory_gte
@memory_gte.setter
def memory_gte(self, memory_gte):
"""Sets the memory_gte of this ContentLibraryVmTemplateWhereInput.
:param memory_gte: The memory_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_gte: int
"""
self._memory_gte = memory_gte
@property
def memory_in(self):
"""Gets the memory_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._memory_in
@memory_in.setter
def memory_in(self, memory_in):
"""Sets the memory_in of this ContentLibraryVmTemplateWhereInput.
:param memory_in: The memory_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_in: list[int]
"""
self._memory_in = memory_in
@property
def memory_lt(self):
"""Gets the memory_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory_lt
@memory_lt.setter
def memory_lt(self, memory_lt):
"""Sets the memory_lt of this ContentLibraryVmTemplateWhereInput.
:param memory_lt: The memory_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_lt: int
"""
self._memory_lt = memory_lt
@property
def memory_lte(self):
"""Gets the memory_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory_lte
@memory_lte.setter
def memory_lte(self, memory_lte):
"""Sets the memory_lte of this ContentLibraryVmTemplateWhereInput.
:param memory_lte: The memory_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_lte: int
"""
self._memory_lte = memory_lte
@property
def memory_not(self):
"""Gets the memory_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._memory_not
@memory_not.setter
def memory_not(self, memory_not):
"""Sets the memory_not of this ContentLibraryVmTemplateWhereInput.
:param memory_not: The memory_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_not: int
"""
self._memory_not = memory_not
@property
def memory_not_in(self):
"""Gets the memory_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The memory_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._memory_not_in
@memory_not_in.setter
def memory_not_in(self, memory_not_in):
"""Sets the memory_not_in of this ContentLibraryVmTemplateWhereInput.
:param memory_not_in: The memory_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type memory_not_in: list[int]
"""
self._memory_not_in = memory_not_in
@property
def name(self):
"""Gets the name of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ContentLibraryVmTemplateWhereInput.
:param name: The name of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name: str
"""
self._name = name
@property
def name_contains(self):
"""Gets the name_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_contains
@name_contains.setter
def name_contains(self, name_contains):
"""Sets the name_contains of this ContentLibraryVmTemplateWhereInput.
:param name_contains: The name_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_contains: str
"""
self._name_contains = name_contains
@property
def name_ends_with(self):
"""Gets the name_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_ends_with
@name_ends_with.setter
def name_ends_with(self, name_ends_with):
"""Sets the name_ends_with of this ContentLibraryVmTemplateWhereInput.
:param name_ends_with: The name_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_ends_with: str
"""
self._name_ends_with = name_ends_with
@property
def name_gt(self):
"""Gets the name_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_gt
@name_gt.setter
def name_gt(self, name_gt):
"""Sets the name_gt of this ContentLibraryVmTemplateWhereInput.
:param name_gt: The name_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_gt: str
"""
self._name_gt = name_gt
@property
def name_gte(self):
"""Gets the name_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_gte
@name_gte.setter
def name_gte(self, name_gte):
"""Sets the name_gte of this ContentLibraryVmTemplateWhereInput.
:param name_gte: The name_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_gte: str
"""
self._name_gte = name_gte
@property
def name_in(self):
"""Gets the name_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._name_in
@name_in.setter
def name_in(self, name_in):
"""Sets the name_in of this ContentLibraryVmTemplateWhereInput.
:param name_in: The name_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_in: list[str]
"""
self._name_in = name_in
@property
def name_lt(self):
"""Gets the name_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_lt
@name_lt.setter
def name_lt(self, name_lt):
"""Sets the name_lt of this ContentLibraryVmTemplateWhereInput.
:param name_lt: The name_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_lt: str
"""
self._name_lt = name_lt
@property
def name_lte(self):
"""Gets the name_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_lte
@name_lte.setter
def name_lte(self, name_lte):
"""Sets the name_lte of this ContentLibraryVmTemplateWhereInput.
:param name_lte: The name_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_lte: str
"""
self._name_lte = name_lte
@property
def name_not(self):
"""Gets the name_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_not
@name_not.setter
def name_not(self, name_not):
"""Sets the name_not of this ContentLibraryVmTemplateWhereInput.
:param name_not: The name_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_not: str
"""
self._name_not = name_not
@property
def name_not_contains(self):
"""Gets the name_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_not_contains
@name_not_contains.setter
def name_not_contains(self, name_not_contains):
"""Sets the name_not_contains of this ContentLibraryVmTemplateWhereInput.
:param name_not_contains: The name_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_not_contains: str
"""
self._name_not_contains = name_not_contains
@property
def name_not_ends_with(self):
"""Gets the name_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_not_ends_with
@name_not_ends_with.setter
def name_not_ends_with(self, name_not_ends_with):
"""Sets the name_not_ends_with of this ContentLibraryVmTemplateWhereInput.
:param name_not_ends_with: The name_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_not_ends_with: str
"""
self._name_not_ends_with = name_not_ends_with
@property
def name_not_in(self):
"""Gets the name_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._name_not_in
@name_not_in.setter
def name_not_in(self, name_not_in):
"""Sets the name_not_in of this ContentLibraryVmTemplateWhereInput.
:param name_not_in: The name_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_not_in: list[str]
"""
self._name_not_in = name_not_in
@property
def name_not_starts_with(self):
"""Gets the name_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_not_starts_with
@name_not_starts_with.setter
def name_not_starts_with(self, name_not_starts_with):
"""Sets the name_not_starts_with of this ContentLibraryVmTemplateWhereInput.
:param name_not_starts_with: The name_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_not_starts_with: str
"""
self._name_not_starts_with = name_not_starts_with
@property
def name_starts_with(self):
"""Gets the name_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The name_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._name_starts_with
@name_starts_with.setter
def name_starts_with(self, name_starts_with):
"""Sets the name_starts_with of this ContentLibraryVmTemplateWhereInput.
:param name_starts_with: The name_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type name_starts_with: str
"""
self._name_starts_with = name_starts_with
@property
def _not(self):
"""Gets the _not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The _not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[ContentLibraryVmTemplateWhereInput]
"""
return self.__not
@_not.setter
def _not(self, _not):
"""Sets the _not of this ContentLibraryVmTemplateWhereInput.
:param _not: The _not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type _not: list[ContentLibraryVmTemplateWhereInput]
"""
self.__not = _not
@property
def _or(self):
"""Gets the _or of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The _or of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[ContentLibraryVmTemplateWhereInput]
"""
return self.__or
@_or.setter
def _or(self, _or):
"""Sets the _or of this ContentLibraryVmTemplateWhereInput.
:param _or: The _or of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type _or: list[ContentLibraryVmTemplateWhereInput]
"""
self.__or = _or
@property
def os(self):
"""Gets the os of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""Sets the os of this ContentLibraryVmTemplateWhereInput.
:param os: The os of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os: str
"""
self._os = os
@property
def os_contains(self):
"""Gets the os_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_contains
@os_contains.setter
def os_contains(self, os_contains):
"""Sets the os_contains of this ContentLibraryVmTemplateWhereInput.
:param os_contains: The os_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_contains: str
"""
self._os_contains = os_contains
@property
def os_ends_with(self):
"""Gets the os_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_ends_with
@os_ends_with.setter
def os_ends_with(self, os_ends_with):
"""Sets the os_ends_with of this ContentLibraryVmTemplateWhereInput.
:param os_ends_with: The os_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_ends_with: str
"""
self._os_ends_with = os_ends_with
@property
def os_gt(self):
"""Gets the os_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_gt
@os_gt.setter
def os_gt(self, os_gt):
"""Sets the os_gt of this ContentLibraryVmTemplateWhereInput.
:param os_gt: The os_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_gt: str
"""
self._os_gt = os_gt
@property
def os_gte(self):
"""Gets the os_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_gte
@os_gte.setter
def os_gte(self, os_gte):
"""Sets the os_gte of this ContentLibraryVmTemplateWhereInput.
:param os_gte: The os_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_gte: str
"""
self._os_gte = os_gte
@property
def os_in(self):
"""Gets the os_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._os_in
@os_in.setter
def os_in(self, os_in):
"""Sets the os_in of this ContentLibraryVmTemplateWhereInput.
:param os_in: The os_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_in: list[str]
"""
self._os_in = os_in
@property
def os_lt(self):
"""Gets the os_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_lt
@os_lt.setter
def os_lt(self, os_lt):
"""Sets the os_lt of this ContentLibraryVmTemplateWhereInput.
:param os_lt: The os_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_lt: str
"""
self._os_lt = os_lt
@property
def os_lte(self):
"""Gets the os_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_lte
@os_lte.setter
def os_lte(self, os_lte):
"""Sets the os_lte of this ContentLibraryVmTemplateWhereInput.
:param os_lte: The os_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_lte: str
"""
self._os_lte = os_lte
@property
def os_not(self):
"""Gets the os_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_not
@os_not.setter
def os_not(self, os_not):
"""Sets the os_not of this ContentLibraryVmTemplateWhereInput.
:param os_not: The os_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_not: str
"""
self._os_not = os_not
@property
def os_not_contains(self):
"""Gets the os_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_not_contains
@os_not_contains.setter
def os_not_contains(self, os_not_contains):
"""Sets the os_not_contains of this ContentLibraryVmTemplateWhereInput.
:param os_not_contains: The os_not_contains of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_not_contains: str
"""
self._os_not_contains = os_not_contains
@property
def os_not_ends_with(self):
"""Gets the os_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_not_ends_with
@os_not_ends_with.setter
def os_not_ends_with(self, os_not_ends_with):
"""Sets the os_not_ends_with of this ContentLibraryVmTemplateWhereInput.
:param os_not_ends_with: The os_not_ends_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_not_ends_with: str
"""
self._os_not_ends_with = os_not_ends_with
@property
def os_not_in(self):
"""Gets the os_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[str]
"""
return self._os_not_in
@os_not_in.setter
def os_not_in(self, os_not_in):
"""Sets the os_not_in of this ContentLibraryVmTemplateWhereInput.
:param os_not_in: The os_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_not_in: list[str]
"""
self._os_not_in = os_not_in
@property
def os_not_starts_with(self):
"""Gets the os_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_not_starts_with
@os_not_starts_with.setter
def os_not_starts_with(self, os_not_starts_with):
"""Sets the os_not_starts_with of this ContentLibraryVmTemplateWhereInput.
:param os_not_starts_with: The os_not_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_not_starts_with: str
"""
self._os_not_starts_with = os_not_starts_with
@property
def os_starts_with(self):
"""Gets the os_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The os_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: str
"""
return self._os_starts_with
@os_starts_with.setter
def os_starts_with(self, os_starts_with):
"""Sets the os_starts_with of this ContentLibraryVmTemplateWhereInput.
:param os_starts_with: The os_starts_with of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type os_starts_with: str
"""
self._os_starts_with = os_starts_with
@property
def size(self):
"""Gets the size of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ContentLibraryVmTemplateWhereInput.
:param size: The size of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size: int
"""
self._size = size
@property
def size_gt(self):
"""Gets the size_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size_gt
@size_gt.setter
def size_gt(self, size_gt):
"""Sets the size_gt of this ContentLibraryVmTemplateWhereInput.
:param size_gt: The size_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_gt: int
"""
self._size_gt = size_gt
@property
def size_gte(self):
"""Gets the size_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size_gte
@size_gte.setter
def size_gte(self, size_gte):
"""Sets the size_gte of this ContentLibraryVmTemplateWhereInput.
:param size_gte: The size_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_gte: int
"""
self._size_gte = size_gte
@property
def size_in(self):
"""Gets the size_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._size_in
@size_in.setter
def size_in(self, size_in):
"""Sets the size_in of this ContentLibraryVmTemplateWhereInput.
:param size_in: The size_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_in: list[int]
"""
self._size_in = size_in
@property
def size_lt(self):
"""Gets the size_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size_lt
@size_lt.setter
def size_lt(self, size_lt):
"""Sets the size_lt of this ContentLibraryVmTemplateWhereInput.
:param size_lt: The size_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_lt: int
"""
self._size_lt = size_lt
@property
def size_lte(self):
"""Gets the size_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size_lte
@size_lte.setter
def size_lte(self, size_lte):
"""Sets the size_lte of this ContentLibraryVmTemplateWhereInput.
:param size_lte: The size_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_lte: int
"""
self._size_lte = size_lte
@property
def size_not(self):
"""Gets the size_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._size_not
@size_not.setter
def size_not(self, size_not):
"""Sets the size_not of this ContentLibraryVmTemplateWhereInput.
:param size_not: The size_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_not: int
"""
self._size_not = size_not
@property
def size_not_in(self):
"""Gets the size_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The size_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._size_not_in
@size_not_in.setter
def size_not_in(self, size_not_in):
"""Sets the size_not_in of this ContentLibraryVmTemplateWhereInput.
:param size_not_in: The size_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type size_not_in: list[int]
"""
self._size_not_in = size_not_in
@property
def vcpu(self):
"""Gets the vcpu of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu
@vcpu.setter
def vcpu(self, vcpu):
"""Sets the vcpu of this ContentLibraryVmTemplateWhereInput.
:param vcpu: The vcpu of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu: int
"""
self._vcpu = vcpu
@property
def vcpu_gt(self):
"""Gets the vcpu_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu_gt
@vcpu_gt.setter
def vcpu_gt(self, vcpu_gt):
"""Sets the vcpu_gt of this ContentLibraryVmTemplateWhereInput.
:param vcpu_gt: The vcpu_gt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_gt: int
"""
self._vcpu_gt = vcpu_gt
@property
def vcpu_gte(self):
"""Gets the vcpu_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu_gte
@vcpu_gte.setter
def vcpu_gte(self, vcpu_gte):
"""Sets the vcpu_gte of this ContentLibraryVmTemplateWhereInput.
:param vcpu_gte: The vcpu_gte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_gte: int
"""
self._vcpu_gte = vcpu_gte
@property
def vcpu_in(self):
"""Gets the vcpu_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._vcpu_in
@vcpu_in.setter
def vcpu_in(self, vcpu_in):
"""Sets the vcpu_in of this ContentLibraryVmTemplateWhereInput.
:param vcpu_in: The vcpu_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_in: list[int]
"""
self._vcpu_in = vcpu_in
@property
def vcpu_lt(self):
"""Gets the vcpu_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu_lt
@vcpu_lt.setter
def vcpu_lt(self, vcpu_lt):
"""Sets the vcpu_lt of this ContentLibraryVmTemplateWhereInput.
:param vcpu_lt: The vcpu_lt of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_lt: int
"""
self._vcpu_lt = vcpu_lt
@property
def vcpu_lte(self):
"""Gets the vcpu_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu_lte
@vcpu_lte.setter
def vcpu_lte(self, vcpu_lte):
"""Sets the vcpu_lte of this ContentLibraryVmTemplateWhereInput.
:param vcpu_lte: The vcpu_lte of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_lte: int
"""
self._vcpu_lte = vcpu_lte
@property
def vcpu_not(self):
"""Gets the vcpu_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: int
"""
return self._vcpu_not
@vcpu_not.setter
def vcpu_not(self, vcpu_not):
"""Sets the vcpu_not of this ContentLibraryVmTemplateWhereInput.
:param vcpu_not: The vcpu_not of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_not: int
"""
self._vcpu_not = vcpu_not
@property
def vcpu_not_in(self):
"""Gets the vcpu_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vcpu_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: list[int]
"""
return self._vcpu_not_in
@vcpu_not_in.setter
def vcpu_not_in(self, vcpu_not_in):
"""Sets the vcpu_not_in of this ContentLibraryVmTemplateWhereInput.
:param vcpu_not_in: The vcpu_not_in of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vcpu_not_in: list[int]
"""
self._vcpu_not_in = vcpu_not_in
@property
def vm_templates_every(self):
"""Gets the vm_templates_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vm_templates_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: VmTemplateWhereInput
"""
return self._vm_templates_every
@vm_templates_every.setter
def vm_templates_every(self, vm_templates_every):
"""Sets the vm_templates_every of this ContentLibraryVmTemplateWhereInput.
:param vm_templates_every: The vm_templates_every of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vm_templates_every: VmTemplateWhereInput
"""
self._vm_templates_every = vm_templates_every
@property
def vm_templates_none(self):
"""Gets the vm_templates_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vm_templates_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: VmTemplateWhereInput
"""
return self._vm_templates_none
@vm_templates_none.setter
def vm_templates_none(self, vm_templates_none):
"""Sets the vm_templates_none of this ContentLibraryVmTemplateWhereInput.
:param vm_templates_none: The vm_templates_none of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vm_templates_none: VmTemplateWhereInput
"""
self._vm_templates_none = vm_templates_none
@property
def vm_templates_some(self):
"""Gets the vm_templates_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:return: The vm_templates_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:rtype: VmTemplateWhereInput
"""
return self._vm_templates_some
@vm_templates_some.setter
def vm_templates_some(self, vm_templates_some):
"""Sets the vm_templates_some of this ContentLibraryVmTemplateWhereInput.
:param vm_templates_some: The vm_templates_some of this ContentLibraryVmTemplateWhereInput. # noqa: E501
:type vm_templates_some: VmTemplateWhereInput
"""
self._vm_templates_some = vm_templates_some
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
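        # When serialize=True, keys are mapped to their API names via attribute_map; convert() recurses into nested models.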
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContentLibraryVmTemplateWhereInput):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ContentLibraryVmTemplateWhereInput):
return True
return self.to_dict() != other.to_dict()
| UTF-8 | Python | false | false | 91,732 | py | 367 | content_library_vm_template_where_input.py | 361 | 0.628428 | 0.617494 | 0 | 2,842 | 31.27727 | 133 |
racharyy/BERT_humor | 7,954,279,432,193 | 0021216bd49b05b406fd7312df13e136bc3ec09e | e7b02652f394f12ec166d89e98d0ed520761d067 | /humor_bert_embedding_property.py | e1b0860542c538301d708e3c284a1b23fd78503d | []
| no_license | https://github.com/racharyy/BERT_humor | a95c732b0c64210bb2c4ed7052533dd491480fda | 6f135866fe29b635a9aaf05fd0c08cfb9554d073 | refs/heads/master | 2022-11-09T13:27:00.409531 | 2019-10-30T20:51:41 | 2019-10-30T20:51:41 | 199,521,323 | 0 | 0 | null | false | 2022-10-25T14:02:42 | 2019-07-29T20:18:36 | 2019-10-30T20:51:53 | 2019-10-30T20:51:50 | 1,143 | 0 | 1 | 1 | Python | false | false | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 16:14:01 2019
@author: mhasan8
"""
import pickle as pkl
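# Merge the per-split BERT [CLS] embeddings (train, dev, test) into one labelled list.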
dev_data=pkl.load(open("humor_bert_embeddings_dev.pkl","rb"))
train_data=pkl.load(open("humor_bert_embeddings_train.pkl","rb"))
test_data=pkl.load(open("humor_bert_embeddings_test.pkl","rb"))
humor_cls_embeddings=[]
# All three splits are handled identically, so a single loop suffices
# (assumes the pickled objects are lists, as the per-split loops imply).
for humor_inst in train_data + dev_data + test_data:
    inst={'cls_embedding':humor_inst['cls_embedding'],'label':humor_inst['label']}
    humor_cls_embeddings.append(inst)
pkl.dump(humor_cls_embeddings,open("humor_bert_cls_embeddings.pkl","wb")) | UTF-8 | Python | false | false | 896 | py | 21 | humor_bert_embedding_property.py | 20 | 0.686384 | 0.670759 | 0 | 37 | 23.243243 | 82 |
nharringtonwasatch/WasatchUSB | 9,096,740,751,244 | 65ed0f357ac8fc1ba92499ff75359ecd74946487 | 50469576929509d5845aaa0177c2d563e91b1f60 | /scripts/stroker.py | 34fc7789adc89901110640d67523eb13f5345aca | []
| no_license | https://github.com/nharringtonwasatch/WasatchUSB | 00294a34ae21812851bf4535f7faa319794f9fbb | 2576ded56704736c8f0d6d6de3613840fce3662f | refs/heads/master | 2020-04-12T08:01:45.651896 | 2017-09-22T21:43:07 | 2017-09-22T21:43:07 | 40,848,538 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
""" Bare bones script to connect to a Wasatch Photonics device that
supports the stroker series protocol. Will print version information of
any device connected. communication for devices from Wasatch Photonics.
Stroker in this case is an homage to automotive performance:
https://en.wikipedia.org/wiki/Stroker_kit
"""
import sys
import logging
log = logging.getLogger()
strm = logging.StreamHandler(sys.stderr)
log.addHandler(strm)
log.setLevel(logging.WARN)
graph_available = True
try:
from diagram import DGWrapper, DOption
except ImportError as exc:
graph_available = False
log.warn("No diagram module - lineplots disabled.")
log.warn("See: https://github.com/WasatchPhotonics/diagram")
log.warn("Exception: %s", exc)
from wasatchusb import stroker_protocol
def print_device():
""" Print the default set of data from the device. To diagnose these
individually, see the wasatchusb/test/test_stroker_fx2_protocol.py
file.
"""
dev_list = stroker_protocol.ListDevices()
result = dev_list.get_all()
if result == []:
print "No devices found!"
sys.exit(1)
dev_count = 0
for item in dev_list.get_all():
print "Device: %s VID: %s PID: %s" \
% (dev_count, item[0], item[1])
dev_count += 1
last_device = dev_list.get_all()[-1]
last_pid = int(last_device[1], 16)
device = stroker_protocol.StrokerProtocolDevice(pid=last_pid)
device.connect()
print "Serial: %s" % device.get_serial_number()
print "SWCode: %s" % device.get_standard_software_code()
print "FPGARev: %s" % device.get_fpga_revision()
print "Gain: %s" % device.get_ccd_gain()
print "Int Time: %s" % device.get_integration_time()
return device
def print_data(device):
data = device.get_line()
avg_data = sum(data) / len(data)
print ""
points = []
values = []
subsample_size = len(data) / 80
for item in data[::subsample_size]:
points.append(float(item))
values.append(None)
if graph_available:
gram = DGWrapper(data=[points, values])
gram.show()
else:
print "Min: %s Max: %s Avg: %s" \
% (min(data), max(data), avg_data)
if __name__ == "__main__":
device = print_device()
print_data(device)
| UTF-8 | Python | false | false | 2,365 | py | 20 | stroker.py | 19 | 0.635941 | 0.630444 | 0 | 83 | 27.493976 | 72 |
karlicoss/orger | 13,383,118,118,031 | 039d70ee1e3ab039eb43d8dc77c68af6c2ed6ef7 | 3dc867d0c91c69d3cfdad7ffc94976820056e48f | /modules/reddit_all.py | 3e6abf265f412fcf562eed6e70cd21eed61d3b9a | [
"MIT"
]
| permissive | https://github.com/karlicoss/orger | 12d698a88a7c1b8354106a296ccfeb94a8b9e989 | 9c4fb4b252aa7abd09bfed6c580781ebe4ceef70 | refs/heads/master | 2022-11-11T04:30:11.130043 | 2022-10-19T20:38:12 | 2022-10-19T20:44:12 | 186,183,159 | 287 | 16 | MIT | false | 2022-10-19T20:44:13 | 2019-05-11T21:30:23 | 2022-10-09T15:08:48 | 2022-10-19T20:44:12 | 182 | 256 | 11 | 7 | Python | false | false | #!/usr/bin/env python3
"""
Read-only reddit mirror of comments, submissions and upvoted posts; everything except saved
"""
from orger import Mirror
from orger.inorganic import node, link, Quoted
from orger.common import dt_heading
from my.reddit import upvoted, submissions, comments
class RedditAllView(Mirror):
def get_items(self) -> Mirror.Results:
yield node(
'Submissions',
children=[node( # TODO can also be iterable?
dt_heading(s.created, link(title=s.title, url=s.url)),
body=Quoted(s.text),
) for s in submissions()]
)
yield node(
'Comments', # todo parent thread??
children=[node(
dt_heading(c.created, link(title=c.url, url=c.url)),
body=Quoted(c.text),
) for c in comments()],
)
yield node(
'Upvoted',
children=[node(
dt_heading(u.created, link(title=u.title, url=u.url)),
body=Quoted(u.text),
) for u in upvoted()]
)
if __name__ == '__main__':
RedditAllView.main()
# todo not sure if for reddit worth converting bodies from md to org? I guess quoting is ok for now?
| UTF-8 | Python | false | false | 1,247 | py | 43 | reddit_all.py | 39 | 0.57097 | 0.570168 | 0 | 41 | 29.414634 | 100 |
samwar/python-class | 17,334,488,033,203 | d07878093fdda9b497de33513a0ddf8cd8a61302 | f1e9e47ca30d63f7b787b38af64aaeadede63221 | /lab03.py | 336d4d894f864a893b8cb30cff6ab9015879e2f0 | []
| no_license | https://github.com/samwar/python-class | 0e967c67d17d897fbaf33faab852f0b8d0d8e3b6 | 8696b84d8e3070d3d38bcce0586d40a03492baf0 | refs/heads/master | 2021-01-15T23:40:00.948787 | 2014-04-10T22:04:23 | 2014-04-10T22:04:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'samu6978'
x = 4
print x
print int(5)
y = [1,2,3]
print y.count(3)
y.index(1)
foo = {1:'foo', 2:'bar', 3:'baz'}
for x in foo.iteritems():
print x
print y
y.extend([4,5])
print y
y.append([4,5])
print y
z = "foo"
# z.ap("bar")
# z.ex
z += "bar"
print z | UTF-8 | Python | false | false | 271 | py | 24 | lab03.py | 24 | 0.560886 | 0.494465 | 0 | 23 | 10.826087 | 33 |
DRAR444/poei-python | 7,533,372,637,454 | 34b164d219120454e2c48243ab00279c7059b4a5 | 8854a2851cdd0666965d324a6123c0f4a7b08050 | /voiture.py | eec02f1134c9efeef3bc62ad6d408d0a11060585 | []
| no_license | https://github.com/DRAR444/poei-python | ce72b46f1a258a1f34743cb28336bcea4af55fcc | 63c55314e3d8a2e7233da00f5aefebfa9ab93138 | refs/heads/master | 2020-03-19T08:07:14.445816 | 2018-06-16T08:44:03 | 2018-06-16T08:44:03 | 136,177,167 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
class Voiture:
def vitesseMax(self):
return "500km/h"
ma_voiture = Voiture()
print(ma_voiture.vitesseMax())
# def __init__(self):
#self.nom = "Ferrari"
#self.origine = "Italienne"
#elf.couleur = "Rouge"
#ma_voiture = Voiture()
#print("Ma voiture est : " + ma_voiture.nom + " " +"elle est " + ma_voiture.origine + " "+ "et elle est de couleur " +ma_voiture.couleur) | UTF-8 | Python | false | false | 436 | py | 8 | voiture.py | 8 | 0.614679 | 0.605505 | 0 | 17 | 24.705882 | 138 |
Hackman238/ZenPacks.ShaneScott.ipSLA | 10,892,037,081,896 | e8ce4866991ac533221f2f32ab1e75224bcad435 | a5b6b8c079e5dd8480db9685295bae5b7e07fcb0 | /ZenPacks/ShaneScott/ipSLA/routers.py | dd4e92a3b1bfd7e1424fb79ded0a66bd6151dd65 | []
| no_license | https://github.com/Hackman238/ZenPacks.ShaneScott.ipSLA | 8b654a083726455430734a5dc171ff749f34d245 | ec3185dc9ea4ea2f4233b5e95dfb0bb62d626dff | refs/heads/master | 2020-12-24T14:35:24.139328 | 2012-10-08T17:41:37 | 2012-10-08T17:41:37 | 6,128,426 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Products.ZenUtils.Ext import DirectRouter, DirectResponse
from Products import Zuul
class SLARouter(DirectRouter):
def _getFacade(self):
return Zuul.getFacade('sla', self.context)
def manage_delSLAs(self, rttIndex, deviceIp, community):
facade = self._getFacade()
success, message = facade.manage_delSLAs(rttIndex, deviceIp, community)
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_writeMemSLAs(self, deviceIp, community):
facade = self._getFacade()
success, message = facade.manage_writeMemSLAs(deviceIp, community)
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addTcpSLAs(self, newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonEchoAdminTargetPort, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss"):
facade = self._getFacade()
success, message = facade.manage_addTcpSLAs(newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonEchoAdminTargetPort, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss")
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addJitterSLAs(self, newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonEchoAdminTargetPort, rttMonEchoAdminInterval=60, rttMonEchoAdminNumPackets=100, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss"):
facade = self._getFacade()
success, message = facade.manage_addJitterSLAs(newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonEchoAdminTargetPort, rttMonEchoAdminInterval=60, rttMonEchoAdminNumPackets=100, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss")
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addDnsSLAs(self, newId, rttIndex, deviceIp, community, rttMonEchoAdminNameServer, rttMonEchoAdminTargetAddressString, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss"):
facade = self._getFacade()
success, message = facade.manage_addDnsSLAs(newId, rttIndex, deviceIp, community, rttMonEchoAdminNameServer, rttMonEchoAdminTargetAddressString, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss")
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addDhcpSLAs(self, newId, rttIndex, deviceIp, community, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss"):
facade = self._getFacade()
success, message = facade.manage_addDhcpSLAs(newId, rttIndex, deviceIp, community, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminOwner="zenoss")
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addEchoSLAs(self, newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminFrequency=60, rttMonCtrlAdminOwner="zenoss", rttMonCtrlAdminThreshold=5000, rttMonCtrlAdminTimeout=5):
facade = self._getFacade()
success, message = facade.manage_addEchoSLAs(newId, rttIndex, deviceIp, community, rttMonEchoAdminTargetAddress, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminFrequency=60, rttMonCtrlAdminOwner="zenoss", rttMonCtrlAdminThreshold=5000, rttMonCtrlAdminTimeout=5)
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
def manage_addHttpSLAs(self, newId, rttIndex, deviceIp, community, rttMonEchoAdminURL, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminFrequency=60, rttMonCtrlAdminOwner="zenoss", rttMonCtrlAdminThreshold=5000, rttMonCtrlAdminTimeout=5):
facade = self._getFacade()
success, message = facade.manage_addHttpSLAs(newId, rttIndex, deviceIp, community, rttMonEchoAdminURL, rttMonScheduleAdminRttStartTime=1, rttMonCtrlAdminFrequency=60, rttMonCtrlAdminOwner="zenoss", rttMonCtrlAdminThreshold=5000, rttMonCtrlAdminTimeout=5)
if success:
return DirectResponse.succeed(jobId=message)
else:
return DirectResponse.fail(message)
| UTF-8 | Python | false | false | 4,650 | py | 22 | routers.py | 12 | 0.736774 | 0.726022 | 0 | 81 | 55.382716 | 274 |
Customer1-Everforce/Repo1 | 15,539,191,700,856 | 97c6bad2f8ab706b0a1ffbe428098cd6651dc2dd | 8b6fdd216e2a8eed2faa20fe82bf444b135dcc05 | /create_issue.py | df65786278880311007d60a60ec911f4a41ee9f4 | []
| no_license | https://github.com/Customer1-Everforce/Repo1 | 2e1dc7b7e4d6db3e94811469ef52372c1d5d449d | 5fc74c7060bb2795f80fbd0e92fcec481e8a1ff4 | refs/heads/main | 2023-07-16T03:47:16.557060 | 2021-09-03T04:21:48 | 2021-09-03T04:21:48 | 358,514,457 | 0 | 0 | null | false | 2021-05-11T07:47:50 | 2021-04-16T07:32:01 | 2021-05-07T09:36:48 | 2021-05-11T07:47:49 | 5 | 0 | 0 | 0 | HTML | false | false | from github import Github
import requests
import os
from pprint import pprint
token = os.getenv('GITHUB_TOKEN', '5b80cd6dd28577ed249e762207a2d9831cf8c727')
#repo = "API_test"
#repo = "everforce_editors"
owner = "everforce-github"
g = Github(token)
#repo = g.get_repo("{owner}/{repo}")
repo = g.get_repo("everforce-github/API_test")
#repo = g.get_repo("API_test/everforce-github")
i = repo.create_issue(
title="Issue Title-s2",
body="Text of the body.",
#assignee="Gri_Vidi",
labels=[
repo.get_label("good first issue")
]
)
print("Issue created:\n",i)
"""
g = Github(token)
repo = g.get_repo("MartinHeinz/python-project-blueprint")
i = repo.create_issue(
title="Issue Title",
body="Text of the body.",
assignee="MartinHeinz",
labels=[
repo.get_label("good first issue")
]
)
pprint(i)
"""
| UTF-8 | Python | false | false | 846 | py | 10 | create_issue.py | 4 | 0.663121 | 0.630024 | 0 | 37 | 21.864865 | 77 |
Jackiu1997/gcransac-python | 11,106,785,449,778 | 2870fd1d2ac8a9858553d2a1507fd592753baac8 | e115ea0da47f88e8d1b0bbe19c2af5e63ea80266 | /ransac/ransac_api.py | df492873131ac1d67456e28cb2fbcbf261084d7c | []
| no_license | https://github.com/Jackiu1997/gcransac-python | 8264d4efa2eafebcd9664a2692476a3bd54d76f1 | 050ee2c31f24f90551a0adb2f51aed988f39b16a | refs/heads/master | 2022-09-19T00:25:23.234923 | 2020-06-05T04:17:46 | 2020-06-05T04:17:46 | 255,355,515 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import cv2
import numpy as np
from estimator import (EstimatorEssential, EstimatorFundamental,
EstimatorHomography)
from model import *
from sampler import UniformSampler
from solver import (SolverEssentialMatrixEightPoint,
SolverEssentialMatrixFivePointStewenius,
SolverFundamentalMatrixEightPoint,
SolverFundamentalMatrixSevenPoint,
SolverHomographyFourPoint)
from .ransac import RANSAC
def __transformInliersToMask(inliers, point_number):
""" 转换 inliers 内点序号列表为 cv2 match 所需的 mask
参数
--------
inliers : list
内点序号列表
point_number : int
点集的数目
返回
--------
list
包含 0 1 的 mask 列表
"""
mask = []
for i in range(point_number):
if i in inliers:
mask.append(1)
else:
mask.append(0)
mask = np.array(mask)
return mask
""" 用于特征点匹配,对应矩阵求解的函数 """
def findHomography(src_points, dst_points, threshold=1.0, conf=0.95, max_iters=10000):
""" 单应矩阵求解
参数
--------
src_points : numpy
源图像特征点集合
dst_points : numpy
目标图像特征点集合
h1, w1: int, int
源图像高度和宽度
h2, w2: int, int
目标图像高度和宽度
threshold : float
决定内点和外点的阈值
conf : float
RANSAC置信参数
max_iters : int
RANSAC算法最大迭代次数
返回
--------
numpy, list
基础矩阵,标注内点和外点的mask
"""
# 合并points到同个矩阵:
# src在前两列,dst在后两列
points = np.c_[src_points, dst_points]
''' GC-RANSAC过程 '''
# 设置模型估计器和模型
estimator = EstimatorHomography(SolverHomographyFourPoint,
SolverHomographyFourPoint)
model = Homography()
# 设置全局采样
main_sampler = UniformSampler(points)
# 检查样本是否成功初始化
if not main_sampler.initialized:
print("采样器初始化失败\n")
return None
# 设置GC-RANSAC算法参数
gcransac = RANSAC()
gcransac.settings.threshold = threshold
gcransac.settings.confidence = conf
gcransac.settings.max_iteration_number = max_iters
# 运行GC-RANSAC算法
model, inliers = gcransac.run(points,
estimator,
main_sampler)
print(f'Number of iterations = {gcransac.statistics.iteration_number}')
# 获取特征点匹配结果(变换矩阵 和 模型对应内点)
H = model.descriptor
mask = __transformInliersToMask(inliers, gcransac.point_number)
return H, mask
| UTF-8 | Python | false | false | 2,883 | py | 92 | ransac_api.py | 36 | 0.593483 | 0.585336 | 0 | 102 | 23.068627 | 86 |
Saurabh2509/CloudCompare-PythonPlugin | 13,194,139,574,869 | 1a6208724b12bb4ffcfaa586584f983078971092 | ad6b36d1b0e0692ec2c3715f95d85f0935d680ec | /script_examples/merge_script.py | c2c9cac2fcd1a3f4b21bb95b3b66408ad19cedc4 | []
| no_license | https://github.com/Saurabh2509/CloudCompare-PythonPlugin | 6e1e2db8820b3c75d3d4727a238c8c4ad5df61d6 | a90b32b599883b1e12391996073e404d2c6a65a8 | refs/heads/master | 2023-08-15T20:14:20.921088 | 2021-09-30T09:28:13 | 2021-09-30T09:28:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pycc
def merge(clouds):
total_num_points = sum(cloud.size() for cloud in clouds)
merge_result = pycc.ccPointCloud("MergeResult")
merge_result.reserve(total_num_points)
for cloud_idx, cloud in enumerate(clouds):
for point_idx in range(cloud.size()):
merge_result.addPoint(cloud.getPoint(point_idx))
pos = 0
for cloud in clouds:
for i in range(cloud.getNumberOfScalarFields()):
scalarFieldName = cloud.getScalarFieldName(i)
idx = merge_result.getScalarFieldIndexByName(scalarFieldName)
if idx == -1:
idx = merge_result.addScalarField(scalarFieldName)
if idx == -1:
raise RuntimeError("Failed to add ScalarField")
scalarField = cloud.getScalarField(i)
sf = merge_result.getScalarField(idx)
sf.asArray()[pos: pos + scalarField.size()] = scalarField.asArray()[:]
sf.computeMinAndMax()
pos += cloud.size()
return merge_result
def main():
CC = pycc.GetInstance()
clouds = CC.getSelectedEntities()
merged_cloud = merge(clouds)
CC.addToDB(merged_cloud)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,219 | py | 196 | merge_script.py | 146 | 0.611157 | 0.608696 | 0 | 44 | 26.704545 | 82 |
xiaming9880/Curvature-Learning-Framework | 1,185,410,986,347 | 78132a4fde70df65a718d6e4909d57f88f5e99fb | 1dcb23afe2b907636345ac5f94552eca12c53999 | /examples/hgcn/data_utils.py | 466861d0341d37e3676c6bb99144cf88f86826f0 | [
"Apache-2.0"
]
| permissive | https://github.com/xiaming9880/Curvature-Learning-Framework | da0c2300b96dbcb5b48a28ddb7ce6c6b63373d04 | db1be0b7ed57f046bc5b359dbb0155708cd94611 | refs/heads/main | 2023-08-26T18:48:48.821531 | 2021-11-15T06:14:09 | 2021-11-15T06:14:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (C) 2016-2021 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
sys.path.append('.')
import numpy as np
import pickle as pkl
import tensorflow as tf
import scipy.sparse as sp
from config import training_config
def normalize(mx):
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def augment(adj, features):
deg = np.squeeze(np.sum(adj, axis=0).astype(int))
deg[deg > 5] = 5
deg_onehot = np.array(np.eye(6)[deg], dtype=np.float32).squeeze()
const_f = np.ones((features.shape[0], 1))
features = np.concatenate([features, deg_onehot, const_f], axis=1)
return features
def process(adj, features, normalize_adj, normalize_feats):
if sp.isspmatrix(features):
features = np.array(features.todense())
if normalize_feats:
features = normalize(features)
adj = adj + sp.eye(adj.shape[0])
if normalize_adj:
adj = normalize(adj)
adj = np.array(adj.toarray(), dtype=np.float32)
return adj, features
def mask_edges(adj, val_prop):
x, y = sp.triu(adj).nonzero()
pos_edges = np.array(list(zip(x, y)))
np.random.shuffle(pos_edges)
x, y = sp.triu(sp.csr_matrix(1. - adj.toarray())).nonzero()
neg_edges = np.array(list(zip(x, y)))
np.random.shuffle(neg_edges)
m_pos = len(pos_edges)
n_val = int(m_pos * val_prop)
val_edges, train_edges = pos_edges[-n_val:], pos_edges[:-n_val]
val_edges_false = neg_edges[-n_val:]
train_edges_false = np.concatenate([neg_edges, val_edges], axis=0)
adj_train = sp.csr_matrix((np.ones(train_edges.shape[0]), (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
adj_train = adj_train + adj_train.T
return adj_train, train_edges, train_edges_false, val_edges, val_edges_false
def load_data_airport(data_path, normalize_adj, normalize_feat):
adj = pkl.load(open(os.path.join(data_path, "adj.pkl"), 'rb'))
features = pkl.load(open(os.path.join(data_path, "features.pkl"), 'rb'))
adj = sp.csr_matrix(adj)
data = {'adj_train': adj, 'features': features}
adj_train, train_edges, train_edges_false, val_edges, val_edges_false = mask_edges(adj, 0.2)
data['adj_train'] = adj_train
data['train_edges'], data['train_edges_false'] = train_edges, train_edges_false
data['val_edges'] = [list(p) + [1] for p in val_edges] + [list(p) + [0] for p in val_edges_false]
data['adj_train_norm'], data['features'] = process(data['adj_train'], data['features'], normalize_adj,
normalize_feat)
data['features'] = np.array(augment(data['adj_train'], data['features']), dtype=np.float32)
return data
def get_train_batch(pos_edge_set, neg_edge_set):
pos_dataset = tf.data.Dataset.from_tensor_slices(pos_edge_set)
pos_dataset = pos_dataset.shuffle(buffer_size=training_config["shuffle_buffer"]).batch(
training_config["batch_size"], drop_remainder=False)
pos_dataset = pos_dataset.repeat()
pos_iter = pos_dataset.make_one_shot_iterator()
pos_batch = pos_iter.get_next()
pos_label = tf.ones(shape=(tf.shape(pos_batch)[0]))
neg_dataset = tf.data.Dataset.from_tensor_slices(neg_edge_set)
neg_dataset = neg_dataset.shuffle(buffer_size=training_config["shuffle_buffer"]).batch(
training_config["neg_samples"] * training_config["batch_size"], drop_remainder=False)
neg_dataset = neg_dataset.repeat()
neg_iter = neg_dataset.make_one_shot_iterator()
neg_batch = neg_iter.get_next()
batch = tf.concat([pos_batch, neg_batch], axis=0)
neg_label = tf.zeros(shape=(tf.shape(neg_batch)[0]))
label = tf.concat([pos_label, neg_label], axis=0)
return batch, label
def get_test_batch(edge_set_with_label):
dataset = tf.data.Dataset.from_tensor_slices(edge_set_with_label).batch(training_config["batch_size"],
drop_remainder=False)
data_iter = dataset.make_initializable_iterator()
batch = data_iter.get_next()
init = data_iter.initializer
return batch, init
| UTF-8 | Python | false | false | 4,815 | py | 41 | data_utils.py | 23 | 0.64486 | 0.636137 | 0 | 117 | 40.153846 | 119 |
MerlinAu/leetcode-python | 16,054,587,753,105 | 561cd54e74c7278fc32e66d2f84acf6eee30a9cf | dc23985fcf3dd4d15b32a2f774f7efd45d9daab7 | /code/Template/DP.py | 2974d9c1eed7f5a3901f4c7f774195b24b2d0356 | []
| no_license | https://github.com/MerlinAu/leetcode-python | caa2da9ebe10988aa88bc64bf50be6d5b2fb6b87 | acf29a6ba46586d320e6d9acc891ba16d741a7ed | refs/heads/master | 2020-05-24T08:05:46.912648 | 2019-08-30T07:28:54 | 2019-08-30T07:28:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''template of dynamic programming'''
# 状态定义
# dp = new int [m + 1][n + 1];
# 初始状态
# dp[0][0] = x;
# dp[0][1] = y;
# ...
# DP状态的推导
'''
for i = 0; i <= n; ++i {
for j = 0; j <= m; ++j {
...
dp[i][j] = min {dp[i - 1][j], dp[i][j - 1], etc.}
}
}
'''
# return dp[m][n]; 最优解 | UTF-8 | Python | false | false | 330 | py | 48 | DP.py | 42 | 0.38255 | 0.348993 | 0 | 21 | 13.238095 | 57 |
BDNYC/astrodbkit | 15,178,414,426,317 | 58f1299b64c63323323dc587bf51c6e3efa54d7b | a6cffe95a5c5318535da8f6510f97bcfb0ffa6ea | /astrodbkit/__init__.py | ac7de65a61dcc968e5ee156adbd98bf4082a7588 | [
"MIT"
]
| permissive | https://github.com/BDNYC/astrodbkit | 51a99255fc4bc931fc7add2fd6f986696aa45887 | 00b9dc5470ddede2027faddd8ba1e8787166a54f | refs/heads/master | 2021-04-19T00:03:14.527412 | 2020-05-28T13:01:25 | 2020-05-28T13:01:25 | 49,847,131 | 2 | 6 | MIT | false | 2018-05-03T21:19:50 | 2016-01-18T02:32:16 | 2018-05-03T19:57:01 | 2018-05-03T21:19:50 | 1,897 | 1 | 5 | 20 | Python | false | null | from .astrodb import *
from .astrocat import *
from .votools import *
from pkg_resources import get_distribution
__version__ = '0.6.6' | UTF-8 | Python | false | false | 134 | py | 12 | __init__.py | 6 | 0.738806 | 0.716418 | 0 | 5 | 26 | 42 |
connectthefuture/psdmrepo | 5,669,356,860,204 | 3b6e5ce83aeb6aaab9ceb4823fdf00126a5f401f | 786de89be635eb21295070a6a3452f3a7fe6712c | /psddl/tags/V00-08-06/tags/andy-20101027-01/psddl/src/Type.py | 3c8d5cf794b17b1aec97d335c7444fb3698f9859 | []
| no_license | https://github.com/connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module Type...
#
#------------------------------------------------------------------------
"""DDL class representing a type (class).
This software was developed for the SIT project. If you use all or
part of it, please give an appropriate acknowledgment.
@see RelatedModule
@version $Id$
@author Andrei Salnikov
"""
#-----------------------------
# Module's version from CVS --
#-----------------------------
__version__ = "$Revision$"
# $Source$

#-------------------------------
# Imports of standard modules --
#-------------------------------
import sys

#--------------------------------
# Imports of base class module --
#--------------------------------

#-----------------------------
# Imports for other modules --
#-----------------------------

#----------------------------------
# Local non-exported definitions --
#----------------------------------

#------------------------
# Exported definitions --
#------------------------

#---------------------
# Class definition --
#---------------------
class Type ( object ) :
#----------------
# Constructor --
#----------------
def __init__ ( self, name, **kw ) :
self.name = name
self.version = kw.get('version')
self.type_id = kw.get('type_id')
self.levels = kw.get('levels')
self.comment = kw.get('comment')
self.package = kw.get('package')
self.size = kw.get('size')
self.align = kw.get('align')
self.pack = kw.get('pack')
if self.pack : self.pack = int(self.pack)
self.enums = []
self.constants = []
self.xtcConfig = []
self.attributes = []
self.methods = []
self.repeat = None
def __str__(self):
return "<Type(%s)>" % self.__dict__
def __repr__(self):
return "<Type(%s)>" % self.name
#
# In case someone decides to run this module
#
if __name__ == "__main__" :
# In principle we can try to run test suite for this module,
# have to think about it later. Right now just abort.
sys.exit ( "Module is not supposed to be run as main module" )
| UTF-8 | Python | false | false | 2,275 | py | 3,157 | Type.py | 2,769 | 0.421978 | 0.421978 | 0 | 92 | 23.728261 | 75 |
majdigital/bigworldgraph | 7,550,552,535,505 | 92518b78d9ee21f524ef3cb5685c7705c33fede6 | 7987783a15f161168a04551b4da97317c9cf4250 | /backend/bwg/tasks/relation_merging.py | 2e6766f7abc59b0a452afcfa269bead98b7c32c0 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | https://github.com/majdigital/bigworldgraph | b2f251bb1b82208878e3e99812a08791527e1423 | 8ae28f1923f1116c0595896872af23a41fcfdf99 | refs/heads/master | 2023-01-21T07:20:21.612816 | 2019-06-21T17:08:07 | 2019-06-21T17:08:07 | 88,851,746 | 4 | 0 | MIT | false | 2022-12-10T19:48:49 | 2017-04-20T10:10:39 | 2022-07-05T21:53:36 | 2022-12-10T19:48:48 | 62,677 | 3 | 0 | 27 | Python | false | false | # -*- coding: utf-8 -*-
"""
Defining a pipeline task that merges relation extracted from different tasks, e.g. participation extraction or naive
open relation extraction.
"""
# EXT
import luigi
# PROJECT
from bwg.decorators import time_function
from bwg.mixins import ArticleProcessingMixin
from bwg.serializing import serialize_relation
from bwg.tasks.naive_ore import NaiveOpenRelationExtractionTask
from bwg.tasks.participation_extraction import ParticipationExtractionTask
class RelationMergingTask(luigi.Task, ArticleProcessingMixin):
"""
Merge relations gained from OpenRelationExtractionTask and ParticipationExtractionTask.
"""
def requires(self):
return ParticipationExtractionTask(task_config=self.task_config),\
NaiveOpenRelationExtractionTask(task_config=self.task_config)
def output(self):
text_format = luigi.format.TextFormat(self.task_config["CORPUS_ENCODING"])
output_path = self.task_config["RELATION_MERGING_OUTPUT_PATH"]
return luigi.LocalTarget(output_path, format=text_format)
@time_function(is_classmethod=True)
def run(self):
with self.input()[0].open("r") as pe_file, self.input()[1].open("r") as ore_file, \
self.output().open("w") as output_file:
for pe_line, ore_line in zip(pe_file, ore_file):
self.process_articles(
(pe_line, ore_line), new_state="merged_relations", serializing_function=serialize_relation,
output_file=output_file
)
def task_workflow(self, article, **workflow_resources):
article_data = article["data"]
for sentence_id, sentence_json in article_data.items():
sentence_dates = sentence_json["data"]
sentence = sentence_dates["data_extracted_participations"]["data"]["sentence"]
relations = self._get_relations_from_sentence_json(sentence_dates["data_extracted_participations"])
relations.extend(self._get_relations_from_sentence_json(sentence_dates["data_extracted_relations"]))
serializing_arguments = {
"sentence_id": sentence_id,
"sentence": sentence,
"relations": relations,
"infix": "ORE:PE"
}
yield serializing_arguments
@staticmethod
def _get_relations_from_sentence_json(sentence_json):
"""
Create relation tuples based on a sentence JSON object.
:param sentence_json: Sentence as JSON object.
:type sentence_json: dict
:return: List of relations as tuples.
:rtype: list
"""
return [
(
relation_json["data"]["subject_phrase"],
relation_json["data"]["verb"],
relation_json["data"]["object_phrase"]
)
for relation_id, relation_json in sentence_json["data"]["relations"].items()
]
def _is_relevant_article(self, article):
"""
Override ArticleProcessingMixin's relevance criterion.
:return: Result of check.
:rtype: bool
"""
return len(article["data"]) > 0
def _is_relevant_sentence(self, sentence):
"""
Override ArticleProcessingMixin's relevance criterion.
:return: Result of check.
:rtype: bool
"""
# Separate sentence from sentence ID
sentence = list(sentence.values())[0]
return len(sentence["data"]["relations"]) > 0 | UTF-8 | Python | false | false | 3,524 | py | 111 | relation_merging.py | 58 | 0.629398 | 0.627696 | 0 | 96 | 35.71875 | 116 |
Mehedi109/Ambulance-Service | 17,944,373,371,456 | 3fecc9057a188da6ceb7c77d5e2832c64d7e7158 | 49e7d8a7574ada9bdfc8cb66a5e1afb0eab8eb57 | /project_app/views.py | dae15c8302d36a39e0fd149d33fdfe1f528422e4 | []
| no_license | https://github.com/Mehedi109/Ambulance-Service | bd5bbfc7317a5f6efd7556411202fc26487666f2 | eaa1b8bb52ee5239a89e235fdd3db5eb186ec9e4 | refs/heads/main | 2023-01-13T01:17:24.284389 | 2020-11-12T11:49:26 | 2020-11-12T11:49:26 | 304,373,928 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.views.generic import ListView
from django.shortcuts import render,redirect,HttpResponse,get_object_or_404
from django.core.paginator import Paginator
from django.db.models import Q
from . models import Category
from . models import Quot
from . models import ability_top
from . models import ability_bottom
from . models import slider
from . models import service_category
from . models import service_section
from .models import service_area
from .models import service_area_body
from .models import border_top
from .models import border_bottom
#from . forms import articleForm
from .forms import articleForm
from .models import article
'''from .models import user'''
from .forms import rent_detailsForm
from . models import rent_details
#from .forms import userCommentForm
#from . models import userComment
from . models import freezer,general,ICU,emergency
from .models import postComment
from .forms import postCommentForm
#from .models import reply
#from .forms import replyForm
class HomeView(ListView):
template_name="index.html"
model=Quot
#model=service_category
#model=service_section
def get_queryset(self):
query_set=super().get_queryset()
        return query_set.select_related('Category')
def index(request):
post=service_category.objects.all
content=service_section.objects.all
quote=Quot.objects.all
s_a_c=service_area.objects.all
s_a_b=service_area_body.objects.all
slider_post=slider.objects.all
border_top_post=get_object_or_404(border_top)
#border_bottom_post=border_bottom.objects.all
border_bottom_post=border_bottom.objects.all
ability_top_post=get_object_or_404(ability_top)
ability_bottom_post=ability_bottom.objects.all
emergency_post=get_object_or_404(emergency)
context={
"post":post,
"content":content,
"quote":quote,
"s_a_c":s_a_c,
"sab":s_a_b,
"post2":border_top_post,
"border_bottom_post":border_bottom_post,
"post3":ability_top_post,
"ability_bottom_post":ability_bottom_post,
#"post4":ability_bottom_post
"slider_post":slider_post,
"post4":emergency_post,
}
return render (request,"index.html",context)
def terms(request):
quote=Quot.objects.all
post2=slider.objects.all()
context={
"quote":quote,
"post2":post2
}
return render(request,"terms.html",context)
def ac_post(request):
post=get_object_or_404(general)
post2=slider.objects.all
context={
"post":post,
"post2":post2
}
return render(request,"ac.html",context)
def general_post(request):
post=get_object_or_404(freezer)
post2=slider.objects.all
context={
"post":post,
"post2":post2
}
return render(request,"general.html",context)
def icu_post(request):
post=get_object_or_404(ICU)
post2=slider.objects.all
context={
"post":post,
"post2":post2
}
return render(request,"icu.html",context)
def contact(request):
return render(request,"contact.html")
def post(request):
if request.user.is_authenticated:
form=articleForm(request.POST or None)
if form.is_valid():
instance=form.save(commit=False)
instance.save();
return redirect('/blog')
return render (request,"post.html",{"form":form})
def post_update(request,id):
#if request.user.is_authenticated:
if request.user.is_staff or request.user.is_superuser:
post=get_object_or_404(article,id=id)
#post=article.objects.get(id=id)
form=articleForm(request.POST or None,instance=post)
if form.is_valid():
instance=form.save(commit=False)
instance.save()
#messages.success(request,'Updated Successfully')
return redirect('/author_blog')
return render(request,"post.html",{"form":form})
return HttpResponse()
def blog_update(request,id):
post=get_object_or_404(article,id=id)
#post=article.objects.get(id=id)
return redirect('/blog')
return render(request,"blog.html",{"post":post})
#return HttpResponse()
def delete(request,id):
#if request.user.is_authenticated:
if request.user.is_staff or request.user.is_superuser:
post=get_object_or_404(article,id=id)
post.delete()
        return redirect('/author_blog')
    return HttpResponse()
def blog(request):
post=article.objects.all().order_by('id')
##post=get_object_or_404(article,id=id)
#getComment=userComment.objects.filter()
#form=userCommentForm(request.POST or None)
#if form.is_valid():
#instance=form.save(commit=False)
#instance.post=post
#instance.save()
context={
"post":post,
#"form":form,
#"comment":getComment
}
return render(request,"blog.html",context)
#return HttpResponse()
def single_blog(request,id):
#post=article.objects.all()
#post=article.objects.get(id=id)
post=get_object_or_404(article,id=id)
#post=article.objects.get(id=postid)
getComment=postComment.objects.filter(post=id)
'''getComment=userComment.objects.filter(post=id)#reply=none user'''
#getReply=reply.objects.filter(post=id)
if request.method=="POST":
form=postCommentForm(request.POST or None)
'''form=userCommentForm(request.POST or None) user'''
if form.is_valid():
instance=form.save(commit=False)
instance.post=post
parent_id = request.POST.get('comment_id') #reply-section
comment_qs=None
if parent_id:
comment_qs = postComment.objects.get(id=parent_id)
instance.save()
'''parent_obj=None
try:
parent_id=int(request.POST.get('parent_id'))
except:
parent_id=None
if parent_id:
parent_obj=postComment.objects.get(parent_id)
if parent_obj:
replay_comment=form.save(commit=False)
replay_comment.parent=parent_obj'''
'''reply_id = request.POST.get('comment_id') #reply-section
comment_qs=None
if reply_id:
comment_qs = userComment.objects.get(id=reply_id)
comment = userComment.objects.create(post=post, user=request.user, content=content, reply=comment_qs)
comment.save()'''
return redirect('single_blog',id)
else:
'''form=userCommentForm() user'''
form=postCommentForm()
context={
"post":post,
"form":form,
"comment":getComment,
#"reply":replay_comment,
#"comment":getComment, user
}
return render(request,"single_blog.html",context)
#return HttpResponse()
def post_comment(request,id):
if request.method=="POST":
comment=request.POST.get("comment")
postid=request.POST.get("postid")
post=article.objects.get(id=postid)
parentid=request.POST.get("parentid")
if parentid== "":
comment=postComment(comment=comment,post=post)
else:
parent=postComment.objects.get(id=parentid)
comment=postComment(comment=comment,post=post,parent=parent)
comment.save()
return redirect('single_blog',id)
return render(request,"single_blog.html",context)
def comment_delete(request,id):
    comment=get_object_or_404(postComment,id=id)
    article_id=comment.post.id   # remember the parent article before deleting
    comment.delete()
    return redirect('single_blog',article_id)
return HttpResponse()
def rent_show(request):
post=rent_details.objects.all()
search=request.POST.get('q')
if search:
post=post.filter(
Q(From__icontains=search)
)
    paginator = Paginator(post, 5) # Show 5 entries per page.
page = request.GET.get('page')
total_post = paginator.get_page(page)
context={
#"post":post,
"post":total_post
}
return render(request,"rent.html",context)
def rent(request):
#post=get_object_or_404(rent_details,id=id)
post=rent_details.objects.all()
first=rent_details.objects.first()
last=rent_details.objects.last()
search=request.POST.get('q')
if search:
post=post.filter(
Q(From__icontains=search) |
Q(To__icontains=search)
)
    paginator = Paginator(post, 10) # Show 10 entries per page.
page = request.GET.get('page')
total_post = paginator.get_page(page)
context={
"post":total_post,
"first":first,
"last":last,
}
return render(request,"rent.html",context)
def rent_post(request):
if request.user.is_authenticated:
form=rent_detailsForm(request.POST or None)
if form.is_valid():
instance=form.save(commit=False)
instance.save();
return redirect('/author_rent')
return render (request,"rental.html",{"form":form})
def rent_update(request,id):
if request.user.is_authenticated:
post=get_object_or_404(rent_details,id=id)
#post=article.objects.get(id=id)
form=rent_detailsForm(request.POST or None,instance=post)
if form.is_valid():
instance=form.save(commit=False)
instance.save()
return redirect('/author_rent')
return render(request,"rental.html",{"form":form})
return HttpResponse()
def rent_delete(request,id):
if request.user.is_authenticated:
post=get_object_or_404(rent_details,id=id)
post.delete()
        return redirect('/author_rent')
    return HttpResponse()
| UTF-8 | Python | false | false | 8,425 | py | 43 | views.py | 22 | 0.727596 | 0.719169 | 0 | 297 | 27.367003 | 104 |
MathactwFX/python | 19,335,942,778,069 | 0924f256d030359d157e7fda448cb999f49577c9 | a249beaa5dd9f7838b2c44364c88fc7056814033 | /language/python_overview.py | da3f3b33d5da9262764ffa171980158426b853fc | []
| no_license | https://github.com/MathactwFX/python | 0d3254d9741b0d6f0570f885d53d86e7e44c267e | a0f2797feebc5188921d1e5850008c5811e9c9b8 | refs/heads/master | 2019-07-10T06:56:10.717252 | 2017-06-29T02:01:13 | 2017-06-29T02:01:13 | 91,102,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""a test module"""
# content ref = Python Tutorial | http://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000
# ******** Keywords ********
# g = lambda x: x^1
# print(g(2))
#
# def make_counter():
# count = 1
# def counter():
# nonlocal count
# count += 2
# return count
# return counter
# def make_counter_test():
# mc = make_counter()
# print(mc())
# print(mc())
# print(mc())
# make_counter_test()
#
# def g(n):
# for i in range(5):
# yield i**3
# for i in g(5):
# print(i, ':')
#
# def fab(max):
# a,b=0,1
# while a < max:
# yield a
# a,b=b,a+b
# for a in fab(20):
# print(a,',','end')
# a=fab(7)
#
# size = 20
# def fab_rec(x,y):
# # global size
# size=30
# if x>size:
# return
# print(x)
# fab_rec(y,x+y)
# fab_rec(0,1)
# print(size)
# # assert size > 50
#
# from sys import *
# print('path:', path)
# ******** Basics ********
# a = u"中文"
# print(a, end='\n\n')
# print(u'中'.encode('utf-8'))
# print(b'\xcb\xea\xca\xb1\xb8\xc3\xd6\xaa\xb5\xc0\xb5\xc4'.decode('utf-8'))
# print(b'zhong'.decode('utf-8'))
# print(len(u'中'))
# print("hi,%s,you\n have%d %%" % ('michael', 11))
# list is an ordered collection; items can be added or removed dynamically
# classmates = ['hha', 2, [1, 2], 3, 4]
# classmates.insert(1,'e')
# classmates.pop()
# classmates2 = ['1','3','5','4','2']
# classmates2.sort()
# print(classmates2)
# classmates[2]='replace'
# print(classmates)
# classmates.append(1.2)
# print(classmates[-1])
# print(len(classmates))
# print(classmates[2])
# tuple: an ordered, immutable collection; the references it holds never change
# t = (1, [1,2])
# t[1][1]=3
# print(t)
# for x in t:
# print(x)
# age = 20
# if age >= 8:
# print("teenager")
# elif age >= 18:
# print("adult")
# else:
# print(True)
# y=range(5)
# print(y.count(1))
# for x in y:
# print(x)
# birth = input('birth: ')
# if int(birth) < 2000:
#     print('born before 2000')
# else:
#     print('born in or after 2000')
# d={'michael':18,'bob':11, "jane":25}
# print(d['jane'])
# d['wng']=19
# d.pop('wng')
# print(d.get('wg'))
# print(d)
# print(d.items())
# s = set([1,23,3,4,5,3])
# s.add(6)
# s.add(2)
# s.remove(7)
# print(s)
# s2=set([3,4,5,111,222])
# print(s | s2)
# s2.add([1,2])
# s2.add((1,2))
# print(s2)
# s3=set((1,2,3))
# print(s3)
# ******** function ********
# s = abs(-1)
# print(s)
#
# def myabs(x):
# pass
# if not isinstance(x, (int, float)):
# raise TypeError('bad operand type')
# else:
# if x >= 0:
# return x
# else:
# return -x
#
# def mynonefunc():
# pass
#
#
# try:
# print(myabs('a'))
# except TypeError:
# print("bad operand")
# finally:
# print("arrive finally")
# print(mynonefunc())
#
# import math
# def move(x, y, step, angle=0):
# nx = x + step*math.cos(angle)
# ny = y + step*math.sin(angle)
# return nx, ny
# r = move(1, 2, 3, 45*math.pi/180.0)
# print(r)
#
# def myclosure():
# def inner():
# return [1,2,3]
# return inner
# print(myclosure()())
# def calc(*numbers):
# sum = 0
# for n in numbers:
# sum = sum + n * n
# return sum
# list = [1,2,3]
# print(calc(*list))
# print('1','2',3,']')
#
# def person(name, age, **kw):
# print('name:', name, 'age:', age, 'other:', kw)
# hah={'nan':12,"jj":'hah'}
# person(12,13,hha=12,ll='121')
#
# def fact(n):
# if n==1:
# return 1
# return n * fact(n - 1)
# print(fact(3))
# # tail recursion
# def fact_tail(n):
# return fact_iter(1, 1, n)
# def fact_iter(product, count, max):
# if count > max:
# return product
# return fact_iter(product * count, count + 1, max)
# print(fact_tail(1000))
# ******** Advanced Features ********
# L = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
# print(L[-2:])
# print(L[:2])
#
# L = range(0,100)
# print(list(L[:10:2]))
# print('afsdf'[:3])
# d = {'a': 1, 'b': 2, 'c': 3}
# for key in d:
# print(key)
# for value in d.values():
# print(value)
# from collections import Iterable
# print(isinstance(d, Iterable))
# for i, key in enumerate(d):
# print(i,key)
#
# print(list(range(1,11)))
# print([x*x for x in range(1,11) if x%2==1])
# print([m+n for n in 'abc' for m in '123'])
# import os
# print([d for d in os.listdir('.')])
# for k,v in d.items():
# print(k,'=',v)
# print([k+'='+chr(v) for k,v in d.items()])
# yint = 123
# print(isinstance(yint, str))
#
# g=(x*x for x in range(10))
# # print(g.__next__())
# # print(g.__next__())
# for n in g:
# print(n)
# ******** Functional Programming ********
# print(abs)
# # abs = 2
# print(abs(-2))
#
# def add(x, y, f):
# return f(x) + f(y)
# print(add(-5, 6, abs))
#
# def f(x):
# return x * x
# L=[1,2,3]
# print(list(map(f, L)))
# print(list(map(str, L)))
#
# from functools import reduce
# def add(x,y):
# return x+y
# print(reduce(add,[1,2,3,4]))
# print(sum([1,2]))
#
# def char2num(s):
# return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
# def str2int(s):
# return reduce(lambda x,y:x*10+y,map(char2num,s))
# print(str2int('13579'))
#
# def prod(x,y):
# return x*y
# print(reduce(prod,[1,2,3,6]))
#
# def norm(s):
# return '%s%s' % (str.upper(s[0]), str.lower(s[1:]))
# # return list(s[0].upper()) + list(map(str.lower, s[1:]))
# print(list(map(norm,['adam', 'LISA', 'barT'])))
#
# def is_odd(n):
# return n %2 ==1
# print(list(filter(is_odd,[1,2,3,4])))
#
# def not_empty(s):
# return s and s.strip()
#
# def reversed_cmp(x, y):
# if x > y:
# return -1
# if x < y:
# return 1
# return 0
#
# # print(sorted([36, 5, 12, 9, 21], lambda x:x,True))
# print([36, 5, 12, 9, 21].sort)
#
# def lazy_sum(*args):
# def sum():
# ax = 0
# for n in args:
# ax = ax + n
# return ax
# return sum
# f1 = lazy_sum(1, 3, 5, 7, 9)
# f2 = lazy_sum(1, 3, 5, 7, 9)
# print(f1)
# print(f2)
# print(f1 is f2)
#
# print(lazy_sum.__name__)
#
# # def log(func):
# # def wrapper(*args, **kw):
# # print('call %s():' % func.__name__)
# # return func(*args, **kw)
# # return wrapper
# import functools
# def log(*hah):
# def decorator(func):
# @functools.wraps(func)
# def wrapper(*args, **kw):
# print('%s %s():' % (hah[0], func.__name__))
# f = func(*args, **kw)
# print('%s %s():' % (hah[1], func.__name__))
# return f
# return wrapper
# return decorator
#
# @log('execute','end')
# def now():
# print('2013-12-25')
# now()
# print(now.__name__)
# print(int('111', 2))
#
# import sys
# print(sys.argv)
# print(__doc__)
# ******** Modules ********
# import Image, ImageFilter
# im = Image.open("C:\\Users\\jrm\\Desktop\\01.jpg")
# # print(im.format, im.size, im.mode)
# # im.thumbnail((200,50))
# im2 = im.filter(ImageFilter.BLUR)
# im2.save('C:\\Users\\jrm\\Desktop\\01_thumb.jpg', 'JPEG')
# ******** OOP ********
# class Student(object):
# # __slots__ = ('__name', '__score')
# def __init__(self, name, score):
# self.__name = name
# self.__score = score
# def print_score(self):
# print('%s: %s' % (self.__name, self.__score))
# bart=Student('Bart Simpson', 59)
# lisa = Student('lisa Simpson', 87)
# bart.__name = 'changed'
# bart.print_score()
# lisa.print_score()
# print(bart.__name)
# print(bart._Student__name)
# print(bart)
# print(Student)
#
# class Animal(object):
# def run(self):
# print('Animal is running...')
# class Dog(Animal):
# def run(self):
# print('Dog is running')
# class Cat(Animal):
# def run(self):
# print('Cat is running')
# dog = Dog()
# cat = Cat()
# dog.run()
# cat.run()
# def run_twice(hh):
# hh.run()
# hh.run()
# run_twice(dog)
# run_twice(cat)
#
# print(type('12'))
#
# import types
# print(type(int))
# print(dir(cat))
# print('abc'.__doc__)
# print(len('abc'), 'abc'.__len__()+1)
# print('abc'.upper())
#
# print(callable(dog.run))
# ******** Errors, Debugging and Testing ********
# import logging
# logging.basicConfig(level=logging.INFO)
# try:
# print('try...')
# r = 10 / 0
# print('result:', r)
# except ZeroDivisionError as e:
# # print('except:', e)
# logging.info('af')
# logging.exception(e)
# else:
# print('no fault')
# finally:
# print('finally...')
# print('END')
# ******** IO Programming ********
# with open('/path/to/file', 'r') as f:
# print(f.read())
# import os
# print(os.name, os.getenv('path'))
# print([x for x in os.listdir('.') if os.path.isdir(x)])
#
# try:
# import cPickle as pickle
# except ImportError:
# import pickle
# # d = dict(name='Bob', age=20, score=88)
# # f = open('dump.txt', 'wb')
# # pickle.dump(d, f)
# with open('dump.txt', 'rb') as f:
# d = pickle.load(f)
# # f.close()
# print(d)
# import json
# d = dict(name='Bob', age=20, score=None)
# print(json.dumps(d))
# ******** Processes and Threads ********
# ******** Regular Expressions ********
# import re
# print(re.match(r'^\d{3}\-\d{3,8}', '010-12345'))
# if re.match(r'^\d{3}\-\d{3,8}', '010-12345'):
# print('ok')
# ******** Common Built-in Modules ********
# from collections import *
# namedtuple
# deque
# defaultdict
# OrderedDict
# c = Counter()
# for ch in 'programming':
# c[ch] = c[ch] + 1
# print(c)
#
# from HTMLParser import HTMLParser
# # from htmlentitydefs import name2codepoint
#
# class MyHTMLParser(HTMLParser):
#
# def handle_starttag(self, tag, attrs):
# print('<%s>' % tag)
#
# def handle_endtag(self, tag):
# print('</%s>' % tag)
#
# def handle_startendtag(self, tag, attrs):
# print('<%s/>' % tag)
#
# def handle_data(self, data):
# print('data')
#
# def handle_comment(self, data):
# print('<!-- -->')
#
# def handle_entityref(self, name):
# print('&%s;' % name)
#
# def handle_charref(self, name):
# print('&#%s;' % name)
#
# parser = MyHTMLParser()
# print(parser.feed('<html><head></head><body><p>Some <a href=\"#\">html</a> tutorial...<br>END</p></body></html>'))
# import base64
# base64.encode('fasd')
# import struct
# print(struct.pack('>I', 10240099))
# import hashlib
# md5 = hashlib.md5()
# md5.update(b'how are you')
# sha1 = hashlib.sha512()
# sha1.update(b'how fsdf fsadf fdsa f as fasd sdfa 0b064373fb4dd7356db6e83ec9c39e31ba9c335c')
# print(md5.hexdigest())
# print(sha1.hexdigest())
# ******** GUI ********
# from tkinter import *
# class Application(Frame):
# def __init__(self, master=None):
# Frame.__init__(self, master)
# self.pack()
# self.createWidgets()
#
# def createWidgets(self):
# self.helloLabel = Label(self, text='Hello, world!')
# self.helloLabel.pack()
# self.quitButton = Button(self, text='Quit', command=self.quit)
# self.quitButton.pack()
# app = Application()
# # set the window title:
# app.master.title('Hello World')
# # main message loop:
# app.mainloop()
# ******** Network Programming ********
# import the socket library:
# import socket
# # create a socket:
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # establish a connection:
# s.connect(('www.sina.com.cn', 80))
# # send data:
# s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
# # receive data:
# buffer = []
# while True:
#     # receive at most 1KB each time:
# d = s.recv(1024)
# if d:
# buffer.append(d)
# else:
# break
# data = b''.join(buffer)
# print(data)
# # close the connection:
# s.close()
# header, html = data.split(b'\r\n\r\n', 1)
# print(header)
# # write the received data to a file:
# with open('C:\\Users\\jrm\\Desktop\\sina.html', 'wb') as f:
# f.write(html)
# ******** Databases ********
# import mysql.connector
# conn = mysql.connector.connect(user='root', password='root', database='test', use_unicode=True)
# # cursor = conn.cursor()
# # print(cursor)
# # cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# # cursor.execute('insert into user (id, name) values (%s, %s)', ['1', 'Michael'])
# # print(cursor.rowcount)
# # conn.commit()
# # cursor.close()
# #
# cursor = conn.cursor()
# cursor.execute('select * from user where id = %s' % '1')
# values = cursor.fetchall()
# print(values)
# # cursor.close()
# print('123')
# # imports:
# from sqlalchemy import Column, String, create_engine
# from sqlalchemy.orm import sessionmaker
# from sqlalchemy.ext.declarative import declarative_base
#
# # create the base class for model objects:
# Base = declarative_base()
#
# # define the User object:
# class User(Base):
#     # table name:
# __tablename__ = 'user'
#
#     # table structure:
# id = Column(String(20), primary_key=True)
# name = Column(String(20))
#
# # initialize the database connection:
# engine = create_engine('mysql+mysqlconnector://root:root@localhost:3306/test')
# # create the DBSession type:
# DBSession = sessionmaker(bind=engine)
# # create a session object:
# session = DBSession()
# # create a new User object:
# new_user = User(id='8', name='Bobbbbb')
# # add it to the session:
# session.add(new_user)
# # commit to save it to the database:
# session.commit()
# # close the session:
# session.close()
# create a Session:
# session = DBSession()
# # create a Query; filter() supplies the where clause; one() returns the single matching row, all() returns all rows:
# user = session.query(User).filter(User.id=='5').one()
# # print the type and the object's name attribute:
# print('type:', type(user))
# print('name:', user.name)
# # close the Session:
# session.close()
# ******** Web Development ********
# hello.py
# def application(environ, start_response):
# start_response('200 OK', [('Content-Type', 'text/html')])
# return '<h1>Hello, web!</h1>'
#
# # server.py
# # import from the wsgiref module:
# from wsgiref.simple_server import make_server
# # import the application function we wrote ourselves:
# # from hello import application
#
# # create a server: empty IP address, port 8000, handler function is application:
# httpd = make_server('localhost', 8000, application)
# print("Serving HTTP on port 8000...")
# # start listening for HTTP requests:
# httpd.serve_forever()
# from flask import Flask
# from flask import request
# from flask import render_template
#
# app = Flask(__name__)
#
# @app.route('/', methods=['GET', 'POST'])
# def home():
# return '<h1>Home</h1>'
#
# @app.route('/signin', methods=['GET'])
# def signin_form():
# return '''<form action="/signin" method="post">
# <p><input name="username"></p>
# <p><input name="password" type="password"></p>
# <p><button type="submit">Sign In</button></p>
# </form>'''
#
# @app.route('/signin', methods=['POST'])
# def signin():
#     # read the form contents from the request object:
# if request.form['username']=='admin' and request.form['password']=='password':
# return '<h3>Hello, admin!</h3>'
# return '<h3>Bad username or password.</h3>'
#
# if __name__ == '__main__':
# app.run()
# ******** Hands-on Projects ********
# db, orm, mvc, front-end templates, deployment, mobile apps, and so on
| UTF-8 | Python | false | false | 15,162 | py | 22 | python_overview.py | 21 | 0.547586 | 0.514331 | 0.000823 | 639 | 21.823161 | 116 |
MrDeff/bitrix-crest | 18,872,086,301,379 | 80fb7ef6b6b0bc04fa443e91141019b8860c74e6 | ea12579ffe18c6530536cf7897d4559d2fb52edd | /setup.py | b44659974b4f7a6b897ae521419f3bc6689c222c | []
| no_license | https://github.com/MrDeff/bitrix-crest | b485d351c297c2955c53d3ef4288ca47e1700661 | 5da66b765d9e9f7f3b08846ac40a8befeeca9bbb | refs/heads/master | 2020-12-08T11:34:16.454459 | 2020-01-23T02:54:08 | 2020-01-23T02:54:08 | 232,972,386 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
setup(
name='bitrix-crest',
version='0.0.1',
packages=['b24rest'],
url='https://github.com/MrDeff/bitrix-crest',
license='MIT',
author='Evgeniy Pedan',
author_email='e.pedan@gmail.com',
    description='Library for working with the Bitrix24 REST API'
)
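
# --- usage sketch (added for illustration): installing and importing the
# package declared above (shell commands shown as comments):
#   pip install .
#   python -c "import b24rest"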
| UTF-8 | Python | false | false | 322 | py | 5 | setup.py | 4 | 0.662252 | 0.639073 | 0 | 12 | 24.166667 | 55 |
larkspur78/capstone | 3,435,973,877,808 | 584e21dea8707e74db65459b567451c25b778596 | 539d2e1e821138a541301a1d500a0b87f82f41c2 | /virtualtrainer/apps.py | bb86660cd76624d04ef1c76c4b2eb5089de03314 | []
| no_license | https://github.com/larkspur78/capstone | 6996a9521ab233a5e0f77853cd69b09a8e42f270 | 10878113263479191218387db8d44fb50cf8aecf | refs/heads/master | 2021-01-09T20:46:13.072990 | 2016-06-22T03:35:26 | 2016-06-22T03:35:26 | 59,877,096 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class VirtualtrainerConfig(AppConfig):
name = 'virtualtrainer'
| UTF-8 | Python | false | false | 103 | py | 12 | apps.py | 7 | 0.786408 | 0.786408 | 0 | 5 | 19.6 | 38 |
UVA-DSI-2019-Capstones/ARL | 11,751,030,537,089 | 06d41ee5f17be3a801f97d161ce0555ac947d1c2 | 9ad83773134d89c682405ab328d197589eb43aff | /db/lda_randomforest.py | c84d5ba08869c1f1a1036a0376becdf9b9bb0baf | [
"MIT"
]
| permissive | https://github.com/UVA-DSI-2019-Capstones/ARL | a52102419479e1cc76e2e00d4d92e38301f3f7bb | 74d43bf975db90da70696d4b68002d6971164840 | refs/heads/master | 2021-07-09T05:09:28.660761 | 2019-04-06T17:31:30 | 2019-04-06T17:31:30 | 146,603,421 | 1 | 0 | MIT | false | 2019-03-01T00:49:32 | 2018-08-29T13:24:11 | 2019-02-28T23:52:10 | 2019-03-01T00:49:32 | 92,377 | 1 | 0 | 4 | Jupyter Notebook | false | null | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaModel
from shorttext.utils import standard_text_preprocessor_1
import pandas as pd
import os
base_dir = os.getcwd()  # working directory; avoids shadowing the built-in dir()
#Create test set corpus
test = pd.read_csv('test_set.csv')
pre = standard_text_preprocessor_1()
test['processed'] = test['response_text'].apply(pre)
test_corpus = test['processed'].apply(lambda x : x.split(' '))
dict_test = Dictionary(test_corpus)
bow_corpus_test = [dict_test.doc2bow(doc) for doc in test_corpus]
#Create training set corpus
train = pd.read_csv('train_set.csv')
train['processed'] = train['response_text'].apply(pre)
train_corpus = train['processed'].apply(lambda x : x.split(' '))
dict_train = Dictionary(train_corpus)
bow_corpus_train = [dict_train.doc2bow(doc) for doc in train_corpus]
df_results = pd.DataFrame(columns = ['n_estimators', 'depth', 'accuracy', 'f1','num_topics'])
#Load LDA model
for number_of_topics in range(2,30):
temp_file = 'LDA_{}_topic.model'.format(number_of_topics)
    temp_file = os.path.join(base_dir, 'LDA_models', temp_file)
lda = LdaModel.load(temp_file)
test_df = pd.DataFrame()
train_df = pd.DataFrame()
for i in range(0, len(bow_corpus_test)):
test_df = pd.concat([test_df, pd.DataFrame(lda.get_document_topics(bow = bow_corpus_test[i], minimum_probability=0.000001))[1]], axis = 1)
for i in range(0, len(bow_corpus_train)):
train_df = pd.concat([train_df, pd.DataFrame(lda.get_document_topics(bow = bow_corpus_train[i], minimum_probability=0.000001))[1]], axis = 1)
test_df = test_df.transpose()
train_df = train_df.transpose()
print('Start of Tree')
for m_depth in range(2, 30, 2):
print(m_depth)
for n_est in range(100,300,50):
print(str(m_depth) + ' - ' + str(n_est))
clf = RandomForestClassifier(n_estimators=n_est, max_depth=m_depth, random_state=3214)
clf.fit(train_df, train['response_round_score'])
y_pred = clf.predict(test_df)
print('Number of topics {}'.format(number_of_topics))
f1 = f1_score(test['response_round_score'], y_pred, average = 'weighted')
accuracy = accuracy_score(test['response_round_score'], y_pred)
print('Score: {}'.format(accuracy_score(test['response_round_score'], y_pred)))
df_results = pd.concat([df_results, pd.DataFrame([n_est, m_depth, accuracy, f1, number_of_topics])], axis = 1)
df_results = df_results.transpose()
df_results.to_csv('Decision_trees_results.csv')
| UTF-8 | Python | false | false | 2,759 | py | 134 | lda_randomforest.py | 30 | 0.656035 | 0.638275 | 0 | 73 | 36.794521 | 152 |
xuhui1994/craft_onnx_tensorrt | 7,645,041,792,781 | 76e38b052df80320cec801449f576aad21651c64 | 008689ceb7629b9c26b475c3e5eb66bd5e2c67f3 | /export_onnx.py | 7410a0922102a594bf8c1519a60bccd728552259 | []
| no_license | https://github.com/xuhui1994/craft_onnx_tensorrt | d323f4f7de9dcbd09cbf8cc3b23241b7f322a9a2 | 443aced62125e582a7d1ef221952b365e686bb64 | refs/heads/master | 2023-03-05T05:07:08.155567 | 2021-02-20T06:24:55 | 2021-02-20T06:24:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import sys
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from PIL import Image
import cv2
from skimage import io
import numpy as np
import craft_utils
import imgproc
import file_utils
import json
import zipfile
import random
from craft import CRAFT
from collections import OrderedDict
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False ,action='store_true', help='enable polygon type')
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser.add_argument('--test_folder', default='data/', type=str, help='folder path to input images')
parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
args = parser.parse_args()
""" For test images in a folder """
image_list, _, _ = file_utils.get_files(args.test_folder)
print(image_list)
result_folder = './result/'
if not os.path.isdir(result_folder):
os.mkdir(result_folder)
def chinese_num(line):
    # count CJK characters (Kangxi radicals and CJK Unified Ideographs ranges)
total = 0
for u_char in line:
if (u_char >= u'\u2f00' and u_char<=u'\u2fd5') or (u_char >= u'\u4e00' and u_char<=u'\u9fa5'):
total += 1
return total
def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
t0 = time.time()
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
if cuda:
x = x.cuda()
# forward pass
with torch.no_grad():
y, feature = net(x)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
# refine link
if refine_net is not None:
with torch.no_grad():
y_refiner = refine_net(y, feature)
score_link = y_refiner[0,:,:,0].cpu().data.numpy()
t0 = time.time() - t0
t1 = time.time()
# Post-processing
boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
#Cords_list = []
#for i,box in enumerate(boxes):
# Cords = craft_utils.getVerticalCord(box,score_link,link_threshold,i)
# Cords = craft_utils.adjustResultCoordinates(Cords, ratio_w, ratio_h)
# Cords_list.append(Cords)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
for k in range(len(polys)):
if polys[k] is None: polys[k] = boxes[k]
t1 = time.time() - t1
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = imgproc.cvt2HeatmapImg(render_img)
if args.show_time : print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1))
#print(boxes, polys, ret_score_text)
#return boxes, polys, ret_score_text ,Cords_list
return boxes, polys, ret_score_text
def find_one_line(txt_result):
    # group detected boxes whose vertical centers fall on the same text line;
    # note that this pops consumed entries from the txt_result dict it is given
#list_1 = list_1_array[0]
final_res = {}
flag = 0
for index in sorted(txt_result.keys()):
if index in txt_result:
list_1 = txt_result[index]
final_res[index] = []
#txt_result.pop(index)
y0 = (list_1[0][1] + list_1[0][7]) / 2
w = abs(list_1[0][7] - list_1[0][1])
for index_t in sorted(txt_result.keys()):
item = txt_result[index_t]
y = (item[0][1] + item[0][7]) / 2
if abs(y - y0) < w/2:
final_res[index].append(item)
txt_result.pop(index_t)
print(final_res)
return final_res
def sorted_by_y(array):
y_list = []
for i in range(len(array)):
y_list.append(int(array[i][0][0]))
index_list = np.argsort(y_list)
return index_list
if __name__ == '__main__':
# load net
#res = open('res.txt','w',encoding='utf8')
net = CRAFT() # initialize
print('Loading weights from checkpoint (' + args.trained_model + ')')
if args.cuda:
net.load_state_dict(copyStateDict(torch.load(args.trained_model)))
else:
net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
if args.cuda:
net = net.cuda()
input_batch = 1
input_channel = 3
input_h = 448
input_w = 448
output_batch = input_batch
output_h = input_h / 2
output_w = input_w / 2
inputc = torch.randn(input_batch, input_channel, \
input_h, input_w, device='cuda')
outputc = net(inputc.cuda())
dynamic_axes = {'inputc': {0: 'input_batch', 1: 'input_channel', 2: "input_h", 3: 'input_w'},'outputc': {0: 'output_batch', 1: "output_h", 2: 'output_w'}}
output_names = ["output1","output2"]
input_names = ["input"]
torch.onnx.export(
net,
inputc,
'craft.onnx',
verbose=True,
input_names=input_names,
output_names=output_names,
#dynamic_axes=dynamic_axes,
)
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
net.eval()
| UTF-8 | Python | false | false | 7,024 | py | 3 | export_onnx.py | 2 | 0.606065 | 0.589977 | 0 | 217 | 31.313364 | 162 |
janlindblad/eantc20 | 14,551,349,231,197 | 420e1d7ebf1fd18061790b4b452ea17611cf0e4f | c6ad3f65da16e714619bb62b27565efc00364c1c | /work-packages/f5500/python/f5500/CiscoIOSXRPerfMeasCfg_ns.py | 8eff8f4d2a84ea17c1df2b14827d5ad99da52e97 | []
| no_license | https://github.com/janlindblad/eantc20 | c0c74a1e6476ca5e5a2906f18472259d8a515b21 | e9051c77de16578c97dcf1f7ecb0a5ef30ad2e68 | refs/heads/master | 2020-12-21T19:09:36.076797 | 2020-03-17T18:17:04 | 2020-03-17T18:17:04 | 236,523,910 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE
# This file has been auto-generated by the confdc compiler.
# Source: ../load-dir/Cisco-IOS-XR-perf-meas-cfg.fxs
# BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE
#
# Autogenerated namespace class for YANG module Cisco-IOS-XR-perf-meas-cfg.yang
class ns(object):
hash = 1478311931
id = "_f5500-nc-1.0:f5500-nc-1.0#http://cisco.com/ns/yang/Cisco-IOS-XR-perf-meas-cfg"
uri = "_f5500-nc-1.0:f5500-nc-1.0#http://cisco.com/ns/yang/Cisco-IOS-XR-perf-meas-cfg"
prefix = "Cisco-IOS-XR-perf-meas-cfg"
Cisco_IOS_XR_perf_meas_cfg_interface = 221572658
Cisco_IOS_XR_perf_meas_cfg_interface_ = "interface"
Cisco_IOS_XR_perf_meas_cfg_delay_measurement = 65222513
Cisco_IOS_XR_perf_meas_cfg_delay_measurement_ = "delay-measurement"
Cisco_IOS_XR_perf_meas_cfg_enable = 448570367
Cisco_IOS_XR_perf_meas_cfg_enable_ = "enable"
Cisco_IOS_XR_perf_meas_cfg_interface_name = 1322899300
Cisco_IOS_XR_perf_meas_cfg_interface_name_ = "interface-name"
Cisco_IOS_XR_perf_meas_cfg_delay_profile_interface = 62000313
Cisco_IOS_XR_perf_meas_cfg_delay_profile_interface_ = "delay-profile-interface"
Cisco_IOS_XR_perf_meas_cfg_accelerated = 2081546535
Cisco_IOS_XR_perf_meas_cfg_accelerated_ = "accelerated"
Cisco_IOS_XR_perf_meas_cfg_count = 180894398
Cisco_IOS_XR_perf_meas_cfg_count_ = "count"
Cisco_IOS_XR_perf_meas_cfg_interval = 559952988
Cisco_IOS_XR_perf_meas_cfg_interval_ = "interval"
Cisco_IOS_XR_perf_meas_cfg_enable_performance_measurement = 606861816
Cisco_IOS_XR_perf_meas_cfg_enable_performance_measurement_ = "enable-performance-measurement"
Cisco_IOS_XR_perf_meas_cfg_periodic = 538465825
Cisco_IOS_XR_perf_meas_cfg_periodic_ = "periodic"
Cisco_IOS_XR_perf_meas_cfg_enable_interface = 1988108391
Cisco_IOS_XR_perf_meas_cfg_enable_interface_ = "enable-interface"
Cisco_IOS_XR_perf_meas_cfg_threshold = 1161930343
Cisco_IOS_XR_perf_meas_cfg_threshold_ = "threshold"
Cisco_IOS_XR_perf_meas_cfg_burst = 1259488663
Cisco_IOS_XR_perf_meas_cfg_burst_ = "burst"
Cisco_IOS_XR_perf_meas_cfg_enable_delay_measurement = 1777392156
Cisco_IOS_XR_perf_meas_cfg_enable_delay_measurement_ = "enable-delay-measurement"
Cisco_IOS_XR_perf_meas_cfg_advertise_delay = 654832865
Cisco_IOS_XR_perf_meas_cfg_advertise_delay_ = "advertise-delay"
Cisco_IOS_XR_perf_meas_cfg_performance_measurement = 1340772040
Cisco_IOS_XR_perf_meas_cfg_performance_measurement_ = "performance-measurement"
Cisco_IOS_XR_perf_meas_cfg_disable = 330763859
Cisco_IOS_XR_perf_meas_cfg_disable_ = "disable"
Cisco_IOS_XR_perf_meas_cfg_one_way_measurement = 519889523
Cisco_IOS_XR_perf_meas_cfg_one_way_measurement_ = "one-way-measurement"
Cisco_IOS_XR_perf_meas_cfg_interfaces = 321503962
Cisco_IOS_XR_perf_meas_cfg_interfaces_ = "interfaces"
Cisco_IOS_XR_perf_meas_cfg_probe = 1118433712
Cisco_IOS_XR_perf_meas_cfg_probe_ = "probe"
Cisco_IOS_XR_perf_meas_cfg_advertisement = 2100959827
Cisco_IOS_XR_perf_meas_cfg_advertisement_ = "advertisement"
Cisco_IOS_XR_perf_meas_cfg_minimum_change = 1794400902
Cisco_IOS_XR_perf_meas_cfg_minimum_change_ = "minimum-change"
| UTF-8 | Python | false | false | 3,282 | py | 2,201 | CiscoIOSXRPerfMeasCfg_ns.py | 1,006 | 0.728519 | 0.655393 | 0 | 63 | 51.095238 | 97 |
alwhite1/tech_smart | 13,666,585,977,589 | 7e32ee7e7028000334e1f0b5bf87a2013a9a46f4 | b5a30e688f83dad8eda90f47ed5cc9f4374f9de5 | /to/admin.py | bbe41c67e3b268d20d4bf3163f8c5220113e0e96 | []
| no_license | https://github.com/alwhite1/tech_smart | 915b1bfa02bbc43a394505c966180ac71e3d0762 | 6c0378d7185511f5427c33b940c46323d6f310b6 | refs/heads/master | 2021-01-15T18:14:16.846325 | 2015-11-17T10:06:33 | 2015-11-17T10:06:33 | 32,804,106 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from to.models import LOU
from to.models import Staffer
admin.site.register(LOU)
admin.site.register(Staffer)
| UTF-8 | Python | false | false | 145 | py | 47 | admin.py | 22 | 0.813793 | 0.813793 | 0 | 7 | 19.714286 | 32 |
bahamat/sdcadmin | 10,213,432,258,742 | 01646b845b48a39f945561baf63d397d3b47aff7 | e48a2cadaee548a7be5449f16ad54cdb7bffa583 | /sdcadmin/datacenter.py | 9c735341c73bdf35f63c97a65487f2925aabcad5 | [
"Apache-2.0"
]
| permissive | https://github.com/bahamat/sdcadmin | ad7502e1e8591b67599d556a698b7f009b0f7bf4 | 0a51cdbde70a3694918cc0a3c20340bc9fff54e4 | refs/heads/master | 2020-03-28T07:56:42.858083 | 2015-01-21T10:05:04 | 2015-01-21T10:05:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = 'ernm'
import json
import requests
from .machine import SmartMachine, KVMMachine, Machine
from .job import Job
from .package import Package
class DataCenter(object):
APIS = ['sapi', 'vmapi', 'fwapi', 'imgapi', 'napi', 'papi', 'workflow']
default_headers = {'Content-Type': 'application/json'}
STATE_RUNNING = Machine.STATE_RUNNING
STATE_FAILED = Machine.STATE_FAILED
STATE_DESTROYED = Machine.STATE_DESTROYED
STATE_PROVISIONING = Machine.STATE_PROVISIONING
STATE_STOPPED = Machine.STATE_STOPPED
def request(self, method, api, path, headers=None, data=None, **kwargs):
full_path = getattr(self, api) + path
request_headers = {}
request_headers.update(self.default_headers)
if headers:
request_headers.update(headers)
jdata = None
if data:
jdata = json.dumps(data)
resp = requests.request(method, full_path, headers=request_headers, data=jdata, **kwargs)
if resp.content:
if resp.headers['content-type'] == 'application/json':
return (json.loads(resp.content), resp)
else:
return (resp.content, resp)
else:
return (None, resp)
def get_ip_for_service(self, service):
resp, _ = self.request('GET', 'sapi', '/services', params={'name': service})
if not resp:
raise EnvironmentError('Could not receive service information for service %s' % service)
service = resp.pop()
resp, _ = self.request('GET', 'sapi', '/instances', params={'service_uuid': service.get('uuid')})
if not resp:
raise EnvironmentError('Could not retrieve instance information for service uuid %s' % service.get('uuid'))
instance = resp.pop()
return instance.get('metadata').get('ADMIN_IP')
def __init__(self, sapi, vmapi=None, fwapi=None, imgapi=None, napi=None, papi=None, workflow=None):
self.sapi = 'http://' + sapi
self.vmapi = 'http://' + (vmapi or self.get_ip_for_service('vmapi'))
self.fwapi = 'http://' + (fwapi or self.get_ip_for_service('fwapi'))
self.imgapi = 'http://' + (imgapi or self.get_ip_for_service('imgapi'))
self.napi = 'http://' + (napi or self.get_ip_for_service('napi'))
self.papi = 'http://' + (papi or self.get_ip_for_service('papi'))
self.workflow = 'http://' + (workflow or self.get_ip_for_service('workflow'))
def healthcheck_vmapi(self):
health_data, _ = self.request('GET', 'vmapi', '/ping')
return health_data.get('status') == 'OK'
# FIXME: Code smell, duplication
def list_smart_machines(self, owner_uuid=None, state=None):
params = {'brand': 'joyent'}
if owner_uuid:
params.update({'owner_uuid': owner_uuid})
if state:
params.update({'state': state})
raw_vms = self.__list_machines(params)
return [SmartMachine(datacenter=self, data=raw_vm) for raw_vm in raw_vms]
def list_machines(self, owner_uuid=None, state=None):
smart_machines = self.list_smart_machines(owner_uuid, state)
kvm_machines = self.list_kvm_machines(owner_uuid, state)
return smart_machines + kvm_machines
def list_kvm_machines(self, owner_uuid=None, state=None):
params = {'brand': 'kvm'}
if owner_uuid:
params.update({'owner_uuid': owner_uuid})
if state:
params.update({'state': state})
raw_vms = self.__list_machines(params)
return [KVMMachine(datacenter=self, data=raw_vm) for raw_vm in raw_vms]
def __list_machines(self, params):
vms, _ = self.request('GET', 'vmapi', '/vms', params=params)
return vms
def create_smart_machine(self, owner, networks, package, image, alias=None, user_script=""):
params = {'brand': 'joyent', 'owner_uuid': owner, 'networks': networks, 'billing_id': package,
'image_uuid': image}
if alias:
params.update({'alias': alias})
        metadata = {}
        if user_script:
            metadata.update({'user-script': user_script})
            # forward the user script with the provision request;
            # 'customer_metadata' is the assumed VMAPI field for user metadata
            params.update({'customer_metadata': metadata})
raw_job_data = self.__create_machine(params)
# TODO: error handling
if not raw_job_data.get('vm_uuid'):
raise Exception('Could not create SmartMachine')
vm_uuid = raw_job_data.get('vm_uuid')
vm = self.get_smart_machine(vm_uuid)
vm.creation_job_uuid = raw_job_data.get('job_uuid')
return vm
def create_kvm_machine(self, owner, networks, package, image, alias=None, user_script=""):
package_obj = Package(datacenter=self, uuid=package)
params = {'brand': 'kvm',
'owner_uuid': owner,
'networks': networks,
'billing_id': package,
'disks': [{'image_uuid': image},
{'size': package_obj.quota}]}
if alias:
params.update({'alias': alias})
        metadata = {}
        if user_script:
            metadata.update({'user-script': user_script})
            # forward the user script with the provision request;
            # 'customer_metadata' is the assumed VMAPI field for user metadata
            params.update({'customer_metadata': metadata})
raw_job_data = self.__create_machine(params)
if not raw_job_data.get('vm_uuid'):
raise Exception('Could not create KVM VM')
vm_uuid = raw_job_data.get('vm_uuid')
vm = self.get_kvm_machine(vm_uuid)
vm.creation_job_uuid = raw_job_data.get('job_uuid')
return vm
def __create_machine(self, params):
raw_job_data, _ = self.request('POST', 'vmapi', '/vms', data=params)
return raw_job_data
def get_smart_machine(self, uuid):
return SmartMachine(self, self.get_machine_raw(uuid))
def get_kvm_machine(self, uuid):
return KVMMachine(self, self.get_machine_raw(uuid))
def get_machine_raw(self, uuid):
raw_vm_data, _ = self.request('GET', 'vmapi', '/vms/' + uuid)
return raw_vm_data
def get_machine(self, uuid):
raw_vm_data = self.get_machine_raw(uuid)
if raw_vm_data.get('brand') == 'joyent':
return SmartMachine(self, raw_vm_data)
if raw_vm_data.get('brand') == 'kvm':
return KVMMachine(self, raw_vm_data)
return None
def list_jobs(self):
raw_job_data, _ = self.request('GET', 'workflow', '/jobs')
return [Job(datacenter=self, data=job) for job in raw_job_data]
def get_job(self, uuid):
return Job(self, self.get_job_raw(uuid))
def get_job_raw(self, uuid):
raw_job_data, _ = self.request('GET', 'workflow', '/jobs/' + uuid)
return raw_job_data
def list_packages(self):
raw_package_data, _ = self.request('GET', 'papi', '/packages')
return [Package(datacenter=self, data=package) for package in raw_package_data]
def get_package(self, uuid):
return Package(self, self.get_package_raw(uuid))
def get_package_raw(self, uuid):
raw_package_data, _ = self.request('GET', 'papi', '/packages/' + uuid)
return raw_package_data | UTF-8 | Python | false | false | 7,678 | py | 7 | datacenter.py | 7 | 0.605105 | 0.604064 | 0 | 195 | 38.379487 | 119 |
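
# --- usage sketch (added for illustration): wiring up a DataCenter against a
# SmartDataCenter/Triton installation and listing running machines. The SAPI
# address below is a placeholder; the remaining API endpoints are discovered
# through SAPI by the constructor.
if __name__ == "__main__":
    dc = DataCenter(sapi="10.99.99.32")  # placeholder SAPI admin IP
    if dc.healthcheck_vmapi():
        for vm in dc.list_machines(state=DataCenter.STATE_RUNNING):
            print(vm)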
Simranjeet96/MovRec | 4,071,629,005,754 | 4ecd9e0ca724236675bb1545992e410dfaf22e0f | b65cb15f70b8c3a4108a75cfaa0a7675ece11830 | /src/web/views.py | 132b623baea160485ffa56390fc33cfb0a743ceb | []
| no_license | https://github.com/Simranjeet96/MovRec | 1c26f9c91dd99ce9f43f9af01300709c70bb64a1 | e731b3a430eaa9409ab22ad98ba222faa7e862e6 | refs/heads/master | 2020-05-15T06:03:53.325938 | 2019-04-19T14:18:00 | 2019-04-19T14:18:00 | 182,112,844 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.shortcuts import render,get_object_or_404,redirect
from django.db.models import Q
from django.http import Http404
from .models import Movie,Myrating
from django.contrib import messages
from .forms import UserForm
from .MovieRecmEng import Myrecommend
def recommend(request):
if not request.user.is_authenticated:
return redirect("login")
if not request.user.is_active:
raise Http404
current_user_id= request.user.id
user_id_similar_to_current_user_id=Myrecommend(current_user_id)
print('*'*20,user_id_similar_to_current_user_id)
movie_list=list(Movie.objects.filter(myrating__user_id=user_id_similar_to_current_user_id,myrating__rating__gte=4))
# print(Movie.objects.filter(myrating__user_id=current_user_id,myrating__rating__gte=4).values())
return render(request,'web/recommend.html',{'movie_list':movie_list})
# List view
def index(request):
movies = Movie.objects.all()
query = request.GET.get('q')
if query:
movies = Movie.objects.filter(Q(title__icontains=query)).distinct()
return render(request,'web/list.html',{'movies':movies})
return render(request,'web/list.html',{'movies':movies})
# detail view
def detail(request,movie_id):
if not request.user.is_authenticated:
return redirect("login")
if not request.user.is_active:
raise Http404
movies = get_object_or_404(Movie,id=movie_id)
#for rating
if request.method == "POST":
rate = request.POST['rating']
ratingObject = Myrating()
ratingObject.user = request.user
ratingObject.movie = movies
ratingObject.rating = rate
ratingObject.save()
messages.success(request,"Your Rating is submited ")
return redirect("index")
return render(request,'web/detail.html',{'movies':movies})
# Register user
def signUp(request):
form =UserForm(request.POST or None)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username,password=password)
if user is not None:
if user.is_active:
login(request,user)
return redirect("index")
context ={
'form':form
}
return render(request,'web/signUp.html',context)
# Login User
def Login(request):
if request.method=="POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username,password=password)
if user is not None:
if user.is_active:
login(request,user)
return redirect("index")
else:
                return render(request,'web/login.html',{'error_message':'Your account is disabled'})
else:
return render(request,'web/login.html',{'error_message': 'Invalid Login'})
return render(request,'web/login.html')
#Logout user
def Logout(request):
logout(request)
return redirect("login")
| UTF-8 | Python | false | false | 2,905 | py | 5 | views.py | 2 | 0.732874 | 0.726334 | 0 | 94 | 29.861702 | 116 |
sasha0/episodes | 3,307,124,832,523 | a2bf78fb19e8b2db572183a21e5fef111a43f0be | 9b50ec9706ae68b12daa56c549dd8efe8e83dc05 | /app/api_views.py | 0018f8ed3afa1458314e1fa812bdde850ccf45e1 | []
| no_license | https://github.com/sasha0/episodes | a60c84608224d85ef9491ef165bcb4ea6fe98a32 | 43667ed4f26bb6bb0bd3e5a60b2c235868b9df42 | refs/heads/master | 2021-01-25T12:14:19.592495 | 2015-03-28T21:51:29 | 2015-03-28T21:51:29 | 30,326,633 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""REST API controllers, powered by flask-restful."""
from flask.ext.restful import Api, Resource, fields, marshal_with, marshal
from flask.ext.restful import reqparse
from flask.ext.security import current_user
from app import api, es, QueryStringQuery, MultiMatchQuery, TermQuery
from app.resource_fields import *
from models import db, Episode, TVChannel, TVSeries, TVSeriesFeed
parser = reqparse.RequestParser()
class TVSeriesList(Resource):
"""Paginated list of available TV shows, including indication if current user subscribed to given TV shows."""
def get(self, page_id=1):
pagination = TVSeries.query.paginate(page_id)
user_id = getattr(current_user, 'id', None)
data = dict(marshal(pagination.items, tvseries_resource_fields, envelope='items'))
data['pagination_items'] = list(pagination.iter_pages())
data['user_id'] = user_id
if user_id:
feed = TVSeriesFeed.query.filter(TVSeriesFeed.user_id == user_id)
for tvseries in data['items']:
if tvseries['id'] in [f.tvseries_id for f in feed]:
tvseries['is_subscribed'] = True
else:
tvseries['is_subscribed'] = False
return data
class TVSeriesDetail(Resource):
"""Page with detailed information about single TV show."""
def get(self, tvseries_id):
tvseries = TVSeries.query.get(tvseries_id)
user_id = getattr(current_user, 'id', None)
data = dict(marshal(tvseries, tvseries_resource_fields))
data['roles'] = marshal(tvseries.roles.all(), role_resource_fields)
data['episodes'] = marshal(tvseries.episodes, episode_resource_fields)
data['user_id'] = user_id
if user_id:
feed = TVSeriesFeed.query.filter(TVSeriesFeed.user_id == user_id)
if data['id'] in [f.tvseries_id for f in feed]:
data['is_subscribed'] = True
else:
data['is_subscribed'] = False
return data
class TVChannelsList(Resource):
"""List of available TV channels."""
@marshal_with(tvchannel_resource_fields)
def get(self):
return TVChannel.query.all()
class TVSeriesForChannelList(Resource):
"""List of TV series, produced by given TV channel."""
def get(self, tvchannel_id):
tvchannel = TVChannel.query.get(tvchannel_id)
        # Work on a copy so popping a key does not mutate the shared field dict
        tvseries_for_channel_resource_fields = dict(tvchannel_resource_fields)
        tvseries_for_channel_resource_fields.pop('popular_tvseries', None)
tvseries = list(tvchannel.tvseries.all())
data = dict(marshal(tvchannel, tvseries_for_channel_resource_fields))
data['tvseries'] = marshal(tvseries, tvseries_resource_fields)
return data
class UpcomingEpisodesList(Resource):
"""List of episodes, to be aired in the nearest future."""
def get(self, page_id=1):
pagination = Episode.query.filter(Episode.showed_at >= datetime.date.today())\
.order_by(db.desc(Episode.showed_at))\
.paginate(page_id)
data = dict(marshal(pagination.items, upcoming_episode_resource_fields, envelope='items'))
data['pagination_items'] = list(pagination.iter_pages())
return data
class EpisodesList(Resource):
"""List of all available episodes for the given TV show."""
@marshal_with(episode_resource_fields)
def get(self, tvseries_id):
return list(Episode.query.filter(Episode.tvseries_id == tvseries_id)\
.order_by(Episode.season_number, Episode.episode_number, ))
class TVSeriesSearch(Resource):
"""Full-text search of TV series, powered by Elasticsearch."""
@marshal_with(tvseries_resource_fields)
def get(self):
parser.add_argument('q', type=str)
params = parser.parse_args()
q = MultiMatchQuery(['title', 'description'], params['q'])
r = es.search(query=q)
return list(r)
class Subscriptions(Resource):
"""List of TV shows, user subscribed to. Ability to subscribe to given TV show to get updates."""
def post(self):
success = False
parser.add_argument('tvseries_id', type=int)
params = parser.parse_args()
if TVSeriesFeed.query.filter(TVSeriesFeed.user_id == current_user.id,
TVSeriesFeed.tvseries_id == params['tvseries_id']).count() == 0:
feed = TVSeriesFeed(user_id=current_user.id, tvseries_id=params['tvseries_id'])
db.session.add(feed)
db.session.commit()
success = True
return {'success': success}
def get(self, page_id=1):
user_id = getattr(current_user, 'id', None)
if user_id:
feed = TVSeriesFeed.query.filter(TVSeriesFeed.user_id == user_id)
tvseries_ids = [f.tvseries_id for f in feed]
pagination = Episode.query.filter(Episode.showed_at >= datetime.date.today(),
Episode.tvseries_id.in_(tvseries_ids))\
.order_by(db.desc(Episode.showed_at))\
.paginate(page_id)
data = dict(marshal(pagination.items, upcoming_episode_resource_fields, envelope='items'))
data['pagination_items'] = list(pagination.iter_pages())
return data
return {}
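# Example requests against the routes registered below (illustrative only;
# host and port assume a default local Flask development server):
#   GET  /series/2                     -> second page of TV series
#   GET  /series/i/5                   -> detail view for TV series id 5
#   GET  /series/search?q=sherlock     -> full-text search via Elasticsearch
#   POST /subscriptions (tvseries_id)  -> subscribe the current user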
api.add_resource(TVSeriesList, '/series', '/series/<int:page_id>')
api.add_resource(TVSeriesDetail, '/series/i/<int:tvseries_id>')
api.add_resource(TVChannelsList, '/channels/')
api.add_resource(TVSeriesForChannelList, '/channels/<int:tvchannel_id>/tvseries')
api.add_resource(EpisodesList, '/series/i/<int:tvseries_id>/episodes')
api.add_resource(UpcomingEpisodesList, '/episodes/upcoming/', '/episodes/upcoming/<int:page_id>')
api.add_resource(TVSeriesSearch, '/series/search')
api.add_resource(Subscriptions, '/subscriptions', '/subscriptions/<int:page_id>') | UTF-8 | Python | false | false | 6,017 | py | 17 | api_views.py | 10 | 0.632874 | 0.632043 | 0 | 144 | 40.791667 | 114 |
korylprince/BeagleCommand | 2,250,562,876,144 | 7b18c964d86f40c561a87a877a5873bb46f4bf87 | 124423867d4278e7bef4463c6a7e05d9fa52346d | /client/__init__.py | 743e8e8af953bda16190be1babcc68aac83a34c8 | []
| no_license | https://github.com/korylprince/BeagleCommand | b39ae000a38cf1dd204388eaa2b987025f2199dc | 0b130684cba41b12c8ac3ca141520f081ae12185 | refs/heads/master | 2016-09-05T16:36:47.517357 | 2013-07-20T13:41:26 | 2013-07-20T13:41:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import signal, os
from Queue import Empty
from multiprocessing import Process, Queue
from BeagleCommand import QuitinTime
# create message passing queues
SerialIn = Queue()
StorageIn = Queue()
MessageBox = Queue()
QueueOwners = {'serial':SerialIn,'storage':StorageIn}
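# Messages pulled off MessageBox are expected to expose a 'to' list of queue
# owner names and a 'msg' payload; a minimal sketch of such an object
# (illustrative only -- the real class is defined elsewhere in BeagleCommand):
#   class Message(object):
#       def __init__(self, to, msg):
#           self.to = to    # e.g. ['storage', 'serial']
#           self.msg = msg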
# wait until objects are defined to initialize workers
from storage import Storage
from serial import Serial
def run():
"""Run main server loop"""
    # create and start worker threads
    StorageThread = Storage(StorageIn, MessageBox)
    SerialThread = Serial(SerialIn, MessageBox)
    StorageThread.start()
    SerialThread.start()
    # start web server in a child process
    def f(MessageBox):
        from web import app
        app.MessageBox = MessageBox
        try:
            app.run(debug=True, use_reloader=False)
        except AttributeError:
            pass
    flaskProcess = Process(target=f, args=[MessageBox])
    flaskProcess.start()
    # create signal handler; registered only after flaskProcess exists so the
    # handler can always terminate it safely
    def signal_handler(sig, frame):
        print '\nCaught signal {0}... Quitin\' Time!'.format(str(sig))
        flaskProcess.terminate()
        flaskProcess.join()
        QuitinTime.set()
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
# pass messages until program is ended
while True:
# check to see if it's time to quit
if QuitinTime.is_set():
break
try:
# use timeout so that main thread can catch signals
msg = MessageBox.get(timeout=0.5)
for owner in msg.to:
QueueOwners[owner].put(msg.msg)
except Empty:
pass
except IOError:
pass
| UTF-8 | Python | false | false | 1,677 | py | 15 | __init__.py | 10 | 0.645796 | 0.644007 | 0 | 61 | 26.491803 | 70 |
andy5macht/service.odroidn2.oled | 9,543,417,353,603 | 01a71c1bd54a350518bcad65f2f138cd187bbd58 | d35bf0fba626a4ab8e5bb71f689d9a5f863deb5d | /lib/settings.py | 49836367f0fecb0adc9af271b7bd956bf9696758 | [
"MIT"
]
| permissive | https://github.com/andy5macht/service.odroidn2.oled | a0cd3d0d1a25ae07bd30207d028d88d66dca1340 | 60b91680da8d8918e7dea466cf5e91d9b80e9ca4 | refs/heads/master | 2023-06-09T14:46:49.738050 | 2020-08-22T16:10:07 | 2020-08-22T16:10:07 | 348,997,189 | 0 | 0 | MIT | true | 2021-03-18T08:33:02 | 2021-03-18T08:33:01 | 2021-03-05T05:02:45 | 2021-02-25T17:35:16 | 200 | 0 | 0 | 0 | null | false | false | import xbmcaddon
from lib.logging import *
addon = xbmcaddon.Addon(id="service.odroidn2.oled")
def getSetting(id):
return addon.getSetting(id).lower()
def getBool(id):
    # getSetting() already lower-cases the value
    return getSetting(id) == "true"
def getInt(id):
return int(getSetting(id))
def getHex(id):
return int(getSetting(id), 16)
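# Example (illustrative): with the "i2c.address" setting stored as "0x3C",
# getHex("i2c.address") returns 60 -- int() accepts the 0x prefix when a
# base of 16 is given, and getSetting() has already lower-cased the string.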
class OledSettings:
def __init__(self):
self.readSettings()
def readSettings(self):
self._settingI2CAddress = getHex("i2c.address")
self._settingdisplayType = getSetting("display.type")
self._settingShortFormat = getBool("display.shortformat")
self._settingBrightness = getInt("display.brightness")
self._settingPlaybackBrightness = getInt("display.playbackbrightness")
self._settingFont = getSetting("display.font")
self._settingFlipDisplay = getBool("display.flip")
self._settingClockOnlyMode = getBool("display.clockonlymode")
self._settingTimeMode = getSetting("display.timemode")
self._settingHideIcons = getBool("display.hideicons")
self._settingHideSDRIcon = getBool("display.hidesdricon")
self._settingIconType = getSetting("display.icontype")
def i2cAddress(self):
return self._settingI2CAddress
def displayType(self):
return self._settingdisplayType
def shortFormat(self):
return self._settingShortFormat
def brightness(self):
return self._settingBrightness
def playbackBrightness(self):
return self._settingPlaybackBrightness
def font(self):
return self._settingFont
def flipDisplay(self):
return self._settingFlipDisplay
def clockOnlyMode(self):
return self._settingClockOnlyMode
    def displayTimeElapsed(self):
        return self._settingTimeMode == 'elapsed'
def hideIcons(self):
return self._settingHideIcons
def hideSRDIcon(self):
return self._settingHideSDRIcon
    def iconType(self):
        # True when the solid icon set is selected
        return self._settingIconType == "solid"
| UTF-8 | Python | false | false | 2,297 | py | 11 | settings.py | 7 | 0.632564 | 0.629517 | 0 | 85 | 25.023529 | 78 |
davemcg/Rosalind | 13,572,096,656,801 | 062fb8d1ca721c0baecfa42f0896bd42c510a7b8 | 05a980609170614e4aeb10d73acc1bb29ec0580a | /PROT.py | 649724fc77a020fa9f10aef5cb5727d2e89ba278 | []
| no_license | https://github.com/davemcg/Rosalind | 19536ec78594b8755c63c25e449aadb53c09102b | d4f2bad0835c410e6030ca59288f2686b97f8ef0 | refs/heads/master | 2021-01-19T07:18:52.387106 | 2015-03-13T19:50:44 | 2015-03-13T19:50:44 | 31,798,561 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
"""
Given: An RNA string s corresponding to a strand of mRNA (of length at most 10 kbp).
Return: The protein string encoded by s.
"""
from Bio.Seq import Seq
from Bio.Alphabet import generic_rna
import fileinput
for line in fileinput.input():
    # strip() handles the trailing newline and also a final line without one,
    # unlike slicing off the last character
    seq = Seq(line.strip(), generic_rna)
    print(seq.translate())
| UTF-8 | Python | false | false | 333 | py | 12 | PROT.py | 11 | 0.714715 | 0.702703 | 0 | 19 | 16.526316 | 84 |
toszter/afecomputing | 9,921,374,484,196 | 6229b3fa099d7f367a2a8424fd0a1fa365cf481f | 5e104bdd7b9178ebba44a1c613a5a6a4f5a320e1 | /connect4/connect4.py | 35e9e9a5d77b2de7481f0f99a21f4f6d4f4b7e2f | []
| no_license | https://github.com/toszter/afecomputing | c716ef461b5be033bb8278d4a097bd1a3c6ca8c4 | 2a96546af62ee9756e8ec2f86d389bdb08d1511a | refs/heads/master | 2021-01-20T13:48:37.594743 | 2013-03-22T18:56:49 | 2013-03-22T18:56:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Connect 4
# AFE Python Class Spring 2013
#
# Use Lists to represent positions of X and O's on a 7x6 Board
# [[],[],[],[]]
#
# How do we check for a winning condition (4 in a row up/down/diag)
# [[],['x','x'],['o','x'],['x','o','x'],['o','x','o','x'],[],[]]
# 0 1 2 3
# Columns are named 1 - 7.
# To take a turn, type the column number you want to add to. If the column is
# full, you get a message to try again.
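#
# Illustrative sketch of the representation (not executed): after a few moves
# the column lists might look like
#     [[], ['x', 'x'], ['o'], ['x', 'o', 'x', 'x'], [], [], []]
# where columns[3][0] == 'x' is the bottom chip of column 4. A vertical win is
# four identical entries in a row within one column, e.g.
#     col = ['o', 'x', 'x', 'x', 'x']
#     any(len(set(col[i:i + 4])) == 1 for i in range(len(col) - 3))  # True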
class Board(object):
def __init__(self, columns=7, rows=6):
self.columns = [[] for x in range(columns)]
self.rows = rows
def is_full(self):
'''Check if all the columns are full.'''
return all(len(x) == self.rows for x in self.columns)
    def check_winner(self, col):
        '''Checks the board for a 4-in-a-row connection (vertical only so far).'''
        winner = None
        # Count the current run of matching chips; a single chip is a run of 1.
        # (Starting the count at 0 would demand five chips before a win.)
        win_count = 1
        row = 0
        column = self.columns[col]
        # Condition 1 - Vertical
        if len(column) >= 4:
            try:
                while win_count < 4:
                    if column[row+1] == column[row]:
                        win_count += 1
                        winner = column[row]
                    else:
                        win_count = 1
                        winner = None
                    row += 1
            except IndexError:
                # Ran off the top of the column without four in a row
                winner = None
        # TODO: Condition 2 - Horizontal
        # TODO: Condition 3 - Z Diagonal
        # TODO: Condition 4 - S Diagonal
        return winner
class Connect4(object):
    def __init__(self, columns=None, rows=None):
        self.board = Board()
        self.players = ["X","O"]
        self.current_player = 0
        # no column has been played yet
        self.current_col = None
    def take_turn(self):
        '''Take a turn by slotting an "x" or "o"'''
        current_player = self.players[self.current_player]
        print "Player '{0}', your turn...".format(current_player)
        col = raw_input("Which column deserves an '{0}'?? ".format(current_player))
        try:
            # int() can raise ValueError, so it belongs inside the try block
            self.current_col = int(col)-1
            if self.current_col < 0:
                # keep negative numbers from indexing from the end of the list
                raise IndexError
            target_col = self.board.columns[self.current_col]
        except ValueError:
            print "Oops. Looks like that's not a column number. Try again."
        except IndexError:
            print "Whoa. That's out of my range. Try again."
        else:
            # Check if the column is full
            if len(target_col) == self.board.rows: # rows is int
                print "Dude. That column is full, brah. Try again."
            else:
                # Add player's chip to the board
                target_col.append(current_player)
                # Switch to the next player only after a valid move
                self.current_player = 1 - self.current_player
def print_board(self):
'''prints the board so you can decide your next move.'''
# 1 2 3 4 5 6 7
# +===+===+===+===+===+===+===+
# | | | | | | | |5
# | O | O | | | | | |4
# | O | O | | | | | |3
# | O | O | | | | | |2
# | X | X | | | | | |1
# | X | O | | | | | |0
# +===+===+===+===+===+===+===+
cols = len(self.board.columns)
rows = self.board.rows
print " " + " ".join(str(x+1).center(3) for x in range(cols))
border = "+===" * cols + "+"
print border
for r in reversed(range(rows)):
# 5, 4, 3, 2, 1, 0
row = []
            # use r as a reversed row index so row 0 prints at the bottom
for c in range(cols):
try:
value = self.board.columns[c][r].center(3)
except IndexError:
value = "".center(3)
row.append(value)
print "|" + "|".join(row) + "|"
print border
print
    def check_winner(self):
        '''Checks for a winner.'''
        winner = None
        # Only inspect the board once at least one valid move has been made
        if self.current_col is not None:
            winner = self.board.check_winner(self.current_col)
        # Check for a draw
        if winner is None and self.board.is_full():
            winner = "draw"
        return winner
def run(self):
'''Runs the game until exit or win!'''
winner = None
self.print_board()
while winner is None:
self.take_turn()
self.print_board()
winner = self.check_winner()
# Break out in two cases
# Win or Draw
if winner == "draw":
print "Yarrr, the cat took the game."
else:
print "{0} wins!".format(winner)
if __name__ == "__main__":
game = Connect4()
game.run()
| UTF-8 | Python | false | false | 4,743 | py | 2 | connect4.py | 1 | 0.458992 | 0.445288 | 0 | 147 | 31.258503 | 84 |
NickSwainston/misc_scripts | 15,032,385,577,814 | 60e04fbd5156948bd415fd905767f9f348cfc448 | af0c01986f6228c9b0c8da53527d757be84e8de5 | /spectra_paper/fit_pulsar_spectral_models_and_doc.py | 957e6c5d9fa12ea2aa3ae9e96baf90c1c43d6591 | []
| no_license | https://github.com/NickSwainston/misc_scripts | 9f304e304a42e624f77cd9bc1163263e537af944 | 5d61fa2e869e06d09326360484734c4fd24275eb | refs/heads/master | 2023-05-27T20:51:49.301058 | 2023-05-16T02:03:55 | 2023-05-16T02:03:55 | 187,352,432 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import os
import matplotlib.pyplot as plt
import psrqpy
import numpy as np
import shutil
from PIL import Image
import glob
import torch.multiprocessing as mp
from functools import partial
from tqdm import tqdm
import yaml
from pulsar_spectra.spectral_fit import find_best_spectral_fit, estimate_flux_density
from pulsar_spectra.catalogue import collect_catalogue_fluxes, CAT_DIR
from pulsar_spectra.models import model_settings
df = pd.read_csv("{}/../survey_paper/SMART_pulsars.csv".format(os.path.dirname(os.path.realpath(__file__))))
pulsar_obsid_df = pd.read_csv("{}/pulsar_best_obs.csv".format(os.path.dirname(os.path.realpath(__file__))))
#print(df['Pulsar'].tolist())
#pulsars = list(dict.fromkeys(df['Pulsar'].tolist()))
with open(f"{CAT_DIR}/Bhat_2022.yaml", "r") as stream:
cat_dict = yaml.safe_load(stream)
pulsars = cat_dict.keys()
print(len(pulsars))
cat_list = collect_catalogue_fluxes(exclude=["Bhat_2022"])
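# cat_list maps each pulsar Jname to [freqs (MHz), bandwidths (MHz), flux
# densities (mJy), flux density errors (mJy), references], the order it is
# unpacked in throughout fit_and_plot below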
query = psrqpy.QueryATNF().pandas
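# Note: fit_and_plot appends to results_record inside Pool worker processes,
# so in the parent process this list stays empty; only the dicts returned by
# the workers (collected via imap below) reach the output CSV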
results_record = []
#for output csv
output_df = pd.DataFrame(
columns=[
"Pulsar",
"ObservationID",
"ATNF Period (s)",
"ATNF DM",
"ATNF B_surf (G)",
"ATNF E_dot (ergs/s)",
"Offset (degrees)",
"Flux Density (mJy)",
"Flux Density Uncertainty (mJy)",
"Flux Density Scintilation Uncertainty (mJy)",
"Estimated Flux Density (mJy)",
"Estimated Flux Density Uncertainty (mJy)",
"Model",
"Model before MWA",
"Probability Best",
"Min freq before MWA (MHz)",
"Max freq before MWA (MHz)",
"N data flux",
"pl_a" ,
"pl_u_a" ,
"pl_c" ,
"pl_u_c" ,
"bpl_vb" ,
"bpl_u_vb" ,
"bpl_a1" ,
"bpl_u_a1" ,
"bpl_a2" ,
"bpl_u_a2" ,
"bpl_c" ,
"bpl_u_c" ,
"hfco_vc" ,
"hfco_u_vc" ,
"hfco_c" ,
"hfco_u_c" ,
"lfto_vpeak" ,
"lfto_u_vpeak" ,
"lfto_a" ,
"lfto_u_a" ,
"lfto_c" ,
"lfto_u_c" ,
"lfto_beta" ,
"lfto_u_beta" ,
"dtos_vpeak" ,
"dtos_u_vpeak" ,
"dtos_vc" ,
"dtos_u_vc" ,
"dtos_a" ,
"dtos_u_a" ,
"dtos_c" ,
"dtos_u_c" ,
"dtos_beta" ,
"dtos_u_beta" ,
"pre_pl_a" ,
"pre_pl_u_a" ,
"pre_pl_c" ,
"pre_pl_u_c" ,
"pre_bpl_vb" ,
"pre_bpl_u_vb" ,
"pre_bpl_a1" ,
"pre_bpl_u_a1" ,
"pre_bpl_a2" ,
"pre_bpl_u_a2" ,
"pre_bpl_c" ,
"pre_bpl_u_c" ,
"pre_hfco_vc" ,
"pre_hfco_u_vc" ,
"pre_hfco_c" ,
"pre_hfco_u_c" ,
"pre_lfto_vpeak" ,
"pre_lfto_u_vpeak" ,
"pre_lfto_a" ,
"pre_lfto_u_a" ,
"pre_lfto_c" ,
"pre_lfto_u_c" ,
"pre_lfto_beta" ,
"pre_lfto_u_beta" ,
"pre_dtos_vpeak" ,
"pre_dtos_u_vpeak" ,
"pre_dtos_vc" ,
"pre_dtos_u_vc" ,
"pre_dtos_a" ,
"pre_dtos_u_a" ,
"pre_dtos_c" ,
"pre_dtos_u_c" ,
"pre_dtos_beta" ,
"pre_dtos_u_beta" ,
]
)
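# Columns prefixed "pre_" hold fit parameters from the catalogue-only fit
# (before the MWA point is added); the unprefixed columns include it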
model_dict = model_settings()
def fit_and_plot(pulsar):
#for pulsar in pulsars:
scale_figure = 0.9
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5.5*scale_figure,4*scale_figure))
# if pulsar != "J1136+1551":
# continue
# if os.path.exists(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/{pulsar}.rst"):
# continue
# Grab all rows with pulsar then grab their fluxes
pulsar_df = df.loc[df['Pulsar'] == pulsar]
mwa_freqs = []
mwa_fluxs = []
mwa_flux_errors = []
pulsar_plots = []
models = None
pre_models = None
estimate_flux = ""
estimate_flux_err = ""
for index, row in pulsar_df.iterrows():
if row["Plot location"] == "" or isinstance(row["Plot location"], float):
continue
if not np.isnan(row['Flux Density (mJy)']) and row['Flux Density (mJy)'] != 0:
mwa_freqs.append(154.24)
mwa_fluxs.append(row['Flux Density (mJy)'])
mwa_flux_errors.append(row['Flux Density Uncertainty (mJy)'])
# Move files
on_pulse_fit = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/on_pulse_plots/{row['ObservationID']}_{pulsar}_*_bins_gaussian_components.png")
if len(on_pulse_fit) == 0:
on_pulse_fit = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../survey_paper/{row['ObservationID']}_{pulsar}_*_bins_gaussian_components.png")
if len(on_pulse_fit) == 0:
on_pulse_fit = [""]
else:
#os.rename(on_pulse_fit[0], f"{os.path.dirname(os.path.realpath(__file__))}/../docs/on_pulse_plots/{on_pulse_fit[0]}")
shutil.copyfile(on_pulse_fit[0], f"{os.path.dirname(os.path.realpath(__file__))}/../docs/on_pulse_plots/{os.path.basename(on_pulse_fit[0])}")
on_pulse_fit = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/on_pulse_plots/{row['ObservationID']}_{pulsar}_*_bins_gaussian_components.png")
if row["Plot location"].endswith("ps"):
basename = row["Plot location"].split("/")[-1][:-2]
png_basename = f"{basename}png"
detection_plot = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename}")
if len(detection_plot) == 0:
# doesn't exist so make a png
print(f"gs -sDEVICE=eps2write -dSAFER -dBATCH -dNOPAUSE -dEPSCrop -r600 -sDEVICE=pngalpha -sOutputFile={os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename} {row['Plot location']}")
os.system(f"gs -sDEVICE=eps2write -dSAFER -dBATCH -dNOPAUSE -dEPSCrop -r600 -sDEVICE=pngalpha -sOutputFile={os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename} {row['Plot location']}")
img = Image.open(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename}")
# rotate by 90 degrees
rot_img = img.transpose(Image.ROTATE_270)
rot_img.save(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename}")
detection_plot = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{png_basename}")
else:
basename = os.path.basename(row["Plot location"])
detection_plot = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{basename}")
if len(detection_plot) == 0:
# cp
print(row["Plot location"])
shutil.copyfile(row["Plot location"], f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{os.path.basename(row['Plot location'])}")
detection_plot = glob.glob(f"{os.path.dirname(os.path.realpath(__file__))}/../docs/detection_plots/{basename}")
pulsar_plots.append((detection_plot[0], on_pulse_fit[0]))
freq_all, band_all, flux_all, flux_err_all, ref_all = cat_list[pulsar]
freqs = freq_all + [154.24]
fit_range = (np.log10(min(freqs)), np.log10(max(freqs)))
pre_pl_a = None
pre_pl_u_a = None
pre_pl_c = None
pre_pl_u_c = None
pre_bpl_vb = None
pre_bpl_u_vb = None
pre_bpl_a1 = None
pre_bpl_u_a1 = None
pre_bpl_a2 = None
pre_bpl_u_a2 = None
pre_bpl_c = None
pre_bpl_u_c = None
pre_hfco_vc = None
pre_hfco_u_vc = None
pre_hfco_c = None
pre_hfco_u_c = None
pre_lfto_vpeak = None
pre_lfto_u_vpeak = None
pre_lfto_a = None
pre_lfto_u_a = None
pre_lfto_c = None
pre_lfto_u_c = None
pre_lfto_beta = None
pre_lfto_u_beta = None
pre_dtos_vpeak = None
pre_dtos_u_vpeak = None
pre_dtos_vc = None
pre_dtos_u_vc = None
pre_dtos_a = None
pre_dtos_u_a = None
pre_dtos_c = None
pre_dtos_u_c = None
pre_dtos_beta = None
pre_dtos_u_beta = None
pl_a = None
pl_u_a = None
pl_c = None
pl_u_c = None
bpl_vb = None
bpl_u_vb = None
bpl_a1 = None
bpl_u_a1 = None
bpl_a2 = None
bpl_u_a2 = None
bpl_c = None
bpl_u_c = None
hfco_vc = None
hfco_u_vc = None
hfco_c = None
hfco_u_c = None
lfto_vpeak = None
lfto_u_vpeak = None
lfto_a = None
lfto_u_a = None
lfto_c = None
lfto_u_c = None
lfto_beta = None
lfto_u_beta = None
dtos_vpeak = None
dtos_u_vpeak = None
dtos_vc = None
dtos_u_vc = None
dtos_a = None
dtos_u_a = None
dtos_c = None
dtos_u_c = None
dtos_beta = None
dtos_u_beta = None
if len(freq_all) > 0:
pre_models, pre_iminuit_results, pre_fit_infos, pre_p_best, pre_p_catagory = find_best_spectral_fit(
pulsar, freq_all, band_all, flux_all, flux_err_all, ref_all,
plot_best=True, alternate_style=True, axis=ax, secondary_fit=True
)
else:
pre_models = pre_iminuit_results = pre_fit_infos = pre_p_best = pre_p_catagory = None
if pre_models is not None:
estimate_flux, estimate_flux_err = estimate_flux_density(154.24, pre_models, pre_iminuit_results)
# record model specific bits
if pre_models == "simple_power_law":
pre_pl_a = pre_iminuit_results.values["a"]
pre_pl_u_a = pre_iminuit_results.errors["a"]
pre_pl_c = pre_iminuit_results.values["c"]
pre_pl_u_c = pre_iminuit_results.errors["c"]
elif pre_models == "broken_power_law":
#vb, a1, a2, b
pre_bpl_vb = pre_iminuit_results.values["vb"]
pre_bpl_u_vb = pre_iminuit_results.errors["vb"]
pre_bpl_a1 = pre_iminuit_results.values["a1"]
pre_bpl_u_a1 = pre_iminuit_results.errors["a1"]
pre_bpl_a2 = pre_iminuit_results.values["a2"]
pre_bpl_u_a2 = pre_iminuit_results.errors["a2"]
pre_bpl_c = pre_iminuit_results.values["c"]
pre_bpl_u_c = pre_iminuit_results.errors["c"]
elif pre_models == "high_frequency_cut_off_power_law":
pre_hfco_vc = pre_iminuit_results.values["vc"]
pre_hfco_u_vc = pre_iminuit_results.errors["vc"]
pre_hfco_c = pre_iminuit_results.values["c"]
pre_hfco_u_c = pre_iminuit_results.errors["c"]
elif pre_models == "low_frequency_turn_over_power_law":
# vc, a, b, beta
pre_lfto_vpeak = pre_iminuit_results.values["vpeak"]
pre_lfto_u_vpeak = pre_iminuit_results.errors["vpeak"]
pre_lfto_a = pre_iminuit_results.values["a"]
pre_lfto_u_a = pre_iminuit_results.errors["a"]
pre_lfto_c = pre_iminuit_results.values["c"]
pre_lfto_u_c = pre_iminuit_results.errors["c"]
pre_lfto_beta = pre_iminuit_results.values["beta"]
pre_lfto_u_beta = pre_iminuit_results.errors["beta"]
elif pre_models == "double_turn_over_spectrum":
# vc, a, b, beta
pre_dtos_vc = pre_iminuit_results.values["vc"]
pre_dtos_u_vc = pre_iminuit_results.errors["vc"]
pre_dtos_vc = pre_iminuit_results.values["vpeak"]
pre_dtos_u_vc = pre_iminuit_results.errors["vpeak"]
pre_dtos_a = pre_iminuit_results.values["a"]
pre_dtos_u_a = pre_iminuit_results.errors["a"]
pre_dtos_c = pre_iminuit_results.values["c"]
pre_dtos_u_c = pre_iminuit_results.errors["c"]
pre_dtos_beta = pre_iminuit_results.values["beta"]
pre_dtos_u_beta = pre_iminuit_results.errors["beta"]
# calc offset
# find obsid using for this pulsar
#pulsar_obsid_df = pulsar_obsid_df[pulsar_obsid_df['Jname'].str.contains(pulsar)]
this_df = pulsar_obsid_df.loc[pulsar == pulsar_obsid_df['Jname']].reset_index()
obsid = this_df['ObsID'][0]
query_id = list(query['PSRJ']).index(pulsar)
# obsid, ra, dec, dura, [xdelays, ydelays], centrefreq, channels = get_common_obs_metadata(obsid)
# print("coords")
# print(ra,dec)
# obs_beam = SkyCoord(ra, dec, unit=(u.deg,u.deg))
# query_id = list(query['PSRJ']).index(pulsar)
# print(query["RAJ"][query_id], query["DECJ"][query_id])
# pulsar_coord = SkyCoord(query["RAJ"][query_id], query["DECJ"][query_id], unit=(u.hourangle,u.deg))
# offset = pulsar_coord.separation(obs_beam).deg
offset = None
print(f"\n{pulsar}")
if len(mwa_fluxs) == 0:
print(f"No fluxes")
else:
        # Adjust uncertainty to take into account scintillation and the number of detections
# Average data
S_mean = np.mean(mwa_fluxs)
u_S_mean = np.sqrt(np.sum(np.array(mwa_flux_errors)**2))
# using 728 MHz values from table 4
a = -0.47
b = 0.21
d0 = 200
d = float(row['ATNF DM'])
        # Equation 18 modulation index
        m_r_v = b * (d/d0)**a
# Equation 4
u_scint = m_r_v * S_mean
# Equation 2 robust standard deviation computed using the interquartile range
std_r_v = 0.9183 * (np.quantile(mwa_fluxs, 0.75) - np.quantile(mwa_fluxs, 0.25))
N = len(mwa_fluxs)
# Equation 3
u_S = np.sqrt( u_S_mean**2 + std_r_v**2/N + (6/(5*N) - 1/5)*u_scint**2)
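        # Worked example (hypothetical numbers): for DM = 50 pc cm^-3 the
        # modulation index is m_r_v = 0.21 * (50/200)**-0.47 ~= 0.40, so a
        # 100 mJy pulsar carries a ~40 mJy scintillation term inside u_S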
#print(u_S, mwa_flux_errors)
# freq_all = np.array(mwa_freqs + cat_list[pulsar][0])
# flux_all = np.array(mwa_fluxs + cat_list[pulsar][1])
# flux_err_all = np.array(mwa_flux_errors + cat_list[pulsar][2])
# ref_all = np.array(["SMART"]*len(mwa_freqs) + cat_list[pulsar][3])
freq_all = np.array([154.24] + cat_list[pulsar][0])
band_all = np.array([30.72] + cat_list[pulsar][1])
flux_all = np.array([S_mean] + cat_list[pulsar][2])
flux_err_all = np.array([u_S] + cat_list[pulsar][3])
ref_all = np.array(["SMART"] + cat_list[pulsar][4])
#for freq, flux, flux_err in zip(freq_all, flux_all, flux_err_all):
#print(freq, flux, flux_err)
models, iminuit_results, fit_infos, p_best, p_catagory = find_best_spectral_fit(
pulsar, freq_all, band_all, flux_all, flux_err_all, ref_all,
plot_best=True, alternate_style=True, axis=ax
)
plt.tight_layout(pad=2.5)
#plt.savefig(f"{pulsar}_fit.pdf", bbox_inches='tight', dpi=300)
plt.savefig(f"{pulsar}_fit.png", bbox_inches='tight', dpi=300)
models, iminuit_results, fit_infos, p_best, p_catagory = find_best_spectral_fit(pulsar, freq_all, band_all, flux_all, flux_err_all, ref_all, plot_compare=True)
if models is not None:
if len(models) > 0:
shutil.move(f"{pulsar}_fit.png", f"{os.path.dirname(os.path.realpath(__file__))}/../docs/best_fits/{pulsar}_fit.png")
print(f"{pulsar}_comparison_fit.png", f"{os.path.dirname(os.path.realpath(__file__))}/../docs/comparison_fits/{pulsar}_comparison_fit.png")
shutil.move(f"{pulsar}_comparison_fit.png", f"{os.path.dirname(os.path.realpath(__file__))}/../docs/comparison_fits/{pulsar}_comparison_fit.png")
# Record data
results_record.append((pulsar, row['ATNF DM'], models, iminuit_results, fit_infos, p_best, p_catagory, len(mwa_fluxs), S_mean, u_S, u_S_mean, u_scint, m_r_v))
# record model specific bits
if models == "simple_power_law":
pl_a = iminuit_results.values["a"]
pl_u_a = iminuit_results.errors["a"]
pl_c = iminuit_results.values["c"]
pl_u_c = iminuit_results.errors["c"]
elif models == "broken_power_law":
#vb, a1, a2, b
bpl_vb = iminuit_results.values["vb"]
bpl_u_vb = iminuit_results.errors["vb"]
bpl_a1 = iminuit_results.values["a1"]
bpl_u_a1 = iminuit_results.errors["a1"]
bpl_a2 = iminuit_results.values["a2"]
bpl_u_a2 = iminuit_results.errors["a2"]
bpl_c = iminuit_results.values["c"]
bpl_u_c = iminuit_results.errors["c"]
elif models == "high_frequency_cut_off_power_law":
hfco_vc = iminuit_results.values["vc"]
hfco_u_vc = iminuit_results.errors["vc"]
hfco_c = iminuit_results.values["c"]
hfco_u_c = iminuit_results.errors["c"]
elif models == "low_frequency_turn_over_power_law":
# vc, a, b, beta
lfto_vpeak = iminuit_results.values["vpeak"]
lfto_u_vpeak = iminuit_results.errors["vpeak"]
lfto_a = iminuit_results.values["a"]
lfto_u_a = iminuit_results.errors["a"]
lfto_c = iminuit_results.values["c"]
lfto_u_c = iminuit_results.errors["c"]
lfto_beta = iminuit_results.values["beta"]
lfto_u_beta = iminuit_results.errors["beta"]
elif models == "double_turn_over_spectrum":
# vc, a, b, beta
dtos_vc = iminuit_results.values["vc"]
dtos_u_vc = iminuit_results.errors["vc"]
dtos_vc = iminuit_results.values["vpeak"]
dtos_u_vc = iminuit_results.errors["vpeak"]
dtos_a = iminuit_results.values["a"]
dtos_u_a = iminuit_results.errors["a"]
dtos_c = iminuit_results.values["c"]
dtos_u_c = iminuit_results.errors["c"]
dtos_beta = iminuit_results.values["beta"]
dtos_u_beta = iminuit_results.errors["beta"]
if len(cat_list[pulsar][0]) == 0:
min_freq = None
max_freq = None
else:
min_freq = min(cat_list[pulsar][0])
max_freq = max(cat_list[pulsar][0])
with open(f'{os.path.dirname(os.path.realpath(__file__))}/../docs/{pulsar}.rst', 'w') as file:
file.write(f".. _{pulsar}:\n{pulsar}\n")
file.write("="*len(pulsar))
# Fit with out data
if models is not None:
print(iminuit_results.values)
file.write(f'''
Best Fit
--------
.. image:: best_fits/{pulsar}_fit.png
:width: 800
.. csv-table:: {pulsar} fit results
''')
header_str = ' :header: "model",'
data_str = f' "{models}",'
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
header_str += f'"{p} (MHz)",'
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}",'
else:
header_str += f'"{p}",'
data_str += f'"{v:.2f}±{e:.2f}",'
file.write(f'''{header_str[:-1]}
{data_str[:-1]}''')
else:
file.write(f'''
Best Fit
--------
Only {len(mwa_freqs)} MWA data and {len(cat_list[pulsar][0])} cat data available
''')
# Fit without our data
if pre_models is not None:
file.write(f'''
Fit Before MWA
--------------
.. csv-table:: {pulsar} before fit results
''')
header_str = ' :header: "model",'
data_str = f' "{pre_models}",'
for p, v, e in zip(pre_iminuit_results.parameters, pre_iminuit_results.values, pre_iminuit_results.errors):
if p.startswith('v'):
header_str += f'"{p} (MHz)",'
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}",'
else:
header_str += f'"{p}",'
data_str += f'"{v:.2f}±{e:.2f}",'
file.write(f'''{header_str[:-1]}
{data_str[:-1]}''')
file.write(f'''
Flux Density Results
--------------------
.. csv-table:: {pulsar} flux density total results
:header: "N obs", "Flux Density (mJy)", "u_S_mean", "u_scint", "m_r_v"
"{len(mwa_fluxs)}", "{S_mean:.1f}±{u_S:.1f}", "{u_S_mean:.1f}", "{u_scint:.1f}", "{m_r_v:.3f}"
.. csv-table:: {pulsar} flux density individual results
:header: "ObsID", "Flux Density (mJy)"
''')
for index, row in pulsar_df.iterrows():
file.write(f''' "{row['ObservationID']}", "{row['Flux Density (mJy)']:.1f}±{row['Flux Density Uncertainty (mJy)']:.1f}"\n''')
# Comparison fit
if models is not None:
file.write(f'''
Comparison Fit
--------------
.. image:: comparison_fits/{pulsar}_comparison_fit.png
:width: 800
''')
# Detection plots
file.write(f'''
Detection Plots
---------------
''')
for detection_plot, on_pulse_fit in pulsar_plots:
file.write(f'''
.. image:: detection_plots/{os.path.basename(detection_plot)}
:width: 800
.. image:: on_pulse_plots/{os.path.basename(on_pulse_fit)}
:width: 800''')
# Record data for csv
#output_df = output_df.append({
return {
"Pulsar":pulsar,
"ObservationID":obsid,
"ATNF Period (s)": row['ATNF Period (s)'],
"ATNF DM": row['ATNF DM'],
"ATNF B_surf (G)":query["BSURF"][query_id],
"ATNF E_dot (ergs/s)":query["EDOT"][query_id],
"Offset (degrees)":offset,
"Flux Density (mJy)":S_mean,
"Flux Density Uncertainty (mJy)":u_S_mean,
"Flux Density Scintilation Uncertainty (mJy)":u_S,
"Estimated Flux Density (mJy)":estimate_flux,
"Estimated Flux Density Uncertainty (mJy)":estimate_flux_err,
"Model":models,
"Model before MWA":pre_models,
"Probability Best":p_best,
"Min freq before MWA (MHz)":min_freq,
"Max freq before MWA (MHz)":max_freq,
"N data flux": len(flux_all),
"pl_a" : pl_a ,
"pl_u_a" : pl_u_a ,
"pl_c" : pl_c ,
"pl_u_c" : pl_u_c ,
"bpl_vb" : bpl_vb ,
"bpl_u_vb" : bpl_u_vb ,
"bpl_a1" : bpl_a1 ,
"bpl_u_a1" : bpl_u_a1 ,
"bpl_a2" : bpl_a2 ,
"bpl_u_a2" : bpl_u_a2 ,
"bpl_c" : bpl_c ,
"bpl_u_c" : bpl_u_c ,
"hfco_vc" : hfco_vc ,
"hfco_u_vc" : hfco_u_vc ,
"hfco_c" : hfco_c ,
"hfco_u_c" : hfco_u_c ,
"lfto_vpeak" : lfto_vpeak ,
"lfto_u_vpeak" : lfto_u_vpeak,
"lfto_a" : lfto_a ,
"lfto_u_a" : lfto_u_a ,
"lfto_c" : lfto_c ,
"lfto_u_c" : lfto_u_c ,
"lfto_beta" : lfto_beta,
"lfto_u_beta" : lfto_u_beta,
"dtos_vpeak" : dtos_vpeak ,
"dtos_u_vpeak": dtos_u_vpeak,
"dtos_vc" : dtos_vc ,
"dtos_u_vc" : dtos_u_vc,
"dtos_a" : dtos_a ,
"dtos_u_a" : dtos_u_a ,
"dtos_c" : dtos_c ,
"dtos_u_c" : dtos_u_c ,
"dtos_beta" : dtos_beta,
"dtos_u_beta" : dtos_u_beta,
"pre_pl_a" : pre_pl_a ,
"pre_pl_u_a" : pre_pl_u_a ,
"pre_pl_c" : pre_pl_c ,
"pre_pl_u_c" : pre_pl_u_c ,
"pre_bpl_vb" : pre_bpl_vb ,
"pre_bpl_u_vb" : pre_bpl_u_vb ,
"pre_bpl_a1" : pre_bpl_a1 ,
"pre_bpl_u_a1" : pre_bpl_u_a1 ,
"pre_bpl_a2" : pre_bpl_a2 ,
"pre_bpl_u_a2" : pre_bpl_u_a2 ,
"pre_bpl_c" : pre_bpl_c ,
"pre_bpl_u_c" : pre_bpl_u_c ,
"pre_hfco_vc" : pre_hfco_vc ,
"pre_hfco_u_vc" : pre_hfco_u_vc ,
"pre_hfco_c" : pre_hfco_c ,
"pre_hfco_u_c" : pre_hfco_u_c ,
"pre_lfto_vpeak" : pre_lfto_vpeak ,
"pre_lfto_u_vpeak" : pre_lfto_u_vpeak ,
"pre_lfto_a" : pre_lfto_a ,
"pre_lfto_u_a" : pre_lfto_u_a ,
"pre_lfto_c" : pre_lfto_c ,
"pre_lfto_u_c" : pre_lfto_u_c ,
"pre_lfto_beta" : pre_lfto_beta,
"pre_lfto_u_beta" : pre_lfto_u_beta,
"pre_dtos_vpeak" : pre_dtos_vpeak ,
"pre_dtos_u_vpeak": pre_dtos_u_vpeak,
"pre_dtos_vc" : pre_dtos_vc ,
"pre_dtos_u_vc" : pre_dtos_u_vc,
"pre_dtos_a" : pre_dtos_a ,
"pre_dtos_u_a" : pre_dtos_u_a ,
"pre_dtos_c" : pre_dtos_c ,
"pre_dtos_u_c" : pre_dtos_u_c ,
"pre_dtos_beta" : pre_dtos_beta,
"pre_dtos_u_beta" : pre_dtos_u_beta,
}
#, ignore_index=True)
pbar = tqdm(pulsars)
# freeze params/function as object
fc_ = partial(fit_and_plot)
# set number of processes
p = mp.Pool(8)
# run the fits in parallel; imap preserves input order, and wrapping the
# input iterable in tqdm means the bar advances as jobs are submitted
results = list(p.imap(fc_, pbar))
#print(results)
# close out and join processes
p.close()
p.join()
# defensively skip any entries that came back empty before building the table
output_df = pd.DataFrame([r for r in results if r is not None])
output_df.to_csv('SMART_pulsars_flux_update.csv', index=False)
# Record summary results on homepage
with open(f'{os.path.dirname(os.path.realpath(__file__))}/../docs/index.rst', 'w') as file:
file.write(f'''.. pulsar_spectra documentation master file, created by
sphinx-quickstart on Sat Feb 12 11:03:47 2022.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to pulsar_spectra's documentation!
==========================================
Flux Density Results
--------------------
.. csv-table::
:header: "Pulsar", "DM", "N obs", "Flux Density (mJy)", "u_S_mean", "u_scint", "m_r_v"
''')
# setp up other tables while I'm looping
simple_power_law = []
broken_power_law = []
log_parabolic_spectrum = []
high_frequency_cut_off_power_law = []
low_frequency_turn_over_power_law = []
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory, n_obs, S_mean, u_S, u_S_mean, u_scint, m_r_v in results_record:
file.write(f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", "{n_obs}", "{S_mean:.1f}±{u_S:.1f}", "{u_S_mean:.1f}", "{u_scint:.1f}", "{m_r_v:.3f}"\n')
#sort
if models == "simple_power_law":
simple_power_law.append((pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory))
if models == "broken_power_law":
broken_power_law.append((pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory))
if models == "log_parabolic_spectrum":
log_parabolic_spectrum.append((pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory))
if models == "high_frequency_cut_off_power_law":
high_frequency_cut_off_power_law.append((pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory))
if models == "low_frequency_turn_over_power_law":
low_frequency_turn_over_power_law.append((pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory))
file.write(f'''
Single Power Law Results
------------------------
.. csv-table::
:header: "Pulsar", "DM", "a", "b"
''')
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory in simple_power_law:
data_str = f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", '
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}", '
else:
data_str += f'"{v:.2f}±{e:.2f}", '
file.write(f'{data_str[:-2]}\n')
file.write(f'''
Broken Power Law Results
------------------------
.. csv-table::
:header: "Pulsar", "DM", "vb (MHz)", "a1", "a2", "b"
''')
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory in broken_power_law:
data_str = f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", '
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}", '
else:
data_str += f'"{v:.2f}±{e:.2f}", '
file.write(f'{data_str[:-2]}\n')
file.write(f'''
Log-parabolic spectrum Results
------------------------------
.. csv-table::
:header: "Pulsar", "DM", "a", "b", "c"
''')
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory in log_parabolic_spectrum:
data_str = f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", '
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}", '
else:
data_str += f'"{v:.2f}±{e:.2f}", '
file.write(f'{data_str[:-2]}\n')
file.write(f'''
Power law with high-frequency cut-off Results
---------------------------------------------
.. csv-table::
:header: "Pulsar", "DM", "vc (MHz)", "a", "b"
''')
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory in high_frequency_cut_off_power_law:
data_str = f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", '
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}", '
else:
data_str += f'"{v:.2f}±{e:.2f}", '
file.write(f'{data_str[:-2]}\n')
file.write(f'''
Power law with low-frequency turn-over Results
----------------------------------------------
.. csv-table::
:header: "Pulsar", "DM", "vc (MHz)", "a", "b", "beta"
''')
for pulsar, dm, models, iminuit_results, fit_infos, p_best, p_catagory in low_frequency_turn_over_power_law:
data_str = f' ":ref:`{pulsar}<{pulsar}>`", "{dm}", '
for p, v, e in zip(iminuit_results.parameters, iminuit_results.values, iminuit_results.errors):
if p.startswith('v'):
data_str += f'"{int(v/1e6):d}±{int(e/1e6):d}", '
else:
data_str += f'"{v:.2f}±{e:.2f}", '
file.write(f'{data_str[:-2]}\n')
file.write(f'''
.. toctree::
:maxdepth: 1
:caption: Pulsar Fit Results:
:glob:
J*
''')
| UTF-8 | Python | false | false | 31,285 | py | 187 | fit_pulsar_spectral_models_and_doc.py | 57 | 0.515799 | 0.506684 | 0 | 775 | 39.345806 | 232 |