Dataset schema: each record below consists of one pipe-separated metadata row (wrapped across lines), the file content, and a trailing per-file stats row. Columns, in order:

| column | dtype / range |
|---|---|
| repo_name | string, length 7-111 |
| __id__ | int64, 16.6k-19,705B |
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 5-151 |
| content_id | string, length 40 |
| detected_licenses | list |
| license_type | string, 2 classes |
| repo_url | string, length 26-130 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, length 4-42 |
| visit_date | timestamp[ns] |
| revision_date | timestamp[ns] |
| committer_date | timestamp[ns] |
| github_id | int64, 14.6k-687M, nullable |
| star_events_count | int64, 0-209k |
| fork_events_count | int64, 0-110k |
| gha_license_id | string, 12 classes |
| gha_fork | bool, 2 classes |
| gha_event_created_at | timestamp[ns] |
| gha_created_at | timestamp[ns] |
| gha_updated_at | timestamp[ns] |
| gha_pushed_at | timestamp[ns] |
| gha_size | int64, 0-10.2M, nullable |
| gha_stargazers_count | int32, 0-178k, nullable |
| gha_forks_count | int32, 0-88.9k, nullable |
| gha_open_issues_count | int32, 0-2.72k, nullable |
| gha_language | string, length 1-16, nullable |
| gha_archived | bool, 1 class |
| gha_disabled | bool, 1 class |
| content | string, length 10-2.95M |
| src_encoding | string, 5 classes |
| language | string, 1 class |
| is_vendor | bool, 2 classes |
| is_generated | bool, 2 classes |
| length_bytes | int64, 10-2.95M |
| extension | string, 19 classes |
| num_repo_files | int64, 1-202k |
| filename | string, length 4-112 |
| num_lang_files | int64, 1-202k |
| alphanum_fraction | float64, 0.26-0.89 |
| alpha_fraction | float64, 0.2-0.89 |
| hex_fraction | float64, 0-0.09 |
| num_lines | int32, 1-93.6k |
| avg_line_length | float64, 4.57-103 |
| max_line_length | int64, 7-931 |
DheerendraRathor/idc | 8,924,942,070,082 | 1fe1349c965e7556462473fb6f1ea1dc34f94a0b | d09f5de466fcb1cc395fee879598f4e86c855804 | /idc/blog/forms.py | b9b970574487b60125678495385d480d47f7e504 | []
| no_license | https://github.com/DheerendraRathor/idc | 78f991eb29bac9506294aaa78a6b8010371c38ee | 7db026ef0a24eda4a5d93d02d77c08a2d833d309 | refs/heads/master | 2020-04-14T15:04:18.980568 | 2016-04-14T15:34:11 | 2016-04-14T15:34:11 | 50,912,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

from django import forms
class CommentForm(forms.Form):
comment = forms.CharField(
widget=forms.Textarea(
attrs={
'rows': 3,
}
)
    )

| UTF-8 | Python | false | false | 196 | py | 37 | forms.py | 25 | 0.489796 | 0.484694 | 0 | 11 | 16.909091 | 30 |
gkhouri/the-technocopia-project | 11,269,994,224,079 | 6c2b0905f36a7bff39bb346a32ac3025050184fa | bf4518f1174daf1b16d7995717d9e8c26af4ffb1 | /BowlerFreecad/3DPrint/NeuronRobotics/SERIAL_GUI/myWidget_Ui.py | f9663069c9c825137b10bf2f0aa87fa0234dd462 | []
| no_license | https://github.com/gkhouri/the-technocopia-project | 531084d4401ef477bfb743cda1ba51060d9e6f12 | 356cccf13204320db18d320ae5127e162d7df096 | refs/heads/master | 2021-01-01T16:06:46.343087 | 2014-01-17T06:43:14 | 2014-01-17T06:43:14 | 32,092,044 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

import PyQt4
from PyQt4 import QtGui,QtCore
import FreeCAD
import sys
import os
class myWidget_Ui(object):
def setupUi(self, myWidget):
myWidget.setObjectName("my Nice New Widget")
myWidget.resize(QtCore.QSize(QtCore.QRect(0,0,327,421).size()).expandedTo(myWidget.minimumSizeHint())) # sets size of the widget
self.DEVICE_CBOX = QtGui.QComboBox(myWidget)
self.DEVICE_CBOX.setGeometry(QtCore.QRect(20, 140, 111, 31))
self.DEVICE_CBOX.setObjectName(("DEVICE_CBOX"))
self.MainLogo = QtGui.QLabel(myWidget)
self.MainLogo.setGeometry(QtCore.QRect(90, 0, 111, 121))
self.MainLogo.setText((""))
self.MainLogo.setPixmap(QtGui.QPixmap((":/me/test/hat.png")))
self.MainLogo.setObjectName(("MainLogo"))
self.SEND_PBUTTON = QtGui.QPushButton(myWidget)
self.SEND_PBUTTON.setGeometry(QtCore.QRect(160, 380, 87, 27))
self.SEND_PBUTTON.setObjectName(("SEND_PBUTTON"))
self.DEVICE_LABEL = QtGui.QLabel(myWidget)
self.DEVICE_LABEL.setGeometry(QtCore.QRect(20, 120, 111, 20))
self.DEVICE_LABEL.setObjectName(("DEVICE_LABEL"))
self.CLOSE_PBUTTON = QtGui.QPushButton(myWidget)
self.CLOSE_PBUTTON.setGeometry(QtCore.QRect(50, 380, 87, 27))
self.CLOSE_PBUTTON.setObjectName(("CANCEL_PBUTTON"))
self.MATERIAL_CBOX = QtGui.QComboBox(myWidget)
self.MATERIAL_CBOX.setGeometry(QtCore.QRect(20, 260, 61, 31))
self.MATERIAL_CBOX.setObjectName(("MATERIAL_CBOX"))
self.MATERIAL_LABEL = QtGui.QLabel(myWidget)
self.MATERIAL_LABEL.setGeometry(QtCore.QRect(20, 240, 61, 20))
self.MATERIAL_LABEL.setObjectName(("MATERIAL_LABEL"))
self.FILAMENT_LABEL = QtGui.QLabel(myWidget)
self.FILAMENT_LABEL.setGeometry(QtCore.QRect(20, 300, 61, 20))
self.FILAMENT_LABEL.setObjectName(("FILAMENT_LABEL"))
self.FILAMENT_CBOX = QtGui.QComboBox(myWidget)
self.FILAMENT_CBOX.setGeometry(QtCore.QRect(20, 320, 71, 31))
self.FILAMENT_CBOX.setObjectName(("FILAMENT_CBOX"))
self.BAUD_LABEL = QtGui.QLabel(myWidget)
self.BAUD_LABEL.setGeometry(QtCore.QRect(20, 180, 81, 20))
self.BAUD_LABEL.setObjectName(("BAUD_LABEL"))
self.BAUD_CBOX = QtGui.QComboBox(myWidget)
self.BAUD_CBOX.setGeometry(QtCore.QRect(20, 200, 81, 31))
self.BAUD_CBOX.setObjectName(("BAUD_CBOX"))
self.SUPPORTS_CHECKBOX = QtGui.QCheckBox(myWidget)
self.SUPPORTS_CHECKBOX.setGeometry(QtCore.QRect(160, 320, 151, 22))
self.SUPPORTS_CHECKBOX.setObjectName(("SUPPORTS_CHECKBOX"))
self.LAYER_TEXTBOX = QtGui.QTextEdit(myWidget)
self.LAYER_TEXTBOX.setGeometry(QtCore.QRect(190, 200, 61, 21))
self.LAYER_TEXTBOX.setObjectName(("LAYER_TEXTBOX"))
self.LAYER_LABEL = QtGui.QLabel(myWidget)
self.LAYER_LABEL.setGeometry(QtCore.QRect(160, 180, 81, 17))
self.LAYER_LABEL.setObjectName(("LAYER_LABEL"))
self.mm_LABEL = QtGui.QLabel(myWidget)
self.mm_LABEL.setGeometry(QtCore.QRect(260, 200, 31, 17))
self.mm_LABEL.setObjectName(("mm_LABEL"))
self.RAFTS_CHECKBOX = QtGui.QCheckBox(myWidget)
self.RAFTS_CHECKBOX.setGeometry(QtCore.QRect(160, 340, 89, 22))
self.RAFTS_CHECKBOX.setObjectName(("RAFTS_CHECKBOX"))
self.RAFTS_SBOX = QtGui.QSpinBox(myWidget)
self.RAFTS_SBOX.setEnabled(False)
self.RAFTS_SBOX.setGeometry(QtCore.QRect(220, 340, 52, 27))
self.RAFTS_SBOX.setObjectName(("RAFTS_SBOX"))
self.HEATED_BED_CHECKBOX = QtGui.QCheckBox(myWidget)
self.HEATED_BED_CHECKBOX.setGeometry(QtCore.QRect(160, 300, 101, 22))
self.HEATED_BED_CHECKBOX.setObjectName(("HEATED_BED_CHECKBOX"))
self.FIRMWARE_LABEL = QtGui.QLabel(myWidget)
self.FIRMWARE_LABEL.setGeometry(QtCore.QRect(160, 120, 71, 17))
self.FIRMWARE_LABEL.setObjectName(("FIRMWARE_LABEL"))
self.FIRMWARE_CBOX = QtGui.QComboBox(myWidget)
self.FIRMWARE_CBOX.setGeometry(QtCore.QRect(160, 140, 151, 31))
self.FIRMWARE_CBOX.setObjectName(("FIRMWARE_CBOX"))
self.mm_LABEL_2 = QtGui.QLabel(myWidget)
self.mm_LABEL_2.setGeometry(QtCore.QRect(260, 260, 31, 17))
self.mm_LABEL_2.setObjectName(("mm_LABEL_2"))
self.NOZZLE_TEXTBOX = QtGui.QTextEdit(myWidget)
self.NOZZLE_TEXTBOX.setGeometry(QtCore.QRect(190, 260, 61, 21))
self.NOZZLE_TEXTBOX.setObjectName(("NOZZLE_TEXTBOX"))
self.NOZZLE_LABEL = QtGui.QLabel(myWidget)
self.NOZZLE_LABEL.setGeometry(QtCore.QRect(160, 240, 111, 17))
self.NOZZLE_LABEL.setObjectName(("NOZZLE_LABEL"))
self.REFRESH_PBUTTON = QtGui.QPushButton(myWidget)
self.REFRESH_PBUTTON.setGeometry(QtCore.QRect(70, 110, 51, 27))
self.REFRESH_PBUTTON.setObjectName(("REFRESH_PBUTTON"))
##############################################
######## Device addition code ##############
##############################################
try:
import re
import subprocess
device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
df = subprocess.check_output("ls /dev/ttyUSB*", shell=True)
count = df.count('dev')
index = 0
newlist = []
while (index < count):
endindex = df.index('\n')
newlist.append(df[:endindex])
df = df[endindex + 1:len(df)]
index = index + 1
self.DEVICE_CBOX.addItems(newlist)
except:
try:
import re
import subprocess
device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
df = subprocess.check_output("ls /dev/ttyACM*", shell=True)
count = df.count('dev')
index = 0
newlist = []
while (index < count):
endindex = df.index('\n')
newlist.append(df[:endindex])
df = df[endindex + 1:len(df)]
index = index + 1
self.DEVICE_CBOX.addItems(newlist)
except:
newlist = []
newlist.append("No devices!")
self.DEVICE_CBOX.addItems(newlist)
################################################
################################################
################################################
MATERIAL_LIST = []
MATERIAL_LIST.append('PLA')
MATERIAL_LIST.append('ABS')
self.MATERIAL_CBOX.addItems(MATERIAL_LIST)
FILAMENT_LIST = []
FILAMENT_LIST.append('3mm')
FILAMENT_LIST.append('1.75mm')
self.FILAMENT_CBOX.addItems(FILAMENT_LIST)
BAUD_LIST = []
BAUD_LIST.append('115200')
BAUD_LIST.append('250000')
self.BAUD_CBOX.addItems(BAUD_LIST)
FIRMWARE_LIST = []
FIRMWARE_LIST.append('RepRap (Marlin/Sprinter)')
FIRMWARE_LIST.append('Teacup')
FIRMWARE_LIST.append('MakerBot')
FIRMWARE_LIST.append('Sailfish')
FIRMWARE_LIST.append('Mach3/EMC')
FIRMWARE_LIST.append('No extrusion')
self.FIRMWARE_CBOX.addItems(FIRMWARE_LIST)
self.SEND_PBUTTON.setText("Print!")
self.DEVICE_LABEL.setText("Device:")
self.CLOSE_PBUTTON.setText("Close")
self.MATERIAL_LABEL.setText("Material:")
self.FILAMENT_LABEL.setText("Filament:")
self.BAUD_LABEL.setText("Baudrate:")
self.SUPPORTS_CHECKBOX.setText("Generate Supports")
self.LAYER_TEXTBOX.setText("0.4")
self.LAYER_LABEL.setText("Layer Height:")
self.RAFTS_CHECKBOX.setText("Rafts")
self.HEATED_BED_CHECKBOX.setText("Heated Bed")
self.FIRMWARE_LABEL.setText("Firmware:")
self.mm_LABEL_2.setText("mm")
self.mm_LABEL.setText("mm")
self.NOZZLE_TEXTBOX.setText("0.5")
self.NOZZLE_LABEL.setText("Nozzle Diameter:")
self.REFRESH_PBUTTON.setText("Refresh")
self.REFRESH_PBUTTON.clicked.connect(self.REFRESH_DEVICES)
self.RAFTS_CHECKBOX.clicked.connect(self.on_RAFTS_CHECKBOX_clicked)
self.SEND_PBUTTON.clicked.connect(self.pushButton_clicked)
QtCore.QObject.connect(self.CLOSE_PBUTTON, QtCore.SIGNAL(("clicked()")), myWidget.close)
    def retranslateUi(self, myWidget): # built-in QT function that manages translations of widgets
        myWidget.setWindowTitle(QtGui.QApplication.translate("myWidget", "My Widget", None, QtGui.QApplication.UnicodeUTF8))
        # note: setupUi never creates self.label, so the line below would raise
        # an AttributeError if retranslateUi were ever called
        self.label.setText(QtGui.QApplication.translate("myWidget", "Welcome to my new widget!", None, QtGui.QApplication.UnicodeUTF8))
def on_RAFTS_CHECKBOX_clicked(self):
RAFTS_CHECKBOX_BOOL = self.RAFTS_CHECKBOX.isChecked()
self.RAFTS_SBOX.setEnabled(RAFTS_CHECKBOX_BOOL)
def REFRESH_DEVICES(self):
self.DEVICE_CBOX.clear()
try:
import re
import subprocess
device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
df = subprocess.check_output("ls /dev/ttyUSB*", shell=True)
count = df.count('dev')
index = 0
newlist = []
while (index < count):
endindex = df.index('\n')
newlist.append(df[:endindex])
df = df[endindex + 1:len(df)]
index = index + 1
self.DEVICE_CBOX.addItems(newlist)
except:
try:
import re
import subprocess
device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
df = subprocess.check_output("ls /dev/ttyACM*", shell=True)
count = df.count('dev')
index = 0
newlist = []
while (index < count):
endindex = df.index('\n')
newlist.append(df[:endindex])
df = df[endindex + 1:len(df)]
index = index + 1
self.DEVICE_CBOX.addItems(newlist)
except:
newlist = []
newlist.append("No devices!")
self.DEVICE_CBOX.addItems(newlist)
def pushButton_clicked(self):
FreeCAD.Console.PrintMessage("Creating gcode (may take a while) then sending to printer!\n")
BAUD_INDEX = self.BAUD_CBOX.currentIndex()
if BAUD_INDEX == 0:
BAUD_FILE = '1'
elif BAUD_INDEX == 1:
BAUD_FILE = '2'
MATERIAL_INDEX = self.MATERIAL_CBOX.currentIndex()
if MATERIAL_INDEX == 0:
if (self.HEATED_BED_CHECKBOX.isChecked()):
EXTRUDER_HEAT = '185 --bed-temperature 65'
else:
EXTRUDER_HEAT = '185'
elif MATERIAL_INDEX == 1:
if (self.HEATED_BED_CHECKBOX.isChecked()):
EXTRUDER_HEAT = '230 --bed-temperature 110'
else:
EXTRUDER_HEAT = '230'
FILAMENT_INDEX = self.FILAMENT_CBOX.currentIndex()
if FILAMENT_INDEX == 0:
DIAMETER = '3'
elif FILAMENT_INDEX == 1:
DIAMETER = '1.75'
FIRMWARE_INDEX = (self.FIRMWARE_CBOX.currentIndex())
FIRMWARES = ['reprap','teacup','makerbot','sailfish','mach3','no-extrusion']
        NOZZLE_DIAMETER = self.NOZZLE_TEXTBOX.toPlainText() # QTextEdit has no currentText(); toPlainText() returns its contents
        LAYER_HEIGHT = self.LAYER_TEXTBOX.toPlainText()
if (self.SUPPORTS_CHECKBOX.isChecked()):
SUPPORT = " --support-material"
else:
SUPPORT = ""
if (self.RAFTS_CHECKBOX.isChecked()):
RAFTS_COUNT = str(self.RAFTS_SBOX.value())
else:
RAFTS_COUNT = '0'
if 'No device' in self.DEVICE_CBOX.currentText():
FreeCAD.Console.PrintMessage("Sorry no device selected.\n")
elif '/dev/tty' in self.DEVICE_CBOX.currentText():
slice_it = './src/NeuronRobotics/Slic3r/slic3r.pl --load src/NeuronRobotics/Slic3r/default.ini --filament-diameter '
slice_it += DIAMETER
slice_it += ' --temperature '
slice_it += EXTRUDER_HEAT
slice_it += ' --gcode-flavor '
slice_it += str(FIRMWARES[FIRMWARE_INDEX])
slice_it += ' --nozzle-diameter '
slice_it += NOZZLE_DIAMETER
slice_it += ' --layer-height '
slice_it += LAYER_HEIGHT
slice_it += SUPPORT
slice_it += ' --raft-layers '
slice_it += RAFTS_COUNT
slice_it += ' src/temp.stl'
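            # For illustration (not from the original file): with the defaults
            # selected above, the assembled command looks roughly like
            # ./src/NeuronRobotics/Slic3r/slic3r.pl --load src/NeuronRobotics/Slic3r/default.ini \
            #   --filament-diameter 3 --temperature 185 --gcode-flavor reprap \
            #   --nozzle-diameter 0.5 --layer-height 0.4 --raft-layers 0 src/temp.stl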
# FreeCAD.Console.PrintMessage(slice_it) #what slic3r command we are using
os.system(slice_it)
command = 'python src/NeuronRobotics/SERIAL_GUI/Printrun/printcore_'
command += BAUD_FILE #requires 2 different printcore.py files with separate baudrates defined in each file
command += '.py '
command += str(self.DEVICE_CBOX.currentText())
command += ' src/temp.gcode'
os.system(command)
import rsrc_rc
class plane():
app = QtGui.qApp
FCmw = app.activeWindow()
myNewFreeCADWidget = QtGui.QDockWidget()
myNewFreeCADWidget.ui = myWidget_Ui()
myNewFreeCADWidget.ui.setupUi(myNewFreeCADWidget)
    FCmw.addDockWidget(QtCore.Qt.RightDockWidgetArea,myNewFreeCADWidget)

| UTF-8 | Python | false | false | 11,659 | py | 66 | myWidget_Ui.py | 15 | 0.683506 | 0.655974 | 0 | 328 | 34.54878 | 131 |
Ledoux/ProfessionalSYS | 9,337,258,905,886 | 4d3b51fb440ab4730e554afb04d5e6e223874546 | e73a32283194e334a9d0d7e4abea131b15a25aed | /Install/build/lib/ShareYourSystem/Object/Directer/__init__.py | 6c535be31bea45a12fcef6ddfefdf541c9d78e32 | []
| no_license | https://github.com/Ledoux/ProfessionalSYS | 300c45d524f6eeea98a6b9d77c2b47f49b36663b | 2cb9a3efc1dcc38353a239e624737bdbf4edcfa9 | refs/heads/master | 2017-01-04T12:13:28.294791 | 2014-12-02T10:17:09 | 2014-12-02T10:17:09 | 24,479,051 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

#<ImportModules>
import os
from ShareYourSystem.Functions import Tool
from ShareYourSystem.Classors import Classor,Representer
from ShareYourSystem.Functers import Argumenter
import importlib
BaseModule=importlib.import_module("ShareYourSystem.Object.Writer")
DecorationModule=importlib.import_module("ShareYourSystem.Classors.Classer")
#</ImportModules>
#<DefineLocals>
BaseNameString=Classor.getNameStringWithModuleString(BaseModule.__name__)
BaseClass=getattr(
BaseModule,
Classor.getClassStringWithNameString(BaseNameString)
)
DecorationNameString=Classor.getNameStringWithModuleString(DecorationModule.__name__)
DecorationClass=getattr(
DecorationModule,
Classor.getClassStringWithNameString(DecorationNameString)
)
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class DirecterClass(BaseClass):
#@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.init}]})
def __init__(self,
_DirectingCallbackFunction=None,
_DirectingLiargVariablesList=[],
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
@Argumenter.ArgumenterClass()
def direct(self,_CallbackFunction=None,_LiargVariablesList=None,**_KwargVariablesDict):
#Call the folder method before
self.folder()
#Define the call back function if not already
if self.DirectingCallbackFunction==None:
def test(_LiargVariablesList,_FolderPathString,_DirKeyStringsList):
pass
print(_LiargVariablesList,_FolderPathString,_DirKeyStringsList)
self.DirectingCallbackFunction=test
#Walk
os.path.walk(
self.FolderingPathString,
self.DirectingCallbackFunction,
self.DirectingLiargVariablesList
)
#Return self
return self
#</DefineClass>
#<DefineAttestingFunctions>
def attest_markdown():
	# note: MarkdownerClass is not defined or imported in this module, so
	# calling attest_markdown() raises a NameError as written
	return MarkdownerClass().markdown()
#</DefineAttestingFunctions>
| UTF-8 | Python | false | false | 1,920 | py | 1,017 | __init__.py | 660 | 0.771875 | 0.771875 | 0 | 71 | 26.042254 | 91 |
KangboLu/Data-Structure-and-Algorithms | 12,670,153,571,353 | 34b00f9d51216e8f4109cfbbd1efa00b3f874736 | 93c1a49b80edea6ccf3f2054d30a7c7a8d0d3909 | /Algorithms Techniques/7. Tree/Traversal/traversal.py | 01c227b9f321cfcd5b41fa0b84d02f858868e65c | []
| no_license | https://github.com/KangboLu/Data-Structure-and-Algorithms | 801cad87f4ccee7b8fa9ba84733b61e5aeba8228 | 1b8d7d943c6eddad417b831a3b038b950c62ed77 | refs/heads/master | 2020-03-21T14:28:33.565035 | 2018-11-12T23:09:19 | 2018-11-12T23:09:19 | 138,659,254 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

# a binary tree node
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# print binary tree in preorder: root, left, right
def print_preorder(node):
if node == None: # reached leaf node, go back to prev level
return
print(str(node.data) + " "), # output the current value
print_preorder(node.left) # recurse on left subtree
    print_preorder(node.right) # recurse on right subtree
# print binary tree in inorder: left, root, right
def print_inorder(node):
if node == None: # reached leaf node, go back to prev level
return
print_inorder(node.left) # recurse on left subtree
print(str(node.data) + " "), # output the current value
print_inorder(node.right) # recurse on right subtree
# print binary tree in postorder: left, right, root
def print_postorder(node):
if node == None: # reached leaf node, go back to prev level
return
print_postorder(node.left) # recurse on left subtree
print_postorder(node.right) # recurse on right subtree
print(str(node.data) + " "), # output the value
# ================================
# testing the traversal approaches
# ================================
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
# output binary tree
print("Output the tree's structure:")
print(" 1 ")
print(" / \\ ")
print(" 2 3 ")
print(" / \\ ")
print(" 4 5 ")
# output different traversal approaches' results
print("\n- Preorder: root, left, right")
print_preorder(root)
print("\n- Inorder: left, root, right")
print_inorder(root)
print("\n- Postorder: left, right, root")
print_postorder(root)

| UTF-8 | Python | false | false | 1,694 | py | 84 | traversal.py | 66 | 0.64758 | 0.641677 | 0 | 58 | 28.224138 | 61 |
betepok506/Semantic_segmetation_SSD | 171,798,720,829 | 616a89d3fe688db6d8776f42600cb723a1ceb62a | d58073f69a8881d8ee321c13935dce9063b8846d | /Parse_xml_VOC.py | d568455219e0afb559fef980ae3546ee903a9e5d | []
| no_license | https://github.com/betepok506/Semantic_segmetation_SSD | fd6272d2b4ff324a21ebe8313fb9214f1fbff6b8 | 38a0815f5813a03ad5b5f673509fd4d0d9a6469d | refs/heads/main | 2023-02-03T09:05:40.140492 | 2020-12-18T16:24:25 | 2020-12-18T16:24:25 | 322,639,478 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

from lxml import etree, objectify
import os
import shutil
import cv2 as cv
from PIL import Image
import sys
path_save='D:\\Kyrs\\ssd_300\\ssd_keras\\dataset'
path_folder='D:\Kyrs\ssd_300\ssd_keras\dataset\VOCdevkit'
# path_folder_annotation="D:\Kyrs\ssd_300\ssd_keras\dataset\VOCdevkit\VOC2007\Annotations"
path_dataset='VOCdevkitnew_4classes_512'
classes = {'car','aeroplane','person','dog'}
size=512,512
def parseXML(xmlFile,folder):
"""
    Parse the XML annotation file
"""
with open(xmlFile) as fobj:
xml = fobj.read()
root = etree.fromstring(xml)
image_name = 'None'
fl_save_xml = False
width = 0
height = 0
for appt in root.getchildren():
if appt.tag=='size':
for elem in appt.getchildren():
if elem.tag=='width':
width=int(elem.text)
elem.text=f'{size[0]}'
if elem.tag=='height':
height=int(elem.text)
elem.text=f'{size[1]}'
koef_y = size[0] / width
koef_x = size[1] / height
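    # Worked example of the rescaling (illustrative numbers, not from the
    # original file): x coordinates are scaled by koef_y = 512/width and
    # y coordinates by koef_x = 512/height, so a corner at x=200 in an
    # 800-px-wide image maps to 200 * (512/800) = 128 in the resized image.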
for appt in root.getchildren():
if appt.tag=='filename':
image_name =appt.text
delete_class=False
if appt.tag == 'object':
for elem in appt.getchildren():
if elem.tag=='name' and not(elem.text in classes):
delete_class=True
break
if elem.text in classes:
fl_save_xml=True
if elem.tag=='bndbox':
# if(width==0 or height==0):
# # fl_save_xml=False
# # delete_class = True
# print(xmlFile)
# break
for coord in elem.getchildren():
if coord.tag == 'ymin' or coord.tag == 'ymax':
try:
coord.text = f'{int(int(float(coord.text) * koef_x))}'
except:
print(xmlFile)
if coord.tag == 'xmin' or coord.tag == 'xmax':
try:
coord.text = f'{int(int(float(coord.text) * koef_y))}'
except:
print(xmlFile)
if delete_class==True:
root.remove(appt)
if fl_save_xml==True:
obj_xml = etree.tostring(root, pretty_print=True,
xml_declaration=False)
try:
with open(f"{os.path.join(path_save,path_dataset,folder,'Annotations',os.path.basename(xmlFile))}", "wb") as xml_writer:
xml_writer.write(obj_xml)
except IOError:
pass
else:
image_name = 'None'
return image_name
def check():
path_res_img='D:\Kyrs\ssd_300\ssd_keras\dataset\VOCdevkitnew_4classes\VOC2012\JPEGImages'
path_res_xml='D:\Kyrs\ssd_300\ssd_keras\dataset\VOCdevkitnew_4classes\VOC2012\Annotations'
path_save = 'D:\Kyrs\ssd_300\ssd_keras\dataset\VOCdevkitnew_4classes\VOC2012\\check'
for file_xml in os.listdir(path_res_xml):
with open(os.path.join(path_res_xml,file_xml)) as fobj:
xml = fobj.read()
root = etree.fromstring(xml)
for img_file in os.listdir(path_res_img):
if img_file.split('.')[0]==file_xml.split('.')[0]:
img=cv.imread(path_res_img+'\\'+file_xml.split('.')[0]+'.jpg')
for appt in root.getchildren():
if appt.tag=='object':
for elem in appt.getchildren():
if appt.tag=='object':
if elem.tag=='bndbox':
for coord in elem.getchildren():
# coord.text = f'{int(300/((width-height)*(int(coord.text)-height)))}'
if coord.tag=='ymin':
y_min=int(coord.text)
elif coord.tag=='ymax':
y_max=int(coord.text)
elif coord.tag=='xmin':
x_min=int(coord.text)
elif coord.tag=='xmax':
x_max=int(coord.text)
img=cv.rectangle(img, (x_min,y_min), (x_max,y_max), (0,255,0), 2)
cv.imwrite(f"{os.path.join(path_save,file_xml.split('.')[0]+'.jpg')}",img)
def resize_image(path_sourse,path_dest):
for img_file in os.listdir(path_sourse):
original_image = Image.open(os.path.join(path_sourse,img_file))
resized_image = original_image.resize(size)
resized_image.save(os.path.join(path_dest,img_file))
os.remove(os.path.join(path_sourse,img_file))
def create_folder(name_folder):
try:
os.mkdir(os.path.join(path_save,path_dataset))
except:
pass
try:
os.mkdir(os.path.join(os.path.join(path_save,path_dataset), name_folder))
except:
pass
try:
os.mkdir(os.path.join(path_save,path_dataset,name_folder,'ImageSets'))
except:
pass
# try:
# os.mkdir(os.path.join(path_save, path_dataset, name_folder, 'cash_image'))
# except:
# pass
try:
os.mkdir(os.path.join(path_save,path_dataset,name_folder,'ImageSets','Main'))
except:
pass
try:
os.mkdir(os.path.join(path_save,path_dataset,name_folder,'Annotations'))
except:
pass
try:
os.mkdir(os.path.join(path_save,path_dataset,name_folder,'JPEGImages'))
except:
pass
def create_txt_file(folder):
all_image = set()
type_txt_file=[]
if folder == 'VOC2012':
type_txt_file.append('train')
type_txt_file.append('trainval')
type_txt_file.append('val')
else:
type_txt_file.append('train')
type_txt_file.append('trainval')
type_txt_file.append('val')
type_txt_file.append('test')
for type_file in type_txt_file:
write_image = set()
for name_classes in classes:
file_open=open(os.path.join(path_save,path_dataset,folder,'ImageSets','Main',f'{type_file}.txt'),'a')
for file in os.listdir(os.path.join(path_folder,folder,'ImageSets','Main')):
if name_classes+'_'+type_file+'.txt'==file:
with open(os.path.join(path_folder,folder,'ImageSets','Main',file),'r') as f:
                    for line in f: # renamed from 'str' to avoid shadowing the builtin
                        str_ = line.split(' ')
if (str_[1]!='-1\n' or len(str_)==3) and not(str_[0] in write_image):
file_open.write(f'{str_[0]}\n')
write_image.add(str_[0])
all_image.add(str_[0])
print(f'{len(write_image)} type file {type_file}')
print(f"all image {len(all_image)}")
all_image.clear()
def main(folder):
create_folder(folder)
image_set=set()
path_folder_annotation=os.path.join(path_folder,folder,'Annotations')
for file_xml in os.listdir(path_folder_annotation):
name_image=parseXML(os.path.join(path_folder_annotation,file_xml),folder)
if name_image!='None':
image_set.add(name_image)
print(f'Process copy xml file {folder} finished')
path_folder_image=os.path.join(path_folder,folder,'JPEGImages')
for elem in os.listdir(path_folder_image):
if elem in image_set:
original_image = Image.open(os.path.join(path_folder,folder,'JPEGImages', elem))
resized_image = original_image.resize(size)
resized_image.save(os.path.join(path_save,path_dataset,folder,'JPEGImages', elem))
# shutil.copyfile(os.path.join(path_folder,folder,'JPEGImages',elem),os.path.join(path_save,path_dataset,folder,'cash_image',elem))
# resize_image(os.path.join(path_save,path_dataset,folder,'cash_image'),os.path.join(path_save,path_dataset,folder,'JPEGImages'))
print(f'Process copy image file {folder} finished')
create_txt_file(folder)
print(f'Process create txt file {folder} finished')
# os.rmdir(os.path.join(path_save, path_dataset, folder, 'cash_image'))
if __name__=="__main__":
for folder in os.listdir(path_folder):
main(folder)
| UTF-8 | Python | false | false | 8,360 | py | 7 | Parse_xml_VOC.py | 6 | 0.536215 | 0.526637 | 0 | 214 | 38.03271 | 143 |
otamori/python4linux | 6,863,357,748,806 | d5fb41d235f10b74853387727dbe45fa42ccf86d | 1265be446f94130678be929ed67932ae3c220868 | /aula02/ex07.py | 677d09348db7a8214056dce7f13d82cc0c05eb67 | []
| no_license | https://github.com/otamori/python4linux | d57056ec0ff7c275cfe305b79d4a211899178f03 | 29cbaeb4e184508ec4296a8df85c6232cb236763 | refs/heads/master | 2020-08-29T19:53:07.691299 | 2019-11-09T01:12:29 | 2019-11-09T01:12:29 | 218,153,838 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

dicionario = {'nota1':5.5, 'nota2':7.5,'nota3':4.5}
soma = 0
for elemento in dicionario.values():
soma=soma+elemento
media = soma /len(dicionario)
print(media)
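# worked check (added for clarity): (5.5 + 7.5 + 4.5) / 3 = 17.5 / 3 = 5.83...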
| UTF-8 | Python | false | false | 168 | py | 50 | ex07.py | 47 | 0.672619 | 0.613095 | 0 | 10 | 15.8 | 51 |
elthran/UndyingKingdoms | 7,533,372,662,310 | cc78e8fcdd456bf8537c1c909049b886c553c8ed | 6cafa5f61762da403f77495d39376be88e156918 | /tests/counties/battle_results_test.py | 8d9a180aef49445967839d199c564dbe7187cc05 | []
| no_license | https://github.com/elthran/UndyingKingdoms | b3284e1e4dc0c2a86539195e6794fbc9a843e091 | d6342f591f59ab4f50e95486d660bfb652fdd821 | refs/heads/master | 2022-12-12T18:09:43.373727 | 2019-08-22T22:18:36 | 2019-08-22T22:18:36 | 156,049,332 | 0 | 1 | null | false | 2022-12-08T05:02:33 | 2018-11-04T04:32:07 | 2019-08-22T22:18:57 | 2022-12-08T05:02:32 | 34,560 | 0 | 1 | 59 | Python | false | false |

from app.metadata.metadata import attack_types
from app.models.exports import County
from app.models.exports import User
def test_battle_results(ctx):
user1 = User('user1', 'user1@gmail.com', 'password')
user1.save()
county = County(1, "County1", "Leader1", user1, "Human", "Sir", "Merchant")
county.save()
army = dict(
peasant=50,
soldier=100,
archer=0,
besieger=0,
summon=10,
elite=10,
monster=0
)
user2 = User('user2', 'user2@gmail.com', 'password')
user2.save()
county2 = County(1, "County2", "Leader2", user2, "Human", "Sir", "Merchant")
county2.save()
    # with user 1 and user 2 (and their counties) created above, simulate an attack
result = county.battle_results(army, county2, attack_types[0])
assert result # is non null
| UTF-8 | Python | false | false | 821 | py | 421 | battle_results_test.py | 295 | 0.611449 | 0.570037 | 0 | 30 | 26.366667 | 80 |
romainmartinez/1d-verifications | 19,224,273,626,883 | 60e788dce501cbd11c9b74294495459e7848fa57 | ce64b5edfddf1b2a75b3c084735da23ef819da5e | /src/callbacks.py | db0237654f67a70f5190306158064098a036abbb | [
"Apache-2.0"
]
| permissive | https://github.com/romainmartinez/1d-verifications | 4f1efe07dcb6e79983f7ce1deb1c0143ec21cb08 | 7fe197bb1635be23ae40b9319e81e6644eb75e83 | refs/heads/master | 2021-07-12T19:06:38.561264 | 2020-06-25T19:46:45 | 2020-06-25T19:46:45 | 171,506,790 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from pathlib import Path
import numpy as np
import pandas as pd
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
from plotly import tools
from pyosim import Analogs3dOsim
from .server import app, rof
@app.callback(
output=Output("trials", "data"),
inputs=[
Input("find", "n_clicks_timestamp"),
Input("tag-1", "n_clicks_timestamp"),
Input("tag-2", "n_clicks_timestamp"),
Input("tag-3", "n_clicks_timestamp"),
Input("note", "n_blur_timestamp"),
],
state=[
State("project", "value"),
State("glob", "value"),
State("trials", "data"),
State("current", "data"),
State("note", "value"),
],
)
def set_trials(read, t1, t2, t3, note_submit, project, glob, trials, current, note):
if read:
note_submit = pd.to_datetime(note_submit).timestamp() if note_submit else None
read = pd.to_datetime(read, unit="ms").timestamp() if read else None
t1 = pd.to_datetime(t1, unit="ms").timestamp() if t1 else None
t2 = pd.to_datetime(t2, unit="ms").timestamp() if t2 else None
t3 = pd.to_datetime(t3, unit="ms").timestamp() if t3 else None
btn = np.nanargmax(np.array([read, t1, t2, t3, note_submit], dtype=np.float))
if btn == 0:
print("finding trials...")
out = {
i: {"filename": f"{itrial}", "tag": 0, "note": ""}
for i, itrial in enumerate(Path(project).expanduser().glob(glob))
}
else:
out = trials
if btn == 1:
print("set to 1...")
out[f'{current["id"] - 1}']["tag"] = 1
elif btn == 2:
print("set to 2...")
out[f'{current["id"] - 1}']["tag"] = 2
elif btn == 3:
print("set to 3...")
out[f'{current["id"] - 1}']["tag"] = 3
elif btn == 4:
print("set note...")
out[f'{current["id"] - 1}']["note"] = note
else:
out = {}
return out
@app.callback(
output=Output("df", "data"),
inputs=[Input("read", "n_clicks_timestamp")],
state=[State("trials", "data")],
)
def read_data(_, trials):
out = {}
if trials:
print("reading files...")
ext = trials["1"]["filename"].split(".")[-1]
d = (
pd.concat(
[
getattr(Analogs3dOsim, f"from_{ext}")(trials[i]["filename"])
.time_normalization()
.update_misc({"filename": trials[i]["filename"].split("/")[-1]})
.to_dataframe(add_metadata=["misc"])
for i in trials
]
)
.assign(filename=lambda x: x["filename"].astype("category"))
.reset_index()
)
out = {
"mean": d.groupby("index").mean().to_json(),
"std": d.groupby("index").std().to_json(),
}
return out
@app.callback(output=Output("columns", "options"), inputs=[Input("df", "data")])
def set_dropdown_options(df):
out = (
[{"label": i, "value": i} for i in pd.read_json(df["mean"])]
if df
else [{"label": "", "value": ""}]
)
return out
def read_trials_and_current(df, trials, current):
mu = pd.read_json(df["mean"]).sort_index()
sigma = pd.read_json(df["std"]).sort_index()
ext = trials["1"]["filename"].split(".")[-1]
current_data = (
getattr(Analogs3dOsim, f"from_{ext}")(
trials[f'{current["id"] - 1}']["filename"]
)
.time_normalization()
.to_dataframe()
)
return mu, sigma, current_data
@app.callback(
output=Output("warnings", "children"),
inputs=[Input("current", "data"), Input("columns", "value")],
state=[State("df", "data"), State("trials", "data")],
)
def make_warnings(
current, columns, df, trials, std_threshold=15, rof_threshold=5, rof=rof
):
out = ["#### Warnings", "##### Outliers"]
if df and columns and current and current["id"] > 0:
mu, sigma, current_data = read_trials_and_current(df, trials, current)
above = (current_data > mu + 3 * sigma).sum() / mu.shape[0] * 100
below = (current_data < mu - 3 * sigma).sum() / mu.shape[0] * 100
out.extend(
[
f"- __`{icol}`__ > 3 std for __`{ival:.2f}%`__"
for icol, ival in above.loc[above > std_threshold].iteritems()
]
)
out.extend(
[
f"- __`{icol}`__ < 3 std for __`{ival:.2f}%`__"
for icol, ival in below.loc[below > std_threshold].iteritems()
]
)
if rof:
out.extend(["---", "##### Limits reach"])
r = pd.DataFrame(rof, index=["lower", "upper"])
rof_above = (current_data > r.loc["upper"] - 1).sum() / mu.shape[0] * 100
rof_below = (current_data < r.loc["lower"] + 1).sum() / mu.shape[0] * 100
out.extend(
[
f"- __`{icol}`__ reaches upper DoF limit for __`{ival:.0f}%`__"
for icol, ival in rof_above.loc[
rof_above > rof_threshold
].iteritems()
]
)
out.extend(
[
f"- __`{icol}`__ reaches lower DoF limit for __`{ival:.0f}%`__"
for icol, ival in rof_below.loc[
rof_below > rof_threshold
].iteritems()
]
)
return out
@app.callback(
output=Output("lines", "figure"),
inputs=[Input("current", "data"), Input("columns", "value")],
state=[State("df", "data"), State("trials", "data")],
)
def make_lines(current, columns, df, trials):
out = dict(data=[], layout={})
if df and columns and current and current["id"] > 0:
mu, sigma, current_data = read_trials_and_current(df, trials, current)
out = tools.make_subplots(
rows=2, cols=2, print_grid=False, shared_xaxes=True, shared_yaxes=False
)
pos = {0: [1, 1], 1: [1, 2], 2: [2, 1], 3: [2, 2]}
for i, icol in enumerate(columns):
# lower
out.append_trace(
go.Scatter(
x=mu.index,
y=mu[icol] - sigma[icol],
marker=dict(color="#444"),
line=dict(width=0),
mode="lines",
),
row=pos[i][0],
col=pos[i][1],
)
# trace
out.append_trace(
go.Scatter(
x=mu.index,
y=mu[icol],
mode="lines",
line=dict(color="black"),
fillcolor="rgba(68, 68, 68, 0.3)",
fill="tonexty",
),
row=pos[i][0],
col=pos[i][1],
)
# upper
out.append_trace(
go.Scatter(
x=mu.index,
y=mu[icol] + sigma[icol],
mode="lines",
marker=dict(color="#444"),
line=dict(width=0),
fillcolor="rgba(68, 68, 68, 0.3)",
fill="tonexty",
),
row=pos[i][0],
col=pos[i][1],
)
# current trial
out.append_trace(
go.Scatter(
x=current_data.index,
y=current_data[icol],
mode="lines",
line=dict(color="rgb(44,115,148)"),
),
row=pos[i][0],
col=pos[i][1],
)
out.layout.update(dict(showlegend=False, margin=dict(l=30, r=30, b=30, t=30)))
return out
@app.callback(
output=Output("current", "data"),
inputs=[
Input("previous", "n_clicks_timestamp"),
Input("next", "n_clicks_timestamp"),
],
state=[State("current", "data")],
)
def trial_navigation(prvs, nxt, current):
incr = 0
if nxt and not prvs:
incr = 1
elif prvs and not nxt:
incr = -1
elif nxt and prvs:
if nxt > prvs:
incr = 1
else:
incr = -1
c = current["id"] + incr
return {"id": c if c > -1 else 0}
@app.callback(
output=Output("current-output", "children"),
inputs=[Input("current", "data")],
state=[State("trials", "data")],
)
def set_current_text(current, trials):
return (
Path(trials[f'{current["id"] - 1}']["filename"]).stem if current["id"] else ""
)
@app.callback(
output=Output("current-output", "style"),
inputs=[Input("current", "data"), Input("trials", "data")],
)
def set_current_color(current, trials):
color = "gray"
if current["id"]:
tag = trials[f'{current["id"] - 1}']["tag"]
if tag == 1:
color = "#57bb8a"
elif tag == 2:
color = "#ffd666"
elif tag == 3:
color = "#e67c73"
return {
"font-family": "monospace",
"font-size": 18,
"color": "white",
"background-color": color,
"text-align": "center",
}
@app.callback(
output=Output("note", "value"),
inputs=[Input("current", "data")],
state=[State("trials", "data")],
)
def set_note(current, trials):
return trials[f'{current["id"] - 1}']["note"] if current["id"] else ""
@app.callback(
output=Output("progress", "children"),
inputs=[Input("current", "data")],
state=[State("trials", "data")],
)
def set_progression(current, trials):
return (
f"{current['id']}/{len(trials)} ({current['id'] / len(trials) * 100:.2f}%)"
if current["id"]
else ""
)
@app.callback(
output=Output("controls-output", "children"),
inputs=[Input("export", "n_clicks")],
state=[State("trials", "data"), State("project", "value")],
)
def export_csv(_, trials, project):
if trials:
pd.DataFrame(trials).T.assign(
trial=lambda x: x["filename"].str.split("/").str[-1]
)[["filename", "trial", "tag", "note"]].to_csv(f"{project}/verification.csv")
return "Controls"
| UTF-8 | Python | false | false | 10,377 | py | 5 | callbacks.py | 3 | 0.4748 | 0.458803 | 0 | 327 | 30.733945 | 86 |
zgrzebnickij/BlogApp | 12,180,527,267,509 | c8f3644ec8061a90d67cea4d6d2c206b0d162f85 | 8b1c0c45db808c48a567606f8a5f152ac5012e75 | /blogApp/config.py | c972a8787abd72ae9421e95d5453a0c707546375 | []
| no_license | https://github.com/zgrzebnickij/BlogApp | dc4c297b30ece0c11c55b3d7f6af4077993dc47b | b23b7f74927604125f4147e7c9b652f19e6240d4 | refs/heads/master | 2020-04-18T05:19:14.083269 | 2019-01-31T16:38:36 | 2019-01-31T16:38:36 | 167,274,645 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

import os
class Config():
    # move to environment variables
SECRET_KEY = '408573b3bb22fb76aa0c5ae611ffe5ab'
SQLALCHEMY_DATABASE_URI = "sqlite:///site.db"
#SQLALCHEMY_NATIVE_UNICODE = False
MAIL_SERVER = "smtp.gmail.com"
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("EMAIL_USER")
MAIL_PASSWORD = os.environ.get("EMAIL_PASS")
class ConfigTests():
TESTING = True
WTF_CSRF_ENABLED = False
DEBUG = False
SECRET_KEY = '408573b3bb22fb76aa0c5ae611ffe5ab'
SQLALCHEMY_DATABASE_URI = "sqlite:///test.db"
#SQLALCHEMY_NATIVE_UNICODE = False
# MAIL_SERVER = "smtp.gmail.com"
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USERNAME = os.environ.get("EMAIL_USER")
    # MAIL_PASSWORD = os.environ.get("EMAIL_PASS")

| UTF-8 | Python | false | false | 757 | py | 8 | config.py | 5 | 0.700132 | 0.647292 | 0 | 28 | 26.071429 | 49 |
zenith-matic/100-exercise-challenge-and-Tornado | 1,236,950,590,697 | 98cd50f219f8a80ee4e7702bd784d37cd0ce911e | 91463de538c3ed525046190b7744fa48c3c97168 | /100-Exercises-Challenge/exercise_2.10_3rd.py | 07ca8b1ddb4bbd408b4ce22f1d8d3157d4aa8894 | []
| no_license | https://github.com/zenith-matic/100-exercise-challenge-and-Tornado | 289e86c95fe8a2cefaf7c879f070cbc74666438f | 3321ae7c352ce17b17e42936a883bcc86d99bd3e | refs/heads/master | 2016-09-15T05:21:04.453820 | 2016-05-20T05:24:58 | 2016-05-20T05:24:58 | 42,927,887 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

# Define a function that can accept two strings as input and print the string with maximum length in console.
# If two strings have the same length, then the function should print all strings line by line.
def string_evaluation(s1, s2):
if len(s1) > len(s2):
print(s1)
elif len(s2) > len(s1):
print(s2)
else:
print(s1)
print(s2)
string_evaluation('derp', 'serp')

| UTF-8 | Python | false | false | 425 | py | 73 | exercise_2.10_3rd.py | 72 | 0.630588 | 0.607059 | 0 | 15 | 26.466667 | 109 |
lj1064201288/Dell_Windowns | 18,588,618,474,332 | 6094d4be1cc3f5e996fe01cf9b89b7930437cb27 | 588e39382c19b3189cb471fcd258a1d6b56a6d4b | /Liujun_web/Liujun_web/urls.py | dc3f61a144d439b91e02c95ed3ba672296e697da | []
| no_license | https://github.com/lj1064201288/Dell_Windowns | b335b10ebf2c622a22be7a0a2234f233df453bed | 98577eba3a167cf9bac9cd8c17daae2a5032ba45 | refs/heads/master | 2020-04-06T15:24:27.156621 | 2018-11-14T17:06:49 | 2018-11-14T17:06:49 | 157,577,351 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

from django.conf.urls import include, url
from django.contrib import admin
from index import views as iv
from resume import resume_url
from outer_net import net_urls
urlpatterns = [
# Examples:
# url(r'^$', 'Liujun_web.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^index/', iv.do_index),
url(r'^resume/', include(resume_url)),
url(r'^outer/', include(net_urls)),
]
| UTF-8 | Python | false | false | 466 | py | 5 | urls.py | 5 | 0.652361 | 0.652361 | 0 | 16 | 28.125 | 55 |
monudjangocon/campus-talks | 18,794,776,906,386 | 8effd2c86f634a439bf32fb0a84eae98c4d38018 | af218a3408d44b80eddb1abb8d304005e2b1fd34 | /friend/member_profile/forms.py | d6a71b0abcdb3dbba75084adacaee9b20fc818c1 | []
| no_license | https://github.com/monudjangocon/campus-talks | 33af6a8a8736c8e0801bcb2d8d6bf730d41de2d4 | 8002aa55b90837274f6e87a9acbdccec99dd321c | refs/heads/master | 2021-01-10T06:08:36.026018 | 2016-03-28T06:46:06 | 2016-03-28T06:46:06 | 54,872,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

from django import forms
from django.contrib.auth.models import User
class ChangePasswordForm(forms.ModelForm):
    id = forms.CharField(widget=forms.HiddenInput())

    class Meta:
        # a ModelForm requires a Meta; the model/fields below are assumed,
        # since the original file does not define them
        model = User
        fields = []
| UTF-8 | Python | false | false | 163 | py | 17 | forms.py | 7 | 0.815951 | 0.815951 | 0 | 6 | 26 | 47 |
KingTyler01/cspp10 | 9,440,338,151,324 | 8fe85a1e36e12d067e6a32929ae1a1b374bfa2cf | 3fefacb52e282994a885bff0ef66d9060f6e3d89 | /unit5/TKing_dictionary.py | df3fdde2a75b8af32dd9553ff79c480c8bee13ea | []
| no_license | https://github.com/KingTyler01/cspp10 | e6dcd9930c7bd4eae2b70644807af6754e161d9d | 009a1b86f4a70807267690682cf47319a21b1fe7 | refs/heads/master | 2020-05-30T07:12:53.372255 | 2017-02-17T17:16:14 | 2017-02-17T17:16:14 | 69,268,522 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

from pprint import pprint
choice = input("[Add]key-value\n[Remove]key-value\n[Update]key-value\n[Exit]\n:")
d = {}
def add(choice,d):
if choice == "add" or choice == "Add":
check = input("What new key would you like to add to the dictionary?: ")
if check in d:
print("Invalid. Input a new key into the dictionary: ")
print(check)
else:
d[check] = input("What is the value of this key?: ")
pprint(d)
def remove(choice,d):
if choice == "remove" or choice =="Remove":
remove = input("What new key would you like to remove from the dictionary?: ")
del d[remove]
pprint(d)
def update(choice,d ):
if choice == "Update" or choice == "update":
check = input ("What key do you want to add to the list?: ")
value = input ("What value do you want the key to have?: ")
        d[check] = value # set or overwrite the value of this key
        pprint(d)
def Exit(choice,d):
if choice == "Exit" or "exit":
print("The End")
add(choice,d)
remove(choice,d)
update(choice,d)
Exit(choice,d)

| UTF-8 | Python | false | false | 1,103 | py | 14 | TKing_dictionary.py | 14 | 0.546691 | 0.546691 | 0 | 38 | 28.052632 | 61 |
hibayoussef/informationretrieval | 15,152,644,642,517 | b6bb5670c01a2dfeea8a54a846049b42ab7cd407 | 35860e726bba5316e1d45a35fcc3bdfd974ed461 | /venv/Lib/site-packages/wikklytext/store/wikStore_sqlite.py | 3cfaf10f5a03502b5f7342303d9cbdf265824358 | []
| no_license | https://github.com/hibayoussef/informationretrieval | 131da7cc1a155de25817e6ab753dddc31cdd0b7b | e3faf114bb6747e2cbd0ddaf0de50153ea374d70 | refs/heads/master | 2023-06-03T23:09:47.908117 | 2021-06-26T06:52:23 | 2021-06-26T06:52:23 | 375,288,511 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

"""
A WikklyStore where items are stored in an SQLite database.
Copyright (C) 2007,2008 Frank McIngvale
Contact: fmcingvale@gmail.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from boodebr.sql.dbtypes import obj_to_dbval
from boodebr.sql.fpmsql import fpmsql
import boodebr.sql.sqlite_util as sqlutil
import os
MAIN_TABLE_NAME = 'WikklyItems'
def detect(pathname):
"""
Detect if the given pathname is an SQLite database containing
wikkly content. (Pathname may be a directory or file.)
	Returns a wikStore_sqlite instance to handle content if found,
or None if not.
"""
if not os.path.isfile(pathname):
return None
try:
		sql = fpmsql(pathname, 'pysqlite', obj_to_dbval)
except:
return None
if MAIN_TABLE_NAME in sqlutil.get_tables(sql):
del sql
return wikStore_sqlite(pathname)
else:
return None
class wikStore_sqlite(object):
def __init__(self, filename):
self.sql = fpmsql(os.path.abspath(filename), 'pysqlite', obj_to_dbval)
# ok to call if already exists
self.init_main_table()
def info(self):
"Return a one-line description of this instance."
return 'WikklyText, SQLite database %s' % self.sql.get_filename()
def getpath(self):
"""
Get the base directory for the store. This is used
to resolve relative paths given by the wikitext for <<include>>, etc.
"""
return os.path.split(self.sql.get_filename())[0]
def names(self):
nlist = []
for row in self.sql.query('select name from %s' % MAIN_TABLE_NAME):
nlist.append(row[0])
return nlist
def getitem(self, name):
from wikStore import tags_split, WikklyItem, WikklyDateTime
qs = 'select * from %s where name=?' % MAIN_TABLE_NAME
obj = self.sql.query(qs, (name,)).getobj()
if obj is None:
return None
item = WikklyItem(obj.name, obj.content, tags_split(obj.tags),
obj.author, WikklyDateTime(from_store=obj.ctime),
WikklyDateTime(from_store=obj.mtime),
obj.content_type, obj.revision)
return item
def getall(self):
"""
Load all content from store.
Returns list of WikklyItems.
"""
# optimize here instead of just calling getitem() ...
from wikStore import tags_split, WikklyItem, WikklyDateTime
items = []
qs = 'select * from %s' % MAIN_TABLE_NAME
for obj in self.sql.query(qs).obj():
item = WikklyItem(obj.name, obj.content, tags_split(obj.tags),
obj.author, WikklyDateTime(from_store=obj.ctime),
WikklyDateTime(from_store=obj.mtime),
obj.content_type, obj.revision)
items.append(item)
return items
def saveitem(self, item, oldname=None):
from wikStore import tags_join
# sanity
if oldname == item.name:
oldname = None
if oldname != None:
self.sql.run('delete from %s where name=?' % MAIN_TABLE_NAME, (oldname,))
# name is UNIQUE so 'insert or replace' is all that's needed to add or replace
qs = 'insert or replace into %s ' % MAIN_TABLE_NAME
qs += '(name,author,ctime,mtime,tags,content,content_type,revision) '
qs += 'values(?,?,?,?,?,?,?,?)'
# remove any \r chars that snuck in
content = item.content.replace(u'\r', u'')
self.sql.run(qs, (item.name.encode('utf-8'), item.author.encode('utf-8'),
item.ctime.to_store(), item.mtime.to_store(),
tags_join(item.tag_list()).encode('utf-8'), content.encode('utf-8'),
item.content_type.encode('utf-8'), item.revision))
def delete(self, item):
"""
Delete the given WikklyItem from the store.
"""
self.sql.run('delete from %s where name=?' % MAIN_TABLE_NAME, (item.name,))
def search(self, query):
"""
Return a list of items matching query.
'query' is one of the WikklyQuery* objects defined
in wikklytext.store.query.
"""
from wikklytext.store.wikQuery import generic_query_store
return generic_query_store(self, query)
# -*- Internal API -*-
def init_main_table(self):
qs = 'create table if not exists %s (' % MAIN_TABLE_NAME
qs += 'name text UNIQUE,'
qs += 'author text,'
qs += 'mtime text,'
qs += 'ctime text,'
qs += 'tags text,'
qs += 'content text,'
qs += 'content_type text,'
qs += 'revision text )'
self.sql.run(qs)
if __name__ == '__main__':
wf = wikStore_sqlite('test.db')
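	# a hedged usage sketch (not part of the original file; WikklyItem's
	# optional-argument defaults are assumed from how getitem() rebuilds items):
	# from wikStore import WikklyItem
	# item = WikklyItem(u'HelloWorld', u'Some wiki text', [u'demo'], u'me')
	# wf.saveitem(item)
	# print wf.names()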
| UTF-8 | Python | false | false | 4,647 | py | 116 | wikStore_sqlite.py | 77 | 0.680439 | 0.676996 | 0 | 161 | 27.850932 | 80 |
dannyjacobs/capo | 1,881,195,701,579 | 4dc450dd3af6f360416b7d35f759635c2f88ad4d | 2fcd4499bf04aad7c1f768a855e72b71b335deb3 | /omni/omni_get_k.py | 5631a5d0c8ef062312614f7c07276316d545c545 | []
| no_license | https://github.com/dannyjacobs/capo | db9cd7008058ae6a7abe4ffc3da7cb1338f25958 | 81b12c84fb860629090272e541a12e31e44dc406 | refs/heads/master | 2017-02-24T14:48:44.135520 | 2016-12-29T15:57:19 | 2016-12-29T15:57:19 | 151,115 | 5 | 13 | null | null | null | null | null | null | null | null | null | null | null | null | null |

#! /usr/bin/env python
import numpy as n
import aipy as a
import optparse,sys
import capo.omni as omni, capo.zsa as zsa, capo.arp as arp
import pylab as p, os
o = optparse.OptionParser()
a.scripting.add_standard_options(o, cal=True)
opts,args = o.parse_args(sys.argv[1:])
pol='xx'
freqs = n.arange(.1,.2,.1/203)
aa = a.cal.get_aa('psa6622_v003', freqs)
blstr, _, bl2sep = zsa.grid2ij(aa.ant_layout)
m, _, mdl,_= omni.from_npz(args[0])
seps = n.arange(1,16) #* 15m = baseline lengths
bls = [(0,i) for i in seps]
sep2ij = {}
for ij in mdl['xx'].keys():
bl = a.miriad.ij2bl(ij[0],ij[1])
try:
s = tuple(map(int,bl2sep[bl].split(',')))
except(KeyError):
continue
if s in bls and s[0] == 0:
bls[s[-1]-1] = ij
#print bls
conjugate = [(0,101), (0,62), (0,100), (0,97), (12,43), (57,64)]
#kmodes = n.einsum('j,i', freqs, seps) #these are all the kmodes measured. Need to align them.
fdict = [{} for i in range(800)] # for each u-bin: the channel pairs, baseline pairs and u values that are redundant
du = 15*15*(freqs[2]-freqs[1])/3e8 * 1e9 #u-range covered b/w neighboring freqs
us = (n.arange(400)+.5)*du #.5*du*N
errors = n.ones(shape=(len(freqs),len(freqs)))
cut = 60/15.*du
print 'Finding baseline/frequency pairs'
for ch1,fq1 in enumerate(freqs):
#print ch1,
for ch2,fq2 in zip(range(ch1+1,len(freqs)),freqs[ch1+1:]):
if ch1 == ch2: continue
err = []
blp = []
bestsofar = [0,(0,0),(0,0),0,100000.] #u-bin, freqpair, blpair, u-value,u-error
for s1 in n.arange(15,0,-1):
for s2 in n.arange(s1-1,0,-1):
if s1==s2: continue
if n.abs(fq1*s1 - fq2*s2)*15*1e9/3e8 < min(cut,bestsofar[-1]):
u = (fq1*s1 + fq2*s2)*15/2.*1e9/3e8
#fdict[int(n.floor(u/du))][(ch1,ch2)] = [(bls[s1-1],bls[s2-1]), n.abs(fq1*s1 - fq2*s2)*15*1e9/3e8]
bestsofar[0] = int(n.floor(u/du))
bestsofar[1] = (ch1,ch2)
bestsofar[2] = (bls[s1-1],bls[s2-1])
bestsofar[3] = u
bestsofar[4] = n.abs(fq1*s1 - fq2*s2)*15*1e9/3e8
fdict[bestsofar[0]][bestsofar[1]] = [bestsofar[2],bestsofar[3],bestsofar[4]]
for i,d in enumerate(fdict):
if len(d) <= 1 : fdict[i] = {}
# err.append(n.abs(fq1*s1 - fq2*s2))
# blp.append((s1,s2))
# errors[ch1,ch2] = n.min(err)
# if errors[ch1,ch2]<cut:
# fdict[(ch1,ch2)] = n.asarray(blp)[n.where(err == n.min(err))]
# else: continue
#print
#print n.sum(errors - errors.T)
p.imshow(errors, origin='lower', aspect='equal', extent=(freqs[0],freqs[-1],freqs[0],freqs[-1]))
#read in data
data = {};
jds = []
lsts = []
files = []
prefix = '5' #change if don't want to overwrite filenames
for fl in args:
if os.path.exists(args[-1][:-3]+'ucal' + prefix + '.npz'):
print ' '+args[-1][:-3]+'ucal' + prefix + '.npz exists. Skipping...'
exit()
print 'Reading %s'%fl
meta,_,mdl,_ = omni.from_npz(fl)
jd = meta['jds']
lst = meta['lsts']
jds.append(jd)
lsts.append(lst)
files.append(fl)
for b in bls:
if b in conjugate:
_d = n.conj(mdl[pol][b])
else:
_d = mdl[pol][b]
if b not in data.keys(): data[b] = _d
else:
data[b] = n.concatenate((data[b],_d), axis=0)
reddata = {}
reddata['files'] = files
for i,d in enumerate(fdict):
for ch1,ch2 in d.keys():
b1,b2 = d[(ch1,ch2)][0]
#getting rid of partially flagged channels
w1 = data[b1][:,ch1] != 0
w2 = data[b2][:,ch2] != 0
w = n.logical_and(w1,w2)
if n.all(n.logical_not(w)): continue
avis = n.average(data[b1][:,ch1]*n.conj(data[b2][:,ch2]), weights=w)
# if not avis: continue
reddata[str((ch1,ch2))] = [ (b1,b2), i, avis ]
print ' Saving file ' + fl[:-3] + 'ucal' + prefix + '.npz' #name is last file
n.savez(fl[:-3] + 'ucal' + prefix + '.npz',**reddata)
#print len(reddata)
#import IPython
#IPython.embed()
x,y = n.meshgrid(freqs, seps)
z = x*y
cs = p.contour(x,y,z, 100)
p.scatter(x,y,s=1.5)
p.grid(True)
#p.show()
| UTF-8 | Python | false | false | 4,170 | py | 1,365 | omni_get_k.py | 1,295 | 0.5506 | 0.495444 | 0 | 126 | 32.095238 | 119 |
ch-liux/spiders | 19,232,863,551,971 | 071d3d166de6e9e95b51ed7bb1590b8fd97115ac | 2b6cdb478bd923c1f9919f31686c248ffc31a881 | /pythoning/src/asyncDemo/__init__.py | 8fd5a1984d75c674ac6e6a411659703212c85a09 | []
| no_license | https://github.com/ch-liux/spiders | 13d732e098597c61df1dbec013074c3b4c6bd6a0 | 2a005a185886ea1954efe3fbc552fd69e940ee7b | refs/heads/master | 2020-03-27T00:04:32.621431 | 2018-09-12T23:53:26 | 2018-09-12T23:53:26 | 145,591,345 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |

"""Asynchronous IO
do_some_code()
f = open('/path/to/file', 'r')
r = f.read() # <== the thread blocks here waiting for the IO result
# only after the IO completes can the thread continue:
do_some_code(r)
loop = get_event_loop()
while True:
event = loop.get_event()
process_event(event)
"""
| UTF-8 | Python | false | false | 275 | py | 104 | __init__.py | 100 | 0.61086 | 0.61086 | 0 | 14 | 14.714286 | 33 |
Berdugo1994/Tweeter-Search-Engine | 884,763,285,494 | 4ca9a9b86a183e7e562afedf0f0f960338910672 | d01a85c82877d1af6cf2150c5a125223614a2eb1 | /searcher_wordnet.py | 822ec1de66729e45f7ba215be16c35f214405dc7 | [
"MIT"
]
| permissive | https://github.com/Berdugo1994/Tweeter-Search-Engine | 1ae2a86b9e77e963c3b8f9709cb21a40b4d24342 | ff80707d64b792288b877814d79e39c5b5ceb7ad | refs/heads/main | 2023-06-09T10:53:18.990260 | 2021-06-25T10:21:39 | 2021-06-25T10:21:39 | 360,846,088 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
from ranker import Ranker
# DO NOT MODIFY CLASS NAME
class Searcher:
# DO NOT MODIFY THIS SIGNATURE
# You can change the internal implementation as you see fit. The model
# parameter allows you to pass in a precomputed model that is already in
# memory for the searcher to use such as LSI, LDA, Word2vec models.
# MAKE SURE YOU DON'T LOAD A MODEL INTO MEMORY HERE AS THIS IS RUN AT QUERY TIME.
def __init__(self, parser, indexer, model=None):
self._parser = parser
self._indexer = indexer
self.dic_docs = self._indexer.get_docs_to_info_dict()
self.posting = self._indexer.get_posting_dict()
self.inverted_index = self._indexer.get_inverted_index()
self.num_of_tweets = len(self.dic_docs)
self._ranker = Ranker(self.dic_docs)
self._model = model
# DO NOT MODIFY THIS SIGNATURE
# You can change the internal implementation as you see fit.
def search(self, query, k=None):
"""
Executes a query over an existing index and returns the number of
relevant docs and an ordered list of search results (tweet ids).
parser the query and turn it into a list of terms,
then find the k best results by cosSim.
then calculate it with GloVe model weight.
Input:
query - string.
k - number of top results to return, default to everything.
Output:
A tuple containing the number of relevant search results, and
            a list of tweet_ids where the first element is the most relevant
and the last is the least relevant result.
"""
query_as_list = self._parser.parse_sentence(query)
expanded_query = self._model.query_expanded(query_as_list)
if len(query_as_list) == 0:
return 0, []
tuples_tweet_sim = self.relevant_and_cosSim(expanded_query) # CosSim func
if tuples_tweet_sim is None:
return 0, []
ranked_docs = self._ranker.simple_rank(tuples_tweet_sim, reversed=True)
ranked_docs = self._ranker.retrieve_top_k(ranked_docs, k)
return len(ranked_docs), ranked_docs
# feel free to change the signature and/or implementation of this function
# or drop altogether.
def relevant_and_cosSim(self, query, k=None):
"""
        This function detects the relevant docs that might be good answers.
        IMPORTANT: we do a partial ranking here, as a function of (count of
        each query term in the doc + number of doc terms belonging to the query).
        The flow: for every term in the query we fetch its posting list and
        update the relevant docs (numerator and denominator of the cosine
        similarity), then move on to the next term until the query list is
        exhausted. This way we never touch a doc that shares no words with the
        query. After all query terms are processed, we iterate over the
        dictionary and take the square root of the relevant parts of the
        equation.
        :param query: list of query terms
        :return: list of (doc_id, cosine similarity) tuples.
"""
dic_key_sim = {}
N = self.num_of_tweets
for term in query:
if term.lower() in self.posting:
term = term.lower()
elif term.upper() in self.posting:
term = term.upper()
else:
continue
df = self.inverted_index.get(term, 0) # df is doc frequency - in how many docs this term mentioned
posting_doc = self.posting[term]
idf = math.log(N / df, 10)
for doc_tuple in posting_doc:
tf = doc_tuple[1] / doc_tuple[2]
cos_sin_similarity_mone = (tf * idf)
if doc_tuple[0] not in dic_key_sim.keys():
dic_key_sim[doc_tuple[0]] = [cos_sin_similarity_mone,
self.calculate_wij_mehane(doc_tuple[0], len(query))]
else:
dic_key_sim[doc_tuple[0]][0] += cos_sin_similarity_mone
result_list = []
for item in dic_key_sim.items():
result_list.append((item[0], item[1][0] / item[1][1]))
return result_list
def calculate_wij_mehane(self, doc_id, query_len):
"""
        Calculate the document vector norm for the cosine-similarity score.
        :param doc_id: document id
        :param query_len: size of the query
        :return: the document vector norm, scaled by sqrt(query_len)
"""
doc_dic, doc_len, date, full_text = self.dic_docs[doc_id]
total_wij_squared = 0
for term in doc_dic.keys():
if term.lower() in self.posting:
term_at_post = term.lower()
elif term.upper() in self.posting:
term_at_post = term.upper()
else:
continue
            df = self.inverted_index.get(term_at_post, 0)  # df = document frequency: the number of docs that mention this term
idf = math.log(self.num_of_tweets / df, 10)
wij = (doc_dic[term] / doc_len) * idf
total_wij_squared += wij ** 2
total_wij_squared = math.sqrt(total_wij_squared * query_len)
return total_wij_squared
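

# --- Hedged sanity check (added; not part of the original searcher) ---
# A self-contained check of the tf-idf arithmetic used by the class above,
# with made-up corpus statistics; it never touches the index or the model.
if __name__ == '__main__':
    _N, _df, _tf = 1000, 10, 0.2            # hypothetical corpus statistics
    _idf = math.log(_N / _df, 10)           # = 2.0
    assert abs(_tf * _idf - 0.4) < 1e-9     # one term's numerator contribution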
| UTF-8 | Python | false | false | 5,237 | py | 39 | searcher_wordnet.py | 36 | 0.585259 | 0.580676 | 0 | 111 | 45.18018 | 120 |
Huahan98/deep-learning-containers | 4,818,953,320,461 | 715e50dbcba2a74719c56b4c0dba3d9134cd7d72 | 110c5310346e0db4ea399c6a553d75fe3fbf5bcd | /test/sagemaker_tests/pytorch/training/integration/sagemaker/test_distributed_operations.py | 3d08d4e417d4f474dd42ed9c9f6886676b0ea12d | [
"Apache-2.0"
]
| permissive | https://github.com/Huahan98/deep-learning-containers | dc5f3391f4099326c8402832f87cc3c4bda86cc8 | 1510b917ebfb24a3bfb744e4590d46ba78657392 | refs/heads/master | 2023-09-03T13:29:32.142167 | 2021-11-10T17:42:43 | 2021-11-10T17:42:43 | 285,139,382 | 1 | 0 | Apache-2.0 | true | 2020-08-05T00:57:54 | 2020-08-05T00:57:53 | 2020-08-05T00:53:09 | 2020-08-05T00:56:19 | 67,495 | 0 | 0 | 0 | null | false | false | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import boto3
import pytest
from sagemaker import utils
from sagemaker.pytorch import PyTorch
from sagemaker import Session
from six.moves.urllib.parse import urlparse
from test.test_utils import get_framework_and_version_from_tag, get_cuda_version_from_tag
from packaging.version import Version
from packaging.specifiers import SpecifierSet
from ...integration import (data_dir, dist_operations_path, fastai_path, mnist_script,
DEFAULT_TIMEOUT, mnist_path)
from ...integration.sagemaker.timeout import timeout
from .... import invoke_pytorch_helper_function
from . import invoke_pytorch_estimator
MULTI_GPU_INSTANCE = 'ml.p3.8xlarge'
RESOURCE_PATH = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
def validate_or_skip_smmodelparallel(ecr_image):
if not can_run_smmodelparallel(ecr_image):
pytest.skip("Model Parallelism is supported on CUDA 11 on PyTorch v1.6 and above")
def can_run_smmodelparallel(ecr_image):
_, image_framework_version = get_framework_and_version_from_tag(ecr_image)
image_cuda_version = get_cuda_version_from_tag(ecr_image)
return Version(image_framework_version) in SpecifierSet(">=1.6") and Version(
image_cuda_version.strip("cu")) >= Version("110")
def validate_or_skip_smmodelparallel_efa(ecr_image):
if not can_run_smmodelparallel_efa(ecr_image):
pytest.skip("EFA is only supported on CUDA 11, and on PyTorch 1.8.1 or higher")
def can_run_smmodelparallel_efa(ecr_image):
_, image_framework_version = get_framework_and_version_from_tag(ecr_image)
image_cuda_version = get_cuda_version_from_tag(ecr_image)
return Version(image_framework_version) in SpecifierSet(">=1.8.1") and Version(image_cuda_version.strip("cu")) >= Version("110")
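

# Illustration (added): quick self-checks of the version gates above, using
# the same Version/SpecifierSet semantics on hypothetical tags; evaluated at
# import time and side-effect free.
assert Version("1.8.1") in SpecifierSet(">=1.8.1")        # EFA-capable
assert Version("1.6.0") not in SpecifierSet(">=1.8.1")    # would be skipped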
@pytest.mark.processor("cpu")
@pytest.mark.multinode(3)
@pytest.mark.model("unknown_model")
@pytest.mark.skip_gpu
@pytest.mark.deploy_test
@pytest.mark.skip_test_in_region
def test_dist_operations_cpu(framework_version, ecr_image, sagemaker_regions, instance_type, dist_cpu_backend):
instance_type = instance_type or 'ml.c4.xlarge'
function_args = {
'framework_version': framework_version,
'instance_type': instance_type,
'dist_backend': dist_cpu_backend,
}
invoke_pytorch_helper_function(ecr_image, sagemaker_regions, _test_dist_operations, function_args)
@pytest.mark.processor("gpu")
@pytest.mark.multinode(3)
@pytest.mark.model("unknown_model")
@pytest.mark.skip_cpu
@pytest.mark.deploy_test
def test_dist_operations_gpu(framework_version, instance_type, ecr_image, sagemaker_regions, dist_gpu_backend):
"""
    This test runs across multiple nodes.
"""
instance_type = instance_type or 'ml.p2.xlarge'
function_args = {
'framework_version': framework_version,
'instance_type': instance_type,
'dist_backend': dist_gpu_backend,
}
invoke_pytorch_helper_function(ecr_image, sagemaker_regions, _test_dist_operations, function_args)
@pytest.mark.processor("gpu")
@pytest.mark.model("unknown_model")
@pytest.mark.skip_cpu
def test_dist_operations_multi_gpu(framework_version, ecr_image, sagemaker_regions, dist_gpu_backend):
"""
    This test runs on a single node with multiple GPUs.
"""
function_args = {
'framework_version': framework_version,
'instance_type': MULTI_GPU_INSTANCE,
'dist_backend': dist_gpu_backend,
'instance_count': 1
}
invoke_pytorch_helper_function(ecr_image, sagemaker_regions, _test_dist_operations, function_args)
@pytest.mark.processor("gpu")
@pytest.mark.integration("fastai")
@pytest.mark.model("cifar")
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
def test_dist_operations_fastai_gpu(framework_version, ecr_image, sagemaker_regions):
_, image_framework_version = get_framework_and_version_from_tag(ecr_image)
if Version("1.9") <= Version(image_framework_version) < Version("1.11"):
pytest.skip("Fast ai is not supported on PyTorch v1.9.x and v1.10.x")
with timeout(minutes=DEFAULT_TIMEOUT):
estimator_parameter = {
'entry_point': 'train_cifar.py',
'source_dir': os.path.join(fastai_path, 'cifar'),
'role': 'SageMakerRole',
'instance_count': 1,
'instance_type': MULTI_GPU_INSTANCE,
'framework_version': framework_version,
}
upload_s3_data_args = {
'path': os.path.join(fastai_path, 'cifar_tiny', 'training'),
'key_prefix': 'pytorch/distributed_operations'
}
job_name=utils.unique_name_from_base('test-pt-fastai')
pytorch, sagemaker_session = invoke_pytorch_estimator(ecr_image, sagemaker_regions, estimator_parameter, upload_s3_data_args=upload_s3_data_args, job_name=job_name)
model_s3_url = pytorch.create_model().model_data
_assert_s3_file_exists(sagemaker_session.boto_region_name, model_s3_url)
@pytest.mark.processor("gpu")
@pytest.mark.model("mnist")
@pytest.mark.multinode(2)
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
def test_mnist_gpu(framework_version, ecr_image, sagemaker_regions, dist_gpu_backend):
with timeout(minutes=DEFAULT_TIMEOUT):
estimator_parameter = {
'entry_point': mnist_script,
'role': 'SageMakerRole',
'instance_count': 2,
'framework_version': framework_version,
'instance_type': MULTI_GPU_INSTANCE,
'hyperparameters': {'backend': dist_gpu_backend},
}
upload_s3_data_args = {
'path': os.path.join(data_dir, 'training'),
'key_prefix': 'pytorch/mnist'
}
job_name=utils.unique_name_from_base('test-pt-mnist-gpu')
invoke_pytorch_estimator(ecr_image, sagemaker_regions, estimator_parameter, upload_s3_data_args=upload_s3_data_args, job_name=job_name)
@pytest.mark.integration("smmodelparallel")
@pytest.mark.model("mnist")
@pytest.mark.processor("gpu")
@pytest.mark.multinode(2)
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
@pytest.mark.parametrize("test_script, num_processes", [("smmodelparallel_pt_mnist.py", 8)])
def test_smmodelparallel_mnist_multigpu_multinode(ecr_image, instance_type, sagemaker_regions, test_script, num_processes):
"""
Tests pt mnist command via script mode
"""
instance_type = "ml.p3.16xlarge"
validate_or_skip_smmodelparallel(ecr_image)
with timeout(minutes=DEFAULT_TIMEOUT):
estimator_parameter = {
'entry_point': test_script,
'role': 'SageMakerRole',
'source_dir': mnist_path,
'instance_count': 2,
'instance_type': instance_type,
'hyperparameters': {"assert-losses": 1, "amp": 1, "ddp": 1, "data-dir": "data/training", "epochs": 5},
'distribution': {
"smdistributed": {
"modelparallel": {
"enabled": True,
"parameters": {
"partitions": 2,
"microbatches": 4,
"optimize": "speed",
"pipeline": "interleaved",
"ddp": True,
},
}
},
"mpi": {
"enabled": True,
"processes_per_host": num_processes,
"custom_mpi_options": "-verbose --mca orte_base_help_aggregate 0 -x SMDEBUG_LOG_LEVEL=error -x OMPI_MCA_btl_vader_single_copy_mechanism=none ",
},
},
}
job_name=utils.unique_name_from_base('test-pt-smdmp-multinode')
invoke_pytorch_estimator(ecr_image, sagemaker_regions, estimator_parameter, job_name=job_name)
@pytest.mark.integration("smmodelparallel")
@pytest.mark.model("mnist")
@pytest.mark.processor("gpu")
@pytest.mark.multinode(2)
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
@pytest.mark.parametrize("test_script, num_processes", [("smmodelparallel_pt_mnist.py", 8)])
@pytest.mark.efa()
def test_smmodelparallel_mnist_multigpu_multinode_efa(ecr_image, efa_instance_type, sagemaker_regions, test_script, num_processes):
"""
Tests pt mnist command via script mode
"""
validate_or_skip_smmodelparallel_efa(ecr_image)
with timeout(minutes=DEFAULT_TIMEOUT):
estimator_parameter = {
'entry_point': test_script,
'role': 'SageMakerRole',
'source_dir': mnist_path,
'instance_count': 2,
'instance_type': efa_instance_type,
'hyperparameters': {"assert-losses": 1, "amp": 1, "ddp": 1, "data-dir": "data/training", "epochs": 5},
'distribution': {
"smdistributed": {
"modelparallel": {
"enabled": True,
"parameters": {
"partitions": 2,
"microbatches": 4,
"optimize": "speed",
"pipeline": "interleaved",
"ddp": True,
},
}
},
"mpi": {
"enabled": True,
"processes_per_host": num_processes,
"custom_mpi_options": "-verbose --mca orte_base_help_aggregate 0 -x SMDEBUG_LOG_LEVEL=error -x OMPI_MCA_btl_vader_single_copy_mechanism=none -x FI_EFA_USE_DEVICE_RDMA=1 -x FI_PROVIDER=efa ",
},
}
}
job_name=utils.unique_name_from_base('test-pt-smdmp-multinode-efa')
invoke_pytorch_estimator(ecr_image, sagemaker_regions, estimator_parameter, job_name=job_name)
@pytest.mark.integration("smmodelparallel")
@pytest.mark.model("mnist")
@pytest.mark.processor("gpu")
@pytest.mark.skip_cpu
@pytest.mark.efa()
@pytest.mark.skip_py2_containers
def test_sanity_efa(ecr_image, efa_instance_type, sagemaker_regions):
"""
    Sanity-checks EFA availability inside the container by running a shell script via MPI
"""
validate_or_skip_smmodelparallel_efa(ecr_image)
efa_test_path = os.path.join(RESOURCE_PATH, 'efa', 'test_efa.sh')
with timeout(minutes=DEFAULT_TIMEOUT):
estimator_parameter = {
'entry_point': efa_test_path,
'role': 'SageMakerRole',
'instance_count': 1,
'instance_type': efa_instance_type,
'distribution': {
"mpi": {
"enabled": True,
"processes_per_host": 1
},
},
}
job_name=utils.unique_name_from_base('test-pt-efa-sanity')
invoke_pytorch_estimator(ecr_image, sagemaker_regions, estimator_parameter, job_name=job_name)
def _test_dist_operations(
ecr_image, sagemaker_session, framework_version, instance_type, dist_backend, instance_count=3
):
with timeout(minutes=DEFAULT_TIMEOUT):
pytorch = PyTorch(
entry_point=dist_operations_path,
role='SageMakerRole',
instance_count=instance_count,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
image_uri=ecr_image,
framework_version=framework_version,
hyperparameters={'backend': dist_backend},
)
pytorch = _disable_sm_profiler(sagemaker_session.boto_region_name, pytorch)
pytorch.sagemaker_session.default_bucket()
fake_input = pytorch.sagemaker_session.upload_data(
path=dist_operations_path, key_prefix='pytorch/distributed_operations'
)
pytorch.fit({'required_argument': fake_input}, job_name=utils.unique_name_from_base('test-pt-dist-operations'))
def _assert_s3_file_exists(region, s3_url):
parsed_url = urlparse(s3_url)
s3 = boto3.resource('s3', region_name=region)
s3.Object(parsed_url.netloc, parsed_url.path.lstrip('/')).load()
def _disable_sm_profiler(region, estimator):
"""Disable SMProfiler feature for China regions
"""
if region in ('cn-north-1', 'cn-northwest-1'):
estimator.disable_profiler = True
return estimator
| UTF-8 | Python | false | false | 12,794 | py | 293 | test_distributed_operations.py | 233 | 0.631077 | 0.623026 | 0 | 317 | 39.359621 | 210 |
google-code/0xic-zex | 15,315,853,405,298 | 3a4469afb5f9032175a2a7878c4611880c52388a | 839c3ab12d1716ccabb03775686ac5dc60379b48 | /Python/imvustuff/imvumacro/__testing__gui/test.py | 06782de82809474f89dbb6d22fe72b8829b03e10 | []
| no_license | https://github.com/google-code/0xic-zex | d7ac68cbf4304ef41662e1b88a10ef6c99371602 | b5da0a01bcd7c8a71c0b57e1513b4bba81a84ca8 | refs/heads/master | 2018-01-07T09:08:00.939937 | 2015-03-13T14:26:15 | 2015-03-13T14:26:15 | 32,160,272 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def add5 (x):
return x+5
def dotwrite(ast):
nodename = getNodename =()
label=symbol.sym_name.get(int(ast[0]),ast[0])
print ' %s [label="%s' % (nodename, label),
if isinstance(ast[1], str):
print '= %s"];' % ast[1]
add5(1) | UTF-8 | Python | false | false | 247 | py | 129 | test.py | 87 | 0.566802 | 0.534413 | 0 | 9 | 25.666667 | 47 |
hugs-cloud/hugs | 8,881,992,392,400 | 3fcf02eaa699cf0a639970f718edf2f629e9937d | ce969904073f7d6893734fac3d68a52338eb255a | /tests/mock/hugs/conftest.py | 16138758989f87eee9f85e97d481c4ba2e91ef79 | [
"Apache-2.0"
]
| permissive | https://github.com/hugs-cloud/hugs | a26808991d1413d9c3ac566b5e2de6bdb6b39f75 | 93c58c9e0381f453a604f39141f73022d4003322 | refs/heads/master | 2023-08-17T10:27:04.982632 | 2023-08-03T08:07:51 | 2023-08-03T08:07:51 | 270,334,862 | 0 | 2 | Apache-2.0 | false | 2020-09-30T13:27:45 | 2020-06-07T14:41:52 | 2020-09-30T13:09:33 | 2020-09-30T13:27:45 | 11,996 | 0 | 0 | 3 | C | false | false | # import pytest
# from HUGS.Modules import CRDS, GC
# # If this doesn't exist in the object store, create it
# @pytest.fixture(scope="session", autouse=True)
# def check_crds():
# if not CRDS.exists():
# crds = CRDS.create()
# crds.save()
# @pytest.fixture(scope="session", autouse=True)
# def check_gc():
# if not GC.exists():
# gc = GC.create()
# gc.save()
| UTF-8 | Python | false | false | 403 | py | 135 | conftest.py | 98 | 0.595533 | 0.595533 | 0 | 17 | 22.705882 | 56 |
duanzhihua/utils | 2,937,757,678,382 | 3f65c8b115935dbea5ea327d1390113f76ae6d56 | d6374661286e3b0133f894b624d5a175f28679c3 | /sklearn_wrapper.py | 59acca522bc01e2bd0753d42ed055dc31c6aacbc | [
"Apache-2.0"
]
| permissive | https://github.com/duanzhihua/utils | 7c0c468703954c0f4db97a3f463afe529a563038 | 8ad4f4e3f7913f0119bc04f84326a8ad01bf65d1 | refs/heads/master | 2021-10-25T04:25:35.440111 | 2019-04-01T00:39:58 | 2019-04-01T00:39:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# encoding: utf-8
'''
Wrappers for the various stages of a data-analysis workflow:
* Data exploration (including pre-processing):
  data encoding, missing-value handling, outlier handling, inter-field
  correlation analysis, descriptive statistics for numeric fields
* Visualisation of exploration results
* Feature engineering
* Dataset splitting
* Model building
'''
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
class sklearn_wrapper(object):
def __init__(self):
pass
def train_test_split_df(self, df, test_size=0.25):
        '''
        Split df into a training set and a test set; the rows are shuffled
        before splitting.
        return: train_data, test_data
        '''
        return train_test_split(df, test_size=test_size)
def cosine_similarity(self, X, Y=None, dense_output=True):
        '''
        Compute cosine similarity between samples in X and Y.
        If Y == ``None``, the output will be the pairwise
        similarities between all samples (i.e. rows) in ``X``.
        @X : ndarray or sparse array,
        @Y : ndarray or sparse array,
        @return ndarray with entries in [-1, 1]
        '''
        return cosine_similarity(X, Y, dense_output=dense_output)
def cosine_distances(self,X, Y=None):
        '''
        cosine_distances = 1 - cosine_similarity
        Compute cosine distances between samples in X and Y.
        If Y == ``None``, the output will be the pairwise
        distances between all samples (i.e. rows) in ``X``.
        @X : ndarray or sparse array,
        @Y : ndarray or sparse array,
        @return ndarray with entries in [0, 2]
        '''
        return pairwise_distances(X, Y, metric='cosine')
def mean_squared_error(self,y_true, y_pred, sample_weight=None, multioutput='uniform_average'):
        '''
        Compute the mean squared error (MSE) regression loss.
        @y_true: 1-d ndarray
        '''
return mean_squared_error(y_true, y_pred, sample_weight, multioutput)
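

if __name__ == '__main__':
    # Hedged usage sketch (added): exercise the wrapper on tiny toy arrays.
    import numpy as np
    w = sklearn_wrapper()
    X = np.array([[1.0, 0.0], [0.0, 1.0]])
    print(w.cosine_similarity(X))                      # ~identity matrix
    print(w.cosine_distances(X))                       # 1 - similarity
    print(w.mean_squared_error([1, 2, 3], [1, 2, 4]))  # -> 0.333...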
| UTF-8 | Python | false | false | 2,119 | py | 81 | sklearn_wrapper.py | 26 | 0.637154 | 0.632265 | 0 | 54 | 32.944444 | 99 |
malbingi/STUDIA | 7,516,192,781,969 | 082a87577f489c4da867fa400c2e33d65993c31b | 194ef255316397f4d77e0bcb403dd1056f0a0c4a | /IDENTYFIKACJA/IVM-1/main.py | dc7bce29577fa590847fc6ac83f4b9fc895604af | []
| no_license | https://github.com/malbingi/STUDIA | 1ef2b2fe8c206bd6157f4c8d643b7747c5fbed7b | 8d0c477456186458293f86f4dc13ca3d95d8e08e | refs/heads/master | 2023-05-25T16:54:13.583652 | 2021-06-08T21:55:00 | 2021-06-08T21:55:00 | 367,340,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import log, pow
from saveToFile import saveToFile as save
# The values of A1, A2, A3, ro_cr and a8 are substituted for each test case below
A1 = 3.5
A2 = 5.0
A3 = 1
epsilon_dot = 1.0
ro_0 = 0.0
ro_cr = 0.4
a8 = 0.0
number_of_steps = 1000
historia_ro = [0.0]
t_cr = (1/A2) * log((ro_0-(A1/A2))/(ro_cr-(A1/A2)))
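# Added note: t_cr above is the analytic time at which the homogeneous
# solution rho(t) = (A1/A2) + (rho_0 - A1/A2) * exp(-A2 * t) first reaches
# rho_cr, i.e. the moment the delayed third term is switched on below.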
t_cr_step = int(t_cr*number_of_steps)
save(ro_0)
for i in range(number_of_steps):
if i < t_cr_step:
temp = 0.0
index = 0
else:
temp = 1.0
index = i-t_cr_step
delta = A1*epsilon_dot - A2*historia_ro[-1]*epsilon_dot - A3*pow(historia_ro[-1], a8)*temp*historia_ro[index]
ro_0 += delta*(1/number_of_steps)
historia_ro.append(ro_0)
save(ro_0)
| UTF-8 | Python | false | false | 709 | py | 197 | main.py | 30 | 0.599717 | 0.527581 | 0 | 30 | 22.566667 | 113 |
devsyam6/ai-email-insights | 2,250,562,880,312 | ed3f433c6aba36357ec261f09a8e60fbcd3d81f3 | f78a5539bf30315a1a9e8484f7a53ffd63b874a1 | /email_analysis.py | d72c4e2c57fa6621d9a38949c25a3b248e56be58 | [
"MIT"
]
| permissive | https://github.com/devsyam6/ai-email-insights | 8081fe8a1221ec2c8b16dca0c9a4df4fbcca3ef7 | c94131e1cb9299546664c08c48efa9372d377914 | refs/heads/main | 2023-08-27T19:38:48.223027 | 2021-10-21T15:55:51 | 2021-10-21T15:55:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
MIT License
Copyright (c) 2021 Avemac Systems LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
########################################################################################
# This source contains data analysis functions to validate the usability of the Enron
# email repository for the exercise of classifying sentiment and professional alignment
# in an unsupervised manner.
#
# -- This is the supporting code for my email insights and analysis blog series available
# on my website at https://www.avemacconsulting.com/resources/. Part 2 of the series at
# https://www.avemacconsulting.com/2021/08/27/email-insights-from-data-science-part-2/
#
# -- The dataset used for this exercise was specifically created from the raw Enron
# email repository located at https://www.cs.cmu.edu/~enron/ with 3 labels generated
# for sentiment (positive/negative/neutral/unknown) and alignment(business/personal).
#
# The code for formatting the raw email content, performing basic analysis and creating
# the supervised dataset can be found in this Github repo with details referenced on my website.
#
# Part 1. https://www.avemacconsulting.com/2021/08/24/email-insights-from-data-science-techniques-part-1/
# Part 2. https://www.avemacconsulting.com/2021/08/27/email-insights-from-data-science-part-2/
# Part 3. https://www.avemacconsulting.com/2021/09/23/email-insights-from-data-science-part-3/
# Part 4. https://www.avemacconsulting.com/2021/10/12/email-insights-from-data-science-part-4/
#
# ---- Classes ----
# class BasicEmailStats - Contains all analysis methods.
#
# ---- Methods ----
#
# def xy_gen_plot - generic X/Y plot helper with optional extra series
# def df_plot - pandas-based histogram/box/density plot helper
#
# def _address_clean - Regex to filter invalid email addresses
# def _fix_email_addresses - Control function to correct invalid email addresses
# def cliporpad - simple text formatting function
#
# def unique_address_count - shows overall dataset counts by unique address and type
# def _frequency_addresses_groupby - supporting routine for summarizing email address frequencies
# def _frequency_addresses_groupby_sum - supporting routine for summarizing email address frequencies
# def side_by_side_histogram - supporting routine for visualizing email address frequencies
# def frequency_addresses - routine to summarize email frequencies by type and action
# def frequency_subject_line - method to show the frequency distribution of tokens in all subject lines
# def frequency_actions - frequency distribution for email actions (i.e. sent, received, deleted, etc.)
# def frequency_time - distribution of emails by hour of day.
# def frequency_day_of_week - distribution of emails by day of week.
#
# def length_body_tokens - clustering emails by content length.
# def graph_from_to_addresses - graphing email address relationships.
# def datetime_zscore - datetime variability test.
# def range_date_time - total date/time range for the dataset.
# def manual_document_word_count - quick check of unique vocabulary size and word frequency.
#
# def _multicollinearity_transform - support function for VIF test
# def multicollinearity_test - VIF and correlation matrix tests
#
# ---- Main ----
# Processing for all content and feature analysis routines
#
########################################################################################
#!/usr/bin/python3 -W ignore::DeprecationWarning
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
import string
from time import time
import datetime as dt
import pandas as pd
import numpy as np
import collections
import re
from tqdm import tqdm
import sklearn.cluster
import matplotlib.pyplot as plt
import networkx as nx
from scipy.stats import zscore
from statsmodels.stats.outliers_influence import variance_inflation_factor
import seaborn as sns
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 20)
pd.set_option('display.max_colwidth', 100)
#####################################################################
# Statistics and Analysis Functions
#####################################################################
class BasicEmailStats():
''' Generate simple statistics about email dataset'''
def __init__(self, config):
self.data_dir = config['data_dir']
self.manual_word_counts_fn = config['manual_word_counts_fn']
self.plot_save_dir = config['plot_image_save_directory']
raw_emails = pd.read_csv(self.data_dir + config['email_extracted_fn'])
raw_emails.fillna(value="[]", inplace=True)
self.email_df = self._fix_email_addresses('From_Address', raw_emails)
self.email_df = self._fix_email_addresses('To_Address', raw_emails)
self.email_df = self._fix_email_addresses('Cc_Address', raw_emails)
self.email_df = self._fix_email_addresses('Bcc_Address', raw_emails)
self.email_df['DateTime'] = self.email_df['DateTime'].apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d %H:%M:%S%z'))
self.email_df['DateTime_TS'] = self.email_df['DateTime'].apply(lambda x: x.timestamp())
self.email_df['DateTime_HOUR'] = self.email_df['DateTime'].apply(lambda x: x.hour)
self.email_df['DateTime_MONTH'] = self.email_df['DateTime'].apply(lambda x: x.month)
print(f'\n--- Init Complete\n\n')
return
def cliporpad(self, text:str, clen):
''' Just like it sounds.'''
return text.ljust(clen)[0:clen]
def xy_gen_plot(self, X, X_label, Y, Y_label, title=None, aug_plots=None, aug_plot_labels=None, spot=None, spot_label=None, save=False, img_fn='generic_plot.png'):
'''Generic graph plot - support X, Y and augmented plot points'''
fig, (p1) = plt.subplots(1, 1)
if title is not None:
p1.set_title(title)
p1.set_ylim([min(Y)-(max(Y)*0.1), max(Y)+(max(Y)*0.1)])
p1.set_xlim([min(X)-(max(X)*0.1), max(X)+(max(X)*0.1)])
p1.plot(X, Y, 'o-', color='blue')
if aug_plots is not None:
for x in range(len(aug_plots)):
p1.plot(aug_plots[x][0], aug_plots[x][1], label=aug_plot_labels[x])
if spot is not None:
p1.plot(spot[0], spot[1], 'bo')
p1.annotate(spot_label, xy=(spot[0]+0.2, spot[1]-0.2))
p1.set_xlabel(X_label)
p1.set_ylabel(Y_label)
p1.legend()
p1.grid(True)
fig.tight_layout()
if save:
plt.savefig(self.plot_save_dir + img_fn, format='png')
plt.show()
return
def df_plot(self, ds, columns, types=['hist'], bins=20):
'''Pandas-based plots'''
for t in types:
if t == 'box':
ds.boxplot(column=columns)
elif t == 'hist':
ds.hist(column=columns, bins=bins)
elif t == 'density':
ds.plot.kde()
plt.show()
return
def _address_clean(self, addr):
''' Additional email address cleaning '''
addr = re.sub(r'e-mail <(.*)>',r'\1',addr)
addr = re.sub(r' +', '', addr)
addr = re.sub(r'/o.*=', '', addr)
addr = re.sub(r'"', '', addr)
return addr
def _fix_email_addresses(self, type, df):
''' Split email address array strings into usable arrays '''
split_embeds = lambda x: x.replace('[','').replace(']','').replace('\'','').split(',')
addrs = [split_embeds(s) for s in tqdm(df[type].values)]
u_addrs = [[self._address_clean(y) for y in x] for x in tqdm(addrs)]
df[type] = u_addrs
return df
def unique_address_count(self):
''' Find unique email address counts '''
from_addrs = list(set([y for x in self.email_df['From_Address'] for y in x]))
to_addrs = list(set([y for x in self.email_df['To_Address'] for y in x]))
cc_addrs = list(set([y for x in self.email_df['Cc_Address'] for y in x]))
bcc_addrs = list(set([y for x in self.email_df['Bcc_Address'] for y in x]))
addrs_df = pd.DataFrame([len(self.email_df)], columns=['Total_Emails'])
addrs_df['From_Addresses'] = [len(from_addrs)]
addrs_df['To_Addresses'] = [len(to_addrs)]
addrs_df['Cc_Addresses'] = [len(cc_addrs)]
addrs_df['Bcc_Addresses'] = [len(bcc_addrs)]
print(f'\n--- Unique Email Address Counts\n\n{addrs_df.head(20)}\n')
return addrs_df
def _frequency_addresses_groupby(self, type):
''' Routine to summarize by email address and other features.'''
columns = [type, 'Source','Day','Outside_Hours','Forwarded']
# using list comprehension to build address dataframe from each address array within each email
addrs = pd.DataFrame([[y, x[1], x[2], x[3], x[4]] for x in self.email_df[columns].values for y in x[0]], columns=columns)
group = addrs.groupby(by=[type]) # group by address only so all data elements are captured within one reference point
# since the grouping is by address type and we're looking for counts of multiple data elements
# we'll just spin through the group details and manually sum each data element by the element type
frequencies = []
for grp in tqdm(group.groups):
details = group.get_group(grp)
total = len(details)
sources = collections.Counter(details.Source.values)
days = collections.Counter(details.Day.values)
hours = collections.Counter(details.Outside_Hours.values)
forwards = collections.Counter(details.Forwarded.values)
# build a cum row for each group
frequency = {}
frequency['user'] = grp
frequency['total'] = total
for k,v in sources.items(): frequency['sources_'+k] = v
for k,v in days.items(): frequency['days_'+str(k)] = v
for k,v in hours.items(): frequency['after_hours_'+str(k).lower()] = v
for k,v in forwards.items(): frequency['forwards_'+str(k).lower()] = v
frequencies.append(frequency)
df = pd.DataFrame(frequencies).fillna(0.0).sort_values(by='total', ascending=False)
return df[sorted(df.columns, reverse=True)]
def _frequency_addresses_groupby_sum(self, group_details):
''' Anonymize the email address frequency data into a dataset summary.'''
columns = sorted(group_details.columns, reverse=True)
columns.remove('user')
sums = {c:group_details[c].sum() for c in columns}
return pd.DataFrame([sums])
def side_by_side_histogram(self, df, bins=30):
''' Comparison plots. '''
fig, axs = plt.subplots(1, 4, sharey=True, tight_layout=True)
fig.text(0.5, 0.01, 'email count', ha='center')
fig.text(0.01, 0.5, 'total users', va='center', rotation='vertical')
axs[0].hist(df['total'], bins=bins); axs[0].title.set_text('total')
axs[1].hist(df['sources_sent'], bins=bins); axs[1].title.set_text('sources_sent')
axs[2].hist(df['sources_responded'], bins=bins); axs[2].title.set_text('sources_responded')
axs[3].hist(df['sources_deleted'], bins=bins); axs[3].title.set_text('sources_deleted')
plt.show()
return
def frequency_addresses(self):
''' Find unique email address frequency by type, action, etc. '''
from_addrs_grp = self._frequency_addresses_groupby('From_Address')
from_addrs_grp_sum = self._frequency_addresses_groupby_sum(from_addrs_grp)
print(f'\n--- From Email Address Counts\n\n{from_addrs_grp.head(20)}\n\n{from_addrs_grp_sum.head(20)}')
self.side_by_side_histogram(from_addrs_grp)
to_addrs_grp = self._frequency_addresses_groupby('To_Address')
to_addrs_grp_sum = self._frequency_addresses_groupby_sum(to_addrs_grp)
print(f'\n--- To Email Address Counts\n\n{to_addrs_grp.head(20)}\n\n{to_addrs_grp_sum.head(20)}')
self.side_by_side_histogram(to_addrs_grp)
cc_addrs_grp = self._frequency_addresses_groupby('Cc_Address')
cc_addrs_grp_sum = self._frequency_addresses_groupby_sum(cc_addrs_grp)
print(f'\n--- Cc Email Address Counts\n\n{cc_addrs_grp.head(20)}\n\n{cc_addrs_grp_sum.head(20)}')
self.side_by_side_histogram(cc_addrs_grp)
return
def frequency_subject_line(self):
''' Find subject line word frequency - Could use CountVectorizer or simple dictionary for this as well '''
word_map = []
for x in tqdm(self.email_df['Subject']):
tokens = x.split()
for token in tokens:
if token not in ['Re:','RE:','FW:','-','[]','for','of','and','to','on','the','in','&']:
word_map.append({'word':token})
word_map = pd.DataFrame(word_map)
group = word_map.groupby(by=['word'])
counts = group.size().to_frame(name='count').sort_values(by='count', ascending=False)
print(f'\n--- Subject Line Token Frequency\n\n{counts.head(20)}')
counts.hist(column='count', bins=100); plt.show()
return
def frequency_actions(self):
        ''' Email action frequency counts and rough probability estimates - using pandas groupby '''
# define counts and probabilities
df = self.email_df[['Source']]
group = df.groupby('Source')
actions = group.size()
probs = actions.div(len(df))
actions['P(sent)'] = probs['sent']
actions['P(deleted)'] = probs['deleted']
actions['P(responded)'] = probs['responded']
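        # Added note: each 'P(a|b)' below is computed as P(a) * P(b), i.e. the
        # joint probability under an independence assumption, rather than a
        # true conditional probability; treat them as rough indicators only.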
actions['P(deleted|sent)'] = actions['P(deleted)'] * actions['P(sent)']
actions['P(deleted|responded)'] = actions['P(deleted)'] * actions['P(responded)']
actions['P(responded|sent)'] = actions['P(responded)'] * actions['P(sent)']
print(f'\n--- Email Action Frequency\n\n{actions.to_frame().T.head(20)}')
return
def frequency_time(self):
''' Email frequency by hour of day '''
group = self.email_df.groupby('DateTime_HOUR')
counts = group.size()
print(f'\n--- Email By Hour of Day Frequency\n\n{counts.to_frame().T.head(20)}')
self.df_plot(self.email_df, columns=['DateTime_HOUR'], types=['box','hist'], bins=24)
return
def frequency_day_of_week(self):
''' Email frequency by day of week '''
group = self.email_df[['Day']].groupby('Day')
counts = group.size()
print(f'\n--- Email By Day of Week Frequency\n\n{counts.to_frame().T.head(20)}')
self.df_plot(self.email_df, columns=['Day'], types=['box','hist'], bins=7)
return
def length_body_tokens(self):
''' Cluster body content lengths - Use sklearn.cluster.KMeans '''
lengths = []
for x in tqdm(self.email_df['Body']):
lengths.append([len(x)])
estimator = sklearn.cluster.KMeans(n_clusters=20).fit(lengths)
cluster_frequency = np.unique(estimator.labels_, return_counts=True)
cluster_centers = [round(x[0],1) for x in estimator.cluster_centers_]
df = pd.DataFrame([{'cluster_id':x,'cluster_count':cluster_frequency[1][x],'cluster_center':cluster_centers[x]} for x in cluster_frequency[0]]).sort_values(by='cluster_center')
print(f'\n--- Body Content Segmentation By Character Length\n\n{df.head(20)}')
self.xy_gen_plot(df.cluster_count, 'Email Group Count', df.cluster_center, 'Email Length (Center)', title='Email Body Content Length')
return
def graph_from_to_addresses(self):
''' Routine to generate a graph of email address relationships '''
graph = nx.DiGraph()
for from_a,to_arr in tqdm(self.email_df[['From_Address','To_Address']].values):
if not graph.has_node(from_a[0]):
graph.add_node(from_a[0])
for to_a in to_arr:
if not graph.has_node(to_a):
graph.add_node(to_a)
if not graph.has_edge(from_a[0], to_a):
graph.add_edge(from_a[0], to_a, count=1)
else:
graph.edges[from_a[0], to_a]['count'] = graph.edges[from_a[0], to_a]['count'] + 1
nx.write_graphml(graph, self.plot_save_dir + 'from_to_addresses.graphml')
#nx.draw_circular(graph)
print(f'\n--- Graph From Addresses - To Addresses Info\n\n{nx.info(graph)}')
print(f'\n--- Graph From Addresses - To Addresses Density\n\n{nx.density(graph)}')
print(f'\n--- Graph From Addresses - To Addresses Degrees\n\n{nx.degree_histogram(graph)}')
plt.show()
return
def datetime_zscore(self):
''' Quick check of probability for email date/time range '''
zscores = self.email_df['DateTime_TS'].to_frame().apply(zscore)
print(f'\n--- DateTime Z-Score, Oldest is {zscores["DateTime_TS"].max()} and Newest is {zscores["DateTime_TS"].min()}\n\n')
zscores['DateTime_TS'].hist(bins=20); plt.show()
zscores.boxplot(column='DateTime_TS'); plt.show()
return
def range_date_time(self):
''' Graph the email date/time range '''
print(f'\n--- DateTime Min \'{self.email_df.DateTime.min()}\' & Max \'{self.email_df.DateTime.max()}\' Range\n\n')
self.email_df['DateTime'].hist(bins=20); plt.show()
return
def manual_document_word_count(self):
''' Routine to investigate email content token frequencies '''
words = {}
punct_pattern = re.compile("[" + re.escape(string.punctuation) + "0-9" + "]")
for x in tqdm(self.email_df['Body']):
for y in re.sub(punct_pattern, "", x).lower().split(' '):
count = words[y]+1 if y in words.keys() else 1
words.update({y:count})
print(f'\n--- Raw Word Count - dictionary len is {len(words)}, min count of {min(words.values())}, max count of {max(words.values())}\n')
words = dict(filter(lambda item: item[1] > 5 and item[1] < 1000, words.items())) # roughly trim outliers
print(f'\n--- Trimmed Word Count - dictionary len is {len(words)}, min count of {min(words.values())}, max count of {max(words.values())}\n')
values = list(words.values())
self.xy_gen_plot(np.arange(start=0,stop=len(words), dtype=int), 'vocab', sorted(values), 'count')
self.df_plot(pd.DataFrame(values, columns=['count']), columns='count', bins=10)
return words
def _multicollinearity_transform(self, x):
''' Supporting function to encode data for VIF algorithm.'''
if x.name == 'Outside_Hours':
x = x.apply(lambda n: 1 if n else 0)
elif x.name == 'Source':
amap = {'deleted':0,'responded':1,'sent':2,'received':3}
x = x.apply(lambda n: amap[n])
elif x.name == 'Forwarded':
x = x.apply(lambda n: 1 if n else 0)
return x
def multicollinearity_test(self):
''' Check for intervariable dependence. '''
# vif check
columns = ['Day','DateTime_HOUR','DateTime_MONTH','Outside_Hours','Source','Forwarded']
vif_df = self.email_df[columns].apply(self._multicollinearity_transform)
vdf = pd.DataFrame()
vdf['features'] = columns
vdf['vif'] = [variance_inflation_factor(vif_df.values, i) for i in range(len(vif_df.columns))]
print(f'\n--- Variance Inflation Factor\n{vdf}\n')
# correlation matrix and heatmap
print(f'\n--- Correlation Matrix\n{vif_df.corr()}\n')
fig, ax = plt.subplots(1)
fig.subplots_adjust(bottom=0.3,left=0.3)
axs = sns.heatmap(vif_df.corr(), ax=ax); plt.show()
return
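

# Hedged illustration (added): VIF equals 1 / (1 - R^2) of each feature
# regressed on the remaining ones; a tiny, self-contained check on a
# near-collinear toy frame (not wired into the class above).
def _vif_toy_check():
    toy = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 4, 6, 7], 'c': [1, 0, 1, 0]})
    return [variance_inflation_factor(toy.values, i) for i in range(toy.shape[1])]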
#####################################################################
# Main
#####################################################################
config = {
'email_extracted_fn': 'extracted_emails.pd',
'data_dir': '/proto/learning/avemac/email_analysis_blog/data/',
'plot_image_save_directory': '/proto/learning/avemac/email_analysis_blog/plots/',
'manual_word_counts_fn': 'email_content_word_counts.csv',
}
ebs = BasicEmailStats(config)
x = ebs.unique_address_count()
x = ebs.range_date_time()
x = ebs.datetime_zscore()
x = ebs.frequency_addresses()
x = ebs.frequency_subject_line()
x = ebs.frequency_actions()
x = ebs.frequency_time()
x = ebs.frequency_day_of_week()
x = ebs.length_body_tokens()
x = ebs.manual_document_word_count()
x = ebs.graph_from_to_addresses()
x = ebs.multicollinearity_test()
exit() | UTF-8 | Python | false | false | 21,365 | py | 5 | email_analysis.py | 4 | 0.623309 | 0.613761 | 0 | 470 | 44.459574 | 184 |
jetpotion/Python-Algorithms | 9,792,525,469,654 | a3079ac931a3dac25eebbf38f0e019e533869eb6 | fb647075525f1389ff710082e1549f31234660eb | /AMS326/Strassen.py | 11de32cd94b64db48c22e7ba6781d9296b66c93f | [
"MIT"
]
| permissive | https://github.com/jetpotion/Python-Algorithms | d7b19a66436e510bb38bbe91ec984563365aa559 | e92b8950ced0bed46edf6c4a4a7558839205093c | refs/heads/master | 2021-07-19T15:48:42.715055 | 2020-05-25T13:47:37 | 2020-05-25T13:47:37 | 163,626,683 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | "@author: William Zhang"
"@email:William.Zhang@Stonybrook.edu"
import numpy as np
from Needle import Needle
#This method will call all the other functions
def main():
pinlength = 1
pinlength2 = 0.5
pinlength3 = (1/3)
pinlength4 = 0.25
linedistance = 20
probability1 = montecarlo(linedistance,pinlength)
probability2 = montecarlo(linedistance, pinlength2)
probability3 = montecarlo(linedistance,pinlength3)
probability4 = montecarlo(linedistance,pinlength4)
print("Approximation with a pinlength 1: " + str(probability1))
print("Approximation with a pinlength 0.5: " +str( probability2))
print("Approximation with a pinlength 0.333:" +str( probability3))
print("Approximation with a pinlength 0.25: " +str( probability4))
#This will throw the needle randomly
def throwneedle(length,spacebetween):
return length,np.random.uniform(0,spacebetween),np.random.uniform(0,np.pi)
#Check intersections
def checkintersections(center,length,angle,linedistance):
    # Check if the upper end of the needle crosses the line above
    if center + length / 2 * np.sin(angle) >= linedistance:
        return True
    # Check if the lower end crosses the line below (the original tested
    # 'center + ...', which can never be <= 0 for angle in [0, pi])
    elif center - length / 2 * np.sin(angle) <= 0:
        return True
    # The needle does not intersect at all
    else:
        return False
def montecarlo(linedistance, pinlength):
#This will be the number of iterations for our simulations
numberofiterations = 3000000
counter = 0
for x in range(numberofiterations):
#Count the ones that do intersect
length,center,angle = throwneedle(pinlength,linedistance)
if(checkintersections(center,length,angle,linedistance)) :
counter += 1
    return counter / numberofiterations
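

# Hedged addition: the closed-form Buffon probability for a "short" needle
# (pinlength <= linedistance) is P(cross) = 2 * L / (pi * d); useful for
# sanity-checking the Monte Carlo estimate above.
def analytic_probability(linedistance, pinlength):
    return (2 * pinlength) / (np.pi * linedistance)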
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,853 | py | 25 | Strassen.py | 19 | 0.698327 | 0.669185 | 0 | 48 | 37.5625 | 79 |
fangchi/python_project | 19,318,762,922,123 | 8153c282d2f25d474179b3559293380d70f4b0dc | bfc366a5ab8e255bd1db166cfe945021ab1ccde6 | /marchineLearning/ch3/3.4/solution.py | 7e48dc068d8c317ac338303dfd5d14b0aa320644 | []
| no_license | https://github.com/fangchi/python_project | c204cdc81f8acfb0feae6cd618b460ccab66270a | 8458e9b0e555212dab91f0000617cf2d573af437 | refs/heads/master | 2021-04-18T21:23:07.026291 | 2018-04-24T17:37:20 | 2018-04-24T17:37:20 | 126,780,574 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
# https://blog.csdn.net/Snoopy_Yuan/article/details/64131129
# myfont = matplotlib.font_manager.FontProperties(fname="/Library/Fonts/华文仿宋.ttf")#"/Library/Fonts/Songti.ttc")
# sns.set(style="white", color_codes=True,font=myfont.get_name())
#
# iris = sns.load_dataset("data",data_home="/Users/fangchi/PycharmProjects/python_project/marchineLearning/ch3/3.4")
#
#
# # iris.plot(kind="scatter", x="萼片_长度", y="萼片_宽度")
# sns.pairplot(iris,hue='品种')
# plt.show()
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
iris = sns.load_dataset("data",data_home="/Users/fangchi/PycharmProjects/python_project/marchineLearning/ch3/3.4")
X = iris.values[50:150,0:4]
y = iris.values[50:150,4]
# log-regression lib model
log_model = LogisticRegression()
# 10-fold cross-validation: the data set is split into ten folds; each fold
# in turn serves as the test set while the other nine are used for training.
y_pred = cross_val_predict(log_model, X, y, cv=10)
print("十折交叉验证:",metrics.accuracy_score(y, y_pred))
# Leave-one-out cross-validation (LOOCV): each sample in turn is held out as the test set
from sklearn.model_selection import LeaveOneOut
loo = LeaveOneOut()
accuracy = 0;
for train, test in loo.split(X):
log_model.fit(X[train], y[train]) # fitting
y_p = log_model.predict(X[test])
if y_p == y[test] : accuracy += 1
print(accuracy / np.shape(X)[0])
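
# Hedged alternative (added): the same 10-fold estimate in a single call.
from sklearn.model_selection import cross_val_score
print("10-fold CV accuracy (cross_val_score):", cross_val_score(log_model, X, y, cv=10).mean())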
| UTF-8 | Python | false | false | 1,643 | py | 14 | solution.py | 13 | 0.740843 | 0.7132 | 0 | 38 | 37.052632 | 116 |
kongjingchun/gift | 16,088,947,514,701 | 3977febedc6f690a82605e7818ea444c06564884 | b01e55da3542cd3ec298ef4d5d3da4b01d002881 | /admin.py | 47aae7043dd75ac5c2a99590dff2dfc0fe42a285 | []
| no_license | https://github.com/kongjingchun/gift | 85a3a3ac3dc6a4e50bb81d7e86c582a5f0bda5f5 | 36c380d74d88fc4e6e5d14fb1bceb941446e139f | refs/heads/master | 2023-04-09T20:57:09.474169 | 2021-04-19T11:06:42 | 2021-04-19T11:06:42 | 355,089,670 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
# @Create time: 2021/4/6 3:13 下午
# @Author: KongJingchun
# @remark:
from base import Base
import os
from common.error import NotUserError, UserActiveError, NotAdminError, RoleError
class Admin(Base):
def __init__(self, username, user_json, gift_json):
self.username = username
super().__init__(user_json, gift_json)
self.get_user()
    # Fetch the user record
def get_user(self):
users = self._Base__read_users()
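        # Note (added): self._Base__read_users() reaches the name-mangled
        # "private" helper __read_users defined on the parent class Base.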
current_user = users.get(self.username)
if current_user is None:
raise NotUserError('not user %s' % self.username)
self.username = current_user.get('username')
self.active = current_user.get('active')
self.role = current_user.get('role')
if self.active is False:
raise UserActiveError('%s active is False' % self.username)
if self.role != 'admin':
raise RoleError('User role not admin')
self.user = current_user
    # Add a user
def add_user(self, username, role):
self.__check_admin()
self._Base__write_user(username=username, role=role)
    # Toggle a user's active status
def update_user_active(self, username):
self.__check_admin()
self._Base__change_active(username=username)
    # Change a user's role
def update_user_role(self, username, role):
self.__check_admin()
self._Base__change_role(username=username, role=role)
    # Add a gift
def add_gift(self, first_level, second_level, gift_name, gift_count):
self.__check_admin()
self._Base__add_gift(first_level=first_level, second_level=second_level, gift_name=gift_name,
gift_count=gift_count)
    # Delete a gift
def delete_gift(self, first_level, second_level, gift_name):
self.__check_admin()
self._Base__delete_gift(first_level=first_level, second_level=second_level, gift_name=gift_name)
    # Update a gift's count
def update_gift(self, first_level, second_level, gift_name, gift_count):
self.__check_admin()
self._Base__update_gift(first_level=first_level, second_level=second_level, gift_name=gift_name,
gift_count=gift_count, is_admin=True)
    # Verify that the current user has the admin role
def __check_admin(self):
self.get_user()
if self.role != 'admin':
raise NotAdminError('%s role is not admin ' % self.username)
if __name__ == '__main__':
user_json_path = os.path.join(os.getcwd(), 'storage', 'user.json')
gift_json_path = os.path.join(os.getcwd(), 'storage', 'gift.json')
admin = Admin('kjc', user_json_path, gift_json_path)
# print(admin.username, admin.role)
# admin.update_user_role(username='lxq', role='normal')
admin.update_gift(first_level='level2', second_level='level3', gift_name='ipad1', gift_count=100)
| UTF-8 | Python | false | false | 2,869 | py | 9 | admin.py | 7 | 0.619561 | 0.613808 | 0 | 76 | 35.592105 | 104 |
hut/calq | 6,296,422,074,667 | 15bcb54bdb7ff816f1a612bf69e3f3bc5945d877 | f03bc0a2117ecfc0c69476e2af46f46e3a42262e | /src/ngstools/analyze_qual_sam.py | 63ac5b3fa4ba7d0267c7b5714e92df1713432a23 | []
| no_license | https://github.com/hut/calq | a9fff0e9f26752c0c6e1c5d4d7ca00778a1b7990 | 6292da42726904d30b0fbf157af83081b4b45296 | refs/heads/master | 2020-03-19T12:08:44.313659 | 2018-05-09T03:37:18 | 2018-05-09T03:37:18 | 136,498,577 | 0 | 0 | null | true | 2018-06-07T15:41:22 | 2018-06-07T15:41:21 | 2018-05-09T03:37:44 | 2018-05-09T03:37:43 | 19,589 | 0 | 0 | 0 | null | false | null | #!/usr/bin/env python
import sys
# Usage
if len(sys.argv) != 2:
sys.exit("Usage: python {} file.sam".format(sys.argv[0]))
# Get SAM file
sam_file_name = sys.argv[1]
if not sam_file_name.endswith(".sam"):
sys.exit("SAM file name must end with '.sam'")
sam_file = open(sam_file_name, 'r')
print("SAM file: {}".format(sam_file_name))
# Initialize statistics
qual_min = sys.maxint
qual_max = -sys.maxint - 1
qual_size = 0
total_line_cnt = 0
header_line_cnt = 0
alignment_line_cnt = 0
# Parse SAM file
while 1:
line = sam_file.readline()
if not line:
break
if line[0] != '@':
fields = line.split('\t')
qual = fields[10]
if len(qual) > 0:
qual_size += len(qual)
for q in qual:
if ord(q) > qual_max:
qual_max = ord(q)
if ord(q) < qual_min:
qual_min = ord(q)
else:
sys.exit("Error: No quality scores in line {}".format(total_line_cnt))
alignment_line_cnt += 1
else:
header_line_cnt += 1
total_line_cnt += 1
# Print statistics
print("Lines: {}".format(total_line_cnt))
print(" Header lines: {}".format(header_line_cnt))
print(" Alignment lines: {}".format(alignment_line_cnt))
print("Quality score range: [{}:{}]".format(qual_min, qual_max))
print("Quality score size: {} bytes".format(qual_size))
sys.exit()
| UTF-8 | Python | false | false | 1,403 | py | 55 | analyze_qual_sam.py | 47 | 0.575196 | 0.563792 | 0 | 55 | 24.509091 | 82 |
dannymulligan/Project_Euler.net | 19,447,611,948,911 | 49136a1bee9233bbb73a2972773f640f9c449db3 | e0ef034b87ade27dcef6afb15f6cde566fe63770 | /Prob_146/prob_146a.py | 9ce4bee23877999cab68d8781681d2daf22123c5 | []
| no_license | https://github.com/dannymulligan/Project_Euler.net | 621cae9622d224f68bb17d7033c9b3245d86a4cf | 4368aac279996e5972c479ed04fb6be23112650c | refs/heads/master | 2020-12-24T05:21:23.250238 | 2020-11-22T18:05:01 | 2020-11-22T18:05:01 | 1,063,955 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# coding=utf-8
#
# Project Euler.net Problem 146
#
# Investigating a Prime Pattern
#
# The smallest positive integer n for which the numbers n^2+1, n^2+3,
# n^2+7, n^2+9, n^2+13, and n^2+27 are consecutive primes is 10. The
# sum of all such integers n below one-million is 1242490.
#
# What is the sum of all such integers n below 150 million?
#
#
# Solved ??/??/10
# ?? problems solved
# Position #??? on level ?
# We only have to check even values of N. Odd values of N will yield
# odd numbers when squared. Adding one to an odd number will produce
# an even number so n^2+1 will not be a prime.
# This program is able to search to 9 million in 62 seconds. I need to
# make it about 15-16x faster.
#
# bash-3.2$ ./prob_146a.py
# There are 602489 primes less than 9000000
# The highest prime is 8999993
# Time taken to calculate primes = 8.095391 seconds
# n = 10: n^2+1=101, n^2+3=103, n^2+7=107, n^2+9=109, n^2+13=113, n^2+27=127
# n = 315410: n^2+1=99483468101, n^2+3=99483468103, n^2+7=99483468107, n^2+9=99483468109, n^2+13=99483468113, n^2+27=99483468127
# n = 927070: n^2+1=859458784901, n^2+3=859458784903, n^2+7=859458784907, n^2+9=859458784909, n^2+13=859458784913, n^2+27=859458784927
# Calculating 1000000, last 1,000,000 numbers took 6.529287 seconds.
# Calculating 2000000, last 1,000,000 numbers took 5.812202 seconds.
# n = 2525870: n^2+1=6380019256901, n^2+3=6380019256903, n^2+7=6380019256907, n^2+9=6380019256909, n^2+13=6380019256913, n^2+27=6380019256927
# Calculating 3000000, last 1,000,000 numbers took 6.298078 seconds.
# Calculating 4000000, last 1,000,000 numbers took 5.929381 seconds.
# Calculating 5000000, last 1,000,000 numbers took 5.714539 seconds.
# Calculating 6000000, last 1,000,000 numbers took 5.689378 seconds.
# Calculating 7000000, last 1,000,000 numbers took 5.728071 seconds.
# Calculating 8000000, last 1,000,000 numbers took 5.770876 seconds.
# n = 8146100: n^2+1=66358945210001, n^2+3=66358945210003, n^2+7=66358945210007, n^2+9=66358945210009, n^2+13=66358945210013, n^2+27=66358945210027
# Answer = 11914460
# Time taken = 62.332171 seconds
import sys
import time
LIMIT_PRIME = 9000000
prime_table = [1]*LIMIT_PRIME # table of smallest factor
primes = []
def calculate_primes():
i = 2
while (i < (LIMIT_PRIME/2)):
if (prime_table[i] == 1):
primes.append(i)
j = i*2
while (j < LIMIT_PRIME):
prime_table[j] = i
j += i
i += 1
while (i < LIMIT_PRIME):
if (prime_table[i] == 1):
primes.append(i)
i += 1
def is_prime(n):
if (n > LIMIT_PRIME*LIMIT_PRIME):
print "Error: checking n = {0}, which is larger than LIMIT_PRIME^2 (LIMIT_PRIME = {1})".format(n, LIMIT_PRIME)
sys.exit()
elif (n < LIMIT_PRIME):
return (prime_table[n] == 1)
else:
for i in primes:
if ((n % i) == 0):
return False
if (i*i > n):
return True
return True
# Quick but not 100% accurate test for primeness
# If this test returns False, definitely not a prime
# If this test returns True, might be a prime
def q0_prime(n):
# Test for divisibility by first 10 primes
for p in primes[0:10]:
if (n == p): return True
if (n%p == 0): return False
return True
# Quick but not 100% accurate test for primeness
# If this test returns False, definitely not a prime
# If this test returns True, might be a prime
def q1_prime(n):
# Test for divisibility by first 100 primes
for p in primes[10:100]:
if (n == p): return True
if (n%p == 0): return False
return True
# Quick but not 100% accurate test for primeness
# If this test returns False, definitely not a prime
# If this test returns True, might be a prime
def q2_prime(n):
# Test for divisibility by first 1,000 primes
for p in primes[100:1000]:
if (n == p): return True
if (n%p == 0): return False
return True
# Quick but not 100% accurate test for primeness
# If this test returns False, definitely not a prime
# If this test returns True, might be a prime
def q3_prime(n):
# Test for divisibility by first 10,000 primes
for p in primes[1000:10000]:
if (n == p): return True
if (n%p == 0): return False
return True
# Quick but not 100% accurate test for primeness
# If this test returns False, definitely not a prime
# If this test returns True, might be a prime
def q4_prime(n):
# Test for divisibility by first 100,000 primes
for p in primes[10000:100000]:
if (n == p): return True
if (n%p == 0): return False
return True
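
# Hedged refactoring sketch (added, not used below): the staged q0..q4
# filters are all trial division over a slice of the prime list and could
# be collapsed into one parametrized helper.
def quick_prime_slice(n, lo, hi):
    for p in primes[lo:hi]:
        if (n == p): return True
        if ((n % p) == 0): return False
    return True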
start_time = time.clock()
calculate_primes()
print "There are", len(primes), "primes less than", LIMIT_PRIME
#print "They are", primes
print "The highest prime is", primes[-1]
print "Time taken to calculate primes =", time.clock() - start_time, "seconds"
answer = 0
#answer = 10 + 315410 + 927070 + 2525870 + 8146100 # All solutions below n = 10 million
prev_time = time.clock()
for n in xrange(2,9000000,2):
if ((n % 1000000) == 0):
curr_time = time.clock()
print "Calculating {0}, last 1,000,000 numbers took {1} seconds.".format(n, curr_time-prev_time)
prev_time = curr_time
nn = n*n
# Quick tests to disallow many numbers before we do the expensive primaility test
if not(q0_prime(nn+1) & q0_prime(nn+3) & q0_prime(nn+7) & q0_prime(nn+9) & q0_prime(nn+13) & q0_prime(nn+27)):
continue
if not(q1_prime(nn+1) & q1_prime(nn+3) & q1_prime(nn+7) & q1_prime(nn+9) & q1_prime(nn+13) & q1_prime(nn+27)):
continue
if not(q2_prime(nn+1) & q2_prime(nn+3) & q2_prime(nn+7) & q2_prime(nn+9) & q2_prime(nn+13) & q2_prime(nn+27)):
continue
if not(q3_prime(nn+1) & q3_prime(nn+3) & q3_prime(nn+7) & q3_prime(nn+9) & q3_prime(nn+13) & q3_prime(nn+27)):
continue
if not(q4_prime(nn+1) & q4_prime(nn+3) & q4_prime(nn+7) & q4_prime(nn+9) & q4_prime(nn+13) & q4_prime(nn+27)):
continue
# Accurate but slow primaility test
if not(is_prime(nn+1) & is_prime(nn+3) & is_prime(nn+7) & is_prime(nn+9) & is_prime(nn+13) & is_prime(nn+27)):
continue
# Make sure that the prime numbers are consecutive
if (is_prime(nn+5) | is_prime(nn+11) | is_prime(nn+15) | is_prime(nn+17) | is_prime(nn+19) | is_prime(nn+21) | is_prime(nn+23) | is_prime(nn+25)):
continue
print "n = {0}: n^2+1={1}, n^2+3={2}, n^2+7={3}, n^2+9={4}, n^2+13={5}, n^2+27={6}".format(n, (nn+1), (nn+3), (nn+7), (nn+9), (nn+13), (nn+27))
answer += n
print "Answer = ", answer
print "Time taken =", time.clock() - start_time, "seconds"
sys.exit()
| UTF-8 | Python | false | false | 6,724 | py | 457 | prob_146a.py | 345 | 0.638459 | 0.496728 | 0 | 181 | 36.149171 | 150 |
youjia4321/travel315 | 19,267,223,301,895 | ae87e49a2c7fa1b253c0dae04c41bbbc74336fe8 | bd756862c23dbb7937303cd3deb872459790220c | /zhilianzhaoping/javaE.py | 52ce84bfb8097e3dac9e22973d8646c8807e168d | []
| no_license | https://github.com/youjia4321/travel315 | c2f3bb28e80b3eefdfda91367fcf09952cf991d4 | a17f1750bae97fc9ff971d6944fa3a10eb5161ea | refs/heads/master | 2020-08-13T08:11:19.865380 | 2019-10-14T03:32:32 | 2019-10-14T03:32:32 | 214,937,339 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
import re
from lxml import etree
import csv
from jieba import analyse
import pandas as pd
import numpy as np
analyse.set_stop_words("stopwords.txt")  # stop-word list
# TF-IDF keyword-extraction interface
tfidf = analyse.extract_tags
def parse(url):
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'
' Chrome/69.0.3497.100 Safari/537.36',
}
try:
resp = requests.get(url, headers=headers)
html = resp.text
return html
    except requests.RequestException:
        return None  # callers must handle a failed request
def dealJson(data):
results = json.loads(data)
jsonData = results["data"]["results"]
return jsonData
def dealContent(jsonData, writer, total_words):
for data in jsonData:
info = {}
# content = []
info['position'] = data['city']['display'].replace(" ", "")
# info['positionUrl'] = data['positionURL']
info['salary'] = data['salary'].replace(" ", "")
info['jobName'] = data['jobName'].replace(" ", "")
if re.findall("java", data['jobName'].lower()):
# print(data['positionURL'])
job = dealPositionInfo(data['positionURL'])
count(job, total_words=total_words)
info['jobRequirement'] = job
writer.writerow((info['position'], info['salary'], info['jobName'], info['jobRequirement']))
else:
continue
def dealPositionInfo(url):
resp = parse(url=url)
html = etree.HTML(resp)
jobRequirement = html.xpath("//div[@class='pos-ul']//text()")
jobRequirement_words = ''.join(jobRequirement).replace("\xa0", "").replace("\n", "").replace(" ", "")
keyword = keywords(jobRequirement_words)
return keyword
def count(data, total_words):
for word in data:
total_words.append(word)
def keywords(text):
key = []
keywords = tfidf(text)
for keyword in keywords:
key.append(keyword)
return key
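# Illustration (added): tfidf(text) wraps jieba.analyse.extract_tags, which
# returns the topK highest-weighted keywords of the text (topK defaults to 20).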
def countE(total):
n = np.unique(total, return_counts=True)
s = pd.Series(data=n[1], index=n[0])
result = s.sort_values(ascending=False)
df = pd.DataFrame(result).reset_index()
    df.columns = ['keyword', 'frequency']
    to_save = df[df["frequency"] >= 50]
    to_save.to_csv("keywords_tongji.csv", index=0)
if __name__ == '__main__':
from datetime import datetime
startTime = datetime.now()
total_words = []
fp = open("java.csv", 'a', newline='', encoding='utf-8')
writer = csv.writer(fp)
    writer.writerow(('location', 'salary', 'job_title', 'job_requirements'))
for page in range(35):
url = "https://fe-api.zhaopin.com/c/i/sou?start="+str(page*90)+"&pageSize=90&cityId=489&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=java%E5%B7%A5%E7%A8%8B%E5%B8%88&kt=3&_v=0.37975790&x-zp-page-request-id=d537080296884aafa75b99b445371ad6-1551230768934-382901"
print(url)
data = parse(url=url)
jsonData = dealJson(data)
# print(jsonData)
dealContent(jsonData, writer, total_words)
print("正在统计词频...")
# print(total_words)
countE(total_words)
endTime = datetime.now()
print(startTime, endTime)
print("统计完成...")
| UTF-8 | Python | false | false | 3,234 | py | 13 | javaE.py | 8 | 0.609316 | 0.574144 | 0.000317 | 103 | 29.631068 | 306 |
chengdg/weizoom | 13,099,650,267,515 | 352205595e1c5135ca74e5ca32523b1bbbd05663 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/features/steps/market_tools_template_message_operate_template_message_steps.py | e5a5a28f2c3b41ebb3b7ac22b5f08b41fd106d32 | []
| no_license | https://github.com/chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'slzhu'
import json
import time
from test import bdd_util
from features.testenv.model_factory import *
from market_tools.tools.template_message.models import *
@given(u"{user}已有模板消息")
def step_impl(context, user):
MarketToolsTemplateMessage.objects.all().delete()
MarketToolsTemplateMessageDetail.objects.all().delete()
if hasattr(context, 'client') is False:
context.client = bdd_util.login(user, password=None, context=context)
context.template_message_details = json.loads(context.text)
context.message_detail = {}
for template_message_detail in context.template_message_details:
industry_type = INDUSTRY2TYPE.get(template_message_detail['industry'])
template_message = MarketToolsTemplateMessage.objects.create(
industry = industry_type,
title = template_message_detail['headline']
)
type = 1
if template_message_detail['type'] == u'主营行业':
type = 0
status = 1
if template_message_detail['status'] == u'未启用':
status = 0
message_detail = MarketToolsTemplateMessageDetail.objects.create(
owner = context.client.user,
template_message = template_message,
industry = industry_type,
template_id = template_message_detail['template_id'],
first_text = '',
remark_text = '',
type = type,
status = status
)
key = '%d-%s-%s' % (context.client.user.id, template_message_detail['industry'], template_message_detail['headline'])
context.message_detail[key] = message_detail
@when(u"{user}给'{industry}'行业标题为'{title}'的模板消息添加内容")
def step_impl(context, user, industry, title):
detail = json.loads(context.text)
key = '%d-%s-%s' % (context.client.user.id, industry, title)
params = {
"id": context.message_detail[key].id,
"template_id": detail['template_id'],
"first_text": detail['first'],
"remark_text": detail['remark'],
}
context.client.post('/market_tools/template_message/api/detail/update/', params)
@when(u"{user}修改'{industry}'行业标题为'{title}'的状态")
def step_impl(context, user, industry, title):
detail = json.loads(context.text)
key = '%d-%s-%s' % (context.client.user.id, industry, title)
status = 1
if detail['status'] == u'未启用':
status = 0
message_detail = context.message_detail[key]
params = {
"id": message_detail.id,
"template_id": detail['template_id'],
"first_text": 'first',
"remark_text": 'remark',
"status": status,
"action": 'enable'
}
context.client.post('/market_tools/template_message/api/detail/update/', params)
@then(u"{user}查看'{industry}'行业标题为'{title}'的模板消息")
def step_impl(context, user, industry, title):
expected = json.loads(context.text)
key = '%d-%s-%s' % (context.client.user.id, industry, title)
detail = MarketToolsTemplateMessageDetail.objects.get(id=context.message_detail[key].id)
actual = {}
actual['template_id'] = detail.template_id
actual['first'] = detail.first_text
actual['remark'] = detail.remark_text
bdd_util.assert_list(sorted(expected), sorted(actual)) | UTF-8 | Python | false | false | 3,400 | py | 3,596 | market_tools_template_message_operate_template_message_steps.py | 2,097 | 0.63054 | 0.628112 | 0 | 89 | 36.022472 | 125 |
Hanlen520/AutoTest-5 | 8,796,093,049,782 | c6f5d7232f6fe846d309564e65e6376feeab6e77 | b1fc1cb678e5e19ba64fcd06003b40e8d2ca5e5b | /InterfaceTest/db_init/test_data.py | 4aa7a7129680c09c1f003753eec32338b99ce5e0 | []
| no_license | https://github.com/Hanlen520/AutoTest-5 | 82e0aa90b13b6fcbafd9ac66d8a2975cdd0fd943 | 0b1f9a527cb50f76c17749c913b096053f695a67 | refs/heads/master | 2020-05-26T11:07:42.333635 | 2018-08-24T11:27:43 | 2018-08-24T11:27:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
# Author: "EternalSunshine"
| UTF-8 | Python | false | false | 42 | py | 17 | test_data.py | 16 | 0.738095 | 0.714286 | 0 | 2 | 20 | 26 |
arashout/CTCI | 15,925,738,745,844 | 275c0ec5d46b91c4e3eac677de8c261e71826820 | c9904106d9b2a9bc4e5e3b0185b2ba1dd083d978 | /algo_problems/ctci/42.py | c79784022f364deec134a8bfaa2906037f7b2e38 | []
| no_license | https://github.com/arashout/CTCI | 9521509795ebdfe4bc3e57417c3434a493453564 | cd3dc66ad0ece624f8d26be72023829b22d18e76 | refs/heads/master | 2021-05-04T02:54:35.007751 | 2019-06-16T22:11:44 | 2019-06-16T22:11:44 | 120,368,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from algo_problems.utils.testing import Solution, Test
from algo_problems.utils.tree import BinaryTreeNode, is_bst, parse_tree
from typing import List
def create_bst(sorted_list: List[int]) -> BinaryTreeNode:
def helper(start: int, end: int) -> BinaryTreeNode:
if start > end:
return None
mid = (start + end)//2
node = BinaryTreeNode(sorted_list[mid])
node.left = helper(start, mid-1)
node.right = helper(mid+1, end)
return node
return helper(0, len(sorted_list) - 1)
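# e.g. create_bst([1, 2, 3, 4, 5]) roots the tree at 3, with 1 and 4 as its
# children and 2 and 5 as their right children; that is the shape the test
# below encodes as parse_tree('3(1(,2),4(,5))').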
Solution(
create_bst,
[
Test(
[
[1, 2, 3, 4, 5]
],
parse_tree('3(1(,2),4(,5))'),
None
)
]
) | UTF-8 | Python | false | false | 741 | py | 66 | 42.py | 54 | 0.531714 | 0.511471 | 0 | 35 | 20.2 | 71 |
mmcintyre1/advent_of_code_2020_python | 1,211,180,798,656 | 0d6039d79938dbcb622404a9d0a93d96a53c54eb | 637715f53689a14d57d8178cfc5f6bba7f7e3529 | /day_13/part_one.py | 51c87ef3ca9efa44819a165f91ed409d9d57123e | []
| no_license | https://github.com/mmcintyre1/advent_of_code_2020_python | 9d2ea85ef7df3b4cf0a12e7ff10684098bf9ff60 | 07cc9860147b62243f1d71f4e9aa58aaf3f3d432 | refs/heads/master | 2023-02-02T09:57:52.700604 | 2020-12-22T02:25:28 | 2020-12-22T02:25:28 | 317,666,223 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pathlib
def main():
schedule = pathlib.Path('puzzle_input.txt').read_text()
earliest_departure, available_buses = parse_schedule(schedule)
print(get_closest_arrival(earliest_departure, available_buses))
def parse_schedule(unparsed_schedule):
earliest_departure, available_buses = unparsed_schedule.split('\n')
available_buses = [
int(bus.strip())
for bus in available_buses.split(',')
if bus.strip() and bus != 'x'
]
return int(earliest_departure.strip()), available_buses
def get_closest_number(number, target):
return number * (target // number + 1)
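# e.g. get_closest_number(7, 939) -> 945, the first multiple of 7 strictly
# after the target (exact multiples roll over to the next departure).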
def get_closest_arrival(earliest_departure, buses):
closest_arrivals = {}
for bus_id in buses:
closest_arrivals[bus_id] = get_closest_number(bus_id, earliest_departure)
closest = min(closest_arrivals.keys(), key=lambda k: closest_arrivals[k])
return (closest_arrivals[closest] - earliest_departure) * closest
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 998 | py | 50 | part_one.py | 49 | 0.673347 | 0.672345 | 0 | 35 | 27.514286 | 81 |
krish-bhanushali/FunWithPython | 18,425,409,717,627 | e895a055458b2d247d6706f7eb593965147b87c6 | e954938f58d1d9385a4a41fdcc256f126cf51e9c | /Text To Audio Convertor/TextToAudio.py | c8e72f43737154ef18ea1ec0c0226f7d7b983b64 | []
| no_license | https://github.com/krish-bhanushali/FunWithPython | 1a34155533f59defcd566822d6360336248edb57 | 658e0d061399197da9889695a3ac0138ac8fca54 | refs/heads/master | 2023-04-22T22:29:55.766089 | 2021-05-18T15:55:52 | 2021-05-18T15:55:52 | 299,307,938 | 2 | 2 | null | false | 2021-05-14T11:51:26 | 2020-09-28T12:52:35 | 2021-05-10T06:06:35 | 2021-05-14T11:51:25 | 123,049 | 0 | 1 | 0 | Python | false | false | #pip install -r requirements.txt
import pyttsx3
import PyPDF2
#add pdf files in the project directory
book=open('pdf-test.pdf', 'rb')
pdfreader=PyPDF2.PdfFileReader(book)
pages=pdfreader.numPages
#print(pages)
speaker=pyttsx3.init()
page=pdfreader.getPage(0)
text=page.extractText()
print('saying')
speaker.say(text)
speaker.runAndWait()
| UTF-8 | Python | false | false | 340 | py | 14 | TextToAudio.py | 11 | 0.785294 | 0.770588 | 0 | 16 | 20.25 | 39 |
macedo33/FoodFinder | 11,639,361,409,043 | d1f3c760e2e11900f99b7c5bd486bea5d9b40038 | 9e2a73b8d48cc8b125793137c34accee63792cd1 | /drf_base/env/bin/django-admin.py | d51b3f2921f98c3b58d9b32d20946ae18fcc4110 | []
| no_license | https://github.com/macedo33/FoodFinder | e41a00d8e999198a6cce468babeb1ac9190458a8 | 3deee7afb0ec77ea052a0e708bdcaf2c98bf8ecf | refs/heads/master | 2023-05-24T06:10:16.646911 | 2018-07-06T12:51:51 | 2018-07-06T12:51:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/Users/andremachado/Desktop/drf_base/env/bin/python2.7
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| UTF-8 | Python | true | false | 163 | py | 14 | django-admin.py | 9 | 0.717791 | 0.705521 | 0 | 5 | 31.6 | 56 |
M-Sanli/WaterStones.com_Scraping | 11,338,713,709,639 | 8c87a46b69596a3f25092236194664a83bbb446b | 4bf1b5727df099823c46602c61cba32e3a562f9d | /Analyze and Description/Analysis.py | ffa6ac47d8fb540e44a9cc0c240bc8df1e4a2192 | []
| no_license | https://github.com/M-Sanli/WaterStones.com_Scraping | 43ca348ac0ec070f18b8f89376ad5385b42647ad | bc44b408ae1240152631d843ff451503876e092f | refs/heads/main | 2023-04-25T11:11:40.057759 | 2021-05-09T21:58:47 | 2021-05-09T21:58:47 | 363,954,916 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Required libraries:
import pandas as pd
import matplotlib.pyplot as pl
# Reading the data from csv file:
df = pd.read_csv('data_scrapy.csv', sep = ";")
# Mean of the book prices:
df["Price of the Book"].mean()
# Max price of the books:
df["Price of the Book"].max()
# Min price of the books:
df["Price of the Book"].min()
# First 4 lines of the data frame:
df["Price of the Book"].head(4)
# Visualization of the data that we scraped:
df['Price of the Book'].plot.hist()
pl.title("Histogram of Scraped Data")
pl.xlabel("Prices")
pl.ylabel("Number of Books")
| UTF-8 | Python | false | false | 618 | py | 6 | Analysis.py | 4 | 0.676375 | 0.671521 | 0 | 40 | 14.425 | 46 |
cash2one/xai | 10,325,101,404,511 | a5b0e7620b5224190e5ee26b3d463200b475c216 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_spokes.py | 999a862a1aaf91cb0a614c963502d3e25a4e1eb4 | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# class header
class _SPOKES():
    def __init__(self,):
        self.name = "SPOKES"
        self.definitions = 'spoke'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['spoke']
| UTF-8 | Python | false | false | 218 | py | 37,275 | _spokes.py | 37,266 | 0.591743 | 0.591743 | 0 | 13 | 15.615385 | 26 |
Honoo/CViA | 8,856,222,566,466 | b4049fca6b1f0b0f43708072fe114eaaad8a05c1 | b85c506e92589f81875e1e6f78a133f4c4e6f2c9 | /CViA/backend/JobMatcher/job_matcher.py | f9310982fa8af641557940dfc8d4968e6198a33f | []
| no_license | https://github.com/Honoo/CViA | 4cb217107b7060afd6a2ceb57764dd3929a3213c | 096fea13b854f6aca8385d00b36e2932f2b77970 | refs/heads/master | 2020-05-01T12:58:33.206717 | 2015-11-10T05:20:25 | 2015-11-10T05:20:25 | 42,951,869 | 1 | 0 | null | false | 2015-10-04T07:33:30 | 2015-09-22T18:11:25 | 2015-10-04T03:30:14 | 2015-10-04T07:33:30 | 120 | 0 | 0 | 0 | Python | null | null | from sources.json_data_source import JSONDataSource
from filters.education_filter import EducationFilter
from filters.experience_filter import ExperienceFilter
from filters.skills_filter import SkillsFilter
from filters.languages_filter import LanguagesFilter
class JobMatcher(object):
def __init__(self, query, weights={}):
self.query = query
self.weights = weights
# Returns individual and total scores
    def score(self, resume):
        scores = {}
        for filter_cls in self.steps():
            attribute = filter_cls.__name__.replace("Filter", "").lower()
            scores[attribute] = filter_cls.run(resume[attribute].lower(), self.query[attribute])
        scores = self.weight(scores)
scores['total'] = sum(scores.values())
return {
'resume': resume,
'score': scores
}
# Declarative steps of the pipeline
    def steps(self):
return [
EducationFilter,
ExperienceFilter,
SkillsFilter,
LanguagesFilter
]
# Returns scores weighted by user-specified weights
    def weight(self, scores):
        for attribute in scores.keys():
            if attribute in self.weights:
                scores[attribute] = self.weights[attribute] * scores[attribute]
return scores
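    # Minimal usage sketch (the query/weights keys mirror the filter names
    # above; the exact value formats expected by each filter are assumptions):
    # matcher = JobMatcher(query={'education': 'msc', 'experience': '3 years',
    #                             'skills': 'python', 'languages': 'english'},
    #                      weights={'skills': 2.0})
    # ranked = matcher.score({'education': 'MSc CS', 'experience': '3 years',
    #                         'skills': 'Python, SQL', 'languages': 'English'})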
| UTF-8 | Python | false | false | 1,313 | py | 36 | job_matcher.py | 28 | 0.628332 | 0.628332 | 0 | 39 | 32.666667 | 91 |
LiliBourgeois/Zappy | 7,808,250,569,990 | ee88b1affb8d4cf78783f6a57d48b151a3dc8847 | 1ad9967bda4a2a5adbf21583d5019f725ed9ce58 | /src/connection.py | f22ec7dc4d6afa83216d3925955bb4562f83fa5c | []
| no_license | https://github.com/LiliBourgeois/Zappy | b2706140953d69d8e641f988606d2b3fd420d477 | dbab93f44bcb6d7c9c29f7e8d3dd2f882df5bafc | refs/heads/main | 2023-07-06T00:33:43.379066 | 2021-08-12T17:36:18 | 2021-08-12T17:36:18 | 395,395,195 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python3
# coding: utf-8
import socket
from globals import setalive, getalive, setmessage, setlevel, getlevel, setm_direction
BUFFER = 1
def get_data(s):
data = ""
tmp = ""
while "\n" not in tmp:
tmp = s.recv(BUFFER).decode("utf8")
data = data + tmp
return data
def client_connect(arguments, s):
data = get_data(s)
team_name = arguments["team_name"] + "\n"
s.sendall(team_name.encode("utf-8"))
data = get_data(s)
if "No more place in this team" in data:
return None
data = get_data(s)
if "No more place in this team" in data:
return None
return s
def parse_message(data):
    x = data.split(",", 1)
    # the server prefixes broadcasts with "message K,"; pull out the direction
    direction = x[0].replace("message", "").strip()
    if direction.isdigit():
        setm_direction(int(direction))
    if len(x) > 1:
        tmp = x[1].split("-")
        if len(tmp) > 1 and tmp[1]:
            setmessage([tmp[1], tmp[1]])
def checkdata(s, data, command):
if data == "dead\n":
setalive(False)
return None
elif "message" in data:
parse_message(data)
elif "Elevation underway" in data:
level = str(int(getlevel()) + 1)
setlevel(level)
pass
elif "Current level:" in data:
if command == "Incantation\n":
return data
elif data == "ok\n" or data == "ko\n":
s.sendall(b"Connect_nbr\n")
data2 = get_data(s)
while data2 == "ok\n" or data2 == "ko\n":
data2 = get_data(s)
data2 = checkdata(s, data2, command)
return data
else:
return data
data = get_data(s)
data = checkdata(s, data, command)
return data
def send_command(s, command):
if getalive() == False:
return None
s.sendall(command.encode("utf-8"))
data = get_data(s)
print("command = ", command, "data = ", data)
data = checkdata(s, data, command)
#print("second data = ", data)
return data
| UTF-8 | Python | false | false | 1,897 | py | 10 | connection.py | 10 | 0.560358 | 0.549815 | 0 | 79 | 23.012658 | 86 |
rubelw/auth0_client | 3,839,700,765,701 | b9a3892f254d00dbfce380691e92a8f5607a40b2 | 539ceb194e55f01f2a60ba1e9fa0c661f68a9731 | /auth0_client/menu/commons/functional.py | 23203b16ed03728b92d99754cbc785bf3b174c38 | [
"MIT"
]
| permissive | https://github.com/rubelw/auth0_client | fdeda1df26dcf978ea7837690c35414ee68b8043 | 51e68239babcf7c40e40491d1aaa3f8547a67f63 | refs/heads/master | 2020-04-02T11:16:57.891730 | 2018-10-29T02:42:29 | 2018-10-29T02:42:29 | 154,380,362 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division, print_function, absolute_import, unicode_literals
import inspect
DEBUG=0
#
# functions for handling optional values
#
def omap(function, optional):
"""Map optional value"""
if (DEBUG):
print('commons.functional.py - omap(function,optional)- called by:'+str(inspect.stack()[1][3]))
return None if optional is None else function(optional)
def oget(optional, default=None):
"""Get optional value or default value"""
if (DEBUG):
print('commons.functional.py - oget(optinal,default=None)- called by:'+str(inspect.stack()[1][3]))
return default if optional is None else optional
def ozip(*optionals):
"""Zip optional values. Return None if one value or the other is None."""
if (DEBUG):
print('commons.functional.py - ozip(*optionals)- called by:'+str(inspect.stack()[1][3]))
return None if any(x is None for x in optionals) else tuple(optionals)
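# Illustrative calls, matching the docstrings above:
#   omap(len, "abc") -> 3        omap(len, None) -> None
#   oget(None, 7)    -> 7        oget(5)         -> 5
#   ozip(1, 2)       -> (1, 2)   ozip(1, None)   -> None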
| UTF-8 | Python | false | false | 948 | py | 100 | functional.py | 97 | 0.683544 | 0.67616 | 0 | 35 | 26.085714 | 106 |
ThomasAndrasek/demonetization_bot | 7,310,034,363,937 | 443737fb996d21f3cde5ff8f8c428aad2158572f | 620cc60040d6b920a8373c38a57ab9e9fb109eac | /utilities/message_logger.py | e566c2f26cc92807a5922ab7934347cc78b16390 | []
| no_license | https://github.com/ThomasAndrasek/demonetization_bot | 7fddd9c877b34002ce8ec5874c13d3c593f34cdb | 4e7c101cc05f1653da34c3f1f3eb21843fba1e24 | refs/heads/master | 2020-06-11T03:21:41.110098 | 2019-06-29T06:58:31 | 2019-06-29T06:58:31 | 193,741,335 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
# Enable and Disable
# Set Channel
storage_path = 'databases\\logger\\logger_storage.db'
def check_for_server(server_id):
conn = sqlite3.connect(storage_path)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS loggerStorage(serverID TEXT, enable INTEGER, channelID TEXT)')
c.execute('SELECT serverID FROM loggerStorage WHERE serverID = ?', (server_id,))
data = c.fetchall()
if len(data) == 0:
c.execute('INSERT INTO loggerStorage VALUES(?, 0, "null")', (server_id,))
conn.commit()
c.close()
conn.close()
def toggle_logger(server_id, enable):
conn = sqlite3.connect(storage_path)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS loggerStorage(serverID TEXT, enable INTEGER, channelID TEXT)')
check_for_server(server_id)
c.execute('UPDATE loggerStorage SET enable = ? WHERE serverID = ?', (enable, server_id,))
conn.commit()
c.close()
conn.close()
def set_channel(server_id, channel_id):
conn = sqlite3.connect(storage_path)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS loggerStorage(serverID TEXT, enable INTEGER, channelID TEXT)')
check_for_server(server_id)
c.execute('UPDATE loggerStorage SET channelID = ? WHERE serverID = ?', (channel_id, server_id,))
conn.commit()
c.close()
conn.close()
def get_channel(server_id):
conn = sqlite3.connect(storage_path)
c = conn.cursor()
c.execute('SELECT channelID FROM loggerStorage WHERE serverID = ?', (server_id,))
    row = c.fetchone()
    c.close()
    conn.close()
    if row:
        return row[0]
def is_enable(server_id):
check_for_server(server_id)
conn = sqlite3.connect(storage_path)
c = conn.cursor()
c.execute('SELECT enable FROM loggerStorage WHERE serverID = ?', (server_id,))
    row = c.fetchone()
    c.close()
    conn.close()
    if row:
        return row[0]
async def check_permissions(message):
user_permissions = message.channel.permissions_for(message.author)
if user_permissions.administrator or user_permissions.manage_messages:
return True
else:
await message.channel.send('Sorry, you do not have the appropriate server permissions to use this command.')
return False
async def command_enable_logger(message):
if await check_permissions(message):
server_id = message.guild.id
toggle_logger(server_id, 1)
await message.channel.send('Logger has been enabled.')
async def command_disable_logger(message):
if await check_permissions(message):
server_id = message.guild.id
toggle_logger(server_id, 0)
await message.channel.send('Logger has been disabled.')
async def command_set_channel(message):
if await check_permissions(message):
server_id = message.guild.id
channel_id = message.channel.id
set_channel(server_id, channel_id)
await message.channel.send('I will now log deleted messages in this channel.')
async def log(message, reason):
server_id = message.guild.id
if is_enable(server_id) == 1:
channel_id = get_channel(server_id)
if channel_id != 'null':
channel_id = int(channel_id)
channel = message.guild.get_channel(channel_id)
await channel.send('***************************************\n**Message Author:** {}'.format(message.author) +
'\n**Reason Deleted:** {}'.format(reason) +
'\n**Message Content:** *{}*\n***************************************'.format(message.content)) | UTF-8 | Python | false | false | 3,818 | py | 6 | message_logger.py | 5 | 0.613148 | 0.610267 | 0 | 108 | 33.37037 | 126 |
Dnguye1393/RasberryProjects | 7,370,163,912,098 | 5cfe8f414bc738ec781d8f999e3d8bcbb9bbab59 | 9ad46ef05c872746dce75b3be6f8ce1362f17764 | /SenseHat/helloworld.py | f4bc632722a51786c776b9d47942635cd156a316 | [
"Unlicense"
]
| permissive | https://github.com/Dnguye1393/RasberryProjects | 4473370b079a27d6022909295334f9e95803b5fd | e93249f18d24ffdb3beb2d7db36de55ef74c6bcf | refs/heads/master | 2021-01-19T11:29:53.434348 | 2017-04-04T05:44:52 | 2017-04-04T05:44:52 | 82,248,311 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sense_hat import SenseHat
from flask import Flask, render_template, jsonify, request
import mysql.connector
from mysql.connector import Error  # used by the except clauses below
import requests
app = Flask(__name__)
sense = SenseHat()
languages = []
cnx = mysql.connector.connect(user = 'davidn', password='raspberry' ,
host='localhost', database='senseHat')
cursor = cnx.cursor()
cursor.execute("SELECT name from languages")
data = cursor.fetchall()
for row in data :
language ={'name': row[0]}
languages.append(language)
print ('Adding to Languages')
@app.route('/')
def index():
return render_template('index.html', title="Home Page")
@app.route('/helloWorld')
def my_link():
sense.show_message("hello world!")
print ('I got Clicked')
return render_template('helloWorld.html' , title="Hello World")
@app.route('/temperature')
def get_temp():
temp = sense.get_temperature()
print("Temperature: %s C", temp)
f = open('test.txt','w')
f.write("Temperature: " + format(temp) + " C")
f.close()
return render_template('temperature.html', title="Temperature")
@app.route('/restget' , methods=['GET'])
def test():
return jsonify({'languages':languages})
@app.route('/restget' , methods=['POST'])
def addOne():
val = request.json['name']
alreadyExist = False
for language in languages :
if ( language['name']==val ) :
alreadyExist = True
if(not alreadyExist) :
language ={'name': val}
languages.append(language)
sql = 'INSERT INTO languages(name) VALUES("%s")' % (val)
try:
cursor.execute(sql)
cnx.commit()
except Error as error:
print(error)
else :
print("This value, ", val, ", already exists" )
return jsonify({'languages':languages})
@app.route('/restget/<string:name>', methods=['GET'])
def returnOne(name):
langs = [language for language in languages if language['name']==name]
return jsonify({'language':langs[0]})
@app.route('/restget/<string:name>', methods=['PUT'])
def editOne(name):
langs = [language for language in languages if language['name']==name]
langs[0]['name'] = request.json['name']
return jsonify({'language':langs[0]})
@app.route('/restget/<string:name>', methods=['DELETE'])
def deleteOne(name):
langs = [language for language in languages if language['name']==name]
alreadyExist = False
    val = langs[0]['name']
for language in languages :
if ( language['name']==val ) :
alreadyExist = True
if(alreadyExist) :
languages.remove(langs[0])
        sql = 'DELETE FROM languages WHERE name = "%s"' % (val)
try:
cursor.execute(sql)
cnx.commit()
except Error as error:
print(error)
else :
print("This value, ", val, ", does not exists" )
return jsonify({'language':languages})
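# Example requests against the REST endpoints above (port 5000 is Flask's
# default for the app.run() call below):
# curl http://localhost:5000/restget
# curl -X POST -H "Content-Type: application/json" -d '{"name": "python"}' http://localhost:5000/restget
# curl -X DELETE http://localhost:5000/restget/python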
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| UTF-8 | Python | false | false | 3,008 | py | 20 | helloworld.py | 15 | 0.615691 | 0.612367 | 0 | 100 | 29.08 | 74 |
Irikos/Programming | 14,113,262,541,586 | 16de65d637f8f096243ec9e0e8db35abd8a088f5 | 344df5cc5395c2443507e2ecf2096c84fac111d3 | /Python/lesson_4_homework.py | 1aabf7ad37a949ea7daeaecb9884382bd6c5d5f6 | []
| no_license | https://github.com/Irikos/Programming | 6888d824ae547464908232a3dfa69bf1326aec50 | ef65c9e6493a177b65b278d421ad7866e3cc610a | refs/heads/master | 2015-08-14T14:19:28 | 2015-05-27T10:19:56 | 2015-05-27T10:19:56 | 26,400,095 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import math
import re
from random import randrange
### Think about what you want to do before you start writing code.
### The exercises are shorter than their statements if you solve them
### properly with the notions learned so far
"""
Ex1:
Implement a function equivalent to Python's built-in ``map``,
which takes 2 parameters: a function and an iterable.
The iterable can be a list or a string.
"""
def my_map(func, iterable):
    return [func(i) for i in iterable]
"""
Ex2:
Implement a function equivalent to Python's built-in ``filter``,
which takes 2 parameters: a function and an iterable.
The iterable can be a list or a string.
"""
def my_filter(func, iterable):
    filtered = [i for i in iterable if func(i)]
if type(iterable) == str:
filtered = ''.join(filtered)
return filtered
"""
Ex3:
Simulate a small database of persons using a dictionary:
a) We will have a key -> value database, where key is an id and value is a
dictionary with 2 keys: name and age. Initialize an empty database.
"""
db = {} # Change the initialization value accordingly
"""
b) Write a Person class that takes name, age and gender at initialization.
To keep gender simple, use 'F' for female and 'M' for male.
"""
class Person(object):
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
def roll_dice(self):
return (randrange(1,7),randrange(1,7))
"""
c) Write a ``person_factory`` function that takes a list of tuples.
Each tuple is a pair of the form (name, age, gender). The function returns
a list of Person objects initialized with the received values. The list
only creates Person objects when the age is greater than or equal to 18,
and it is ordered by this attribute.
"""
def person_factory(persons_data):
return sorted([Person(x[0], x[1], x[2]) for x in persons_data if x[1] >= 18], key=lambda Person: Person.age)
"""
d) Create an ``add_persons`` method that takes a database and a list of
persons which it inserts into this database. Call it and insert a
few persons.
Be careful how you choose the index, so that you do not overwrite entries
that already exist in the database. The function can be called several
times to insert persons into the database
"""
def add_persons(db, persons):
if db == {}:
id = 0
else:
id = max(db.keys()) + 1
for i in persons:
db[id] = Person(i.name, i.age, i.gender)
id += 1
return db
"""
e) Write a query on this database (a function) that returns the Persons
whose name ends in 'escu', who are female and whose age is between 20 and
30. The returned result must have the same shape as the database,
i.e. a dictionary where the key is the index and the value is the person.
"""
def name_gender_query(db):
    s = re.compile(r'escu$')
dPers = {id: db[id] for id in db.keys() if s.search(db[id].name) != None and db[id].gender == 'F' and db[id].age in range(20,31)}
return dPers
"""
f) Implement a ``roll_dice`` method on the Person class through which a person
makes a random roll of 2 dice. The method returns a tuple holding the
values of the 2 dice. Example: (6, 4)
Then implement the ``see_statistics`` function below, which:
1. Rolls the dice once for every person in the database.
2. Returns a dictionary of the form {dice_sum: person_count}. dice_sum
is the sum of the numbers on the faces of the 2 dice, and person_count is
the number of persons who got this sum after rolling the dice
"""
def see_statistics(db):
ls = { i: 0 for i in range(2,13)}
for pers in db.values():
aux = pers.roll_dice()
auxV = aux[0] + aux[1]
ls[auxV] += 1
return ls
"""
BONUS (this one is a bit harder)
g) Write a ``query`` function that performs a generic query on the database.
The returned result has the same shape as in e)
Example:
query(db, name='Gigi', age=25)
query(db, age=12, gender='M')
query(db, gender='F')
query(db, gender='F', age=25, name='Andi')
query(db, gender='F', birthday='never', age=25)
query(db, plays_poker=True, is_genius=False)
All of these are valid queries on the structure of our database
"""
def query(db, **kwargs):
    ld = {}
    id = 0
    for pers in db.values():
        # a person matches only if every requested attribute exists and is equal
        matches = all(hasattr(pers, key) and getattr(pers, key) == value
                      for key, value in kwargs.items())
        if matches:
            ld[id] = pers
            id += 1
    return ld
def main():
def _helper(ex, got, expected, data):
try:
assert got == expected
print ex + " : PASS"
except AssertionError:
print ex + " : FAIL\n\tgot: {}\n\texpected: {}\n\tTest data was: {}"\
.format(got, expected, data)
# Exercitiul 1
data_r, data_s = range(1, 10), 'A'*10
_helper('ex1', my_map(math.sqrt, data_r), map(math.sqrt, data_r),
'my_map(math.sqrt, {})'.format(str(data_r)))
_helper('ex1', my_map(str.split, data_s), map(str.split, data_s),
'my_map(math.split, {})'.format(str(data_s)))
# Exercitiul 2
_helper('ex2',
my_filter(lambda x: x % 2 == 0, data_r),
filter(lambda x: x % 2 == 0, data_r),
"my_filter(lambda x: x % 2 == 0, {})".format(str(data_r)))
_helper('ex2',
my_filter(lambda x: x == 'A', data_s),
filter(lambda x: x == 'A', data_s),
"my_filter(lambda x: x == 'A', '{}')".format(str(data_s)))
# Exercitiul 3a
global db
if type(db) is not dict:
_helper('ex3 a)', db, 'db has to be intialized', db)
else:
_helper('ex3 a)', db, db, db)
# Exercitiul 3b
try:
p = Person('A', 1, 'F')
_helper('ex3 b)', (p.name, p.age, p.gender), ('A', 1, 'F'), None)
except Exception:
_helper('ex3 b)', None, "Invalid Person Instance", "Person('A', 1, 'F')")
return
# Exercitiul 3c
p1 = ('C', 21, 'F')
p2 = ('A', 15, 'M')
p3 = ('B', 18, 'M')
l = person_factory([p1, p2, p3])
_helper('ex3 c)', [x.name for x in l], ['B', 'C'], [p1, p2, p3])
# Exercitiul 3d
test_db = {}
add_persons(test_db, person_factory([('A', 18, 'M'), ('B', 21, 'F')]))
add_persons(test_db, person_factory([('C', 20, 'F')]))
_helper('ex3 d)',
[x.name for x in sorted(test_db.values(), key=lambda x: x.name)],
['A', 'B', 'C'],
[('A', 18, 'M'), ('B', 21, 'F'), ('C', 20, 'F')])
# Exercitiul 3e
test_db = {
1: Person('A', 25, 'F'),
2: Person('Popescu', 25, 'M'),
3: Person('Popescu', 31, 'F'),
4: Person('Popescu', 25, 'F'),
5: Person('Gigi', 25, 'M'),
}
query = name_gender_query(test_db).values()[0]
_helper('ex3 e)', (query.name, query.age, query.gender),
('Popescu', 25, 'F'), test_db)
name_gender_query(test_db)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 7,580 | py | 55 | lesson_4_homework.py | 37 | 0.583344 | 0.565735 | 0 | 233 | 30.416309 | 133 |
ateexD/Intelligent-Bus-Stop-Recognition-System | 18,545,668,797,410 | ff0b1b2c5d41a303acdfb9887122987fab95ccc6 | 8c7c19a36114f685aa3d0e3f5a34e19a253d276c | /code/random_trials_day.py | 472ea0a19e13f26e617bff5d583ec3875379581e | [
"MIT"
]
| permissive | https://github.com/ateexD/Intelligent-Bus-Stop-Recognition-System | 37b9ebc21f9f88a0bbb8663c904ed438763ca925 | d9a94a13adf026b329903743b54e46c30ac8cf00 | refs/heads/master | 2021-04-29T23:11:24.705644 | 2018-02-14T18:55:46 | 2018-02-14T18:55:46 | 121,549,466 | 2 | 0 | null | true | 2018-02-14T19:03:54 | 2018-02-14T19:03:54 | 2017-06-16T18:43:34 | 2018-02-14T18:55:47 | 1,359 | 0 | 0 | 0 | null | false | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc as sc
from time import time
from collections import Counter
d = {}
bus_stop_names = ["JJ Nagar East", "Anna Nagar West Depot", "Collector Nagar", "Mogappair East", "Thirumangalam",
"Gurunath Stores", "Incubation Centre", "SOMCA Block"]
for it in xrange(1, len(bus_stop_names)+1):
d[it] = bus_stop_names[it-1]
def image_resize(im):
im = sc.imresize(im, (32, 32, 3))
return im
X_train = np.array(image_resize(plt.imread("./TrainDataDay/Train1.jpg")).flatten().astype("float32"))
for i in range(2, 721):
if i % 25 == 0:
print "Reading train image " + str(i)
img = plt.imread("./TrainDataDay/Train" + str(i) + ".jpg")
X_train = np.vstack((X_train,image_resize(img).flatten().astype("float32")))
print len(X_train)
X_train = X_train / 255.0
y_train = []
for bus_stop in range(1, 9):
for train_images in range(90):
y_train.append(bus_stop)
y_train = np.array(y_train)
def compute_distances(X_train, X):
X = np.array(X)
X = X.astype("float32")
X /= 255.0
num_test = X.shape[0]
num_train = X_train.shape[0]
dists = np.zeros((num_test, num_train))
dists = np.sqrt((np.square(X).sum(axis=1, keepdims=True)) - (2*X.dot(X_train.T)) + (np.square(X_train).sum(axis=1)))
return dists
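# compute_distances expands ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 row-wise,
# producing the full (num_test, num_train) matrix of L2 distances in one shot.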
for n_images in [2, 3, 5, 7, 9, 11]:
t1 = time()
for iterx in range(100):
print "Iter: " + str(iterx)
class_name = np.random.randint(1, 9)
print "Class name: " + str(d[class_name]), "; No. of Images: " + str(n_images)
print
X_test = np.zeros((n_images, np.prod(X_train.shape[1:])))
x = 0
if class_name == 1:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/7H/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 2:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/AN West Depot/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 3:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/Collector Nagar/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 4:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/Mogappair East/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 5:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/Thirumangalam/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 6:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/Gurunath/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 7:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/Incubation Centre/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
elif class_name == 8:
test_images = np.random.randint(1, 31, size=n_images)
for i in test_images:
X_test_temp = image_resize(plt.imread("./TestDataDay/SOMCA/Test" + str(i) + ".jpg")).flatten()
X_test[x, :] = X_test_temp
x += 1
dis = compute_distances(X_train, X_test)
for k in range(1, 6):
count = 0
correct_classes = []
for i in range(dis.shape[0]):
l = y_train[np.argsort(dis[i, :])].flatten()
closest_y = l[:k]
correct_classes.append(Counter(closest_y).most_common(1)[0][0])
correct_classes = np.array(correct_classes)
#print l[:10]
for v in range(correct_classes.shape[0]):
if correct_classes[v] == class_name:
count += 1
print "k = " + str(k)
print "Predicted as: ",
for cc in correct_classes:
print d[cc] + ", ",
print
print "Groundtruth : " + str(d[class_name])
accuracy = float(count) / dis.shape[0]
print "Accuracy: " + str(accuracy)
print
#print
t2 = time()
print "Time Taken: " + str(t2-t1)
print
| UTF-8 | Python | false | false | 5,259 | py | 7 | random_trials_day.py | 2 | 0.520441 | 0.498003 | 0 | 150 | 34.053333 | 122 |
leinian85/year2019 | 12,120,397,730,425 | 48ae8166e04cef4e7440c78dd31a31af09b07463 | 3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72 | /month04/spider/day02/03_spider.py | 072699c5fe82fe2b43ce7854d398de12dbb30e4a | []
| no_license | https://github.com/leinian85/year2019 | 30d66b1b209915301273f3c367bea224b1f449a4 | 2f573fa1c410e9db692bce65d445d0543fe39503 | refs/heads/master | 2020-06-21T20:06:34.220046 | 2019-11-04T06:37:02 | 2019-11-04T06:37:02 | 197,541,549 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib import request
from fake_useragent import UserAgent
import time
import random
import re
import os
class Movie:
    def __init__(self):
        self.url1 = []
        self.url2 = []
        self.movies = []
        self.imgslit = []
    def get_urllevel1(self, page):
for p in range(1, page + 1):
if p == 1:
self.url1.append("https://maoyan.com/board/4")
else:
self.url1.append("https://maoyan.com/board/4?offset=" + str((p - 1) * 10))
def set_headers(self):
us = UserAgent()
self.headers = {"User-Agent": us.random}
def gethtml1(self, url1):
url = request.Request(url=url1, headers=self.headers)
req = request.urlopen(url)
return req.read().decode()
def get_url2(self, html):
pattern = re.compile('<dd>.*?<a href="(.*?)".*?</dd>', re.S)
urllist = pattern.findall(html)
self.url2.append(['https://maoyan.com'+url for url in urllist])
def __get_html2(self,url):
print(url)
url = request.Request(url=url,headers=self.headers)
req = request.urlopen(url)
return req.read().decode()
def get_info2(self,url):
movie = {}
html = self.__get_html2(url)
pattern = re.compile('<h3 class="name">(.*?)</h3>',re.S)
name = pattern.findall(html)
# print("name:",name)
movie["name"] = name[0]
pattern = re.compile('<div class="comment-content">(.*?)</div>',re.S)
context = pattern.findall(html)
# print(context)
movie["context"] = context
imgs = self.__save_imgs(html)
movie["imgs"] = imgs
self.movies.append(movie)
def __save_imgs(self,html):
pattern = re.compile('<img class="default-img" data-act="movie-img-click" data-src="(.*?)" alt="">',re.S)
return pattern.findall(html)
def save_movies(self):
with open("movis.html","a") as f:
f.write(str(self.movies))
def __save_files(self,name,imgs,count):
if not os.path.exists(name):
os.mkdir(name)
counti = 0
for img in imgs:
counti += 1
filename = img.split("@")[0].split("/")[-1]
file = name + '/' + filename
iurl = request.Request(url=img,headers=self.headers)
phont = request.urlopen(iurl)
with open(file,'wb') as i:
i.write(phont.read())
if counti > count:
break
time.sleep(random.randint(1, 3))
def save_photos(self):
for movie in self.movies:
name = movie["name"]
imgs = movie["imgs"]
self.__save_files(name, imgs,10)
mv = Movie()
mv.get_urllevel1(1)
mv.set_headers()
for url in mv.url1:
html = mv.gethtml1(url)
mv.get_url2(html)
time.sleep(random.randint(1, 3))
# print(mv.url2)
for url2 in mv.url2:
for aurl in url2:
mv.get_info2(aurl)
mv.save_movies()
time.sleep(random.randint(1, 3))
mv.save_photos()
| UTF-8 | Python | false | false | 3,072 | py | 477 | 03_spider.py | 372 | 0.54069 | 0.525716 | 0 | 111 | 26.675676 | 113 |
LeeKom-Niu/graphics_search | 9,775,345,600,005 | 5b830df42901fc5c900a2fd9601af2cb4940f050 | 345717807c433c72c41f2f58380fb8adad9e213c | /mongodb/create_database.py | dba148e1d44f5f589751a10a7809ec8454e32fd1 | []
| no_license | https://github.com/LeeKom-Niu/graphics_search | e11fa9fe5f33120259e4a5dde156e4135ded1300 | 4012f5375ac36fdd76affa57282799658f974af4 | refs/heads/master | 2022-04-12T00:14:10.934536 | 2020-01-26T03:20:55 | 2020-01-26T03:20:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import pymongo
data_file = '../scratch/kesenhuang/graphics.json'
with open(data_file, 'r') as f:
data = json.load(f)
cg_client = pymongo.MongoClient("mongodb://localhost:27017")
cg_db = cg_client["kesenhuang"]
cg_col = cg_db["papers"]
cg_col.insert_many(data)
| UTF-8 | Python | false | false | 281 | py | 2 | create_database.py | 2 | 0.697509 | 0.679715 | 0 | 13 | 20.538462 | 60 |
bkayed/posrocket-python-sdk | 12,292,196,448,537 | 1424f4e6f6f4f263aa78350d11eaad3dd57678d4 | 1b7f17ff9f31abd7abb42da5621c89a65130808a | /posrocket/models/catalog/variation.py | 1841a0d64074dd698c070e2a0c53c3a0ead501ba | []
| no_license | https://github.com/bkayed/posrocket-python-sdk | ff604586e4d928366016726ce643c258982fd3ea | fec3ca9099faa1071b4e1d14cc0b3ac1d5c2ca4c | refs/heads/master | 2020-08-04T00:33:04.751579 | 2019-09-26T14:10:42 | 2019-09-26T14:10:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Catalog Variation Python model
"""
from typing import List
from posrocket.models.catalog.pricing import CatalogPricingModel
__author__ = "Ahmad Bazadough, Hamzah Darwish"
__copyright__ = "Copyright 2019, POSRocket"
__credits__ = ["Ahmad Bazadough", "Hamzah Darwish"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Ahmad Bazadough, Hamzah Darwish"
__email__ = "a.bazadough@posrocket.com"
__status__ = "Beta"
from posrocket.utils.prices_mixin import PricingMixin
class CatalogVariationModel(PricingMixin):
"""mapper class for Catalog Variation object from Json Dict
"""
id: str
name: str
local_name: str
pricing_type: str
barcode: str
sku: str
image: str
_pricing: List[CatalogPricingModel]
def __init__(self, id=None, name=None, local_name=None, pricing_type=None, barcode=None, sku=None, image=None, pricing=None,
**kwargs):
"""map a dict to Catalog Variation object
:param kwargs: Catalog Variation json dict
"""
self.id = id
self.name = name
self.local_name = local_name
self.pricing_type = pricing_type
self.barcode = barcode
self.sku = sku
self.image = image
self.pricing = pricing
def __str__(self) -> str:
""" String representation for the Catalog Variation model
:return: Catalog Variation name
"""
return f'{self.name}'
@property
def pricing(self) -> List[CatalogPricingModel]:
"""getter for Variation pricing
:return: list of pricing for the Variation
"""
return self._pricing
@pricing.setter
def pricing(self, json_pricing: List[dict]):
"""setter for Variation pricing
:param json_pricing:json list of pricing dicts
:return: None
"""
self._pricing = []
for json_price in json_pricing or []:
self._pricing.append(CatalogPricingModel(**json_price))
def get_price_for_location(self, location_id: str) -> float:
"""return the price for the variation on a specific location
:param location_id: location id to get the price for
:return: the price in that location
"""
for pricing in self._pricing:
if pricing.location_id == location_id:
return pricing.price
    def is_available_in_location(self, location_id: str) -> bool:
        """check whether the variation is available at a specific location
        :param location_id: location id to check availability for
        :return: availability flag for that location
"""
for pricing in self._pricing:
if pricing.location_id == location_id:
return pricing.available
@property
def lowest_price(self) -> float:
"""get the lowest price for the variation between all locations
:return: the lowest price between all locations
"""
low = None
for pricing in self._pricing:
            if low is None or pricing.price < low:
low = pricing.price
return low
@property
def highest_price(self) -> float:
"""get the highest price for the variation between all locations
:return: the highest price between all locations
"""
        high = None
        for pricing in self._pricing:
            if high is None or pricing.price > high:
                high = pricing.price
        return high
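    # Minimal usage sketch (field values invented; whether CatalogPricingModel
    # accepts these exact kwargs is an assumption based on the setter above):
    # v = CatalogVariationModel(id='v1', name='Small', pricing=[
    #     {'location_id': 'loc-1', 'price': 2.5, 'available': True},
    #     {'location_id': 'loc-2', 'price': 3.0, 'available': False},
    # ])
    # v.get_price_for_location('loc-1')  # -> 2.5
    # v.lowest_price                     # -> 2.5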
| UTF-8 | Python | false | false | 3,452 | py | 59 | variation.py | 58 | 0.61095 | 0.608922 | 0 | 116 | 28.758621 | 128 |
zunkzz/airu-proxy | 17,566,416,249,295 | f5b9663509d5b35b36bc09c2d625b6a3ffbe5dcb | ca2e734b8203939ae9ea1ad9380a6cc82ce7a7c5 | /proxy.py | c83a8f2f27249168a8edeead8aec7868edd1bf8f | []
| no_license | https://github.com/zunkzz/airu-proxy | a10e062d99629976307eb8bd1c314a076cca04d0 | 69f411f62ae71f3b88fd9e8def5cc36e19e82f58 | refs/heads/main | 2023-04-04T13:06:51.266573 | 2021-04-12T20:58:04 | 2021-04-12T20:58:04 | 354,711,122 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###########################
#       My Twitter        #
# @Souichi285 #
###########################
import requests
from bs4 import BeautifulSoup
import os
os.system('clear')
print("""
#################################################################################
# aaaaaaaaaaaaaaaa * #
# aaaaaaaaaaaaaaaaaaaaaaaa #
# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa #
# aaaaaaaaaaaaaaaaa aaaaaa #
# aaaaaaaaaaaaaaaa aaaa #
# aaaaaaaaaaaaa aa aa #
#* aaaaaaaa aa a #
# aaaaaaa aa aaaa #
# * aaaaaaaaa aaa #
# aaaaaaaaaaa aaaaaaa * #
# aaaaaaa aaaaaaaaaa #
# aaaaaa a aaaaaa aaaaaa #
# aaaaaaa aaaaaaa #
# aaaaaaaa a #
# aaaaaaaaaa aa #
# aaaaaaaaaaaaaaaa aaaa #
# aaaaaaaaaaaaaaaaa aaaaaa * #
# * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa #
# aaaaaaaaaaaaaaaaaaaaaaaa Airu Moonlight #
  # *                         aaaaaaaaaaaaaaaa         Abandoned Proxy          #
#################################################################################""")
resposta = requests.get('https://free-proxy-list.net/')
bs = BeautifulSoup(resposta.text, 'lxml')
table = bs.find('table')
linhas = bs.find_all('tr')
count = 0
for linha in linhas:
    ip = linha.contents[0].text
    porta = linha.contents[1].text
    anonimo = linha.contents[4].text
    segundos = linha.contents[6].text
    if(segundos == 'yes' and (anonimo == 'anonymous' or anonimo == 'elite proxy')):
        airu = 'http://' + ip + ':' + porta
        proxies = {'http': airu, 'https': airu}
        try:
            testIP = requests.get('https://httpbin.org/ip', proxies = proxies , timeout = 3)
            print(testIP.text)
            resIP = testIP.json()['origin']
            # httpbin may report "client, proxy"; compare the first address
            origin = resIP.split(',')
            if origin[0].strip() == ip:
                print('Good proxy!!')
                count += 1
                if count == 5:
                    break
        except:
            print('Dead proxy!')
| UTF-8 | Python | false | false | 2,937 | py | 1 | proxy.py | 1 | 0.327981 | 0.323881 | 0 | 64 | 44.734375 | 92 |
osamascience96/Object-Oriented-Programms | 11,793,980,234,873 | 4d56d6a13a4426e8152059d2d184576b0f89d1f6 | 991d59be2758b7d5e0fb59debfcf31b3aed9bbe8 | /Python/Inheritance in Python (Overriding Example).py | ff7badc0f60a90d3e0c76e92c719f51102f17a0b | []
| no_license | https://github.com/osamascience96/Object-Oriented-Programms | d20bbd29cb550e0100ddab48be1ff4ee66d4cea6 | ddcad59a75897dd0d3eb48694b67a98d9feab85c | refs/heads/master | 2020-05-14T13:53:37.064642 | 2019-06-01T09:56:07 | 2019-06-01T09:56:07 | 181,823,010 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Parent Class
class Parent:
    # member function
def print_last_name(self):
print('Ahmed')
# Child Class
class Child(Parent):
# member function
def print_first_name(self):
print('Osama')
# overriding the function in the parent class
def print_last_name(self):
print('Rasheed')
# instance of the child class
osama = Child()
osama.print_first_name()
osama.print_last_name()
| UTF-8 | Python | false | false | 427 | py | 24 | Inheritance in Python (Overriding Example).py | 14 | 0.65808 | 0.65808 | 0 | 18 | 22.722222 | 50 |
YousefbnK/Classrooms | 5,987,184,443,350 | 57b6841ac05673dceadfd7f8a79f6e7afa70b04d | 44995217d756a080d7184a67e8d675ba21b39220 | /classes/models.py | df4da85b8e64ab8afd8c1dcd03c3bfb5f8e87fe5 | []
| no_license | https://github.com/YousefbnK/Classrooms | 291448f0d0f87d81af7d368def8d8d2be11ccc22 | d2dc3259f1c96070993552368b24fd6d8d80f07c | refs/heads/master | 2020-12-19T03:00:15.552269 | 2020-01-23T15:28:00 | 2020-01-23T15:28:00 | 235,600,716 | 0 | 0 | null | true | 2020-01-22T15:18:54 | 2020-01-22T15:18:53 | 2020-01-15T12:02:35 | 2020-01-15T12:02:32 | 29 | 0 | 0 | 0 | null | false | false | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
from datetime import date
class Classroom(models.Model):
name = models.CharField(max_length=120)
subject = models.CharField(max_length=120)
year = models.IntegerField()
teacher = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('classroom-detail', kwargs={'classroom_id':self.id})
class Student(models.Model):
name = models.CharField(max_length=120)
date_of_birth = models.DateField(max_length = 10)
gender_choices = (
('M', 'Male'),
('F', 'Female'),
('O', 'Other'),
)
gender = models.CharField(max_length = 12, choices = gender_choices)
exam_grade = models.CharField(max_length=4)
classroom = models.ForeignKey(Classroom, on_delete=models.CASCADE)
class Meta:
ordering = ['name', 'exam_grade']
def __str__(self):
return self.name
| UTF-8 | Python | false | false | 954 | py | 3 | models.py | 1 | 0.715933 | 0.701258 | 0 | 33 | 27.878788 | 69 |
letfoolsdie/algorithm_specialization | 10,900,627,003,607 | d9fd1edd26f2054659b0352b04405bb1d39db68a | 89e003f6ad5051695ca9eac7e2886379cfcfb29b | /w2/fibonacci/fib.py | 1b78856f6d1f60c5110d824d99d70c349944f033 | []
| no_license | https://github.com/letfoolsdie/algorithm_specialization | 0a85ce41610ab946b07a3488d9e4ba93bb3b3004 | b4bbb930a281b0a4138acd17ce0f9f840d361f68 | refs/heads/master | 2021-01-10T08:39:31.426310 | 2016-09-08T21:36:02 | 2016-09-08T21:36:02 | 54,419,391 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Uses python3
#def calc_fib(n):
# if (n <= 1):
# return n
#
# return calc_fib(n - 1) + calc_fib(n - 2)
def calc_fib(n):
if n == 0:
return 0
if n <= 2:
return 1
d1 = 1
d2 = 1
for i in range(2,n):
d3 = d1+d2
d1 = d2
d2 = d3
return d3
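# sanity check: calc_fib(0) -> 0, calc_fib(2) -> 1, calc_fib(10) -> 55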
n = int(input())
print(calc_fib(n))
| UTF-8 | Python | false | false | 349 | py | 15 | fib.py | 15 | 0.438395 | 0.378223 | 0 | 23 | 14.173913 | 45 |
GKCY/Leetcode | 5,334,349,388,807 | 271e14e53b00a6cc316293fd3daa6103a71172f4 | c10cebbe003de5a905873f032d9ec79b5c8b67d3 | /480.二叉树的所有路径(Binary Tree Paths).py | 51e70e1873e57b6257642536d5465cb8b991e356 | []
| no_license | https://github.com/GKCY/Leetcode | a6aba3ef4e9f96d54a17e592eb5270483c8ff450 | 2630d49aeaa04c2bcb617f9bc1183d2a29eeedb9 | refs/heads/master | 2021-01-24T01:35:47.932055 | 2017-03-27T14:00:04 | 2017-03-27T14:00:04 | 68,601,897 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
# @param {TreeNode} root the root of the binary tree
# @return {List[str]} all root-to-leaf paths
def binaryTreePaths(self, root):
def find(root, path, result):
if path == '':
path = str(root.val)
else:
path = path + '->'+ str(root.val)
if root.left:
find(root.left, path, result)
if root.right:
find(root.right, path, result)
if root.right == None and root.left == None:
result.append(path)
# Write your code here
res = []
path = ''
if root:
find(root, path, res)
return res
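# Example: for root 1 with children 2 and 3, where 2 has a right child 5,
# binaryTreePaths returns ["1->2->5", "1->3"].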
| UTF-8 | Python | false | false | 857 | py | 40 | 480.二叉树的所有路径(Binary Tree Paths).py | 28 | 0.490082 | 0.490082 | 0 | 29 | 28.37931 | 56 |
Kaper156/call_recognizer | 18,193,481,501,242 | 3513350d49dfa2e64a36ca8ce4c0f7b9722bfa1f | 07b62ec4e6cb37427d38e0a419023bd3dda98e24 | /run.py | 47bfb1f8819024cde41598c0572a9e33bc800e94 | []
| no_license | https://github.com/Kaper156/call_recognizer | aa6286870af4f5dec683b04f6624ebaef4a9e42f | 5788c4be5e957ecadbc12aad7b9be48702cf0733 | refs/heads/master | 2022-12-13T05:12:28.318777 | 2020-08-28T13:35:02 | 2020-08-28T13:35:02 | 290,432,799 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from recognizer.api import ApiClient
from recognizer.cli import parse_args
from recognizer.config import Config
from recognizer.database import update_or_insert_phone_call
from recognizer.helpers import remove_file, get_wav_last_modify_date_time, format_phone_call_to_log
def main(args):
# Load config (API and DB configuration)
config = Config('configuration1.ini')
# Get result logger
result_logger = logging.getLogger('result')
# Init API
api = ApiClient(**config.get_api_credentials()) # , stubs_filepath='./tests/files/stubs.txt')
# Get response from API and recognize by stage
api_response = api.recognize_wav(args.filepath, args.stage)
# Get datetime of last modify input wav file (used as call-datetime)
call_date_time = get_wav_last_modify_date_time(args.filepath)
phone_call_values = {
'date_time': call_date_time,
'stage_number': api_response['stage_number'],
'answer': api_response['answer'],
'phone_number': args.phone,
'duration': api_response['duration'],
'transcription': api_response['transcription']
}
# Save result to log file
result_logger.info(format_phone_call_to_log(**phone_call_values))
# If need saving to DB
if args.db:
# Import controller only if need save data to DB
from recognizer.database import PostgresDatabaseController
# Init DB controller
db = PostgresDatabaseController(**config.get_db_config())
with db as session:
# Insert or change instance of phone call
update_or_insert_phone_call(session=session, **phone_call_values,
project_name=args.project_name,
server_name=args.server_name, server_ip=args.server_ip)
# Remove file when work with him completed
remove_file(args.filepath)
if __name__ == '__main__':
import sys
# Parse command line arguments
args = parse_args(sys.argv[1:])
# Run main algorithm
main(args)
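    # Typical invocation (the flag names are assumptions about recognizer.cli;
    # the attributes consumed above are filepath, phone, stage, db,
    # project_name, server_name and server_ip):
    #   python run.py --filepath call.wav --phone 15550100 --stage 2 --db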
| UTF-8 | Python | false | false | 2,065 | py | 21 | run.py | 17 | 0.656659 | 0.65569 | 0 | 58 | 34.603448 | 99 |
tu95ctv/rnoc2 | 15,607,911,160,276 | 82c9eb65aa7a35e8d79dbe8f6e83255e42cd64aa | 8044de4715ec7ac86d1fab984fd807a37a967d85 | /rnoc/migrations/0007_auto_20160608_2339.py | 92622c1dbf432de09bda90a2abdb51f3c15cb157 | []
| no_license | https://github.com/tu95ctv/rnoc2 | a8a70b2e214d29465ad89c1e0799d649c5244a2a | 53e4aeb7c052490b3379c16d92470156b6b25f03 | refs/heads/master | 2020-12-04T03:15:18.013065 | 2016-09-15T07:14:50 | 2016-09-15T07:14:50 | 67,463,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('rnoc', '0006_auto_20160602_1120'),
]
operations = [
migrations.AddField(
model_name='bscrnc',
name='so_luong_tram',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AlterField(
model_name='doitac',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 521330), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='duan',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 522658), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='faultlibrary',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 531806), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='lenh',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 532925), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='mll',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 544259), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='nguyennhan',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 16, 39, 28, 526562, tzinfo=utc), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='suco',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 16, 39, 28, 525396, tzinfo=utc), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='thaotaclienquan',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 530434), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='thietbi',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 520083), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='tram',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 16, 39, 28, 539834, tzinfo=utc), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
migrations.AlterField(
model_name='tram',
name='tinh',
field=models.ForeignKey(related_name='tinh_dot_tram_set', verbose_name='T\u1ec9nh', blank=True, to='rnoc.Tinh', null=True),
),
migrations.AlterField(
model_name='trangthai',
name='ngay_gio_tao',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 8, 23, 39, 28, 528635), verbose_name='Ng\xe0y gi\u1edd t\u1ea1o', blank=True),
),
]
| UTF-8 | Python | false | false | 3,646 | py | 17 | 0007_auto_20160608_2339.py | 15 | 0.593801 | 0.521942 | 0.003017 | 82 | 43.463415 | 164 |
NateWeiler/Resources | 15,522,011,838,930 | f7c8478c68eadfcc6d6b3d8eb70d082ab1e7405a | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Pygame Tutorials/shmup/shmup.py | 7a6d12b35b80098f44aa6d715aead0d9e5d4eeb6 | []
| no_license | https://github.com/NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | false | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | 2021-12-09T12:39:04 | 2022-09-08T15:20:18 | 2,434,051 | 1 | 0 | 32 | null | false | false | version https://git-lfs.github.com/spec/v1
oid sha256:c08dac996effe1661c1c112865ad37866b4ea4f2b66386fe671ae000bfab2abe
size 13308
| UTF-8 | Python | false | false | 130 | py | 36,207 | shmup.py | 16,386 | 0.884615 | 0.538462 | 0 | 3 | 42.333333 | 75 |
j0shuasm1th/sportsref | 858,993,481,293 | ce516b5d2bff3afd52a82620f52f7a44390b413c | 8ec0e80af3a30f36e9faebe5d37ee86b6b9f51c0 | /sportsref/ncaaf/players.py | 72e45b348fb40029ce35f126edbb240ca03ebcf3 | []
| no_license | https://github.com/j0shuasm1th/sportsref | 216f8f495736a57c1631dabc8978ea64eb75e08d | 62df006cd699151ec77e4c4096e39947a99b6aef | refs/heads/master | 2021-01-20T09:07:31.561197 | 2016-07-25T02:00:41 | 2016-07-25T02:00:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
import re
import urlparse
import numpy as np
import pandas as pd
from pyquery import PyQuery as pq
import sportsref
__all__ = [
'Player',
]
yr = datetime.datetime.now().year
@sportsref.decorators.memoized
class Player:
def __init__(self, playerID):
self.pID = playerID
self.mainURL = (sportsref.ncaaf.BASE_URL +
'/players/{0}.html'.format(self.pID))
def __eq__(self, other):
return self.pID == other.pID
def __hash__(self):
return hash(self.pID)
@sportsref.decorators.memoized
def getDoc(self):
doc = pq(sportsref.utils.getHTML(self.mainURL))
return doc
@sportsref.decorators.memoized
def name(self):
doc = self.getDoc()
name = doc('div#info_box h1:first').text()
return name
@sportsref.decorators.memoized
def position(self):
doc = self.getDoc()
rawText = (doc('div#info_box p')
.filter(lambda i,e: 'Position' in e.text_content())
.text())
rawPos = re.search(r'Position: (\S+)', rawText, re.I).group(1)
allPositions = rawPos.split('-')
# TODO: right now, returning just the primary position for those with
# multiple positions
return allPositions[0]
@sportsref.decorators.memoized
def height(self):
doc = self.getDoc()
try:
rawText = (doc('div#info_box p')
.filter(
lambda i,e: 'height:' in e.text_content().lower()
).text())
rawHeight = (re.search(r'Height: (\d\-\d{1,2})', rawText, re.I)
.group(1))
except AttributeError:
return np.nan
feet, inches = map(int, rawHeight.split('-'))
return feet*12 + inches
@sportsref.decorators.memoized
def weight(self):
doc = self.getDoc()
try:
rawText = (doc('div#info_box p')
.filter(lambda i,e: 'Weight:' in e.text_content())
.text())
rawWeight = re.search(r'Weight: (\S+)', rawText, re.I).group(1)
except AttributeError:
return np.nan
return int(rawWeight)
@sportsref.decorators.memoized
def draftPick(self):
doc = self.getDoc()
rawDraft = doc('div#info_box > p:contains("Draft")').text()
m = re.search(r'Draft:.*?, (\d+).*?overall.*', rawDraft, re.I)
# if not drafted or taken in supplemental draft, return NaN
if not m:
return np.nan
else:
return int(m.group(1))
@sportsref.decorators.memoized
def draftClass(self):
doc = self.getDoc()
rawDraft = doc('div#info_box > p:contains("Draft")').text()
m = re.search(r'Draft:.*?of the (\d+) NFL', rawDraft, re.I)
# if not drafted or taken in supplemental draft, return NaN
if not m:
return np.nan
else:
return int(m.group(1))
@sportsref.decorators.memoized
def draftTeam(self):
doc = self.getDoc()
rawDraft = doc('div#info_box > p:contains("Draft")')
draftStr = sportsref.utils.flattenLinks(rawDraft)
m = re.search(r'by the (\w{3})', draftStr)
if not m:
return np.nan
else:
return m.group(1)
@sportsref.decorators.memoized
def college(self):
"""Gets the last college that the player played for."""
doc = self.getDoc()
aTag = doc('div#info_box > p:first a:last')
college = sportsref.utils.relURLToID(aTag.attr['href'])
return college
# TODO: scrape player features that will be used for analysis
# ex: pass/rush/rec/def season/career stats + awards
# after that, get college-level and conference-level features
@sportsref.decorators.memoized
@sportsref.decorators.kindRPB(include_type=True)
def gamelog(self, kind='R', year=None):
"""Gets the career gamelog of the given player.
:kind: One of 'R', 'P', or 'B' (for regular season, playoffs, or both).
Case-insensitive; defaults to 'R'.
:year: The year for which the gamelog should be returned; if None,
return entire career gamelog. Defaults to None.
:returns: A DataFrame with the player's career gamelog.
"""
url = urlparse.urljoin(
sportsref.nfl.BASE_URL, '/players/{0[0]}/{0}/gamelog'
).format(self.pID)
doc = pq(sportsref.utils.getHTML(url))
table = doc('#stats') if kind == 'R' else doc('#stats_playoffs')
df = sportsref.utils.parseTable(table)
if year is not None:
df = df.query('year == @year')
return df
@sportsref.decorators.memoized
@sportsref.decorators.kindRPB(include_type=True)
def passing(self, kind='R'):
"""Gets yearly passing stats for the player.
:kind: One of 'R', 'P', or 'B'. Case-insensitive; defaults to 'R'.
:returns: Pandas DataFrame with passing stats.
"""
doc = self.getDoc()
table = doc('#passing') if kind == 'R' else doc('#passing_playoffs')
df = sportsref.utils.parseTable(table)
return df
# TODO: differentiate regular season and playoffs
@sportsref.decorators.memoized
def rushing_and_receiving(self):
doc = self.getDoc()
table = doc('#rushing_and_receiving')
df = sportsref.utils.parseTable(table)
return df
| UTF-8 | Python | false | false | 5,520 | py | 14 | players.py | 13 | 0.575181 | 0.572101 | 0 | 165 | 32.454545 | 79 |
ianjward/Distributed-FileShare-App | 18,219,251,292,036 | 53995abfc603671727ca3efc1ffa3344070a2a09 | 62bb6c4b1819550078a7b1d783e8bfa796674d4c | /src/launch_backend.py | 6a087a3184c223ca4360a77e28192a1549674a9c | []
| no_license | https://github.com/ianjward/Distributed-FileShare-App | fce4979b4d32db85b0d8297e9decd47dc85ea05d | 5c1d812a79815782bc6333ab99b2df2521ceddd0 | refs/heads/master | 2022-11-11T10:58:03.319282 | 2020-05-08T16:29:09 | 2020-05-08T16:29:09 | 240,789,805 | 0 | 0 | null | false | 2022-11-04T19:31:54 | 2020-02-15T21:24:12 | 2020-05-08T16:29:19 | 2022-11-04T19:31:50 | 173,959 | 0 | 0 | 1 | Python | false | false | from twisted.internet import reactor
import src.network_node_types.broadcast_node as broadcast
import src
if __name__ == '__main__':
broadcast.search_for(["ians_share"])
reactor.run()
| UTF-8 | Python | false | false | 195 | py | 15 | launch_backend.py | 14 | 0.702564 | 0.702564 | 0 | 8 | 23.25 | 57 |
Ali-BigData/HHH | 764,504,200,972 | 408dca2a73d3db7d85ff4f4615dbb2033775d9e9 | 46835ccd24e9938f4c0aed11ec8cf8e1717757d1 | /request.py | 86d98cbba57bf7cacc07ea7584fbb8ab437b28c3 | []
| no_license | https://github.com/Ali-BigData/HHH | b78ccb4b19b6efbb14e9307c30d60ae929bcc409 | d7cb12e0eb25b3ca0a50fff7707c84a5f3efd72b | refs/heads/main | 2023-04-19T09:11:47.747643 | 2021-05-02T12:30:57 | 2021-05-02T12:30:57 | 363,645,720 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
url = 'http://localhost:5000/predict_api'
r = requests.post(url,json={'Cement':12, 'Big-Gravel':29,
'Flyash':6,'Water':44,'Superplasticizer':12,'Small-Gravel':37,'Sand':8,'Days':5})
print(r.json())
| UTF-8 | Python | false | false | 219 | py | 2 | request.py | 2 | 0.675799 | 0.598174 | 0 | 7 | 30 | 81 |
Prodinal/GateSpacyWrapping | 18,193,481,478,922 | ee69c3e861948f14d8f03488a69526a9000809e0 | 26c103061ad3bd8c3065d4a3e84b52bca964dfb5 | /SpacyHu/SpacyHu/DependencyParser.py | c1e7303f9d81bd7bad7342a7886270736b3b8106 | [
"MIT"
]
| permissive | https://github.com/Prodinal/GateSpacyWrapping | b7330d271722e4eabe7b8edcc7b15f921322f2e4 | 046c415eb22ce7c2cc4aaca904410f852e993974 | refs/heads/master | 2020-04-10T00:53:14.594375 | 2018-12-06T16:03:51 | 2018-12-06T16:03:51 | 160,698,922 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import spacy
from spacy.tokens import Doc, Span, Token
import urllib
import xml.etree.ElementTree as ET
import re
from SpacyHu.BaseSpacyHuComponent import BaseSpacyHuComponent
class DependencyParser(BaseSpacyHuComponent):
def __init__(self,
nlp,
label='DepParser',
url='http://localhost:8000/process?run='):
necessary_modules = ['QT', 'ML3-PosLem-hfstcode', 'ML3-Dep']
super().__init__(nlp, label, url, necessary_modules)
Token.set_extension('dep_type', default='')
def get_token_by_idx(self, idx, doc):
for token in doc:
if token.idx == idx:
return token
def get_value_from_annotation(self, annotation, attr_name):
for child in annotation.getchildren():
if child.find('Name').text == attr_name:
return child.find('Value').text
def __call__(self, doc):
text = urllib.parse.quote_plus(doc.text)
result = urllib.request.urlopen(self.url + text).read()
annotationset = ET.fromstring(result).find('AnnotationSet')
for annotation in annotationset.getchildren():
if annotation.get('Type') != 'Token':
continue
word_index = int(annotation.get('StartNode'))
token = self.get_token_by_idx(word_index, doc)
# Setting head
deptarget = self.get_value_from_annotation(annotation, 'depTarget')
if deptarget is None:
continue
target_token_idx = None
for i in annotationset.getchildren():
if i.get('Id') == deptarget:
target_token_idx = int(i.get('StartNode'))
break
# Setting depType
deptype = self.get_value_from_annotation(annotation, 'depType')
if deptype is None:
raise Exception('This should not have happened, if'
'deptarget is present so should depType be')
token.head = self.get_token_by_idx(target_token_idx, doc)
token._.dep_type = deptype
# token.dep = deptype # needs to conver string to int
# https://github.com/explosion/spaCy/blob/master/spacy/symbols.pyx
return doc
if __name__ == '__main__':
from Tokenizer import HuTokenizer
debug_text = u'Autonóm autók hárítják a biztosítás terhét gyártók felé'
# debug_text = 'megszentségteleníthetetlenségeitekért meghalnak'
remote_url = 'http://hlt.bme.hu/chatbot/gate/process?run='
nlp = spacy.blank('en')
nlp.tokenizer = HuTokenizer(nlp.vocab, url=remote_url)
dependeny_parser = DependencyParser(nlp, url=remote_url)
nlp.add_pipe(dependeny_parser, last=True)
doc = nlp(debug_text)
for token in doc:
print('Token is: ' + token.text)
print('Head is: ' + token.head.text)
print('DepType is: ' + token._.dep_type)
print()
| UTF-8 | Python | false | false | 2,983 | py | 18 | DependencyParser.py | 13 | 0.595013 | 0.592992 | 0 | 81 | 35.641975 | 79 |
marmibv/yuzu-trade-bot | 18,098,992,186,836 | 6ebc277bdee03c9828da8d5828af6a97f0b94647 | 6a6f5f02b7127124fe32c40b4958394cd8cff985 | /archive/macdas_strat.py | 51f443b2ff64bc8edfcea8913689acac2cd0c162 | [
"Apache-2.0"
]
| permissive | https://github.com/marmibv/yuzu-trade-bot | 608cdf79b3f9690072c29edec29225028d700080 | 34c67d7a19a9d51a7478050ada6286d97ffa9910 | refs/heads/main | 2023-08-26T18:24:45.669570 | 2021-10-18T18:02:40 | 2021-10-18T18:02:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ta.trend import EMAIndicator
from ta.momentum import RSIIndicator
from pandas import DataFrame, Series
EMA = lambda close, len: EMAIndicator(close, len).ema_indicator()
xup = lambda left, right=0: (left.shift() < (right.shift() if isinstance(right, Series) else right)) & (left > right)
xdn = lambda left, right=0: (left.shift() > (right.shift() if isinstance(right, Series) else right)) & (left < right)
min_ticks = 44
def strategy(
data: DataFrame,
config: dict
) -> DataFrame:
data['buy_rsi'] = RSIIndicator(data.close, config['buy_rsi_len']).rsi()
data['buy_fast'] = EMA(data.close, config['buy_fast_len'])
data['buy_slow'] = EMA(data.close, config['buy_slow_len'])
macd = data['buy_fast'] - data['buy_slow']
signal = EMA(macd, config['buy_sig_len'])
macdas = macd - signal
sigdas = EMA(macdas, config['buy_sig_len'])
data['buy_hist'] = macdas - sigdas
data['sell_rsi'] = RSIIndicator(data.close, config['sell_rsi_len']).rsi()
data['sell_fast'] = EMA(data.close, config['sell_fast_len'])
data['sell_slow'] = EMA(data.close, config['sell_slow_len'])
macd = data['sell_fast'] - data['sell_slow']
signal = EMA(macd, config['sell_sig_len'])
macdas = macd - signal
sigdas = EMA(macdas, config['sell_sig_len'])
data['sell_hist'] = macdas - sigdas
data.loc[((data['buy_hist'] > 0) & (data['buy_rsi'] < config['rsi_lb'])), 'buy'] = data.close
data.loc[((data['sell_hist'] < 0) & (data['sell_rsi'] > config['rsi_ub'])), 'sell'] = data.close
return data
configs = {
'1h': {'buy_slow_len': 30, 'buy_fast_len': 11, 'buy_sig_len': 8, 'buy_rsi_len': 6, 'sell_slow_len': 41, 'sell_fast_len': 5, 'sell_sig_len': 3, 'sell_rsi_len': 10, 'rsi_lb': 39.9, 'rsi_ub': 31.9},
'1m': {'buy_slow_len': 37, 'buy_fast_len': 6, 'buy_sig_len': 3, 'buy_rsi_len': 7, 'sell_slow_len': 28, 'sell_fast_len': 11, 'sell_sig_len': 7, 'sell_rsi_len': 36, 'rsi_lb': 18.0, 'rsi_ub': 30.0, 'stop_limit_buy': 0.0029, 'stop_limit_sell': 0.0020, 'stop_limit_loss': 0.0081}
}
config_range = {
'buy_slow_len': [25,50],
'buy_fast_len': [5,24],
'buy_sig_len': [1,12],
'buy_rsi_len': [5,50],
'sell_slow_len': [25,50],
'sell_fast_len': [5,24],
'sell_sig_len': [1,12],
'sell_rsi_len': [5,50],
'rsi_lb': [0.0,70.0],
'rsi_ub': [30.0,100.0]
} | UTF-8 | Python | false | false | 2,335 | py | 34 | macdas_strat.py | 28 | 0.599143 | 0.557602 | 0 | 53 | 43.075472 | 279 |
jgericke/nd064_course_1 | 5,703,716,619,237 | a75e755cdae19523b7a26e6bcba30b83d5e350fc | 9c44d6e7d47fcb29a761bd4fa9ff804aade93941 | /project/techtrends/app.py | 0be9ffab3ee6060e21c9068c8d45c717cd371559 | []
| no_license | https://github.com/jgericke/nd064_course_1 | b288de8810857e10cf5caf36b5cf97639de3682a | 0ebcca18ecc2d1ba92072f4ef225f8c4ca5a5f42 | refs/heads/main | 2023-08-28T08:18:28.248453 | 2021-11-14T05:00:54 | 2021-11-14T05:00:54 | 427,581,861 | 0 | 0 | null | true | 2021-11-13T05:54:40 | 2021-11-13T05:54:39 | 2021-11-12T11:37:42 | 2021-11-12T06:22:34 | 19 | 0 | 0 | 0 | null | false | false | import sqlite3
import functools
import logging
import sys
from flask import (
Flask,
jsonify,
json,
render_template,
request,
url_for,
redirect,
flash,
make_response,
)
from werkzeug.exceptions import abort
# Metrics wrapper for counting (read) connections to database
def metrics_collect(func):
# Stores our global database read count
metrics_collect.read_counter = 0
# Wrap with functools to preserve function parameters
@functools.wraps(func)
def select_counter(*args, **kwargs):
# Increment read count when a (decorated) function is called
metrics_collect.read_counter += 1
return func(*args, **kwargs)
return select_counter
# Function to get a database connection.
# This function connects to database with the name `database.db`
def get_db_connection():
connection = sqlite3.connect("database.db")
connection.row_factory = sqlite3.Row
return connection
# Function to get a post using its ID
def get_post(post_id):
connection = get_db_connection()
post = connection.execute("SELECT * FROM posts WHERE id = ?", (post_id,)).fetchone()
connection.close()
return post
# Define the Flask application
app = Flask(__name__)
app.config["SECRET_KEY"] = "your secret key"
# Define the main route of the web application
@app.route("/")
@metrics_collect
def index():
connection = get_db_connection()
posts = connection.execute("SELECT * FROM posts").fetchall()
connection.close()
return render_template("index.html", posts=posts)
# Define how each individual article is rendered
# If the post ID is not found a 404 page is shown
@app.route("/<int:post_id>")
@metrics_collect
def post(post_id):
post = get_post(post_id)
if post is None:
app.logger.debug('Article id "{}" not found'.format(post_id))
return render_template("404.html"), 404
else:
# Log article title
app.logger.debug('Article "{}" retrieved!'.format(post["title"]))
return render_template("post.html", post=post)
# Define the About Us page
@app.route("/about")
def about():
app.logger.debug("About Us retrieved!")
return render_template("about.html")
# Define the post creation functionality
@app.route("/create", methods=("GET", "POST"))
def create():
if request.method == "POST":
title = request.form["title"]
content = request.form["content"]
if not title:
flash("Title is required!")
else:
connection = get_db_connection()
connection.execute(
"INSERT INTO posts (title, content) VALUES (?, ?)", (title, content)
)
connection.commit()
connection.close()
app.logger.debug('New article "{}" created!'.format(title))
return redirect(url_for("index"))
return render_template("create.html")
# Healthcheck endpoint
@app.route("/healthz", endpoint="healthcheck")
def healthcheck():
healthcheck_resp = make_response(jsonify(result="OK - healthy"), 200)
healthcheck_resp.mimetype = "application/json"
return healthcheck_resp
# Metrics endpoint
@app.route("/metrics", endpoint="metrics")
@metrics_collect
def metrics():
try:
connection = get_db_connection()
# Retrieve total posts for post_count
post_count = connection.execute(
"SELECT COUNT(id) FROM posts as posts_count"
).fetchone()[0]
except Exception as e:
app.logger.error(
"Error occurred fetching total posts from database: {}".format(e)
)
raise
# db_connection count is the total amount of connections to the database
# counting read operations via metrics_collect.read_counter and write operations
# via sqlite's builtin 'total_changes'
# (https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.total_changes)
db_connection_count = metrics_collect.read_counter + connection.total_changes
metrics_resp = make_response(
jsonify(db_connection_count=db_connection_count, post_count=post_count),
200,
)
metrics_resp.mimetype = "application/json"
return metrics_resp
# start the application on port 3111
if __name__ == "__main__":
# Defines logging configuration
stdout_handler = logging.StreamHandler(sys.stdout)
stderr_handler = logging.StreamHandler(sys.stderr)
handlers = [stderr_handler, stdout_handler]
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s, %(message)s",
datefmt="%d/%m/%Y, %H:%M:%S",
handlers=handlers,
)
app.run(host="0.0.0.0", port="3111")
| UTF-8 | Python | false | false | 4,668 | py | 5 | app.py | 1 | 0.657669 | 0.649957 | 0 | 157 | 28.732484 | 88 |
heteroscedastic/pontoon | 12,025,908,467,461 | e61affab35a27e82859416705cecbdf4afd029fb | f6dbcb0aa824a046b983af22ea262a8d2f530b06 | /apps/pontoon/urls.py | b62b2a6f7c4cec69435a48b0428d1716feb02522 | [
"BSD-3-Clause"
]
| permissive | https://github.com/heteroscedastic/pontoon | 1b10232e9a30fd533164ad0dc47419c7b2dafd60 | 9b825b2c3d1efc48f582ec70e057baff09aaac47 | refs/heads/master | 2021-01-18T06:00:45.517270 | 2012-06-15T14:39:11 | 2012-06-15T14:39:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^$', views.home, name='pontoon.home'),
url(r'^locale/(?P<locale>[A-Za-z0-9\-\@\.]+)/url/(?P<url>\S+)/$', views.home, name='pontoon.translate'),
url(r'^checkurl/', views.check_url, name='pontoon.checkurl'),
url(r'^download/', views.download, name='pontoon.download'),
url(r'^transifex/', views.transifex, name='pontoon.transifex'),
)
| UTF-8 | Python | false | false | 449 | py | 11 | urls.py | 5 | 0.643653 | 0.639198 | 0 | 12 | 36.416667 | 108 |
projectclassB172/SE_HW_B17-2 | 3,624,952,443,198 | bc4fc4fe2c963a8e610511eb8a7d9a7902a3a5fc | b5763d1c9248319db3a4d37b13323d6d8e854417 | /homework9/Group9/hw9_1720393.py | 8cbe3ff8cab1c6ee8fe345defa87bd676fe4880c | []
| no_license | https://github.com/projectclassB172/SE_HW_B17-2 | 86a4597cfa6ca2ac87588f92570baa33a649422d | ce49c4bef937c6e9f69f9170456d284dfc1eac2d | refs/heads/master | 2021-03-17T10:56:01.253381 | 2020-06-05T04:55:25 | 2020-06-05T04:55:25 | 246,984,206 | 11 | 3 | null | false | 2020-05-13T06:17:37 | 2020-03-13T03:55:45 | 2020-05-13T06:16:47 | 2020-05-13T06:17:28 | 384 | 2 | 2 | 1 | Python | false | false | #创建数据库表;
import sqlite3
conn=sqlite3.connect('D:\\untitled\\phone.db')
print("打开数据库成功")
conn.execute('''CREATE TABLE USER
(PHONENUMBER INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL ,
ADDRESS TEXT ,
COMPANY CHAR(50));''')
print("表创建成功")
conn.close()
#运行结果:
#打开数据库成功
#表创建成功
#新增联系人;
import sqlite3
conn=sqlite3.connect('D:\\untitled\\phone.db')
print("打开数据库成功")
conn.execute("INSERT INTO USER (PHONENUMBER,NAME,ADDRESS,COMPANY) \
VALUES (11245655, '张三', '上海浦东', '阿里巴巴')")
conn.commit()
num1=conn.total_changes
print("{0} rows changed in table USER.".format(num1))
#运行结果:
#打开数据库成功
#1 rows changed in table USER.
#按姓名查询联系人详细信息;
import sqlite3
conn=sqlite3.connect('D:\\untitled\\phone.db')
print("打开数据库成功")
cursor1 = conn.execute("SELECT PHONENUMBER,ADDRESS,COMPANY from USER where NAME ='张三'")
for row in cursor1:
print("phone= ", row[0])
print("ADDRESS = ", row[1])
print("COMPANY = ", row[2])
conn.close()
#运行结果:
#phone= 11245655
#ADDRESS = 上海浦东
#OMPANY = 阿里巴巴
#删除联系人;
import sqlite3
conn=sqlite3.connect('D:\\untitled\\phone.db')
print("打开数据库成功")
conn.execute("delete from USER where NAME='张三'")
conn.commit()
print("Total number of rows updated :", conn.total_changes)
num1=conn.total_changes
print("{0} rows changed in table USER.".format(num1))
conn.close()
#运行结果:
#打开数据库成功
#Total number of rows updated : 1
#1 rows changed in table USER.
| UTF-8 | Python | false | false | 1,652 | py | 325 | hw9_1720393.py | 319 | 0.698413 | 0.669553 | 0 | 61 | 21.672131 | 87 |
izmogui/ProjectEuler | 8,804,682,979,951 | b8e0570f609f1b66c1b85eecb8fc4e19b42070e0 | e58ed9f8e0537c720e68285a4c34ff8497cf83f8 | /p13.py | 0b39652d799e9ec67b9c98f71b9aab32a8153e77 | []
| no_license | https://github.com/izmogui/ProjectEuler | 5733e2a376987fc6850c18cebfdf4a767a51389f | f1cff36021e61ee06d965abf29e420e1d762d07a | refs/heads/master | 2021-05-15T12:12:48.832259 | 2017-11-01T17:45:27 | 2017-11-01T17:45:27 | 108,414,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import numpy
import time
import sys
import math
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input_file")
args = parser.parse_args()
#millisec
start = int(round(time.time() * 1000))
original_matrix = numpy.genfromtxt(args.input_file, dtype="str", delimiter="\n")
length = original_matrix.size/10
if length == 0:
digits_per_member = len(str(sys.maxint)) - 1
else:
digits_per_member = len(str(sys.maxint)) - length
# print (sys.maxsize)
print "Chunk Digits: ", digits_per_member
parcial_sum = 0
print "Members: ", len(original_matrix[0])
rounds = int(len(original_matrix[0])/digits_per_member) + 1
first = 0
last = 0
partial_sum = 0
partial_results = [0] * (rounds)
#print(partial_results[0])
print "Rounds: ", (rounds)
for i in range(0, rounds):
first = len(original_matrix[0]) - (digits_per_member * (i+1))
last = len(original_matrix[0]) - (digits_per_member * i)
if first < 0:
first = 0
for j in range(0, original_matrix.size):
#print "\tPos [", first, "][", last, "]"
#print "\t", original_matrix[j][first:last]
partial_sum += int(original_matrix[j][first:last])
carry = int (partial_sum / math.pow(10, digits_per_member))
#print "Partial Sum = ", partial_sum, "index = ", rounds - i - 1, "partial_result = ", partial_sum - carry * math.pow(10, digits_per_member)
if first == 0:
partial_results[rounds - i - 1] = partial_sum
else:
carry = int (partial_sum / math.pow(10, digits_per_member))
partial_results[rounds - i - 1] = int(partial_sum - carry * math.pow(10, digits_per_member))
partial_sum = carry
#print "Carry = ", partial_sum
print partial_results
print "Elapsed Time: ", int(round(time.time() * 1000)) - start, "milliseconds"
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,901 | py | 5 | p13.py | 5 | 0.615992 | 0.59495 | 0 | 67 | 27.373134 | 142 |
weichen2046/IntellijPluginDevDemo | 2,937,757,667,854 | fffea169055c18421a8b565ab9d2219986dffb26 | efbc988c3963de201957da6ecbf37a7c1f4a360e | /enterprise-repo/enterprepo/pluginrepo/views.py | 90f59a56bd8c8cc400d4b67a72aa5ece7bb09e49 | [
"Apache-2.0"
]
| permissive | https://github.com/weichen2046/IntellijPluginDevDemo | 1f49efb26c360ab9b4bd6e7dc2cededeb3711d4f | 433c11b439dbfc4911684ae9d7d6419aac362533 | refs/heads/master | 2021-01-22T11:03:47.354267 | 2017-09-25T23:13:23 | 2017-09-25T23:13:23 | 102,345,253 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from models import Plugin
# Create your views here.
def index(request):
context = {
'plugins': Plugin.objects.all()
}
return render(request, "pluginrepo/updatePlugins.xml", context, content_type="text/xml") | UTF-8 | Python | false | false | 329 | py | 79 | views.py | 43 | 0.693009 | 0.68997 | 0 | 13 | 24.384615 | 92 |
sdlivingstone/imdying | 2,499,671,012,091 | dda2509901cd857eaf3fd2b4a614b2eb5691208c | b42666917c301d001d56a51a55cd5879c36f410a | /cipheryo.py | 0f2a51e3d820e70e3f63c04da031187f32414e39 | []
| no_license | https://github.com/sdlivingstone/imdying | 42fc2133adef384a1e107a5a9a34cce3062af2cd | 6632c44c4b79fefef4003495e99391cb072e800d | refs/heads/master | 2021-01-19T06:36:47.756132 | 2016-07-22T15:21:56 | 2016-07-22T15:21:56 | 63,963,297 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def cipher(message,shift):
encryptedmessage = "" #we will add encyption char by char to this string
for char in message:
asc =ord(char)
if asc >= 65 and asc<=91:
asc+=shift
if asc>91:
asc-= 26
encryptedmessage += chr(asc)
elif asc <=122 and asc>=97:
asc +=shift
if asc>122:
asc-=26
encryptedmessage += chr(asc)
else:
encryptedmessage += chr(asc)
print("The message", message, "was shifted by", shift)
return encryptedmessage
msg = input("Secret Message")
shft= int(input("shift"))
print(cipher(msg,shft))
| UTF-8 | Python | false | false | 755 | py | 14 | cipheryo.py | 13 | 0.482119 | 0.458278 | 0 | 22 | 30.909091 | 76 |
ppinchuk/Lux-Design-2021 | 13,443,247,663,103 | 84a3a08a9d98f2f13bc16a694249e2dc65b0588d | 1af3bd9aefaec158812399a3f2327bfff5ea30b1 | /kits/python/mediocre/mediocre_lux/game.py | 5dc4e33593bfdd2166883507d00e7cf176895f4a | [
"Apache-2.0"
]
| permissive | https://github.com/ppinchuk/Lux-Design-2021 | dd260205552c8eea2827e4434bda54eb481754ac | 8a04ad48c6749cafc9aca986f14e75daaa31c789 | refs/heads/master | 2023-09-04T22:45:28.511504 | 2021-11-18T23:40:53 | 2021-11-18T23:40:53 | 402,617,664 | 0 | 0 | Apache-2.0 | true | 2021-11-10T22:46:34 | 2021-09-03T02:03:03 | 2021-11-10T22:36:14 | 2021-11-10T22:46:33 | 17,973 | 0 | 0 | 0 | Jupyter Notebook | false | false | import getpass
from .constants import GAME_CONSTANTS, InputConstants, LogicGlobals
from .game_map import GameMap, Position
from .game_objects import Player, Unit, City, CityTile
class Game:
def __init__(self, map_id, size_str):
self.id = int(map_id)
self.turn = -1
self.turns_until_next_night = GAME_CONSTANTS["PARAMETERS"]["DAY_LENGTH"]
self.turns_until_next_day = GAME_CONSTANTS["PARAMETERS"]["CYCLE_LENGTH"]
# get some other necessary initial input
mapInfo = size_str.split(" ")
self.map_width = int(mapInfo[0])
self.map_height = int(mapInfo[1])
self.players = [Player(0), Player(1)]
self.map = None
def _end_turn(self):
print("D_FINISH")
def _reset_player_states(self):
for p in self.players:
p.reset_turn_state()
def update(self, messages, player_id):
"""
update state
"""
self.map = GameMap(self.map_width, self.map_height)
self.turn += 1
self.turns_until_next_night = max(0,
GAME_CONSTANTS["PARAMETERS"]["DAY_LENGTH"] - self.turn % GAME_CONSTANTS["PARAMETERS"]["CYCLE_LENGTH"]
)
self.turns_until_next_day = GAME_CONSTANTS["PARAMETERS"]["CYCLE_LENGTH"] - self.turn % GAME_CONSTANTS["PARAMETERS"]["CYCLE_LENGTH"]
self._reset_player_states()
if getpass.getuser() == 'Paul':
messages = sorted(messages)
for update in messages:
if update == "D_DONE":
continue
strs = update.split(" ")
input_identifier = strs[0]
if input_identifier == InputConstants.RESEARCH_POINTS:
team = int(strs[1])
self.players[team].research_points = int(strs[2])
elif input_identifier == InputConstants.RESOURCES:
r_type = strs[1]
x = int(strs[2])
y = int(strs[3])
amt = int(float(strs[4]))
self.map._setResource(r_type, x, y, amt)
elif input_identifier == InputConstants.UNITS:
unittype = int(strs[1])
team = int(strs[2])
unitid = strs[3]
x = int(strs[4])
y = int(strs[5])
cooldown = float(strs[6])
wood = int(strs[7])
coal = int(strs[8])
uranium = int(strs[9])
self.players[team].units.append(Unit(team, unittype, unitid, x, y, cooldown, wood, coal, uranium))
self.players[team].unit_pos.add(Position(x, y))
self.players[team].unit_ids.add(unitid)
elif input_identifier == InputConstants.CITY:
team = int(strs[1])
cityid = strs[2]
fuel = float(strs[3])
lightupkeep = float(strs[4])
self.players[team].cities[cityid] = City(team, cityid, fuel, lightupkeep)
self.players[team].city_ids.add(cityid)
elif input_identifier == InputConstants.CITY_TILES:
team = int(strs[1])
cityid = strs[2]
x = int(strs[3])
y = int(strs[4])
cooldown = float(strs[5])
city = self.players[team].cities[cityid]
citytile = city._add_city_tile(x, y, cooldown)
self.map.get_cell(x, y).citytile = citytile
self.players[team].city_tile_count += 1
self.players[team].city_pos.add(Position(x, y))
elif input_identifier == InputConstants.ROADS:
x = int(strs[1])
y = int(strs[2])
road = float(strs[3])
self.map.get_cell(x, y).road = road
LogicGlobals.player = LogicGlobals.game_state.players[player_id]
LogicGlobals.opponent = LogicGlobals.game_state.players[(player_id + 1) % 2]
if self.map.resource_clusters is None:
self.map.find_clusters()
self.map.update_clusters(LogicGlobals.opponent)
if self.turn == 0:
for cluster in self.map.resource_clusters:
if len(cluster.pos_defended) >= 2:
cluster.sort_position = list(LogicGlobals.player.city_pos)[0]
elif len(cluster.pos_defended) == 1:
pos = cluster.pos_defended[0]
if pos in LogicGlobals.player.city_pos:
cluster.sort_position = list(LogicGlobals.player.city_pos)[0]
else:
cluster.sort_position = pos.reflect_about(cluster.center_pos)
self.map.update_clusters(LogicGlobals.opponent)
| UTF-8 | Python | false | false | 4,709 | py | 171 | game.py | 122 | 0.538118 | 0.528775 | 0 | 109 | 42.201835 | 139 |
dattnguyen/Leetcode_exercises | 19,456,201,877,325 | ca5c9648eda34b90fdd7ad6d240af3748fd67af6 | 72ddc142197b20ea9dec88c40d46448b56aa4f1c | /290. Word Pattern.py | efcfc8979a4d16f8620e2a7dfdb8b2d297bf66c8 | []
| no_license | https://github.com/dattnguyen/Leetcode_exercises | ee6a4325b47f8a07844bbb84511b1e158edfc7ac | d74d8987f07dcfd4e02348385f88381802adb1aa | refs/heads/master | 2022-12-31T10:15:31.443277 | 2020-10-09T20:28:56 | 2020-10-09T20:28:56 | 267,236,435 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Given a pattern and a string str, find if str follows the same pattern.
# Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
def wordPattern(pattern, str):
hmap = {}
for i, char in enumerate(pattern):
if char in hmap:
hmap[char] += [i]
else:
hmap[char] = [i]
hmap2 = {}
for i, word in enumerate(str.split(' ')):
if word in hmap2:
hmap2[word] += [i]
else:
hmap2[word] = [i]
if len(hmap) != len(hmap2):
return print('False')
for key1, key2 in zip(hmap, hmap2):
if hmap[key1] != hmap2[key2]:
return print ('False')
else:
return print('True')
pattern = 'jquery'
str = 'jquery'
wordPattern(pattern, str)
#%%
def wordPattern2(pattern, str):
words = str.split(' ')
hmap = {}
if len(words) != len(pattern):
return print('False')
for i in range(len(pattern)):
if pattern[i] in hmap:
if hmap[pattern[i]] != words[i]:
return print('False')
else:
if words[i] not in hmap.values():
hmap[pattern[i]] = words[i]
else:
return print('False')
return print('True')
wordPattern2('abaa','dog dog dog dog')
#%%
def wordPattern_set(pattern, str):
| UTF-8 | Python | false | false | 1,389 | py | 100 | 290. Word Pattern.py | 99 | 0.538517 | 0.529158 | 0 | 58 | 22.948276 | 121 |
GBuella/vltrace | 8,272,107,058,594 | a18dc8b3bda5bbc43de8e9e227b2bd9c49924d21 | 1f8bb1345510e2369be49a726548d452e5f6d35c | /tools/bin2txt/listsyscalls.py | f83f2e65e981ca8d099444ddfa9465da72170d07 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/GBuella/vltrace | c2383b7e982fee79aec218e096ce26060d4c82c7 | d10b4f2d5acef9383694161555729c618b7edc5d | refs/heads/master | 2021-01-21T12:46:42.496099 | 2017-10-02T13:33:00 | 2017-10-02T13:33:00 | 91,800,396 | 0 | 0 | null | true | 2017-05-19T11:44:43 | 2017-05-19T11:44:43 | 2017-05-12T14:40:30 | 2017-05-19T11:26:34 | 869 | 0 | 0 | 0 | null | null | null | #!/usr/bin/python3
#
# Copyright (c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from syscall import *
from utils import *
########################################################################################################################
# ListSyscalls
########################################################################################################################
class ListSyscalls(list):
def __init__(self, script_mode, debug_mode, verbose_mode):
list.__init__(self)
self.log_anls = logging.getLogger("analysis")
self.script_mode = script_mode
self.debug_mode = debug_mode
self.verbose_mode = verbose_mode
self.print_progress = not (self.debug_mode or self.script_mode)
self.time0 = 0
self.cwd_table = []
####################################################################################################################
def print(self):
for syscall in self:
syscall.print()
####################################################################################################################
def print_always(self):
for syscall in self:
syscall.print_always()
####################################################################################################################
# look_for_matching_record -- look for matching record in a list of incomplete syscalls
####################################################################################################################
def look_for_matching_record(self, info_all, pid_tid, sc_id, name, retval):
for syscall in self:
check = syscall.check_read_data(info_all, pid_tid, sc_id, name, retval, DEBUG_OFF)
if check == CHECK_OK:
self.remove(syscall)
return syscall
return -1
####################################################################################################################
def log_print_path(self, is_pmem, name, path):
if is_pmem:
self.log_anls.debug("{0:20s} \"{1:s}\" [PMEM]".format(name, path))
else:
self.log_anls.debug("{0:20s} \"{1:s}\"".format(name, path))
####################################################################################################################
@staticmethod
def log_build_msg(msg, is_pmem, path):
if is_pmem:
msg += " \"{0:s}\" [PMEM]".format(path)
else:
msg += " \"{0:s}\"".format(path)
return msg
####################################################################################################################
def set_first_cwd(self, cwd):
assert_msg(len(self.cwd_table) == 0, "cwd_table is not empty")
self.cwd_table.append(cwd)
####################################################################################################################
def set_cwd(self, new_cwd, syscall):
self.cwd_table[syscall.pid_ind] = new_cwd
####################################################################################################################
def get_cwd(self, syscall):
return self.cwd_table[syscall.pid_ind]
| UTF-8 | Python | false | false | 4,741 | py | 2 | listsyscalls.py | 2 | 0.475427 | 0.47142 | 0 | 105 | 44.152381 | 120 |
jsyadav/CrawlerFramework | 13,769,665,190,787 | 976078d1c61371387a411e1653c7451f9e21af6c | 13944ac8e5b2ee9785a5264fbb4a959594fc0cd5 | /crawler/connectors/stuvuconnector.py | bb32f31d51cd8d5243f818f1f056e8964ff97ae9 | []
| no_license | https://github.com/jsyadav/CrawlerFramework | 9bdad211bf5217ed85added15b111c7e4e6cb53f | dbd14efb81b28be6340dfd00df9d31cc6a290b08 | refs/heads/master | 2021-05-02T14:27:19.610279 | 2017-05-24T13:45:38 | 2017-05-24T13:45:38 | 54,789,643 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
from BeautifulSoup import BeautifulSoup
from datetime import datetime
import logging
from urllib2 import urlparse,unquote
import copy
from baseconnector import BaseConnector
from utils.utils import stripHtml,get_hash
from utils.urlnorm import normalize
from utils.decorators import logit
from utils.sessioninfomanager import checkSessionInfo, updateSessionInfo
log = logging. getLogger("stuvuConnector")
class StuvuConnector(BaseConnector):
@logit(log,'fetch')
def fetch(self):
self.genre = "Review"
try:
self.parent_uri = self.currenturi
if not self._setSoup():
log.info(self.log_msg("Soup not set,returning false"))
return False
if not self._getParentPage():
log.info(self.log_msg("Parent page not found"))
self._addReviews()
return True
except:
log.exception(self.log_msg("Exception in fetch"))
return False
@logit(log,'getParentPage')
def _getParentPage(self):
page = {}
page['uri'] = self.currenturi
page['title'] = stripHtml(str(self.soup.find('div','header_text_left').find('h1').find('a').renderContents()))
try:
if checkSessionInfo(self.genre, self.session_info_out, self.parent_uri, \
self.task.instance_data.get('update')):
log.info(self.log_msg('Session infor return True'))
return False
post_hash = get_hash(page)
id=None
if self.session_info_out=={}:
id = self.task.id
result = updateSessionInfo(self.genre, self.session_info_out,self.parent_uri, post_hash,'Post',self.task.instance_data.get('update'), Id=id)
if not result['updated']:
return False
page['path'] = [self.parent_uri]
page['parent_path'] = []
page['uri'] = normalize(self.parent_uri)
page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])
page['priority'] = self.task.priority
page['level'] = self.task.level
page['pickup_date'] = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")
page['posted_date'] = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")
page['connector_instance_log_id'] = self.task.connector_instance_log_id
page['connector_instance_id'] = self.task.connector_instance_id
page['workspace_id'] = self.task.workspace_id
page['client_id'] = self.task.client_id
page['client_name'] = self.task.client_name
page['last_updated_time'] = page['pickup_date']
page['versioned'] = False
page['data'] = ''
page['task_log_id'] = self.task.id
page['entity'] = 'Post'
page['category'] = self.task.instance_data.get('category','')
self.updateParentExtractedEntities(page)
log.info(self.parent_extracted_entites)
self.pages.append(page)
log.info(self.log_msg('Parent Page added'))
log.info(page)
return True
except Exception,e:
log.exception(self.log_msg("parent post couldn't be parsed"))
return False
@logit(log,'addReviews')
def _addReviews(self):
try:
reviews = self.soup.findAll('div','picbyrow')
log.info(self.log_msg('no of reviews is %s'%len(reviews)))
if not reviews:
return False
except:
log.exception(self.log_msg('No Reviews are found'))
return False
for review in reviews:
page={}
try:
page['title'] = stripHtml(review.find('div','pictext reviewtext').find('p').find('a').renderContents())
except:
page['title'] = ''
log.exception(self.log_msg("Title not found!!"))
try:
page['et_author_name'] = stripHtml(str(review.find('div','picinfo').find('p').find('a').renderContents()))
except:
log.exception(self.log_msg("Author not mentioned!!"))
try:
main_page_soup = copy.copy(self.soup)
main_page_uri = self.currenturi
self.currenturi = 'http://www.stuvu.com'+ review.find('div','pictext reviewtext').find('a')['href']
if self._setSoup():
page['data'] = stripHtml(str(self.soup.find('div','review_content').findAll('p')))
page['uri']=self.currenturi
except:
log.exception(self.log_msg("Next page couldn be parsed!!"))
try:
self.soup = copy.copy(main_page_soup)
self.currenturi = main_page_uri
self.currenturi = 'http://www.stuvu.com'+ review.find('div','picinfo').find('a')['href']
if self._setSoup():
type = self.soup.findAll('div','profilecontent')
page['et_Student_type'] = stripHtml(str(type[0]))
page['et_class_of'] = stripHtml(str(type[1]))
except:
log.exception(self.log_msg("No author details found!!"))
try:
log.info(page)
review_hash = get_hash(page)
unique_key = get_hash({'data':page['data'],'title':page['title']})
if checkSessionInfo(self.genre, self.session_info_out, review_hash,\
self.task.instance_data.get('update'),parent_list\
=[self.parent_uri]):
log.info(self.log_msg('session info return True'))
result = updateSessionInfo(self.genre, self.session_info_out, review_hash, \
review_hash,'Review', self.task.instance_data.get('update'),\
parent_list=[self.parent_uri])
if not result['updated']:
log.info(self.log_msg('result not updated'))
parent_list = [self.parent_uri]
page['parent_path'] = copy.copy(parent_list)
parent_list.append(unique_key)
page['path'] = parent_list
page['priority'] = self.task.priority
page['level'] = self.task.level
page['pickup_date'] = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")
page['posted_date'] = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")
page['connector_instance_log_id'] = self.task.connector_instance_log_id
page['connector_instance_id'] = self.task.connector_instance_id
page['workspace_id'] = self.task.workspace_id
page['client_id'] = self.task.client_id
page['client_name'] = self.task.client_name
page['last_updated_time'] = page['pickup_date']
page['versioned'] = False
page['entity'] = 'Review'
page['category'] = self.task.instance_data.get('category','')
page['task_log_id'] = self.task.id
page['uri_domain'] = urlparse.urlparse(page['uri'])[1]
self.pages.append(page)
log.info(page) # To do, remove this
log.info(self.log_msg('Review Added'))
except:
log.exception(self.log_msg('Error while adding session info'))
@logit(log,'setSoup')
def _setSoup(self, url=None, data=None, headers={}):
"""
It will set the uri to current page, written in seperate
method so as to avoid code redundancy
"""
if url:
self.currenturi = url
try:
log.info(self.log_msg( 'for uri %s' %(self.currenturi) ))
res = self._getHTML(data=data, headers=headers)
if res:
self.rawpage = res['result']
else:
log.info(self.log_msg('self.rawpage not set.... so Sorry..'))
return False
self._setCurrentPage()
return True
except Exception, e:
log.exception(self.log_msg('Page not for :%s' %uri))
raise e
| UTF-8 | Python | false | false | 8,551 | py | 269 | stuvuconnector.py | 260 | 0.525786 | 0.525085 | 0 | 186 | 44.876344 | 152 |
chiggs/cocotb | 12,859,132,115,430 | 785dcb1c40b9f1696558d190ae3840c4dcb2c9bf | 76bb4052aa3eb6e5a396d9570b4e26ec7bfd5a77 | /bin/combine_results.py | f8cf78ac088e05419d3af5edde70da05564eb78f | [
"BSD-3-Clause"
]
| permissive | https://github.com/chiggs/cocotb | 54f251ef589e87a34a40e395dde66930496bda54 | 9c89c4ffa5b1ba38859dd5ef49a2417e41ad938a | refs/heads/master | 2021-01-18T05:17:57.016180 | 2015-07-24T17:04:48 | 2015-07-24T17:04:48 | 34,111,443 | 1 | 0 | NOASSERTION | true | 2019-01-02T14:20:30 | 2015-04-17T10:47:51 | 2015-04-17T10:47:53 | 2018-12-12T10:54:03 | 3,287 | 0 | 0 | 1 | Python | false | null | #!/usr/bin/env python
"""
Simple script to combine JUnit test results into a single XML file.
Useful for Jenkins.
"""
import os
import sys
from xml.etree import cElementTree as ET
def find_all(name, path):
result = []
for root, dirs, files in os.walk(path):
if name in files:
yield os.path.join(root, name)
def main(path, output):
rc = 0
testsuite = ET.Element("testsuite", name="all", package="all", tests="0")
for fname in find_all("results.xml", path):
tree = ET.parse(fname)
for element in tree.getiterator("testcase"):
testsuite.append(element)
for child in element:
if child.tag in ["failure", "error"]:
sys.stderr.write("FAILURE: %s.%s\n" %
(element.attrib["classname"],
element.attrib["name"]))
rc = 1
result = ET.Element("testsuites", name="results")
result.append(testsuite)
ET.ElementTree(result).write(output, encoding="UTF-8")
return rc
if __name__ == "__main__":
rc = main(".", "combined_results.xml")
# Suppress exit code if run with any arguments
# Slightly hacky but argparse isnt' in 2.6
if len(sys.argv) > 1:
sys.exit(0)
sys.exit(rc)
| UTF-8 | Python | false | false | 1,319 | py | 15 | combine_results.py | 5 | 0.56558 | 0.559515 | 0 | 48 | 26.479167 | 77 |
kyosukekita/ROSALIND | 17,755,394,803,032 | 52b230971a4c7b8546ef81dadab82c8705467390 | 06cab728715b5913b3a5e8a7827e00aa061ec24c | /Algorithmic Heights/general_sink.py | 703d4a439bb611f5bcda546fe3abcb6a58f8f036 | []
| no_license | https://github.com/kyosukekita/ROSALIND | afdbd1033c55c12022f6bdcbc27b9f36c01448fd | 0547e1d55233af0e4eafcb20ff7035b802d573f1 | refs/heads/master | 2023-02-21T17:59:19.965447 | 2023-02-16T12:06:44 | 2023-02-16T12:06:44 | 231,326,082 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | file = open('Desktop/Downloads/rosalind_gs.txt', 'r').read()
N=int(file.split()[0])
tmp=[(blocks) for blocks in file.split("\n\n")]
tmp1=[block.split("\n") for block in tmp][1:]
def GeneralSink(graph,NodeNum):
answer=-1
for i in range(NodeNum):
visited=[False for _ in range(NodeNum)]
def dfs(s):
visited[s]=True
for t in graph[s]:
if visited[t]==False:
dfs(t)
visited=[False for _ in range(NodeNum)]
dfs(i)
if visited.count(True)==NodeNum:
answer=i+1
break
return answer
answer=[]
for i in range(N):
graphi=[[] for _ in range(len(tmp1[i]))]
NodeNum=int(tmp1[i][0].split()[0])
for ele in tmp1[i][1:]:
ele=list(map(int,ele.split(" ")))
graphi[ele[0]-1].append(ele[1]-1)
answer.append(GeneralSink(graphi,NodeNum))
print(' '.join(map(str,answer)))
| UTF-8 | Python | false | false | 960 | py | 195 | general_sink.py | 194 | 0.528125 | 0.5125 | 0 | 37 | 24.945946 | 60 |
wh-forker/Adversarial-Dropout | 816,043,834,107 | 8d651a45f3230a60aaa75270a91e166498f68a60 | 9afabd5f9a1018fc78ec213cd93f5b9d55ffd732 | /cnn.py | 064123d47fc7eea5b07b7f8d7f488d1cda49a99e | []
| no_license | https://github.com/wh-forker/Adversarial-Dropout | 4c564a00c2fb73f8058975809f1d176a2eeb5954 | f2594e2d9360f7d526d2ff6944919419ce24a999 | refs/heads/master | 2020-12-05T04:45:32.413674 | 2019-04-22T12:03:57 | 2019-04-22T12:03:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy
import sys, os
import layers as L
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.framework import ops
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('keep_prob_hidden', 0.5, "dropout rate")
tf.app.flags.DEFINE_float('sigma', 0.0, "gaussian noise (std)")
tf.app.flags.DEFINE_float('lrelu_a', 0.1, "lrelu slope")
tf.app.flags.DEFINE_boolean('top_bn', False, "")
tf.app.flags.DEFINE_boolean('mean_only_bn', False, "")
layer_sizes = [128, 256, 512, 256, 128] #Conv-Large
#layer_sizes = [96, 192, 192, 192, 192] #Conv-Small
#layer_sizes = [64, 128, 128, 128, 128] #Conv-Small SVHN
if FLAGS.mean_only_bn:
bn = L.mean_only_bn
else:
bn = L.bn
def logit(x, dropout_mask=None, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):
rng = numpy.random.RandomState(seed)
h = L.gl(x, std=FLAGS.sigma)
h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=layer_sizes[0], seed=rng.randint(123456), name='c1')
h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c2')
h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c3')
h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)
h = L.max_pool(h, ksize=2, stride=2)
h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[1], seed=rng.randint(123456), name='c4')
h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c5')
h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c6')
h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)
h = L.max_pool(h, ksize=2, stride=2)
h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[2], seed=rng.randint(123456), padding="VALID", name='c7')
h = L.lrelu(bn(h, layer_sizes[2], is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[2], f_out=layer_sizes[3], seed=rng.randint(123456), name='c8')
h = L.lrelu(bn(h, layer_sizes[3], is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[3], f_out=layer_sizes[4], seed=rng.randint(123456), name='c9')
h = L.lrelu(bn(h, layer_sizes[4], is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)
h = tf.reduce_mean(h, reduction_indices=[1, 2]) # Global average pooling
# dropout with mask
if dropout_mask is None:
# Base dropout mask is 1 (Fully Connected)
dropout_mask = tf.ones_like(h)
h = h*dropout_mask
h = L.fc(h, layer_sizes[4], 10, seed=rng.randint(123456), name='fc')
if FLAGS.top_bn:
h = bn(h, 10, is_training=is_training,
update_batch_stats=update_batch_stats, name='bfc')
return h, dropout_mask
if __name__ == "__main__":
tf.app.run()
| UTF-8 | Python | false | false | 4,054 | py | 5 | cnn.py | 4 | 0.647015 | 0.596695 | 0 | 78 | 49.974359 | 133 |
HenryFBP/itms-428 | 7,602,092,159,631 | e9928f123af39ce588bef8c2ee097684ddf49502 | 7aca68eb0c1b27d2d19babb772caa59ba9e0e579 | /assignments/group-project/shared_lib.py | fd1e99db9328d011085821c5f192db79da289245 | []
| no_license | https://github.com/HenryFBP/itms-428 | 341b6e750922619b6d75840c32c33c805df9cdf6 | cc13d02728d03bf0d8a53711fd1ce2ccde524af1 | refs/heads/master | 2022-04-30T22:05:55.180384 | 2022-04-22T02:46:44 | 2022-04-22T02:46:44 | 145,616,067 | 0 | 5 | null | false | 2018-11-06T22:15:59 | 2018-08-21T20:30:17 | 2018-11-06T21:48:13 | 2018-11-06T22:15:58 | 264,874 | 0 | 2 | 0 | C++ | false | null | from constants import Config
def get_login_creds(path: str) -> (str, str,):
"""Human-proof way to get login info from plaintext file."""
try:
file = open(path, 'r')
except FileNotFoundError as e:
print(f"Please make {Config.LOGIN_FILE_NAME}, and put your username and password in it, separated by a newline.")
exit(1)
except Exception as e:
print(f"Not sure what went wrong.")
print(f"Make sure all the paths and locations are right and try again.")
print(f"Also, send this error to me, please:")
print(e)
lines = file.readlines()
# Fix for blank password.
if (len(lines) is 1) and ('\n' in lines[0]):
lines = lines[0].split('\n')
if len(lines) < 2:
print(f"There's less than two lines in {Config.LOGIN_FILE_NAME}.")
print(f"Please make sure you have a username and password in there, separated by a newline.")
exit(1)
username, password = (item.strip() for item in lines[0:2])
return username, password
| UTF-8 | Python | false | false | 1,040 | py | 83 | shared_lib.py | 27 | 0.625962 | 0.618269 | 0 | 31 | 32.548387 | 121 |
wilkowski/GitCode | 12,799,002,584,340 | 3d0fbe730acba14961f723a82d5045cb48072530 | 29f75f856ef777e624c7d6a0e83dbecd6bd259d9 | /EulerProblems/Euler70.py | efe74a03777ff56b2f0e4f37aeef1665b3b7cb4c | []
| no_license | https://github.com/wilkowski/GitCode | fcabfd2329f94c3cad3f5c8e8ab5ac9552f25312 | 6e26102add798f553347cd46e658375f7b66ec1c | refs/heads/master | 2020-05-18T21:53:21.472335 | 2015-07-30T08:05:36 | 2015-07-30T08:05:36 | 12,389,461 | 0 | 0 | null | false | 2015-05-11T02:12:44 | 2013-08-26T20:53:23 | 2015-05-11T01:22:14 | 2015-05-11T02:12:44 | 28,616 | 0 | 0 | 6 | JavaScript | null | null | PRIME_RANGE = 10000000 #memory is largest constraint here
isPrimeList = [True]*PRIME_RANGE
isPrimeList[0] = False
isPrimeList[1] = False
for p in range(4,PRIME_RANGE,2):
isPrimeList[p] = False
for x in range(3,int(PRIME_RANGE**.5+1)):
if(isPrimeList[x]):
val = x*x
x2 = 2*x
while(val <PRIME_RANGE):
isPrimeList[val] = False
val += x2
#make a list of just primes
prime_count = 0
for b in isPrimeList:
if b:
prime_count+=1
prime_list = [0]*prime_count
index = 0
for i in range(0, PRIME_RANGE):
if(isPrimeList[i]):
prime_list[index] = i
index +=1
print "prime checker made"
prod = 1
#I realized halfway through the execution that this is a silly way to calculate phi(n)
#it would be much easier to calculate all of them at once
def phi(n):
start = n
for p in prime_list:
if n%p == 0:
start = (start/p)*(p-1)
while(n%p == 0):
n = n/p
if(isPrimeList[n]):
start = (start/n)*(n-1)
break
if n == 1:
break
return start
def to_list(num):
result = []
val = num
while(val >0):
result = [val%10] + result
val = val/10
return result
min_ratio = 100000000.0
best_n = 0
for n in range(2,PRIME_RANGE):
digits_used = [0]*10
phi_n = phi(n)
n_list = to_list(n)
phi_list = to_list(phi_n)
for d in n_list:
digits_used[d] +=1
for dd in phi_list:
digits_used[dd] -=1
permutation = True
for x in digits_used:
if x!= 0:
permutation = False
break
if permutation:
ratio = float(n)/float(phi_n)
if ratio < min_ratio:
print "is phi permutation", n, phi_n
print "new best ratio", ratio
best_n = n
min_ratio = ratio
print n, "with ratio", min_ratio
| UTF-8 | Python | false | false | 2,009 | py | 116 | Euler70.py | 103 | 0.519164 | 0.492782 | 0 | 88 | 21.693182 | 86 |
heyuanlong1/test7879 | 13,700,945,715,430 | 8c1a34e12757f048ee22431516f56031e991cbb1 | 0136056dd39ac03260a58153041dafc2c82dc696 | /__python/project/rank/test2.py | 60ff4ef76c88ff6a87420efdf90ade2f860bc054 | []
| no_license | https://github.com/heyuanlong1/test7879 | 4fd4ca8c25c56e7498f8160f366cc85344d6e31e | 4f411fc44fb2b0fc95b034f374769ca6aa66b67e | refs/heads/master | 2020-06-17T20:46:55.003633 | 2019-08-15T03:45:24 | 2019-08-15T03:45:24 | 74,970,442 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
"""
import time
import math
import random
from RankDB import RankDB
import time, os, sched
m_db = RankDB()
inc_second = 1
def work():
tt = time.time();
tt = math.ceil(tt)
for x in xrange(20,50):
sql_str = "INSERT INTO ko_rank_config.`rank_config` VALUES ("+ str(x) +","+ str(x + 1000) +",'历史榜',1,0,'ko_user_own.user_property','value','dec','propertyID=103',500,'对战积分','根据每个玩家的天梯做排名','all','',1,'0000-00-00 00:00:00','','0000-00-00 00:00:00','')"
print sql_str
m_db.excuteSql(sql_str)
work()
| UTF-8 | Python | false | false | 595 | py | 225 | test2.py | 90 | 0.617594 | 0.533214 | 0 | 28 | 18.857143 | 252 |
akidescent/GWC2019 | 13,108,240,223,071 | 125a05d31bf7c18802dae3322b4cbdc49e927043 | 298b2282d0fd37f768fee0c01518e712b900165d | /dictionary.py | a7e8500bb4c5b6394d0613cdedbac5f39748733f | []
| no_license | https://github.com/akidescent/GWC2019 | 0d13bd5614a29f6ff709c1dcddd4226cdeb40ceb | 5bcc23796b321c9af3a8036b554901c4e9d63e10 | refs/heads/master | 2022-04-02T21:15:11.026717 | 2020-01-29T22:37:58 | 2020-01-29T22:37:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #key-value pairs
names = {
'trish': ['Val', 'Charlotte', 'Alex', 'Miya',],
'wen': ['sophia', 'manalie', 'trish'],
'alicia': ['Jade', 'Trinity', 'Sophia']
}
user = {}
user['bri'] = 30
user['cam'] = 50
user['Tyra'] = 'cat'
user['1'] = 'tree'
names['Sophia'] = 'bird'
print(names)
print(user)
student = {'name': 'John', 'age': 25, 'courses': ['math', 'compsci']}
print(student['courses'])
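# A small illustration (my addition): dictionaries can also be looped over
# as key-value pairs with .items().
for key, value in student.items():
    print(key, '->', value)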
| UTF-8 | Python | false | false | 403 | py | 24 | dictionary.py | 19 | 0.545906 | 0.528536 | 0 | 21 | 18.190476 | 69 |
drizztSun/common_project | 11,441,792,900,230 | 1e88bf49d246db24681165eca6cd370189a97702 | 9ee51d5d6d976a4ad99704a334185f60df65f7ba | /PythonLeetcode/leetcodeM/1604_AlertUsingSameKey-CardThreeorMoreTimesinOneHourPeriod.py | b25064128b57e46dab9336baf7c4326c8d0217bb | []
| no_license | https://github.com/drizztSun/common_project | 348756c2b88c485fb9d192f5a7304c2a38cf4219 | cf4e1f1a96db896f1325de0283f8607bba5f55aa | refs/heads/master | 2021-06-05T22:35:27.071506 | 2021-06-05T17:26:46 | 2021-06-05T17:26:46 | 143,934,217 | 0 | 1 | null | false | 2021-06-05T13:01:52 | 2018-08-07T22:45:56 | 2021-06-04T22:53:01 | 2021-06-05T13:01:52 | 95,247 | 0 | 1 | 11 | Python | false | false | """
1604. Alert Using Same Key-Card Three or More Times in a One Hour Period
LeetCode company workers use key-cards to unlock office doors. Each time a worker uses their key-card, the security system saves the worker's name and the time when it was used.
The system emits an alert if any worker uses the key-card three or more times in a one-hour period.
You are given a list of strings keyName and keyTime where [keyName[i], keyTime[i]] corresponds to a person's name and the time when their key-card was used in a single day.
Access times are given in the 24-hour time format "HH:MM", such as "23:51" and "09:49".
Return a list of unique worker names who received an alert for frequent keycard use. Sort the names in ascending order alphabetically.
Notice that "10:00" - "11:00" is considered to be within a one-hour period, while "22:51" - "23:52" is not considered to be within a one-hour period.
Example 1:
Input: keyName = ["daniel","daniel","daniel","luis","luis","luis","luis"], keyTime = ["10:00","10:40","11:00","09:00","11:00","13:00","15:00"]
Output: ["daniel"]
Explanation: "daniel" used the keycard 3 times in a one-hour period ("10:00","10:40", "11:00").
Example 2:
Input: keyName = ["alice","alice","alice","bob","bob","bob","bob"], keyTime = ["12:01","12:00","18:00","21:00","21:20","21:30","23:00"]
Output: ["bob"]
Explanation: "bob" used the keycard 3 times in a one-hour period ("21:00","21:20", "21:30").
Example 3:
Input: keyName = ["john","john","john"], keyTime = ["23:58","23:59","00:01"]
Output: []
Example 4:
Input: keyName = ["leslie","leslie","leslie","clare","clare","clare","clare"], keyTime = ["13:00","13:20","14:00","18:00","18:51","19:30","19:49"]
Output: ["clare","leslie"]
Constraints:
1 <= keyName.length, keyTime.length <= 105
keyName.length == keyTime.length
keyTime[i] is in the format "HH:MM".
[keyName[i], keyTime[i]] is unique.
1 <= keyName[i].length <= 10
keyName[i] contains only lowercase English letters.
"""
class AlertNames:
def doit_hashtable(self, keyName: list, keyTime: list) -> list:
from collections import defaultdict
def timeToInt(keyTime: str) -> int:
[h, m] = keyTime.split(":")
return int(h) * 60 + int(m)
nameToTimes = defaultdict(list)
for name, time in zip(keyName, keyTime):
nameToTimes[name].append(timeToInt(time))
alerted = set()
for name, times in nameToTimes.items():
times.sort()
for i in range(2, len(times)):
# Check times that are two elements apart.
if times[i] <= times[i - 2] + 60:
alerted.add(name)
break
return sorted(alerted)
def doit_(self, keyName: list, keyTime: list) -> list:
from collections import defaultdict, deque
worker = set()
buff = defaultdict(deque)
for name, time in zip(keyName, keyTime):
if name in worker:
continue
hour, minutes = time.split(':')
hour, minutes = int(hour), int(minutes)
            def in_one_hour(c1, c2):
                # minutes elapsed between two (hour, minute) stamps
                return (c2[0] * 60 + c2[1]) - (c1[0] * 60 + c1[1]) <= 60

            # drop swipes that fall outside the sliding one-hour window
            # (this variant assumes each worker's times arrive sorted)
            while buff[name] and not in_one_hour(buff[name][0], (hour, minutes)):
                buff[name].popleft()
buff[name].append((hour, minutes))
if len(buff[name]) == 3:
del buff[name]
worker.add(name)
return sorted(list(worker))
if __name__ == '__main__':
AlertNames().doit_(["john","john","john"], ["23:58","23:59","00:01"]) | UTF-8 | Python | false | false | 3,699 | py | 2,891 | 1604_AlertUsingSameKey-CardThreeorMoreTimesinOneHourPeriod.py | 2,842 | 0.584753 | 0.534739 | 0 | 107 | 33.579439 | 178 |
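    # Hedged self-check (my addition): example 1 from the prompt. Note doit_
    # assumes each worker's swipes arrive in chronological order, which holds here.
    names = ["daniel"] * 3 + ["luis"] * 4
    times = ["10:00", "10:40", "11:00", "09:00", "11:00", "13:00", "15:00"]
    assert AlertNames().doit_hashtable(names, times) == ["daniel"]
    assert AlertNames().doit_(names, times) == ["daniel"]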
Djusk8/CodeWars | 2,516,850,868,388 | 5ad3a3f74e6cdec9979ce059136d313bf1dae1ea | c07ae35b94c770cdfde26a3084ca13560a29113b | /7 kyu/Greet Me.py | f7c2432da6958f06173d997d86546ba198ff8452 | []
| no_license | https://github.com/Djusk8/CodeWars | 2f860efdf9c93e02170a61bd3345c1a6c57b85f0 | 75cdaaab3f9152032aeaa05d06ef67599aff710b | refs/heads/master | 2021-06-24T09:33:33.393342 | 2021-03-22T06:19:56 | 2021-03-22T06:19:56 | 208,574,077 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ------------ KATA DESCRIPTION ------------
"""
https://www.codewars.com/kata/535474308bb336c9980006f2
7 kyu - Greet Me
Write a method that takes a name as its one argument and greets that name,
capitalized and ending with an exclamation point.
Example:
"riley" --> "Hello Riley!"
"JACK"  --> "Hello Jack!"
"""
# --------------- SOLUTION ---------------
import codewars_test as test
def greet(name):
return "Hello {}!".format(name.capitalize())
# --------------- TEST CASES ---------------
@test.describe('Example Tests')
def example_tests():
test.assert_equals(greet('riley'), 'Hello Riley!')
test.assert_equals(greet('molly'), "Hello Molly!")
test.assert_equals(greet('BILLY'), "Hello Billy!")
| UTF-8 | Python | false | false | 720 | py | 162 | Greet Me.py | 161 | 0.608333 | 0.579167 | 0 | 27 | 25.666667 | 114 |
findcomrade/isbio | 9,474,697,866,477 | c94dd4456755960d10628c7049c380a04081ec8f | f8d4ed6846155f64590b75c30a5267f9ee80131c | /isbio/utilz/networking.py | 8853fc2a0924b4df2fca647f8c68f9d978d14778 | []
| no_license | https://github.com/findcomrade/isbio | 5fbc11c31d6ae49d6567f9cbde9825a59c78db67 | 5935f28defe76acfdb2fa4d034104f85759c285a | refs/heads/master | 2021-01-17T03:25:45.452894 | 2017-10-18T19:01:33 | 2017-10-18T19:01:33 | 7,003,083 | 2 | 4 | null | false | 2017-01-25T15:35:20 | 2012-12-04T16:27:52 | 2016-10-24T10:34:35 | 2017-01-25T15:35:01 | 14,442 | 1 | 2 | 181 | HTML | null | null | import socket
from . import get_logger, sp
__version__ = '0.1.1'
__author__ = 'clem'
__date__ = '27/05/2016'
# clem on 20/08/2015
def is_host_online(host, deadline=5):
""" Check if given host is online (whether it respond to ping)
:param host: the IP address to test
:type host: str
	:param deadline: the maximum time to wait, in seconds (numeric or numeric string)
:type deadline: str | int
:rtype: bool
"""
res = sp.call(['ping', '-c', '3', '-i', '0.2', '-w', str(deadline), host], stdout=sp.PIPE)
return res == 0
# clem 08/09/2016 moved here on 25/05/2016
def test_tcp_connect(host, port, timeout=2):
""" Test if TCP can connect to target host on specified port
:param host: ip address or FQDN of the target host
:type host: str
:param port: TCP port number to attempt connection to
:type port: int | str
:param timeout: connection timeout time in seconds
:type timeout: int
:return: if TCP connect is successful
:rtype: bool
:raises: socket.error or Exception
"""
try:
s = socket.socket()
if type(port) is not int:
port = int(port)
try:
s.settimeout(timeout)
s.connect((host, port))
s.send('PING')
get_logger().debug('TCP can connect to %s:%s' % (host, port))
return True
finally:
s.close()
except Exception:
get_logger().debug('Failed connection to %s:%s' % (host, port))
raise
# clem 29/04/2016
def get_free_port():
"""
:return: the number of a free TCP port on the local machine
"""
sock = socket.socket()
sock.bind(('', 0))
return sock.getsockname()[1]
# clem 12/10/2016
def get_http_response(target_url, timeout=5):
""" Return the urllib2 response object from target url
Warning : No exception management. Do it yourself
:param target_url: url to reach or request object
:type target_url: str | urllib2.Request
:param timeout: time out in seconds
:type timeout: int
:return: the response object
	:rtype: urllib.addinfourl (the opened HTTP response)
:raises: (urllib2.URLError, urllib2.HTTPError)
"""
import urllib2
opener = urllib2.build_opener()
get_response = opener.open(target_url, None, timeout=timeout) or False
return get_response
# clem 12/10/2016
def get_http_code(target_url, timeout=5):
""" Return the HTTP code returned from target url
:param target_url: url to reach or request object
:type target_url: str | urllib2.Request
:param timeout: time out in seconds
:type timeout: int
:return: the response HTTP code
:rtype: int
"""
from urllib2 import URLError, HTTPError
code = 520
try:
response = get_http_response(target_url, timeout)
if hasattr(response, 'code'):
code = response.code
except (URLError, HTTPError) as e:
get_logger().warning('%s : %s' % (e, target_url))
return code
# clem 12/10/2016
def test_url(target_url, timeout=5):
""" Tells whether or not the target_url is properly reachable (HTTP200 or HTTP302)
:param target_url: url to reach or request object
:type target_url: str | urllib2.Request
:param timeout: time out in seconds
:type timeout: int
:return: does it return a proper HTTP code ?
:rtype: bool
"""
return get_http_code(target_url, timeout) in [200, 302]
# clem 18/10/2016
def network_info(network_addr):
import sys
# Get address string and CIDR string from command line
(ip_addr, cidr) = network_addr.split('/')
# Split address into octets and turn CIDR into int
addr = ip_addr.split('.')
cidr = int(cidr)
# Initialize the netmask and calculate based on CIDR mask
mask = [0, 0, 0, 0]
for i in range(cidr):
		mask[i / 8] += (1 << (7 - i % 8))
# Initialize net and binary and netmask with addr to get network
net = []
for i in range(4):
net.append(int(addr[i]) & mask[i])
# Duplicate net into broad array, gather host bits, and generate broadcast
broad = list(net)
b_range = 32 - cidr
for i in range(b_range):
broad[3 - i / 8] += (1 << (i % 8))
# Print information, mapping integer lists to strings for easy printing
print "Address: ", ip_addr
print "Netmask: ", ".".join(map(str, mask))
print "Network: ", ".".join(map(str, net))
print "Broadcast: ", ".".join(map(str, broad))
# clem 18/10/2016
def is_ip_in_network(ip_addr, network):
if isinstance(ip_addr, str):
ip_addr = unicode(ip_addr)
if isinstance(network, str):
network = unicode(network)
from ipaddress import ip_network, ip_address
return ip_address(ip_addr) in ip_network(network)
# clem 19/10/2016
def is_ip_in_fimm_network(ip_addr):
return is_ip_in_network(ip_addr, '128.214.0.0/16')
# clem 19/10/2016
def is_http_client_in_fimm_network(request):
from webhooks.hooker import HookWSGIReq
return is_ip_in_fimm_network(HookWSGIReq(request).http_remote_ip)
| UTF-8 | Python | false | false | 4,628 | py | 124 | networking.py | 57 | 0.684745 | 0.650173 | 0 | 177 | 25.146893 | 91 |
YenFus/Cat_Or_Dog | 15,719,580,304,515 | 4348dfd7971e2cb621cf34e373f796ce6034d82b | fb4507d2176e15f566512ab0c2945dc9ddbd962b | /Cat_Or_Dog.py | cfc395f60b26fda489138eac3fad842cc94519ab | []
| no_license | https://github.com/YenFus/Cat_Or_Dog | 3cee1549404906ea8fe2b8a3babdeefa1dd322f4 | 536180614b40303c40d2c12594b2faf7e8f339ee | refs/heads/master | 2022-10-11T13:39:50.731755 | 2020-06-13T16:55:57 | 2020-06-13T16:55:57 | 272,049,401 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Building CNN
from keras.models import Sequential
from keras.layers import Convolution2D,MaxPooling2D,Flatten,Dense
#Initialing CNN
classifier=Sequential()
#Step-1 Convolution
classifier.add(Convolution2D(32, (3, 3), input_shape=(64,64,3),activation="relu"))
#Step-2 Pooling
classifier.add(MaxPooling2D(pool_size=(2,2)))
#Step-3 Flattening
classifier.add(Flatten())
#Step-4 Full Connection
classifier.add(Dense(units=128,activation="relu"))
classifier.add(Dense(units=1,activation="sigmoid"))
#Compiling CNN
classifier.compile(optimizer="adam",loss="binary_crossentropy",metrics=['accuracy'])
#Fitting CNN (note: steps_per_epoch and validation_steps count batches, not
#samples -- with batch_size=32, 8000 training images correspond to 250 steps)
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,shear_range=0.2,zoom_range=0.2,horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',target_size=(64, 64),batch_size=32,class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',target_size=(64, 64),batch_size=32,class_mode='binary')
classifier.fit_generator(training_set,steps_per_epoch=8000,epochs=25,validation_data=test_set,validation_steps=2000)
# Part 3 - Making new predictions
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_2.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat' | UTF-8 | Python | false | false | 1,666 | py | 2 | Cat_Or_Dog.py | 1 | 0.746699 | 0.706483 | 0 | 49 | 32.040816 | 127 |
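# Hedged addition: a mapping-based variant of the hard-coded branch above.
# class_indices is keyed by the sub-folder names under dataset/training_set,
# so the label lookup can be derived instead of hard-coding 1 == 'dog'.
index_to_class = {v: k for k, v in training_set.class_indices.items()}
prediction = index_to_class[int(result[0][0] > 0.5)]
print(prediction)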
kunweiTAN/techgym_ai | 13,907,104,111,395 | 0da92d728f285c52591488738df28ca7de3d9ea2 | 90f545733f076747bad979faa3a8cf23867f7a3a | /Qf5R.py | b43981ebbf85fae51f6caae0d13e314cacb50079 | []
| no_license | https://github.com/kunweiTAN/techgym_ai | f85dc52ce6e75f4c08213d5796171908beb9a69e | 051274bcc789a563c46ed5661301535e76ae1e18 | refs/heads/master | 2023-08-17T05:15:18.758183 | 2021-09-21T11:57:07 | 2021-09-21T11:57:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #AI-TECHGYM-1-7-A-4
#Unsupervised learning: association analysis
import pandas as pd
import urllib.request as req
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
#githubからファイルをDownloadできない場合は以下を実行
#url = "http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx"
#req.urlretrieve(url, "Online_Retail.xlsx")
#trans = pd.read_excel('Online_Retail.xlsx', sheet_name='Online Retail')
#trans.to_csv("./Online_Retail.csv")
#Load the purchase data
trans = pd.read_csv('Online_Retail.csv')
#####Preprocessing#####
#Filter out cancelled orders and rows without a customer ID
# Add the first character of InvoiceNo as cancel_flg
trans['cancel_flg'] = trans.InvoiceNo.map(lambda x:str(x)[0])
# Group by cancel_flg and count the rows
trans.groupby('cancel_flg').size()
#Overwrite trans with only the valid records
trans = trans[(trans.cancel_flg == '5') & (trans.CustomerID.notnull())]
################
#Look up the names of the target products
display(trans[trans['StockCode'] == '85123A'].head(1))
display(trans[trans['StockCode'] == '47566'].head(1))
#Fetch the product image
url = "https://giftsatpinkparrot.com/wp-content/uploads/2013/02/AX182.jpg"
req.urlretrieve(url, "HEART.jpg")
#Point at the downloaded image
image = "HEART.jpg"
img_r = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
#Display the image
plt.axis("off")
plt.imshow(img_r)
plt.title("85123A : WHITE HANGING HEART T-LIGHT HOLDER ")
plt.show()
#Fetch the product image
url = "https://previews.123rf.com/images/tribalium123/tribalium1231210/tribalium123121000184/15870997-party-flags-party-pennant-bunting-bunting-flags.jpg"
req.urlretrieve(url, "FLAG.jpg")
#Point at the downloaded image
image = "FLAG.jpg"
img_f = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
#Display the image
plt.axis("off")
plt.imshow(img_f)
plt.title("47566 : PARTY BUNTING ")
plt.show()
#Discussion
print("In the UK, Valentine's Day is often celebrated with lit candles,")
print("so these two items are likely to be sold together as party goods")
| UTF-8 | Python | false | false | 2,134 | py | 517 | Qf5R.py | 412 | 0.707303 | 0.660188 | 0 | 68 | 22.970588 | 154 |
jojonki/atcoder | 3,556,232,951,356 | e694a038bb211a99c354d02a907da32e6a207ec3 | 3df995fa02a43932ab2ea5fea26c06403f139f1f | /abc/abc150c.py | 81d9aa789d85e0f3b34308f53008ebdfb334fbee | []
| no_license | https://github.com/jojonki/atcoder | 75fb7016dd90b3b7495f1ff558eedcdc755eac11 | ec487b4e11835f25c6770f0115b98b7e93b16466 | refs/heads/master | 2021-06-23T09:56:05.636055 | 2021-03-13T03:38:50 | 2021-03-13T03:38:50 | 201,834,404 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import permutations
def tuple_to_int(tp):
return int(''.join(map(str, list(tp))))
def main():
N = int(input())
d = [i + 1 for i in range(N)]
P = list(map(int, input().split()))
P = tuple_to_int(P)
Q = list(map(int, input().split()))
Q = tuple_to_int(Q)
data = []
for v in permutations(d, N):
data.append(tuple_to_int(v))
data.sort()
print(abs(data.index(P) - data.index(Q)))
main()
| UTF-8 | Python | false | false | 456 | py | 822 | abc150c.py | 452 | 0.554825 | 0.552632 | 0 | 23 | 18.826087 | 45 |
zopefoundation/zope.app.pagetemplate | 2,740,189,151,010 | 0e80122a2400ec43beeafd5c778ddc5b66e42404 | 6c445fc96d8769dd993f42b7079df430fa6c0101 | /src/zope/app/pagetemplate/tests/test_nested.py | 2d07ad83752dbf8c285f92230fb252319479bfca | [
"ZPL-2.1"
]
| permissive | https://github.com/zopefoundation/zope.app.pagetemplate | 397536cb8021a7a014099643b69720c7ebcbfa70 | c1dcd73b212313aec6b4d6aa7a6ed5fc7636e544 | refs/heads/master | 2023-08-18T12:44:57.918934 | 2023-02-07T09:12:45 | 2023-02-07T09:12:45 | 8,727,833 | 0 | 1 | NOASSERTION | false | 2023-02-07T09:09:09 | 2013-03-12T13:00:41 | 2022-09-16T08:56:18 | 2023-02-07T09:09:08 | 761 | 0 | 3 | 0 | Python | false | false | """Test that nested macro references do the right thing.
"""
__docformat__ = "reStructuredText"
import unittest
from zope.browserpage import ViewPageTemplateFile
from zope.component.testing import PlacelessSetup
from zope.publisher.browser import TestRequest
class Context:
pass
class View:
def __init__(self, context, request):
self.context = context
self.request = request
EXPECTED = """\
<html>
<head>
<title>Example: outer</title>
</head>
<body>
hello
<div>
<div>
inner body slot content
</div>
intermediate body slot stuff
</div>
</body>
</html>
"""
class Test(PlacelessSetup, unittest.TestCase):
def testMacroExtension(self):
# This test demonstrates how macro extension allows a macro to extend
# and re-offer a slot for a client template to fill.
outer = ViewPageTemplateFile('outer.pt')
intermediate = ViewPageTemplateFile('intermediate.pt')
inner = ViewPageTemplateFile('inner.pt')
context = Context()
request = TestRequest()
view = View(context, request)
self.assertIn('outer body slot', outer(view))
namespace = inner.pt_getContext(view, request)
namespace['outer'] = outer
namespace['intermediate'] = intermediate
result = inner.pt_render(namespace)
self.assertEqual(result.replace("\r\n", "\n"), EXPECTED)
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromTestCase(Test)
| UTF-8 | Python | false | false | 1,474 | py | 23 | test_nested.py | 11 | 0.679783 | 0.679783 | 0 | 63 | 22.396825 | 77 |
abmestri25/pythonPrograms | 5,050,881,549,046 | 3d44286ee83057dc211ce060eb5b807fa9973aa9 | 4992a6006685757f7fd35e2c665f1cfd4ae0bc74 | /prime.py | 33387ed8f971edc54d29f2591e89512b9c7de046 | []
| no_license | https://github.com/abmestri25/pythonPrograms | b991c2625d3c448bf782f25eb60112390d208496 | a8162cac8f5c7d4a967125a94ab98cc491fd17bf | refs/heads/master | 2020-05-05T06:35:14.391912 | 2019-04-06T05:57:10 | 2019-04-06T05:57:10 | 179,794,135 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=int(input("Enter number : "))
#logic goes here
flag=0
for i in range (2,n):
if(n%i==0):
flag=1
break
if(n==1):#this check must stay outside the for loop
print("1 is neither prime nor composite number")
elif(flag==0):
print("Number is prime")
else:
print("Number is not prime")
| UTF-8 | Python | false | false | 318 | py | 13 | prime.py | 13 | 0.600629 | 0.578616 | 0 | 16 | 18.5 | 52 |
wencong1724427771/Blog-system | 13,649,406,071,832 | 4286af408ba6d6009c0fea09334f5bef433a6578 | 39592843b892fb4a57ca254f776f5b0e47d36888 | /comment/migrations/0002_rename_targrt_comment_target.py | 3af31b80ac374ed4f8cb0b9691dd9881a25f4cfe | []
| no_license | https://github.com/wencong1724427771/Blog-system | 667dbbb1d2e895aa15b9feba8bd9fec24e8d47c1 | b99bf16a2e21307a69f20376be5376246d8a7dfd | refs/heads/main | 2023-08-25T04:21:31.156357 | 2021-09-27T16:19:16 | 2021-09-27T16:19:16 | 410,961,678 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2.7 on 2021-09-21 01:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='targrt',
new_name='target',
),
]
| UTF-8 | Python | false | false | 371 | py | 23 | 0002_rename_targrt_comment_target.py | 16 | 0.533693 | 0.48248 | 0 | 18 | 18.611111 | 47 |
Evoletto/PDB_analysis_tools | 11,098,195,530,854 | 374d8fb31e616cc0006beb8991165ba809994e07 | f5c6039478113e483b5de790d74ca9eb48aa39c8 | /FAB_Two_filters_final_whole_folder_automated.py | f66304e91f042d5d7dfb46fa53b8a4f226202413 | []
| no_license | https://github.com/Evoletto/PDB_analysis_tools | e1c2950929ee9e9c8d516322c8aa553e4fdf6313 | 25b1adad6a7588fd6aebe7be4b55c21836e4130d | refs/heads/master | 2020-05-23T09:27:27.575535 | 2019-08-09T19:05:11 | 2019-08-09T19:05:11 | 186,706,818 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #*************************************************************************
#
# Program: Extraction of FAB information
# File: FAB_Two_filters_final_whole_folder_automated.py
#
# Version: V1.0
# Date: 27.06.19
# Function: Analysis of the FAB .pdb files. Looking for CYS on the C-terminus of
# Light Chain. Downloads the correct pdb files from www.rcsb.org
#
#
# Copyright: (c) Alina Chrzastek, UCL, 2019
# Author: Alina Chrzastek
# Address: Institute of Structural and Molecular Biology
# Division of Biosciences
# University College
# Gower Street
# London
# WC1E 6BT
# EMail: a.chrzastek.18@ucl.ac.uk
#*************************************************************************
import re
import os, shutil
import os.path
import urllib.request
import sys
from subprocess import call
# Get all .pdb file names from given directory
def get_file_content(directory_path, filename):
if not filename.endswith(".pdb"):
# prevents reading hidden files like .DS_Sore
return None
# read the content
return open(directory_path + filename).readlines()
# Download file from web to set directory path
def download_file(file, to_directory):
striped_filename = file.rsplit('_', 1)[0]
url = 'https://files.rcsb.org/download/' + striped_filename + ".pdb"
urllib.request.urlretrieve(url, to_directory + file)
# Download files from provided array of files to set directory path
def download_files(files, path):
count = len(files)
print("\n\nWill attempt to download ", count, " files")
input("Press enter to continue")
for index, file_to_download in enumerate(files, start=1):
print("Downloading file ", index, "/", count)
download_file(file_to_download, path)
#print(file.rsplit('.', 1)[0], light_chain, heavy_chain)
# Copy files that met given condition to a new directory
def check_downloaded_files_for_compatibility(directory_to_files):
all_files = sorted(os.listdir(directory_to_files))
path_to_compatible_files = directory_to_files + "Compatible"
# create folder if does not exists and remove old version if already exists
if os.path.exists(path_to_compatible_files):
shutil.rmtree(path_to_compatible_files)
os.makedirs(path_to_compatible_files)
for index, file in enumerate(all_files, start=1):
print("Checking file ", index, "/", len(all_files))
file_contents = get_file_content(directory_to_files, file)
if file_contents is None:
continue
        for line in file_contents:
            # NOTE: the original test startswith("SEQRES  17 L" and "TER")
            # evaluates to startswith("TER"), because `and` returns its second
            # operand; the intent is made explicit here.
            stripped = line.strip()
            if stripped.startswith("TER") and stripped.find("CYS") > 0:
                #print("LC ends on " + stripped[-9:-6])
                shutil.copy(directory_to_files + file, path_to_compatible_files)
                break
call(["open", path_to_compatible_files])
# Finish program
def close():
sys.exit()
files_to_download = []
directory_path = input("Please provide directory path to your original repository\n")
if len(directory_path) > 0:
directory_to_download_files = input("Please provide directory path where compatible files will be downloaded\n")
if len(directory_to_download_files) > 0:
all_files = sorted(os.listdir(directory_path))
for index, file in enumerate(all_files, start=1):
print("Checking file ", index, "/", len(all_files))
file_contents = get_file_content(directory_path, file)
if file_contents is None:
continue
light_chain = ""
heavy_chain = ""
for line in file_contents:
striped_line = line.strip()
if line.startswith("REMARK 950 CHAIN L L"):
light_chain = striped_line[-1]
if line.startswith("REMARK 950 CHAIN H H"):
heavy_chain = striped_line[-1]
if striped_line.startswith("SEQRES 17 L") and striped_line.find("CYS") > 0:
files_to_download.append(file)
break
file_count = len(files_to_download)
files_in_download_directory = os.listdir(directory_to_download_files)
if len(files_in_download_directory) > 1:
print("Files in directory where you want to download new files already exists,\ndo you want to override them?")
action = input("Download or Skip? (D/S): ")
if action == "D":
download_files(files_to_download, directory_to_download_files)
check_downloaded_files_for_compatibility(directory_to_download_files)
else:
print("Skipping downloading files")
check_downloaded_files_for_compatibility(directory_to_download_files)
else:
download_files(files_to_download, directory_to_download_files)
check_downloaded_files_for_compatibility(directory_to_download_files)
print("Done")
else:
print("Directory path provided is empty, closing\n")
close()
else:
print("Directory path provided is empty, closing\n")
close()
| UTF-8 | Python | false | false | 4,720 | py | 19 | FAB_Two_filters_final_whole_folder_automated.py | 18 | 0.674364 | 0.665466 | 0 | 129 | 35.550388 | 114 |
Aasthaengg/IBMdataset | 9,070,970,934,653 | 39ab4587652a9f052d58298c43e73aa21261859d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03050/s858343523.py | 6f469dfab8a635b844e298fab5b44809c8f2acea | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | MOD = 10 ** 9 + 7
INF = 10 ** 12
import sys
sys.setrecursionlimit(100000000)
dy = (-1,0,1,0)
dx = (0,1,0,-1)
from itertools import permutations
from heapq import heapify,heappop,heappush
def main():
n = int(input())
ans = 0
for i in range(1,n):
if i * i > n:
break
if n%i == 0:
j = n//i
if j - 1 > i:
ans += j - 1
print(ans)
if __name__ =='__main__':
main() | UTF-8 | Python | false | false | 454 | py | 202,060 | s858343523.py | 202,055 | 0.473568 | 0.407489 | 0 | 23 | 18.695652 | 42 |
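# Hedged sanity check (my addition): brute-force restatement of the task --
# m is counted when floor(n / m) equals n mod m; main() above sums those m.
def brute(n):
    return sum(m for m in range(1, n + 1) if n // m == n % m)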
blue1335/Object-Oriented-Programming | 2,628,519,998,013 | 95353c68dd283266693e4c641957d2982304d786 | b605808d5e24d026775ed9378d05023bf847f95f | /OOP Codes/classes.py | 5240751caeb26f1b6cf2a7176d443ddd01589d80 | []
| no_license | https://github.com/blue1335/Object-Oriented-Programming | 195f43e4b27addf3df5a0f2ebf63c9f584967de7 | a14adf8d90fdc41da3d09505e622071f04932020 | refs/heads/master | 2022-04-04T21:55:59.129942 | 2019-12-28T18:46:04 | 2019-12-28T18:46:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # integer = 10
# string = "messi" "barcelona football team"
# %%
integer1 = 33
string1 = "messi"
# %% classes
employee1_name = "messi"
employee1_age = 33
employee1_address = "asdasdas"
class Employee:
# attribute = age, address, name
# behaviour = pass
pass
employee1 = Employee()
# %% attribute
class Footballer:
football_club = "barcelona"
age = 30
f1 = Footballer()
print(f1)
print(f1.age)
print(f1.football_club)
f1.football_club = "real madrid"
print(f1.football_club)
# %% methods
class Square(object):
edge = 5 # meter
area = 0
def area1(self):
self.area = self.edge * self.edge # 5*5
print("Area: ",self.area)
###############
s1 = Square()
print(s1)
print(s1.edge)
print(s1.area1())   # area1 prints the area and returns None, so this line also prints "None"
s1.edge = 7
s1.area1()
# %% methods vs functions
class Emp(object):
age = 25 #
salary = 1000 # $
def ageSalaryRatio(self):
a = self.age / self.salary
print("method: ", a)
e1 = Emp()
e1.ageSalaryRatio()
# ------------------------------------------------------
# function
def ageSalaryRatio(age, salary):
a = age / salary
print("function: ",a)
ageSalaryRatio(30, 3000)
#
def findArea(a, b): # a = pi, b = r
area = a*b**2
# print(area)
return area
pi = 3.14
r = 5
result1 = findArea(pi, r)
print(result1)
result2 = findArea(pi, 10)
print(result1 + result2)
# %% initializer or constructor
class Animal(object):
def __init__(self, a, b): # ( name, age) = ("dog", 2) = (a, b)
self.name = a
self.age = b
def getAge(self):
print("")
return self.age
def getName(self):
print(self.name)
a1 = Animal("dog", 2)
a2 = Animal("cat",4)
a3 = Animal("bird", 6)
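# Quick usage (my addition): the initializer stores each animal's state and the
# methods read it back.
a1.getName()        # prints "dog"
print(a2.getAge())  # getAge prints a blank line, then this prints 4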
| UTF-8 | Python | false | false | 1,805 | py | 1 | classes.py | 1 | 0.54132 | 0.50416 | 0 | 111 | 15.234234 | 66 |
pandasasa/ivalice | 18,837,726,566,418 | dc0c22ad9f3bf15c4fdc8febf5a143c2433aa16a | a183b0b638b25445b162df655271f890345e7c04 | /ivalice/ranking.py | 881d6e03407f45ff72e8d43de5dc3a854bc1bb63 | []
| no_license | https://github.com/pandasasa/ivalice | e57d774602931cfccc79d8c73a05dd03adf1df43 | d3888c73aec3f2968febb091d02dd5c6dafa6b27 | refs/heads/master | 2021-01-14T14:18:15.455904 | 2015-04-06T02:25:09 | 2015-04-06T02:25:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .impl.lambda_mart import LambdaMART
from .impl.mcrank import McRank
from .impl.mcrank import OrdinalMcRank
| UTF-8 | Python | false | false | 112 | py | 20 | ranking.py | 19 | 0.830357 | 0.830357 | 0 | 3 | 36.333333 | 40 |
qianliu013/leetcode | 11,149,735,116,264 | adbbbc78257d8f06cb703bffa9bbb67ec0858c6f | a273d3f988c53773b528572c9907eea961e22114 | /medium/113.py | 99f9647ca3357906546d9483d34fdb2b08c406ba | []
| no_license | https://github.com/qianliu013/leetcode | 40995cc83a765383b59605a9438a1cbfde0cf370 | b7ecd191411804b2d6d58e2e8233c3bc07aada59 | refs/heads/master | 2020-12-30T14:55:19.616392 | 2018-04-21T12:13:49 | 2018-04-21T12:13:49 | 91,094,164 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
"""Path Sum II."""
def _solve(root, sum):
res = []
def _dfs(node, path, path_sum):
if node:
if node.left or node.right:
_dfs(node.left, path + [node.val], path_sum + node.val)
_dfs(node.right, path + [node.val], path_sum + node.val)
elif path_sum + node.val == sum:
res.append(path + [node.val])
_dfs(root, [], 0)
return res
def _solve1(root, sum):
res, path = [], []
def _dfs(node, remaining):
if node:
path.append(node.val)
if not node.left and not node.right and node.val == remaining:
res.append(list(path))
else:
_dfs(node.left, remaining - node.val)
_dfs(node.right, remaining - node.val)
path.pop()
_dfs(root, sum)
return res
| UTF-8 | Python | false | false | 872 | py | 380 | 113.py | 378 | 0.493119 | 0.489679 | 0 | 34 | 24.647059 | 74 |
moviedatascience/DS-Unit-3-Sprint-3-Productization-and-Cloud | 15,264,313,783,769 | 172dc836ea1dd07c962e2de4ce462b7c01639aec | 5162f6d36f29c868bb7e7d7d0e86f49be2f22e0b | /sprint-challenge/aquality/models.py | a27aade117ca329d57e1027acd976ac62d5f11dd | [
"MIT"
]
| permissive | https://github.com/moviedatascience/DS-Unit-3-Sprint-3-Productization-and-Cloud | a197518b88ffde5bee31c4cc9973b9d1bbc8cfe8 | 053f742e8736c29ad2fa7f21e9f9182ef1aa0ef1 | refs/heads/master | 2020-08-13T21:26:11.049782 | 2019-10-18T20:11:31 | 2019-10-18T20:11:31 | 215,040,563 | 0 | 0 | null | true | 2019-10-14T12:39:04 | 2019-10-14T12:39:03 | 2019-07-05T16:44:03 | 2019-10-14T03:12:39 | 19 | 0 | 0 | 0 | null | false | false | from flask_sqlalchemy import SQLAlchemy
#instantiating database
DB = SQLAlchemy()
#creating records class
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return "< Record {} --- Time {} >".format(self.datetime, self.value)
| UTF-8 | Python | false | false | 371 | py | 4 | models.py | 2 | 0.679245 | 0.673854 | 0 | 13 | 27.538462 | 76 |
frontendprof/Learn-Python-by-Doing-by-Jose-Espanol | 2,439,541,442,741 | fd7f279a9501b3838ce79ad1e56830a93a01822e | 30ceeacbd734af4d76b4fb0ddf4812dfa615bd35 | /MileStone_1/app.py | 6bdc219e9e70e66b9ca57f89632d71386bfe83a8 | [
"BSD-3-Clause"
]
| permissive | https://github.com/frontendprof/Learn-Python-by-Doing-by-Jose-Espanol | e000d211b6e7f3608d9cb1fb57d4300761ef03a5 | 5a4a6a6843fec6bbe231900aa0053021c69649d9 | refs/heads/master | 2020-06-03T09:05:38.819433 | 2019-07-30T22:50:20 | 2019-07-30T22:50:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # B_R_R
# M_S_A_W
"""
Create movie collector app:
that adds new movies,
that lists movies,
that finds movies,
and quits from app...
"""
movies=[]
def menu():
user_input=input("""
Enter 'a' to add a new movie, \t\t'l' to list the movies,
    'f' to find a movie, \t\t'q' to quit the program\n""")
while user_input!='q':
if user_input=='a':
add_movie()
elif user_input=='l':
list_movies(movies)
elif user_input=='f':
find_movies()
else:
print("Unknown command. Can you repeat again please")
user_input = input("""
Enter 'a' to add a new movie, \t\t'l' to list the movies,
        'f' to find a movie, \t\t'q' to quit the program\n""")
def add_movie():
name=input("What is the name of the movie you want to add? ")
director=input("Who is the director of the movie? ")
year=input("What is the year of the movie? ")
movie={'name':name, 'director':director,'year': year}
movies.append(movie)
def list_movies(movies_list):
for mov in movies_list:
show_mov_details(mov)
def show_mov_details(movie):
print(f"Name: {movie['name']}")
print(f'Director: {movie["director"]}')
print(f'Year: {movie["year"]}')
def find_movies():
    find_by=input("Which field do you want to search by (name/director/year)? ")
    looking_for=input("What value are you looking for? ")
    found_movies=find_movies_by_attribute(movies,looking_for,lambda x: x[find_by])
    list_movies(found_movies)  # the search returns a list, so print it with the list helper
def find_movies_by_attribute(items,expected,finder):
found=[]
for i in items:
if finder(i)==expected:
found.append(i)
return found
menu()
| UTF-8 | Python | false | false | 1,833 | py | 33 | app.py | 29 | 0.573377 | 0.573377 | 0 | 82 | 21.341463 | 92 |
KaranToor/MA450 | 6,820,408,079,718 | 08efa7e1e708ca63d5e9450572ace61683cd22bf | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/compute/copy_files.py | 4dd967e96d6572401578413fc7402b3b1020c4ff | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | https://github.com/KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | false | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | 2020-12-24T00:36:47 | 2020-12-24T00:38:08 | 46,069 | 1 | 1 | 4 | Python | false | false | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for copying files from and to virtual machines."""
import collections
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute import ssh_utils
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.util import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
RemoteFile = collections.namedtuple(
'RemoteFile', ['user', 'instance_name', 'file_path'])
LocalFile = collections.namedtuple(
'LocalFile', ['file_path'])
class CopyFiles(ssh_utils.BaseSSHCLICommand):
"""Copy files to and from Google Compute Engine virtual machines."""
@staticmethod
def Args(parser):
ssh_utils.BaseSSHCLICommand.Args(parser)
parser.add_argument(
'sources',
help='Specifies a source file.',
metavar='[[USER@]INSTANCE:]SRC',
nargs='+')
parser.add_argument(
'destination',
help='Specifies a destination for the source files.',
metavar='[[USER@]INSTANCE:]DEST')
# TODO(user): Use flags.AddZoneFlag when copy_files supports URIs
zone = parser.add_argument(
'--zone',
help='The zone of the instance to copy files to/from.',
action=actions.StoreProperty(properties.VALUES.compute.zone))
zone.detailed_help = (
('The zone of the instance to copy files to/from. If omitted, '
'you will be prompted to select a zone.\n\n') +
flags.ZONE_PROPERTY_EXPLANATION)
def Run(self, args):
super(CopyFiles, self).Run(args)
file_specs = []
# Parses the positional arguments.
for arg in args.sources + [args.destination]:
if ssh.IsScpLocalPath(arg):
file_specs.append(LocalFile(arg))
else:
user_host, file_path = arg.split(':', 1)
user_host_parts = user_host.split('@', 1)
if len(user_host_parts) == 1:
user = ssh.GetDefaultSshUsername(warn_on_account_user=True)
source_instance = user_host_parts[0]
else:
user, source_instance = user_host_parts
file_specs.append(RemoteFile(user, source_instance, file_path))
log.debug('Normalized arguments: %s', file_specs)
# Validates the positional arguments.
# TODO(user): Look into relaxing these conditions.
sources = file_specs[:-1]
destination = file_specs[-1]
if isinstance(destination, LocalFile):
for source in sources:
if isinstance(source, LocalFile):
raise exceptions.ToolException(
'All sources must be remote files when the destination '
'is local.')
else: # RemoteFile
for source in sources:
if isinstance(source, RemoteFile):
raise exceptions.ToolException(
'All sources must be local files when the destination '
'is remote.')
destination_instances = set()
for file_spec in file_specs:
if isinstance(file_spec, RemoteFile):
destination_instances.add(file_spec.instance_name)
if len(destination_instances) > 1:
raise exceptions.ToolException(
'Copies must involve exactly one virtual machine instance; '
'your invocation refers to [{0}] instances: [{1}].'.format(
len(destination_instances), ', '.join(
sorted(destination_instances))))
source_instance_ref = instance_flags.SSH_INSTANCE_RESOLVER.ResolveResources(
[source_instance], compute_scope.ScopeEnum.ZONE, args.zone,
self.resources,
scope_lister=flags.GetDefaultScopeLister(
self.compute_client, self.project))[0]
source_instance = self.GetInstance(source_instance_ref)
external_ip_address = ssh_utils.GetExternalIPAddress(source_instance)
# Builds the scp command.
scp_args = [self.env.scp]
if not args.plain:
scp_args.extend(ssh.GetDefaultFlags(self.keys.key_file))
host_key_alias = self.HostKeyAlias(source_instance)
scp_args.extend(ssh.GetHostKeyArgs(host_key_alias, args.plain,
args.strict_host_key_checking))
scp_args.append('-r')
for file_spec in file_specs:
if isinstance(file_spec, LocalFile):
scp_args.append(file_spec.file_path)
else:
scp_args.append('{0}:{1}'.format(
ssh.UserHost(file_spec.user, external_ip_address),
file_spec.file_path))
self.ActuallyRun(
args, scp_args, user, source_instance, source_instance_ref.project)
CopyFiles.detailed_help = {
'brief': 'Copy files to and from Google Compute Engine virtual machines',
'DESCRIPTION': """\
*{command}* copies files between a virtual machine instance
and your local machine.
To denote a remote file, prefix the file name with the virtual
machine instance name (e.g., _example-instance_:~/_FILE_). To
denote a local file, do not add a prefix to the file name
(e.g., ~/_FILE_). For example, to copy a remote directory
to your local host, run:
$ {command} example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a
In the above example, ``~/REMOTE-DIR'' from ``example-instance'' is
copied into the ~/_LOCAL-DIR_ directory.
Conversely, files from your local computer can be copied to a
virtual machine:
$ {command} ~/LOCAL-FILE-1 ~/LOCAL-FILE-2 example-instance:~/REMOTE-DIR --zone us-central1-a
If a file contains a colon (``:''), you must specify it by
either using an absolute path or a path that begins with
``./''.
Under the covers, *scp(1)* is used to facilitate the transfer.
        When the destination is local, all sources must come from the same
        virtual machine instance. When the destination is remote, all
        sources must be local.
This command ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
    SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
}
| UTF-8 | Python | false | false | 6,945 | py | 1,342 | copy_files.py | 977 | 0.666811 | 0.663067 | 0 | 180 | 37.583333 | 102 |
swipswaps/Indian-Sign-Language-Gesture-Recognition | 18,365,280,172,498 | b9033c55c8514bca59c191f3101513f37ea9c0bc | 9d508d4f71062b3fdf2090c40da2705d03794ad2 | /aud2gest/views.py | e30084a58efb09f441e1ed2d27fee291b88486b9 | []
| no_license | https://github.com/swipswaps/Indian-Sign-Language-Gesture-Recognition | ea4ac533fc80d838090e74712637ab432485041f | 785b2d1e3ead88001d3ef6fbaf3b525ef38a57ce | refs/heads/master | 2022-12-16T12:26:03.482686 | 2020-04-24T07:04:25 | 2020-04-24T07:04:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render,redirect
from django.http import HttpResponse
import speech_recognition as sr
from .forms import UploadAudio
from .models import AudioDb
from knk.settings import *
from django.core.files.base import ContentFile
import matplotlib
from matplotlib.pyplot import figure
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.core.files.storage import default_storage
from random import randint
from pydub import AudioSegment
from os import path
from pydub import AudioSegment
import json
# Create your views here.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def filename(audio):
return str(audio) , str(audio).split(".")[0]+'.txt' , str(audio).split(".")[0]+'.png'
def audio_url(audio):
return 'aud2gest/audioFiles/'+str(audio)
def image_url(text,image_name):
Alp = {}
for code in range(ord('A'), ord('Z') + 1):
Alp[chr(code)]=os.path.join(MEDIA_ROOT,"Alphabets",chr(code)+".jpg")
words=text.split(' ')
max_len=max(len(w) for w in words)
if len(words) < 4:
        words+=['','','']   # pad so the subplot grid always has at least 4 rows
plt.subplot()
j=0
for word in words:
i=1+j*max_len
for key in word:
image = mpimg.imread((Alp[key.upper()]))
plt.subplot(len(words),max_len,i)
plt.axis('off')
plt.imshow(image, aspect='auto')
plt.subplots_adjust(left=0, right=1, top=1, bottom=0,hspace=0, wspace=0)
i+=1
j+=1
image_path = os.path.join(MEDIA_ROOT,"aud2gest/imageFiles",image_name)
    plt.savefig(image_path)  # figsize belongs to plt.figure(), not savefig
# plt.show(image_path)
return image_path , 'aud2gest/imageFiles/'+image_name
def text_url(text,text_name):
text_path=os.path.join(MEDIA_ROOT,"aud2gest/textFiles",text_name)
file1=open(os.path.join(text_path),'w')
file1.write(text)
file1.close()
return text_path , 'aud2gest/textFiles/'+text_name
def audio_to_text(audio_voice):
r = sr.Recognizer()
audio_name=str(audio_voice)
audio_path=os.path.join(MEDIA_ROOT,"aud2gest/audioFiles",audio_name)
text=""
with sr.AudioFile(audio_path) as source:
audio = r.record(source)
print ('Done!')
try:
text = r.recognize_google(audio)
print (text)
except Exception as e:
print (e)
return text
@csrf_exempt
# @login_required
def home(request):
if request.user.is_authenticated:
if request.method=="POST":
print("Brook was here")
# print(request.FILES['choice'])
instance = AudioDb()
if "file" in request.FILES:
audio=request.FILES["file"]
instance.audiofile.save(audio.name, audio)
instance.save()
else:
audio=request.session["filename"]
audio_p = audio_url(audio)
instance.audiofile=audio_p
text=audio_to_text(audio)
audio_name , text_name , image_name = filename(audio)
text_path , text_p = text_url(text, text_name)
image_path , image_p = image_url(text,image_name)
instance.textfile=text_p
instance.imagefile=image_p
instance.content=text
instance.save()
audio=None
data = {}
data['text']=text
data['image']=image_name
# data['image'] =instance.imagefile.url
json_data = json.dumps(data)
return HttpResponse(json_data, content_type="application/json")
else:
form=UploadAudio()
context={
"form":form,
}
return render(request,'aud2gest/home.html',context)
else:
return redirect("../login")
def index(request):
if request.user.is_authenticated:
return render(request,'aud2gest/index.html',{})
else:
return redirect('../login')
@csrf_exempt
def ajax(request):
if request.user.is_authenticated:
filename="voice_"+str(randint(1000,9999))
request.session["filename"]=filename+".wav"
file_obj = request.FILES['audio'].read()
print(type(file_obj))
with default_storage.open('Z:/BTP/knk/media/aud2gest/audioFiles/'+filename+".bin", 'wb+') as destination:
destination.write(file_obj)
src = "Z:/BTP/knk/media/aud2gest/audioFiles/"+filename+".bin"
dst = "Z:/BTP/knk/media/aud2gest/audioFiles/"+filename+".wav"
sound = AudioSegment.from_file(src)
sound.export(dst, format="wav")
print('File Stored @ audio')
os.remove(src) # to delete the .bin file
return redirect("../home")
else:
return redirect('../login')
def about_project(request):
return render(request, 'aud2gest/about_project.html', {})
def about_team(request):
return render(request, 'aud2gest/about_team.html', {})
def instruction(request):
return render(request, 'aud2gest/instructions.html', {})
| UTF-8 | Python | false | false | 4,494 | py | 26 | views.py | 13 | 0.70494 | 0.695372 | 0 | 155 | 27.987097 | 107 |
cailllev/PublicCyberSec | 16,621,523,463,700 | cdaac2a6ec382fd1bc4cc91bdce452078824bc76 | 5cc8683688a3cf8aec8f61f0e01f16b840d1f1d8 | /ctf/1_HELPERS/scripts/t.py | 46126cfba2ddb7fe97d5dba8c2674163c8e282b1 | []
| no_license | https://github.com/cailllev/PublicCyberSec | 6f0104fc7738ce804c01fa66969c28a121b9f5c3 | 6752074d703b15601bdc07ae849232cc0558a63e | refs/heads/main | 2023-03-11T12:35:48.400598 | 2021-02-28T20:01:04 | 2021-02-28T20:01:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | f = open("words_en_sorted.txt", "r")
ltemp = f.readlines()
f.close()
l1 = []
for line in ltemp:
l1.append(line.split(" ")[0] + "\n")
l1 = sorted(set(l1))
print(l1[:10])
f = open("words_en_nums.txt", "r")
l2 = f.readlines()
f.close()
l2 = sorted(set(l2))
print(l2[:10])
l1.extend(l2)
l = sorted(set(l1))
f = open("words_en", "w")
f.writelines(l)
f.close() | UTF-8 | Python | false | false | 363 | py | 15 | t.py | 7 | 0.586777 | 0.539945 | 0 | 25 | 13.56 | 37 |
rhutuja3010/function | 8,246,337,210,173 | 1d64cfa6908b05da284734881f9e92f8ff95c73b | c88ba74667f5dd8094a34a3ff54ac7a0e9de59fd | /meraki Q3.py | 0c364bdec6db912f4fcfa8e0db7dbc2280f547c4 | []
| no_license | https://github.com/rhutuja3010/function | bb2795b6d2a34f6cbbf6ff0d05756920a91302bf | cba05f67669669cf999abd893c05f89f2a443210 | refs/heads/main | 2023-08-25T12:29:15.630203 | 2021-10-19T04:40:22 | 2021-10-19T04:40:22 | 408,160,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # def add_number(num1,num2):
# num1=56
# num2=12
# num3=num1+num2
# print("addition",num3)
# add_number(56,12)
# a=[10,20]
# b=a
# b+=[30,40]
# print(a)
# print(b)
number=int(input("enter the number"))
modulus=number%10   # last digit of the number
if modulus==3:
    print("yes")
else:
    print("no")
deepakkumar6/invoice-dsa | 17,617,955,859,116 | c7703de936af5e8a178dec3fed47291a03c1c5a1 | f76c13deabe0f0dada7ebac9b0ab3cec967d9558 | /dsa/invoice/admin.py | 127645376bb75380cc43764f2e91469d22463ce6 | []
| no_license | https://github.com/deepakkumar6/invoice-dsa | 4aec54d0377eaab719e62d3e85f60e1de6514c9a | dd8975e3e851b0c29000bc85ec6a631d32caed59 | refs/heads/master | 2023-07-15T13:12:08.742033 | 2021-09-07T10:40:04 | 2021-09-07T10:40:04 | 393,980,682 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Customer,Products
admin.site.register(Customer)
admin.site.register(Products)
| UTF-8 | Python | false | false | 133 | py | 24 | admin.py | 9 | 0.827068 | 0.827068 | 0 | 5 | 25.4 | 37 |
ynnsau/CS411-ezcourseatuiuc | 12,309,376,312,089 | ba97a411ef66c99c69136b50239050b60a34039f | 3f61c7d74c457f21f92c9079c19438174a1c73a9 | /rateMyProf/rateMyProf/spiders/profCourseExplorer.py | 5d121a13eb3cf77149c5e34a005163773e30271d | []
| no_license | https://github.com/ynnsau/CS411-ezcourseatuiuc | 4ad10dc71417a0f1bbca6c3d8621b217be9dbcaf | 050dd1863958efa605f42bd9229ada8d27d4ee27 | refs/heads/master | 2023-01-27T13:24:20.750406 | 2020-08-05T17:52:37 | 2020-08-05T17:52:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
import time
from rateMyProf.items import RatemyprofItem
from scrapy.spiders import XMLFeedSpider
class profCourseExplorer(XMLFeedSpider):
name = "profCourseExplorer"
allowed_domains = ["courses.illinois.edu"]
year = str(2020)
semester = "fall"
base_url = "http://courses.illinois.edu/cisapp/explorer/schedule/{year}/{semester}.xml"
url = base_url.format(year = year, semester = semester)
start_urls = [url]
itertag = 'subject'
def parse_node(self, response, node):
subjName = node.xpath(".//@id").get()
link = node.xpath(".//@href").get()
self.logger.debug(subjName)
self.logger.debug(link)
yield scrapy.Request(link, callback = self.parse_subj, meta={'subject_name': subjName})
def parse_subj(self, response):
for course in response.xpath("//course[@id]"):
courseNum = course.xpath(".//@id").get()
courseLink = course.xpath(".//@href").get()
courseTittle = course.xpath(".//text()").get()
self.logger.debug(courseNum)
self.logger.debug(courseTittle)
self.logger.debug(courseLink)
yield scrapy.Request(courseLink, callback = self.parse_course,
meta={'course_dept': response.meta.get('subject_name'),
'course_num': courseNum,
'course_tittle': courseTittle}
)
def parse_course(self, response):
# parse gened
course_gened = response.xpath('//category[@id]//@id').getall()
        course_gened_str = ', '.join(course_gened)
        if course_gened:
            self.logger.debug("FOUND gened<++++++++++++++++++++++++++++++++++++")
# parse section
for section in response.xpath("//section[@id]"):
crn = section.xpath(".//@id").get()
section_link = response.request.url[:-4] + "/" + crn + ".xml"
#section_link = section.xpath(".//@href").get()
description = response.xpath('.//description/text()').get()
course_sec_info = response.xpath('.//courseSectionInformation/text()').get()
credit_hour_str = response.xpath('.//creditHours/text()').get()
            credit_hour = int(credit_hour_str[0])  # first digit only; ranged strings like "1 TO 4" collapse to 1
self.logger.debug(str(crn))
yield scrapy.Request(section_link, callback = self.parse_section,
meta={'course_dept': response.meta.get('course_dept'),
'course_num': response.meta.get('course_num'),
'crn' : crn,
'description': description,
'credit_hour': credit_hour,
'course_sec_info': course_sec_info,
'course_tittle': response.meta.get('course_tittle'),
'course_gened': course_gened_str
}
)
    def parse_section(self, response):
        section_type = response.xpath("//type[@code]/text()").get()
        if section_type is None:
            self.logger.debug("NO TYPE FOUND!!!!!++++++++++++++++++++++++++++")
            return
        # A section can list several instructors or none at all; falling back to
        # the response itself keeps the no-instructor behaviour, since
        # .//@firstName on the root matches the original //@firstName query.
        for prof in (response.xpath('.//instructor') or [response]):
            item = RatemyprofItem()
            item['prof_fname'] = prof.xpath('.//@firstName').get()
            item['prof_lname'] = prof.xpath('.//@lastName').get()
            item['section_info'] = response.meta.get('course_sec_info')
            item['section_num'] = response.xpath('//sectionNumber/text()').get()
            item['section_type'] = section_type
            item['course_name'] = response.meta.get('course_tittle')
            item['course_dept'] = response.meta.get('course_dept')
            item['course_num'] = response.meta.get('course_num')
            item['crn'] = response.meta.get('crn')
            item['course_semester'] = self.semester
            item['course_year'] = self.year
            item['course_description'] = response.meta.get('description')
            item['course_gened'] = response.meta.get('course_gened')
            item['course_credit_hour'] = response.meta.get('credit_hour')
            item['course_explorer_link'] = response.request.url
            item['course_hour_start'] = response.xpath("//start/text()").get()
            item['course_hour_end'] = response.xpath("//end/text()").get()
            item['course_date_start'] = response.xpath("//startDate/text()").get()
            if item['course_date_start'] is not None:
                item['course_date_start'] = item['course_date_start'][:-1]  # drop trailing 'Z'
            item['course_date_end'] = response.xpath("//endDate/text()").get()
            if item['course_date_end'] is not None:
                item['course_date_end'] = item['course_date_end'][:-1]  # drop trailing 'Z'
            item['course_dayOfWeek'] = response.xpath("//daysOfTheWeek/text()").get()
            item['course_location'] = response.xpath("//buildingName/text()").get()
            item['course_room_num'] = response.xpath("//roomNumber/text()").get()
            item['course_term'] = response.xpath("//partOfTerm/text()").get()
            yield item
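# To run this spider locally (my addition; standard Scrapy CLI):
#   scrapy crawl profCourseExplorer -o sections.json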
| UTF-8 | Python | false | false | 7,792 | py | 23 | profCourseExplorer.py | 11 | 0.522587 | 0.520919 | 0 | 146 | 52.369863 | 105 |
godfredakpan/blue | 3,324,304,723,223 | 70d4427bc5069c02b8234a09e6837485c6d30104 | e9b41aeb684e647b762595d921aaa5e11f740a51 | /backend/migrations/0001_initial.py | ab36e5e5dea9a85d9b3a62afb64f60f740840687 | []
| no_license | https://github.com/godfredakpan/blue | f7297fbd53d262700fd7b8f91d38a27cd8a4298c | b8c36dcb0fd00e825d5c6a23ddfabfab2a2de19f | refs/heads/master | 2020-06-14T14:45:25.946693 | 2019-07-19T14:53:27 | 2019-07-19T14:53:27 | 195,030,387 | 0 | 2 | null | false | 2019-07-18T18:32:21 | 2019-07-03T10:17:09 | 2019-07-18T18:30:19 | 2019-07-18T18:32:20 | 8,586 | 0 | 2 | 0 | HTML | false | false | # Generated by Django 2.2.2 on 2019-07-17 11:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='user',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('FirstName', models.CharField(max_length=38, null=True)),
('Surname', models.CharField(max_length=38, null=True)),
('Role', models.CharField(max_length=38, null=True)),
('EmailAddress', models.EmailField(max_length=254, null=True)),
('MobileNumber', models.DecimalField(decimal_places=0, max_digits=11, null=True)),
('Password', models.CharField(max_length=38, null=True)),
('active', models.CharField(default='pending', max_length=100)),
('ConfirmPassword', models.CharField(max_length=38, null=True)),
('Created', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='personalinfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('MiddleName', models.CharField(max_length=32)),
('MobileNumber2', models.IntegerField(null=True)),
('DateOfBirth', models.DateField(null=True)),
('MaritalStatus', models.CharField(max_length=50)),
('PlaceOfBirth', models.CharField(max_length=100)),
('NumberOfDependent', models.IntegerField(null=True)),
('DateAtAddress', models.DateField(null=True)),
('HomeAddress', models.TextField(null=True)),
('email', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='backend.user')),
],
),
migrations.CreateModel(
name='bvn_details',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bvn', models.IntegerField(null=True)),
('first_name', models.CharField(max_length=38, null=True)),
('last_name', models.CharField(max_length=38, null=True)),
('middle_name', models.CharField(max_length=38, null=True)),
('date_of_birth', models.DateField(null=True)),
('phone_number', models.IntegerField(null=True)),
('registration_date', models.DateField(null=True)),
('enrollment_bank', models.IntegerField(null=True)),
('enrollment_branch', models.CharField(max_length=38, null=True)),
('email', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='backend.user')),
],
),
]
| UTF-8 | Python | false | false | 2,991 | py | 44 | 0001_initial.py | 21 | 0.571046 | 0.55433 | 0 | 61 | 48.032787 | 114 |