repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sapirshuker/Digital-Humanities-project
| 2,869,038,190,325 |
27c4da707273f88d4c5d728ddb8f71d88945da27
|
869cfc5f1033410147bbca6e606e2ddf7a1506b3
|
/code/Refine.py
|
43c943ca7e95ca225586e1605b8fba0e1206a35a
|
[] |
no_license
|
https://github.com/sapirshuker/Digital-Humanities-project
|
23986bbc2ecc47a8655f14fcd296e822125d3e3c
|
1e6138c50d377ccf6d68b0e4156c20cb1f798bf6
|
refs/heads/main
| 2023-03-12T05:40:15.487844 | 2021-03-09T15:14:52 | 2021-03-09T15:14:52 | 343,729,676 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 14:38:39 2021
@author: sapirshuker
"""
import urllib, os.path, time, json
import urllib.parse as urlparse
import urllib.request
import requests
from refineProject import RefineProject
class Refine:
    """Thin HTTP client for an OpenRefine / OntoRefine server."""

    def __init__(self, server='http://127.0.0.1:3333', options=None):
        # Normalize the base URL by dropping a single trailing slash.
        # Bug fix: the original used server[0, -1], which raises TypeError
        # (tuple index into a str); the intent was server[:-1].
        self.server = server[:-1] if server.endswith('/') else server

    def get_number_of_column(self, project_id):
        """Return the number of columns in the given project."""
        data = {'project': project_id}
        ontorefine_response = requests.get(self.server + '/command/core/get-models?', params=data)
        response_json = ontorefine_response.json()
        return len(response_json['columnModel']['columns'])

    def get_project_id_from_url(self, response, char_to_find):
        """Return the substring of response.url after the last occurrence of char_to_find."""
        index = 1 + response.url.rfind(char_to_find)
        return response.url[index:]

    def get_token(self):
        """Fetch a CSRF token from the server and return it as a params dict."""
        ontorefine_response = self.get_request('/command/core/get-csrf-token')
        token = ontorefine_response.json()['token']
        return {'csrf_token': token}

    def get_request(self, command, data=None, params=None):
        """GET self.server + command, forwarding data/params when given.

        requests ignores data=None and params=None, so a single call replaces
        the original four-way branch (which also misused `&` and `== None`).
        """
        return requests.get(self.server + command, data=data, params=params)

    def post_request(self, command, data=None, params=None, token=False):
        """POST self.server + command; fetch a CSRF token unless disabled or params given.

        Bug fix: the original condition `not token & (params == None)` parses as
        `not (token & (params == None))` because `&` binds tighter than `not`,
        which made the token fetch overwrite caller-supplied params.
        """
        if not token and params is None:
            params = self.get_token()
        return requests.post(self.server + command, data=data, params=params)

    def new_project(self, file_path, options=None):
        """Create a new project from a file upload and return a RefineProject.

        :param file_path: path of the file to upload
        :param options: optional dict; 'project_name' overrides the default name
        """
        file_name = os.path.split(file_path)[-1]
        project_name = options['project_name'] if options is not None and 'project_name' in options else file_name
        data = {
            'project-name': project_name,
            "format": "text/line-based",
        }
        params = self.get_token()
        # Bug fix: open the file exactly once and close it afterwards. The
        # original opened two handles without closing either, and placed a
        # nested dict containing a file object inside the form data.
        with open(file_path, "rb") as fh:
            ontorefine_response = requests.post(
                self.server + '/command/core/create-project-from-upload',
                data=data, files={'project-file': (file_name, fh)}, params=params)
        project_id = self.get_project_id_from_url(ontorefine_response, r'=')
        column_number = self.get_number_of_column(project_id)
        # NOTE(review): column_number is passed twice, mirroring the original
        # call; confirm the meaning of RefineProject's fifth argument.
        return RefineProject(self.server, project_id, project_name, column_number, column_number)
|
UTF-8
|
Python
| false | false | 3,205 |
py
| 10 |
Refine.py
| 5 | 0.648986 | 0.640562 | 0 | 80 | 38.0625 | 112 |
UWPCE-PythonCert-ClassRepos/Self_Paced-Online
| 7,456,063,233,743 |
d068c2186e78ac02374d3ab3ba3884754c0a39a2
|
d4ea02450749cb8db5d8d557a4c2616308b06a45
|
/students/Josh_HOff/lesson08/test_circle_class.py
|
9a9b7068f4f73a4003fcca963cb62e0dc6d4f002
|
[] |
no_license
|
https://github.com/UWPCE-PythonCert-ClassRepos/Self_Paced-Online
|
75421a5bdd6233379443fc310da866ebfcd049fe
|
e298b1151dab639659d8dfa56f47bcb43dd3438f
|
refs/heads/master
| 2021-06-16T15:41:07.312247 | 2019-07-17T16:02:47 | 2019-07-17T16:02:47 | 115,212,391 | 13 | 160 | null | false | 2019-11-13T16:07:35 | 2017-12-23T17:52:41 | 2019-11-13T16:05:43 | 2019-11-06T00:23:13 | 19,146 | 5 | 145 | 0 |
Python
| false | false |
import io
import pytest
from circle_class import *
def test_radius():
    """A circle reports the radius it was constructed with."""
    circle = Circle(40)
    assert circle.radius == 40
def test_diameter():
    """Diameter is derived as twice the radius."""
    circle = Circle(80)
    assert circle.diameter == 160
def test_edit_radius():
    """Assigning radius updates diameter in lockstep."""
    circle = Circle(30)
    assert (circle.radius, circle.diameter) == (30, 60)
    circle.radius = 40
    assert (circle.radius, circle.diameter) == (40, 80)
def test_edit_diameter():
    """Assigning diameter updates radius in lockstep."""
    circle = Circle(50)
    assert (circle.radius, circle.diameter) == (50, 100)
    circle.diameter = 300
    assert (circle.radius, circle.diameter) == (150, 300)
def test_area():
    """Area follows pi * r**2."""
    circle = Circle(50)
    assert round(circle.area, 4) == 7853.9816
def test_editing_area():
    """Area tracks radius/diameter edits and cannot be set directly."""
    circle = Circle(50)
    circle.radius = 40
    print(round(circle.area, 4))
    assert round(circle.area, 4) == 5026.5482
    circle.diameter = 150
    print(round(circle.area, 4))
    assert round(circle.area, 4) == 17671.4587
    with pytest.raises(AttributeError):
        circle.area = 50
def test_from_diameter_method():
    """The from_diameter alternate constructor sets consistent attributes."""
    circle = Circle.from_diameter(80)
    assert circle.radius == 40
    assert circle.diameter == 80
    assert round(circle.area, 4) == 5026.5482
def test_addition_method():
    """Adding circles adds their radii."""
    first = Circle(2)
    second = Circle(4)
    assert first + second == Circle(6)
def test_addition_with_from_diameter_method():
    """Addition also works on circles built via from_diameter."""
    first = Circle.from_diameter(20)
    second = Circle(15)
    assert first + second == Circle(25)
def test_multiplication():
    """Scalar multiplication works from either side."""
    small = Circle(2)
    big = Circle(4)
    assert big * 3 == Circle(12)
    assert 3 * big == Circle(12)
def test_less_than():
    """Ordering compares radii: smaller < larger."""
    small = Circle(2)
    big = Circle(4)
    assert small < big
def test_greater_than():
    """Ordering compares radii: larger > smaller."""
    small = Circle(2)
    big = Circle(4)
    assert big > small
def test_equals():
    """Circles with equal radii compare equal."""
    first = Circle(10)
    second = Circle(10)
    assert first == second
def test_less_than_equals():
    """<= holds for both strictly-smaller and equal radii."""
    first = Circle(5)
    second = Circle(8)
    assert first <= second
    second = Circle(5)
    assert first <= second
def test_greater_than_equals():
    """>= holds for both strictly-larger and equal radii."""
    first = Circle(5)
    second = Circle(8)
    assert second >= first
    second = Circle(5)
    assert second >= first
def test_not_equals():
    """Circles with different radii compare unequal."""
    first = Circle(10)
    second = Circle(11)
    assert first != second
|
UTF-8
|
Python
| false | false | 2,125 |
py
| 2,428 |
test_circle_class.py
| 2,245 | 0.552941 | 0.472941 | 0 | 115 | 17.391304 | 46 |
filiplindau/Raytracer
| 3,023,656,981,072 |
b91abc310ad1a0939f9398759ce68d91371a0118
|
498c53e897242f40e34e048faffebf4442e15f0e
|
/src/Raytracer/OpticalElement.py
|
f46a83afe47fb4377977410453aa0abaadddfe7d
|
[] |
no_license
|
https://github.com/filiplindau/Raytracer
|
aabc9a591749277d456cdde11aadd8f0c9f81a84
|
a1bb0e58cefdacfb4905e78420dd037444ab2137
|
refs/heads/master
| 2021-01-10T11:31:28.599484 | 2020-05-04T20:08:45 | 2020-05-04T20:08:45 | 43,961,381 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Created on 9 Oct 2015
@author: Filip Lindau
"""
import numpy as np
import copy
import Raytracer.OpticalSurface as os
import Raytracer.OpticalMaterial as om
import Raytracer.OpticalAperture as oa
import Raytracer.Ray as rr
import logging
# Default surrounding medium and shared material library.
# The two coefficient lists parameterize air's dispersion model —
# assumed to be Sellmeier-style terms; TODO confirm against OpticalMaterial.
air = om.OpticalMaterial('air', [0.0002433468, 2.927321e-5], [0.00420135, 0.0174331])
ml = om.MaterialLibrary()
class OpticalElement(object):
    """Base class for optical elements (lenses, prisms, gratings, mirrors).

    An element owns a list of Surface objects defined in element-local
    coordinates plus the transforms between the global and local frames.
    Positions are homogeneous 4-vectors [x, y, z, 1]; direction vectors
    are [x, y, z, 0].
    """
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]), n=1.0,
                 thickness=1.0e-3, material=air, size=12.7e-3, name="Element"):
        """
        :param x: Position vector in global coordinates
        :param xn: Element normal vector
        :param xt: Element tangent vector
        :param n: Refractive index if no material is specified
        :param thickness: Element thickness in meters
        :param material: Material as a OpticalMaterial instance
        :param size: Element aperture size in meters
        :param name: Element name
        :returns:
        """
        self.n = n
        self.thickness = thickness
        # copy so that per-element flags (e.g. reflector) don't leak into the
        # shared material instance
        self.material = copy.copy(material)
        self.size = size
        self.name = name
        self.logger = logging.getLogger("Element.")
        self.logger.setLevel(logging.INFO)
        self.xM = None # Transform matrix for position
        self.xMT = None # Transposed position transform matrix
        self.xpM = None # Transform matrix for angle
        self.surfaces = list()
        # Promote 3-vectors to homogeneous 4-vectors (1 for points, 0 for directions)
        if x.shape[0] == 3:
            self.x = np.hstack((x, 1))
        else:
            self.x = x
        if xn.shape[0] == 3:
            self.xn = np.hstack((xn, 0))
        else:
            self.xn = xn
        if xt.shape[0] == 3:
            self.xt = np.hstack((xt, 0))
        else:
            self.xt = xt
        self.generate_transform_matrix()
        self.init_surfaces()
    def set_position(self, new_pos):
        """Move the element to new_pos (3- or 4-vector) and rebuild transforms."""
        if new_pos.shape[0] == 3:
            self.x = np.hstack((new_pos, 1))
        else:
            self.x = new_pos
        self.generate_transform_matrix()
    def generate_transform_matrix(self):
        """Build translation matrices (global->local and its inverse) and the
        rotation matrix xpM from the element's normal/tangent vectors."""
        self.xM = np.array([[1.0, 0.0, 0.0, -self.x[0]],
                            [0.0, 1.0, 0.0, -self.x[1]],
                            [0.0, 0.0, 1.0, -self.x[2]],
                            [0.0, 0.0, 0.0, 1.0]])
        self.xMT = np.array([[1.0, 0.0, 0.0, self.x[0]],
                             [0.0, 1.0, 0.0, self.x[1]],
                             [0.0, 0.0, 1.0, self.x[2]],
                             [0.0, 0.0, 0.0, 1.0]])
        # Third coordinate axis by cross product:
        xt2 = np.hstack((np.cross(self.xt[0:3], self.xn[0:3]), 0))
        self.xpM = np.transpose(np.vstack((xt2, self.xt, self.xn, np.array([0, 0, 0, 1]))))
    def set_rotation(self, theta, phi):
        """
        Set element rotation to theta, phi angles
        :param theta: Rotation along theta axis
        :param phi: Rotation along phi axis
        """
        # NOTE: overwrites xpM (absolute rotation), unlike rotate_element which
        # composes with the current rotation.
        thM = np.array([[1.0, 0.0, 0.0, 0.0],
                        [0.0, +np.cos(theta), np.sin(theta), 0.0],
                        [0.0, -np.sin(theta), np.cos(theta), 0.0],
                        [0.0, 0.0, 0.0, 1.0]])
        phM = np.array([[+np.cos(phi), 0.0, np.sin(phi), 0.0],
                        [0.0, 1.0, 0.0, 0.0],
                        [-np.sin(phi), 0.0, np.cos(phi), 0.0],
                        [0.0, 0.0, 0.0, 1.0]])
        self.xpM = np.dot(thM, phM)
        self.xn = np.dot(self.xpM, self.xn)
        self.xt = np.dot(self.xpM, self.xt)
    def rotate_element(self, theta, phi):
        """
        Rotate the element relative to the current rotation
        :param theta: Rotation along theta axis
        :param phi: Rotation along phi axis
        """
        thM = np.array([[1.0, 0.0, 0.0, 0.0],
                        [0.0, +np.cos(theta), np.sin(theta), 0.0],
                        [0.0, -np.sin(theta), np.cos(theta), 0.0],
                        [0.0, 0.0, 0.0, 1.0]])
        phM = np.array([[+np.cos(phi), 0.0, np.sin(phi), 0.0],
                        [0.0, 1.0, 0.0, 0.0],
                        [-np.sin(phi), 0.0, np.cos(phi), 0.0],
                        [0.0, 0.0, 0.0, 1.0]])
        # Compose the incremental rotation with the existing one, then update
        # the cached normal/tangent vectors with the combined matrix.
        self.xpM = np.dot(self.xpM, np.dot(thM, phM))
        self.xn = np.dot(self.xpM, self.xn)
        self.xt = np.dot(self.xpM, self.xt)
    #        for s in self.surfaces:
    #            s.rotateExternal(theta, phi)
    def flip_element(self):
        """Flip the element front-to-back: reverse the per-surface n/material
        assignments and surface order, then rotate the element by pi."""
        nList = [surf.n for surf in self.surfaces]
        nList.reverse()
        matList = [surf.material for surf in self.surfaces]
        matList.reverse()
        for (ind, surf) in enumerate(self.surfaces):
            surf.n = nList[ind]
            surf.material = matList[ind]
        self.surfaces.reverse()
        self.rotate_element(np.pi, 0)
    def reverse_element(self):
        """Reverse surface order and per-surface n/material without rotating."""
        nList = [surf.n for surf in self.surfaces]
        nList.reverse()
        matList = [surf.material for surf in self.surfaces]
        matList.reverse()
        for (ind, surf) in enumerate(self.surfaces):
            surf.n = nList[ind]
            surf.material = matList[ind]
        self.surfaces.reverse()
    def set_rotation_matrix(self, xpM):
        """Apply an externally supplied rotation matrix to the element's
        orientation vectors and propagate it to every surface."""
        self.xn = np.dot(xpM, self.xn)
        self.xt = np.dot(xpM, self.xt)
        for s in self.surfaces:
            s.set_rotation_external_matrix(xpM)
    def get_edges(self):
        """Return each surface's edge points transformed to global coordinates."""
        edges = []
        for surf in self.surfaces:
            xe = surf.get_edges()
            # local -> global: undo rotation (transpose of xpM), then translation
            edges.append(np.transpose(np.dot(self.xMT, np.dot(np.transpose(self.xpM), np.transpose(xe)))))
        return edges
    def init_surfaces(self):
        """Create the default pair of flat surfaces (front at z=0, back at
        z=thickness) with a circular aperture. Subclasses override this."""
        ap = oa.CircularAperture(self.size)
        s1 = os.Surface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n,
                        material=self.material, aperture=ap)
        s2 = os.Surface(x=np.array([0, 0, self.thickness, 1]), xn=self.xn, xt=self.xt, n=self.n,
                        material=air, aperture=ap)
        self.surfaces = [s1, s2]
    def propagate_rays(self, rays):
        """ Propagate rays using the new rays matrix
        The rays are transformed to element local coordinates before
        being sent to the surfaces.

        :param rays: array of rays; index [:, 0, :] holds positions and
            [:, 1, :] holds direction vectors (homogeneous 4-vectors)
        :returns: list with one global-coordinate ray array per surface
        """
        # raysEl contains the rays transformed to element local coordinates
        raysEl = rays.copy()
        # raysGl contains the returned coordinates from a surface transformed back to global coordinates
        raysGl = rays.copy()
        raysGlList = []
        # positions get translation + rotation; directions rotation only
        raysEl[:, 0, :] = np.transpose(np.dot(self.xpM, np.dot(self.xM, np.transpose(rays[:, 0, :]))))
        raysEl[:, 1, :] = np.transpose(np.dot(self.xpM, np.transpose(rays[:, 1, :])))
        #        print "raysEl in: ", raysEl[0, 1, :]
        #        print "raysGl in: ", raysGl[0, 1, :]
        self.logger.info("{0}".format(self))
        i = 0
        for surf in self.surfaces:
            i += 1
            self.logger.debug("\n\n============================================\nSurface {0}/{1}\n"
                              "============================================\n\n".format(i, len(self.surfaces)))
            rays_inp_l = raysEl.copy()
            raysEl = surf.find_intersection_rays(raysEl)
            # self.logger.info("{0} raysEl: {1}".format(self, raysEl[0, 1, :]))
            # transform the surface result back to global coordinates
            raysGlNew = raysEl.copy()
            raysGlNew[:, 0, :] = np.transpose(np.dot(self.xMT, np.dot(np.transpose(self.xpM),
                                                                      np.transpose(raysEl[:, 0, :]))))
            raysGlNew[:, 1, :] = np.transpose(np.dot(np.transpose(self.xpM), np.transpose(raysEl[:, 1, :])))
            raysGlList.append(raysGlNew)
            self.logger.debug("{0} Ray data\n\nGlobal input rays:\n{1}\n\nLocal input rays:\n{2}\n\n"
                              "Local output rays:\n{3}\n\n"
                              "Global output rays:\n{4}".format(self, rays, rays_inp_l, raysEl, raysGlNew))
        return raysGlList
    def get_rays_footprint(self, rays, surface_number):
        """Return the footprint of rays on surface surface_number (local frame)."""
        raysT = np.transpose(np.dot(self.xpM, np.dot(self.xM, np.transpose(rays))))
        return self.surfaces[surface_number].get_rays_footprint(raysT)
    def propagate_rays_old(self, rays):
        """ Propagate rays using the old rays structure (class with lists)
        """
        for ray in rays:
            self.logger.debug("\n=+=+=+=+ Ray =+=+=+=+=+=+=")
            for surf in self.surfaces:
                self.logger.debug("--------------------------------")
                surf.find_intersection_ray(ray, self.xM, self.xMT, self.xpM)
    def __repr__(self):
        return "Element {0}".format(self.name)
class PrismElement(OpticalElement):
    """Dispersive prism built from two flat faces meeting at apex_angle."""
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]), n=1.0,
                 apex_angle=60 * np.pi / 180, side_length=25e-3, material=air, name="Prism"):
        self.apex_angle = apex_angle
        self.side_length = side_length
        OpticalElement.__init__(self, x=x, xn=xn, xt=xt, n=n, material=material, name=name)
    def init_surfaces(self):
        """Create the tilted entrance and exit faces of the prism."""
        aperture = oa.RectangularAperture([self.side_length, self.side_length])
        half_apex = self.apex_angle / 2
        axial_offset = np.sin(half_apex) * self.side_length / 2
        entrance = os.Surface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n,
                              material=self.material, aperture=aperture)
        entrance.set_rotation_internal(0, half_apex)
        entrance.set_position(np.array([0, 0, -axial_offset, 1]))
        exit_face = os.Surface(x=np.array([0, 0, 0, 1]), xn=self.xn, xt=self.xt, n=1.0, material=air,
                               aperture=aperture)
        exit_face.set_rotation_internal(0, -half_apex)
        exit_face.set_position(np.array([0, 0, axial_offset, 1]))
        self.surfaces = [entrance, exit_face]
class PCXElement(OpticalElement):
    """Plano-convex lens: spherical front surface, flat back surface."""
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]), n=1.0, r=1.0,
                 thickness=5e-3, material=air, size=12.7e-3, name="PCX lens"):
        self.r1 = r
        self.size = size
        OpticalElement.__init__(self, x=x, xn=xn, xt=xt, n=n, thickness=thickness, material=material, name=name)
    def init_surfaces(self):
        """Create the curved front and flat back surface pair."""
        aperture = oa.CircularAperture(self.size)
        curved_front = os.SphericalSurface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n, r=self.r1,
                                           material=self.material, aperture=aperture)
        flat_back = os.Surface(x=np.array([0, 0, self.thickness, 1]), xn=self.xn, xt=self.xt, n=1.0,
                               material=air, aperture=aperture)
        self.surfaces = [curved_front, flat_back]
class ScreenElement(OpticalElement):
    """Zero-thickness detection plane with an unbounded aperture."""
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]), material=air,
                 name="Screen"):
        super(ScreenElement, self).__init__(x=x, xn=xn, xt=xt, n=1.0, material=material, thickness=0.0, name=name)
    def init_surfaces(self):
        """Create the single screen surface (aperture class name is the
        project's own spelling)."""
        aperture = oa.InifiniteAperture()
        screen_surface = os.Surface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n,
                                    material=self.material, aperture=aperture)
        self.surfaces = [screen_surface]
class GratingElement(OpticalElement):
    """Transmission grating: flat entrance face plus a grating exit face."""
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]), n=1.0,
                 thickness=2e-3, grating_period=1.0/1800e3, m=1, side_length=25e-3, material=ml.get_material("fs"),
                 name="Grating"):
        self.grating_period = grating_period
        self.side_length = side_length
        self.thickness = thickness
        self.m = m
        OpticalElement.__init__(self, x=x, xn=xn, xt=xt, n=n, material=material, name=name)
    def init_surfaces(self):
        """Create the plain front surface and the diffracting back surface."""
        self.logger.info("{0} Init grating surfaces".format(self))
        aperture = oa.RectangularAperture([self.side_length, self.side_length])
        front = os.Surface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n,
                           material=self.material, aperture=aperture)
        front.set_rotation_internal(0, 0)
        back = os.Surface(x=np.array([0, 0, self.thickness, 1]), xn=self.xn, xt=self.xt, n=1.0, material=air,
                          aperture=aperture, grating_period=self.grating_period, m=self.m)
        back.set_rotation_internal(0, 0)
        self.surfaces = [front, back]
class MirrorElement(OpticalElement):
    """Flat circular mirror; only the reflective front surface is traced."""
    def __init__(self, x=np.array([0, 0, 0, 1]), xn=np.array([0, 0, 1, 0]), xt=np.array([0, 1, 0, 0]),
                 n=1.0, material=air, thickness=5e-5, size=25.4e-3, name="Mirror"):
        # `name` added (default "Mirror") for consistency with the sibling
        # element classes; previously the base default "Element" was used.
        self.size = size
        # NOTE: the base __init__ below is called with thickness=0.0 and
        # overwrites this attribute; kept to preserve the original code path.
        self.thickness = thickness
        super(MirrorElement, self).__init__(x=x, xn=xn, xt=xt, n=1.0, material=material, thickness=0.0, name=name)
        # mark the (copied) material as reflective so surfaces reflect rays
        self.material.reflector = True
    def init_surfaces(self):
        """Create the single reflective front surface.

        Bug fix: the original also constructed a back surface (s2) that was
        never added to self.surfaces; that dead construction is removed.
        """
        ap = oa.CircularAperture(size=self.size)
        s1 = os.Surface(x=np.array([0, 0, 0, 1]), xn=-self.xn, xt=self.xt, n=self.n,
                        material=self.material, aperture=ap)
        s1.set_rotation_internal(0, 0)
        self.surfaces = [s1]
|
UTF-8
|
Python
| false | false | 14,253 |
py
| 17 |
OpticalElement.py
| 15 | 0.52831 | 0.493089 | 0 | 322 | 43.254658 | 116 |
Gavin-McQuaid/multiples_of_3_and_5
| 3,040,836,876,375 |
b937dd5808df6820d0267be443fcd5d37142d399
|
78008b3e8053fa3dd9f03c3df2ed66333c0ad97b
|
/program.py
|
83d93a6bd222a8b4585f81a80c009d9eb3b04f0a
|
[] |
no_license
|
https://github.com/Gavin-McQuaid/multiples_of_3_and_5
|
b3d5b442959f8aa1f97735557bada9d8bf4f42db
|
51147991c1ba1b963835f0908c51d5bab0dfb72d
|
refs/heads/master
| 2021-01-18T23:15:41.591271 | 2016-11-02T20:27:25 | 2016-11-02T20:27:25 | 72,669,437 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Project Euler #1: sum the natural numbers below 1000 that are
# multiples of 3 or 5.
# Bug fix: the original used the Python 2 `print total` statement, which is a
# SyntaxError under Python 3; the three-way elif chain is also collapsed into
# a single `or` condition (same arithmetic, each multiple counted once).
total = sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
print(total)
|
UTF-8
|
Python
| false | false | 159 |
py
| 1 |
program.py
| 1 | 0.459119 | 0.36478 | 0 | 12 | 12.25 | 30 |
aleksandrawy/first_tests
| 867,583,425,928 |
cd12000470370824444751b97b7d324905fde9a6
|
80ece3e083edf07febf72d9193363fc299e21c5c
|
/eye_game.py
|
e3cdb2e3d619e2a7021e5d1d8f4274db59406711
|
[] |
no_license
|
https://github.com/aleksandrawy/first_tests
|
213cc0c5a0af2553afcf4fafc734f7e91d9c9b6c
|
312cf675f4fdf674ac5ddf099622d6ae7780599a
|
refs/heads/master
| 2020-12-02T16:17:07.563412 | 2017-07-07T10:50:55 | 2017-07-07T10:50:55 | 96,529,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def load_eye_game(driver):
    """Open the eye-test page and switch the driver into its iframe."""
    driver.get("https://www.igame.com/eye-test/")
    game_frame = driver.find_element_by_tag_name("iframe")
    driver.switch_to.frame(game_frame)
def element_click(driver):
    """Locate the highlighted tile and click it (element is printed for debugging)."""
    target = driver.find_element_by_class_name("thechosenone")
    print(target)
    target.click()
|
UTF-8
|
Python
| false | false | 266 |
py
| 3 |
eye_game.py
| 3 | 0.676692 | 0.676692 | 0 | 9 | 28.333333 | 69 |
domfarolino/deep-learning
| 13,426,067,794,194 |
d0bfa4ab5f3136dd99d5468d5288f17d316fc854
|
b9af51231301cb94dd2d47cd2736734ef6ee4906
|
/Assignment6/DL6G.py
|
c4053a934b7298c9cf5b568b4307b071ee41d4a9
|
[] |
no_license
|
https://github.com/domfarolino/deep-learning
|
d7618c14672be80a4562f2d5c64b27baa0398419
|
d691d2e57260a16e235420f061cfd191a8e081cf
|
refs/heads/master
| 2020-04-18T01:26:36.272257 | 2019-04-26T00:05:58 | 2019-04-26T00:06:01 | 167,118,866 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# DL6G.py CS5173/6073 cheng 2019
# from tensorflow.org/tutorials/keras
# three-layer neural network for MNIST digit classification
# NOTE(review): the original header said fashion_mnist, but the code below
# loads tf.keras.datasets.mnist (handwritten digits) — confirm intent.
# Usage: python DL6G.py
import tensorflow as tf
mnist = tf.keras.datasets.mnist
# Load the train/test splits and scale pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Flatten 28x28 images, one hidden relu layer with dropout, softmax output
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test)
# NOTE(review): tf.get_default_graph() is TensorFlow 1.x API and was removed
# in TF 2.x — this script only runs on TF 1.x (or tf.compat.v1).
graph = tf.get_default_graph()
operations = graph.get_operations()
print(operations)
print(len(operations))
print(operations[9])
|
UTF-8
|
Python
| false | false | 911 |
py
| 28 |
DL6G.py
| 27 | 0.686059 | 0.64764 | 0 | 30 | 28.3 | 66 |
TrishaAndalene/mySchoolProject
| 4,535,485,467,472 |
ac7af454058ad7fc5561843396f978edb7d04da1
|
91e01736514cd2836ea71fbdf8f1e80b0e544d89
|
/Learning step/Project/programlingkaran.py
|
f9be58dcf8766b5e9f53915c22fcbd69fc504e78
|
[] |
no_license
|
https://github.com/TrishaAndalene/mySchoolProject
|
3c1fe55b070bfbf1593077054e555218fa2a4bbd
|
ab8e2e680444eadacb9d9e58d1ec7d166963ef18
|
refs/heads/master
| 2022-12-01T23:08:01.336556 | 2020-08-15T03:55:24 | 2020-08-15T03:55:24 | 274,874,174 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Circle/triangle area program (original comment: "program Lingkaran").
data = []  # NOTE(review): reassigned by ambil_data() below; this initializer is unused
def luas_lingkaran(r):
    """Return the area of a circle with radius r (pi approximated as 3.14)."""
    return 3.14 * r * r
def luas_segitiga(a, t):
    """Return the area of a triangle with base a and height t."""
    return (a * t) / 2
def ambil_data(namafile):
    """Read one integer per line from namafile and return them as a list.

    :param namafile: path of a text file containing one integer per line
    :returns: list of ints in file order
    """
    # Improvements: removed the dead triple-quoted code block the original
    # carried inside the body, and replaced the manual append loop with a
    # list comprehension (same behavior).
    with open(namafile) as f:
        return [int(line.rstrip()) for line in f]
#print(listData)
# Expects a data.txt file with at least two integer lines (base, height)
data = ambil_data("data.txt")
print(luas_segitiga(data[0], data[1]))
#print("Jarijari = ", jariJari)
#print("Luasnya = ", luas_lingkaran(jariJari))
|
UTF-8
|
Python
| false | false | 597 |
py
| 94 |
programlingkaran.py
| 82 | 0.664992 | 0.654941 | 0 | 36 | 15.611111 | 46 |
spotify/cassandra-medusa
| 11,132,555,274,856 |
b790f7a4add8a0be072e900f400f1d6f7418fff4
|
7277aa3c7b7c14f1522080be189113ef412cdfc0
|
/medusa/restore_cluster.py
|
8349799febd736bd86c8077bce364cd2c82bb408
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/spotify/cassandra-medusa
|
bf37b18bb443a520edca077102cd500880916bcf
|
d286ec420bde09f05d4f0c14b2968ff482067604
|
refs/heads/master
| 2023-08-25T05:03:42.519918 | 2019-11-12T14:56:14 | 2019-11-13T16:18:19 | 210,613,958 | 29 | 11 |
Apache-2.0
| false | 2022-03-29T21:56:46 | 2019-09-24T13:46:53 | 2021-11-24T05:27:14 | 2022-03-29T21:56:46 | 108 | 25 | 4 | 6 |
Python
| false | false |
# -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import sys
import time
import uuid
import datetime
import traceback
import paramiko
import subprocess
import os
from medusa.monitoring import Monitoring
from medusa.cassandra_utils import CqlSessionProvider
from medusa.schema import parse_schema
from medusa.storage import Storage
from medusa.verify_restore import verify_restore
# Handle for one remote command execution: target host, SSH connection args,
# paramiko client, channel, and captured stdout/stderr streams.
Remote = collections.namedtuple('Remote', ['target', 'connect_args', 'client', 'channel', 'stdout', 'stderr'])
# Shell commands / environment variable names used to manage the ssh-agent
# that holds the restore job's keys.
SSH_ADD_KEYS_CMD = 'ssh-add'
SSH_AGENT_CREATE_CMD = 'ssh-agent'
SSH_AGENT_KILL_CMD = 'ssh-agent -k'
SSH_AUTH_SOCK_ENVVAR = 'SSH_AUTH_SOCK'
SSH_AGENT_PID_ENVVAR = 'SSH_AGENT_PID'
def orchestrate(config, backup_name, seed_target, temp_dir, host_list, keep_auth, bypass_checks,
                verify, keyspaces, tables, use_sstableloader=False):
    """Validate inputs, run a full-cluster restore, and emit duration metrics.

    Exactly one of seed_target (in-place restore) or host_list (out-of-place
    restore) must be provided. On any failure the error metric is emitted and
    the process exits with status 1.
    """
    monitoring = Monitoring(config=config.monitoring)
    try:
        restore_start_time = datetime.datetime.now()
        if seed_target is not None:
            # in-place restore: auth data comes with the backup, never kept
            keep_auth = False
        if seed_target is None and host_list is None:
            err_msg = 'You must either provide a seed target or a list of host'
            logging.error(err_msg)
            raise Exception(err_msg)
        if seed_target is not None and host_list is not None:
            err_msg = 'You must either provide a seed target or a list of host, not both'
            logging.error(err_msg)
            raise Exception(err_msg)
        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)
        if keep_auth:
            logging.info('system_auth keyspace will be left untouched on the target nodes')
        else:
            logging.info('system_auth keyspace will be overwritten with the backup on target nodes')
        storage = Storage(config=config.storage)
        try:
            cluster_backup = storage.get_cluster_backup(backup_name)
        except KeyError:
            err_msg = 'No such backup --> {}'.format(backup_name)
            logging.error(err_msg)
            raise Exception(err_msg)
        restore = RestoreJob(cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
                             keyspaces, tables, bypass_checks, use_sstableloader)
        restore.execute()
        restore_end_time = datetime.datetime.now()
        restore_duration = restore_end_time - restore_start_time
        logging.debug('Emitting metrics')
        logging.info('Restore duration: {}'.format(restore_duration.seconds))
        tags = ['medusa-cluster-restore', 'restore-duration', backup_name]
        monitoring.send(tags, restore_duration.seconds)
        # 0 on the error metric signals a successful restore
        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 0)
        logging.debug('Done emitting metrics')
        logging.info('Successfully restored the cluster')
    except Exception as e:
        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 1)
        logging.error('This error happened during the cluster restore: {}'.format(str(e)))
        traceback.print_exc()
        sys.exit(1)
def expand_repeatable_option(option, values):
    """Render a repeatable CLI flag: one '--option value' pair per value, space-joined."""
    return ' '.join('--{} {}'.format(option, value) for value in values)
class RestoreJob(object):
def __init__(self, cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
keyspaces={}, tables={}, bypass_checks=False, use_sstableloader=False):
self.id = uuid.uuid4()
self.ringmap = None
self.cluster_backup = cluster_backup
self.session_provider = None
self.config = config
self.host_list = host_list
self.seed_target = seed_target
self.keep_auth = keep_auth
self.verify = verify
self.in_place = None
self.temp_dir = temp_dir # temporary files
self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)
self.host_map = {} # Map of backup host/target host for the restore process
self.keyspaces = keyspaces
self.tables = tables
self.bypass_checks = bypass_checks
self._ssh_agent_started = False
self.use_sstableloader = use_sstableloader
    def execute(self):
        """Run the restore: check completeness, build the host mapping for the
        in-place or out-of-place case, then restore data on every node."""
        logging.info('Ensuring the backup is found and is complete')
        if not self.cluster_backup.is_complete():
            raise Exception('Backup is not complete')
        # CASE 1 : We're restoring in place and a seed target has been provided
        if self.seed_target is not None:
            logging.info('Restore will happen "In-Place", no new hardware is involved')
            self.in_place = True
            self.session_provider = CqlSessionProvider([self.seed_target],
                                                       username=self.config.cassandra.cql_username,
                                                       password=self.config.cassandra.cql_password)
            with self.session_provider.new_session() as session:
                self._populate_ringmap(self.cluster_backup.tokenmap, session.tokenmap())
        # CASE 2 : We're restoring out of place, i.e. doing a restore test
        if self.host_list is not None:
            logging.info('Restore will happen on new hardware')
            self.in_place = False
            self._populate_hostmap()
            logging.info('Starting Restore on all the nodes in this list: {}'.format(self.host_list))
        self._restore_data()
        # tear down the ssh-agent only if this job started one
        if self._ssh_agent_started is True:
            self.ssh_cleanup()
def _validate_ringmap(self, tokenmap, target_tokenmap):
for host, ring_item in target_tokenmap.items():
if not ring_item.get('is_up'):
raise Exception('Target {host} is not up!'.format(host=host))
if len(target_tokenmap) != len(tokenmap):
return False
return True
    def _populate_ringmap(self, tokenmap, target_tokenmap):
        """Build self.host_map associating each restore (target) host with its
        backup source host(s).

        If the two rings have identical topology, the mapping is 1:1 via token
        sets; otherwise the sstableloader path is enabled and backup hosts are
        chunked across the restore hosts.
        """
        def _tokens_from_ringitem(ringitem):
            # canonical string key for a node's token set
            return ','.join(map(str, ringitem['tokens']))
        def _token_counts_per_host(tokenmap):
            for host, ringitem in tokenmap.items():
                yield len(ringitem['tokens'])
        def _hosts_from_tokenmap(tokenmap):
            hosts = set()
            for host, ringitem in tokenmap.items():
                hosts.add(host)
            return hosts
        def _chunk(my_list, nb_chunks):
            # round-robin split of my_list into nb_chunks groups
            groups = []
            for i in range(nb_chunks):
                groups.append([])
            for i in range(len(my_list)):
                groups[i % nb_chunks].append(my_list[i])
            return groups
        topology_matches = self._validate_ringmap(tokenmap, target_tokenmap)
        if topology_matches:
            target_tokens = {_tokens_from_ringitem(ringitem): host for host, ringitem in target_tokenmap.items()}
            backup_tokens = {_tokens_from_ringitem(ringitem): host for host, ringitem in tokenmap.items()}
            # NOTE(review): the names below look swapped (target_... is built
            # from tokenmap, backup_... from target_tokenmap); since the two
            # sets are only compared for equality the outcome is unaffected,
            # but the log message labels may be reversed — confirm upstream.
            target_tokens_per_host = set(_token_counts_per_host(tokenmap))
            backup_tokens_per_host = set(_token_counts_per_host(target_tokenmap))
            # we must have the same number of tokens per host in both vnode and normal clusters
            if target_tokens_per_host != backup_tokens_per_host:
                logging.info('Source/target rings have different number of tokens per node: {}/{}'.format(
                    backup_tokens_per_host,
                    target_tokens_per_host
                ))
                topology_matches = False
            # if not using vnodes, the tokens must match exactly
            if len(backup_tokens_per_host) == 1 and target_tokens.keys() != backup_tokens.keys():
                extras = target_tokens.keys() ^ backup_tokens.keys()
                logging.info('Tokenmap is differently distributed. Extra items: {}'.format(extras))
                topology_matches = False
        if topology_matches:
            # We can associate each restore node with exactly one backup node
            ringmap = collections.defaultdict(list)
            for ring in backup_tokens, target_tokens:
                for token, host in ring.items():
                    ringmap[token].append(host)
            self.ringmap = ringmap
            # hosts[0] is the backup host (inserted first), hosts[1] the target
            for token, hosts in ringmap.items():
                self.host_map[hosts[1]] = {'source': [hosts[0]], 'seed': False}
        else:
            # Topologies are different between backup and restore clusters. Using the sstableloader for restore.
            self.use_sstableloader = True
            backup_hosts = _hosts_from_tokenmap(tokenmap)
            restore_hosts = list(_hosts_from_tokenmap(target_tokenmap))
            if len(backup_hosts) >= len(restore_hosts):
                grouped_backups = _chunk(list(backup_hosts), len(restore_hosts))
            else:
                grouped_backups = _chunk(list(backup_hosts), len(backup_hosts))
            for i in range(min([len(grouped_backups), len(restore_hosts)])):
                # associate one restore host with several backups as we don't have the same number of nodes.
                self.host_map[restore_hosts[i]] = {'source': grouped_backups[i], 'seed': False}
def _populate_hostmap(self):
    """Build self.host_map from the host list file.

    Each line holds <seed><sep><target><sep><source>, with the separator
    taken from the storage config. The seed column is the literal string
    'True' when the target node is a seed.
    """
    separator = self.config.storage.host_file_separator
    with open(self.host_list, 'r') as host_file:
        for raw_line in host_file:
            seed_flag, target, source = raw_line.replace('\n', '').split(separator)
            # bool('False') is True in Python, so compare the literal text instead
            self.host_map[target.strip()] = {
                'source': [source.strip()],
                'seed': seed_flag == 'True',
            }
def _restore_data(self):
    """Drive the full-cluster restore across all hosts in self.host_map.

    Flow: confirm with the operator (unless bypass_checks), then either
    stop Cassandra on every target (in-place restore) or recreate the
    schema (sstableloader restore), trigger the per-node restores in
    parallel, and wait for them all. Raises on cancellation or if any
    node fails to stop/restore.
    """
    # create workdir on each target host
    # Later: distribute a credential
    # construct command for each target host
    # invoke `nohup medusa-wrapper #{command}` on each target host
    # wait for exit on each
    logging.info('Starting cluster restore...')
    logging.info('Working directory for this execution: {}'.format(self.work_dir))
    for target, sources in self.host_map.items():
        logging.info('About to restore on {} using {} as backup source'.format(target, sources))
    logging.info('This will delete all data on the target nodes and replace it with backup {}.'
                 .format(self.cluster_backup.name))
    # Interactive confirmation loop; skipped entirely when bypass_checks is set
    proceed = None
    while (proceed != 'Y' and proceed != 'n') and not self.bypass_checks:
        proceed = input('Are you sure you want to proceed? (Y/n)')
    if proceed == 'n':
        err_msg = 'Restore manually cancelled'
        logging.error(err_msg)
        raise Exception(err_msg)
    if self.use_sstableloader is False:
        # stop all target nodes
        stop_remotes = []
        logging.info('Stopping Cassandra on all nodes')
        for target, source in [(t, s['source']) for t, s in self.host_map.items()]:
            client, connect_args = self._connect(target)
            if self.check_cassandra_running(target, client, connect_args):
                logging.info('Cassandra is running on {}. Stopping it...'.format(target))
                command = 'sh -c "{}"'.format(self.config.cassandra.stop_cmd)
                stop_remotes.append(self._run(target, client, connect_args, command))
            else:
                logging.info('Cassandra is not running on {}.'.format(target))
        # wait for all nodes to stop
        logging.info('Waiting for all nodes to stop...')
        finished, broken = self._wait_for(stop_remotes)
        if len(broken) > 0:
            err_msg = 'Some Cassandras failed to stop. Exiting'
            logging.error(err_msg)
            raise Exception(err_msg)
    else:
        # we're using the sstableloader, which will require to (re)create the schema and empty the tables
        logging.info("Restoring schema on the target cluster")
        self._restore_schema()
    # work out which nodes are seeds in the target cluster
    target_seeds = [t for t, s in self.host_map.items() if s['seed']]
    # trigger restores everywhere at once
    # pass in seed info so that non-seeds can wait for seeds before starting
    # seeds, naturally, don't wait for anything
    remotes = []
    for target, source in [(t, s['source']) for t, s in self.host_map.items()]:
        logging.info('Restoring data on {}...'.format(target))
        seeds = None if target in target_seeds else target_seeds
        remote = self._trigger_restore(target, source, seeds=seeds)
        remotes.append(remote)
    # wait for the restores
    logging.info('Starting to wait for the nodes to restore')
    finished, broken = self._wait_for(remotes)
    if len(broken) > 0:
        err_msg = 'Some nodes failed to restore. Exiting'
        logging.error(err_msg)
        raise Exception(err_msg)
    logging.info('Restore process is complete. The cluster should be up shortly.')
    if self.verify:
        hosts = list(map(lambda r: r.target, remotes))
        verify_restore(hosts, self.config)
def _restore_schema(self):
    """Recreate every non-system keyspace of the backed-up schema on the target cluster."""
    parsed = parse_schema(self.cluster_backup.schema)
    with self.session_provider.new_session() as session:
        for ks_name, ks_schema in parsed.items():
            # System keyspaces are managed by Cassandra itself; never touch them.
            if ks_name.startswith("system"):
                continue
            self._create_or_recreate_schema_objects(session, ks_name, ks_schema)
def _create_or_recreate_schema_objects(self, session, keyspace, keyspace_schema):
    """(Re)create one keyspace's schema objects in dependency-safe order.

    Drops happen so dependents never outlive their bases (MVs, then
    tables, then UDTs); creation is performed in the reverse order
    (UDTs, tables, indices, MVs).
    """
    logging.info("(Re)creating schema for keyspace {}".format(keyspace))
    if (keyspace not in session.cluster.metadata.keyspaces):
        # Keyspace doesn't exist on the target cluster. Got to create it and all the tables as well.
        session.execute(keyspace_schema['create_statement'])
    for mv in keyspace_schema['materialized_views']:
        # MVs need to be dropped before we drop the tables
        logging.debug("Dropping MV {}.{}".format(keyspace, mv[0]))
        session.execute("DROP MATERIALIZED VIEW {}.{}".format(keyspace, mv[0]))
    for table in keyspace_schema['tables'].items():
        logging.debug("Dropping table {}.{}".format(keyspace, table[0]))
        if table[0] in session.cluster.metadata.keyspaces[keyspace].tables.keys():
            # table already exists, drop it first
            session.execute("DROP TABLE {}.{}".format(keyspace, table[0]))
    for udt in keyspace_schema['udt'].items():
        # then custom types as they can be used in tables
        if udt[0] in session.cluster.metadata.keyspaces[keyspace].user_types.keys():
            # UDT already exists, drop it first
            session.execute("DROP TYPE {}.{}".format(keyspace, udt[0]))
        # Then we create the missing ones
        session.execute(udt[1])
    for table in keyspace_schema['tables'].items():
        logging.debug("Creating table {}.{}".format(keyspace, table[0]))
        # Create the tables
        session.execute(table[1])
    for index in keyspace_schema['indices'].items():
        # indices were dropped with their base tables
        logging.debug("Creating index {}.{}".format(keyspace, index[0]))
        session.execute(index[1])
    for mv in keyspace_schema['materialized_views']:
        # Base tables are created now, we can create the MVs
        logging.debug("Creating MV {}.{}".format(keyspace, mv[0]))
        session.execute(mv[1])
def _trigger_restore(self, target, source, seeds=None):
    """Kick off `medusa restore-node` on `target`, restoring data backed up from `source`.

    The command runs detached under nohup + medusa-wrapper so _wait_for()
    can poll (and re-attach to) it later. `seeds` is passed so non-seed
    nodes can wait for the seeds; seed nodes themselves get no --seeds
    option. Returns the Remote handle produced by _run().
    """
    client, connect_args = self._connect(target)
    # TODO: If this command fails, the node is currently still marked as finished and not as broken.
    in_place_option = '--in-place' if self.in_place else ''
    keep_auth_option = '--keep-auth' if self.keep_auth else ''
    seeds_option = '--seeds {}'.format(','.join(seeds)) if seeds else ''
    keyspace_options = expand_repeatable_option('keyspace', self.keyspaces)
    table_options = expand_repeatable_option('table', self.tables)
    # We explicitly set --no-verify since we are doing verification here in this module
    # from the control node
    verify_option = '--no-verify'
    command = 'nohup sh -c "cd {work} && medusa-wrapper sudo medusa --fqdn={fqdn} -vvv restore-node ' \
              '{in_place} {keep_auth} {seeds} {verify} --backup-name {backup} {use_sstableloader} ' \
              '{keyspaces} {tables}"' \
        .format(work=self.work_dir,
                fqdn=','.join(source),
                in_place=in_place_option,
                keep_auth=keep_auth_option,
                seeds=seeds_option,
                verify=verify_option,
                backup=self.cluster_backup.name,
                use_sstableloader='--use-sstableloader' if self.use_sstableloader is True else '',
                keyspaces=keyspace_options,
                tables=table_options)
    logging.debug('Restoring on node {} with the following command {}'.format(target, command))
    return self._run(target, client, connect_args, command)
def _wait_for(self, remotes):
    """Poll the given Remote jobs until every one finishes or breaks.

    Every 5 seconds each pending remote is checked: once its channel
    reports an exit status it is classified as finished (status 0) or
    broken (non-zero), and its channel/client are closed. Remotes whose
    SSH connection died are re-attached by reconnecting and re-running
    medusa-wrapper. Returns a (finished, broken) tuple of Remote lists.
    """
    finished, broken = [], []
    while True:
        time.sleep(5)  # TODO: configure sleep
        if len(remotes) == len(finished) + len(broken):
            # TODO: make a nicer exit condition
            logging.info('Exiting because all jobs are done.')
            break
        for i, remote in enumerate(remotes):
            if remote in broken or remote in finished:
                continue
            # If the remote does not set an exit status and the channel closes
            # the exit_status is negative.
            logging.debug('remote.channel.exit_status: {}'.format(remote.channel.exit_status))
            # Fix: exit_status_ready is a method; without the call the bound-method
            # reference was always truthy and the check degenerated to
            # `exit_status >= 0` alone.
            if remote.channel.exit_status_ready() and remote.channel.exit_status >= 0:
                if remote.channel.exit_status == 0:
                    finished.append(remote)
                    logging.info('Command succeeded on {}'.format(remote.target))
                else:
                    broken.append(remote)
                    logging.error('Command failed on {} : '.format(remote.target))
                    logging.error('Output : {}'.format(remote.stdout.readlines()))
                    logging.error('Err output : {}'.format(remote.stderr.readlines()))
                    try:
                        stderr = self.read_file(remote, self.work_dir / 'stderr')
                    except IOError:
                        stderr = 'There was no stderr file'
                    logging.error(stderr)
                # We got an exit code that does not indicate an error, but not necessarily
                # success. Cleanup channel and move to next remote.
                remote.channel.close()
                # also close the client. this will free file descriptors
                # in case we start re-using remotes this close will need to go away
                remote.client.close()
                continue
            if remote.client.get_transport().is_alive() and not remote.channel.closed:
                # Send an ignored packet for keep alive and later noticing a broken connection
                logging.debug('Keeping {} alive.'.format(remote.target))
                remote.client.get_transport().send_ignore()
            else:
                # Connection died: reconnect and let medusa-wrapper re-attach to the job
                client = paramiko.client.SSHClient()
                client.load_system_host_keys()
                client.connect(**remote.connect_args)
                # TODO: check pid to exist before assuming medusa-wrapper to pick it up
                command = 'cd {work}; medusa-wrapper'.format(work=self.work_dir)
                remotes[i] = self._run(remote.target, client, remote.connect_args, command)
    if len(broken) > 0:
        logging.info('Command failed on the following nodes:')
        for remote in broken:
            logging.info(remote.target)
    else:
        logging.info('Commands succeeded on all nodes')
    return finished, broken
def _connect(self, target):
    """Open an SSH connection to `target` and ensure the work dir exists there.

    Loads the configured private key (starting a shared ssh-agent and
    adding the key on first use so it can be forwarded), then creates
    self.work_dir on the remote host over SFTP, tolerating an
    already-existing directory. Returns (client, connect_args) for
    later _run() calls.
    """
    logging.debug('Connecting to {}'.format(target))
    pkey = None
    if self.config.ssh.key_file is not None and self.config.ssh.key_file != '':
        pkey = paramiko.RSAKey.from_private_key_file(self.config.ssh.key_file, None)
        # Start the agent only once per process; later connects reuse it
        if self._ssh_agent_started is False:
            self.create_agent()
            add_key_cmd = '{} {}'.format(SSH_ADD_KEYS_CMD, self.config.ssh.key_file)
            subprocess.check_output(add_key_cmd, universal_newlines=True, shell=True)
            self._ssh_agent_started = True
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
    connect_args = {
        'hostname': target,
        'username': self.config.ssh.username,
        'pkey': pkey,
        'compress': True,
        'password': None
    }
    client.connect(**connect_args)
    logging.debug('Successfully connected to {}'.format(target))
    sftp = client.open_sftp()
    try:
        sftp.mkdir(str(self.work_dir))
    except OSError:
        # mkdir over SFTP fails with OSError when the directory already exists
        err_msg = 'Working directory {} on {} failed.' \
                  'Folder might exist already, ignoring exception'.format(str(self.work_dir), target)
        logging.debug(err_msg)
    except Exception as ex:
        err_msg = 'Creating working directory on {} failed: {}'.format(target, str(ex))
        logging.error(err_msg)
        raise Exception(err_msg)
    finally:
        sftp.close()
    return client, connect_args
def _run(self, target, client, connect_args, command):
    """Start `command` on the remote host and return a Remote handle for it."""
    channel_session = client.get_transport().open_session()
    channel_session.get_pty()
    # Forward the local SSH agent into the session so remote sudo/medusa can authenticate
    paramiko.agent.AgentRequestHandler(channel_session)
    # -S makes sudo read any required password from stdin on the pty
    channel_session.exec_command(command.replace('sudo', 'sudo -S'))
    out_stream = channel_session.makefile('r', -1)
    err_stream = channel_session.makefile_stderr('r', -1)
    logging.debug('Running \'{}\' remotely on {}'.format(command, connect_args['hostname']))
    return Remote(target, connect_args, client, out_stream.channel, out_stream, err_stream)
def read_file(self, remote, remotepath):
    """Return the UTF-8 decoded contents of `remotepath` on the remote host."""
    with remote.client.open_sftp() as sftp:
        with sftp.file(remotepath.as_posix(), 'r') as remote_file:
            return remote_file.read().decode('utf-8')
def check_cassandra_running(self, host, client, connect_args):
    """Return True when the configured liveness command exits with status 0 on `host`."""
    probe = 'sh -c "{}"'.format(self.config.cassandra.check_running)
    remote = self._run(host, client, connect_args, probe)
    return remote.channel.recv_exit_status() == 0
def create_agent(self):
    """Spawn an ssh-agent and export its environment variables into this process."""
    agent_output = subprocess.check_output(SSH_AGENT_CREATE_CMD, universal_newlines=True, shell=True)
    if agent_output:
        # The first two lines look like 'NAME=value; export NAME;'
        for line in agent_output.strip().split('\n')[0:2]:
            envvar, val = line.split(';')[0].split('=')
            logging.debug('Setting environment variable: {}={}'.format(envvar, val))
            os.environ[envvar] = val
def ssh_cleanup(self):
    """Kill the ssh-agent spawned by create_agent and clear its environment variables."""
    # Kill the agent so we don't leak one process per restore run
    subprocess.check_output(SSH_AGENT_KILL_CMD, universal_newlines=True, shell=True)
    # Blank the variables so later code does not pick up the dead agent
    for varname in (SSH_AUTH_SOCK_ENVVAR, SSH_AGENT_PID_ENVVAR):
        os.environ[varname] = ''
|
UTF-8
|
Python
| false | false | 24,806 |
py
| 13 |
restore_cluster.py
| 6 | 0.595461 | 0.593687 | 0 | 538 | 45.107807 | 117 |
aryyawijaya/learn_Django
| 5,007,931,886,229 |
37bf866643ef0e5586d0f0dc968716dfb6e425f5
|
a5c437424559320c0bc391fcf53fbf3df4c9b4f6
|
/perpus/perpustakaan/views.py
|
82e7b9d2420ae6365b4238ee2839773d3282a48f
|
[] |
no_license
|
https://github.com/aryyawijaya/learn_Django
|
32c595db0e2001818dc013902764be38c31cba87
|
94591a682bc5a620541ab67a2a00c18cbbad164e
|
refs/heads/master
| 2023-04-02T14:36:59.616213 | 2021-04-16T09:07:47 | 2021-04-16T09:07:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, redirect
from perpustakaan.models import Buku
from perpustakaan.forms import FormBuku, FormKategori
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib.auth.forms import UserCreationForm
@login_required(login_url=settings.LOGIN_URL)  # only logged-in users may create accounts
def signup(request):
    """Create a regular account (not a superuser, so no Django admin access)."""
    if request.POST:
        form = UserCreationForm(request.POST)
        if not form.is_valid():
            messages.error(request, 'Terjadi kesalahan.')
            return redirect('signup')
        form.save()
        messages.success(request, 'User berhasil dibuat.')
        return redirect('signup')
    # GET: show an empty registration form
    form = UserCreationForm()
    return render(request, 'signup.html', {'form': form})
@login_required(login_url=settings.LOGIN_URL)
def hapusBuku(request, id_buku):
    """Delete the book with the given id and return to the book list."""
    Buku.objects.filter(id=id_buku).delete()
    messages.success(request, 'Data berhasil dihapus.')
    return redirect('buku')
@login_required(login_url=settings.LOGIN_URL)
def ubahBuku(request, id_buku):
    """Edit an existing book; a valid POST saves and reloads this edit page."""
    buku = Buku.objects.get(id=id_buku)
    if request.POST:
        # Bind the submitted data onto this specific book instance
        form = FormBuku(request.POST, instance=buku)
        if form.is_valid():
            form.save()
            messages.success(request, 'Data berhasil diperbaharui.')
            return redirect('ubah_buku', id_buku=id_buku)
    else:
        form = FormBuku(instance=buku)
    return render(request, 'ubah-buku.html', {'form': form, 'buku': buku})
@login_required(login_url=settings.LOGIN_URL)
def buku(request):
    """List every book (ORM equivalent of SELECT * FROM buku)."""
    books = Buku.objects.all()
    return render(request, 'buku.html', {'Buku': books})
@login_required(login_url=settings.LOGIN_URL)
def penerbit(request):
    """Render the publisher page using the full book list."""
    books = Buku.objects.all()
    return render(request, 'penerbit.html', {'Buku': books})
# client --> urls --> view --> model --> view --> templates --> client
@login_required(login_url=settings.LOGIN_URL)
def tambahBuku(request):
    """Create a new book.

    Valid POST: save, then show a fresh form with a success message.
    Invalid POST: re-render the bound form so its errors are shown
    (previously this path hit an undefined `pesan` variable).
    GET: show an empty form.
    """
    if request.POST:
        form = FormBuku(request.POST)
        if form.is_valid():
            form.save()
            konteks = {
                'form': FormBuku(),
                'pesan': 'Data berhasil disimpan',
            }
            return render(request, 'tambah-buku.html', konteks)
        # Invalid submission: keep the bound form so validation errors render
        return render(request, 'tambah-buku.html', {'form': form})
    form = FormBuku()
    return render(request, 'tambah-buku.html', {'form': form})
@login_required(login_url=settings.LOGIN_URL)
def tambahKategori(request):
    """Create a new category.

    Valid POST: save, then show a fresh form with a success message.
    Invalid POST: re-render the bound form so its errors are shown
    (previously this path hit an undefined `pesan` variable).
    GET: show an empty form.
    """
    if request.POST:
        form = FormKategori(request.POST)
        if form.is_valid():
            form.save()
            konteks = {
                'form': FormKategori(),
                'pesan': 'Data berhasil disimpan',
            }
            return render(request, 'tambah-kategori.html', konteks)
        # Invalid submission: keep the bound form so validation errors render
        return render(request, 'tambah-kategori.html', {'form': form})
    form = FormKategori()
    return render(request, 'tambah-kategori.html', {'form': form})
|
UTF-8
|
Python
| false | false | 4,210 |
py
| 11 |
views.py
| 7 | 0.621853 | 0.621615 | 0 | 132 | 30.901515 | 93 |
alexandraback/datacollection
| 18,519,899,019,849 |
2316d038279a2a995087ed81c9f2dd0f8e76d6e0
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5709773144064000_1/Python/ororog/B.py
|
7497a3d0343382021fd137a45a809179c3a4c4a4
|
[] |
no_license
|
https://github.com/alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
from numpy import *
def main():
    # Python 2: input() eval()s the line, so T becomes the integer case count
    T = input()
    # Cases are numbered from 0 here; solve() prints them 1-based
    for i in range(T): solve(i)
def solve(num):
    # Read three floats: C (purchase cost), F (rate gained per purchase),
    # X (target amount) — semantics per the contest problem statement.
    C, F, X = map(float, raw_input().split())
    # Production starts at 2.0 units/sec with no time elapsed
    speed, time = 2.0, 0.0
    ans = 1.0e100
    while True:
        # Candidate: stop buying now and wait out the remaining X at the current rate
        nans = time + X / speed
        # Once another purchase no longer improves the finish time, stop
        if nans > ans: break
        ans = nans
        # Otherwise buy one more: wait C / speed seconds, then the rate grows by F
        time += C / speed
        speed += F
    # Python 2 print statement; 10 decimal places of precision
    print 'Case #%d: %.10f' % (num + 1, ans)


if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 430 |
py
| 122,470 |
B.py
| 117,790 | 0.47907 | 0.451163 | 0 | 25 | 16.2 | 46 |
poetchess/pythonrunner
| 6,493,990,567,989 |
a8f05c76e6a6e8fdaaf559e11a0076cfbc3c1359
|
da1b31516177038cbc66f814e54220dd3a54b422
|
/function_object/function_introspection/clip_info_inspect.py
|
ff168984be7a6080bb6a5e8a02178cde4dead4d7
|
[] |
no_license
|
https://github.com/poetchess/pythonrunner
|
60c27b48d789aa97e823e248401c2e3002e7c286
|
ecb9d1350bcfe950ceec9da805baa86e9a73238c
|
refs/heads/master
| 2021-01-09T06:09:06.883488 | 2017-09-24T08:23:00 | 2017-09-24T08:23:00 | 80,926,138 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
if __name__ == '__main__':
    # Demonstrate runtime introspection of clip()'s signature with inspect.
    from clip_info import clip
    from inspect import signature

    clip_sig = signature(clip)
    print(str(clip_sig))
    # One line per parameter: its kind, name and default value.
    for param_name, param in clip_sig.parameters.items():
        print(param.kind, ':', param_name, '=', param.default)
|
UTF-8
|
Python
| false | false | 257 |
py
| 49 |
clip_info_inspect.py
| 46 | 0.603113 | 0.603113 | 0 | 8 | 31.25 | 56 |
lintosh/intelijencee
| 2,078,764,210,329 |
db07f8fc29317ef7b139f9d39f9f151ca0306c67
|
1992d95b477e3777c376bdd2b077a11852b450a9
|
/inteliJence/apps/client.intelijence/init.py
|
fd75b0bb69bf17011c2ca16609d593701108ccd4
|
[] |
no_license
|
https://github.com/lintosh/intelijencee
|
454adbc4f84f15d829e8db468a8c23dea88c2f94
|
2ab82279b44b96831cee268ced4e0cf3f1b524e9
|
refs/heads/master
| 2021-01-19T19:50:45.912504 | 2017-04-16T23:48:31 | 2017-04-16T23:48:31 | 88,449,406 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#(@:Description): "Client testing for email Management, and automation api code"
#(@:Author): "inteliJence development team"
#under the license of Apache License 2.0 and intelijence ProtectiveRights please edit and use it with all the care you can give
#(@:App-Type): "Dynamic"
#(@:Language): "Python"
#(@:Language Compiler): "python 2.9"
#(@:Framework): "Flask"
#(@:Required-Modules): "Flask, Flask SQL Alchemy, Flask_MYSQL, Flask-WTF, Flask_Socket, html5lib, Jinja, Flask_Sijax, Flask_Admin, Flask_Login, Request **more to come**"
#wow if you are done with the above
#and
#you get it
#you are on the right track
#boom!!!!!!!! now lets start the job
#import the flask framework and all it's modules and start testing
#import all classes used in routing from the classes package/directory to access all api handlers
from flask import Flask,request,jsonify
from requests import put,get
import json
# from classes import *
#Instantiate all classes and modules to start operation
#modules like flask and restful api
app=Flask(__name__)
#let the coding begin
@app.route("/")
def loadMain():
    """Connect this client to the mail API and show the returned auth details."""
    response = get('http://localhost:9000/connect/pichaBot/hf234we6f984ewsd32j89yds')
    payload = json.loads(response.text)  # API responds with a JSON document
    if payload["connect"] == True:
        return ("AuthKey: {} <br/>User: {} <br/>connect: {}"
                .format(payload["Auth"], payload["user"], payload["connect"]))
    return "Sorry An Error Occured, relaunch this app please"
#end of codes
#now start the engine and let the car start moving
#run flask here
# Run Flask's development server when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True,port=8000)
|
UTF-8
|
Python
| false | false | 1,823 |
py
| 9 |
init.py
| 5 | 0.708722 | 0.695557 | 0 | 57 | 29.912281 | 170 |
vanya22551/sibadiservice
| 15,101,105,035,888 |
d1d3f62ead812b7bb6365c7a29f1dbdfa6077738
|
0580e6030374d5d77999f3760b57e63b4ca8a03a
|
/telebotsibadi/botbi20i1/forms.py
|
94b29782308e05102f177e94ed05ca603f9c7767
|
[] |
no_license
|
https://github.com/vanya22551/sibadiservice
|
315c373943fd8e208d45721969ef23d0a5adb2de
|
cc8efe424464b69315a5e91cb48c96cb81d97be3
|
refs/heads/master
| 2023-05-27T00:11:58.021004 | 2021-06-16T16:50:57 | 2021-06-16T16:50:57 | 326,341,435 | 0 | 0 | null | false | 2021-03-02T16:23:09 | 2021-01-03T06:16:01 | 2021-03-02T16:11:32 | 2021-03-02T16:11:29 | 5,590 | 0 | 0 | 1 |
Python
| false | false |
from django.contrib.auth.models import User
from django.forms import ModelForm, HiddenInput, ModelChoiceField
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from .models import Laboratory, Student, File, Teacher
class LabForm(ModelForm):
    """ModelForm exposing every field of the Laboratory model."""

    class Meta:
        model = Laboratory
        fields = '__all__'
class FileForm(ModelForm):
    """Form for a student's file submission tied to a lab."""

    class Meta:
        model = File
        # A list (not a set literal) keeps the rendered field order deterministic.
        fields = ['file', 'lab', 'student']
class AuthUserForm(AuthenticationForm, ModelForm):
    """Login form that styles every widget with Bootstrap's form-control class."""

    class Meta:
        model = User
        # A list (not a set literal) keeps the rendered field order deterministic.
        fields = ['username', 'password']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap class to every rendered widget
        for field in self.fields:
            self.fields[field].widget.attrs['class'] = 'form-control'
class ChangePasswordForm(PasswordChangeForm, ModelForm):
    """Password-change form with Bootstrap-styled widgets."""

    class Meta:
        model = User
        # A list (not a set literal) keeps the rendered field order deterministic.
        fields = ['old_password', 'new_password1', 'new_password2']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap class to every rendered widget
        for field in self.fields:
            self.fields[field].widget.attrs['class'] = 'form-control'
class RegisterUserForm(ModelForm):
    """Registration form that hashes the raw password before storing the user."""

    class Meta:
        model = User
        # A list (not a set literal) keeps the rendered field order deterministic.
        fields = ['username', 'password']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap class to every rendered widget
        for field in self.fields:
            self.fields[field].widget.attrs['class'] = 'form-control'

    def save(self, commit=True):
        user = super().save(commit=False)
        # Store the hash — never the raw form value — as the password
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
        return user
class EditTeacherInformationForm(ModelForm):
    """Form for a teacher to edit their own contact details."""

    class Meta:
        model = Teacher
        # A list (not a set literal) keeps the rendered field order deterministic.
        fields = ['email', 'phone', 'additional_information']
|
UTF-8
|
Python
| false | false | 1,828 |
py
| 84 |
forms.py
| 63 | 0.599562 | 0.598468 | 0 | 66 | 26.651515 | 76 |
w1907/Tarea2_desarrollo_web
| 15,848,429,324,807 |
431f71fce8cb3c7209a67c0494dcfdc96f4eda47
|
01ebaf3626a63e5eeea8a32da7fbca4a848e881b
|
/basket/views.py
|
221aaf8e7e9e39c7e50b875ce3eb64c8048fc521
|
[] |
no_license
|
https://github.com/w1907/Tarea2_desarrollo_web
|
622b0520a3909f1a78ee909210ceaeb47243582f
|
0b7bada48d6ad7e4e34f74f60f2ec4ed95153189
|
refs/heads/master
| 2020-03-17T22:49:23.324274 | 2018-05-19T00:35:26 | 2018-05-19T00:35:26 | 134,019,512 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from basket.models import Player
from basket.forms import PlayerForm
from django.shortcuts import redirect
def index(request):
    """Show every player ordered by primary key (SELECT * FROM player ORDER BY id)."""
    players = Player.objects.all().order_by('id')
    return render(request, 'player/list_player.html', {'object_list': players})
def list(request):
    """Alternative player listing (note: the view name shadows the builtin list)."""
    context = {'jugadores': Player.objects.all()}
    return render(request, 'player/listar.html', context)
def add(request):
    """Create a player; a valid POST saves and redirects, anything else renders the form."""
    if request.method == "POST":
        # request.FILES carries the uploaded image/media for the player
        form = PlayerForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('player_list')
    else:
        form = PlayerForm()
    return render(request, 'player/add_player.html', {'form': form})
def edit(request, player_id):
    """Update the player identified by player_id; valid POSTs redirect to the list."""
    player = Player.objects.get(id=player_id)
    if request.method == 'GET':
        form = PlayerForm(instance=player)
    else:
        form = PlayerForm(request.POST, instance=player)
        if form.is_valid():
            form.save()
            return redirect('player_list')
    return render(request, 'player/add_player.html', {'form': form})
def delete(request, player_id):
    """Show a confirmation page on GET; actually delete the player on POST."""
    player = Player.objects.get(id=player_id)
    if request.method == 'POST':
        player.delete()
        return redirect('player_list')
    return render(request, 'player/delete_player.html', {'jugadores': player})
def detail(request, player_id):
    """Show one player (SELECT * FROM player WHERE id = player_id)."""
    player = Player.objects.get(pk=player_id)
    return render(request, 'player/detail_player.html', {'player': player})
|
UTF-8
|
Python
| false | false | 1,951 |
py
| 2 |
views.py
| 1 | 0.646848 | 0.646848 | 0 | 68 | 27.661765 | 64 |
khalil-Hennara/KBS_diagnoses_diseases
| 4,647,154,617,823 |
a76cf5d1ea7eb78af6da98129475bd0abce9e118
|
a48180bd94679479e9a5cd15125eef0ea95b5b39
|
/Get_Value.py
|
6b6aca98600deba7b38f0afd40b845441623ae8a
|
[] |
no_license
|
https://github.com/khalil-Hennara/KBS_diagnoses_diseases
|
daf4ec0909b021f69aaae658bd4198bd6b6f5494
|
8d6e97e3a49703e711388e149f4011405bad8f26
|
refs/heads/main
| 2023-07-17T05:28:33.386933 | 2021-09-05T06:19:24 | 2021-09-05T06:19:24 | 403,231,485 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from manifestation import *
def get_The_Right_EGD(value):
    """Map an integer answer code (0-9) to its EGD finding; None when out of range."""
    egd_by_code = {
        0: EGD.ULCERS_AND_EROSIONS_IN_DOUDENUM,
        1: EGD.ULCERS_AND_EROSIONS_IN_STOMACH,
        2: EGD.BLUNTED_VILLI,
        3: EGD.SUPER_FACIAL_INFLAMMATORY_INFILTRATE,
        4: EGD.DIFFUSE_ULCERAION,
        5: EGD.PSEUDO_POLPLUS,
        6: EGD.SUPERFICIAL_CHRONIC_INFLAMMATION,
        7: EGD.CRYPT_ABSCESS,
        8: EGD.DEEP_AND_LONG_FISSURES,
        9: EGD.COBBLESTONING,
    }
    return egd_by_code.get(value)
def get_The_Right_Labs(value):
    """Map an integer answer code (0-7) to its lab test; None when out of range."""
    lab_by_code = {
        0: LABS.IGA_ATTA,
        1: LABS.IGA_AGA,
        2: LABS.AMYLAS,
        3: LABS.LIBAS,
        4: LABS.BILIRUBIN,
        5: LABS.WHITE_BLOOD_CELLS,
        6: LABS.PLATLTE,
        7: LABS.HIMOGLOBIN,
    }
    return lab_by_code.get(value)
def get_The_Right_Echo(value):
    """Map an integer answer code (0-4) to its echography finding; None when out of range."""
    echo_by_code = {
        0: ECHO.GALLBLADDER_WALL_THICKENING,
        1: ECHO.PERICHOLECYSTIC_FLUID,
        2: ECHO.HIBATOMIGALI,
        3: ECHO.SPLINO_MIGALI,
        4: ECHO.FREE_LIQUADE,
    }
    return echo_by_code.get(value)
def get_the_Right_CF(value):
    """Return the certainty factor for answer codes 1-8; None for any other code."""
    cf_by_answer = {
        1: 0.6,
        2: 1.0,
        3: 0.2,
        4: 0.5,
        5: 1.0,
        6: -1.0,
        7: 0,
        8: -1.0,
    }
    return cf_by_answer.get(value)
def get_CF_for_EGD(value):
    """Certainty factor for EGD answers: 0 -> 0.9, 1 -> 0.4, anything else -> 0."""
    if value == 0:
        return 0.9
    if value == 1:
        return 0.4
    return 0
|
UTF-8
|
Python
| false | false | 1,791 |
py
| 15 |
Get_Value.py
| 12 | 0.61809 | 0.588498 | 0 | 48 | 36.25 | 71 |
lexicalx/ML4T
| 14,723,147,908,302 |
dbf3481978ef9b143da2a903ae29dbcbde5f71e8
|
eb7bea51c7d91902f42af87086bfcd7702aec02c
|
/vinay/numpy_6.py
|
cf98f98d6dcda99cce4eb2f2ab724727f9f4967c
|
[] |
no_license
|
https://github.com/lexicalx/ML4T
|
2c8a84c63e856e3b9441eacb850b67f391edfddd
|
567b41d9377b41b28d078e858155a35eb3021ce0
|
refs/heads/master
| 2022-04-04T20:54:23.208693 | 2017-12-05T05:02:27 | 2017-12-05T05:02:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
def test_run():
    # Demo of NumPy indexing and elementwise arithmetic (Python 2 print statements).
    # Fancy indexing: select elements of a 1-D array via an integer index array
    a = np.random.randint(0,10,size=(5))
    print "1d array:\n",a
    indices = np.array([1,1,2,3])
    print "Array values using indices:\n",a[indices]
    print "-------------------"
    # Boolean (mask) indexing against the array mean
    a = np.random.randint(0,10,size=(2,3))
    print "Array:\n",a
    mean = a.mean()
    print "Mean:\n",mean
    print "Elements less than mean:\n",a[a<mean]
    print "-------------------"
    # Masked assignment clamps all sub-mean elements up to the mean
    a[a<mean] = mean
    print "Array with elements less than mean replaced by mean:\n",a
    print "-------------------"
    # Scalar arithmetic; integer dtype, so division truncates here
    print "Multiply by 2:\n",a*2
    print "-------------------"
    print "Divide by 2:\n",a/2
    b = np.random.randint(10,20,size=(2,3))
    print "-------------------"
    print "a Array:\n",a
    print "-------------------"
    print "b Array:\n",b
    print "-------------------"
    # Elementwise array-array arithmetic (same shapes, no broadcasting needed)
    print "Adding a+b:\n",a+b
    print "-------------------"
    print "Subtracting b-a:\n",b-a
    print "-------------------"
    print "Multiplying a*b:\n",a*b
    print "-------------------"
    print "Dividing b/a:\n",b/a
    print "-------------------"
    #print "Matrix multiplication a.b\n",np.dot(a,b)


if __name__ == "__main__":
    test_run()
|
UTF-8
|
Python
| false | false | 1,088 |
py
| 39 |
numpy_6.py
| 17 | 0.497243 | 0.475184 | 0 | 44 | 23.704545 | 65 |
francoischalifour/todo-cli
| 7,129,645,725,448 |
38557696e2b73ecf94fdb112d7b6ef4c3084e083
|
3c695c52bc27723cbcb678164f5de0b8358723f6
|
/todo/utils/compatibility.py
|
798c7f29ce6ce3812f116409c4df9d364ecece26
|
[
"MIT"
] |
permissive
|
https://github.com/francoischalifour/todo-cli
|
3324728eaae024421fb1eb2c5fb740e89f1a4893
|
8376541fc528f9433b5ff2c538c3410fd611fe1b
|
refs/heads/master
| 2023-08-16T19:47:04.606314 | 2023-08-14T09:18:21 | 2023-08-14T09:18:21 | 58,815,486 | 101 | 22 |
MIT
| false | 2023-08-14T09:18:23 | 2016-05-14T15:35:19 | 2023-05-31T13:17:55 | 2023-08-14T09:18:21 | 26 | 89 | 12 | 0 |
Python
| false | false |
# -*- coding: utf-8 -*-
import sys
def safe_print(s):
    """Print *s*, degrading gracefully when the console cannot encode it.

    Legacy Windows consoles often use a code page that cannot represent
    characters such as the checkmark; instead of crashing, the string is
    rendered with what the console encoding can express.
    """
    try:
        print(s)
    except UnicodeEncodeError:
        fallback = s.replace('✓', '-')  # change the checkmark symbol
        encoding = sys.stdout.encoding or 'utf-8'
        # Round-trip through the console's own encoding, replacing anything it
        # cannot represent. The original encoded as UTF-8 but decoded with the
        # console encoding, which produced mojibake (or raised again) whenever
        # the two encodings differed.
        print(fallback.encode(encoding, errors='replace').decode(encoding))
|
UTF-8
|
Python
| false | false | 342 |
py
| 22 |
compatibility.py
| 20 | 0.511765 | 0.502941 | 0 | 15 | 21.666667 | 60 |
SaurabhRNayak/FasTAG_2.0
| 14,929,306,328,288 |
6cdbe265961231aed4aaa7d9e50b5a6142ce1c26
|
48a120b1bc958aeab12dc879026a20d369f160ed
|
/jav_trigger.py
|
49cb249dc4196ec59ec4a202ab2649507493060e
|
[] |
no_license
|
https://github.com/SaurabhRNayak/FasTAG_2.0
|
078e81cfa765e42394fb5f858cce244abf249506
|
77bbcf8ee064671167a760cefd458e90c560edd3
|
refs/heads/master
| 2022-10-28T03:13:30.252355 | 2020-06-11T19:42:00 | 2020-06-11T19:42:00 | 271,630,704 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import subprocess
import json
def jar_trigger(numplate):
    """Validate a number plate by invoking the Validate.jar tool.

    Returns (True, vehicle_class) on success, or (False, '') when the tool
    reports an error or produces output that is not valid JSON (e.g. when
    java is missing or the jar prints nothing).
    """
    result = subprocess.run(['java', '-jar', 'Validate.jar', numplate],
                            capture_output=True, text=True)
    output = result.stdout
    if "Error" in output:
        return (False, '')
    try:
        details = json.loads(output)
    except ValueError:
        # json.JSONDecodeError subclasses ValueError; guard against
        # empty/malformed output instead of crashing the caller.
        return (False, '')
    return (True, details["Vehicle Class"])


if __name__ == '__main__':
    jar_trigger('UP16AT8647')
|
UTF-8
|
Python
| false | false | 515 |
py
| 5 |
jav_trigger.py
| 4 | 0.629126 | 0.596117 | 0 | 16 | 31.25 | 96 |
01BTC10/doconce
| 10,127,532,899,866 |
d63ad38308cddacf06078c96efcd6b62af033f74
|
03b385537e579eec44ce0e048b45887e9f374d5a
|
/lib/doconce/sphinx.py
|
9f5874c4bb5dfea84d5bf1925c6c09859577ce35
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/01BTC10/doconce
|
129a179b774cba25ebc836af107b9597dd40e45b
|
3b39cf3ba1ffe3fcc9ea81a942651a18e856e81d
|
refs/heads/master
| 2020-06-21T20:02:02.848468 | 2017-06-08T13:06:08 | 2017-06-08T13:06:08 | 94,205,427 | 1 | 0 | null | true | 2017-06-13T11:25:11 | 2017-06-13T11:25:11 | 2017-06-06T22:42:32 | 2017-06-08T16:07:47 | 472,709 | 0 | 0 | 0 | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
# http://sphinx.pocoo.org/ext/math.html#
# can reuse most of rst module:
from .rst import *
from .common import align2equations, online_python_tutor, \
get_legal_pygments_lexers, has_custom_pygments_lexer
from .misc import option, _abort
from .doconce import errwarn
# RunestoneInteractive book counters
# Module-level mutable state; presumably incremented/set by the directive
# emitters later in this module — confirm against the full file.
question_counter = 0
video_counter = 0
edit_markup_warning = False
def sphinx_figure(m):
    """Typeset a DocOnce FIGURE command as a Sphinx ``.. figure::`` directive.

    ``m`` is a MatchObject with groups 'filename', 'caption' and 'options'.
    Returns the reST text for the figure, preceded by a ``.. _label:``
    anchor when the caption contains a ``label{...}``.
    """
    result = ''
    # m is a MatchObject
    filename = m.group('filename')
    caption = m.group('caption').strip()
    # Substitute DocOnce label by rst label in caption
    # (also, remove final period in caption since caption is used as hyperlink
    # text to figures).
    m_label = re.search(r'label\{(.+?)\}', caption)
    if m_label:
        label = m_label.group(1)
        # emit the reST anchor before the figure directive
        result += '\n.. _%s:\n' % label
        # remove . at the end of the caption text
        parts = caption.split('label')
        parts[0] = parts[0].rstrip()
        if parts[0] and parts[0][-1] == '.':
            parts[0] = parts[0][:-1]
        parts[0] = parts[0].strip()
        # insert emphasize marks if not latex $ at the
        # beginning or end (math subst does not work for *$I=1$*)
        # or if not boldface or emphasize already in the caption
        caption_font = option('sphinx_figure_captions=', 'emphasize')
        if parts[0] and \
           caption_font == 'emphasize' and \
           not parts[0].startswith('$') and \
           not parts[0].endswith('$') and \
           not '*' in parts[0] and \
           not '_' in parts[0]:
            parts[0] = '*' + parts[0] + '*'
        #caption = ' label'.join(parts)
        caption = parts[0]
        # contrary to rst_figure, we do not write label into caption
        # since we just want to remove the whole label as part of
        # the caption (otherwise done when handling ref and label)
    else:
        if caption and caption[-1] == '.':
            caption = caption[:-1]
    # math is ignored in references to figures, test for math only
    if caption.startswith('$') and caption.endswith('$'):
        errwarn('*** warning: math only in sphinx figure caption (it will be ignored by sphinx, resulting in empty caption)\n %s\n FIGURE: [%s' % (caption, filename))
    #stem = os.path.splitext(filename)[0]
    #result += '\n.. figure:: ' + stem + '.*\n' # utilize flexibility # does not work yet
    result += '\n.. figure:: ' + filename + '\n'
    opts = m.group('options')
    if opts:
        # opts: width=600 frac=0.5 align=center
        # opts: width=600, frac=0.5, align=center
        # drop DocOnce-only options (frac, sidecap); strip commas from values
        info = [s.split('=') for s in opts.split()]
        fig_info = [' :%s: %s' % (opt, value.replace(',', ''))
                    for opt, value in info
                    if opt not in ['frac', 'sidecap']]
        result += '\n'.join(fig_info)
    if caption:
        result += '\n\n ' + caption + '\n'
    else:
        result += '\n\n'
    #errwarn('sphinx figure: caption=\n', caption, '\nresult:\n', result)
    return result
def sphinx_movie(m):
    """Typeset a DocOnce MOVIE command for Sphinx output.

    With --runestone and a plain local movie file, emit a
    RunestoneInteractive ``.. video::`` directive numbered by the global
    ``video_counter``; otherwise fall back to ``rst_movie``.
    """
    filename = m.group('filename')
    # wildcard/player syntax and hosted videos cannot use the video directive
    special_movie = '*' in filename or '->' in filename or 'youtu.be' in filename or 'youtube.com' in filename or 'vimeo.com' in filename
    if option('runestone') and not special_movie:
        # Use RunestoneInteractive video environment
        global video_counter
        video_counter += 1
        text = """
.. video:: video_%d
   :controls:
   %s
""" % (video_counter, filename)
        return text
    else:
        # Use plain html code
        return rst_movie(m)
def sphinx_quiz_runestone(quiz):
    """Typeset a DocOnce quiz as a RunestoneInteractive multiple-choice
    directive (``mchoicemf`` for one correct answer, ``mchoicema`` for
    several), numbered by the global ``question_counter``.

    At most five choices are supported; answers/feedback containing math,
    code or figure markup are replaced by a placeholder text.
    """
    quiz_feedback = option('quiz_explanations=', 'on')
    text = ''
    if 'new page' in quiz:
        text += '.. !split\n%s\n%s' % (quiz['new page'], '-'*len(quiz['new page']))
    text += '.. begin quiz\n\n'
    global question_counter
    question_counter += 1
    # Multiple correct answers?
    if sum([1 for choice in quiz['choices'] if choice[0] == 'right']) > 1:
        text += '.. mchoicema:: question_%d' % question_counter + '\n'
    else:
        text += '.. mchoicemf:: question_%d' % question_counter + '\n'
    def fix_text(s, tp='answer'):
        """
        Answers and feedback in RunestoneInteractive book quizzes
        cannot contain math, figure and rst markup. Perform fixes.
        """
        drop = False
        if 'math::' in s:
            errwarn('\n*** warning: quiz %s with math block not supported:' % tp)
            errwarn(s)
            drop = True
        if '.. code-block::' in s:
            errwarn('\n*** warning: quiz %s with code block not supported:' % tp)
            errwarn(s)
            drop = True
        if '.. figure::' in s:
            errwarn('\n*** warning: quiz %s with figure not supported:' % tp)
            errwarn(s)
            drop = True
        if drop:
            return ''
        # Make multi-line paragraph a one-liner
        s = ' '.join(s.splitlines()).rstrip()
        # Fixes: rewrite rst inline markup as plain HTML tags
        pattern = r'`(.+?) (<https?.+?)>`__' # URL
        s = re.sub(pattern, '<a href="\g<2>"> \g<1> </a>', s)
        pattern = r'``(.+?)``' # verbatim
        s = re.sub(pattern, '<tt>\g<1></tt>', s)
        pattern = r':math:`(.+?)`' # inline math
        s = re.sub(pattern, '<em>\g<1></em>', s) # mimic italic....
        pattern = r':\*(.+?)\*' # emphasize
        s = re.sub(pattern, '\g<1>', s, flags=re.DOTALL)
        return s
    import string
    correct = []
    # First pass: emit :answer_a: ... :answer_e: lines and record the
    # letters of the correct choices.
    for i, choice in enumerate(quiz['choices']):
        if i > 4: # not supported
            errwarn('*** warning: quiz with %d choices gets truncated (first 5)' % len(quiz['choices']))
            break
        letter = string.ascii_lowercase[i]
        text += ' :answer_%s: ' % letter
        answer = fix_text(choice[1], tp='answer')
        if not answer:
            answer = 'Too advanced typesetting prevents the text from being rendered'
        text += answer + '\n'
        if choice[0] == 'right':
            correct.append(letter)
    if correct:
        text += ' :correct: ' + ', '.join(correct) + '\n'
    else:
        errwarn('*** error: correct choice in quiz has index > 5 (max 5 allowed for RunestoneInteractive books)')
        errwarn(quiz['question'])
        _abort()
    # Second pass: the :feedback_X: field must be present for every choice,
    # even when empty or when explanations are disabled.
    for i, choice in enumerate(quiz['choices']):
        if i > 4: # not supported
            break
        letter = string.ascii_lowercase[i]
        text += ' :feedback_%s: ' % letter # must be present
        if len(choice) == 3 and quiz_feedback == 'on':
            feedback = fix_text(choice[2], tp='explanation')
            if not feedback:
                feedback = '(Too advanced typesetting prevents the text from being rendered)'
            text += feedback
        text += '\n'
    text += '\n' + indent_lines(quiz['question'], 'sphinx', ' '*3) + '\n\n\n'
    return text
def sphinx_quiz(quiz):
    """Dispatch quiz typesetting: Runestone directive or plain reST quiz."""
    use_runestone = option('runestone')
    return sphinx_quiz_runestone(quiz) if use_runestone else rst_quiz(quiz)
from .latex import fix_latex_command_regex as fix_latex
def sphinx_code(filestr, code_blocks, code_block_types,
                tex_blocks, format):
    """Typeset DocOnce code and TeX blocks for Sphinx output.

    Indents the extracted ``code_blocks``/``tex_blocks``, attaches
    ``:label:`` tags to single-label math blocks, maps !bc environment
    names to Pygments lexers via ``.. code-block::`` directives, handles
    the special Runestone/SageCell environments, re-inserts all blocks
    into ``filestr`` and returns the transformed string.
    """
    # In rst syntax, code blocks are typeset with :: (verbatim)
    # followed by intended blocks. This function indents everything
    # inside code (or TeX) blocks.
    # default mappings of !bc environments and pygments languages:
    envir2pygments = dict(
        cod='python', pro='python',
        pycod='python', cycod='cython',
        pypro='python', cypro='cython',
        fcod='fortran', fpro='fortran',
        ccod='c', cppcod='c++',
        cpro='c', cpppro='c++',
        mcod='matlab', mpro='matlab',
        plcod='perl', plpro='perl',
        shcod='bash', shpro='bash',
        rbcod='ruby', rbpro='ruby',
        #sys='console',
        sys='text',
        rst='rst',
        css='css', csspro='css', csscod='css',
        dat='text', csv='text', txt='text',
        cc='text', ccq='text', # not possible with extra indent for ccq
        ipy='ipy',
        xmlcod='xml', xmlpro='xml', xml='xml',
        htmlcod='html', htmlpro='html', html='html',
        texcod='latex', texpro='latex', tex='latex',
        latexcod='latex', latexpro='latex', latex='latex',
        do='doconce',
        pyshell='python',
        pyoptpro='python', pyscpro='python',
        )
    # grab line with: # sphinx code-blocks: cod=python cpp=c++ etc
    # (do this before code is inserted in case verbatim blocks contain
    # such specifications for illustration)
    m = re.search(r'.. *[Ss]phinx +code-blocks?:(.+)', filestr)
    if m:
        defs_line = m.group(1)
        # turn specifications into a dictionary:
        for definition in defs_line.split():
            key, value = definition.split('=')
            envir2pygments[key] = value
    # First indent all code blocks
    for i in range(len(code_blocks)):
        if code_block_types[i].startswith('pyoptpro') and not option('runestone'):
            code_blocks[i] = online_python_tutor(code_blocks[i],
                                                 return_tp='iframe')
        if code_block_types[i].endswith('-h'):
            # show/hide blocks sit inside nested containers: extra indent
            indentation = ' '*8
        else:
            indentation = ' '*4
        code_blocks[i] = indent_lines(code_blocks[i], format,
                                      indentation)
    # After transforming align environments to separate equations
    # the problem with math labels in multiple eqs has disappeared.
    # (doconce.py applies align2equations, which takes all align
    # envirs and translates them to separate equations, but align*
    # environments are allowed.
    # Any output of labels in align means an error in the
    # align -> equation transformation...)
    math_labels = []
    multiple_math_labels = [] # sphinx has problems with multiple math labels
    for i in range(len(tex_blocks)):
        tex_blocks[i] = indent_lines(tex_blocks[i], format)
        # extract all \label{}s inside tex blocks and typeset them
        # with :label: tags
        label_regex = fix_latex( r'label\{(.+?)\}', application='match')
        labels = re.findall(label_regex, tex_blocks[i])
        if len(labels) == 1:
            tex_blocks[i] = ' :label: %s\n' % labels[0] + tex_blocks[i]
        elif len(labels) > 1:
            multiple_math_labels.append(labels)
        if len(labels) > 0:
            math_labels.extend(labels)
        tex_blocks[i] = re.sub(label_regex, '', tex_blocks[i])
        # fix latex constructions that do not work with sphinx math
        # (just remove them)
        commands = [r'\begin{equation}',
                    r'\end{equation}',
                    r'\begin{equation*}',
                    r'\end{equation*}',
                    #r'\begin{eqnarray}',
                    #r'\end{eqnarray}',
                    #r'\begin{eqnarray*}',
                    #r'\end{eqnarray*}',
                    #r'\begin{align}',
                    #r'\end{align}',
                    #r'\begin{align*}',
                    #r'\end{align*}',
                    r'\begin{multline}',
                    r'\end{multline}',
                    r'\begin{multline*}',
                    r'\end{multline*}',
                    #r'\begin{split}',
                    #r'\end{split}',
                    #r'\begin{gather}',
                    #r'\end{gather}',
                    #r'\begin{gather*}',
                    #r'\end{gather*}',
                    r'\[',
                    r'\]',
                    # some common abbreviations (newcommands):
                    r'\beqan',
                    r'\eeqan',
                    r'\beqa',
                    r'\eeqa',
                    r'\balnn',
                    r'\ealnn',
                    r'\baln',
                    r'\ealn',
                    r'\beq',
                    r'\eeq', # the simplest name, contained in others, must come last!
                    ]
        for command in commands:
            tex_blocks[i] = tex_blocks[i].replace(command, '')
        # &=& -> &=
        tex_blocks[i] = re.sub('&\s*=\s*&', ' &= ', tex_blocks[i])
        # provide warnings for problematic environments
    # Replace all references to equations that have labels in math environments:
    for label in math_labels:
        filestr = filestr.replace('(:ref:`%s`)' % label, ':eq:`%s`' % label)
    multiple_math_labels_with_refs = [] # collect the labels with references
    for labels in multiple_math_labels:
        for label in labels:
            ref = ':eq:`%s`' % label # ref{} is translated to eq:``
            if ref in filestr:
                multiple_math_labels_with_refs.append(label)
    if multiple_math_labels_with_refs:
        errwarn("""
*** warning: detected non-align math environment with multiple labels
(Sphinx cannot handle this equation system - labels will be removed
and references to them will be empty):""")
        for label in multiple_math_labels_with_refs:
            errwarn(' label{%s}' % label)
        print()
    filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'sphinx')
    # Remove all !bc ipy and !bc pyshell since interactive sessions
    # are automatically handled by sphinx without indentation
    # (just a blank line before and after)
    # BUGFIX: the flags used to be passed as re.sub's 4th positional
    # argument, which is ``count`` - DOTALL/MULTILINE never took effect.
    filestr = re.sub(r'^!bc +d?ipy *\n(.*?)^!ec *\n',
                     '\n\g<1>\n', filestr, flags=re.DOTALL|re.MULTILINE)
    filestr = re.sub(r'^!bc +d?pyshell *\n(.*?)^!ec *\n',
                     '\n\g<1>\n', filestr, flags=re.DOTALL|re.MULTILINE)
    # Check if we have custom pygments lexers
    if 'ipy' in code_block_types:
        if not has_custom_pygments_lexer('ipy'):
            envir2pygments['ipy'] = 'python'
    if 'do' in code_block_types:
        if not has_custom_pygments_lexer('doconce'):
            envir2pygments['do'] = 'text'
    # Make correct code-block:: language constructions
    legal_pygments_languages = get_legal_pygments_lexers()
    for key in set(code_block_types):
        if key in envir2pygments:
            if not envir2pygments[key] in legal_pygments_languages:
                # NOTE(review): defs_line is only bound when a
                # "sphinx code-blocks:" line exists in filestr - confirm.
                errwarn("""*** warning: %s is not a legal Pygments language (lexer)
found in line:
%s
The 'text' lexer will be used instead.
""" % (envir2pygments[key], defs_line))
                envir2pygments[key] = 'text'
        #filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
        # '\n.. code-block:: %s\n\n' % envir2pygments[key], filestr,
        # flags=re.MULTILINE)
        # Check that we have code installed to handle pyscpro
        if 'pyscpro' in filestr and key == 'pyscpro':
            try:
                import icsecontrib.sagecellserver
            except ImportError:
                errwarn("""
*** warning: pyscpro for computer code (sage cells) is requested, but'
icsecontrib.sagecellserver from https://github.com/kriskda/sphinx-sagecell
is not installed. Using plain Python typesetting instead.""")
                key = 'pypro'
        if key == 'pyoptpro':
            if option('runestone'):
                filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
                                 '\n.. codelens:: codelens_\n :showoutput:\n\n',
                                 filestr, flags=re.MULTILINE)
            else:
                filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
                                 '\n.. raw:: html\n\n',
                                 filestr, flags=re.MULTILINE)
        elif key == 'pyscpro':
            if option('runestone'):
                filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
                                 """
.. activecode:: activecode_
   :language: python
""", filestr, flags=re.MULTILINE)
            else:
                filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
                                 '\n.. sagecellserver::\n\n',
                                 filestr, flags=re.MULTILINE)
        elif key == 'pysccod':
            if option('runestone'):
                # Include (i.e., run) all previous code segments...
                # NOTE: this is most likely not what we want
                # NOTE(review): activecode_counter is only assigned in the
                # counting pass further down, so reaching this branch looks
                # like a NameError - confirm against callers.
                # BUGFIX: join requires strings, and the option is
                # ":include:" (stray '"include:' removed).
                include = ', '.join([str(i) for i in range(1, activecode_counter)])
                filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
                                 """
.. activecode:: activecode_
   :language: python
   :include: %s
""" % include, filestr, flags=re.MULTILINE)
            else:
                errwarn('*** error: pysccod for sphinx is not supported without the --runestone flag\n (but pyscpro is via Sage Cell Server)')
                _abort()
        elif key == '':
            # any !bc with/without argument becomes a text block:
            filestr = re.sub(r'^!bc$', '\n.. code-block:: text\n\n', filestr,
                             flags=re.MULTILINE)
        elif key.endswith('hid'):
            if key in ('pyhid', 'jshid', 'htmlhid') and option('runestone'):
                # Allow runestone books to run hidden code blocks
                # (replace pyhid by pycod, then remove all !bc *hid)
                for i in range(len(code_block_types)):
                    if code_block_types[i] == key:
                        code_block_types[i] = key.replace('hid', 'cod')
                key2language = dict(py='python', js='javascript', html='html')
                language = key2language[key.replace('hid', '')]
                # BUGFIX: str(i) - joining ints raises TypeError.
                # NOTE(review): activecode_counter is assigned only in the
                # later counting pass - confirm this branch is reachable.
                include = ', '.join([str(i) for i in range(1, activecode_counter)])
                filestr = re.sub(r'^!bc +%s\s*\n' % key,
                                 """
.. activecode:: activecode_
   :language: %s
   :include: %s
   :hidecode:
""" % (language, include), filestr, flags=re.MULTILINE)
            else:
                # Remove hidden code block
                pattern = r'^!bc +%s\n.+?^!ec' % key
                filestr = re.sub(pattern, '', filestr,
                                 flags=re.MULTILINE|re.DOTALL)
        else:
            show_hide = False
            if key.endswith('-h'):
                key_orig = key
                key = key[:-2]
                show_hide = True
            # Use the standard sphinx code-block directive
            if key in envir2pygments:
                pygments_language = envir2pygments[key]
            elif key in legal_pygments_languages:
                pygments_language = key
            else:
                errwarn('*** error: detected code environment "%s"' % key)
                errwarn(' which is not registered in sphinx.py (sphinx_code)')
                errwarn(' or not a language registered in pygments')
                _abort()
            if show_hide:
                filestr = re.sub(r'^!bc +%s\s*\n' % key_orig,
                                 '\n.. container:: toggle\n\n .. container:: header\n\n **Show/Hide Code**\n\n .. code-block:: %s\n\n' % \
                                 pygments_language, filestr, flags=re.MULTILINE)
                # Must add 4 indent in corresponding code_blocks[i], done above
            else:
                filestr = re.sub(r'^!bc +%s\s*\n' % key,
                                 '\n.. code-block:: %s\n\n' % \
                                 pygments_language, filestr, flags=re.MULTILINE)
    # any !bc with/without argument becomes a text block:
    filestr = re.sub(r'^!bc.*$', '\n.. code-block:: text\n\n', filestr,
                     flags=re.MULTILINE)
    filestr = re.sub(r'^!ec *\n', '\n', filestr, flags=re.MULTILINE)
    #filestr = re.sub(r'^!ec\n', '\n', filestr, flags=re.MULTILINE)
    #filestr = re.sub(r'^!ec\n', '', filestr, flags=re.MULTILINE)
    filestr = re.sub(r'^!bt *\n', '\n.. math::\n', filestr, flags=re.MULTILINE)
    filestr = re.sub(r'^!et *\n', '\n', filestr, flags=re.MULTILINE)
    # Fix lacking blank line after :label:
    filestr = re.sub(r'^( :label: .+?)(\n *[^ ]+)', r'\g<1>\n\n\g<2>',
                     filestr, flags=re.MULTILINE)
    # Insert counters for runestone blocks
    if option('runestone'):
        codelens_counter = 0
        activecode_counter = 0
        lines = filestr.splitlines()
        for i in range(len(lines)):
            if '.. codelens:: codelens_' in lines[i]:
                codelens_counter += 1
                lines[i] = lines[i].replace('codelens_', 'codelens_%d' %
                                            codelens_counter)
            if '.. activecode:: activecode_' in lines[i]:
                activecode_counter += 1
                lines[i] = lines[i].replace('activecode_', 'activecode_%d' %
                                            activecode_counter)
        filestr = '\n'.join(lines)
    # Final fixes
    filestr = fix_underlines_in_headings(filestr)
    # Ensure blank line before and after comments
    filestr = re.sub(r'([.:;?!])\n^\.\. ', r'\g<1>\n\n.. ',
                     filestr, flags=re.MULTILINE)
    filestr = re.sub(r'(^\.\. .+)\n([^ \n]+)', r'\g<1>\n\n\g<2>',
                     filestr, flags=re.MULTILINE)
    # Line breaks interfer with tables and needs a final blank line too
    lines = filestr.splitlines()
    inside_block = False
    for i in range(len(lines)):
        if lines[i].startswith('<linebreakpipe>') and not inside_block:
            inside_block = True
            lines[i] = lines[i].replace('<linebreakpipe> ', '') + '\n'
            continue
        if lines[i].startswith('<linebreakpipe>') and inside_block:
            lines[i] = '|' + lines[i].replace('<linebreakpipe>', '')
            continue
        if inside_block and not lines[i].startswith('<linebreakpipe>'):
            inside_block = False
            lines[i] = '| ' + lines[i] + '\n'
    filestr = '\n'.join(lines)
    # Remove double !split (TOC with a prefix !split gives two !splits)
    pattern = '^.. !split\s+.. !split'
    filestr = re.sub(pattern, '.. !split', filestr, flags=re.MULTILINE)
    if option('html_links_in_new_window'):
        # Insert a comment to be recognized by automake_sphinx.py such that it
        # can replace the default links by proper modified target= option.
        #filestr = '\n\n.. NOTE: Open external links in new windows.\n\n' + filestr
        # Use JavaScript instead
        filestr = """.. raw:: html
<script type="text/javascript">
$(document).ready(function() {
$("a[href^='http']").attr('target','_blank');
});
</script>
""" + filestr
    # Remove too much vertical space
    filestr = re.sub(r'\n{3,}', '\n\n', filestr)
    return filestr
def sphinx_ref_and_label(section_label2title, format, filestr):
    """Handle !split placement and translate DocOnce ref{}/label{} to
    Sphinx cross references (:ref:`...`).

    Unless --sphinx_keep_splits is given, all existing !split commands are
    replaced by new ones placed before every topmost section heading.
    """
    # Special fix early in the process:
    # Deal with !split - by default we place splits before
    # the all the topmost sections
    # (This must be done before labels are put above section
    # headings)
    if '!split' in filestr and not option('sphinx_keep_splits'):
        errwarn('*** warning: new !split inserted (override all existing !split)')
        # Note: the title is at this stage translated to a chapter heading!
        # This title/heading must be removed for the algorithm below to work
        # (remove it, then insert afterwards)
        pattern = r'^.. Document title:\n\n={3,9}.+?={3,9}'
        m = re.search(pattern, filestr, flags=re.MULTILINE)
        title_replacement = '<<<<<<<DOCUMENT TITLE>>>>>>>>>>>>' # "unlikely" str
        if m:
            title = m.group()
            filestr = filestr.replace(title, title_replacement)
        else:
            title = ''
        # Find the topmost heading level used: 9, 7 or 5 '=' characters
        topmost_section = 0
        for i in [9, 7, 5]:
            if re.search(r'^%s' % ('='*i), filestr, flags=re.MULTILINE):
                topmost_section = i
                errwarn(' before every %s heading %s' % \
                        ('='*topmost_section, '='*topmost_section))
                errwarn(' because this strategy gives a well-functioning')
                errwarn(' table of contents in Sphinx')
                errwarn(' (use --sphinx_keep_splits to enforce your own !split commands)')
                break
        if topmost_section:
            # First remove all !split
            filestr = re.sub(r'^!split *\n', '', filestr, flags=re.MULTILINE)
            # Insert new splits before all topmost sections
            pattern = r'^%s (.+?) %s' % \
                      ('='*topmost_section, '='*topmost_section)
            lines = filestr.splitlines()
            for i in range(len(lines)):
                if re.search(pattern, lines[i]):
                    lines[i] = '!split\n' + lines[i]
            filestr = '\n'.join(lines)
        # restore the document title removed above
        filestr = filestr.replace(title_replacement, title)
    filestr = ref_and_label_commoncode(section_label2title, format, filestr)
    # replace all references to sections:
    for label in section_label2title:
        filestr = filestr.replace('ref{%s}' % label, ':ref:`%s`' % label)
    # Not of interest after sphinx got equation references:
    #from common import ref2equations
    #filestr = ref2equations(filestr)
    # Replace remaining ref{x} as :ref:`x`
    filestr = re.sub(r'ref\{(.+?)\}', ':ref:`\g<1>`', filestr)
    return filestr
def sphinx_index_bib(filestr, index, citations, pubfile, pubdata):
    """Translate DocOnce idx{...} commands to Sphinx ``.. index::``
    directives and delegate bibliography handling to ``rst_bib``.

    ``!`` in an index word denotes a subentry, a comma forces the
    ``single:`` form; verbatim/bold/emphasize/math markup is stripped
    from the directive text.
    """
    filestr = rst_bib(filestr, citations, pubfile, pubdata)
    from .common import INLINE_TAGS
    for word in index:
        # Drop verbatim, emphasize, bold, and math in index
        word2 = word.replace('`', '')
        word2 = word2.replace('$', '').replace('\\', '')
        word2 = re.sub(INLINE_TAGS['bold'],
                       r'\g<begin>\g<subst>\g<end>', word2,
                       flags=re.MULTILINE)
        word2 = re.sub(INLINE_TAGS['emphasize'],
                       r'\g<begin>\g<subst>\g<end>', word2,
                       flags=re.MULTILINE)
        # Typeset idx{word} as ..index::
        if '!' not in word and ',' not in word:
            # .. index:: keyword
            filestr = filestr.replace(
                'idx{%s}' % word,
                '\n.. index:: ' + word2 + '\n')
        elif '!' not in word:
            # .. index::
            # single: keyword with comma
            filestr = filestr.replace(
                'idx{%s}' % word,
                '\n.. index::\n single: ' + word2 + '\n')
        else:
            # .. index::
            # single: keyword; subentry
            word3 = word2.replace('!', '; ')
            filestr = filestr.replace(
                'idx{%s}' % word,
                '\n.. index::\n single: ' + word3 + '\n')
            # Symmetric keyword; subentry and subentry; keyword
            #filestr = filestr.replace(
            # 'idx{%s}' % word,
            # '\n.. index::\n pair: ' + word3 + '\n')
    return filestr
def sphinx_inline_comment(m):
    """Typeset a DocOnce inline comment for sphinx/rst.

    ``m`` has groups 'name' and 'comment'. Edit markup (add/del/replace
    via ``->``) is rendered with bold text since explicit HTML does not
    work in rst; a one-time warning recommends other formats for edits.
    """
    # Explicit HTML typesetting does not work, we just use bold
    name = m.group('name').strip()
    comment = m.group('comment').strip()
    global edit_markup_warning
    if (not edit_markup_warning) and \
       (name[:3] in ('add', 'del', 'edi') or '->' in comment):
        errwarn('*** warning: sphinx/rst is a suboptimal format for')
        errwarn(' typesetting edit markup such as')
        errwarn(' ' + m.group())
        errwarn(' Use HTML or LaTeX output instead, implement the')
        errwarn(' edits (doconce apply_edit_comments) and then use sphinx.')
        edit_markup_warning = True
    # special-case single punctuation characters in add/delete edits
    chars = {',': 'comma', ';': 'semicolon', '.': 'period'}
    if name[:4] == 'del ':
        for char in chars:
            if comment == char:
                return r' (**edit %s**: delete %s)' % (name[4:], chars[char])
        return r'(**edit %s**: **delete** %s)' % (name[4:], comment)
    elif name[:4] == 'add ':
        for char in chars:
            if comment == char:
                return r'%s (**edit %s: add %s**)' % (comment, name[4:], chars[char])
        return r' (**edit %s: add**) %s (**end add**)' % (name[4:], comment)
    else:
        # Ordinary name
        comment = ' '.join(comment.splitlines()) # '\s->\s' -> ' -> '
        if ' -> ' in comment:
            # Replacement
            if comment.count(' -> ') != 1:
                errwarn('*** wrong syntax in inline comment:')
                errwarn(comment)
                errwarn('(more than two ->)')
                _abort()
            orig, new = comment.split(' -> ')
            return r'(**%s: remove** %s) (**insert:**)%s (**end insert**)' % (name, orig, new)
        else:
            # Ordinary comment
            return r'[**%s**: %s]' % (name, comment)
def define(FILENAME_EXTENSION,
           BLANKLINE,
           INLINE_TAGS_SUBST,
           CODE,
           LIST,
           ARGLIST,
           TABLE,
           EXERCISE,
           FIGURE_EXT,
           CROSS_REFS,
           INDEX_BIB,
           TOC,
           ENVIRS,
           QUIZ,
           INTRO,
           OUTRO,
           filestr):
    """Register the 'sphinx' output format in the DocOnce dispatch tables.

    Mostly reuses the 'rst' entries (defining them first if needed) and
    overrides code typesetting, cross references, index/bib handling,
    figures, movies, inline comments, math, and quizzes with the
    sphinx-specific functions in this module.
    """
    if not 'rst' in BLANKLINE:
        # rst.define is not yet ran on these dictionaries, do it:
        from . import rst
        rst.define(FILENAME_EXTENSION,
                   BLANKLINE,
                   INLINE_TAGS_SUBST,
                   CODE,
                   LIST,
                   ARGLIST,
                   TABLE,
                   FIGURE_EXT,
                   INTRO,
                   OUTRO,
                   filestr)
    FILENAME_EXTENSION['sphinx'] = FILENAME_EXTENSION['rst']
    BLANKLINE['sphinx'] = BLANKLINE['rst']
    CODE['sphinx'] = CODE['rst']
    LIST['sphinx'] = LIST['rst']
    FIGURE_EXT['sphinx'] = {
        'search': ('.png', '.gif', '.jpg', '.jpeg'),
        'convert': ('.png', '.gif', '.jpg')}
    CROSS_REFS['sphinx'] = sphinx_ref_and_label
    INDEX_BIB['sphinx'] = sphinx_index_bib
    TABLE['sphinx'] = TABLE['rst']
    EXERCISE['sphinx'] = EXERCISE['rst']
    ENVIRS['sphinx'] = ENVIRS['rst']
    INTRO['sphinx'] = INTRO['rst'].replace(
        '.. Automatically generated reStructuredText',
        '.. Automatically generated Sphinx-extended reStructuredText')
    # make true copy of INLINE_TAGS_SUBST:
    INLINE_TAGS_SUBST['sphinx'] = {}
    for tag in INLINE_TAGS_SUBST['rst']:
        INLINE_TAGS_SUBST['sphinx'][tag] = INLINE_TAGS_SUBST['rst'][tag]
    # modify some tags:
    #INLINE_TAGS_SUBST['sphinx']['math'] = r'\g<begin>:math:`\g<subst>`\g<end>'
    # Important to strip the math expression
    INLINE_TAGS_SUBST['sphinx']['math'] = lambda m: r'%s:math:`%s`%s' % (m.group('begin'), m.group('subst').strip(), m.group('end'))
    #INLINE_TAGS_SUBST['sphinx']['math2'] = r'\g<begin>:math:`\g<latexmath>`\g<end>'
    INLINE_TAGS_SUBST['sphinx']['math2'] = lambda m: r'%s:math:`%s`%s' % (m.group('begin'), m.group('latexmath').strip(), m.group('end'))
    INLINE_TAGS_SUBST['sphinx']['figure'] = sphinx_figure
    INLINE_TAGS_SUBST['sphinx']['movie'] = sphinx_movie
    INLINE_TAGS_SUBST['sphinx']['inlinecomment'] = sphinx_inline_comment
    CODE['sphinx'] = sphinx_code # function for typesetting code
    ARGLIST['sphinx'] = {
        'parameter': ':param',
        'keyword': ':keyword',
        'return': ':return',
        'instance variable': ':ivar',
        'class variable': ':cvar',
        'module variable': ':var',
        }
    TOC['sphinx'] = lambda s, f: '' # Sphinx automatically generates a toc
    QUIZ['sphinx'] = sphinx_quiz
#---------------------------------------------------------------------------
def sphinx_code_orig(filestr, format):
    """Legacy (dead) version of sphinx_code; kept for reference only."""
    # NOTE: THIS FUNCTION IS NOT USED!!!!!!
    # In rst syntax, code blocks are typeset with :: (verbatim)
    # followed by intended blocks. This function indents everything
    # inside code (or TeX) blocks.
    # grab #sphinx code-blocks: cod=python cpp=c++ etc line
    # (do this before code is inserted in case verbatim blocks contain
    # such specifications for illustration)
    m = re.search(r'#\s*[Ss]phinx\s+code-blocks?:(.+?)\n', filestr)
    if m:
        defs_line = m.group(1)
        # turn defs into a dictionary definition:
        defs = {}
        for definition in defs_line.split():
            key, value = definition.split('=')
            defs[key] = value
    else:
        # default mappings:
        defs = dict(cod='python',
                    pro='python',
                    pycod='python', cycod='cython',
                    pypro='python', cypro='cython',
                    fcod='fortran', fpro='fortran',
                    ccod='c', cppcod='c++',
                    cpro='c', cpppro='c++',
                    mcod='matlab', mpro='matlab',
                    plcod='perl', plpro='perl',
                    shcod='bash', shpro='bash',
                    rbcod='ruby', rbpro='ruby',
                    sys='console',
                    dat='python',
                    ipy='python',
                    xmlcod='xml', xmlpro='xml', xml='xml',
                    htmlcod='html', htmlpro='html', html='html',
                    texcod='latex', texpro='latex', tex='latex',
                    )
        # (the "python" typesetting is neutral if the text
        # does not parse as python)
    # first indent all code/tex blocks by 1) extracting all blocks,
    # 2) intending each block, and 3) inserting the blocks:
    filestr, code_blocks, tex_blocks = remove_code_and_tex(filestr, format)
    for i in range(len(code_blocks)):
        code_blocks[i] = indent_lines(code_blocks[i], format)
    for i in range(len(tex_blocks)):
        tex_blocks[i] = indent_lines(tex_blocks[i], format)
        # remove all \label{}s inside tex blocks:
        tex_blocks[i] = re.sub(fix_latex(r'\label\{.+?\}', application='match'),
                               '', tex_blocks[i])
        # remove those without \ if there are any:
        tex_blocks[i] = re.sub(r'label\{.+?\}', '', tex_blocks[i])
        # side effects: `label{eq1}` as verbatim, but this is mostly a
        # problem for doconce documentation and can be rephrased...
        # fix latex constructions that do not work with sphinx math
        commands = [r'\begin{equation}',
                    r'\end{equation}',
                    r'\begin{equation*}',
                    r'\end{equation*}',
                    r'\begin{eqnarray}',
                    r'\end{eqnarray}',
                    r'\begin{eqnarray*}',
                    r'\end{eqnarray*}',
                    r'\begin{align}',
                    r'\end{align}',
                    r'\begin{align*}',
                    r'\end{align*}',
                    r'\begin{multline}',
                    r'\end{multline}',
                    r'\begin{multline*}',
                    r'\end{multline*}',
                    r'\begin{split}',
                    r'\end{split}',
                    r'\begin{gather}',
                    r'\end{gather}',
                    r'\begin{gather*}',
                    r'\end{gather*}',
                    r'\[',
                    r'\]',
                    # some common abbreviations (newcommands):
                    r'\beqan',
                    r'\eeqan',
                    r'\beqa',
                    r'\eeqa',
                    r'\balnn',
                    r'\ealnn',
                    r'\baln',
                    r'\ealn',
                    r'\beq',
                    r'\eeq', # the simplest, contained in others, must come last!
                    ]
        for command in commands:
            tex_blocks[i] = tex_blocks[i].replace(command, '')
        tex_blocks[i] = re.sub('&\s*=\s*&', ' &= ', tex_blocks[i])
        # provide warnings for problematic environments
        #if '{alignat' in tex_blocks[i]:
        # errwarn('*** warning: the "alignat" environment will give errors in Sphinx:\n' + tex_blocks[i] + '\n')
    filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'rst')
    # NOTE(review): legal_pygments_languages is not defined in this scope,
    # and errwarn(key + ' ' + n) concatenates str and int (TypeError) -
    # harmless only because this function is unused.
    for key in defs:
        language = defs[key]
        if not language in legal_pygments_languages:
            raise TypeError('%s is not a legal Pygments language '\
                            '(lexer) in line with:\n %s' % \
                            (language, defs_line))
        #filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
        # '\n.. code-block:: %s\n\n' % defs[key], filestr,
        # flags=re.MULTILINE)
        cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
        filestr, n = cpattern.subn('\n.. code-block:: %s\n\n' % defs[key], filestr)
        errwarn(key + ' ' + n)
        if n > 0:
            errwarn('sphinx: %d subst %s by %s' % (n, key, defs[key]))
    # any !bc with/without argument becomes a py (python) block:
    #filestr = re.sub(r'^!bc.+\n', '\n.. code-block:: py\n\n', filestr,
    # flags=re.MULTILINE)
    cpattern = re.compile(r'^!bc.+$', flags=re.MULTILINE)
    filestr = cpattern.sub('\n.. code-block:: py\n\n', filestr)
    filestr = re.sub(r'^!ec *\n', '\n', filestr, flags=re.MULTILINE)
    #filestr = re.sub(r'^!ec\n', '\n', filestr, flags=re.MULTILINE)
    #filestr = re.sub(r'^!ec\n', '', filestr, flags=re.MULTILINE)
    filestr = re.sub(r'^!bt *\n', '\n.. math::\n\n', filestr,
                     flags=re.MULTILINE)
    filestr = re.sub(r'^!et *\n', '\n\n', filestr,
                     flags=re.MULTILINE)
    return filestr
def sphinx_code_newmathlabels(filestr, format):
    """Legacy (dead) sphinx_code variant that kept math labels as
    :label: tags; kept for reference only."""
    # NOTE: THIS FUNCTION IS NOT USED!!!!!!
    # In rst syntax, code blocks are typeset with :: (verbatim)
    # followed by intended blocks. This function indents everything
    # inside code (or TeX) blocks.
    # grab #sphinx code-blocks: cod=python cpp=c++ etc line
    # (do this before code is inserted in case verbatim blocks contain
    # such specifications for illustration)
    m = re.search(r'#\s*[Ss]phinx\s+code-blocks?:(.+?)\n', filestr)
    if m:
        defs_line = m.group(1)
        # turn defs into a dictionary definition:
        defs = {}
        for definition in defs_line.split():
            key, value = definition.split('=')
            defs[key] = value
    else:
        # default mappings:
        defs = dict(cod='python', pycod='python', cppcod='c++',
                    fcod='fortran', ccod='c',
                    pro='python', pypro='python', cpppro='c++',
                    fpro='fortran', cpro='c',
                    sys='console', dat='python')
        # (the "python" typesetting is neutral if the text
        # does not parse as python)
    # First indent all code/tex blocks by 1) extracting all blocks,
    # 2) intending each block, and 3) inserting the blocks.
    # In between, handle the math blocks.
    filestr, code_blocks, tex_blocks = remove_code_and_tex(filestr, format)
    for i in range(len(code_blocks)):
        code_blocks[i] = indent_lines(code_blocks[i], format)
    math_labels = []
    for i in range(len(tex_blocks)):
        tex_blocks[i] = indent_lines(tex_blocks[i], format)
        # extract all \label{}s inside tex blocks and typeset them
        # with :label: tags
        label_regex1 = fix_latex(r'\label\{(.+?)\}', application='match')
        label_regex2 = fix_latex( r'label\{(.+?)\}', application='match')
        math_labels.extend(re.findall(label_regex1, tex_blocks[i]))
        tex_blocks[i] = re.sub(label_regex1,
                               r' :label: \g<1> ', tex_blocks[i])
        # handle also those without \ if there are any:
        math_labels.extend(re.findall(label_regex2, tex_blocks[i]))
        tex_blocks[i] = re.sub(label_regex2, r' :label: \g<1> ', tex_blocks[i])
    # replace all references to equations:
    for label in math_labels:
        filestr = filestr.replace(':ref:`%s`' % label, ':eq:`%s`' % label)
    filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'rst')
    # NOTE(review): legal_pygments_languages is not defined in this scope
    # (NameError if this dead function were ever called).
    for key in defs:
        language = defs[key]
        if not language in legal_pygments_languages:
            raise TypeError('%s is not a legal Pygments language '\
                            '(lexer) in line with:\n %s' % \
                            (language, defs_line))
        #filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
        # '\n.. code-block:: %s\n\n' % defs[key], filestr,
        # flags=re.MULTILINE)
        cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
        filestr = cpattern.sub('\n.. code-block:: %s\n\n' % defs[key], filestr)
    # any !bc with/without argument becomes a py (python) block:
    #filestr = re.sub(r'^!bc.+\n', '\n.. code-block:: py\n\n', filestr,
    # flags=re.MULTILINE)
    cpattern = re.compile(r'^!bc.+$', flags=re.MULTILINE)
    filestr = cpattern.sub('\n.. code-block:: py\n\n', filestr)
    filestr = re.sub(r'!ec *\n', '\n', filestr)
    #filestr = re.sub(r'!ec\n', '\n', filestr)
    #filestr = re.sub(r'!ec\n', '', filestr)
    filestr = re.sub(r'!bt *\n', '\n.. math::\n :nowrap:\n\n', filestr)
    filestr = re.sub(r'!et *\n', '\n\n', filestr)
    return filestr
|
UTF-8
|
Python
| false | false | 40,910 |
py
| 371 |
sphinx.py
| 86 | 0.521168 | 0.517551 | 0 | 992 | 40.239919 | 170 |
AnshumanKumar14/python_basics
| 16,973,710,765,056 |
a5870aff777f4630085081d893002c9fab90a4d9
|
5af61e0f80badfd9383e5da275c246ac1f52d1b3
|
/second.py
|
141eaed1dbb20e9d26ceecc7e56eccd1114b53b4
|
[] |
no_license
|
https://github.com/AnshumanKumar14/python_basics
|
759728146a28285a8c63410cf2ff768e8c63a94e
|
21de598f21d66a8ee3a3b29e506ea3e893b3c15f
|
refs/heads/master
| 2020-05-21T16:47:00.417079 | 2019-06-14T16:23:56 | 2019-06-14T16:23:56 | 186,109,280 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Demonstrate str.split on a comma-separated string.
names = 'anshuman, satya, shikha, dipkia'
print(names.split(','))
# A list and a dict holding user information.
userinfolist = ['anshu', '29', 'male', 'gurgaon']
userinfodict = {'name': 'Anshu', 'age': 29}
# Personal details used in the string-formatting examples below.
firstname = 'Anshuman'
lastname = 'Tiwari'
fullname = ' '.join((firstname, lastname))
age = 29
location = 'Gurgaon'
office = 'Sapient'
# First way of building a formatted string: str.format on a template.
template = 'My information is -> Fullname: {}, Age: {}, Location: {}, Office: {}'
myinfo = template.format(fullname, age, location, office)
print(myinfo)  # show the .format result
# Second way: an f-string producing the same text.
myinfo = f'My information is -> Fullname: {fullname}, Age: {age}, Location: {location}, Office: {office}'
|
UTF-8
|
Python
| false | false | 750 |
py
| 8 |
second.py
| 8 | 0.689333 | 0.681333 | 0 | 24 | 30.291667 | 120 |
alejo8591/culttume2
| 7,567,732,406,231 |
2aec1fef920378fe1c544755ab792ff9eae7798b
|
a6f45a369ba7988ebd0c6eb863af23f3f56d7485
|
/mysite/polls/views.py
|
109346c2a7fa4b84306294335ed8dda12e08c20a
|
[
"MIT"
] |
permissive
|
https://github.com/alejo8591/culttume2
|
6718c09508b8ba718296d8e4726bd0eb2e778e0f
|
9c38afa715eef0fee750ed2694077e5986144cca
|
refs/heads/master
| 2020-04-04T15:18:19.473200 | 2014-02-04T19:34:57 | 2014-02-04T19:34:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.http import HttpResponse
from polls.models import Poll
def index(request):
latest_poll_list = Poll.objects.order_by('-pub_date')[:5]
output = ', '.join([p.question for p in latest_poll_list])
return HttpResponse(output)
def detail(request, poll_id):
return HttpResponse("Your looking details of polls %s" % poll_id)
def results(request, poll_id):
return HttpResponse("Your looking result of polls %s" % poll_id)
def vote(request, poll_id):
return HttpResponse("You're voting on poll %s" %poll_id)
|
UTF-8
|
Python
| false | false | 518 |
py
| 4 |
views.py
| 1 | 0.735521 | 0.733591 | 0 | 16 | 31.4375 | 66 |
whisperity/dotfiles-framework
| 2,808,908,616,530 |
f152abd2ec4024516573e258438940e4c1ef9443
|
261169a74d78f85c51f98c38a592ee8edf666640
|
/dotfiles/saved_data.py
|
62283faa9645d0400f77423c9296b38a22df9afe
|
[] |
no_license
|
https://github.com/whisperity/dotfiles-framework
|
0684786189821a2d31f6a8987d9737843469a769
|
36d617f5629134ad8cffbf99aca76ea87146a47b
|
refs/heads/master
| 2023-08-27T21:12:01.086588 | 2022-12-20T10:58:37 | 2022-12-20T10:58:37 | 342,549,225 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from contextlib import contextmanager
from datetime import datetime
import json
import os
import zipfile
from dotfiles.status import Status
class UserSave:
"""
The `UserSave` instance represents the persistent storage where the
Dotfiles-specific user information, such as list of installed packages,
is stored.
"""
config_dir = os.path.join(os.path.expanduser('~'),
'.local', 'share', 'Dotfiles')
state_file = os.path.join(config_dir, 'state.json')
lock_file = os.path.join(config_dir, '.state.json.lock')
def __init__(self):
"""
Initialize the storage by loading the save from the user's disk.
"""
# Handle a simple indicator lock on the state file.
if os.path.exists(UserSave.lock_file):
with open(UserSave.lock_file, 'r') as lock_fd:
lock_pid = lock_fd.readline()
raise PermissionError("The configuration state is locked "
"by " + lock_pid)
try:
# Open the file normally.
self._handle = open(UserSave.state_file, 'r+')
self._lock_handle = open(UserSave.lock_file, 'w+')
try:
self._data = json.load(self._handle)
except json.JSONDecodeError:
self.close()
raise
except OSError:
# If the file doesn't exist, create it.
os.makedirs(os.path.dirname(UserSave.state_file), exist_ok=True)
self._handle = open(UserSave.state_file, 'w+')
self._lock_handle = open(UserSave.lock_file, 'w+')
self._data = {
'packages': {}
}
# Prepare the file to have a valid format.
json.dump(self._data, self._handle)
# Mark the state locked when the process starts running.
self._lock_handle.write(".pid: " + str(os.getpid()) + '\n')
self._lock_handle.flush()
self._uncommitted_archives = {}
def __del__(self):
self.close()
def close(self):
"""
Gracefully flush status changes to disk, close the open resources
and clear up.
"""
if getattr(self, '_handle', None) and not self._handle.closed:
if getattr(self, '_data', None):
self._handle.seek(0)
self._handle.truncate(0)
json.dump(self._data, self._handle,
indent=None,
separators=(',', ':'))
self._handle.close()
if getattr(self, '_lock_handle', None):
self._lock_handle.close()
try:
os.unlink(UserSave.lock_file)
except FileNotFoundError:
pass
def is_installed(self, package_name):
"""
:return: If the saved configuration has the package marked as
installed.
"""
return self._data['packages'].get(
package_name, {'status': Status.NOT_INSTALLED.name})['status'] == \
Status.INSTALLED.name
def save_status(self, package):
"""
Saves the status information of the given package to the user
configuration.
"""
pkg_dict = self._data['packages'].get(package.name, {})
if not pkg_dict:
self._data['packages'][package.name] = pkg_dict
pkg_dict['status'] = package.status.name
pkg_dict_st_changes = pkg_dict.get('latest_status_changes', {})
if not pkg_dict_st_changes:
pkg_dict['latest_status_changes'] = pkg_dict_st_changes
pkg_dict_st_changes[package.status.name] = datetime.now().isoformat()
try:
pkg_dict['relevant_backup'] = os.path.basename(
self._uncommitted_archives[package.name])
except KeyError:
# If a backup is not relevant, make sure the key is removed.
try:
del pkg_dict['relevant_backup']
except KeyError: # The key wasn't there.
pass
@property
def installed_packages(self):
"""
Generates the collection of packages that are considered installed.
"""
for package, st_dict in self._data['packages'].items():
if st_dict['status'] == Status.INSTALLED.name:
yield package
@contextmanager
def get_package_archive(self, package_name):
"""
Returns the `zipfile.ZipFile` context for the backup storage of the
given `package`.
If the package is not yet installed, a new archive will be created and
it's context will be returned. (Multiple calls in the program to this
function will return the same archive.)
If the package has been installed, return the archive - if exists -
that corresponds to the state of the most recent install.
"""
if self.is_installed(package_name):
mode = 'r'
archive = os.path.join(
UserSave.config_dir,
self._data['packages'][package_name].get('relevant_backup'))
else:
mode = 'a'
archive = self._uncommitted_archives.get(package_name)
if not archive:
print("Creating backup archive for '%s'..." % package_name)
archive = os.path.join(UserSave.config_dir,
package_name + '_' +
datetime.now().strftime('%s') +
'_0.zip')
while os.path.isfile(archive):
# Unlikely, but the user might end up running the same
# installer in a quick succession, in which these archives
# could end up being filled multiple times.
archive = archive.split('_')
counter = int(archive[-1].replace('.zip', '', 1)) + 1
archive[-1] = str(counter) + '.zip'
archive = '_'.join(archive)
self._uncommitted_archives[package_name] = archive
zip_ = zipfile.ZipFile(archive, mode,
compression=zipfile.ZIP_DEFLATED)
try:
yield zip_
finally:
zip_.close()
|
UTF-8
|
Python
| false | false | 6,355 |
py
| 30 |
saved_data.py
| 27 | 0.536585 | 0.535484 | 0 | 174 | 35.522989 | 79 |
Aasthaengg/IBMdataset
| 3,770,981,288,469 |
7f524dcd58a2bfcae52f1c4c58519f9f24afda38
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02398/s692824965.py
|
a88e01e4501b902b759539766f671a1ec5e4f610
|
[] |
no_license
|
https://github.com/Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
a, b, c = [int(w) for w in input().split()]
print(len([i for i in range(a, b+1) if c % i == 0]))
|
UTF-8
|
Python
| false | false | 96 |
py
| 202,060 |
s692824965.py
| 202,055 | 0.53125 | 0.510417 | 0 | 2 | 47.5 | 52 |
yudumerg/ArucoDetection_FindDistanceCameraandObject
| 11,845,519,837,816 |
300756af392fd25b267f5604ca9371171146b478
|
70e3238b22201b60ca0b2151b5d1004c4a4d8786
|
/detect_aruco_video.py
|
101d247fa56080c7fa719fa212e8b8818d4a68d5
|
[] |
no_license
|
https://github.com/yudumerg/ArucoDetection_FindDistanceCameraandObject
|
92c0f1d69bdf6d1ffaad788e9f7fccedd3c6174a
|
924054578f9320b45352204f3145d852af3f5b04
|
refs/heads/main
| 2023-02-27T04:44:44.617442 | 2021-02-10T10:20:36 | 2021-02-10T10:20:36 | 337,686,436 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from imutils.video import VideoStream
import argparse
import imutils
import time
import cv2
import sys
import numpy as np
def distance_to_camera(knownWidth,focalLength, perWidth):
return (knownWidth * focalLength) / perWidth
KNOWN_DISTANCE = 60.0 #Distance from camera to marker
KNOWN_WIDTH = 6.3 #original width of marker
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--type", type=str,
default="DICT_ARUCO_ORIGINAL",
help="type of ArUCo tag to detect")
args = vars(ap.parse_args())
# define names of 7x7 aruco
ARUCO_DICT = {
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL
}
if ARUCO_DICT.get(args["type"], None) is None:
print("[INFO] ArUCo tag of '{}' is not supported".format(
args["type"]))
sys.exit(0)
# Loading the ArUco dictionary
print("[INFO] detecting '{}' tags...".format(args["type"]))
arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[args["type"]])
arucoParams = cv2.aruco.DetectorParameters_create()
#Video Stream Settings
print("[INFO] starting video stream...")
vs = VideoStream(src=1).start()
time.sleep(2.0)
frame = cv2.imread(r'C:\Users\asus\Desktop\pycharm\pythonProject1\GP\detection\cameracalibration.jpeg')
x = imutils.resize(frame, width=1000)
# detect ArUco markers in the video stream
(corners, ids, rejected) = cv2.aruco.detectMarkers(frame,arucoDict, parameters=arucoParams)
# verify *at least* one ArUco marker was detected
if len(corners) > 0:
# flatten the ArUco IDs list
ids = ids.flatten()
# loop over the detected ArUCo corners
for (markerCorner, markerID) in zip(corners,ids):
# extract the marker corners (which are always returned in top-left, top-right, bottom-right, and bottom-left order)
corners = markerCorner.reshape((4, 2))
(topLeft, topRight, bottomRight, bottomLeft) = corners
# convert each of the (x, y)-coordinate pairs to integers
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
font = cv2.FONT_HERSHEY_PLAIN
x_max = int(topRight[0])
x_min = int(topRight[0])
if (int(bottomRight[0]) > x_max):
x_max = int(bottomRight[0])
elif (int(bottomLeft[0]) > x_max):
x_max = int(bottomLeft[0])
elif (int(topLeft[0]) > x_max):
x_max = int(topLeft[0])
if (int(bottomRight[0]) < x_min):
x_min = int(bottomRight[0])
elif (int(bottomLeft[0]) < x_min):
x_min = int(bottomLeft[0])
elif (int(topLeft[0]) < x_min):
x_min = int(topLeft[0])
initialWidth = x_max - x_min
# draw the bounding box of the ArUCo detection
cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)
break
focalLength = (initialWidth * KNOWN_DISTANCE) / KNOWN_WIDTH
while True:
frame = vs.read()
frame = imutils.resize(frame, width=1000)
# detect ArUco markers in the input frame
(corners, ids, rejected) = cv2.aruco.detectMarkers(frame, arucoDict, parameters=arucoParams)
if len(corners) > 0:
# flatten the ArUco IDs list
ids = ids.flatten()
for (markerCorner, markerID) in zip(corners, ids):
# returned in top-left, top-right, bottom-right, and bottom-left order)
corners = markerCorner.reshape((4, 2))
(topLeft, topRight, bottomRight, bottomLeft) = corners
# convert each of the (x, y)-coordinate pairs to integers
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
x_mean = int((int(topRight[0]) + int(bottomRight[0]) + int(bottomLeft[0]) + int(topLeft[0]))/4)
y_mean = int((int(topRight[1]) + int(bottomRight[1]) + int(bottomLeft[1]) + int(topLeft[1]))/4)
font = cv2.FONT_HERSHEY_PLAIN
x_max = int(topRight[0])
x_min = int(topRight[0])
if(int(bottomRight[0]) > x_max):
x_max = int(bottomRight[0])
elif(int(bottomLeft[0]) > x_max):
x_max = int(bottomLeft[0])
elif(int(topLeft[0]) > x_max):
x_max = int(topLeft[0])
if(int(bottomRight[0]) < x_min):
x_min = int(bottomRight[0])
elif(int(bottomLeft[0]) < x_min):
x_min = int(bottomLeft[0])
elif(int(topLeft[0]) < x_min):
x_min = int(topLeft[0])
w = x_max - x_min
if (w != 0):
inches = distance_to_camera(KNOWN_WIDTH, focalLength, w)
cv2.putText(frame, "%.2fcm" % (inches),
(x_mean, y_mean), cv2.FONT_HERSHEY_SIMPLEX,
0.6, (0, 0, 255), 2)
# draw the bounding box of the ArUCo detection
cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)
# compute and draw the center (x, y)-coordinates of the ArUco marker
cX = int((topLeft[0] + bottomRight[0]) / 2.0)
cY = int((topLeft[1] + bottomRight[1]) / 2.0)
center = (cX,cY)
radius = 5
cv2.circle(frame, center, radius, (0, 255, 255), -1)
print("Center coordinate of marker: " + str(center))
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows()
|
UTF-8
|
Python
| false | false | 5,646 |
py
| 2 |
detect_aruco_video.py
| 1 | 0.666312 | 0.626638 | 0 | 188 | 29.031915 | 118 |
LukaszMalucha/Python_ETL
| 12,463,995,143,456 |
2c9057da0635c4ea26bf1e3a833fc959cfb693cb
|
4914c3cfcb2abe020b54a9712d5cd9c61da0f198
|
/Foundations/sqlite_demo.py
|
6962c7d205ea5bcf763f72c0c93b3cb1162e5b24
|
[] |
no_license
|
https://github.com/LukaszMalucha/Python_ETL
|
c3edc8e6d0c477b1961072e6f8e534f06dab05a7
|
234f90014d76bef4041d1d17b698dcbe7539ac4f
|
refs/heads/master
| 2020-03-31T08:47:26.425996 | 2018-10-14T08:16:44 | 2018-10-14T08:16:44 | 152,072,590 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import sqlite3
from employee import Employee
## Connection Object
conn = sqlite3.connect('employee.db')
## For memory quick test runs
conn = sqlite3.connect(':memory:')
## Create cursor
c = conn.cursor()
######################################## CREATE TABLE ##########################
c.execute("""CREATE TABLE employees (
first text,
last text,
pay integer
)""")
## Commit current transaction
conn.commit()
conn.close()
##################################### INSERT DATA ##############################
c.execute("INSERT INTO employees VALUES ('John', 'Dudley', 50000)")
conn.commit()
conn.close()
################################### INSERT DATA 2 ##############################
emp_1 = Employee('John', 'Dean', 9000)
emp_2 = Employee('Jane', 'Dean', 12000)
## First way
c.execute("INSERT INTO employees VALUES (?, ?, ?)", (emp_1.first, emp_1.last, emp_1.pay))
## Second way
c.execute("INSERT INTO employees VALUES (:first, :last, :pay)", {'first': emp_2.first, 'last': emp_2.last, 'pay':emp_2.pay})
conn.commit()
conn.close()
################################## SELECT STATEMENT ############################
c.execute("SELECT * FROM employees WHERE last=:last", {'last':'Dean'})
# Deifferent fetches
print(c.fetchone())
c.fetchmany(5)
c.fetchall()
|
UTF-8
|
Python
| false | false | 1,422 |
py
| 7 |
sqlite_demo.py
| 5 | 0.489451 | 0.469761 | 0 | 61 | 21.016393 | 124 |
fmpisantos/tese
| 9,131,100,478,763 |
b3babefea97bedf704efa8a4feac091def6b602d
|
edefa8c03edd97bfa9c85251e085f60f3892b9f0
|
/algoritmos/orb.py
|
38a48763ff60cd6e2a177b2af86c84b73c5be67b
|
[] |
no_license
|
https://github.com/fmpisantos/tese
|
2ae76c7e6826ba420edf0086f94db3868f2f38c3
|
443388ad1af689738f74a00d81a4974c4d4c2dbf
|
refs/heads/master
| 2023-06-12T22:17:30.730345 | 2021-07-03T15:50:10 | 2021-07-03T15:50:10 | 348,005,667 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from PIL import Image
import imagehash
def test():
img1 = cv.imread('../Photos/original/img_0576.jpg',3) # queryImage
img2 = cv.imread('../Photos/original/img_0577.jpg',3) # trainImage
b,g,r = cv.split(img1) # get b, g, r
img1 = cv.merge([r,g,b]) # switch it to r, g,
b,g,r = cv.split(img2) # get b, g, r
img2 = cv.merge([r,g,b]) # switch it to r, g,
method = 'ORB' # 'SIFT'
lowe_ratio = 0.89
magic_number = 0.75
if method == 'ORB':
finder = cv.ORB_create()
elif method == 'SIFT':
finder = cv.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = finder.detectAndCompute(img1,None)
kp2, des2 = finder.detectAndCompute(img2,None)
# BFMatcher with default params
bf = cv.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test
good = []
average = 0
for m,n in matches:
if m.distance < lowe_ratio*n.distance:
print(abs(m.distance-n.distance))
average += abs(m.distance-n.distance)
good.append([m])
average /= len(good)
print(average)
msg1 = 'using %s with lowe_ratio %.2f' % (method, lowe_ratio)
msg2 = 'there are %d good matches' % (len(good))
img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,good, None, flags=2)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img3,msg1,(10, 250), font, 0.5,(255,255,255),1,cv.LINE_AA)
cv.putText(img3,msg2,(10, 270), font, 0.5,(255,255,255),1,cv.LINE_AA)
fname = 'output_%s_%.2f.png' % (method, magic_number)
cv.imwrite(fname, img3)
plt.imshow(img3),plt.show()
img = Image.open('../Photos/original/img_0593.jpg')
image_one_hash = imagehash.whash(img)
img2 = Image.open('../Photos/original/img_0581.jpg')
image_two_hash = imagehash.whash(img2)
similarity = image_one_hash - image_two_hash
print(similarity)
|
UTF-8
|
Python
| false | false | 1,990 |
py
| 25 |
orb.py
| 16 | 0.613568 | 0.566332 | 0 | 67 | 28.716418 | 79 |
TomsaBogdan/Exercitiile
| 1,047,972,054,335 |
2e7a4577441f8836fc2dbe8c11e443396b6c6b22
|
24c7bdd4fa3510720c5409a99a756dfaf08f123e
|
/p6.py
|
186f4c6bd98d6934fba04f05de27acf3bc3d052d
|
[] |
no_license
|
https://github.com/TomsaBogdan/Exercitiile
|
acc95b68042919560793250dfe86a1d3c0226d12
|
ada0706e7e798cc3ef5363a84d522e82b698838a
|
refs/heads/master
| 2016-08-07T07:03:03.671168 | 2015-03-24T16:12:37 | 2015-03-24T16:12:37 | 31,974,779 | 0 | 3 | null | false | 2015-03-17T08:53:19 | 2015-03-10T18:35:05 | 2015-03-13T17:23:29 | 2015-03-13T17:23:29 | 0 | 0 | 1 | 1 |
Python
| null | null |
# take both parameters being passed and return
# the string true if a portion of str1 characters
# can be rearranged to match str2, otherwise return the string false.
def scramble(str1, str2):
if sorted(str1) == sorted(str2):
print True
else:
print False
print scramble("cdore","coder")
|
UTF-8
|
Python
| false | false | 311 |
py
| 9 |
p6.py
| 9 | 0.70418 | 0.684887 | 0 | 13 | 23 | 69 |
xz1082/assignment5
| 13,881,334,328,116 |
3523209dacd83439aad6c74cface246f73c9a6d0
|
9ef6dde178e3bc8ff9c7c5c9c7888451f1133feb
|
/jz1584/assignment5.py
|
a4dde469b7bc7eb195597c5bdcca5ba0d0bef54c
|
[] |
no_license
|
https://github.com/xz1082/assignment5
|
2d568f9f063da65804c10f8939f140c264a3ab4a
|
aca54cb9445de38021b5a92fc91c5ce0ccf879fc
|
refs/heads/master
| 2020-12-02T15:03:31.305458 | 2014-10-18T00:01:57 | 2014-10-18T00:01:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Q1 import *
from Q2 import *
from Q3 import *
from Q4 import *
#Q1 testing
print "Q1 testing:"
a=interval("[2,2]")
b=interval('[-2,2)')
c=interval("(2,400)")
d=interval("(-3,-2]")
print a, b, c, d
#f=interval("(-2,-1)") #error should be expected
#print f
#Q2 testing
print "Q2 testing:"
q2="[1,5)"
q21="(-2,7]"
#note [-1,7] is the same as (-2,7]
mergeIntervals(q2,q21)
#Q3 testing
print "Q3 testing:"
#"Note:Merged all intervals into equivalent inclusive-interval form:"
intlist='(2,5],[-2,7),(7,10],[8,18]'
Q3=mergeOverlapping(intlist)
print Q3
#Q4 testing
print "Q4 testing:"
Q4=insert("[1,2], (3,5), [6,7), (8,10], [12,16]", '[4,9]')
print Q4 #We know (3,5) is the same as [2,4]
#Q5 is in a seperate files called assignment5_Q5.py
|
UTF-8
|
Python
| false | false | 748 |
py
| 31 |
assignment5.py
| 31 | 0.639037 | 0.540107 | 0 | 37 | 19.189189 | 69 |
karoldiasb/lpc_tarefas
| 16,209,206,619,725 |
a0a306ede826d6edadf43793986f3c6306ca8baf
|
e55322838442b552d66e24fc167f6ac5debbc37b
|
/tarefas/api/resources.py
|
40db5a73c72ec920d38b7b67690c8a244e695957
|
[] |
no_license
|
https://github.com/karoldiasb/lpc_tarefas
|
9ea9e957021282bd4a5bb0774e639bc67ac45a3e
|
84ccd1e1e4d0075e4be02b83a112fa2a96c6f209
|
refs/heads/master
| 2018-05-18T09:49:17.171437 | 2017-06-02T20:30:29 | 2017-06-02T20:30:29 | 93,197,662 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tastypie.resources import ModelResource
from tastypie import fields, utils
from tastypie.authorization import Authorization
from tarefas.models import *
from django.contrib.auth.models import User
from tastypie.exceptions import Unauthorized
class UsuarioResource(ModelResource):
def obj_delete_list(self,bundle,**kwargs):
raise Unauthorized('Você não pode deletar essa lista');
class Meta:
queryset = Usuario.objects.all()
allowed_methods = ['get','post','put','delete']
filtering = {
"nome": ('exact', 'startswith',)
}
authorization = Authorization()
class ProjetoResource(ModelResource):
def obj_delete_list(self,bundle,**kwargs):
raise Unauthorized('Você não pode deletar essa lista');
class Meta:
queryset = Projeto.objects.all()
allowed_methods = ['get','post','put','delete']
filtering = {
"nome": ('exact', 'startswith',)
}
authorization = Authorization()
class TarefaResource(ModelResource):
def obj_create(self, bundle, **kwargs):
nomeTarefa = bundle.data['nomeTarefa']
usuario = bundle.data['usuario'].split("/")
projeto = bundle.data['projeto'].split("/")
if not Tarefa.objects.filter(nomeTarefa=nomeTarefa):
tipo = Tarefa()
tipo.nomeTarefa = bundle.data['nomeTarefa']
tipo.usuario = Usuario.objects.get(pk=usuario[4])
tipo.projeto = Projeto.objects.get(pk=projeto[4])
tipo.save()
bundle.obj = tipo
return bundle
else:
raise Unauthorized('Já existe tarefa com essa pessoa');
def obj_delete_list(self,bundle,**kwargs):
raise Unauthorized('Você não pode deletar essa lista');
class Meta:
queryset = Tarefa.objects.all()
allowed_methods = ['get','post','put','delete']
filtering = {
"nome": ('exact', 'startswith',)
}
authorization = Authorization()
class ProjetoUsuarioResource(ModelResource):
def obj_delete_list(self,bundle,**kwargs):
raise Unauthorized('Você não pode deletar essa lista');
class Meta:
queryset = ProjetoUsuario.objects.all()
allowed_methods = ['get','post','put','delete']
filtering = {
"nome": ('exact', 'startswith',)
}
authorization = Authorization()
|
UTF-8
|
Python
| false | false | 2,415 |
py
| 4 |
resources.py
| 4 | 0.620948 | 0.620116 | 0 | 73 | 31.958904 | 67 |
yycptt/Feedforward_NN_library
| 13,082,470,390,382 |
92549b9c2bbf5452cfa9115c17967d77db842a44
|
4feac7ddac07e3f04b58ec8f4d632658ca774960
|
/loss/softmax_cross_entropy_loss.py
|
53905e20ad159e5ebf79a466cb59cf45009bdb39
|
[] |
no_license
|
https://github.com/yycptt/Feedforward_NN_library
|
8fddf217117b675ce4af3164be96106b89f0d215
|
16040f41e6d9a7c3fe108be4f2218bd1847adda4
|
refs/heads/master
| 2021-01-23T19:13:48.530032 | 2017-09-21T04:26:40 | 2017-09-21T04:26:40 | 102,812,227 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
from loss import Loss
class SoftmaxCrossEntropyLoss(Loss):
def __init__(self):
super(SoftmaxCrossEntropyLoss, self).__init__()
@staticmethod
def compute_loss_and_grad(Y, X):
assert (np.all(Y.shape == X.shape))
m = Y.shape[1]
temp = np.exp(X)
Y_hat = np.divide(temp, np.sum(temp, axis=0))
loss = 1.0 / m * np.nansum(-np.multiply(Y, np.log(Y_hat)))
dX = Y_hat - Y
return loss, dX
|
UTF-8
|
Python
| false | false | 476 |
py
| 22 |
softmax_cross_entropy_loss.py
| 21 | 0.571429 | 0.563025 | 0 | 17 | 27 | 66 |
rossigee/site-domain-manager
| 10,746,008,208,941 |
860cd277662d3b8cfe35c91727c528cca55bcfe4
|
c2235c931a371e467c96708e9a88183e6c2bd15a
|
/sdmgr/hosting/__init__.py
|
573a085601db6384b63f5932c4a9355911bb3878
|
[] |
no_license
|
https://github.com/rossigee/site-domain-manager
|
27a3a148bf0b05e50c41eb7105de4d34ee74f750
|
a9eca92d98e4b219cc6b9278ad2a22d243d95663
|
refs/heads/master
| 2023-05-27T05:52:29.879128 | 2023-05-20T06:19:32 | 2023-05-20T06:19:32 | 186,814,637 | 0 | 0 | null | false | 2023-05-20T06:19:33 | 2019-05-15T11:42:41 | 2023-02-15T04:20:54 | 2023-05-20T06:19:32 | 113 | 0 | 0 | 1 |
Python
| false | false |
available_agents = []
def register_agent(agent):
available_agents.append(agent)
|
UTF-8
|
Python
| false | false | 85 |
py
| 59 |
__init__.py
| 47 | 0.729412 | 0.729412 | 0 | 4 | 20.25 | 34 |
NervanaSystems/ngraph-neon
| 1,992,864,845,973 |
4f1f0e987ad80787950e2f9fb862bf6be8b1f36c
|
0cef8deb473ffac47344dd46039b9f962d9d0ccf
|
/examples/walk_through/gendata.py
|
19ec65edef29d0a1294e41a58cff210ff337c06f
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/NervanaSystems/ngraph-neon
|
6045d51b6c67348b0df8cbe051253543b691f29d
|
3d17f06ae723ec5c2e3a52c9c840b4d6c7640f22
|
refs/heads/master
| 2023-06-22T02:54:20.143134 | 2023-01-03T22:54:35 | 2023-01-03T22:54:35 | 83,846,593 | 14 | 6 |
Apache-2.0
| false | 2022-10-17T03:51:08 | 2017-03-03T22:19:31 | 2022-06-10T05:16:35 | 2019-04-17T23:57:02 | 17,431 | 13 | 6 | 1 |
Python
| false | false |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import numbers
class MixtureGenerator(object):
def __init__(self, pvals, shape, seed=0):
if isinstance(shape, numbers.Integral):
shape = (shape,)
self.__rng = np.random.RandomState(seed)
self.nclasses = len(pvals)
self.shape = shape
self.size = 1
for s in shape:
self.size = self.size * s
self.As = self.__rng.uniform(-1, 1, (self.size, self.size, self.nclasses,))
self.bs = self.__rng.uniform(-1, 1, (self.size, self.nclasses,))
self.accum = []
s = 0
for pval in pvals:
s = s + pval
self.accum.append(s)
self.accum[-1] = 2
def multichoose(self):
x = self.__rng.uniform(0, 1)
for i, aval in enumerate(self.accum):
if x < aval:
return i
def multinomial(self, ys):
"""
Initialize y with multinomial values distributed per pvals.
Arguments:
ys: 1-d tensor.
"""
i = 0
for i in range(ys.size):
ys[i] = self.multichoose()
def fill_mixture(self, xs, ys):
self.multinomial(ys)
xs[...] = self.__rng.normal(0, 0.3, xs.shape)
for i in range(ys.size):
y = ys[i]
x = xs[..., i].reshape(self.size)
A = self.As[..., y]
b = self.bs[..., y]
x0 = np.dot(A, x) + b
xs[..., i] = x0.reshape(self.shape)
def make_mixture(self, N):
return np.empty(self.shape + (N,)), np.empty((N,), dtype=int)
def gen_data(self, batch_size, n_batches):
XS = []
YS = []
for i in range(n_batches):
xs, ys = self.make_mixture(batch_size)
self.fill_mixture(xs, ys)
XS.append(xs)
YS.append(ys)
return XS, YS
|
UTF-8
|
Python
| false | false | 2,609 |
py
| 152 |
gendata.py
| 116 | 0.523956 | 0.512457 | 0 | 80 | 31.6125 | 83 |
RTEMS/rtems-tools
| 6,201,932,792,162 |
f2ffb8052b2e7a1a5a4fa5066ceddf97abb6e888
|
49d71dec3f8feedc7e008988c27192e30a0e2d62
|
/tester/rt/console.py
|
13300dae57e3a1680ca498a3ee96d9e7fe25ff88
|
[] |
no_license
|
https://github.com/RTEMS/rtems-tools
|
f7a142867dac17070122dfe88537c8ceb89724dc
|
eda9325e583f761c53ee3db83124cc77cb4fefb5
|
refs/heads/master
| 2023-09-01T00:38:20.180455 | 2023-08-29T23:51:47 | 2023-08-29T23:51:47 | 10,755,322 | 33 | 47 | null | false | 2023-09-07T05:44:57 | 2013-06-18T06:23:31 | 2023-07-11T22:44:52 | 2023-09-07T05:44:57 | 6,079 | 29 | 39 | 1 |
C
| false | false |
#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2013-2020 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# RTEMS Testing Consoles
#
from __future__ import print_function
import errno
import os
import threading
import time
from rtemstoolkit import path
import tester.rt.telnet
#
# Not available on Windows. Not sure what this means.
#
if os.name != 'nt':
import tester.rt.stty as stty
else:
stty = None
def save():
if stty is not None:
return stty.save()
return None
def restore(attributes):
if attributes is not None and stty is not None:
stty.restore(attributes)
class console(object):
'''RTEMS Testing console base.'''
def __init__(self, name, trace):
self.name = name
self.trace = trace
def __del__(self):
pass
def _tracing(self):
return self.trace
def open(self):
pass
def close(self):
pass
class stdio(console):
'''STDIO console.'''
def __init__(self, trace = False):
super(stdio, self).__init__('stdio', trace)
class tty(console):
    '''TTY console connects to the target's console.

    Opens either a local serial device (via stty) or a telnet
    connection, and pumps received characters to the ``output``
    callback one line at a time from a background reader thread.
    '''
    def __init__(self, dev, output, setup = None, trace = False):
        # dev:    device path or telnet target identifier
        # output: callable invoked with each complete line of console text
        # setup:  device configuration passed to the tty's set() method
        self.tty = None
        self.read_thread = None
        self.dev = dev
        self.output = output
        self.setup = setup
        super(tty, self).__init__(dev, trace)
    def __del__(self):
        super(tty, self).__del__()
        # Make sure the reader thread is stopped and the device released.
        self.close()
    def open(self):
        '''Open the console device and start the background reader thread.'''
        def _readthread(me, x):
            # Reader loop: polls the tty, filters to printable ASCII and
            # emits complete lines via the output callback.
            line = ''
            while me.running:
                # Poll interval; keeps CPU usage low on a non-blocking read.
                time.sleep(0.05)
                try:
                    data = me.tty.read()
                    if isinstance(data, bytes):
                        # Drop undecodable bytes rather than raising.
                        data = data.decode('utf-8', 'ignore')
                except IOError as ioe:
                    # EAGAIN just means no data yet on a non-blocking fd.
                    if ioe.errno == errno.EAGAIN:
                        continue
                    raise
                except:
                    raise
                for c in data:
                    # Keep only 7-bit printable-range characters; NULs and
                    # high-bit noise from the target are discarded.
                    if ord(c) > 0 and ord(c) < 128:
                        line += c
                        if c == '\n':
                            me.output(line)
                            line = ''
        # Prefer a real serial device when stty is available and the
        # device path exists; otherwise fall back to telnet.
        if stty and path.exists(self.dev):
            self.tty = stty.tty(self.dev)
        else:
            self.tty = tester.rt.telnet.tty(self.dev)
        self.tty.set(self.setup)
        self.tty.on()
        self.read_thread = threading.Thread(target = _readthread,
                                            name = 'tty[%s]' % (self.dev),
                                            args = (self, 0))
        # Daemon thread so an exiting process is not held up by the reader.
        self.read_thread.daemon = True
        self.running = True
        self.read_thread.start()
    def close(self):
        '''Stop the reader thread and release the console device.'''
        if self.tty:
            # Grace period to let any in-flight output drain.
            time.sleep(1)
        if self.read_thread:
            self.running = False
            # Bounded join; the daemon thread dies with the process anyway.
            self.read_thread.join(1)
        self.tty = None
|
UTF-8
|
Python
| false | false | 4,293 |
py
| 292 |
console.py
| 165 | 0.587002 | 0.58211 | 0 | 143 | 29.020979 | 78 |
airman41/packages
| 1,245,540,550,214 |
6ff3b1d8fac0093a0e28fa802d04e7128a03d89b
|
6ad334cdbb9f8f24bef5bbc1aa9752ac24345ccf
|
/udiskie/setup.py
|
9ae1ab4b2fa78d20fb880eb3bdfed80efc01c5d5
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
https://github.com/airman41/packages
|
d0dff222807a16b2bd1cc3faabd2ae1262e9980e
|
ee05bfe68b7bc2aa63ef15e414c6c165dbac6bcf
|
refs/heads/master
| 2020-12-24T20:43:37.020583 | 2014-05-15T13:20:10 | 2014-05-15T13:20:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# NOTE: the shebang must be the very first line for the kernel to honour
# it; the coding declaration is still valid on line 2 (PEP 263).

from distutils.core import setup
from subprocess import call

import sys
import logging

from os import path

# read long_description from README.rst (plus the changelog); building
# from a tree without the docs just yields no long description.
long_description = None
try:
    long_description = open('README.rst').read()
    long_description += '\n' + open('CHANGES.rst').read()
except IOError:
    pass

# Icons install into the hicolor theme below the installation prefix.
theme_base = path.join(sys.prefix, 'share/icons/hicolor')
# One scalable SVG plus fixed-size PNG renderings.
icon_resolutions = ([('scalable', 'svg')] +
                    [('{0}x{0}'.format(res), 'png') for res in [16]])
# Icon file base names per theme class ("actions").
icon_classes = {'actions': ('mount', 'unmount',
                            'lock', 'unlock',
                            'eject', 'detach')}

setup(
    name='udiskie',
    version='0.6.4',
    description='Removable disk automounter for udisks',
    long_description=long_description,
    author='Byron Clark',
    author_email='byron@theclarkfamily.name',
    maintainer='Thomas Gläßle',
    maintainer_email='t_glaessle@gmx.de',
    url='https://github.com/coldfix/udiskie',
    license='MIT',
    packages=[
        'udiskie',
    ],
    # Expand the (resolution, class) grid into per-directory icon lists.
    data_files=[
        (path.join(theme_base, icon_resolution, icon_class), [
            path.join('icons', icon_resolution, icon_class,
                      'udiskie-%s.%s' % (icon_name, icon_ext))
            for icon_name in icon_names])
        for icon_resolution,icon_ext in icon_resolutions
        for icon_class,icon_names in icon_classes.items()
    ],
    entry_points={
        'console_scripts': [
            'udiskie = udiskie.cli:Daemon.main',
            'udiskie-mount = udiskie.cli:Mount.main',
            'udiskie-umount = udiskie.cli:Umount.main',
        ],
    },
    install_requires=[
        # Currently not building out of the box:
        # 'PyGObject',
        # 'dbus-python',
        # 'pygtk>=2.10',
    ],
    tests_require=[
        'python-dbusmock>=0.7.2'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'License :: OSI Approved :: MIT License',
        'Topic :: Desktop Environment',
        'Topic :: Software Development',
        'Topic :: System :: Filesystems',
        'Topic :: System :: Hardware',
        'Topic :: Utilities',
    ],
)
|
UTF-8
|
Python
| false | false | 2,507 |
py
| 7 |
setup.py
| 4 | 0.57485 | 0.566467 | 0 | 80 | 30.3125 | 69 |
zhkbmstu/bmstu.py
| 10,780,367,925,000 |
54ce6dda4a9cbf5b3a0181702fd1bcf3086ef076
|
a522c3c1a95d3073ebeda058f304c6a529322698
|
/blog_builder.py
|
1fedb933718f5a52fb71714a522c62eb5a9eb95e
|
[] |
no_license
|
https://github.com/zhkbmstu/bmstu.py
|
dbb546acb96c7c69e777a5120d6f4051cb9260b1
|
0c0256a40990ed7f6aac2307b1e5e7e7e7cbc005
|
refs/heads/master
| 2021-01-10T01:44:35.156446 | 2015-10-02T18:28:59 | 2015-10-02T18:28:59 | 43,523,499 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# Build a simple static HTML blog page from articles.json (Python 2 script:
# uses the print statement and str.encode on the loaded unicode values).
import json
# Each article record provides 'author', 'title' and 'text' keys
# (demonstrated by the accesses below).
articles = json.load(open('articles.json', 'r'))
# NOTE(review): opened in append mode, so re-running the script appends a
# second full document to blog.html -- 'w' looks intended; confirm.
blog_file = open('blog.html', 'a')
blog_file.write('<html><head><meta charset="utf-8"><title>Статьи</title></head>\n<body>\n')
for article in articles:
    # Echo each article to stdout for inspection.
    print "%s:\"%s\"\n\t%s\n\n" % (article['author'], article['title'], article['text'])
    # Encode fields to UTF-8 bytes before writing the HTML fragment.
    blog_file.write('<article>\n<h2>' + article['author'].encode('utf8') + ':"' + article['title'].encode('utf8') + '"</h2>\n<div>\n' + article['text'].encode('utf8') + '\n</div>\n</article>\n\n')
blog_file.write("</body></html>")
blog_file.close()
|
UTF-8
|
Python
| false | false | 574 |
py
| 7 |
blog_builder.py
| 4 | 0.612676 | 0.600352 | 0 | 15 | 36.866667 | 194 |
mkotsi/TimeLapse_UQ_localsolver-master
| 10,273,561,805,482 |
c357ddf30e85225a2dd2966deda93772674aa602
|
6ae1034e48ec38a5880a448e1f7a134cbe3e5535
|
/docker_image/pysit_extensions/normalize_amplitudes/__init__.py
|
d0736ac4013163ca6d51df67a6027e3a4f206326
|
[] |
no_license
|
https://github.com/mkotsi/TimeLapse_UQ_localsolver-master
|
9d83f98e971a71bdaf0f7e931e41e87b7f539738
|
224f6972499d74ac734e3b31bbc6840f35423e68
|
refs/heads/master
| 2021-02-13T05:10:28.449924 | 2020-03-30T13:07:21 | 2020-03-30T13:07:21 | 244,664,925 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pysit_extensions.normalize_amplitudes.normalize_amplitudes import *
from pysit_extensions.normalize_amplitudes.get_average_energy_ratio import *
|
UTF-8
|
Python
| false | false | 149 |
py
| 101 |
__init__.py
| 74 | 0.85906 | 0.85906 | 0 | 2 | 74 | 76 |
s2motion/learning_python
| 17,970,143,179,985 |
36f54548ddb23414f78129eb9a84f497c786206b
|
50dc44b5f2183ba102ca1a391eadc492e45b98a8
|
/sources/redis_washer.py
|
831bb3fa85aab09c69b7560aa006607ad7d62df2
|
[] |
no_license
|
https://github.com/s2motion/learning_python
|
fa1bc17c09658253305dbd8dd4aff65e681e9462
|
7821019a7118cb4c08afc3fd7525f08239bac586
|
refs/heads/master
| 2020-03-16T21:12:29.500599 | 2018-05-11T04:28:24 | 2018-05-11T04:28:24 | 132,988,982 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import redis
conn = redis.Redis()
print('Washer is starting')
dishes = ['salad', 'bread', 'entree', 'dessert']
for dish in dishes:
msg = dish.encode('utf-8')
conn.rpush('dishes', msg)
print('Washed', num)
conn.rpush('dishes', 'quit')
print('Washer is done')
|
UTF-8
|
Python
| false | false | 261 |
py
| 4 |
redis_washer.py
| 4 | 0.670498 | 0.666667 | 0 | 11 | 22.818182 | 48 |
Thuku777/church
| 10,239,202,068,879 |
e72bc9aa0170167d8edd9f167a0290084b0459c1
|
8ac14cf6a07348e61ebd6b523b00ca87c40eeb25
|
/about/migrations/0002_auto_20201027_1931.py
|
92b3b2039539f889fda8b6a09b22fd854777755a
|
[] |
no_license
|
https://github.com/Thuku777/church
|
556ff964337061e72e779fe30d6a582b4551ac12
|
98e6bbbed36005abc1b798254a3089040a1a1e0b
|
refs/heads/main
| 2023-01-09T10:47:19.323210 | 2020-11-17T21:31:11 | 2020-11-17T21:31:11 | 309,812,757 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.1.2 on 2020-10-27 16:31
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration for the ``about`` app.

    Adjusts model Meta options, removes ``leader.photo``, and adds
    publish flags, slugs, posted timestamps, an image field and role
    booleans to the ``about`` and ``leader`` models.
    """

    dependencies = [
        ('about', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='about',
            options={'verbose_name': 'About', 'verbose_name_plural': 'About'},
        ),
        migrations.AlterModelOptions(
            name='leader',
            options={'ordering': ['date_posted'], 'verbose_name': 'Leaders', 'verbose_name_plural': 'DC Kenol Team'},
        ),
        migrations.AlterModelOptions(
            name='mission',
            options={'verbose_name': 'Mission', 'verbose_name_plural': 'Mission'},
        ),
        migrations.RemoveField(
            model_name='leader',
            name='photo',
        ),
        migrations.AddField(
            model_name='about',
            name='date_posted',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='about',
            name='is_published',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='about',
            name='slug',
            field=models.SlugField(default='about', max_length=200, null=True, unique=True),
        ),
        migrations.AddField(
            model_name='leader',
            name='date_posted',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='leader',
            name='image',
            field=models.ImageField(null=True, upload_to='images/Leaders/%Y/%m/%d/'),
        ),
        migrations.AddField(
            model_name='leader',
            name='is_leader',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='leader',
            name='is_pastor',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='leader',
            name='is_staff',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='leader',
            name='slug',
            field=models.SlugField(max_length=200, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='leader',
            name='title',
            field=models.CharField(max_length=400),
        ),
    ]
|
UTF-8
|
Python
| false | false | 2,547 |
py
| 54 |
0002_auto_20201027_1931.py
| 29 | 0.53671 | 0.525717 | 0 | 80 | 30.8375 | 117 |
Sfoozz/Text-Mining-IE-2017
| 18,966,575,603,604 |
2ccacd90b290ca73f4f1b33224b75bc745f58b97
|
7392a8c0bf09043f78f55efe24bc461a90ff463a
|
/test_textmining.py
|
9cfd83786ebfc4c4ab15edef43fd45ff691dbc40
|
[] |
no_license
|
https://github.com/Sfoozz/Text-Mining-IE-2017
|
e57dbf261f26774dc47070813aa54783929ff9a0
|
b6abae65945c56d0262b3611643b3873a977c276
|
refs/heads/master
| 2021-01-11T22:46:38.463273 | 2017-02-04T07:02:05 | 2017-02-04T07:02:05 | 79,032,491 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json

# Load the scraped corpus (a JSON array of work records) and inspect the
# first entry to verify the file structure.
with open('../corpus_goed.json') as json_data:
    d = json.load(json_data)
eerste = d[0]
print(eerste)
# Each record carries a 'workID' field, read here as a quick sanity check.
eerstevalue = eerste['workID']
print(eerstevalue)
|
UTF-8
|
Python
| false | false | 184 |
py
| 25 |
test_textmining.py
| 22 | 0.63587 | 0.630435 | 0 | 8 | 21.875 | 46 |
fatine7359/Reviews-Site
| 11,338,713,675,247 |
1b0136f520e8ce97d61c61da51fbdb0995299b24
|
174259be7790b16fc7c13b17fb6343420017d6d2
|
/ManageInterface3.py
|
56d1c35d95b3b1e863e5200bc4751eb4f2eb0350
|
[] |
no_license
|
https://github.com/fatine7359/Reviews-Site
|
e96b928dfb2804cd597f4ea97fc1c39eb1f96fa5
|
8faa5a94287b04b40b86f8658979c10d3291641f
|
refs/heads/main
| 2023-05-31T00:22:32.838720 | 2021-06-10T08:36:17 | 2021-06-10T08:36:17 | 375,502,613 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'manageSite1.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from AddInterface3 import Ui_AddWindow
from DeleteInterface2 import Ui_DeleteWindow
from ModifyInterface3 import Ui_ModifyWindow
class Ui_ManageWindow(object):
    """pyuic5-generated UI for the "Manage Site" window.

    Provides three tool buttons (Add / Modify / Delete) that each open
    the corresponding child window. Regenerating the .ui file will
    discard manual changes here.
    """
    def openAdd(self):
        # Open the Add window in a fresh top-level QMainWindow.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_AddWindow()
        self.ui.setupUi(self.window)
        self.window.show()
    def openDelete(self):
        # Open the Delete window in a fresh top-level QMainWindow.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_DeleteWindow()
        self.ui.setupUi(self.window)
        self.window.show()
    def openModify(self):
        # Open the Modify window in a fresh top-level QMainWindow.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_ModifyWindow()
        self.ui.setupUi(self.window)
        self.window.show()
    def setupUi(self, ManageWindow):
        # Build all widgets, apply the stylesheet and wire button clicks.
        ManageWindow.setObjectName("ManageWindow")
        ManageWindow.resize(806, 588)
        ManageWindow.setStyleSheet("*{\n"
" font-family:century gothic;\n"
" font-size:24px;\n"
"}\n"
"QFrame\n"
"{\n"
" background:rgba(0,0,0,0.8);\n"
" border-radius:15px\n"
" \n"
"}\n"
"#ManageWindow{\n"
" border-image: url(:/pref0/images/InterfaceChoisie.jpeg);\n"
"}\n"
"QToolButton\n"
"{\n"
" background:#ffb860;\n"
" border-radius:15px;\n"
"}\n"
"QLabel\n"
"{\n"
" color:#ffb860;\n"
" background:transparent;\n"
"}\n"
"QPushButton\n"
"{\n"
" color:#333;\n"
" border-radius:15px;\n"
" background:#ffb860;\n"
"}\n"
"QPushButton:hover\n"
"{\n"
" color:#333;\n"
" border-radius:15px;\n"
" background:white;\n"
"}\n"
"QLineEdit\n"
"{\n"
"background:transparent;\n"
"border:none;\n"
"color:#717072;\n"
"border-bottom:1px solid #717072;\n"
"font-size:14px;\n"
"}")
        self.centralwidget = QtWidgets.QWidget(ManageWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(220, 110, 441, 391))
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.label = QtWidgets.QLabel(self.frame)
        self.label.setGeometry(QtCore.QRect(140, 30, 161, 51))
        self.label.setObjectName("label")
        # "Add" button.
        self.toolButton = QtWidgets.QToolButton(self.frame)
        self.toolButton.setGeometry(QtCore.QRect(50, 90, 81, 81))
        self.toolButton.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/pref1/images/AddIcon.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton.setIcon(icon)
        self.toolButton.setIconSize(QtCore.QSize(39, 39))
        self.toolButton.setObjectName("toolButton")
        self.toolButton.clicked.connect(self.openAdd)
        # "Modify" button.
        self.toolButton_2 = QtWidgets.QToolButton(self.frame)
        self.toolButton_2.setGeometry(QtCore.QRect(280, 80, 81, 81))
        self.toolButton_2.setText("")
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/pref1/images/ModifyIcon.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton_2.setIcon(icon1)
        self.toolButton_2.setIconSize(QtCore.QSize(39, 39))
        self.toolButton_2.setObjectName("toolButton_2")
        self.toolButton_2.clicked.connect(self.openModify)
        # "Delete" button.
        self.toolButton_3 = QtWidgets.QToolButton(self.frame)
        self.toolButton_3.setGeometry(QtCore.QRect(160, 220, 81, 81))
        self.toolButton_3.setText("")
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/pref1/images/DeleteIcon.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton_3.setIcon(icon2)
        self.toolButton_3.setIconSize(QtCore.QSize(39, 39))
        self.toolButton_3.setObjectName("toolButton_3")
        self.toolButton_3.clicked.connect(self.openDelete)
        self.label_2 = QtWidgets.QLabel(self.frame)
        self.label_2.setGeometry(QtCore.QRect(70, 180, 81, 41))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.frame)
        self.label_3.setGeometry(QtCore.QRect(290, 170, 101, 41))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.frame)
        self.label_4.setGeometry(QtCore.QRect(180, 310, 111, 41))
        self.label_4.setObjectName("label_4")
        # Decorative header button (no click handler connected).
        self.toolButton_4 = QtWidgets.QToolButton(self.centralwidget)
        self.toolButton_4.setGeometry(QtCore.QRect(370, 60, 111, 81))
        self.toolButton_4.setText("")
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/pref1/images/ManageSiteIcon.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton_4.setIcon(icon3)
        self.toolButton_4.setIconSize(QtCore.QSize(65, 65))
        self.toolButton_4.setObjectName("toolButton_4")
        ManageWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(ManageWindow)
        self.statusbar.setObjectName("statusbar")
        ManageWindow.setStatusBar(self.statusbar)

        self.retranslateUi(ManageWindow)
        QtCore.QMetaObject.connectSlotsByName(ManageWindow)

    def retranslateUi(self, ManageWindow):
        # Set all translatable label texts.
        _translate = QtCore.QCoreApplication.translate
        ManageWindow.setWindowTitle(_translate("ManageWindow", "MainWindow"))
        self.label.setText(_translate("ManageWindow", "<html><head/><body><p><span style=\" font-size:18pt;\">Manage Site</span></p></body></html>"))
        self.label_2.setText(_translate("ManageWindow", "<html><head/><body><p><span style=\" font-size:12pt;\">Add </span></p></body></html>"))
        self.label_3.setText(_translate("ManageWindow", "<html><head/><body><p><span style=\" font-size:12pt;\">Modify</span></p></body></html>"))
        self.label_4.setText(_translate("ManageWindow", "<html><head/><body><p><span style=\" font-size:12pt;\">Delete</span></p></body></html>"))
# Compiled Qt resource module (icons/background images referenced as :/pref*).
import images

if __name__ == "__main__":
    # Standalone preview: create the window and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    ManageWindow = QtWidgets.QMainWindow()
    ui = Ui_ManageWindow()
    ui.setupUi(ManageWindow)
    ManageWindow.show()
    sys.exit(app.exec_())
|
UTF-8
|
Python
| false | false | 6,375 |
py
| 24 |
ManageInterface3.py
| 10 | 0.664314 | 0.628392 | 0 | 169 | 36.721893 | 149 |
AlphaTechnic/SOGANG_ICPC_training_day
| 17,755,394,838,445 |
f821a504b3b6b9f0c7eccf9ed60405ef15626874
|
cecd261be2557a29b2e8eb7a22533945c1711450
|
/2021-04-03/1798.py
|
5a46120f2a687da0d1dd1582a0e43cff3fb57cba
|
[] |
no_license
|
https://github.com/AlphaTechnic/SOGANG_ICPC_training_day
|
3267540275c867a43d95d1a471fe0e95a08c060b
|
29244ae5a6767679466ab4ac3a32d4a024878e74
|
refs/heads/master
| 2023-05-05T16:25:11.381038 | 2021-05-22T14:04:46 | 2021-05-22T14:04:46 | 347,415,881 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
input :
3.0 4.0 2.0 0.0 4.0 0.0
3.0 4.0 2.0 90.0 4.0 0.0
6.0 8.0 2.14 75.2 9.58 114.3
3.0 4.0 5.0 0.0 5.0 90.0
ouput :
2.00
3.26
7.66
4.54
"""
import sys
import math
sys.stdin = open("input.txt", "r")
input = sys.stdin.readline
for line in sys.stdin:
r, h, d1, a1, d2, a2 = map(float, line.rstrip().split())
R = math.sqrt(r ** 2 + h ** 2)
Arc = 2 * math.pi * r
A = Arc / R
target_A = A * abs(a2 - a1) / 360
if target_A > A / 2:
target_A = A - target_A
print("%0.2f" % math.sqrt(d1 ** 2 + d2 ** 2 - 2 * d1 * d2 * math.cos(target_A)))
|
UTF-8
|
Python
| false | false | 577 |
py
| 32 |
1798.py
| 32 | 0.523397 | 0.369151 | 0 | 30 | 18.266667 | 84 |
yooseungju/TIL
| 17,652,315,593,928 |
dde8a6b0ef990c51134bd082670e8c19919565bb
|
5b77ea24ccda4fcf6ed8a269f27ac33d0a47bcad
|
/Algorithm_class02/AD/B3_[TST] 구슬 고르기3.py
|
7ef96e692aa16733581eb019a8016e8f6704a421
|
[] |
no_license
|
https://github.com/yooseungju/TIL
|
98acdd6a1f0c145bff4ae33cdbfbef4f45d5da42
|
e6660aaf52a770508fe8778994e40aa43d2484d4
|
refs/heads/master
| 2020-04-11T21:13:56.981903 | 2019-09-28T12:34:03 | 2019-09-28T12:34:03 | 162,099,150 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def perm(k, n):
    """Print every permutation of the global ``arr`` values, one per line.

    k -- index of the slot currently being filled in the global buffer ``T``
    n -- permutation length; a complete permutation is printed when k == n

    Uses module-level state: ``T`` (output buffer), ``chk`` (used flags,
    indexed by value) and ``arr`` (candidate values).
    """
    if n == k:
        # Buffer is full: emit one permutation, space separated.
        for t in T:
            print(t, end=' ')
        print()
        return
    for i in arr:
        if not chk[i]:
            chk[i] = 1       # mark value i as used
            T[k] = i         # place it in slot k
            # Recurse on the parameter n (the original used the global N,
            # which only worked because callers always pass n == N).
            perm(k + 1, n)
            # The redundant re-assignment of T[k] after the recursion was
            # removed: deeper calls never touch slot k.
            chk[i] = 0       # backtrack so value i becomes available again
# Driver: buffer of 3 slots, value count, used-flags (1-indexed by value)
# and the candidate values 1..3; then print all 3! permutations.
T = [0] *3
N = 3
chk = [0] * (N+1)
arr = [ i for i in range(1,4)]
perm(0, N)
|
UTF-8
|
Python
| false | false | 346 |
py
| 298 |
B3_[TST] 구슬 고르기3.py
| 266 | 0.33237 | 0.300578 | 0 | 22 | 14.772727 | 30 |
ChelaNew/2013PythonCode
| 3,246,995,285,230 |
a7dbc2bfc8ef6ffbf754f2d472c5ebc431eb9b45
|
48e9b92ea0c488bb2548d4821edc1764e0b1edc9
|
/py/grt/mechanism/mechcontroller.py
|
3c8cf39d44a8cbfaad23074fbd9ec6e4d77e0d69
|
[] |
no_license
|
https://github.com/ChelaNew/2013PythonCode
|
541f7055dd5c7bafb71ae37f573aea0c58eb615d
|
d457f3eb070337baef5484b99110978793d65930
|
refs/heads/master
| 2016-09-13T18:45:03.796724 | 2016-05-13T18:09:50 | 2016-05-13T18:09:50 | 57,171,586 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from grt.sensors.dummy import Mimic
class MechController:
    """Maps controller/joystick button events to robot mechanism actions
    (shooter, pickup, belts).

    Listens to a "mimic" joystick sensor; the xbox listener below is
    defined but not wired here -- presumably connected elsewhere or by
    a later change (TODO confirm).
    """
    def __init__(self, joystick, xbox_controller, pickup, shooter, belts, mimic_joystick):
        # Keep references to the mechanisms and input devices.
        self.belts = belts
        self.joystick = joystick
        self.xbox_controller = xbox_controller
        self.pickup = pickup
        self.shooter = shooter
        #This adds the listener to mimic joystick, mimic joystick class will then call when state changes
        mimic_joystick.add_listener(self._dummy_vision_listener)

    def _dummy_vision_listener(self, sensor, state_id, datum):
        # Called by the mimic joystick on every state change; datum is
        # truthy on button press. Prints are debug traces.
        print(state_id)
        if state_id == "button3":
            if datum:
                self.shooter.shoot(0.5)
                print("x")
        if state_id == "button2":
            if datum:
                self.shooter.stop()
                print("y")
        if state_id == "trigger":
            if datum:
                self.pickup.pickUp(0.5, 0.5)
                print("b")
        if state_id == "button4":
            if datum:
                self.pickup.stop()
                print("a")

    def _xbox_controller_listener(self, sensor, state_id, datum):
        # Xbox mapping: face buttons drive shooter/pickup, triggers and
        # shoulder buttons drive pickup stop and belt direction.
        if state_id == "x_button":
            if datum:
                self.shooter.shoot(0.5)
        if state_id == "y_button":
            if datum:
                self.shooter.stop()
        if state_id == "b_button":
            if datum:
                self.pickup.pickUp(0.5, 0.5)
        if state_id == "a_button":
            if datum:
                self.pickup.spitOut(0.5, 0.5)
        if state_id == "r_trigger":
            if datum:
                self.pickup.stop()
        if state_id == "l_trigger":
            if datum:
                self.belts.backwards()
        if state_id == "r_button":
            if datum:
                self.belts.stop()
        if state_id == "l_button":
            if datum:
                self.belts.forewards()

    # def _driver_joystick_listener(self, sensor, state_id, datum):
|
UTF-8
|
Python
| false | false | 1,975 |
py
| 13 |
mechcontroller.py
| 13 | 0.504304 | 0.494684 | 0 | 61 | 31.311475 | 105 |
ShintaroChiba/rakuten-scraping
| 14,147,622,300,893 |
b17b0423d4f21afc62dada36d47d8b101d0a7eb1
|
189566d8051db0c37ad8fc093fd200a0bec4e645
|
/isbn/management/commands/get_isbn_info.py
|
9595fc46fc7cac9f91119bc9d8325a1e0b891212
|
[] |
no_license
|
https://github.com/ShintaroChiba/rakuten-scraping
|
4ee7c67a753ee16e126ec345251698df912a235e
|
183bfdbf8d6decbabb104262bb0ceb209edfb379
|
refs/heads/master
| 2023-03-12T11:02:10.821844 | 2021-03-03T11:01:09 | 2021-03-03T11:01:09 | 344,096,403 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.management.base import BaseCommand
from isbn.models import Book, SearchWord
from datetime import datetime
from isbn.utils import get_word_list, create_url, lineNotify, regist_data
import requests
import urllib.request
import urllib.parse
import json
import logging
# Initial parameter setup: fixed log directory on the host.
logdir = r"C:\django\books\log"
# Current timestamp, used to make each run's log file name unique.
date_name = datetime.now().strftime("%Y%m%d-%H%M%S")
# Build the full log file path for this run.
file_name = logdir + "\\" + date_name + "_" + "GET_ISBN_INFO.log"
logging.basicConfig(filename=file_name,level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
class Command(BaseCommand):
    """Custom management command: collect book data from the Rakuten API
    for every configured search word and store it via regist_data()."""
    def handle(self, *args, **options):
        # Entry point invoked by Django's manage.py.
        logging.info('[正常]楽天書籍情報収集処理を開始します。')
        # Fetch the list of search words to query.
        word_list = get_word_list()
        for word in word_list:
            # Build the request URL for this word.
            url = create_url(word)
            # Download the response.
            req = requests.get(url)
            # Parse it as JSON.
            data = json.loads(req.text)
            # Insert new records / update existing ones.
            regist_data(data, word)
        logging.info('[正常]楽天書籍情報収集処理が正常終了しました。')
|
UTF-8
|
Python
| false | false | 1,363 |
py
| 11 |
get_isbn_info.py
| 9 | 0.634667 | 0.634667 | 0 | 38 | 28.605263 | 124 |
zaazbb/ymmeterreader
| 15,925,738,752,325 |
fbb212129bb685671a34b0b56143f32471083e79
|
8f79de16f4be85ae15fa03e8af6bbeb5bd836f55
|
/mainwindow.py
|
f67f2ca898300d43dca072e7801924fa60df3c2a
|
[] |
no_license
|
https://github.com/zaazbb/ymmeterreader
|
b254f1c20234be3461dc3af93099da2cac99fc12
|
d21d46c70f45f3fd81afba38c0813692dafccdf0
|
refs/heads/master
| 2020-12-31T08:09:15.155354 | 2016-07-13T09:07:47 | 2016-07-13T09:07:47 | 62,806,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os.path
import pickle
from datetime import datetime
import traceback
from PyQt5.QtWidgets import QMainWindow, QTreeWidgetItem, QMenu, QFileDialog, QMessageBox#, QInputDialog
from PyQt5.QtGui import QBrush, QCursor
from PyQt5.QtCore import QTimer, pyqtSlot, Qt, QPoint
import serial
from serial.tools.list_ports import comports
from Ui_mainwindow import Ui_MainWindow
from packet import PacketParser
class MainWindow(QMainWindow):
    """Meter-reading GUI: loads a meter list from meters.txt, polls meters
    over a serial-attached radio, parses reply frames and shows energy
    readings per meter in a tree widget.

    State machine: ``txtimer`` drives send() (next request / retries),
    ``timer`` drives update() (drain serial input and parse frames).
    """
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # meters: 12-digit address strings; meteritems: tree rows still
        # waiting for a successful read (rows are removed as they succeed).
        self.meters = []
        self.meteritems = []
        # meters.txt: whitespace-separated columns; column 3 is the meter
        # address (left-padded to 12 digits). '#' lines and blanks skipped.
        with open('meters.txt') as f:
            for line in f:
                line = line.strip()
                if line.startswith('#') or not line:
                    continue
                minfo = line.split()
                # addr.
                minfo[2] = minfo[2][:12].zfill(12)
                self.meters.append(minfo[2])
                item = QTreeWidgetItem(None, minfo)
                self.ui.treeWidget.addTopLevelItem(item)
                self.meteritems.append(item)
        self.ui.treeWidget.resizeColumnToContents(0)
        self.ui.treeWidget.resizeColumnToContents(1)
        self.ui.treeWidget.resizeColumnToContents(2)
        self.ui.progressBar.setMaximum(len(self.meters))

        # Offer every serial port present on the machine.
        self.ui.comboBox_port.addItems([i[0] for i in comports()])

        # timer: RX polling; txtimer: single-shot scheduler for the next TX.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.txtimer = QTimer(self)
        self.txtimer.timeout.connect(self.send)
        self.txtimer. setSingleShot(True)

        self.ser = None
        # Accumulated raw serial bytes awaiting frame extraction.
        self.buf = bytearray()
        # Rolling packet index (list so it mutates in place).
        self.pktidx = [0]
        self.dstmeters = self.meters

        # Disabled simulation hooks (kept for local testing):
        # self.rxd = 0
        # self.binfo = ['aRoute', 0, 0, 0, self.ui.lineEdit_localaddr.text(), '']
        # self.xinfo = {'data': '68 77 44 10 03 00 00 68 91 18 33 32 34 33 AC 34 33 33 58 33 33 33 95 33 33 33 99 33 33 33 58 33 33 33 9B 16'}

    def update(self):
        """RX poll: drain the serial port, find a frame delimited by
        FE FE FE FE, parse it and update the matching meter's row."""
        # Disabled simulated-receive path (mirrors the live path below):
        # if self.rxd:
        #     self.rxd = 0
        #     baseinfo = self.binfo
        #     extinfo = self.xinfo
        #     ... (same handling as the live branch)

        # pyserial 2.7 renamed in_waiting; support both APIs.
        if serial.VERSION == '2.7':
            inwaiting = self.ser.inWaiting()
        else:
            inwaiting = self.ser.in_waiting
        if inwaiting:
            self.buf.extend(self.ser.read(inwaiting))
            #print(self.buf)
        i = self.buf.find(b'\xFE\xFE\xFE\xFE')
        if i != -1:
            # from wl, no crc.
            # buf[i+4] is the payload length; wait until the whole frame
            # (+2 trailing bytes) has arrived.
            if len(self.buf) > i+4 and len(self.buf)>= i + self.buf[i+4] + 5 +2:
                #print(' '.join('%02X'%ii for ii in self.buf[i+8: i+self.buf[i+4]+5]))
                try:
                    baseinfo, extinfo = PacketParser(self.buf[i+8: i+self.buf[i+4]+5])
                    #print(baseinfo)
                    #print(extinfo)
                    # Accept only routed frames addressed to us.
                    if baseinfo[4] == self.ui.lineEdit_localaddr.text().strip() and baseinfo[0] == 'aRoute':
                        item = self.ui.treeWidget.topLevelItem(self.meters.index(baseinfo[5]))
                        item.setText(8, extinfo['data'])
                        # Payload bytes are offset by 0x33 on the wire;
                        # readings are 4-byte little-endian BCD values.
                        d = [i-0x33 for i in bytes.fromhex(extinfo['data'])[14:-2]]
                        for i in range(0, 20 if len(d)>20 else len(d), 4):
                            item.setText(3+i//4, '%02X%02X%02X.%02X' % (d[i+3], d[i+2], d[i+1], d[i]))
                        for i in range(3, item.columnCount()):
                            self.ui.treeWidget.resizeColumnToContents(i)
                        # While a reading run is active, mark this meter done
                        # and either finish the run or schedule the next TX.
                        if self.ui.pushButton_start.text() == '停止抄表' and item is self.meteritems[self.i]:
                            self.meteritems.remove(item)
                            item.setBackground(2, Qt.green)
                            oks = len(self.meters)-len(self.meteritems)
                            self.ui.progressBar.setValue(oks)
                            okrate = oks*100//len(self.meters)
                            self.ui.label_currate.setText('%d' % okrate)
                            if not self.meteritems or okrate == self.ui.spinBox_okrate.value():
                                self.txtimer.stop()
                                self.ui.pushButton_start.setText('开始抄表')
                                self.ui.label_curmeter.setText('------------')
                                self.ui.label_retry.setText('0')
                            else:
                                #self.retry = self.ui.spinBox_retry.value()
                                self.retry = 0
                                self.txtimer.start(0)
                except:
                    # Malformed frame: drop it silently and resynchronise.
                    pass
                del self.buf[: i+self.buf[i+4]+5+2]

    def send(self):
        """TX step: retry the current meter, or advance to the next one
        (and next round) when the retry budget is exhausted."""
        if self.retry == self.ui.spinBox_retry.value():
            self.retry = 0
            self.i += 1
            if self.i == len(self.meteritems):
                self.i = 0
                self.round += 1
                self.ui.label_curround.setText(str(self.round))
                if self.round == self.ui.spinBox_round.value():
                    # Round limit reached: stop the run.
                    self.ui.pushButton_start.setText('开始抄表')
                    self.ui.label_curmeter.setText('------------')
                    self.ui.label_retry.setText('0')
                    return
        item = self.meteritems[self.i]
        self.ui.treeWidget.setCurrentItem(item)
        #item.setBackground(2, Qt.cyan)
        meter = item.text(2)
        self.read_meter(meter)
        self.ui.label_curmeter.setText(meter)
        self.retry += 1
        self.ui.label_retry.setText(str(self.retry))
        # Schedule the next TX after the configured interval (seconds).
        self.txtimer.start(self.ui.spinBox_interval.value() * 1000)
        # Disabled simulation hook:
        # if 1:#self.retry == self.ui.spinBox_retry.value():
        #     if meter in ['000000000002']:
        #         self.rxd=1
        #         self.binfo[5] = meter

    def read_meter(self, meter):
        """Build and transmit a read-request frame for ``meter`` (12-digit
        address). The appended pkt645 payload looks like a DL/T 645 meter
        frame -- TODO confirm against the protocol spec."""
        # Addresses are little-endian on the wire.
        dstaddr = bytearray.fromhex(meter)
        dstaddr.reverse()
        srcaddr = bytearray.fromhex(self.ui.lineEdit_localaddr.text())
        srcaddr.reverse()
        # Frame template: preamble + MAC header + NWK/APS header.
        pkt = bytearray.fromhex(
            'FE FE FE FE 25 00 01 24 '
            '41 CD 5E FF FF 80 24 59 01 00 14 88 00 00 00 10 00 '
            '3C 80 24 59 01 00 14 AA AA AA AA AA AA 11 02 13 00 ')
        pkt[10] = self.pktidx[0] # mac index.
        pkt[13:19] = dstaddr
        pkt[19:25] = srcaddr
        pkt[26:32] = dstaddr
        pkt[32:38] = srcaddr
        pkt[38] = (pkt[38] & 0x0F) + ((self.pktidx[0] % 16) << 4) # nwk index.
        pkt[40] = self.i % 0x100 # aps index.
        # User-editable meter payload; patch in the address and checksum.
        pkt645 = bytearray.fromhex(self.ui.lineEdit_pkt645.text())
        pkt645[1:7] = dstaddr
        pkt645[-2] = sum(pkt645[:-2]) % 0x100
        pkt.extend(pkt645)
        # Fix up total length, channel byte and XOR header check.
        pkt[4] = len(pkt) - 5
        pkt[5] = int(self.ui.comboBox_chnlgrp.currentText())*2 + int(self.ui.comboBox_chnl.currentText())
        pkt[7] = pkt[4] ^ pkt[5] ^ pkt[6]
        self.ser.write(pkt)
        # Advance the rolling 8-bit packet index.
        self.pktidx[0] += 1
        if self.pktidx[0] > 255:
            self.pktidx[0] = 0

    @pyqtSlot()
    def on_pushButton_swithport_clicked(self):
        """Toggle the serial port open/closed (button text tracks state)."""
        if self.ui.pushButton_swithport.text() == '打开':
            port = self.ui.comboBox_port.currentText()
            try:
                self.ser = serial.Serial(port, 115200, parity=serial.PARITY_EVEN, timeout=0)
                # Start RX polling and push the selected channel to the radio.
                self.timer.start(1)
                self.set_channel(int(self.ui.comboBox_chnlgrp.currentText()), int(self.ui.comboBox_chnl.currentText()))
                self.ui.pushButton_swithport.setText('关闭')
            except:
                self.timer.stop()
                QMessageBox.warning(self, '警告', '无法打开 %s!' % port)
        else:
            self.ser.close()
            self.ser = None
            self.ui.pushButton_swithport.setText('打开')

    @pyqtSlot()
    def on_pushButton_start_clicked(self):
        """Start or stop a reading run (button text tracks state)."""
        if self.ui.pushButton_start.text() == '开始抄表':
            # Refuse to start when the success-rate target is already met.
            if int(self.ui.label_currate.text()) == self.ui.spinBox_okrate.value() or not self.meteritems:
                QMessageBox.information(self, '提示', '条件已满足,你可以重启软件来重新抄表!')
                return
            if self.ser:
                self.i = 0
                self.retry = 0
                self.round = 0
                self.ui.label_curround.setText('0')
                self.ui.label_retry.setText('0')
                self.txtimer.start(0)
                self.ui.pushButton_start.setText('停止抄表')
            else:
                QMessageBox.warning(self, '警告', '请先打开串口!')
        else:
            self.txtimer.stop()
            self.ui.pushButton_start.setText('开始抄表')

    def set_channel(self, chnlgrp, chnl):
        """Send a short control frame selecting the radio channel."""
        pkt = bytearray.fromhex('FE FE FE FE 03 00 01 24')
        pkt[5] = chnlgrp*2 + chnl
        pkt[7] = pkt[4] ^ pkt[5] ^ pkt[6]
        if self.ser:
            self.ser.write(pkt)
        else:
            QMessageBox.warning(self, '警告', '请先打开串口!')

    @pyqtSlot(int)
    def on_comboBox_chnlgrp_currentIndexChanged(self, index):
        # Channel group changed: re-apply channel selection.
        self.set_channel(index, int(self.ui.comboBox_chnl.currentText()))

    @pyqtSlot(int)
    def on_comboBox_chnl_currentIndexChanged(self, index):
        # Channel changed: re-apply channel selection.
        self.set_channel(int(self.ui.comboBox_chnlgrp.currentText()), index)

    @pyqtSlot()
    def on_pushButton_save_clicked(self):
        """Export the first eight tree columns as a tab-separated file."""
        t = datetime.now().strftime('%y-%m-%d_%H-%M-%S')
        file = QFileDialog.getSaveFileName(self, 'Save file', './result/' + t, 'data file(*.txt)')[0]
        if file:
            with open(file, 'w') as f:
                f.write('户号\t姓名\t表号\t总电能\t费率1电能\t费率2电能\t费率3电能\t费率4电能\n')
                for i in range(self.ui.treeWidget.topLevelItemCount()):
                    item = self.ui.treeWidget.topLevelItem(i)
                    colstrs = []
                    for col in range(8):
                        colstrs.append(item.text(col))
                    f.write('\t'.join(colstrs))
                    f.write('\n')

    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_itemDoubleClicked(self, item, column):
        """Manually trigger a single read of the double-clicked meter."""
        if self.ser:
            self.i = self.ui.treeWidget.indexOfTopLevelItem(item)
            self.read_meter(item.text(0))
        else:
            QMessageBox.warning(self, '警告', '请先打开串口!')
|
UTF-8
|
Python
| false | false | 12,097 |
py
| 7 |
mainwindow.py
| 3 | 0.50956 | 0.477891 | 0.000337 | 262 | 44.312977 | 141 |
pmplewa/GC-CloudFactory
| 5,282,809,780,850 |
0adf0b5fe9f65d91f5100670d80e1a7211f7faa1
|
f7a14026c0761f8f120134285077b9f2e9f8ab35
|
/cloudfactory/plot.py
|
e99f109d12fe9dee3211cf65c7805045b943c708
|
[] |
no_license
|
https://github.com/pmplewa/GC-CloudFactory
|
04a4c94e405efa4b6158ae1f76cce65882624e27
|
4183d423992ef5f070f9cf119bf1b65450d8c7ea
|
refs/heads/master
| 2019-02-01T07:44:27.736245 | 2018-05-12T20:23:26 | 2018-05-12T20:23:26 | 99,001,580 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ipywidgets import interactive, IntSlider
import matplotlib.pyplot as plt
import numpy as np
from .kde import fit_kde1D, fit_kde2D
__all__ = ["make_velocityplot", "make_skyplot", "make_skyvelplot",
"make_pvplot"]
colors = ["C0", "C1", "C2"]
def parse_args(args):
    """Validate a group of simulations and return (sims, shared_times).

    Requires at least one simulation, no more than there are plot
    colors, and an identical time grid across all of them.
    """
    sims = list(args)
    # Bounded by the available plot colors.
    assert 0 < len(sims) <= len(colors)
    times = sims[0].times
    # Every simulation must share the first one's time grid exactly.
    for sim in sims:
        assert np.array_equal(times, sim.times)
    return sims, times
def time_slider(n_steps):
    # Integer slider over the available time steps [0, n_steps-1],
    # used as the interactive control for the animated plots.
    return IntSlider(min=0, max=n_steps-1, step=1, description="time step")
def make_velocityplot(*args, animate=True, figsize=(10, 2)):
    """Plot the distribution of radial velocities."""
    sims, times = parse_args(args)
    n_steps = len(times)
    # 1D KDE of the radial velocity ("vz") for each simulation and time
    # step; np.transpose reorders the leading axes to (sim, time_step).
    x_val, z_val = np.transpose([[
        fit_kde1D(sim.data["vz"].loc[time_step].values,
            120, x_min=-3000, x_max=3000)
        for time_step in range(n_steps)] for sim in sims],
        axes=[2, 0, 1, 3])
    if animate:
        # Interactive variant: one frame per time step, driven by a slider.
        def plot(time_step):
            fig, ax = plt.subplots(figsize=figsize)
            ax.set_ylim([0, np.max(z_val[:,time_step])])
            for i in range(len(sims)):
                ax.plot(x_val[i,time_step],
                        z_val[i,time_step],
                        color=colors[i])
            plt.show()
        return interactive(plot, time_step=time_slider(n_steps))
    else:
        # Static variant: draw every time step at once, offsetting each
        # curve vertically by dz_val (later steps sit lower on the page).
        fig, ax = plt.subplots(figsize=figsize)
        dz_val = 0.001
        ax.set_ylim([-dz_val, (n_steps+1)*dz_val + np.max(z_val)])
        ax.set_yticks([])  # the y offsets are artificial, so hide the ticks
        for i, sim in enumerate(sims):
            for time_step in range(n_steps):
                dz = (n_steps - time_step)*dz_val
                ax.plot(x_val[i,time_step],
                        z_val[i,time_step]+dz,
                        color=colors[i])
        plt.show()
def make_skyplot(*args, animate=True, figsize=(6, 6)):
    """Plot the distribution of on-sky positions."""
    sims, times = parse_args(args)
    n_steps = len(times)
    if animate:
        # 2D KDE of on-sky (x, y) per simulation and time step; transpose
        # reorders the leading axes to (sim, time_step).
        x_val, y_val, z_val = np.transpose([[
            fit_kde2D(sim.data["x"].loc[time_step].values,
                sim.data["y"].loc[time_step].values,
                80e-3, x_min=-1, x_max=1, y_min=-1, y_max=1)
            for time_step in range(n_steps)] for sim in sims],
            axes=[2, 0, 1, 3, 4])
        def plot(time_step):
            fig, ax = plt.subplots(figsize=figsize)
            ax.set_aspect("equal")
            # Common contour levels across simulations for comparability;
            # the lowest (zero) level is dropped.
            levels = np.linspace(0, np.max(z_val[:,time_step]), 5)[1:]
            for i in range(len(sims)):
                ax.contour(x_val[i,time_step],
                           y_val[i,time_step],
                           z_val[i,time_step],
                           levels=levels,
                           colors=colors[i])
            plt.show()
        return interactive(plot, time_step=time_slider(n_steps))
    else:
        # Static variant: scatter the center-of-mass track instead of
        # per-step density contours.
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_aspect("equal")
        for i, sim in enumerate(sims):
            ax.scatter(sim.com_particle["x"].values,
                       sim.com_particle["y"].values,
                       color=colors[i])
        plt.show()
def make_skyvelplot(sim, figsize=(6, 6)):
    """Plot the radial velocity as a function of on-sky position."""

    def draw_frame(time_step):
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_aspect("equal")
        # Hexagonal binning of particle positions, colored by vz.
        ax.hexbin(sim.data["x"].loc[time_step].values,
                  sim.data["y"].loc[time_step].values,
                  C=sim.data["vz"].loc[time_step].values,
                  extent=(-1, 1, -1, 1),
                  vmin=-3000, vmax=3000,
                  gridsize=50,
                  mincnt=1,
                  cmap="RdBu_r")
        plt.show()

    return interactive(draw_frame, time_step=time_slider(len(sim.times)))
def make_pvplot(*args, animate=True, figsize=(10, 6)):
    """Plot the radial velocity as a function of path length along the orbit."""
    sims, times = parse_args(args)
    n_steps = len(times)
    if animate:
        # 2D KDE in (path length "p", radial velocity "vz") per simulation
        # and time step.  Step 0 is skipped (range starts at 1) —
        # presumably the path length is not meaningful at the initial
        # step; confirm against the simulation code.
        x_val, y_val, z_val = np.transpose([[
            fit_kde2D(sim.data["p"].loc[time_step].values,
                sim.data["vz"].loc[time_step].values,
                80e-3, 120, x_min=-0.2, x_max=1, y_min=-5000, y_max=5000)
            for time_step in range(1, n_steps)] for sim in sims],
            axes=[2, 0, 1, 3, 4])
        def plot(time_step):
            fig, ax = plt.subplots(figsize=figsize)
            # Common contour levels across simulations (zero level dropped).
            levels = np.linspace(0, np.max(z_val[:,time_step]), 5)[1:]
            for i in range(len(sims)):
                ax.contour(x_val[i,time_step],
                           y_val[i,time_step],
                           z_val[i,time_step],
                           levels=levels,
                           colors=colors[i])
            plt.show()
        # n_steps-1 frames: slider index i corresponds to time step i+1.
        return interactive(plot, time_step=time_slider(n_steps-1))
    else:
        # Static variant: scatter the center-of-mass track in (p, vz).
        fig, ax = plt.subplots(figsize=figsize)
        for i, sim in enumerate(sims):
            ax.scatter(sim.com_particle["p"].values,
                       sim.com_particle["vz"].values,
                       color=colors[i])
        plt.show()
|
UTF-8
|
Python
| false | false | 5,313 |
py
| 5 |
plot.py
| 3 | 0.500847 | 0.481837 | 0 | 148 | 34.898649 | 80 |
geomstats/geomstats
| 18,459,769,449,528 |
11f3bbe032f2b46b54dad6a2b43baf6c7d7aa4b1
|
16d430421e56ef878bf0d39d873931ffdbf2a523
|
/geomstats/_backend/autograd/autodiff.py
|
e8a08caf550cce8d297418e336d9bf5273aab95b
|
[
"MIT"
] |
permissive
|
https://github.com/geomstats/geomstats
|
c0b24527e2b0c66f5dffdd40419660d5db84dc03
|
78a5778b5d5ce85225fd97e765d43047fb4526d1
|
refs/heads/master
| 2023-09-04T03:34:41.740174 | 2023-08-30T05:27:08 | 2023-08-30T05:27:08 | 108,200,238 | 1,017 | 237 |
MIT
| false | 2023-09-10T09:17:34 | 2017-10-25T00:44:57 | 2023-09-06T19:39:56 | 2023-09-10T09:17:33 | 203,659 | 985 | 219 | 209 |
Jupyter Notebook
| false | false |
"""Wrapper around autograd functions to be consistent with backends."""
import autograd as _autograd
import autograd.numpy as _np
from autograd import jacobian
def custom_gradient(*grad_funcs):
    """Create a decorator that allows a function to define its custom gradient(s).

    Parameters
    ----------
    *grad_funcs : callables
        Custom gradient functions, one per positional argument of the
        decorated function.

    Returns
    -------
    decorator : callable
        This decorator, used on any function func, associates the
        input grad_funcs as the gradients of func.
    """

    def decorator(func):
        """Decorate a function to define its custom gradient(s).

        Parameters
        ----------
        func : callable
            Function whose gradients will be assigned by grad_funcs.

        Returns
        -------
        wrapped_function : callable
            Function func with gradients specified by grad_funcs.
        """
        wrapped_function = _autograd.extend.primitive(func)

        def _make_vjp_maker(grad_func):
            """Build the VJP maker corresponding to one gradient function."""

            def vjp_maker(ans, *args, **kwargs):
                grads = grad_func(*args, **kwargs)
                # A scalar gradient multiplies the upstream cotangent
                # directly; array gradients need trailing axes added to the
                # cotangent so broadcasting lines up.
                if isinstance(grads, float):
                    return lambda g: g * grads
                if grads.ndim == 2:
                    return lambda g: g[..., None] * grads
                if grads.ndim == 3:
                    return lambda g: g[..., None, None] * grads
                return lambda g: g * grads

            return vjp_maker

        # Register one VJP maker per gradient function.  defvjp accepts any
        # number of makers positionally, so this works for an arbitrary
        # number of arguments (the previous implementation was limited to
        # three and raised NotImplementedError beyond that).
        _autograd.extend.defvjp(
            wrapped_function,
            *[_make_vjp_maker(grad_func) for grad_func in grad_funcs],
        )
        return wrapped_function

    return decorator
def _grad(func, argnums=0):
    """Gradient of ``func`` that handles both single and batched points."""

    def _wrapped_grad(*point, **kwargs):
        # Batched inputs (ndim >= 2) need the elementwise gradient so each
        # row gets its own gradient; otherwise use the plain gradient.
        batched = hasattr(point[0], "ndim") and point[0].ndim >= 2
        if batched:
            return _autograd.elementwise_grad(func, argnum=argnums)(*point, **kwargs)
        return _autograd.grad(func, argnum=argnums)(*point, **kwargs)

    return _wrapped_grad
@_autograd.differential_operators.unary_to_nary
def _elementwise_value_and_grad(fun, x):
    # same as autograd.elementwise_grad, but also returning ans
    vjp, ans = _autograd.differential_operators._make_vjp(fun, x)
    if _autograd.differential_operators.vspace(ans).iscomplex:
        raise TypeError("Elementwise_grad only applies to real-output functions.")
    # Pulling back an all-ones cotangent yields the per-element gradient.
    # NOTE: this relies on autograd's private _make_vjp/vspace API.
    return ans, vjp(_autograd.differential_operators.vspace(ans).ones())
def value_and_grad(func, argnums=0, to_numpy=False):
    """Wrap autograd value_and_grad function.

    Parameters
    ----------
    func : callable
        Function whose value and gradient values
        will be computed.
    argnums : int
        Argument(s) to differentiate with respect to.
    to_numpy : bool
        Unused. Here for API consistency.

    Returns
    -------
    value_and_grad : callable
        Function that returns func's value and
        func's gradients' values at its inputs args.
    """

    def _value_and_grad(*point, **kwargs):
        # Batched points (ndim >= 2) go through the elementwise variant.
        batched = hasattr(point[0], "ndim") and point[0].ndim >= 2
        if batched:
            return _elementwise_value_and_grad(func, argnum=argnums)(*point, **kwargs)
        return _autograd.value_and_grad(func, argnum=argnums)(*point, **kwargs)

    return _value_and_grad
@_autograd.differential_operators.unary_to_nary
def _value_and_jacobian_op(fun, x):
    # same as autograd.jacobian, but also returning ans
    vjp, ans = _autograd.differential_operators._make_vjp(fun, x)
    ans_vspace = _autograd.differential_operators.vspace(ans)
    # Jacobian shape: output shape followed by input shape.
    jacobian_shape = ans_vspace.shape + _autograd.differential_operators.vspace(x).shape
    # One VJP per standard-basis cotangent of the output space gives one
    # row of the Jacobian each.  NOTE: relies on autograd's private API.
    grads = map(vjp, ans_vspace.standard_basis())
    return ans, _np.reshape(_np.stack(grads), jacobian_shape)
def _value_and_jacobian(fun, point_ndim=1):
    """Vectorize ``_value_and_jacobian_op`` over a batch of points."""

    def _value_and_jacobian_vec(x):
        if x.ndim == point_ndim:
            # Single point: delegate directly.
            return _value_and_jacobian_op(fun)(x)
        # Batch: evaluate per point, then stack values and jacobians.
        pairs = [_value_and_jacobian_op(fun)(one_point) for one_point in x]
        values, jacs = zip(*pairs)
        return _np.stack(values), _np.stack(jacs)

    return _value_and_jacobian_vec
def jacobian_vec(fun, point_ndim=1):
    """Wrap autograd jacobian function.

    We note that the jacobian function of autograd is not vectorized
    by default, thus we modify its behavior here.

    Default autograd behavior:
    If the jacobian for one point of shape (2,) is of shape (3, 2),
    then calling the jacobian on 4 points with shape (4, 2) will
    be of shape (3, 2, 4, 2).

    Modified behavior:
    Calling the jacobian on 4 points gives a tensor of shape (4, 3, 2).

    We use a for-loop to allow this function to be vectorized with
    respect to several inputs in point, because the flag vectorize=True
    fails.

    Parameters
    ----------
    fun : callable
        Function whose jacobian values
        will be computed.
    point_ndim : int
        Number of dimensions of a single point.

    Returns
    -------
    func_with_jacobian : callable
        Function that returns func's jacobian
        values at its inputs args.
    """
    # Build the jacobian wrapper once instead of once per call / per point
    # (mirrors how hessian_vec hoists its hessian_func).
    jacobian_of_fun = jacobian(fun)

    def _jac(x):
        if x.ndim == point_ndim:
            return jacobian_of_fun(x)
        return _np.stack([jacobian_of_fun(one_x) for one_x in x])

    return _jac
def hessian(fun, func_out_ndim=None):
    """Wrap autograd hessian function.

    For consistency with the other backend, we convert this to a tensor
    of shape (dim, dim).

    Parameters
    ----------
    fun : callable
        Function whose hessian values
        will be computed.
    func_out_ndim : int
        Unused. Here for API consistency.

    Returns
    -------
    func_with_hessian : callable
        Function that returns func's hessian
        values at its inputs args.
    """

    def _hess(point):
        return _autograd.hessian(fun)(point)

    return _hess
def hessian_vec(func, point_ndim=1, func_out_ndim=None):
    """Wrap autograd hessian function, vectorized over points.

    autograd's hessian is not vectorized by default; this wrapper forces
    a (n_points, dim, dim) tensor when several points are given.

    Parameters
    ----------
    func : callable
        Function whose hessian values
        will be computed.
    point_ndim : int
        Number of dimensions of a single point.
    func_out_ndim : int
        Unused. Here for API consistency.

    Returns
    -------
    func_with_hessian : callable
        Function that returns func's hessian
        values at its inputs args.
    """
    hessian_of_func = hessian(func)

    def _hess(point):
        if point.ndim == point_ndim:
            return hessian_of_func(point)
        return _np.stack([hessian_of_func(single) for single in point])

    return _hess
def jacobian_and_hessian(func, func_out_ndim=None):
    """Wrap autograd jacobian and hessian functions.

    Parameters
    ----------
    func : callable
        Function whose jacobian and hessian values
        will be computed.
    func_out_ndim : int
        Unused. Here for API consistency.

    Returns
    -------
    func_with_jacobian_and_hessian : callable
        Function that returns func's jacobian and
        func's hessian values at its inputs args.
    """
    # The hessian is the jacobian of the (vectorized) jacobian.
    jacobian_of_func = jacobian_vec(func)
    return _value_and_jacobian(jacobian_of_func)
def value_jacobian_and_hessian(func, func_out_ndim=None):
    """Compute value, jacobian and hessian.

    func is only called once.

    Parameters
    ----------
    func : callable
        Function whose jacobian and hessian values
        will be computed.
    func_out_ndim : int
        Unused. Here for API consistency.
    """
    # Shared cache: the inner jacobian pass stores func's (traced) values
    # so the outer pass can return them without re-evaluating func.
    cache = []

    def _cached_value_and_jacobian(fun, return_cached=False):
        def _jac(x):
            ans, jac = _value_and_jacobian(fun)(x)
            if not return_cached:
                # Inner pass: remember the value, hand the jacobian to the
                # outer differentiation.
                cache.append(ans)
                return jac
            # NOTE(review): `._value` unwraps autograd's Box tracer —
            # presumably the cached entries are always traced here; confirm
            # against autograd's internals before relying on this.
            value = _np.stack(cache)._value if len(cache) > 1 else cache[0]._value
            cache.clear()
            return value, ans, jac

        return _jac

    # Outer call differentiates the inner jacobian, yielding the hessian.
    return _cached_value_and_jacobian(
        _cached_value_and_jacobian(func), return_cached=True
    )
|
UTF-8
|
Python
| false | false | 8,767 |
py
| 410 |
autodiff.py
| 342 | 0.602258 | 0.59781 | 0 | 301 | 28.126246 | 88 |
naong95/baekjoonPython
| 8,873,402,437,277 |
373c8d6f328a1a3edcc2b06d25a632c84542147d
|
941332dfff208537807ce3ae14ca9546c04acd51
|
/NO1110.py
|
5a4d0706e9184160fe84b2edc87c5a79ddbeac71
|
[] |
no_license
|
https://github.com/naong95/baekjoonPython
|
362cafbe8256a282be33e3bbc257c1b474fdeaa9
|
3c6e213b9d6da7e103793127a51a9dcdb8878459
|
refs/heads/master
| 2023-09-05T06:41:08.396515 | 2021-11-07T06:12:35 | 2021-11-07T06:12:35 | 383,971,566 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def cycle_length(n):
    """Return the length of the digit-cycle (Baekjoon 1110) starting at n.

    Treat n (0 <= n <= 99) as a two-digit number "ab".  One step maps it
    to "b, (a+b) mod 10": the old units digit becomes the new tens digit,
    and the units digit of the digit sum becomes the new units digit.
    The count of steps until the starting value reappears is returned.
    """
    if n < 10:
        # Represent a one-digit input d as the two-digit value "d0".
        # "d0" maps to "0d" in one step, so it lies on the same cycle as
        # the canonical "0d" representation and yields the same length.
        n = n * 10
    k = n
    steps = 0
    while True:
        tens, units = divmod(k, 10)
        # (tens + units) % 10 equals tens + units whenever the sum < 10,
        # so a single modulo covers both branches of the original code.
        k = units * 10 + (tens + units) % 10
        steps += 1
        if k == n:
            return steps


if __name__ == '__main__':
    # Original script behavior: read n from stdin, print the cycle length.
    n = int(input())
    print(cycle_length(n))
|
UTF-8
|
Python
| false | false | 289 |
py
| 16 |
NO1110.py
| 16 | 0.314879 | 0.252595 | 0 | 23 | 11.565217 | 35 |
15051882416/food_shop
| 6,897,717,477,729 |
f575c4da3a9640cf10327391438971678d55ab4a
|
60c18eefd903957622a8bd9dc2b7c8522d13552b
|
/app/validators/api_forms/my_forms.py
|
540b2b0d8f98f489f4c968a1e9468f74f646b753
|
[] |
no_license
|
https://github.com/15051882416/food_shop
|
f2868ac7ca63e9e8e36564f979c0c9585e5a22f0
|
0033580a08da6e7f043153e5d3dd382333a9eac2
|
refs/heads/master
| 2022-03-03T01:45:25.648296 | 2019-02-21T03:25:58 | 2019-02-21T03:25:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired
from app.validators.base import BaseForm
class AddCommentForm(BaseForm):
    """Validation form for posting a review comment on an order."""

    # Order serial number; message: "order number must not be empty".
    order_sn = StringField(validators=[DataRequired(message='订单号不能为空')])
    # Rating score; defaults to the maximum of 10.
    score = IntegerField(default=10)
    # Review body; message: "you have not reviewed yet, please fill in".
    content = StringField(validators=[DataRequired(message='您还没有评价, 请填写评价内容')])
|
UTF-8
|
Python
| false | false | 396 |
py
| 47 |
my_forms.py
| 45 | 0.789326 | 0.783708 | 0 | 10 | 34.6 | 79 |
GalinaDimitrova/Hack
| 206,158,464,127 |
fcafaf3b99110edb351a7b73b045fadb5a1990ff
|
be1a45b4ee526ec3cd81a2bcd06404db90c097fb
|
/week0/1-upr/10--is_number_balanced.py
|
9b37880001d079bed1768f8a648580211efb7f17
|
[] |
no_license
|
https://github.com/GalinaDimitrova/Hack
|
c48e69c80678fa24937ca7dd4a36b1050e05d66e
|
186e6f3520183565765569e63b39b25249c82f11
|
refs/heads/master
| 2021-01-25T08:55:23.349341 | 2014-12-29T23:25:54 | 2014-12-29T23:25:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def sum_of_digits(n):
    """Return the sum of the decimal digits of abs(n)."""
    n = abs(n)
    total = 0
    while n > 0:
        total += n % 10
        n = n // 10
    return total


def is_number_balanced(n):
    """Print and return whether n is 'balanced'.

    A number is balanced when the digit sum of its first half equals the
    digit sum of its second half; for odd lengths the middle digit is
    ignored.  Single-digit numbers are balanced by definition.

    The result is printed (matching the original script's behavior) and
    also returned as a bool.
    """
    if n < 10:
        print(True)
        return True
    length = len(str(n))
    half = length // 2
    # Suffix: the low `half` digits.  Prefix: the high `half` digits —
    # dividing by 10 ** (length - half) also skips the middle digit when
    # the length is odd, which unifies the previously duplicated even/odd
    # branches.
    suffix = n % 10 ** half
    prefix = n // 10 ** (length - half)
    balanced = sum_of_digits(prefix) == sum_of_digits(suffix)
    print(balanced)
    return balanced
# Exercise the checker on a few sample inputs (prints True/False for each).
for sample in (9, 11, 13, 121, 4518, 28471, 1238033):
    is_number_balanced(sample)
|
UTF-8
|
Python
| false | false | 1,274 |
py
| 93 |
10--is_number_balanced.py
| 86 | 0.481947 | 0.441915 | 0 | 47 | 26.085106 | 65 |
iMu21/Dynamic-Programming
| 13,597,866,508,122 |
e9a2401008ba67cfb33ee1c98b88fd7a8ef92ae9
|
f8b441caa20aaf7b4e93d6c22a858f4c12b6f7ec
|
/Palindromic Partitioning.py
|
4f378cba2cf783ecbe8bffab08d43733274dbc7e
|
[] |
no_license
|
https://github.com/iMu21/Dynamic-Programming
|
1d0219e80ad7f77dfc942737c7f4a75c718d3678
|
05ed3ee5bf743564c8d137834fc28290fcecf4d3
|
refs/heads/master
| 2020-05-19T13:50:31.395730 | 2019-05-14T16:44:45 | 2019-05-14T16:44:45 | 185,048,451 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def MinimumPalindromePartition(s):
    """Return the minimum number of cuts partitioning s into palindromes.

    Classic O(n^2) dynamic program:
    1. is_pal[i][j] records whether s[i..j] is a palindrome, filled by
       increasing substring length.
    2. cuts[i] is the minimum number of cuts for the prefix s[0..i].

    An empty string needs no cuts (the original crashed on it).
    """
    length = len(s)
    if length == 0:
        return 0
    is_pal = [[False] * length for _ in range(length)]
    for i in range(length):
        is_pal[i][i] = True  # every single character is a palindrome
    for L in range(2, length + 1):
        for i in range(length - L + 1):
            j = i + L - 1
            if L == 2:
                is_pal[i][j] = s[i] == s[j]
            else:
                is_pal[i][j] = s[i] == s[j] and is_pal[i + 1][j - 1]
    cuts = [0] * length
    for i in range(length):
        if is_pal[0][i]:
            cuts[i] = 0
        else:
            # Upper bound: cut after every character of the prefix
            # (replaces the arbitrary `length + 1000` sentinel).
            cuts[i] = i
            for j in range(i):
                if is_pal[j + 1][i] and cuts[j] + 1 < cuts[i]:
                    cuts[i] = cuts[j] + 1
    return cuts[length - 1]
# Read the string from stdin and report the minimum palindromic cuts.
s = input()
print("Minimum cuts needed for Palindrome Partitioning is ", MinimumPalindromePartition(s))
|
UTF-8
|
Python
| false | false | 894 |
py
| 9 |
Palindromic Partitioning.py
| 9 | 0.552573 | 0.53132 | 0 | 26 | 33.384615 | 94 |
gabivoicu/recipes-app-django
| 4,647,154,631,762 |
c6c39eb06dd1fc8e44869b1243ab8bb165c025b4
|
141a21c07005e93034c97ab2093d3c994cdd5830
|
/mysite/recipes/urls.py
|
516f40a03b9d3c088e91ccff0669067a2a2c6193
|
[] |
no_license
|
https://github.com/gabivoicu/recipes-app-django
|
19fb47bb79227eff8f9ad5998aad13a926b8cffa
|
97c3fbb3fe4f81b88774697610362d1729b0be9d
|
refs/heads/master
| 2018-01-07T17:35:46.396766 | 2015-04-20T22:48:48 | 2015-04-20T22:48:48 | 33,158,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, url
from recipes import views
# URL routes for the recipes app (legacy Django 1.x patterns() style).
urlpatterns = patterns('',
    # ex: /recipes/  -> list of recipes
    url(r'^$', views.IndexView.as_view(), name='index'),
    # ex: /recipes/5/
    url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
    # ex: /recipes/5/edit/
    url(r'^(?P<pk>\d+)/edit/$', views.EditView.as_view(), name='edit'),
    # route for updating a recipe, ex: /recipes/5/update/
    url(r'^(?P<recipe_id>\d+)/update/$', views.update, name='update'),
)
|
UTF-8
|
Python
| false | false | 439 |
py
| 9 |
urls.py
| 4 | 0.626424 | 0.621868 | 0 | 12 | 35.583333 | 69 |
spconger/Householdtasks
| 17,532,056,512,718 |
313d3a4aa952cae4a37d91a1ba6c89cc02277dcc
|
c41010cc8383f6909785bee3eb51476834e5e5d1
|
/householdtasks/houseapp/views.py
|
57ca987a217de5968532cafc79a2ce6bde5731d7
|
[] |
no_license
|
https://github.com/spconger/Householdtasks
|
8ecea637c759d45227fed58cc4ce88fce06c93c4
|
94f681678567ba55d986c3a07c81df9065834a80
|
refs/heads/master
| 2022-02-25T00:44:13.697230 | 2019-07-22T02:38:42 | 2019-07-22T02:38:42 | 198,132,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render
from .models import Task
# Create your views here.
def index(request):
    """Render the home page showing the total number of tasks."""
    return render(
        request,
        'houseapp/index.html',
        context={'task_count': Task.objects.all().count()},
    )
def allTasks(request):
    """Render the page listing every task."""
    tasks = Task.objects.all()
    return render(request, 'houseapp/alltasks.html', {'task_list': tasks})
|
UTF-8
|
Python
| false | false | 408 |
py
| 9 |
views.py
| 6 | 0.693627 | 0.693627 | 0 | 14 | 28.214286 | 80 |
yiwangchunyu/CVTR
| 652,835,075,352 |
ef006694084676586c72aadec06a672accaff405
|
b476a55d6943953666f59334913bd5a594fad247
|
/crnn_ctc/predict_ctc.py
|
d92f7156f4a83de9cdc4808fee5b2cb78d9afc28
|
[] |
no_license
|
https://github.com/yiwangchunyu/CVTR
|
38e7bda64ba55d6f9a01ca6636f54c26a2c81a08
|
91940a92f2447a34e2bfd1d2fdbf3824818c13e7
|
refs/heads/master
| 2021-01-06T16:32:59.072967 | 2020-05-08T16:05:43 | 2020-05-08T16:05:43 | 241,398,728 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import numpy as np
import torch
from PIL import Image, ImageColor, ImageDraw, ImageFont
from torch.autograd import Variable
from torchvision.transforms import ToTensor
from tqdm import tqdm
import crnn_ctc, utils_ctc
def process_img(img_path, imgW=160, imgH=32):
    """Load an image and cut/pad it into one or more 32x280 RGB tiles.

    Each tile is padded with white to a 280/32 height:width ratio and then
    resized to 32x280 (the text runs vertically; resizeNormalize() rotates
    it before feeding the CRNN).  Images taller than 10x their width are
    sliced vertically into several tiles.

    Parameters
    ----------
    img_path : str
        Path of the image file to load.
    imgW, imgH : int
        Unused here; kept for backward compatibility with callers that
        pass the network input size.

    Returns
    -------
    list of PIL.Image.Image
        One or more 32x280 tiles covering the input image.
    """
    padding_color = (255, 255, 255)  # pad with white
    image = Image.open(img_path).convert('RGB')
    # Target height:width ratio of one tile before the final resize.
    ratio = 280 / 32

    def process_single(image_single):
        """Pad one slice to the target ratio and resize it to 32x280."""
        image_singleW, image_singleH = image_single.size
        target_singleH = int(image_singleW * ratio)
        if target_singleH > image_singleH:
            # Slice is too short for its width: pad extra rows below.
            image_pad = Image.new('RGB', (image_singleW, target_singleH), padding_color)
            image_pad = np.array(image_pad)
            image_pad[:image_singleH, :image_singleW, :] = np.array(image_single)
            image_pad = Image.fromarray(image_pad)
        else:
            # Slice is too tall for its width: pad extra columns right.
            target_single_W = int(image_singleH / ratio)
            image_pad = Image.new('RGB', (target_single_W, image_singleH), padding_color)
            image_pad = np.array(image_pad)
            image_pad[:image_singleH, :image_singleW, :] = np.array(image_single)
            image_pad = Image.fromarray(image_pad)
        return image_pad.resize((32, 280), Image.BILINEAR)

    res = []
    imageW, imageH = image.size
    if imageH / imageW <= 10:
        # Moderate aspect ratio: the whole image is one tile.
        res.append(process_single(image))
    else:
        # Very tall image: split vertically into `times` equal slices,
        # each close to the target ratio, and process each one.
        target_H = int(imageW * ratio)
        times = imageH // target_H + 1
        if imageH % target_H == 0:
            times -= 1
        eachH = imageH // times
        for i in range(times):
            piece = image.crop((0, i * eachH, imageW, (i + 1) * eachH))
            res.append(process_single(piece))
    return res
def resizeNormalize(image, imgW, imgH):
    """Convert a PIL image into a normalized grayscale tensor.

    The image is converted to grayscale, rotated 90 degrees (tiles from
    process_img hold the text vertically), resized to (imgW, imgH), and
    its values rescaled from [0, 1] to [-1, 1].
    """
    gray = image.convert('L').transpose(Image.ROTATE_90)
    gray = gray.resize((imgW, imgH), Image.BILINEAR)
    tensor = ToTensor()(gray)
    # In-place shift/scale: [0, 1] -> [-1, 1].
    return tensor.sub_(0.5).div_(0.5)
def predict(pth='expr/200/crnn_best_200.pth', img_path='', imgW=160, imgH=32, display=True):
    """Run CRNN+CTC text recognition on one image file.

    Parameters
    ----------
    pth : str
        Path of the trained CRNN checkpoint to load.
    img_path : str
        Path of the image to recognize (may be sliced into several tiles).
    imgW, imgH : int
        Network input width/height used by resizeNormalize().
    display : bool
        When True, render and save a visualization showing each raw CTC
        symbol next to the collapsed prediction.

    Returns
    -------
    (str, str)
        raw_text (per-frame CTC symbols, '-' marks blanks) and text
        (collapsed, deduplicated prediction).
    """
    with open('../data/images/alphabet.txt', encoding='utf-8') as f:
        alphabet = f.read()
    nclass = len(alphabet) + 1  # +1 for the CTC blank symbol
    res_path = 'expr/{}'.format(nclass - 1)
    if not os.path.exists(res_path):
        os.makedirs(res_path)
    converter = utils_ctc.strLabelConverter(alphabet)
    batch_size = 1

    # Build and load the network once.  The original re-created the model
    # and reloaded the checkpoint inside the per-tile loop, which is pure
    # overhead with identical results.
    crnn = crnn_ctc.CRNN(32, 1, nclass, 256)
    crnn.load_state_dict(torch.load(pth, map_location='cpu'))
    crnn.eval()

    text = ''
    raw_text = ''
    image_long = None
    images = process_img(img_path, imgW=imgW, imgH=imgH)
    for image in images:
        image_tensor = resizeNormalize(image, imgW, imgH)
        image_tensor_batch1 = image_tensor.view(1, *image_tensor.size())
        image_variable = Variable(image_tensor_batch1)
        preds = crnn(image_variable)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        # raw=False collapses repeats and removes blanks; raw=True keeps
        # the per-frame symbols for the visualization below.
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        raw_preds = converter.decode(preds.data, preds_size.data, raw=True)
        raw_text += raw_preds
        text += sim_preds
        # Stack tiles vertically to rebuild one long strip for display.
        if image_long is None:
            image_long = np.array(image)
        else:
            image_long = np.vstack((image_long, image))
    if display:
        image = Image.fromarray(image_long)
        raw_text_size = len(raw_text)
        # Scale the strip so each character row is legible next to the text.
        image = image.resize((image.size[0]*raw_text_size, image.size[1]*raw_text_size), Image.BILINEAR)
        raw_w = sim_w = image.size[0]//2
        image_dsp = Image.new('RGB', (image.size[0]+raw_w+sim_w, image.size[1]), ImageColor.getrgb('cornsilk'))
        image_dsp = np.array(image_dsp)
        image_dsp[:, image.size[0]+raw_w:, :] = ImageColor.getrgb('bisque')
        image_dsp[:image.size[1], :image.size[0], :] = np.array(image)
        image_dsp = Image.fromarray(image_dsp)
        draw = ImageDraw.Draw(image_dsp)
        rect_h = image_dsp.size[1]//raw_text_size
        for i in range(raw_text_size):
            # Left: one red box per CTC frame; middle: the raw symbol;
            # right: the symbol only when it survives CTC collapsing
            # (not a blank '-' and not a repeat of the previous frame).
            draw.rectangle((0, i*rect_h, image.size[0], (i+1)*rect_h), outline='red', width=5)
            draw.text((image.size[0]+raw_w//3, i*rect_h), raw_text[i], fill='red', font=ImageFont.truetype('../data/fonts/simkai.ttf', rect_h))
            if (i == 0 and raw_text[i] != '-') or (i > 0 and raw_text[i] != '-' and raw_text[i] != raw_text[i-1]):
                draw.text((image.size[0]+raw_w+sim_w//3, i * rect_h), raw_text[i], fill='red', font=ImageFont.truetype('../data/fonts/simkai.ttf', rect_h))
        image_dsp.show()
        image_dsp.save('{0}/{1}_res.png'.format(res_path, img_path.split('/')[-1].split('.')[0]))
    print(len(raw_text), raw_text)
    print(len(text), text)
    return raw_text, text
def predictBatch(root='../data/images/test/'):
    """Run predict() on every image file found under `root`."""
    for img_name in tqdm(os.listdir(root)):
        predict(pth='expr/800/crnn_best_800.pth', img_path=root + img_name)
if __name__=='__main__':
    # Batch-predict every test image; a single-image example is kept below.
    predictBatch()
    # print(predict(pth='expr/800/crnn_best_800.pth',img_path='../data/images/test/test_0000000009.png'))
|
UTF-8
|
Python
| false | false | 5,444 |
py
| 12 |
predict_ctc.py
| 8 | 0.613124 | 0.582994 | 0 | 139 | 37.928058 | 154 |
ritwik1993/dreammissile_hack
| 9,680,856,336,826 |
9475d67e9f63d604a338c70e0ca0c7f096a43319
|
9e73eaad6676cb8702bb101e422dbc8c6c39dcee
|
/scripts/missile.py
|
5615fc2f041c49c463c6cc52e71bc5ace8917d97
|
[] |
no_license
|
https://github.com/ritwik1993/dreammissile_hack
|
f5b0881e7ec077890954bc18263b41b4bece0bb3
|
ea9171aba79ff26c436869ebabb11eacf421ef27
|
refs/heads/master
| 2016-09-06T15:31:05.939731 | 2014-10-21T22:01:23 | 2014-10-21T22:01:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import time
import thread
import rospy
import roslib
import math
from geometry_msgs.msg import Point
from sensor_msgs.msg import JointState
from std_msgs.msg import String
import sys
import usb
# Device node created by the USB missile-launcher kernel driver.
# NOTE(review): the trailing ';' looks like a typo — a device path would
# normally be just "/dev/launcher0"; confirm against the driver before
# changing, since launcher_cmd() opens this path verbatim.
LAUNCHER_NODE = "/dev/launcher0;"
# Control bytes written to the device node (one byte per command).
LAUNCHER_FIRE = 0x10
LAUNCHER_STOP = 0x20
LAUNCHER_UP = 0x02
LAUNCHER_DOWN = 0x01
LAUNCHER_LEFT = 0x04
LAUNCHER_RIGHT = 0x08
# Diagonal movements could be composed by OR-ing flags (kept for reference):
#LAUNCHER_UP_LEFT = LAUNCHER_UP | LAUNCHER_LEFT
#LAUNCHER_DOWN_LEFT = LAUNCHER_DOWN | LAUNCHER_LEFT
#LAUNCHER_UP_RIGHT = LAUNCHER_UP | LAUNCHER_RIGHT
#LAUNCHER_DOWN_RIGHT = LAUNCHER_DOWN | LAUNCHER_RIGHT
def servo_control():
    # Initialize the ROS node, subscribe to rocket commands, and block
    # until shutdown; each message is dispatched to callback().
    rospy.init_node('missile', anonymous=True)
    rospy.Subscriber("rocket_command", String, callback)
    # orientation_pub = rospy.Publisher("orientation", JointState)
    rospy.spin()
def getch():
    # Read one character from stdin in raw mode (no echo, no Enter),
    # restoring the previous terminal settings afterwards.
    import sys, tty, termios
    fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
    return ch
def launcher_cmd(cmd):
    # Write a single command byte to the launcher device node; `with`
    # guarantees the file is closed even if the write fails.
    with open(LAUNCHER_NODE, 'w') as driver:
        driver.write(chr(cmd))
def callback(data):
print "RECEIVED DATA"
print data
cmd = 0x00
# PANNING IS BACKWARDS
command_string = data.data.lower()
if command_string == 'right':
cmd = LAUNCHER_RIGHT
elif command_string == 'left':
cmd = LAUNCHER_LEFT
elif command_string == 'down':
cmd = LAUNCHER_DOWN
elif command_string == 'up':
cmd = LAUNCHER_UP
elif command_string == 'fire':
cmd = LAUNCHER_FIRE
elif command_string == 'stop':
cmd = LAUNCHER_STOP
else:
print "Invalid command string"
cmd = LAUNCHER_STOP
launcher_cmd(cmd)
def main(args):
    # args (sys.argv) is accepted for the conventional signature but unused.
    servo_control()


if __name__ == '__main__':
    main(sys.argv)
|
UTF-8
|
Python
| false | false | 1,760 |
py
| 12 |
missile.py
| 5 | 0.715341 | 0.702273 | 0 | 91 | 18.340659 | 62 |
benureau/dovecot
| 5,214,090,299,337 |
2c526b7a68b2c40326cb4c01fc08fcfa9746f5ff
|
360650c7f7871e1ea3deaeb0074226bc141a579d
|
/zoo/ping.py
|
b218dd09b14e727348d3f03076b9f97b66114f48
|
[] |
no_license
|
https://github.com/benureau/dovecot
|
11f3b73754948fd4e6e7f60f230662b153460aef
|
63eb883b8b0eac3b105a5f0e31708b2e8d4da0a8
|
refs/heads/master
| 2021-01-15T09:37:05.259800 | 2016-02-25T10:07:59 | 2016-02-25T10:23:39 | 46,895,394 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import subprocess
# MAC address whose currently bound IP we want to resolve via ARP.
mac_address = '00:13:f6:01:52:d6'
def ping():
    """Ping the host named by the first CLI argument and return the IPs
    extracted from the reply lines (also populates the ARP cache)."""
    proc = subprocess.Popen(["ping", "-c2", "-W100", sys.argv[1]],
                            stdout=subprocess.PIPE)
    output = proc.stdout.read()
    # Drop the header line and the trailing summary block of ping output.
    reply_lines = output.split('\n')[1:-5]
    return [line.split()[3][:-1] for line in reply_lines]
def macmap():
    """Return a dict mapping MAC address -> IP from the ARP cache.

    Pings first so the target appears in the cache; the list of IPs that
    ping() returns is not needed here (the original bound it to an unused
    variable).
    """
    ping()
    arp_resps = subprocess.Popen(["arp", "-an"], stdout=subprocess.PIPE).stdout.read()
    mac_dict = {}
    for arp_resp in arp_resps.split('\n'):
        if len(arp_resp) > 4:
            fields = arp_resp.split(' ')
            ip = fields[1][1:-1]  # strip the surrounding parentheses
            mac = fields[3]
            mac_dict[mac] = ip
    return mac_dict
# Populate the ARP cache, then print the IP currently bound to mac_address.
ping()
print(macmap()[mac_address])
|
UTF-8
|
Python
| false | false | 703 |
py
| 98 |
ping.py
| 91 | 0.58606 | 0.55192 | 0 | 24 | 28.291667 | 113 |
hmyan90/CV_HDR
| 11,871,289,611,096 |
df50987e7988328dd17a8c3031242db32bc0ca49
|
745e4f7813f6dbcafb1611d6adeb84f4888bd1ac
|
/Part2_fig/test.py
|
6d4a2f478aaeaa2f17ef87cc5c410179caf70503
|
[] |
no_license
|
https://github.com/hmyan90/CV_HDR
|
f78f23090a5f8541d3e23a6bb3202967d922ef0b
|
f59abf2eb3d310a63bd77bce3322186f3797131d
|
refs/heads/master
| 2020-04-06T07:44:45.840386 | 2018-11-15T00:43:24 | 2018-11-15T00:43:24 | 157,283,845 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import cv2
# Count (and print) every fully saturated channel value (== 255) in the
# image.  NOTE: this script uses Python 2 print statements.
name = "6667"
cnt = 0
img = cv2.imread(name + ".JPG")
# img.shape is (rows, cols, channels) for a BGR image loaded by OpenCV.
for i in range(0, img.shape[0]):
    for j in range(0, img.shape[1]):
        for k in range(0, img.shape[2]):
            if img[i][j][k] == 255:
                cnt += 1
                print i, j, k, img[i][j][k]
print cnt
|
UTF-8
|
Python
| false | false | 308 |
py
| 6 |
test.py
| 5 | 0.503247 | 0.448052 | 0 | 15 | 19.533333 | 40 |
kgfig/pcdp-data
| 154,618,843,855 |
1226b787075fbaf9805b6533dd7f9e5671de99ca
|
be50bdc7ef63ebbe54c0357e8b250f038a3ccaa4
|
/pcdpdata/models.py
|
321d31c95a3608fd9a726a315c29e2ca5ad79520
|
[] |
no_license
|
https://github.com/kgfig/pcdp-data
|
e843e0298d752c3e6d6279834003b566d18050cd
|
98ab95083753c5b7e2e91ff195edc1f2d238524c
|
refs/heads/master
| 2020-04-19T15:18:21.146080 | 2016-09-06T07:00:34 | 2016-09-06T07:00:34 | 67,291,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class Project(models.Model):
    """A project grouping related assessments."""

    # Human-readable project name.
    name = models.CharField(max_length=128)

    def __str__(self):
        return self.name
class Assessment(models.Model):
    """An assessment (set of questions) imported from an external system."""

    title = models.CharField(max_length=64)
    # Owning project; nullable so an assessment survives project deletion.
    # NOTE(review): related_name 'assesment_list' is misspelled, but
    # renaming it would break existing reverse lookups — confirm first.
    project = models.ForeignKey(Project, on_delete=models.SET_NULL, blank=True, null=True, related_name='assesment_list')
    # Identifier in the source system the data was imported from.
    original_id = models.IntegerField()
    # Numeric type code; semantics defined by the source system.
    type = models.IntegerField()

    def __str__(self):
        return self.title
class Question(models.Model):
    """A single question belonging to an assessment."""

    assessment = models.ForeignKey(Assessment, on_delete=models.CASCADE, related_name='questions')
    # Question text.
    content = models.TextField()
    # Identifier in the source system the data was imported from.
    original_id = models.IntegerField()
    # Sequence number — presumably the question's position within its
    # assessment; confirm against the importer.
    seq_num = models.IntegerField()
    points = models.IntegerField(default=1)

    def __str__(self):
        return "%d:%s" % (self.original_id, self.content,)
class Choice(models.Model):
    """One answer option for a question."""

    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
    # Option text.
    content = models.TextField()
    # Identifier in the source system the data was imported from.
    original_id = models.IntegerField()
    # Single option letter (e.g. 'a').
    letter = models.CharField(max_length=1)
    # Whether this option is the correct answer.
    correct = models.BooleanField(default=False)

    def __str__(self):
        return "%d:%s" % (self.original_id, self.content,)
class User(models.Model):
    """A participant, together with the answer choices they selected."""

    firstname = models.CharField(max_length=64)
    surname = models.CharField(max_length=64)
    username = models.CharField(max_length=64)
    email = models.EmailField()
    # Identifier in the source system the data was imported from.
    original_id = models.IntegerField()
    # Choices this user selected.
    answers = models.ManyToManyField(Choice)

    def __str__(self):
        return self.username
|
UTF-8
|
Python
| false | false | 1,551 |
py
| 12 |
models.py
| 11 | 0.685364 | 0.676983 | 0 | 48 | 31.291667 | 121 |
alliemclean/BioLlama
| 9,199,819,960,176 |
2d0e48b84e9db282dfee71635ba1e70cf5f755a4
|
2d4e60f45e2c14375b5a4bf13337d8e40ba5d746
|
/biollama/core/__init__.py
|
f13c43b0e64ed896413bf833ec9dd9f37b77a873
|
[
"MIT"
] |
permissive
|
https://github.com/alliemclean/BioLlama
|
4275e02b0e688931e9f25ffc4f981270aa1f1e32
|
b3d0b01701ff21732dea2af3f84b0b7fa17187a3
|
refs/heads/master
| 2021-06-23T19:26:41.493442 | 2021-06-08T19:55:00 | 2021-06-08T19:55:00 | 159,747,357 | 1 | 0 |
MIT
| false | 2021-06-08T19:55:01 | 2018-11-30T00:50:40 | 2020-01-14T16:41:34 | 2021-06-08T19:55:00 | 32 | 2 | 0 | 0 |
Python
| false | false |
import pandas as pd
def get_pos_flds(region):
    """Split a genomic region string ``chrom:start-end`` into its fields.

    Returns (chrom, start, end) as strings; raises ValueError when the
    string does not contain exactly one ':' and one '-' separator.
    """
    seqname, coords = region.split(':')
    lo, hi = coords.split('-')
    return seqname, lo, hi
def exon_df_from_ref(exon_starts, exon_ends, cds_start=0, cds_end=None, strand='+'):
    """Build a per-exon DataFrame from refGene-style exon coordinate strings.

    :param exon_starts: comma-separated exon start positions (trailing comma ok)
    :param exon_ends: comma-separated exon end positions (trailing comma ok)
    :param cds_start: coding-sequence start position
    :param cds_end: coding-sequence end position; ``None`` means no CDS info
    :param strand: '+' or '-'; determines exon numbering direction
    :return: dataframe with columns exon_id, cds_pos, start, end, cds_status,
        strand, cds_length

    cds_status per exon:
        1 = entirely in coding region
        0 = not in coding region
        .5 = partially in coding region
    If cds_start and cds_end aren't supplied, cds_status is set to the string
    'No CDS info' for every exon.
    """
    starts = [int(exon) for exon in exon_starts.split(',') if exon != '']
    ends = [int(exon) for exon in exon_ends.split(',') if exon != '']
    exon_count = len(ends)
    cds_length = 0  # running total of coding bases seen so far
    dd = {k: [] for k in ['exon_id', 'cds_pos', 'start', 'end', 'cds_status']}
    cds_unset = False
    if cds_end is None:
        # No CDS supplied: pretend the CDS extends past the last exon so every
        # exon takes the "inside CDS" branch; statuses are overwritten below.
        cds_end = ends[-1] + 1
        cds_unset = True
    for i, (s, e) in enumerate(zip(starts, ends)):
        if s < cds_start:
            if e < cds_start:
                cds_status = 0
            else:
                # NOTE(review): an exon spanning the whole CDS (s < cds_start
                # and e > cds_end) lands here and adds e - cds_start, counting
                # bases past cds_end — confirm this is intended.
                cds_length += e - cds_start
                cds_status = .5
        elif e > cds_end:
            if s > cds_end:
                cds_status = 0
            else:
                cds_length += cds_end - s
                cds_status = .5
        else:
            cds_length += e - s
            cds_status = 1
        if strand == '+':
            exon = i + 1
        else:
            # Minus strand: exons are numbered from the far end.
            exon = exon_count - i
        dd['exon_id'].append(exon)
        dd['cds_pos'].append(cds_length)
        dd['start'].append(s)
        dd['end'].append(e)
        dd['cds_status'].append(cds_status)
    df = pd.DataFrame(dd)
    df['strand'] = strand
    df['cds_length'] = cds_length
    if cds_unset:
        df['cds_status'] = 'No CDS info'
    return df
|
UTF-8
|
Python
| false | false | 1,912 |
py
| 12 |
__init__.py
| 11 | 0.512029 | 0.50523 | 0 | 61 | 30.360656 | 84 |
Raj-Parekh24/NUFLIX
| 17,093,969,843,803 |
21a854ef77c19a7caccdee33112d684434db722b
|
02ea7be2d6d335f35c9d0c5fe8421107e5882dcb
|
/user/urls.py
|
f75bdab61a1d7c1aee7d92f13be80e8ccc9a32a4
|
[] |
no_license
|
https://github.com/Raj-Parekh24/NUFLIX
|
78bca1c9886281bab9e059753192251567f558d4
|
43c801f0554d2a9fa154ed3ba19330107cee826f
|
refs/heads/main
| 2023-03-20T19:56:55.357798 | 2021-03-17T05:21:42 | 2021-03-17T05:21:42 | 320,753,666 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes: one view per page plus admin login and a submission endpoint.
urlpatterns = [
    path('',views.home,name='home'),
    path('admin',views.admin,name='admin'),
    path('se',views.se,name='se'),
    path('ml',views.ml,name='ml'),
    path('daa',views.daa,name='daa'),
    path('cn',views.cn,name='cn'),
    path('admin_login',views.admin_login,name='admin_login'),
    path('submission',views.submission,name='submission')
]
# Serve user-uploaded media files through Django (development convenience).
urlpatterns += static(settings.MEDIA_URL, document_root= settings.MEDIA_ROOT)
|
UTF-8
|
Python
| false | false | 565 |
py
| 13 |
urls.py
| 6 | 0.690265 | 0.690265 | 0 | 16 | 34.375 | 77 |
lbryio/lbry-android-sdk
| 1,443,109,045,559 |
15d9b50babc4c2785eba36740b3d119c6ca93140
|
32747097a4eae95a85c0e153a797c2689eb76052
|
/p4a/pythonforandroid/recipes/pyleveldb/__init__.py
|
61477092f6ab42f2ceba725846bfc07be29e8b30
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Python-2.0"
] |
permissive
|
https://github.com/lbryio/lbry-android-sdk
|
fa973940c38c8eb7d81a0b4698fa4de353eaf58c
|
d9d81957647cb81f7227205cbb4ecba515556e74
|
refs/heads/master
| 2022-12-14T10:36:05.641392 | 2022-11-23T20:03:41 | 2022-11-23T20:03:41 | 241,861,403 | 3 | 4 |
MIT
| false | 2022-12-11T20:47:57 | 2020-02-20T10:57:08 | 2022-11-18T20:36:21 | 2022-12-11T20:44:30 | 183,046 | 2 | 4 | 6 |
C
| false | false |
from pythonforandroid.recipe import CppCompiledComponentsPythonRecipe
class PyLevelDBRecipe(CppCompiledComponentsPythonRecipe):
    """python-for-android recipe that builds the `leveldb` Python bindings."""
    version = '0.193'
    url = 'https://pypi.python.org/packages/source/l/leveldb/leveldb-{version}.tar.gz'
    # Requires the native snappy and leveldb libs, plus a host Python and setuptools.
    depends = ['snappy', 'leveldb', ('hostpython2', 'hostpython3'), 'setuptools']
    # Patch presumably restricts the build to the bindings only — see bindings-only.patch.
    patches = ['bindings-only.patch']
    call_hostpython_via_targetpython = False  # Due to setuptools
    site_packages_name = 'leveldb'
recipe = PyLevelDBRecipe()
|
UTF-8
|
Python
| false | false | 489 |
py
| 152 |
__init__.py
| 57 | 0.736196 | 0.723926 | 0 | 13 | 36.615385 | 86 |
Geek-Z/Algorithmic-Study
| 9,388,798,545,046 |
49e2628fa1e3bbffb45bebeae9788514b2fbf076
|
eb145c64fda7760ba5d5337135492a871699a69e
|
/week6-DP/E53MaximumSubarray.py
|
c72fcd284d7f5047e6e0eed46c76119985f12e68
|
[] |
no_license
|
https://github.com/Geek-Z/Algorithmic-Study
|
a75376a4b2b7d619324117010ec9e189e7807f7b
|
8eef2656d143e5bc59b787f59b75ebf2d7462750
|
refs/heads/master
| 2020-06-27T20:43:52.752986 | 2019-10-25T14:34:25 | 2019-10-25T14:34:25 | 200,044,387 | 0 | 2 | null | true | 2019-08-01T12:14:00 | 2019-08-01T12:13:59 | 2019-07-26T10:25:11 | 2019-06-24T02:24:57 | 806 | 0 | 0 | 0 | null | false | false |
from typing import List
# Input: [-2, 1, -3, 4, -1, 2, 1, -5, 4],
# Output: 6
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any contiguous subarray (Kadane's algorithm).

        Fix: the original did ``dp = nums`` and then wrote into ``dp``, which
        aliased and mutated the caller's input list; this version keeps only a
        running sum and never touches ``nums``.  Assumes ``nums`` is non-empty
        (per the problem statement).
        """
        best = running = nums[0]
        for value in nums[1:]:
            # Either extend the current subarray or start fresh at `value`.
            running = max(value, running + value)
            best = max(best, running)
        return best
# Quick manual check against the LeetCode example input.
solver = Solution()
print(solver.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
# Runtime: 40 ms, faster than 97.12 % of Python3 online submissions for Maximum Subarray.
# Memory Usage: 13.7 MB, less than 37.70 % of Python3 online submissions for Maximum Subarray.
|
UTF-8
|
Python
| false | false | 536 |
py
| 49 |
E53MaximumSubarray.py
| 47 | 0.589552 | 0.522388 | 0 | 16 | 32.5 | 94 |
zoomjuice/CodeCademy_Learn_Python_3
| 12,567,074,321,986 |
4aed2bf5d6a941b1d4d11347f531febae7d62bb4
|
2d86a8fae21b88e77f5a5a582ecbf6f035a41936
|
/Unit 01 - Hello World/01_01_13-PlusEquals.py
|
910802c692d866d024cba2913c729d35587b6e3d
|
[] |
no_license
|
https://github.com/zoomjuice/CodeCademy_Learn_Python_3
|
6dfa32f57165fb30407ef4747259083131837cbb
|
7eab4f0953d177cbda24ee3b616407e47f854bf9
|
refs/heads/master
| 2023-03-14T14:54:03.978392 | 2021-03-03T14:02:27 | 2021-03-03T14:02:27 | 336,159,040 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Running total of the shopping cart.
total_price = 0

new_sneakers = 50.00
total_price += new_sneakers

nice_sweater = 39.00
fun_books = 20.00

# Add the remaining two items in a single expression.
total_price = total_price + nice_sweater + fun_books

print("The total price is", total_price)
|
UTF-8
|
Python
| false | false | 231 |
py
| 157 |
01_01_13-PlusEquals.py
| 155 | 0.705628 | 0.649351 | 0 | 15 | 14.4 | 40 |
czimi/conduit
| 12,154,757,490,922 |
6e2d809f748599301ea17449f34e7abb2b5969e8
|
bd458748b7fedcb52c9243b4c18ee1d90bc85a74
|
/test_conduit/test_TC06_listing.py
|
6a92549375530c7047b05fa9837484650d7d7275
|
[
"MIT"
] |
permissive
|
https://github.com/czimi/conduit
|
5408b4d6bb656507179ccbadf69a0bf560a9b1e5
|
102d24b050271b70a7f1c2c15138a580a5f66953
|
refs/heads/master
| 2023-07-12T05:15:12.248798 | 2021-08-22T18:29:31 | 2021-08-22T18:29:31 | 375,459,816 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from conduit_methods import *
class TestConduitApp(object):
    """UI tests for the Conduit app, driven through headless Chrome."""

    def setup(self):
        # Fresh headless browser per test; collected article texts start empty.
        browser_options = Options()
        browser_options.headless = True
        self.browser = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
        self.browser.get("http://localhost:1667/")
        self.content_testuser2_articles_about_main = []
        self.content_testuser2_articles_about_user = []

    def teardown(self):
        self.browser.quit()

    # TC6 listing all blogposts of a testuser (precondition: registration of a new user)
    def test_list_all_posts_of_testuser2(self):
        conduit_login(self.browser)
        time.sleep(2)
        # Collect testuser2's article descriptions from the global feed.
        list_testuser2_articles_about_main = self.browser.find_elements_by_xpath('//a[@href="#/@testuser2/"]//parent::div//following-sibling::a/p')
        for i in range(len(list_testuser2_articles_about_main)):
            self.content_testuser2_articles_about_main.append(list_testuser2_articles_about_main[i].text)
        # Open the user's profile page and collect the articles listed there.
        self.browser.find_element_by_xpath('//a[@href="#/@testuser2/"][@class="author"]').click()
        time.sleep(3)
        list_testuser2_articles_about_user = self.browser.find_elements_by_xpath('//p[normalize-space()]')
        for i in range(len(list_testuser2_articles_about_user)):
            self.content_testuser2_articles_about_user.append(list_testuser2_articles_about_user[i].text)
            # Bug fix: the original asserted the constant True inside an `if`,
            # so the test could never fail.  Assert the real membership check:
            # every article on the profile page must appear in the main feed.
            assert self.content_testuser2_articles_about_user[i] in self.content_testuser2_articles_about_main
|
UTF-8
|
Python
| false | false | 1,848 |
py
| 14 |
test_TC06_listing.py
| 13 | 0.707251 | 0.695346 | 0 | 42 | 43 | 147 |
alex216/codeforce
| 9,225,589,762,071 |
0629a76b7bc563d0d428c10ded6a19f2cb469737
|
84042dbca99df46a707e50a988169391e7be9d4b
|
/1343_a.py
|
f5b9ff31696bb8b230b051e8003e1365f6a7b26a
|
[] |
no_license
|
https://github.com/alex216/codeforce
|
d166d51985363f7d9f458a65c3a08171580da998
|
9d01b8e0f59bca9b769c4b2ce88d6fe36f8d3efb
|
refs/heads/master
| 2023-07-23T19:39:18.914372 | 2023-07-12T02:04:39 | 2023-07-12T02:04:39 | 264,169,363 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Candidate divisors of the form 2^k - 1 for k = 2..30, in increasing order.
mersenne_divisors = [(1 << k) - 1 for k in range(2, 31)]

cases = int(input())
for _ in range(cases):
    m = int(input())
    # Print m divided by the first (smallest) divisor of that form.
    for d in mersenne_divisors:
        if m % d == 0:
            print(m // d)
            break
|
UTF-8
|
Python
| false | false | 187 |
py
| 3 |
1343_a.py
| 3 | 0.417112 | 0.385027 | 0 | 11 | 16 | 23 |
IIC2613-Inteligencia-Artificial-2021-2/Grading-1
| 3,539,053,088,546 |
ae6b3ba83a764b9b3efd4ce01a0e7ad34654f300
|
bdb2c66608845da7bb3766e49737e65ac218eb2f
|
/tests/secret_astar_tie_breaking_test.py
|
043e722aace645bc9d57e0a3aafa6635b609e39c
|
[] |
no_license
|
https://github.com/IIC2613-Inteligencia-Artificial-2021-2/Grading-1
|
5e88b9deea4585d50474caa2d8abfdc459644c7d
|
3a36ef6398cd80165cc7a5696b73fa82e5ef2695
|
refs/heads/master
| 2023-08-19T17:40:59.399569 | 2021-10-24T20:02:29 | 2021-10-24T20:06:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Tests for A* Search algorithm.
"""
from search.algorithms.astar import AStar
from search.problems.grid.board2d import Grid2D
def test_tie_breaking_fixed_g():
    """On an f-value tie (f = g + h), the higher-g / lower-h node wins.

    Fix: this function was also named ``test_tie_breaking`` and was silently
    shadowed by the redefinition below, so pytest never collected it.  Renamed
    so both tests run.  No caller could have referenced the old (dead) binding.
    """
    state = Grid2D.State((0, 0))
    # pylint: disable=invalid-name
    f = 5
    g1 = 3
    h1 = f - g1
    node_1 = AStar.AStarNode(state, action=None, parent=None, g=g1, h=h1)

    g2 = 2
    h2 = f - g2
    node_2 = AStar.AStarNode(state, action=None, parent=None, g=g2, h=h2)
    assert node_1 < node_2
def test_tie_breaking():
    """For every pair of nodes on the same f-contour, ties break toward lower h."""
    state = Grid2D.State((0, 0))
    # pylint: disable=invalid-name
    f = 5
    # Build one node per (g, h) split of f, remembering each node's h.
    nodes = []
    for g in range(f + 1):
        h = f - g
        nodes.append((h, AStar.AStarNode(state, action=None, parent=None, g=g, h=h)))
    for h_a, node_a in nodes:
        for h_b, node_b in nodes:
            if h_a < h_b:
                assert node_a < node_b
            elif h_a > h_b:
                assert node_a > node_b
|
UTF-8
|
Python
| false | false | 964 |
py
| 19 |
secret_astar_tie_breaking_test.py
| 15 | 0.561203 | 0.511411 | 0 | 38 | 24.368421 | 81 |
weilin2018/CS_ML_DL_Courses
| 19,361,712,596,747 |
f80767118d6c0aba6a6fd9a75807c5d002817900
|
a9ee46a3a19b9de76af8271d8e9ce888b76ddd40
|
/ML_A_Z_Hands_On_Python_And_R_(Udemy)/dev/dev_py/multiple_linear_regression.py
|
5a9dcab7a50b647619cacc28891eccb964fda062
|
[] |
no_license
|
https://github.com/weilin2018/CS_ML_DL_Courses
|
1b901bc2d19509e3ac5e015be1e163f3b83f0020
|
6ecd493dedc15c470aa42dc9e9601d5e7e20a648
|
refs/heads/master
| 2020-08-03T14:30:42.169134 | 2019-09-27T00:57:13 | 2019-09-27T00:57:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author      : Bhishan Poudel; Physics PhD Student, Ohio University
# Date        : May 09, 2017
# Last update :
#
# Multiple linear regression on the 50_Startups dataset, followed by manual
# backward elimination of predictors using OLS p-values.

# Imports
import pydoc
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: first 4 columns are predictors, column 4 is Profit.
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values

# Encoding categorical data (the categorical column at index 3).
# NOTE(review): OneHotEncoder(categorical_features=...) was removed in newer
# scikit-learn; this script requires an older sklearn version — confirm.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()

# Avoiding the Dummy Variable Trap (drop the first dummy column)
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set (80/20)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Feature Scaling (not needed for plain linear regression; kept disabled)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""

# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Building the optimal model using Backward Elimination:
# repeatedly drop the predictor with the highest p-value (> 5%).
import statsmodels.formula.api as sm
# Add a column of 50 ones at the beginning of X (the OLS intercept term)
X = np.append(arr=np.ones((50,1)).astype(int), values=X, axis=1)
# Initialize optimal X as original matrix, we will backprop later.
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit() # ordinary least sq
print(regressor_OLS.summary())

# Look for highest Probability (lowest significance) and remove this
# remove if it is above 5%.
X_opt = X[:,[0,1,3,4, 5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit() # ordinary least sq
print(regressor_OLS.summary())

# remove if it is above 5%.
X_opt = X[:,[0,3,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit() # ordinary least sq
print(regressor_OLS.summary())

# remove if it is above 5%.
X_opt = X[:,[0,3]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit() # ordinary least sq
print(regressor_OLS.summary())

# some helps
#print(pydoc.render_doc(np.append, "Help on %s"))
#print(pydoc.render_doc(sm.OLS, "Help on %s"))
#print(help(sm.OLS))
|
UTF-8
|
Python
| false | false | 2,448 |
py
| 292 |
multiple_linear_regression.py
| 11 | 0.714869 | 0.696487 | 0 | 77 | 30.792208 | 92 |
Xeldal/daily_payroll
| 12,867,722,032,549 |
24aa523f69bc9437dce24409c735c830ff8c19a6
|
29fbad5b7c28efb4312fb4b3085c759cd7365d36
|
/balance_keeper.py
|
e18e030db0080b4ea77e78180ef94dbdef3c74b3
|
[] |
no_license
|
https://github.com/Xeldal/daily_payroll
|
18665a23abdf221cfdb1a08e1447e2a149ae30c8
|
29f4456379bfc572f0fb0fbbd5a8c0dcf70f0d93
|
refs/heads/master
| 2020-12-31T02:01:41.584803 | 2015-01-12T22:54:28 | 2015-01-12T22:54:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# coding=utf8
# This is a script which connects to a delegate periodically to check if it is time to send a payment
# set in the config.json file. It sends the amount to the payto account specified.
import requests
import sys
import os
import json
import getpass
import time
import datetime
from pprint import pprint
# BTS amounts from the RPC are integers scaled by this precision factor.
BTS_PRECISION = 100000

# Load operator settings from config.json in the working directory.
config_data = open('config.json')
config = json.load(config_data)
config_data.close()

##Send payments to hosted delegate at defined time once per day
# NOTE(review): `global` at module level is a no-op; the flag tracks whether
# a payment was already sent during the current hour.
global x_just_sent
x_time_to_send = config["x_time_to_send"]
x_just_sent = False
x_fulltime = datetime.time(x_time_to_send, 0, 0)
x_hour_chosen = int(x_fulltime.hour)

# RPC credentials/endpoint and payment parameters, all from config.json.
auth = (config["bts_rpc"]["username"], config["bts_rpc"]["password"])
url = config["bts_rpc"]["url"]
WALLET_NAME = config["wallet_name"]
DELEGATE_NAME = config["delegate_name"]
PAYTO = config["payto_account"]
AMOUNT = config["x_amount"]
MARKETUSD = "USD"
MARKETBTS = "BTS"
def parse_date(date):
    """Parse a compact timestamp string 'YYYYMMDDTHHMMSS' into a datetime."""
    compact_format = "%Y%m%dT%H%M%S"
    return datetime.datetime.strptime(date, compact_format)
def call(method, params=[]):
    """Issue a JSON-RPC 2.0 request to the configured wallet endpoint.

    Retries forever (every 5 seconds) on any failure and returns the parsed
    JSON response dict on success.
    NOTE(review): the mutable default ``params=[]`` is shared across calls,
    and the trailing ``break`` / ``return None`` are unreachable because the
    loop either returns or continues — left as-is (documentation-only pass).
    """
    headers = {'content-type': 'application/json'}
    request = {
        "method": method,
        "params": params,
        "jsonrpc": "2.0",
        "id": 1
    }
    while True:
        try:
            response = requests.post(url, data=json.dumps(request), headers=headers, auth=auth)
            # Parse the raw response body rather than response.json().
            result = json.loads(vars(response)["_content"])
            #print "Method:", method
            #print "Result:", result
            return result
        except:
            print "Warning: rpc call error, retry 5 seconds later"
            time.sleep(5)
            continue
        break
    return None
# Main polling loop: once a minute, check whether the configured payout hour
# has arrived and, if so, withdraw delegate pay once for that hour and log it.
while True:
    try:
        global x_just_sent
        os.system("clear")
        print("\nRunning Balance Keeper\n")
        response = call("wallet_get_account", [DELEGATE_NAME] )
        if "error" in response:
            print("FATAL: Failed to get info:")
            # NOTE(review): `result` is never assigned in this scope; this line
            # would raise NameError (swallowed by the bare except below).
            print(result["error"])
            exit(1)
        response = response["result"]
        balance = response["delegate_info"]["pay_balance"] / BTS_PRECISION
        print ("Balance for %s is currently: %s BTS\n" % (DELEGATE_NAME, balance))
        x_nowtime = datetime.datetime.time(datetime.datetime.now())
        x_hour_current = int(x_nowtime.hour)
        print("Payment will be sent at hour: %d" % x_hour_chosen)
        print("Checking the time... %s" % x_nowtime)
        print("Sent Recently?: %d" % x_just_sent)
        if x_hour_chosen == x_hour_current:
            print("Hours Match!\n")
            if x_just_sent == False:
                ## Send one payment per day
                response = call("wallet_delegate_withdraw_pay", [DELEGATE_NAME, PAYTO, AMOUNT])
                print("Sending Payment Now...\n")
                x_just_sent = True
                # Fetch the current USD/BTS feed price to log the USD value.
                response = call("blockchain_market_status", [MARKETUSD, MARKETBTS])
                if "error" in response:
                    print("FATAL: Failed to get market info:")
                    # NOTE(review): `result` undefined here as well — see above.
                    print(result["error"])
                    exit(1)
                response = response["result"]
                feed_price = response["current_feed_price"]
                USDequiv = AMOUNT * feed_price
                response = call("wallet_account_transaction_history", [DELEGATE_NAME])
                if "error" in response:
                    print("FATAL: Failed to get account history info:")
                    # NOTE(review): `result` undefined here as well — see above.
                    print(result["error"])
                    exit(1)
                response = response["result"]
                # Take the last transaction in the history as the payment just sent.
                k = 0
                for i in response:
                    k = k + 1
                xTrxId = response[k-1]["trx_id"]
                timeStamp = response[k-1]["timestamp"]
                # Append a one-line record to the payroll log.
                f = open("payroll.txt","a")
                f.write('Payment Sent! TimeStamp: %s Amount: %.5f BTS ($%.5f) Rate: $%.5f /BTS Trx_ID: %s\n' % (timeStamp, AMOUNT, USDequiv, feed_price, xTrxId))
                f.close()
                print("Payment Sent! TimeStamp: %s Amount: %.5f BTS ($%.5f) Rate: $%.5f /BTS Trx_ID: %s\n" % (timeStamp, AMOUNT, USDequiv, feed_price, xTrxId))
            else:
                print("Payment has already been sent. Nothing to do.\n")
        else:
            # Outside the payout hour: re-arm the once-per-hour flag.
            x_just_sent = False
            print("\nNot time yet...\n")
        print("going to sleep")
        time.sleep(60)
    except:
        print("exception")
        time.sleep(60)
|
UTF-8
|
Python
| false | false | 4,105 |
py
| 2 |
balance_keeper.py
| 1 | 0.608526 | 0.600974 | 0 | 136 | 29.183824 | 157 |
Coderash1998/LeetCode
| 13,812,614,827,765 |
549f4ca217567555ee54948b15c19c48f8c9349f
|
9ee2cab97854e665b297acce1dcc81a0b339f1f2
|
/824/824.py
|
2b630874caa0c936373d188b291590e102fc1a1f
|
[] |
no_license
|
https://github.com/Coderash1998/LeetCode
|
ebf7a05ccdbad289ce341ca286c10f204cfb8daa
|
78a416c2f0e00bbc19ba60e585bc58ae2dc811e4
|
refs/heads/main
| 2023-08-05T09:15:52.629727 | 2021-09-14T22:57:22 | 2021-09-14T22:57:22 | 325,938,325 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Solution:
    def toGoatLatin(self, S: str) -> str:
        """Translate sentence S into Goat Latin.

        Vowel-initial words keep their letters; consonant-initial words move
        their first letter to the end.  Every word then gains "ma" plus i
        trailing 'a' characters, where i is the word's 1-based position.
        """
        vowels = "aAeEiIoOuU"
        translated = []
        for index, word in enumerate(S.split(), start=1):
            if word[0] in vowels:
                stem = word
            else:
                stem = word[1:] + word[0]
            translated.append(stem + "ma" + "a" * index)
        return ' '.join(translated)
|
UTF-8
|
Python
| false | false | 349 |
py
| 185 |
824.py
| 105 | 0.335244 | 0.318052 | 0 | 13 | 25 | 47 |
cwm-kylin/webmonitor
| 13,134,010,022,642 |
9ef2f5a9068e19ad7c156d5155b255f052231da1
|
4997a36d185de94b78f8a1e0fb1bf11bbf9b3e96
|
/monitor/webmonitor/templatetags/AppFilter.py
|
560b73d9dad2ddb4ec722adaa2790dceca9e5360
|
[] |
no_license
|
https://github.com/cwm-kylin/webmonitor
|
47a2a0d67b2f1be3bd2e78564b0a9d059ee65966
|
54c275c40f4a3e2da369e4e9f71c5d1c6bccf9c1
|
refs/heads/master
| 2021-01-23T22:11:16.455494 | 2016-08-16T08:11:52 | 2016-08-16T08:18:13 | 65,800,746 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django import template
from webmonitor.public import *
from webmonitor.models import MonitorAppInfo
register = template.Library()
def result_domain(value):
    """Template filter: extract the domain from URL `value` via GetURLdomain.

    GetURLdomain is a project helper (webmonitor.public) — presumably returns
    the host portion of the URL; confirm against its definition.
    """
    return GetURLdomain(value)
def result_datetime(value):
    """Template filter: convert `value` via stamp2time.

    stamp2time is a project helper (webmonitor.public) — presumably converts a
    unix timestamp to a display string; confirm against its definition.
    """
    return stamp2time(value)
# Register both functions as template filters under their function names.
register.filter(result_domain)
register.filter(result_datetime)
|
UTF-8
|
Python
| false | false | 345 |
py
| 25 |
AppFilter.py
| 17 | 0.762319 | 0.756522 | 0 | 18 | 18.111111 | 44 |
cacampbell/pythonmisc
| 2,396,591,798,694 |
3a7dcce658a318b46579729496e5b98830faa4ec
|
7f7e98e9947aad5d95b38dbf89f5b9d2b775c9c1
|
/OasesAssemble.py
|
ced611870facd4de1ee1f2da9f3da8a6969fb4f8
|
[] |
no_license
|
https://github.com/cacampbell/pythonmisc
|
27aab7b7c2edb6b53a662c4f065ad07a383cb92e
|
d70edf4c695bbe55cf52ae82d025def285f4a1dc
|
refs/heads/master
| 2021-01-24T08:08:57.767324 | 2017-01-18T22:44:30 | 2017-01-18T22:44:30 | 50,535,716 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
from sys import stderr
from Bash import mkdir_p
from VelvetAssemble import VelvetAssemble
class OasesAssemble(VelvetAssemble):
    """Parallel-command wrapper that runs the Oases transcriptome assembler
    (oases_pipeline.py) over the gathered input libraries, one job for the
    whole input set rather than one per file."""
    def __init__(self, *args, **kwargs):
        super(OasesAssemble, self).__init__(*args, **kwargs)
        # Assembler defaults: k-mer range, minimum transcript length, and
        # optional reference-guided mode.
        self.set_default("startk", "21")
        self.set_default("endk", "37")
        self.set_default("contig_len", "250")
        self.set_default("reference_guided", False)
        self.set_default("reference", "reference.fa")
        self.set_default("all_merged", "{}/all_merged".format(self.input_root))
    def make_command(self, filename):
        # Deliberate no-op: a single command covering all inputs is built in
        # format_commands(), so the per-file hook does nothing.
        pass
    def format_commands(self):
        """Build the single oases_pipeline.py command and register it under
        the configured job name."""
        job_name = "{}".format(self.cluster_options["job_name"])
        # NOTE(review): the template opens a double quote before {libraries}
        # that is only closed after the reference_guided branch below; also the
        # `threads` kwarg has no matching placeholder in the template and is
        # therefore unused — confirm both are intentional.
        command = ('export OMP_NUM_THREADS={omp} && oases_pipeline.py -m '
                   '{startk} -M {endk} -p "-min_trans_lgth {contig_len} -scaffolding yes" '
                   '-o {out} --data "{libraries}').format(
            startk=self.startk,
            endk=self.endk,
            threads=self.get_threads(),
            contig_len=self.contig_len,
            out=self.output_root,
            libraries=self.format_libraries(guided=self.reference_guided),
            omp=self.get_threads(0.95)
        )  # Command
        if self.reference_guided:
            # Close the --data quote after appending the reference option.
            command += ' -reference={}"'.format(self.reference)
        else:
            command += '"'
        self.commands[job_name] = command
        if self.verbose:
            print(command, file=stderr)
    def run(self):
        """
        Run the Parallel Command from start to finish
        1) Load Environment Modules
        2) Gather input files
        3) Remove exclusions
        4) Make Directories
        5) Format Commands
        6) Dispatch Scripts to Cluster Scheduler
        7) Unload the modules
        :return: list<str>: a list of job IDs returned by cluster scheduler
        """
        if self.verbose:
            print('Loading environment modules...', file=stderr)
        if self.modules is not None:
            self.module_cmd(['load'])
        if self.verbose:
            print('Gathering input files...', file=stderr)
        self.get_files()
        if self.verbose:
            print('Removing exclusions...', file=stderr)
        if self.verbose:
            print("Making output directories...", file=stderr)
        mkdir_p(self.output_root)
        # Drop excluded paths/patterns and never re-consume our own output.
        if self.exclusions_paths:
            self.exclude_files_below(self.exclusions_paths)
        self.exclude_files_below(self.output_root)
        if self.exclusions:
            self.remove_regex_from_input(self.exclusions)
        if self.verbose:
            print('Formatting commands...', file=stderr)
        self.format_commands()
        if self.verbose:
            print('Dispatching to cluster...', file=stderr)
        jobs = self.dispatch()  # Return the job IDs from the dispatched cmds
        return (jobs)
|
UTF-8
|
Python
| false | false | 2,967 |
py
| 101 |
OasesAssemble.py
| 101 | 0.572969 | 0.566903 | 0 | 89 | 32.337079 | 91 |
invm/euler-project
| 5,257,039,973,836 |
225a85053f61d47844e16ded6bf41e41e1b11024
|
a6cba4e4bb2dd96558c5dadc1531e7dfef0f85ea
|
/16.py
|
ce0ca3cdda96bd8a4c8294320e8afc5e556fa4ec
|
[] |
no_license
|
https://github.com/invm/euler-project
|
6b642e9ae9633e11c406fb687dae2f9dc7a5fbbc
|
561328a537bb1e98e6ffc85eaedffbf32eea9b3a
|
refs/heads/master
| 2020-12-10T06:52:41.659750 | 2020-01-14T07:21:18 | 2020-01-14T07:21:18 | 233,529,250 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
# 215 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
# What is the sum of the digits of the number 21000?
def count_sum_of_digits(num):
    """Return the sum of the decimal digits of num (0 when num <= 0)."""
    total = 0
    remaining = num
    while remaining > 0:
        remaining, digit = divmod(remaining, 10)
        total += digit
    return total


# Project Euler 16: digit sum of 2**1000.
print(count_sum_of_digits(pow(2, 1000)))
|
UTF-8
|
Python
| false | false | 317 |
py
| 13 |
16.py
| 12 | 0.564669 | 0.463722 | 0 | 17 | 17.647059 | 66 |
elizlieu/Panlex
| 3,324,304,707,266 |
fcf0515ad3b61e9d1337251d711b5e678fbb5f6e
|
958204a905955e5548a751b82c4a3b445f544f0e
|
/done/opm-eng-tpi-Lawrence/opm-eng-tpi-Lawrence-0to1.py
|
265767611f7dfd0865ce43df504b1c1d65cf7125
|
[] |
no_license
|
https://github.com/elizlieu/Panlex
|
111f5545f8f9df3b3b649e8f1deb4fe906053aad
|
30be6a79b4f194b40e02a672ee8808dd1d02a713
|
refs/heads/master
| 2019-01-16T10:41:39.264670 | 2016-11-12T12:21:55 | 2016-11-12T12:21:55 | 73,549,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
### Elizabeth Lieu
### 7/29/2016
import regex as re
import copy
from ben.panlex import *
# Deduplicate dictionary entries in <base>.txt and write them, one blank line
# apart, to <base>-<version+1>.txt.
base_file_name = 'opm-eng-tpi-Lawrence'
version = 0

# PanLex language varieties covered by this source.
lv_list = [
    'opm-000',
    'eng-000',
    'tpi-000',
    ]

source = []

with open(base_file_name + '.txt', 'r') as f:
    text = f.read()
    prev = None
    # Entries start at a blank line followed by a \lx marker; the first chunk
    # (before the first \lx) is header material and is skipped.
    for entry in re.split(r'\n\n(?=\\lx)', text)[1:]:
        # Keep only entries that differ from the immediately preceding one,
        # collapsing internal blank lines.
        if entry != prev:
            source.append(re.sub('\n\n', '\n', entry))
        prev = entry

with open(base_file_name + '-' + str(version + 1) + '.txt', 'w', encoding='utf-8') as f:
    f.write('\n\n'.join(source))
|
UTF-8
|
Python
| false | false | 575 |
py
| 14 |
opm-eng-tpi-Lawrence-0to1.py
| 14 | 0.587826 | 0.549565 | 0 | 32 | 17 | 88 |
felipedbene/oktank-video
| 1,803,886,277,388 |
bd77819be970e56867fe7b8db24bb4a28c091b51
|
a3d82436578560e999c8a1fda7fd1ec0c0173beb
|
/tk3.py
|
68708fb9873d50010a54cb6c5132b481bf2cd1b2
|
[] |
no_license
|
https://github.com/felipedbene/oktank-video
|
9796da90df33f7d3583f440a610cba93d9b526b3
|
5fc6231796a9e6a2fb24109a8f3b892d3e3abe1a
|
refs/heads/master
| 2020-12-08T20:26:08.631684 | 2020-01-10T16:24:10 | 2020-01-10T16:24:10 | 233,085,980 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import os
# Stitch every .jpg in the current directory (sorted by filename) into an
# mp4 at 20 fps.
image_folder = '.'
video_name = 'video_sorted.mp4'

images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
images.sort()
# Use the first frame to fix the output dimensions.
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
video = cv2.VideoWriter(video_name, fourcc, 20.0, (width, height))
#video = cv2.VideoWriter(video_name, 0, 1, (width,height))

for image in images:
    img = os.path.join(image_folder, image)
    print(img)
    video.write(cv2.imread(img))

cv2.destroyAllWindows()
video.release()
|
UTF-8
|
Python
| false | false | 657 |
py
| 1 |
tk3.py
| 1 | 0.713851 | 0.69102 | 0 | 24 | 26.416667 | 74 |
AdityaVikramSingh21/Miscellaneaus
| 11,854,109,782,726 |
bc08552cd798dcce949c7a080d462fdbc42409f4
|
e0b6df3e9176fb3a3e85ea41b01d91ae1e03c646
|
/NMI.py
|
2d47fdde8cd22984b0a58cf52d13027acb392420
|
[] |
no_license
|
https://github.com/AdityaVikramSingh21/Miscellaneaus
|
78e44c6306e7a164515bf0b2b4bb8a16d602d0ea
|
bceb3a3b7cf4320122385895f68b19e10fe855b4
|
refs/heads/master
| 2020-05-18T22:15:27.807088 | 2020-01-27T10:07:25 | 2020-01-27T10:07:25 | 184,687,424 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import print_function
from pyspark.sql import SparkSession
import math as m
from pyspark.sql import functions as F
from pyspark.sql.types import IntegerType,FloatType
from pyspark.sql.functions import lit,expr,udf,row_number,col
from pyspark.sql.window import Window
import itertools
import numpy as np
if __name__ == "__main__":
spark = SparkSession.builder.appName("NMI_2").getOrCreate()
#The below function calculates the overlap NMI for communities in between rows no.s start and stop from both results and groundtruth
#we need to specify the num of nodes,starting and ending rows and behind the scenes it's the same as comparing list of lists(comunities from starting to end rows for bothe the tables)
#there were some mathematical issues with the reference implemmentation()
def overal_nmi(start,stop,num_nodes):
data_r = spark.read.csv("resultant.csv", header= False)
data_g = spark.read.csv("groundtruth.csv", header= False)
#below function forms new cols for no. of nodes in a community and the prob distribution for each community
def make_df(df,num_nodes):
df=df.withColumn('null_count',sum([F.isnull(df[col]).cast(IntegerType()) for col in df.columns]))
df=df.withColumn('prob',((10-df['null_count'])/(num_nodes)))
df=df.withColumn('1-prob',1-df['prob'])
df=df.withColumn('prob_arr',F.array([F.col(str(i)) for i in ['prob','1-prob']]))
df=df.withColumn('nodes',(9-df['null_count']))
df=df.na.fill('0')
return df
#below function finds the entropy sum for p,1-p row_wise for every community with a udf and stores the results in an array type col
def sum_entropy_single_dist(df):
sum_entropy = F.udf(lambda x: sum(-i * m.log(2, i) for i in x),FloatType())
df=df.withColumn('sum_entropy', sum_entropy('prob_arr'))
return df
#below function does some custom preprocesing on resultant df to give it a better shape and form
def preprocess_r(df):
df=df.withColumn('comm',F.array([F.col(str(i)) for i in ['_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8']]))
df=df.withColumn("community", expr("filter(comm, x -> not(x <=> 0))"))
df=df.drop('comm','null_count','comm','_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8')
df=sum_entropy_single_dist(df)
return df
#below function does some custom preprocesing on groundtruth df to give it a better shape and form
def preprocess_g(df):
df=df.withColumn('comm',F.array([F.col(str(i)) for i in ['_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8','_c9']]))
df=df.withColumn("community", expr("filter(comm, x -> not(x <=> 0))"))
df=df.drop('comm','null_count','comm','_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8','_c9')
df=sum_entropy_single_dist(df)
return df
# formula for entropy calc
def entp(num):
return -num * m.log(2, num)
#below function creates a list of lists(communities between the specified rows) for the dataframe in its arguement, it also gives the entropy sum for each of these lists(communities)
def list_comm_entps(start,stop,df):
w = Window().partitionBy(lit('a')).orderBy(lit('a'))
df = df.withColumn("row_num", row_number().over(w))
df_working=df.filter(col("row_num").between(start,stop))
l_f=df_working.select("community").collect()
mvv_array_f = [row.community for row in l_f]
s_e_f=df_working.select("sum_entropy").collect()
sum_entropy_f = [row.sum_entropy for row in s_e_f]
return mvv_array_f,sum_entropy_f
#below function gets the joint prob distribution and conditional entropy for the case Y given X
def get_capx_given_capy(arr_1,arr_3,num_nodes):
cond_entropy_list =[]
for comm_x,comm_y in itertools.product(arr_1,arr_3):
prob_matrix = np.ndarray(shape=(2, 2), dtype=float)
intersect_size = float(len(set(comm_x) & set(comm_y)))
cap_n = num_nodes+4
prob_matrix[1][1] = (intersect_size + 1) / cap_n
prob_matrix[1][0] = (len(comm_x) - intersect_size + 1) / cap_n
prob_matrix[0][1] = (len(comm_y) - intersect_size + 1) / cap_n
prob_matrix[0][0] = (3 - intersect_size + 1) / cap_n
entropy_list = map(entp,(prob_matrix[0][0], prob_matrix[0][1], prob_matrix[1][0], prob_matrix[1][1]))
k=list(entropy_list)
if k[3] + k[0] <= k[1] + k[2]:
cond_entropy_list.append(np.inf)
else:
cond_entropy_list.append(sum(k)-sum_entropy_g[arr_3.index(comm_y)])
cond_entropy_list
partial_res_list = []
for comm_x in arr_1:
min_cond_entropy=float(min(cond_entropy_list))
partial_res_list.append(min_cond_entropy /(sum_entropy_r[arr_1.index(comm_x)]))
return(np.mean(partial_res_list))
#same as above but for the case Y given X (ideally these 2 funcs should be merged into a single func but why they were kept separate is mentioned in slide 4
def get_capy_given_capx(arr_1,arr_3,num_nodes):
    """Mirror of get_capx_given_capy with the two closure entropy lists
    swapped: subtracts ``sum_entropy_r`` for the conditioning community and
    normalises by ``sum_entropy_g``.  Kept separate per the author's note
    ("mentioned in slide 4").
    """
    cond_entropy_list =[]
    for comm_x,comm_y in itertools.product(arr_1,arr_3):
        # 2x2 joint membership distribution, Laplace smoothed (+1 per cell).
        prob_matrix = np.ndarray(shape=(2, 2), dtype=float)
        intersect_size = float(len(set(comm_x) & set(comm_y)))
        cap_n = num_nodes+4
        prob_matrix[1][1] = (intersect_size + 1) / cap_n
        prob_matrix[1][0] = (len(comm_x) - intersect_size + 1) / cap_n
        prob_matrix[0][1] = (len(comm_y) - intersect_size + 1) / cap_n
        # NOTE(review): literal 3 in the "neither" cell looks suspicious —
        # normally num_nodes - |X| - |Y| + |X&Y|; confirm.
        prob_matrix[0][0] = (3 - intersect_size + 1) / cap_n
        entropy_list = map(entp,(prob_matrix[0][0], prob_matrix[0][1], prob_matrix[1][0], prob_matrix[1][1]))
        k=list(entropy_list)
        # Admissibility test; np.inf marks an unusable pair.
        if k[3] + k[0] <= k[1] + k[2]:
            cond_entropy_list.append(np.inf)
        else:
            cond_entropy_list.append(sum(k)-sum_entropy_r[arr_3.index(comm_y)])
    cond_entropy_list  # no-op leftover expression (debug residue)
    partial_res_list = []
    for comm_x in arr_1:
        # NOTE(review): same global min() as in get_capx_given_capy —
        # independent of comm_x; possibly a bug, preserved as-is.
        min_cond_entropy=float(min(cond_entropy_list))
        partial_res_list.append(min_cond_entropy /(sum_entropy_g[arr_1.index(comm_x)]))
    return(np.mean(partial_res_list))
data_g=make_df(data_g,num_nodes)
data_g=preprocess_g(data_g)
data_r=make_df(data_r,num_nodes)
data_r=preprocess_r(data_r)
mvv_array_g,sum_entropy_g=list_comm_entps(start,stop,data_g)
mvv_array_r,sum_entropy_r=list_comm_entps(start,stop,data_r)
a=get_capx_given_capy(mvv_array_r,mvv_array_g,num_nodes)
b=get_capy_given_capx(mvv_array_g,mvv_array_r,num_nodes)
overlap_nmi=1-0.5*a-0.5*b
return overlap_nmi
#1 sample implementation
overal_nmi(1,20,1000) #1 sample
spark.stop()
|
UTF-8
|
Python
| false | false | 7,260 |
py
| 6 |
NMI.py
| 2 | 0.579752 | 0.560882 | 0 | 138 | 51.615942 | 183 |
Seite50/seite50
| 12,515,534,748,875 |
8d1a88f3b22b571d2bcd11064997f80ec7d26008
|
60de5f7e6a5cfe2598df6e1ac21a80be7bf14e87
|
/api/tests/__init__.py
|
e40250dd3d195fd4f2751d1f85e4a9d50c46518d
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/Seite50/seite50
|
154ed8861a262a3f09b01ad0a788e0413e729124
|
761a8e76f9e4473f70a8705dce169d61bf660267
|
refs/heads/master
| 2021-06-09T02:08:42.594803 | 2020-02-12T17:24:46 | 2020-02-12T17:24:46 | 129,201,104 | 0 | 1 |
Apache-2.0
| false | 2021-03-19T22:00:29 | 2018-04-12T06:02:39 | 2020-02-12T17:24:49 | 2021-03-19T22:00:29 | 50 | 0 | 1 | 3 |
Python
| false | false |
from .book_model import *
from .book_view import *
from .author_model import *
|
UTF-8
|
Python
| false | false | 79 |
py
| 22 |
__init__.py
| 17 | 0.734177 | 0.734177 | 0 | 3 | 25.333333 | 27 |
Shrosy2511/werken-met-gegevens
| 2,061,584,332,290 |
0b0882bb25e53bc3e2aa239843d5df1e2dcccd21
|
8eb630c1de4e2d5a4c3861f8cbaac5fa27b0241c
|
/feestlunch.py
|
b7ed26c037205e4a51ad0bc40856471edad9e00d
|
[] |
no_license
|
https://github.com/Shrosy2511/werken-met-gegevens
|
4e029d690aafbae172c7297c97e5446a6be8f4ef
|
3e305f815f24636399d63ce6efebf1151ac7d64d
|
refs/heads/main
| 2023-07-22T15:08:15.565656 | 2021-09-11T08:35:17 | 2021-09-11T08:35:17 | 405,006,553 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Prices (EUR) and quantities for the party lunch.
crossaint = 0.39
stokbrood = 2.78
kortingsbon = 1.50
hoeveelheid = 17
aantal = 2

# Total: croissants plus baguettes, minus one discount voucher.
# NOTE(review): the sentence mentions "3 kortingsbonnen" but only a single
# voucher value is subtracted — confirm intent.
totaal = hoeveelheid * crossaint + aantal * stokbrood - kortingsbon
factuurtekst = (
    'de feestlunch kost bij de bakker '
    + str(totaal)
    + ' euro voor de 17 croissantjes en de 2 stokbroden als de 3 kortingsbonnen nog geldig zijn!'
)
print(factuurtekst)
|
UTF-8
|
Python
| false | false | 323 |
py
| 2 |
feestlunch.py
| 2 | 0.727554 | 0.678019 | 0 | 8 | 39.375 | 220 |
ariel-brassesco/CS50-Project3
| 910,533,100,713 |
7ad13a287792ee86ca7cdf413aae4595d9502089
|
499715b1ba009b2081547bbf70a2f9ef02ca1162
|
/orders/migrations/0010_auto_20200505_1804.py
|
08a31d14f6ae66afb7537701c7b7f53260bb113c
|
[] |
no_license
|
https://github.com/ariel-brassesco/CS50-Project3
|
09f84030ad3d93ec4af2871bf00e2b04dd974894
|
cd7b99965f9f391b73bcfbd33d3a41fc36f2fbb2
|
refs/heads/master
| 2023-08-11T12:16:06.021418 | 2020-06-20T19:20:00 | 2020-06-20T19:20:00 | 273,132,885 | 0 | 0 | null | false | 2021-09-22T19:16:02 | 2020-06-18T03:28:28 | 2020-06-20T19:20:24 | 2021-09-22T19:16:02 | 4,201 | 0 | 0 | 2 |
Python
| false | false |
# Generated by Django 3.0.4 on 2020-05-05 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.4): adds ``Product.max_add`` and
    ``Topping.price``, changes the default of
    ``ProductVariation.type_variation``, and drops ``PriceToppingList``.

    NOTE: generated code — regenerate with ``makemigrations`` rather than
    editing by hand.
    """

    dependencies = [
        ('orders', '0009_product_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='max_add',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='topping',
            name='price',
            field=models.FloatField(default=0.0, verbose_name='Price'),
        ),
        migrations.AlterField(
            model_name='productvariation',
            name='type_variation',
            field=models.CharField(default='base', max_length=20),
        ),
        migrations.DeleteModel(
            name='PriceToppingList',
        ),
    ]
|
UTF-8
|
Python
| false | false | 818 |
py
| 84 |
0010_auto_20200505_1804.py
| 48 | 0.555012 | 0.525672 | 0 | 31 | 25.387097 | 71 |
ZimmermanGroup/pyGSM
| 17,540,646,467,609 |
7dd288ba4794134bcee089485bbd7593969a52a4
|
5952bdeb83ae935ad3bea1ef526834e0a5707096
|
/pyGSM/level_of_theories/pytc.py
|
5f3cdea06ad7e19c2a2914628e26d96f284178dc
|
[
"MIT"
] |
permissive
|
https://github.com/ZimmermanGroup/pyGSM
|
59780b8b1b40c14a7ae74cf16d20988574332989
|
3b920bb11953f49673d3a2e87ee474b6315a0b4c
|
refs/heads/master
| 2023-07-06T12:55:26.520360 | 2023-06-28T19:49:14 | 2023-06-28T19:49:14 | 152,341,841 | 45 | 30 |
MIT
| false | 2023-06-28T19:30:16 | 2018-10-10T01:01:17 | 2023-04-14T13:56:57 | 2023-06-28T19:17:59 | 3,816 | 39 | 22 | 19 |
Python
| false | false |
# standard library imports
from .casci_lot_svd import CASCI_LOT_SVD
from .rhf_lot import RHF_LOT
from utilities import *
from .base_lot import Lot
import sys
from os import path
# third party
import numpy as np
import lightspeed as ls
#import psiw
import est
import json
# local application imports
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
#TODO get rid of get_energy, get_gradient
class PyTC(Lot):
    """
    Level of theory is a wrapper object to do DFT and CASCI calculations
    Inherits from Lot. Requires a PSIW object
    """

    def __init__(self, options):
        super(PyTC, self).__init__(options)
        # Build the electronic-structure driver from a JSON input file unless
        # a ready-made lot object was already supplied via job_data.
        if self.lot_inp_file is not None and self.lot is None:
            self.build_lot_from_dictionary()
            #print(self.lot)
            #print(' done executing lot_inp_file')
            #exec(open(self.lot_inp_file).read())
            #print(lot)
            #self.options['job_data']['lot'] = lot

    def build_lot_from_dictionary(self):
        """Construct the est/lightspeed level-of-theory object described by
        the JSON file named in ``self.lot_inp_file`` and store it in
        ``self.lot`` (RHF_LOT for DFT, CASCI_LOT when doCASCI is set;
        optionally FOMO-RHF reference and/or a QM/MM geometry)."""
        d = {}
        d = json.load(open(self.lot_inp_file))
        print(d)

        filepath = d.get('filepath', None)

        # QM
        basis = d.get('basis', None)
        charge = d.get('charge', 0)
        S_inds = d.get('S_inds', [0])
        S_nstates = d.get('S_nstates', [1])

        # SCF
        diis_max_vecs = d.get('diis_max_vecs', 6)
        maxiter = d.get('maxiter', 200)
        cphf_diis_max_vecs = d.get('cphf_diis_max_vecs', 6)
        diis_use_disk = d.get('diis_use_disk', False)
        rhf_guess = d.get('rhf_guess', True)
        rhf_mom = d.get('rhf_mom', True)

        # active space
        doCASCI = d.get('doCASCI', False)
        nactive = d.get('nactive', 0)
        nocc = d.get('nocc', 0)
        nalpha = d.get('nalpha', int(nactive/2))
        nbeta = d.get('nbeta', nalpha)

        # FOMO
        doFOMO = d.get('doFOMO', False)
        fomo = d.get('fomo', True)
        fomo_temp = d.get('fomo_temp', 0.3)
        fomo_nocc = d.get('fomo_nocc', nocc)
        fomo_nact = d.get('fomo_nact', nactive)
        fomo_method = d.get('fomo_method', 'gaussian')

        # QMMM
        doQMMM = d.get('doQMMM', False)
        prmtopfile = d.get('prmtopfile', None)
        inpcrdfile = d.get('inpcrdfile', None)
        qmindsfile = d.get('qmindsfile', None)

        # DFT
        doDFT = d.get('doDFT', False)
        dft_functional = d.get('dft_functional', 'None')
        dft_grid_name = d.get('dft_grid_name', 'SG0')

        nifty.printcool("Building Resources")
        resources = ls.ResourceList.build()
        nifty.printcool("{}".format(resources))

        if not doQMMM:
            nifty.printcool("Building Molecule and Geom")
            molecule = ls.Molecule.from_xyz_file(filepath)
            geom = est.Geometry.build(
                resources=resources,
                molecule=molecule,
                basisname=basis,
            )
        else:
            nifty.printcool("Building QMMM Molecule and Geom")
            qmmm = est.QMMM.from_prmtop(
                prmtopfile=prmtopfile,
                inpcrdfile=inpcrdfile,
                qmindsfile=qmindsfile,
                charge=charge,
            )
            geom = est.Geometry.build(
                resources=resources,
                qmmm=qmmm,
                basisname=basis,
            )
        nifty.printcool("{}".format(geom))

        # Reference wavefunction: FOMO-RHF or plain RHF-with-DFT; anything
        # else is unsupported.
        if doFOMO:
            nifty.printcool("Building FOMO RHF")
            ref = est.RHF.from_options(
                geometry=geom,
                diis_max_vecs=diis_max_vecs,
                maxiter=maxiter,
                cphf_diis_max_vecs=cphf_diis_max_vecs,
                diis_use_disk=diis_use_disk,
                fomo=fomo,
                fomo_method=fomo_method,
                fomo_temp=fomo_temp,
                fomo_nocc=nocc,
                fomo_nact=nactive,
            )
            ref.compute_energy()
        elif doDFT:
            nifty.printcool("Building DFT LOT")
            ref = est.RHF.from_options(
                geometry=geom,
                diis_max_vecs=diis_max_vecs,
                maxiter=maxiter,
                cphf_diis_max_vecs=cphf_diis_max_vecs,
                diis_use_disk=diis_use_disk,
                dft_functional=dft_functional,
                dft_grid_name=dft_grid_name
            )
            self.lot = RHF_LOT.from_options(rhf=ref)
        else:
            raise NotImplementedError

        # A CASCI layer on top of the reference replaces self.lot when requested.
        if doCASCI:
            nifty.printcool("Building CASCI LOT")
            casci = est.CASCI.from_options(
                reference=ref,
                nocc=nocc,
                nact=nactive,
                nalpha=nalpha,
                nbeta=nbeta,
                S_inds=S_inds,
                S_nstates=S_nstates,
                print_level=1,
            )
            casci.compute_energy()
            self.lot = est.CASCI_LOT.from_options(
                casci=casci,
                print_level=1,
                rhf_guess=rhf_guess,
                rhf_mom=rhf_mom,
            )

    @property
    def lot(self):
        # The electronic-structure driver is shared across nodes via job_data.
        return self.options['job_data']['lot']

    @lot.setter
    def lot(self, value):
        self.options['job_data']['lot'] = value

    def get_energy(self, coords, multiplicity, state):
        """Return the energy (kcal/mol) of (multiplicity, state) at *coords*,
        re-running the QM code only when the coordinates changed."""
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).all():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.run(geom)
        tmp = self.search_tuple(self.E, multiplicity)
        # Stored energies are in atomic units; convert for the caller.
        return tmp[state][1]*units.KCAL_MOL_PER_AU

    def get_mm_energy(self, coords):
        """Return the MM component of the QM/MM energy at *coords*
        (coords in Angstrom, converted to a.u. for the driver)."""
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).all():
            self.currentCoords = coords.copy()
            self.lot.update_qmmm(coords*units.ANGSTROM_TO_AU)
        # CASCI-type lots keep the geometry under casci.ref; RHF lots under rhf.
        if self.lot.__class__.__name__ == "CASCI_LOT" or self.lot.__class__.__name__ == "CASCI_LOT_SVD":
            return self.lot.casci.ref.geometry.qmmm.mm_energy
        else:
            return self.lot.rhf.geometry.qmmm.mm_energy

    def get_mm_gradient(self, coords):
        """Return the MM component of the QM/MM gradient at *coords*."""
        #TODO need diff variable for hasRan MM energy
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).all():
            self.currentCoords = coords.copy()
            self.lot.update_qmmm(coords*units.ANGSTROM_TO_AU)
        if self.lot.__class__.__name__ == "CASCI_LOT" or self.lot.__class__.__name__ == "CASCI_LOT_SVD":
            return self.lot.casci.ref.geometry.qmmm.mm_gradient
        else:
            return self.lot.rhf.geometry.qmmm.mm_gradient

    def run_code(self, T):
        """Compute energies and gradients (and the coupling when
        ``do_coupling`` is set) for every requested state at geometry *T*
        (an ls.Tensor in a.u.); results accumulate in self.E / self.grada."""
        self.lot = self.lot.update_xyz(T)
        print(" after update xyz")
        #print(self.lot)
        print(self.lot.options)
        for state in self.states:
            multiplicity = state[0]
            ad_idx = state[1]
            # Spin quantum number S used by the CASCI interface.
            S = multiplicity-1
            if self.lot.__class__.__name__ == "CASCI_LOT" or self.lot.__class__.__name__ == "CASCI_LOT_SVD":
                self.E.append((multiplicity, self.lot.compute_energy(S=S, index=ad_idx)))
                tmp = self.lot.compute_gradient(S=S, index=ad_idx)
            elif self.lot.__class__.__name__ == "RHF_LOT":
                self.E.append((multiplicity, self.lot.compute_energy()))
                tmp = self.lot.compute_gradient()
            # tmp[...] extracts the underlying ndarray from the ls.Tensor.
            self.grada.append((multiplicity, tmp[...]))
            if self.do_coupling == True:
                # Derivative coupling between the first two requested states.
                state1 = self.states[0][1]
                state2 = self.states[1][1]
                tmp = self.lot.compute_coupling(S=S, indexA=state1, indexB=state2)
                self.coup = tmp[...]

    def run(self, geom, verbose=False):
        """Run the electronic-structure code for *geom* (xyz list, Angstrom).

        When not verbose, all driver output is redirected into
        ``lot_jobs.txt`` and a molden file for this node is saved.
        """
        self.E = []
        self.grada = []
        #normal update
        coords = manage_xyz.xyz_to_np(geom)
        T = ls.Tensor.array(coords*units.ANGSTROM_TO_AU)
        print(" In run")
        print("Lot {} casci {} ref {}".format(id(self.lot), id(self.lot.casci), id(self.lot.casci.reference)))
        if not verbose:
            with open('lot_jobs.txt', 'a') as out:
                with nifty.custom_redirection(out):
                    self.run_code(T)
            filename = "{}.molden".format(self.node_id)
            self.lot.casci.reference.save_molden_file(filename)
        else:
            self.run_code(T)
            #filename="{}_rhf_update.molden".format(self.node_id)
            #self.lot.casci.reference.save_molden_file(filename)
        print(" after run")
        print("Lot {} casci {} ref {}".format(id(self.lot), id(self.lot.casci), id(self.lot.casci.reference)))
        self.hasRanForCurrentCoords = True
        return

    def get_gradient(self, coords, multiplicity, state):
        """Return the gradient of (multiplicity, state) at *coords*, scaled
        by units.ANGSTROM_TO_AU; re-runs the QM code only if coords changed."""
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).all():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.run(geom)
        tmp = self.search_tuple(self.grada, multiplicity)
        return np.asarray(tmp[state][1])*units.ANGSTROM_TO_AU

    def get_coupling(self, coords, multiplicity, state1, state2):
        """Return the derivative coupling between state1 and state2 as a
        (3*natoms, 1) column vector, scaled by units.ANGSTROM_TO_AU."""
        if self.hasRanForCurrentCoords == False or (coords != self.currentCoords).all():
            self.currentCoords = coords.copy()
            geom = manage_xyz.np_to_xyz(self.geom, self.currentCoords)
            self.run(geom)
        return np.reshape(self.coup, (3*len(self.coup), 1))*units.ANGSTROM_TO_AU
if __name__ == "__main__":
    # Demo: FOMO-CASCI(2,2) on ethylene, then energy/gradient through PyTC.
    import psiw
    from utilities import nifty

    ##### => Job Data <= #####
    states = [(1, 0), (1, 1)]
    charge = 0
    nocc = 7
    nactive = 2
    basis = '6-31gs'
    filepath = '../../data/ethylene.xyz'

    #### => PSIW Obj <= ######
    nifty.printcool("Build resources")
    resources = ls.ResourceList.build()
    nifty.printcool('{}'.format(resources))

    molecule = ls.Molecule.from_xyz_file(filepath)
    geom = psiw.geometry.Geometry.build(
        resources=resources,
        molecule=molecule,
        basisname=basis,
    )
    nifty.printcool('{}'.format(geom))

    ref = psiw.RHF.from_options(
        geometry=geom,
        g_convergence=1.0E-6,
        fomo=True,
        fomo_method='gaussian',
        fomo_temp=0.3,
        fomo_nocc=nocc,
        fomo_nact=nactive,
        print_level=1,
    )
    ref.compute_energy()

    casci = psiw.CASCI.from_options(
        reference=ref,
        nocc=nocc,
        nact=nactive,
        nalpha=nactive/2,
        nbeta=nactive/2,
        S_inds=[0],
        S_nstates=[2],
        print_level=1,
    )
    casci.compute_energy()

    # NOTE(review): this rebinding shadows the `psiw` module with the
    # CASCI_LOT instance for the rest of the script.
    psiw = psiw.CASCI_LOT.from_options(
        casci=casci,
        rhf_guess=True,
        rhf_mom=True,
        orbital_coincidence='core',
        state_coincidence='full',
    )

    nifty.printcool("Build the pyGSM Level of Theory object (LOT)")
    lot = PyTC.from_options(states=[(1, 0), (1, 1)], job_data={'psiw': psiw}, do_coupling=False, fnm=filepath)

    geoms = manage_xyz.read_xyz(filepath, scale=1.)
    coords = manage_xyz.xyz_to_np(geoms)
    e = lot.get_energy(coords, 1, 0)
    print(e)

    g = lot.get_gradient(coords, 1, 0)
    print(g)
|
UTF-8
|
Python
| false | false | 11,144 |
py
| 73 |
pytc.py
| 62 | 0.551059 | 0.545406 | 0 | 325 | 33.289231 | 110 |
x5698lk/pythoon-machine-learning
| 11,261,404,274,080 |
2ea5ca7908b9a651f2df890a11bc10af4776b7f2
|
c08f2115cfe53f7103a6544de0415c7efe0d1fc4
|
/test.py
|
738f6eeac680c02e0fd3b9dba36903fe7f1877d3
|
[] |
no_license
|
https://github.com/x5698lk/pythoon-machine-learning
|
ecaced76271d1579b6f4023de6acce4030c448bd
|
3a9fab34b7e1df198a09a28974d9af6e3395648e
|
refs/heads/master
| 2022-11-11T03:01:30.918880 | 2020-06-29T10:18:32 | 2020-06-29T10:18:32 | 275,784,759 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
batch_size = 128
epochs = 10
IMG_HEIGHT = 150
IMG_WIDTH = 150

# NOTE(review): this first `model` is built, compiled and summarised but
# never trained or saved — only `classifier` below is actually used.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()

# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 10, activation = 'softmax'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
classifier.summary()

train_datagen = ImageDataGenerator(rescale = 1./255, # normalise pixel values to [0, 1]
                                   shear_range = 0.2, # random shear augmentation
                                   zoom_range = 0.2, # random zoom in/out
                                   horizontal_flip = True) # random horizontal flip
test_datagen = ImageDataGenerator(rescale = 1./255) # normalise pixel values only

training_set = train_datagen.flow_from_directory('F:/python-project/dataset1/train',
                                                 target_size = (64, 64),
                                                 batch_size = 128,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('F:/python-project/dataset1/test',
                                            target_size = (64, 64),
                                            batch_size = 128,
                                            class_mode = 'categorical')

#fit the model
history = classifier.fit_generator(training_set,
                                   steps_per_epoch = 10,
                                   epochs = epochs,
                                   validation_data = test_set,
                                   validation_steps = 10)
#save weights
classifier.save("406410208.h5")
|
UTF-8
|
Python
| false | false | 2,789 |
py
| 2 |
test.py
| 1 | 0.567725 | 0.53012 | 0 | 80 | 33.25 | 97 |
nagareproject/examples
| 4,999,341,968,398 |
69369c78381f9f35c9740c81ed5cb72ab105e665
|
591f05ab526016c0d35c638b36a648668a296236
|
/nagare/examples/portal/calculator.py
|
eeade11e16f1c1d284dbfcb23fb2abd87bd93dfc
|
[] |
no_license
|
https://github.com/nagareproject/examples
|
10b35b55b543481834aa3899a4b8fd89a7ae3b19
|
ef0476578e9618af37e06965d010a83cf933396a
|
refs/heads/master
| 2020-06-05T05:04:51.138314 | 2017-10-24T10:52:54 | 2017-10-24T10:52:54 | 4,214,531 | 5 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# --
# Copyright (c) 2008-2017 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
import operator
from nagare import presentation
class Calculator(object):
    """RPN-style pocket calculator model.

    ``display`` accumulates the digits being typed; ``enter`` pushes the
    typed number onto ``stack``; ``operand`` folds the two topmost stack
    values with a binary operator.
    """

    def __init__(self):
        self.display = ''
        self.stack = []

    def digit(self, digit):
        """Append one typed digit to the display string."""
        self.display = self.display + str(digit)

    def enter(self):
        """Push the displayed number (if any) onto the stack, clearing it."""
        if not self.display:
            return
        self.stack.append(int(self.display))
        self.display = ''

    def operand(self, op):
        """Apply binary operator *op* to the two topmost stack values."""
        self.enter()
        if len(self.stack) < 2:
            return
        rhs = self.stack.pop()
        self.stack[-1] = op(self.stack[-1], rhs)

    def drop(self):
        """Clear the display while typing, otherwise pop the stack top."""
        if self.display:
            self.display = ''
        elif self.stack:
            self.stack.pop()

    def get_last(self):
        """Return the value to show: the display, else the stack top, else '0'."""
        if self.display:
            return self.display
        return str(self.stack[-1]) if self.stack else '0'
@presentation.render_for(Calculator)
def render(self, h, *args):
    """Default nagare view for a Calculator: renders the display and a
    4x5 keypad whose links are bound to the calculator's methods.

    NOTE(review): ``operator.div`` exists only in Python 2 (removed in
    Python 3, where it would be ``operator.truediv``) — this view assumes
    a Python 2 runtime.
    """
    h.head << h.head.css_url('calculator.css')

    with h.div(class_='calculator'):
        # Current value (display, stack top, or '0').
        h << h.div(self.get_last(), class_='calculator_display')
        with h.table:
            with h.tr:
                h << h.td(colspan=3)
                h << h.td(h.a('C').action(self.drop))
                h << h.td(h.a(u'\N{DIVISION SIGN}').action(self.operand, operator.div))
            with h.tr:
                h << h.td(h.a(7).action(self.digit, 7))
                h << h.td(h.a(8).action(self.digit, 8))
                h << h.td(h.a(9).action(self.digit, 9))
                h << h.td
                h << h.td(h.a(u'\N{MULTIPLICATION X}').action(self.operand, operator.mul))
            with h.tr:
                h << h.td(h.a(4).action(self.digit, 4))
                h << h.td(h.a(5).action(self.digit, 5))
                h << h.td(h.a(6).action(self.digit, 6))
                h << h.td
                h << h.td(h.a(u'\N{MINUS SIGN}').action(self.operand, operator.sub))
            with h.tr:
                h << h.td(h.a(1).action(self.digit, 1))
                h << h.td(h.a(2).action(self.digit, 2))
                h << h.td(h.a(3).action(self.digit, 3))
                h << h.td
                h << h.td(h.a(u'\N{PLUS SIGN}').action(self.operand, operator.add))
            with h.tr:
                h << h.td
                h << h.td(h.a(0).action(self.digit, 0))
                h << h.td(colspan=2)
                h << h.td(h.a(u'\N{WHITE RIGHT-POINTING POINTER}').action(self.enter))

    return h.root
# -----------------------------------------------------------------------------
hl_lines = (
range(12, 95),
(
(1,),
'Definition of a Plain Old Calculator Python Object',
range(1, 39)
),
(
(42,),
'<p>Default view for a <code>Calculator</code></p>'
'<p>Parameters are:'
'<ol>'
'<li><code>self</code>: the <code>Calculator</code> object</li>'
'<li><code>h</code>: a HTML renderer</li>'
'</ol>',
range(42, 82)
),
(
(51,),
'Direct association of the <code>drop</code> method to a link',
(51,)
),
(
(52, 59, 66, 73),
'Association of the <code>operand</code> method to links, with a parameter',
(52, 59, 66, 73)
),
(
(55, 56, 57, 62, 63, 64, 69, 70, 71, 77),
'Association of the <code>digit</code> method to links, with a parameter',
(55, 56, 57, 62, 63, 64, 69, 70, 71, 77)
),
(
(79,),
'Direct association of the <code>enter</code> method to link',
(79,)
)
)
|
UTF-8
|
Python
| false | false | 3,766 |
py
| 56 |
calculator.py
| 36 | 0.47265 | 0.442645 | 0 | 141 | 25.70922 | 90 |
DevanshArora-2002/CO_Proj1
| 8,744,553,428,964 |
8120f79cabb5c68dde863c646df3e8f6b2c8fa8c
|
bcf58cea44c67eb7b5b15f857a58b09fa5d270ce
|
/SimpleSimulator/shift_func.py
|
8b051dc13ab304b0c8873c760024dc917da936b2
|
[] |
no_license
|
https://github.com/DevanshArora-2002/CO_Proj1
|
41fc0e15247c8cf1629967b00e4e50117f54aaec
|
5695eff4ca1c718fe21abebbaabfea3d1a4da42d
|
refs/heads/master
| 2023-07-17T15:19:16.229160 | 2021-08-26T14:59:56 | 2021-08-26T14:59:56 | 395,772,718 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from binary_to_decimal import binary_to_decimal
from decimal_to_binary import decimal_to_binary
def ls_func(register_file,reg_code1,imm):
    """Shift the value of register *reg_code1* by immediate *imm* bits, in place.

    NOTE(review): despite the name (``ls`` presumably = left shift), the
    operator used here is ``>>`` (right shift) while ``rs_func`` uses
    ``<<`` — the two look swapped; confirm against the ISA spec before
    relying on either.
    """
    # 3-bit register code -> index into the register file (R0..R7).
    dict = {'000': 0, '001': 1, '010': 2, '011': 3, '100': 4, '101': 5, '110': 6, '111': 7}
    str_val1=register_file[dict[reg_code1]]
    val1=binary_to_decimal(str_val1)
    imm1=binary_to_decimal(imm)
    val=val1>>imm1
    val=decimal_to_binary(val)
    # Store back with an 8-zero prefix; low half holds the binary string
    # (width depends on decimal_to_binary — TODO confirm it is 8 bits).
    register_file[dict[reg_code1]]='0'*8+str(val)
    return register_file
def rs_func(register_file,reg_code1,imm):
    """Shift the value of register *reg_code1* by immediate *imm* bits, in place.

    NOTE(review): despite the name (``rs`` presumably = right shift), the
    operator used here is ``<<`` (left shift) while ``ls_func`` uses
    ``>>`` — the two look swapped; confirm against the ISA spec before
    relying on either.
    """
    # 3-bit register code -> index into the register file (R0..R7).
    dict = {'000': 0, '001': 1, '010': 2, '011': 3, '100': 4, '101': 5, '110': 6, '111': 7}
    str_val1=register_file[dict[reg_code1]]
    val1=binary_to_decimal(str_val1)
    imm1=binary_to_decimal(imm)
    val=val1<<imm1
    val=decimal_to_binary(val)
    # Store back with an 8-zero prefix; low half holds the binary string
    # (width depends on decimal_to_binary — TODO confirm it is 8 bits).
    register_file[dict[reg_code1]]='0'*8+str(val)
    return register_file
|
UTF-8
|
Python
| false | false | 840 |
py
| 20 |
shift_func.py
| 20 | 0.640476 | 0.538095 | 0 | 20 | 41 | 91 |
geek8565/build-kg-from-scratch
| 8,126,078,173,381 |
5ed6a7747ae161402783f72a66137d625d679ee1
|
596a6675370c8b14d23661b4d86782a273eff3a3
|
/ch6/6.3/kgqa/util/tools.py
|
d5390f38f9f098087f254d1a5f9e9a698fc67f6b
|
[] |
no_license
|
https://github.com/geek8565/build-kg-from-scratch
|
b7478df4a0d61713eb7d70d85cc05452192bb206
|
776e59b8b14ca104872957c78187c779f0abb012
|
refs/heads/main
| 2023-07-26T06:33:47.507119 | 2021-09-12T13:04:27 | 2021-09-12T13:04:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding:utf8 -*-
__author__ = 'winnie'
import re
import json
from collections import OrderedDict
class Utils():
punc_ch = "[!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆《》「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…﹏]"
punc_en = "[!\"#$%&\'()*+,;<=>?@[\\]^_`{|}~]"
punc_ch_pattern = re.compile(punc_ch)
punc_en_pattern = re.compile(punc_en)
def __init__(self):
pass
def read_file_into_list(self, filename):
line_list = []
with open(filename, encoding='utf8') as infile:
for line in infile:
line = line.strip()
if line:
line_list.append(line)
return line_list
def write_list_into_file(self, object, filename):
with open(filename, 'w',encoding='utf8') as outfile:
for line in object:
outfile.write(line+'\n')
def read_es(self,filename):
ret_dict = OrderedDict()
with open(filename) as infile:
for line in infile:
lineL = line.split('\t')
if lineL[1] in ret_dict:
ret_dict[lineL[1]].append(lineL[2:])
else:
ret_dict[lineL[1]] = [lineL[2:]]
return ret_dict
def write_dict_into_file_according_key(self, subject_dict, out_path):
# 写入文件
for key in subject_dict:
with open(out_path+key+'.txt','wb', encoding='utf8') as outfile:
for line in subject_dict[key]:
#print type(line),line
# if isinstance(line,unicode):
# line = line.encode('utf8')
outfile.write(line+'\n')
# dict的value是一个list
def update_dict(self, origin_dict,new_dict):
for key in new_dict:
if key not in origin_dict:
origin_dict[key] = new_dict[key]
else:
origin_dict[key].extend(new_dict[key])
return origin_dict
def remove_punctuation(self, sent):
sent = self.punc_ch_pattern.sub('', sent)
sent = self.punc_en_pattern.sub('', sent)
# sent = re.sub(self.punc_ch_pattern, '', sent)
# sent = re.sub(self.punc_en_pattern, '', sent)
# sent = re.sub('[ ]{2,}', ' ', sent) # 去掉多余空格
sent = ' '.join(sent.split())
return sent
def make_kg_mapping(self):
json_file = 'hupo_kgqa/data/kg_mapping/amber-kg.json'
with open(json_file, encoding='utf8') as infile:
kg_dict = json.load(infile)
for key in kg_dict:
for ele in kg_dict[key]:
print(ele)
print()
# def check_unicode(self, text):
# if isinstance(text,str):
# text = unicode(text,'utf8')
# elif isinstance(text, int) or isinstance(text, float):
# text = str(text)
#
# return text
# def read_json(self,filename):
# with open(filename,) as infile:
# return json.load(infile)
def json_dump(self, object, json_file):
with open(json_file, 'w', encoding='utf8') as f:
json.dump(object, f, indent=4, ensure_ascii=False)
def read_json(self, json_file):
with open(json_file, 'r', encoding='utf8') as f:
structure = json.load(f)
return structure
util = Utils()
#util.make_kg_mapping()
# 展示字典,二维
# raw_data = {'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
# 'last_name': ['Miller', 'Jacobson', ".", 'Milner', 'Cooze'],
# 'age': [42, 52, 36, 24, 73],
# 'preTestScore': [4, 24, 31, ".", "."],
# 'postTestScore': ["25,000", "94,000", 57, 62, 70]}
# df = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'preTestScore', 'postTestScore'])
# print df
# 展示效果
# first_name last_name age preTestScore postTestScore
# 0 Jason Miller 42 4 25,000
# 1 Molly Jacobson 52 24 94,000
# 2 Tina . 36 31 57
# 3 Jake Milner 24 . 62
# 4 Amy Cooze 73 . 70
# d1 = {'a':1,'b':2}
# d2 = {'a':2,'b':3}
# d3 = {'a':3,'b':1}
# l = [d1,d2,d3]
# l.sort(key=lambda e: e['a'],reverse=True)
# print l
|
UTF-8
|
Python
| false | false | 4,487 |
py
| 126 |
tools.py
| 86 | 0.498251 | 0.476091 | 0 | 131 | 31.717557 | 108 |
krallistic/presentations
| 19,370,302,520,040 |
77f1f5e10ea66c71e4dbb652c0095eab570ca1c1
|
e9f39211022e6f2705945ff9750a6aac4ee2730b
|
/data2day-17/examples/basic-mnist/mnist.py
|
ce0cdb562c599fc1b570109a45c8b6e21a6bcb73
|
[] |
no_license
|
https://github.com/krallistic/presentations
|
4ff66f93173c012c29f32697ebb46ba862606321
|
9adfb84949fe6b25dec2bf3d1d87d311083439fa
|
refs/heads/master
| 2021-01-22T20:13:11.746865 | 2017-11-30T15:30:54 | 2017-11-30T15:30:54 | 85,296,886 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tensorflow as tf
import argparse
import sys
import time
from tensorflow.examples.tutorials.mnist import input_data
# reset everything to rerun in jupyter
# config
batch_size = 100        # samples per SGD step
learning_rate = 0.01    # gradient-descent step size
training_epochs = 10    # full passes over the training set
def main(_):
    """Train and evaluate a single-layer softmax MNIST classifier.

    NOTE: TF1 graph-mode, Python 2 style (print statement,
    ``initialize_all_variables``); uses the module-level hyper-parameters.
    """
    # load mnist data set
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    # input images
    # None -> batch size can be any size, 784 -> flattened mnist image
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
    # target 10 output classes
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")

    # model parameters will change during training so we use tf.Variable
    W = tf.Variable(tf.zeros([784, 10]))
    # bias
    b = tf.Variable(tf.zeros([10]))

    # implement model
    # y is our prediction
    y = tf.nn.softmax(tf.matmul(x,W) + b)

    # specify cost function
    # this is our cost
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # specify optimizer
    # optimizer is an "operation" which we can execute in a session
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

    start_training_time = time.time()
    with tf.Session() as sess:
        # variables need to be initialized before we can use them
        sess.run(tf.initialize_all_variables())

        # perform training cycles
        for epoch in range(training_epochs):
            # number of batches in one epoch
            batch_count = int(mnist.train.num_examples/batch_size)
            for i in range(batch_count):
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                # perform the operations we defined earlier on batch
                sess.run([train_op], feed_dict={x: batch_x, y_: batch_y})
            if epoch % 2 == 0:
                print "Epoch: ", epoch
        print("Test-Accuracy: %2.2f" % sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
        print("Training Time: %3.2fs" % float(time.time() - start_training_time))
        print("Done Training example")
if __name__ == '__main__':
    tf.reset_default_graph()
    parser = argparse.ArgumentParser()
    # Allow --flag=true/false style boolean arguments.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument(
        "--epoch",
        type=int,
        default=10,
        help="Amount of epochs"
    )
    # Flags for defining the tf.train.ClusterSpec
    parser.add_argument(
        "--ps_hosts",
        type=str,
        default="",
        help="Comma-separated list of hostname:port pairs"
    )
    parser.add_argument(
        "--worker_hosts",
        type=str,
        default="",
        help="Comma-separated list of hostname:port pairs"
    )
    parser.add_argument(
        "--job_name",
        type=str,
        default="",
        help="One of 'ps', 'worker'"
    )
    # Flags for defining the tf.train.Server
    parser.add_argument(
        "--task_index",
        type=int,
        default=0,
        help="Index of task within the job"
    )
    # NOTE(review): the distributed-training flags (ps_hosts, worker_hosts,
    # job_name, task_index) and --epoch are parsed but never used in main().
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
UTF-8
|
Python
| false | false | 3,415 |
py
| 31 |
mnist.py
| 12 | 0.595608 | 0.582723 | 0 | 115 | 28.66087 | 115 |
manthan99/swarm_search
| 10,960,756,576,226 |
d86798ed3b1d305c6247cb24ea01b2b8e160fba8
|
d8cbf9bad5a92b31e3b1e3e661c0ece290ec0b88
|
/local_navigate.py
|
fa99e471ae3bbee3bd02c08957fe8d27276cbf63
|
[
"MIT"
] |
permissive
|
https://github.com/manthan99/swarm_search
|
a2078a38dd840721f6e69c6c91443a5c9d9fdc59
|
3857edde0238c2f5d83a33c8969e6e3e3b9a3dcf
|
refs/heads/master
| 2022-04-02T08:57:10.565490 | 2020-01-05T16:13:46 | 2020-01-05T16:13:46 | 272,955,589 | 1 | 0 |
MIT
| true | 2020-06-17T11:24:15 | 2020-06-17T11:24:15 | 2020-02-18T02:44:03 | 2020-01-05T16:13:47 | 6,138 | 0 | 0 | 0 | null | false | false |
#!/usr/bin/env python
import rospy
import mavros
from math import *
import thread
import threading
import time
from mavros.utils import *
from mavros import setpoint
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import *
class SetpointPosition:
    """Streams position setpoints to mavros and tracks arrival.

    A background thread publishes the current (x, y, z) target to
    mavros/setpoint_position/local at 10 Hz, while the local-position
    subscriber marks the target reached once the vehicle is within 0.5
    (units of the local frame) on every axis.
    """

    def __init__(self):
        # Current target in the local frame.
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0

        # publisher for mavros/setpoint_position/local
        self.pub = setpoint.get_pub_position_local(queue_size=10) # why 10?
        # subscriber for mavros/local_position/local
        self.sub = rospy.Subscriber(mavros.get_topic('local_position', 'pose'), setpoint.PoseStamped, self.reached)

        try:
            thread.start_new_thread(self.navigate, ())
        except:
            fault("Error: Unable to start thread")

        # Set by reached() when the current target is hit.
        self.done = False
        self.done_evt = threading.Event()

    def navigate(self):
        """Background loop: publish the current target pose at 10 Hz."""
        rate = rospy.Rate(10)

        msg = setpoint.PoseStamped(
            header=setpoint.Header(
                frame_id="map", # isn't used anyway
                stamp=rospy.Time.now()),
        )

        while not rospy.is_shutdown():
            #msg.pose.position.x = self.y
            #msg.pose.position.y = self.x
            #msg.pose.position.z = - self.z
            msg.pose.position.x = self.x
            msg.pose.position.y = self.y
            msg.pose.position.z = self.z
            self.pub.publish(msg)
            rate.sleep()

    def set(self, x, y, z, delay=0, wait=True):
        """Update the target; optionally block until reached, then sleep
        *delay* seconds."""
        self.done = False
        self.x = x
        self.y = y
        self.z = z

        if wait:
            rate = rospy.Rate(5)
            while not self.done and not rospy.is_shutdown():
                rate.sleep()

        time.sleep(delay)

    def reached(self, topic):
        """Pose callback: flag the target done when within 0.5 on all axes."""
        def is_near(msg, x, y):
            rospy.logdebug("Position %s: local: %d, target: %d, abs diff: %d",
                           msg, x, y, abs(x - y))
            print x, y
            print abs(x - y)
            return abs(x - y) < 0.5

        # print topic.pose.position.x
        if is_near('X', topic.pose.position.x, self.x) and is_near('Y', topic.pose.position.y, self.y) and is_near('Z', topic.pose.position.z, self.z):
            self.done = True
            self.done_evt.set()
            rospy.loginfo("setpoint reached!")
def setpoint_demo():
    """Drive the vehicle through three local (ENU) setpoints in sequence."""
    rospy.init_node('setpoint_position')
    mavros.set_namespace() # initialize mavros module with default namespace — why is this needed?
    rate = rospy.Rate(10)

    setpoint = SetpointPosition()

    rospy.loginfo("move forward")
    # set(x, y, z, delay) blocks until each target is reached, then sleeps.
    setpoint.set(2, 1.0, 2, 2)
    setpoint.set(2, 3.5, 2, 2)
    setpoint.set(2, 5.5, 2, 2) # desired position in ENU
    #setpoint.set(2, 2.5, 2, 2)

    rospy.loginfo("Bye!")
if __name__ == '__main__':
    try:
        setpoint_demo()
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown mid-demo is expected; exit quietly.
        pass
|
UTF-8
|
Python
| false | false | 2,892 |
py
| 54 |
local_navigate.py
| 33 | 0.565698 | 0.552559 | 0 | 103 | 27.07767 | 151 |
katetushkan/PIRIS
| 7,258,494,782,940 |
25954c431a6e3c864d40491c547451eeddc49256
|
3bbc5340893e6666a89f400ca92b6cf36f773a3e
|
/atm/utils.py
|
8288c7e5db8c5b22464a5547774bef5a95999b93
|
[] |
no_license
|
https://github.com/katetushkan/PIRIS
|
dc68a9331fff3d82c0ddc709e4798df90750ac3c
|
f908f4cc3bfb88476d3a1017b9449733ecdb0d81
|
refs/heads/master
| 2023-03-21T11:48:04.161023 | 2021-02-28T16:08:55 | 2021-02-28T16:08:55 | 343,147,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fpdf import FPDF
def create_check(date, price, balance, card_number, type):
    """Generate a one-page PDF receipt for an ATM operation.

    Args:
        date: timestamp of the operation.
        price: amount of the operation (omitted for balance enquiries).
        balance: account balance after the operation.
        card_number: card used for the operation.
        type: operation kind (e.g. "balance"); also names the output file
            ``check_<type>.pdf``.
    """
    pdf = FPDF(orientation='P', unit='mm', format='A4')
    pdf.add_page()
    pdf.set_xy(10.0, 80.0)
    pdf.set_font('Arial', 'B', 14)
    pdf.set_text_color(0, 0, 0)

    # Both receipt variants shared everything except the "price" line, so
    # build the text once and insert "price" only for non-balance operations.
    lines = [
        "Check for operation:",
        "...............................",
        f"date of operation: {date}",
    ]
    if type != "balance":
        lines.append(f"price: {price}")
    lines.extend([
        f"balance: {balance}",
        f"card number: {card_number}",
        f"operation type: {type}",
    ])

    pdf.multi_cell(0, 10, txt="\n".join(lines), border=0)
    pdf.output(f'check_{type}.pdf', 'F')
|
UTF-8
|
Python
| false | false | 1,006 |
py
| 35 |
utils.py
| 18 | 0.44334 | 0.427435 | 0 | 36 | 26.972222 | 58 |
randomUser8096/python
| 8,237,747,297,048 |
86d6870ccbb485aaa647157bd5b1d5b2ed639a77
|
90e37fac102091d39851233cd4f6d58e68f3bed1
|
/dphowSum.py
|
560dffdb9673ed77719f5c65a17c8a257d3f1f44
|
[] |
no_license
|
https://github.com/randomUser8096/python
|
de6d683aa386ea03019eb63381ecf3e126836f20
|
29b4c7c8e5c991633e619b18bfb7bb8d66af7c43
|
refs/heads/master
| 2023-04-25T05:47:37.158960 | 2021-08-26T10:52:51 | 2021-08-26T10:52:51 | 361,334,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def howSum(sum, arr):
    """Return a list of numbers drawn from *arr* (with repetition) that adds
    up to *sum*, or None when no combination exists.

    Depth-first search with early return; candidates are tried in the order
    they appear in *arr*, so the result is deterministic.
    """
    if sum < 0:
        return None
    if sum == 0:
        return []
    for candidate in arr:
        remainder_combo = howSum(sum - candidate, arr)
        if remainder_combo is not None:
            remainder_combo.append(candidate)
            return remainder_combo
    return None
# time -> n^m * m
# space -> m
def howSum2(sum, arr, memo=None):
    """Memoized howSum: return a list of numbers from *arr* summing to *sum*,
    or None when impossible.

    Fixes over the original:
    - ``memo`` defaults to None instead of ``{}``: a mutable default is
      shared across *all* top-level calls, leaking answers between calls
      made with unrelated ``arr`` values.
    - Successful sub-results are memoized too (the original only cached
      failures), and cached lists are copied on read/write so callers that
      append to a returned list cannot corrupt the cache.
    """
    if memo is None:
        memo = {}
    if sum in memo:
        cached = memo[sum]
        return None if cached is None else list(cached)
    if sum < 0:
        return None
    if sum == 0:
        return []
    for num in arr:
        rest = howSum2(sum - num, arr, memo)
        if rest is not None:
            rest.append(num)
            memo[sum] = list(rest)
            return rest
    memo[sum] = None
    return None
# time -> n * m * m
# space -> m * m
if __name__ == '__main__':
    # Demo: deep recursion / exponential backtracking without memoization.
    print(howSum(3000, [7, 2]))
|
UTF-8
|
Python
| false | false | 701 |
py
| 32 |
dphowSum.py
| 31 | 0.417974 | 0.400856 | 0 | 36 | 18.5 | 33 |
seandst/linch-pin
| 14,439,680,061,849 |
7cbb01dcb261ffbe9f8b8667189721a6fa1a2e58
|
925206a15e67e718013dd95fe16a62616764c34a
|
/linchpin/api/__init__.py
|
6f59ff5677a33a58c2e20bed9af2f453f55c1932
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
https://github.com/seandst/linch-pin
|
8bbccf69209e2d6eca0b10d6a21ff17bc355e4b0
|
427b6fb61f550a4d1120ac94c55d121fbecd70a6
|
refs/heads/develop
| 2021-01-19T20:38:12.300527 | 2017-04-08T22:13:10 | 2017-04-08T22:13:10 | 88,527,489 | 0 | 0 | null | true | 2017-04-17T16:29:08 | 2017-04-17T16:29:08 | 2017-04-04T20:33:01 | 2017-04-08T22:13:11 | 10,315 | 0 | 0 | 0 | null | null | null |
import os
import sys
import inspect
import ansible
import pprint
import requests
import jsonschema as jsch
from tabulate import tabulate
from ansible import utils
from collections import namedtuple
from ansible import utils
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.plugins.callback import CallbackBase
from callbacks import PlaybookCallback
from invoke_playbooks import invoke_linchpin
from linchpin.cli.utils import search_path
from utils import get_file, list_files, parse_yaml
from github import GitHub
class LinchpinAPI:
    """Core linch-pin API.

    Resolves topology/layout files (workspace first, then the bundled
    examples, then an optional upstream GitHub registry) and drives the
    Ansible playbooks that provision ("rise") and tear down ("drop")
    resources described by a PinFile.

    NOTE(review): this code uses Python-2 idioms (``dict.has_key``,
    subscripting the result of ``filter``) and calls ``click`` which is not
    imported in this chunk -- confirm the import exists at file top.
    """

    # Path, inside an upstream registry repo, under which the example
    # topologies/layouts live.
    UPSTREAM_EXAMPLES_PATH = "linchpin/examples"

    def __init__(self, context):
        # Root of the installed linchpin package (two levels above this file).
        base_path = os.path.dirname(__file__).split("/")[0:-2]
        self.base_path = "/{}/linchpin/".format('/'.join(base_path))
        # PinFile sections that are metadata rather than provision targets.
        self.excludes = set(["topology_upstream",
                             "layout_upstream",
                             "post_actions"])
        self.context = context

    def get_config_path(self):
        """Return the first linchpin_config.yml found in the search order:
        cwd, jenkins checkout, home dir, package dir, /etc, then sys.path."""
        try:
            cwd = os.getcwd()
            config_files = [
                cwd+"/linchpin_config.yml",
                cwd+"/linch-pin/linchpin_config.yml",  # for jenkins
                "~/.linchpin_config.yml",
                self.base_path+"/linchpin_config.yml",
                "/etc/linchpin_config.yml"]
            for p in sys.path:
                config_files.extend(['{}/linchpin/linchpin_config.yml'.format(p)])
            for c_file in config_files:
                # print(c_file)
                if os.path.isfile(c_file):
                    # print("debug:: File found returning ::")
                    # print(c_file)
                    return c_file
        except Exception as e:
            print(e)

    def get_config(self):
        """Parse the active linchpin config file and return it as a dict."""
        config_path = self.get_config_path()
        config = parse_yaml(config_path)
        return config

    def get_evars(self, pf):
        """ creates a group of extra vars on basis on linchpin file dict """
        e_vars = []
        for group in pf:
            if not (group in ["post_actions",
                              "topology_upstream",
                              "layout_upstream"]):
                topology = pf[group].get("topology")
                layout = pf[group].get("layout")
                e_var_grp = {}
                e_var_grp["topology"] = search_path(topology, os.getcwd())
                e_var_grp["layout"] = search_path(layout, os.getcwd())
                if None in e_var_grp.values():
                    raise Exception("Topology or Layout mentioned \
                                     in pf file not found . \
                                     Please check your pf file.")
                e_vars.append(e_var_grp)
        return e_vars

    def lp_topo_list(self, upstream=None):
        """
        search_order : list topologies from upstream if mentioned
        list topologies from current folder
        """
        if upstream is None:
            t_files = list_files(self.base_path + "/examples/topology/")
            return t_files
        else:
            print("getting from upstream")
            g = GitHub(upstream)
            t_files = []
            repo_path = LinchpinAPI.UPSTREAM_EXAMPLES_PATH + "/topology"
            files = g.list_files(repo_path)
            return files

    # BUG(review): the parameter is spelled "topolgy_registry" but the body
    # reads "topology_registry" below, which raises NameError whenever the
    # local/package search misses -- fix the spelling upstream.
    def find_topology(self, topology, topolgy_registry):
        print("searching for topology in configured workspace: "+self.context.workspace)
        try:
            topos = os.listdir(self.context.workspace+"/topologies")
            if topology in topos:
                return os.path.abspath(self.context.workspace+"/topologies/"+topology)
        except OSError as e:
            click.echo(str(e))
            click.echo("topologies directory not found in workspace.")
        except Exception as e:
            click.echo(str(e))
        click.echo("Searching for topology in linchpin package.")
        topos = self.lp_topo_list()
        topos = [t["name"] for t in topos]
        if topology in topos:
            click.echo("Topology file found in linchpin package.")
            click.echo("Copying it to workspace")
            self.lp_topo_get(topology)
            return os.path.abspath(self.context.workspace+"/topologies/"+topology)
        click.echo("Topology file not found")
        click.echo("Searching for topology from upstream")
        # currently supports only one topology registry per PinFile
        if topology_registry:
            try:
                topos = self.lp_topo_list(topology_registry)
                topos = [x["name"] for x in topos]
                if topology in topos:
                    click.echo("Found topology in registry")
                    click.echo("Fetching topology from registry")
                    self.lp_topo_get(topology, topology_registry)
                    return os.path.abspath(self.context.workspace+"/topologies/"+topology)
            except Exception as e:
                click.echo("Exception occurred "+str(e))
        raise IOError("Topology file not found. Invalid topology reference in PinFile")

    def find_layout(self, layout, layout_registry=None):
        """Resolve *layout* to an absolute path: workspace first, then the
        bundled examples, then the optional upstream registry; raises
        IOError when not found anywhere."""
        print("searching for layout in configured workspace: "+self.context.workspace)
        try:
            layouts = os.listdir(self.context.workspace+"/layouts")
            if layout in layouts:
                return os.path.abspath(self.context.workspace+"/layouts/"+layout)
        except OSError as e:
            click.echo(str(e))
            click.echo("layouts directory not found in workspace.")
        except Exception as e:
            click.echo(str(e))
        click.echo("Searching for layout in linchpin package.")
        layouts = self.lp_layout_list()
        layouts = [t["name"] for t in layouts]
        if layout in layouts:
            click.echo("layout file found in linchpin package.")
            click.echo("Copying it to workspace")
            self.lp_layout_get(layout)
            return os.path.abspath(self.context.workspace+"/layouts/"+layout)
        click.echo("layout file not found")
        click.echo("Searching for layout from upstream")
        # currently supports only one layout registry per PinFile
        if layout_registry:
            try:
                layouts = self.lp_layout_list(layout_registry)
                layouts = [x["name"] for x in layouts]
                if layout in layouts:
                    click.echo("Found layout in registry")
                    click.echo("Fetching layout from registry")
                    self.lp_layout_get(layout, layout_registry)
                    return os.path.abspath(self.context.workspace+"/layouts/"+layout)
            except Exception as e:
                click.echo("Exception occurred "+str(e))
        raise IOError("layout file not found. Invalid layout reference in PinFile")

    def lp_topo_get(self, topo, upstream=None):
        """
        search_order : get topologies from upstream if mentioned
        get topologies from core package
        # need to add checks for ./topologies
        """
        if upstream is None:
            pkg_file_path = self.base_path + "/examples/topology/" + topo
            return open(pkg_file_path).read()
            # get_file(self.base_path + "/examples/topology/" + topo,
            #          "./topologies/")
        else:
            g = GitHub(upstream)
            repo_path = LinchpinAPI.UPSTREAM_EXAMPLES_PATH + "/topology"
            files = g.list_files(repo_path)
            # NOTE(review): filter(...) is subscripted below -- Python-2
            # semantics (Python 3 returns an iterator).
            link = filter(lambda link: link['name'] == topo, files)
            link = link[0]["download_url"]
            return requests.get(link).text
            # get_file(link, "./topologies", True)
            # return link

    def lp_layout_list(self, upstream=None):
        """
        search_order : list layouts from upstream if mentioned
        list layouts from core package
        """
        if upstream is None:
            l_files = list_files(self.base_path + "examples/layouts/")
            return l_files
        else:
            g = GitHub(upstream)
            l_files = []
            repo_path = LinchpinAPI.UPSTREAM_EXAMPLES_PATH + "/layouts"
            files = g.list_files(repo_path)
            return files

    def lp_layout_get(self, layout, upstream=None):
        """
        search_order : get layouts from upstream if mentioned
        get layouts from core package
        """
        if upstream is None:
            pkg_file_path = self.base_path + "/examples/layouts/" + layout
            return open(pkg_file_path, "r").read()
            # get_file(self.base_path + "/examples/layouts/" + layout,
            #          "./layouts/")
        else:
            g = GitHub(upstream)
            repo_path = LinchpinAPI.UPSTREAM_EXAMPLES_PATH + "/layouts"
            files = g.list_files(repo_path)
            link = filter(lambda link: link['name'] == layout, files)
            link = link[0]["download_url"]
            return requests.get(link).text

    def lp_rise(self, pf, targets):
        """Provision the given PinFile *targets* (every non-excluded target
        when *targets* is empty); raises KeyError on unknown targets."""
        pf = parse_yaml(pf)
        e_vars = {}
        e_vars['linchpin_config'] = self.get_config_path()
        e_vars['outputfolder_path'] = self.context.workspace+"/outputs"
        e_vars['inventory_outputs_path'] = self.context.workspace+"/inventories"
        e_vars['keystore_path'] = self.context.workspace+"/keystore"
        e_vars['state'] = "present"
        # checks wether the targets are valid or not
        if set(targets) == set(pf.keys()).intersection(targets) and len(targets) > 0:
            for target in targets:
                topology = pf[target]['topology']
                topology_registry = pf.get("topology_registry", None)
                e_vars['topology'] = self.find_topology(pf[target]["topology"],
                                                        topology_registry)
                # NOTE(review): dict.has_key is Python-2 only.
                if pf[target].has_key("layout"):
                    e_vars['inventory_layout_file'] = self.context.workspace+"/layouts/"+pf[target]["layout"]
                output = invoke_linchpin(self.base_path,
                                         e_vars,
                                         "PROVISION",
                                         console=True)
        elif len(targets) == 0:
            for target in set(pf.keys()).difference(self.excludes):
                topology = pf[target]['topology']
                topology_registry = pf.get("topology_registry", None)
                e_vars['topology'] = self.find_topology(pf[target]["topology"],
                                                        topology_registry)
                if pf[target].has_key("layout"):
                    e_vars['inventory_layout_file'] = self.context.workspace+"/layouts/"+pf[target]["layout"]
                output = invoke_linchpin(self.base_path, e_vars, "PROVISION",
                                         console=True)
        else:
            raise KeyError("One or more Invalid targets found")

    def lp_drop(self, pf, targets):
        """ drop module of linchpin cli """
        pf = parse_yaml(pf)
        e_vars = {}
        e_vars['linchpin_config'] = self.get_config_path()
        e_vars['inventory_outputs_path'] = self.context.workspace + "/inventories"
        e_vars['keystore_path'] = self.context.workspace+"/keystore"
        e_vars['state'] = "absent"
        # checks wether the targets are valid or not
        if set(targets) == set(pf.keys()).intersection(targets) and len(targets) > 0:
            for target in targets:
                topology = pf[target]['topology']
                topology_registry = pf.get("topology_registry", None)
                e_vars['topology'] = self.find_topology(pf[target]["topology"],
                                                        topology_registry)
                # NOTE(review): str.strip removes a *character set*, not a
                # suffix -- ".output" naming can be mangled for some names.
                output_file = (self.context.workspace + "/outputs/" +
                               topology.strip(".yaml").strip(".yml") +
                               ".output")
                e_vars['topology_output_file'] = output_file
                output = invoke_linchpin(self.base_path,
                                         e_vars,
                                         "TEARDOWN",
                                         console=True)
        elif len(targets) == 0:
            for target in set(pf.keys()).difference(self.excludes):
                # NOTE(review): the whole PinFile dict is passed as the
                # registry here, whereas the targeted branch above passes
                # pf.get("topology_registry") -- confirm intent.
                e_vars['topology'] = self.find_topology(pf[target]["topology"],
                                                        pf)
                topology = pf[target]["topology"].strip(".yml").strip(".yaml")
                output_file = (self.context.workspace + "/outputs/" +
                               topology.strip("yaml").strip("yml") +
                               ".output")
                e_vars['topology_output_file'] = output_file
                output = invoke_linchpin(self.base_path,
                                         e_vars,
                                         "TEARDOWN",
                                         console=True)
        else:
            raise KeyError("One or more Invalid targets found")

    def lp_validate_topology(self, topology):
        """Run the schema-check playbook against *topology* and return the
        playbook result."""
        e_vars = {}
        e_vars["schema"] = self.base_path + "/schemas/schema_v3.json"
        e_vars["data"] = topology
        result = invoke_linchpin(self.base_path, e_vars,
                                 "SCHEMA_CHECK", console=True)
        print(result)
        return result

    def lp_invgen(self, topoout, layout, invout, invtype):
        """ invgen module of linchpin cli """
        e_vars = {}
        e_vars['linchpin_config'] = self.get_config_path()
        e_vars['output'] = os.path.abspath(topoout)
        e_vars['layout'] = os.path.abspath(layout)
        e_vars['inventory_type'] = invtype
        e_vars['inventory_output'] = invout
        result = invoke_linchpin(self.base_path,
                                 e_vars,
                                 "INVGEN",
                                 console=True)

    def lp_test(self, topo, layout, pf):
        """ test module of linchpin.api"""
        e_vars = {}
        e_vars['data'] = topo
        e_vars['schema'] = self.base_path + "/schemas/schema_v3.json"
        result = invoke_linchpin(self.base_path, e_vars, "TEST", console=True)
        return result
|
UTF-8
|
Python
| false | false | 14,570 |
py
| 5 |
__init__.py
| 4 | 0.534592 | 0.533905 | 0 | 328 | 43.420732 | 109 |
GHAGG/ITAO-Coding-Journal
| 15,393,162,821,820 |
7db390b208d3de844204d74b682870d41389f5b3
|
a2bfa88775ac41c1f9342168fb636e1a76085711
|
/drinks.py
|
9c6b2feadd617ecc0d0a7096fadd40d6a5558d09
|
[] |
no_license
|
https://github.com/GHAGG/ITAO-Coding-Journal
|
d412cc1bea52552c71adbc3423895c05644c8c39
|
edcfa32e5f90f750483565177d1b1fe86f15fb35
|
refs/heads/main
| 2023-08-24T15:14:52.029041 | 2021-10-07T22:06:22 | 2021-10-07T22:06:22 | 404,051,199 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Drink:
    """Base beverage: syrup flavour, water type, calories and ice ratio.

    Fix: the original constructors ignored their own arguments and always
    assigned hard-coded values. The parameters are now honoured, with
    defaults preserving each class's previous effective values, and every
    subclass calls super().__init__ so all attributes (notably `iceratio`)
    exist on every instance.
    """

    def __init__(self, syrup="water", water="plain", calories=0, iceratio=0):
        self.syrup = syrup
        self.water = water
        self._calories = calories
        self.iceratio = iceratio

    def __repr__(self):
        return self.syrup

    def cal(self):
        """Return the calorie count."""
        return self._calories


class Soda(Drink):
    """Generic carbonated drink (100 kcal by default)."""

    def __init__(self, syrup="none", water="carbonated", calories=100):
        super().__init__(syrup, water, calories)


class Sprite(Soda):
    def __init__(self, syrup="Sprite", water="Carbonated", calories=100):
        super().__init__(syrup, water, calories)


class DietSprite(Sprite):
    """Sprite with zero calories."""

    def __init__(self, syrup="Sprite", water="Carbonated", calories=0):
        super().__init__(syrup, water, calories)


class Coke(Soda):
    def __init__(self, syrup="Coke", water="Carbonated", calories=100):
        super().__init__(syrup, water, calories)


class DietCoke(Coke):
    """Coke with zero calories."""

    def __init__(self, syrup="Coke", water="Carbonated", calories=0):
        super().__init__(syrup, water, calories)
|
UTF-8
|
Python
| false | false | 1,228 |
py
| 11 |
drinks.py
| 11 | 0.538274 | 0.526059 | 0 | 40 | 28.7 | 85 |
fpsebastiam/live-de-python
| 10,926,396,819,148 |
b867e63bb28e47018fd6fe6e701a0436c9cf3c5a
|
62b748a4f6e7c4eeff7160aecec38d6cf2151316
|
/codigo/Live119/pipe_and_filter/exemplo_1.py
|
4acf2fe6bfe9b3a106a6ff5ef03ec5a10829fda4
|
[
"MIT"
] |
permissive
|
https://github.com/fpsebastiam/live-de-python
|
5675f9683294262dccfcb991bd9609b3d3bb0ecd
|
054b412b5646a6ecc88fea50fe1cd930797ec0e1
|
refs/heads/main
| 2023-07-22T12:40:58.435786 | 2023-07-04T03:32:37 | 2023-07-04T03:32:37 | 378,723,143 | 0 | 0 |
MIT
| true | 2021-06-20T19:19:03 | 2021-06-20T19:19:02 | 2021-06-20T19:16:17 | 2021-06-15T02:41:43 | 374,396 | 0 | 0 | 0 | null | false | false |
from typing import Text
class Pipeline:
    """Compose a sequence of single-argument filters.

    Calling the pipeline threads the value through each filter in the order
    the filters were given at construction time.
    """

    def __init__(self, *filters):
        self.filters = filters

    def __call__(self, value):
        result = value
        for stage in self.filters:
            result = stage(result)
        return result
def limpa_texto(texto: Text):
    """Remove every newline character from *texto*."""
    return texto.replace('\n', '')


def _rotular(texto: Text, palavras, rotulo):
    """Wrap each occurrence of the given words as ROTULO(word)."""
    resultado = texto
    for palavra in palavras:
        resultado = resultado.replace(palavra, f'{rotulo}({palavra})')
    return resultado


def achar_nome(texto: Text):
    """Tag the known proper names as NOME(...)."""
    return _rotular(texto, ('Eduardo', 'Fausto'), 'NOME')


def achar_verbos(texto: Text):
    """Tag the known verbs as VERBO(...)."""
    return _rotular(texto, ('é',), 'VERBO')


def achar_adjetivos(texto: Text):
    """Tag the known adjectives as ADJ(...)."""
    return _rotular(texto, ('massa', 'chatão'), 'ADJ')


def trocar_massa(texto: Text):
    """Replace 'massa' with 'chatão'."""
    return texto.replace('massa', 'chatão')
texto = "\n\n\n\n Eduardo é massa\n\n\n"
print(texto)

# Stages run left-to-right: trocar_massa first, limpa_texto last.
ans_morfologica = Pipeline(
    trocar_massa, achar_adjetivos, achar_verbos, achar_nome, limpa_texto
)

# NOTE(review): this commented-out nested form applies the filters in the
# OPPOSITE order to the Pipeline above (limpa_texto innermost/first), so the
# two are not equivalent.
# print(
#     trocar_massa(
#         achar_adjetivos(
#             achar_verbos(
#                 achar_nome(
#                     limpa_texto(texto)
#                 )
#             )
#         )
#     )
# )

print(ans_morfologica(texto))
|
UTF-8
|
Python
| false | false | 1,423 |
py
| 1,077 |
exemplo_1.py
| 831 | 0.579986 | 0.579986 | 0 | 67 | 20.179104 | 72 |
cms-sw/cmssw
| 17,798,344,511,758 |
3faabe8233dcfad57d00f7f2e4c2a018938056f9
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DPGAnalysis/MuonTools/python/muNtupleProducer_cff.py
|
993e2e1098e9097dc040cad5941e51dae95ed13b
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 |
Apache-2.0
| false | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | 2023-09-09T18:47:07 | 2023-09-14T19:14:27 | 1,330,249 | 980 | 4,104 | 807 |
C++
| false | false |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
from DPGAnalysis.MuonTools.nano_mu_digi_cff import *
from DPGAnalysis.MuonTools.nano_mu_local_reco_cff import *
from DPGAnalysis.MuonTools.nano_mu_reco_cff import *
from DPGAnalysis.MuonTools.nano_mu_l1t_cff import *
# Full muon ntuple-producer sequence: digi, local-reco, reco and L1-trigger
# table producers, run in that order.
muNtupleProducer = cms.Sequence(muDigiProducers
                                + muLocalRecoProducers
                                + muRecoProducers
                                + muL1TriggerProducers
                                )


def nanoAOD_customizeCommon(process):
    """Customise *process* for muon NanoAOD production.

    When the GEM/CSC tag-and-probe table producers are present, loads the
    transient-track builder and SteppingHelix propagator configs they need.
    When a NANOAOD output module exists, keeps the flat tables and drops
    the trigger results from its output commands. Returns the process.
    """
    if hasattr(process, "muGEMMuonExtTableProducer") or hasattr(process, "muCSCTnPFlatTableProducer"):
        process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
        process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi")
        process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi")
        process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi")

    if hasattr(process, "NANOAODoutput"):
        process.NANOAODoutput.outputCommands.append("keep nanoaodFlatTable_*Table*_*_*")
        process.NANOAODoutput.outputCommands.append("drop edmTriggerResults_*_*_*")

    return process
|
UTF-8
|
Python
| false | false | 1,355 |
py
| 46,375 |
muNtupleProducer_cff.py
| 40,422 | 0.689299 | 0.687823 | 0 | 28 | 47.392857 | 103 |
douglascarlini/examples
| 4,131,758,547,762 |
a39e16c55b08039d034486b806d088239cc4e055
|
ccc16a23ab917da18eed4c1f66efc00914c39510
|
/python/face-detection.py
|
367499eff576d15ed264f83acc91cb0e8b0199a5
|
[] |
no_license
|
https://github.com/douglascarlini/examples
|
409a723a40801139cb2274efefefa5a4c6f2dd80
|
5045ec35de9962d005e319c8e69d3ddb87c63a7e
|
refs/heads/master
| 2022-12-04T16:55:49.953625 | 2020-08-07T06:34:46 | 2020-08-07T06:34:46 | 285,750,087 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Author: https://github.com/douglascarlini
# pip install face-recognition
# pip install opencv-python
import face_recognition as fr
import cv2

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    # face_recognition expects RGB; OpenCV captures BGR, so reverse channels.
    rgb = frame[:, :, ::-1]
    faces = fr.face_locations(rgb)
    for y1, x2, y2, x1 in faces:
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:  # ESC quits
        break

# Fix: release the camera and close the preview window; without this the
# capture device can stay locked until the process exits.
cap.release()
cv2.destroyAllWindows()
|
UTF-8
|
Python
| false | false | 476 |
py
| 5 |
face-detection.py
| 3 | 0.598739 | 0.546218 | 0 | 27 | 16.592593 | 64 |
Influencer/HTPC-Manager
| 755,914,258,430 |
7b14aa893333a599f3b5375012d7b781903edff9
|
4b88451dc6e9950cf09d9b9ace42c7069fa53f2b
|
/modules/sickbeard.py
|
1a24260efda058156f9015c4beffbc73ddbb1f08
|
[] |
no_license
|
https://github.com/Influencer/HTPC-Manager
|
d6a0ba66db84d0550506128ab5d482c413983b24
|
9a43b3544c3fb36f5fb79c1defc2c224dded3284
|
refs/heads/master
| 2019-05-08T09:20:29.582029 | 2012-09-03T22:00:45 | 2012-09-03T22:00:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os, cherrypy, htpc
from urllib import quote
from urllib2 import urlopen
from json import loads
class Sickbeard:
    """HTPC-Manager module: registers itself with the frontend and proxies a
    subset of the Sick Beard JSON API, plus a TheTVDB search helper."""

    def __init__(self):
        # Register this module (and its settings fields) with HTPC-Manager.
        htpc.modules.append({
            'name': 'Sickbeard',
            'id': 'sickbeard',
            'test': '/sickbeard/ping',
            'fields': [
                {'type':'bool', 'label':'Enable', 'name':'sickbeard_enable'},
                {'type':'text', 'label':'Menu name', 'name':'sickbeard_name'},
                {'type':'text', 'label':'IP / Host *', 'name':'sickbeard_host'},
                {'type':'text', 'label':'Port *', 'name':'sickbeard_port'},
                {'type':'text', 'label':'API key', 'name':'sickbeard_apikey'}
            ]})

    @cherrypy.expose()
    def index(self):
        """Render the Sick Beard page template."""
        return htpc.lookup.get_template('sickbeard.html').render()

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def ping(self, sickbeard_host, sickbeard_port, sickbeard_apikey, **kwargs):
        """Connection test: return the sb.ping payload on success, None on
        any failure (errors are deliberately swallowed)."""
        try:
            url = 'http://'+sickbeard_host+':'+sickbeard_port+'/api/'+sickbeard_apikey+'/?cmd=';
            response = loads(urlopen(url+'sb.ping', timeout=10).read())
            if response.get('result') == "success":
                return response
        except:
            return

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetShowList(self):
        """Return all shows, sorted by name."""
        return self.fetch('shows&sort=name')

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetNextAired(self):
        """Return upcoming episodes."""
        return self.fetch('future')

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetPoster(self, tvdbid):
        """Return the poster for the given TVDB show id."""
        return self.fetch('show.getposter&tvdbid='+tvdbid)

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetHistory(self, limit=''):
        """Return download/postprocess history, optionally limited."""
        return self.fetch('history&limit='+limit)

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetLogs(self):
        """Return Sick Beard logs at info level and above."""
        return self.fetch('logs&min_level=info')

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def AddShow(self, tvdbid):
        """Add a new show by TVDB id."""
        return self.fetch('show.addnew&tvdbid='+tvdbid)

    @cherrypy.expose()
    @cherrypy.tools.json_out()
    def GetShow(self, tvdbid):
        """Return details for one show."""
        return self.fetch('show&tvdbid='+tvdbid)

    @cherrypy.expose()
    def SearchShow(self, query):
        """Search TheTVDB directly (not the Sick Beard API) for *query*."""
        try:
            url = 'http://www.thetvdb.com/api/GetSeries.php?seriesname='+quote(query)
            return loads(urlopen(url, timeout=10).read())
        except:
            return

    def fetch(self, cmd):
        """Build the API URL from saved settings and return the decoded JSON
        response for *cmd*; None on any failure."""
        try:
            settings = htpc.settings.Settings()
            host = settings.get('sickbeard_host', '')
            port = str(settings.get('sickbeard_port', ''))
            apikey = settings.get('sickbeard_apikey', '')
            url = 'http://' + host + ':' + str(port) + '/api/' + apikey + '/?cmd='+cmd;
            return loads(urlopen(url, timeout=10).read())
        except:
            return


# Instantiate on import so CherryPy mounts the module's routes.
htpc.root.sickbeard = Sickbeard()
|
UTF-8
|
Python
| false | false | 2,915 |
py
| 12 |
sickbeard.py
| 9 | 0.565009 | 0.562607 | 0 | 89 | 31.752809 | 96 |
teamWSIZ/python1-2020
| 17,334,488,029,779 |
c9100a4f7b9d8844b21e5db2cefea935a86e18fd
|
17c10f0677db31713ac721845c8b2ead5d64d0fa
|
/_algorytmy/sudoku/encoding.py
|
e64cdd3f2d1fa5a0d99bd2b6a89a2b6f8e66a51d
|
[] |
no_license
|
https://github.com/teamWSIZ/python1-2020
|
0a2e5f03d980082534e2032c3e9172ff23dbedb7
|
cac1646c6c766aa96de5fc8c36d85d12ce0e7da2
|
refs/heads/master
| 2023-08-01T01:38:18.888641 | 2021-09-15T13:21:50 | 2021-09-15T13:21:50 | 302,949,303 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Kodujemy rząd pozycji
00 00 00 00 4x2B... rząd wartości = 8bitów = 1B
flaga... 4-bitowa...maska zer rzędu = 4bity = 0.5B ...
====
łącznie 12 bitów na rząd...
czyli 48 bitów na board ... zmieści się w pojedynczym int-cie 8-bytes...
"""
from typing import List
# b = [[0, 0, 0, 1], [1, 0, 2, 3], [0, 1, 2, 4], [0, 0, 0, 0]] # 16 * 8 = 128 B
def decode_row(x: int) -> List[int]:
    """Unpack a 12-bit row code into four cell values (0 = empty cell).

    Bits 0-7 hold four 2-bit values stored as value-1 (leftmost cell in the
    highest pair); bits 8-11 are a presence mask, highest bit = leftmost cell.
    """
    values = [1 + ((x >> shift) & 0b11) for shift in (6, 4, 2, 0)]
    mask = (x >> 8) & 0b1111
    return [v if (mask >> (3 - i)) & 1 else 0 for i, v in enumerate(values)]


def encode_row(row: List[int]) -> int:
    """Pack four cell values (0 = empty) into a 12-bit row code (inverse of
    decode_row; empty cells store 0 in their value bits)."""
    mask = 0
    for cell in row:
        mask = (mask << 1) | (1 if cell > 0 else 0)
    packed = mask
    for cell in row:
        packed = (packed << 2) | ((cell - 1) & 0b11 if cell > 0 else 0)
    return packed


def encode_board(board) -> int:
    """Pack a 4x4 board into one 48-bit integer, top row in the
    most-significant 12 bits."""
    code = 0
    for row in board:
        code = (code << 12) | encode_row(row)
    return code


def decode_board(x: int) -> List[List[int]]:
    """Inverse of encode_board: unpack a 48-bit code into 4 rows of 4."""
    rows = []
    for _ in range(4):
        rows.append(decode_row(x & 0b111111111111))  # lowest 12 bits
        x >>= 12
    return rows[::-1]
# Spot checks against hand-computed encodings (expected values in comments).
print(decode_row(0b011000011011))  # [0, 2, 3, 0]
print(decode_row(0b111100011011))  # [1, 2, 3, 4]
print(decode_row(0b111000011011))  # [1, 2, 3, 0]
print(decode_row(0b111110101111))  # [3, 3, 4, 4]
print('--------')
# Round-trip a single row through encode/decode.
code = encode_row([0, 4, 2, 1])
print(decode_row(code))
print('*********')
# Round-trip a whole board through the 48-bit encoding.
b = [[0, 0, 0, 1], [1, 0, 2, 3], [0, 1, 2, 4], [4, 1, 1, 1]]
x = encode_board(b)
print(bin(x))
B = decode_board(x)
print(B)
|
UTF-8
|
Python
| false | false | 1,735 |
py
| 195 |
encoding.py
| 181 | 0.506969 | 0.4036 | 0 | 78 | 21.076923 | 82 |
jgmize/nucleus
| 4,904,852,677,560 |
1e355b23e11c18d6c425c864631c7616cf14b41a
|
dfc0288b0d69de3e044b685b4d3f92cbd8a536b4
|
/nucleus/urls.py
|
c912f1ff2dd588849093da696727fb679864e800
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/jgmize/nucleus
|
cdfead9afd60f76bc59ac621dad46ef83b352428
|
1fd9d069103b7be00f5815ae1f3eac6ba0e3530d
|
refs/heads/master
| 2020-04-08T10:07:14.115162 | 2015-12-17T20:38:06 | 2015-12-17T20:38:06 | 12,463,026 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http import HttpResponse
from django_browserid.admin import site as browserid_admin_site
from funfactory.monkeypatches import patch
patch() # Apply funfactory monkeypatches.
admin.autodiscover() # Discover admin.py files for the admin interface.
# Copy ModelAdmin entries from the default admin site.
browserid_admin_site.copy_registry(admin.site)
urlpatterns = patterns('',
    (r'', include('nucleus.base.urls')),
    url(r'^admin/', include(browserid_admin_site.urls)),
    url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'),
    url(r'^rna/', include('rna.urls')),
    (r'', include('django_browserid.urls')),
    (r'^robots\.txt$',
     lambda r: HttpResponse(
         # Fix: parenthesize the conditional. Without parens, `%` binds
         # tighter than `if/else`, so when ENGAGE_ROBOTS was False the whole
         # response body was just the string "Disallow".
         "User-agent: *\n%s: /" % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
         mimetype="text/plain"
     )
    ),
)

# In DEBUG mode, serve media files through Django.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
|
UTF-8
|
Python
| false | false | 1,172 |
py
| 26 |
urls.py
| 16 | 0.709898 | 0.709898 | 0 | 37 | 30.675676 | 88 |
serccakir/web-app
| 16,827,681,872,493 |
e14409d0babaf6f5c0486137b575d771be80eb18
|
ed8ab98a13d0ec701e03f28366bb752301d3bb23
|
/frontend/urls.py
|
53b2e1b947f43461f452c331eead761124ff3df9
|
[] |
no_license
|
https://github.com/serccakir/web-app
|
a8825680118aaa3946aab2ba02657e031838ab77
|
9ac38ad8661af77b854c6a31e1e5bb6b85223cc5
|
refs/heads/master
| 2023-02-23T08:59:49.696291 | 2023-01-10T20:20:48 | 2023-01-10T20:20:48 | 334,659,617 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url, include
from django.views.generic.base import RedirectView
from .views import countries
# URL routes: the site root redirects to the static index page, and
# /countriesdata/ serves the countries view.
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='static/index.html', permanent=False), name='index'),
    url(r'^countriesdata/', countries, name='countries'),
]
|
UTF-8
|
Python
| false | false | 293 |
py
| 12 |
urls.py
| 8 | 0.733788 | 0.733788 | 0 | 9 | 31.666667 | 93 |
CNXTEoE/pyquil
| 10,625,749,123,459 |
93c7bdf7cc6de9b6d0180d83abf0a52e913fcb95
|
9e9ba8e8b9ee7e6b63193c472dbbc17fc258a0a6
|
/pyquil/tests/test_resource_manager.py
|
981d93e78283b97388e3fa978feba197f6692960
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/CNXTEoE/pyquil
|
56d52d5d31407103910e0e091138c812fc40e476
|
3df1ec586053a3f6b38c39b77df10f89d1cb88ca
|
refs/heads/master
| 2017-07-31T19:17:12.637028 | 2017-07-17T19:53:16 | 2017-07-17T19:53:16 | 95,025,296 | 1 | 0 | null | true | 2017-06-21T16:42:03 | 2017-06-21T16:42:03 | 2017-06-21T16:23:54 | 2017-06-21T16:38:00 | 655 | 0 | 0 | 0 | null | null | null |
from pyquil.resource_manager import *
import pyquil.quil as pq
from pyquil.gates import *
import pytest
def five_qubit_prog():
    """Build a 5-qubit program of Hadamards on freshly allocated qubits.

    Fix: the @pytest.fixture decorator was removed. The tests below call
    this function directly, and calling a fixture-decorated function
    directly is an error in modern pytest; no test requested it by
    parameter, so it is a plain helper.
    """
    p = pq.Program()
    qubits = [p.alloc() for _ in range(5)]
    p.inst([H(q) for q in qubits])
    return p, qubits


def test_alloc():
    p, qubits = five_qubit_prog()

    for qubit in qubits:
        assert qubit in p.resource_manager.live_qubits
        assert not instantiated(qubit)
        check_live_qubit(qubit)

    # Give the qubits labels
    for qubit in qubits:
        p.resource_manager.instantiate(qubit)
        assert instantiated(qubit)

    for qubit in qubits:
        p.free(qubit)
        assert qubit in p.resource_manager.dead_qubits


def test_add_resource_managers():
    p, p_qubits = five_qubit_prog()
    q, q_qubits = five_qubit_prog()

    summed_program = p + q
    assert (set(summed_program.resource_manager.live_qubits)
            == set.union(set(p_qubits), set(q_qubits)))


def test_direct_qubit_comparison():
    q0 = DirectQubit(0)
    q1 = DirectQubit(0)
    assert q0 == q1
    if q0 != q1:
        # This is a weird behavior in python2. x!=y and x<>y call __ne__() instead of negating
        # __eq__(). In python 3 __ne__ defaults to the inversion of __eq__
        raise AssertionError("Should never have gotten here! Python2 bites again")

    q2 = DirectQubit(2)
    assert q0 != q2
|
UTF-8
|
Python
| false | false | 1,381 |
py
| 16 |
test_resource_manager.py
| 9 | 0.635771 | 0.624185 | 0 | 50 | 26.62 | 94 |
HuangOwen/NLP-learning
| 2,130,303,788,516 |
704c509b7a8a240b52f4f9e9c27595284137c149
|
c5356e57101c5e7c35b78e2bc9c819bf0ff6473e
|
/jieba_freq_calc.py
|
bd9784014dc67744663d8ce25f8e72cf3d7c05fa
|
[] |
no_license
|
https://github.com/HuangOwen/NLP-learning
|
bd7e969634d5d883ced860984140d2d670d17994
|
c110477b7c75f3fe73642456937243469b7f637c
|
refs/heads/master
| 2018-02-08T04:02:10.066646 | 2018-01-29T06:13:05 | 2018-01-29T06:13:05 | 96,376,364 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# ----------- word-frequency count -----------
import jieba
from collections import Counter

# Read the novel and count segmented words. The explicit encoding makes the
# script independent of the platform's default locale encoding.
# TODO(review): confirm the source text file is actually UTF-8.
with open('路人女主的养成方法.txt', 'r', encoding='utf-8') as article:
    word_freq = Counter(jieba.cut(article.read()))

# Write "<count>:<word>" lines, most frequent first. Counter.most_common
# replaces the manual dict bookkeeping and sort of the original.
with open('result_jieba_cut.txt', 'w', encoding='utf-8') as result:
    for word, freq in word_freq.most_common():
        result.write(str(freq) + ':' + str(word) + '\n')
|
UTF-8
|
Python
| false | false | 622 |
py
| 6 |
jieba_freq_calc.py
| 1 | 0.488255 | 0.483221 | 0 | 19 | 29.157895 | 56 |
JacklineKigombe/Jenga-School-Lecture-9-assignment
| 10,488,310,184,617 |
4bfacd9c72e829688b5f1c875b335be41604322c
|
8f99303bc6f3477bcdf3d1bc85ee85680fbeb12f
|
/assignment4.py
|
843a578974040827d7a3238c02f48b2985afde87
|
[] |
no_license
|
https://github.com/JacklineKigombe/Jenga-School-Lecture-9-assignment
|
295bc178aaa65e09e2f810838568f008a7c0f2df
|
efcf92be9861bffb42a158693a91fbf48942c3d8
|
refs/heads/main
| 2023-08-21T02:33:24.507088 | 2021-10-31T17:53:33 | 2021-10-31T17:53:33 | 420,386,756 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#This function computes the factorial
#of a number entered by the user
def factor():
    """Prompt the user for a whole number and print its factorial.

    Reads one integer from stdin; raises ValueError if the input is not
    a valid integer literal (same as the original behavior).
    """
    n = int(input("Please enter a whole number: "))
    fact = 1
    # Loop variable renamed: the original called it `factor`, shadowing
    # the enclosing function's own name inside its body.
    for i in range(2, n + 1):  # start at 2: multiplying by 1 is a no-op
        fact = fact * i
    print("The factorial of ", n, " is", fact)


factor()
|
UTF-8
|
Python
| false | false | 268 |
py
| 6 |
assignment4.py
| 5 | 0.626866 | 0.615672 | 0 | 12 | 21.333333 | 51 |
arianasatryan/physionet-challenge-2020
| 1,872,605,770,887 |
858052a2edb379fc37031b6cedf589bb94af04f5
|
e1d94f57eb846d99d5494f2d5b4256cfaaccab86
|
/kardioml/segmentation/teijeiro/model/FreezableObject.py
|
9443f4788329fa23c6f09f008f1b15fb755c2507
|
[
"BSD-2-Clause"
] |
permissive
|
https://github.com/arianasatryan/physionet-challenge-2020
|
99638860759b769cc8552144bbd4aea5eb29a9db
|
c6f1648a148335babc0a26d8a589120616327548
|
refs/heads/master
| 2023-03-15T04:49:09.977797 | 2020-08-24T18:06:25 | 2020-08-24T18:06:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
"""
Created on Tue Jul 2 19:44:31 2013
@author: T. Teijeiro
"""
from abc import ABCMeta
import copy
class FreezableMeta(ABCMeta):
    """Metaclass for FreezableObject hierarchies.

    Builds a ``_fields`` tuple on every new class by concatenating the
    ``_fields`` of each base class (later bases first) with the class's
    own ``__slots__``, and guarantees ``__slots__`` is always defined.
    """

    def __new__(mcls, clsname, parents, attrs):
        # Start with the class's own slots; each parent's accumulated
        # fields are prepended in turn, so a later parent's fields end
        # up in front of an earlier parent's.
        combined = attrs.get('__slots__', ())
        for parent in parents:
            inherited = getattr(parent, '_fields', None) or ()
            if inherited:
                combined = inherited + combined
        # Every freezable class must use __slots__ for its attributes.
        attrs.setdefault('__slots__', ())
        attrs['_fields'] = combined
        return ABCMeta.__new__(mcls, clsname, parents, attrs)
class FreezableObject(object, metaclass=FreezableMeta):
    """
    This class provides utilities to "freeze" an object, this is, to guarantee
    that after the freeze operation no attributes of the object can be
    modified. If any of the attributes is also a FreezableObject, the freeze
    operation is called in depth-last order.
    """

    # NOTE(review): the original declared the metaclass with the Python 2
    # ``__metaclass__ = FreezableMeta`` class attribute, which Python 3
    # silently ignores -- leaving ``_fields`` undefined and breaking
    # freeze()/unfreeze()/__eq__. Declaring it in the class header applies
    # the metaclass on Python 3.

    __slots__ = ('__weakref__', '__frozen__')

    def __init__(self):
        # Instances start out mutable.
        self.__frozen__ = False

    def __eq__(self, other):
        """
        Implements equality comparison, by equality comparison of all the
        attributes but __frozen__ (and the __weakref__ bookkeeping slot).
        """
        return (
            type(self) is type(other)
            and self._fields == other._fields
            and all(
                getattr(self, f, None) == getattr(other, f, None)
                for f in self._fields
                if f not in ('__frozen__', '__weakref__')
            )
        )

    @property
    def frozen(self):
        """
        Checks if this object is frozen, this is, no attributes nor
        methods can be set.
        """
        return getattr(self, '__frozen__', False)

    def __setattr__(self, name, value):
        # Only the freeze flag itself may change while the object is frozen.
        if self.frozen and name != '__frozen__':
            raise AttributeError(self, 'Object {0} is now frozen'.format(self))
        return super(FreezableObject, self).__setattr__(name, value)

    def freeze(self):
        """
        Freezes the object, ensuring that no setting operations can be
        performed after that. Freezable attributes are frozen recursively.
        """
        if not self.frozen:
            self.__frozen__ = True
            for field in self._fields:
                attr = getattr(self, field, None)
                if isinstance(attr, FreezableObject):
                    attr.freeze()

    def unfreeze(self):
        """
        Unfreezes the object, allowing for attribute modifications.
        Freezable attributes are unfrozen recursively.
        """
        if self.frozen:
            self.__frozen__ = False
            for field in self._fields:
                attr = getattr(self, field, None)
                if isinstance(attr, FreezableObject):
                    attr.unfreeze()

    def references(self, obj):
        """
        Checks if this object references another one, this is, another object
        is an attribute of this object. If any attribute is a
        **FreezableObject** instance, then the property is checked recursively.
        Comparison is by identity, not equality.
        """
        for field in self._fields:
            attr = getattr(self, field, None)
            if attr is obj:
                return True
            if isinstance(attr, FreezableObject) and attr.references(obj):
                return True
        return False
def clone_attrs(obs, ref):
    """
    Sets every attribute of **obs** to a deep copy of the corresponding
    attribute of **ref**, skipping the freeze/weakref bookkeeping slots.
    **ref** is temporarily unfrozen (and re-frozen afterwards) so the
    copied attributes end up unfrozen on **obs**.

    Parameters
    ----------
    obs, ref:
        Observations. The attributes of *obs* are set to be equal that the
        attributes of *ref*.
    """
    # id(original) -> deep copy, so attributes sharing one object on *ref*
    # keep sharing a single copy on *obs*.
    copies = {}
    was_frozen = ref.frozen
    if was_frozen:
        ref.unfreeze()
    for name in ref._fields:
        if name in ('__frozen__', '__weakref__'):
            continue
        value = getattr(ref, name, None)
        key = id(value)
        if key not in copies:
            copies[key] = copy.deepcopy(value)
        setattr(obs, name, copies[key])
    if was_frozen:
        ref.freeze()
if __name__ == "__main__":
    # pylint: disable-msg=W0201
    class FreezableTest(FreezableObject):
        """Dummy class to test the FreezableObject hierarchy"""
        # Docstring moved above __slots__: in the original it was a stray
        # string expression after __slots__, not the class docstring.
        __slots__ = ('attr1', 'attr2', 'attr3')

        def __init__(self):
            super(FreezableTest, self).__init__()
            self.attr1 = "val1"
            self.attr2 = "val2"

    freezable = FreezableTest()
    print(freezable.attr1, freezable.attr2, freezable.frozen)
    freezable.attr1 = "val1_updated"
    print(freezable.attr1, freezable.attr2, freezable.frozen)
    freezable.attr3 = FreezableTest()
    freezable.attr3.attr2 = "val2_updated"
    freezable.freeze()
    print(freezable.attr1, freezable.attr2, freezable.frozen)
    try:
        freezable.attr2 = "val2_updated"
    except AttributeError:
        # FreezableObject.__setattr__ raises AttributeError on a frozen
        # object; the original caught TypeError, which never matches, so
        # the demo crashed instead of printing this line.
        print(freezable.attr1, freezable.attr2, freezable.frozen)
|
UTF-8
|
Python
| false | false | 4,975 |
py
| 205 |
FreezableObject.py
| 118 | 0.572261 | 0.563417 | 0 | 157 | 30.687898 | 79 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.