repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
sequence | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Artanic30/django | 15,126,874,847,996 | 701f8aca1c6bd3be04a01e4f50feb0fbfc0df3cd | c902ff61216383da5707c1658f6eae6fdc94c2ba | /untitled/gsurvey/admin.py | 2d6d9e6ab3e7ba7becac0a917cf290b73939dd46 | [] | no_license | https://github.com/Artanic30/django | 6d3e683ec81c2e5f6b7bd97d11e1ed33d36750f7 | b148e39a166542b12ab26ea8968bbaa1f21ee0cd | refs/heads/master | 2020-04-06T18:42:55.090518 | 2019-10-16T15:18:49 | 2019-10-16T15:18:49 | 157,709,579 | 0 | 1 | null | false | 2019-11-02T03:57:23 | 2018-11-15T12:42:17 | 2019-10-16T15:19:02 | 2019-11-02T03:57:21 | 4,683 | 0 | 0 | 1 | CSS | false | false | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Site
class SiteAdmin(admin.ModelAdmin):
list_display = ('location', 'time', 'capacity')
fieldsets = [
(None, {'fields': ['location', 'time', 'capacity', 'students']}),
]
filter_horizontal = ('students',)
// testcode
admin.site.register(User, UserAdmin)
admin.site.register(Site, SiteAdmin)
| UTF-8 | Python | false | false | 438 | py | 14 | admin.py | 9 | 0.666667 | 0.666667 | 0 | 15 | 28.2 | 87 |
arita37/pyspark | 18,391,049,975,090 | e05c3134c0ad315532bdbeb4f14a3fe7b5555d25 | 91003d702014ddbdd133066a234d16f53335bef7 | /test.py | c1becd2f970ba8a43ba9affc081efc44408bc572 | [] | no_license | https://github.com/arita37/pyspark | dc3f29e4e13fba894b3bfa0762cbccae3e00e157 | bccfc8b33e7482075b803df69863e4838226ed3d | refs/heads/main | 2023-02-06T23:57:03.407396 | 2020-12-30T04:49:37 | 2020-12-30T04:49:37 | 318,725,847 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import findspark
findspark.init()
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
simpleData = [("James", "Sales", 3000),
("Michael", "Sales", 4600),
("Robert", "Sales", 4100),
("Maria", "Finance", 3000),
("James", "Sales", 3000),
("Scott", "Finance", 3300),
("Jen", "Finance", 3900),
("Jeff", "Marketing", 3000),
("Kumar", "Marketing", 2000),
("Saif", "Sales", 4100)
]
schema = ["employee_name", "department", "salary"]
df = spark.createDataFrame(data=simpleData, schema = schema)
df.printSchema()
spark.
spark.
##########################################################################
#### pyspark.sql.functions module #####################################
from pyspark.sql.functions import ( add_months, current_date, current_timestamp, date_add, date_format,
date_sub, date_trunc, datediff, dayofmonth, dayofweek, dayofyear, from_unixtime, from_utc_timestamp, hour,
minute, month, months_between, next_day, quarter, second,
to_timestamp, to_utc_timestamp, unix_timestamp, weekofyear, year,
trunc, # Returns date truncated to the unit specified by the format.
window, ### pyspark.sql.types.TimestampType. w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
last_day, # Returns the last day of the month which the given date belongs to.
)
##### Numerics Operations #########################################
from pyspark.sql.functions import ( abs, acos, asin, atan, atan2, cos, cosh, exp, factorial, log, log10, log1p,
log2, pow, sin, sinh, sqrt, tan, tanh, round, floor, ceil, bround, expm1,cbrt,
rint, # double value that is closest in value to the argument and is equal to a mathematical integer.
signum, # Computes the signum of the given value.
hypot, # (col1, col2) # sqrt(a^2 + b^2)
rand, ### rand uniform
randn, # Generates a column with independent and identically distributed (i.i.d.) samples from the standard normal distribution.
)
##### string #########################################################
from pyspark.sql.functions import ( instr, lower, lpad, ltrim, regexp_extract, regexp_replace, rpad, rtrim,
split, substring, substring_index, trim, upper,
levenshtein, # () # Levenstsin sds df0.select(levenshtein('l', 'r').alias('d')).collect()
locate, # Find position of elt in string
translate, # reaplce
initcap, # to cap Translate the first letter of each word to upper ca
length, #Computes the character length of string data
repeat, # creates an array containing a column repeated count times. df.select(repeat(df.s, 3).alias('s')).collect()
reverse
)
#### Aggregation #####################################################
from pyspark.sql.functions import (
approxCountDistinct, approx_count_distinct, avg, corr, count, countDistinct, kurtosis, max, mean, min, skewness,
stddev, stddev, stddev_pop, stddev_pop, stddev_samp, stddev_samp, sum, sumDistinct, variance,
covar_pop, ## covaraince
covar_samp, ### coariance sample
var_pop, var_samp
)
##### Operations ###################################################################
from pyspark.sql.functions import (
isnan
isnull
expr ### execute dynamically df.select(expr("length(name)")).collect()
when # df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
)
#### Array #########################################################################
from pyspark.sql.functions import ( array, array_contains, array_distinct, array_except, array_intersect, array_join, array_max, array_min,
array_position, array_remove, array_repeat, array_sort, array_union, arrays_overlap, arrays_zip, sort_array,
element_at, ### arrdf.select(element_at(df.data, 1)).collect()ay
sequence, ### Generate a sequence of integers from start to stop, incrementing by step. If step is not set, incrementing by 1 if start is less than or equal to stop, otherwise -1.
shuffle, ### Shuffle of the array
size, ### len of array , df.select(size(df.data)).collect()
slice , ###v Collection function: returns an array containing all the elements in x from index start (or starting from the end if start is negative) with the specified length.
)
##### json ###################################################
from pyspark.sql.functions import (
get_json_object, from_json, schema_of_json, json_tuple, to_json
)
##### map, dictionnary ####################################################################
from pyspark.sql.functions import (
create_map ## dictionnary key,value df.select(create_map('name', 'age').alias("map")).collect()
map_concat
map_from_arrays
map_from_entries
map_keys
map_values
)
##### Ordering functions #######################################################
from pyspark.sql.functions import (
asc
asc_nulls_first
asc_nulls_last
dense_rank
desc
desc_nulls_first
desc_nulls_last
)
#### Window ######################################################################
from pyspark.sql.functions import ( lag, lead, percent_rank, cume_dist, ntil, rank
)
lag(col, count=1, default=None) # Window function: returns the value that is offset rows before the current row
lead(col, count=1, default=None) # Window function: returns the value that is offset rows after the current row,
percent_rank
cume_dist # Window function: returns the cumulative distribution of values within a window partition
ntile(n=4) #Window function: returns the ntile group id (from 1 to n inclusive) in an ordered window partition.
rank # Window function: returns the rank of rows within a window partition.
#### Column ##################################################################
from pyspark.sql.functions import (
concat, concat_ws, collect_list, collect_set, explode,
explode_outer, flatten, greatest, least, posexplode, posexplode_outer, struct
)
concat(*col) # Concatenates multiple input columns together into a single column. The function works with strings, binary and
concat_ws(sep=";", *col) # speration Concatenates multiple input string columns together into a single string column, using the given separator.
collect_list ## df2.agg(collect_list('age')).collect() Aggregate function: returns a list of objects with duplicates.
collect_set ### Aggregate function: returns a set of objects with duplicate elements eliminated.
explode ## array --> column eDF.select(explode(eDF.intlist).alias("anInt")).collect()
explode_outer ### array --> column Unlike explode, if the array/map is null or empty then null
flatten ## flatten array into flat Collection function: creates a single array from an array of arrays
greatest # Returns the greatest value of the list of column name df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
least(col1, col2, col3) # Returns the least value of the list of column names, skipping null values
posexplode(col ) # Returns a new row for each element with position in the given array or map. eDF.select(posexplode(eDF.intlist)).collect()
posexplode_outer ### explode array into new new row
struct ## new struct columns, df.select(struct('age', 'name').alias("struct")).collect()
#### Rows Agg operation #######################################################
from pyspark.sql.functions import (
grouping, grouping_id, first, last )
grouping # df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
grouping_id # df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show() returns the level of grouping,
first ### 1st row
last ### last row
#### Various
from pyspark.sql.functions import (
format_number, format_string, PandasUDFType, pandas_udfE, udf, broadcast, coalesce, col,
column, input_file_name, lit, row_number,
monotonically_increasing_id, spark_partition_id
)
format_number
format_string
PandasUDFType
pandas_udfE ## Vectorize UDF
udf
broadcast # Marks a DataFrame as small enough for use in broadcast joins.
coalesce ###
col , column # a Column based on the given column name.
input_file_name() #Creates a string column for the file name of the current Spark task.
lit
row_number
monotonically_increasing_id
spark_partition_id
##### Encoding ###################################################
bin ## Returns the string representation of the binary value of the given column.
encode
decode
conv # Convert a number in a string column from one base to another. Convert a number in a string column from one base to another.
ascii
base64
hash
hex
sha1
sha2
md5
unbase64
unhex
crc32
soundex
degrees
#### bits operation #############################################
shiftLeft
shiftRight
shiftRightUnsigned
bitwiseNOT
#### Angles ####################################################
radians
toDegreesD
toRadiansD
from pyspark.sql.types import ( DataType, NullType, StringType, BinaryType, BooleanType, DateType,
TimestampType, DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType, ShortType,
ArrayType, MapType, StructField, StructType )
from pyspark.sql.types import StringType
from pyspark.sql.functions import *
from pyspark.sql import functions as f, types as t
import findspark
findspark.init()
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import approx_count_distinct,collect_list
from pyspark.sql.functions import collect_set,sum,avg,max,countDistinct,count
from pyspark.sql.functions import first, last, kurtosis, min, mean, skewness
from pyspark.sql.functions import stddev, stddev_samp, stddev_pop, sumDistinct
from pyspark.sql.functions import variance,var_samp, var_pop
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType,StructField, StringType, IntegerType,
ArrayType, DoubleType, BooleanType)
spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
simpleData = [("James", "Sales", 3000),
("Michael", "Sales", 4600),
("Robert", "Sales", 4100),
("Maria", "Finance", 3000),
("James", "Sales", 3000),
("Scott", "Finance", 3300),
("Jen", "Finance", 3900),
("Jeff", "Marketing", 3000),
("Kumar", "Marketing", 2000),
("Saif", "Sales", 4100)
]
schema = ["employee_name", "department", "salary"]
df = spark.createDataFrame(data=simpleData, schema = schema)
df.printSchema()
df.show(truncate=False)
df.printSchema()
from pyspark.sql.types import StringType, StructType, IntegerType, FloatType
from pyspark.sql import functions as f
f.last()
# Convenience function for turning JSON strings into DataFrames.
def jsonToDataFrame(json, schema=None):
# SparkSessions are available with Spark 2.0+
reader = spark.read
if schema:
reader.schema(schema)
return reader.json(sc.parallelize([json]))
# Using a struct
schema = StructType().add("a", StructType().add("b", IntegerType()))
events = jsonToDataFrame("""
{
"a": {
"b": 1
}
}
""", schema)
display(events.select("a.b"))
| UTF-8 | Python | false | false | 11,094 | py | 8 | test.py | 7 | 0.654588 | 0.643771 | 0 | 336 | 31.997024 | 182 |
AleksandraAnn/pp1 | 12,670,153,525,781 | 28faedff489cfa30213ab27293c29bbaf973fe71 | c33e767499b886517329c39ce13590de373927bf | /02-ControlStructures/Zadania/Zadania 1-10/7.py | 28a3ca476715626dbf30c87243fee20bf123bf49 | [] | no_license | https://github.com/AleksandraAnn/pp1 | 74f5b8f5f6a389a1e9ef5c50d9a85e2b6323806f | 176985d892d7ecc9584ceb8faed3bdcc516664e7 | refs/heads/master | 2020-08-20T23:29:17.805733 | 2019-10-27T20:57:53 | 2019-10-27T20:57:53 | 216,077,656 | 0 | 0 | null | true | 2019-10-18T17:43:28 | 2019-10-18T17:43:27 | 2019-10-15T23:06:12 | 2019-10-15T23:06:10 | 1,400 | 0 | 0 | 0 | null | false | false | i = 0
while i < 5:
print('Aleksandra')
i += 1 | UTF-8 | Python | false | false | 53 | py | 12 | 7.py | 10 | 0.490566 | 0.433962 | 0 | 4 | 12.5 | 23 |
ehsansh84/Customs | 6,536,940,262,615 | 99e05de471f2d8a61047e994cf95591c6f1e3069 | fa3e53a81b41f541fccb8ccbd8da3208bacca740 | /controllers/borrow.py | f4ec6161090030e075b3fe3069acb4f078754f54 | [] | no_license | https://github.com/ehsansh84/Customs | 29bb905e20c8243c7e5e73f56e8f5bce774a5b53 | 2865a22ab8c24f7fa3ff108389031cb4a450784e | refs/heads/master | 2016-09-05T11:00:26.572593 | 2014-09-16T04:47:18 | 2014-09-16T04:47:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'ehsan'
from models.borrow import Borrow as Model
class Borrow():
def __init__(self):
pass
@classmethod
def add(cls, id='', personnel_id=0, file_id=0, date='', flow=[], returned=False):
if id != '':
obj = Model.objects(Kootaj=id).first()
else:
obj = Model()
if obj != None:
obj.personnel_id = personnel_id
obj.file_id = file_id
obj.date = date
obj.flow = flow
obj.returned = returned
obj.save()
@classmethod
def exists(cls ,kootaj):
obj = Model()
return obj.find({{'kootaj': kootaj}})
@classmethod
def find(cls, _filter={}, page=-1, per_page=15, sort='personnel_id', order=1):
try:
obj = Model.objects(__raw__=_filter)
result = []
for item in obj:
result.append({
'id': obj.id,
'personnel_id': obj.personnel_id,
'file_id': obj.file_id,
'date': obj.date,
'flow': obj.flow,
'returned': obj.returned,
})
return result
except Exception, err:
return err.message
@classmethod
def get(cls, id):
try:
obj = Model.objects.get(id=ObjectId(id))
result = {
'id': obj.id,
'personnel_id': obj.personnel_id,
'file_id': obj.file_id,
'date': obj.date,
'flow': obj.flow,
'returned': obj.returned,
}
return result
except Exception, err:
return err.message
#TODO: need to implementation
@classmethod
def delete(cls, id):
try:
return True
except Exception, err:
return err.message
| UTF-8 | Python | false | false | 1,971 | py | 88 | borrow.py | 48 | 0.446981 | 0.443937 | 0 | 78 | 24.269231 | 85 |
pymft/mft-vanak-2020 | 19,292,993,118,528 | 6a6fd8b9f04194caf79b83743422560ce63fee72 | f86e9e3d529b78cb9ea33db43bd9499b762d2ccd | /S15/oop/geometry_4.py | a9ec8f4b99b21d21013fc5205a211a01070372c4 | [] | no_license | https://github.com/pymft/mft-vanak-2020 | 43ad9e9e0e34bd2d28313dd39650f737fa874f8b | 89a7473f66b9b270136987df550b59b6aee91148 | refs/heads/master | 2020-12-14T12:37:01.154158 | 2020-07-15T14:38:14 | 2020-07-15T14:38:14 | 234,746,257 | 3 | 12 | null | false | 2020-09-08T14:54:51 | 2020-01-18T14:23:46 | 2020-07-15T14:38:25 | 2020-07-15T14:38:22 | 435 | 3 | 10 | 2 | Python | false | false | class Area:
def __get__(self, instance, owner):
if owner == Rectangle or owner == Square:
return instance.width * instance.height
elif owner == Circle:
return instance.radius * instance.radius * 3.14
class Perimeter:
def __get__(self, instance, owner):
if owner == Rectangle or owner == Square:
return (instance.width + instance.height) * 2
elif owner == Circle:
return 2 * instance.radius * 3.14
class Geometry:
area = Area()
perimeter = Perimeter()
class Rectangle(Geometry):
def __init__(self, a, b):
self.width = a
self.height = b
class Square(Rectangle):
def __init__(self, a):
super().__init__(a, a)
class Circle(Geometry):
def __init__(self, r):
self.radius = r
sq = Square(10)
rect = Rectangle(2, 5)
circle = Circle(10)
print("square:", sq.perimeter, sq.area)
print("rectangle", rect.perimeter, rect.area)
print("circle:", circle.perimeter, circle.area) | UTF-8 | Python | false | false | 1,013 | py | 113 | geometry_4.py | 101 | 0.595262 | 0.581441 | 0 | 43 | 22.581395 | 59 |
avaneesh-pandey5/matrixcalculator | 395,137,031,691 | d76c620d6336b6b086841a284720c0a9307512bc | ce996101c91ff8466c3152b15f4f5a4e85bf8185 | /matrix_functions.py | 8586faa6e402bedbba2477ae8e6f12977ae676e9 | [] | no_license | https://github.com/avaneesh-pandey5/matrixcalculator | 02d4fff31c4df5b3bc851c06900028c3cd3eb4a5 | d0a0dd43cecb2c0844d5b5b2608a73ad53337096 | refs/heads/main | 2023-04-16T01:03:05.333955 | 2023-04-06T14:25:02 | 2023-04-06T14:25:02 | 332,392,685 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #----------INPUT MATRIX------------
def input_matrix():
matrix=[]
col=[]
no_of_row=int(input('Enter no. of rows :: '))
no_of_col=int(input('Enter no. of Columns :: '))
for ir in range (1,no_of_row+1):
for ic in range(1,no_of_col+1):
print('Enter value of a',ir,ic)
val=int(input())
col.append(val)
matrix.append(col)
col=[]
return no_of_row, no_of_col, matrix
#-----------ADDITION------------------
def addition_of_matrix(matrixA,matrixB):
addition_matrix=[]
addition_col=[]
no_of_rowA=len(matrixA)
no_of_colA=len(matrixA[0])
no_of_rowB=len(matrixB)
no_of_colB=len(matrixB[0])
if no_of_rowA == no_of_rowB and no_of_colA == no_of_colB:
for r in range (no_of_rowA):
for c in range(no_of_colA):
val=matrixA[r][c] + matrixB[r][c]
addition_col.append(val)
addition_matrix.append(addition_col)
addition_col=[]
return addition_matrix
#--------------SUBTRACTION---------------
def subtraction_of_matrix(matrixA,matrixB):
subtraction_matrix=[]
subtraction_col=[]
no_of_rowA=len(matrixA)
no_of_colA=len(matrixA[0])
no_of_rowB=len(matrixB)
no_of_colB=len(matrixB[0])
if no_of_rowA == no_of_rowB and no_of_colA == no_of_colB:
for r in range (no_of_rowA):
for c in range(no_of_colA):
val=matrixA[r][c] - matrixB[r][c]
subtraction_col.append(val)
subtraction_matrix.append(subtraction_col)
subtraction_col=[]
return subtraction_matrix
#---------------PRODUCT---------------
def product_of_matrix(matrixA,matrixB):
product_matrix=[]
product_col=[]
no_of_rowA=len(matrixA)
no_of_colA=len(matrixA[0])
no_of_rowB=len(matrixB)
no_of_colB=len(matrixB[0])
val=0
for i in range(no_of_rowA):
for j in range(no_of_colB):
for k in range(no_of_colA):
val=val+(matrixA[i][k]*matrixB[k][j])
product_col.append(val)
val=0
product_matrix.append(product_col)
product_col=[]
return product_matrix
#------------ADJOINT OF MATRIX-------------
def adjoint_matrix(matrix):
cofac=cofactor_matrix(matrix)
adjoint=transpose(cofac)
return adjoint
#------------INVERSE OF A MATRIX-------------
def reduce_for_inverse(minor,i,j):
row_len=len(minor)
for k in range(row_len):
del minor[k][j]
del minor[i]
return minor
def inverse_of_matrix(matrix):
row_inverse=[]
inverse_matrix=[]
determinant=main_determinant(matrix)
adjoint=adjoint_matrix(matrix)
for i in adjoint:
for v in i:
inverse=v/determinant
inverse=round(inverse,3)
row_inverse.append(inverse)
inverse_matrix.append(row_inverse)
row_inverse=[]
return inverse_matrix
#------------COFACTOR OF MATRIX------------
def cofactor_matrix(matrix):
co_factor_matrix=[]
row_cofactor=[]
minor=[]
temporary=[]
for a in matrix:
for b in a:
temporary.append(b)
minor.append(temporary)
temporary=[]
for i in range(len(matrix)):
for j in range(len(matrix[0])):
reduce_for_inverse(minor,i,j)
cofactor=(((-1)**(i+j))*(main_determinant(minor)))
minor=[]
temporary=[]
for a in matrix:
for b in a:
temporary.append(b)
minor.append(temporary)
temporary=[]
row_cofactor.append(cofactor)
co_factor_matrix.append(row_cofactor)
row_cofactor=[]
return co_factor_matrix
#---------------DETERMINANT---------------
def reduce_for_determinant(minor,i):
row_len=len(minor)
for k in range(row_len):
del minor[k][0]
del minor[i]
return minor
def main_determinant(matrix):
if len(matrix) == 1:
value=matrix[0][0]
elif len(matrix) == 2:
value=(matrix[0][0]*matrix[1][1]) - (matrix[1][0]*matrix[0][1])
elif len(matrix) > 2:
determinant=0
minor=[]
temporary=[]
for a in matrix:
for b in a:
temporary.append(b)
minor.append(temporary)
temporary=[]
for i in range(len(matrix)):
reduce_for_determinant(minor,i)
determinant= determinant+((matrix[i][0])*((-1)**(i))*(main_determinant(minor)))
minor=[]
temporary=[]
for a in matrix:
for b in a:
temporary.append(b)
minor.append(temporary)
temporary=[]
return determinant
return value
#---------------TRANSPOSE---------------
def transpose(matrix):
row_len=len(matrix)
col_len=len(matrix[0])
row_tr=[]
transpose_matrix=[]
for z in range (row_len):
for e in range (col_len):
row_tr.append(matrix[e][z])
transpose_matrix.append(row_tr)
row_tr=[]
return transpose_matrix
| UTF-8 | Python | false | false | 5,375 | py | 3 | matrix_functions.py | 2 | 0.507349 | 0.501209 | 0 | 192 | 25.984375 | 91 |
EgorKhodunov/2021-1-MAILRU-SDET-Python-E-Khodunov | 5,411,658,821,834 | d93f2e5324b2a338f955a86a1f086b9c27faf2d8 | c1ecf53f80af0bf32ace5ae65bc12d1813a2cbab | /final_project/myapp_test/code/docker_client/nginx_docker.py | 1a2be41eff30aa959a5efab53478a4df687e9aaf | [] | no_license | https://github.com/EgorKhodunov/2021-1-MAILRU-SDET-Python-E-Khodunov | d57445515845c60800cf52d69f963ccab0d3209d | 315b7c1c858c90fc64081222e744e6b8d6536587 | refs/heads/main | 2023-06-02T19:26:34.241703 | 2021-06-09T00:15:13 | 2021-06-20T19:06:49 | 349,153,967 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import allure
import docker
import os
from docker.models.containers import Container
def read_template(template_path: str):
with open(template_path, 'r') as file:
template = file.read()
return template
def generate_nginx_config_file(template: str, app_host: str, test_dir: str) -> str:
config = template.format(f'http://{app_host}:80/')
allure.attach(config, 'nginx.conf',
attachment_type=allure.attachment_type.TEXT)
file_path = os.path.join(test_dir, 'nginx.conf')
with open(file_path, 'w') as file:
file.write(config)
return file_path
def write_to_file(data, file_path):
with open(file_path, 'wb') as file:
file.write(data)
class NginxDockerClient:
client = docker.from_env()
def __init__(self, tag: str, label: str, test_dir: str):
self.test_dir = test_dir
proxy_name = 'proxy_' + label
app_host = 'myapp_' + label
template = read_template(os.path.join('code', 'docker_client', 'config_templates', 'nginx.conf'))
proxy_config_file = generate_nginx_config_file(
template=template,
app_host=app_host,
test_dir=self.test_dir
)
self.proxy_container: Container = self.client.containers.run(
name=proxy_name,
image='nginx:alpine',
volumes_from=[os.getenv('HOSTNAME')],
command=f"nginx -c {proxy_config_file} -g 'daemon off;'",
detach=True,
network=tag + '_network'
)
def save_logs(self):
file_path = os.path.join(self.test_dir, 'proxy_docker.log')
write_to_file(self.proxy_container.logs(), file_path)
allure.attach(self.proxy_container.logs().decode('utf-8'), 'nginx_docker.log',
attachment_type=allure.attachment_type.TEXT)
def shutdown(self):
self.save_logs()
self.proxy_container.remove(v=True, force=True)
| UTF-8 | Python | false | false | 1,950 | py | 75 | nginx_docker.py | 66 | 0.608205 | 0.606667 | 0 | 63 | 29.936508 | 105 |
Alexyyek/home_recommendation | 9,801,115,412,075 | e102e4c74d15950b7625530ba5d78cbd034635cc | 75618ab4efb6cb98e439d61ff0076c4a0ea3bff3 | /house_cosine/bin/house_cosine_topN_map.py | 6adfca799f8494baefcdb5c240515ba1ccde322c | [] | no_license | https://github.com/Alexyyek/home_recommendation | 22df8d4fe968c1570f89f4bfecefe25e0fa7975f | f02daf591c05c1f91e4378b8a81beaa675f0076b | refs/heads/master | 2021-01-11T02:11:22.871894 | 2016-10-13T10:32:57 | 2016-10-13T10:32:57 | 70,795,095 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
#!/bin/python
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def read_input(seperator='\t'):
for line in sys.stdin:
line = line.strip()
resblock_alpha, resblock_beta, similarity = line.split(seperator)
resblock_id_alpha, resblock_name_alpha, room_cnt_alpha, build_area_alpha = resblock_alpha.split(':')
resblock_alpha = '{resblock_id_alpha}:{room_cnt_alpha}:{build_area_alpha}'.format(
resblock_id_alpha = resblock_id_alpha,
room_cnt_alpha = room_cnt_alpha,
build_area_alpha = build_area_alpha)
resblock_id_beta, resblock_name_beta, room_cnt_beta, build_area_beta = resblock_beta.split(':')
resblock_beta = '{resblock_id_beta}:{room_cnt_beta}:{build_area_beta}'.format(
resblock_id_beta = resblock_id_beta,
room_cnt_beta = room_cnt_beta,
build_area_beta = build_area_beta)
print '{resblock_alpha}\t{resblock_beta}\t{similarity}'.format(
resblock_alpha = resblock_alpha,
resblock_beta = resblock_beta,
similarity = similarity)
if __name__ == "__main__":
read_input()
| UTF-8 | Python | false | false | 1,264 | py | 61 | house_cosine_topN_map.py | 61 | 0.573576 | 0.571994 | 0 | 30 | 41.133333 | 108 |
suzukiken/cdkgluetable | 17,214,228,958,177 | ba0aaed251947b7ba6aa1c44e1847ccc53659112 | a3a779efe894276ef51f329f28239b8e490df7e8 | /test/putdata.py | 13241d9eaa31aa488a2de6a01f726e055831d252 | [] | no_license | https://github.com/suzukiken/cdkgluetable | 00f6c54de0b00ac155fbe56ff8a81e74a798445c | 338dca83ad8f045819622f0805509ee6d039519c | refs/heads/master | 2023-04-25T13:24:53.237109 | 2021-07-09T12:09:46 | 2021-07-09T12:09:46 | 349,581,306 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
from faker import Faker
import json
fake = Faker()
client = boto3.client('s3')
lines = []
for i in range(1, 10000):
line = {
'id': i,
'created': fake.date_time().isoformat(sep=' '),
'name': fake.license_plate(),
'pcs': fake.random_int()
}
lines.append(json.dumps(line))
client.put_object(
Body='\n'.join(lines),
Bucket='cdkgluetable-bucket',
Key='data/20210320120000.json',
) | UTF-8 | Python | false | false | 422 | py | 3 | putdata.py | 1 | 0.632701 | 0.578199 | 0 | 22 | 18.227273 | 52 |
Monolith195/geekbrains_python | 9,955,734,232,100 | f4731bbe2645c6dd96bbbc6dadf027ed80aa4162 | 35807d5e95c8ee68d0703e23b0d4f3f6e5e999b1 | /lesson01/task6.py | dc4705c16de76f0ffb043e02e5cd4c7a252c867b | [] | no_license | https://github.com/Monolith195/geekbrains_python | 3245d0cef98a5604dc9d371d277636c1238f7736 | b37bf832ad3a2c1c420cea47180e82feeca16dc0 | refs/heads/master | 2020-05-17T03:52:49.929921 | 2019-05-14T18:35:14 | 2019-05-14T18:35:14 | 172,539,153 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 6. Создать текстовый файл test_file.txt, заполнить его тремя строками: «сетевое программирование», «сокет»,
# «декоратор». Проверить кодировку файла по умолчанию. Принудительно открыть файл в формате Unicode и вывести его
# содержимое.
file = open('test_file.txt', encoding='utf-8', mode='w')
file.write('сетевое программирование\nсокет\nдекоратор')
file.close()
print(file)
with open('test_file.txt', encoding='utf-8') as f:
for line in f:
print(line, end='')
| UTF-8 | Python | false | false | 689 | py | 8 | task6.py | 8 | 0.725367 | 0.719078 | 0 | 12 | 38.75 | 113 |
HRLeem/stock_dart | 7,868,380,119,117 | da874f050a23c2084d22bdd05fcc612bbc22891c | 2bfcd913de89c91d5ec6d110304031e15c410a49 | /execution/crawl.py | 9c7b71aed65ab5f2f0afd10d9a1960dae6cf3239 | [] | no_license | https://github.com/HRLeem/stock_dart | f013e19b43b6e08b35b45cc8cfdefc5ec8db50b5 | 833076cb4b0550e1c1f54ca180dff1055f93f89c | refs/heads/main | 2023-03-21T20:29:45.178315 | 2021-03-10T23:00:27 | 2021-03-10T23:00:27 | 334,500,317 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # # -*- coding: utf-8 -*-
# import execution.defs.defs_crawl as defs_crawl
#
# crawl = defs_crawl.Crawl()
# def traffic_light(name, code, sort):
# if sort == 'simple':
# pass | UTF-8 | Python | false | false | 186 | py | 7 | crawl.py | 7 | 0.596774 | 0.591398 | 0 | 7 | 25.714286 | 48 |
AK-1121/code_extraction | 8,040,178,819,745 | 77cd7e564642e2e5728748fa9f00d927b39504d1 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_20679.py | 56a5ea16aebc8994fc5fb2c0600fd5b5a0d82b7e | [] | no_license | https://github.com/AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # setting up a dsn to a pervasive DB on Debian through pyodbc
dsnadd
| UTF-8 | Python | false | false | 69 | py | 29,367 | python_20679.py | 29,367 | 0.782609 | 0.782609 | 0 | 2 | 33.5 | 61 |
csiifmo/AllInfo | 12,962,211,348,354 | 91567498b08ad4c3ac61ca09b3bedbd411c661a6 | 7def7bd86abe418e2f5c4d35ca374ac9934d83de | /KawasakiSystem_old/Schunk Release/Manuals/SDHLibrary-2014-09-30-reduced/python/sdh/sdhserial.py | 70a2a33d43f5dcc5e9b43e47a7d125456160f0b7 | [] | no_license | https://github.com/csiifmo/AllInfo | c0fc4ebd086eb7f810d78e6358f71e52d3eb344f | cf502e4d2ef5f8e514e0cd58fb0c04428498216a | refs/heads/master | 2021-01-25T04:15:50.791051 | 2017-06-05T16:00:02 | 2017-06-05T16:00:02 | 93,418,565 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: latin-1 -*-
#######################################################################
#
## \file
# \section sdhlibrary_python_sdhserial_py_general General file information
#
# \author Dirk Osswald
# \date 2007-06-13
#
# \brief
#   Implementation of a class to access the SDH via RS232
#
# \section sdhlibrary_python_sdhserial_py_copyright Copyright
#
# Copyright (c) 2007 SCHUNK GmbH & Co. KG
#
# <HR>
# \internal
#
# \subsection sdhlibrary_python_sdhserial_py_details SVN related, detailed file specific information:
# $LastChangedBy: Osswald2 $
# $LastChangedDate: 2014-02-28 15:24:55 +0100 (Fri, 28 Feb 2014) $
# \par SVN file revision:
# $Id: sdhserial.py 11438 2014-02-28 14:24:55Z Osswald2 $
#
# \subsection sdhlibrary_python_sdhserial_py_changelog Changelog of this file:
# \include sdhserial.py.log
#
#######################################################################
import time, sys, re
# pySerial module from http://pyserial.sourceforge.net/
import serial
from sdhbase import *
import socket
# Try to import sdh.canserial: Will only work:
# - if using native windows python (not cygwin)
# - if using ESD CAN
# - if the ESD python wrapper is installed
try:
from . import canserial
except ImportError:
pass
from . import tcpserial
#######################################################################
## \anchor sdhlibrary_python_sdhserial_py_python_vars
# \name Python specific variables
#
# Some definitions that describe the module for python.
#
# @{
__doc__ = "Class to access SDH via RS232"
__author__ = "Dirk Osswald: dirk.osswald@de.schunk.com"
__url__ = "http://www.schunk.com"
__version__ = "$Id: sdhserial.py 11438 2014-02-28 14:24:55Z Osswald2 $"
__copyright__ = "Copyright (c) 2007 SCHUNK GmbH & Co. KG"
# end of doxygen name group sdhlibrary_python_sdhserial_py_python_vars
# @}
######################################################################
#-----------------------------------------------------------------
## \brief The class to communicate with a SDH via RS232.
#
# End-Users should \b NOT use this class directly! The interface
# of cSDHSerial is subject to change in future releases. End users
# should use the class cSDH instead, as that interface is considered
# more stable.
#
# <hr>
class cSDHSerial( cSDHBase ):
'''
The class to communicate with a SDH via RS232. See html/pdf documentation for details.
\bug SCHUNK-internal bugzilla ID: Bug 1517<br>
With SDH firmware 0.0.3.x the first connection to a newly
powered up SDH can yield an error especially when connecting via TCP.
<br><b>=> Resolved in SDHLibrary-python 0.0.2.8</b>
'''
##################################################################
## \anchor sdhlibrary_python_csdhserial_internal
# \name Internal methods
#
# @{
#-----------------------------------------------------------------
## \brief Constructor of cSDHSerial.
#
# - Open the serial port
# - Check connection to SDH by querying the SDH firmware version
#
# This may raise an exception on failure
#
# \param self - reference to the object itself
# \param options - a dictionary of additional settings, like the
# options.__dict__ returned from cSDHOptionParser.parse_args()
# - Settings used by the base class cSDHBase:
# - \c "debug_level" : if set, then it is used as debug level of
# the created object, else a default of 0 is used
# - Settings used by this cSDHSerial class:
# - \c "port": if set, then it is used as the port number or the device name of
# the serial port to use. The default
# value port=0 refers to 'COM1' in Windows and
# to the corresponding '/dev/ttyS0' in Linux.
# - \c "timeout" : the timeout to use:
# - None : wait forever
# - T : wait for T seconds (float accepted)
# - (Superclasses of cSDHSerial use additional settings, see there.)
# - (Using classes of cSDHSerial like cSDH use additional settings, see there.)
#
# <hr>
def __init__( self, options=None ):
'''
Constructor of cSDHSerial. See html/pdf documentation for details.
'''
#---------------------
# Option handling:
# Set class specific default options:
default_options = dict( port=0, timeout=None )
# Overwrite class specific defaults with settings from caller, if any:
if ( options ): default_options.update( options )
#---------------------
# Call base class constructor using default + user options:
cSDHBase.__init__( self, options=default_options )
# use green as color for messages from cSDHSerial
self.dbg.SetColor( "green" )
self.dbg.PDM( "Debug messages of cSDHSerial are printed like this." )
#---------------------
# initialize additional member variables:
## \brief additional time in seconds to wait for sequential
# execution of "m"-command (as these are always executed
# non-sequentially by the SDH firmware)
# (no longer needed since WaitAxis() is used to ensure movement has ended)
self.m_sequtime = 0.0
## String to use as "End Of Line" marker when sending to SDH
self.EOL="\r\n"
#---------------------
#---------------------
# open connection to SDH:
self.com = None
if (self.options[ "port" ] < 0):
# "virtual" port for offline tests
return
try:
dummy = self.options[ "usecan" ]
except KeyError:
self.options[ "usecan" ] = False
try:
dummy = self.options[ "baudrate" ]
except KeyError:
self.options[ "baudrate" ] = 0
if (self.options[ "usecan" ]):
# try using CAN via ESD
if ( not "sdh.canserial" in sys.modules ):
print "Importing sdh.canserial failed! Is this Winpython calling? If you want CAN try:"
cmdline = ""
for a in sys.argv:
cmdline += " " + a
print "pythonwin %s" % cmdline
print
raise ImportError
if ( self.options[ "baudrate" ] == 0 ):
self.options[ "baudrate" ] = 1000000
self.com = canserial.tCANSerial( self.options[ "id_read" ], self.options[ "id_write" ], self.options[ "baudrate" ], self.options[ "net" ], timeout=self.options[ "timeout" ] )
self.dbg.PDM( "Using (ESD) CAN, id_read=0x%03x, id_write=0x%03x baudrate=%d timeout=%r" % (self.options[ "id_read" ], self.options[ "id_write" ], self.options[ "baudrate" ], self.options["timeout"]) )
sys.stdout.flush()
elif ( self.options[ "usetcp" ] ):
self.dbg.PDM( "Using TCP/IP to %s:%d with timeout %r" % (self.options[ "tcp_adr" ], self.options[ "tcp_port" ], self.options["timeout"]) )
self.com = tcpserial.tTCPSerial( self.options[ "tcp_adr" ], self.options[ "tcp_port" ] )
sys.stdout.flush()
else:
if ( self.options[ "baudrate" ] == 0 ):
self.options[ "baudrate" ] = 115200
## the RS232 connection to use for communication
self.com = serial.Serial( port=self.options[ "port" ], baudrate=self.options[ "baudrate" ], rtscts=0, xonxoff=0, timeout=self.options[ "timeout" ] )
# the above call will succeed even if the hand is connected but off
# to make shure that the SDH is connected:
# try to get the SDH firmware version with timeout
old_timeout = self.com.timeout
#print "cSDHSerial.__init__, modifying self.com.timeout"
self.com.timeout = 1
try:
self.com.write( " " ) # empty command to terminate any potential partly received previous command
except cSDHErrorCommunication, e:
self.dbg << "caught <%s> (ignored while cleaning up communication 1)\n" % str(e);
try:
# Now try to read anything available.
# This is only necessary if the SDH with a debug firmware (like
# all v0.0.3.x releases) has been switched on recently and we are
# communicating via TCP (since the TCP stack on the SDH buffers the
# debug start messages forever).
# In all other cases this does no harm other than a small delay.
dummy = self.com.read( 1024 );
self.dbg << "Read and ignored %d bytes \"%s\"\n" % (len(dummy),dummy)
self.dbg.flush()
except socket.timeout, e:
self.dbg << "caught <%s> (ignored while cleaning up communication 2)\n" % str(e);
except cSDHErrorCommunication, e:
self.dbg << "caught <%s> (ignored while cleaning up communication 2)\n" % str(e);
#---------------------
try:
#self.Send( "ver" )
ver = self.ver()
if ( ver == "" ):
raise cSDHErrorCommunication( "Could not get version info from SDH. Either it is switched off or not connected to selected port." )
except IndexError, e:
if (self.options[ "usecan" ]):
raise cSDHErrorTimeout( "Error while opening ESD CAN interface on net %d: %s" % (self.options[ "net" ], str(e)) )
else:
raise cSDHErrorTimeout( "Timeout while opening port %r" % self.options[ "port" ] )
self.com.timeout = old_timeout
#---------------------
#-----------------------------------------------------------------
def Close( self ):
'''
Close connection to serial interface.
'''
if self.com:
self.com.close()
self.com = None
#-----------------------------------------------------------------
def SendParse( self, s, re_obj ):
'''
Simplified parsing of 1 line commands.
s is the command to send
re_obj is a compiled regular expression object
the reply for s from the SDH is matched against re_obj
and the group 1 of the resulting match object is returned.
In case of errors the procedure is repeated up to 3 times
after syncing the output
'''
self.dbg << "Sendparse( %s, %s )\n" % (repr(s), repr(re_obj.pattern)) # pylint: disable-msg=W0104
retries = 3 # retry sending at most this many times
while retries > 0:
reply=None
try:
reply = self.Send( s, 1 )
mo = re_obj.match( reply[0] )
if ( mo ):
return mo.group(1)
except cSDHErrorCommunication,e:
self.dbg << "Ignoring exception in SendParse: %r\n" % e # pylint: disable-msg=W0104
retries -= 1
if retries> 0:
self.dbg << "reply %s from SDH does not match, syncing and retrying\n" % (repr(reply)) # pylint: disable-msg=W0104
old_nb_lines_to_ignore = self.nb_lines_to_ignore
self.nb_lines_to_ignore = 5
self.Sync()
self.nb_lines_to_ignore = old_nb_lines_to_ignore
raise cSDHErrorCommunication( "Could not get matching reply in SendParse( '%s', '%s' )" % (s,re_obj.pattern) )
#-----------------------------------------------------------------
def Send( self, s, nb_lines=All, nb_lines_total=All ):
'''
Send command string s+EOL to self.com and read reply according to nb_lines.
If nb_lines == All then reply lines are read until a line
without "@" prefix is found.
If nb_lines != All it is the number of lines to read.
self.firmware_state is set according to reply (if read)
nb_lines_total contains the total number of lines replied for
the s command. If fewer lines are read then
nb_lines_total-nb_lines will be remembered to be ignored
before the next command can be sent.
Return a list of all read lines of the reply from the SDH hardware.
'''
if (self.options[ "port" ] < 0):
# "virtual" port for offline tests
for (request,answer) in [ ("power=0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000", "POWER=0.0,0.0,0.0,0.0,0.0,0.0,0.0"),
("vp", ["@bla", "VP=0"]),
("p_max", ["P_MAX=90.0,90.0,90.0,90.0,90.0,90.0,90.0"]),
("p_min", ["P_MIN=0.0,-90.0,-90.0,-90.0,-90.0,-90.0,-90.0"]),
("vlim", ["VLIM=83,140,200,140,200,140,200"]),
("ver", ["VER=0.0.0.0"]),
]:
if (s == request):
self.dbg << "!!! Virtual COM port, faking reply '%s' for request '%s'\n" % (answer,request) # pylint: disable-msg=W0104
return answer
self.dbg << "!!! Virtual COM port, ignoring '%s'\n" % s # pylint: disable-msg=W0104
return []
retries = 3 # retry sending at most this many times
while retries > 0:
try:
#---------------------
# first read all lines to ignore (replies of previous commands)
while ( self.nb_lines_to_ignore > 0 ):
l = self.com.readline()
self.nb_lines_to_ignore -= 1
self.dbg.PDM( "ignoring line", l )
#---------------------
self.firmware_state = self.eErrorCode[ "E_SUCCESS" ]
lines = []
#---------------------
# send new command to SDH
self.dbg.PDM( "sending command "+repr(s+self.EOL)+" to SDH" )
self.dbg.PDM( "nb_lines=", nb_lines, " nb_lines_total=", nb_lines_total, " self.nb_lines_to_ignore=", self.nb_lines_to_ignore )
self.com.write(s+self.EOL)
#---------------------
#---------------------
# read reply if requested
while (nb_lines == All or nb_lines > 0):
#---------------------
# now read requested reply lines of current command
l = self.com.readline()
if (nb_lines != All):
nb_lines -= 1
if (nb_lines_total != All):
nb_lines_total -= 1
# append line l without beginning or trailing "\r\n" to lines list
start = 0
while (start < len(l) and l[start] in ('\r', '\n')):
start += 1
end = len(l)-1
while (end > 0 and l[end] in ('\r', '\n')):
end -= 1
lines.append( l[start:end+1] )
self.dbg.PDM( "appended '%s' for l='%s'" %(lines[-1],l) )
if (len(lines[-1])>0 and lines[-1][0] != '@'): # ??? or better and (nb_lines != All and nb_lines <= 0)
break
if ( len(lines[-1]) == 0 ):
self.dbg << "breaking for empty line\n" # pylint: disable-msg=W0104
break # !!! needed, but why????
self.dbg << "not breaking for line '%s'\n" % l # pylint: disable-msg=W0104
sys.stdout.flush()
sys.stderr.flush()
#---------------------
sys.stdout.flush()
sys.stderr.flush()
#---------------------
# remember if there are more lines to be ignored next time
if (nb_lines_total != All):
self.nb_lines_to_ignore = nb_lines_total
self.dbg.PDM( "%d lines remain to be ignored" % self.nb_lines_to_ignore )
#---------------------
#---------------------
# set state if possible
if (self.nb_lines_to_ignore == 0):
self.ExtractFirmwareState( lines )
#---------------------
# finished, so no more retries needed
retries = 0
except cSDHErrorCommunication, e:
# some communication error occured, so retry:
retries -= 1
if (retries <= 0):
self.dbg << "Retried sending, but still got errors from SDH!\n" # pylint: disable-msg=W0104
# reraise e:
raise
self.dbg << "ignoring cSDHErrorCommunication:", e, "\n" # pylint: disable-msg=W0104
# resync first:
self.Sync()
# now start over again
self.dbg << "got reply:\n" # pylint: disable-msg=W0104
self.dbg.SetColor( "blue" )
for (i,l) in zip(range(0,len(lines)),lines):
self.dbg << "%2d: " % i << repr( l ) << "\n" # pylint: disable-msg=W0104
self.dbg.SetColor( "green" )
return lines
#-----------------------------------------------------------------
def ExtractFirmwareState( self, lines ):
'''
Try to extract the state of the SDH firmware from the lines reply
'''
#---------------------
# check first char of last line of lines
if (lines == []):
raise cSDHErrorCommunication( "Cannot get SDH firmware state from empty lines" )
elif (lines[-1] == ""):
raise cSDHErrorCommunication( "Cannot get SDH firmware state from empty line" )
elif (lines[-1][0] == 'E'):
# it is an error message:
self.firmware_state = int(lines[-1][1:])
self.dbg.PDM( "got error reply '%s' = %d = %s" % (lines[-1], self.firmware_state, self.firmware_error_codes[self.firmware_state]) )
raise cSDHErrorInvalidParameter( "SDH firmware reports error %d = %s" % (self.firmware_state, self.firmware_error_codes[self.firmware_state]) )
elif (lines[-1][0] == '@'):
# it is an debug message (should not happen):
raise cSDHErrorCommunication( "Cannot get SDH firmware state from lines %r" % lines )
else:
# it is a normal "command completed" line:
self.firmware_state = self.eErrorCode[ "E_SUCCESS" ]
#-----------------------------------------------------------------
def GetDuration( self, line ):
'''
Return duration of the execution of a SDH command as reported by line
'''
m = self.re_get_T.match( line )
if (m is None):
raise cSDHErrorCommunication( "Could not extract duration from lines '%s'" % (line) )
return float( m.group(1) )
#-----------------------------------------------------------------
def Sync( self ):
'''
Read all pending lines from SDH to resync execution of PC and SDH.
'''
lines = []
# read all lines to ignore (replies of previous commands)
while ( self.nb_lines_to_ignore > 0 ):
l = self.com.readline()
self.nb_lines_to_ignore -= 1
self.dbg.PDM( "syncing: ignoring line %r" % l )
# append line l without trailing "\n\r" to lines list
if (len(l) > 2):
lines.append( l[:-2] )
#---------------------
if (lines != []):
try:
self.ExtractFirmwareState( lines )
except cSDHErrorCommunication,e:
self.dbg.PDM( "syncing: ignoring error from ExtractFirmwareState (%r)", e )
#-----------------------------------------------------------------
def AxisCommand( self, command, axis=All, value=None ):
'''
Get/Set values.
- If axis is All and value is None then a
NUMBER_OF_AXES-list of the current values
read from the SDH is returned
- If axis is a single number and value is None then the
current value for that axis is read from the SDH and is returned
- If axis and value are single numbers then that value
is set for that axis and returned.
- If axis is All and value is a NUMBER_OF_AXES-vector then all axes
values are set accordingly, a NUMBER_OF_AXES-list is returned.
'''
#cutoff = len( command ) + 1
#cutoff1 = len( command ) + 4
cmd_answer = command.upper()
retries = 3 # retry sending at most this many times
while (retries > 0):
try:
if (type(axis) == int):
self.CheckIndex( axis, self.NUMBER_OF_AXES, "axis" )
if (value is None):
#reply = self.Send( "%s(%d)" % (command,axis) )
#return float( reply[-1][cutoff1:] )
answer = self.SendParse( "%s(%d)" % (command,axis),
re.compile("%s\(%d\)=([-+]?(\d+(\.\d*)?|\.\d+)?)" % (cmd_answer,axis)) )
return float( answer )
if (type(value) == int):
#reply = self.Send( "%s(%d)=%d" % (command,axis,value ) )
#return float( reply[-1][cutoff1:] )
answer = self.SendParse( "%s(%d)=%d" % (command,axis,value ),
re.compile("%s\(%d\)=([-+]?(\d+(\.\d*)?|\.\d+)?)" % (cmd_answer,axis)) )
return float( answer )
if (type(value) == float):
#reply = self.Send( "%s(%d)=%f" % (command,axis,value ) )
#return float( reply[-1][cutoff1:] )
answer = self.SendParse( "%s(%d)=%f" % (command,axis,value ),
re.compile("%s\(%d\)=([-+]?(\d+(\.\d*)?|\.\d+)?)" % (cmd_answer,axis)) )
return float( answer )
if (axis == All):
if ( value is None):
#reply = self.Send( command )
#return eval( "[" + reply[-1][cutoff:] + "]" ) # this will raise an TypeError exception if not enough data was read
answer = self.SendParse( command,
re.compile("%s=(.*)" % (cmd_answer)) )
return eval( "[" + answer + "]" ) # this will raise an TypeError exception if not enough data was read
# if a single value was given for All axes then create a list of NUMBER_OF_AXES values first:
if (type(value) in [int, float]):
value = [ value for ai in self.all_axes ]
if ( (type(value) in self.vector_types) and len(value) == self.NUMBER_OF_AXES):
#reply = self.Send( "%s=%f,%f,%f,%f,%f,%f,%f" % ((command,)+tuple(value)) )
#self.dbg.var( "command reply cutoff" )
#return eval( "[" + reply[-1][cutoff:] + "]" )
#self.dbg.var( "command reply cutoff" )
#return eval( "[" + reply[-1][cutoff:] + "]" )
answer = self.SendParse( "%s=%f,%f,%f,%f,%f,%f,%f" % ((command,)+tuple(value)),
re.compile( "%s=(.*)" % (cmd_answer)) )
self.dbg.var( "command answer cmd_answer" )
return eval( "[" + answer + "]" )
raise cSDHErrorInvalidParameter( "Invalid parameter in call' %s(axis = %s, value = %s )'" % (command, repr(axis), repr(value) ) )
# end of try
except TypeError,e:
# these errors seem to happen on linux only (not cygwin) where a reply can be partly received:
# assume some communication error occured, so retry:
retries -= 1
if (retries > 0):
self.dbg << "ignoring TypeError: " << e << "\n" # pylint: disable-msg=W0104
# resync first:
self.Sync()
#now start over again
except SyntaxError,e:
# these errors happen on windows if CAN is used, so a reply can be partly received:
# assume some communication error occured, so retry:
retries -= 1
if (retries > 0):
self.dbg << "ignoring SyntaxError: " << e << "\n" # pylint: disable-msg=W0104
# resync first:
self.Sync()
#now start over again
# end of except
# end of while (retries > 0)
self.dbg << "Retried sending, but still got errors from SDH!\n" # pylint: disable-msg=W0104
# reraise e:
raise
# end of doxygen name group sdhlibrary_python_csdhserial_internal
## @}
##################################################################
##################################################################
## \anchor sdhlibrary_python_csdhserial_setup_commands
# \name Setup and configuration methods
# @{
#-----------------------------------------------------------------
def pid( self, axis, p=None, i=None, d=None ):
'''
Get/Set PID controller parameters
- axis must be a single number: the index of the axis to get/set
- If p,i,d are None then a list of the currently
set PID controller parameters of the axis is returned
- If p,i,d are numbers then the PID controller parameters for
that axis are set (and returned).
\bug With SDH firmware 0.0.2.9 pid() might not respond
pid values correctly in case these were changed before.
With SDH firmwares 0.0.2.10 and newer this now works.
<br><b>=> Resolved in SDH firmware 0.0.2.10</b>
'''
self.CheckIndex( axis, self.NUMBER_OF_AXES, "axis" )
if (p is None and i is None and d is None):
reply = self.Send( "pid(%d)" % (axis) )
return eval( "[" + reply[0][7:] + "]" )
if (type(p) in (int,float) and type(i) in (int,float) and type(d) in (int,float)):
reply = self.Send( "pid(%d)=%f,%f,%f" % (axis,p,i,d) )
return eval( "[" + reply[0][7:] + "]" )
raise cSDHErrorInvalidParameter( "Invalid parameter in call' pid(axis=%s, p=%s, i=%s, d=%s )'" % (repr(axis), repr(p),repr(i), repr(d)) )
#-----------------------------------------------------------------
def kv( self, axis=All, kv=None ):
'''
Get/Set kv parameter
- If axis is All and kv is None then a
NUMBER_OF_AXES-list of the currently set kv parameters is returned
- If axis is a single number and kv is None then the
kv parameter for that axis is returned.
- If axis and kv are single numbers then the kv parameter
for that axis is set (and returned).
- If axis is All and kv is a NUMBER_OF_AXES-vector then all axes
kv parameters are set accordingly, NUMBER_OF_AXES-list is returned.
\bug With SDH firmware 0.0.2.9 kv() might not respond
kv value correctly in case it was changed before.
With SDH firmwares 0.0.2.10 and newer this now works.
<br><b>=> Resolved in SDH firmware 0.0.2.10</b>
'''
if axis == All:
# SDH firmware cannot handle setting / getting all values at once
# so emulate that
if kv is None:
return [ self.AxisCommand( "kv", a, None ) for a in self.all_axes ]
if (type( kv ) in self.vector_types and len(kv) == self.NUMBER_OF_AXES):
return [ self.AxisCommand( "kv", a, kv[a] ) for a in self.all_axes ]
raise cSDHErrorInvalidParameter( "Invalid parameter in call 'kv( axis=%s, kv=%s )'" % (repr(axis), repr(kv)) )
else:
return self.AxisCommand( "kv", axis, kv )
#-----------------------------------------------------------------
def ilim( self, axis=All, limit=None ):
'''
Get/Set current limit for m command
- If axis is All and limit is None then a NUMBER_OF_AXES-list
of the currently set current limits is returned
- If axis is a single number and limit is None then the
current limit for that axis is returned.
- If axis and limit are single numbers then the current limit
for that axis is set (and returned).
- If axis is All and limit is a NUMBER_OF_AXES-vector then
all axes current limits are set accordingly, the NUMBER_OF_AXES-list is returned.
'''
return self.AxisCommand( "ilim", axis, limit )
#-----------------------------------------------------------------
def power( self, axis=All, flag=None ):
'''
Get/Set current power state
- If axis is All and flag is None then a NUMBER_OF_AXES-list
of the currently set power states is returned
- If axis is a single number and flag is None then the
power state for that axis is returned.
- If axis is a single number and flag is a single number or a
boolean value then the power state
for that axis is set (and returned).
- If axis is All and flag is a NUMBER_OF_AXES-vector then all axes
power states are set accordingly, the NUMBER_OF_AXES-list is returned.
- If axis is All and flag is a a single number or a boolean
value then all axes power states are set to that value, the
NUMBER_OF_AXES-list is returned.
'''
# Actual input/output for the command looks like:
#--
# power=0,0,0,0,0,0,0
# POWER=0,0,0,0,0,0,0
if ( axis == All and type(flag) in (int, bool) ):
if (flag):
flag = [ 1 for i in self.all_axes ]
else:
flag = [ 0 for i in self.all_axes ]
elif ( type(flag) in self.vector_types ):
# make flag a vector of ints (not vector of bools)
flag = map( int, flag )
if ( type(flag) == bool ):
# make flag an int (not bool)
flag = int( flag )
rc = self.AxisCommand( "power", axis, flag )
if (type(axis) == int):
return int( rc )
return rc
# end of doxygen name group sdhlibrary_python_csdhserial_setup_commands
## @}
##################################################################
##################################################################
## \anchor sdhlibrary_python_csdhserial_misc_commands
# \name Misc. methods
# @{
#-----------------------------------------------------------------
def demo( self, onoff ):
'''
Enable/disable SCHUNK demo
'''
return self.Send( "demo=%d" % onoff )
#-----------------------------------------------------------------
def property( self, propname, value ):
'''
Set named property
Valid propnames are:
- "user_errors"
- "terminal"
- "debug"
'''
reply = self.Send( "%s=%d" % (propname, value) )
return int( reply[0][len(propname):] )
#-----------------------------------------------------------------
def user_errors( self, value ):
'''
'''
return self.property( "user_errors", value )
#-----------------------------------------------------------------
def terminal( self, value ):
'''
'''
return self.property( "terminal", value )
#-----------------------------------------------------------------
def debug( self, value ):
'''
'''
return self.property( "debug", value )
# end of doxygen name group sdhlibrary_python_csdhserial_misc_commands
## @}
##################################################################
##################################################################
## \anchor sdhlibrary_python_csdhserial_movement_commands
# \name Movement methods
# @{
#-----------------------------------------------------------------
def v( self, axis=All, velocity=None ):
'''
Get/Set target velocity. (NOT the current velocity!)
The default velocity set on power on is 40 deg/s.
- If axis is All and velocity is None then a NUMBER_OF_AXES-list
of the currently set target velocities is returned
- If axis is a single number and velocity is None then the
target velocity for that axis is returned.
- If axis and velocity are single numbers then the target
velocity for that axis is set (and returned).
- If axis is All and velocity is a NUMBER_OF_AXES-vector
then all axes target velocities are set accordingly, the NUMBER_OF_AXES-list
is returned.
Velocities are set/reported in degrees per second.
'''
if (type( velocity ) in (int, float)):
if axis == All:
for a in self.all_axes:
self.CheckRange( velocity, self.min_angular_velocity_a[a], self.max_angular_velocity_a[a], "axis %s velocity" % (repr(a)) )
else:
self.CheckRange( velocity, self.min_angular_velocity_a[axis], self.max_angular_velocity_a[axis], "axis %s velocity" % (repr(axis)) )
elif (type( velocity ) in self.vector_types):
self.CheckRange( velocity, self.min_angular_velocity_a, self.max_angular_velocity_a, "axis velocity" )
return self.AxisCommand( "v", axis, velocity )
#-----------------------------------------------------------------
def vlim( self, axis=All ):
'''
Get velocity limits.
- If axis is All then a NUMBER_OF_AXES-list
of the velocity limits is returned
- If axis is a single number then the
velocity limit for that axis is returned.
Velocity limits are reported in degrees per second.
'''
return self.AxisCommand( "vlim", axis, None )
#-----------------------------------------------------------------
def alim( self, axis=All ):
'''
Get acceleration limits.
- If axis is All then a NUMBER_OF_AXES-list
of the acceleration limits is returned
- If axis is a single number then the
acceleration limit for that axis is returned.
Acceleration limits are reported in degrees per (second*second).
'''
return self.AxisCommand( "alim", axis, None )
#-----------------------------------------------------------------
def a( self, axis=All, acceleration=None ):
'''
Get/Set target acceleration. (NOT the current acceleration!)
The default acceleration set on power on is 100 deg/(s*s).
- If axis is All and acceleration is None then a NUMBER_OF_AXES-list
of the currently set target accelerations is returned
- If axis is a single number and acceleration is None then the
target acceleration for that axis is returned.
- If axis and acceleration are single numbers then the target
acceleration for that axis is set (and returned).
- If axis is All and acceleration is a NUMBER_OF_AXES-vector
then all axes target accelerations are set accordingly, the NUMBER_OF_AXES-list
is returned.
Accelerations are set/reported in degrees per (second*second).
'''
if (type( acceleration ) in (int, float)):
if axis == All:
for a in self.all_axes:
self.CheckRange( acceleration, self.min_angular_acceleration_a[a], self.max_angular_acceleration_a[a], "axis %s acceleration" % (repr(a)) )
else:
self.CheckRange( acceleration, self.min_angular_acceleration_a[axis], self.max_angular_acceleration_a[axis], "axis %s acceleration" % (repr(axis)) )
elif (type( acceleration ) in self.vector_types):
self.CheckRange( acceleration, self.min_angular_acceleration_a, self.max_angular_acceleration_a, "axis acceleration" )
return self.AxisCommand( "a", axis, acceleration )
#-----------------------------------------------------------------
def p( self, axis=All, angle=None ):
'''
Get/Set target angle for axis. (NOT the current angle!)
- If axis is All and angle is None then a NUMBER_OF_AXES-list
of the currently set target angles is returned
- If axis is a single number and angle is None then the
target angle for that axis is returned.
- If axis and angle are single numbers then the target
angle for that axis is set (and returned).
- If axis is All and angle is a NUMBER_OF_AXES-vector
then all axes target angles are set accordingly, the NUMBER_OF_AXES-list
is returned.
Angles are set/reported in degrees.
'''
if (type( angle ) in (int, float)):
if axis == All:
for a in self.all_axes:
self.CheckRange( angle, self.min_angle_a[a], self.max_angle_a[a], "axis %s angle" % (repr(a)) )
else:
self.CheckRange( angle, self.min_angle_a[axis], self.max_angle_a[axis], "axis %d angle" % axis )
elif (type( angle ) in self.vector_types):
self.CheckRange( angle, self.min_angle_a, self.max_angle_a, "axis angle" )
return self.AxisCommand( "p", axis, angle )
#-----------------------------------------------------------------
def tpap( self, axis=All, angle=None ):
'''
Set target angle, get actual angle for axis.
- If axis is All and angle is None then a NUMBER_OF_AXES-list
of the currently set target angles is returned
- If axis is a single number and angle is None then the
actual angle for that axis is returned.
- If axis and angle are single numbers then the target
angle for that axis is set (and actual angle returned).
- If axis is All and angle is a NUMBER_OF_AXES-vector
then all axes target angles are set accordingly, the NUMBER_OF_AXES-list
of actual angles is returned.
Angles are set/reported in degrees.
'''
if (type( angle ) in (int, float)):
if axis == All:
for a in self.all_axes:
self.CheckRange( angle, self.min_angle_a[a], self.max_angle_a[a], "axis %s angle" % (repr(a)) )
else:
self.CheckRange( angle, self.min_angle_a[axis], self.max_angle_a[axis], "axis %d angle" % axis )
elif (type( angle ) in self.vector_types):
self.CheckRange( angle, self.min_angle_a, self.max_angle_a, "axis angle" )
return self.AxisCommand( "tpap", axis, angle )
#-----------------------------------------------------------------
def tvav( self, axis=All, velocity=None ):
'''
Set target velocity, get actual velocity for axis.
- If axis is All and velocity is None then a NUMBER_OF_AXES-list
of the currently set target velocities is returned
- If axis is a single number and velocity is None then the
actual velocity for that axis is returned.
- If axis and velocity are single numbers then the target
velocity for that axis is set (and actual velocity returned).
- If axis is All and velocity is a NUMBER_OF_AXES-vector
then all axes target velocities are set accordingly, the NUMBER_OF_AXES-list
of actual velocities is returned.
Angles are set/reported in degrees.
'''
if (type( velocity ) in (int, float)):
if axis == All:
for a in self.all_axes:
self.CheckRange( velocity, self.min_angular_velocity_a[a], self.max_angular_velocity_a[a], "axis %s velocity" % (repr(a)) )
else:
self.CheckRange( velocity, self.min_angular_velocity_a[axis], self.max_angular_velocity_a[axis], "axis %d velocity" % axis )
elif (type( velocity ) in self.vector_types):
self.CheckRange( velocity, self.min_angular_velocity_a, self.max_angular_velocity_a, "axis velocity" )
return self.AxisCommand( "tvav", axis, velocity )
#-----------------------------------------------------------------
def m( self, sequ ):
    '''
    Send the move command. Moves all enabled axes to their previously
    set target angles. The movement duration is determined by the axis
    that takes longest with its currently set velocity; all other axes
    are slowed down so that every axis starts and stops synchronously.

    If sequ is True this call blocks until the SDH hardware has fully
    executed the command, else it returns immediately.

    Returns the expected duration of the movement in seconds.
    '''
    # Since firmware 0.0.3.1 the reply is a single line, e.g. "M=4.51s".
    expected_lines = 1
    answer = self.Send( "m", expected_lines, expected_lines )
    duration = self.GetDuration( answer[0] )
    if sequ:
        # The firmware produces no "finished" message, so the only way to
        # run sequentially is to wait out the predicted duration (plus a
        # configurable safety margin).
        time.sleep( duration + self.m_sequtime )
    return duration
#-----------------------------------------------------------------
def get_duration( self ):
    '''
    Send the "get_duration" command and return (in seconds) the duration
    that the currently configured movement (target positions, velocities,
    accelerations and velocity profile) would take.
    '''
    # Since firmware 0.0.3.1 the reply is one line, e.g. "GET_DURATION=4.51".
    expected_lines = 1
    answer = self.Send( "get_duration", expected_lines, expected_lines )
    return self.GetDuration( answer[0] )
#-----------------------------------------------------------------
def stop( self ):
    '''
    Stop sdh.
    Will NOT interrupt a previous "selgrip" or "grip" command, only an "m" command!
    '''
    # Fire-and-forget: no reply lines are expected or parsed.
    self.Send( "stop" )
#-----------------------------------------------------------------
def vp( self, velocity_profile=None ):
    '''
    Get/set velocity profile.

    If velocity_profile is None then the currently set velocity profile
    is read from the SDH firmware and returned. Else the given
    velocity_profile type is set in the SDH firmware if valid.

    Raises cSDHErrorInvalidParameter if velocity_profile is neither a
    number nor None.
    '''
    if (type( velocity_profile ) in (int, float)):
        self.CheckIndex( velocity_profile, len(self.eVelocityProfile), "velocity profile type" )
        reply = self.Send( "vp=%d" % (velocity_profile) )
    elif (velocity_profile is None):
        reply = self.Send( "vp" )
    else:
        # Bugfix: corrected typo in the user-facing message ("paramter").
        raise cSDHErrorInvalidParameter( "Invalid parameter type %s for velocity_profile! (Not in [int, None])" % (type(velocity_profile)) )
    # Reply looks like "VP=<n>"; cache the numeric profile id (grip()
    # consults self.actual_vp to avoid redundant profile switches).
    self.actual_vp = int( reply[-1][3:] )
    return self.actual_vp
#-----------------------------------------------------------------
def con( self, controller=None ):
    '''
    Get/set controller type.

    If controller is None then the currently set controller is read from
    the SDH firmware and returned. Else the given controller type is set
    in the SDH firmware if valid.

    Raises cSDHErrorInvalidParameter if controller is neither a number
    nor None.
    '''
    if (type( controller ) in (int, float)):
        self.CheckIndex( controller, len(self.eControllerType), "controller type" )
        reply = self.Send( "con=%d" % (controller) )
    elif (controller is None):
        reply = self.Send( "con" )
    else:
        # Bugfix: corrected typo in the user-facing message ("paramter").
        raise cSDHErrorInvalidParameter( "Invalid parameter type %s for controller! (Not in [int, None])" % (type(controller)) )
    # Reply looks like "CON=<n>"; cache the numeric controller id.
    self.actual_con = int( reply[-1][4:] )
    return self.actual_con
# end of doxygen name group sdhlibrary_python_csdhserial_movement_commands
## @}
##################################################################
##################################################################
## \anchor sdhlibrary_python_csdhserial_diagnosis_commands
# \name Diagnostic and identification methods
# @{
#-----------------------------------------------------------------
def pos( self, axis=All ):
    '''
    Get actual angle/s of axis/axes.
    - If axis is All then a NUMBER_OF_AXES-vector of the actual
      axis angles is returned
    - If axis is a single number then the
      actual angle of that axis is returned.
    Angles are reported in degrees.
    '''
    # Pure query - no value argument is passed to AxisCommand.
    return self.AxisCommand( "pos", axis )
#-----------------------------------------------------------------
def pos_save( self, axis=All, value=None ):
    '''
    Save actual angle/s to non volatile memory. (Useful for axes that don't have an absolute encoder)
    - If value is None then an exception is thrown since
      this is NOT useful if any axis has an absolute encoder that
      the LLC knows about since these positions will be invalidated at the next start
    - If axis and value are single numbers then that axis is saved.
    - If axis is All and value is a NUMBER_OF_AXES-vector
      then all axes are saved if the corresponding value is 1.
    - This will yield a E_RANGE_ERROR if any of the given values is not 0 or 1
    '''
    # Reject the default early: saving "nothing" is never meaningful here.
    if ( value is None ):
        raise cSDHErrorInvalidParameter( "value may not be None for pos_save" )
    return self.AxisCommand( "pos_save", axis, value )
#-----------------------------------------------------------------
def ref( self, axis=All, value=None ):
    '''
    Do reference movements with selected axes. (Useful for axes that don't have an absolute encoder)
    value must be either
    - 0 : do not reference
    - 1 : reference till mechanical block in positive direction
    - 2 : reference till mechanical block in negative direction
    - If value is None then an exception is thrown since
      this is NOT useful here
    - If axis and value are single numbers then that axis is referenced as requested.
    - If axis is All and value is a NUMBER_OF_AXES-vector
      then all axes are referenced as requested.
    - This will yield a E_RANGE_ERROR if any of the given values is not 0 or 1 or 2
    '''
    # Reject the default early: the firmware needs an explicit 0/1/2 choice.
    if ( value is None ):
        raise cSDHErrorInvalidParameter( "value may not be None for ref" )
    return self.AxisCommand( "ref", axis, value )
#-----------------------------------------------------------------
def vel( self, axis=All ):
    '''
    Get actual angular velocity/ies of axis/axes.
    - If axis is All then a NUMBER_OF_AXES-vector of the actual
      angular velocity is returned
    - If axis is a single number then the
      actual angular velocity of that axis is returned.
    Angular velocities are reported in degrees per second.
    '''
    # Pure query - no value argument is passed to AxisCommand.
    return self.AxisCommand( "vel", axis )
#-----------------------------------------------------------------
def rvel( self, axis=All ):
    '''
    Get reference angular velocity/ies of axis/axes.
    - If axis is All then a NUMBER_OF_AXES-vector of the reference
      angular velocity is returned
    - If axis is a single number then the
      reference angular velocity of that axis is returned.
    Angular velocities are reported in degrees per second.
    '''
    # Pure query - no value argument is passed to AxisCommand.
    return self.AxisCommand( "rvel", axis )
#-----------------------------------------------------------------
def state( self, axis=All ):
    '''
    Get actual state/s of axis/axes.
    state values are returned numerically, see eAxisState.
    - If axis is All then a NUMBER_OF_AXES-vector of the actual
      axis states is returned
    - If axis is a single number then the
      actual state of that axis is returned.
    '''
    # Pure query - no value argument is passed to AxisCommand.
    return self.AxisCommand( "state", axis )
#-----------------------------------------------------------------
def temp( self ):
    '''
    Get actual temperatures of SDH.
    Returns a list of the actual controller and driver temperature
    (as floats) in degrees celsius.
    '''
    # Reply looks like "TEMP=<v1>,<v2>". Strip the 5-char "TEMP=" prefix
    # and parse the comma separated numbers explicitly instead of
    # eval()-ing data received from the device (eval on external input is
    # unsafe and hides parse errors). Values are now uniformly floats.
    reply = self.Send( "temp" )
    return [ float(v) for v in reply[0][5:].split(",") ]
#-----------------------------------------------------------------
def p_min( self, axis=All, angle=None ):
    '''
    Get/Set minimum allowed target angle for axis.
    - If axis is All and angle is None then a NUMBER_OF_AXES-list
      of the currently set minimum angles is returned
    - If axis is a single number and angle is None then the
      minimum angle for that axis is returned.
    - If axis and angle are single numbers then the minimum
      angle for that axis is set (and returned).
    - If axis is All and angle is a NUMBER_OF_AXES-vector
      then all axes minimum angles are set accordingly, the NUMBER_OF_AXES-list
      is returned.
    - This will yield a E_RANGE_ERROR if any of the new minimum positions
      to set is larger than the actual position or the current maximum
      position of the axis.
    Angles are set/reported in degrees.
    '''
    # A new minimum must not exceed the current position nor the current
    # maximum, hence the min(pos, p_max) upper bound in CheckRange.
    if (type( angle ) in (int, float)):
        if axis == All:
            for a in self.all_axes:
                self.CheckRange( angle, MIN_FLOAT, min( self.pos( a ), self.p_max( a ) ), "axis %s angle" % (repr(a)) )
        else:
            self.CheckRange( angle, MIN_FLOAT, min( self.pos( axis ), self.p_max( axis ) ), "axis %d angle" % axis )
    elif (type( angle ) in self.vector_types):
        # NOTE(review): the scalar branch uses module-level MIN_FLOAT while
        # this branch uses self.MIN_FLOATS (a vector) - presumably the
        # per-element equivalent; confirm both are defined consistently.
        apos = self.pos( All )
        amax = self.p_max( All )
        self.CheckRange( angle, self.MIN_FLOATS, Allmin( apos, amax ), "axis angle" )
    return self.AxisCommand( "p_min", axis, angle )
#-----------------------------------------------------------------
def p_max( self, axis=All, angle=None ):
    '''
    Get/Set maximum allowed target angle for axis.
    - If axis is All and angle is None then a NUMBER_OF_AXES-list
      of the currently set maximum angles is returned
    - If axis is a single number and angle is None then the
      maximum angle for that axis is returned.
    - If axis and angle are single numbers then the maximum
      angle for that axis is set (and returned).
    - If axis is All and angle is a NUMBER_OF_AXES-vector
      then all axes maximum angles are set accordingly, the NUMBER_OF_AXES-list
      is returned.
    - This will yield a E_RANGE_ERROR if any of the new maximum positions
      to set is smaller than the actual position or the current minimum
      position of the axis.
    Angles are set/reported in degrees.
    '''
    # A new maximum must not fall below the current position nor the
    # current minimum, hence the max(pos, p_min) lower bound in CheckRange.
    if (type( angle ) in (int, float)):
        if ( axis == All ):
            for a in self.all_axes:
                self.CheckRange( angle, max( self.pos( a ), self.p_min( a ) ), MAX_FLOAT, "axis %s angle" % (repr(a)) )
        else:
            self.CheckRange( angle, max( self.pos( axis ), self.p_min( axis ) ), MAX_FLOAT, "axis %d angle" % axis )
    elif (type( angle ) in self.vector_types):
        apos = self.pos( All )
        amin = self.p_min( All )
        # Bugfix: CheckRange expects (value, minimum, maximum). The lower
        # bound is the elementwise maximum of current position and minimum
        # angle, the upper bound is MAX_FLOATS - the two arguments were
        # passed in swapped order before (compare the scalar branch above
        # and the mirrored logic in p_min()).
        self.CheckRange( angle, Allmax( apos, amin ), self.MAX_FLOATS, "axis angle" )
    return self.AxisCommand( "p_max", axis, angle )
#-----------------------------------------------------------------
def p_offset( self, axis=All, angle=None ):
    '''
    Get/Set offset for axis.
    - If axis is All and angle is None then a NUMBER_OF_AXES-list
      of the currently set offset angles is returned
    - If axis is a single number and angle is None then the
      offset angle for that axis is returned.
    - If axis and angle are single numbers then the offset
      angle for that axis is set (and returned).
    - If axis is All and angle is a NUMBER_OF_AXES-vector
      then all axes offset angles are set accordingly, the NUMBER_OF_AXES-list
      is returned.
    Angles are set/reported in degrees.
    '''
    # NOTE(review): unlike p_min/p_max no range checking is performed
    # here - presumably any offset is acceptable to the firmware; confirm.
    return self.AxisCommand( "p_offset", axis, angle )
#-----------------------------------------------------------------
def ver( self ):
    '''Return the version of the SDH firmware (reply line "VER=<version>").'''
    answer = self.Send( "ver" )
    # Strip the "VER=" prefix from the single reply line.
    return answer[0][ len( "VER=" ) : ]
def ver_date( self ):
    '''Return the build date of the SDH firmware (reply line "VER_DATE=<date>").'''
    answer = self.Send( "ver_date" )
    # Strip the "VER_DATE=" prefix from the single reply line.
    return answer[0][ len( "VER_DATE=" ) : ]
#-----------------------------------------------------------------
def id( self ):
    '''Return the id of the SDH (reply line "ID=<id>").'''
    answer = self.Send( "id" )
    # Strip the "ID=" prefix from the single reply line.
    return answer[0][ len( "ID=" ) : ]
#-----------------------------------------------------------------
def sn( self ):
    '''Return the serial number of the SDH (reply line "SN=<sn>").'''
    answer = self.Send( "sn" )
    # Strip the "SN=" prefix from the single reply line.
    return answer[0][ len( "SN=" ) : ]
#-----------------------------------------------------------------
def soc( self ):
    '''Return the SoC id of the SDH (reply line "SOC=<soc>").'''
    answer = self.Send( "soc" )
    # Strip the "SOC=" prefix from the single reply line.
    return answer[0][ len( "SOC=" ) : ]
#-----------------------------------------------------------------
def soc_date( self ):
    '''Return the SoC date of the SDH (reply line "SOC_DATE=<date>").'''
    answer = self.Send( "soc_date" )
    # Strip the "SOC_DATE=" prefix from the single reply line.
    return answer[0][ len( "SOC_DATE=" ) : ]
#-----------------------------------------------------------------
def numaxis( self ):
    '''Return the number of axes of the SDH (reply line "NUMAXIS=<n>").'''
    answer = self.Send( "numaxis" )
    # Strip the "NUMAXIS=" prefix and convert the remainder to int.
    return int( answer[0][ len( "NUMAXIS=" ) : ] )
# end of doxygen name group sdhlibrary_python_csdhserial_diagnosis_commands
## @}
##################################################################
##################################################################
## \anchor sdhlibrary_python_csdhserial_grip_commands
# \name Grip methods
# @{
#-----------------------------------------------------------------
def igrip( self, axis=All, limit=None ):
    '''
    Get/Set motor current limits for grip commands
    - If axis is All and limit is None then a NUMBER_OF_AXES-list
      of the currently set current limits is returned
    - If axis is a single number and limit is None then the
      current limit for that axis is returned.
    - If axis and limit are single numbers then the current limit
      for that axis is set (and returned).
    - If axis is All and limit is a NUMBER_OF_AXES-vector then all axes
      current limits are set accordingly, the NUMBER_OF_AXES-list is returned.
    '''
    # No local range check: limit validation is left to the firmware.
    return self.AxisCommand( "igrip", axis, limit )
#-----------------------------------------------------------------
def ihold( self, axis=All, limit=None ):
    '''
    Get/Set motor current limits for hold commands
    - If axis is All and limit is None then a NUMBER_OF_AXES-list
      of the currently set current limits is returned
    - If axis is a single number and limit is None then the
      current limit for that axis is returned.
    - If axis and limit are single numbers then the current limit
      for that axis is set (and returned).
    - If axis is All and limit is a NUMBER_OF_AXES-vector then all axes
      current limits are set accordingly, the NUMBER_OF_AXES-list is returned.
    '''
    # No local range check: limit validation is left to the firmware.
    return self.AxisCommand( "ihold", axis, limit )
#-----------------------------------------------------------------
def selgrip( self, grip, sequ ):
    '''
    Send "selgrip=<grip>" to the SDH, where grip is in
    [0..self.NUMBER_OF_GRIPS-1] or one of the self.eGraspId enums.

    Returns the expected duration of the execution of the command in
    seconds.

    NOTE(review): unlike m(), the sequ parameter is currently not
    honoured - the call always returns immediately after parsing the
    reply; confirm whether sequential waiting is intended here.
    '''
    self.CheckIndex( grip, self.NUMBER_OF_GRIPS, "grip" )
    # Since firmware 0.0.3.1 the reply is one line, e.g. "SELGRIP=0.0,1".
    expected_lines = 1
    answer = self.Send( "selgrip=" + str(grip), expected_lines, expected_lines )
    return self.GetDuration( answer[0] )
#-----------------------------------------------------------------
def grip( self, close, velocity, sequ ):
    '''
    Send "grip=<close>,<velocity>" to the SDH.

    close    : [0.0 .. 1.0] where 0.0 is 'fully opened' and 1.0 is 'fully closed'
    velocity : ]0.0 .. 100.0] where 0.0 (not allowed) is very slow and 100.0 is very fast

    This seems to work with the sin square velocity profile only, so that
    profile is selected first if necessary. Returns the expected duration
    of the execution of the command in seconds.

    NOTE(review): unlike m(), the sequ parameter is currently not
    honoured - the call always returns immediately after parsing the
    reply; confirm whether sequential waiting is intended here.
    '''
    self.CheckRange( close, 0.0, 1.0, "close ratio" )
    self.CheckRange( velocity, 0.0+self.eps, 100.0, "velocity" )
    # Switch to velocity profile 0 (sin square) if a different profile is
    # active or no profile has been queried yet (self.actual_vp unset).
    try:
        needs_switch = (self.actual_vp != 0)
    except AttributeError:
        needs_switch = True
    if needs_switch:
        self.vp( 0 )
    # Since firmware 0.0.3.1 the reply is one line, e.g. "GRIP=0.42,0.1".
    expected_lines = 1
    answer = self.Send( "grip=" + str(close) + "," + str(velocity), expected_lines, expected_lines )
    return self.GetDuration( answer[0] )
# end of doxygen name group sdhlibrary_python_csdhserial_grip_commands
## @}
##################################################################
# end of class cSDHSerial
#=====================================================================
| UTF-8 | Python | false | false | 62,429 | py | 162 | sdhserial.py | 59 | 0.501129 | 0.490269 | 0 | 1,486 | 41.01144 | 212 |
jmval111/Programming-Foundations-with-Python | 3,556,232,940,195 | 6b99f97439b7e623e0197378e7b7c056d295b1e0 | a233cbbbddfc1cf9a7e6918d4d98d326bef4bf31 | /2 - Uses Classes - Draw Turtles/Making A Circle Out Of Squares/circles squares.py | 2daa110b66fecbafbabc7bbf818635f97b485f39 | [] | no_license | https://github.com/jmval111/Programming-Foundations-with-Python | 5476e5ecbd02fe3e9d200abe392d01a3aea1d99e | 1f1bca1ca13e92d1968bb8fbc0fddfced40b8ae6 | refs/heads/master | 2021-06-03T15:54:35.303417 | 2016-05-04T22:11:08 | 2016-05-04T22:11:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import turtle
def draw_square(some_turtle):
    """Draw a square with 100-unit sides using the given turtle."""
    # Four identical side/turn steps make one closed square.
    for _ in range(4):
        some_turtle.forward(100)
        some_turtle.right(90)
def draw_art():
    """Draw a flower-like pattern: 36 squares, each rotated by 10 degrees.

    Opens a turtle graphics window with a red background and a yellow
    turtle, then waits for a click before closing.
    """
    window = turtle.Screen()
    window.bgcolor("red")
    jp = turtle.Turtle()
    jp.shape("turtle")
    jp.color("yellow")
    jp.speed(6)
    # 36 squares x 10 degrees = one full 360-degree rotation.
    for _ in range(36):
        draw_square(jp)
        jp.right(10)
    # (Removed dead, commented-out second-turtle experiment that only
    # drew an extra circle.)
    window.exitonclick()
if __name__ == "__main__":
    # Only run the demo when executed as a script, not when imported.
    draw_art()
| UTF-8 | Python | false | false | 506 | py | 45 | circles squares.py | 35 | 0.592885 | 0.549407 | 0 | 36 | 12 | 30 |
tefra/xsdata-samples | 2,808,908,615,482 | 4f531928e6983dde2105e4ea54c417f06c352410 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /autosar/models/ip_sec_rule.py | e651050c479e0df6d4f235e0350c6f0e3c03d632 | [] | no_license | https://github.com/tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | false | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | 2023-01-05T10:04:55 | 2023-06-25T07:21:03 | 22,394 | 6 | 0 | 0 | Python | false | false | from dataclasses import dataclass, field
from typing import List, Optional
from .annotation import (
AdminData,
Annotation,
DocumentationBlock,
)
from .category_string import CategoryString
from .communication_direction_type import CommunicationDirectionType
from .crypto_service_certificate_subtypes_enum import CryptoServiceCertificateSubtypesEnum
from .crypto_service_key_subtypes_enum import CryptoServiceKeySubtypesEnum
from .i_psec_header_type_enum import IPsecHeaderTypeEnum
from .i_psec_ip_protocol_enum import IPsecIpProtocolEnum
from .i_psec_mode_enum import IPsecModeEnum
from .i_psec_policy_enum import IPsecPolicyEnum
from .identifier import Identifier
from .ike_authentication_method_enum import IkeAuthenticationMethodEnum
from .multi_language_overview_paragraph import MultiLanguageOverviewParagraph
from .multilanguage_long_name import MultilanguageLongName
from .network_endpoint_subtypes_enum import NetworkEndpointSubtypesEnum
from .positive_integer import PositiveInteger
from .ref import Ref
from .short_name_fragment import ShortNameFragment
from .string import String
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
# NOTE(review): this model appears to be auto-generated from the AUTOSAR
# r4.0 XML schema (xsdata-style binding) - prefer regenerating from the
# schema over hand-editing; confirm the generator setup before changing.
@dataclass
class IpSecRule:
    """
    This element defines an IPsec rule that describes communication traffic that is
    monitored, protected and filtered.

    :ivar short_name: This specifies an identifying shortName for the
        object. It needs to be unique within its context and is intended
        for humans but even more for technical reference.
    :ivar short_name_fragments: This specifies how the
        Referrable.shortName is composed of several shortNameFragments.
    :ivar long_name: This specifies the long name of the object. Long
        name is targeted to human readers and acts like a headline.
    :ivar desc: This represents a general but brief (one paragraph)
        description what the object in question is about. It is only one
        paragraph! Desc is intended to be collected into overview
        tables. This property helps a human reader to identify the
        object in question. More elaborate documentation, (in particular
        how the object is built or used) should go to "introduction".
    :ivar category: The category is a keyword that specializes the
        semantics of the Identifiable. It affects the expected existence
        of attributes and the applicability of constraints.
    :ivar admin_data: This represents the administrative data for the
        identifiable object.
    :ivar introduction: This represents more information about how the
        object in question is built or is used. Therefore it is a
        DocumentationBlock.
    :ivar annotations: Possibility to provide additional notes while
        defining a model element (e.g. the ECU Configuration Parameter
        Values). These are not intended as documentation but are mere
        design notes.
    :ivar direction: This attribute defines the direction in which the
        traffic is monitored. If this attribute is not set a
        bidirectional traffic monitoring is assumed.
    :ivar header_type: Header type specifying the IPsec security
        mechanism.
    :ivar ike_authentication_method: This attribute defines the IKE
        authentication method that is used locally and is expected on
        the remote side.
    :ivar ip_protocol: This attribute defines the relevant IP protocol
        used in the Security Policy Database (SPD) entry.
    :ivar local_certificate_refs: This reference identifies the
        applicable certificate used for a local authentication.
    :ivar local_id: This attribute defines how the local participant
        should be identified for authentication.
    :ivar local_port_range_end: This attribute restricts the traffic
        monitoring and defines an end value for the local port range. If
        this attribute is not set then this rule shall be effective for
        all local ports. Please note that port ranges are currently not
        supported in the AUTOSAR AP's operating system backend. If AP
        systems are involved, each IPsec rule may only contain a single
        port.
    :ivar local_port_range_start: This attribute restricts the traffic
        monitoring and defines a start value for the local port range.
        If this attribute is not set then this rule shall be effective
        for all local ports. Please note that port ranges are currently
        not supported in the AUTOSAR AP's operating system backend. If
        AP systems are involved, each IPsec rule may only contain a
        single port.
    :ivar mode: This attribute defines the type of the connection.
    :ivar policy: An IPsec policy defines the rules that determine which
        type of IP traffic needs to be secured using IPsec and how that
        traffic is secured.
    :ivar pre_shared_key_ref: This reference identifies the applicable
        cryptograhic key used for authentication.
    :ivar priority: This attribute defines the priority of the IPSecRule
        (SPD entry). The processing of entries is based on priority,
        starting with the highest priority "0".
    :ivar remote_certificate_refs: This reference identifies the
        applicable certificate used for a remote authentication.
    :ivar remote_id: This attribute defines how the remote participant
        should be identified for authentication.
    :ivar remote_ip_address_refs: Definition of the remote
        NetworkEndpoint. With this reference the connection between the
        local NetworkEndpoint and the remote NetworkEndpoint is
        described on which the traffic is monitored.
    :ivar remote_port_range_end: This attribute restricts the traffic
        monitoring and defines an end value for the remote port range.
        If this attribute is not set then this rule shall be effective
        for all local ports. Please note that port ranges are currently
        not supported in the AUTOSAR AP's operating system backend. If
        AP systems are involved, each IPsec rule may only contain a
        single port.
    :ivar remote_port_range_start: This attribute restricts the traffic
        monitoring and defines a start value for the remote port range.
        If this attribute is not set then this rule shall be effective
        for all local ports. Please note that port ranges are currently
        not supported in the AUTOSAR AP's operating system backend. If
        AP systems are involved, each IPsec rule may only contain a
        single port.
    :ivar s: Checksum calculated by the user's tool environment for an
        ArObject. May be used in an own tool environment to determine if
        an ArObject has changed. The checksum has no semantic meaning
        for an AUTOSAR model and there is no requirement for AUTOSAR
        tools to manage the checksum.
    :ivar t: Timestamp calculated by the user's tool environment for an
        ArObject. May be used in an own tool environment to determine
        the last change of an ArObject. The timestamp has no semantic
        meaning for an AUTOSAR model and there is no requirement for
        AUTOSAR tools to manage the timestamp.
    :ivar uuid: The purpose of this attribute is to provide a globally
        unique identifier for an instance of a meta-class. The values of
        this attribute should be globally unique strings prefixed by the
        type of identifier. For example, to include a DCE UUID as
        defined by The Open Group, the UUID would be preceded by "DCE:".
        The values of this attribute may be used to support merging of
        different AUTOSAR models. The form of the UUID (Universally
        Unique Identifier) is taken from a standard defined by the Open
        Group (was Open Software Foundation). This standard is widely
        used, including by Microsoft for COM (GUIDs) and by many
        companies for DCE, which is based on CORBA. The method for
        generating these 128-bit IDs is published in the standard and
        the effectiveness and uniqueness of the IDs is not in practice
        disputed. If the id namespace is omitted, DCE is assumed. An
        example is "DCE:2fac1234-31f8-11b4-a222-08002b34c003". The uuid
        attribute has no semantic meaning for an AUTOSAR model and there
        is no requirement for AUTOSAR tools to manage the timestamp.
    """
    # xsdata binding metadata: maps this class to the <IP-SEC-RULE> tag.
    class Meta:
        name = "IP-SEC-RULE"

    short_name: Optional[Identifier] = field(
        default=None,
        metadata={
            "name": "SHORT-NAME",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
            "required": True,
        }
    )
    short_name_fragments: Optional["IpSecRule.ShortNameFragments"] = field(
        default=None,
        metadata={
            "name": "SHORT-NAME-FRAGMENTS",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    long_name: Optional[MultilanguageLongName] = field(
        default=None,
        metadata={
            "name": "LONG-NAME",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    desc: Optional[MultiLanguageOverviewParagraph] = field(
        default=None,
        metadata={
            "name": "DESC",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    category: Optional[CategoryString] = field(
        default=None,
        metadata={
            "name": "CATEGORY",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    admin_data: Optional[AdminData] = field(
        default=None,
        metadata={
            "name": "ADMIN-DATA",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    introduction: Optional[DocumentationBlock] = field(
        default=None,
        metadata={
            "name": "INTRODUCTION",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    annotations: Optional["IpSecRule.Annotations"] = field(
        default=None,
        metadata={
            "name": "ANNOTATIONS",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    direction: Optional[CommunicationDirectionType] = field(
        default=None,
        metadata={
            "name": "DIRECTION",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    header_type: Optional[IPsecHeaderTypeEnum] = field(
        default=None,
        metadata={
            "name": "HEADER-TYPE",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    ike_authentication_method: Optional[IkeAuthenticationMethodEnum] = field(
        default=None,
        metadata={
            "name": "IKE-AUTHENTICATION-METHOD",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    ip_protocol: Optional[IPsecIpProtocolEnum] = field(
        default=None,
        metadata={
            "name": "IP-PROTOCOL",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    local_certificate_refs: Optional["IpSecRule.LocalCertificateRefs"] = field(
        default=None,
        metadata={
            "name": "LOCAL-CERTIFICATE-REFS",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    local_id: Optional[String] = field(
        default=None,
        metadata={
            "name": "LOCAL-ID",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    local_port_range_end: Optional[PositiveInteger] = field(
        default=None,
        metadata={
            "name": "LOCAL-PORT-RANGE-END",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    local_port_range_start: Optional[PositiveInteger] = field(
        default=None,
        metadata={
            "name": "LOCAL-PORT-RANGE-START",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    mode: Optional[IPsecModeEnum] = field(
        default=None,
        metadata={
            "name": "MODE",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    policy: Optional[IPsecPolicyEnum] = field(
        default=None,
        metadata={
            "name": "POLICY",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    pre_shared_key_ref: Optional["IpSecRule.PreSharedKeyRef"] = field(
        default=None,
        metadata={
            "name": "PRE-SHARED-KEY-REF",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    priority: Optional[PositiveInteger] = field(
        default=None,
        metadata={
            "name": "PRIORITY",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    remote_certificate_refs: Optional["IpSecRule.RemoteCertificateRefs"] = field(
        default=None,
        metadata={
            "name": "REMOTE-CERTIFICATE-REFS",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    remote_id: Optional[String] = field(
        default=None,
        metadata={
            "name": "REMOTE-ID",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    remote_ip_address_refs: Optional["IpSecRule.RemoteIpAddressRefs"] = field(
        default=None,
        metadata={
            "name": "REMOTE-IP-ADDRESS-REFS",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    remote_port_range_end: Optional[PositiveInteger] = field(
        default=None,
        metadata={
            "name": "REMOTE-PORT-RANGE-END",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    remote_port_range_start: Optional[PositiveInteger] = field(
        default=None,
        metadata={
            "name": "REMOTE-PORT-RANGE-START",
            "type": "Element",
            "namespace": "http://autosar.org/schema/r4.0",
        }
    )
    # The following three are XML attributes (not child elements):
    # tool-managed checksum, timestamp and UUID - see the class docstring.
    s: Optional[str] = field(
        default=None,
        metadata={
            "name": "S",
            "type": "Attribute",
        }
    )
    t: Optional[str] = field(
        default=None,
        metadata={
            "name": "T",
            "type": "Attribute",
            # ISO-8601-like date with optional time and timezone suffix.
            "pattern": r"([0-9]{4}-[0-9]{2}-[0-9]{2})(T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|([+\-][0-9]{2}:[0-9]{2})))?",
        }
    )
    uuid: Optional[str] = field(
        default=None,
        metadata={
            "name": "UUID",
            "type": "Attribute",
        }
    )

    # Wrapper for the repeated <SHORT-NAME-FRAGMENT> child elements.
    @dataclass
    class ShortNameFragments:
        short_name_fragment: List[ShortNameFragment] = field(
            default_factory=list,
            metadata={
                "name": "SHORT-NAME-FRAGMENT",
                "type": "Element",
                "namespace": "http://autosar.org/schema/r4.0",
            }
        )

    # Wrapper for the repeated <ANNOTATION> child elements.
    @dataclass
    class Annotations:
        annotation: List[Annotation] = field(
            default_factory=list,
            metadata={
                "name": "ANNOTATION",
                "type": "Element",
                "namespace": "http://autosar.org/schema/r4.0",
            }
        )

    # Wrapper for references to certificates used for local authentication.
    @dataclass
    class LocalCertificateRefs:
        local_certificate_ref: List["IpSecRule.LocalCertificateRefs.LocalCertificateRef"] = field(
            default_factory=list,
            metadata={
                "name": "LOCAL-CERTIFICATE-REF",
                "type": "Element",
                "namespace": "http://autosar.org/schema/r4.0",
            }
        )

        @dataclass
        class LocalCertificateRef(Ref):
            dest: Optional[CryptoServiceCertificateSubtypesEnum] = field(
                default=None,
                metadata={
                    "name": "DEST",
                    "type": "Attribute",
                    "required": True,
                }
            )

    # Reference to the cryptographic key used for IKE authentication.
    @dataclass
    class PreSharedKeyRef(Ref):
        dest: Optional[CryptoServiceKeySubtypesEnum] = field(
            default=None,
            metadata={
                "name": "DEST",
                "type": "Attribute",
                "required": True,
            }
        )

    # Wrapper for references to certificates used for remote authentication.
    @dataclass
    class RemoteCertificateRefs:
        remote_certificate_ref: List["IpSecRule.RemoteCertificateRefs.RemoteCertificateRef"] = field(
            default_factory=list,
            metadata={
                "name": "REMOTE-CERTIFICATE-REF",
                "type": "Element",
                "namespace": "http://autosar.org/schema/r4.0",
            }
        )

        @dataclass
        class RemoteCertificateRef(Ref):
            dest: Optional[CryptoServiceCertificateSubtypesEnum] = field(
                default=None,
                metadata={
                    "name": "DEST",
                    "type": "Attribute",
                    "required": True,
                }
            )

    # Wrapper for references to the remote NetworkEndpoint(s) monitored
    # by this rule.
    @dataclass
    class RemoteIpAddressRefs:
        remote_ip_address_ref: List["IpSecRule.RemoteIpAddressRefs.RemoteIpAddressRef"] = field(
            default_factory=list,
            metadata={
                "name": "REMOTE-IP-ADDRESS-REF",
                "type": "Element",
                "namespace": "http://autosar.org/schema/r4.0",
            }
        )

        @dataclass
        class RemoteIpAddressRef(Ref):
            dest: Optional[NetworkEndpointSubtypesEnum] = field(
                default=None,
                metadata={
                    "name": "DEST",
                    "type": "Attribute",
                    "required": True,
                }
            )
| UTF-8 | Python | false | false | 18,166 | py | 7,285 | ip_sec_rule.py | 7,033 | 0.602829 | 0.596554 | 0 | 471 | 37.569002 | 113 |
jgab13/Stonehenge | 16,930,761,117,797 | ee20595d22f4a8b13b6b82e8c8123cc8ab78d761 | 18d026163ba0c3823d1d8f08196c1e89086806ed | /stonehenge_state.py | 665d9e9ccc1804a3a0fde23ea2a4a351dc972f15 | [] | no_license | https://github.com/jgab13/Stonehenge | 6cdc5d0bd4e2c5e01192ec7bb5720099c06d0095 | 10b88bf7a061c41da46fe852f4719f2725ed9f13 | refs/heads/master | 2020-04-12T20:50:49.309040 | 2018-12-21T19:01:59 | 2018-12-21T19:01:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Stonehenge state class
subclass of GameState
"""
from typing import Any, Dict, List
from string import ascii_uppercase
import copy
from game_state import GameState
def calculate_size(size: int) -> int:
"""Returns the number of circles for a grid
of Stonehenge given a size.
>>> calculate_size(5)
25
>>> calculate_size(2)
7
>>> calculate_size(1)
3
>>> calculate_size(4)
18
"""
return int(2 + (((size + 1) - 2) / 2) * (3 + size + 1)
+ size)
def return_nodes(size: int) -> List[str]:
"""Returns a list of uppercase letters for a
give size of Stonehenge circles.
>>> return_nodes(calculate_size(1))
['A', 'B', 'C']
"""
return [ascii_uppercase[i] for i in range(size)]
def hor_leylines(leylines: Dict[int, List[str]], nodes: List[str]) -> None:
"""Mutuates a dictionary of leylines to include the nodes in each horizontal
leyine.
"""
b = 0
k = 2
c = 2
for i in range(1, len(leylines) // 3 + 1):
leylines[i].extend(nodes[b:k])
c += 1
b = k
k += c
leylines[len(leylines) // 3 + 1].extend(nodes[k:])
def diag_right_leylines(leylines: Dict[int, List[str]], nodes: List[str],
size: int) -> None:
"""Mutuates a dictionary of leylines to include the nodes in each
diagonal right leyline."""
# to deal with the last row of the grid
b = size
k = 0
for i in range(len(leylines) // 3 + 1, (len(leylines) // 3) * 2):
leylines[i].append(nodes[-b + k])
k += 1
# To deal with the first "size" leylines
f = 1
m = 0
for i in range(len(leylines) // 3 + 1, (len(leylines) // 3) * 2):
j = 0
b = size + size + 1
k = 0
while j < f:
leylines[i].append(nodes[-b + m])
j += 1
b += size - k + 1
k += 1
f += 1
m += 1
# To deal with the last leyline
i = 0
g = size
v = 0
while i < size:
leylines[len(leylines) // 3 * 2].append(nodes[-g - 1])
g += size - v + 1
v += 1
i += 1
def diag_left_leylines(leylines: Dict[int, List[str]], nodes: List[str],
size: int) -> None:
"""
Mutuates a dictionary of leylines to include the nodes in each
diagonal right leyline.
"""
# to deal with the last row of the grid
k = 1
for i in range((len(leylines) // 3) * 2 + 1, len(leylines)):
leylines[i].append(nodes[-k])
k += 1
# To deal with the first "size" leylines
f = 1
m = 0
for i in range((len(leylines) // 3) * 2 + 1, len(leylines)):
j = 0
b = size + 1
k = 0
while j < f:
leylines[i].append(nodes[-b - m])
j += 1
b += size - k
k += 1
f += 1
m += 1
# To deal with the last leyline
i = 0
g = size + size
v = 0
while i < size:
leylines[len(leylines)].append(nodes[-g - 1])
g += size - v
v += 1
i += 1
class StoneState(GameState):
"""
# The state of a game at a certain point in time.
WIN - score if player is in a winning position
LOSE - score if player is in a losing position
DRAW - score if player is in a tied position
p1_turn - whether it is p1's turn or not
"""
WIN: int = 1
LOSE: int = -1
DRAW: int = 0
p1_turn: bool
leylines: Dict[int, List[str]]
leywinners: Dict[int, str]
node: str
def __init__(self, is_p1_turn: bool, size: int) -> None:
"""
Initialize this game state and set the current player based on
is_p1_turn.
Extends GameState.__init__
"""
super().__init__(is_p1_turn)
self.size = size
self.nodes = return_nodes(calculate_size(self.size))
self.leylines = {}
self.leywinners = {}
for i in range(1, ((self.size + 1) * 3) + 1):
self.leywinners[i] = "@"
self.leylines[i] = []
hor_leylines(self.leylines, self.nodes)
diag_right_leylines(self.leylines, self.nodes, self.size)
diag_left_leylines(self.leylines, self.nodes, self.size)
def __str__(self) -> str:
"""
Return a string representation of the current state of the game.
Overrides GameState.__str__
"""
if self.size == 1:
return """\
{} {}
/ /
{} - {} - {}
\\ / \\
{} - {} {}
\\
{}""".format(self.leywinners[len(self.leywinners)],
self.leywinners[len(self.leywinners) - 1],
self.leywinners[1], self.nodes[0],
self.nodes[1], self.leywinners[2],
self.nodes[2], self.leywinners[len(self.leywinners) - 2],
self.leywinners[len(self.leywinners) - 3])
elif self.size == 2:
return """\
{} {}
/ /
{} - {} - {} {}
/ \\ / \\ /
{} - {} - {} - {}
\\ / \\ / \\
{} - {} - {} {}
\\ \\
{} {}""".format(self.leywinners[len(self.leywinners)],
self.leywinners[len(self.leywinners) - 1],
self.leywinners[1], self.nodes[0], self.nodes[1],
self.leywinners[len(self.leywinners) - 2],
self.leywinners[2], self.nodes[2], self.nodes[3],
self.nodes[4], self.leywinners[3], self.nodes[5],
self.nodes[6],
self.leywinners[len(self.leywinners) - 3],
self.leywinners[4], self.leywinners[5])
elif self.size == 3:
return """\
{} {}
/ /
{} - {} - {} {}
/ \\ / \\ /
{} - {} - {} - {} {}
/ \\ / \\ / \\ /
{} - {} - {} - {} - {}
\\ / \\ / \\ / \\
{} - {} - {} - {} {}
\\ \\ \\
{} {} {}""".format(self.leywinners[len(self.leywinners)],
self.leywinners[len(self.leywinners) - 1],
self.leywinners[1], self.nodes[0], self.nodes[1],
self.leywinners[len(self.leywinners) - 2],
self.leywinners[2], self.nodes[2], self.nodes[3],
self.nodes[4],
self.leywinners[len(self.leywinners) - 3],
self.leywinners[3], self.nodes[5], self.nodes[6],
self.nodes[7], self.nodes[8], self.leywinners[4],
self.nodes[9], self.nodes[10], self.nodes[11],
self.leywinners[len(self.leywinners) - 4],
self.leywinners[5], self.leywinners[6],
self.leywinners[7])
elif self.size == 4:
return """\
{} {}
/ /
{} - {} - {} {}
/ \\ / \\ /
{} - {} - {} - {} {}
/ \\ / \\ / \\ /
{} - {} - {} - {} - {} {}
/ \\ / \\ / \\ / \\ /
{} - {} - {} - {} - {} - {}
\\ / \\ / \\ / \\ / \\
{} - {} - {} - {} - {} {}
\\ \\ \\ \\
{} {} {} {}""".format(self.leywinners[len(self.leywinners)],
self.leywinners[len(self.leywinners) - 1],
self.leywinners[1], self.nodes[0],
self.nodes[1],
self.leywinners[len(self.leywinners) - 2],
self.leywinners[2], self.nodes[2],
self.nodes[3],
self.nodes[4],
self.leywinners[len(self.leywinners) - 3],
self.leywinners[3], self.nodes[5],
self.nodes[6],
self.nodes[7], self.nodes[8],
self.leywinners[len(self.leywinners) - 4],
self.leywinners[4], self.nodes[9],
self.nodes[10], self.nodes[11],
self.nodes[12],
self.nodes[13], self.leywinners[5],
self.nodes[14], self.nodes[15],
self.nodes[16],
self.nodes[17],
self.leywinners[len(self.leywinners) - 5],
self.leywinners[6], self.leywinners[7],
self.leywinners[8], self.leywinners[9])
return """
{} {}
/ /
{} - {} - {} {}
/ \\ / \\ /
{} - {} - {} - {} {}
/ \\ / \\ / \\ /
{} - {} - {} - {} - {} {}
/ \\ / \\ / \\ / \\ /
{} - {} - {} - {} - {} - {} {}
/ \\ / \\ / \\ / \\ / \\ /
{} - {} - {} - {} - {} - {} - {}
\\ / \\ / \\ / \\ / \\ / \\
{} - {} - {} - {} - {} - {} {}
\\ \\ \\ \\ \\
{} {} {} {} {}""".format(self.leywinners[len(self.leywinners)],
self.leywinners[
len(self.leywinners) - 1],
self.leywinners[1], self.nodes[0],
self.nodes[1],
self.leywinners[len(
self.leywinners) - 2],
self.leywinners[2], self.nodes[2],
self.nodes[3],
self.nodes[4],
self.leywinners[len(
self.leywinners) - 3],
self.leywinners[3], self.nodes[5],
self.nodes[6],
self.nodes[7], self.nodes[8],
self.leywinners[len(
self.leywinners) - 4],
self.leywinners[4], self.nodes[9],
self.nodes[10], self.nodes[11],
self.nodes[12],
self.nodes[13],
self.leywinners[len(
self.leywinners) - 5],
self.leywinners[5],
self.nodes[14], self.nodes[15],
self.nodes[16], self.nodes[17],
self.nodes[18], self.nodes[19],
self.leywinners[6], self.nodes[20],
self.nodes[21], self.nodes[22],
self.nodes[23],
self.nodes[24],
self.leywinners[len(
self.leywinners) - 6],
self.leywinners[7],
self.leywinners[8], self.leywinners[9],
self.leywinners[10],
self.leywinners[11])
def get_possible_moves(self) -> list:
"""
Return all possible moves that can be applied to this state.
Overrides GameState.get_possible_moves
>>> a = StoneState(True, 1)
>>> a.get_possible_moves()
['A', 'B', 'C']
"""
mov = []
for x in self.nodes:
if not isinstance(x, int):
mov.append(x)
if self.check_leylines():
return []
return mov
def get_current_player_name(self) -> str:
"""
Return 'p1' if the current player is Player 1, and 'p2' if the current
player is Player 2.
Overrides GameState.get_current_player_name
>>> a = StoneState(True, 1)
>>> a.get_current_player_name()
'p1'
"""
if self.p1_turn:
return 'p1'
return 'p2'
def make_move(self, move: Any) -> 'StoneState':
"""
Return the GameState that results from applying move to this StoneState.
Overrides GameState.make_move
"""
# create a new state that has the exact attributes of previous state
new = StoneState(self.p1_turn, self.size)
new.nodes = self.nodes.copy()
new.leylines = copy.deepcopy(self.leylines)
new.leywinners = copy.deepcopy(self.leywinners)
# make move and update all leylines, circles
# Update nodes
for i in range(len(new.nodes)):
if move == new.nodes[i]:
if new.get_current_player_name() == 'p1':
new.nodes[i] = 1
else:
new.nodes[i] = 2
# Update leylines
for k in new.leylines:
for i in range(len(new.leylines[k])):
if move == new.leylines[k][i]:
if new.get_current_player_name() == 'p1':
new.leylines[k][i] = 1
else:
new.leylines[k][i] = 2
# Check leyline winners based on leylines
for k in new.leywinners:
# for i in range(len(self.leywinners[k])):
if new.leywinners[k] == "@":
count1 = 0
count2 = 0
for j in range(len(new.leylines[k])):
if new.leylines[k][j] == 1:
count1 += 1
elif new.leylines[k][j] == 2:
count2 += 1
if count1 >= len(new.leylines[k]) / 2:
new.leywinners[k] = 1
elif count2 >= len(new.leylines[k]) / 2:
new.leywinners[k] = 2
# return new state
new.p1_turn = not new.p1_turn
return new
def is_valid_move(self, move: Any) -> bool:
"""
Return whether move is a valid move for this GameState.
Overrides GameState.is_valid_move
>>> a = StoneState(True, 2)
>>> a.is_valid_move('A')
True
>>> a.is_valid_move('X')
False
>>> a.is_valid_move(1)
False
"""
return move in self.get_possible_moves()
def __repr__(self) -> Any:
"""
Return a representation of this state (which can be used for
equality testing).
Overrides GameState.__repr__
"""
string = f'Current player is {self.get_current_player_name()}'
return self.__str__() + '\n' + string
def check_leylines(self) -> bool:
"""Returns true if majority of leylines have been captures
by a player in StoneState.
"""
count1 = 0
count2 = 0
for k in self.leywinners:
if self.leywinners[k] == 1:
count1 += 1
elif self.leywinners[k] == 2:
count2 += 1
return (count1 >= len(self.leywinners) / 2 or
count2 >= len(self.leywinners) / 2)
def rough_outcome(self) -> float:
"""
Return an estimate in interval [LOSE, WIN] of best
outcome the current player can guarantee from state self.
Overrides GameState.rough_outcome
"""
if self.check_leylines():
return self.LOSE
moves = self.get_possible_moves()
for move in moves:
new = self.make_move(move)
if new.check_leylines():
return self.WIN
move2 = new.get_possible_moves()
count = 0
for mov in move2:
new2 = new.make_move(mov)
if new2.check_leylines():
count += 1
if count == len(move2):
return self.LOSE
return self.DRAW
if __name__ == "__main__":
from python_ta import check_all
check_all(config="a2_pyta.txt")
| UTF-8 | Python | false | false | 16,438 | py | 4 | stonehenge_state.py | 4 | 0.41325 | 0.39506 | 0 | 469 | 34.049041 | 80 |
uw-it-aca/spotseeker_server | 17,446,157,178,810 | c3e9babe8e75e264c74da4538955628891376554 | 796dd05d84c56d0873797b29be58247b1be782c6 | /spotseeker_server/test/long_message.py | 20de4f6fd623467dd77b3d32c9431fb3af0fa8c5 | [
"Apache-2.0"
] | permissive | https://github.com/uw-it-aca/spotseeker_server | 039e65824887a8303f12a3f07e730ca061bab2d5 | 5f21a58b4084d798b1c4f87721b63bad01ac4f36 | refs/heads/main | 2023-08-16T08:53:38.124987 | 2023-04-14T21:41:34 | 2023-04-14T21:41:34 | 11,489,866 | 6 | 7 | Apache-2.0 | false | 2023-08-15T22:45:30 | 2013-07-17T23:18:17 | 2023-03-28T12:25:44 | 2023-08-15T22:45:28 | 2,015 | 9 | 8 | 1 | Python | false | false | # Copyright 2023 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
'''
Tells unit test framework to use full assertion failure messages,
i.e. it will include both the standard 'a != b' messages as well
as the custom message.
'''
import unittest
unittest.TestCase.longMessage = True
| UTF-8 | Python | false | false | 304 | py | 139 | long_message.py | 120 | 0.763158 | 0.743421 | 0 | 11 | 26.636364 | 65 |
mneira10/MauricioNeira_Ejercicio27 | 9,028,021,281,153 | b643fdb1296edbf7bf73d4344f6cc414059f9cdd | 7fb48d59ec2152635fa10fbbeb529aecaa30e120 | /2nda_parte/graph.py | 2d9c196ac6a50506a5987c0c2422a8a2e08c4f6c | [] | no_license | https://github.com/mneira10/MauricioNeira_Ejercicio27 | 532f3998ea25c8c8b38736c917216f607ac0b89f | 25097449f91e616bbe98d6b175d74bff5488fb8f | refs/heads/master | 2020-04-06T12:52:31.840120 | 2018-11-14T12:48:04 | 2018-11-14T12:48:04 | 157,474,330 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib
matplotlib.use("Agg")
import numpy as np
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
from scipy.stats import norm
onlyfiles = [f for f in listdir("./") if (isfile(join("./", f)) and f.endswith('.dat'))]
# string for string in list if string.endswith("foo")
data = np.array([])
for f in onlyfiles:
nums = np.loadtxt(f)
data = np.concatenate([nums, data])
plt.hist(data,normed = True)
mu, std = norm.fit(data)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
# plt.show()
plt.savefig('sample.pdf')
| UTF-8 | Python | false | false | 712 | py | 7 | graph.py | 2 | 0.667135 | 0.658708 | 0 | 30 | 22.733333 | 88 |
MauricioFT1/MauTrick-s-Project | 13,030,930,804,513 | dd25c9364884fae9e7a2c5d152716b33fa1a1b97 | 77f029f7526dc751cabd97b6fd22b35bbcbdcff6 | /backend/backend/app/views/__init__.py | 2194e56ab997708c31a528c0612fc9f36ace150c | [] | no_license | https://github.com/MauricioFT1/MauTrick-s-Project | c34e8112dcda5c01eabc4e1ee93c2e3031295c61 | 46f0dd5d191f82c50ef802340166623b15f09819 | refs/heads/master | 2022-03-15T18:19:30.130840 | 2019-12-05T17:41:59 | 2019-12-05T17:41:59 | 221,292,582 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .championships import (ChampionshipCreate, ChampionshipUpdate, ChampionshipDestroy, ChampionshipGet, ChampionshipList, EditionChampionshipList, EditionChampionshipGet)
from .editions import (EditionList, EditionDestroy, EditionGet, EditionUpdate)
from .teams import (TeamList, TeamDestroy, TeamGet, TeamUpdate)
from .peoples import (PeopleList, PeopleGet)
from .noticias import NoticiaList
from .brazilian import BrazilianList
| UTF-8 | Python | false | false | 432 | py | 42 | __init__.py | 22 | 0.851852 | 0.851852 | 0 | 6 | 71 | 172 |
young-amateurs-rc/arbys | 19,129,784,358,424 | eb1df44e20683b8e84c02243806da9b0b41fed6d | 35a4c0ecdc28d7b7aaf03a72114d6fa40ab76012 | /exceptions.py | e0a93fb609a4ccc0fdc2805c98c3059b066bc652 | [
"MIT"
] | permissive | https://github.com/young-amateurs-rc/arbys | 5becb50805f790f5384e631c975e3a1ee0ddd529 | eaccc4634ca9bdc2880761934b033b1907fbdc46 | refs/heads/master | 2020-03-29T13:56:04.997915 | 2019-12-16T19:56:58 | 2019-12-16T19:56:58 | 149,989,681 | 9 | 8 | MIT | false | 2019-07-26T02:30:47 | 2018-09-23T14:01:30 | 2019-07-25T16:01:01 | 2019-07-26T02:30:47 | 190 | 7 | 7 | 0 | Python | false | false | """Exceptions that are used in the framework"""
class BaseFrameworkError(Exception):
"""Base exception for all custom errors."""
pass
class UserBotError(BaseFrameworkError):
"""Raised when framework is run with a user account."""
pass
class HandlerError(BaseFrameworkError):
"""Raised when there is a problem with a handler function."""
pass
| UTF-8 | Python | false | false | 354 | py | 53 | exceptions.py | 49 | 0.748588 | 0.748588 | 0 | 16 | 21.125 | 62 |
cdearborn/sushy-oem-idrac | 14,800,457,329,648 | 5e850c93bc4d48cdbedfb10fff77d36700ea52dc | 0c986d3da4e7bfdbd9d8a9ceb8f2439b78ab4235 | /sushy_oem_idrac/tests/functional/vmedia_boot.py | 09854cf90736a289bc6cd6483ee56dc33036032a | [
"Apache-2.0"
] | permissive | https://github.com/cdearborn/sushy-oem-idrac | 10ada9d5c7ac17faba6518fdd771b81ad132cd60 | 7b31b0b0c679fdf015fdb617f600c66dec1c322a | refs/heads/master | 2020-12-28T11:04:19.593556 | 2019-12-11T11:20:05 | 2019-12-11T11:20:05 | 238,305,153 | 0 | 3 | Apache-2.0 | false | 2020-04-21T13:16:43 | 2020-02-04T20:59:24 | 2020-02-04T20:59:55 | 2020-04-20T14:24:51 | 37 | 0 | 2 | 0 | Python | false | false | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import sushy
from sushy_oem_idrac import utils
USERNAME = 'root'
PASSWORD = 'calvin'
SERVICE_ROOT = 'http://demo.snmplabs.com:80/redfish/v1'
SYSTEM_ID = '437XR1138R2'
BOOT_DEVICE = sushy.VIRTUAL_MEDIA_CD
BOOT_MODE = sushy.BOOT_SOURCE_MODE_BIOS
BOOT_IMAGE = 'http://demo.snmplabs.com/mini.iso'
LOG = logging.getLogger(__name__)
def main():
"""Boot Dell node from virtual media device"""
LOG.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
LOG.addHandler(handler)
authenticator = sushy.auth.BasicAuth(USERNAME, PASSWORD)
conn = sushy.Sushy(SERVICE_ROOT, verify=False, auth=authenticator)
LOG.info('connected to %s', SERVICE_ROOT)
system = conn.get_system(
os.path.join(SERVICE_ROOT, 'Systems', SYSTEM_ID))
LOG.info('read system resource %s', system.identity)
for manager in system.managers:
LOG.info('trying manager %s', manager.identity)
for v_media in manager.virtual_media.get_members():
if BOOT_DEVICE not in v_media.media_types:
continue
LOG.info(
'device %s is present at %s', BOOT_DEVICE, manager.identity)
try:
manager_oem = manager.get_oem_extension('Dell')
except sushy.exceptions.OEMExtensionNotFoundError:
LOG.info('Dell OEM not found')
continue
LOG.info('found Dell OEM extension at %s', manager.identity)
if v_media.inserted:
v_media.eject_media()
LOG.info('ejected virtual media')
v_media.insert_media(BOOT_IMAGE, inserted=True,
write_protected=True)
LOG.info('inserted boot image %s into virtual media', BOOT_IMAGE)
# the caller (e.g. ironic) sets boot mode first, boot device second
system.set_system_boot_source(
BOOT_DEVICE, enabled=sushy.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=BOOT_MODE)
# with Dell, patching System tree does not work as expected
# we need to reboot for the new boot mode to take effect
utils.reboot_system(system)
LOG.info('set boot mode to %s', BOOT_MODE)
manager_oem.set_virtual_boot_device(
BOOT_DEVICE, persistent=False, manager=manager, system=system)
LOG.info('set boot device to %s', BOOT_DEVICE)
# real caller should better not use our way to reboot
utils.reboot_system(system)
LOG.info('system rebooted')
return 0
if __name__ == '__main__':
sys.exit(main())
| UTF-8 | Python | false | false | 3,264 | py | 8 | vmedia_boot.py | 5 | 0.635723 | 0.630821 | 0 | 109 | 28.944954 | 79 |
maliciousgroup/RedCisco_Command_Interepreter | 12,309,376,309,093 | fd0b29dc55e5614a6344e5844a1df3a025703c32 | 0d7ed63946224fce7970d27ad15596e33cef34d7 | /src/core/command/RunCommand.py | 11a560a5eca3c5c8e14ee329bfbf3149cc945582 | [] | no_license | https://github.com/maliciousgroup/RedCisco_Command_Interepreter | 855f516e2efff90ffe3de201dce4cb4fcee8fe10 | 8588e42d49a107037093a9135c1d7ae8b506e397 | refs/heads/main | 2023-03-31T06:26:18.086968 | 2021-04-05T19:59:06 | 2021-04-05T19:59:06 | 354,955,758 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import asyncio
import netdev
from src.core.base.BaseCommand import BaseCommand
from src.core.registry.OptionsRegistry import OptionRegistry
class RunCommand(BaseCommand):
helper = {
'name': 'run',
'help': 'This command will start the connection process',
'usage': 'run'
}
def __init__(self, command: str, print_queue: asyncio.Queue):
super().__init__()
self.command: str = command
self.print_queue: asyncio.Queue = print_queue
self.options: OptionRegistry = OptionRegistry()
self.end_points: list = []
async def main(self) -> None:
"""
Coroutine that starts command logic
:returns: None
"""
await self.execute()
async def execute(self) -> None:
"""
Coroutine that handles any execution logic
:returns: None
"""
_options: dict = self.options.get_registry_pairs()
await self.parse_options(_options)
async def parse_options(self, options: dict) -> None:
"""
Class Coroutine that parses the user-supplied options
:param options: User-supplied options
:return: None
"""
"""
host: str = options['host'][0]
username: str = options['username'][0]
password: str = options['password'][0]
secret: str = options['secret'][0]
if not all([host, username, password, secret]):
await self.print_queue.put(('error', f"Required target options are missing\n"))
return
try:
socket.inet_aton(host)
except socket.error:
await self.print_queue.put(('error', f"The host address '{host}' is not a valid IP address.\n"))
return
params: dict = {
'host': host,
'username': username,
'password': password,
'device_type': 'cisco_ios',
'secret': secret
}
enable_docker: str = options['enable_docker'][0]
docker_host: str = options['docker_host'][0]
enable_http: str = options['enable_http'][0]
enable_http_port: str = options['enable_http_port'][0]
enable_tftp: str = options['enable_tftp'][0]
enable_tftp_port: str = options['enable_tftp_port'][0]
if enable_docker == 'true':
try:
socket.inet_aton(docker_host)
except socket.error:
await self.print_queue.put(
('error', f"The docker host address '{docker_host}' is not a valid IP address.\n"))
return
if enable_http == 'true':
if not 1 <= int(enable_http_port) <= 65535:
await self.print_queue.put(
('error', f"The HTTP port {enable_http_port} is out of range. (1-65535).\n"))
return
tag: str = 'malicious_http'
path: str = 'src/docker/services/http'
ports: dict = {'8000/tcp': enable_http_port}
builder = BuildHandler(self.print_queue)
end_point = f"http://{docker_host}:{enable_http_port}/"
if end_point not in self.end_points:
self.end_points.append(end_point)
await builder.build_image(path, tag, ports)
if enable_tftp == 'true':
if not 1 <= int(enable_tftp_port) <= 65535:
await self.print_queue.put(
('error', f"The Trivial FTP port {enable_tftp_port} is out of range. (1-65535).\n"))
return
tag: str = 'malicious_tftp'
path: str = 'src/docker/services/tftp'
ports: dict = {'9069/udp': enable_tftp_port}
builder = BuildHandler(self.print_queue)
end_point = f"tftp://{docker_host}:{enable_tftp_port}/"
if end_point not in self.end_points:
self.end_points.append(end_point)
await builder.build_image(path, tag, ports)
enable_remote: str = options['enable_remote'][0]
remote_host: str = options['remote_host'][0]
enable_remote_http: str = options['enable_remote_http'][0]
enable_remote_http_port: str = options['enable_remote_http_port'][0]
enable_remote_tftp: str = options['enable_remote_tftp'][0]
enable_remote_tftp_port: str = options['enable_remote_tftp_port'][0]
if enable_remote == 'true':
try:
socket.inet_aton(remote_host)
except socket.error:
await self.print_queue.put(
('error', f"The remote host address '{remote_host}' is not a valid IP address.\n"))
return
if enable_remote_http == 'true':
if not 1 <= int(enable_remote_http_port) <= 65535:
await self.print_queue.put(
('error', f"The HTTP port {enable_remote_http_port} is out of range. (1-65535).\n"))
return
end_point = f"http://{remote_host}:{enable_remote_http_port}/"
if end_point not in self.end_points:
self.end_points.append(end_point)
if enable_remote_tftp == 'true':
if not 1 <= int(enable_remote_tftp_port) <= 65535:
await self.print_queue.put(
('error', f"The Trivial FTP port {enable_remote_tftp_port} is out of range. (1-65535).\n"))
return
end_point = f"tftp://{remote_host}:{enable_remote_tftp_port}/"
if end_point not in self.end_points:
self.end_points.append(end_point)
# Create Connection
try:
await self.print_queue.put(('bold', f"Attempting connection to the target device '{host}'"))
async with netdev.create(**params) as ios:
await self.print_queue.put(('success', f"Successfully connected to the target device '{host}'"))
device: DeviceHandler = DeviceHandler(self.print_queue, self.end_points, params)
except netdev.DisconnectError as e:
await self.print_queue.put(('error', f"{e.__str__()}\n"))
return
except netdev.TimeoutError as e:
await self.print_queue.put(('error', f"{e.__str__()}\n"))
return
except Exception as e:
await self.print_queue.put(('error', f"{e.__str__()}\n"))
return
"""
| UTF-8 | Python | false | false | 6,716 | py | 7 | RunCommand.py | 5 | 0.52293 | 0.51221 | 0 | 162 | 39.45679 | 115 |
katabr/Stepik_auto_test_course | 678,604,879,301 | 8f9360ea21efdbeacb1b233c09ea62afcfbf2dcf | ccd415c933f82e6839e13ce2c5d9b0f11a84f058 | /lessons_python/param_test.py | 886d8c6ac7dd3d24cf460131b815ea83ab857586 | [] | no_license | https://github.com/katabr/Stepik_auto_test_course | 21e0999377ddc86cadb19a605e4ecc55aa6f7504 | 4a0d4d444de09564a64a9e72e716ffaa0287bacc | refs/heads/master | 2021-07-25T13:49:37.824621 | 2020-10-15T12:15:42 | 2020-10-15T12:15:42 | 226,271,386 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Тест проверяет возможность создания новой папки
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
import unittest
import sys
import time
class PythonOrgSearch(unittest.TestCase):
def setUp(self):
#self.driver = webdriver.Remote(desired_capabilities={
# "browserName": "firefox",
# "urlName": "MAC",
#})
class ParamTest(unittest.TestCase):
capabilities = None
self.driver = webdriver.Remote(desired_capabilities=self.capabilities)
#self.driver = webdriver.Firefox()
# открываем браузер
browser = self.driver
browser.get("http://192.168.16.66:8090/axel_web/#/requests.html")
# аутентификация
login = browser.find_element_by_css_selector('[type="text"]')
login.send_keys("vasiliev_nv")
button_enter = browser.find_element_by_css_selector('[type="submit"]')
button_enter.click()
def test_1_add_folder(self):
# заходим во вкладку "Папки"
browser = self.driver
link_folders = browser.find_element_by_css_selector('nav>div>ul>li')
link_folders.click()
# выбираем папку Избранное
folder_favourites = browser.find_elements_by_css_selector('[class="group"]')
folder_favourites[1].click()
# выбираем кнопку Добавить
add_data = browser.find_elements_by_css_selector('[type="button"]')
add_data[2].click()
# открываем выпадающий список типов объектов
elem_kind = browser.find_element_by_css_selector('#kind')
elem_kind.click()
# добавляем запись
# выбираем объект Папка
add_data = browser.find_elements_by_css_selector('[value="F"]')
add_data[0].click()
# добавляем запись
# Открывается карточка добавления нового объекта
# вводим Название папки
folder_name = browser.find_element_by_css_selector('#name')
folder_name.click()
folder_name_text = "_ 000_Новая тестовая папка"
folder_name.clear()
folder_name.send_keys(folder_name_text)
# выбираем типы содержимого папки (Папка и Документ)
content_of_folder1 = browser.find_element_by_css_selector('#F')
content_of_folder1.click()
content_of_folder2 = browser.find_element_by_css_selector('#D')
content_of_folder2.click()
# Сохраняем
save_folder = browser.find_element_by_css_selector('[class="btn float-left save-icon text-white btn-secondary btn-sm"]')
save_folder.click()
# Удаляем созданную папку
# Сортируем по названию
#sort_folder = browser.find_elements_by_css_selector('[role="columnheader"]')
#sort_folder[2].click()
#Выбираем последнюю созданну папку
choose1_folder = browser.find_elements_by_css_selector('[aria-colindex="3"]')
actionChains = ActionChains(browser)
actionChains.context_click(choose1_folder[-1]).perform()
#choose1_folder[-1].context_click()
time.sleep(2)
#Нажимаем кнопку Удалить
del_folder = browser.find_elements_by_css_selector('div>ul>li>span')
del_folder[5].click()
time.sleep(2)
# Подтверждаем удаление
del_folder_yes = browser.find_element_by_css_selector('footer>div>button')
del_folder_yes.click()
def tearDown (self):
time.sleep(2)
# закрываем браузер
self.driver.quit()
if __name__ == "__main__":
Param_Test.capabilities = {
"browserName": sys.argv[1],
#"urlName": sys.argv[2],
}
unittest.main() | UTF-8 | Python | false | false | 4,377 | py | 34 | param_test.py | 27 | 0.594199 | 0.584446 | 0 | 135 | 27.859259 | 132 |
montaguegabe/earth-speaks | 7,730,941,160,782 | e5779857f1907aa28be26715cea44aefdd768b28 | f87b5c5df7080e43634b5c27ef1e45a33e76bdfc | /common.py | 172e7ad8942a1acd0f8a924d6afa27e9550eb836 | [] | no_license | https://github.com/montaguegabe/earth-speaks | 2f01dfffc61b39a13fb9950d6fae6fb6e686261e | 7011a3b548474776f2d61694d7dda3cf3122b939 | refs/heads/master | 2022-12-11T05:48:27.006664 | 2018-05-09T03:27:28 | 2018-05-09T03:27:28 | 132,697,128 | 1 | 0 | null | false | 2022-06-21T21:19:05 | 2018-05-09T03:31:09 | 2018-11-21T20:19:59 | 2022-06-21T21:19:05 | 23,734 | 1 | 0 | 4 | Jupyter Notebook | false | false | from os import listdir
from os.path import isfile, join
import datetime
# For map-reduce
def flatten_generator(lol):
for list2 in lol:
for item in list2:
yield item
class Logger(object):
def __init__(self, fname):
super(Logger, self).__init__()
self.fname = fname
self.log = open(fname, 'a')
def __call__(self, str2):
print(str2)
self.log.write(datetime.datetime.now().isoformat() + ': ' + str2 + '\n')
# Returns a logging function that will write to the file specified
def get_logger(fname):
return Logger(fname)
# Makes a string usable as a file name
def make_fname_safe(filename):
return ''.join([c for c in filename if c.isalpha() or c.isdigit()]).rstrip()
| UTF-8 | Python | false | false | 754 | py | 25 | common.py | 8 | 0.627321 | 0.62069 | 0 | 27 | 26.925926 | 80 |
ryanvolz/botcbot | 18,622,978,237,147 | ca227e9c8c9aa383902ffff60597783bd136a49b | e1a13ad1d0495fab04b0b3f6a59d12e6f67aae42 | /resources/basegame/characters/Drunk.py | 325d445e64f5f9697bf3fe16906ba8e2a6dc3399 | [] | no_license | https://github.com/ryanvolz/botcbot | 336bc7c42bcd5bd39497eea409415c0b6c5c5b56 | cb1b0a04853a89c0fd945c5823d6625fc68fec28 | refs/heads/master | 2022-06-08T22:11:17.528803 | 2020-05-04T04:58:17 | 2020-05-04T04:58:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Contains the Drunk class."""
from lib.logic.Character import Outsider
class Drunk(Outsider):
    """The Drunk."""
    # Display name of the character.
    name: str = "Drunk"
    # Presumably marks playtest-only characters; the Drunk is not one.
    playtest: bool = False
| UTF-8 | Python | false | false | 172 | py | 101 | Drunk.py | 97 | 0.645349 | 0.645349 | 0 | 10 | 16.2 | 40 |
ronaldoussoren/pyobjc | 3,762,391,377,989 | c9a635cd5754d4272b2bc702162bd8991b9b83c5 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-MetalPerformanceShaders/PyObjCTest/test_mpsneuralnetwork_mpscnngroupnormalization.py | b899a88ab30c00a4f6f8fdc79239080393d916ef | [
"MIT"
] | permissive | https://github.com/ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | false | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | 2023-06-25T00:34:37 | 2023-06-25T02:49:06 | 153,281 | 403 | 35 | 86 | Python | false | false | from PyObjCTools.TestSupport import TestCase, min_sdk_level
import objc
import MetalPerformanceShaders
class TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper(
    MetalPerformanceShaders.NSObject
):
    """Stub implementation of the MPSCNNGroupNormalizationDataSource methods.

    The return values are dummies; the companion TestCase below only checks
    the Objective-C type encodings PyObjC derives from these signatures.
    """
    def gamma(self):
        return 1
    def beta(self):
        return 1
    def numberOfFeatureChannels(self):
        return 1
    def numberOfGroups(self):
        return 1
    def updateGammaAndBetaWithGroupNormalizationStateBatch_(self, a):
        return 1
    def epsilon(self):
        return 1
    def supportsSecureCoding(self):
        return 1
    def copyWithZone_(self, zone):
        return 1
class TestMPSNeuralNetwork_MPSCNNGroupNormalization(TestCase):
    """Check the MPSCNNGroupNormalizationDataSource protocol and the
    Objective-C type encodings of the helper class's methods."""
    @min_sdk_level("10.15")
    def test_protocols(self):
        self.assertProtocolExists("MPSCNNGroupNormalizationDataSource")
    def test_methods(self):
        # b"^f" / b"f": Objective-C encodings asserted for the float-related accessors.
        self.assertResultHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.beta, b"^f"
        )
        self.assertResultIsVariableSize(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.beta
        )
        self.assertResultHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.gamma, b"^f"
        )
        self.assertResultIsVariableSize(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.gamma
        )
        self.assertResultHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.numberOfFeatureChannels,
            objc._C_NSUInteger,
        )
        self.assertResultHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.numberOfGroups,
            objc._C_NSUInteger,
        )
        self.assertResultIsBOOL(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.updateGammaAndBetaWithGroupNormalizationStateBatch_
        )
        self.assertResultHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.epsilon, b"f"
        )
        self.assertResultIsBOOL(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.supportsSecureCoding
        )
        self.assertArgHasType(
            TestMPSNeuralNetwork_MPSCNNGroupNormalizationHelper.copyWithZone_,
            0,
            b"^{_NSZone=}",
        )
| UTF-8 | Python | false | false | 2,226 | py | 4,006 | test_mpsneuralnetwork_mpscnngroupnormalization.py | 3,256 | 0.684187 | 0.678347 | 0 | 79 | 27.177215 | 115 |
hayeonk/leetcode | 3,487,513,485,932 | 5de6b84a01257c4f7f433d8f74fd580037228d86 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/find_duplicate_subtrees.py | ea1a96145408cb2ea73082fe51ce569c31bacb79 | [] | no_license | https://github.com/hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import Counter
class Solution(object):
    def findDuplicateSubtrees(self, root):
        """Return one root node for every subtree (shape + values) that
        occurs at least twice in the tree rooted at *root*."""
        count = Counter()
        ans = []
        def serial(node):
            # Pre-order serialization; identical subtrees yield identical strings.
            if not node:
                return "#"
            s = "%s.%s.%s" % (str(node.val), serial(node.left), serial(node.right))
            count[s] += 1
            if count[s] == 2:
                # Record each duplicate exactly once, on its second sighting.
                ans.append(node)
            return s
        serial(root)
return ans | UTF-8 | Python | false | false | 641 | py | 447 | find_duplicate_subtrees.py | 446 | 0.486739 | 0.483619 | 0 | 23 | 26.913043 | 83 |
punitbasnet/Network-Monitoring-and-Database-Sensor-Data-Acquisition | 3,083,786,536,032 | 1974f75e5ae2f9933eb3c79dab4da265dc0c15f8 | 39172d5cde1a50e782da6b98d321549638716b7f | /Node_Three/code/verify.py | b42bbd6dc4740968d6dd6ab67bf9c3325e5b9514 | [] | no_license | https://github.com/punitbasnet/Network-Monitoring-and-Database-Sensor-Data-Acquisition | 7f9432419d502a17c0c7a6d8f1c880b706ef9912 | 2fcad8e8b5f2bde52520cb95535318a3bf70c918 | refs/heads/master | 2020-05-15T00:34:27.931966 | 2019-04-18T04:26:59 | 2019-04-18T04:26:59 | 182,014,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import hashlib
import hmac
#Files made after execution of this module are hmaclog_date.txt, vreadings_date.txt and badreadings.txt
def verify(thefile):
    """Verify the sensor readings in *thefile* against their stored HMACs.

    Recomputes an HMAC-SHA256 (keyed by a PBKDF2-derived key) for every line
    of *thefile* and compares it with the matching line of
    /home/sysop/repository/incoming/hmacreadings_<date>.txt.  Writes:
      hmaclog_<date>.txt   - summary plus any failing lines
      vreadings_<date>.txt - verified lines ("Empty" if counts mismatch)
      breadings_<date>.txt - bad/unverified lines
    Returns the '<date>.txt' suffix taken from the input file name.
    """
    datetxt = thefile[-14:]
    date = datetxt.strip('.txt')
    # Site constants; location + coordinates double as the PBKDF2 salt.
    Loc = "Baker"
    bLoc = Loc.encode()
    LocID = "39.326265,-82.103506"
    bLocID = LocID.encode()
    pwd = b"sensor"
    # The key depends only on constants, so derive it once instead of
    # re-running 10000 PBKDF2 iterations for every input line.
    salt = bLoc + bLocID
    keylength = 12
    NumKDHashes = 10000
    key = hashlib.pbkdf2_hmac('sha256', pwd, salt, NumKDHashes, dklen=keylength)
    readhr = []
    with open(thefile, 'r') as f:
        for line in f:
            datablock = line.encode()
            ourHMAC = hmac.new(key, datablock, hashlib.sha256)
            readhr.append(ourHMAC.hexdigest() + '\n')
    # Stored HMACs and the raw readings (the original left both handles open).
    with open('/home/sysop/repository/incoming/hmacreadings_{}'.format(datetxt), 'r') as hmacs:
        readhs = hmacs.readlines()
    with open(thefile, 'r') as f:
        read = f.readlines()
    lhs = len(readhs)
    lhr = len(readhr)
    badreads = 0
    count = 0
    hmaclog = 'hmaclog_{}'.format(datetxt)
    vreadings = 'vreadings_{}'.format(datetxt)
    breadings = 'breadings_{}'.format(datetxt)
    g = open(hmaclog, 'w')
    h = open(vreadings, 'w')
    i = open(breadings, 'w')
    if (lhs != lhr):
        # Counts disagree: every reading is treated as unverifiable.
        g.write("The readings from {} had INCONSISTENT NUMBER OF ENTRIES".format(date))
        h.write("Empty")
        while (count != lhr):
            i.write(read[count])
            count += 1
    else:
        while (count != lhs):
            a = read[count].split()
            try:
                # Sanity-check the numeric fields (except for Conductivity rows);
                # a non-numeric field raises ValueError and marks the line bad.
                if (a[0] != 'Conductivity'):
                    float(a[4])
                    float(a[6])
                if (readhs[count] != readhr[count]):
                    badreads = badreads + 1
                    g.write(read[count])
                    i.write(read[count])
                else:
                    h.write(read[count])
            except ValueError:
                badreads = badreads + 1
                g.write(read[count])
                i.write(read[count])
            count += 1
    # Summary line is written in both branches (badreads stays 0 on mismatch).
    if (badreads == 0):
        g.write("All the values were VERIFIED for the readings from {}".format(date))
    else:
        g.write("The {} value(s) BELOW FAILED VERIFICATION in the readings from {}".format(badreads, date))
    g.close()
    h.close()
    i.close()
    return datetxt
| UTF-8 | Python | false | false | 2,436 | py | 21 | verify.py | 14 | 0.541872 | 0.524631 | 0 | 74 | 31.918919 | 111 |
DrakeWYH/OnlineEducationSite | 16,303,695,886,158 | 6ef49306b1d15128ca0975964ce063b09da31f00 | 0b30e2539a5ac26afc0f7cceb8d7af4cba9ef71a | /apps/question/migrations/0031_auto_20180709_1421.py | 1402c7d4888c68c6a2b01400d9cd8d7c3abe9e96 | [] | no_license | https://github.com/DrakeWYH/OnlineEducationSite | 23b4b731fd4653b07ac29d669039cb58b4d84629 | 9f1d5944f89005aea139a176055e497761f226ea | refs/heads/master | 2022-04-14T23:50:10.046285 | 2020-04-19T04:14:19 | 2020-04-19T04:14:19 | 109,455,865 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-09 14:21
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('question', '0030_auto_20180423_1529'),
]
operations = [
migrations.AlterField(
model_name='question',
name='question',
field=DjangoUeditor.models.UEditorField(default='', verbose_name='问题'),
),
]
| UTF-8 | Python | false | false | 534 | py | 294 | 0031_auto_20180709_1421.py | 77 | 0.598113 | 0.535849 | 0 | 21 | 23.238095 | 83 |
dongwudanci/fairy-1 | 8,873,402,461,321 | 8d115036716bb12c42a7e901aec61175d41c76e0 | 872592bbd188d5730804eb07b7cff26f7a627509 | /src/Account.py | 7da2a2a4169cae088dd00ae9ab1e387950e850ea | [] | no_license | https://github.com/dongwudanci/fairy-1 | 904b9b2acb77e502730b383f7e8b3852bf3af38a | 7fe92ad430f9b29e7ca448613ba98c5f953d4028 | refs/heads/master | 2023-08-07T08:38:50.303302 | 2021-09-06T22:42:04 | 2021-09-06T22:42:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from src.MySql import *
from src.String import md5, token_encode
class Account:
    """Login helper built on the project's MySql wrapper (src.MySql)."""
    def __init__(self):
        self.db: DB = DB()
        # NULL presumably comes from the star import of src.MySql and marks "unset".
        self.id: str = NULL
        self.password: str = NULL
        self.email: str = NULL
    def login(self, email, password):
        """Return an encoded token on success, False on bad credentials.

        NOTE(review): the WHERE clause is built via %-interpolation, so a
        crafted *email* can inject SQL — switch to a parameterized query if
        the DB wrapper supports it.  The password is hashed with md5() from
        src.String (apparently unsalted) — confirm that is intended.
        """
        res = self.db.table('user').where("email = '%s' and password = '%s'" % (email, md5(password))).item()
        if res is None:
            return False
        else:
            return token_encode(id=res['id'], email=res['email'], password=res['password'])
| UTF-8 | Python | false | false | 530 | py | 8 | Account.py | 4 | 0.566038 | 0.562264 | 0 | 17 | 30.176471 | 109 |
Li96224/ReservationUITest | 6,493,990,570,780 | 5abc3f8d8593d50c44b629128b1252efac857960 | cf2602265f4a2156f36f19b908f4c70df1486e41 | /TestCases/test_recommended_schedule.py | 95c303cbb950885a280dcbcd34a3f0550900e1bd | [] | no_license | https://github.com/Li96224/ReservationUITest | 72d322377c2d6eaa48c700037ca214a9a9653fc7 | 99c67181c51443c47dda7b248df7762fc8252473 | refs/heads/master | 2023-08-22T19:13:02.715540 | 2021-10-08T14:55:32 | 2021-10-08T14:55:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
from PageObject.recommended_schedule_page import RecommendedSchedulePage
import allure
@allure.title("推荐课表页-推荐课程信息")
@pytest.mark.usefixtures('get_home_page')
class TestClassScheduleCard:
    """UI checks for the course cards on the recommended-schedule page."""
    def test_class_schedule_card(self, get_home_page):
        # Construct the page object once; the original re-created it for
        # every single call on the same driver.
        page = RecommendedSchedulePage(get_home_page)
        assert ('CXWORX腰腹核心' == page.get_recommended_course_name_text())
        assert ('高效燃脂' == page.get_training_effect_1_text())
        page.swipe_by_dorection('up')
        assert ('普拉提' == page.get_Selected_courses_Category_1_text())
        assert ('雕刻线条' == page.get_training_effect_2_text())
        assert ('RPM 燃脂骑行' == page.get_Selected_courses_Category_2_text())
        page.swipe_by_dorection('up')
        assert ('拉伸放松' == page.get_training_effect_3_text())
| UTF-8 | Python | false | false | 1,225 | py | 51 | test_recommended_schedule.py | 18 | 0.718886 | 0.713664 | 0 | 21 | 52.714286 | 116 |
tmolcard/databricks-snowflake | 2,576,980,391,300 | 263e258c66bf4c99ab4a640a04607e843f1c9d03 | a90448adbea8741b39596dc87322059bb6be42a7 | /notebooks/Users/molcard.t@gmail.com/load_customer.py | 41a90a2e71da5192f290a5324fa43009627359dd | [] | no_license | https://github.com/tmolcard/databricks-snowflake | 2a96f6e7c6917b7f426a721b3fd7d18aea050c49 | 4e99b9a9654e0c1ce6b457335878becbf7b6fcab | refs/heads/master | 2022-11-27T04:38:48.599392 | 2020-08-01T12:20:28 | 2020-08-01T12:20:28 | 283,496,107 | 0 | 0 | null | false | 2020-08-01T12:16:03 | 2020-07-29T12:42:37 | 2020-07-31T16:44:12 | 2020-08-01T12:16:02 | 17 | 0 | 0 | 0 | Python | false | false | # Databricks notebook source
dbutils.library.installPyPI("mlflow")
dbutils.library.restartPython()
# COMMAND ----------
import mlflow
mlflow.start_run()
# COMMAND ----------
# File storage session parameters
appID = dbutils.secrets.get("datastorage", "app_id")
secret = dbutils.secrets.get("datastorage", "secret")
tenantID = dbutils.secrets.get("datastorage", "tenant_id")
# Configure file storage session
spark.conf.set("fs.azure.account.auth.type", "OAuth")
spark.conf.set("fs.azure.account.oauth.provider.type", "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider")
spark.conf.set("fs.azure.account.oauth2.client.id", appID)
spark.conf.set("fs.azure.account.oauth2.client.secret", secret)
spark.conf.set("fs.azure.account.oauth2.client.endpoint", "https://login.microsoftonline.com/" + tenantID + "/oauth2/token")
spark.conf.set("fs.azure.createRemoteFileSystemDuringInitialization", "true")
# COMMAND ----------
# Get Snowflake credentials.
user = dbutils.secrets.get("snowflake", "user")
password = dbutils.secrets.get("snowflake", "password")
# Snowflake connection options
sf_options = {
"sfUrl": "wt07783.west-europe.azure.snowflakecomputing.com",
"sfUser": user,
"sfPassword": password,
"sfDatabase": "RESTAURANT",
"sfSchema": "PUBLIC",
"sfWarehouse": "COMPUTE_WH"
}
# COMMAND ----------
# Storage parameters
storageAccountName = "restaurantaccount"
fileSystemName = "restaurant"
# File parameters
path = "raw_data/customers"
file = "customers.csv"
# Table parameters
table_name = "CUSTOMERS"
mlflow.log_param("processed_file", "{}/{}".format(path, file))
mlflow.log_param("table", table_name)
# COMMAND ----------
# Read file on the file storage
df = spark.read.csv("abfss://" + fileSystemName + "@" + storageAccountName + ".dfs.core.windows.net/{path}/{file}".format(path=path, file=file), header=True)
df.show()
# COMMAND ----------
from pyspark.sql.functions import col, unix_timestamp
for col_name in ["created_at", "updated_at"]:
df = df.withColumn(
col_name,
unix_timestamp(col(col_name), "yyyy-MM-dd HH:mm:ss").cast("timestamp")
)
df.printSchema()
df.show()
# COMMAND ----------
# Compute age
from datetime import datetime
year = datetime.now().year
df = df.withColumn("age", year - col("dob"))
# COMMAND ----------
# Load Data Frame into table
df.write \
.format("snowflake") \
.options(**sf_options) \
.option("dbtable", table_name) \
.mode("append") \
.save()
mlflow.log_metric("inserted_rows", df.count())
mlflow.end_run() | UTF-8 | Python | false | false | 2,523 | py | 5 | load_customer.py | 4 | 0.693619 | 0.689655 | 0 | 100 | 24.24 | 157 |
kagesaburo27/py1assignment | 3,332,894,642,654 | 0d4e62bd84eff2eb7c698e58afa0aef89780c896 | 56e4aafbb757ea1a10c39270948331ec72ffdad7 | /test/pytest.py | 22939d003a114cf42252a562fa8c6fbaf89bdc70 | [
"CC0-1.0"
] | permissive | https://github.com/kagesaburo27/py1assignment | 7e7dff8f8792ca06cefb7b71422a145680b43da6 | 200bed0c8b64005d2411f00c330721c13d92aa73 | refs/heads/main | 2023-08-15T12:40:24.521053 | 2021-09-26T19:38:07 | 2021-09-26T19:38:07 | 410,575,426 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append('.')
from src.CoinGeckoAssignment import *
| UTF-8 | Python | false | false | 73 | py | 3 | pytest.py | 2 | 0.739726 | 0.739726 | 0 | 3 | 22.333333 | 37 |
Francois-ziserman/youtube-metadata | 17,557,826,326,784 | fc172ee65372b619e0446cc4c88b3c8639b62f61 | 6a765c870ee0980dab8eaea660ed1a81e931c58f | /youtube_metadata_csv_to_db.py | d0723b6ccef2f8a22f174a7b28e8934984774ba2 | [] | no_license | https://github.com/Francois-ziserman/youtube-metadata | ec5572a174a53de92343e70ed142152faf52c90b | e080e363e2f9cc3f31850942d5fbb9445eaf510d | refs/heads/master | 2020-03-18T03:19:31.178965 | 2018-05-21T07:43:08 | 2018-05-21T07:43:08 | 134,234,644 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sqlalchemy import *
import csv
from datetime import datetime
# Timestamp layout of the CSV exports; callers strip the trailing 5-char
# suffix before parsing (see Db.get_datetime).
DT_FORMAT = "%Y-%m-%dT%H:%M:%S"
# Directory holding the exported CSV files.
CSV_PATH = "/Users/francois/Google Drive/these_pauline/csv/"
class Db:
    """Importer that loads YouTube video/comment CSV exports into SQLite.

    The schema (tables `video` and `comment`) is created via SQLAlchemy on
    construction; rows are inserted one by one, counting and reporting lines
    that fail to parse.
    """
    def __init__(self):
        self.engine = create_engine('sqlite:///dbyoutube.sql')
        self.metadata = MetaData()
        self.table_video = Table('video', self.metadata,
                                 Column('category', String(10), nullable=False),
                                 Column('name', String(25), nullable=False),
                                 Column('id', String(20), nullable=False),
                                 Column('title', String(160), primary_key=True, nullable=False),
                                 Column('description', String(500), nullable=False),
                                 Column('publishedAt', DateTime(), nullable=False),
                                 Column('duration', Integer(), nullable=False),
                                 Column('views', Integer(), nullable=False),
                                 Column('likes', Integer(), nullable=False),
                                 Column('dislikes', Integer(), nullable=False),
                                 Column('comments', Integer(), nullable=False),
                                 Column('commentLock', Boolean(), nullable=False)
                                 )
        self.table_comment = Table('comment', self.metadata,
                                   Column('videoId', String(20), nullable=False),
                                   Column('id', String(25), primary_key=True, nullable=False),
                                   Column('author', String(60), nullable=False),
                                   Column('text', String(700), nullable=False),
                                   Column('replies', Integer(), nullable=False),
                                   Column('likes', Integer(), nullable=False),
                                   Column('publishedAt', DateTime(), nullable=False)
                                   )
        self.metadata.create_all(self.engine)
        self.conn = self.engine.connect()
    def read_csv(self, list_file):
        """Process every CSV named in *list_file*: names starting with
        "video" are loaded as videos, all others as comments."""
        with open(list_file, 'r') as file_csv_list:
            for line in file_csv_list:
                file_name = line.replace("\n", "")
                if line.startswith("video"):
                    self.read_videos(file_name)
                else:
                    self.read_comments(file_name)
    def read_videos(self, file_name):
        """Insert the rows of a video CSV (header skipped) into `video`."""
        with open(CSV_PATH + file_name) as file_videos:
            print("read_videos. Start reading csv : " + file_name)
            csv_reader_videos = csv.reader(file_videos, delimiter=',')
            i = 0
            ok = 0
            errors = 0
            for row in csv_reader_videos:
                if i == 0:
                    i += 1
                    continue  # do not add the header in the db
                try:
                    ins = self.table_video.insert()
                    self.conn.execute(ins,
                                      category=row[0],
                                      name=row[1],
                                      id=row[2],
                                      title=row[3],
                                      description=row[4],
                                      publishedAt=self.get_datetime(row[5]),
                                      duration=self.get_int(row[6]),
                                      views=self.get_int(row[7]),
                                      likes=self.get_int(row[8]),
                                      dislikes=self.get_int(row[9]),
                                      comments=self.get_int(row[10]),
                                      commentLock=self.get_bool(row[11])
                                      )
                    ok += 1
                    if ok % 1000 == 0:
                        print("    read_videos. lines: " + str(ok/1000) + " k")
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; any row-level failure counts as an error.
                except Exception:
                    print("    read_videos. error on line " + str(i) + " " + str(row))
                    errors += 1
                i += 1
            print("read_videos. Add lines: " + str(ok) + " Errors: " + str(errors))
    def get_datetime(self, input):
        """Parse a timestamp, dropping the last five characters first
        (presumably a timezone/fraction suffix — confirm export format)."""
        return datetime.strptime(input[:-5], DT_FORMAT)
    def get_int(self, input):
        """Convert a CSV field to int (raises ValueError on bad data)."""
        return int(input)
    def get_bool(self, input):
        """Everything except the literal string 'False' counts as True."""
        return input != 'False'
    def read_comments(self, file_name):
        """Insert the rows of a comment CSV (header skipped) into `comment`."""
        with open(CSV_PATH + file_name) as file_videos:
            print("read_comments. Start reading csv : " + file_name)
            csv_reader_comments = csv.reader(file_videos, delimiter=',')
            i = 0
            ok = 0
            errors = 0
            for row in csv_reader_comments:
                if i == 0:
                    i += 1
                    continue  # do not add the header in the db
                try:
                    ins = self.table_comment.insert()
                    self.conn.execute(ins,
                                      videoId=row[0],
                                      id=row[1],
                                      author=row[2],
                                      text=row[3],
                                      replies=self.get_int(row[4]),
                                      likes=self.get_int(row[5]),
                                      publishedAt=self.get_datetime(row[6])
                                      )
                    ok += 1
                    if ok % 1000 == 0:
                        print("    read_comments. lines: " + str(ok/1000) + " k")
                # See read_videos: narrowed from a bare `except:`.
                except Exception:
                    print("    read_comments. error on line " + str(i) + " " + str(row))
                    errors += 1
                i += 1
            print("read_comments. Add lines: " + str(ok) + " Errors: " + str(errors))
# s = select([table_video])
# r = conn.execute(s)
# for row in r:
# print(row)
# NOTE(review): instantiating here runs at import time and creates/connects
# the SQLite database as a side effect; a `if __name__ == '__main__':` guard
# would make the module importable without touching the DB.
db = Db()
#db.read_csv("csv_list.txt")
| UTF-8 | Python | false | false | 6,072 | py | 2 | youtube_metadata_csv_to_db.py | 1 | 0.409585 | 0.396904 | 0 | 135 | 43.977778 | 96 |
gushedaoren/dianping-bawancang | 16,131,897,188,951 | 207cfa482d72f6e883c471f9223ffb8913bc4204 | 8e46e27f70e7688b9341b14527d8e687f2524321 | /utils/SMMailTool.py | ae757b6537c0905b19b621cec02b8e8eeeac7f64 | [] | no_license | https://github.com/gushedaoren/dianping-bawancang | 71e24973eb445ec4908fe0508cd0ff1080d6bb31 | a01118cc3dccbfd05b264f2756622d66be0e4cd3 | refs/heads/master | 2022-11-17T06:56:34.672935 | 2020-07-13T15:02:40 | 2020-07-13T15:02:40 | 279,333,607 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# Third-party SMTP service settings (placeholder credentials).
from time import sleep  # NOTE(review): imported but unused in this chunk
mail_host="smtp.xxx.com" #SMTP server
mail_user="xxx" #user name
mail_pass="xxxx" #password
sender = 'xxx@xxx.com'
# Implicit-SSL SMTP port, used with smtplib.SMTP_SSL below.
mail_port = 994
def sendSMMail(title, content, receivers):
    """Send a plain-text UTF-8 mail through the configured SSL SMTP server.

    title: subject line.
    content: message body.
    receivers: list of recipient addresses (joined with ';' for the To header).
    Raises smtplib.SMTPException (or a subclass) if login or sending fails.
    """
    message = MIMEText(content, 'plain', 'utf-8')
    message['From'] = sender
    message['To'] = ";".join(receivers)
    message['Subject'] = Header(title, 'utf-8')
    smtpObj = smtplib.SMTP_SSL(mail_host, mail_port)
    try:
        smtpObj.login(mail_user, mail_pass)
        smtpObj.sendmail(sender, receivers, message.as_string())
        print("邮件发送成功")
    finally:
        # Always close the connection, even when login/sendmail raises
        # (the original leaked the connection on failure).
        smtpObj.quit()
| UTF-8 | Python | false | false | 810 | py | 5 | SMMailTool.py | 4 | 0.667969 | 0.660156 | 0 | 35 | 20.857143 | 60 |
AK-1121/code_extraction | 1,872,605,747,648 | b6d0ee1afef4261e4dbba8942af00b85a1c0d5e8 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_269.py | 8befbf4df1bb76a09bb487be7dd1b88cba7aa3d5 | [] | no_license | https://github.com/AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Setting up Django on an internal server (os.environ() not working as expected?)
sys.path.append('/path/to/flup/egg/flup-1.0.1-py2.5.egg')
| UTF-8 | Python | false | false | 140 | py | 29,367 | python_269.py | 29,367 | 0.728571 | 0.692857 | 0 | 2 | 69 | 81 |
polyactis/repos | 3,839,700,803,725 | 59bb259861f587dfa4520895f8cc457fdf31c5da | 98a7b8c539d17946e361495ebb5eed3d83727e38 | /variation/trunk/src/Camp.py | b5ad9a0b9adb0faf8e8dc970fc937b4c86174900 | [] | no_license | https://github.com/polyactis/repos | 8a6a9703ca9ec2f9b75fb5bda06d4d29aace19c1 | 7b402496aae81665e6a915b5021b94d56e034c9d | refs/heads/master | 2020-12-08T16:30:24.175800 | 2013-02-27T19:45:20 | 2013-02-27T19:45:20 | 8,463,267 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
A wrapper for the camp program (Kimmel et al., 2008).
-o ..., --outputFile=... CAMP output.
-d ..., --delim=... default is ", "
-m ..., --missingval=... default is "NA"
-n ..., --sampleNum=... Num of samples
--parallel=... Run Camp on the cluster with standard parameters. The arguement is used for runid
--parallelAll Run Camp on all phenotypes.
-h, --help show this help
--useFloats Use floats in phenotype values.
"""
import sys, getopt, traceback
import os, env
import phenotypeData
import tempfile
# Redirect temp files to a cluster-local scratch directory.
tempfile.tempdir = "/home/cmb-01/bvilhjal/tmp/" #(Temporary) debug hack...
import dataParsers
import snpsdata
# Cluster paths: where results are written and where the qsub jobs
# re-invoke this script from (see runParallel in _run_).
resultDir="/home/cmb-01/bvilhjal/results/"
scriptDir="/home/cmb-01/bvilhjal/Projects/Python-snps/"
def _run_():
	"""Command-line driver (Python 2): parse options, then either submit one
	qsub job per phenotype (--parallel/--parallelAll) or run the CAMP
	association scan locally for a single phenotype and write a .scores CSV.
	"""
	if len(sys.argv)==1:
		print __doc__
		sys.exit(2)
	long_options_list=["outputFile=", "delim=", "missingval=", "sampleNum=", "parallel=", "parallelAll", "useFloats"]
	try:
		opts, args=getopt.getopt(sys.argv[1:], "o:d:m:n:h", long_options_list)
	# NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
	except:
		traceback.print_exc()
		print sys.exc_info()
		print __doc__
		sys.exit(2)
	# Defaults for all options.
	phenotypeFileType=1
	outputFile=None
	delim=","
	missingVal="NA"
	help=0
	withArrayIds=1
	parallel=None
	parallelAll=False
	sampleNum = None
	chromosomes=[1,2,3,4,5]
	useFloats = False
	for opt, arg in opts:
		if opt in ("-h", "--help"):
			help=1
			print __doc__
		elif opt in ("-o", "--outputFile"):
			outputFile=arg
		# NOTE(review): ("--parallel") is a plain string, not a 1-tuple, so
		# `in` does a substring test here (same for the branches below).
		elif opt in ("--parallel"):
			parallel=arg
		elif opt in ("--parallelAll"):
			parallelAll=True
		elif opt in ("-d", "--delim"):
			delim=arg
		elif opt in ("-m", "--missingval"):
			missingVal=arg
		# NOTE(review): "n" is missing its dash — getopt yields "-n", so this
		# branch can never match the short option; verify.
		elif opt in ("n", "--sampleNum"):
			sampleNum = int(arg)
		elif opt in ("--useFloats"):
			useFloats = True
		else:
			if help==0:
				print "Unkown option!!\n"
				print __doc__
			sys.exit(2)
	if len(args)<3 and not parallel:
		if help==0:
			print "Arguments are missing!!\n"
			print __doc__
		sys.exit(2)
	snpsDataFile=args[0]
	phenotypeDataFile=args[1]
	print "CAMP is being set up with the following parameters:"
	print "phenotypeDataFile:",phenotypeDataFile
	if len(args)>2:
		print "Phenotype_id:",args[2]
	print "snpsDataFile:",snpsDataFile
	print "parallel:",parallel
	print "parallelAll:",parallelAll
	print "sampleNum:",sampleNum
	def runParallel(phenotypeIndex,id=""):
		# Write a PBS/csh job script for one phenotype and submit it via qsub.
		#Cluster specific parameters
		phed=phenotypeData.readPhenotypeFile(phenotypeDataFile, delimiter = '\t') #Get Phenotype data
		phenName=phed.getPhenotypeName(phenotypeIndex)
		phenName=phenName.replace("/", "_div_")
		phenName=phenName.replace("*", "_star_")
		outputFile=resultDir+"CAMP_"+parallel+"_"+phenName+id
		shstr="""#!/bin/csh
#PBS -l walltime=24:00:00
#PBS -l mem=6g
#PBS -q cmb
"""
		shstr+="#PBS -N C"+phenName+"_"+parallel+"\n"
		shstr+="set phenotypeName="+parallel+"\n"
		shstr+="set phenotype="+str(phenotypeIndex)+"\n"
		# The job re-invokes this very script without --parallel.
		shstr+="(python "+scriptDir+"Camp.py -o "+outputFile+" "
		if sampleNum:
			shstr+=" -n "+str(sampleNum)+" "
		if useFloats:
			shstr+=" --useFloats "
		shstr+=snpsDataFile+" "+phenotypeDataFile+" "+str(phenotypeIndex)+" "
		shstr+="> "+outputFile+"_job"+".out) >& "+outputFile+"_job"+".err\n"
		f=open(parallel+".sh", 'w')
		f.write(shstr)
		f.close()
		#Execute qsub script
		os.system("qsub "+parallel+".sh ")
	if parallel: #Running on the cluster..
		if parallelAll:
			phed=phenotypeData.readPhenotypeFile(phenotypeDataFile, delimiter = '\t') #Get Phenotype data
			for phenotypeIndex in phed.phenIds:
				runParallel(phenotypeIndex)
		else:
			phenotypeIndex=int(args[2])
			runParallel(phenotypeIndex)
		return
	else:
		phenotypeIndex=int(args[2])
	#Load phenotype file
	phed=phenotypeData.readPhenotypeFile(phenotypeDataFile, delimiter = '\t') #Get Phenotype data
	#Load genotype file
	snpsds=dataParsers.parseCSVData(snpsDataFile, format = 1, deliminator = delim, missingVal = missingVal, withArrayIds = withArrayIds)
	#Checking overlap between phenotype and genotype accessions.
	phenotype=phed.getPhenIndex(phenotypeIndex)
	accIndicesToKeep=[]
	phenAccIndicesToKeep=[]
	numAcc=len(snpsds[0].accessions)
	sys.stdout.write("Removing accessions which do not have a phenotype value for "+phed.phenotypeNames[phenotype]+".")
	sys.stdout.flush()
	for i in range(0, len(snpsds[0].accessions)):
		acc1=snpsds[0].accessions[i]
		for j in range(0, len(phed.accessions)):
			acc2=phed.accessions[j]
			if acc1==acc2 and phed.phenotypeValues[j][phenotype]!='NA':
				accIndicesToKeep.append(i)
				phenAccIndicesToKeep.append(j)
				break
	#Filter accessions which do not have the phenotype value.
	for snpsd in snpsds:
		sys.stdout.write(".")
		sys.stdout.flush()
		snpsd.removeAccessionIndices(accIndicesToKeep)
	print ""
	print numAcc-len(accIndicesToKeep), "accessions removed, leaving", len(accIndicesToKeep), "accessions in all."
	print "Filtering phenotype data."
	phed.removeAccessions(phenAccIndicesToKeep) #Removing accessions that don't have genotypes or phenotype values
	#Ordering accessions according to the order of accessions in the genotype file
	accessionMapping=[]
	i=0
	for acc in snpsds[0].accessions:
		if acc in phed.accessions:
			accessionMapping.append((phed.accessions.index(acc), i))
			i+=1
	phed.orderAccessions(accessionMapping)
	#Filtering monomorphic
	print "Filtering monomorphic SNPs"
	for snpsd in snpsds:
		print "Removed", str(snpsd.filterMonoMorphicSnps()), "Snps"
	#Converting format to 01
	newSnpsds=[]
	sys.stdout.write("Converting data format")
	for snpsd in snpsds:
		sys.stdout.write(".")
		sys.stdout.flush()
		newSnpsds.append(snpsd.getSnpsData())
	print ""
	#Writing phenotype data to CAMP format.
	(fId, phenotypeFile) = tempfile.mkstemp()
	os.close(fId)
	phenVals = phed.getPhenVals(phenotypeIndex,asString=False)
	if not useFloats:
		phenVals = map(int,phenVals)
	phenFile = open(phenotypeFile,"w")
	for value in phenVals:
		phenFile.write(str(value)+"\n")
	phenFile.close()
	chromosome_list = []
	positions_list = []
	scores_list = []
	interaction_positions_list = []
	mafs = []
	marfs = []
	#Writing SNP data to CAMP format.
	for chromosome in chromosomes:
		# One temp SNP file and one temp position file per chromosome;
		# each allele character is doubled (diploid encoding) per line.
		(fId, snpsFile) = tempfile.mkstemp()
		os.close(fId)
		(fId, posFile) = tempfile.mkstemp()
		os.close(fId)
		sf = open(snpsFile,"w")
		pf = open(posFile,"w")
		snpsd = newSnpsds[chromosome-1]
		for i in range(0,len(snpsd.snps)):
			snp = snpsd.snps[i]
			(marf,maf) = snpsdata.getMAF(snp)
			marfs.append(marf)
			mafs.append(maf)
			str_snp = map(str,snp)
			double_snp = []
			for nt in str_snp:
				double_snp.append(nt)
				double_snp.append(nt)
			sf.write("".join(double_snp)+"\n")
			pf.write(str(snpsd.positions[i])+"\n")
		sf.close()
		pf.close()
		outFile = outputFile+"_job_"+str(chromosome)+".out"
		errFile = outputFile+"_job_"+str(chromosome)+".err"
		resFile = outputFile+"_"+str(chromosome)+".out"
		print "resFile,outFile,errFile,snpsFile,posFile,phenotypeFile:",resFile,outFile,errFile,snpsFile,posFile,phenotypeFile
		results = _runCAMP_(resFile,outFile,errFile,snpsFile,posFile,phenotypeFile,sampleNum)
		positions_list += results["positions"]
		scores_list += results["scores"]
		for (i,j) in results["snpIndices"]:
			if not (j<0 or i<0):
				marfs.append(0.5) #An ugly hack!!!
				mafs.append(0.5)
			chromosome_list.append(chromosome)
	# Collate all chromosomes into one CSV of scores.
	scoreFile = outputFile+".scores"
	f = open(scoreFile,"w")
	f.write("Chromosome,Position,Score,MARF,MAF,Second_Position\n")
	for i in range(0,len(positions_list)):
		chromosome = chromosome_list[i]
		# NOTE(review): _runCAMP_ appends plain ints to "positions", yet a
		# (pos1,pos2) pair is unpacked here — confirm the expected format.
		(pos1,pos2) = positions_list[i]
		score = scores_list[i]
		marf = marfs[i]
		maf = mafs[i]
		l = map(str,[chromosome,pos1,score,marf,maf,pos2])
		f.write(",".join(l)+"\n")
	f.close()
def _runCAMP_(resFile,outFile,errFile,snpsFile,posFile,phenotypeFile,sampleNum=None,windowSize=None):
cmdStr = "(/home/cmb-01/bvilhjal/Projects/camp/camp -i "+snpsFile+" -f -p "+posFile+" -d "+phenotypeFile+" -w 30000 -o "+resFile+" "
if windowSize:
cmdStr+= " -w "+str(windowSize)+" "
if sampleNum:
cmdStr+= " -n "+str(sampleNum)+" "
cmdStr+=" > "+outFile+") >& "+errFile
sys.stdout.write(cmdStr+"\n")
sys.stdout.flush()
os.system(cmdStr)
#Parse results... and return
rf = open(resFile,"r")
#pf = open(posFile,"r")
lines = rf.readlines()
lines.pop(0)
positions = []
interactions = []
snpIndices = []
scores = []
i = 1
for line in lines:
line_list = line.split()
if i%10000==0:
print i,"lines read"
sys.stdout.flush()
if len(line_list)==5:
positions.append(int(line_list[2]))
interaction = int(line_list[3])
interactions.append(interaction)
snpIndices.append([int(line_list[0])-1,int(line_list[1])-1])
scores.append(float(line_list[4]))
else:
print line_list, len(line_list)
break
print i,"lines read out of",len(lines)
sys.stdout.flush()
rf.close()
return {"scores":scores, "positions": positions, "interactions":interactions, "snpIndices":snpIndices}
# Command-line entry point (Python 2 script).
if __name__ == '__main__':
	_run_()
	print "Done!"
| UTF-8 | Python | false | false | 8,957 | py | 1,119 | Camp.py | 474 | 0.683376 | 0.673105 | 0 | 316 | 27.338608 | 134 |
Mabushi1227/Nitech | 1,881,195,711,209 | 0a3e474e2fecddceedde0cbc0bf8fbb42e870a69 | d9ee19f45a92c9c5656883263d874596c50bf314 | /python/CG/MidExam.py | 209c20dc0c4a5d9b4966b3a801ecbc4059aa8758 | [] | no_license | https://github.com/Mabushi1227/Nitech | d89fec954574708dcebce9aec53d0fe279778949 | dd86bf9ee45c8ba4c8dbd878618a939cfb12088b | refs/heads/master | 2022-01-28T15:16:27.775319 | 2022-01-13T05:47:14 | 2022-01-13T05:47:14 | 180,480,168 | 0 | 0 | null | false | 2019-08-10T02:54:28 | 2019-04-10T01:49:14 | 2019-07-24T02:41:44 | 2019-08-10T02:54:28 | 2,152 | 0 | 0 | 0 | C | false | false | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
# Problem 2: 3x3 window statistics over a 5x5 input image.
array_2 = np.array([[48., 46, 2, 7, 14],
                    [50, 52, 3, 8, 15],
                    [42, 58, 9, 11, 17],
                    [34, 30, 14, 18, 20],
                    [25, 31, 39, 30, 24]])
# 2.(a) mean filter: sum each 3x3 window via slicing, then divide by 9.
y_2a = np.zeros([3, 3])
for row in range(3):
    for col in range(3):
        y_2a[row, col] = array_2[row:row + 3, col:col + 3].sum()
print("2(a)*9",y_2a)
y_2a = y_2a / 9.0
print("2(a)\n",y_2a)
# Problem 4: SAD (sum of absolute differences) template matching of the
# 3x3 template t against every 3x3 window of array_2.
t = np.array([[3., 11, 15],
              [8, 10, 16],
              [7, 18, 17]])
SAD = np.zeros([3, 3])
for row in range(3):
    for col in range(3):
        SAD[row, col] = np.abs(array_2[row:row + 3, col:col + 3] - t).sum()
print("SAD\n",SAD)
| UTF-8 | Python | false | false | 761 | py | 154 | MidExam.py | 33 | 0.492696 | 0.357238 | 0 | 36 | 19.916667 | 103 |
AmimulAhshanAvi/Learn_to_code | 18,554,258,758,896 | 6c8603e1e5e21f63646490a99a3706e2fc560979 | 6296b204c3bc8f1e0043baf08399443ece4ff07f | /Assignment/Ch-1/pg-24/pg-24(random_function).py | 93ec5b89b29c4f8970d213cd067d362825e28167 | [] | no_license | https://github.com/AmimulAhshanAvi/Learn_to_code | e4b4be9eedc66fe142b1106641088f977852297e | 0da02a055058c35d52b6713ef7989ea0b4a967b7 | refs/heads/master | 2023-02-17T18:31:36.162459 | 2021-01-13T16:40:05 | 2021-01-13T16:40:05 | 322,248,799 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
verbs = ['Leverage','Sync','Target','Gimify']
adjectives = ['A/B Tested','Freemium','Hyperlocal','Siloed']
nouns = ['Early Adopter','Low-hanging','pipeline']
verb = random.choice(verbs)
adjective = random.choice(adjectives)
noun = random.choice(nouns)
phrase = verb + ' ' + adjective + ' ' + noun
print(phrase)
| UTF-8 | Python | false | false | 329 | py | 24 | pg-24(random_function).py | 16 | 0.683891 | 0.683891 | 0 | 13 | 24.307692 | 60 |
N-H-C/CSE_2201_Algorithms_Code_Python | 7,559,142,468,637 | 650e57dddd10b7e446deb7de3dca110cc7fae643 | 8a13c1026ab9182fd4a083794a98654bcdcb0006 | /graph_dfs_pythonic_2.py | 69a8af4826e81983898cbd4ad82c5a08ac03fea1 | [] | no_license | https://github.com/N-H-C/CSE_2201_Algorithms_Code_Python | efe95efa49163a2ea82b025444453b999a1d6c1e | 7fbec9ae293b719419cb218af9ae6ff2b4dae4cc | refs/heads/master | 2023-02-18T20:43:23.601183 | 2021-01-22T15:27:37 | 2021-01-22T15:27:37 | 265,626,976 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def dfs(adj, visited, current_node):
if current_node not in visited:
visited.add(current_node)
for neighbour in adj[current_node]:
if neighbour not in visited:
dfs(adj, visited, neighbour)
print(current_node, end=' ')
# Read the graph from stdin and run a DFS from every still-unvisited node.
n = int(input("Enter number of nodes: "))  # kept for the input protocol (not otherwise used)
e = int(input("Enter number of edges: "))
# Adjacency list: node name -> list of neighbour names.
adj_list = {}
undirected_graph = input("Undirected graph? (Y/N) ")
print("Enter edges (u, v) in separate line(s).")
for _ in range(e):
    u, v = input().split()
    # BUG FIX: the original did ``adj_list[u] = v``, which (a) overwrote any
    # earlier edge out of u and (b) stored a plain string, so dfs() iterated
    # the *characters* of the neighbour's name. Store lists of neighbours.
    adj_list.setdefault(u, []).append(v)
    # Make sure v exists as a key too, so dfs() never hits a missing node.
    adj_list.setdefault(v, [])
    if undirected_graph in ('Y', 'y'):
        adj_list[v].append(u)
visited = set()
print("DFS: ", end='')
for vertex in adj_list:
    if vertex not in visited:
        dfs(adj_list, visited, vertex)
| UTF-8 | Python | false | false | 763 | py | 22 | graph_dfs_pythonic_2.py | 22 | 0.59633 | 0.59633 | 0 | 30 | 24.433333 | 58 |
Nikkuniku/AtcoderProgramming | 13,340,168,428,712 | 64c6f7a3b3ab2430b59f84a6cf1ecb54272d9e2b | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC100~ABC199/ABC160/E.py | 0fced2f2d45bb4c9431e2218ddf59eefe68b6c1c | [] | no_license | https://github.com/Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x,y,a,b,c=map(int,input().split())
# (The first input line — x, y, a, b, c — is read above this block.)
# Appears to be AtCoder ABC160 E: pick x apples from p, y from q, and optionally
# upgrade the worst picks using the c "colorless" values in r — TODO confirm.
p=list(map(int,input().split()))
q=list(map(int,input().split()))
r=list(map(int,input().split()))
# Keep only the best x values of p and the best y values of q.
p=sorted(p,reverse=True)
q=sorted(q,reverse=True)
r=sorted(r)  # ascending, so the largest colorless value sits at the right end
p=p[:x]
q=q[:y]
import heapq
from collections import deque
# Min-heaps over the chosen values: the heap root is the current worst pick.
heapq.heapify(p)
heapq.heapify(q)
r=deque(r)
for i in range(c):
    # Compare the worst chosen value from each heap against the best
    # remaining colorless value, and replace the smaller of the two minima
    # when the colorless value beats it.
    v_p=heapq.heappop(p)
    v_q=heapq.heappop(q)
    v_r=r.pop()  # largest remaining colorless value
    if v_p<=v_q:
        heapq.heappush(q,v_q)  # q's minimum is not the weakest pick; restore it
        if v_p<v_r:
            heapq.heappush(p,v_r)  # colorless value replaces p's minimum
        else:
            heapq.heappush(p,v_p)  # no improvement possible; keep the original
    else:
        heapq.heappush(p,v_p)
        if v_q<v_r:
            heapq.heappush(q,v_r)
        else:
            heapq.heappush(q,v_q)
# Total enjoyment = sum of everything finally kept in both heaps.
ans=sum(p)+sum(q)
print(ans)
BilyiVoron/monty2019 | 17,033,840,312,520 | 32ec1e50579d614f713622314b13ee71ae8ecd4d | a989df9c252dccfaadc7116a271503a9f6b762b4 | /tests/tests_api.py | 51d96c55b2165ffd049b57d6503eba0028adc228 | [] | no_license | https://github.com/BilyiVoron/monty2019 | c084d29cabbe352ac43a6db8d3af59378b2bfc7c | 7823f08ab8196c2c537c4996571b6de31a27a846 | refs/heads/master | 2020-07-28T19:46:55.417514 | 2020-05-27T12:07:21 | 2020-05-27T12:07:21 | 209,516,025 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIRequestFactory, APIClient
from api.views import TestList, TestDetail
from monty.models import Dictionary, Theme, Word, Test
User = get_user_model()
class DictionaryListTests(APITestCase):
    """List and create dictionaries through /api/dictionaries/."""

    @staticmethod
    def setup_user():
        """Create and return the user that owns the test data."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def setUp(self):
        self.owner = self.setup_user()
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.client.force_authenticate(user=self.owner)

    def _assert_status(self, response, expected):
        """Assert the HTTP status with the module's standard failure message."""
        self.assertEqual(
            response.status_code,
            expected,
            f"Expected Response Code {expected}, received {response.status_code} instead.",
        )

    def test_list(self):
        self._assert_status(self.client.get(self.uri), 200)

    def test_create(self):
        payload = {
            "owner": self.owner.id,
            "native_language": "uk",
            "foreign_language": "en",
            "dictionary_name": "En-Uk",
        }
        self._assert_status(
            self.client.post(self.uri, payload, format="json"), 201
        )
class DictionaryDetailTests(APITestCase):
    """Retrieve/update/delete one dictionary at /api/dictionaries/<pk>/.

    Consistency fix: the other test classes in this module build URLs and
    assertion messages with f-strings, while this class mixed in
    ``str.format``. Everything now uses f-strings; the resulting runtime
    strings are unchanged.
    """

    def setUp(self):
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.owner = self.setup_user()
        # Fixture: one dictionary owned by the authenticated user.
        self.test_dictionary = Dictionary.objects.create(
            dictionary_name="En-Uk",
            native_language="en",
            foreign_language="uk",
            owner=self.owner,
        )
        self.client.force_authenticate(user=self.owner)

    @staticmethod
    def setup_user():
        """Create and return the user that owns the fixtures."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def test_retrieve(self):
        response = self.client.get(f"{self.uri}{self.test_dictionary.pk}/")
        self.assertEqual(
            response.status_code,
            200,
            f"Expected Response Code 200, received {response.status_code} instead.",
        )

    def test_update(self):
        response = self.client.put(
            f"{self.uri}{self.test_dictionary.pk}/",
            {
                "owner": self.owner.id,
                "native_language": "uk",
                "foreign_language": "en",
                "dictionary_name": "Uk-En",
            },
            format="json",
        )
        self.assertEqual(
            response.status_code,
            200,
            f"Expected Response Code 200, received {response.status_code} instead.",
        )

    def test_destroy(self):
        response = self.client.delete(f"{self.uri}{self.test_dictionary.pk}/")
        self.assertEqual(
            response.status_code,
            204,
            f"Expected Response Code 204, received {response.status_code} instead.",
        )
class ThemeListTests(APITestCase):
    """List and create themes under a dictionary."""

    @staticmethod
    def setup_user():
        """Create and return the user that owns the test data."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def setUp(self):
        self.owner = self.setup_user()
        self.test_dictionary = Dictionary.objects.create(
            dictionary_name="En-Uk", owner=self.owner
        )
        self.test_theme = Theme.objects.create(
            theme_name="Detective Comics", dictionary=self.test_dictionary
        )
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.client.force_authenticate(user=self.owner)

    def _themes_url(self):
        """Collection endpoint for the fixture dictionary's themes."""
        return f"{self.uri}{self.test_dictionary.pk}/themes/"

    def _assert_status(self, response, expected):
        self.assertEqual(
            response.status_code,
            expected,
            f"Expected Response Code {expected}, received {response.status_code} instead.",
        )

    def test_list(self):
        self._assert_status(self.client.get(self._themes_url()), 200)

    def test_create(self):
        payload = {"dictionary": self.test_dictionary.id, "theme_name": "Detective Comics"}
        self._assert_status(
            self.client.post(self._themes_url(), payload, format="json"), 201
        )
class ThemeDetailTests(APITestCase):
    """Retrieve/update/delete a single theme under a dictionary."""

    @staticmethod
    def setup_user():
        """Create and return the user that owns the test data."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def setUp(self):
        self.owner = self.setup_user()
        self.test_dictionary = Dictionary.objects.create(
            dictionary_name="En-Uk", owner=self.owner
        )
        self.test_theme = Theme.objects.create(
            theme_name="Detective Comics", dictionary=self.test_dictionary
        )
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.client.force_authenticate(user=self.owner)

    def _theme_url(self):
        """Detail endpoint of the theme fixture."""
        return f"{self.uri}{self.test_dictionary.pk}/themes/{self.test_theme.pk}/"

    def _assert_status(self, response, expected):
        self.assertEqual(
            response.status_code,
            expected,
            f"Expected Response Code {expected}, received {response.status_code} instead.",
        )

    def test_retrieve(self):
        self._assert_status(self.client.get(self._theme_url()), 200)

    def test_update(self):
        payload = {"dictionary": self.test_dictionary.id, "theme_name": "Vertigo"}
        self._assert_status(
            self.client.put(self._theme_url(), payload, format="json"), 200
        )

    def test_destroy(self):
        self._assert_status(self.client.delete(self._theme_url()), 204)
class WordListTests(APITestCase):
    """List and create words under a dictionary's theme."""

    @staticmethod
    def setup_user():
        """Create and return the user that owns the test data."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def setUp(self):
        self.owner = self.setup_user()
        self.test_dictionary = Dictionary.objects.create(
            dictionary_name="En-Uk", owner=self.owner
        )
        self.test_theme = Theme.objects.create(
            theme_name="Detective Comics", dictionary=self.test_dictionary
        )
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.client.force_authenticate(user=self.owner)

    def _words_url(self):
        """Collection endpoint for the fixture theme's words."""
        return f"{self.uri}{self.test_dictionary.pk}/themes/{self.test_theme.pk}/words/"

    def _assert_status(self, response, expected):
        self.assertEqual(
            response.status_code,
            expected,
            f"Expected Response Code {expected}, received {response.status_code} instead.",
        )

    def test_list(self):
        self._assert_status(self.client.get(self._words_url()), 200)

    def test_create(self):
        payload = {
            "native_word": ["blablabla"],
            "foreign_word": ["блаблабла"],
            "dictionary": self.test_dictionary.id,
            "theme": self.test_theme.id,
        }
        self._assert_status(
            self.client.post(self._words_url(), payload, format="json"), 201
        )
class WordDetailTests(APITestCase):
    """Retrieve/update/delete a single word under a dictionary's theme."""

    @staticmethod
    def setup_user():
        """Create and return the user that owns the test data."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def setUp(self):
        self.owner = self.setup_user()
        self.test_dictionary = Dictionary.objects.create(
            dictionary_name="En-Uk", owner=self.owner
        )
        self.test_theme = Theme.objects.create(
            theme_name="Detective Comics", dictionary=self.test_dictionary
        )
        self.test_word = Word.objects.create(
            native_word=["blablabla"],
            foreign_word=["блаблабла"],
            dictionary=self.test_dictionary,
            theme=self.test_theme,
        )
        self.client = APIClient()
        self.uri = "/api/dictionaries/"
        self.client.force_authenticate(user=self.owner)

    def _word_url(self):
        """Detail endpoint of the word fixture."""
        return (
            f"{self.uri}{self.test_dictionary.pk}/themes/"
            f"{self.test_theme.pk}/words/{self.test_word.pk}/"
        )

    def _assert_status(self, response, expected):
        self.assertEqual(
            response.status_code,
            expected,
            f"Expected Response Code {expected}, received {response.status_code} instead.",
        )

    def test_retrieve(self):
        self._assert_status(self.client.get(self._word_url()), 200)

    def test_update(self):
        payload = {
            "native_word": ["ololololo"],
            "foreign_word": ["ололололо"],
            "dictionary": self.test_dictionary.id,
            "theme": self.test_theme.id,
        }
        self._assert_status(
            self.client.put(self._word_url(), payload, format="json"), 200
        )

    def test_destroy(self):
        self._assert_status(self.client.delete(self._word_url()), 204)
class TestTestViews(APITestCase):
    """Exercise the Test list/detail views via APIRequestFactory + token auth.

    NOTE(review): ``self.view1`` and ``self.uri1`` are created in setUp but
    never used, and ``test_details`` builds a detail URI yet dispatches it to
    ``self.view`` (the list view) — presumably it was meant to use
    ``self.view1``; confirm before relying on this test's coverage.
    """

    def setUp(self):
        self.factory = APIRequestFactory()
        self.view = TestList.as_view({"get": "list"})
        self.view1 = TestDetail.as_view()  # unused — see class docstring
        self.uri = "/api/tests/"
        self.uri1 = "/api/tests/32/"  # unused — see class docstring
        self.owner = self.setup_user()
        self.example_test = Test.objects.create(result=53, test_date=timezone.now())
        # Token used for HTTP_AUTHORIZATION headers in the requests below.
        self.token = Token.objects.create(user=self.owner)
        self.token.save()

    @staticmethod
    def setup_user():
        """Create and return the user the token belongs to."""
        return User.objects.create_user(
            "Bruce Wayne",
            email="batman@batcave.com",
            password="Martha",
            native_language="uk",
        )

    def test_list(self):
        # Build the request by hand (factory) instead of using APIClient.
        request = self.factory.get(
            self.uri, HTTP_AUTHORIZATION=f"Token {self.token.key}"
        )
        request.user = self.owner
        response = self.view(request)
        self.assertEqual(
            response.status_code,
            200,
            f"Expected Response Code 200, received {response.status_code} instead.",
        )

    def test_details(self):
        request = self.factory.get(
            f"{self.uri}{self.example_test.pk}/",
            HTTP_AUTHORIZATION=f"Token {self.token.key}",
        )
        request.user = self.owner
        # NOTE(review): dispatches to the *list* view despite the detail URI.
        response = self.view(request)
        self.assertEqual(
            response.status_code,
            200,
            f"Expected Response Code 200, received {response.status_code} instead.",
        )
| UTF-8 | Python | false | false | 11,953 | py | 27 | tests_api.py | 17 | 0.555593 | 0.54595 | 0 | 372 | 31.05914 | 106 |
momo9/pyfriend-django | 16,071,767,659,833 | 131b0d60c72f7bdb34c3d4d1d93dab0e33713b58 | 06fa3bd0c21ae9d75a860f3563dab3bed2c20df4 | /website/website/urls.py | eb0867243ad5ca48fee717c008f67409c63e88cb | [] | no_license | https://github.com/momo9/pyfriend-django | 282891c1e4f3fde3bd1fff1a42c11400fa4d5526 | bbcf2f576273d375e235f3a5987f8f3338eccf32 | refs/heads/master | 2021-01-18T07:31:41.663899 | 2014-07-24T03:41:42 | 2014-07-24T03:41:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, include, url
from pyfriend.views import register,login,logout
from django.contrib import admin
import settings
# Populate the admin site with every app's registered ModelAdmin classes.
admin.autodiscover()
# URL table using the old-style ``patterns()`` API — NOTE(review): removed in
# Django 1.10, so this presumably targets an older Django; confirm the pin.
urlpatterns = patterns('',
    url(r'^admin/',include(admin.site.urls)),
    # Serve collected static files directly (development-style serving).
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve',{'document_root':settings.STATIC_ROOT}),
    # Delegate the site root to the pyfriend app's own urlconf.
    url(r'^$',include('pyfriend.urls')),
    url(r'^register/$',register),
    url(r'^login/$',login),
    url(r'^logout/$',logout),
)
| UTF-8 | Python | false | false | 507 | py | 10 | urls.py | 5 | 0.65286 | 0.65286 | 0 | 16 | 30.6875 | 123 |
EdwinKato/bucket-list | 1,047,972,025,159 | ec269401b9ce577cd8af71637b606ca095967281 | c16c32efbc80953694903fde58c08fb53ff2dc1d | /backend/api/__init__.py | 7b9d92ae0726a5fcf00d18e7fef6f56c73734379 | [
"MIT"
] | permissive | https://github.com/EdwinKato/bucket-list | 9af5a7a4c6852c881dcc98d4dccf18b0a74baaff | 16b71cce59df2f28061b8f5d6b2cfb1f679381c4 | refs/heads/master | 2022-12-09T13:54:23.046711 | 2017-08-18T12:56:36 | 2017-08-18T12:56:36 | 95,651,248 | 0 | 1 | MIT | false | 2023-06-21T15:39:22 | 2017-06-28T09:15:04 | 2017-08-08T06:05:54 | 2022-12-08T00:00:30 | 56,226 | 0 | 1 | 16 | CSS | false | false | from flask_sqlalchemy import SQLAlchemy
import connexion
from config import config
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build the Connexion/Flask app for ``config_name``.

    Wires the Swagger-defined API, loads the matching config object, registers
    the two auth endpoints and binds the shared SQLAlchemy instance.

    :param config_name: key into the ``config`` mapping imported above.
    :return: the underlying Flask application object.
    """
    # Connexion wraps Flask and routes requests according to the swagger spec.
    app = connexion.FlaskApp(__name__, specification_dir='swagger/')
    app.add_api('swagger.yaml')
    application = app.app
    application.config.from_object(config[config_name])
    # ``register`` and ``login`` are module-level names — presumably supplied
    # by the ``from api.api import *`` at the bottom of this module and
    # resolved when create_app() is called; confirm the import order.
    application.add_url_rule('/auth/register',
                             'register',
                             register)
    application.add_url_rule('/auth/login',
                             'login',
                             login)
    db.init_app(application)
    return application
from api.api import *
| UTF-8 | Python | false | false | 642 | py | 52 | __init__.py | 39 | 0.584112 | 0.584112 | 0 | 28 | 21.928571 | 68 |
risent/little_project | 9,405,978,418,884 | f7cc30e9b331e030687b72522be45e05a4fa7ca9 | 66c40b180e7ad5b10b49d1159dc4a6d8e53131d7 | /code/record/pottery/migrations/0005_auto_20191002_1045.py | 792ae41367a51005e1e6b6d898a4f715f15fe8cd | [] | no_license | https://github.com/risent/little_project | 6b2ebeb4ae380fec554b2684792ef7e4fd607ecb | 74792d57d680e343ab4aab7b41dd27fd38a4ef01 | refs/heads/master | 2020-08-14T03:01:10.085798 | 2019-10-14T05:55:58 | 2019-10-14T05:55:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1.7 on 2019-10-02 02:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.7, 2019-10-02): make ``coupon.content_type``
    nullable and ``coupon.object_id`` nullable/blank — presumably the two
    halves of a GenericForeignKey on the Coupon model; confirm in models.py."""

    dependencies = [
        ('pottery', '0004_auto_20190930_1345'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='content_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='object_id',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| UTF-8 | Python | false | false | 665 | py | 64 | 0005_auto_20191002_1045.py | 43 | 0.610526 | 0.56391 | 0 | 24 | 26.708333 | 123 |
wpwbb510582246/InterestingProgram | 19,456,201,862,411 | 88dece8bd9ba895bf17f16f5ded7073edc7f8f62 | fe1c4deaab5f1d41250021b1e5098944d04f5945 | /ChatWithAI.py | 93ddafca8bcf48c2e1376e66a92fb7ce1cf9754d | [
"Apache-2.0"
] | permissive | https://github.com/wpwbb510582246/InterestingProgram | 4185f353e24c1238e8ba2e47c39b6df78d31ab18 | 8d0f71a96158f5e08085024038fd4ad137789858 | refs/heads/master | 2021-10-21T17:48:10.398593 | 2019-03-05T12:50:41 | 2019-03-05T12:50:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
调用腾讯智能闲聊API实现聊天,使用腾讯优图语音合成API实现朗读结果
"""
import time;
import base64;
import hashlib;
import random;
import json;
import string;
import os;
import urllib;
from urllib.parse import quote;
from urllib import request;
# Base class shared by the Tencent AI open-platform clients below.
class BaseClass:
    def __init__(self, url):
        """
        :param url: endpoint URL of the target Tencent AI API.
        """
        self.URL = url;
        self.APP_ID = 0000000000; # your own app_id from the Tencent AI console
        self.APP_KEY = "xxxxxxxxxxxxxxxx"; # your own app_key
        # ``params`` is extended later by subclasses with the parameters each
        # API requires; only the keys shared by every API are listed here.
        self.params = {
            'app_id' : self.APP_ID,
            'time_stamp' : None,
            'nonce_str' : None,
        };
        # Raw HTTP response of the last API call (None until get_result runs).
        self.result = None;

    def __get_sign(self):
        """
        Compute the request signature and store it in ``self.params["sign"]``.
        :return: None
        """
        # Second-resolution timestamp, guards against request replay.
        time_stamp = int(time.time());
        # Random string so the signature cannot be predicted.
        nonce_str = ''.join(random.sample(string.ascii_letters + string.digits, 10))
        # Fill in the shared parameters (``sign`` itself is derived below).
        self.params['time_stamp'] = time_stamp;
        self.params['nonce_str'] = nonce_str;
        before_sign = '';
        # Concatenate URL-encoded key=value pairs in sorted key order.
        for key in sorted(self.params):
            before_sign += f'{key}={quote(str(self.params[key]).encode("utf-8"))}&';
        # Append the application secret under the ``app_key`` name.
        before_sign += f"app_key={self.APP_KEY}";
        # Uppercase MD5 hex digest of that string is the request signature.
        sign = hashlib.md5(before_sign.encode("utf-8")).hexdigest().upper();
        # Add the signature to the parameter dict sent with the request.
        self.params["sign"] = sign;

    def get_result(self):
        """
        Call the API and store the raw response in ``self.result``.
        :return: None
        """
        # Finish building ``params`` (adds the signature).
        self.__get_sign();
        params = urllib.parse.urlencode(self.params).encode("utf-8");
        req = request.Request(url=self.URL, data=params);
        # 10-second timeout; retried a few times before giving up —
        # ``self.result`` stays None if every attempt fails.
        count = 0;
        while True:
            try:
                count += 1;
                self.result = request.urlopen(req, timeout=10);
                break;
            except Exception as e:
                print(e)
                print(f"连接超时,正在进行第{str(count)}次重连")
                if count <= 3:
                    continue;
                else:
                    break;

    def do_result(self):
        """
        Hook for subclasses: process ``self.result``.
        :return: None
        """
        pass;

    def play_audio(self, file_dir, sleep_time, is_delete=False):
        """
        Play every audio file in ``file_dir`` via the OS default handler.
        :param file_dir: directory containing the audio files.
        :param sleep_time: seconds to wait after starting each file (the clip
            length is unknown, so a fixed wait is used).
        :param is_delete: when True, delete every file in the directory after
            playback.
        :return: None
        """
        # NOTE(review): ``os.system`` on a bare file path with "\\" separators
        # relies on Windows file associations — presumably Windows-only; confirm.
        for file in os.listdir(file_dir):
            os.system(file_dir+"\\"+file);
            time.sleep(sleep_time);
        if is_delete:
            for file in os.listdir(file_dir):
                os.remove(file_dir+"\\"+file);

    def run(self):
        """
        Hook for subclasses: main entry point.
        :return: None
        """
        pass;
# Text-to-speech client for the Tencent AI speech-synthesis API.
class TencentVoice(BaseClass):
    """Synthesize text to .mp3 files stored under ``audio_path``."""

    def __init__(self, text, audio_path, sound_choice=2, sound_speed=0):
        """
        :param text: a string, or a list of strings, to synthesize; each item
            must fit in 300 bytes of UTF-8.
        :param audio_path: directory the .mp3 files are written into.
        :param sound_choice: voice model 0~2 (0: female, 1: female English, 2: male).
        :param sound_speed: speed -2..2 (0.6x, 0.8x, normal, 1.2x, 1.5x).
        """
        super(TencentVoice, self).__init__('https://api.ai.qq.com/fcgi-bin/aai/aai_tta')
        self.TEXT = text
        self.audio_path = audio_path
        self.params['model_type'] = sound_choice
        self.params['speed'] = sound_speed

    def deal_text(self):
        """Validate TEXT and synthesize it (each item separately for a list).

        BUG FIX: the original called ``self.TEXT.encode(...)`` before checking
        the type, which raised AttributeError whenever TEXT was a list; the
        length check now lives inside the ``str`` branch.
        :raises ValueError: if any text exceeds 300 UTF-8 bytes.
        """
        if isinstance(self.TEXT, str):
            if len(self.TEXT.encode("utf-8")) > 300:
                raise ValueError("text参数长度超出限制,限制utf8下300个字节")
            self.params["text"] = self.TEXT
            self.do_result(self.TEXT)
        elif isinstance(self.TEXT, list):
            for text in self.TEXT:
                if len(text.encode("utf-8")) > 300:
                    raise ValueError("text参数长度超出限制,限制utf8下300个字节")
                self.params["text"] = text
                self.do_result(text)

    def do_result(self, text):
        """Call the API and write the returned audio as ``<text[:10]>.mp3``.

        :param text: the synthesized text; its first 10 characters name the file.
        """
        self.get_result()
        str_json = self.result.read().decode("utf-8")
        voice_data = json.loads(str_json)["data"]["voice"]
        # BUG FIX: ``base64.decodestring`` was deprecated and removed in
        # Python 3.9; ``b64decode`` accepts the str payload directly.
        voice_data = base64.b64decode(voice_data)
        file_name = text[:10] if len(text) > 10 else text
        if voice_data:
            with open(self.audio_path + "/" + file_name + ".mp3", "wb") as f:
                f.write(voice_data)

    def run(self):
        """Main entry point: validate and synthesize the stored text."""
        self.deal_text()
# Chat client for the Tencent AI "smart chat" (NLP textchat) API.
class TencetChat(BaseClass):
    """Send one question to the textchat endpoint; the reply lands in ``self.answer``."""

    def __init__(self, question):
        """
        :param question: the chat question to send (str, at most 300 UTF-8 bytes).
        """
        super(TencetChat, self).__init__("https://api.ai.qq.com/fcgi-bin/nlp/nlp_textchat")
        self.params["session"] = "10000"
        self.question = question

    def deal_question(self):
        """Validate the question's type and length, then submit it."""
        if not isinstance(self.question, str):
            raise TypeError(f"question参数必须是 ‘str’ 类型的,不能是 ‘{type(self.question)}’ 类型的!!!")
        if len(self.question.encode("utf-8")) > 300:
            raise ValueError("question参数的长度必须小于300个字节(utf-8格式下)")
        self.params["question"] = self.question
        self.do_result()

    def do_result(self):
        """Issue the request and turn the JSON payload into ``self.answer``."""
        self.get_result()
        if not self.result:
            # get_result() exhausted its retries without a response.
            self.answer = "我尝试了4次,但还是失败了,只能说我尽力了。"
            return
        res = json.loads(self.result.read().decode("utf-8"))
        if res["msg"] == "ok":
            self.answer = res["data"]["answer"]
        else:
            self.answer = "我好像出错了:" + res["msg"]

    def run(self):
        """Main entry point: process the stored question."""
        self.deal_question()
# A complete interactive chat program built on the two clients above.
def complete_chat(use_voice=False):
    """Run a console chat loop until the user types ``quit``.

    :param use_voice: speak replies via TencentVoice instead of printing them
        (more error-prone, per the original author's note).
    :return: None
    """
    print("欢迎使用智能闲聊,下面开始聊天吧(输入quit退出聊天):")
    print("*" * 50)
    while True:
        question = input("我:")
        if question == "quit":
            return
        bot = TencetChat(question)
        bot.run()
        reply = bot.answer
        if not use_voice:
            print("智能闲聊:", reply)
            continue
        speaker = TencentVoice(reply, audio_path="TencentChatAudio", sound_choice=0, sound_speed=-1)
        speaker.run()
        speaker.play_audio("TencentChatAudio", 0, is_delete=True)
if __name__ == '__main__':
complete_chat(); | UTF-8 | Python | false | false | 8,476 | py | 3 | ChatWithAI.py | 2 | 0.495719 | 0.482878 | 0 | 243 | 27.8107 | 103 |
alexdy2007/DataMiningClusteringText | 18,408,229,834,883 | 86b38ba139e237b23caf781ba42390998142d3aa | d8fec967bdf1a873c99f1c010aa0b05f891ff03b | /analysis/multidimensional_scaling.py | c04661a7bad142818f7eb602b24521cfd390c1f7 | [] | no_license | https://github.com/alexdy2007/DataMiningClusteringText | 84cc32ac3dece81903a71547d5bb1da8a8e69948 | 7eaf5ce06f1fd9f358613faa9f97d081e9ad8ae5 | refs/heads/master | 2021-01-18T20:19:45.873503 | 2017-03-21T23:48:41 | 2017-03-21T23:48:41 | 85,005,380 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from analysis.cosine_similarity import get_cosine_similarity, get_euclidean_distance
from sklearn.manifold import MDS
from sklearn.decomposition import TruncatedSVD
def get_multi_scaling_positions(words_freq_matrix, euclidean=True):
    """Embed documents in 2-D with metric multidimensional scaling.

    :param words_freq_matrix: document-term frequency matrix.
    :param euclidean: if False, feed the cosine-based matrix to MDS as a
        precomputed dissimilarity; if True, run MDS on the Euclidean-distance
        matrix with its default metric.
    :return: array of shape (n_documents, 2) with the 2-D positions.
    """
    # (A stray ``print(euclidean)`` debug statement was removed here.)
    if not euclidean:
        # Cosine matrix used directly as the dissimilarity input.
        mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
        dist = get_cosine_similarity(words_freq_matrix)
        pos = mds.fit_transform(dist)
    else:
        # NOTE(review): MDS defaults to dissimilarity="euclidean" here, so it
        # computes distances *of* the matrix returned by
        # get_euclidean_distance — confirm that is intended.
        mds = MDS(n_components=2, random_state=1)
        dist = get_euclidean_distance(words_freq_matrix)
        pos = mds.fit_transform(dist)
    return pos
def get_LSA_scaling_positions(words_freq_matrix):
    """Project the term-frequency matrix onto its top 2 singular components (LSA)."""
    lsa = TruncatedSVD(n_components=2)
    return lsa.fit_transform(words_freq_matrix)
| UTF-8 | Python | false | false | 830 | py | 39 | multidimensional_scaling.py | 11 | 0.727711 | 0.721687 | 0 | 20 | 40.4 | 111 |
shearern/drupal-deployment-tools | 16,020,228,039,317 | c89c0f2fe23dab0434315d14ede0b167de9dd412 | 65db4dea2aa15210dddcc74fb09a07468e005a79 | /src/init-drupal-deploy.py | ccb0ba2fed4f57f373eae8ab8cc335add3109198 | [] | no_license | https://github.com/shearern/drupal-deployment-tools | 860ea9714f1d34988240e76c1fa11b219b4727a9 | 22171c5075c2ac8819fe26cfa6565950a8487c8f | refs/heads/master | 2021-01-10T03:47:16.356106 | 2015-11-24T03:45:08 | 2015-11-24T03:45:08 | 46,764,883 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
'''init-drupal-deploy.py
Call in Drupal project folder to init setup'''
# Create drupal-deploy folder
# Add secrets folder to .gitignore | UTF-8 | Python | false | false | 157 | py | 58 | init-drupal-deploy.py | 55 | 0.745223 | 0.745223 | 0 | 8 | 18.75 | 46 |
XarisA/nethack | 2,078,764,193,364 | d79173570cc92bf70d60f4347a57147f070bbafc | 0c79f36da68f46d1e2ba19f0c2d3296f008f86c1 | /GetInfo.py | eb5b36bff77fae3a37d1c5b9d725c8df72c99288 | [
"MIT"
] | permissive | https://github.com/XarisA/nethack | ecf78e82c41ef2a0b8b29b9c852244ba2a899e2f | b5622286679cc0d337270b3c7fcb9f36885ed80f | refs/heads/master | 2023-01-10T22:43:41.445861 | 2020-11-21T15:54:22 | 2020-11-21T15:54:22 | 109,478,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import os
from socket import *
import dns.resolver
import dns.reversename
# TODO Pass arguments from terminal
def reach_host(hostname,arguments='-c 1'):
    """Ping *hostname* via the system ``ping`` and print reachability + IP.

    :param hostname: host to probe (also resolved with gethostbyname below).
    :param arguments: raw flags handed to ``ping`` (``-c 1`` is the POSIX
        count flag; Windows ping uses ``-n`` — presumably Unix is the target,
        confirm).

    NOTE(review): the command is assembled by string concatenation and run
    through the shell via ``os.system``, so an untrusted ``hostname`` could
    inject shell commands. Acceptable for the hard-coded host in main(); do
    not expose this function to external input as-is.
    """
    # Pinging the host
    print ('[+] Pinging ' + hostname)
    # os.system returns the shell exit status; ping exits 0 on a reply.
    if os.system("ping " + arguments + ' '+ hostname) == 0:
        print ("Host appears to be up ")
    else:
        print ("Host is down or does not reply to ping requests ")
    print ("Host's ip is " , gethostbyname(hostname))
def nslookup(hostname, typ3='MX'):
    """Query DNS records for *hostname* and print each answer.

    NOTE(review): the print below reads ``rdata.exchange``/``rdata.preference``,
    which only exist on MX answers — calling this with another ``typ3`` will
    raise AttributeError. Confirm before generalizing the record type.
    """
    answers = dns.resolver.query(hostname, typ3)
    for rdata in answers:
        print ('Host', rdata.exchange, 'has preference', rdata.preference)
def name_reverse(hostip):
    """Print the reverse-DNS (PTR) name derived from the given IP address."""
    reverse_name = dns.reversename.from_address(hostip)
    print(reverse_name)
    # (Round-tripping back to the address would be dns.reversename.to_address(reverse_name).)
def main():
    """Entry point: ping a fixed host and look up its mail servers."""
    target = "dnspython.org"
    print('[+] Gatering information about host')
    reach_host(target)
    print("Mail server lookup ")
    nslookup(target)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,001 | py | 5 | GetInfo.py | 3 | 0.647353 | 0.643357 | 0 | 39 | 24.666667 | 74 |
Johnkhk/assignment5 | 6,975,026,922,815 | 2023401a078845134eb786e0b35e9f26b211ca0e | 9d8aa183b700613b9a95cd722c5f5554621d120e | /init_db.py | 3af4aa503dda67dbf045511904ca378a9211bf98 | [] | no_license | https://github.com/Johnkhk/assignment5 | 4d92268e40bcef31eb6ccdc89f08f6f6ff2c00b3 | aef6175e2efb6853f12780ced35fa749cb7c6f91 | refs/heads/master | 2021-01-06T19:34:21.916499 | 2020-02-18T20:37:02 | 2020-02-18T20:37:02 | 241,461,250 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Import MySQL Connector Driver
import mysql.connector as mysql
# Load the credentials from the secured .env file
import os
from dotenv import load_dotenv
load_dotenv('credentials.env')
db_user = os.environ['MYSQL_USER']
db_pass = os.environ['MYSQL_PASSWORD']
db_name = os.environ['MYSQL_DATABASE']
db_host = 'localhost' # different than inside the container and assumes default port of 3306
# Connect to the database
db = mysql.connect(user=db_user, password=db_pass, host=db_host, database=db_name)
cursor = db.cursor()

# CAUTION: drops any existing TStudents table so the script is re-runnable.
cursor.execute("drop table if exists TStudents;")

# Create the TStudents table.
try:
    cursor.execute("""
        CREATE TABLE TStudents (
        id integer AUTO_INCREMENT PRIMARY KEY,
        first_name VARCHAR(30) NOT NULL,
        last_name VARCHAR(30) NOT NULL,
        email VARCHAR(50) NOT NULL,
        pid VARCHAR(20) NOT NULL,
        created_at TIMESTAMP
        );
    """)
except mysql.Error:
    # Narrowed from a bare ``except:`` — only database errors belong here.
    print("Table already exists. Not recreating it.")

# Insert seed records (parameterized to let the driver do the escaping).
query = "insert into TStudents (first_name, last_name, email, pid, created_at) values (%s, %s, %s, %s, %s)"
values = [
    ('rick','gessner','rgessner@eng.ucsd.edu', 'A12345', '2020-02-11 12:00:00'),
    ('ramsin','khoshabeh','ramsin@eng.ucsd.edu', 'A23456', '2020-02-11 12:00:00'),
    ('steve','carrell','stevec@eng.ucsd.edu', 'A34567', '2020-02-11 12:00:00'),
    ('charleze','theron','charlezet@eng.ucsd.edu', 'A45678', '2020-02-11 12:00:00'),
    ('bryant','liu','briant@eng.ucsd.edu', 'A56789', '2020-02-11 12:00:00')
]
cursor.executemany(query, values)
db.commit()

# Show what was inserted.
cursor.execute("select * from TStudents;")
print('---------- DATABASE INITIALIZED ----------')
for row in cursor:  # plain loop instead of a side-effect list comprehension
    print(row)
# BUG FIX: the original ended with ``db.close`` (attribute access only), so
# the connection was never actually closed.
db.close()
bswopes/pmmm | 12,103,217,847,089 | 18f047416f0a31e244d74fe854fa4b9a7bbe33b3 | 17bb75cca1e2607b8c547a3cc12daa7fac9634ba | /pmmm/db_pm_hours_record_handle.py | 0afb4d2d700404b23c7b1ea357636def07c5da4a | [] | no_license | https://github.com/bswopes/pmmm | 8d8ecbef09d7f963b2ba1e90a6478a6a85c95e6c | 2ffb3844f2a2f84ce868141ab3335c3297296b0e | refs/heads/master | 2021-01-20T22:58:49.581215 | 2013-08-10T20:09:10 | 2013-08-10T20:09:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import sqlite
import sys
#setdefaultencoding()方法在系统启动时设置,需要加上reload(sys)
reload(sys)
#设置默认编码为utf-8,解决里程碑为中文时报编码错误的问题
sys.setdefaultencoding('utf-8')
class PmtableController:
    """Data-access helper for the ``proj_hours_record`` working-hours table.

    Every method takes an open DB-API connection ``db`` and uses the ``%s``
    paramstyle (MySQLdb/pymysql style).  All queries are now parameterised;
    the previous string-concatenated variants were vulnerable to SQL
    injection.  Read methods return raw cursor rows; write methods commit
    immediately.

    NOTE(review): some queries use SQLite-only features (``rowid``,
    ``julianday``) while the placeholders are MySQL-style -- confirm which
    driver is actually connected before deploying.
    """

    def select_proj_name_table(self, db):
        """Return all distinct project names recorded in the table."""
        cu = db.cursor()
        cu.execute("select distinct proj_name from proj_hours_record")
        return cu.fetchall()

    def select_table(self, db, proj_name, milestone, work_type, work_date,
                     work_date_length, user_remarks):
        """Find users with an identical record (duplicate-submission check)."""
        cu = db.cursor()
        sql = ("select user_name from proj_hours_record where proj_name=%s"
               " AND milestone=%s AND work_type=%s AND work_date=%s"
               " AND work_time_length=%s AND user_feedback=%s")
        cu.execute(sql, (proj_name, milestone, work_type, work_date,
                         work_date_length, user_remarks))
        return cu.fetchall()

    def select_import_proj_hours_record(self, db, proj_name, user_name,
                                        work_date, user_remarks):
        """Return the rowid of an already-imported identical record, if any."""
        cu = db.cursor()
        sql = ("select rowid from proj_hours_record where proj_name=%s"
               " AND user_name=%s AND work_date=%s AND user_feedback=%s")
        cu.execute(sql, (proj_name, user_name, work_date, user_remarks))
        return cu.fetchone()

    def select_list_table(self, db, user_name, startDate, endDate):
        """List one user's records in [startDate, endDate], newest first."""
        cu = db.cursor()
        sql = ("select proj_name,user_name,work_type,work_date,milestone,"
               "work_time_length,rowid,check_status,user_feedback,"
               "manager_feedback,checked_user,write_time,checked_time"
               " from proj_hours_record where user_name=%s"
               " AND work_date>=%s AND work_date<=%s order by work_date desc")
        cu.execute(sql, (user_name, startDate, endDate))
        return cu.fetchall()

    def select_projlist_table(self, db, pr, startDate, endDate):
        """List one project's records in the range, by user then date."""
        cu = db.cursor()
        sql = ("select proj_name,user_name,work_type,work_date,milestone,"
               "work_time_length,rowid,check_status,user_feedback,"
               "manager_feedback,checked_user,write_time,checked_time"
               " from proj_hours_record where proj_name=%s"
               " AND work_date>=%s AND work_date<=%s"
               " order by user_name,work_date")
        cu.execute(sql, (pr, startDate, endDate))
        return cu.fetchall()

    def insert_into_table(self, db, proj_name, user_name, work_type,
                          work_date, milestone, work_date_length,
                          user_remarks, check_status, write_time):
        """Insert a new working-hours record and commit."""
        cu = db.cursor()
        cu.execute(
            "INSERT INTO proj_hours_record(proj_name,user_name,work_type,"
            "work_date,milestone,work_time_length,user_feedback,check_status,"
            "write_time) VALUES (%s, %s, %s, %s, %s, %s, %s,%s,%s)",
            (proj_name, user_name, work_type, work_date, milestone,
             work_date_length, user_remarks, check_status, write_time))
        db.commit()

    def import_insert_into_table(self, db, proj_name, user_name, work_type,
                                 work_date, milestone, work_date_length,
                                 user_remarks, manager_feedback, check_status,
                                 checked_user, write_time, checked_time):
        """Insert an imported record (review fields included) and commit."""
        cu = db.cursor()
        cu.execute(
            "INSERT INTO proj_hours_record(proj_name,user_name,work_type,"
            "work_date,milestone,work_time_length,user_feedback,"
            "manager_feedback,check_status,checked_user,write_time,"
            "checked_time) VALUES (%s, %s, %s, %s, %s, %s, %s,%s,%s,%s,%s,%s)",
            (proj_name, user_name, work_type, work_date, milestone,
             work_date_length, user_remarks, manager_feedback, check_status,
             checked_user, write_time, checked_time))
        db.commit()

    def select_timeout_table(self, db, startDate, endDate, u):
        """Count distinct work days user ``u`` filed more than 7 days late."""
        cu = db.cursor()
        sql = ("select count(distinct work_date) from proj_hours_record"
               " where user_name=%s"
               " and (julianday(write_time) - julianday(work_date))>7"
               " and work_date>=%s and work_date<=%s")
        cu.execute(sql, (str(u), startDate, endDate))
        return cu.fetchone()

    def select_sumworkdate_table(self, db, startDates, endDates, u):
        """Count the distinct days on which user ``u`` recorded hours."""
        cu = db.cursor()
        sql = ("select count(distinct work_date) from proj_hours_record"
               " where user_name=%s and work_date>=%s and work_date<=%s")
        cu.execute(sql, (str(u), startDates, endDates))
        return cu.fetchone()

    def select_weekReport_table(self, db, startDate, endDate, user_name):
        """Return one user's records for the weekly report, by date."""
        cu = db.cursor()
        sql = ("select proj_name,work_type,work_date,user_feedback,"
               "work_time_length from proj_hours_record where work_date>=%s"
               " AND work_date<=%s AND user_name=%s order by work_date")
        cu.execute(sql, (startDate, endDate, user_name))
        return cu.fetchall()

    def select_user_monthtime_tabel(self, db, u, startDate, endDate):
        """Sum a user's approved hours in the range, excluding leave types."""
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where user_name=%s AND work_date>=%s AND work_date<=%s"
               " AND work_type not in ('事假','病假','调休') AND check_status=1")
        cu.execute(sql, (str(u), str(startDate), str(endDate)))
        return cu.fetchall()

    def select_sumtime_table(self, db, startDate, endDate, pr):
        """Total approved hours booked on project ``pr`` in the range."""
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where proj_name=%s AND check_status=1"
               " AND work_type not in ('事假','病假','调休')"
               " AND work_date>=%s AND work_date<=%s")
        cu.execute(sql, (str(pr), str(startDate), str(endDate)))
        return cu.fetchone()

    def select_user_sumtime(self, db, startDate, endDate, u, pr):
        """Approved hours of user ``u`` on project ``pr`` (analysis mode 1)."""
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where proj_name=%s AND check_status=1"
               " AND work_type not in ('事假','病假','调休')"
               " AND work_date>=%s AND work_date<=%s and user_name=%s")
        cu.execute(sql, (str(pr), str(startDate), str(endDate), str(u)))
        return cu.fetchone()

    def select_sumtime(self, db, startDate, endDate, u, pr):
        """Approved hours of user ``u`` on project ``pr`` (analysis mode 2).

        Unlike select_user_sumtime this excludes only '事假'/'病假', i.e.
        compensatory leave ('调休') is counted in this mode.
        """
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where proj_name=%s AND check_status=1"
               " AND work_type not in ('事假','病假')"
               " AND work_date>=%s AND work_date<=%s and user_name=%s")
        cu.execute(sql, (str(pr), str(startDate), str(endDate), str(u)))
        return cu.fetchone()

    def select_reality_work_length(self, db, pr, startDate, endDate):
        """Per-user approved hour totals on ``pr`` (hour-cap check)."""
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where proj_name=%s AND check_status=1"
               " AND work_type not in ('事假','病假')"
               " AND work_date>=%s AND work_date<=%s group by user_name")
        cu.execute(sql, (str(pr), str(startDate), str(endDate)))
        return cu.fetchall()

    def select_user_month_work_length(self, db, pr, startDate, endDate):
        """Per-user (total, name) approved hours on project ``pr``."""
        cu = db.cursor()
        sql = ("select sum(work_time_length),user_name from proj_hours_record"
               " where proj_name=%s AND check_status=1"
               " AND work_type not in ('事假','病假','调休')"
               " AND work_date>=%s AND work_date<=%s group by user_name")
        cu.execute(sql, (str(pr), str(startDate), str(endDate)))
        return cu.fetchall()

    def select_audit_table(self, db, username):
        """Pending (check_status=0) records of one user, ordered by date."""
        cu = db.cursor()
        sql = ("select proj_name,user_name,work_type,work_date,milestone,"
               "work_time_length,rowid,user_feedback,write_time"
               " from proj_hours_record where check_status=0 and user_name=%s"
               " order by work_date")
        cu.execute(sql, (username,))
        return cu.fetchall()

    def select_proj_audit_table(self, db, proj_name):
        """Pending records of one project, ordered by user then date."""
        cu = db.cursor()
        sql = ("select proj_name,user_name,work_type,work_date,milestone,"
               "work_time_length,rowid,user_feedback,write_time"
               " from proj_hours_record where check_status=0 and proj_name=%s"
               " order by user_name,work_date")
        cu.execute(sql, (proj_name,))
        return cu.fetchall()

    def submit_check_table(self, db, status, manager_feedback, check_user,
                           check_time, rowid):
        """Record a review verdict on one record and commit."""
        cu = db.cursor()
        cu.execute(
            "UPDATE proj_hours_record SET check_status=%s,manager_feedback=%s,"
            "checked_user=%s,checked_time=%s WHERE rowid=%s",
            (status, manager_feedback, check_user, check_time, rowid))
        db.commit()

    def update_pmhourecord_table(self, db, proj_name, user_name, work_type,
                                 work_date, milestone, work_date_length,
                                 user_remark, rowid):
        """Rewrite a rejected record and reset it to the unreviewed state.

        check_status is forced back to 0 and the reviewer fields are cleared.
        """
        cu = db.cursor()
        cu.execute(
            "update proj_hours_record set proj_name=%s,user_name=%s,"
            "work_type=%s,work_date=%s,milestone=%s,work_time_length=%s,"
            "user_feedback=%s,check_status=0,checked_user=%s,checked_time=%s"
            " where rowid=%s",
            (proj_name, user_name, work_type, work_date, milestone,
             work_date_length, user_remark, "", "", rowid))
        db.commit()

    def select_input_worktime_table(self, db, w, projName, startTime, endTime):
        """Sum approved hours of one work type ``w`` on a project."""
        cu = db.cursor()
        sql = ("select sum(work_time_length) from proj_hours_record"
               " where work_type=%s AND proj_name=%s AND check_status=1"
               " AND work_date>=%s AND work_type not in ('事假','病假','调休')"
               " AND work_date<=%s")
        cu.execute(sql, (w, str(projName), startTime, endTime))
        return cu.fetchone()

    def select_unreview_record_table(self, db, u):
        """Records of user ``u`` that failed review (check_status>1)."""
        cu = db.cursor()
        sql = ("select proj_name,user_name,work_type,work_date,milestone,"
               "work_time_length,rowid,check_status,manager_feedback,"
               "checked_user,user_feedback from proj_hours_record"
               " where check_status>1 and user_name=%s")
        cu.execute(sql, (u,))
        return cu.fetchall()

    def delete_record_table(self, db, data):
        """Delete the record whose rowid is ``data`` and commit."""
        cu = db.cursor()
        cu.execute("delete from proj_hours_record where rowid=%s", (data,))
        db.commit()

    def select_worktype_table(self, db):
        """Return all distinct work types present in the table."""
        cu = db.cursor()
        cu.execute("select distinct work_type from proj_hours_record")
        return cu.fetchall()

    def select_proj_user_name_table(self, db, proj, startDate, endDate):
        """Users who booked hours on ``proj`` within the date range."""
        cu = db.cursor()
        sql = ("select distinct user_name from proj_hours_record"
               " where proj_name=%s AND work_date>=%s AND work_date<=%s")
        cu.execute(sql, (proj, startDate, endDate))
        return cu.fetchall()

    def select_proj_user_table(self, db, projName):
        """All users who ever booked hours on project ``projName``."""
        cu = db.cursor()
        sql = ("select distinct user_name from proj_hours_record"
               " where proj_name=%s")
        cu.execute(sql, (projName,))
        return cu.fetchall()

    def select_username_table(self, db, startTime, endtime):
        """Users who recorded hours within [startTime, endtime]."""
        cu = db.cursor()
        sql = ("select distinct user_name from proj_hours_record"
               " where work_date>=%s and work_date<=%s")
        cu.execute(sql, (startTime, endtime))
        return cu.fetchall()

    def select_all_user_table(self, db):
        """Return every user name present in the table."""
        cu = db.cursor()
        cu.execute("select distinct user_name from proj_hours_record")
        return cu.fetchall()

    def select_time_proj_name_table(self, db, startDate, endDate):
        """Projects with at least one record within the date range."""
        cu = db.cursor()
        sql = ("select distinct proj_name from proj_hours_record"
               " where work_date>=%s and work_date<=%s")
        cu.execute(sql, (startDate, endDate))
        return cu.fetchall()
cenan/bugtracker | 16,982,300,713,360 | de9fd1640d0c74ca6dfba7354317a33c3018dbf6 | 9a86990885b8e61bd315b6da8eddcfd4359acddf | /models/projects.py | da14e19cdfb654785e87067481f9b2ef2ebe6c7f | [] | no_license | https://github.com/cenan/bugtracker | a4abcadbe3e487b3c1485ee6df27e7d08fed1c71 | 543dffc07949e6b2b2048857a74b52544f878aef | refs/heads/master | 2020-05-29T08:55:50.538530 | 2015-09-20T20:02:35 | 2015-09-20T20:02:35 | 42,827,033 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from config import db
def get_projects():
    """Return all rows from the ``project`` table."""
    return db.select('project')
def get_project(project_id):
    """Return the project row with the given id, or None when absent."""
    rows = db.select('project', where='id=$project_id',
                     vars={'project_id': project_id})
    try:
        return rows[0]
    except IndexError:
        return None
def create_project(project_name):
    """Insert a new project row and return the id assigned by the database."""
    return db.insert('project', project_name=project_name)
| UTF-8 | Python | false | false | 331 | py | 57 | projects.py | 34 | 0.670695 | 0.667674 | 0 | 14 | 22.571429 | 77 |
lywen52/live-age-gender-estimator | 5,884,105,216,466 | 7fbda80b68e25c06eeb91c66e74df49a188ed68f | 135cbac63a8f7533f106f0c9474af2472316a25c | /EstimateAge.py | af57b4e311cc2d9ad73825199b507f75cbd3b69f | [] | no_license | https://github.com/lywen52/live-age-gender-estimator | e52fc68061a136987887b20c8d2c69eb60a048b5 | 5dea45c9203bb3cbb8a633c66bd840ff75d7c0c6 | refs/heads/master | 2021-01-20T16:50:28.587797 | 2016-11-02T10:54:59 | 2016-11-02T10:54:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import VideoThread
if __name__ == '__main__':
    # NOTE(review): help_message is defined but never printed -- presumably
    # intended as a usage banner; confirm before removing.
    help_message = '''
USAGE: LiveDetector.py <image_names> ...
Press any key to continue, ESC to stop.
'''
    # Spawn the live capture/estimation thread and let it run.
    videoThread = VideoThread.VideoThread()
    videoThread.start()
| UTF-8 | Python | false | false | 277 | py | 7 | EstimateAge.py | 5 | 0.581227 | 0.581227 | 0 | 15 | 17.066667 | 44 |
Arielv1/backend_messaging_app | 7,945,689,511,475 | c299b8d82b78a450943ea331677c4cd7d28227bb | 6405b21d2983a7f8060e9f9bb7fafb35bb370f07 | /UserProfile/models.py | f3e9af60f911951823ef51ca1a34e2bc317d00ae | [] | no_license | https://github.com/Arielv1/backend_messaging_app | 9536a0de8a82dc6c6e9dcff731aa9baa3d6be05b | da32b39d6cca95a539083fc5a2f93932e98fcbbb | refs/heads/main | 2023-07-18T13:47:41.936418 | 2021-08-28T14:55:39 | 2021-08-28T14:55:39 | 400,723,228 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
class UserAccount(models.Model):
    """Per-user profile row pairing a Django auth ``User`` with a token."""

    # Exactly one profile per auth user; removed together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # NOTE(review): null=False combined with default=None means a save
    # without an explicit token inserts NULL and fails -- confirm intent.
    token = models.CharField(max_length=255, null=False, default=None)

    def __str__(self):
        # Renders as a tuple repr, e.g. "(<User: bob>, 'abc123')".
        return f"{self.user, self.token}"
| UTF-8 | Python | false | false | 565 | py | 11 | models.py | 8 | 0.780531 | 0.775221 | 0 | 16 | 34.3125 | 70 |
ccbib/dp-plastex | 1,984,274,902,099 | ae0b686c7a49db1e968672e0711bd5f27da82a55 | 8cb97e2325658bf6aee62bb7c098799d514b54bb | /pgxhtml/__init__.py | 86a35b4ffcf1b8bf640c3cbbd75934a1a4a5019a | [] | no_license | https://github.com/ccbib/dp-plastex | c37cf8c3193d2c53da3a042446b6151973e7927f | 9c0a383d187e5b292a934e91790601085d2be6e3 | refs/heads/master | 2020-12-24T17:18:08.387446 | 2011-11-21T22:28:50 | 2011-11-21T22:28:50 | 1,901,071 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from plasTeX.Renderers.XHTML import Renderer as BaseRenderer
class pgXHTMLRenderer(BaseRenderer):
    """XHTML renderer for Project Gutenberg uploads.

    Inherits plasTeX's XHTML renderer unchanged; exists so the package
    exposes its own renderer entry point.
    """
    pass

# plasTeX looks up the module-level name ``Renderer``; alias our subclass.
Renderer = pgXHTMLRenderer
| UTF-8 | Python | false | false | 213 | py | 4 | __init__.py | 3 | 0.788732 | 0.788732 | 0 | 7 | 29.428571 | 76 |
wyvernium/Sorting-Algorithms-Visualized-and-Compared | 5,274,219,882,779 | e9246f51a30ad242390544a1da98ffcea32f3547 | 3829f93348321b7f7b116b5d96437ed61bcf106c | /kthSmallestNumber.py | e6c0ca5b92a8b10362c56edb1250426c6fcb1c78 | [] | no_license | https://github.com/wyvernium/Sorting-Algorithms-Visualized-and-Compared | 760f433e916a6aef51e2e5228fa2196b837d7c4e | 3ffa59d05a5d7703264fb529e335b244d212cbad | refs/heads/master | 2022-04-04T09:41:22.588309 | 2019-12-23T19:37:08 | 2019-12-23T19:37:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Muhammet Furkan MUŞTU
160403041
"""
"""k th smallest number"""
# This function returns k'th smallest element
from random import randint
def randomPartition(arr, firstIndex, lastIndex):
    """Partition arr[firstIndex..lastIndex] around a randomly chosen pivot.

    A random element of the subarray is swapped into the last position so
    that the deterministic last-element partition() behaves like a
    random-pivot partition, giving quickselect its expected O(n) runtime.
    Returns the pivot's final index.
    """
    n = lastIndex - firstIndex + 1
    pivot = randint(1, 100) % n
    # BUG FIX: the original "swap" assigned each element back to itself
    # (a, b = a, b), so the pivot was never actually randomised.
    arr[firstIndex + pivot], arr[lastIndex] = arr[lastIndex], arr[firstIndex + pivot]
    return partition(arr, firstIndex, lastIndex)
def kthSmallest(arr, firstIndex, lastIndex, k):
    """Quickselect: return the k-th smallest element of the subarray.

    ``k`` is 1-based within arr[firstIndex..lastIndex].  The array is
    partially reordered in place.  An out-of-range ``k`` yields the
    sentinel 10**9.
    """
    # Guard: k must address an element inside the subarray.
    if k <= 0 or k > lastIndex - firstIndex + 1:
        return 10 ** 9
    # Place one pivot at its sorted position and rank it within the slice.
    pos = randomPartition(arr, firstIndex, lastIndex)
    rank = pos - firstIndex + 1
    if rank == k:
        return arr[pos]
    if rank > k:
        # Target lies left of the pivot.
        return kthSmallest(arr, firstIndex, pos - 1, k)
    # Target lies right of the pivot; adjust k for the discarded prefix.
    return kthSmallest(arr, pos + 1, lastIndex, k - rank)
def partition(arr, firstIndex, lastIndex):
    """Lomuto partition on arr[firstIndex..lastIndex], pivot = last element.

    Rearranges the slice in place so every value <= pivot precedes it and
    every larger value follows, then returns the pivot's final index.
    """
    pivot = arr[lastIndex]
    boundary = firstIndex  # first position not yet known to hold a small value
    for idx in range(firstIndex, lastIndex):
        if arr[idx] <= pivot:
            arr[boundary], arr[idx] = arr[idx], arr[boundary]
            boundary += 1
    # Move the pivot between the small and large regions.
    arr[boundary], arr[lastIndex] = arr[lastIndex], arr[boundary]
    return boundary
# Driver Code
if __name__ == "__main__":

    # Demo: the 3rd smallest element of the sample array (expected: 5).
    arr = [12, 3, 5, 7, 4, 19, 26]
    n = len(arr)
    k = 3
    print("K'th smallest element is", kthSmallest(arr, 0, n - 1, k))
ffc28/ImageSeparationL2TI | 4,595,615,049,121 | 05725e1d518ce196c3f28227c87d84d548f2083b | e7126859917b5311ee1796383baacb81b7d152a1 | /Code/Three_Projection/three_projection_dic_RectoVerso.py | 35f8d82e880f58f285faa3ccf9b13f713554666c | [] | no_license | https://github.com/ffc28/ImageSeparationL2TI | 665b521ab268fc1fcbba33adb326a5b7b4ce3681 | d500358082383323c564c6242a9acaa0dfb28de8 | refs/heads/master | 2023-06-02T12:11:19.211694 | 2021-06-18T14:20:19 | 2021-06-18T14:20:19 | 264,213,127 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Trying to use the dictionary learning
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy as sp
import numpy as np
import scipy.linalg as la
from skimage.color import rgb2gray
from skimage.util import view_as_windows
from sklearn.feature_extraction.image import extract_patches_2d, PatchExtractor
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from patchify import patchify, unpatchify
import museval.metrics as mmetrics
from rdc import rdc
from math import floor
from scipy.stats import kurtosis
from skimage.restoration import (denoise_wavelet, denoise_tv_chambolle, estimate_sigma, denoise_nl_means)
# Seed NumPy so dictionary learning and any random draws are reproducible.
np.random.seed(1)
# load images and convert them
n = 256  # side length of the square grayscale images
m = 8  # patch side length -- TODO: check other sizes
image_size = (n, n)
patch_size = (m, m)
step = 4  # patch-extraction stride; step < m gives overlapping patches
pic_set = 5  # image set used for training the recto dictionary
img_train1=mpimg.imread('./images_hard/set'+ str(pic_set) + '_pic1.png')
img_train_gray1 = rgb2gray(img_train1) # the value is between 0 and 1
print('Learning the dictionary for recto and verso images...')
# Extract reference patches from the first image
patches1 = patchify(img_train_gray1, patch_size, step)
initial_patch_size = patches1.shape
patches1 = patches1.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 2
img_train2=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray2 = rgb2gray(img_train2) # the value is between 0 and 1
patches2 = patchify(img_train_gray2, patch_size, step)
patches2 = patches2.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 3
img_train3=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray3 = rgb2gray(img_train3) # the value is between 0 and 1
patches3 = patchify(img_train_gray3, patch_size, step)
patches3 = patches3.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 4
img_train4=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray4 = rgb2gray(img_train4) # the value is between 0 and 1
patches4 = patchify(img_train_gray4, patch_size, step)
patches4 = patches4.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 5
img_train5=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray5 = rgb2gray(img_train5) # the value is between 0 and 1
patches5 = patchify(img_train_gray5, patch_size, step)
patches5 = patches5.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 6
img_train6=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray6 = rgb2gray(img_train6) # the value is between 0 and 1
patches6 = patchify(img_train_gray6, patch_size, step)
patches6 = patches6.reshape(-1, patch_size[0] * patch_size[1])
########
patches_recto = patches1
# patches_recto = np.concatenate((patches1, patches2, patches3, patches4, patches5, patches6), axis = 0)
patches_recto -= np.mean(patches_recto, axis=0) # remove the mean
patches_recto /= np.std(patches_recto, axis=0) # normalise each patch
print(patches_recto.shape)
print('Learning the recto dictionary...')
dico_recto = MiniBatchDictionaryLearning(n_components=100, alpha=0.7, n_iter=400) #TODO:check with different parameters
V_recto = dico_recto.fit(patches_recto).components_
"""
# plot the dictionary
plt.figure(figsize=(8, 6))
for i, comp in enumerate(V_recto[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Recto dictionary learned from patches')
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
"""
pic_set = 5
img_train1=mpimg.imread('./images_hard/set'+ str(pic_set) + '_pic2.png')
img_train_gray1 = rgb2gray(img_train1) # the value is between 0 and 1
# Flip to get the verso images
img_train_gray1 = np.fliplr(img_train_gray1)
patches1 = patchify(img_train_gray1, patch_size, step)
patches1 = patches1.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 2
img_train2=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray2 = rgb2gray(img_train2) # the value is between 0 and 1
# Flip to get the verso images
img_train_gray2 = np.fliplr(img_train_gray2)
patches2 = patchify(img_train_gray2, patch_size, step)
patches2 = patches2.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 3
img_train3=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray3 = rgb2gray(img_train3) # the value is between 0 and 1
# Flip to get the verso images
img_train_gray3 = np.fliplr(img_train_gray3)
patches3 = patchify(img_train_gray3, patch_size, step)
patches3 = patches3.reshape(-1, patch_size[0] * patch_size[1])
pic_set = 4
img_train4=mpimg.imread('./images/set'+ str(pic_set) + '_pic1.png')
img_train_gray4 = rgb2gray(img_train4) # the value is between 0 and 1
# Flip to get the verso images
img_train_gray4 = np.fliplr(img_train_gray4)
patches4 = patchify(img_train_gray4, patch_size, step)
patches4 = patches4.reshape(-1, patch_size[0] * patch_size[1])
patches_verso = patches1
# patches_verso = np.concatenate((patches1, patches2, patches3, patches4), axis = 0)
patches_verso -= np.mean(patches_verso, axis=0) # remove the mean
patches_verso /= np.std(patches_verso, axis=0) # normalise each patch
print('Learning the verso dictionary...')
dico_verso = MiniBatchDictionaryLearning(n_components=100, alpha=0.7, n_iter=400) #TODO:check with different parameters
V_verso = dico_verso.fit(patches_verso).components_
"""
# plot the dictionary
plt.figure(figsize=(8, 6))
for i, comp in enumerate(V_verso[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Verso dictionary learned from patches')
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
"""
## load the source here
pic_set = 5
img1=mpimg.imread('./images_hard/set'+ str(pic_set) + '_pic1.png')
img2=mpimg.imread('./images_hard/set'+ str(pic_set) + '_pic2.png')
img1_gray = rgb2gray(img1) # the value is between 0 and 1
img2_gray = rgb2gray(img2)
# Mixing process here
# The verso page is mirrored, as it appears when showing through the paper.
img2_gray = np.fliplr(img2_gray)
source1 = np.matrix(img1_gray)
source1 = source1.flatten('F') #column wise
source2 = np.matrix(img2_gray)
source2 = source2.flatten('F') #column wise
# Centre each flattened source (zero mean) before mixing.
source1 = source1 - np.mean(source1)
source2 = source2 - np.mean(source2)
#source1 = source1/np.linalg.norm(source1)
#source2 = source2/np.linalg.norm(source2)
# print("rdc = ", rdc(source1.T,source2.T))
source = np.stack((source1, source2))
# Kurtosis: a rough non-Gaussianity measure of the ground-truth sources.
k1 = kurtosis(np.squeeze(np.asarray(source[0,:])))
k2 = kurtosis(np.squeeze(np.asarray(source[1,:])))
print('Kuortosis of the original sources is: ', np.abs(k1) + np.abs(k2))
# print('Covariance matrix is: ')
# print(np.matmul(source,source.T))
# randomly generated mixing matrix
#mixing_matrix = np.random.rand(2,2)
# Nearly-degenerate show-through mixing: strong verso bleed into the recto.
mixing_matrix = np.array([[1, 0.99], [0.002, 1]])
#mixing_matrix = np.array([[0.8488177, 0.17889592], [0.05436321, 0.36153845]])
# mixing_matrix = np.array([[1, 0.9], [0.02, 1]])
# mixing_matrix = np.array([[1, 0.3], [0.5, 1]])
# X = source * mixing_matrix - The mixed images
X = np.matmul(mixing_matrix, source)
def Dic_proj_recto(data, n_coef, alpha):
    """Project an image onto the learned recto dictionary.

    The image is cut into overlapping patches, each patch is sparse-coded
    with OMP (``n_coef`` non-zero atoms) against ``dico_recto``, and the
    reconstruction is stitched back into a full image.  ``alpha`` is
    accepted for interface compatibility but is not used here.
    """
    # Flatten the patch grid to (num_patches, patch_pixels).
    patches = patchify(data, patch_size, step)
    patches = patches.reshape(-1, patch_size[0] * patch_size[1])
    # Per-pixel mean over all patches; removed before coding, restored after.
    patch_means = np.mean(patches, axis=0)
    patches = patches - patch_means
    dico_recto.set_params(transform_algorithm='omp',
                          transform_n_nonzero_coefs=n_coef)
    sparse_codes = dico_recto.transform(patches)
    recon = np.dot(sparse_codes, V_recto) + patch_means
    recon = np.reshape(recon, initial_patch_size)
    # Stitch the overlapping patches back into one image.
    return unpatchify(np.asarray(recon), image_size)
def Dic_proj_verso(data, n_coef, alpha):
    """Project an image onto the learned verso dictionary.

    Same procedure as Dic_proj_recto but codes against ``dico_verso`` /
    ``V_verso``.  ``alpha`` is accepted for interface compatibility but
    is not used here.
    """
    # Flatten the patch grid to (num_patches, patch_pixels).
    patches = patchify(data, patch_size, step)
    patches = patches.reshape(-1, patch_size[0] * patch_size[1])
    # Per-pixel mean over all patches; removed before coding, restored after.
    patch_means = np.mean(patches, axis=0)
    patches = patches - patch_means
    dico_verso.set_params(transform_algorithm='omp',
                          transform_n_nonzero_coefs=n_coef)
    sparse_codes = dico_verso.transform(patches)
    recon = np.dot(sparse_codes, V_verso) + patch_means
    recon = np.reshape(recon, initial_patch_size)
    # Stitch the overlapping patches back into one image.
    return unpatchify(np.asarray(recon), image_size)
def Dic_proj_double(S, n_coeff, alpha):
    """Denoise both source estimates in place via their own dictionaries.

    Row 0 is projected through the recto dictionary and row 1 through the
    verso dictionary.  Rows hold column-major-flattened images, hence the
    transposes around each projection.  Returns the mutated ``S``.
    """
    recto_img = np.reshape(S[0, :], image_size).T
    verso_img = np.reshape(S[1, :], image_size).T
    recto_img = Dic_proj_recto(recto_img, n_coeff, alpha).T
    verso_img = Dic_proj_verso(verso_img, n_coeff, alpha).T
    S[0, :] = np.reshape(recto_img, (1, n * n))
    S[1, :] = np.reshape(verso_img, (1, n * n))
    return S
def Dic_proj_single(S, n_coeff, alpha):
    """Denoise both source estimates in place using ONE dictionary.

    Single-dictionary variant of Dic_proj_double: both rows are coded
    against the recto dictionary.  Returns the mutated ``S``.
    """
    for row in (0, 1):
        img = np.reshape(S[row, :], image_size).T
        img = Dic_proj_recto(img, n_coeff, alpha).T
        S[row, :] = np.reshape(img, (1, n * n))
    return S
def data_projection(X, S):
    """One least-squares projection step for the model X = A S.

    The mixing matrix is first estimated as A = S·Xᵀ, then the sources are
    re-estimated as (A·Aᵀ)⁻¹·(A·X).
    """
    mixing_est = S @ X.T
    projected = mixing_est @ X
    gram = mixing_est @ mixing_est.T
    return np.linalg.inv(gram) @ projected
def get_demix(X, S):
    """Estimate the demixing matrix as the cross-correlation S·Xᵀ."""
    return S @ X.T
def whiten_projection(S):
    """Whiten the rows of S (PCA whitening): makes the output's S·Sᵀ ≈ I."""
    cov = S @ S.T
    # Inverse matrix square root of the row covariance.
    whitener = la.sqrtm(np.linalg.inv(cov))
    return whitener @ S
def TV_proj(S, lambda_this):
    """Total-variation denoise both source rows in place.

    Each row is reshaped to an image, TV-denoised (Chambolle) with weight
    ``lambda_this``, and written back.  Returns the mutated ``S``.
    """
    for row in (0, 1):
        img = np.reshape(S[row, :], image_size)
        img = denoise_tv_chambolle(img, weight=lambda_this, multichannel=False)
        S[row, :] = np.reshape(img, (1, n * n))
    return S
# Here begins the algorithm
# whitening processing. It's important
R = np.dot(X, X.T)
W = la.sqrtm(np.linalg.inv(R))
X = np.dot(W, X)
# mixing_matrix_norm = np.dot(W, mixing_matrix)
# mixing_matrix_norm[:,0] = mixing_matrix_norm[:,0]/np.linalg.norm(mixing_matrix_norm[:,0])
# mixing_matrix_norm[:,1] = mixing_matrix_norm[:,1]/np.linalg.norm(mixing_matrix_norm[:,1])
# Baseline separation quality of the whitened mixtures themselves.
(sdr_ref, sir_ref, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), np.asarray(X))
# mix = [[0.6992, 0.7275], [0.4784, 0.5548]] #or use the matrix from the paper
print('The mean value of the reference SDR is: ', np.mean(sdr_ref))
max_it = 30
#Se = np.random.randn(2, n*n)
Se = np.copy(X)
cost_it = np.zeros((1, max_it))
SDR_it = np.zeros((2, max_it))
SIR_it = np.zeros((2, max_it))
SAR_it = np.zeros((2, max_it))
num_coeff_begin = 2
num_coeff_final = 2
# Sparsity schedule (constant here: 2 atoms per patch on every iteration).
# NOTE(review): np.floor yields floats; sklearn's transform_n_nonzero_coefs
# presumably tolerates that -- confirm with the installed sklearn version.
num_coeff_v = np.floor(np.linspace(num_coeff_begin, num_coeff_final, max_it))
sigma = 1e-2
sigma_final = 1e-3
# Log-spaced denoising weights, decreasing per iteration.
sigma_v = np.logspace(np.log10(sigma), np.log10(sigma_final), max_it)
Se_old = np.copy(Se)
for it in np.arange(max_it):
    # print(it)
    # we performe three projections
    # Se = whiten_projection(soft_proximal(data_projection(X, Se),lambda_v[it]))
    # Se = whiten_projection(Dic_proj_single(data_projection(X,Se), num_coeff_v[it]))
    # 1. denoising
    Se = Dic_proj_single(Se, num_coeff_v[it], sigma_v[it])
    # Se = TV_proj(Se, sigma)
    # 2. get demixing matrix
    WW = get_demix(X, Se)
    # 3. whiten the demix matrix
    WW = whiten_projection(WW)
    # 4. get the new source
    Se = np.dot(WW, X)
    # cost_it[0,it] = np.linalg.norm(X - np.dot(np.dot(X, Se.T), Se), ord = 'fro')
    # Stop once the source estimate no longer changes between iterations.
    if np.linalg.norm(Se - Se_old, ord = 'fro') < 1e-6:
        print('Dict demix convergence reached')
        print('The real number of iteration is', it)
        break
    Se_old = np.copy(Se)
# Se_inv = np.dot(np.linalg.inv(np.dot(X, Se.T)), X)
#(sdr, sir, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), Se_inv)
# SDR_it[:, it] = np.squeeze(sdr)
# Se = np.dot(WW, X)
(sdr, sir, sar, perm) = mmetrics.bss_eval_sources(np.asarray(source), Se)
# NOTE(review): this final denoising pass runs AFTER the SDR above is
# measured, so its effect is not reflected in the printed scores -- confirm.
Se = Dic_proj_single(Se, num_coeff_v[it], sigma)
print('The mean value of the SDR is: ', np.mean(sdr))
print('The SDR improvement is: ', np.mean(sdr) - np.mean(sdr_ref))
# Dead code kept for reference: convergence and separated-image plots.
"""
plt.figure()
plt.subplot(211)
plt.plot(cost_it[0,:])
plt.title('Cost for iterations')
plt.grid()
plt.show
plt.subplot(212)
plt.plot(np.mean(SDR_it, axis = 0))
plt.title('SDR for iterations Dictionary learning')
plt.grid()
plt.show
s1 = Se[0,:]
s1 = np.reshape(s1, (n,n))
s2 = Se[1,:]
s2 = np.reshape(s2, (n,n))
plt.figure()
plt.imshow(s1.T, cmap='gray')
plt.title("Estimated source 1 with Sparse")
plt.show
plt.figure()
plt.imshow(s2.T, cmap='gray')
plt.title("Estimated source 2 with Sparse")
plt.show()
"""
| UTF-8 | Python | false | false | 13,155 | py | 46 | three_projection_dic_RectoVerso.py | 39 | 0.665146 | 0.626682 | 0 | 412 | 30.924757 | 119 |
roque334/smd | 3,607,772,542,292 | 17949ada38c95583143212cff88c66bb16e4a27e | 593e6e414a32295d64b8c4ef6c8e8ec086f20ef2 | /Pyuic4/registrarDonacion.py | ae65478a1fde534a2af24157dfce2fafba9109a0 | [] | no_license | https://github.com/roque334/smd | 5a2218aaf163349e5d9062721b4afca6e24f4872 | 545172084dbce3d7fee314f85c9d5ae9187c2505 | refs/heads/master | 2021-01-15T21:24:05.640956 | 2013-04-27T21:19:39 | 2013-04-27T21:19:39 | 9,633,312 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'registrar_donacion.ui'
#
# Created: Thu Apr 25 18:33:43 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_registrarDonacion(object):
def setupUi(self, registrarDonacion):
registrarDonacion.setObjectName(_fromUtf8("registrarDonacion"))
registrarDonacion.resize(1300, 720)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(registrarDonacion.sizePolicy().hasHeightForWidth())
registrarDonacion.setSizePolicy(sizePolicy)
self.form_2 = QtGui.QWidget(registrarDonacion)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.form_2.sizePolicy().hasHeightForWidth())
self.form_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.form_2.setFont(font)
self.form_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.form_2.setObjectName(_fromUtf8("form_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.form_2)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.form = QtGui.QFormLayout()
self.form.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.form.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
self.form.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
self.form.setLabelAlignment(QtCore.Qt.AlignCenter)
self.form.setFormAlignment(QtCore.Qt.AlignCenter)
self.form.setMargin(9)
self.form.setHorizontalSpacing(9)
self.form.setObjectName(_fromUtf8("form"))
self.label_4 = QtGui.QLabel(self.form_2)
self.label_4.setText(_fromUtf8(""))
self.label_4.setPixmap(QtGui.QPixmap(_fromUtf8("../../../.designer/Code/ImgFundacion.JPG")))
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.form.setWidget(0, QtGui.QFormLayout.SpanningRole, self.label_4)
self.label_3 = QtGui.QLabel(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setTextFormat(QtCore.Qt.AutoText)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.form.setWidget(1, QtGui.QFormLayout.SpanningRole, self.label_3)
self.label_RIF = QtGui.QLabel(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_RIF.sizePolicy().hasHeightForWidth())
self.label_RIF.setSizePolicy(sizePolicy)
self.label_RIF.setObjectName(_fromUtf8("label_RIF"))
self.form.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_RIF)
self.gridWidget = QtGui.QWidget(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gridWidget.sizePolicy().hasHeightForWidth())
self.gridWidget.setSizePolicy(sizePolicy)
self.gridWidget.setObjectName(_fromUtf8("gridWidget"))
self.gridLayout_3 = QtGui.QGridLayout(self.gridWidget)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.Rif_Ci = QtGui.QLineEdit(self.gridWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Rif_Ci.sizePolicy().hasHeightForWidth())
self.Rif_Ci.setSizePolicy(sizePolicy)
self.Rif_Ci.setObjectName(_fromUtf8("Rif_Ci"))
self.gridLayout_3.addWidget(self.Rif_Ci, 1, 0, 1, 1)
self.irAgregar = QtGui.QPushButton(self.gridWidget)
self.irAgregar.setEnabled(False)
self.irAgregar.setObjectName(_fromUtf8("irAgregar"))
self.gridLayout_3.addWidget(self.irAgregar, 1, 1, 1, 1)
self.form.setWidget(3, QtGui.QFormLayout.FieldRole, self.gridWidget)
self.label = QtGui.QLabel(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName(_fromUtf8("label"))
self.form.setWidget(4, QtGui.QFormLayout.FieldRole, self.label)
self.label_registrado = QtGui.QLabel(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_registrado.sizePolicy().hasHeightForWidth())
self.label_registrado.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(250, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(250, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(159, 158, 158))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.label_registrado.setPalette(palette)
self.label_registrado.setText(_fromUtf8(""))
self.label_registrado.setObjectName(_fromUtf8("label_registrado"))
self.form.setWidget(5, QtGui.QFormLayout.FieldRole, self.label_registrado)
self.label_5 = QtGui.QLabel(self.form_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.form.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_5)
self.razon_nombre = QtGui.QLineEdit(self.form_2)
self.razon_nombre.setEnabled(True)
self.razon_nombre.setReadOnly(True)
self.razon_nombre.setObjectName(_fromUtf8("razon_nombre"))
self.form.setWidget(6, QtGui.QFormLayout.FieldRole, self.razon_nombre)
self.label_campo1 = QtGui.QLabel(self.form_2)
self.label_campo1.setEnabled(False)
self.label_campo1.setText(_fromUtf8(""))
self.label_campo1.setObjectName(_fromUtf8("label_campo1"))
self.form.setWidget(12, QtGui.QFormLayout.LabelRole, self.label_campo1)
self.label_2 = QtGui.QLabel(self.form_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.form.setWidget(13, QtGui.QFormLayout.LabelRole, self.label_2)
self.textEdit = QtGui.QTextEdit(self.form_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(242, 241, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(242, 241, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.textEdit.setPalette(palette)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.form.setWidget(13, QtGui.QFormLayout.FieldRole, self.textEdit)
self.pushButton = QtGui.QPushButton(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton.setFlat(False)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.form.setWidget(16, QtGui.QFormLayout.LabelRole, self.pushButton)
self.pushButton_2 = QtGui.QPushButton(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.form.setWidget(16, QtGui.QFormLayout.FieldRole, self.pushButton_2)
self.label_cantmon = QtGui.QLabel(self.form_2)
self.label_cantmon.setEnabled(False)
self.label_cantmon.setObjectName(_fromUtf8("label_cantmon"))
self.form.setWidget(11, QtGui.QFormLayout.LabelRole, self.label_cantmon)
self.cantidad_monto = QtGui.QLineEdit(self.form_2)
self.cantidad_monto.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cantidad_monto.sizePolicy().hasHeightForWidth())
self.cantidad_monto.setSizePolicy(sizePolicy)
self.cantidad_monto.setReadOnly(False)
self.cantidad_monto.setObjectName(_fromUtf8("cantidad_monto"))
self.form.setWidget(11, QtGui.QFormLayout.FieldRole, self.cantidad_monto)
self.gridWidget1 = QtGui.QWidget(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gridWidget1.sizePolicy().hasHeightForWidth())
self.gridWidget1.setSizePolicy(sizePolicy)
self.gridWidget1.setObjectName(_fromUtf8("gridWidget1"))
self.gridLayout = QtGui.QGridLayout(self.gridWidget1)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.monetaria = QtGui.QRadioButton(self.gridWidget1)
self.monetaria.setEnabled(False)
self.monetaria.setCheckable(False)
self.monetaria.setObjectName(_fromUtf8("monetaria"))
self.gridLayout.addWidget(self.monetaria, 4, 0, 1, 1)
self.mobiliaria = QtGui.QRadioButton(self.gridWidget1)
self.mobiliaria.setEnabled(False)
self.mobiliaria.setCheckable(False)
self.mobiliaria.setObjectName(_fromUtf8("mobiliaria"))
self.gridLayout.addWidget(self.mobiliaria, 2, 0, 1, 1)
self.especie = QtGui.QRadioButton(self.gridWidget1)
self.especie.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.especie.sizePolicy().hasHeightForWidth())
self.especie.setSizePolicy(sizePolicy)
self.especie.setCheckable(False)
self.especie.setObjectName(_fromUtf8("especie"))
self.gridLayout.addWidget(self.especie, 1, 0, 1, 1)
self.tipoMonetaria = QtGui.QComboBox(self.gridWidget1)
self.tipoMonetaria.setEnabled(False)
self.tipoMonetaria.setAutoFillBackground(False)
self.tipoMonetaria.setEditable(False)
self.tipoMonetaria.setObjectName(_fromUtf8("tipoMonetaria"))
self.tipoMonetaria.addItem(_fromUtf8(""))
self.tipoMonetaria.addItem(_fromUtf8(""))
self.tipoMonetaria.addItem(_fromUtf8(""))
self.tipoMonetaria.addItem(_fromUtf8(""))
self.tipoMonetaria.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.tipoMonetaria, 4, 1, 1, 1)
self.form.setWidget(7, QtGui.QFormLayout.FieldRole, self.gridWidget1)
self.label_tipo_don = QtGui.QLabel(self.form_2)
self.label_tipo_don.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_tipo_don.sizePolicy().hasHeightForWidth())
self.label_tipo_don.setSizePolicy(sizePolicy)
self.label_tipo_don.setObjectName(_fromUtf8("label_tipo_don"))
self.form.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_tipo_don)
self.gridWidget2 = QtGui.QWidget(self.form_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gridWidget2.sizePolicy().hasHeightForWidth())
self.gridWidget2.setSizePolicy(sizePolicy)
self.gridWidget2.setObjectName(_fromUtf8("gridWidget2"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridWidget2)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.campoFecha = QtGui.QDateEdit(self.gridWidget2)
self.campoFecha.setEnabled(False)
self.campoFecha.setObjectName(_fromUtf8("campoFecha"))
self.gridLayout_2.addWidget(self.campoFecha, 0, 1, 1, 1)
self.label_ayuda_fecha = QtGui.QLabel(self.gridWidget2)
self.label_ayuda_fecha.setEnabled(False)
self.label_ayuda_fecha.setObjectName(_fromUtf8("label_ayuda_fecha"))
self.gridLayout_2.addWidget(self.label_ayuda_fecha, 0, 2, 1, 1)
self.form.setWidget(8, QtGui.QFormLayout.FieldRole, self.gridWidget2)
self.label_fecha = QtGui.QLabel(self.form_2)
self.label_fecha.setEnabled(False)
self.label_fecha.setObjectName(_fromUtf8("label_fecha"))
self.form.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_fecha)
self.concepto = QtGui.QLineEdit(self.form_2)
self.concepto.setEnabled(False)
self.concepto.setObjectName(_fromUtf8("concepto"))
self.form.setWidget(9, QtGui.QFormLayout.FieldRole, self.concepto)
self.label_concep = QtGui.QLabel(self.form_2)
self.label_concep.setEnabled(False)
self.label_concep.setObjectName(_fromUtf8("label_concep"))
self.form.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_concep)
self.numero = QtGui.QLineEdit(self.form_2)
self.numero.setEnabled(False)
self.numero.setObjectName(_fromUtf8("numero"))
self.form.setWidget(10, QtGui.QFormLayout.FieldRole, self.numero)
self.label_num = QtGui.QLabel(self.form_2)
self.label_num.setEnabled(False)
self.label_num.setObjectName(_fromUtf8("label_num"))
self.form.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_num)
self.verticalLayout.addLayout(self.form)
registrarDonacion.setCentralWidget(self.form_2)
self.menubar = QtGui.QMenuBar(registrarDonacion)
self.menubar.setGeometry(QtCore.QRect(0, 0, 625, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
registrarDonacion.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(registrarDonacion)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
registrarDonacion.setStatusBar(self.statusbar)
self.retranslateUi(registrarDonacion)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.verifiedAndSave)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.cancel)
QtCore.QObject.connect(self.Rif_Ci, QtCore.SIGNAL(_fromUtf8("editingFinished()")), registrarDonacion.verified_Rif_Ci)
QtCore.QObject.connect(self.monetaria, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.select_monetario)
QtCore.QObject.connect(self.mobiliaria, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.select_mobiliaria)
QtCore.QObject.connect(self.especie, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.select_especie)
QtCore.QObject.connect(self.irAgregar, QtCore.SIGNAL(_fromUtf8("clicked()")), registrarDonacion.goRegistrarDonante)
QtCore.QObject.connect(self.tipoMonetaria, QtCore.SIGNAL(_fromUtf8("activated(QString)")), registrarDonacion.guardar_tipoMon)
QtCore.QMetaObject.connectSlotsByName(registrarDonacion)
def retranslateUi(self, registrarDonacion):
registrarDonacion.setWindowTitle(QtGui.QApplication.translate("registrarDonacion", "Registrar Donante", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("registrarDonacion", "Registrar Donación", None, QtGui.QApplication.UnicodeUTF8))
self.label_RIF.setText(QtGui.QApplication.translate("registrarDonacion", "RIF/CI:", None, QtGui.QApplication.UnicodeUTF8))
self.irAgregar.setText(QtGui.QApplication.translate("registrarDonacion", "Ir a Agregar Donante", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("registrarDonacion", "-Natural: V-12345678 o E-12345678 -Jurídica:J-12345678-9", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("registrarDonacion", "Razón/Nombre:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("registrarDonacion", "Observaciones del Sistema:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("registrarDonacion", "Registrar", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("registrarDonacion", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
self.label_cantmon.setText(QtGui.QApplication.translate("registrarDonacion", "Cantidad/Monto:", None, QtGui.QApplication.UnicodeUTF8))
self.monetaria.setText(QtGui.QApplication.translate("registrarDonacion", "Monetaria", None, QtGui.QApplication.UnicodeUTF8))
self.mobiliaria.setText(QtGui.QApplication.translate("registrarDonacion", "Mobiliaria", None, QtGui.QApplication.UnicodeUTF8))
self.especie.setText(QtGui.QApplication.translate("registrarDonacion", "Especie", None, QtGui.QApplication.UnicodeUTF8))
self.tipoMonetaria.setItemText(0, QtGui.QApplication.translate("registrarDonacion", "Selecciona", None, QtGui.QApplication.UnicodeUTF8))
self.tipoMonetaria.setItemText(1, QtGui.QApplication.translate("registrarDonacion", "Baucher", None, QtGui.QApplication.UnicodeUTF8))
self.tipoMonetaria.setItemText(2, QtGui.QApplication.translate("registrarDonacion", "Cheque", None, QtGui.QApplication.UnicodeUTF8))
self.tipoMonetaria.setItemText(3, QtGui.QApplication.translate("registrarDonacion", "Efectivo", None, QtGui.QApplication.UnicodeUTF8))
self.tipoMonetaria.setItemText(4, QtGui.QApplication.translate("registrarDonacion", "Transferencia", None, QtGui.QApplication.UnicodeUTF8))
self.label_tipo_don.setText(QtGui.QApplication.translate("registrarDonacion", "Tipo de Donación:", None, QtGui.QApplication.UnicodeUTF8))
self.label_ayuda_fecha.setText(QtGui.QApplication.translate("registrarDonacion", "aaaa-mm-dd", None, QtGui.QApplication.UnicodeUTF8))
self.label_fecha.setText(QtGui.QApplication.translate("registrarDonacion", "Fecha:", None, QtGui.QApplication.UnicodeUTF8))
self.label_concep.setText(QtGui.QApplication.translate("registrarDonacion", "Concepto:", None, QtGui.QApplication.UnicodeUTF8))
self.label_num.setText(QtGui.QApplication.translate("registrarDonacion", "Numero:", None, QtGui.QApplication.UnicodeUTF8))
| UTF-8 | Python | false | false | 20,908 | py | 17 | registrarDonacion.py | 14 | 0.722685 | 0.703502 | 0 | 327 | 62.923547 | 177 |
dlutwy/ppim | 2,911,987,875,385 | d13d82764cb1c752f312a84f2756d6b8c59424da | 2f05e4aa502d5249d32d406979e61e47a4c75839 | /utils/homolo/homologene.py | 8a228c67970e5002215a81d902a50b28304b264d | [] | no_license | https://github.com/dlutwy/ppim | c59450b72c29cd6f48ad62fb904a04a569fd28ba | 0992df73dca86c6631af5a23502dab9c75f34af4 | refs/heads/master | 2022-12-13T05:01:52.056042 | 2020-09-03T06:38:34 | 2020-09-03T06:38:34 | 292,164,676 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import namedtuple
import pickle
import os
import sys
class HomoloQueryService():
def __init__(self):
# super().__init__()
Record = namedtuple("Record", ['homoloID', 'TaxonomyID', 'geneID', 'geneSymbol', 'proteinID', 'proteinRefSeq'])
self.homoloID2Genes = {}
self.gene2HomoloID = {}
n = 0
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"homologene.data")
# print('-'*30, file = sys.stderr)
# print("Homolo Init Data File:", path, file = sys.stderr)
with open(path) as f:
for line in f:
record = Record(*line.strip().split('\t'))
if self.homoloID2Genes.get(record.homoloID) is None:
self.homoloID2Genes[record.homoloID] = []
self.homoloID2Genes[record.homoloID].append(record.geneID)
self.gene2HomoloID[record.geneID] = record.homoloID
n += 1
# print('homolo num:', len(self.homoloID2Genes.keys()),'\tGenes num:' ,n, file = sys.stderr)
# print('-'*30, file = sys.stderr)
def getHomolo(self, geneID):
return self.gene2HomoloID.get(geneID, "NotFound:"+geneID)
def isHomolo(self, geneID, geneID2):
homo1 = self.getHomo(geneID)
homo2 = self.getHomo(geneID2)
return homo1 == homo2 | UTF-8 | Python | false | false | 1,352 | py | 24 | homologene.py | 18 | 0.589497 | 0.574704 | 0 | 31 | 42.645161 | 119 |
EnriqueBet/Python-UAM | 14,027,363,193,640 | ecfad2c9c91d2bfd72aec774fb2ac391ef919da8 | 9a4f00cc750b9b9571ec2c388e05944b46a1dcdd | /Numpy/slicing.py | 935f69e32668776094d35e1de48091fa36c38964 | [] | no_license | https://github.com/EnriqueBet/Python-UAM | 78252914b19f70d6f90a249243b0629e5741dfd7 | 8a99c39e7d4c46f567492f8f6848971ffdf1eb27 | refs/heads/master | 2021-05-05T05:32:48.847380 | 2018-03-11T20:52:46 | 2018-03-11T20:52:46 | 118,714,249 | 2 | 0 | null | true | 2018-01-24T04:57:56 | 2018-01-24T04:57:56 | 2018-01-24T04:57:39 | 2018-01-24T04:55:42 | 29 | 0 | 0 | 0 | null | false | null | '''
Aprender lo que es el slicing.
'''
# Librerias
import numpy as np
# Slicing sirve para obtener varios elementos de la matriz ea la vez
lista = list(range(6,21))
# Slicing en numpy
matriz = np.zeros((10,10))
x,y = matriz.shape
val = 0
for i in range(x):
for j in range(y):
val+=1
matriz[i,j] = val
print(matriz)
# Cuadrante superior izquierdo
# print(matriz[0:5,0:5])
matriz[:5,:5] = 1
print(matriz[:5,:5])
# Cuadrante superior derecho
matriz[:5,5:] = 2
print(matriz[:5,5:])
# Cuadrante inferior izquierdo
matriz[5:,:5] = 3
print(matriz[5:,:5])
# Cuadramte inferior derecho
matriz[5:,5:] = 4
print(matriz[5:,5:])
# Ejercicio de la cruz con puro slicing
matriz = np.zeros((11,11))
x,y = matriz.shape
xm = int(x/2)
ym = int(y/2)
n = 0
grosor = 2*n + 1
matriz[:,ym-n:ym+n+1] = 1
matriz[xm-n:xm+n+1,:] = 1
print(matriz)
| UTF-8 | Python | false | false | 852 | py | 3 | slicing.py | 3 | 0.640845 | 0.586854 | 0 | 51 | 15.686275 | 68 |
uralogical/pasori | 10,634,339,026,989 | 0ddf2de7f74016887445f63145a2cd85933abe9a | 2989e7a67dd4f5f67597dac5593132ae72f9d399 | /src/python/vendor/slack/slackbot_settings.py | 3cb70f320b7d245fa6ceeb17496d51cc59c941fe | [] | no_license | https://github.com/uralogical/pasori | c8483a03937257e9f2acd709dde6f4b120f1c699 | ddc5e9d6861d08125a6a4646d84a7b5719238b98 | refs/heads/master | 2021-01-15T09:52:22.753271 | 2016-09-18T06:23:02 | 2016-09-18T06:23:02 | 68,296,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
API_TOKEN = "xoxb-80831586261-EQu2WjqSGmxlV56NPZF7Z8qy"
CHANNEL = 'test'
DEFAULT_REPLY = 'ぱそりだよ☆'
| UTF-8 | Python | true | false | 135 | py | 8 | slackbot_settings.py | 5 | 0.691057 | 0.552846 | 0 | 5 | 23.6 | 55 |
philuxe/ACI-Python-Scripts | 15,152,644,653,053 | 02127ce77dba69e99a4c932e893211f554546ad4 | 74001570e065aecf34c6353e1911dc40d4f24195 | /Set_Config/aci_add_static-paths.py | 297b6453fa49c4fe5ce683e8bddbc911a4d85dbe | [] | no_license | https://github.com/philuxe/ACI-Python-Scripts | 96c75ba26712b891c331ea991e4aace896167736 | c75c3aeffaa32b18c3a9c53947681e4c3a584f7d | refs/heads/master | 2021-06-20T21:04:59.795636 | 2021-02-05T09:39:47 | 2021-02-05T09:39:47 | 176,603,329 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import requests.packages.urllib3
from cobra.mit.access import MoDirectory, ClassQuery
from cobra.mit.request import ConfigRequest
from cobra.mit.session import LoginSession
from cobra.model.fv import Tenant, Ap, AEPg, RsPathAtt
requests.packages.urllib3.disable_warnings()
# Current Features :
# Support creating static paths for regular ports, port-channel and virtual port-channel
# csv columns must be as follow :
# TENANT;VLAN;NODE;TARGET;APP;EPG;MODE
# regular port target : leaf id and port id (ethx/y) must be supplied
# port-channel : leaf id is numerical, target is the policy-group name
# vpc : leaf id is not evaluated , target is the policy-group name
def apic_logon(url, username, password):
my_session = LoginSession(url, username, password)
modir_local = MoDirectory(my_session)
modir_local.login()
return modir_local
# format port id = eth1/1
def get_mo_for_interface(nodeid, portid):
cq = ClassQuery('fabricPathEpCont')
cq.propFilter = 'eq(fabricPathEpCont.nodeId, "{0}")'.format(nodeid)
cq.subtree = 'children'
cq.subtreeClassFilter = 'fabricPathEp'
req = moDir.query(cq)
for key in req[0].children:
if key.name == portid:
return format(key.dn) # return a string containing the MO for the requested interface
def get_mo_for_pg(name):
cq = ClassQuery('fabricProtPathEpCont')
cq.subtree = 'children'
cq.subtreeClassFilter = 'fabricPathEp'
cq.subtreePropFilter = 'eq(fabricPathEp.name, "{0}")'.format(name)
req = moDir.query(cq)
for key in req[0].children:
return format(key.dn)
def create_static_path(tenant_name, app_name, epg_name, path_name, encap_id, mode_name):
vlan_id = 'vlan-' + encap_id
uni_mo = moDir.lookupByDn('uni')
tenant_mo = Tenant(uni_mo, tenant_name)
app_mo = Ap(tenant_mo, app_name)
epg_mo = AEPg(app_mo, epg_name)
rspathatt_mo = RsPathAtt(epg_mo, tDn=path_name, instrImedcy=u'immediate', encap=vlan_id, mode=mode_name)
config = ConfigRequest()
config.addMo(tenant_mo)
moDir.commit(config)
moDir = apic_logon('https://sandboxapicdc.cisco.com', 'admin', 'password')
with open('Test.csv') as csvfile:
reader = csv.DictReader(csvfile, delimiter=';')
for row in reader:
# Checking whether it is regular port or PC/VPC
node = unicode(str(row['NODE']))
# make sure node iD is numeric and port id is ethx/y -> reguylar port
if node.isnumeric():
path = get_mo_for_interface(str(row['NODE']), str(row['TARGET']))
create_static_path(str(row['TENANT']), str(row['APP']), str(row['EPG']), path, str(row['VLAN']),
str(row['MODE']))
print ("Adding -> Tenant: {0} - APP: {1} - EPG: {2} - LEAF: {3} - TARGET: {4} - ENCAP: {5} "
"- MODE: {6}".format(str(row['TENANT']),
str(row['APP']), str(row['EPG']), str(row['NODE']), str(row['TARGET']),
str(row['VLAN']), str(row['MODE'])))
else:
path = get_mo_for_pg(str(row['TARGET']))
create_static_path(str(row['TENANT']), str(row['APP']), str(row['EPG']), path, str(row['VLAN']),
str(row['MODE']))
print ("Adding -> Tenant: {0} - APP: {1} - EPG: {2} - LEAF: {3} - TARGET: {4} - ENCAP: {5} "
"- MODE: {6}".format(str(row['TENANT']),
str(row['APP']), str(row['EPG']), str(row['NODE']), str(row['TARGET']),
str(row['VLAN']), str(row['MODE'])))
moDir.logout()
| UTF-8 | Python | false | false | 3,652 | py | 13 | aci_add_static-paths.py | 7 | 0.60241 | 0.596386 | 0 | 86 | 41.465116 | 111 |
timxian/caraml | 10,883,447,159,683 | cd19d2462778233c51aa8b9d36ec18a3aee2d02c | 8fec6af8d442811428247f226595b7e4356c4b40 | /tests/test_proxy.py | 34518fdc2a9f1a9dd16035577e7b72c2e6ba0996 | [
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | https://github.com/timxian/caraml | 5dcd5213b767cbacbc09dee9be7c190047bf0a53 | 2a6ff5e51c7294acb8273e140ea2280b63ac9408 | refs/heads/master | 2023-04-08T19:55:56.596509 | 2018-10-28T21:15:34 | 2018-10-28T21:15:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from threading import Thread
import pytest
from caraml.zmq import ZmqProxyThread, ZmqSender, ZmqReceiver, ZmqPusher, ZmqPuller
@pytest.mark.timeout(1)
def test_proxyed_send_repceive():
host = '127.0.0.1'
port_frontend = 7000
port_backend = 7001
N = 6
msg_send = list(range(N))
msg_receive = [None] * len(msg_send)
def send(N):
sender = ZmqSender(host=host,
port=port_frontend,
serializer='pickle')
for i in range(N):
sender.send(msg_send[i])
def receive(N):
receiver = ZmqReceiver(host=host,
port=port_backend,
bind=False,
deserializer='pickle')
for i in range(N):
n = receiver.recv()
msg_receive[i] = n
server_thread = Thread(target=send, args=[N])
client_thread = Thread(target=receive, args=[N])
in_add = 'tcp://{}:{}'.format(host, port_frontend)
out_add = 'tcp://{}:{}'.format(host, port_backend)
proxy_thread = ZmqProxyThread(in_add, out_add, pattern='router-dealer')
client_thread.start()
server_thread.start()
proxy_thread.start()
client_thread.join()
server_thread.join()
assert msg_send == msg_receive
@pytest.mark.timeout(1)
def test_proxyed_pull_push():
host = '127.0.0.1'
port_frontend = 7002
port_backend = 7003
N = 6
msg_push = list(range(N))
msg_pull = [None] * len(msg_push)
def push(N):
pusher = ZmqPusher(host=host,
port=port_frontend,
serializer='pickle')
for i in range(N):
pusher.push(msg_push[i])
def pull(N):
puller = ZmqPuller(host=host,
port=port_backend,
bind=False,
deserializer='pickle')
for i in range(N):
n = puller.pull()
msg_pull[i] = n
server_thread = Thread(target=push, args=[N])
client_thread = Thread(target=pull, args=[N])
in_add = 'tcp://{}:{}'.format(host, port_frontend)
out_add = 'tcp://{}:{}'.format(host, port_backend)
proxy_thread = ZmqProxyThread(in_add, out_add, pattern='pull-push')
client_thread.start()
server_thread.start()
proxy_thread.start()
client_thread.join()
server_thread.join()
assert msg_push == msg_pull
| UTF-8 | Python | false | false | 2,450 | py | 11 | test_proxy.py | 9 | 0.544082 | 0.53102 | 0 | 83 | 28.518072 | 83 |
ManikSinghSethi/SoftDesSp15 | 2,362,232,024,183 | bf88294b5e34e2779ca9f60e6e0e27eea71cb75a | c4dfcd23aa7e2511efc7a749f7dce293c102e240 | /computational_art/recursive_art.py | d01e2420d18faff97206faa6027c189e9896bbae | [] | no_license | https://github.com/ManikSinghSethi/SoftDesSp15 | 253db30f18cb0521cdf87611ffeca6a5f0591afa | b080febffecf9a0351131957410d85ab0d4c70bb | refs/heads/master | 2021-01-24T02:45:36.751664 | 2015-03-30T03:56:12 | 2015-03-30T03:56:12 | 30,140,333 | 0 | 0 | null | true | 2015-02-01T08:03:28 | 2015-02-01T08:03:28 | 2015-02-01T08:03:28 | 2015-01-31T23:05:05 | 3,839 | 0 | 0 | 0 | Python | null | null | """ TODO: Put your header comment here """
import random
<<<<<<< HEAD
from math import cos, sin, pi
=======
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
from PIL import Image
def build_random_function(min_depth, max_depth):
""" Builds a random function of depth at least min_depth and depth
at most max_depth (see assignment writeup for definition of depth
in this context)
min_depth: the minimum depth of the random function
max_depth: the maximum depth of the random function
returns: the randomly generated function represented as a nested list
(see assignment writeup for details on the representation of
these functions)
"""
<<<<<<< HEAD
randdepth = random.randint(min_depth, max_depth)
fn = randmaker(randdepth)
return fn
def randmaker(depth):
if depth == 1:
randxy = random.randint(0,1)
return ["x", "y"][randxy]
else:
randnumber = random.randint(1,6)
if randnumber == 1:
# prod = a*b
return ["prod", randmaker(depth-1), randmaker(depth-1)]
elif randnumber == 2:
# avg = 0.5*(a+b)
return ["avg", randmaker(depth-1), randmaker(depth-1)]
elif randnumber == 3:
# cospi = cos(pi*a)
return ["cospi", randmaker(depth-1)]
elif randnumber == 4:
# sinpi = sin(pi*a)
return ["sinpi", randmaker(depth-1)]
elif randnumber == 5:
return ["cube", randmaker(depth-1)]
elif randnumber == 6:
return ["ex", randmaker(depth-1)]
# TODO: implement this
# pass
=======
# TODO: implement this
pass
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
def evaluate_random_function(f, x, y):
""" Evaluate the random function f with inputs x,y
Representation of the function f is defined in the assignment writeup
f: the function to evaluate
x: the value of x to be used to evaluate the function
y: the value of y to be used to evaluate the function
returns: the function value
>>> evaluate_random_function(["x"],-0.5, 0.75)
-0.5
>>> evaluate_random_function(["y"],0.1,0.02)
0.02
"""
<<<<<<< HEAD
if f[0] == "x":
return x
elif f[0] == "y":
return y
elif f[0] == "prod":
return evaluate_random_function(f[1], x, y)*evaluate_random_function(f[2], x, y)
elif f[0] == "avg":
return (evaluate_random_function(f[1], x, y)+evaluate_random_function(f[2], x, y))*0.5
elif f[0] == "cospi":
return cos(pi*evaluate_random_function(f[1], x, y))
elif f[0] == "sinpi":
return sin(pi*evaluate_random_function(f[1], x, y))
elif f[0] == "cube":
return (evaluate_random_function(f[1], x, y))**3
elif f[0] == "ex":
return abs(evaluate_random_function(f[1], x, y))**0.5
=======
# TODO: implement this
pass
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):
""" Given an input value in the interval [input_interval_start,
input_interval_end], return an output value scaled to fall within
the output interval [output_interval_start, output_interval_end].
val: the value to remap
input_interval_start: the start of the interval that contains all
possible values for val
input_interval_end: the end of the interval that contains all possible
values for val
output_interval_start: the start of the interval that contains all
possible output values
output_inteval_end: the end of the interval that contains all possible
output values
returns: the value remapped from the input to the output interval
>>> remap_interval(0.5, 0, 1, 0, 10)
5.0
>>> remap_interval(5, 4, 6, 0, 2)
1.0
>>> remap_interval(5, 4, 6, 1, 2)
1.5
"""
<<<<<<< HEAD
# val = (output_interval_end-output_interval_start)/(input_interval_end-input_interval_start)
val = float(val - input_interval_start)/(input_interval_end - input_interval_start)*(output_interval_end-output_interval_start)+output_interval_start
# (input_interval_end - val)/(input_interval_start - input_interval_end)*(output_interval_start + output_interval_end)
# return output_interval_start+float((output_interval_end-output_interval_start))/(input_interval_end-input_interval_start)*(val-input_interval_start)
return val
# TODO: implement this
# pass
=======
# TODO: implement this
pass
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
def color_map(val):
""" Maps input value between -1 and 1 to an integer 0-255, suitable for
use as an RGB color code.
val: value to remap, must be a float in the interval [-1, 1]
returns: integer in the interval [0,255]
>>> color_map(-1.0)
0
>>> color_map(1.0)
255
>>> color_map(0.0)
127
>>> color_map(0.5)
191
"""
# NOTE: This relies on remap_interval, which you must provide
color_code = remap_interval(val, -1, 1, 0, 255)
return int(color_code)
def test_image(filename, x_size=350, y_size=350):
""" Generate test image with random pixels and save as an image file.
filename: string filename for image (should be .png)
x_size, y_size: optional args to set image dimensions (default: 350)
"""
# Create image and loop over all pixels
im = Image.new("RGB", (x_size, y_size))
pixels = im.load()
for i in range(x_size):
for j in range(y_size):
x = remap_interval(i, 0, x_size, -1, 1)
y = remap_interval(j, 0, y_size, -1, 1)
pixels[i, j] = (random.randint(0, 255), # Red channel
random.randint(0, 255), # Green channel
random.randint(0, 255)) # Blue channel
im.save(filename)
def generate_art(filename, x_size=350, y_size=350):
""" Generate computational art and save as an image file.
filename: string filename for image (should be .png)
x_size, y_size: optional args to set image dimensions (default: 350)
"""
# Functions for red, green, and blue channels - where the magic happens!
<<<<<<< HEAD
red_function = build_random_function(10, 12)
green_function = build_random_function(9, 12)
blue_function = build_random_function(10, 12)
print red_function
print green_function
print blue_function
=======
red_function = ["x"]
green_function = ["y"]
blue_function = ["x"]
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
# Create image and loop over all pixels
im = Image.new("RGB", (x_size, y_size))
pixels = im.load()
for i in range(x_size):
for j in range(y_size):
x = remap_interval(i, 0, x_size, -1, 1)
y = remap_interval(j, 0, y_size, -1, 1)
pixels[i, j] = (
color_map(evaluate_random_function(red_function, x, y)),
color_map(evaluate_random_function(green_function, x, y)),
color_map(evaluate_random_function(blue_function, x, y))
)
im.save(filename)
if __name__ == '__main__':
import doctest
doctest.testmod()
# Create some computational art!
# TODO: Un-comment the generate_art function call after you
# implement remap_interval and evaluate_random_function
<<<<<<< HEAD
generate_art("myart.png")
# Test that PIL is installed correctly
# TODO: Comment or remove this function call after testing PIL install
# test_image("noise.png")
=======
#generate_art("myart.png")
# Test that PIL is installed correctly
# TODO: Comment or remove this function call after testing PIL install
test_image("noise.png")
>>>>>>> 68b2965c9c5fa90bd2433f3b7f672e6b29c1bbbb
| UTF-8 | Python | false | false | 8,136 | py | 12 | recursive_art.py | 9 | 0.598574 | 0.560841 | 0 | 240 | 32.9 | 154 |
bubthegreat/PyRom | 1,992,864,873,506 | b090f1d76caa3628e42010f7c78b5cd72a4f55ec | 8a8e71a781e93b49a9ec73ecb997b69f8e80060f | /src/rom24/commands/do_wiznet.py | 954aa5977ad22eaedff276a210adfe74fb32f47d | [] | no_license | https://github.com/bubthegreat/PyRom | 82a35ceddfedb64c144d953722b1fc9271af39c9 | decb4bc20e4911e1787977a57c83df38e16ad6a7 | refs/heads/master | 2022-12-07T12:31:29.796552 | 2021-12-26T21:07:09 | 2021-12-26T21:07:09 | 292,462,898 | 0 | 0 | null | true | 2020-09-03T04:08:59 | 2020-09-03T04:08:58 | 2020-01-04T22:07:43 | 2014-08-29T15:05:03 | 3,214 | 0 | 0 | 0 | null | false | false | import logging
logger = logging.getLogger(__name__)
from rom24 import merc
from rom24 import interp
from rom24 import const
from rom24 import state_checks
def do_wiznet(ch, argument):
if not argument:
if state_checks.IS_SET(ch.wiznet, merc.WIZ_ON):
ch.send("Signing off of Wiznet.\n")
ch.wiznet = state_checks.REMOVE_BIT(ch.wiznet, merc.WIZ_ON)
else:
ch.send("Welcome to Wiznet!\n")
ch.wiznet = state_checks.SET_BIT(ch.wiznet, merc.WIZ_ON)
return
if "on".startswith(argument):
ch.send("Welcome to Wiznet!\n")
ch.wiznet = state_checks.SET_BIT(ch.wiznet, merc.WIZ_ON)
return
if "off".startswith(argument):
ch.send("Signing off of Wiznet.\n")
ch.wiznet = state_checks.REMOVE_BIT(ch.wiznet, merc.WIZ_ON)
return
buf = ""
# show wiznet status
if "status".startswith(argument):
if not state_checks.IS_SET(ch.wiznet, merc.WIZ_ON):
buf += "off "
for name, flag in const.wiznet_table.items():
if state_checks.IS_SET(ch.wiznet, flag.bit):
buf += name + " "
ch.send("Wiznet status:\n%s\n" % buf)
return
if "show".startswith(argument):
# list of all wiznet options
buf = ""
for name, flag in const.wiznet_table.items():
if flag.level <= ch.trust:
buf += name + " "
ch.send("Wiznet options available to you are:\n%s\n" % buf)
return
flag = state_checks.prefix_lookup(const.wiznet_table, argument)
if not flag or ch.trust < flag.level:
ch.send("No such option.\n")
return
if state_checks.IS_SET(ch.wiznet, flag.bit):
ch.send("You will no longer see %s on wiznet.\n" % flag.name)
ch.wiznet = state_checks.REMOVE_BIT(ch.wiznet, flag.bit)
return
else:
ch.send("You will now see %s on wiznet.\n" % flag.name)
ch.wiznet = state_checks.SET_BIT(ch.wiznet, flag.bit)
return
interp.register_command(
interp.cmd_type("wiznet", do_wiznet, merc.POS_DEAD, merc.IM, merc.LOG_NORMAL, 1)
)
| UTF-8 | Python | false | false | 2,146 | py | 355 | do_wiznet.py | 347 | 0.592265 | 0.588071 | 0 | 63 | 33.063492 | 84 |
manojakumarpanda/This-is-first | 5,970,004,558,621 | 246d8501383cda495b677a3e6de71f982e98cee3 | 7b81fb89873f48b63da2f4f607039f2d53fd4135 | /sequence/string/in3a_opaaa.py | 5a22909c6e83033f15372ee8350e7d0a8ee52b9b | [] | no_license | https://github.com/manojakumarpanda/This-is-first | 9f7d498ace68fc53146e5c2d0769e612b7eedd43 | 147c230604c35b19bdc40bd9f6207f8aaf2708b1 | refs/heads/master | 2021-07-30T10:38:56.045002 | 2021-07-26T09:39:19 | 2021-07-26T09:39:19 | 233,637,965 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #program for input of a4b3c2 the output should be aaaabbbcc
st=input('Enter a string in the format of a3b3::')
st2=''
for ch in st:
if (ch>='a'and ch<='z')or(ch>='A' and ch<='Z'):
cha=ch
elif ch>='0'and ch<='9':
i=int(ch)
st2+=cha*i
else:
pritn('You have entered a wrong format:')
print('The sring you have entered is {0} and afte converssion the string is{1}'.format(st,st2))
| UTF-8 | Python | false | false | 421 | py | 158 | in3a_opaaa.py | 135 | 0.608076 | 0.579572 | 0 | 12 | 34.083333 | 95 |
tatyana12/Django- | 11,261,404,270,025 | d989493f9be07bdac1ef5b10f5cce37fa98b32cf | 2ec2cd988224d69a031d145d15475c44144a661e | /apps/realtors/admin/forms/__init__.py | eecedd1cbcaadf906327ac937e522a416f048208 | [] | no_license | https://github.com/tatyana12/Django- | dfd620ef924c190ba319522fde7548c00e4822c1 | cadc053776e23ecc542be99ae8c755c5a8b0c084 | refs/heads/master | 2020-03-15T15:53:42.688022 | 2018-05-09T01:07:59 | 2018-05-09T01:07:59 | 132,223,075 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from realtors import RealtorAdminFormSet, RealtorAdminForm
__all__=('RealtorAdminFormSet','RealtorAdminForm') | UTF-8 | Python | false | false | 109 | py | 190 | __init__.py | 140 | 0.834862 | 0.834862 | 0 | 2 | 54 | 58 |
shmiko/discograph | 11,003,706,215,928 | 1f202a271a2140d9b52a16a56bd2cf81aa5dcbf2 | 6fbcf5486fdc0ea2c8240e362ee7eb02391ebf1f | /discograph/library/test/test_Release_from_element.py | 4e079ce7a380ccaf22f7f0eadcd060d308324e15 | [
"MIT"
] | permissive | https://github.com/shmiko/discograph | 5532386b3cefd28bb9193b4a65d8039e7132ba6c | 8ed9ab4248a458a5a4753274861590c446959f8d | refs/heads/master | 2020-04-29T07:26:54.175232 | 2015-10-14T16:27:46 | 2015-10-14T16:27:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- encoding: utf-8 -*-
import mongoengine
import unittest
from abjad import stringtools
from discograph.library.Bootstrapper import Bootstrapper
from discograph import library
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
class Test(unittest.TestCase):
database_name = 'discograph-test'
def setUp(self):
self.database = mongoengine.connect(self.database_name)
def tearDown(self):
self.database.drop_database(self.database_name)
self.database.close()
    def test_01(self):
        """First release in the dump round-trips completely.

        Checks two things against the first element yielded by the
        release iterator: (1) ``Bootstrapper.prettify`` reproduces the
        expected XML fixture, and (2) ``Release.from_element`` builds a
        Mongo document with the expected artists, companies, extra
        artists (with credit roles), formats, identifiers, labels,
        release date, styles, and six-track tracklist.
        """
        iterator = Bootstrapper.get_iterator('release')
        # First element of the release dump: "Stockholm" by The Persuader.
        release_element = next(iterator)
        actual = stringtools.normalize(Bootstrapper.prettify(release_element))
        expected = stringtools.normalize('''
            <?xml version="1.0" ?>
            <release id="1" status="Accepted">
                <artists>
                    <artist>
                        <id>1</id>
                        <name>Persuader, The</name>
                        <anv/>
                        <join/>
                        <role/>
                        <tracks/>
                    </artist>
                </artists>
                <title>Stockholm</title>
                <labels>
                    <label catno="SK032" name="Svek"/>
                </labels>
                <extraartists>
                    <artist>
                        <id>239</id>
                        <name>Jesper Dahlb\xe4ck</name>
                        <anv/>
                        <join/>
                        <role>Music By [All Tracks By]</role>
                        <tracks/>
                    </artist>
                </extraartists>
                <formats>
                    <format name="Vinyl" qty="2" text="">
                        <descriptions>
                            <description>12"</description>
                            <description>33 \u2153 RPM</description>
                        </descriptions>
                    </format>
                </formats>
                <genres>
                    <genre>Electronic</genre>
                </genres>
                <styles>
                    <style>Deep House</style>
                </styles>
                <country>Sweden</country>
                <released>1999-03-00</released>
                <notes>The song titles are the names of Stockholm's districts.
            </notes>
                <data_quality>Complete and Correct</data_quality>
                <tracklist>
                    <track>
                        <position>A</position>
                        <title>\xd6stermalm</title>
                        <duration>4:45</duration>
                    </track>
                    <track>
                        <position>B1</position>
                        <title>Vasastaden</title>
                        <duration>6:11</duration>
                    </track>
                    <track>
                        <position>B2</position>
                        <title>Kungsholmen</title>
                        <duration>2:49</duration>
                    </track>
                    <track>
                        <position>C1</position>
                        <title>S\xf6dermalm</title>
                        <duration>5:38</duration>
                    </track>
                    <track>
                        <position>C2</position>
                        <title>Norrmalm</title>
                        <duration>4:52</duration>
                    </track>
                    <track>
                        <position>D</position>
                        <title>Gamla Stan</title>
                        <duration>5:16</duration>
                    </track>
                </tracklist>
                <identifiers>
                    <identifier description="A-Side" type="Matrix / Runout" value="MPO SK 032 A1 G PHRUPMASTERGENERAL T27 LONDON"/>
                    <identifier description="B-Side" type="Matrix / Runout" value="MPO SK 032 B1"/>
                    <identifier description="C-Side" type="Matrix / Runout" value="MPO SK 032 C1"/>
                    <identifier description="D-Side" type="Matrix / Runout" value="MPO SK 032 D1"/>
                </identifiers>
                <videos>
                    <video duration="290" embed="true" src="http://www.youtube.com/watch?v=AHuQWcylaU4">
                        <title>The Persuader (Jesper Dahlb\xe4ck) - \xd6stermalm</title>
                        <description>The Persuader (Jesper Dahlb\xe4ck) - \xd6stermalm</description>
                    </video>
                    <video duration="380" embed="true" src="http://www.youtube.com/watch?v=5rA8CTKKEP4">
                        <title>The Persuader - Vasastaden</title>
                        <description>The Persuader - Vasastaden</description>
                    </video>
                    <video duration="335" embed="true" src="http://www.youtube.com/watch?v=QVdDhOnoR8k">
                        <title>The Persuader-Stockholm-Sodermalm</title>
                        <description>The Persuader-Stockholm-Sodermalm</description>
                    </video>
                    <video duration="289" embed="true" src="http://www.youtube.com/watch?v=hy47qgyJeG0">
                        <title>The Persuader - Norrmalm</title>
                        <description>The Persuader - Norrmalm</description>
                    </video>
                </videos>
                <companies>
                    <company>
                        <id>271046</id>
                        <name>The Globe Studios</name>
                        <catno/>
                        <entity_type>23</entity_type>
                        <entity_type_name>Recorded At</entity_type_name>
                        <resource_url>http://api.discogs.com/labels/271046</resource_url>
                    </company>
                    <company>
                        <id>56025</id>
                        <name>MPO</name>
                        <catno/>
                        <entity_type>17</entity_type>
                        <entity_type_name>Pressed By</entity_type_name>
                        <resource_url>http://api.discogs.com/labels/56025</resource_url>
                    </company>
                </companies>
            </release>
            ''')
        assert actual.splitlines() == expected.splitlines()
        # Now parse the same element into a Mongo document and compare
        # its storage-format repr against the expected fixture.
        release_document = library.Release.from_element(release_element)
        actual = format(release_document)
        expected = stringtools.normalize(u"""
            discograph.library.mongo.Release(
                artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=1,
                        name='Persuader, The',
                        ),
                    ],
                companies=[
                    discograph.library.mongo.CompanyCredit(
                        entity_type=23,
                        entity_type_name='Recorded At',
                        name='The Globe Studios',
                        ),
                    discograph.library.mongo.CompanyCredit(
                        entity_type=17,
                        entity_type_name='Pressed By',
                        name='MPO',
                        ),
                    ],
                country='Sweden',
                discogs_id=1,
                extra_artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=239,
                        name='Jesper Dahlbäck',
                        roles=[
                            discograph.library.mongo.CreditRole(
                                detail='All Tracks By',
                                name='Music By',
                                ),
                            ],
                        ),
                    ],
                formats=[
                    discograph.library.mongo.Format(
                        descriptions=['12"', '33 ⅓ RPM'],
                        name='Vinyl',
                        quantity=2,
                        ),
                    ],
                genres=['Electronic'],
                identifiers=[
                    discograph.library.mongo.Identifier(
                        type_='Matrix / Runout',
                        value='MPO SK 032 A1 G PHRUPMASTERGENERAL T27 LONDON',
                        ),
                    discograph.library.mongo.Identifier(
                        type_='Matrix / Runout',
                        value='MPO SK 032 B1',
                        ),
                    discograph.library.mongo.Identifier(
                        type_='Matrix / Runout',
                        value='MPO SK 032 C1',
                        ),
                    discograph.library.mongo.Identifier(
                        type_='Matrix / Runout',
                        value='MPO SK 032 D1',
                        ),
                    ],
                labels=[
                    discograph.library.mongo.LabelCredit(
                        catalog_number='SK032',
                        name='Svek',
                        ),
                    ],
                release_date=datetime.datetime(1999, 3, 1, 0, 0),
                styles=['Deep House'],
                title='Stockholm',
                tracklist=[
                    discograph.library.mongo.Track(
                        duration='4:45',
                        position='A',
                        title='Östermalm',
                        ),
                    discograph.library.mongo.Track(
                        duration='6:11',
                        position='B1',
                        title='Vasastaden',
                        ),
                    discograph.library.mongo.Track(
                        duration='2:49',
                        position='B2',
                        title='Kungsholmen',
                        ),
                    discograph.library.mongo.Track(
                        duration='5:38',
                        position='C1',
                        title='Södermalm',
                        ),
                    discograph.library.mongo.Track(
                        duration='4:52',
                        position='C2',
                        title='Norrmalm',
                        ),
                    discograph.library.mongo.Track(
                        duration='5:16',
                        position='D',
                        title='Gamla Stan',
                        ),
                    ],
                )
            """)
        assert actual == expected
    def test_02(self):
        """Third release in the dump (a mixed compilation) parses fully.

        Advances the release iterator to the third element ("Profound
        Sounds Vol. 1" mixed by Josh Wink), verifies the prettified XML
        fixture, then verifies that ``Release.from_element`` captures
        per-track artist credits (including ``anv`` and ``join``
        values), per-track extra artists with remix/presenter roles,
        the master id, and the full 14-track tracklist.
        """
        iterator = Bootstrapper.get_iterator('release')
        # Skip to the third element of the release dump.
        release_element = next(iterator)
        release_element = next(iterator)
        release_element = next(iterator)
        actual = stringtools.normalize(Bootstrapper.prettify(release_element))
        expected = stringtools.normalize('''
            <?xml version="1.0" ?>
            <release id="3" status="Accepted">
                <artists>
                    <artist>
                        <id>3</id>
                        <name>Josh Wink</name>
                        <anv/>
                        <join/>
                        <role/>
                        <tracks/>
                    </artist>
                </artists>
                <title>Profound Sounds Vol. 1</title>
                <labels>
                    <label catno="CK 63628" name="Ruffhouse Records"/>
                </labels>
                <extraartists>
                    <artist>
                        <id>3</id>
                        <name>Josh Wink</name>
                        <anv/>
                        <join/>
                        <role>DJ Mix</role>
                        <tracks/>
                    </artist>
                </extraartists>
                <formats>
                    <format name="CD" qty="1" text="">
                        <descriptions>
                            <description>Compilation</description>
                            <description>Mixed</description>
                        </descriptions>
                    </format>
                </formats>
                <genres>
                    <genre>Electronic</genre>
                </genres>
                <styles>
                    <style>Techno</style>
                    <style>Tech House</style>
                </styles>
                <country>US</country>
                <released>1999-07-13</released>
                <notes>1: Track title is given as "D2" (which is the side of record on the vinyl version of i220-010 release). This was also released on CD where this track is listed on 8th position. On both version no titles are given (only writing/producing credits). Both versions of i220-010 can be seen on the master release page [m27265]. Additionally this track contains female vocals that aren't present on original i220-010 release.
            4: Credited as J. Dahlb\xe4ck.
            5: Track title wrongly given as "Vol. 1".
            6: Credited as Gez Varley presents Tony Montana.
            12: Track exclusive to Profound Sounds Vol. 1.</notes>
                <master_id>66526</master_id>
                <data_quality>Correct</data_quality>
                <tracklist>
                    <track>
                        <position>1</position>
                        <title>Untitled 8</title>
                        <duration>7:00</duration>
                        <artists>
                            <artist>
                                <id>5</id>
                                <name>Heiko Laux</name>
                                <anv/>
                                <join>&</join>
                                <role/>
                                <tracks/>
                            </artist>
                            <artist>
                                <id>4</id>
                                <name>Johannes Heil</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>2</position>
                        <title>Anjua (Sneaky 3)</title>
                        <duration>5:28</duration>
                        <artists>
                            <artist>
                                <id>15525</id>
                                <name>Karl Axel Bissler</name>
                                <anv>K.A.B.</anv>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>3</position>
                        <title>When The Funk Hits The Fan (Mood II Swing When The Dub Hits The Fan)</title>
                        <duration>5:25</duration>
                        <artists>
                            <artist>
                                <id>7</id>
                                <name>Sylk 130</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                        <extraartists>
                            <artist>
                                <id>8</id>
                                <name>Mood II Swing</name>
                                <anv/>
                                <join/>
                                <role>Remix</role>
                                <tracks/>
                            </artist>
                        </extraartists>
                    </track>
                    <track>
                        <position>4</position>
                        <title>What's The Time, Mr. Templar</title>
                        <duration>4:27</duration>
                        <artists>
                            <artist>
                                <id>1</id>
                                <name>Persuader, The</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>5</position>
                        <title>Vol. 2</title>
                        <duration>5:36</duration>
                        <artists>
                            <artist>
                                <id>267132</id>
                                <name>Care Company (2)</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>6</position>
                        <title>Political Prisoner</title>
                        <duration>3:37</duration>
                        <artists>
                            <artist>
                                <id>6981</id>
                                <name>Gez Varley</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>7</position>
                        <title>Pop Kulture</title>
                        <duration>5:03</duration>
                        <artists>
                            <artist>
                                <id>11</id>
                                <name>DJ Dozia</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>8</position>
                        <title>K-Mart Shopping (Hi-Fi Mix)</title>
                        <duration>5:42</duration>
                        <artists>
                            <artist>
                                <id>10702</id>
                                <name>Nerio's Dubwork</name>
                                <anv/>
                                <join>Meets</join>
                                <role/>
                                <tracks/>
                            </artist>
                            <artist>
                                <id>233190</id>
                                <name>Kathy Lee</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                        <extraartists>
                            <artist>
                                <id>23</id>
                                <name>Alex Hi-Fi</name>
                                <anv/>
                                <join/>
                                <role>Remix</role>
                                <tracks/>
                            </artist>
                        </extraartists>
                    </track>
                    <track>
                        <position>9</position>
                        <title>Lovelee Dae (Eight Miles High Mix)</title>
                        <duration>5:47</duration>
                        <artists>
                            <artist>
                                <id>13</id>
                                <name>Blaze</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                        <extraartists>
                            <artist>
                                <id>14</id>
                                <name>Eight Miles High</name>
                                <anv/>
                                <join/>
                                <role>Remix</role>
                                <tracks/>
                            </artist>
                        </extraartists>
                    </track>
                    <track>
                        <position>10</position>
                        <title>Sweat</title>
                        <duration>6:06</duration>
                        <artists>
                            <artist>
                                <id>67226</id>
                                <name>Stacey Pullen</name>
                                <anv/>
                                <join>Presents</join>
                                <role/>
                                <tracks/>
                            </artist>
                            <artist>
                                <id>7554</id>
                                <name>Black Odyssey</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                        <extraartists>
                            <artist>
                                <id>67226</id>
                                <name>Stacey Pullen</name>
                                <anv/>
                                <join/>
                                <role>Presenter</role>
                                <tracks/>
                            </artist>
                        </extraartists>
                    </track>
                    <track>
                        <position>11</position>
                        <title>Silver</title>
                        <duration>3:16</duration>
                        <artists>
                            <artist>
                                <id>3906</id>
                                <name>Christian Smith & John Selway</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>12</position>
                        <title>Untitled</title>
                        <duration>2:46</duration>
                        <artists>
                            <artist>
                                <id>3</id>
                                <name>Josh Wink</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>13</position>
                        <title>Boom Box</title>
                        <duration>3:41</duration>
                        <artists>
                            <artist>
                                <id>19</id>
                                <name>Sound Associates</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                    <track>
                        <position>14</position>
                        <title>Track 2</title>
                        <duration>3:39</duration>
                        <artists>
                            <artist>
                                <id>20</id>
                                <name>Percy X</name>
                                <anv/>
                                <join>,</join>
                                <role/>
                                <tracks/>
                            </artist>
                        </artists>
                    </track>
                </tracklist>
                <identifiers>
                    <identifier type="Barcode" value="074646362822"/>
                </identifiers>
                <companies>
                    <company>
                        <id>93330</id>
                        <name>Columbia Records</name>
                        <catno/>
                        <entity_type>10</entity_type>
                        <entity_type_name>Manufactured By</entity_type_name>
                        <resource_url>http://api.discogs.com/labels/93330</resource_url>
                    </company>
                    <company>
                        <id>93330</id>
                        <name>Columbia Records</name>
                        <catno/>
                        <entity_type>9</entity_type>
                        <entity_type_name>Distributed By</entity_type_name>
                        <resource_url>http://api.discogs.com/labels/93330</resource_url>
                    </company>
                </companies>
            </release>
            ''')
        assert actual.splitlines() == expected.splitlines()
        # Parse into a Mongo document and compare its storage-format repr.
        release_document = library.Release.from_element(release_element)
        actual = format(release_document)
        expected = stringtools.normalize(u"""
            discograph.library.mongo.Release(
                artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=3,
                        name='Josh Wink',
                        ),
                    ],
                companies=[
                    discograph.library.mongo.CompanyCredit(
                        entity_type=10,
                        entity_type_name='Manufactured By',
                        name='Columbia Records',
                        ),
                    discograph.library.mongo.CompanyCredit(
                        entity_type=9,
                        entity_type_name='Distributed By',
                        name='Columbia Records',
                        ),
                    ],
                country='US',
                discogs_id=3,
                extra_artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=3,
                        name='Josh Wink',
                        roles=[
                            discograph.library.mongo.CreditRole(
                                name='DJ Mix',
                                ),
                            ],
                        ),
                    ],
                formats=[
                    discograph.library.mongo.Format(
                        descriptions=['Compilation', 'Mixed'],
                        name='CD',
                        quantity=1,
                        ),
                    ],
                genres=['Electronic'],
                identifiers=[
                    discograph.library.mongo.Identifier(
                        type_='Barcode',
                        value='074646362822',
                        ),
                    ],
                labels=[
                    discograph.library.mongo.LabelCredit(
                        catalog_number='CK 63628',
                        name='Ruffhouse Records',
                        ),
                    ],
                master_id=66526,
                release_date=datetime.datetime(1999, 7, 13, 0, 0),
                styles=['Techno', 'Tech House'],
                title='Profound Sounds Vol. 1',
                tracklist=[
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=5,
                                join='&',
                                name='Heiko Laux',
                                ),
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=4,
                                join=',',
                                name='Johannes Heil',
                                ),
                            ],
                        duration='7:00',
                        position='1',
                        title='Untitled 8',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                anv='K.A.B.',
                                discogs_id=15525,
                                join=',',
                                name='Karl Axel Bissler',
                                ),
                            ],
                        duration='5:28',
                        position='2',
                        title='Anjua (Sneaky 3)',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=7,
                                join=',',
                                name='Sylk 130',
                                ),
                            ],
                        duration='5:25',
                        extra_artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=8,
                                name='Mood II Swing',
                                roles=[
                                    discograph.library.mongo.CreditRole(
                                        name='Remix',
                                        ),
                                    ],
                                ),
                            ],
                        position='3',
                        title='When The Funk Hits The Fan (Mood II Swing When The Dub Hits The Fan)',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=1,
                                join=',',
                                name='Persuader, The',
                                ),
                            ],
                        duration='4:27',
                        position='4',
                        title="What's The Time, Mr. Templar",
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=267132,
                                join=',',
                                name='Care Company (2)',
                                ),
                            ],
                        duration='5:36',
                        position='5',
                        title='Vol. 2',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=6981,
                                join=',',
                                name='Gez Varley',
                                ),
                            ],
                        duration='3:37',
                        position='6',
                        title='Political Prisoner',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=11,
                                join=',',
                                name='DJ Dozia',
                                ),
                            ],
                        duration='5:03',
                        position='7',
                        title='Pop Kulture',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=10702,
                                join='Meets',
                                name="Nerio's Dubwork",
                                ),
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=233190,
                                join=',',
                                name='Kathy Lee',
                                ),
                            ],
                        duration='5:42',
                        extra_artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=23,
                                name='Alex Hi-Fi',
                                roles=[
                                    discograph.library.mongo.CreditRole(
                                        name='Remix',
                                        ),
                                    ],
                                ),
                            ],
                        position='8',
                        title='K-Mart Shopping (Hi-Fi Mix)',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=13,
                                join=',',
                                name='Blaze',
                                ),
                            ],
                        duration='5:47',
                        extra_artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=14,
                                name='Eight Miles High',
                                roles=[
                                    discograph.library.mongo.CreditRole(
                                        name='Remix',
                                        ),
                                    ],
                                ),
                            ],
                        position='9',
                        title='Lovelee Dae (Eight Miles High Mix)',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=67226,
                                join='Presents',
                                name='Stacey Pullen',
                                ),
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=7554,
                                join=',',
                                name='Black Odyssey',
                                ),
                            ],
                        duration='6:06',
                        extra_artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=67226,
                                name='Stacey Pullen',
                                roles=[
                                    discograph.library.mongo.CreditRole(
                                        name='Presenter',
                                        ),
                                    ],
                                ),
                            ],
                        position='10',
                        title='Sweat',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=3906,
                                join=',',
                                name='Christian Smith & John Selway',
                                ),
                            ],
                        duration='3:16',
                        position='11',
                        title='Silver',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=3,
                                join=',',
                                name='Josh Wink',
                                ),
                            ],
                        duration='2:46',
                        position='12',
                        title='Untitled',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=19,
                                join=',',
                                name='Sound Associates',
                                ),
                            ],
                        duration='3:41',
                        position='13',
                        title='Boom Box',
                        ),
                    discograph.library.mongo.Track(
                        artists=[
                            discograph.library.mongo.ArtistCredit(
                                discogs_id=20,
                                join=',',
                                name='Percy X',
                                ),
                            ],
                        duration='3:39',
                        position='14',
                        title='Track 2',
                        ),
                    ],
                )
            """)
        assert actual == expected
    def test_03(self):
        """A release with an eight-digit date string parses in full.

        Builds the element directly from an inline XML string (no dump
        iterator needed) and checks that ``Release.from_element``
        interprets the compact ``<released>20020206</released>`` form
        as February 6, 2002, and drops empty ``<extraartists/>`` and
        ``<companies/>`` containers from the document.
        """
        source = stringtools.normalize(r"""
            <?xml version="1.0" ?>
            <release id="138522" status="Accepted">
                <artists>
                    <artist>
                        <id>12584</id>
                        <name>Felix Kubin</name>
                        <anv/>
                        <join/>
                        <role/>
                        <tracks/>
                    </artist>
                </artists>
                <title>Jetlag Disco</title>
                <labels>
                    <label catno="a19" name="A-Musik"/>
                </labels>
                <extraartists/>
                <formats>
                    <format name="CD" qty="1" text="">
                        <descriptions>
                            <description>Mini</description>
                        </descriptions>
                    </format>
                </formats>
                <genres>
                    <genre>Electronic</genre>
                </genres>
                <styles>
                    <style>Acid House</style>
                    <style>Experimental</style>
                    <style>Happy Hardcore</style>
                </styles>
                <country>Germany</country>
                <released>20020206</released>
                <master_id>86193</master_id>
                <data_quality>Correct</data_quality>
                <tracklist>
                    <track>
                        <position>1</position>
                        <title>Phonebashing</title>
                        <duration/>
                    </track>
                    <track>
                        <position>2</position>
                        <title>Groscher Lausangriff</title>
                        <duration/>
                    </track>
                    <track>
                        <position>3</position>
                        <title>Mondgesang</title>
                        <duration/>
                    </track>
                    <track>
                        <position>4</position>
                        <title>Hotel Supernova</title>
                        <duration/>
                    </track>
                    <track>
                        <position>5</position>
                        <title>I lost My Heart In Reykjavik</title>
                        <duration/>
                    </track>
                    <track>
                        <position>6</position>
                        <title>Liebe Mutter</title>
                        <duration/>
                    </track>
                </tracklist>
                <videos>
                    <video duration="187" embed="true" src="http://www.youtube.com/watch?v=C2B97vlcIE8">
                        <title>Felix Kubin - Phonobashing (a19 V)</title>
                        <description>Felix Kubin - Phonobashing (a19 V)</description>
                    </video>
                    <video duration="249" embed="true" src="http://www.youtube.com/watch?v=7M4RIeePO48">
                        <title>Felix Kubin Hotel Supernova</title>
                        <description>Felix Kubin Hotel Supernova</description>
                    </video>
                </videos>
                <companies/>
            </release>
            """)
        release_element = ElementTree.fromstring(source)
        release_document = library.Release.from_element(release_element)
        actual = format(release_document)
        expected = stringtools.normalize(u"""
            discograph.library.mongo.Release(
                artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=12584,
                        name='Felix Kubin',
                        ),
                    ],
                country='Germany',
                discogs_id=138522,
                formats=[
                    discograph.library.mongo.Format(
                        descriptions=['Mini'],
                        name='CD',
                        quantity=1,
                        ),
                    ],
                genres=['Electronic'],
                labels=[
                    discograph.library.mongo.LabelCredit(
                        catalog_number='a19',
                        name='A-Musik',
                        ),
                    ],
                master_id=86193,
                release_date=datetime.datetime(2002, 2, 6, 0, 0),
                styles=['Acid House', 'Experimental', 'Happy Hardcore'],
                title='Jetlag Disco',
                tracklist=[
                    discograph.library.mongo.Track(
                        position='1',
                        title='Phonebashing',
                        ),
                    discograph.library.mongo.Track(
                        position='2',
                        title='Groscher Lausangriff',
                        ),
                    discograph.library.mongo.Track(
                        position='3',
                        title='Mondgesang',
                        ),
                    discograph.library.mongo.Track(
                        position='4',
                        title='Hotel Supernova',
                        ),
                    discograph.library.mongo.Track(
                        position='5',
                        title='I lost My Heart In Reykjavik',
                        ),
                    discograph.library.mongo.Track(
                        position='6',
                        title='Liebe Mutter',
                        ),
                    ],
                )
            """)
        assert actual == expected
    def test_04(self):
        """A release with a year-only date string parses in full.

        Same fixture as ``test_03`` except that ``<released>`` carries
        only ``2002``; ``Release.from_element`` should default the
        missing month and day to January 1, 2002.
        """
        source = stringtools.normalize(r"""
            <?xml version="1.0" ?>
            <release id="138522" status="Accepted">
                <artists>
                    <artist>
                        <id>12584</id>
                        <name>Felix Kubin</name>
                        <anv/>
                        <join/>
                        <role/>
                        <tracks/>
                    </artist>
                </artists>
                <title>Jetlag Disco</title>
                <labels>
                    <label catno="a19" name="A-Musik"/>
                </labels>
                <extraartists/>
                <formats>
                    <format name="CD" qty="1" text="">
                        <descriptions>
                            <description>Mini</description>
                        </descriptions>
                    </format>
                </formats>
                <genres>
                    <genre>Electronic</genre>
                </genres>
                <styles>
                    <style>Acid House</style>
                    <style>Experimental</style>
                    <style>Happy Hardcore</style>
                </styles>
                <country>Germany</country>
                <released>2002</released>
                <master_id>86193</master_id>
                <data_quality>Correct</data_quality>
                <tracklist>
                    <track>
                        <position>1</position>
                        <title>Phonebashing</title>
                        <duration/>
                    </track>
                    <track>
                        <position>2</position>
                        <title>Groscher Lausangriff</title>
                        <duration/>
                    </track>
                    <track>
                        <position>3</position>
                        <title>Mondgesang</title>
                        <duration/>
                    </track>
                    <track>
                        <position>4</position>
                        <title>Hotel Supernova</title>
                        <duration/>
                    </track>
                    <track>
                        <position>5</position>
                        <title>I lost My Heart In Reykjavik</title>
                        <duration/>
                    </track>
                    <track>
                        <position>6</position>
                        <title>Liebe Mutter</title>
                        <duration/>
                    </track>
                </tracklist>
                <videos>
                    <video duration="187" embed="true" src="http://www.youtube.com/watch?v=C2B97vlcIE8">
                        <title>Felix Kubin - Phonobashing (a19 V)</title>
                        <description>Felix Kubin - Phonobashing (a19 V)</description>
                    </video>
                    <video duration="249" embed="true" src="http://www.youtube.com/watch?v=7M4RIeePO48">
                        <title>Felix Kubin Hotel Supernova</title>
                        <description>Felix Kubin Hotel Supernova</description>
                    </video>
                </videos>
                <companies/>
            </release>
            """)
        release_element = ElementTree.fromstring(source)
        release_document = library.Release.from_element(release_element)
        actual = format(release_document)
        expected = stringtools.normalize(u"""
            discograph.library.mongo.Release(
                artists=[
                    discograph.library.mongo.ArtistCredit(
                        discogs_id=12584,
                        name='Felix Kubin',
                        ),
                    ],
                country='Germany',
                discogs_id=138522,
                formats=[
                    discograph.library.mongo.Format(
                        descriptions=['Mini'],
                        name='CD',
                        quantity=1,
                        ),
                    ],
                genres=['Electronic'],
                labels=[
                    discograph.library.mongo.LabelCredit(
                        catalog_number='a19',
                        name='A-Musik',
                        ),
                    ],
                master_id=86193,
                release_date=datetime.datetime(2002, 1, 1, 0, 0),
                styles=['Acid House', 'Experimental', 'Happy Hardcore'],
                title='Jetlag Disco',
                tracklist=[
                    discograph.library.mongo.Track(
                        position='1',
                        title='Phonebashing',
                        ),
                    discograph.library.mongo.Track(
                        position='2',
                        title='Groscher Lausangriff',
                        ),
                    discograph.library.mongo.Track(
                        position='3',
                        title='Mondgesang',
                        ),
                    discograph.library.mongo.Track(
                        position='4',
                        title='Hotel Supernova',
                        ),
                    discograph.library.mongo.Track(
                        position='5',
                        title='I lost My Heart In Reykjavik',
                        ),
                    discograph.library.mongo.Track(
                        position='6',
                        title='Liebe Mutter',
                        ),
                    ],
                )
            """)
        assert actual == expected
@unittest.skip("Subtracks not yet implemented.")
def test_05(self):
source = stringtools.normalize(r"""
<?xml version="1.0" ?>
<release id="4876850" status="Accepted">
<artists>
<artist>
<id>194</id>
<name>Various</name>
<anv/>
<join/>
<role/>
<tracks/>
</artist>
</artists>
<title>Contempuls 2008_09_10</title>
<labels>
<label catno="none" name="His Voice"/>
</labels>
<extraartists/>
<formats>
<format name="CD" qty="1" text="">
<descriptions>
<description>Compilation</description>
<description>Promo</description>
</descriptions>
</format>
</formats>
<genres>
<genre>Classical</genre>
</genres>
<styles>
<style>Contemporary</style>
</styles>
<country>Czech Republic</country>
<released>2011</released>
<data_quality>Needs Vote</data_quality>
<tracklist>
<track>
<position>1</position>
<title>Sahaf</title>
<duration>6:56</duration>
<artists>
<artist>
<id>866840</id>
<name>Chaya Czernowin</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>3446612</id>
<name>Ensemble Nikel</name>
<anv/>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
<track>
<position>2</position>
<title>Solipse</title>
<duration>12:22</duration>
<artists>
<artist>
<id>14563</id>
<name>Rolf Gehlhaar</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>214409</id>
<name>Rohan De Saram</name>
<anv/>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
<track>
<position>3</position>
<title>Magnitudo 9.0</title>
<duration>8:56</duration>
<artists>
<artist>
<id>3063233</id>
<name>Miroslav Srnka</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>488584</id>
<name>Klangforum Wien</name>
<anv/>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
<track>
<position/>
<title>String Quartet No. I</title>
<duration/>
<artists>
<artist>
<id>1032912</id>
<name>Peter Graham</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>645741</id>
<name>Arditti Quartet</name>
<anv>Arditti String Quartet</anv>
<join/>
<role>Ensemble</role>
<tracks/>
</artist>
</extraartists>
<sub_tracks>
<track>
<position>4</position>
<title>I. Velmi Koncentrovaně, Tiše A Jemně</title>
<duration>3:34</duration>
</track>
<track>
<position>5</position>
<title>II. Jako V Horečce</title>
<duration>2:15</duration>
</track>
<track>
<position>6</position>
<title>III. Chladně A Nezúčastněně</title>
<duration>3:33</duration>
</track>
<track>
<position>7</position>
<title>IV. Bez Zábran</title>
<duration>7:28</duration>
</track>
</sub_tracks>
</track>
<track>
<position>8</position>
<title>Saxophone Quartet</title>
<duration>10:12</duration>
<artists>
<artist>
<id>1661795</id>
<name>Luboš Mrkvička</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>714983</id>
<name>Xasax</name>
<anv>Xasax Saxophone Quartet</anv>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
<track>
<position>9</position>
<title>Just Before</title>
<duration>11:51</duration>
<artists>
<artist>
<id>657909</id>
<name>Michel van der Aa</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>3446610</id>
<name>Emanuele Torquati</name>
<anv/>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
<track>
<position>10</position>
<title>...Your Heart Stops... You Continue Writing</title>
<duration>12:37</duration>
<artists>
<artist>
<id>1366191</id>
<name>Michal Nejtek</name>
<anv/>
<join>,</join>
<role/>
<tracks/>
</artist>
</artists>
<extraartists>
<artist>
<id>2182382</id>
<name>Michel Swierczewski</name>
<anv/>
<join/>
<role>Conductor [Dir.]</role>
<tracks/>
</artist>
<artist>
<id>3446611</id>
<name>Prague Modern</name>
<anv/>
<join/>
<role>Performer</role>
<tracks/>
</artist>
</extraartists>
</track>
</tracklist>
<identifiers>
<identifier type="Matrix / Runout" value="Contempuls Sampler"/>
<identifier type="Rights Society" value="osa"/>
</identifiers>
<companies>
<company>
<id>481713</id>
<name>Hudební Informační Středisko, o. p. s.</name>
<catno/>
<entity_type>13</entity_type>
<entity_type_name>Phonographic Copyright (p)</entity_type_name>
<resource_url>http://api.discogs.com/labels/481713</resource_url>
</company>
<company>
<id>481713</id>
<name>Hudební Informační Středisko, o. p. s.</name>
<catno/>
<entity_type>14</entity_type>
<entity_type_name>Copyright (c)</entity_type_name>
<resource_url>http://api.discogs.com/labels/481713</resource_url>
</company>
</companies>
</release>
""")
release_element = ElementTree.fromstring(source)
release_document = library.Release.from_element(release_element)
actual = format(release_document)
expected = stringtools.normalize(u"""
""")
assert actual == expected | UTF-8 | Python | false | false | 62,344 | py | 35 | test_Release_from_element.py | 30 | 0.316709 | 0.301353 | 0.000128 | 1,469 | 41.423417 | 451 |
torpedolabs/amulet | 18,588,618,471,315 | 8efdcdb5af76319a05e2f535ed9fa8641a8a3606 | eb6e9d1dba0e6d649daf09a5568aeadeeedea94a | /login/controller.py | 97c8a025cb3e76f6a9d48ec597a94d12a93f183e | [] | no_license | https://github.com/torpedolabs/amulet | fd576e1ffa75e5f3b555b38d627b405368fd5e47 | ab7cb06df2ed0aed26ca2363b2cf5ab2cdc75212 | refs/heads/master | 2021-01-10T01:55:53.720805 | 2015-11-07T06:51:10 | 2015-11-07T06:51:10 | 45,725,262 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'xstead-master'
import random, string, os
import config, datetime, fileinput
from users import get_user_by_email
import time
from sinchsms import SinchSMS
#
# generate random pin code
# xxxx digit
#
def generate_random_code(length=4):
    """Return a random numeric PIN as a string.

    :param length: number of digits (default 4, the original fixed size).

    NOTE(review): ``random`` is not cryptographically secure; for real
    login PINs the ``secrets`` module would be a safer source.
    """
    return ''.join(random.choice(string.digits) for _ in range(length))
#
# check user login data
# check email == USEREMAIL,
# phone == USERPHONE,
# code == VALID
#
def check_user_with_code(email='', code=''):
    """Return True when *code* matches the pending PIN stored for *email*.

    Returns False for unknown users or when no valid records exist; any
    unexpected error is printed and the function falls through (None),
    which callers treat as a failed check.
    """
    try:
        pending = read_data_from_file()
        if not pending:
            return False
        #todo: check phone valid!
        record = next((entry for addr, entry in pending if addr == email), None)
        if record is None:
            return False
        return record.get('code') == code
    except Exception as e:
        print(e)
#
# save user login data
#
# email;phone;generated pin;datetime
# email;phone;generated pin;datetime
# ...
# ...
#
def save_user_with_code(email='', code=''):
    """Append a login record ``email;phone;code;timestamp`` for *email*.

    Looks the user up via users.get_user_by_email(); when found, writes one
    line to the configured data file and returns the user's phone number.
    Returns None for unknown users; exceptions are printed and swallowed,
    so the caller also receives None on failure.
    """
    try:
        make_dir(config.data_dir)
        user_data = get_user_by_email(email)
        if user_data:
            save_data_into_file(
                ';'.join(
                    # NOTE(review): .encode('utf-8') yields bytes on Python 3,
                    # which cannot be ';'.join-ed with the str timestamp below
                    # -- this presumably targets Python 2; confirm before
                    # running under Python 3.
                    [email.encode('utf-8'),
                     user_data.get('phone').encode('utf-8'),
                     code.encode('utf-8'),
                     datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")]
                )
            )
        return user_data.get('phone') if user_data else None
    except Exception as e:
        print(e)
#
# write login data into file
# data file set in config.py
#
def save_data_into_file(r):
    """Append one record line *r* to the configured login-data file.

    Fixes: the Python-2-only ``except Exception, e`` syntax (a syntax error
    on Python 3, where the rest of this module is expected to run), and a
    file-handle leak when write() raised before close() -- ``with`` now
    guarantees the handle is released.  Errors propagate to the caller
    unchanged, exactly as the old ``raise e`` did.
    """
    data_file = os.path.join(config.data_dir, config.data_file)
    with open(data_file, 'a+') as f:
        f.write(r + "\n")
#
# read login data from file
# data file set in config.py
# security time check
# pin code valid until "config.code_valid_in_minutes" minutes only
#
# return dict, {email,phone,code,timestamp}
#
def read_data_from_file():
    """Read non-expired login records from the data file.

    Each line is ``email;phone;code;timestamp``.  Records older than
    ``config.code_valid_in_minutes`` minutes are dropped.  Returns a list of
    ``(email, record_dict)`` pairs sorted by email, matching the original
    ``sorted(result.iteritems())`` contract.

    Fixes: ``dict.iteritems()`` (Python 2 only) -> ``items()`` (valid on
    both), and the Python-2-only ``except Exception, e: raise e`` wrapper
    (which only truncated the traceback) is removed -- errors propagate
    unchanged.
    """
    result = {}
    security_date = datetime.datetime.now() - datetime.timedelta(minutes=config.code_valid_in_minutes)
    data_file = os.path.join(config.data_dir, config.data_file)
    with open(data_file) as f:
        content = f.readlines()
    for user in content:
        # Split once per line instead of four times.
        fields = user.rstrip('\n').split(';')
        email, phone, code, time_ = fields[0], fields[1], fields[2], fields[3]
        date_object = datetime.datetime.strptime(time_, '%Y-%m-%d %H:%M:%S')
        if date_object > security_date:
            result[email] = {'email': email, 'phone': phone, 'code': code, 'time': date_object}
    return sorted(result.items())
#
# remove user from login file
# when successfully logged in
#
def delete_user_from_data(email=''):
    """Remove *email*'s pending-login line(s) from the data file in place.

    Fix: the original test ``if email in line`` also deleted any OTHER
    user's record that merely contained *email* as a substring (deleting
    'bob@x.com' would also drop 'bigbob@x.com').  Records are
    ';'-separated with the email in the first field, so that field is now
    compared exactly.
    """
    data_file = os.path.join(config.data_dir, config.data_file)
    for line in fileinput.input(data_file, inplace=True):
        if line.split(';')[0] == email:
            continue
        # In inplace mode fileinput redirects stdout back into the file,
        # so printing a line is what keeps it.
        print(line.rstrip('\n'))
#
# make directory if doesnt exists
#
def make_dir(dir_path):
    """Create *dir_path* (including parents) if it does not exist yet.

    The original wrapped this in ``try: ... except Exception, e: raise e``
    -- Python-2-only syntax that merely re-raised while truncating the
    traceback -- so the wrapper is dropped; errors propagate unchanged.
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
#
# send pin code sms
#
def send_sinch_sms(number='', code=''):
    """Send the verification PIN *code* to *number* via the Sinch SMS API.

    Credentials come from config.py.  Delivery status is currently not
    checked (see the disabled polling block at the end).
    """
    message = ('Your verification code is: %s' % code)
    client = SinchSMS(config.sinch_sms_app_key,
                      config.sinch_sms_secret)
    print("Sending '%s' to %s" % (message, number))
    #
    #todo: handle repsonse
    # not important to check
    # more security when you save the message id too!
    #
    sms_sent_response = client.send_message(number, message)
    # The triple-quoted block below is dead code (an unused string literal):
    # status polling kept for reference only, never executed.
    """
    message_id = sms_sent_response['messageId']
    sms_status_response = client.check_status(message_id)
    while sms_status_response['status'] != 'Successful':
        print(sms_status_response['status'])
        time.sleep(4)
        sms_status_response = client.check_status(message_id)
    print(sms_status_response['status'])
    """
| UTF-8 | Python | false | false | 4,169 | py | 15 | controller.py | 8 | 0.57136 | 0.568961 | 0 | 176 | 22.6875 | 104 |
magnusvaughan/maestro | 10,660,108,850,592 | de0105d514bc2c49a6c8312a965b70c9f2539c69 | 9e05a1c846ea4bcf530eb731228e1e6bd78d596c | /api/views.py | c83cd70ce4c99083b61c2b0595eec5137def488c | [] | no_license | https://github.com/magnusvaughan/maestro | 9bdd3a016d03e9232b078513265b5b3e0c9e8f47 | 5d2239f5d9219e9929f095c352e46def0f14ae54 | refs/heads/master | 2022-12-01T09:50:43.578895 | 2020-08-15T21:18:05 | 2020-08-15T21:18:05 | 287,619,926 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
# Create your views here.
from .models import Resource
from .serializers import ResourceSerializer
from rest_framework import generics
class ResourceListCreate(generics.ListCreateAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer | UTF-8 | Python | false | false | 306 | py | 3 | views.py | 3 | 0.820261 | 0.820261 | 0 | 10 | 29.7 | 53 |
tyanderson/Ty_INTEX | 9,560,597,225,437 | f552b956d252aced3f39b1061ab8f22b499bd631 | d65a5db69009fc83a577765c6d531d30c3833d66 | /store/cached_templates/templates/cart.html.py | eeea0b583c5ae05a263ad9c6f8f42f3ea694fb8b | [] | no_license | https://github.com/tyanderson/Ty_INTEX | 7ee1c2e0346984347fed8b8d02eb6cdbdd3424db | 3c9d4771b9a8a0b2edbbd6e35eb4679bd409e2f8 | refs/heads/master | 2021-01-20T03:39:09.347291 | 2015-03-17T14:56:24 | 2015-03-17T14:56:24 | 33,574,337 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:ascii -*-
from mako import runtime, filters, cache
# Auto-generated Mako template cache for cart.html -- do not edit by hand;
# regenerate from the source template instead.
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1425772416.746322
_enable_loop = True
_template_filename = 'C:\\Users\\Charizard\\Google Drive\\BYU\\IS 413\\Sprint2\\store\\templates/cart.html'
_template_uri = 'cart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
    # Mako-generated helper: fetch (lazily creating) this template's namespace.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # Mako-generated stub: this template declares no <%namespace> blocks.
    pass
def _mako_inherit(template, context):
    # Mako-generated: wire up template inheritance from base_ajax.htm.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, '/homepage/templates/base_ajax.htm', _template_uri)
def render_body(context,**pageargs):
    # Mako-generated top-level body renderer; delegates the 'content' block
    # to the inherited base template unless a parent already defines it.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        str = context.get('str', UNDEFINED)
        item = context.get('item', UNDEFINED)
        def content():
            return render_content(context._locals(__M_locals))
        request = context.get('request', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content(context,**pageargs):
    # Mako-generated renderer for the 'content' block: emits one HTML card
    # per shopping-cart item, with quantity read from the Django session.
    __M_caller = context.caller_stack._push_frame()
    try:
        str = context.get('str', UNDEFINED)
        item = context.get('item', UNDEFINED)
        def content():
            return render_content(context)
        request = context.get('request', UNDEFINED)
        __M_writer = context.writer()
        __M_writer("\r\n\r\n    <!-- Loop through each request.session['shopping_cart'] -->\r\n\r\n")
        for i in item:
            __M_writer('    <div class="cart_item">\r\n        <div class="cart_item_data">\r\n            <img src="/static')
            __M_writer(str( i.photo ))
            __M_writer('" class="cart_thumbnail"/>\r\n        </div>\r\n        <div class="cart_item_data">\r\n            <p><b>Name: ')
            __M_writer(str(i.name))
            __M_writer('</b></p>\r\n            <p>Description: ')
            __M_writer(str(i.description))
            __M_writer('</p>\r\n            <p>Price: ')
            __M_writer(str( i.price ))
            __M_writer('</p>\r\n            <p>Quantity: ')
            __M_writer(str( request.session['shopping_cart'][str(i.id)] ))
            __M_writer('</p>\r\n            <p class="delete" data-item_id="')
            __M_writer(str(i.id))
            __M_writer('"><a href="#">Delete</a></p>\r\n        </div>\r\n    </div>\r\n    <hr>\r\n')
        __M_writer('\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"line_map": {"64": 14, "65": 15, "66": 15, "27": 0, "68": 16, "37": 1, "70": 17, "71": 22, "77": 71, "47": 3, "67": 16, "69": 17, "56": 3, "57": 7, "58": 8, "59": 10, "60": 10, "61": 13, "62": 13, "63": 14}, "uri": "cart.html", "source_encoding": "ascii", "filename": "C:\\Users\\Charizard\\Google Drive\\BYU\\IS 413\\Sprint2\\store\\templates/cart.html"}
__M_END_METADATA
"""
| UTF-8 | Python | false | false | 3,471 | py | 68 | cart.html.py | 38 | 0.547105 | 0.518294 | 0 | 81 | 41.851852 | 356 |
pimlu/vocoder | 2,396,591,753,683 | 8339f92d2d04c51b7ee1b394359169736c444a8d | b88d36084e0ff363eef638999ab081dcb8a5c7f9 | /old/polish.py | 18ecfceb1a23546c628884f1663f58ed472239b4 | [] | no_license | https://github.com/pimlu/vocoder | 68f2a8bee1b743d148768f86725d86ee71be4ee0 | 76abee6deaf7c78256fef3f7220951987439d6ad | refs/heads/master | 2023-03-14T05:38:39.819566 | 2021-03-06T17:15:04 | 2021-03-06T17:17:43 | 344,473,316 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
import sigproc as sig
import itertools
import vocoder as voc
import util
import signal
import sys
# Module-wide flag; signal_handler() raises it so run_vocoder's loop can stop.
sigint = False
def signal_handler(sig, frame):
    # First Ctrl-C: raise the module flag so the optimisation loop can
    # finish its current step and exit cleanly; a second Ctrl-C quits hard.
    global sigint
    if not sigint:
        sigint = True
        return
    sys.exit(1)
# Mel filterbank and its inverse mapping, built once at import time.
# NOTE(review): shapes come from sigproc.melbank() -- not visible here.
mel, minv = sig.melbank()
def prep_mel(truth, gritty):
    """Return [mel magnitudes of the ground-truth file, raw spectrum of the
    degraded file], both evaluated in a single TF v1 session."""
    with tf.compat.v1.Session() as sess:
        clean_wave = util.read_sound(truth)
        clean_freqs = sig.encode(clean_wave)
        clean_mags, _clean_phase = sig.topolar(clean_freqs)
        melmags = tf.matmul(clean_mags, tf.expand_dims(mel, 0))
        degraded_wave = util.read_sound(gritty)
        degraded_freqs = sig.encode(degraded_wave)
        return sess.run([melmags, degraded_freqs])
def run_vocoder(melmags, refreqs):
    """Iteratively optimise *refreqs* so its mel spectrum matches *melmags*,
    then decode and write the polished waveform.  Ctrl-C (via signal_handler
    setting the module flag ``sigint``) stops the loop cleanly."""
    with tf.compat.v1.Session() as sess:
        res = voc.mel_loss(melmags, mel, refreqs)
        train_op = tf.compat.v1.train.AdamOptimizer().minimize(res.loss)
        sess.run(tf.compat.v1.global_variables_initializer())
        signal.signal(signal.SIGINT, signal_handler)
        for step in itertools.count():
            if sigint:
                break
            (loss, resonance), errs, _ = sess.run(
                [(res.loss, res.loss_resonance),
                 (res.err_sum, res.err_stft, res.err_mag),
                 train_op])
            print(('Iter: {}; Loss: {:#.4G}, Error: {:#.4G}; '
                   '(Pieces: {:#.4G}, {:#.4G}, {:#.4G})')
                  .format(step, loss, *errs, resonance))
        restored = sig.decode(res.vfreq)
        sess.run(util.write_sound(restored, 'samples/flown/polished.wav'))
# --- script entry: load the audio pair, then iteratively polish it ---------
print('loading file...')
melmags, refreqs = prep_mel('samples/flown/Gound_truth.wav', 'samples/flown/FloWaveNet.wav')
#refreqs += 0.002*np.random.random(refreqs.shape) - 0.001
# Drop the graph built by prep_mel before constructing the optimisation graph.
tf.compat.v1.reset_default_graph()
print('running vocoder...')
run_vocoder(melmags, refreqs)
| UTF-8 | Python | false | false | 1,867 | py | 11 | polish.py | 9 | 0.611141 | 0.600428 | 0 | 64 | 28.171875 | 92 |
18351089214/ICA | 16,020,228,055,078 | 00540beca5487cc658b0309ba4c94e7e8e5dc942 | 92c16963fa3f3fcd3d06eeea7bfc4075cbc9edce | /qqmsg/models.py | c36bdb7384160430541f89cf3b8b105504e3286e | [] | no_license | https://github.com/18351089214/ICA | d0f15a30a0cce0ac32ed147b772f17055de38b2e | a799d8ca889e03fbcc764221ab7a8922e3a390ff | refs/heads/master | 2021-01-08T02:54:21.094290 | 2020-02-20T13:32:07 | 2020-02-20T13:32:07 | 241,871,498 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.utils.html import format_html
# Create your models here.
class QqMsg(models.Model):
    """A captured QQ chat message plus sender/group metadata.

    NOTE(review): ``id``, ``type`` and ``email`` shadow common names, and
    all fields are free-form CharFields -- presumably mirroring a raw
    crawler payload; confirm before tightening the schema.
    """
    id = models.CharField(primary_key=True, max_length=50, null=False)
    source = models.CharField(max_length=50, null=False)
    msg_id = models.CharField(max_length=50)
    type = models.CharField(max_length=25, null=False)
    qq = models.CharField(max_length=25, null=False)
    gc = models.CharField(max_length=25)
    gn = models.CharField(max_length=255)
    owner = models.CharField(max_length=25)
    nick = models.CharField(max_length=255)
    l_nick = models.TextField()
    phone = models.CharField(max_length=25)
    city = models.CharField(max_length=25)
    occupation = models.CharField(max_length=25)
    avatar = models.CharField(max_length=255)
    email = models.CharField(max_length=50)
    mobile = models.CharField(max_length=25)
    image = models.CharField(max_length=255)
    content = models.TextField()
    def __str__(self):
        # Human-readable identity in admin listings: the message source.
        return self.source
    class Meta:
        db_table = 'qqmsg'
        verbose_name = 'QQ消息'
        verbose_name_plural = 'QQ消息'
    def colored_type(self):
        # Render the group name (gn) in red HTML for the admin changelist.
        color_code = 'red'
        return format_html(
            '<span style="color: {};">{}</span>',
            color_code,
            self.gn
        )
    colored_type.short_description = 'gn'
| UTF-8 | Python | false | false | 1,361 | py | 15 | models.py | 14 | 0.64745 | 0.620843 | 0 | 42 | 31.214286 | 70 |
a4shubert/statslib | 25,769,807,111 | 011a2fff8d52f06951b714384bb697551fa21ff7 | 3fa2e4e97db1dace11f58f02b56ac42da58d1e91 | /setup.py | ceafc745b28a9790ab21a03027d70a54c48bed18 | [
"Apache-2.0"
] | permissive | https://github.com/a4shubert/statslib | 4ab932f43185ea4fe115a7e36bcf843d29487809 | 5a35c0d10c3ca44c2d48f329c4f3790c91c385ac | refs/heads/main | 2023-06-27T00:24:34.662281 | 2021-05-13T09:33:16 | 2021-05-13T09:33:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import setuptools
# Read the long description for PyPI from the README.  The encoding is pinned
# because open() otherwise uses the platform's locale encoding, which breaks
# builds on non-UTF-8 systems.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="statslib",
    version="0.0.1",
    author="Alexander Shubert",
    author_email="ashubertt@gmail.com",
    description="Python library for rapid statistical and ML modelling",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="localhost//statslib",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3"
    ],
    # Exact (==) pins give reproducible installs but will conflict with
    # newer stacks -- consider >= ranges when publishing.
    install_requires=[
        'matplotlib==3.3.4',
        'pandas==1.1.5',
        'seaborn==0.11.1',
        'statsmodels==0.12.2',
        'tensorflow==2.4.1',
        'openpyxl==3.0.7',
        'scikit-learn==0.24.2',
        'beautifulsoup4==4.9.3',
        'numpy==1.19.2'
    ],
    include_package_data=True,
    # Ship the bundled CSV datasets inside the wheel/sdist.
    package_data={'': ['datasets/*.csv']},
)
| UTF-8 | Python | false | false | 828 | py | 55 | setup.py | 26 | 0.638889 | 0.595411 | 0 | 33 | 24.090909 | 72 |
Iongtao/learning-python3 | 17,729,624,999,776 | ef6e27ebc0bff1a5da491cad50ee524f417fea76 | dcdf6506db0cad447fb00297537e8b084698c746 | /learning/class/demo5.py | 6d7775683f6ba1b1e0bc77d6a6acc985cebf7aac | [] | no_license | https://github.com/Iongtao/learning-python3 | f9b41f255c156194c6778d11c0bf0511b8ab6eda | 76ab3651f62a460140e41ba3b154b084a1a8e30d | refs/heads/master | 2022-11-16T00:32:03.320067 | 2020-07-09T15:23:13 | 2020-07-09T15:23:13 | 258,811,065 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # !/usr/bin/env python3
# coding=utf-8
'''
将类的实例作为属性
当一个类中的属性和方法越来越多 变的复杂 会不利于维护
将类中的特性或公共部分提取成一个新的类 会显得直观明了
下面 定义了一个Dog类 一个Hunting类 一个HuntingDog类
这里将Hunting类的实例 赋值给了 HuntingDog中的hunting属性的值
'''
class Dog():
    """A plain dog with a name, an age and one trick (sit)."""

    def __init__(self, name, age):
        self.name, self.age = name, age

    def sit(self):
        # User-facing text is intentionally left in Chinese ("... sat down").
        print(self.name + "坐下了")
class Hunting():
    """Hunting capability: a fixed top speed plus a describe method."""

    def __init__(self, name):
        self.name = name
        self.speed = 100  # km/h, the same for every hunting dog

    def desciption(self):  # (sic) misspelling kept -- callers use this name
        # User-facing text intentionally left in Chinese ("hound ... km/h").
        print('猎狗' + self.name + '的速度可以达到' + str(self.speed) + '千米/小时')
class HuntingDog(Dog):
    """A Dog that additionally *has* a Hunting capability -- composition:
    a Hunting instance is stored as an attribute rather than inherited."""
    def __init__(self, name, age):
        super().__init__(name, age)
        self.hunting = Hunting(name)
if __name__ == '__main__':
    # Runs only when this file is executed directly (python demo5.py).
    dugao = HuntingDog('杜高犬', 2)
    dugao.hunting.desciption()
    print('直接 python 当前这个文件时候 触发')
else:
    # Runs when this module is imported from another file.
    print('在别的文件中引用执行的时候 触发')
| UTF-8 | Python | false | false | 1,120 | py | 51 | demo5.py | 47 | 0.606383 | 0.599291 | 0 | 45 | 17.8 | 71 |
evotools/CattleGraphGenomePaper | 10,368,051,068,785 | c8efac077c9a26c6ac0a11cba32d9f18174570ff | 4e401d24d511c7472bf098ba6168d7d228e2a39d | /detectSequences/nf-GraphSeq/bin/11G-removeFlanks | edfe79028e66ed402735048ee9ad78a9c512e16b | [] | no_license | https://github.com/evotools/CattleGraphGenomePaper | 624e4ff8fc3a579003167e43c7dace1fa14327dd | 8eed9013e744f81863f1e427effd447e2b6b4036 | refs/heads/master | 2023-04-13T20:08:23.807138 | 2023-01-10T09:15:53 | 2023-01-10T09:15:53 | 303,937,151 | 10 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import sys
#flank=int(sys.argv[2])
# Build a lookup from "chrom:start-end" intervals (argv[2]; presumably a
# BED-like file with a 4th name column -- confirm) to the original contig id
# encoded after the last '#'.
conv={}
for l in open(sys.argv[2]):
    l = l.strip().split()
    conv[f"{l[0]}:{l[1]}-{l[2]}"] = l[3].split("#")[-1]
#originalReg = {i.strip().split()[-1]: i.split()[0:3] for i in open(sys.argv[2])}
# Rewrite column 1 of argv[1]: replace the flanked interval id with the
# original coordinates looked up above.  Lines starting with '#' pass through.
for line in open(sys.argv[1]):
    if "#" in line[0]: print(line.strip()); continue
    line = line.strip().split()
    old_id = conv.get(line[0])
    # old_id looks like "contig:start-end"; the contig may itself contain ':'.
    ctg_id, bpi_bpe = ':'.join( old_id.split(":")[0:-1] ), old_id.split(":")[-1]
    #ctg_id, bpi_bpe = ':'.join( line[0].split(":")[0:-1] ), line[0].split(":")[-1]
    bpi, bpe = bpi_bpe.split("-")
    line[0] = f"{ctg_id}:{bpi}-{bpe}"
    print('\t'.join(line))
| UTF-8 | Python | false | false | 711 | 178 | 11G-removeFlanks | 157 | 0.496484 | 0.464135 | 0 | 20 | 34.55 | 87 |
|
grasuth/social_media_brand_disambiguator | 10,599,979,334,422 | 272a439f0a2ce5b26c5f2f2705952fda5bb7afe3 | e6816a586a2e9954da0d46124b168e518c110946 | /learn1.py | 0bae71029018d52e17aebff73e69a031c26d7ce7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/grasuth/social_media_brand_disambiguator | 6fa5d852411dea1cd6103eef4edacce758b254b5 | 961002efba1be55d9815178973d02de0b6b2979c | refs/heads/master | 2021-01-17T21:42:32.989214 | 2013-06-10T16:46:03 | 2013-06-10T16:46:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""First simple sklearn classifier"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import argparse
import os
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import linear_model
from sklearn import cross_validation
from nltk.corpus import stopwords
import unicodecsv
import sql_convenience
############
# NOTE
# this is HACKY CODE just to get something in place (end-to-end from making
# gold standard to getting a simple ML classifier working)
# do not trust this code to do anything useful in the real world!
############
def reader(class_name):
    """Read tweet texts from a CSV file with header (tweet_id, tweet_text).

    Returns the list of non-empty, stripped tweet texts.  Raises
    AssertionError when the header row is not the expected one.

    Fix: the original passed an open() handle straight into unicodecsv and
    never closed it; ``with`` now releases the file deterministically.
    """
    lines = []
    with open(class_name) as csv_file:
        class_reader = unicodecsv.reader(csv_file, encoding='utf-8')
        row0 = next(class_reader)
        assert row0 == ["tweet_id", "tweet_text"]
        #class_writer.writerow(("tweet_id", "tweet_text"))
        for tweet_id, tweet_text in class_reader:
            txt = tweet_text.strip()
            if len(txt) > 0:
                lines.append(txt)
    return lines
#def tokenize(items):
#"""Create list of >1 char length tokens, split by punctuation"""
#tokenised = []
#for tweet in items:
#tokens = nltk.tokenize.WordPunctTokenizer().tokenize(tweet)
#tokens = [token for token in tokens if len(token) > 1]
#tokenised.append(tokens)
#return tokenised
#def clean_tweet(tweet, tweet_parser):
#tweet_parser.describe_tweet(tweet)
#components = tweet_parser.get_components()
#filtered_tweet = " ".join(tweet_parser.get_tokens(filtered_components))
#return filtered_tweet
def label_learned_set(vectorizer, clfl, threshold):
    """Classify every tweet in the validation table and write the label back.

    A positive (class 1) prediction is demoted to 0 unless its probability
    reaches *threshold* -- only confident in-class calls are trusted.
    """
    table = "learn1_validation_apple"
    for row in sql_convenience.extract_classifications_and_tweets(table):
        cls, tweet_id, tweet_text = row
        spd = vectorizer.transform([tweet_text]).todense()
        predicted_cls = clfl.predict(spd)
        predicted_class = predicted_cls[0] # turn 1D array of 1 item into 1 item
        predicted_proba = clfl.predict_proba(spd)[0][predicted_class]
        if predicted_proba < threshold and predicted_class == 1:
            predicted_class = 0 # force to out-of-class if we don't trust our answer
        #import pdb; pdb.set_trace()
        sql_convenience.update_class(tweet_id, table, predicted_class)
def check_classification(vectorizer, clfl):
    """Sanity-check the trained classifier on two hand-picked tweets: the
    first should be class 1 (the brand), the second class 0 (the fruit)."""
    probes = [
        ("1?", u"really enjoying how the apple's iphone makes my ipad look small"),  # -> 1 which is set 1 (is brand)
        ("0?", u'i like my apple, eating it makes me happy'),  # -> 0 which is set 0 (not brand)
    ]
    for tag, tweet in probes:
        dense = vectorizer.transform([tweet]).todense()
        print(tag, clfl.predict(dense), clfl.predict_proba(dense))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Simple sklearn implementation')
    parser.add_argument('table', help='Name of in and out of class data to read (e.g. annotations_apple)')
    args = parser.parse_args()
    # Expect data/<table>_in_class.csv and data/<table>_out_class.csv.
    data_dir = "data"
    in_class_name = os.path.join(data_dir, args.table + '_in_class.csv')
    out_class_name = os.path.join(data_dir, args.table + '_out_class.csv')
    in_class_lines = reader(in_class_name)
    out_class_lines = reader(out_class_name)
    # put all items into the training set
    train_set = in_class_lines + out_class_lines
    target = np.array([1] * len(in_class_lines) + [0] * len(out_class_lines))
    # Bag-of-words unigram features, English stopwords removed.
    # NOTE(review): sklearn.cross_validation was removed in sklearn 0.20 --
    # this script targets an old sklearn release.
    stopWords = stopwords.words('english')
    vectorizer = CountVectorizer(stop_words=stopWords, ngram_range=(1, 1), min_df=1)
    trainVectorizerArray = vectorizer.fit_transform(train_set).toarray()
    # get list of feature_names, these occur > 1 time in the dataset
    print("Feature names:", vectorizer.get_feature_names()[:20], "...")
    print("Found %d features" % (len(vectorizer.get_feature_names())))
    clf_logreg = linear_model.LogisticRegression() # C=1e5)
    clf = clf_logreg
    #kf = cross_validation.LeaveOneOut(n=len(target)) # KFold(n=len(target), k=10, shuffle=True)
    kf = cross_validation.KFold(n=len(target), n_folds=5, shuffle=True)
    print("Shortcut cross_val_score to do the same thing, using all CPUs:")
    cross_val_scores = cross_validation.cross_val_score(clf, trainVectorizerArray, target, cv=kf, n_jobs=-1)
    print(np.average(cross_val_scores))
    # make sparse training set using all of the test/train data (combined into
    # one set)
    train_set_sparse = vectorizer.transform(train_set)
    # instantiate a local classifier
    clfl = clf.fit(train_set_sparse.todense(), target)
    # check and print out two classifications as sanity checks
    check_classification(vectorizer, clfl)
    # use a threshold (arbitrarily chosen at present), test against the
    # validation set and write classifications to DB for reporting
    chosen_threshold = 0.92
    label_learned_set(vectorizer, clfl, chosen_threshold)
| UTF-8 | Python | false | false | 5,153 | py | 2 | learn1.py | 1 | 0.68659 | 0.677081 | 0 | 122 | 41.237705 | 112 |
bjornua/beercalc | 2,121,713,888,728 | 40adfe61cfef0cd33484d7de1b6b6dfcd19cc2a4 | 2a4d1cf3ca64875313796d9e3bba91d04747bb0e | /beercalc/view/stockbutton.py | 2e157ef8677e10fa7db8d919b822913d362f27b9 | [] | no_license | https://github.com/bjornua/beercalc | 3e50434a48509d4de3ff1b3fd83609004ed309ca | ebd4441df2e2097bda2f7a3dda14c2a9d080cc78 | refs/heads/master | 2021-01-25T08:28:07.671834 | 2010-11-17T23:51:49 | 2010-11-17T23:51:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import gtk
from beercalc.lib import Observable
class StockButton(gtk.Button, Observable):
    """GTK button showing a stock icon that re-broadcasts its 'clicked'
    signal through the beercalc Observable mechanism."""
    def __init__(self, label, stock_image):
        gtk.Button.__init__(self, label)
        Observable.__init__(self)
        self.image = gtk.Image()
        self.image.set_from_stock(stock_image, gtk.ICON_SIZE_MENU)
        self.set_image(self.image)
        self.connect("clicked", self._OnClick)
    def _OnClick(self, *args):
        # Translate the raw GTK signal into an Observable notification.
        self.observers.notify("clicked")
| UTF-8 | Python | false | false | 503 | py | 23 | stockbutton.py | 23 | 0.612326 | 0.610338 | 0 | 15 | 31.866667 | 66 |
peperoschach/logica-de-programacao | 15,985,868,304,658 | 6d7ca78efe1dd1caa776f930b53bda23c93baad3 | 9c33b4da2ebe21ce7d0e48fa6581ec0e9f7582fe | /funcoes/tratando-erros.py | a68d9baaa5383d5c7b3b9676787ca56d7331e790 | [] | no_license | https://github.com/peperoschach/logica-de-programacao | 087bcad8af59e92e4f7ea8bd6ace6d6093853014 | 28f3f8087a0f5db07025d7614f8dbbe20dec250c | refs/heads/main | 2023-03-30T21:09:12.178803 | 2021-03-26T01:26:16 | 2021-03-26T01:26:16 | 344,959,751 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | while True:
try:
x = int(input('Por favor digite um número: '))
break
except ValueError:
print('Oops! Número inválido. Tente novamente... ')
def div():
    """Read two integers from stdin and return their quotient.

    Returns None when the division fails or the input is not a number (the
    error is reported via print); the 'finally' message always prints.

    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; it now catches only ordinary exceptions (e.g.
    ValueError raised by int() on bad input).
    """
    try:
        num1 = int(input('Digite um número: _ '))
        num2 = int(input('Digite um número: _ '))
        res = num1 / num2
    except ZeroDivisionError:
        print('Opps! Erro de divisão por zero ....')
    except Exception:
        print('Algo de errado aconteceu...')
    else:
        return res
    finally:
        print('Executará sempre!')
# Main program: read two numbers and print the division result (None on error).
print(div())
| UTF-8 | Python | false | false | 586 | py | 41 | tratando-erros.py | 41 | 0.556131 | 0.549223 | 0 | 23 | 23.826087 | 59 |
uweking/BigData | 6,734,508,729,444 | a1a4ada80028d6d869685ff60bc5870ca185358a | 57f7894df4a291d805d81de72cfe63dd5bd10a4f | /Simulator/normaldistribution.py | 776ca53a8ae2e254fbcce98f559c04d34d0ca2c5 | [] | no_license | https://github.com/uweking/BigData | 9a54c169552e1058e6ab4898a959b7d35058b9ed | ef5969998009341da98073359c813df88e4571fc | refs/heads/main | 2023-08-15T00:02:37.957074 | 2021-09-12T12:55:38 | 2021-09-12T12:55:38 | 374,999,631 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scipy.stats import norm
import matplotlib.pyplot as plt
import numpy as np
def plotND():
    # Experimental scratch function: builds a value grid from -10.0 to 29.9
    # in 0.1 steps, weights each grid point with the N(5, 2) pdf, normalises
    # the weights and draws one value accordingly, printing everything.
    ndPossibleAxisValues = norm.rvs(5, 2, 10)
    chancesForEachPossibleAxisValue = []
    chance = 0
    valueForCycleByChance = 0
    counter = 0
    sum = 0  # NOTE(review): shadows the built-in sum() inside this function
    i = -10
    y = 0
    val = []
    cha = []
    cha2 = []
    while i < 20:
        while y < 10:
            x = i + y / 10
            ch = norm.pdf(x, 5, 2)
            #print(x)
            #print(ch)
            cha.append(ch)
            val.append(x)
            sum += ch
            y += 1
        y = 0
        i += 1
    counter = 0
    while counter < len(cha):
        # pdf weights do not add up to exactly 1 -> divide by their total
        cha2.append(cha[counter] / sum)
        counter += 1
    print(sum)
    print(val)
    print(cha)
    valueForCycleByChance = np.random.choice(val, 1, p=cha2)
    print(valueForCycleByChance)
    #while counter < ndPossibleAxisValues.size:
        #chance = norm.pdf(ndPossibleAxisValues[counter], loc=100, scale=1)
        #chancesForEachPossibleAxisValue.append(chance)
       # print("wert: " + str(ndPossibleAxisValues[counter]) + ", chance: " + str(chancesForEachPossibleAxisValue[counter]))
        #sum += chance
        #counter += 1
    # valueForCycleByChance = np.random.choice(ndPossibleAxisValues, 1, p=chancesForEachPossibleAxisValue)
    # print("valueForCycleByChance: " + valueForCycleByChance)
    #print(sum)
    #print(chancesForEachPossibleAxisValue)
    # plt.hist(ndPossibleAxisValues, bins = 100)
    # plt.show()
# this function is for calculating all the chances for a range of .2f numbers in a normal distribution
# loc has to have 2 decimal places
# scale has a default value of .5 and is the mean deviation in percent
# max_offset_in_percent is std set to 10 percent and defines the range of values which get created for the chance
# evaluation. this value might need to get increased when the scale is rising
def get_random_number_and_chance(loc, scale=.5, max_offset_in_percent=10):
    """Draw one value near *loc*, weighted by a normal density.

    Candidate values are generated in 0.01 steps across
    [loc - max_offset, loc + max_offset] (offset given in percent of
    *loc*), each weighted with the pdf of N(loc, loc*scale/100). The
    weights are normalized to probabilities and a single candidate is
    drawn with numpy. *loc* is expected to carry two decimal places;
    raise *max_offset_in_percent* if *scale* grows large, so the density
    mass stays inside the candidate range.

    :return: numpy array of length 1 holding the drawn value
    """
    std_dev = loc * scale / 100  # /100 because scale is in percent
    offset_fraction = max_offset_in_percent / 100
    lower = round(loc - (loc * offset_fraction), 2)
    upper = round((loc + (loc * offset_fraction)), 2)
    step = .01  # matches the two-decimal-place convention for loc
    candidates = []
    weights = []
    # The raw pdf weights do not add to exactly 1, so accumulate their
    # total and divide it out afterwards; the correction is tiny.
    total_weight = 0
    x = lower
    while lower <= x <= upper:
        density = norm.pdf(x, loc, std_dev)
        candidates.append(x)
        weights.append(density)
        total_weight += density
        x += step
    normalized = [w / total_weight for w in weights]
    return np.random.choice(candidates, 1, p=normalized)
def test():
    """Count how many 1%-compounding steps take 0.005 up to 1.

    Prints every intermediate value, then a final line "c: <steps>".
    """
    value = .005
    growth = .01
    steps = 0
    while value < 1:
        value += value * growth
        print(str(value))
        steps += 1
    print("c: " + str(steps))
#print(get_random_number_and_chance(5.24, .5, 10))
# Script entry point: run the compounding-growth demo whenever the module executes.
test()
| UTF-8 | Python | false | false | 3,537 | py | 59 | normaldistribution.py | 4 | 0.627085 | 0.604467 | 0 | 118 | 28.974576 | 125 |
OrangeJessie/Fighting_Leetcode | 8,933,532,002,657 | ee67237df7b04af4c7607b6935053443b2e68942 | 99529bba5be988bae986c9584373df86b14dfb03 | /初级算法/rotateMatrix.py | d6236746c67613f690fb9b4291e64e511c25d1fc | [] | no_license | https://github.com/OrangeJessie/Fighting_Leetcode | 8efa3661f933f27316cbf49c6ccef7d4f064dcf3 | 2866df7587ee867a958a2b4fc02345bc3ef56999 | refs/heads/master | 2020-04-10T16:11:32.867495 | 2019-04-29T01:35:18 | 2019-04-29T01:35:18 | 161,136,599 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
l = len(matrix)
c_l = int(l/2)
for i in range(c_l):
for j in range(l):
col = j
row = l-1-i
a = matrix[row][col]
matrix[row][col] = matrix[i][j]
matrix[i][j] = a
for i in range(l):
c_l2 = i + 1
for j in range(c_l2):
col = i
row = j
a = matrix[row][col]
matrix[row][col] = matrix[i][j]
matrix[i][j] = a
# Manual smoke test: rotate a 3x3 matrix in place and show the result.
# Expected output: [[7, 4, 1], [8, 5, 2], [9, 6, 3]]
s = Solution()
mat = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]]
s.rotate(mat)
print(mat)
| UTF-8 | Python | false | false | 835 | py | 266 | rotateMatrix.py | 125 | 0.384431 | 0.367665 | 0 | 32 | 24.09375 | 76 |
Zer0FluxGiven/Numerical-Methods | 8,899,172,276,763 | 936b63f4e5144cb67ebf0ec9f897dd0fad5a2048 | cec2b685fb9e5cb0dd8ca8190e31dd106875fc8f | /Rutherford-Scattering.py | 029a059bf85852d8243c569f2969c9789b2f3389 | [] | no_license | https://github.com/Zer0FluxGiven/Numerical-Methods | d8fefed58c35c6019bc0768f60c87be97dc66c9f | 197c6ab21fa3abb34b28bb7ebef6ac7e5ad97f5e | refs/heads/master | 2022-11-21T06:16:50.915150 | 2020-07-27T22:17:33 | 2020-07-27T22:17:33 | 275,882,196 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-----------------------------------
# Introduction
#-----------------------------------
#This program models Rutherford Scattering, i.e. particle scattering due to a central Kepler Force
#The procedure used is two-fold: first we model the motion of an incoming particle by solving Hamilton's Equations with an Euler-Method Algorithm
#Second, we determine the (approximate) polar scatering angle (between 0 and pi), and bin the result.
#The result is the counts for each bin, which may be compared to the 1/sin**4(theta/2) relation.
#Written by Andrew Murphy, 2020
import numpy as np
import random
import matplotlib.pyplot as plt
#-----------------------------
# Functions used in Main Code
#-----------------------------
#Create a list of bins. Each bin is defined by the minimum value (bins[i][0]) and maximum value (bins[i][1])
def make_bins(bin_list):
    """Pair consecutive edges of *bin_list* into [min, max] bins.

    An input of N+1 edges yields N bins; each bin is a two-element list
    [lower_edge, upper_edge]. Fewer than two edges yields no bins.
    """
    return [[lo, hi] for lo, hi in zip(bin_list[:-1], bin_list[1:])]
#Places a given value "val" into a bin
def bin_val(val, bins):
    """Increment the counter of the bin containing *val*.

    Mutates the module-level ``bin_count`` array in place. Each test is
    ``min <= val < max``, so a value equal to the last bin's upper edge
    (or outside all bins) is silently dropped.
    """
    for i in range(len(bins)):
        if bins[i][0] <= val < bins[i][1]:
            bin_count[i] += 1
# The following functions represent the six partial derivatives of the Hamiltonian for repulsive Kepler Motion: H = p**2/2m + k/r.
# Partial derivatives of the Hamiltonian H = p**2/(2m) + k/r for the
# repulsive Kepler potential, with r = sqrt(x**2 + y**2 + z**2).
# All six read the module-level constants k (force constant) and m (mass).
def dH_dx(x,y,z):
    # dH/dx = -k*x / r**3
    return -x*k/((x**2 + y**2 + z**2)**(1.5))
def dH_dy(x,y,z):
    # dH/dy = -k*y / r**3
    return -y*k/((x**2 + y**2 + z**2)**(1.5))
def dH_dz(x,y,z):
    # dH/dz = -k*z / r**3
    return -z*k/((x**2 + y**2 + z**2)**(1.5))
# Momentum derivatives dH/dp = p/m, i.e. the velocity components.
def dH_dpx(px, py, pz):
    return px/m
def dH_dpy(px, py, pz):
    return py/m
def dH_dpz(px, py, pz):
    return pz/m
#Here we define our scattering process: first we select a random start point within a beam-width of s_max,
#then we approximate the motion of our particle by solving Hamilton's equations
def scattering_process():
    """Simulate one particle through the scattering field.

    Starts the particle at a random point of the circular beam cross
    section at z = z_0 with momentum (0, 0, pz_0), integrates Hamilton's
    equations with a forward-Euler scheme for t_max seconds in steps of
    dt, and returns the final polar angle (the approximate scattering
    angle, in [0, pi]). Reads the module-level parameters s_max, z_0,
    pz_0, t_max and dt, and the dH_* derivative helpers.

    NOTE(review): the radius is drawn uniformly, which does not sample
    the disc uniformly by area (density is higher near the axis) —
    confirm this is the intended beam profile.
    """
    #Our first step is to randomly generate starting x and y coordinates within a circular cross section of radius s_max
    phi = random.uniform(-np.pi , np.pi) #Random azimuthal angle
    r = random.uniform(0, s_max) #Random radius
    x = r*np.cos(phi) #x-coordinate of random point
    y = r*np.sin(phi) #y-coordinate of random point
    z = z_0
    px = 0
    py = 0
    pz = pz_0
    t = 0
    #Here we execute our Euler-Method Algorithm to approximately solve Hamilton's Equations in 3-Dimensions
    # dq/dt = +dH/dp,  dp/dt = -dH/dq
    while t < t_max:
        diff_x = dH_dpx(px,py,pz)*dt
        diff_y = dH_dpy(px, py, pz)*dt
        diff_z = dH_dpz(px, py, pz)*dt
        diff_px = -dH_dx(x,y,z)*dt
        diff_py = -dH_dy(x,y,z)*dt
        diff_pz = -dH_dz(x,y,z)*dt
        x += diff_x
        y += diff_y
        z += diff_z
        px += diff_px
        py += diff_py
        pz += diff_pz
        t += dt
    return np.arccos(z/np.sqrt(x**2 + y**2 + z**2)) #Returns the polar angle of our particle at t_max, this is (approximately) our scattering angle
#--------------------------
# Setting up the Simulation
#--------------------------
particle_number = 1000
m = 1.0 # Particle Mass
k = 100.0 #Force-constant for Kepler-Force
z_0 = -100.0 #Initial Z-coordinate
pz_0 = 100.0 #Initial z-momentum
t_Steps = 1000 #Number of time steps
s_max = 1 #radius of particle beam
t_max = abs(4*z_0*m/pz_0) #This is our maximum time, which is sufficiently large, see explanation for why this is so
dt = t_max/t_Steps #This is our time-differential
#----------------------------
# Setting up the Bins
#----------------------------
number_of_bins = 100
# number_of_bins equal-width bins spanning polar angles [0, pi].
bin_list = np.linspace(0, np.pi , (number_of_bins + 1))
bins = make_bins(bin_list)
bin_count = np.zeros(number_of_bins) # mutated in place by bin_val()
#-----------------------------
# Running the Simulation
#-----------------------------
# Each iteration traces one particle and bins its scattering angle.
for i in range(particle_number):
    bin_val(scattering_process(), bins)
#-----------------------------
# Plotting the Results
#-----------------------------
bin_coordinate = np.zeros(number_of_bins)
#Here we generate a list of the midpoint of each bin, the count of each bin is plotted at this point
for i in range(number_of_bins):
    bin_coordinate[i] = (bins[i][0]+ bins[i][1])/2
fig, ax = plt.subplots ()
ax.scatter(bin_coordinate, bin_count)
ax.set(xlabel='Scattering Angle (Rad)' , ylabel="Particle Count" , title='Scattering Results')
plt.show()
| UTF-8 | Python | false | false | 4,419 | py | 5 | Rutherford-Scattering.py | 4 | 0.572301 | 0.55465 | 0 | 136 | 30.492647 | 147 |
orca9s/crawler | 7,258,494,741,889 | eea76449f8d22a33fc8fc9d5c8b235e93d8eef41 | 7728897c5e7507ed1b0d3292ed3a492e0d7ea5ec | /crwaler-practice.py | 4fdff02940bc4ef58a262d0adb3b295938aece06 | [] | no_license | https://github.com/orca9s/crawler | 5cb985fec9c389e389697af7aaec98a8c84ab2d4 | f38397296edaced783c43349fcfe4cdf21a71b45 | refs/heads/master | 2020-03-18T18:44:56.613469 | 2018-06-13T14:43:56 | 2018-06-13T14:43:56 | 135,112,259 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from urllib import parse
import requests
from bs4 import BeautifulSoup
class Webtoon:
    """Scraper for a single Naver webtoon series.

    Fetches (and locally caches) the series list page, then parses the
    series metadata and its episode rows with BeautifulSoup. Relies on
    the module-level ``os``, ``requests`` and ``BeautifulSoup`` imports
    and the ``Episode`` class defined below.
    """
    def __init__(self, webtoon_id):
        # webtoon_id: Naver "titleId" query value identifying the series.
        self.webtoon_id = webtoon_id
        info = self.webtoon_crwaler()
        # print(info)
        self.title = info['title']
        self.author = info['author']
        self.description = info['description']
        # Episodes are only populated by an explicit update() call.
        self.episode_list = list()
    def update(self):
        """
        Refresh self.episode_list with Episode instances for this
        webtoon_id. (Translated from the original Korean docstring.)
        """
        result = self.episode_crawler()
        self.episode_list = result
    def if_for(self):
        """Return the series list-page HTML, using a local cache file
        when present and otherwise downloading and caching it."""
        file_path = 'data/episode_list.html-{webtoon_id}.html'.format(webtoon_id=self.webtoon_id)
        # Address the HTTP request is sent to
        url_episode_list = 'http://comic.naver.com/webtoon/list.nhn'
        # GET parameters passed with the HTTP request
        # NOTE(review): 'paramas' is a typo for 'params'; kept as-is.
        paramas = {
            'titleId': self.webtoon_id,
        }
        # -> 'http://com...nhn?titleId=703845
        # Check whether the HTML file is already saved locally
        if os.path.exists(file_path):
            # If saved, read that file and assign its text to html
            html = open(file_path, 'rt').read()
        else:
            # If not saved, issue an HTTP GET request with requests
            response = requests.get(url_episode_list, paramas)
            print(response.url)
            # Assign the response object's text attribute to html
            html = response.text
            # Save the received text data as an HTML file
            open(file_path, 'wt').write(html)
        return html
    def webtoon_crwaler(self):
        """Parse series title/author/description from the list page.

        NOTE(review): 'crwaler' is a typo for 'crawler'; kept because
        __init__ calls it under this name.
        """
        # Create a BeautifulSoup object and assign it to soup
        html = self.if_for()
        soup = BeautifulSoup(html, 'lxml')
        # In div.detail > h2 (title, author):
        #   child 0: title text
        #   child 1: author-info span Tag
        # Use get_text() to pull the string out of a Tag
        h2_title = soup.select_one('div.detail > h2')
        title = h2_title.contents[0].strip()
        author = h2_title.contents[1].get_text(strip=True)
        # div.detail > p (description)
        description = soup.select_one('div.detail > p').get_text(strip=True)
        # Return the information obtained by crawling as a dict
        # print(title)
        # print(author)
        # print(description)
        info = dict()
        info['title'] = title
        info['author'] = author
        info['description'] = description
        return info
    # 3. Fetch the list of episode information
    # url_thumbnail: thumbnail URL
    # title: episode title
    # rating: star rating
    # created_date: registration date
    # no: unique number of the episode detail page
    # Each episode is one dict of data;
    # all episodes are collected into a list.
    def episode_crawler(self):
        """Parse the episode table rows into a list of Episode objects."""
        # Create a BeautifulSoup object and assign it to soup
        html = self.if_for()
        soup = BeautifulSoup(html, 'lxml')
        # The table holding the episode list
        table = soup.select_one('table.viewList')
        # All tr elements inside the table
        tr_list = table.select('tr')
        # Declared so a list can be returned; after the loop,
        # episode_list holds the Episode instances.
        episode_list = list()
        # The first tr belongs to thead, so skip it and iterate tr_list[1:]
        for index, tr in enumerate(tr_list[1:]):
            # Episode rows carry no class attribute, so skip any tr
            # that does have a class attribute value
            if tr.get('class'):
                continue
            # 'src' attribute of the img tag under the row's first td
            url_thumbnail = tr.select_one('td:nth-of-type(1) img').get('src')
            # 'href' attribute of the a tag that is a child of the first td
            from urllib import parse
            url_detail = tr.select_one('td:nth-of-type(1) > a').get('href')
            query_string = parse.urlsplit(url_detail).query
            query_dict = parse.parse_qs(query_string)
            # print(query_dict)
            no = query_dict['no'][0]
            # Text of the a element that is a child of the second td
            title = tr.select_one('td:nth-of-type(2) > a').get_text(strip=True)
            # Text of the strong tag under the third td
            rating = tr.select_one('td:nth-of-type(3) strong').get_text(strip=True)
            # Text content of the fourth td
            created_date = tr.select_one('td:nth-of-type(4)').get_text(strip=True)
            # print(title)
            # print(no)
            # print(rating)
            # print(created_date)
            # print(url_thumbnail)
            new_episode = Episode(
                webtoon_id=self.webtoon_id,
                no=no,
                url_thumbnail=url_thumbnail,
                title=title,
                rating=rating,
                created_date=created_date,
            )
            episode_list.append(new_episode)
            # print(episode_list)
        return episode_list
class Episode:
    """Value object for one webtoon episode scraped from the list page."""

    def __init__(self, webtoon_id, no, url_thumbnail, title, rating, created_date):
        """Store the raw scraped fields as-is (strings except webtoon_id)."""
        self.webtoon_id = webtoon_id
        self.no = no
        self.url_thumbnail = url_thumbnail
        self.title = title
        self.rating = rating
        self.created_date = created_date

    @property
    def url(self):
        """Absolute URL of this episode's detail page on comic.naver.com."""
        query = parse.urlencode({'titleId': self.webtoon_id, 'no': self.no})
        return "http://comic.naver.com/webtoon/detail.nhn?" + query
# if __name__ == '__main__':
# webtoon1 = Webtoon(703845)
# print(webtoon1.title)
# webtoon1.update()
#
# for episode in webtoon1.episode_list:
# print(episode.url)
| UTF-8 | Python | false | false | 6,218 | py | 9 | crwaler-practice.py | 7 | 0.539444 | 0.53287 | 0 | 170 | 30.317647 | 97 |
ravitejalvr/Insight | 16,630,113,407,583 | c118fbe6a3576dee0edb470d8ff3fd98ec0c08b3 | 9ed1e5ea14d60a9daf6f7e5572f14cf37f1cba61 | /src/Source.py | aaa3fdcd4ed9e7072f7be38f3cb4cccfcaa2023b | [] | no_license | https://github.com/ravitejalvr/Insight | c7452d9d765bc076f7b9b6ec75ca2b5870e99731 | 284ab5afd647b55bcf01cce3b2038f4534e9a3de | refs/heads/master | 2020-03-25T02:42:46.205948 | 2018-08-03T15:04:52 | 2018-08-03T15:04:52 | 143,304,004 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import glob
import os
import csv
import sys
"""
This Function get_indexes taken in the first row of the data (index) as input and gives out
the indexes pertaining to name,cost,last name and first name.
"""
def get_indices(index):
index = index.strip().split(',')
nm_ind = 0;
cost_ind = 0;
for ind,name in enumerate(index):
if name == "drug_name": # Check for Name of the Drug index
nm_ind = ind
if name == "drug_cost": # Check for Cost of the Drug index
cost_ind = ind
if name == "prescriber_last_name": # Check for Last Name index
last_ind = ind
if name == "prescriber_first_name": # Check for First Name index
first_ind = ind
return nm_ind,cost_ind,last_ind,first_ind
"""
This Function get_Dict creates a dictionary drug_name with Prescriptors and the Drugs they prescribed.
The Second Dict drug_Cost is a sorted dictionoary(Just like a nested list), which contains
details about the Drugs and total cost in descending order.
Inputs for this function are the indexes of name, cost, last name and first name of the given data.
"""
def get_Dict(data,nm_ind,cost_ind,last_ind,first_ind):
drug_name = {};
drug_cost = {};
for i in range(1,len(data)):
data_loc = data[i].strip().split(',')
name = data_loc[nm_ind] # Name of the Drug is selected
pres_name = data_loc[first_ind]+data_loc[last_ind] # Creating prescribers full name
drug_name[pres_name] = name # Creating a Dict with Prescribers Name and Drug Name
if(name in drug_cost.keys()):
drug_cost[name] += int(data_loc[cost_ind]) # Creating total cost cumulatively
else:
drug_cost[name] = int(data_loc[cost_ind]) # Creating first instance of the prescribers name if not existing
drug_Cost = sorted(drug_cost.items(),key=lambda x: x[1],reverse = True) # Create a sorted Dict w.r.t cost
return drug_name,drug_Cost
"""
This Function create_csv taken in the dictionary with prescribers name and drug name as well as
the sorted dictionary of the drug name and cost to create a csv file in the selected folder.
"""
def create_csv(drug_name,drug_dict):
index_out = ['drug_name','num_prescriber','total_cost'] # First line of the output csv file
out = csv.writer(open(output_folder,"w"), delimiter=',',quoting=csv.QUOTE_NONE) # Setting up the Output Directory with no quotes
out.writerow(index_out)
for drug in drug_dict:
row_out = [drug[0],list(drug_name.values()).count(drug[0]),drug[1]] # Create a row for each drug in the datafile
out.writerow(row_out)
input_folder = str(sys.argv[1]); # Take the First input as Input_Folder
output_folder = str(sys.argv[2]);# Take the Secong input as Output_Folder
# Checking if path exists for the input folder
file = glob.glob(input_folder)
if(file):
with open(file[0],'r') as fin:
data = fin.readlines()
else:
sys.exit('Given Input Folder Does not Exist')
index = data[0]
name_index,cost_index,firstname_index,lastname_index = get_indices(index)
drug_name,drug_cost = get_Dict(data,name_index,cost_index,lastname_index,firstname_index)
create_csv(drug_name,drug_cost)
print('Successfully Executed')
| UTF-8 | Python | false | false | 3,345 | py | 5 | Source.py | 2 | 0.661584 | 0.657997 | 0 | 75 | 42.6 | 132 |
joshuafan61/IAQSPR | 14,654,428,447,425 | d59f3e7c2e215c41fde7ae92a8f984f36df4d8ba | d3612db18bc250b0cf41a8f0a156d342715d9b9f | /ProtoDemo/pwmmotortest.py | a7d663e5239f4db8d9efee50f917728a8b999008 | [] | no_license | https://github.com/joshuafan61/IAQSPR | b28952d21a96f0b643ebc774b751633b6882a7fa | 5d5560654475e09a261914276700161f8d24eb25 | refs/heads/master | 2022-12-21T17:31:00.801030 | 2020-06-21T14:07:02 | 2020-06-21T14:07:02 | 273,867,873 | 0 | 0 | null | false | 2022-12-11T10:54:32 | 2020-06-21T08:52:38 | 2020-06-21T14:11:51 | 2022-12-11T10:54:31 | 37,039 | 0 | 0 | 2 | Python | false | false | #!/usr/bin/env python3
"""
File: skidsteer_four_pwm_test.py
This code will test Raspberry Pi GPIO PWM on four GPIO
pins. The code test ran with L298N H-Bridge driver module connected.
Website: www.bluetin.io
Date: 27/11/2017
"""
__author__ = "Mark Heywood"
__version__ = "0.1.0"
__license__ = "MIT"
from gpiozero import PWMOutputDevice
from time import sleep
#///////////////// Define Motor Driver GPIO Pins /////////////////
# Pin numbers below are passed straight to gpiozero's PWMOutputDevice.
# Motor A, Left Side GPIO CONSTANTS
PWM_FORWARD_LEFT_PIN = 12 # IN1 - Forward Drive
PWM_REVERSE_LEFT_PIN = 16 # IN2 - Reverse Drive
# Motor B, Right Side GPIO CONSTANTS
PWM_FORWARD_RIGHT_PIN = 20 # IN1 - Forward Drive
PWM_REVERSE_RIGHT_PIN = 21 # IN2 - Reverse Drive
# Initialise objects for H-Bridge PWM pins
# Set initial duty cycle to 0 and frequency to 1000
# (positional args: pin, active_high=True, initial_value=0, frequency=1000)
forwardLeft = PWMOutputDevice(PWM_FORWARD_LEFT_PIN, True, 0, 1000)
reverseLeft = PWMOutputDevice(PWM_REVERSE_LEFT_PIN, True, 0, 1000)
forwardRight = PWMOutputDevice(PWM_FORWARD_RIGHT_PIN, True, 0, 1000)
reverseRight = PWMOutputDevice(PWM_REVERSE_RIGHT_PIN, True, 0, 1000)
def _drive(label, duty_fl, duty_rl, duty_fr, duty_rr):
    # Shared helper: log the manoeuvre, then load the four H-bridge duty
    # cycles in a fixed order (forward-left, reverse-left, forward-right,
    # reverse-right) onto the module-level PWMOutputDevice objects.
    print(label)
    forwardLeft.value = duty_fl
    reverseLeft.value = duty_rl
    forwardRight.value = duty_fr
    reverseRight.value = duty_rr

def allStop():
    """Release both motors (0% duty on all four channels)."""
    _drive("allStop", 0, 0, 0, 0)

def forwardDrive():
    """Full speed ahead on both tracks."""
    _drive("forwardDrive", 1.0, 0, 1.0, 0)

def reverseDrive():
    """Full speed reverse on both tracks."""
    _drive("reverseDrive", 0, 1.0, 0, 1.0)

def spinLeft():
    """Spin in place to the left: left track reverse, right track forward."""
    _drive("spinLeft", 0, 1.0, 1.0, 0)

def SpinRight():
    """Spin in place to the right: left track forward, right track reverse."""
    _drive("SpinRight", 1.0, 0, 0, 1.0)

def forwardTurnLeft():
    """Drive forward while curving left (left track at reduced duty)."""
    _drive("fowardTurnLeft", 0.8, 0, 1, 0)  # label typo preserved from original

def forwardTurnRight():
    """Drive forward while curving right (right track at half duty)."""
    _drive("forwardTurnRight", 1, 0, 0.5, 0)

def reverseTurnLeft():
    """Reverse while curving left (left track at half duty)."""
    _drive("reverseTurnLeft", 0, 0.5, 0, 1)

def reverseTurnRight():
    """Reverse while curving right (right track at half duty)."""
    _drive("reverseTurnRight", 0, 1, 0, 0.5)
def main():
    """Demo routine: stop everything, then spin left for 2.5 seconds.

    The bare triple-quoted blocks below are no-op string statements the
    author used to disable the other test sequences; remove the quotes
    to re-enable them.
    """
    allStop()
    '''
    forwardDrive()
    sleep(5)
    reverseDrive()
    sleep(5)
    '''
    spinLeft()
    sleep(2.5)
    '''
    SpinRight()
    sleep(2.5)
    forwardTurnLeft()
    sleep(5)
    forwardTurnRight()
    sleep(5)
    reverseTurnLeft()
    sleep(5)
    reverseTurnRight()
    sleep(5)
    allStop()
    '''
if __name__ == "__main__":
""" This is executed when run from the command line """
main() | UTF-8 | Python | false | false | 3,200 | py | 13 | pwmmotortest.py | 12 | 0.601563 | 0.567188 | 0 | 126 | 24.404762 | 68 |
melinaverger/ed_project | 11,819,750,050,477 | 941ec3c18c7f16a8d55d0f19598815165095dbec | dd6dadeded93691556f8a24602a69d82e3585d10 | /src/preprocessing/formatting/action.py | ffaf8f9d28d4b64ba8af6e6a1d2453efac2b16db | [] | no_license | https://github.com/melinaverger/ed_project | b60815cf54f1d7cebd95c0be792462f8572cea05 | e28d3e04238850a138d84558f860d78cea7998a0 | refs/heads/main | 2023-07-19T21:12:42.431787 | 2021-09-01T08:11:07 | 2021-09-01T08:11:07 | 398,860,706 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convert the action data to a more easily manipulated format,
without changing them."""
import pandas as pd
import sys
sys.path.append('../..')
import utils
# Input/output data directories, relative to this script's location.
DATA_PATH = "../../../data/01_raw_data/"
OUTPUT_PATH = "../../../data/02_converted_data/"
def rename_columns(data):
    """Map the raw export headers onto the project's snake_case names.

    Renames in place and returns the same DataFrame for chaining.
    """
    header_map = {
        "ID": "st_id",
        "Nombre total d'actions": "st_nb_action",
    }
    data.rename(columns=header_map, inplace=True)
    return data
def preprocess_data():
    """Load the raw 'action' CSV, rename its columns, and save the result.

    The file is located via the project-level ``utils`` helpers and the
    converted frame is written to OUTPUT_PATH under the name "action".
    NOTE(review): behavior when no action file exists depends on
    utils.get_file_or_error — presumably it raises; confirm.
    """
    action_name = utils.get_file_or_error(DATA_PATH, "action")
    data = pd.read_csv(DATA_PATH + action_name)
    data = rename_columns(data)
    utils.save_or_replace(data, "action", OUTPUT_PATH)
if __name__ == "__main__":
preprocess_data()
print("Action data have been converted and saved in "
"{}.".format(OUTPUT_PATH))
| UTF-8 | Python | false | false | 855 | py | 53 | action.py | 26 | 0.607018 | 0.6 | 0 | 34 | 24.029412 | 67 |
xjh1230/py_algorithm | 7,541,962,591,235 | 898465ebca38846d372ab59542699fce0df2ab01 | 92829e5c16175cb0b7ae79fe4df92d443c5c5edd | /test/sort.py | 68221bae124cac3b49aaa7b18014235562f2c5cb | [] | no_license | https://github.com/xjh1230/py_algorithm | bc58010a70154ecfcf4ded3e9bf00ef67da80063 | d9018d153e405202c79a6421f7c90992b353ecd9 | refs/heads/master | 2021-06-12T10:46:20.538485 | 2021-03-24T05:36:28 | 2021-03-24T05:36:28 | 167,361,780 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2019/3/11 10:34
# @Author : Xingjh
# @Email : xjh_0125@sina.com
# @File : sort.py
# @Software: PyCharm
class Solution:
    """Hand-rolled sorting algorithms (merge, quick, heap) for practice.

    ``self.count`` accumulates comparison counts across calls for rough
    benchmarking; it is never reset automatically.
    """
    def __init__(self):
        '''
        Initialize the shared comparison counter used by the sorts.
        '''
        self.count = 0
        pass
    def merge_sort(self, arr):
        """Top-down merge sort; returns a new sorted list (input kept)."""
        import math
        count = len(arr)
        if count < 2:
            return arr
        else:
            mid = math.floor(count / 2)
            l_arr = arr[:mid]
            r_arr = arr[mid:]
            return self.merge(self.merge_sort(l_arr), self.merge_sort(r_arr))
    def merge(self, l_arr, r_arr):
        """Merge two sorted lists into one, counting comparisons."""
        result = []
        l, r = 0, 0
        while l < len(l_arr) and r < len(r_arr):
            self.count += 1
            l_val, r_val = l_arr[l], r_arr[r]
            if l_val < r_val:
                result.append(l_arr[l])
                l += 1
            else:
                result.append(r_arr[r])
                r += 1
        # Append whichever side still has elements left over.
        if l < len(l_arr):
            result.extend(l_arr[l:])
        if r < len(r_arr):
            result.extend(r_arr[r:])
        return result
    def quick_sort(self, arr, left=None, right=None):
        '''
        Each pass picks a reference (pivot) value: smaller elements go
        to its left, larger ones to its right, then both halves are
        sorted recursively the same way. Sorts arr in place.
        (Translated from the original Chinese docstring.)
        :param arr: list to sort
        :param left: inclusive lower index (defaults to 0)
        :param right: inclusive upper index (defaults to len(arr) - 1)
        :return: the same list, sorted
        '''
        left = left if isinstance(left, (int, float)) else 0
        right = right if isinstance(right, (int, float)) else len(arr) - 1
        if left < right:
            mid = self.partition(arr, left, right)
            self.quick_sort(arr, left, mid)
            self.quick_sort(arr, mid + 1, right)
        return arr
    def partition(self, arr, left, right):
        """Partition arr[left:right+1] around its last element and
        return the index used as the split point."""
        index = right  # pivot position: last element of the range
        right -= 1
        while left < right:
            self.count += 1
            if arr[left] <= arr[index]:
                left += 1
            elif arr[right] > arr[index]:
                right -= 1
            else:
                self.swap(arr, left, right)
        if arr[right] > arr[index]:
            self.swap(arr, right, index)
        return right
    def swap(self, arr, left, right):
        # NOTE(review): shadowed by the identical swap(self, arr, i, j)
        # defined later in this class; the later definition wins.
        arr[left], arr[right] = arr[right], arr[left]
    def heap_sort(self, arr):
        """In-place heap sort: build a max heap, then repeatedly move
        the root to the end and re-heapify the shrinking prefix."""
        self.build_heap(arr)
        self.sort_heap(arr)
        return arr
    def build_heap(self, arr):
        # Heapify bottom-up, from the last internal node to the root.
        count = len(arr) // 2
        for i in range(count, -1, -1):
            self.heapify(arr, len(arr), i)
    def sort_heap(self, arr):
        # Swap the max (root) into its final slot, shrink the heap, repeat.
        for i in range(len(arr) - 1):
            self.swap(arr, 0, len(arr) - 1 - i)
            self.heapify(arr, len(arr) - 1 - i, 0)
    def heapify(self, arr, heap_size, pos):
        '''
        Sift down to restore the max-heap property: the root of each
        subtree holds the largest value, with children located by index:
        0
        1 * 2
        3 4 * 5 6
        i.e. node i has children 2*i+1 and 2*i+2.
        (Translated from the original Chinese docstring.)
        :param arr: array backing the heap
        :param heap_size: heap size, at most len(arr)
        :param pos: index of the subtree root to sift down
        :return:
        '''
        count, l, r = heap_size, pos * 2 + 1, pos * 2 + 2
        while l < count or r < count:
            largest = l
            if r < count and arr[l] < arr[r]:
                largest = r
            if arr[pos] >= arr[largest]:
                break
            else:
                self.swap(arr, pos, largest)
            pos = largest  # largest is the left or right child; keep sifting down that subtree
            l = pos * 2 + 1
            r = pos * 2 + 2
    def swap(self, arr, i, j):
        # In-place element exchange (this definition overrides the
        # earlier swap above; behavior is identical).
        arr[i], arr[j] = arr[j], arr[i]
    def heap_sort1(self, arr):
        """Alternative heap sort using the *1 helpers below.

        NOTE(review): build_heap1 passes len(arr) - 1 as the heap size,
        excluding the final element from the initial heap — confirm this
        variant sorts correctly before relying on it.
        """
        self.build_heap1(arr)
        self.sort_heap1(arr)
        return arr
    def build_heap1(self, arr):
        mid = len(arr) // 2
        for i in range(mid, -1, -1):
            self.heapify1(arr, len(arr) - 1, i)
        print(arr)  # debug output left in by the author
    def sort_heap1(self, arr):
        size = len(arr) - 1
        for i in range(size):
            self.swap(arr, 0, size - i)
            self.heapify1(arr, size - i, 0)
    def heapify1(self, arr, heap_size, top):
        """Same sift-down as heapify(), with renamed locals."""
        size, l, r = heap_size, top * 2 + 1, top * 2 + 2
        while l < size or r < size:
            largest = l
            if r < size and arr[l] < arr[r]:
                largest = r
            if arr[top] >= arr[largest]:
                break
            else:
                self.swap(arr, top, largest)
            top = largest
            l = top * 2 + 1
            r = top * 2 + 2
    def quick_sort1(self, arr, l=None, r=None):
        """Quick sort variant with a local partition helper.

        NOTE(review): ``r = l if isinstance(r, int) else ...`` looks like
        a typo for ``r if ...`` — an explicitly passed r is replaced by
        l. Also note the recursive calls go through self.quick_sort, not
        quick_sort1.
        """
        l = l if isinstance(l, int) else 0
        r = l if isinstance(r, int) else len(arr) - 1
        def partition(arr, l, r):
            # Last element is the pivot; grow the "smaller" prefix from
            # the left and shrink the scan window from the right.
            index = r
            r -= 1
            while l <= r:
                if arr[l] < arr[index]:
                    l += 1
                elif arr[r] >= arr[index]:
                    r -= 1
                else:
                    self.swap(arr, l, r)
                    l += 1
            if arr[r] > arr[index]:
                self.swap(arr, r, index)
            return r
        mid = partition(arr, l, r)
        self.quick_sort(arr, 0, mid)
        self.quick_sort(arr, mid + 1, len(arr) - 1)
        # print(arr)
        return arr
if __name__ == '__main__':
    # Manual benchmark harness: sort a fixed 11-element list and print
    # the result plus the accumulated comparison count.
    s = Solution()
    arr = [4, 5, 3, 9, 1, 8, 6, 2, 7, 2, 1]
    # print(len(arr))
    # print(arr)
    # s.heap_sort1(arr)
    # print(arr)
    # arr2 = s.merge_sort(arr)
    # print(arr2, len(arr), len(arr2), s.count) # 34
    # s.count = 0
    arr2 = s.quick_sort1(arr)
    print(arr2, len(arr), len(arr2), s.count) # 62
| UTF-8 | Python | false | false | 5,593 | py | 144 | sort.py | 142 | 0.442796 | 0.422214 | 0 | 193 | 26.943005 | 77 |
febyputri28/ppat | 8,916,352,145,941 | d601a76d3a341bf29dc65ca5f4a51102a91aa790 | 1295557da19f8b3d66624f0b341cab3f7017681d | /strap/migrations/0090_auto_20210801_0927.py | 397331e384643c0c58483669561908665d86b92d | [] | no_license | https://github.com/febyputri28/ppat | cebdead0346183e4b44b33c342330dfc5cec0a97 | 41cd9122eb1f407634f2d21aafa560205975e7f3 | refs/heads/master | 2023-07-10T02:47:58.071331 | 2021-08-16T15:26:21 | 2021-08-16T15:26:21 | 396,847,610 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.7 on 2021-08-01 02:27
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django; applies on top of migration 0089 of the
    # `strap` app.
    dependencies = [
        ('strap', '0089_auto_20210728_1218'),
    ]
    # Renames karyawan.tanggal_karyawan -> tanggal_lahir_karyawan
    # (presumably the employee's date of birth, "tanggal lahir").
    operations = [
        migrations.RenameField(
            model_name='karyawan',
            old_name='tanggal_karyawan',
            new_name='tanggal_lahir_karyawan',
        ),
    ]
| UTF-8 | Python | false | false | 389 | py | 91 | 0090_auto_20210801_0927.py | 69 | 0.588689 | 0.508997 | 0 | 18 | 20.611111 | 47 |
penta2019/btc_bot_framework | 17,128,329,583,859 | 3beb692af92e1ab62eb11165e9c01a8d9a04246a | c3fb19c2e228fd1d32516fe64f38a9006772d260 | /samples/bitmex/trade.py | db352aae4dfe4bcdc0e8ae121853345a82d39ce7 | [
"MIT"
] | permissive | https://github.com/penta2019/btc_bot_framework | 0642725cfd5c7b9f1c4e011f30b292cc4fc7ba50 | 3793c4c1d170db31a8017096f16946610c0a062e | refs/heads/master | 2022-01-24T01:25:17.346272 | 2022-01-18T10:29:28 | 2022-01-18T10:29:28 | 224,544,310 | 131 | 43 | MIT | false | 2022-01-18T10:29:29 | 2019-11-28T01:11:14 | 2022-01-16T17:17:42 | 2022-01-18T10:29:28 | 350 | 105 | 22 | 0 | Python | false | false | import botfw
# Run the framework's Trade test helper against the BitMEX BTC/USD market.
botfw.test_trade(botfw.Bitmex.Trade('BTC/USD'))
| UTF-8 | Python | false | false | 61 | py | 81 | trade.py | 79 | 0.770492 | 0.770492 | 0 | 2 | 29.5 | 47 |
suraj-testing2/Solar_YouTube | 19,018,115,187,505 | b5dcb623f3f4ad1cd4fb039545e7a8d18373bfcc | c4c948f21cbd519cadede642ef9478a0fc591abe | /networkx-d3-v2/auth/context_processors.py | a3e911657aa4b46de64a246b184dbfa199fa9fda | [
"Apache-2.0"
] | permissive | https://github.com/suraj-testing2/Solar_YouTube | 2a9b625e7c37fc44cad1dd792ede8410889c1bf0 | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | refs/heads/master | 2022-09-21T06:11:00.307058 | 2020-01-06T12:07:13 | 2020-01-06T12:07:13 | 267,720,870 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from google.appengine.api import users
def google_user(request):
    """Template context processor exposing the logged-in Google user.

    Adds a "google_user" key to the template context: the current App
    Engine ``users`` account when OAuth credentials are present in the
    session, otherwise None.
    """
    has_credentials = bool(request.session.get('credentials', None))
    current = users.get_current_user() if has_credentials else None
    return {"google_user": current}
| UTF-8 | Python | false | false | 318 | py | 58 | context_processors.py | 41 | 0.660377 | 0.660377 | 0 | 13 | 23.461538 | 68 |
JoelBNU/ShadowOui-Tutorial | 2,697,239,483,824 | d1cb20b3e7b8595d8642dd5b80f7a92590973fd2 | cb16a721c2c1323fbaa76e97c9e29c5b45cf6cd9 | /SCRIPTS/screen_pattern_grid.py | f844fe7fb6a354e24a9337df05ac69ee101cc970 | [
"MIT"
] | permissive | https://github.com/JoelBNU/ShadowOui-Tutorial | c11a1907dfded9233910aaf0c7993daf492e70dd | 4629d896c1f02f811d5a1b491f6fca1a7c67a70e | refs/heads/master | 2022-04-19T07:39:16.338099 | 2020-04-16T19:50:57 | 2020-04-16T19:50:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# creates grid.pol
#
import numpy
corners = numpy.array([-3.0,-1.5,3,1.5]) # x_leftbottom,y_leftbottom,x_rightup,y_rightup
t = numpy.array([9.0,4.5]) # translation vector (i.e., horizontal and vertical periods)
n = numpy.array([2,3]) # number of translations (H,V) on each side of the origin
file_out = "grid.pol"
f = open(file_out,'w')
# Total polygon count: (2*nH+1) x (2*nV+1) translated copies of the base rectangle.
nn = (2*n[0]+1)*(2*n[1]+1)
f.write("%d\n"%nn)
#pay attention that the last element is not included...
n0 = numpy.arange(-n[0],n[0]+1)
n1 = numpy.arange(-n[1],n[1]+1)
# Write each translated rectangle as a 4-vertex polygon: a vertex count
# line, then the corners (bottom-left, top-left, top-right, bottom-right).
for i in n0:
    for j in n1:
        f.write("%d\n"%4)
        f.write( "%f %f \n"%(corners[0]+i*t[0], corners[1]+j*t[1]) )
        f.write( "%f %f \n"%(corners[0]+i*t[0], corners[3]+j*t[1]) )
        f.write( "%f %f \n"%(corners[2]+i*t[0], corners[3]+j*t[1]) )
        f.write( "%f %f \n"%(corners[2]+i*t[0], corners[1]+j*t[1]) )
f.close()
print('file %s written to disk.'%file_out)
| UTF-8 | Python | false | false | 900 | py | 76 | screen_pattern_grid.py | 26 | 0.535556 | 0.484444 | 0 | 28 | 31.142857 | 90 |
joysn/leetcode | 16,320,875,755,411 | 001b70b268d227eaa069f71d55e943aeafe007d3 | 1ecaac1194afe02bc57b6f06e5f94c3f6b636c50 | /leetcode1219_path_max_gold.py | 28ebb6321b26589ca75cc79c907cf5f294f841df | [] | no_license | https://github.com/joysn/leetcode | 96b2fe3272096ca151ba7216592f5a06ae8aa908 | 7e12c5b2e202f44bd4ad02bc804495a23866404c | refs/heads/master | 2022-02-21T18:32:14.970972 | 2022-01-01T08:44:04 | 2022-01-01T08:44:04 | 242,932,865 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://leetcode.com/problems/path-with-maximum-gold/
# 1219. Path with Maximum Gold
# In a gold mine grid of size m * n, each cell in this mine has an integer representing the amount of gold in that cell, 0 if it is empty.
# Return the maximum amount of gold you can collect under the conditions:
# Every time you are located in a cell you will collect all the gold in that cell.
# From your position you can walk one step to the left, right, up or down.
# You can't visit the same cell more than once.
# Never visit a cell with 0 gold.
# You can start and stop collecting gold from any position in the grid that has some gold.
# Example 1:
# Input: grid = [[0,6,0],[5,8,7],[0,9,0]]
# Output: 24
# Explanation:
# [[0,6,0],
# [5,8,7],
# [0,9,0]]
# Path to get the maximum gold, 9 -> 8 -> 7.
# Example 2:
# Input: grid = [[1,0,7],[2,0,6],[3,4,5],[0,3,0],[9,0,20]]
# Output: 28
# Explanation:
# [[1,0,7],
# [2,0,6],
# [3,4,5],
# [0,3,0],
# [9,0,20]]
# Path to get the maximum gold, 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7.
# Constraints:
# 1 <= grid.length, grid[i].length <= 15
# 0 <= grid[i][j] <= 100
# There are at most 25 cells containing gold.
global maxGold  # no-op at module scope; kept from the original
maxGold = 0  # module-level best-path accumulator
def getMaximumGold( grid) -> int:
    """Return the maximum gold collectible by any valid path in *grid*.

    Depth-first search from every non-zero cell: a path moves in the
    four cardinal directions, never revisits a cell, and never enters a
    zero cell. Cells are temporarily zeroed while on the current path
    and restored on backtrack, so the grid is unmodified on return.

    Fix over the original: the running maximum is kept in a local
    closure variable instead of the module-level ``maxGold`` global, so
    repeated calls no longer return stale maxima from earlier calls and
    no external reset is required.
    """
    rows, cols = len(grid), len(grid[0])
    best = 0

    def collect(r, c, total):
        # Extend the current path through (r, c), updating the best total.
        nonlocal best
        total += grid[r][c]
        if total > best:
            best = total
        saved = grid[r][c]
        grid[r][c] = 0  # mark visited for this path
        for dr, dc in ((-1, 0), (1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] != 0:
                collect(nr, nc, total)
        grid[r][c] = saved  # backtrack: restore the cell

    # Any gold cell may start a path.
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] != 0:
                collect(r, c, 0)
    return best
# Quick self-checks: each print should output True.
maxGold = 0  # reset the accumulator before each run
print(getMaximumGold([[0,6,0],[5,8,7],[0,9,0]])==24)
maxGold = 0
print(getMaximumGold([[0,6],[5,8]])==19)
#print("MaxGold is",maxGold) | UTF-8 | Python | false | false | 2,025 | py | 110 | leetcode1219_path_max_gold.py | 109 | 0.594074 | 0.536296 | 0 | 70 | 26.957143 | 138 |
szhmery/leetcode | 14,594,298,903,858 | ab966f16377975bc64404e6639cd1bed10c78421 | e831c22c8834030c22c54b63034e655e395d4efe | /LinkedList/19-RemoveNthFromEnd.py | b405bc73863d19164b3fdd88142c2392e9bc42ef | [] | no_license | https://github.com/szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for singly-linked list.
class ListNode:
    """Node of a singly linked list: a payload plus a successor reference."""

    def __init__(self, val=0, next=None):
        """Store *val* and the optional *next* node (None terminates the list)."""
        self.val, self.next = val, next
class Solution:
    """LeetCode 19: remove the n-th node from the end of a singly linked list.

    Three equivalent implementations are kept for comparison; each returns
    the (possibly new) head of the modified list.  Annotations are written
    as strings so defining the class never depends on when ``ListNode`` is
    evaluated.  Dead commented-out code from the original was removed.
    """

    def removeNthFromEnd(self, head: "ListNode", n: int) -> "ListNode":
        """One-pass two-pointer solution using a dummy head.

        ``first`` is advanced n+1 steps ahead of ``second``; when ``first``
        falls off the end, ``second.next`` is the node to unlink.
        Returns None for an empty list or n == 0 (original behavior).
        """
        if not head or n == 0:
            return
        dummy = ListNode()
        dummy.next = head
        first = dummy
        second = dummy
        for _ in range(n + 1):
            first = first.next
        while first:
            first = first.next
            second = second.next
        second.next = second.next.next
        return dummy.next

    def removeNthFromEnd3(self, head: "ListNode", n: int) -> "ListNode":
        """Two-pass solution: count the length, then unlink node length - n."""
        dummy = ListNode(0)
        dummy.next = head
        first = head
        length = 0
        while first:
            length += 1
            first = first.next
        length -= n
        first = dummy
        while length > 0:
            first = first.next
            length -= 1
        first.next = first.next.next
        return dummy.next

    def removeNthFromEnd2(self, head: "ListNode", n: int) -> "ListNode":
        """One-pass variant without a dummy node.

        ``fast`` starts n nodes ahead of ``slow``; when ``fast`` runs out,
        ``slow`` is the node to remove and ``pre`` its predecessor
        (``pre`` is None exactly when the head itself is removed).
        """
        fast = head
        while n > 0:
            fast = fast.next
            n -= 1
        pre = None
        slow = head
        while slow and fast:
            pre = slow
            slow = slow.next
            fast = fast.next
        if pre:
            pre.next = slow.next
        else:
            head = head.next
        return head
if __name__ == '__main__':
    def _print_list(header, node):
        # Render a list exactly like the original ad-hoc loops did:
        # the header line, then "v1->v2->...->" (with a trailing arrow).
        print(header)
        while node is not None:
            print(str(node.val), end='->')
            node = node.next

    # The original repeated this print loop seven times and compared with
    # `!= None`; the behavior (and output) is unchanged.
    solution = Solution()

    rawList = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    _print_list("\nBefore1:", rawList)
    rawList = solution.removeNthFromEnd2(rawList, 2)
    _print_list("\nAfter1:", rawList)

    rawList = ListNode(1, ListNode(2))
    _print_list("\nBefore2:", rawList)
    rawList = solution.removeNthFromEnd2(rawList, 2)
    _print_list("\nAfter2:", rawList)

    rawList = ListNode(1)
    rawList = solution.removeNthFromEnd2(rawList, 1)
    _print_list("\nAfter3:", rawList)

    rawList = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    rawList = solution.removeNthFromEnd3(rawList, 2)
    _print_list("\nMethod 3 After:", rawList)

    rawList = ListNode(1)
    rawList = solution.removeNthFromEnd3(rawList, 1)
    _print_list("\nMethod 3 After:", rawList)
| UTF-8 | Python | false | false | 3,183 | py | 275 | 19-RemoveNthFromEnd.py | 274 | 0.544455 | 0.530946 | 0 | 113 | 27.168142 | 77 |
theminer3746/2110101_Com_Prog | 19,396,072,343,971 | cdb8dac6824732506dd4eaec3fbbefc4f27cef84 | 1bfa2d40fc07ef38eb2b74cf6df6d9d3a0d31b86 | /Repetition/Repetition_P10.py | ef6194d6cad695f4abbaf62fed1e685459628417 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-Clear"
] | permissive | https://github.com/theminer3746/2110101_Com_Prog | 9964dff4452036e674d5380d081b9ae3689a7321 | f3f6dd5e34635160d53ddff7fdada218a89f4161 | refs/heads/master | 2021-01-19T18:46:56.756900 | 2017-09-13T13:33:10 | 2017-09-13T13:33:10 | 101,164,096 | 0 | 0 | null | false | 2017-11-08T13:17:05 | 2017-08-23T09:42:30 | 2017-08-23T10:05:12 | 2017-11-08T13:17:05 | 110 | 0 | 0 | 0 | Python | false | null | value = int(input())
# Collect the proper divisors of `value` (read from stdin on the line above);
# print them largest-first, or 'Prime Number' when there are none.
divider = [d for d in range(2, value) if value % d == 0]
if divider:
    print(*divider[::-1])
else:
    print('Prime Number')
| UTF-8 | Python | false | false | 206 | py | 94 | Repetition_P10.py | 93 | 0.592233 | 0.582524 | 0 | 13 | 14.846154 | 25 |
dacut/juliet | 1,288,490,205,741 | 7148ebce5aa4e2d7c92774a01d80a52de1393c08 | ced67045eb43c938a01de48181dbc4f1409886d7 | /build-tools/verify-targets | bebcf7f3f725597a2764a9f13a1eb9aab970cb09 | [
"BSD-2-Clause"
] | permissive | https://github.com/dacut/juliet | d1d5cf9d1ef8c5701fcb7a42c808ef607ba97a51 | b6c73fd42255047680b5271a56d05fe78c625c83 | refs/heads/master | 2016-09-05T22:24:34.791701 | 2015-05-03T06:27:56 | 2015-05-03T06:27:56 | 8,089,602 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import, print_function
from getopt import getopt, GetoptError
from os.path import exists, isfile
from six.moves import cStringIO as StringIO
from sys import argv, exit, stderr, stdin, stdout
def parse_manifest(verify_vars, fd):
    """Scan a Makefile-style manifest and verify files named by watched vars.

    Backslash-terminated lines are joined with their continuation into one
    logical line.  For each ``NAME := value`` assignment whose NAME is in
    *verify_vars*, the whitespace-separated value is treated as a list of
    file names and checked with verify_existence().

    :param verify_vars: set of variable names whose values should be checked.
    :param fd: iterable of manifest lines (an open file object).
    :return: total count of missing / non-regular-file entries.
    """
    n_missing = 0
    buf = StringIO()
    for raw in fd:
        if raw and raw[-1] == '\n':
            raw = raw[:-1]
        if raw.endswith("\\"):
            # Continuation: accumulate and keep reading.
            buf.write(raw[:-1])
            continue
        buf.write(raw)
        logical = buf.getvalue().strip()
        buf = StringIO()
        sep = logical.find(":=")
        if sep != -1:
            name = logical[:sep].strip()
            value = logical[sep + 2:].strip()
            if name in verify_vars:
                n_missing += verify_existence(value.split())
    return n_missing
def verify_existence(filenames):
    """Count entries of *filenames* that are missing or not regular files.

    Blank entries are skipped.  A diagnostic line is printed to stderr for
    each missing or non-file path.

    :param filenames: iterable of path strings (may contain whitespace/empties).
    :return: number of problematic entries.
    """
    missing = 0
    for raw in filenames:
        name = raw.strip()
        if not name:
            continue
        if not exists(name):
            print("Missing file: %s" % name, file=stderr)
            missing += 1
        elif not isfile(name):
            print("Not a file: %s" % name, file=stderr)
            missing += 1
    return missing
def main(args):
    """Parse command-line options, check the requested manifests, and report.

    Reads manifests named on the command line, or stdin when none are given.
    Prints the number of missing files and returns 0 when none are missing,
    1 otherwise (or 1 on a bad option).
    """
    try:
        opts, args = getopt(
            args, "shov:",
            ["sources", "headers", "objects", "varname="])
    except GetoptError as e:
        print(str(e), file=stderr)
        usage()
        return 1

    # Map each fixed option spelling to the manifest variable it selects.
    option_vars = {
        "-s": "CLASSLIB_GEN_SRC", "--sources": "CLASSLIB_GEN_SRC",
        "-h": "CLASSLIB_GEN_HDR", "--headers": "CLASSLIB_GEN_HDR",
        "-o": "CLASSLIB_GEN_OBJ", "--objects": "CLASSLIB_GEN_OBJ",
    }
    verify_vars = set()
    for opt, value in opts:
        if opt in ("-v", "--varname"):
            verify_vars.add(value)
        else:
            verify_vars.add(option_vars[opt])

    n_missing = 0
    if args:
        for filename in args:
            with open(filename) as fd:
                n_missing += parse_manifest(verify_vars, fd)
    else:
        n_missing = parse_manifest(verify_vars, stdin)

    print("%d file(s) missing" % n_missing)
    return 0 if n_missing == 0 else 1
def usage(fd=stderr):
    """Write the command-line help text for verify-targets to *fd*.

    Bug fix: the original definition was misspelled ``usgae``, so the
    ``usage()`` call in main()'s error path raised NameError.  The alias
    below keeps the old name importable for backward compatibility.

    :param fd: writable text stream (defaults to stderr).
    """
    fd.write("""\
Usage: verify-targets [options] <manifest> ...
Open the Makefile manifest file and check that the given files exist.
Options:
    -s | --sources
        Verify that all files in CLASSLIB_GEN_SRC exist.
    -h | --headers
        Verify that all files in CLASSLIB_GEN_HDR exist.
    -o | --objects
        Verify that all files in CLASSLIB_GEN_OBJ exist.
    -v<varname> | --variable=<varname>
        Verify that all files in <varname> exist.
""")
    return

usgae = usage  # backward-compatible alias for the original (misspelled) name
if __name__ == "__main__":
exit(main(argv[1:]))
| UTF-8 | Python | false | false | 2,894 | 175 | verify-targets | 162 | 0.553214 | 0.547339 | 0 | 109 | 25.550459 | 72 |
|
ivanterekh/last-mile-delivery | 11,708,080,879,816 | 50c891c78dd11cabd59f0b34ebeb0ae116564593 | e60e4109380b3c98b8b9a17152e6aafbb292c98c | /cluster.py | 8f70df2b6b615785afeb1ae62b9c6dd5b7f3210b | [] | no_license | https://github.com/ivanterekh/last-mile-delivery | d783887d1e16ab63a11d86329ae7f16f05903206 | dda21d853e7da9238333ef10fa49f60ce47bdd61 | refs/heads/main | 2023-01-03T14:43:41.287228 | 2020-10-31T16:39:54 | 2020-10-31T16:39:54 | 307,831,949 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[99]:
# Notebook-exported script (the "# In[n]:" cell markers are retained).
# It clusters delivery addresses in two independent ways, both with
# KMeans (k = 7):
#   1) by demand profile: one row per address, one column per product
#      (day-1 demand only);
#   2) by geographic position (lat/lng), also joining each point with the
#      coordinates of its cluster centre.
import pandas as pd
import numpy as np
import sklearn.cluster as cl
import matplotlib.pyplot as plt  # NOTE(review): imported but never used below

# In[100]:

# Input table; the drop()/pivot calls below imply it has columns
# key_adr, product, day-1 .. day-7, lat, lng.
df = pd.read_csv('fix_demand.csv', encoding = 'utf-8')

# In[101]:

# Keep only address, product and day-1 demand.
df_demand = df.drop(columns = ['day-2', 'day-3', 'day-4', 'day-5', 'day-6', 'day-7', 'lat', 'lng'])

# In[102]:

# Address x product matrix of summed day-1 demand.
# NOTE(review): pivot_table leaves NaN for address/product combinations that
# never occur; KMeans cannot fit NaN -- verify the input data is dense.
table_demand = pd.pivot_table(df_demand, values='day-1', index=['key_adr'], columns=['product'], aggfunc=np.sum)

# In[103]:

# NOTE(review): no random_state is set, so cluster labels can differ between
# runs -- confirm whether reproducibility matters here.
kmeans = cl.KMeans(n_clusters=7)

# In[104]:

kmeans.fit(table_demand)

# In[105]:

# Demand-cluster label per address (same row order as table_demand).
cl_dem_res = kmeans.labels_

# In[106]:

table_demand = table_demand.reset_index()

# In[107]:

tmp = pd.DataFrame(cl_dem_res, columns = ['cluster'])

# In[108]:

# Attach each address's demand-cluster label (join on positional index).
df_cl_d = table_demand.join(tmp)

# In[109]:

#df_cl_d.to_csv('cluster_by_demand.csv')

# In[110]:

# Second pass: cluster by geographic position only.
df_position = df.drop(columns = ['product', 'day-1', 'day-2', 'day-3', 'day-4', 'day-5', 'day-6', 'day-7'])

# In[111]:

# One row per distinct (address, lat, lng) triple.
df_position = df_position.drop_duplicates(['key_adr', 'lat', 'lng']).reset_index(drop=True)

# In[112]:

df_position = df_position.set_index(['key_adr'])

# In[113]:

kmeans = cl.KMeans(n_clusters=7)

# In[114]:

kmeans.fit(df_position)

# In[115]:

cl_pos_res = kmeans.labels_

# In[116]:

cl_pos_centers = kmeans.cluster_centers_

# In[117]:

# Per-point cluster label plus the centre coordinates of that cluster.
tmp1 = pd.DataFrame(cl_pos_res, columns = ['cluster'])
tmp2 = pd.DataFrame(cl_pos_centers, columns = ['c_lat', 'c_lng'])
tmp1 = tmp1.join(tmp2, on = 'cluster')

# In[118]:

df_position = df_position.reset_index()

# In[119]:

df_cl_p = df_position.join(tmp1)

# In[120]:

#df_cl_p.to_csv('cluster_by_position.csv')
| UTF-8 | Python | false | false | 1,642 | py | 33 | cluster.py | 7 | 0.609622 | 0.55542 | 0 | 138 | 10.891304 | 112 |
Snnzhao/algorithm009-class02 | 6,167,573,059,238 | 35d08e9bde5068bf5409dc9ba585622590ff7c9e | 63b256b7cd925c953db71a4f932acf470a46ec3e | /Week_01/26.删除排序数组中的重复项.py | fe01bc0861dd950840c5bef153405303bfbd1506 | [] | no_license | https://github.com/Snnzhao/algorithm009-class02 | 8e101bed491e0fcdfc90c3e7c2d36a4547ca1b6f | fb79bc93c9057942c77fcf0fd02fe6649243940f | refs/heads/master | 2022-11-23T20:34:40.466808 | 2020-07-26T16:05:52 | 2020-07-26T16:05:52 | 264,933,746 | 1 | 0 | null | true | 2020-05-18T12:32:54 | 2020-05-18T12:32:53 | 2020-05-18T06:05:17 | 2020-05-13T10:32:51 | 5 | 0 | 0 | 0 | null | false | false | #
# @lc app=leetcode.cn id=26 lang=python3
#
# [26] 删除排序数组中的重复项
#
# @lc code=start
class Solution:
    """LeetCode 26: remove duplicates from a sorted array in place."""

    def removeDuplicates(self, nums: "List[int]") -> int:
        """Deduplicate sorted *nums* in place and return the new length.

        Classic two-pointer scheme: ``write`` marks the last unique element
        kept, ``read`` scans ahead; a new value is copied down next to the
        unique prefix.

        Bug fixes vs. the original:
          * returns 0 (not 1) for an empty input;
          * the annotations are strings, because ``typing.List`` is never
            imported in this file and eager evaluation raised NameError.
        """
        if not nums:
            return 0
        write = 0
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        return write + 1
# @lc code=end
| UTF-8 | Python | false | false | 408 | py | 56 | 26.删除排序数组中的重复项.py | 46 | 0.551813 | 0.525907 | 0 | 16 | 23.0625 | 55 |
AdamZhouSE/pythonHomework | 1,554,778,205,488 | 653be8c666bdcc76776abb79839a80018f3c66db | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2899/48090/310921.py | 14f175b3c828eae74913905b89cff721c6bae685 | [] | no_license | https://github.com/AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | a=int(input())
class Solution:
    """Check whether an integer is a power of four."""

    def FourPower(self, num: int) -> bool:
        """Return True iff ``num == 4**k`` for some integer k >= 0.

        A positive power of two has exactly one set bit (num & (num-1) == 0);
        among those, exactly the powers of four satisfy num % 3 == 1.
        """
        if num <= 0:
            return False
        single_bit = num & (num - 1) == 0
        return single_bit and num % 3 == 1
b=Solution()  # module-level instance used by the driver print on the next line
print(str(b.FourPower(a)).lower()) | UTF-8 | Python | false | false | 186 | py | 45,079 | 310921.py | 43,489 | 0.591398 | 0.564516 | 0 | 9 | 19.777778 | 61 |
Boshes/info3180-lab5 | 8,409,546,003,976 | 74975d039680214d8fa905392aaffd61b22f4207 | 1d3c4bdcaa1c9b4649f921ca61b3e27339e4938a | /app/__init__.py | 307e81ce5f5de839bd3bfb79e7ec84e144fa81a5 | [] | no_license | https://github.com/Boshes/info3180-lab5 | 2ddc4e845d4569eecac12c6caa5463cad8d77334 | 00b073785942d1c9a21f8577fd2c1c09b35b28d0 | refs/heads/master | 2021-01-21T21:39:08.298642 | 2016-03-01T18:57:25 | 2016-03-01T18:57:25 | 52,288,113 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import os
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID, COMMON_PROVIDERS
import sys
import logging
# Flask application object and its global extensions.
app = Flask(__name__)

# Mirror application errors to stdout (visible in Heroku's log stream).
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)

# NOTE(review): the secret key and the database URI below (including
# credentials) are hard-coded in source -- they should be moved to
# environment variables / external configuration.
app.config['SECRET_KEY'] = "this is a super secure key"
app.config['OPENID_PROVIDERS'] = COMMON_PROVIDERS
# Heroku deployment of this app: http://sheltered-temple-35750.herokuapp.com/login
app.config['SQLALCHEMY_DATABASE_URI'] = "postgres://cpszstmfjktcva:Lc6vcVRVH8KJz4itSa21w8_-RG@ec2-54-227-250-148.compute-1.amazonaws.com:5432/d78fr778jcg850"
# Local development alternative, kept commented out:
# app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://lab5:lab5@localhost/lab5"

db = SQLAlchemy(app)
db.create_all()  # creates all tables at import time

# Flask-Login setup; 'login' is the view unauthenticated users are sent to
# (per Flask-Login's login_view convention).
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'

# OpenID support; /tmp is the association store directory
# (NOTE(review): /tmp is ephemeral on Heroku -- confirm this is acceptable).
oid = OpenID(app,'/tmp')
from app import views,models | UTF-8 | Python | false | false | 878 | py | 4 | __init__.py | 3 | 0.776765 | 0.732346 | 0 | 25 | 34.16 | 157 |
mousavian/cuckoo | 910,533,103,726 | 3e5c6d1ffe42f0cd396b327d0a6566132112b235 | c8afe11a46612c3e93a4a33b5e7cba19620ca27c | /modules/machinery/lxc.py | a7c067f634400fa099e8e458d459c81f195889b9 | [] | no_license | https://github.com/mousavian/cuckoo | ebc2fc5f94b7be5e5aa9c748cb87a4ed65ae1f93 | 2fdb6260a49151c940c587586da3d7ba126ef657 | refs/heads/master | 2021-03-16T10:18:40.813762 | 2015-11-28T08:15:54 | 2015-11-28T08:15:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import subprocess
import os.path
import time
from lib.cuckoo.common.abstracts import Machinery
from lib.cuckoo.common.exceptions import CuckooMachineError
class LXC(Machinery):
    """Virtualization layer for Linux Containers.

    Skeleton driver: every machinery hook below is a no-op stub returning
    an empty string, so this backend does not actually manage containers
    yet.  Only the availability flag (``locked``) is live.  The expected
    return values are presumably dictated by the Machinery base class --
    TODO confirm against lib.cuckoo.common.abstracts.
    """

    # Class-level default; initialize() also resets it per instance.
    locked = False

    def initialize(self):
        # Mark the machinery as available; no real LXC setup is performed.
        self.locked = False

    def _initialize_check(self):
        # Stub -- no configuration validation implemented.
        return ''

    def _check_vmx(self, host):
        # Stub -- VMX-style config check not applicable / not implemented.
        return ''

    def _check_snapshot(self, host, snapshot):
        # Stub -- snapshot validation not implemented.
        return ''

    def start(self, label):
        # Stub -- starting a container is not implemented.
        return ''

    def stop(self, label):
        # Stub -- stopping a container is not implemented.
        return ''

    def _revert(self, host, snapshot):
        # Stub -- reverting to a snapshot is not implemented.
        return ''

    def _is_running(self, host):
        # Stub -- running-state query not implemented.
        return ''

    def _parse_label(self, label):
        # Stub -- label parsing not implemented.
        return ''

    def _get_host_and_snapshot(self, label):
        # Stub -- label decomposition not implemented.
        return ''

    def availables(self):
        # A machine is available whenever the single lock flag is clear.
        return not self.locked
#return super(LXC, self).availables('lxc') | UTF-8 | Python | false | false | 906 | py | 15 | lxc.py | 7 | 0.626932 | 0.626932 | 0 | 36 | 24.166667 | 59 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.