Dataset schema (⌀ marks columns that contain null values):

| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 7–111 |
| __id__ | int64 | 16.6k–19,705B |
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 5–151 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26–130 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k–687M ⌀ |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0–10.2M ⌀ |
| gha_stargazers_count | int32 | 0–178k ⌀ |
| gha_forks_count | int32 | 0–88.9k ⌀ |
| gha_open_issues_count | int32 | 0–2.72k ⌀ |
| gha_language | string | lengths 1–16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10–2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10–2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1–202k |
| filename | string | lengths 4–112 |
| num_lang_files | int64 | 1–202k |
| alphanum_fraction | float64 | 0.26–0.89 |
| alpha_fraction | float64 | 0.2–0.89 |
| hex_fraction | float64 | 0–0.09 |
| num_lines | int32 | 1–93.6k |
| avg_line_length | float64 | 4.57–103 |
| max_line_length | int64 | 7–931 |
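The records below follow this schema, one source file per row. As orientation, here is a minimal sketch of loading and filtering a dataset with this shape using the Hugging Face `datasets` library; the dataset identifier `stack-like/python-files` is a hypothetical placeholder, not the real path:

```python
from datasets import load_dataset

# Hypothetical identifier; substitute the actual dataset repository.
ds = load_dataset("stack-like/python-files", split="train")

# The features object mirrors the schema table above.
print(ds.features)

# Example filter: permissively licensed, hand-written (non-vendored,
# non-generated) Python files.
subset = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)
print(subset.num_rows)
```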
turalabs/deb | 3,762,391,354,397 | dc1bc965bad137c815c2003d58978e09c5863dde | d86d4c9edd522da76671bc5f799103839b762765 | /deb/utils/data_models/flights.py | d4c69f575a997cae59569ed9be2ad71d04de6fee | [
"Apache-2.0"
]
| permissive | https://github.com/turalabs/deb | 51d3491f38e76957bee06b87ed1fb7bc2f885bd8 | 75f1e0d4dc49e60bbecd10a5c7e98eebbe4e5612 | refs/heads/master | 2023-04-07T09:08:57.607654 | 2021-04-15T03:55:54 | 2021-04-15T03:55:54 | 280,475,854 | 3 | 7 | Apache-2.0 | false | 2021-04-15T03:55:55 | 2020-07-17T16:39:37 | 2021-01-21T23:23:38 | 2021-04-15T03:55:54 | 31,025 | 1 | 5 | 6 | Python | false | false | """
Helper functions and datamodels for Chapter 2 flight records.
"""
from google.cloud import bigquery
import pyarrow
# historical flights schemas
FLIGHTS_CSV_COLUMNS = ["day_of_week", "flight_date", "airline", "tailnumber", "flight_number",
"src", "src_city", "src_state", "dest", "dest_city", "dest_state",
"departure_time", "actual_departure_time", "departure_delay",
"taxi_out", "wheels_off", "wheels_on", "taxi_in",
"arrival_time", "actual_arrival_time", "arrival_delay",
"cancelled", "cancellation_code", "flight_time", "actual_flight_time", "air_time", "flights", "distance",
"airline_delay", "weather_delay", "nas_delay", "security_delay", "late_aircraft_delay"]
FLIGHTS_CSV_SCHEMA = {
'day_of_week': {"type": int, "bq_type": "INT64", "parquet_type": pyarrow.int8(), "description": "Day of the week. A number between 1-7 starting with Monday as 1."},
'flight_date': {"type": str, "bq_type": "DATE", "parquet_type": pyarrow.date32(), "description": "Flight date in YYYY-MM-DD format."},
'airline': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Airline IATA src."},
'tailnumber': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Aircraft tail number unique identifier."},
'flight_number': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Airline flight number. This is a unique number combined with airline, flight_date, src, and dest."},
'src': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Originating airport IATA src."},
'src_city': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Originating airport city/state name."},
'src_state': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Originating state name (United States)."},
'dest': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Destination airport IATA src."},
'dest_city': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Destination airport city/state name."},
'dest_state': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Destination state name (United States)."},
'departure_time': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Scheduled flight departure time in military format (ie: '1725' as 05:25pm)."},
'actual_departure_time': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Actual flight departure time in military format (ie: '1725' as 05:25pm)."},
'departure_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes as a decimal number. Negative numbers represent early flight departure. (ie: -3.5 for 3 minutes and 30 seconds early departure)."},
'taxi_out': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight take-off taxi time in minutes as a decimal (ie: 3.5 as 3 minutes and 30 seconds)."},
'wheels_off': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Flight wheels off the ground take-off time in military format (ie: '1725' as 05:25pm)."},
    'wheels_on': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Flight wheels on the ground landing time in military format (ie: '1725' as 05:25pm)."},
'taxi_in': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight landing taxi time in minutes as a decimal (ie: 3.5 as 3 minutes and 30 seconds)."},
'arrival_time': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Flight scheduled gate arrival time in military format (ie: '1725' as 05:25pm)."},
'actual_arrival_time': {"type": str, "bq_type": "TIME", "parquet_type": pyarrow.string(), "description": "Flight actual gate arrival time in military format (ie: '1725' as 05:25pm)."},
'arrival_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight arrival delay in minutes as a decimal. Negative numbers represent early arrival (ie: -3.5 as 3 minutes and 30 seconds early arrival)."},
'cancelled': {"type": str, "bq_type": "BOOL", "parquet_type": pyarrow.bool_(), "description": "Flight cancellation indicator with 1 indicating a cancelled flight."},
'cancellation_code': {"type": str, "bq_type": "STRING", "parquet_type": pyarrow.string(), "description": "Flight cancellation src. A: Carrier, B: Weather, C: National Air System, D: Security, Empty: Not Cancelled."},
'flight_time': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Scheduled flight time in minutes as a decimal (ie: 120.5 as 2 hours and 30 seconds)."},
'actual_flight_time': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Actual flight time in minutes as a decimal (ie: 120.5 as 2 hours and 30 seconds)."},
'air_time': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight in-air time in minutes as a decimal (ie: 120.5 as 2 hours and 30 seconds)."},
'flights': {"type": str, "bq_type": "INT64", "parquet_type": pyarrow.int8(), "description": "Number of flight legs. This number is typically 1 as a single route."},
'distance': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Distance between airports in miles as a decimal (ie: 1,250.5 miles)."},
'airline_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes due to airline issues."},
'weather_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes due to weather issues."},
'nas_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes due to National Air System (NAS) issues."},
'security_delay': {"type": str, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes due to security issues."},
'late_aircraft_delay': {"type": float, "bq_type": "FLOAT", "parquet_type": pyarrow.float32(), "description": "Flight delay in minutes due to late arriving aircraft."},
}
FUTURE_FLIGHTS_BIGQUERY_SCHEMA = [bigquery.SchemaField('flight_date', 'DATE', mode='REQUIRED'),
bigquery.SchemaField('airline', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('flight_number', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('tailnumber', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('src', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('dest', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('departure_time', 'TIME', mode='REQUIRED'),
bigquery.SchemaField('arrival_time', 'TIME', mode='REQUIRED'),
bigquery.SchemaField('flight_time', 'FLOAT64', mode='NULLABLE'),
bigquery.SchemaField('distance', 'FLOAT64', mode='NULLABLE'),
bigquery.SchemaField('day_of_week', 'INT64', mode='NULLABLE'),
]
def datamodel_flights_column_names():
"""
Get FLIGHTS_CSV_SCHEMA column names (keys)
:return: list
"""
return list(FLIGHTS_CSV_SCHEMA.keys())
def datamodel_flights_bigquery_schema():
"""
Get FLIGHTS_CSV_SCHEMA as BigQuery schema (using bigquery.SchemaField).
:return: list[bigquery.SchemaField]
"""
return [bigquery.SchemaField(k, field_type=v['bq_type'], mode='NULLABLE', description=v['description'])
for k, v in FLIGHTS_CSV_SCHEMA.items()]
def datamodel_flights_parquet_schema():
fields = [(k, v['parquet_type']) for k, v in FLIGHTS_CSV_SCHEMA.items()]
return pyarrow.schema(fields)
| UTF-8 | Python | false | false | 8,285 | py | 108 | flights.py | 79 | 0.638383 | 0.621485 | 0 | 92 | 89.043478 | 263 |
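A brief usage sketch for the flight data-model helpers in the record above, assuming the `deb` package root is on the import path and `google-cloud-bigquery` and `pyarrow` are installed:

```python
from deb.utils.data_models import flights

# CSV column names, in file order.
print(flights.datamodel_flights_column_names()[:5])

# BigQuery schema fields derived from FLIGHTS_CSV_SCHEMA.
bq_schema = flights.datamodel_flights_bigquery_schema()
print(bq_schema[0].name, bq_schema[0].field_type)

# Matching pyarrow schema for writing the same records to Parquet.
print(flights.datamodel_flights_parquet_schema())
```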
howardderekl/LakeCreekRanchPython | 2,585,570,323,468 | 0d2724e35b43e2b264fffdf5eb2ad414e696690c | da1cdd8ec6c407aa113363df648db92bd63bc5ee | /django-env/Scripts/django-admin.py | 620c8c54055d2be2d729c8b6086174eb480c0221 | []
| no_license | https://github.com/howardderekl/LakeCreekRanchPython | 9c9cbaa24885c01bc18cc288f84511cc25eaffee | 525ff1b92cecae89540417161bf83035394b5ac5 | refs/heads/master | 2021-09-05T11:03:59.064799 | 2018-01-26T18:48:40 | 2018-01-26T18:48:40 | 118,970,919 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!c:\users\dhoward\source\repos\lakecreekranchpython\django-env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| UTF-8 | Python | false | false | 190 | py | 12 | django-admin.py | 10 | 0.742105 | 0.742105 | 0 | 5 | 36.8 | 82 |
tbmihailov/OBQA | 14,482,629,760,776 | b591b44a489969442cabb1318e405a1bfbfdc3c6 | 1036bc1b6247db0a57d1dbd854e5b19b982ed871 | /seq2seq/Seq2Seq_Restricted/Seq2SeqControl.py | d474d8086a0677dc7ab494a4beb1693b7cf625b5 | [
"Apache-2.0"
]
| permissive | https://github.com/tbmihailov/OBQA | e6079b1e29339364ec8a8e95cf0ebe7c2c9c847f | 653c5c64ae7eb164bde0b381813afe5f664dcf67 | refs/heads/master | 2020-07-01T06:31:19.032385 | 2019-08-12T17:41:56 | 2019-08-12T17:41:56 | 201,075,108 | 1 | 0 | Apache-2.0 | true | 2019-08-07T15:14:23 | 2019-08-07T15:14:23 | 2019-08-01T21:33:41 | 2019-08-01T21:33:39 | 9,478 | 0 | 0 | 0 | null | false | false | import os
import csv
import torch
import torch.optim as optim
import itertools
import sys
sys.path.append('../')
import dataPreperation.Fact2_Only_F1_H_exact_tokens as data
#from dataPreperation.Fact2_Only_F1_H_exact_tokens import dataPreparation
from allennlp.data.dataset_readers.seq2seq import Seq2SeqDatasetReader
#from obqa_datasetreader import Seq2SeqDatasetReader
from allennlp.data.tokenizers.word_tokenizer import WordTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper, StackedSelfAttentionEncoder
from obqa_seq2seq import SimpleSeq2Seq
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.attention import LinearAttention, BilinearAttention, DotProductAttention
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.predictors import SimpleSeq2SeqPredictor
#Size of output
ENC_EMBEDDING_DIM = 16
TGT_EMBEDDING_DIM = 16
HIDDEN_DIM = 16
CUDA_DEVICE = 0
numEpochs = 3
beamSize = 8
def findExtraVocab(data):
allExtraVocab = []
for i in range(len(data)):
srcData = list(data[i]['source_tokens'])
srcData = [str(i) for i in srcData]
tgtData = list(data[i]['target_tokens'])
tgtData = [str(i) for i in tgtData]
#print(srcData,tgtData)
extra = set(tgtData) - set(srcData)
for j in extra:
allExtraVocab.append(j)
#print(allExtraVocab)
#print (len(allExtraVocab))
#print (len(set(allExtraVocab)))
return allExtraVocab
def main():
trainFile = "../srcData/trainData.csv"
validFile = "../srcData/devData.csv"
testFile = "../srcData/testData.csv"
trainSeq2SeqFile = data.dataPreparation(trainFile)
validSeq2SeqFile = data.dataPreparation(validFile)
testSeq2SeqFile = data.dataPreparation(testFile)
print (testSeq2SeqFile)
#TokenIndexer Determines how string tokens gets represented as arrays of indexes in a model
#SingleIdTokenIndexer = Tokens are single integers
#TokenCharactersIndexer = Tokens as a list of integers
# Read a tsvfile with paired instances (source, target)
reader = Seq2SeqDatasetReader(
source_tokenizer = WordTokenizer(),
target_tokenizer = WordTokenizer(), # Defaults to source_tokenizer
source_token_indexers={'tokens': SingleIdTokenIndexer()},
target_token_indexers={'tokens': SingleIdTokenIndexer()} # Defaults to source_token_indexers
)
# Each of the dataset is a list of each tokens (source_tokens, target_tokens)
train_dataset = reader.read(trainSeq2SeqFile)
validation_dataset = reader.read(validSeq2SeqFile)
test_dataset = reader.read(testSeq2SeqFile)
# Finding extra fact2 vocab
trainExtraVocab = findExtraVocab(train_dataset)
validExtraVocab = findExtraVocab(validation_dataset)
testExtraVocab = findExtraVocab(test_dataset)
finalExtraVocab = list(set(trainExtraVocab+validExtraVocab+testExtraVocab))
print("length:",len(finalExtraVocab))
#input()
#vocab = Vocabulary.from_instances(train_dataset + validation_dataset, min_count={'tokens': 3, 'target_tokens': 3})
vocab = Vocabulary.from_instances(train_dataset + validation_dataset + test_dataset)
# Train + Valid = 9703
# Train + Valid + Test = 10099
print ("Vocab SIze :",vocab.get_vocab_size('tokens'))
encEmbedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
embedding_dim=ENC_EMBEDDING_DIM)
# Embedding for tokens since in the dataset creation time it is mentioned tokens
source_embedder = BasicTextFieldEmbedder({"tokens": encEmbedding})
encoder = PytorchSeq2SeqWrapper(torch.nn.LSTM(ENC_EMBEDDING_DIM,HIDDEN_DIM,batch_first=True,dropout=0.2))
attention = DotProductAttention()
max_decoding_steps = 4 # TODO: make this variable
model = SimpleSeq2Seq(vocab, source_embedder, encoder, max_decoding_steps,
target_embedding_dim = TGT_EMBEDDING_DIM,
#target_namespace = 'target_tokens',
attention = attention,
beam_size = beamSize,
use_bleu = True,
extra_vocab = finalExtraVocab)
#Can also specify lr=0.001
optimizer = optim.Adam(model.parameters())
# Data Iterator that specify how to batch our dataset
# Takes data shuffles it and creates fixed sized batches
#iterator = BasicIterator(batch_size=2)
#iterator.index_with(vocab)
# Pads batches wrt max input lengths per batch, sorts dataset wrt the fieldnames and padding keys provided for efficient computations
iterator = BucketIterator(batch_size=50, sorting_keys=[("source_tokens", "num_tokens")])
iterator.index_with(vocab)
trainer = Trainer(model = model,
optimizer = optimizer,
iterator = iterator,
train_dataset = train_dataset,
validation_dataset = validation_dataset,
#patience = 3,
num_epochs = numEpochs,
cuda_device = CUDA_DEVICE)
trainer.train()
predictor = SimpleSeq2SeqPredictor(model, reader)
'''for i in range(2):
print ("Epoch: {}".format(i))
trainer.train()
predictor = SimpleSeq2SeqPredictor(model, reader)
for instance in itertools.islice(validation_dataset, 10):
print('SOURCE:', instance.fields['source_tokens'].tokens)
print('GOLD:', instance.fields['target_tokens'].tokens)
print('PRED:', predictor.predict_instance(instance)['predicted_tokens'])
"""'{'predictions': [[1, 4, 5, 92, 8, 6, 1, 8, 6, 26, 3]],
'loss': 5.9835076332092285,
'class_log_probabilities': [-20.10894012451172],
'predicted_tokens': ['@@UNKNOWN@@', 'is', 'a', 'type', 'of', 'the', '@@UNKNOWN@@', 'of', 'the', 'sun']}
"""
print (predictor.predict_instance(instance))
'''
outFile = open("output_"+str(HIDDEN_DIM)+"_"+str(numEpochs)+"_"+str(beamSize)+".csv","w")
writer = csv.writer(outFile,delimiter="\t")
for instance in itertools.islice(test_dataset,500):
src = instance.fields['source_tokens'].tokens
gold = instance.fields['target_tokens'].tokens
pred = predictor.predict_instance(instance)['predicted_tokens']
writer.writerow([src,gold,pred])
outFile.close()
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 6,732 | py | 54 | Seq2SeqControl.py | 30 | 0.673648 | 0.657605 | 0 | 171 | 38.368421 | 137 |
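The `findExtraVocab` helper in the record above collects target-side tokens that never appear in the source, so the model knows which words it cannot copy. A toy illustration of that set-difference idea with plain token lists:

```python
src_tokens = ["the", "sun", "is", "a", "star"]
tgt_tokens = ["the", "sun", "is", "a", "type", "of", "star"]

# Tokens the decoder must generate but cannot copy from the source.
extra = set(tgt_tokens) - set(src_tokens)
print(sorted(extra))  # ['of', 'type']
```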
giyoon21c/dailyPy | 19,327,352,865,844 | 586146b68761224a91ddf3d19388bd0ec1f63d8a | 2f2b1c594d8e10dfd4f8f6c071167274996b3167 | /multiprocessing/job.py | 6e5b0c351b0dc6095a8dea36834911ff1cda303b | []
| no_license | https://github.com/giyoon21c/dailyPy | 18fd3fceb3c5c8097458802a6e0f8f51890aa5bf | d646e8afb01d844c200f7c96c52d5ee7fac2e538 | refs/heads/master | 2018-12-21T12:08:23.703102 | 2018-12-14T23:54:10 | 2018-12-14T23:54:10 | 131,449,675 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import multiprocessing
def worker(i):
# at least i remembered it!!!
print 'working...{0}'.format(i)
return
if __name__ == "__main__":
jobs = []
for i in xrange(10):
p = multiprocessing.Process(target=worker, args=(i,))
jobs.append(p)
p.start()
| UTF-8 | Python | false | false | 270 | py | 45 | job.py | 44 | 0.6 | 0.588889 | 0 | 14 | 18.214286 | 57 |
vkjangid/python | 5,652,176,969,946 | f3ac91d284427eb1ae97d5239a1e4cc2cc09bd5a | 2476d2ee4e336ae3713a6fa0f498e6fa89e06738 | /duplicate characters present in any string are converted into $.py | dff986a67c4eb7224f54d8a2d9d54d5475fc8c47 | []
| no_license | https://github.com/vkjangid/python | ccd250c42b0ce0c1b5fb68ef6bc98ff228a403a1 | 2e4374bf00a41037036eefe1d2b64136f0dc5106 | refs/heads/master | 2020-03-18T22:35:07.748696 | 2019-11-11T10:57:29 | 2019-11-11T10:57:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a="xyzaaxyf"
a=list(a)
print(a)
b=1
for i in range(len(a)):
for j in range(b,len(a)):
if a[i]==a[j]:
a[j]='$'
b+=1
print("".join(a))
| UTF-8 | Python | false | false | 177 | py | 72 | duplicate characters present in any string are converted into $.py | 72 | 0.418079 | 0.40678 | 0 | 10 | 15.1 | 29 |
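The nested-loop scan above is O(n^2); the same masking, where every repeat of an earlier character becomes '$', can be done in a single pass with a set of seen characters. A sketch:

```python
def mask_duplicates(s):
    seen = set()
    out = []
    for ch in s:
        # Keep the first occurrence, replace any repeat with '$'.
        out.append('$' if ch in seen else ch)
        seen.add(ch)
    return "".join(out)

print(mask_duplicates("xyzaaxyf"))  # xyza$$$f
```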
coolbung/eNREGA | 2,061,584,351,449 | 65b88aa37aa9cf0fba74986db18500fb9371fbbe | 649ea375c28d5819e2bdc731d3c0135f00856009 | /code/scraper/nrega/district_scrapper.py | 2f157b50a0518ce60f69bb39a0ef9907a91d7092 | []
| no_license | https://github.com/coolbung/eNREGA | 45f86d2cc1e0c696de88978735cc4fa595ca3f2c | 71d6c381a332c3750d13675b732bc6c74dee3f28 | refs/heads/master | 2021-01-01T16:05:46.489897 | 2011-10-10T07:02:39 | 2011-10-10T07:02:39 | 2,008,393 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # importing the BeautifulSoup libraries
from BeautifulSoup import BeautifulSoup
# import regular expressions library and url library
import re
import urllib2
# datetime is needed for the timestamps recorded below
import datetime
#RB:01 begin
#import MySQLDb for database operations
import MySQLdb
#RB:01 end
#global values
ipAddress="164.100.112.66"
#database connection string for MySqlDB
#code block for extraction
#global variables for url string for soup : url
#url should be fetched as a value from the database table
#for District expenditure
#including the database module: nregaDB
# db = MySQLdb.connect(host='Demo', user='root', passwd='adm1n', db='test')
#record set pointer returned
def valueExtraction():
    # database recordset pointer. url value
    # temporarily adding static value here
    # eg. url="http://164.100.112.66/netnrega/writereaddata/citizen_out/phy_fin_reptemp_Out_1825015_1112.html"
    StateUniqueId = 18  # this is for maharashtra
    url = "http://164.100.112.66/netnrega/writereaddata/citizen_out/phy_fin_reptemp_Out_1825015_1112.html"
    ##RB:02 begin
    # database connection string (credentials must be quoted strings)
    db_connection = MySQLdb.connect(
        host='mysql.enrega.dreamhosters.com',
        user='nrega',
        passwd='hBBkaCwz',
        db='nrega'
    )
    # cursor to execute the database commands
    cursor = db_connection.cursor()
    # opening the database
    # cursor.execute('nrega')
    ##RB:02 ends
    # Building the soup
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page)
    # Extraction logic for reaching the table block containing data needed
    # codeblock: extraction
    table_block = soup('table', id="Table2")[0]
    # there are five unwanted rows
    unwanted_row = table_block.next.nextSibling
    row_count = 1
    # traversing the table to remove unwanted rows
    # first row has been traversed in the above statement
    while row_count < 5:
        unwanted_row = unwanted_row.nextSibling.nextSibling
        row_count += 1  # incrementing the row_counter
    # End loop
    # first row of the required data for districts
    data_row = unwanted_row.nextSibling.nextSibling
    while data_row.td.nextSibling.nextSibling.next.string != 'uTotal':
        # assigning the value of the data_row to the data_col
        data_col = data_row
        # Pointing to the first column
        data_col = data_col.td.nextSibling.nextSibling
        # extracting the url, Code, Name via the href tag
        temp_url = data_col.next['href']
        # url value extraction
        # the url value is extracted as '../../citizen_html' hence a small manipulation
        # appending the ip-address and the string block
        url = "http://" + ipAddress + "/netnrega/writereaddata" + temp_url[5:]
        # district code index and value. district code is 4 characters
        index = temp_url.find("district_code=")
        index = index + 14
        code = temp_url[index:index + 4]
        # district name is scraped from the screen value
        name = data_col.next.string
        # Scraping total no. of works, labor expenditure, material expenditure
        # these are stored in the 32nd column hence a manipulation
        col_count = 1
        while col_count < 32:
            data_col = data_col.nextSibling.nextSibling
            col_count += 1
        # scraping no. of works, col: 32
        noWorks = data_col.next.string
        # scraping labor expenditure, col: 33
        data_col = data_col.nextSibling.nextSibling
        labExpn = data_col.next.string
        # scraping material expenditure, col: 34
        data_col = data_col.nextSibling.nextSibling
        matExpn = data_col.next.string
        ###RB:03 begin
        # timestamp value
        time = datetime.datetime.now()
        # creating the database of the districts and then adding the districtwise expenses for each district in the expense table
        query = "INSERT INTO rega_enrega_districts " + \
                "(UniqueId, StateUniqueId, DistrictName_Mr, CreatedOn) values ('%s', '%s', '%s', '%s')" % (code, StateUniqueId, name, time)
        # Executing the query to insert district values in the district table
        cursor.execute(query)
        # building the string for inserting the data values in the database table for district expenses
        query = "INSERT INTO rega_enrega_districtexpenses " + \
                "(UniqueId, DistrictUniqueId, NoOfWorks, LabourExpenditures, MaterialExpenditures, CreatedOn, Link) " + \
                "values ('%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (code, StateUniqueId, noWorks, labExpn, matExpn, time, url)
        # Executing the query to insert district expenditure values in the district expenses table
        cursor.execute(query)
        # Inserting the values into the database table for District_Expenses
        # database code block here
        ###RB:03 end
        # move to the next row
        data_row = data_row.nextSibling
| UTF-8 | Python | false | false | 4,849 | py | 28 | district_scrapper.py | 25 | 0.673981 | 0.654523 | 0 | 131 | 34.78626 | 133 |
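One caution on the INSERT statements in the record above: building SQL with `%` string interpolation is injection-prone and breaks on names containing quotes. MySQLdb supports parameterized execution, so a safer form of the district insert, reusing the same variables from the loop, would be:

```python
query = ("INSERT INTO rega_enrega_districts "
         "(UniqueId, StateUniqueId, DistrictName_Mr, CreatedOn) "
         "VALUES (%s, %s, %s, %s)")
# The driver quotes and escapes each parameter itself.
cursor.execute(query, (code, StateUniqueId, name, time))
```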
jgibbons-cp/sva_scan_examples | 12,378,095,789,989 | 84be4ac2cdd0b0cfb7757adecfa4c19a2dee44fa | 0b2cf46f31b277d004ba354c2fdb053eca59a0ff | /app/runner.py | 8f8f6bb2e89c20564299acef2bb6c0eeba023e83 | [
"BSD-2-Clause"
]
| permissive | https://github.com/jgibbons-cp/sva_scan_examples | 37d8ba5889de15d32e44f9a33819c7d8025f0453 | 1d012e08bd9fa3a1082b676318faf80346258aea | refs/heads/master | 2021-01-20T02:31:14.760963 | 2017-12-12T01:24:51 | 2017-12-12T01:24:51 | 89,421,469 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sva_scan_examples
from halo_general import HaloGeneral
# Build config
config = sva_scan_examples.ConfigHelper()
# get a halo object for api methods wrapper
halo = HaloGeneral(config)
sva_scan_examples.SVA_ScanExamples(halo)
| UTF-8 | Python | false | false | 234 | py | 15 | runner.py | 8 | 0.794872 | 0.794872 | 0 | 10 | 22.4 | 43 |
xue-jiaxin/paroxython | 3,667,902,084,460 | 326748ceda6314dc1d127bdcf0a2088f0cc3da54 | d7cf547aa93a199e9a58396e1efcb7725c8eac42 | /helpers/print_taxon_patterns.py | 4d3c671a53f63c9f0e9ff2be4ac1e2ae02511bd4 | [
"MIT"
]
| permissive | https://github.com/xue-jiaxin/paroxython | ca4156736c2aa0798acbbc92efcc2d926a99c801 | 777ad985631dd6003c374e14b008ac93dee33567 | refs/heads/master | 2022-07-12T00:38:45.870848 | 2020-05-18T18:40:46 | 2020-05-18T18:40:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Type hierarchy (https://python.readthedocs.io/en/stable/reference/datamodel.html)
#
from functools import reduce
type_hierarchy = """
number/integral/int
number/integral/bool
number/float
number/complex
sequence/immutable/string/str
sequence/immutable/string/bytes
sequence/immutable/tuple
sequence/mutable/list
sequence/mutable/bytearray
set_type/set
set_type/frozenset
dict
""".split()
type_names = "int bool float complex str tuple bytes list bytearray set frozenset dict".split(" ")
type_methods = {
name: {s for s in dir(getattr(__builtins__, name)) if not s.startswith("__")}
for name in type_names
}
print()
print(f"call/function/builtin/casting/\\1<tab>free_call:({'|'.join(type_names)})")
def compute(suffix, names_1, names_2):
set_1 = reduce(set.union, map(type_methods.get, names_1.split()))
set_2 = set().union(*map(type_methods.get, names_2.split()))
label_pattern = "|".join(sorted(set_1 - set_2))
print(f"call/method/{suffix}/\\1<tab>member_call:({label_pattern})")
compute("number", "int bool float complex", "str tuple bytes list bytearray set frozenset dict")
compute("sequence/string", "str", "tuple int bool float complex list set frozenset dict")
compute("sequence/list", "list", "str int bool float complex set frozenset dict")
compute("dict", "dict", "int bool float complex str tuple bytes list bytearray set frozenset")
compute("set", "set frozenset", "int bool float complex str tuple bytes list bytearray dict")
compute("sequence/list", "list str", "int bool float complex set frozenset dict")
| UTF-8 | Python | false | false | 1,555 | py | 61 | print_taxon_patterns.py | 39 | 0.727974 | 0.721543 | 0 | 43 | 35.162791 | 98 |
sunbo5439/MyProject | 4,578,435,145,933 | 3cfb32585104e9db838d283854d88bb77e3894ee | c75348a6fd2c5159015dd6f59081806286a0fe9e | /Speech_Recgnition/data_preprocess.py | 0e7f5c59e1e04ea4b82116d867949a65c5fcb307 | []
| no_license | https://github.com/sunbo5439/MyProject | cb27c083b506751c5148c63b46a63aa85fa312b8 | 9c9adb80a66d95108effd41d9898eed2626e0f36 | refs/heads/master | 2021-04-03T01:15:33.202553 | 2018-04-19T14:14:39 | 2018-04-19T14:14:39 | 124,353,863 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import tensorflow as tf
import numpy as np
import os
from collections import Counter
import librosa
import codecs
import pickle
import json
import sys
import Levenshtein
reload(sys)
sys.setdefaultencoding('utf8')
import neural_model
def generation_vocab(label_vector_path, vocab_path):
labels = json.load(codecs.open(label_vector_path, 'r', encoding='utf-8'))
c = Counter()
for sentence in labels:
for tok in sentence:
c[tok] += 1
most_common_list = c.most_common()
with codecs.open(vocab_path, 'w', encoding='utf-8') as f:
for k, v in most_common_list:
f.write(k + '\n')
def convert_textlabel_to_idlabel(text_label_path, id_label_path, word_num_dict):
labels_text = json.load(codecs.open(text_label_path, 'r', encoding='utf-8'))
labels_id = []
for sentence in labels_text:
sentence_ids = [word_num_dict.get(word, len(word_num_dict)) for word in sentence]
labels_id.append(sentence_ids)
json.dump(labels_id, codecs.open(id_label_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
def load_vocab(vocab_path):
num_word_list = []
with codecs.open(vocab_path, 'r', encoding='utf-8') as f:
for line in f:
num_word_list.append(line.strip('\n'))
word_num_dict = dict(zip(num_word_list, range(len(num_word_list))))
return word_num_dict, num_word_list, len(num_word_list)
def split_data():
def doit(folder_path, wav_file_list_path, label_list_path):
data_path = 'data_thchs30/data/'
wav_file_list, label_list = [], []
for e in os.listdir(folder_path):
if e.endswith('wav'):
f = codecs.open(os.path.join(data_path, e + '.trn'), 'r', 'utf-8')
label_text = f.readline().strip('\n\r\t ').replace(' ', '')
if len(label_text) < 2:
continue
label_list.append(label_text)
wav_file_list.append(os.path.join(data_path, e))
json.dump(wav_file_list, codecs.open(wav_file_list_path, 'w', 'utf-8'), ensure_ascii=False, indent=4)
json.dump(label_list, codecs.open(label_list_path, 'w', 'utf-8'), ensure_ascii=False, indent=4)
merge_train_folder, test_folder = 'data_thchs30/merge_train', 'data_thchs30/test'
doit(merge_train_folder, 'model/wav_mergetrain_files.json', 'model/labels_mergetrain.json')
doit(test_folder, 'model/wav_test.json', 'model/labels_test.json')
def hhh():
my_labels = []
wavs = json.load(codecs.open('model/wav_test.json', 'r', encoding='utf-8'))
for wav in wavs:
name = wav.split('/')[-1]
newname = 'data_thchs30/test/' + name + '.trngen'
f = codecs.open(newname, 'r', encoding='utf-8')
label = f.readline().strip('\n\r\t ')
my_labels.append(label)
json.dump(my_labels, codecs.open('model/my_test_label.json', 'w', 'utf-8'), ensure_ascii=False, indent=4)
def eval(label_path_list_path, gen_path_list_path):
label_list = json.load(codecs.open(label_path_list_path, 'r', 'utf-8'))
gen_list = json.load(codecs.open(gen_path_list_path, 'r', 'utf-8'))
assert len(label_list) == len(gen_list)
total_distance, total_len = 0, 0
for i in range(len(label_list)):
s1 = label_list[i]
s2 = gen_list[i]
total_distance += Levenshtein.distance(s1, s2)
total_len += len(s1)
print("CER:%f" % (total_distance * 1.0 / total_len))
if __name__ == '__main__':
# split_data()
# generation_vocab('model/labels.json', 'model/vocab.txt')
# word_num_dict, num_word_list, vocab_size = load_vocab('model/vocab.txt')
# convert_textlabel_to_idlabel('model/labels_mergetrain.json', 'model/labels_mergetrain_id.json', word_num_dict)
eval('model/labels_test.json','model/my_test_label.json')
eval('model/labels_test.json', 'model/baidu_rs.json')
| UTF-8 | Python | false | false | 3,884 | py | 21 | data_preprocess.py | 16 | 0.623841 | 0.6138 | 0 | 100 | 37.84 | 116 |
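The `eval` function above computes CER as total edit distance divided by total reference length. A tiny worked example with the same `Levenshtein` package:

```python
# -*- coding: utf-8 -*-
import Levenshtein

refs = [u"今天天气很好", u"我们去公园"]    # references, 6 + 5 = 11 chars
hyps = [u"今天天汽很好", u"我们去公园玩"]  # one substitution + one insertion

total_distance = sum(Levenshtein.distance(r, h) for r, h in zip(refs, hyps))
total_len = sum(len(r) for r in refs)
print("CER:%f" % (total_distance * 1.0 / total_len))  # 2 / 11 = 0.181818
```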
zsahi/Web-App-Vulnerability-Scanner | 11,828,339,940,222 | ffcb875747014d6b747adebf7c12f93feef4f929 | 7b1937fea015fd6b0040d0cb07330ee53dea7e05 | /Scanner.py | 7d81b7a790d36ba0b67c243a4838bc5814a1c472 | []
| no_license | https://github.com/zsahi/Web-App-Vulnerability-Scanner | 73418245f4a666be3a90d7ff7ee0232bac1e0eb5 | a1ef55217aa4c3cf66ce2ad5fccc1e66e8c22b5e | refs/heads/master | 2020-04-16T18:29:42.080192 | 2019-02-14T15:21:03 | 2019-02-14T15:21:03 | 165,822,459 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib3
import signal
import sys
from Crawler import Crawler
from core.CommonFunctions import *
from SQLi import *
import multiprocessing
import multiprocessing.pool
import AuthBypass
import WeakPasswords
def runScan(target):
crawler = Crawler()
findings = {}
print("Scanning: ", target)
findings.clear()
findings = {"target":target,"sqlinjection":[], "WeakPassword":[]}
if not crawler.init(target):
return
crawler.crawl()
crawler.findLoginPanel()
AuthBypass.check_authbypass(crawler.loginFormEndpoints, findings)
WeakPasswords.check_weak_passwords(crawler.loginFormEndpoints, findings)
if len(crawler.loginFormEndpoints) > 0:
findings["loginForm"]="yes"
else:
findings["loginForm"] = "no"
sqli_scan_urls(crawler.uEndPoints, findings)
sqli_scan_forms(crawler.fEndpoints, findings)
CommonFunctions.save_findings(findings)
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if __name__ == "__main__":
print("Program Started!!!")
signal.signal(signal.SIGINT, signal_handler)
input_file = open("completed.txt", "r")
targets_completed = input_file.readlines()
targets_completed = [x.strip() for x in targets_completed]
input_file.close()
input_file = open("input.txt", "r")
targets = input_file.readlines()
input_file.close()
targets = [x.strip() for x in targets]
#targets = list(set(targets) - set(targets_completed))
#for t in targets:
# runScan(t)
#exit(0)
if len(targets) < Config.number_of_processes:
Config.number_of_processes = len(targets)
index = 0
processes = []
for i in range(Config.number_of_processes):
processes.append(multiprocessing.Process(target=runScan,args=(targets[index],)))
index+=1
for p in processes:
p.start()
more_loop = True
while more_loop:
time.sleep(5)
for i in range(0,Config.number_of_processes) :
if processes[i].is_alive():
processes[i].join(1)
#print("jobs is not finished")
else:
if index >= len(targets):
for p in processes:
p.join()
more_loop = False
break
processes[i] = multiprocessing.Process(target=runScan,args=(targets[index],))
processes[i].start()
index+=1
print("Pool completed execution!!!")
print("Exiting main thread.")
exit(0)
| UTF-8 | Python | false | false | 2,759 | py | 16 | Scanner.py | 9 | 0.595868 | 0.591156 | 0 | 108 | 23.509259 | 93 |
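The `__main__` block in the record above hand-rolls a fixed-size worker pool, replacing each finished process with a new target. On Python 3, `multiprocessing.Pool` expresses the same policy more compactly; a sketch assuming `runScan` and the `targets` list are defined as above:

```python
from multiprocessing import Pool

if __name__ == "__main__":
    with Pool(processes=Config.number_of_processes) as pool:
        # Blocks until every target has been scanned.
        pool.map(runScan, targets)
```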
baby-age/testing-bagels | 11,484,742,597,290 | 5d7e492a963353ba5fe025a0b6e9fccd27dc87a0 | 9e8f7c31272eabaafabbfb6e45c6320ebb68b360 | /testing_bagels/feature_calculator.py | 98ff0d4352d7184f2dbc4e7b1d8b651ca8a3698b | []
| no_license | https://github.com/baby-age/testing-bagels | c0c2fd652be91c430726513fa4ce2644484a47bf | c781478dcc968c5777bc7421b4ad7189a57109e4 | refs/heads/master | 2021-05-13T17:02:14.360828 | 2018-04-20T15:24:36 | 2018-04-20T15:24:36 | 116,808,796 | 0 | 1 | null | false | 2018-01-13T17:44:52 | 2018-01-09T11:41:38 | 2018-01-09T11:52:36 | 2018-01-13T17:44:52 | 16 | 0 | 1 | 0 | Python | false | null | import bct
import testing_bagels.matrix_preprocessor as mp
'''
Calculate modularity and efficiency coefficients with Brain Connectivity Toolbox.
Input should be data frame, output is list.
Zip if used together for prediction, like this: X = [list(x) for x in zip(mod, eff)]
'''
def modularity_and_efficiency(data):
mod_scores = []
eff_scores = []
for i, x in enumerate(data):
matrix = mp.preprocess_matrix(x)
mod_score = bct.modularity_und(matrix)[1]
eff_score = bct.efficiency_wei(matrix)
mod_scores.append(mod_score)
eff_scores.append(eff_score)
return mod_scores, eff_scores
'''
Calculate local clustering coefficients with Brain Connectivity Toolbox.
Input should be data frame, output is list.
'''
def local_clustering(data):
cluster_coeffs = []
for i, x in enumerate(data):
matrix = mp.preprocess_matrix(x)
coeffs = bct.clustering_coef_wu(matrix)
cluster_coeffs.append(coeffs.tolist())
return cluster_coeffs
| UTF-8 | Python | false | false | 977 | py | 24 | feature_calculator.py | 21 | 0.714432 | 0.713408 | 0 | 34 | 27.735294 | 84 |
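As the module docstring above suggests, the two global measures are usually zipped into one design matrix before prediction. A short sketch, assuming `data` is an iterable of connectivity matrices and `y` a label vector, with scikit-learn named purely as an illustrative consumer:

```python
mod, eff = modularity_and_efficiency(data)

# One row per subject: [modularity, efficiency], as in the docstring.
X = [list(x) for x in zip(mod, eff)]

# Any estimator can now consume X, e.g. (illustrative):
# from sklearn.linear_model import LinearRegression
# model = LinearRegression().fit(X, y)
```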
bmoore-10/NewGrounds-Art-Scraper-Discord-bot | 14,199,161,908,524 | 53d3e74b4808ae2f80bcabe8976d4863d2df25f0 | 98b669821fe6c83929e73055844848fe081f79ef | /NG_Scraper/NG_Scraper.py | ab986984da88e16d54641d7ae4352a3b541183b4 | []
| no_license | https://github.com/bmoore-10/NewGrounds-Art-Scraper-Discord-bot | d3e9346670fff67cc24dc78f5770a00e065e0dd5 | f7c9674fb5f3def41143718f3a4a38c1c56e178a | refs/heads/master | 2020-04-26T20:48:16.717591 | 2019-03-06T07:15:35 | 2019-03-06T07:15:35 | 173,822,108 | 3 | 1 | null | false | 2019-03-06T07:15:36 | 2019-03-04T21:11:46 | 2019-03-04T23:04:34 | 2019-03-06T07:15:35 | 10 | 0 | 0 | 0 | Python | false | null | import json
from bs4 import BeautifulSoup
import datetime
import time
import requests
import logging
import re
from slimit import ast
from slimit.parser import Parser
from slimit.visitors import nodevisitor
logging.basicConfig(filename='ng_scraper.log', filemode='w', level=logging.INFO)
# Load in our config information
config_fp = open('config.json', "r", encoding='utf-8')
configData = json.load(config_fp)
config_fp.close()
scraper_data_location = configData["scraper_data_path"]
# Interval to wait between sending reb requests to NewGrounds. Don't set this too low or you'll probably get banned
interval = 0.5
last_req = datetime.datetime.min
# When passed a URL, will open it, attempt to read it 3 times, and return a soup of the read info
# Returns soup on good read, 1 on 500 error (try again later), 2 on 400 error / bad link (throw out the link)
def fetch_soup(in_url):
# Throttling - One request per interval (seconds)
curr_time = datetime.datetime.now()
global last_req
    delta_seconds = (curr_time - last_req).total_seconds()
last_req = datetime.datetime.now()
if delta_seconds < interval:
time.sleep(interval - delta_seconds)
# Open a connection and grab the page at the current url
try:
page_raw = requests.get(in_url)
except requests.exceptions.MissingSchema:
logging.error("MissingSchema on page " + in_url + ". Removing link...")
return 2
# If we get a bad link, return the appropriate responses
    if page_raw.status_code // 100 == 5:
        logging.error('Error ' + str(page_raw.status_code) + ' on link ' + in_url + '. Skipping...')
        return 1
    elif page_raw.status_code // 100 == 4:
        logging.error('Error ' + str(page_raw.status_code) + ' on link ' + in_url + '. Removing link...')
        return 2
# Parse the current page's html into a soup
ret_soup = BeautifulSoup(page_raw.text, "html.parser")
return ret_soup
# Data note: Artists are associated in json with links to their art
# Will enter the scraper_data json file and load the session data for the current session
def retrieve_data():
fp = open(scraper_data_location, "r", encoding="utf-8")
json_data = json.load(fp)
fp.close()
return json_data
# Will enter the scraper_data json file and dump updated the session data from the current session
def store_data(session_data):
fp = open(scraper_data_location, "w", )
json.dump(session_data, fp, ensure_ascii=False, indent=4, sort_keys=True)
fp.close()
# Scrapes the name of an artist from their art page
def fetch_name(page_soup):
artist_header = page_soup.find('span', {'class': 'user-header-name'})
if artist_header is None:
return None
name = artist_header.find('a', {'class': 'user-link'}).text.strip()
return name
# Searches through our dictionary to find the list of links
def find_link_list_in_json(curr, ret=None):
if ret is None:
ret = []
if isinstance(curr, dict):
for k, v in curr.items():
if v:
find_link_list_in_json(v, ret)
if isinstance(curr, list):
ret.append(curr)
return ret
# Recursively searches a nested dictionary for links
def find_links_from_dictionary(curr_dictionary):
ret_list = []
list_of_link_lists = find_link_list_in_json(curr_dictionary)
for link_list in list_of_link_lists:
for elem in link_list:
clean_link = re.search('<a href=\"//(.*)\" ', elem)
ret_list.append(clean_link.group(1))
return ret_list
# Grabs links to artworks on an artist's page
def fetch_art_links(page_soup):
# Get the body script that contains all of the links that we need
body_center = page_soup.find('div', {'class': 'body-center'})
body_script = body_center.find_all('script')[1]
ret_list = []
# Javascript sucks. Let's let slimit parse for us
parser = Parser()
tree = parser.parse(body_script.text)
for node in nodevisitor.visit(tree):
if isinstance(node, ast.Assign) and getattr(node.left, 'value', '') != '':
if node.left.value == '"years"':
links_raw_json = json.loads(node.right.to_ecma())
ret_list = find_links_from_dictionary(links_raw_json)
break
return ret_list
# Checks that the incoming images are of the rating we want
# Comment/uncomment lines based upon current preferences. Removing a category likely requires building list
def check_rating(art_soup):
if art_soup.find('h2', {'class': 'rated-e'}) and configData["rating-e"] == "true":
return True
elif art_soup.find('h2', {'class': 'rated-t'}) and configData["rating-t"] == "true":
return True
elif art_soup.find('h2', {'class': 'rated-m'}) and configData["rating-m"] == "true":
return True
elif art_soup.find('h2', {'class': 'rated-a'}) and configData["rating-a"] == "true":
return True
else:
return False
# Sifts through a fresh list of images and adds new ones to the artist list
def sift_through_image_links(fresh_list, artist_to_links, artist_deep_links):
num_additions = 0
# for link in fresh_list:
for link in fresh_list:
if link in artist_to_links:
continue
else:
artist_to_links.append(link)
art_soup = fetch_soup('https://' + link)
# If our links are broken for some reason, just move on
if art_soup == 1 or art_soup == 2:
continue
# Check that we even want what's on this page according to the current settings
if not check_rating(art_soup):
continue
# Find all relevant image links on the page and add them to the artist's list (no duplicates)
art_pods = art_soup.find_all('div', {'class': 'pod-body'})
for body in art_pods:
# First we find the image in the main zone
image_class = body.find('div', {'class': 'image'})
if image_class is not None:
main_image = image_class.find('img').get('src')
if main_image not in artist_deep_links:
artist_deep_links.append(main_image)
num_additions += 1
# Now we search for any images that may be in the author's comments
author_comments_class = body.find('div', {'id': 'author_comments'})
if author_comments_class is not None:
comment_images = author_comments_class.find_all('img')
for image in comment_images:
aux_image = image.get('data-smartload-src')
if aux_image not in artist_deep_links:
artist_deep_links.append(aux_image)
num_additions += 1
# Return the total number of new additions
return num_additions
# Goes through all of our stored artist URLs, grabs direct links to all of the artwork, and stores them for later
def main():
response = []
# First, retrieve the data for the session
session_data = retrieve_data()
# Now get the list of URLs that we want to scrape from
url_list = session_data["artist_urls"]
# For every artist...
for artist_url in url_list:
# Get a soup of their art page
art_page_soup = fetch_soup(artist_url)
print("Working on: " + artist_url)
if art_page_soup == 1:
response.append("Couldn't add " + artist_url + " this time. Will try again later.")
continue
elif art_page_soup == 2:
url_list.remove(artist_url)
response.append("Couldn't add " + artist_url + ": Invalid URL")
continue
# Grab their name
artist_name = fetch_name(art_page_soup)
if artist_name is None:
response.append("Couldn't add " + artist_url + ": Couldn't find any art links (Invalid URL?)")
url_list.remove(artist_url)
continue
# Grab a link to every work on their art page
art_list = fetch_art_links(art_page_soup)
logging.info("Extracted " + str(len(art_list)) + " links from " + artist_name + "'s page")
# If we couldn't find any art links, drop them
if len(art_list) < 1:
response.append("Couldn't add " + artist_url + ": Couldn't find any art links (Invalid URL?)")
url_list.remove(artist_url)
continue
# If they're not already in the database, add them.
if artist_name not in session_data:
session_data[artist_name] = {
"to_links": [],
"deep_links": []
}
logging.info("Added " + artist_name + " to the database.")
response.append("Added " + artist_name + " to the database.")
# Go through every link we've gotten and save the raw links to images we want into the artist's dictionary
new_additions = sift_through_image_links(art_list, session_data[artist_name]["to_links"], session_data[artist_name]["deep_links"])
logging.info("Added " + str(new_additions) + " new images for " + artist_name)
print("Added " + str(new_additions) + " new images for " + artist_name)
if new_additions > 0:
response.append("Added " + str(new_additions) + " new images for " + artist_name)
# Save our data for next time
store_data(session_data)
# Return the responses if ran by bot
return response
# Run the main
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 9,824 | py | 6 | NG_Scraper.py | 2 | 0.6023 | 0.598127 | 0 | 245 | 38.097959 | 138 |
HeinzHinek/AxiStore | 10,118,942,969,563 | 0e8d5a9335d52c5748ea0065cbf4772c830db8b8 | 3e6d84f4b0e4460326565353de801cfe8c2c493f | /db_repository/versions/067_migration.py | 40f304b4bc7eb24476941317d55247a047a9bb35 | []
| no_license | https://github.com/HeinzHinek/AxiStore | 081d18f8f2beabc70ab887b2496a9d4029942082 | 9acdfe684761929ee660a7b079be96f8a449d27c | refs/heads/master | 2021-01-23T13:22:38.911745 | 2017-06-10T08:27:49 | 2017-06-10T08:27:49 | 24,177,684 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
contact = Table('contact', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('company_name', String(length=100)),
Column('post_code', Integer),
Column('address1', String(length=100)),
Column('address2', String(length=100)),
Column('address3', String(length=100)),
Column('first_name', String(length=50)),
Column('surname', String(length=50)),
Column('phone', String(length=16)),
Column('email', String(length=120)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['contact'].columns['first_name'].create()
post_meta.tables['contact'].columns['surname'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['contact'].columns['first_name'].drop()
post_meta.tables['contact'].columns['surname'].drop()
| UTF-8 | Python | false | false | 1,224 | py | 120 | 067_migration.py | 33 | 0.688725 | 0.669118 | 0 | 36 | 33 | 68 |
sultankhodja/Stock | 19,284,403,190,644 | 3f8af4bbfc79b0f0d8df7d52376f5f4fc9ddf357 | d9a17af8a25a8de7ea7696d90f768aec83df5d3d | /stock_assignment.py | e0e04920b92df62a1dd861f816a992528d227add | []
| no_license | https://github.com/sultankhodja/Stock | 16d0c3353736871065db65d611922fdeb495692f | 6f6a7ff3665f1c7c0fb50821a0f6af5aff156ec5 | refs/heads/master | 2021-05-17T01:50:55.891397 | 2020-03-27T15:06:51 | 2020-03-27T15:06:51 | 250,564,308 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from lichcode import LinkedListTail
# Queue just list popping from head pushing from the end!
class Queue:
def __init__(self):
self.myList = []
    # String representation of the queue contents
def __repr__(self):
return repr(self.myList)
# Pushing to the list
def push(self, data):
self.myList.append(data)
# Getting the list
def get(self):
return self.myList
# Popping from first index
def pop(self):
self.myList.pop(0)
# access_pop popping from every index in the list
def access_pop(self, i):
print(self.myList[i])
self.myList.pop(i)
# access function to pick from every index
def access(self, a):
return int(self.myList[a])
# Creating banks to store information
bank_of_stock = Queue() # Main Bank
number = int(input("What is you option? 1)Adding the stocks 2)Look at the bank of the stocks : ")) # Give an option
if number == 1: # number 1 is for adding stocks as blocks
while True: # Looping till pressing 0
amount = int(input('How many stocks do u wanna buy: ')) # amount is how many stocks in EACH block
print("To stop buying press 0") # Giving an instruction to stop pressing 0
print(f'Adding...{amount}') # Printing how many blocks is added
bank_of_stock.push(amount) # Pushing to the head!
if amount == 0: # if user pressing 0 breaks
break # Breaking function
number = int(input("What is you option?:Press 2 to sell the stocks")) # Giving an option for selling the stock and make profit!
if number == 2: # 2 for printing bank
print(bank_of_stock) # Bank
option = int(input("Which stock to sell: ")) # Option which stock to sell
if option == 1: # if option 1 selling first stock in the list
print(f"The stock which u want to sell {bank_of_stock.access(0)}") # Printing this index
bank_of_stock.pop() # Popping from head
bank_of_stock.access_pop(-1) # Popping the last number because it is 0
print(f"Your current bank of the stocks {bank_of_stock}") # Current bank
prev_price_stock = int(input("What was the previous price of this stock $$$?: ")) # TO give price for pre_s
curr_price_stock = int(input("What is current price of this stock EACH $$$?: ")) # To give price for cur_s
print(f"Your profit from this stock is ${curr_price_stock * bank_of_stock.access(0) - prev_price_stock * bank_of_stock.access(0)}")
else:
option -= 1 # Option -1 because user's option will decrease by one to point the list
print(f"The stock which u want to sell {bank_of_stock.access(option)}") # Printing Bank without first
bank_of_stock.access_pop(option) # Popping from user's preference
bank_of_stock.access_pop(-1) # Popping the last because it is 0
print(f"Your current bank of the stocks {bank_of_stock}") # Printing the current bank
prev_price_stock = int(input("What was the previous price of this stock $$$?: ")) # TO give price for pre_s
curr_price_stock = int(input("What is current price of this stock EACH $$$?: ")) # To give price for cur_s
print(f"Your profit from this stock is ${curr_price_stock * bank_of_stock.access(option) - prev_price_stock * bank_of_stock.access(option)}")
else:
print(bank_of_stock) # Printing empty bank because there is nothing in the first time!
| UTF-8 | Python | false | false | 3,828 | py | 1 | stock_assignment.py | 1 | 0.586468 | 0.580199 | 0 | 68 | 54.764706 | 153 |
Sauci/pylink | 15,633,680,973,344 | 147dec7270cdbda987c9bd794dd442477e453cd2 | 8871842ce5334fb04d38b188afb060d219f975bd | /tests/unit/test_util.py | c60d6da0f93fcf6e82dbcded89c510fd64f8de05 | [
"Apache-2.0"
]
| permissive | https://github.com/Sauci/pylink | c453263659b889cbd33d27865e32a1e800d13e02 | bdd68ea8aceebc01b7dd980c8b9a11ad7e7a7c6b | refs/heads/master | 2021-11-05T03:01:03.224310 | 2021-10-26T06:11:12 | 2021-10-26T06:11:12 | 140,023,392 | 0 | 0 | Apache-2.0 | true | 2018-07-06T19:38:06 | 2018-07-06T19:38:05 | 2018-07-05T18:02:17 | 2018-05-30T23:02:14 | 211 | 0 | 0 | 0 | null | false | null | # Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.enums as enums
import pylink.util as util
import mock
try:
import StringIO
except ImportError:
import io as StringIO
import unittest
class TestUtil(unittest.TestCase):
"""Unit test for the `util` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
pass
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
pass
def test_is_integer(self):
"""Tests that the `is_integer()` method returns correctly.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
self.assertTrue(util.is_integer(4))
self.assertTrue(util.is_integer(0))
self.assertTrue(util.is_integer(-1))
self.assertFalse(util.is_integer('4'))
self.assertFalse(util.is_integer('Stranger Things'))
def test_is_natural(self):
"""Tests that the `is_natural()` method returns correctly.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
self.assertTrue(util.is_natural(4))
self.assertTrue(util.is_natural(0))
self.assertFalse(util.is_natural(-1))
self.assertFalse(util.is_natural('4'))
self.assertFalse(util.is_natural('The 100'))
def test_is_os_64bit(self):
"""Tests that the ``is_os_64bit()`` method returns correctly.
It should return ``True`` on 64-bit platforms, otherwise ``False``.
Args:
self (TestUtil): the ``TestUtil`` instance
Returns:
``None``
"""
with mock.patch('platform.machine') as mock_machine:
mock_machine.return_value = 'i386'
self.assertFalse(util.is_os_64bit())
mock_machine.return_value = ''
self.assertFalse(util.is_os_64bit())
mock_machine.return_value = 'i686'
self.assertFalse(util.is_os_64bit())
mock_machine.return_value = 'x86_64'
self.assertTrue(util.is_os_64bit())
def test_noop(self):
"""Tests that the `noop()` method does nothing and takes any args.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
self.assertEqual(None, util.noop())
self.assertEqual(None, util.noop(*range(100)))
self.assertEqual(None, util.noop(arg=4))
def test_unsecure_hook_dialog(self):
"""Tests that the unsecure hook dialog always returns `YES`.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
self.assertEqual(enums.JLinkFlags.DLG_BUTTON_NO,
util.unsecure_hook_dialog('', '', 0))
@mock.patch('sys.stdout', new_callable=StringIO.StringIO)
def test_progress_bar(self, stream):
"""Tests the progress bar calls the appropriate stream functions.
When percent is full, the `progress_bar()` should append a newline to
the stream, otherwise not.
Args:
self (TestUtil): the `TestUtil` instance
stream (StringIO): the mock output stream
Returns:
`None`
"""
self.assertEqual(None, util.progress_bar(0, 100))
self.assertFalse(stream.getvalue() == '\n')
self.assertEqual(None, util.progress_bar(100, 100))
messages = stream.getvalue().split('\n')
self.assertEqual(2, len(messages))
self.assertTrue(messages.pop(0).endswith(' '))
self.assertTrue(messages.pop(0) == '')
@mock.patch('sys.stdout', new_callable=StringIO.StringIO)
def test_flash_progress_callback(self, stream):
"""Tests that the callback triggers a progress bar.
Args:
self (TestUtil): the `TestUtil` instance
stream (StringIO): the mock output stream
Returns:
`None`
"""
self.assertEqual(None, util.flash_progress_callback('compare', '', 0))
self.assertEqual('', stream.getvalue())
self.assertEqual(None, util.flash_progress_callback('Erase', '', 0))
self.assertTrue(len(stream.getvalue()) > 0)
def test_calculate_parity(self):
"""Tests that the parity is properly calculated.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
self.assertEqual(1, util.calculate_parity(1))
self.assertEqual(1, util.calculate_parity(2))
self.assertEqual(0, util.calculate_parity(3))
def test_calculate_parity_invalid(self):
"""Tests that an exception is raised for invalid args to `parity()`.
Args:
self (TestUtil): the `TestUtil` instance
Returns:
`None`
"""
with self.assertRaises(ValueError):
util.calculate_parity('4')
with self.assertRaises(ValueError):
util.calculate_parity(-1)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 5,830 | py | 78 | test_util.py | 27 | 0.59434 | 0.58199 | 0 | 208 | 27.028846 | 78 |
tetrismegistus/minutia | 2,542,620,658,169 | 25ccd0ad8bd28b4f47715d0c94940b06cbd6bfc8 | a47c11905907cb76d5c32382383d9e2b00f24599 | /exercises/guided_tutorials/advent17/16_1.py | 8dfe111ebf006677a170edf20a04196d937869aa | []
| no_license | https://github.com/tetrismegistus/minutia | 9ea7db3c7e9f164c83a8cc3f082000fd894fb55b | 51d0d41740701ef117598ef3e00c99e208ee5ca8 | refs/heads/master | 2022-12-06T15:16:59.968911 | 2020-07-22T21:42:47 | 2020-07-22T21:42:47 | 160,570,462 | 12 | 1 | null | false | 2022-11-22T05:16:44 | 2018-12-05T19:51:17 | 2020-07-22T21:43:01 | 2022-11-22T05:16:44 | 126,546 | 10 | 0 | 2 | Python | false | false | def load_steps(filename):
with open(filename) as f:
file = f.read().strip('\n').split(',')
steps = []
for instruction in file:
step = [instruction[0]] + instruction[1:].split('/')
steps.append(step)
return steps
def spin(string, number):
index = -(number % len(string))
return string[index:] + string[:index]
def index_swap(string, index1, index2):
target_string = list(string)
s1, s2 = string[index1], string[index2]
target_string[index1] = s2
target_string[index2] = s1
return ''.join(target_string)
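# "partner" move (pA/B): swap the programs named A and B, wherever they stand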
def partner_swap(string, letter1, letter2):
index1 = string.index(letter1)
index2 = string.index(letter2)
return index_swap(string, index1, index2)
def dance(steps, line):
for step in steps:
if step[0] == 's':
line = spin(line, int(step[1]))
elif step[0] == 'x':
line = index_swap(line, int(step[1]), int(step[2]))
elif step[0] == 'p':
line = partner_swap(line, step[1], step[2])
return line
def main(filename):
steps = load_steps(filename)
line = ''.join([chr(i) for i in range(97, 113)])
line = dance(steps, line)
print(line)
main('input.txt')
| UTF-8 | Python | false | false | 1,284 | py | 149 | 16_1.py | 109 | 0.562305 | 0.536604 | 0 | 50 | 23.52 | 63 |
maksymx/ml_dl_ds_cv_snippets | 5,437,428,629,852 | a214bb2baccd71bcd74a07b8cb4c082e44f30a0e | 4975c9c9e65f1b92b49f5c9344577440eab81821 | /other/basic.py | a7e9b4fe33515a5fb9b34a6a8e3b95c108235382 | [
"MIT"
]
| permissive | https://github.com/maksymx/ml_dl_ds_cv_snippets | 528e2b5f5c21a0371d0317037e6fa23525bde9c7 | ebb4ec5e97f52e55e97f3c8c71cf375da7a30dc5 | refs/heads/master | 2021-09-17T11:40:29.517605 | 2018-07-01T22:26:16 | 2018-07-01T22:26:16 | 114,045,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scipy as sp
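# NOTE: older SciPy releases re-exported NumPy helpers (random, sqrt, append);
# recent SciPy versions removed these aliases in favour of using numpy directly.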
x = sp.random.random()
print("Random number: ", x)
x_square_root = sp.sqrt(x)
print("Square root: ", x_square_root)
matrix = sp.random.random(3)
print('Random matrix: ', matrix)
matrix = sp.append(matrix, sp.random.random(2))
print('New matrix: ', matrix)
| UTF-8 | Python | false | false | 279 | py | 9 | basic.py | 8 | 0.684588 | 0.677419 | 0 | 13 | 20.461538 | 47 |
jennndol/ayongampus | 11,484,742,596,501 | e720475ec5a74dd2533d563b086f218f4f4793c8 | dbab648b60358fce5c92f758019500a5bce3f842 | /ayongampus/quiz/essay/views.py | 612052fb142edf3d6652b4a00807643f9451ec32 | []
| no_license | https://github.com/jennndol/ayongampus | 6741ce1a50e5c2f183bccf96f6e5d549645a261d | 76b5841547f3f40e10393e0e0fb7de73ca46b8df | refs/heads/master | 2021-01-21T21:06:35.341896 | 2017-07-27T12:10:43 | 2017-07-27T12:10:43 | 64,501,515 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ayongampus.quiz.essay.models import Essay_Question
from ayongampus.subject.models import Chapter
from ayongampus.subject.views import get_user
from django.core.urlresolvers import reverse_lazy
from django.db import transaction
from django.views.generic import CreateView, UpdateView
class EssayQuestionCreateView(CreateView):
model = Essay_Question
fields = ['chapter', 'content', 'explanation']
success_url = reverse_lazy('core_question_list')
def get_chapter_list(self):
return Chapter.objects.filter(subject__author=self.request.user)
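    # restrict the chapter dropdown to chapters whose subject belongs to the requesting user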
def get_form(self, form_class=None):
form = super(EssayQuestionCreateView, self).get_form(form_class)
form.fields['chapter'].queryset = self.get_chapter_list()
return form
def form_valid(self, form):
with transaction.atomic():
self.object = form.save(commit=False)
self.object.author = get_user(self.request)
self.object.save()
return super(EssayQuestionCreateView, self).form_valid(form)
class EssayQuestionUpdateView(UpdateView):
model = Essay_Question
fields = ['chapter', 'content', 'explanation']
success_url = reverse_lazy('core_question_list')
def get_chapter_list(self):
return Chapter.objects.filter(subject__author=self.request.user)
def get_form(self, form_class=None):
form = super(EssayQuestionUpdateView, self).get_form(form_class)
form.fields['chapter'].queryset = self.get_chapter_list()
return form
def form_valid(self, form):
with transaction.atomic():
self.object = form.save(commit=False)
self.object.author = get_user(self.request)
self.object.save()
return super(EssayQuestionUpdateView, self).form_valid(form)
| UTF-8 | Python | false | false | 1,834 | py | 62 | views.py | 26 | 0.695202 | 0.695202 | 0 | 49 | 36.428571 | 72 |
MaxWroe/EnviromentWasteDetection | 6,880,537,636,999 | 0f8fa8fdf8fd1c238df49df2b6f7e226978a9dcf | 51232ac21a5915c9981ddd0c44596da9f55a6af7 | /annotations.py | 626effed6dc168c3c99500b405bfba9d67c58d2e | []
| no_license | https://github.com/MaxWroe/EnviromentWasteDetection | 44701ce7b97a3eb411c910ff1338d262fd59441f | 91cf9526b0a9bfbbdfd1e97f1e8ff48053aa1eec | refs/heads/master | 2023-05-07T09:37:47.108060 | 2021-06-04T05:55:59 | 2021-06-04T05:55:59 | 372,350,851 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Provides new annotations for TACO dataset images, altering classifications from the 60 original classes to be in line
with COMP3330 requirements (Plastic Bottles, Plastic Bags, Other Plastic Waste, No Plastic Waste)
This script has been modified from :
https://github.com/wimlds-trojmiasto/detect-waste/blob/7852bce50405b9797e9b2c5b09b4ac033aa52edf/annotations_preprocessing.py
For the purposes of COMP3330 Assignment 2 Part 2
'''
#Run this script to preprocess images from the original imported taco dataset, place them into corresponding folders
import os
import json
from collections import defaultdict, Counter
import funcy
import numpy as np
from iterstrat.ml_stratifiers import MultilabelStratifiedShuffleSplit
NEW_CATEGORY_DIR = 'data/annotations_comp3330.json'
TRAIN_DEST = 'train_annotations.json'
TEST_DEST = 'test_annotations.json'
SOURCE_ANNOTATIONS_DIR = './data/annotations_unofficial.json'
#Save dictionary in coco dataset style
def save_coco(dest, info,
images, annotations, categories):
data_dict = {'info': info,
'images': images,
'annotations': annotations,
'categories': categories}
with open(dest, 'w') as f:
json.dump(data_dict,
f, indent=2, sort_keys=True)
return data_dict
# filter_annotations and save_coco on akarazniewicz/cocosplit
def filter_annotations(annotations, images):
image_ids = funcy.lmap(lambda im: int(im['id']), images)
return funcy.lfilter(lambda ann:
int(ann['image_id']) in image_ids, annotations)
# function based on https://github.com/trent-b/iterative-stratification; shuffles annotations
# into train or test splits based on a stratified random split
def MultiStratifiedShuffleSplit(images,
annotations,
test_size):
# count categories per image
categories_per_image = defaultdict(Counter)
max_id = 0
for ann in annotations:
categories_per_image[ann['image_id']][ann['category_id']] += 1
if ann['category_id'] > max_id:
max_id = ann['category_id']
    # prepare list with count of category objects per image
all_categories = []
for cat in categories_per_image.values():
pair = []
for i in range(1, max_id + 1):
pair.append(cat[i])
all_categories.append(pair)
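    # all_categories[i][k] == number of objects of category (k + 1) on image i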
# multilabel-stratified-split
strat_split = MultilabelStratifiedShuffleSplit(n_splits=1,
test_size=test_size,
random_state=2020)
for train_index, test_index in strat_split.split(images,
all_categories):
x = [images[i] for i in train_index]
y = [images[i] for i in test_index]
print('Train:', len(x), 'images, test:', len(y))
return x, y
# Converts TACO labels to COMP3330 classes (Plastic Bags, Plastic bottles, other, no plastic)
def taco_to_comp3330(label):
plastic_bags = [
"degraded_plasticbag", "trash_plastic", "Single-use carrier bag", "Polypropylene bag", "Drink carton","Plastified paper bag"
]
plastic_bottles = [
"bottleTops", "bottleLabel", "fizzyDrinkBottle", "milk_bottle", "degraded_plasticbottle",
"Clear plastic bottle",
"Other plastic bottle", "Plastic bottle cap",
]
other_plastic_waste = [
"Styrofoam piece", "Garbage bag", "Other plastic wrapper", "microplastics", "smoking_plastic",
"plasticAlcoholPackaging",
"alcohol_plastic_cups", "macroplastics", "plastic_cups", "plasticCutlery",
"plastic_cup_tops", "mediumplastics", "plasticFoodPackaging", "metals_and_plastic", "plastic", "Plastic straw",
"Other plastic", "Plastic film",
"Other plastic container", "Plastic glooves", "Plastic utensils", "Tupperware", "Disposable food container",
"Plastic Film", "Six pack rings", "Spread tub",
"Disposable plastic cup", "Other plastic cup", "Plastic lid", "Metal lid"
]
    # I think this might just confuse the network, these all look pretty similar to plastic waste....
no_plastic_waste = [
"rubbish", "Rubbish", "litter", "Unlabeled litter", "trash_etc", "unknown", "Food waste", "trash_wood", "wood",
"bio",
"Corrugated carton", "Egg carton", "Toilet tube", "Other carton", "Normal paper", "Paper bag", "trash_paper",
"paper",
"Aluminium blister pack", "Carded blister pack", "Meal carton", "Pizza box", "Cigarette", "Paper cup",
"Meal carton", "Foam cup",
"Glass cup", "Wrapping paper", "Magazine paper",
"Foam food container", "Rope", "Shoe", "Squeezable tube", "Paper straw", "Rope & strings", "Tissues",
"trash_fabric", "cloth", "non_recyclable", "Battery", "trash_fishing_gear", "other", "Glass bottle",
"Broken glass", "Glass jar",
"Glass", "glass", "beerBottle", "wineBottle", "juice_bottles", "waterBottle", "glass_jar", "ice_tea_bottles",
"spiritBottle",
"glass_jar_lid", "crisp_large", "crisp_small", "aluminium_foil", "ice_tea_can", "energy_can", "beerCan",
"tinCan",
"metal", "trash_rubber", "rubber", "trash_metal", "HDPEM", "PET", "AluCan", "Crisp packet", "Food Can",
"Aluminium foil",
"Metal bottle cap", "Aerosol", "Drink can", "Food can", "Pop tab", "Scrap metal",
]
if (label in no_plastic_waste):
label = "no_plastic_waste"
elif (label in other_plastic_waste):
label = "other_plastic_waste"
elif (label in plastic_bottles):
label = "plastic_bottles"
elif (label in plastic_bags):
label = "plastic_bags"
else:
label = "unknown"
return label
#Now split the dataset into training and test files respectively
def split_coco_dataset(dataset_directory,
test_size=0.2):
with open(dataset_directory, 'r') as f:
dataset = json.loads(f.read())
categories = dataset['categories']
info = dataset['info']
annotations = dataset['annotations']
images = dataset['images']
images_with_annotations = funcy.lmap(lambda ann:
int(ann['image_id']), annotations)
images = funcy.lremove(lambda i: i['id'] not in
images_with_annotations, images)
    # if there is only one category, use a standard random shuffle
if len(dataset['categories']) == 1:
np.random.shuffle(images)
x = images[int(len(images) * test_size):]
y = images[0:int(len(images) * test_size)]
print('Train:', len(x), 'images, valid:', len(y))
#Otherwise use multi stratified shuffle split
else:
x, y = MultiStratifiedShuffleSplit(images, annotations, test_size)
train = save_coco(TRAIN_DEST, info,
x, filter_annotations(annotations, x), categories)
test = save_coco(TEST_DEST , info, y,
filter_annotations(annotations, y), categories)
print('Finished stratified shuffle split. Results saved in:',
TRAIN_DEST, TEST_DEST)
return train, test
#The main script, correctly categorises dataset
if __name__ == '__main__':
# create directory to store all annotations
    if not os.path.exists(os.path.dirname(SOURCE_ANNOTATIONS_DIR)):
        os.mkdir(os.path.dirname(SOURCE_ANNOTATIONS_DIR))
#Move the category ids from the TACO dataset (60 categories) to COMP annotation style (4 Categories)
#Read in annotations.json
with open(SOURCE_ANNOTATIONS_DIR, 'r') as f:
dataset = json.loads(f.read())
categories = dataset['categories']
annotations = dataset['annotations']
info = dataset['info']
#Change categories to comp3330
comp3330_categories = dataset['categories']
for annotation in annotations:
cat_id = annotation['category_id']
cat_taco = categories[cat_id - 1]['name']
comp3330_categories[cat_id - 1]['supercategory'] = taco_to_comp3330(cat_taco)
comp3330_ids = {}
comp3330_category_names = []
category_id = 1
for category in comp3330_categories:
if category['supercategory'] not in comp3330_ids:
comp3330_category_names.append(category['supercategory'])
comp3330_ids[category['supercategory']] = category_id
category_id += 1
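    # comp3330_ids now maps each of the four supercategories to a compact 1-based id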
#Update the IDs of annotations
taco_to_comp3330_ids = {}
for i, category in enumerate(comp3330_categories):
taco_to_comp3330_ids[category['id']] = comp3330_ids[category['supercategory']]
annotations_temp = annotations.copy()
annotations_comp3330 = annotations
for i, ann in enumerate(annotations):
annotations_comp3330[i]['category_id'] = \
taco_to_comp3330_ids[ann['category_id']]
annotations_comp3330[i].pop('segmentation', None)
for ann in annotations_temp:
cat_id = ann['category_id']
try:
comp3330_categories[cat_id]['category'] = \
comp3330_categories[cat_id]['supercategory']
except:
continue
try:
comp3330_categories[cat_id]['name'] = \
comp3330_categories[cat_id]['supercategory']
except:
continue
annotations = annotations_comp3330
#Now create the updated annotations json file
for cat, items in zip(dataset['categories'], comp3330_ids.items()):
dataset['categories'] = [cat for cat in dataset['categories']
if cat['id'] < len(comp3330_ids)]
category, id = items
cat['name'] = category
cat['supercategory'] = category
cat['category'] = category
cat['id'] = id
with open(NEW_CATEGORY_DIR, 'w') as f:
json.dump(dataset, f)
#Split the data into train test splits, default 80-20 train-test split
# split_coco_dataset(NEW_CATEGORY_DIR, 0.2)
| UTF-8 | Python | false | false | 9,937 | py | 7 | annotations.py | 4 | 0.625943 | 0.608332 | 0 | 248 | 39.068548 | 132 |
KKshitiz/J.A.R.V.I.S | 5,746,666,268,073 | 874d82b76316efd2dd4a313aecbb9fee9f67c5db | 76a56f6974a682ddbcd69b84f0827c4084fbaa41 | /software_AI/natural-language-processing/speech2text.py | 46c091e93fe19086dbd7318ce096d02d3b48531b | [
"MIT"
]
| permissive | https://github.com/KKshitiz/J.A.R.V.I.S | 91610800f1ab5aea3e3c25f02b0c49fb66ed8de3 | 620cffec86991c948839a5f9ceaceb59e0a69e78 | refs/heads/master | 2022-11-30T02:57:04.528347 | 2020-08-12T14:39:02 | 2020-08-12T14:39:02 | 251,086,624 | 18 | 18 | null | null | null | null | null | null | null | null | null | null | null | null | null | import speech_recognition as sr
from playsounds import *
#this module converts speech2text
def startStt(lang='en-in'):
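    # capture one utterance from the default microphone and return it as
    # lower-case text (recognition is done by the Google Web Speech API)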
r=sr.Recognizer()
print("Listening")
# startAudio()
startPlayAudio('jarvislistening.wav')
with sr.Microphone() as source:
print("Say something")
# r.pause_threshold=1 #Represents the minimum length of silence (in seconds) that will register as the end of a phrase
# r.energy_threshold=300
# r.adjust_for_ambient_noise(source,duration=0.6)
audio=r.listen(source)
speech=""
try:
speech=r.recognize_google(audio,language=lang)
print("Speech:",speech)
except sr.UnknownValueError:
speech="Sorry! Couldn't understand"
print("Sorry! Couldn't understand")
except sr.RequestError:
print("Could not process request")
speech="Could not process request"
return speech.lower()
if __name__ == "__main__":
startStt() | UTF-8 | Python | false | false | 964 | py | 34 | speech2text.py | 25 | 0.651452 | 0.644191 | 0 | 33 | 28.242424 | 131 |
MUDDASICK/CodeSamples | 15,779,709,875,200 | cdaea3b266e9bf5ae1b4778e4158d0cb457912a4 | 4fc357805e30cd634c753073f8b9e4750ba6eac7 | /Juniper-Automation/Nornir4-filtering/runbook2.py | 72c98a0009eec9abf8a4d166130043319e39767e | []
| no_license | https://github.com/MUDDASICK/CodeSamples | e970634e5b50e93e39261d9af335652da37f60c4 | 1af073e3ad61cf9efe0e26f7097a8414d4bf4739 | refs/heads/main | 2023-08-17T07:34:33.203566 | 2023-06-17T15:42:45 | 2023-06-17T15:42:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import logging
from nornir import InitNornir
from nornir_netmiko.tasks import netmiko_send_config
from nornir_utils.plugins.functions import print_result
logger = logging.getLogger(__name__)
nr = InitNornir(config_file="config.yaml")
def push_snmp(task, filename):
if not os.path.isfile(filename):
        logger.error(f"{filename} cannot be found")
        return
task.run(task=netmiko_send_config, config_file=filename)
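# keep only the hosts whose inventory data sets latitude == "north"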
north_devices = nr.filter(latitude="north")
results = north_devices.run(task=push_snmp, filename="northconfig.cfg")
print_result(results)
| UTF-8 | Python | false | false | 569 | py | 531 | runbook2.py | 266 | 0.759227 | 0.759227 | 0 | 20 | 27.45 | 71 |
jmchilton/galaxy-central | 18,262,200,962,257 | ea24503e17bc91b98f5d60c636839c8abd1bfef6 | 1f579b0ee9ed450fd84da56b9f3e8328ef372a81 | /tools/encode/random_intervals_no_bits.py | 875831cc796cd35e708227b99fdcbe8097a86691 | [
"MIT"
]
| permissive | https://github.com/jmchilton/galaxy-central | 42bfc91532bd6444fbe4dfc239a78b24437e4518 | 31e2fd3a32b06ddfba06ae5b044efdce1d93f08c | refs/heads/master | 2021-01-01T05:37:10.582680 | 2006-11-15T16:28:21 | 2006-11-15T16:28:21 | 6,923,705 | 4 | 4 | null | false | 2015-02-12T19:30:55 | 2012-11-29T15:40:37 | 2015-02-12T19:30:17 | 2015-02-12T19:30:17 | 321,333 | 10 | 10 | 0 | Python | null | null | #!/usr/bin/env python2.4
#%prog bounding_region_file mask_intervals_file intervals_to_mimic_file out_file mask_chr mask_start mask_end interval_chr interval_start interval_end interval_strand use_mask allow_strand_overlaps
import sys, random
max_iters = 100000
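# upper bound on random placement attempts before giving up on a region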
#Try to add a random region
def add_random_region(region, b_chr, b_start, b_end, exist_regions, mask, overlaps):
rand_region = region.copy()
for iter in range(max_iters):
rand_region['start'] = random.randint(b_start, b_end - rand_region['length'])
rand_region['end'] = rand_region['start'] + rand_region['length']
if overlaps == "all":
exist_regions.append(rand_region)
return exist_regions, True
found_overlap = False
for region in exist_regions:
if (rand_region['start'] >= region['start'] and rand_region['start'] <= region['end']) or (rand_region['end'] >= region['start'] and rand_region['end'] <= region['end']):
if overlaps=="none" or rand_region['strand'] == region['strand']:
found_overlap = True
if not found_overlap:
for region in mask:
if region['chr'] != rand_region['chr']:
continue
if (rand_region['start'] >= region['start'] and rand_region['start'] <= region['end']) or (rand_region['end'] >= region['start'] and rand_region['end'] <= region['end']):
found_overlap = True
if not found_overlap:
exist_regions.append(rand_region)
return exist_regions, True
return exist_regions, False
def main():
region_uid = sys.argv[1]
mask_fname = sys.argv[2]
intervals_fname = sys.argv[3]
out_fname = sys.argv[4]
mask_chr = int(sys.argv[5])-1
mask_start = int(sys.argv[6])-1
mask_end = int(sys.argv[7])-1
interval_chr = int(sys.argv[8])-1
interval_start = int(sys.argv[9])-1
interval_end = int(sys.argv[10])-1
interval_strand = int(sys.argv[11])-1
use_mask = sys.argv[12]
overlaps = sys.argv[13]
available_regions = {}
loc_file = "/cache/regions/regions.loc"
try:
for line in open( loc_file ):
if line[0:1] == "#" : continue
fields = line.split('\t')
#read each line, if not enough fields, go to next line
try:
build = fields[0]
uid = fields[1]
description = fields[2]
filepath =fields[3].replace("\n","").replace("\r","")
available_regions[uid]=filepath
except:
continue
except Exception, exc:
print >>sys.stdout, 'random_intervals.py initialization error -> %s' % exc
if region_uid not in available_regions:
        print >>sys.stderr, "Invalid region selected"
sys.exit(0)
region_fname = available_regions[region_uid]
bounds = []
for line in open(region_fname):
try:
if line[0:1] == "#":
continue
fields = line.split("\t")
b_dict = {}
b_dict['chr'] = fields[0]
b_dict['start'] = int(fields[1])
b_dict['end'] = int(fields[2].replace("\n","").replace("\r",""))
bounds.append(b_dict)
except:
continue
regions = []
for i in range(len(bounds)):
regions.append([])
for line in open(intervals_fname):
try:
if line[0:1] == "#":
continue
fields = line.split("\t")
r_dict = {}
r_dict['chr'] = fields[interval_chr].replace("\n","").replace("\r","")
r_dict['start'] = int(fields[interval_start].replace("\n","").replace("\r",""))
r_dict['end'] = int(fields[interval_end].replace("\n","").replace("\r",""))
if interval_strand < 0:
r_dict['strand'] = "+"
else:
try:
r_dict['strand'] = fields[interval_strand].replace("\n","").replace("\r","")
except:
r_dict['strand'] = "+"
r_dict['length'] = r_dict['end'] - r_dict['start']
#loop through bounds, find first proper bounds then add in parrallel to regions
#if an interval crosses bounds, it will be added to the first bound
for i in range(len(bounds)):
b_chr = bounds[i]['chr']
if b_chr != r_dict["chr"]:
continue
b_start = bounds[i]['start']
b_end = bounds[i]['end']
if (r_dict['start'] >= b_start and r_dict['start'] <= b_end) or (r_dict['end'] >= b_start and r_dict['end'] <= b_end):
regions[i].append(r_dict)
break
except:
continue
mask = []
if use_mask != "no_mask":
for line in open(mask_fname):
try:
if line[0:1] == "#":
continue
fields = line.split("\t")
m_dict = {}
m_dict['chr'] = fields[mask_chr].replace("\n","").replace("\r","")
m_dict['start'] = int(fields[mask_start].replace("\n","").replace("\r",""))
m_dict['end'] = int(fields[mask_end].replace("\n","").replace("\r",""))
mask.append(m_dict)
except:
continue
    out_file = open(out_fname, "w")  # IOError is raised if the file cannot be opened
i = 0
i_iters = 0
region_count = 1
while i < (len(bounds)):
i_iters += 1
random_regions = []
added = True
for j in range(len(regions[i])):
random_regions, added = add_random_region(regions[i][j], bounds[i]['chr'], bounds[i]['start'], bounds[i]['end'], random_regions, mask, overlaps)
if added == False:
if i_iters < max_iters:
i-=1
break
else:
added = True
i_iters = 0
print "After",str(max_iters),"x",str(max_iters),"iterations, a region could not be added."
if use_mask == "use_mask":
print "The mask you have provided may be too restrictive."
if added == True:
i_iters = 0
for region in random_regions:
print >>out_file, "%s\t%d\t%d\t%s\t%s\t%s" % ( region['chr'], region['start'], region['end'], "region_"+str(region_count), "0", region['strand'] )
region_count +=1
i+=1
if __name__ == "__main__": main()
| UTF-8 | Python | false | false | 6,767 | py | 379 | random_intervals_no_bits.py | 154 | 0.492242 | 0.483523 | 0 | 180 | 35.594444 | 197 |
ErykMalinowski/JC | 10,368,051,060,812 | 0e23614340b23e9b619f651d6b1317d595e26816 | 77144c478973a82017135454a79cbcdd7644c5e9 | /website/myapp/migrations/0002_auto_20200312_2119.py | 07a0f8d78865ec856ca5cc8f8c6f548d80a001a1 | []
| no_license | https://github.com/ErykMalinowski/JC | b35ee52e0b4e7de2aab3de602c8f801cbdeef14b | bdd7149f0f7bd408e50ab7d2e3623a5b546298c7 | refs/heads/master | 2022-04-28T10:35:56.919632 | 2020-07-20T12:01:17 | 2020-07-20T12:01:17 | 236,815,872 | 0 | 0 | null | false | 2022-04-22T23:10:23 | 2020-01-28T19:02:50 | 2020-07-20T12:01:27 | 2022-04-22T23:10:21 | 44,116 | 0 | 0 | 2 | HTML | false | false | # Generated by Django 2.2.10 on 2020-03-12 21:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('personal_details', models.CharField(max_length=40)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_year', models.PositiveSmallIntegerField()),
('end_year', models.PositiveSmallIntegerField()),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('img', models.FileField(upload_to='')),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='TeamSeason',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('matches', models.PositiveSmallIntegerField()),
('wins', models.PositiveSmallIntegerField()),
('draws', models.PositiveSmallIntegerField()),
('losses', models.PositiveSmallIntegerField()),
('goals_for', models.PositiveSmallIntegerField()),
('goals_against', models.PositiveSmallIntegerField()),
('points', models.SmallIntegerField()),
('season', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Season')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Team')),
],
),
migrations.CreateModel(
name='Round',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveSmallIntegerField()),
('active', models.BooleanField(default=False)),
('season', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Season')),
],
),
migrations.CreateModel(
name='PlayerSeason',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('goals', models.PositiveSmallIntegerField(null=True)),
('assists', models.PositiveSmallIntegerField(null=True)),
('yellow_cards', models.PositiveSmallIntegerField(null=True)),
('red_cards', models.PositiveSmallIntegerField(null=True)),
('conceded', models.PositiveSmallIntegerField(null=True)),
('clean_sheets', models.PositiveSmallIntegerField(null=True)),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Player')),
],
),
migrations.AddField(
model_name='player',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Team'),
),
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score_home', models.PositiveSmallIntegerField(blank=True, null=True)),
('score_away', models.PositiveSmallIntegerField(blank=True, null=True)),
('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.Round')),
('team_away', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team_away', to='myapp.Team')),
('team_home', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team_home', to='myapp.Team')),
],
),
]
| UTF-8 | Python | false | false | 4,548 | py | 30 | 0002_auto_20200312_2119.py | 10 | 0.57124 | 0.565963 | 0 | 93 | 47.903226 | 137 |
wogus3602/study | 15,461,882,295,997 | 246821491965de6f58761f967ee7a65427d53bb1 | 2e2eb7e00899bd75ce9a6f6bf81267dde31e0e69 | /윤석주/day9/알파벳거리.py | ff99d0a10ebc9129d8ae4d4c6e03979d1535196a | []
| no_license | https://github.com/wogus3602/study | 3f8e47bbcf7a45aa1ba0781f50140a6f9be89764 | cba81bec9a7044567627d197ae9c8620d4fb8729 | refs/heads/master | 2022-11-18T23:32:50.632074 | 2020-07-10T04:02:42 | 2020-07-10T04:02:42 | 273,912,878 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
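# for each test case, print the forward distance from word1[i] to word2[i],
# wrapping around the 26-letter alphabet when word2's letter comes earlier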
for case in range(n):
word1, word2 = input().split(' ')
print("Distances:",end="")
for i in range(len(word1)):
diff = ord(word2[i]) - ord(word1[i])
if diff < 0:
diff += 26
print("",diff,end="")
print() | UTF-8 | Python | false | false | 291 | py | 159 | 알파벳거리.py | 159 | 0.453608 | 0.426117 | 0 | 15 | 18.466667 | 44 |
VN-Pikachu/Machine-Learning-from-scratch | 8,564,164,820,574 | 4c3455e80ce43d01d4ee96970a1e6e8fe24bdf03 | db6837c33e32d396fdd60279f319527776eddeb5 | /LDA/LDA.py | 843a12124bf8d8848364d8165953a92361ef10d4 | []
| no_license | https://github.com/VN-Pikachu/Machine-Learning-from-scratch | e887817e31024a5c6e9018bb845632abd1992db4 | a1f6e2993d3d44bec1cd9b1300e598621638f56e | refs/heads/main | 2023-03-20T11:34:26.868615 | 2021-03-03T10:07:18 | 2021-03-03T10:07:18 | 344,062,544 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
class LDA:
def __init__(self, n_components = None):
self.n_components_ = n_components
def fit(self, X, y):
#n samples, m features
n, m = X.shape
#Between-Class Variance Matrix
S_B = np.zeros((m,m))
#Within-Class Variance Matrix
S_W = np.zeros((m,m))
#Global mean over all samples
M = np.mean(X, axis = 0)
#Go though each class
for group in np.unique(y):
#Get samples from the current class
x = X[y == group]
#Local mean of the current group
Mi = np.mean(x, axis = 0)
#Centering-data
x -= Mi
#The number of samples of the current group
Ni = len(x)
#Update Between-Class Variance Matrix and Within-Class Variance Matrix
S_B += Ni * np.outer(Mi - M, Mi - M)
S_W += x.T @ x #Covariance Matrix: np.cov(x.T) : normalized
#Calculate Eigenvectors, Eigenvalues of: inv(S_W) @ S_b
#Order by eigenvalues descending
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W) @ S_B)
keys = sorted(range(len(eig_vals)), key = lambda x: -abs(eig_vals[x]))
eig_vecs = eig_vecs[:, keys]
eig_vals = eig_vals[keys]
if self.n_components_ == None:
self.n_components_ = m
self.components_ = eig_vecs[:, :self.n_components_].T
self.explained_variance_ = eig_vals[:self.n_components_]
        self.explained_variance_ratio_ = (eig_vals / np.sum(eig_vals))[:self.n_components_]
def transform(self, X):
return X @ self.components_.T
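# Minimal usage sketch on synthetic two-class data (illustrative only; the data
# below is made up and not part of the original module):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # two Gaussian blobs in 3-D, one per class
    X = np.vstack([rng.randn(20, 3) + 2, rng.randn(20, 3) - 2])
    y = np.array([0] * 20 + [1] * 20)
    lda = LDA(n_components=1)
    lda.fit(X, y)
    # each sample is projected onto the single most discriminative axis
    print(lda.transform(X).shape)  # -> (40, 1)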
| UTF-8 | Python | false | false | 1,725 | py | 118 | LDA.py | 85 | 0.545507 | 0.544348 | 0 | 44 | 37.204545 | 91 |
pkgpl/gpl | 10,024,453,679,448 | 6db842b9c26f3539f9f29d7326f895ce0955884e | 5f02dc4398423561bb8560ab35325d3dedfc36c7 | /script/abspath.py | e89605fde0292c93a0a82daa8bc7934f589c7521 | []
| no_license | https://github.com/pkgpl/gpl | 0f081c8671cb450c02415b5a39350d085a4cec57 | a7d3861120a6af9269c82e34a3b96c5dfb61e70a | refs/heads/master | 2021-07-14T14:20:16.287372 | 2021-06-24T01:53:01 | 2021-06-24T01:53:01 | 22,287,752 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#
# Program : print abs path for scp
# Date :
import sys,commands,os.path
if len(sys.argv) < 2:
print 'input file names'
sys.exit(1)
cmd=commands.getstatusoutput
user=cmd('echo $USER')[1]
host=cmd('echo $HOSTNAME')[1]
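# map known cluster-internal hostnames to the aliases used when scp-ing from outside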
if host=='master02': host='gplhpc'
if host=='laplace001': host='laplace'
if host=='node01': host='gop604'
if host=='cudamaster01': host='newcuda'
if host=='cuda01': host='cuda'
files=sys.argv[1:]
for item in files:
path=os.path.abspath(item)
print "%s@%s:%s"%(user,host,path)
| UTF-8 | Python | false | false | 525 | py | 99 | abspath.py | 42 | 0.674286 | 0.638095 | 0 | 24 | 20.875 | 41 |
soarlab/AAQNN | 13,245,679,155,673 | ca47ea3014a1b0eaad2c02277b6f785e92b98bc7 | 35844c887d6da13d5b72e297183991aa0cea1b52 | /experiments/__init__.py | 8d1eb5964c8358e795e794f286f5d745adcfd230 | []
| no_license | https://github.com/soarlab/AAQNN | 5c5f87cf594ddb6f6c800907fa11d452bc88b4dc | ea6627ad9f0d55196d0dde90d7dbe5472be99d66 | refs/heads/master | 2022-01-21T08:11:44.616642 | 2019-06-24T08:42:45 | 2019-06-24T08:42:45 | 178,188,010 | 0 | 0 | null | false | 2022-01-13T01:08:45 | 2019-03-28T11:17:56 | 2019-06-24T08:43:03 | 2022-01-13T01:08:42 | 178,500 | 0 | 0 | 8 | Python | false | false | '''
Several experiments are implemented in this directory.
Each experiment has a description at the beginning of its file.
''' | UTF-8 | Python | false | false | 137 | py | 34 | __init__.py | 31 | 0.788321 | 0.788321 | 0 | 4 | 33.5 | 68 |
Yossarian92/Project_UCSD | 3,384,434,266,718 | 49f46a73bdd34b9fc6ccf13390713631b93b1b6b | e2e939d6226fda52f7768c2b764979e1e194efc7 | /mux.py | 77bd2537635e4d120e1ca2b0319f80a91ed4c59b | []
| no_license | https://github.com/Yossarian92/Project_UCSD | 96f279251650337eeb37f8ad5521b096ef4569b2 | 5188190f5fa1946f8110540aab5dd0ca957a76ec | refs/heads/master | 2022-07-17T08:28:53.429061 | 2019-09-02T00:03:22 | 2019-09-02T00:03:22 | 198,270,462 | 0 | 1 | null | false | 2020-05-13T13:19:19 | 2019-07-22T17:19:59 | 2019-10-23T14:21:20 | 2019-09-02T00:03:22 | 21,193 | 0 | 1 | 1 | Python | false | false | import time
def map_select_bit_to_gpio_pin(bit):
if bit == 0:
return 24
if bit == 1:
return 25
if bit == 2:
return 26
if bit == 3:
return 27
def s_mask(bit):
if bit == 0:
return 1
if bit == 1:
return 2
if bit == 2:
return 4
if bit == 3:
return 8
def s_ch(bit):
if bit == 2:
return 1
if bit == 4:
return 2
if bit == 8:
return 3
if bit == 16:
return 4
if bit == 32:
return 5
def write_bit_to_gpio_pin(pin, value):
    # write "value" (0 or 1) to the sysfs value file for the given pin
    if value in (0, 1):
        filename = "/gpio/pin" + str(pin) + "/value"
        file = open(filename, 'w')
        file.write(str(value))
        file.close()
def mux_select_and_set(channel, num, en=True):
s = [0 for i in range(s_ch(num))]
for i in range(s_ch(num)):
s[i] = (channel & s_mask(i)) >> i
write_bit_to_gpio_pin(map_select_bit_to_gpio_pin(i), s[i])
class mux:
def __init__(self, number_of_channels, en_pin, signal_pin, select_pins, active_low=True):
self.en_pin = en_pin
self.select_pins = select_pins
self.number_of_channels = number_of_channels
self.signal_pin = signal_pin
def enable(self):
write_bit_to_gpio_pin(self.en_pin, 0) # mux enable is active low
def disable(self):
write_bit_to_gpio_pin(self.en_pin, 1)
def select_channel(self, ch):
mux_select_and_set(ch, self.number_of_channels)
return 0
def read_signal(self):
# ADC reading code
sig_dict = {0:"0/in_voltage0", 1:"0/in_voltage1", 2:"0/in_voltage2", 3:"0/in_voltage3", 4:"1/in_voltage0", 5:"1/in_voltage1"}
raw = int(open("/sys/bus/iio/devices/iio:device"+ sig_dict[self.signal_pin] + "_raw").read())
scale = float(open("/sys/bus/iio/devices/iio:device0/in_voltage_scale").read())
return raw*scale
def oversampling(self, num):
val = 0
for i in range(num):
val += self.read_signal()
val = val / num
return val
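# The gas conversions below appear to follow Alphasense-style electrochemical
# sensor corrections: WE and AE are working- and auxiliary-electrode readings (mV),
# T is the temperature in degrees Celsius, and each dict holds a temperature-
# dependent correction factor selected by the 10-degree band T//10.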
def SO2(WE, AE, T):
SO2_dict = {-3:0.85, -2:0.85, -1:0.85, 0:0.85, 1:0.85, 2:1.15, 3:1.45, 4:1.75, 5:1.95}
num = T//10
if num in SO2_dict:
t = SO2_dict.get(num)
ppb = ((WE-280) - t*(AE-306)) / 0.296
return ppb
def O3(WE, AE, T):
O3_dict = {-3:0.18, -2:0.18, -1:0.18, 0:0.18, 1:0.18, 2:0.18, 3:0.18, 4:0.18, 5:2.87}
num = T//10
if num in O3_dict:
t = O3_dict.get(num)
ppb = ((WE-418) - t*(AE-404)) / 393
return ppb
def CO(WE, AE, T):
CO_dict = {-3:1.40, -2:1.03, -1:0.85, 0:0.62, 1:0.30, 2:0.03, 3:-0.25, 4:-0.48, 5:-0.80}
num = T//10
if num in CO_dict:
t = CO_dict.get(num)
ppb = ((WE-311) - t*(AE-276)) / 292
return ppb
def NO2(WE, AE, T):
NO2_dict = {-3:1.18, -2:1.18, -1:1.18, 0:1.18, 1:1.18, 2:1.18, 3:1.18, 4:2.00, 5:2.70}
num = T//10
if num in NO2_dict:
t = NO2_dict.get(num)
ppb = ((WE-215) - t*(AE-246)) / 0.212
return ppb
def temperature(V):
T = (V - 830) / 10 + 25
return T
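# empirical dust-sensor fit: volts -> hppcf (presumably hundreds of particles
# per cubic foot), then hppcf -> an approximate PM2.5 mass concentration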
def PM25(V):
hppcf = 50 + (0.2433*V) + (0.1386*V**2)
pm = 0.518 + 0.00274*hppcf
return pm | UTF-8 | Python | false | false | 3,197 | py | 10 | mux.py | 9 | 0.545824 | 0.453863 | 0 | 141 | 20.687943 | 127 |
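# Minimal usage sketch (pin numbers below are made up for illustration, so this
# is left commented out rather than executed on import):
# m = mux(number_of_channels=8, en_pin=23, signal_pin=0, select_pins=[24, 25, 26])
# m.enable()
# m.select_channel(3)
# print(temperature(m.oversampling(16)))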
philna/Python-Space-Projects | 5,549,097,769,212 | 75d5c84cae9ccd57007f9453aac601ed9aeb4484 | 44efcd773173c1a341974eb9208f740d92ed1635 | /Download asterank data.py | 6e2a0cbcdd65e5b7ce4ff83a816fa55cd02a9798 | []
| no_license | https://github.com/philna/Python-Space-Projects | 7d76efa7daa2da967dfb51d712ed50bfe997bc41 | af95f8586a8691a9c371805d87fc289dad7f9abc | refs/heads/master | 2021-01-23T19:17:28.211782 | 2017-09-08T05:37:19 | 2017-09-08T05:37:19 | 102,817,142 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """""
This script connects to the Asterank API which provides access to the NASA Small Body Database
along with some modifications to estimate monetary value and other financial/feasibility metrics
August 11, 2017
"""
import requests
import json
#Request results from Asterank API - query for all results with limit value set to 100
response = requests.get("http://www.asterank.com/api/asterank?query={}&limit=1000")
#Decode response as json list (not a dictionary for this API)
data = response.json()
# OPTIONAL - Add exception handling here in case response.json fails - look for ValueError: No JSON object could be decode
#This code enumerates data column names such as e - eccentricity, name - asteroid name
for i, item in enumerate(data[3],0):
print(i, item)
#Iterate through Asterank JSON and extract data - temporary - use map and join to format and then export as .csv file
for i in range(len(data)):
print(
data[i]['e'],
data[i]['a'],
data[i]['name'],
data[i]['producer'],
data[i]['full_name'],
data[i]['class'],
data[i]['GM'],
data[i]['spec'],
data[i]['closeness'],
data[i]['price'],
data[i]['score'],
data[i]['neo']
)
"""
print(data[0]['e'])
print(data[0]['a'])
# Get the response data as a python object.
data = response.json()
print(response)
print(type(data))
print(data[5]["e"])
for i, v in data.items():
print(i,v)
for i, item in enumerate(data[3],0):
print(i, item)
for i in data:
print(i)
data = json.load(response,
cls=None,
object_hook=None,
parse_float=None,
parse_int=None,
parse_constant=None,
object_pairs_hook=None, **kw)
)
print(type(data))
"""
| UTF-8 | Python | false | false | 1,883 | py | 2 | Download asterank data.py | 1 | 0.603293 | 0.592671 | 0 | 89 | 20.101124 | 123 |
yusuk6185/algorithm_practice | 4,294,967,309,116 | c1d552d78e0cf75547236145d1087de480d01e28 | 5eee9e8edb71dc16e964ab158ce252e798cd513b | /Challenge/예산.py | 286605567e5529d35ed13939f2373498596d145e | []
| no_license | https://github.com/yusuk6185/algorithm_practice | b209328f3d2ca633046f3530267083e20ce5108d | 36ddb7e319cb4714bd53a95786de612a80d0ccb5 | refs/heads/master | 2023-08-17T18:44:40.674355 | 2021-10-12T12:27:06 | 2021-10-12T12:27:06 | 393,629,077 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://programmers.co.kr/learn/courses/30/lessons/12982
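# greedy: sort the requests ascending and fund the cheapest ones first,
# which maximizes how many departments can be funded within the budget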
def solution(d, budget):
d.sort()
answer = []
for i in range(len(d)):
answer.append(d[i])
if sum(answer) > budget:
answer.pop()
break
elif sum(answer) == budget:
break
return len(answer)
print(solution([1,3,5,2,4], 9))
# another solution
def solution(d, budget):
d.sort()
while budget < sum(d):
d.pop()
return len(d)
| UTF-8 | Python | false | false | 487 | py | 104 | 예산.py | 103 | 0.533881 | 0.507187 | 0 | 26 | 17.730769 | 58 |
diasurgical/scalpel | 8,186,207,690,051 | 392e35b2c7edbac023fad30915f87b3c415a4cb1 | dfe1f796a54143e5eb8661f3328ad29dbfa072d6 | /psx/_dump_/34/_dump_ida_/overlay_b/set_vars.py | ea143f7c9b6fabbb98c90e2c0cc672f814dac9ac | [
"Unlicense"
]
| permissive | https://github.com/diasurgical/scalpel | 0f73ad9be0750ce08eb747edc27aeff7931800cd | 8c631dff3236a70e6952b1f564d0dca8d2f4730f | refs/heads/master | 2021-06-10T18:07:03.533074 | 2020-04-16T04:08:35 | 2020-04-16T04:08:35 | 138,939,330 | 15 | 7 | Unlicense | false | 2019-08-27T08:45:36 | 2018-06-27T22:30:04 | 2019-08-26T13:07:27 | 2019-08-27T06:45:36 | 11,265 | 12 | 8 | 2 | C | false | false | del_items(0x801392A8)
SetType(0x801392A8, "struct THEME_LOC themeLoc[50]")
del_items(0x801399F0)
SetType(0x801399F0, "int OldBlock[4]")
del_items(0x80139A00)
SetType(0x80139A00, "unsigned char L5dungeon[80][80]")
del_items(0x80139690)
SetType(0x80139690, "struct ShadowStruct SPATS[37]")
del_items(0x80139794)
SetType(0x80139794, "unsigned char BSTYPES[206]")
del_items(0x80139864)
SetType(0x80139864, "unsigned char L5BTYPES[206]")
del_items(0x80139934)
SetType(0x80139934, "unsigned char STAIRSUP[34]")
del_items(0x80139958)
SetType(0x80139958, "unsigned char L5STAIRSUP[34]")
del_items(0x8013997C)
SetType(0x8013997C, "unsigned char STAIRSDOWN[26]")
del_items(0x80139998)
SetType(0x80139998, "unsigned char LAMPS[10]")
del_items(0x801399A4)
SetType(0x801399A4, "unsigned char PWATERIN[74]")
del_items(0x80139298)
SetType(0x80139298, "unsigned char L5ConvTbl[16]")
del_items(0x80141C5C)
SetType(0x80141C5C, "struct ROOMNODE RoomList[81]")
del_items(0x801422B0)
SetType(0x801422B0, "unsigned char predungeon[40][40]")
del_items(0x801403B8)
SetType(0x801403B8, "int Dir_Xadd[5]")
del_items(0x801403CC)
SetType(0x801403CC, "int Dir_Yadd[5]")
del_items(0x801403E0)
SetType(0x801403E0, "struct ShadowStruct SPATSL2[2]")
del_items(0x801403F0)
SetType(0x801403F0, "unsigned char BTYPESL2[161]")
del_items(0x80140494)
SetType(0x80140494, "unsigned char BSTYPESL2[161]")
del_items(0x80140538)
SetType(0x80140538, "unsigned char VARCH1[18]")
del_items(0x8014054C)
SetType(0x8014054C, "unsigned char VARCH2[18]")
del_items(0x80140560)
SetType(0x80140560, "unsigned char VARCH3[18]")
del_items(0x80140574)
SetType(0x80140574, "unsigned char VARCH4[18]")
del_items(0x80140588)
SetType(0x80140588, "unsigned char VARCH5[18]")
del_items(0x8014059C)
SetType(0x8014059C, "unsigned char VARCH6[18]")
del_items(0x801405B0)
SetType(0x801405B0, "unsigned char VARCH7[18]")
del_items(0x801405C4)
SetType(0x801405C4, "unsigned char VARCH8[18]")
del_items(0x801405D8)
SetType(0x801405D8, "unsigned char VARCH9[18]")
del_items(0x801405EC)
SetType(0x801405EC, "unsigned char VARCH10[18]")
del_items(0x80140600)
SetType(0x80140600, "unsigned char VARCH11[18]")
del_items(0x80140614)
SetType(0x80140614, "unsigned char VARCH12[18]")
del_items(0x80140628)
SetType(0x80140628, "unsigned char VARCH13[18]")
del_items(0x8014063C)
SetType(0x8014063C, "unsigned char VARCH14[18]")
del_items(0x80140650)
SetType(0x80140650, "unsigned char VARCH15[18]")
del_items(0x80140664)
SetType(0x80140664, "unsigned char VARCH16[18]")
del_items(0x80140678)
SetType(0x80140678, "unsigned char VARCH17[14]")
del_items(0x80140688)
SetType(0x80140688, "unsigned char VARCH18[14]")
del_items(0x80140698)
SetType(0x80140698, "unsigned char VARCH19[14]")
del_items(0x801406A8)
SetType(0x801406A8, "unsigned char VARCH20[14]")
del_items(0x801406B8)
SetType(0x801406B8, "unsigned char VARCH21[14]")
del_items(0x801406C8)
SetType(0x801406C8, "unsigned char VARCH22[14]")
del_items(0x801406D8)
SetType(0x801406D8, "unsigned char VARCH23[14]")
del_items(0x801406E8)
SetType(0x801406E8, "unsigned char VARCH24[14]")
del_items(0x801406F8)
SetType(0x801406F8, "unsigned char VARCH25[18]")
del_items(0x8014070C)
SetType(0x8014070C, "unsigned char VARCH26[18]")
del_items(0x80140720)
SetType(0x80140720, "unsigned char VARCH27[18]")
del_items(0x80140734)
SetType(0x80140734, "unsigned char VARCH28[18]")
del_items(0x80140748)
SetType(0x80140748, "unsigned char VARCH29[18]")
del_items(0x8014075C)
SetType(0x8014075C, "unsigned char VARCH30[18]")
del_items(0x80140770)
SetType(0x80140770, "unsigned char VARCH31[18]")
del_items(0x80140784)
SetType(0x80140784, "unsigned char VARCH32[18]")
del_items(0x80140798)
SetType(0x80140798, "unsigned char VARCH33[18]")
del_items(0x801407AC)
SetType(0x801407AC, "unsigned char VARCH34[18]")
del_items(0x801407C0)
SetType(0x801407C0, "unsigned char VARCH35[18]")
del_items(0x801407D4)
SetType(0x801407D4, "unsigned char VARCH36[18]")
del_items(0x801407E8)
SetType(0x801407E8, "unsigned char VARCH37[18]")
del_items(0x801407FC)
SetType(0x801407FC, "unsigned char VARCH38[18]")
del_items(0x80140810)
SetType(0x80140810, "unsigned char VARCH39[18]")
del_items(0x80140824)
SetType(0x80140824, "unsigned char VARCH40[18]")
del_items(0x80140838)
SetType(0x80140838, "unsigned char HARCH1[14]")
del_items(0x80140848)
SetType(0x80140848, "unsigned char HARCH2[14]")
del_items(0x80140858)
SetType(0x80140858, "unsigned char HARCH3[14]")
del_items(0x80140868)
SetType(0x80140868, "unsigned char HARCH4[14]")
del_items(0x80140878)
SetType(0x80140878, "unsigned char HARCH5[14]")
del_items(0x80140888)
SetType(0x80140888, "unsigned char HARCH6[14]")
del_items(0x80140898)
SetType(0x80140898, "unsigned char HARCH7[14]")
del_items(0x801408A8)
SetType(0x801408A8, "unsigned char HARCH8[14]")
del_items(0x801408B8)
SetType(0x801408B8, "unsigned char HARCH9[14]")
del_items(0x801408C8)
SetType(0x801408C8, "unsigned char HARCH10[14]")
del_items(0x801408D8)
SetType(0x801408D8, "unsigned char HARCH11[14]")
del_items(0x801408E8)
SetType(0x801408E8, "unsigned char HARCH12[14]")
del_items(0x801408F8)
SetType(0x801408F8, "unsigned char HARCH13[14]")
del_items(0x80140908)
SetType(0x80140908, "unsigned char HARCH14[14]")
del_items(0x80140918)
SetType(0x80140918, "unsigned char HARCH15[14]")
del_items(0x80140928)
SetType(0x80140928, "unsigned char HARCH16[14]")
del_items(0x80140938)
SetType(0x80140938, "unsigned char HARCH17[14]")
del_items(0x80140948)
SetType(0x80140948, "unsigned char HARCH18[14]")
del_items(0x80140958)
SetType(0x80140958, "unsigned char HARCH19[14]")
del_items(0x80140968)
SetType(0x80140968, "unsigned char HARCH20[14]")
del_items(0x80140978)
SetType(0x80140978, "unsigned char HARCH21[14]")
del_items(0x80140988)
SetType(0x80140988, "unsigned char HARCH22[14]")
del_items(0x80140998)
SetType(0x80140998, "unsigned char HARCH23[14]")
del_items(0x801409A8)
SetType(0x801409A8, "unsigned char HARCH24[14]")
del_items(0x801409B8)
SetType(0x801409B8, "unsigned char HARCH25[14]")
del_items(0x801409C8)
SetType(0x801409C8, "unsigned char HARCH26[14]")
del_items(0x801409D8)
SetType(0x801409D8, "unsigned char HARCH27[14]")
del_items(0x801409E8)
SetType(0x801409E8, "unsigned char HARCH28[14]")
del_items(0x801409F8)
SetType(0x801409F8, "unsigned char HARCH29[14]")
del_items(0x80140A08)
SetType(0x80140A08, "unsigned char HARCH30[14]")
del_items(0x80140A18)
SetType(0x80140A18, "unsigned char HARCH31[14]")
del_items(0x80140A28)
SetType(0x80140A28, "unsigned char HARCH32[14]")
del_items(0x80140A38)
SetType(0x80140A38, "unsigned char HARCH33[14]")
del_items(0x80140A48)
SetType(0x80140A48, "unsigned char HARCH34[14]")
del_items(0x80140A58)
SetType(0x80140A58, "unsigned char HARCH35[14]")
del_items(0x80140A68)
SetType(0x80140A68, "unsigned char HARCH36[14]")
del_items(0x80140A78)
SetType(0x80140A78, "unsigned char HARCH37[14]")
del_items(0x80140A88)
SetType(0x80140A88, "unsigned char HARCH38[14]")
del_items(0x80140A98)
SetType(0x80140A98, "unsigned char HARCH39[14]")
del_items(0x80140AA8)
SetType(0x80140AA8, "unsigned char HARCH40[14]")
del_items(0x80140AB8)
SetType(0x80140AB8, "unsigned char USTAIRS[34]")
del_items(0x80140ADC)
SetType(0x80140ADC, "unsigned char DSTAIRS[34]")
del_items(0x80140B00)
SetType(0x80140B00, "unsigned char WARPSTAIRS[34]")
del_items(0x80140B24)
SetType(0x80140B24, "unsigned char CRUSHCOL[20]")
del_items(0x80140B38)
SetType(0x80140B38, "unsigned char BIG1[10]")
del_items(0x80140B44)
SetType(0x80140B44, "unsigned char BIG2[10]")
del_items(0x80140B50)
SetType(0x80140B50, "unsigned char BIG5[10]")
del_items(0x80140B5C)
SetType(0x80140B5C, "unsigned char BIG8[10]")
del_items(0x80140B68)
SetType(0x80140B68, "unsigned char BIG9[10]")
del_items(0x80140B74)
SetType(0x80140B74, "unsigned char BIG10[10]")
del_items(0x80140B80)
SetType(0x80140B80, "unsigned char PANCREAS1[32]")
del_items(0x80140BA0)
SetType(0x80140BA0, "unsigned char PANCREAS2[32]")
del_items(0x80140BC0)
SetType(0x80140BC0, "unsigned char CTRDOOR1[20]")
del_items(0x80140BD4)
SetType(0x80140BD4, "unsigned char CTRDOOR2[20]")
del_items(0x80140BE8)
SetType(0x80140BE8, "unsigned char CTRDOOR3[20]")
del_items(0x80140BFC)
SetType(0x80140BFC, "unsigned char CTRDOOR4[20]")
del_items(0x80140C10)
SetType(0x80140C10, "unsigned char CTRDOOR5[20]")
del_items(0x80140C24)
SetType(0x80140C24, "unsigned char CTRDOOR6[20]")
del_items(0x80140C38)
SetType(0x80140C38, "unsigned char CTRDOOR7[20]")
del_items(0x80140C4C)
SetType(0x80140C4C, "unsigned char CTRDOOR8[20]")
del_items(0x80140C60)
SetType(0x80140C60, "int Patterns[10][100]")
del_items(0x80147CC8)
SetType(0x80147CC8, "unsigned char lockout[40][40]")
del_items(0x80147A28)
SetType(0x80147A28, "unsigned char L3ConvTbl[16]")
del_items(0x80147A38)
SetType(0x80147A38, "unsigned char L3UP[20]")
del_items(0x80147A4C)
SetType(0x80147A4C, "unsigned char L3DOWN[20]")
del_items(0x80147A60)
SetType(0x80147A60, "unsigned char L3HOLDWARP[20]")
del_items(0x80147A74)
SetType(0x80147A74, "unsigned char L3TITE1[34]")
del_items(0x80147A98)
SetType(0x80147A98, "unsigned char L3TITE2[34]")
del_items(0x80147ABC)
SetType(0x80147ABC, "unsigned char L3TITE3[34]")
del_items(0x80147AE0)
SetType(0x80147AE0, "unsigned char L3TITE6[42]")
del_items(0x80147B0C)
SetType(0x80147B0C, "unsigned char L3TITE7[42]")
del_items(0x80147B38)
SetType(0x80147B38, "unsigned char L3TITE8[20]")
del_items(0x80147B4C)
SetType(0x80147B4C, "unsigned char L3TITE9[20]")
del_items(0x80147B60)
SetType(0x80147B60, "unsigned char L3TITE10[20]")
del_items(0x80147B74)
SetType(0x80147B74, "unsigned char L3TITE11[20]")
del_items(0x80147B88)
SetType(0x80147B88, "unsigned char L3ISLE1[14]")
del_items(0x80147B98)
SetType(0x80147B98, "unsigned char L3ISLE2[14]")
del_items(0x80147BA8)
SetType(0x80147BA8, "unsigned char L3ISLE3[14]")
del_items(0x80147BB8)
SetType(0x80147BB8, "unsigned char L3ISLE4[14]")
del_items(0x80147BC8)
SetType(0x80147BC8, "unsigned char L3ISLE5[10]")
del_items(0x80147BD4)
SetType(0x80147BD4, "unsigned char L3ANVIL[244]")
del_items(0x8014CAE4)
SetType(0x8014CAE4, "unsigned char dung[20][20]")
del_items(0x8014CC74)
SetType(0x8014CC74, "unsigned char hallok[20]")
del_items(0x8014CC88)
SetType(0x8014CC88, "unsigned char L4dungeon[80][80]")
del_items(0x8014E588)
SetType(0x8014E588, "unsigned char L4ConvTbl[16]")
del_items(0x8014E598)
SetType(0x8014E598, "unsigned char L4USTAIRS[42]")
del_items(0x8014E5C4)
SetType(0x8014E5C4, "unsigned char L4TWARP[42]")
del_items(0x8014E5F0)
SetType(0x8014E5F0, "unsigned char L4DSTAIRS[52]")
del_items(0x8014E624)
SetType(0x8014E624, "unsigned char L4PENTA[52]")
del_items(0x8014E658)
SetType(0x8014E658, "unsigned char L4PENTA2[52]")
del_items(0x8014E68C)
SetType(0x8014E68C, "unsigned char L4BTYPES[140]")
| UTF-8 | Python | false | false | 10,665 | py | 1,891 | set_vars.py | 1,504 | 0.787998 | 0.511861 | 0 | 300 | 34.55 | 55 |
kaleyra/kaleyra-python | 3,925,600,134,132 | 115d900ecc8d61ef42ee2f766fc95d0b38fd9fbf | 8d174b71de3a33dcb7d51448601bd61688407756 | /example/txtly/txtly-log.py | 0b45569d7382c0e6ff87539fca88fa1b937fddf7 | [
"MIT"
]
| permissive | https://github.com/kaleyra/kaleyra-python | 7bdda17b891091cdbcaeaa27ae345f2f87c92cd3 | 782333149e7f8673f383a5743f4ed06efb867d36 | refs/heads/master | 2023-08-14T10:21:12.738547 | 2019-08-05T17:30:08 | 2019-08-05T17:30:08 | 196,204,276 | 2 | 7 | MIT | false | 2023-07-25T16:40:30 | 2019-07-10T12:48:18 | 2021-09-21T19:28:03 | 2023-07-25T16:34:26 | 224 | 3 | 4 | 4 | Python | false | false | #!/usr/bin/env python
from api.txtly.txtly_request import TxtlyRequest
__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "support@kaleyra.com"
__status__ = "Production"
# The user can pull event logs for a particular Txtly URL.
# A Txtly campaign must be created and executed to track its logs.
# id (Txtly ID) is a mandatory parameter.
logRequest = TxtlyRequest(id='778975090')
logResponse = logRequest.log()
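# the log response exposes the individual Txtly event entries and the
# pagination metadata as separate response objects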
txtlys = logResponse.get_txtlys_responses()
pagination = logResponse.get_pagination_responses()
print(logResponse.get_message())
print(logResponse.get_txtlys())
print(logResponse.to_json())
print(txtlys[1].get_log_id())
print(txtlys[3].get_log_id())
print(txtlys[0].get_fk_link_id())
print(txtlys[3].get_browser_version())
print(pagination.get_now())
print(pagination.get_limit())
print(pagination.get_limitstart())
| UTF-8 | Python | false | false | 937 | py | 44 | txtly-log.py | 37 | 0.729989 | 0.709712 | 0 | 32 | 28.28125 | 72 |
lowysole/bot_carnaval | 15,006,615,756,409 | 91acf4002a249b3f4f0ef4c8ebc43243cb6f067d | c280fd1b21d7309eb4642a1282cdb84fdc310ed7 | /run.py | 3fbfced99469e6821a372b7f16e329bafbbfa07a | []
| no_license | https://github.com/lowysole/bot_carnaval | 63670990a6f0a7b78ce116162b875edcb7874c5c | 566a0dd8d72c2c1f42fa483e28f0abaed2fe41c5 | refs/heads/master | 2020-12-14T11:05:44.286685 | 2020-02-10T20:36:45 | 2020-02-10T20:36:45 | 234,722,083 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
"""
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
import argparse
from telegram import (Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove,
InlineKeyboardMarkup,InlineKeyboardButton)
from telegram.ext import (Updater, CommandHandler,
MessageHandler, Handler, Filters,
ConversationHandler, CallbackQueryHandler)
from backend import settings, easter
from backend import menus as m
from backend import emojis as emoji
from backend.database import query as q
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def inici(update, context):
user = update.message.chat
logger.info("User {}: {} {} entering to the BOT".format(
user.username, user.first_name, user.last_name))
full_name = str(user.first_name) + ' ' + str(user.last_name)
db = q.Query(settings.DB_FILE)
    db.get_or_create_user(user.username, full_name)
    db.close()
update.message.reply_text(
'{} Benvinguts al BOT de Sa Majestat,'
'Reina del Carnestoltes de Tàrrega! {} \n\n'
'Aquest BOT et donarà tota la informació necessària per estar '
'informat de tot el que passa a Tàrrega.\n\n'
'{} Tecleja /menu per accedir al menú sempre que vulguis\n'
'{} Envia /tancar per parar de parlar amb mi.\n\n'.format(
emoji.festa, emoji.festa, emoji.carpeta, emoji.creu))
menu(update, context)
def start(update, context):
inici(update, context)
def menu(update, context):
try:
chat_id = update.callback_query.message.chat_id
    except AttributeError:
chat_id = update.message.chat_id
bot = context.bot
bot.send_message(chat_id=chat_id,
text=main_menu_message(),
reply_markup=main_menu_keyboard())
def tancar(update, context):
user = update.message.chat.username
logger.info("User %s canceled the conversation.", user)
update.message.reply_text(
'{} Adeu! Si vols tornar a parlar amb mi, tecleja /inici'.format(
emoji.adeu), reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
def unknown_command(update, context):
context.bot.send_message(
chat_id=update.effective_chat.id,
text="{} Ho sento, no t'he entès.\n\n"
"{} Tecleja /menu per accedir al menú sempre que vulguis\n"
"{} Envia /tancar per parar de parlar amb mi.\n\n".format(
emoji.think, emoji.carpeta, emoji.creu))
def chat_message(update, context):
text = update.message.text.lower()
bot = context.bot
answer_text = easter.message_answer(text)
if answer_text[0] == 'text':
bot.send_message(chat_id=update.message.chat_id,
text=answer_text[1])
elif answer_text[0] == 'photo':
bot.send_photo(chat_id=update.message.chat_id,
photo=open(answer_text[1], 'rb'),
caption=answer_text[2])
elif answer_text[0] == 'audio':
bot.send_audio(chat_id=update.message.chat_id,
audio=open(answer_text[1], 'rb'))
# Menus
def main_menu(update, context):
query = update.callback_query
bot = context.bot
bot.edit_message_text(chat_id=query.message.chat_id,
message_id=query.message.message_id,
text=main_menu_message(),
reply_markup=main_menu_keyboard())
def programa_fisic(update, context):
query = update.callback_query
bot = context.bot
bot.answerCallbackQuery(callback_query_id=update.callback_query.id,
text="Enviant Programa...")
bot.send_document(chat_id=query.message.chat_id,
document=open('./backend/files/programa.pdf', 'rb'))
menu(update, context)
def programa_online(update, context):
    query = update.callback_query
bot = context.bot
bot.send_message(chat_id=query.message.chat_id,
text="{} Tecleja /programa per consultar "
"per dia i hora els "
"esdeveniments propers d'aquest Carnestoltes.\n\n".format(
emoji.date))
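# /programa conversation flow (presumably wired into a ConversationHandler further
# down in main): state 0 asks for the day, state 1 asks for the hour, and
# program_query then prints the matching events and ends the conversation.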
def program_day(update, context):
reply_keyboard = [['Dijous 20'],['Divendres 21'],
['Dissabte 22'],['Diumenge 23']]
update.message.reply_text(
'{} Quin dia de la setmana vols consultar?'.format(emoji.lupa),
reply_markup=ReplyKeyboardMarkup(reply_keyboard,
one_time_keyboard=True))
return 0
def program_hour(update, context):
reply_keyboard = [['0', '1', '2', '3' ,'4' ,'5'],
['6', '7', '8', '9' ,'10' ,'11'],
['12', '13', '14', '15' ,'16' ,'17'],
['18', '19', '20', '21' ,'22' ,'23']]
db = q.Query(settings.DB_FILE)
db.add_tmp_day(update.message.chat_id,
update.message.text)
update.message.reply_text(
"{} Quina hora vols consultar?".format(emoji.lupa),
reply_markup=ReplyKeyboardMarkup(
reply_keyboard, one_time_keyboard=True))
db.close()
return 1
def program_query(update, context):
query = update.callback_query
bot = context.bot
db = q.Query(settings.DB_FILE)
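    # program_hour stashed the chosen day in the DB keyed by chat id; recover it
    # here and combine it with the hour the user just sent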
args = []
day = db.get_tmp_day(update.message.chat_id)[0][0]
args.append(day)
args.append(update.message.text)
text = ''
for row in db.process_query(args):
template= "*{}*\n" \
"{} {} | {}\n" \
"{} _{}_\n\n".format(
row[0], emoji.date, row[1], row[2], emoji.ubi, row[3])
text = text + template
if text == '':
text = "{} No s'ha trobat cap esdeveniment" \
"amb aquest horari.".format(emoji.creu)
bot.send_message(chat_id=update.message.chat_id,
text=text,
parse_mode='Markdown')
bot.send_message(chat_id=update.message.chat_id,
                     text='{} Tecleja /programa per tornar '
'a consultar un esdeveniment\n'
'{} Tecleja /menu per accedir al menú'.format(
emoji.carpeta, emoji.date))
db.close()
return ConversationHandler.END
def cartell_menu(update, context):
query = update.callback_query
bot = context.bot
bot.answerCallbackQuery(callback_query_id=update.callback_query.id,
text="Enviant cartell...")
try:
bot.send_photo(chat_id=query.message.chat_id,
photo=open('./backend/files/cartell.png', 'rb'),
timeout=60)
except:
bot.send_message(chat_id=query.message.chat_id,
text='Hem tingut problemes de connexió.\n'
'Torna-ho a provar més tard')
menu(update, context)
def video_menu(update, context):
query = update.callback_query
bot = context.bot
bot.answerCallbackQuery(callback_query_id=update.callback_query.id,
text="Enviant vídeo...")
bot.send_video(chat_id=query.message.chat_id,
video=open('./backend/files/video.mp4', 'rb'))
menu(update, context)
def link_menu(update, context):
query = update.callback_query
bot = context.bot
bot.send_message(chat_id=query.message.chat_id,
text="{} *INSCRIPCIONS*\n\n"
"{} INSCRIPCIONS OBERTES AL "
"*SOPAR DEL CARNESTOLTES*, no et quedis sense!\n"
"http://bit.ly/SoparCARNA2020\n\n"
"{} INSCRIPCIONS OBERTES A LA "
"*RUA DE COMPARSES DEL CARNESTOLTES*\n"
"http://bit.ly/RuaComparsesCarnestoltes2020\n\n"
"{} Tota la informació i la resta d'inscripcions\n"
"http://www.carnestoltestarrega.cat/".format(
emoji.date, emoji.sopar,
emoji.dancer, emoji.info),
parse_mode='Markdown')
menu(update, context)
def xarxes_menu(update, context):
query = update.callback_query
bot = context.bot
bot.send_message(chat_id=query.message.chat_id,
text="{} *INSTAGRAM*\n"
"https://www.instagram.com/carnavaltarrega\n\n"
"{} *FACEBOOK*\n"
"https://www.facebook.com/CarnestoltesTarrega\n\n"
"{} *TWITTER*\n"
"https://twitter.com/carnavaltarrega\n\n"
"{} *WEB*\n"
"www.carnestoltestarrega.cat\n\n".format(
emoji.camera, emoji.book,
emoji.twitter, emoji.web),
parse_mode='Markdown')
menu(update, context)
def answer_menu(update, context):
query = update.callback_query
bot = context.bot
bot.send_message(chat_id=query.message.chat_id,
text='Holis, com estem?\n'
'He estat programada per respondre les teves '
'inquietuds més Targarines. Així que aifina bé '
'les teves preguntes!')
# Messages
def main_menu_message():
return '{} Que vols consultar?'.format(emoji.lupa)
# Keyboards
def main_menu_keyboard():
main_menu = [[InlineKeyboardButton('Cartell',
callback_data='cartell'),
InlineKeyboardButton('Programa Físic',
callback_data='programa_f')],
[InlineKeyboardButton('Vídeo',
callback_data='video'),
InlineKeyboardButton('Programa Online',
callback_data='programa_o')],
[InlineKeyboardButton('Inscripcions',
callback_data='link'),
InlineKeyboardButton('Xarxes Socials',
callback_data='xarxes')],
[InlineKeyboardButton('Conversa amb mi',
callback_data='answer')]]
return InlineKeyboardMarkup(main_menu, one_time_keyboard=True)
def back_menu_keyboard():
back_menu = [[InlineKeyboardButton('Endarrere',
callback_data='main_menu')]]
return InlineKeyboardMarkup(back_menu)
def main(args):
token = settings.DEV_TOKEN if args.dev_bot else settings.TOKEN
# Initialize bot
bot = Bot(token)
updater = Updater(token, use_context=True)
dp = updater.dispatcher
# Start Handler
start_handler = CommandHandler('start', start)
dp.add_handler(start_handler)
inici_handler = CommandHandler('inici', inici)
dp.add_handler(inici_handler)
# Menu Handler
inici_handler = CommandHandler('menu', menu)
dp.add_handler(inici_handler)
dp.add_handler(CallbackQueryHandler(main_menu, pattern='main_menu'))
dp.add_handler(CallbackQueryHandler(programa_fisic, pattern='programa_f'))
dp.add_handler(CallbackQueryHandler(programa_online, pattern='programa_o'))
dp.add_handler(CallbackQueryHandler(cartell_menu, pattern='cartell'))
dp.add_handler(CallbackQueryHandler(video_menu, pattern='video'))
dp.add_handler(CallbackQueryHandler(link_menu, pattern='link'))
dp.add_handler(CallbackQueryHandler(xarxes_menu, pattern='xarxes'))
dp.add_handler(CallbackQueryHandler(answer_menu, pattern='answer'))
# Program Handler
program_handler = ConversationHandler(
entry_points=[CommandHandler('programa', program_day)],
states={
0: [MessageHandler(Filters.text, program_hour)],
1: [MessageHandler(Filters.text, program_query)],
},
fallbacks=[CommandHandler('menu', menu)]
)
dp.add_handler(program_handler)
# Finish
finish_handler = CommandHandler('tancar', tancar)
dp.add_handler(finish_handler)
# Chat message
chat_message_handler = MessageHandler(Filters.text, chat_message)
dp.add_handler(chat_message_handler)
# Incorrect command
unknown_command_handler = MessageHandler(Filters.command, unknown_command)
dp.add_handler(unknown_command_handler)
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
parser= argparse.ArgumentParser()
parser.add_argument('--dev-bot', dest='dev_bot', action='store_true',
help='Execute dev BOT')
args = parser.parse_args()
main(args)
| UTF-8 | Python | false | false | 13,554 | py | 10 | run.py | 8 | 0.586215 | 0.580674 | 0 | 346 | 38.121387 | 80 |
zamerman/Udacity-AI-Programming | 12,008,728,589,092 | 5b8040c69bc4163979e7adacb243783d5efc637d | 9d7e4608c86f3afba2b2002fb740f8126554a502 | /Image Classifier Project/get_input_args_T.py | 020d56d6e87cac2600639a847240906afa9b016b | [
"MIT"
]
| permissive | https://github.com/zamerman/Udacity-AI-Programming | 75f9c76a81f0ddb11de9d0680c7491baf7c52f71 | 6537f273fb00531d448330c1c85886d86e1161d2 | refs/heads/master | 2020-05-16T22:56:37.815592 | 2019-04-25T14:18:40 | 2019-04-25T14:18:40 | 183,350,504 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# PROGRAMMER: Zachary Amerman
# DATE CREATED: April 23, 2019
# REVISED DATE:
# PURPOSE: Creates a function that retrieves command line inputs from the user.
# Command Line Arguments:
# 1. Image Directory as data_dir
# 2. Save Directory as --save_dir with default value 'checkpoints'
# 3. Network Architecture as --arch with default value 'vgg11'
# 4. Switch for GPU calculations as --gpu, default value is 'cpu' and when placed in the line changes it to 'cuda'
# 5. Learning rate as --learning_rate with default value 0.01
# 6. Hidden units as --hidden_units with default value 512
# 7. Epochs as --epochs with default value 5
# Import argparse library
import argparse
def get_input_args():
"""
Enables command line arguments in order to control directories, network architectures,
choose your device, and control training variables.
Parameters
None
Returns
parser.parse_args() - a data structure that stores the command line arguments object
"""
# Setup command line interface and retrieve arguments
parser = argparse.ArgumentParser(description='''Create and train a new network on a dataset and save the model as a checkpoint''')
parser.add_argument('data_dir', type=str, help='The path to the training and validation datasets')
parser.add_argument('--save_dir', dest='save_dir', default='checkpoints', help='The directory where checkpoints are stored')
parser.add_argument('--arch', dest='arch', default='vgg11', help='''The pretrained architecture to use. Possible values are "alexnet", "vgg11", "squeezenet1_0", and "densenet121"''')
parser.add_argument('--learning_rate', type=float, dest='lr', default=0.01,
help='Determines model learning rate')
parser.add_argument('--hidden_units', type=int, dest='hidden_units', default=512,
help='Determines number of hidden units in hidden layer')
parser.add_argument('--epochs', dest='epochs', type=int, default=5,
help='Determines number of epochs over which the model is trained')
parser.add_argument('--gpu', dest='device', action='store_const', const='cuda', default='cpu',
help='Determines if architecture uses gpu or not')
# Store command line arguments
return parser.parse_args() | UTF-8 | Python | false | false | 2,380 | py | 12 | get_input_args_T.py | 11 | 0.684874 | 0.668908 | 0 | 42 | 55.690476 | 186 |
camsterguy/locksmith | 4,415,226,410,339 | b02f66673da67e433cc5de32fa41c96e87048a40 | fcc5bbb8ae4e83b50e7d0bc260b39dab45da1bae | /main.py | dfb58872bd7f62faac2593fb74c1ede3a4f3b99f | []
| no_license | https://github.com/camsterguy/locksmith | c8750b5b03ea407687fd7c7f7e62e450d30eb915 | 6119e391a50c6da41cf255342725b886e79b3d01 | refs/heads/main | 2023-03-19T14:14:20.318431 | 2021-03-08T14:13:10 | 2021-03-08T14:13:10 | 339,189,521 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import criticalFunctions as crit
import statFunctions as stat
from datetime import date
import statistics
today = date.today()
month = str((today.strftime("%m"))).lower()
year = (today.strftime("%Y"))
day = (today.strftime("%d"))
scheduleURL = "https://www.basketball-reference.com/leagues/NBA_"+year+"_games-"+month+".html"
crit.get_Spreads(year, month, day)
from criticalFunctions import spreads
crit.get_Games(year, month, day)
from criticalFunctions import hometeams, awayteams
for team in hometeams:
stat.get_SLine(team)
for team in awayteams:
stat.get_SLine(team)
from statFunctions import teamStats
print("Slate:")
count = 0
for spread in spreads:
try:
print(awayteams[count],(stat.get_BRStats(awayteams[count])+(statistics.mean(teamStats[awayteams[count]])))/2,spreads[count],"@",hometeams[count],(stat.get_BRStats(hometeams[count])+(statistics.mean(teamStats[hometeams[count]])))/2)
count += 1
except:
print("Complete.")
| UTF-8 | Python | false | false | 958 | py | 6 | main.py | 5 | 0.735908 | 0.731733 | 0 | 38 | 24.052632 | 233 |
bigheart7697/SE-backend | 11,982,958,784,138 | ddbd7c7b7a5c561531a51bc66187e40873e2e472 | 0bb0a899272a39a68eae0682cd8ff3d815cbf7b5 | /apps/accounts/serializers.py | dd9786bb4f60dbecc17ac87d904789915bde6343 | []
| no_license | https://github.com/bigheart7697/SE-backend | c6df8dfa105f34e7efe47d2bc36e4325276a7094 | 107fedef1df9dd7ab6f0c59515467f44d73886b1 | refs/heads/master | 2020-12-23T21:18:00.092467 | 2020-02-04T00:04:15 | 2020-02-04T00:04:15 | 237,277,022 | 0 | 0 | null | false | 2021-06-04T22:24:54 | 2020-01-30T18:24:09 | 2020-02-04T00:04:31 | 2021-06-04T22:24:51 | 60 | 0 | 0 | 3 | Python | false | false | from drf_writable_nested import WritableNestedModelSerializer
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from .models import CustomUser, Student, Advisor, Field
class UserSerializer(serializers.ModelSerializer):
@staticmethod
def get_token(obj):
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(obj)
token = jwt_encode_handler(payload)
return token
def create(self, validated_data):
password = validated_data.pop('password', None)
user = self.Meta.model(**validated_data)
if password is not None:
user.set_password(password)
user.save()
return user
def update(self, instance, validated_data):
instance.username = validated_data.get('username', instance.username)
instance.email = validated_data.get('email', instance.email)
class Meta:
model = CustomUser
fields = ('id', 'username', 'email', 'password', 'is_advisor')
read_only_fields = ['id', 'is_advisor']
class FieldSerializer(serializers.ModelSerializer):
class Meta:
model = Field
fields = ['id', 'name']
read_only_fields = ['id', 'name']
class AdvisorPublicSerializer(WritableNestedModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Advisor
fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'education', 'record')
read_only_fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'education', 'record')
depth = 1
class AdvisorEditSerializer(WritableNestedModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Advisor
fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'education', 'record')
depth = 1
class AdvisorSerializer(WritableNestedModelSerializer):
user = UserSerializer()
def create(self, validated_data):
user = validated_data.pop('user')
password = user['password']
user = CustomUser.objects.create(**user)
if password is not None:
user.set_password(password)
validated_data['user'] = user
advisor = Advisor.objects.create(**validated_data)
user.is_advisor = True
user.save()
advisor.user = user
advisor.user.set_password(password)
advisor.save()
return advisor
class Meta:
model = Advisor
fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'education', 'record')
depth = 1
class StudentSerializer(WritableNestedModelSerializer):
user = UserSerializer(many=False)
# def create(self, validated_data):
# user = validated_data.pop('user')
# fields_data = validated_data.pop('fields')
# password = user['password']
# user = CustomUser.objects.create(**user)
# if password is not None:
# user.set_password(password)
# fields = []
# for field in fields_data:
# field, created = Field.objects.get_or_create(name=field['name'])
# fields.append(field)
# validated_data['user'] = user
# student = Student.objects.create(**validated_data)
# student.fields.set(fields)
# user.save()
# student.user = user
# student.user.set_password(password)
# student.save()
# return student
class Meta:
model = Student
fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'grade')
depth = 1
class StudentEditSerializer(WritableNestedModelSerializer):
user = UserSerializer(read_only=True)
# fields_of_interest = FieldSerializer()
# def update(self, instance, validated_data):
# fields_data = validated_data.pop('fields_of_interest')
# fields = []
# for field in fields_data:
# field, created = Field.objects.get_or_create(name=field['name'])
# fields.append(field)
# instance.fields.set(fields)
# instance.cv = validated_data['cv']
# instance.first_name = validated_data['first_name']
# instance.last_name = validated_data['last_name']
# instance.age = validated_data['age']
# instance.gender = validated_data['gender']
# instance.country = validated_data['country']
# instance.city = validated_data['city']
# instance.address = validated_data['address']
# instance.phone_number = validated_data['phone_number']
# instance.photo = validated_data['photo']
# instance.save()
# return instance
class Meta:
model = Student
fields = ('user', 'name', 'age', 'gender', 'country', 'city',
'phone_number', 'grade', 'school_name', 'last_grade_score', 'average_grade_score',
'fields_of_interest')
depth = 1
| UTF-8 | Python | false | false | 5,188 | py | 13 | serializers.py | 11 | 0.602737 | 0.601773 | 0 | 152 | 33.125 | 100 |
crud3306/python | 10,565,619,581,678 | d9a5ed3295ea53dd496259fd40e8adbf0f7aaf44 | 5998f2643d797ae7469d098f1adbd95d6e5e77d8 | /99-Training/yield/yield02.py | 39e894c7e05e6a6286e665f5a4e7a9923bde31b2 | []
| no_license | https://github.com/crud3306/python | c8bfca2d0c80385a603fec27558d36ea686707d1 | df18d0d80fe20060e37d98f525393ac6b9a0d39f | refs/heads/master | 2023-01-13T17:52:40.460875 | 2020-11-19T11:46:37 | 2020-11-19T11:46:37 | 144,098,797 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def fun():
for i in range(20):
x=yield i
print('good',x)
if __name__ == '__main__':
a=fun()
print(a)
x = a.next()
print(x)
x=a.send(5)
print(x) | UTF-8 | Python | false | false | 202 | py | 40 | yield02.py | 24 | 0.524752 | 0.50495 | 0 | 15 | 12.533333 | 26 |
brainslushee/branch_creator | 11,897,059,425,422 | dfa61ce4d3612926493faacb88587ebcd34af038 | d3f4257a1d8af87a3df4cf2a434370ac16702cad | /imdb_top_250.py | 3cd2d1cbfe545bf3cb76909e07ec7a4c3e455ad8 | []
| no_license | https://github.com/brainslushee/branch_creator | 9e5e195be730de85e61db8509feb1353fea79054 | 5daac9f65ae59cd969fb27bdf5d5457265de43f4 | refs/heads/master | 2021-01-20T13:38:03.254634 | 2017-05-10T00:04:54 | 2017-05-10T00:04:54 | 90,509,996 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import re
from bs4 import BeautifulSoup
url = "http://www.imdb.com/chart/top"
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
titles = soup.find_all("td", {"class": "titleColumn"})
ratings = soup.find_all("td", {"class": "ratingColumn imdbRating"})
for title in titles:
print(title.text)
for rating in ratings:
print(rating.text)
| UTF-8 | Python | false | false | 383 | py | 1 | imdb_top_250.py | 1 | 0.699739 | 0.697128 | 0 | 20 | 18.15 | 67 |
mohisen/zdotfiles | 14,018,773,276,211 | dece810d9dd04494f5034ea75da8aba7ec5c901e | ce79f454483beef0007337219d9415e5feabb0cc | /1/.zsh/ipy_w.py | 670d35b5fc6faa5a7adbf61ed9e48fa13049c0e3 | []
| no_license | https://github.com/mohisen/zdotfiles | 5be5417dca38f93f44443566737effc0cd0d3514 | 8c127d0e9ffdc43f9bc12a9d79a1690df063c2b4 | refs/heads/master | 2021-01-01T17:02:58.296952 | 2017-01-30T02:08:44 | 2017-01-30T02:08:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: w.py
Author:
Email: 0
Github: 0
Description: /home/ahmed/.config/ipython/profile_z/startup/w.py
"""
import os, sys, re
xarab = u"احمد"
print(xarab)
| UTF-8 | Python | false | false | 212 | py | 932 | ipy_w.py | 451 | 0.649038 | 0.634615 | 0 | 15 | 12.8 | 63 |
superdesk/superdesk-planning | 9,431,748,194,153 | d0a705cdccb2c2462cea3a950c8326c9a01c5f2b | a823235ebebc3714ba289479a4fe7a10f65e7208 | /server/planning/planning/planning_cancel.py | ebbebe78f6fa1955ccf903d4b0ba710c4df99e0f | []
| no_license | https://github.com/superdesk/superdesk-planning | 6b64e2edd08a6b67f96cb12ca27141e1df7ea1a5 | c45392954498925b1b414ff6b49296817ea02981 | refs/heads/develop | 2023-08-17T01:19:52.690841 | 2023-08-10T14:04:17 | 2023-08-10T14:04:17 | 69,333,814 | 15 | 41 | null | false | 2023-09-11T12:53:25 | 2016-09-27T08:07:26 | 2023-03-24T11:54:13 | 2023-09-11T12:53:23 | 13,614 | 11 | 35 | 4 | TypeScript | false | false | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk import get_resource_service
from superdesk.services import BaseService
from superdesk.notification import push_notification
from superdesk.errors import SuperdeskApiError
from apps.archive.common import get_user, get_auth
from eve.utils import config
from copy import deepcopy
from .planning import PlanningResource, planning_schema
from planning.common import (
WORKFLOW_STATE,
ITEM_STATE,
update_post_item,
ITEM_ACTIONS,
is_valid_event_planning_reason,
ASSIGNMENT_WORKFLOW_STATE,
get_coverage_status_from_cv,
)
from flask import request
planning_cancel_schema = deepcopy(planning_schema)
planning_cancel_schema["reason"] = {
"type": "string",
"nullable": True,
}
planning_cancel_schema["cancel_all_coverage"] = {"type": "boolean", "nullable": True}
class PlanningCancelResource(PlanningResource):
url = "planning/cancel"
resource_title = endpoint_name = "planning_cancel"
datasource = {"source": "planning"}
resource_methods = []
item_methods = ["PATCH"]
privileges = {"PATCH": "planning_planning_management"}
schema = planning_cancel_schema
merge_nested_documents = True
class PlanningCancelService(BaseService):
def on_update(self, updates, original):
if not is_valid_event_planning_reason(updates, original):
raise SuperdeskApiError.badRequestError(message="Reason is required field.")
def update(self, id, updates, original):
user = get_user(required=True).get(config.ID_FIELD, "")
session = get_auth().get(config.ID_FIELD, "")
event_cancellation = request.view_args.get("event_cancellation")
cancel_all_coverage = updates.pop("cancel_all_coverage", False)
event_reschedule = updates.pop("event_reschedule", False)
coverage_cancel_state = get_coverage_status_from_cv("ncostat:notint")
coverage_cancel_state.pop("is_active", None)
ids = []
updates["coverages"] = deepcopy(original.get("coverages"))
coverages = updates.get("coverages") or []
reason = updates.pop("reason", None)
planning_service = get_resource_service("planning")
for coverage in coverages:
if coverage["workflow_status"] not in [
WORKFLOW_STATE.CANCELLED,
ASSIGNMENT_WORKFLOW_STATE.COMPLETED,
]:
ids.append(coverage.get("coverage_id"))
planning_service.cancel_coverage(
coverage,
coverage_cancel_state,
coverage.get("workflow_status"),
None,
reason,
event_cancellation,
event_reschedule,
)
if cancel_all_coverage:
item = None
if len(ids) > 0:
item = self.backend.update(self.datasource, id, updates, original)
push_notification(
"coverage:cancelled",
planning_item=str(original[config.ID_FIELD]),
user=str(user),
session=str(session),
reason=reason,
coverage_state=coverage_cancel_state,
etag=item.get("_etag"),
ids=ids,
)
return item if item else self.find_one(req=None, _id=id)
self._cancel_plan(updates, reason)
item = self.backend.update(self.datasource, id, updates, original)
push_notification(
"planning:cancelled",
item=str(original[config.ID_FIELD]),
user=str(user),
session=str(session),
reason=reason,
coverage_state=coverage_cancel_state,
event_cancellation=event_cancellation,
)
return item
def _cancel_plan(self, updates, reason):
updates["state_reason"] = reason
updates[ITEM_STATE] = WORKFLOW_STATE.CANCELLED
def on_updated(self, updates, original):
lock_action = original.get("lock_action")
allowed_actions = [
ITEM_ACTIONS.EDIT,
ITEM_ACTIONS.PLANNING_CANCEL,
ITEM_ACTIONS.CANCEL_ALL_COVERAGE,
]
if (
request.view_args.get("event_cancellation")
or lock_action in allowed_actions
or self.is_related_event_completed(updates, original)
):
update_post_item(updates, original)
def is_related_event_completed(self, updates, original):
if (
len(original.get("coverages")) > 0
and len(updates.get("coverages") or []) > 0
and not original["coverages"][0]["planning"].get("workflow_status_reason")
and updates["coverages"][0]["planning"].get("workflow_status_reason") == "Event Completed"
):
return True
return False
| UTF-8 | Python | false | false | 5,199 | py | 1,206 | planning_cancel.py | 549 | 0.608194 | 0.605501 | 0 | 150 | 33.66 | 102 |
dp92987/pactice-python | 936,302,905,328 | a3fcc6b7825d3ff6c7f7c9d7a055976ad77dd923 | a1604f9aef6764206f77a323a911ab4716b90a55 | /hackerrank.com/Python/find-a-string.py | ce34db379b50ec898c1618994a32067ea57b1491 | []
| no_license | https://github.com/dp92987/pactice-python | cb4d274582fed7831fd64d9c2fb80c7c103650fc | 45ffde4b2614a9d8bd9c3a202778ab48a5ccfcbb | refs/heads/master | 2021-04-23T15:36:17.386586 | 2020-12-31T00:12:16 | 2020-12-31T00:12:16 | 249,936,669 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://www.hackerrank.com/challenges/find-a-string/problem
def count_substring_loop(string, sub_string):
counter = 0
n = 0
for i in range(len(string)-len(sub_string)+1):
print(string[n:len(sub_string)+n], sub_string)
if string[n:len(sub_string)+n] == sub_string:
counter += 1
n += 1
return counter
def count_substring(string, sub_string, counter=0):
if string[:len(sub_string)] == sub_string:
counter += 1
if len(string) == len(sub_string):
return counter
else:
return count_substring(string[1:], sub_string, counter)
if __name__ == '__main__':
string = input().strip()
sub_string = input().strip()
count = count_substring(string, sub_string)
print(count) | UTF-8 | Python | false | false | 768 | py | 42 | find-a-string.py | 40 | 0.605469 | 0.595052 | 0 | 28 | 26.464286 | 63 |
sushi-boy/stcode-redisconf19 | 13,469,017,480,515 | a5b37c63f33b589509d5836cf147894bed8ec0e2 | 9f64e933ece1ae6d5226f14af0fc9e1748008da6 | /st_insert.py | 584d375a129d032eab26a5257679c67093e21829 | []
| no_license | https://github.com/sushi-boy/stcode-redisconf19 | 89ef20578be0882dcb7dd72225c45edce807070f | 6033147e79d613f940054e3d1db9639e89e0080f | refs/heads/master | 2020-04-19T14:52:23.138874 | 2019-03-26T06:34:06 | 2019-03-26T06:34:06 | 168,256,758 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import redis
import glob
import csv
import random
import sys
import copy
from operator import mul
from functools import reduce
try:
import mmh3
except ImportError:
import pymmh3.pymmh3 as mmh3
"""
Initialize parameters
"""
#Maximum number of precision bits
max_precision = 96
#Where you split the ST-code into PRE-code and SUF-code
split_precision = 45
#The precision that you want to search in default
search_precision = 48
#Number of nodes
all_nodes = 2
#set IP address of redis
REDIS_SRV_IPS = [
'ip1',
'ip2'
]
#set port number
PORT = 6379
#set input data path
DATAPATH = 'set path of input data'+'*'
#max and minimum of latitude, longitude, and time
lat_maxmin = (-90.0, 90.0)
lon_maxmin = (-180.0, 180.0)
time_maxmin = (0.0, 2018304000.0)
#function for creating morton-code for each dimension
def create_bin(input_val, maxmin, precision):
tmp = (input_val - maxmin[0]) / (maxmin[1] - maxmin[0]) * (2**precision)
return format(int(tmp), '0' + str(precision) + 'b')
#merge all morton-code obtained from each dimension
def merge_bin(*binaries):
return ''.join((''.join(tuple) for tuple in zip(*binaries)))
#validations
def validate_latitude(latitude):
if(latitude < lat_maxmin[0] or latitude > lat_maxmin[1]):
print("latitude is not valid")
sys.exit()
def validate_longitude(longitude):
if(longitude < lon_maxmin[0] or longitude > lon_maxmin[1]):
print("longitude is not valid")
sys.exit()
def validate_time(time):
if(time < time_maxmin[0] or time > time_maxmin[1]):
print("time is not valid")
sys.exit()
#create ST-code from longitude, latitude, and time
def create_longitude_binary_code(v, exp):
return create_bin(v, lon_maxmin, exp)
def create_latitude_binary_code(v, exp):
return create_bin(v, lat_maxmin, exp)
def create_time_binary_code(v, exp):
return create_bin(v, time_maxmin, exp)
def create_morton_code(lon_bin_code, lat_bin_code, time_bin_code, base_exp):
return merge_bin(lon_bin_code, lat_bin_code, time_bin_code)
#split ST-code depending on split_precision
def set_split_precision(morton_code, split_precision):
pre_code = morton_code[:split_precision]
suf_code = morton_code[split_precision:]
return pre_code, suf_code
"""
@prams:
longitude in degrees(DOUBLE),
latitude in degrees(DOUBLE),
time in timestamp(INT),
value(STRING)
"""
#insert ST-code to redis by using sorted set
def insert(longitude, latitude, time, value):
validate_latitude(latitude)
validate_longitude(longitude)
validate_time(time)
#calculate base length of bit code by dividing max precision by 3(number of dimensions)
base_exp = max_precision/3
lon_exp = base_exp
lat_exp = base_exp
time_exp = base_exp
#create ST-code
lon_bin_code = create_longitude_binary_code(longitude, int(lon_exp))
lat_bin_code = create_latitude_binary_code(latitude, int(lat_exp))
time_bin_code = create_time_binary_code(time, int(time_exp))
morton_code = create_morton_code(lon_bin_code, lat_bin_code, time_bin_code, int(base_exp))
#split ST-code
PRE_code, SUF_code = set_split_precision(morton_code, split_precision)
node_number = 0
print("Dest:"+REDIS_SRV_IPS[node_number])
pool = redis.ConnectionPool(host=REDIS_SRV_IPS[node_number], port=PORT, db=0)
r = redis.StrictRedis(connection_pool=pool)
key = str(PRE_code)
print("set key:"+key+" sorted-key:"+str(int(SUF_code,2))+" value:"+value)
dict = {}
dict[value] = int(SUF_code,2)
# ZADD Changed since v3.2.0 !
r.zadd(key, dict)
return morton_code
#decode ST-code for confirmation
def decoder(code):
lng_interval = (-180.0, 180.0)
lat_interval = (-90.0, 90.0)
time_interval = (0.0, 2018304000.0)
precision = len(code)
for i in range(precision):
remain = i % 3
if remain == 0:
mid_lng_interval = (lng_interval[1] + lng_interval[0]) / 2.0
if code[i] == '0':
lng_interval = (lng_interval[0], mid_lng_interval)
else:
lng_interval = (mid_lng_interval, lng_interval[1])
elif remain == 1:
mid_lat_interval = (lat_interval[1] + lat_interval[0]) / 2.0
if code[i] == '0':
lat_interval = (lat_interval[0], mid_lat_interval)
else:
lat_interval = (mid_lat_interval, lat_interval[1])
else:
mid_time_interval = (time_interval[1] + time_interval[0]) / 2.0
if code[i] == '0':
time_interval = (time_interval[0], mid_time_interval)
else:
time_interval = (mid_time_interval, time_interval[1])
lat_val = (lat_interval[0] + lat_interval[1]) / 2.0
lng_val = (lng_interval[0] + lng_interval[1]) / 2.0
time_val = (time_interval[0] + time_interval[1]) / 2.0
return lng_val, lat_val, time_val
if __name__ == "__main__":
#test
morton_code = insert(135, 35, 5, '[value]test,135,35,5')
print(decoder(morton_code))
| UTF-8 | Python | false | false | 5,107 | py | 5 | st_insert.py | 4 | 0.634619 | 0.607402 | 0 | 167 | 29.413174 | 94 |
robertnowell/algos | 18,631,568,135,377 | 9873bed5bd2429cac9852726cc259f97c203c743 | 0c532599478a29b09b4f158f416df6dbc464cfbc | /recursion/dynamic/contiguous.py | 211c6e474e2af253739a0de8252e5afaa04c6e52 | []
| no_license | https://github.com/robertnowell/algos | 33d35dc2e453e2ecb994381bdf89ed084a2070b1 | 375e1015958d6d0edf05f285ff2aae3713d53a6c | refs/heads/master | 2021-06-28T22:52:30.516697 | 2020-04-05T16:45:26 | 2020-04-05T16:45:26 | 98,332,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def subsequenceSum(a, sum, start, end, res):
if start > end:
print sum, res
return sum
if start == end:
print sum, res
return max(sum + a[start], sum)
news = start+1
newe = end-1
sval = a[start]
endval = a[end]
return max(subsequenceSum(a, sum+sval, news, newe, res + [sval]), max(subsequenceSum(a, sum+endval, news, newe, res+[endval]), subsequenceSum(a, sum+sval+endval, news, newe, res+ [sval, endval])))
a = [1, -8, 3, -2, 4, -10]
# print(subsequenceSum(a, 0, 0, len(a)-1, []))
def contig(a):
start = 0
end = len(a)
bestStart = start
bestEnd = end
m = sumBetween(a, start, end)
for i in range(len(a)):
for j in range(i, len(a)):
newM = sumBetween(a, i, j)
if newM > m:
bestStart = i
bestEnd = j
m = newM
print(bestStart, bestEnd)
return m
def sumBetween(a, start, end):
s = 0
for i in range(start, end):
s += a[i]
return s
a = [1, -8, 3, -2, 4, -10]
print(contig(a))
| UTF-8 | Python | false | false | 927 | py | 93 | contiguous.py | 88 | 0.606257 | 0.583603 | 0 | 44 | 20.068182 | 197 |
nicholasgdml/exercicioscursoemvideo-python | 15,547,781,625,282 | 98135016088964b0d3dac17dd029e145c25e25e9 | 39d65ea505801f821ef800ba4e733872f7b361b3 | /ex_python_mundo3/ex088.py | d7f4405ce2ffefa1a9131c984ae13d80b7e75328 | []
| no_license | https://github.com/nicholasgdml/exercicioscursoemvideo-python | ac1183be8b94f12ec02241077fcc29350984d942 | b184a1bbafe54984abf1933a81e0b3c1249eb990 | refs/heads/main | 2023-08-29T03:54:27.985080 | 2021-09-27T01:16:08 | 2021-09-27T01:16:08 | 379,691,216 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''Exercício Python 088: Faça um programa que ajude um jogador da MEGA SENA a criar palpites.
O programa vai perguntar quantos jogos serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo,
cadastrando tudo em uma lista composta.'''
from random import randint
lista = list()
temp = list()
quantiaListas = int(input('Quantos jogos você quer que eu sorteie?: '))
for j in range(0, quantiaListas):
for n in range(0, 6):
temp.append(randint(1, 60))
lista.append(temp[:])
temp.clear()
print(f'Sorteando {quantiaListas} jogos!')
for j in range(0, quantiaListas):
print(f'Jogo {j + 1}: {lista[j]}') | UTF-8 | Python | false | false | 639 | py | 92 | ex088.py | 91 | 0.697161 | 0.673502 | 0 | 20 | 30.75 | 105 |
juzisan/zyys | 3,994,319,601,300 | e0b3ce9bb6558961aea8cab223dc79cdda4ecbea | 7d38f7d4c04e15c10aede607e7d2bfed1d59c803 | /6.web住院医师/web住院医师账号密码.py | 0099b0218c6e25f7fd23a7d190b292548834003d | []
| no_license | https://github.com/juzisan/zyys | 46089a2ceef64abf39d5341cbe925622fd1a111f | 0a8d69749fb2bc869c42d58a0278860f23dae3bf | refs/heads/master | 2023-08-18T13:00:32.700485 | 2023-08-05T14:12:54 | 2023-08-05T14:12:54 | 149,263,336 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
Created on Sat Nov 5 10:50:13 2016
@author: hello
"""
import codecs
import pandas as pd
from requests_html import HTMLSession
import random
from shuju import url_dict
from shuju import html_tou
import time
def timmer(func):
def warpper(*args,**kwargs):
print ('\033[1;32;40mstart\033[0m')
strat_time = time.time()
func()
seconds = time.time() - strat_time
m, s = divmod(seconds, 60)
print("\033[1;32;40mthe run time is %02d:%.6f\033[0m" %(m, s))
return warpper
def read_to_list(neir=None):
url_lj = url_dict['tou'] + url_dict['dl']#生成?号登陆链接
values0 =['90','95','100','105','110','115','120','125','130','135','140','145','150']
values = ['red','green','magenta','chocolate','brown','deeppink',r'#000080']
json_neir = neir[neir.find("["):neir.rfind("]")+1]
df = pd.read_json(json_neir)
df = df[['userName','loginName', 'password', 'departmentName', "subjectName"]]
hangshu = df['userName'].count()
df['字体'] = random.sample(values0 * hangshu, hangshu)
df['颜色'] = random.sample(values * hangshu, hangshu)
df['link'] = '⇒<a href="' + url_lj +df['loginName']+'&password='+df['password']+'" style="font-size:'+df['字体']+'%;color:'+df['颜色']+'">'+df['userName']+'_'+df['subjectName']+'_'+df['departmentName']+'</a> '
return df['link'].tolist()
@timmer
def main():
url_dl = url_dict['tou'] + url_dict['yy']#登陆链接
url_xy = url_dict['tou'] + url_dict['xy']#学员
url_ls = url_dict['tou'] + url_dict['ls']#老师
url_jm = url_dict['tou'] + url_dict['jm']#教秘
url_zr = url_dict['tou'] + url_dict['zr']#主任
session = HTMLSession()
s_get = session.get
r = s_get(url_dl)#登陆医院
print ('login')
#学员
r = s_get(url_xy)
textt = r.html.find('script', containing='var userGridData',first = True ).html
#搜索script 内容为var userGridData,返回第一个,再转符串
x_list = read_to_list(textt)
print ("学员:", len(x_list))
xueyuan_str = ''.join(x_list)
#老师
r = s_get(url_ls)
textt = r.html.find('script', containing='var userGridData',first = True ).html
x_list = read_to_list(textt)
print ("老师:", len(x_list))
laoshi_str = ''.join(x_list)
#教秘
r = s_get(url_jm)
textt = r.html.find('script', containing='var userGridData',first = True ).html
x_list = read_to_list(textt)
print ("教秘:", len(x_list))
jiaomi_str = ''.join(x_list)
#主任
r = s_get(url_zr)
textt = r.html.find('script', containing='var userGridData',first = True ).html
x_list = read_to_list(textt)
print ("主任:", len(x_list))
zhuren_str = ''.join(x_list)
#合并html
html_x = r'学员</th><td>'
html_l = r'''</td></tr>
<tr style="border:2px dashed;"><th>老师</th><td>'''
html_j =r'''</td></tr>
<tr style="border:2px dashed;"><th>教秘</th><td>'''
html_z =r'''</td></tr>
<tr style="border:2px dashed;"><th>主任</th><td>'''
html_wei ='''</td></tr>
</table></h5>
<pre>
</pre>
</div>
</body>
</html>
'''
html_body = ''.join([html_tou,html_x,xueyuan_str,html_l,laoshi_str,html_j,jiaomi_str,html_z,zhuren_str,html_wei])
with codecs.open('zyys.html','w','utf-8') as f2:
f2.write(html_body)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,425 | py | 19 | web住院医师账号密码.py | 15 | 0.576935 | 0.54818 | 0 | 129 | 24.341085 | 219 |
bogdanpop/BuildingAre | 12,206,297,086,856 | d446577a016198e42f4deed4811d5f8f1eef11f7 | 4a8386dd175ed8a96e4fed9da17b37d5ac520845 | /src/tethpy/core/models/errors.py | b5cb3e2a89429f4a4a8648605804e300294611ce | []
| no_license | https://github.com/bogdanpop/BuildingAre | c01ff59dacd1b5ce0cc54b63c923e6112fd50eb1 | c75300f6dd9a75a8aec3a3c928718e092ad067b4 | refs/heads/master | 2016-08-11T20:23:46.503322 | 2015-11-19T23:01:17 | 2015-11-19T23:01:17 | 45,944,169 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.db.models import fields
class CustomError(models.Model):
data = fields.CharField(max_length=4096)
| UTF-8 | Python | false | false | 145 | py | 24 | errors.py | 16 | 0.77931 | 0.751724 | 0 | 6 | 23.166667 | 44 |
dcolinmorgan/mili_benchmark | 1,142,461,330,042 | 6672f8dce509f20abde7bb01efb1e6727a6630a5 | 6d1526293a9a45cf1e67f830edfe9d3aae57e96c | /src/python/run_predScore.py | eb75373b6dcfeba30d2016e1e00430d7b616e222 | []
| no_license | https://github.com/dcolinmorgan/mili_benchmark | 77dab8a7b15b8861bcdfa6d3321a695733a911f8 | 6ba4a2cba23b344f16bfef87b4d27f569ffd3d79 | refs/heads/master | 2023-07-29T17:21:52.890199 | 2021-09-10T02:06:33 | 2021-09-10T02:06:33 | 278,576,747 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# source /proj/relibs/relib00/conda/bin/activate
# echo $PATH
# source /proj/relibs/relib00//conda/env/mypy3 #./conda/env/mypy3
# source /proj/relibs/relib00/conda/etc/profile.d/conda.sh
# source activate mypy3
# cd netZooPy
# python
"""
Description:
Run predScore after benchmark.
Usage:
-h, --help: help
-i, --indir: directory where intersections are location
-o, --outdir
-c, --cell: if specific cell line wanted
-t, --TF: if specific TF wanted
bash data/MotifPipeline/ENCODE/sthlm_motif_pipeline_beta.sh -b50 -c'A549 K562 GM12878 SKNSH HepG2 HeLa'
Example:
source /proj/relibs/relib00/conda/bin/activate
source activate mypy3
python netZooPy/milipede/benchmark/run_predScore.py -i data/MotifPipeline/sthlm_motif_5_QCbeta -o data/MotifPipeline/sthlm_motif_5_QCbeta/red/test
"""
import sys
sys.path.append("/udd/redmo/netZooPy")
import getopt
import tests
# from netZooPy.lioness.lioness import Lioness
from mili_benchmark.src.python.predScore import predScore
from mili_benchmark.src.python.predRegion import predRegion
from mili_benchmark.src.python.buffer_distr_comp import buffer_distr_comp
from mili_benchmark.src.python.plot_predScore import plot_predScore
from mili_benchmark.src.python.plot_allPredScore import plot_allPredScore
def main(argv):
#Create variables
indir = None
outdir = None
cell = None
TF = None
try:
opts, args = getopt.getopt(argv, 'hi:o:c:t:', ['help', 'indir=','outdir=','cell=','TF='])
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
print(__doc__)
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
print(__doc__)
return 0z
elif opt in ('-i', '--indir'):
indir = arg
elif opt in ('-o', '--outdir'):
outdir = arg
elif opt in ('-c', '--cell'):
cell = arg
elif opt in ('-t','--TF'):
TF = arg
else:
print('Unknown option', opt)
return 1
#Check if required options are given
if indir is None or outdir is None:
print('Missing argument!')
print(__doc__)
return 1
else:
print('indir: ', indir)
print('outdir: ', outdir)
if TF is not None and cell is not None:
print('TF: ', TF)
print('cell: ', cell)
elif TF is not None and cell is None:
print('TF: ', TF)
elif TF is None and cell is not None:
print('cell: ', cell)
else:
print('all cell and TF combos')
# Run panda
print('Starting regional analysis ...')
RR,DD=predScore(indir,outdir,'mean',cell,TF,'N_Shore',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,'S_Shore',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,'OpenSea',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,'N_Shelf',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
##STILL NEED TO RUN
RR,DD=predScore(indir,outdir,'mean',cell,TF,'S_Shelf',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,'Island',None)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
print('Finished regional analysis ...')
print('Starting depth analysis ...')
RR,DD=predScore(indir,outdir,'mean',cell,TF,None,5)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,None,10)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,None,15)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
RR,DD=predScore(indir,outdir,'mean',cell,TF,None,20)
plot_predScore(outdir,outdir,'mean','auroc',RR,DD)
plot_allPredScore(outdir,'mean','auroc',RR,DD)
print('Finished '+RR+' '+DD)
print('Finished depth analysis ...')
# print('Start predRegion ...')
# predRegion(indir,outdir,cell,TF)
# print('Start buffer_distr_comp ...')
# buffer_distr_comp('mean',indir)
# print('Start plot_predScore ...')
# plot_predScore(outdir,outdir,'mean','aupr')
# print('Start plot_allPredScore ...')
# plot_allPredScore(outdir,'mean','aupr')
# # print('All done!')
# print('Start predScore ...')
# predScore(indir,outdir,'median',cell,TF)
# # print('Start buffer_distr_comp ...')
# # buffer_distr_comp('median',indir)
# print('Start plot_predScore ...')
# plot_predScore(outdir,outdir,'median','auroc')
# plot_predScore(outdir,outdir,'median','aupr')
# print('Start plot_allPredScore ...')
# plot_allPredScore(outdir,'median','auroc')
# plot_allPredScore(outdir,'median','aupr')
print('All done!')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| UTF-8 | Python | false | false | 5,792 | py | 67 | run_predScore.py | 34 | 0.631561 | 0.624655 | 0 | 172 | 32.674419 | 148 |
ushkarev/django-moj-template | 16,630,113,371,621 | 7b4dff97baf54c73079f3ae6a22427fbcbbce81a | 75013ae741b538751a8b392c606c1ca68174dbba | /builder/template/django_moj_template/app.py | 89f5e1c6b43b26b9fde86b17907038bbb4defc0c | [
"MIT"
]
| permissive | https://github.com/ushkarev/django-moj-template | 0f941f7a013f1dfcb89b4f36fd91503666b9e07f | ff1bd0e6cd7791593fceee8c731ec152f0d72e5a | refs/heads/master | 2021-01-10T02:09:09.423313 | 2016-02-26T12:33:24 | 2016-02-29T17:47:07 | 52,619,306 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig as DjangoAppConfig
class AppConfig(DjangoAppConfig):
name = 'django_moj_template'
verbose_name = 'MoJ Template'
| UTF-8 | Python | false | false | 156 | py | 15 | app.py | 9 | 0.75641 | 0.75641 | 0 | 6 | 25 | 52 |
uniite/super-socket | 16,612,933,530,630 | 61a8bf05e1d577f82a7c9f37adf6d5726cb86852 | 1f864859eb7ea7bdf8268c619de637857c1688d0 | /proxy_exit.py | aadd91f1ca5d0a2ee6d9bb074233162de706c31e | []
| no_license | https://github.com/uniite/super-socket | c2f133538b590b02f2c2ff1de90e296d5a1f1b83 | a9da240ed6d6eb115d3bc858366eaab8d633b41d | refs/heads/master | 2021-01-22T11:51:09.825745 | 2011-10-10T05:56:51 | 2011-10-10T05:56:51 | 2,545,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gevent import spawn, spawn_later
from gevent.event import Event
from gevent.socket import create_connection
from gevent.server import StreamServer
from gevent.queue import Queue
from gevent import sleep
from proxy_core import ProxyCore
ENTRY_HOST = "0.0.0.0"
ENTRY_PORT = 5000
ENDPOINTS = [
("127.0.0.1", 5005)
]
current_connection = None
class Connection(ProxyCore):
def reconnect(self):
self.connected.wait()
def kill_connection():
print "Killing connection!"
current_connection.endpoints["client"].close()
def start_session(client_socket, address):
global current_connection
print ("New connection from %s:%s" % address)
# Connect to the server
if not current_connection:
current_connection = Connection(client_socket, ENDPOINTS)
#spawn_later(5.5, kill_connection)
else:
print "Got new connection!"
current_connection.endpoints["client"] = client_socket
current_connection.connected.set()
# Stream client -> server
#Connection.stream(client_socket, server_socket)
# Stream client <- server
#spawn(stream, server_socket, client_socket)
if __name__ == "__main__":
server = StreamServer((ENTRY_HOST, ENTRY_PORT), start_session)
print ("Starting STCP server on port %s" % ENTRY_PORT)
server.serve_forever()
| UTF-8 | Python | false | false | 1,341 | py | 6 | proxy_exit.py | 6 | 0.692767 | 0.677852 | 0 | 52 | 24.788462 | 66 |
js-ferguson/gPMS | 2,869,038,183,306 | ee9050d1b96e93c1016bc63a00c15251aed418de | 2924e268fde98f3c4926c20880ebdf026ad84f81 | /clinic/migrations/0001_initial.py | 83d483e7e0bb647a86c3cf71888d9c2c98ee45f4 | []
| no_license | https://github.com/js-ferguson/gPMS | 0780c70487dd86ff4c495e257da8b1133f67bd4c | dc47549d815c656ef1a803c8250645a9789c8c9a | refs/heads/master | 2022-12-13T01:18:16.658142 | 2021-11-26T21:43:58 | 2021-11-26T21:43:58 | 232,631,335 | 0 | 1 | null | false | 2022-12-08T03:28:32 | 2020-01-08T18:29:28 | 2021-11-26T21:44:02 | 2022-12-08T03:28:31 | 5,988 | 0 | 1 | 2 | Python | false | false | # Generated by Django 3.0.2 on 2020-04-20 18:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Clinic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lat', models.FloatField(blank=True, null=True)),
('lng', models.FloatField(blank=True, null=True)),
('web', models.CharField(blank=True, max_length=128, null=True)),
('name', models.CharField(max_length=128)),
('phone', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('description', models.TextField(max_length=5000)),
('street', models.CharField(max_length=128)),
('city', models.CharField(max_length=128)),
('practitioner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Reviews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('body', models.TextField(max_length=500)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('clinic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='clinic.Clinic')),
],
),
]
| UTF-8 | Python | false | false | 1,895 | py | 54 | 0001_initial.py | 39 | 0.603166 | 0.582058 | 0 | 43 | 43.069767 | 134 |
Sargreis/LuffyProject | 19,318,762,919,054 | d874ceae3d4830ad1144aabe0ba71cbb1eb4a9f6 | 962f977e8470ed8b16dd134e50701c39077ec94e | /always/app01/admin.py | 020a976bdef222188ca93b856a9f4939b93a6103 | []
| no_license | https://github.com/Sargreis/LuffyProject | 775ef1e53d8aa7bb709b2267adc4ef623602c9ca | a679b64868e4fb41bfc0a823b2397194f7a9350f | refs/heads/master | 2021-09-09T10:16:20.295557 | 2018-03-15T03:20:34 | 2018-03-15T03:20:34 | 112,281,830 | 0 | 0 | null | false | 2017-11-30T03:46:57 | 2017-11-28T03:39:04 | 2017-11-28T11:34:28 | 2017-11-30T03:46:57 | 32,554 | 0 | 0 | 0 | Python | false | null | from django.contrib import admin
from app01 import models
# Register your models here.
admin.site.register(models.PricePolicy)
admin.site.register(models.Course)
admin.site.register(models.CourseDetail)
admin.site.register(models.Teacher)
admin.site.register(models.Account)
| UTF-8 | Python | false | false | 287 | py | 15 | admin.py | 10 | 0.797909 | 0.790941 | 0 | 9 | 29.666667 | 40 |
ypliu/leetcode-python | 9,062,381,005,468 | 473009319f5873260497cb3533cbe43857444066 | 02dc1f70da529c7c2aa45dcfe5e0a3aeeb1f98cc | /src/062_unique_paths/062_unique_paths.py | 404b9e7ed5c1e8dec068debb4e4d6da261e5ebce | []
| no_license | https://github.com/ypliu/leetcode-python | 2a5a14de6310cae19b9cc42091d81586e697fffb | 13e61c13c406a73debcfc996937cf16f715d55d1 | refs/heads/master | 2020-03-27T06:17:32.303442 | 2019-02-24T14:50:11 | 2019-02-24T14:50:11 | 146,094,735 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
if m <= 0 or n <= 0:
return 0
elif 1 == m or 1 == n:
return 1
total = m + n - 2
counts = min(m, n) - 1
numerator = denominator = 1
while counts >= 1:
numerator *= total
denominator *= counts
total -= 1
counts -= 1
return (numerator / denominator)
# debug
s = Solution()
print s.uniquePaths(3, 2)
print s.uniquePaths(7, 3)
| UTF-8 | Python | false | false | 596 | py | 226 | 062_unique_paths.py | 224 | 0.45302 | 0.426174 | 0 | 27 | 21.074074 | 40 |
thisisiron/Algorithms | 19,181,323,964,349 | e5042d0bb7141d824b405d27a9a1afccba813d03 | 71ffa58b7a6fc7bb896f5beff3d09481cbecdd84 | /BOJ/1912.py | 61af57db4fbcb358bbf3374e3b527d2475cbd971 | []
| no_license | https://github.com/thisisiron/Algorithms | 2770a4fa52d72b439c4c3913021ed62a2f162f85 | 292c10c8759903ed556eb55cf3b340203205aced | refs/heads/master | 2021-05-25T12:12:58.041247 | 2021-05-24T14:44:16 | 2021-05-24T14:44:16 | 127,402,348 | 0 | 0 | null | false | 2018-09-11T10:48:32 | 2018-03-30T08:23:38 | 2018-09-11T07:53:59 | 2018-09-11T10:48:31 | 1,349 | 0 | 0 | 0 | Python | false | null | import sys
input = sys.stdin.readline
if __name__ == '__main__':
n: int = int(input())
numbers: list = list(map(int, input().split()))
dp: list = [0] * n
for i in range(n):
if dp[i - 1] + numbers[i] > numbers[i]:
dp[i] = dp[i - 1] + numbers[i]
else:
dp[i] = numbers[i]
print(max(dp))
| UTF-8 | Python | false | false | 347 | py | 374 | 1912.py | 372 | 0.475504 | 0.466859 | 0 | 15 | 22.133333 | 51 |
b-mccormack/python-projects | 8,418,135,907,414 | 5ee179ea52753fe9c98a1f9589c8e760ef5f3be9 | c66d98e73e9ef0bec85be2f01e1edff8f86b6915 | /Beginner-Python/One hit wonders/fibonacci.py | d639a4c81c543e58bec3a1bf49e9c102b19cf072 | []
| no_license | https://github.com/b-mccormack/python-projects | fb143b1d0cdf7218ad791ea973d75b880463e403 | 2c4fcc716481b9c52048b5f6c72adcd94474f377 | refs/heads/master | 2023-01-13T03:42:56.012340 | 2021-07-19T22:22:48 | 2021-07-19T22:22:48 | 181,610,104 | 0 | 0 | null | false | 2020-10-20T02:39:28 | 2019-04-16T03:44:06 | 2020-10-19T08:28:26 | 2020-10-20T02:39:27 | 741 | 0 | 0 | 0 | Python | false | false | #Ask the user how many fibonacci numbers they would like to generate, then print out the sequence
userNum = int(input("How many fibonacci numbers would you like to see? Enter a whole number"))
fibList = [1, 1]
count = 1
while count <= userNum:
addNum = fibList[count] + fibList[count-1]
fibList.append(addNum)
count = count + 1
print(fibList)
| UTF-8 | Python | false | false | 353 | py | 58 | fibonacci.py | 57 | 0.725212 | 0.711048 | 0 | 12 | 28.333333 | 97 |
sebastian-code/ideas_sueltas | 11,295,764,025,801 | e84ca0aa2ceb70f54700d7ed1bfa30e5221294e7 | b91a9ca73c131f2296fcd07e0b69942b5fb6911d | /scrappers/robots_txt.py | f247f7eab4614fd4fd8187b3e7739cc66f0c0b52 | [
"MIT"
]
| permissive | https://github.com/sebastian-code/ideas_sueltas | 2a5c77ad482c31f080b9b04c9044586ce5899379 | 40ceac081f5181d01e188a5a1c40463d891203e6 | refs/heads/master | 2022-05-04T04:58:10.569717 | 2022-03-24T13:33:09 | 2022-03-24T13:33:09 | 29,372,124 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import io
import urllib.request
def get_robots_txt(url):
if not url.endswith("/"):
path = url + "/"
request = urllib.request.urlopen(f"{path}robots.txt", data=None)
data = io.TextIOWrapper(request, encoding="utf-8")
return data.read()
| UTF-8 | Python | false | false | 262 | py | 114 | robots_txt.py | 101 | 0.645038 | 0.641221 | 0 | 11 | 22.818182 | 68 |
NateWeiler/Resources | 2,327,872,318,407 | f04933dd991da1abacd8a47063e9c72427993db5 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/pyinstaller/PyInstaller/hooks/hook-gi.repository.Gdk.py | 9d97892eed3e150f7875896f2b4823e28cdf31fd | [
"LicenseRef-scancode-other-permissive"
]
| permissive | https://github.com/NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | false | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | 2021-12-09T12:39:04 | 2022-09-08T15:20:18 | 2,434,051 | 1 | 0 | 32 | null | false | false | version https://git-lfs.github.com/spec/v1
oid sha256:0586794fa1ffd887c7f6f6135b54e01a88e8dee0904b9c6d8e244a604e31c004
size 747
| UTF-8 | Python | false | false | 128 | py | 36,207 | hook-gi.repository.Gdk.py | 16,386 | 0.882813 | 0.5 | 0 | 3 | 41.666667 | 75 |
tapiaw38/desarrollo | 12,034,498,396,999 | 99f128758dc3d8347ef66fb34e6c6f8cbecde6a5 | ad9ab95361275f26c2ff1e20f7ab99bab3fcdc97 | /usuario/views.py | 8f3e7846087502471d70ce6b0b5e8ba59991424b | []
| no_license | https://github.com/tapiaw38/desarrollo | efbd03433c20b52c1e74b69fd63a539e0927dabf | e64f27aea9321da5884cbde986385e08006b9ccc | refs/heads/master | 2021-05-16T18:59:57.452208 | 2020-03-27T04:08:19 | 2020-03-27T04:08:19 | 250,429,496 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
from django.core.paginator import Paginator
from usuario.forms import UsuarioForm, CargaForm
from usuario.models import Usuario
# Create your views here.
from django.views.generic import View
from desarrollo.utileria import render_pdf
from django.views.generic import CreateView, ListView, UpdateView, TemplateView
from django.urls import reverse_lazy
class index(TemplateView):
template_name = 'usuario/index.html'
class usuario_create(CreateView):
model = Usuario
form_class = UsuarioForm
template_name = 'usuario/usuario_form.html'
success_url = reverse_lazy('usuario_listar')
class usuario_list(ListView):
model = Usuario
template_name = 'usuario/usuario_list.html'
paginate_by = 7
class usuario_edit(UpdateView):
model = Usuario
form_class = UsuarioForm
template_name = 'usuario/usuario_form.html'
success_url = reverse_lazy('usuario_listar')
class carga_edit(UpdateView):
model = Usuario
form_class = CargaForm
template_name = 'usuario/carga_form.html'
success_url = reverse_lazy('usuario_listar')
class mas_datos(View):
def get(self, request,id_usuario):
usuario = Usuario.objects.get(id=id_usuario)
contexto = {'usuario':usuario}
datos = render(request,'usuario/mas_datos.html',contexto)
return HttpResponse(datos)
def usuario_search(request):
buscar = request.POST.get('buscalo')
if buscar:
usuario=Usuario.objects.filter(dni__contains=buscar)
else:
usuario = Usuario.objects.all()
paginator = Paginator(usuario, 7)
page = request.GET.get('page')
usuario = paginator.get_page(page)
contexto = {'object_list': usuario}
return render(request, 'usuario/buscar.html',contexto)
| UTF-8 | Python | false | false | 1,822 | py | 6 | views.py | 4 | 0.716795 | 0.715697 | 0 | 55 | 32.127273 | 79 |
TTOFFLINE-LEAK/ttoffline | 8,478,265,447,376 | 2df02146d387d44bcdc49789a34acd4c4a40e0d6 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/uberdog/DistributedDeliveryManagerUD.py | 06d593d29a39b24e57f31ef731e64343e79b6f59 | [
"MIT"
]
| permissive | https://github.com/TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
class DistributedDeliveryManagerUD(DistributedObjectGlobalUD):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDeliveryManagerUD')
def requestAck(self):
avId = self.air.getAvatarIdFromSender()
if not avId:
return
self.sendUpdateToAvatarId(avId, 'returnAck', []) | UTF-8 | Python | false | false | 458 | py | 717 | DistributedDeliveryManagerUD.py | 715 | 0.779476 | 0.779476 | 0 | 11 | 40.727273 | 88 |
vikramahuja1001/Classification-Of-News-Data | 17,248,588,692,704 | 7748878548889645d715ffcb445164f6f2f2eb58 | 91372fe7e6df241bd906cc252722c5e65cd8b896 | /get_data.py | 0eb191aed4c127e3df437e7512f410e6e750aba9 | []
| no_license | https://github.com/vikramahuja1001/Classification-Of-News-Data | 2b0f20bcbd1b313d5f63cdc3594b167ba78f9d6a | c1e89e2bb26b90387836251e1c8acfd3985bd55e | refs/heads/master | 2021-01-10T01:26:43.142921 | 2016-01-17T07:07:07 | 2016-01-17T07:07:07 | 49,806,981 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import os.path
import re
import math
import random
count = 0
#19997 files total
topics = ["alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt" ,"sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc"]
def get_data():
data1=[]
for dirpath, dirnames, filenames in os.walk("."):
for filename in [f for f in filenames]:
if filename.isdigit() == True:
a = os.path.join(dirpath, filename)
fo = open(a ,'r')
data = fo.readlines()
for i in range(len(data)):
data[i] = data[i].strip()
data[i] = re.sub(' +',' ',data[i])
data[i] = re.split(' ', data[i])
i = 0
while i!=len(data):
if len(data[i]) == 1:
data.pop(i)
i -= 1
i +=1
k=0
n=len(data)
while k<n:
if data[k][0][-1] == ":":
data.pop(k)
n -=1
k -=1
k +=1
data2 = ""
for i in data:
for j in range(len(i)):
data2 += str(i[j]) + " "
a = re.split('/',dirpath)
if a[-1] in topics:
index = topics.index(a[-1])
data2 = [data2,index]
data1.append(data2)
return data1
def tokenise(data):
tok = []
for i in data:
chars = re.findall("i.e.|Dr.|Mr.|Mrs.|Inc.|Cir.|St.|Jr.|U.S.|N.A.S.A|text-align|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|www\.\w+\.\w+\.?\w+|\w+\.?\w+@\w+\.?\w+\.?\w+|\s[a-zA-Z]\.\s|[\w]+|\"\.|[\+\-()\"=;:*\.,\?!@#$%^&`~'|\\/<>]|\d+%|[0-9]+(?:st|nd|rd|th)",i[0])
a = [chars,i[1]]
tok.append(a)
return tok
def get_unigram(tok_data):
unigram={}
for i in tok_data:
for j in i[0]:
if j.lower() not in unigram:
unigram[j.lower()] = 1
else:
unigram[j.lower()] +=1
return unigram
def cosine_similarity(query,doc):
dot = 0.0
for i in range(len(query)):
dot += query[i]*doc[i]
#dot = sum(p*q for p,q in zip(query, doc))
q=0.0
d=0.0
for i in range(len(query)):
q +=(query[i]*query[i])
d +=(doc[i]*doc[i])
q = math.sqrt(q)
d = math.sqrt(d)
if q*d == 0.0:
return 0
return dot/(q*d)
print "Getting data"
data = get_data()
total_len = len(data)
print "Tokenising"
tok_data = tokenise(data)
print "get unigram"
unigram = get_unigram(tok_data)
print "sorting"
res =[]
res = sorted(unigram, key=unigram.get, reverse=True)
res2=[]
for i in range(len(res)):
res2.append(unigram[res[i]])
for i in range(len(res)):
print res[i],res2[i]
#print tok_data[0]
#removing stop words
stp = []
for i in range(1000):
stp.append(res[i])
data = []
print len(res)
res = res[500:]
res2 = res2[500:]
print len(res)
count = 0
for i in range(len(res)):
if res2[i] == 1 or res2[i] == 2 or res2[i] == 3 or res2[i] == 4:
count +=1
res = res[:-count]
res2 = res2[:-count]
print len(res)
print "Removing stop words"
for i in tok_data:
data1 = []
for j in i[0]:
if j not in stp:
data1.append(j)
a = [data1,i[1]]
data.append(a)
#print data[0]
print len(res)
tf = {}
print "Creating TF"
for i in range(len(res)):
print i
num = []
posn = []
final = []
if res[i] not in tf:
for j in range(len(data)):
count = 0
for k in data[j][0]:
if k == res[i]:
count +=1
if count !=0:
num.append(count)
posn.append(j)
final = [num,posn]
if len(final) !=0 :
tf[res[i]] = final
#print tf
idf = {}
print "Calculating IDF"
for i,j in tf.iteritems():
idf[i] = 1 + (math.log(total_len/(len(j[0])*1.0))/math.log(math.e))
#print idf
tfidf = {}
for i in tf:
tfidf[i] = tf[i]
print "Calculating TFIDF"
for i ,j in tfidf.iteritems():
a = []
b = j[0]
c = j[1]
for k in range(len(j[0])):
b[k] = (b[k] * idf[i])
a.append(b)
a.append(c)
tfidf[i] = a
#
#print tfidf
tfidf_final = {}
for i in tfidf:
a = []
for j in range(total_len):
a.append(0.0)
tfidf_final[i] = a
#print tfidf_final
for i in tfidf:
a = tfidf_final[i]
#print a
#print len(i[1])
for j in range(len(tfidf[i][1])):
#print tfidf[i][0][j]
#print tfidf[i][1][j]
index = tfidf[i][1][j]
value = tfidf[i][0][j]
#print index
#print value
#a = tfidf_final[i]
a[index] = value
#print a
tfidf_final[i] = a
#print tfidf['date']
#print tfidf_final['date']
#Testing
#kmeans
print len(tfidf_final)
score = []
for j in range(total_len):
a = []
for i in tfidf_final:
a.append(tfidf_final[i][j])
score.append(a)
print len(score)
print len(score[0])
centroid = []
while(len(centroid)!=20):
a = random.randint(0,len(score))
if a not in centroid:
centroid.append(a)
points = [0] *len(score)
for i in range(len(centroid)):
for j in range(len(score)):
if centroid[i]!= j:
a = cosine_similarity(score(centroid[i]),score(j))
print a
"""
a = "./20_newsgroups/alt.atheism/49960"
fo = open(a ,'r')
data = fo.readlines()
for i in range(len(data)):
data[i] = data[i].strip()
print data[i]
data[i] = re.sub(' +',' ',data[i])
data[i] = re.split(' ',data[i])
print data[i]
i = 0
while i!=len(data):
if len(data[i]) == 1:
data.pop(i)
i -= 1
i +=1
k=0
n=len(data)
while k<n:
m=re.match(r'((\w+\-*\w+)+\-?)+\:',data[k][0])
if m:
data.pop(k)
n-=1
k-=1
k+=1
for i in data:
print i
#print data[0][1]
"""
"""
for i in data:
if i[0] == " ":
print i
""" | UTF-8 | Python | false | false | 5,320 | py | 3 | get_data.py | 3 | 0.57312 | 0.54906 | 0 | 274 | 18.419708 | 402 |
Bytamine/ff-olymp | 8,959,301,803,299 | fe116136efd330e6dabd522afa12816dd64dc07f | e6304b9e9989bc54f66896b371f63eac41cc1405 | /08 lists/8.py | 0cff018e776fb58a6a5afab19fb56e117d7b913b | []
| no_license | https://github.com/Bytamine/ff-olymp | ece2f8f28a44c16f379c8e1b7ea3cacc65fbb032 | 3b3ffbf602f1099f1a2e9f817a094d3e98863f61 | refs/heads/master | 2022-09-27T12:25:56.835132 | 2020-06-03T17:33:49 | 2020-06-03T17:33:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
a = list(map(int, input().split()))
a = [x for x in a if x != 0] + [0] * a.count(0)
print(' '.join(map(str, a))) | UTF-8 | Python | false | false | 138 | py | 123 | 8.py | 123 | 0.536232 | 0.507246 | 0 | 7 | 18.857143 | 47 |
ydanilin/dattime | 17,867,063,959,280 | b131d62eeea16af30632ce6f34386e64276a2d67 | 8b6beaece9cc58848b73b1d96ca93e48668ffbc8 | /dattimewww/epoch/views.py | 211b6985f08f990b44a8faeec6ddcf04c1309229 | []
| no_license | https://github.com/ydanilin/dattime | bb68573a143d44a2ba4ab1a8cd1690c700ca1bad | 40d961582a8ae250b2cd107855fb286201995680 | refs/heads/master | 2021-01-20T03:14:20.064068 | 2017-06-05T19:21:29 | 2017-06-05T19:21:29 | 89,516,006 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from datetime import datetime
from django.shortcuts import render
from django.http import HttpResponse
from django import forms
from .models import Epocher
from django.template import Template, Context
Ep = Epocher()
tzChoices = [(-12, 'GMT-12'), (-11, 'GMT-11'), (-10, 'GMT-10'),
(-9, 'GMT-9'), (-8, 'GMT-8'), (-7, 'GMT-7'),
(-6, 'GMT-6'), (-5, 'GMT-5'), (-4, 'GMT-4'),
(-3, 'GMT-3'), (-2, 'GMT-2'), (-1, 'GMT-1'),
(0, 'GMT'),
(1, 'GMT+1'), (2, 'GMT+2'), (3, 'GMT+3'),
(4, 'GMT+4'), (5, 'GMT+5'), (5.5, 'GMT+5:30'), (5.75, 'GMT+5:45'),
(6, 'GMT+6'), (6.5, 'GMT+6:30'),
(7, 'GMT+7'), (8, 'GMT+8'),
(9, 'GMT+9'), (9.5, 'GMT+9:30'),
(10, 'GMT+10'), (11, 'GMT+11'), (12, 'GMT+12')]
monthChoices = [(1, 'Jan'), (2, 'Feb'), (3, 'Mar'), (4, 'Apr'),
(5, 'May'), (6, 'Jun'), (7, 'Jul'), (8, 'Aug'),
(9, 'Sep'), (10, 'Oct'), (11, 'Nov'), (12, 'Dec')]
# http://stackoverflow.com/questions/32084837/changing-id-of-form-input-element-in-django-when-its-created
class formWorldToAlt(forms.Form):
def __init__(self, *args, **kwargs):
super(formWorldToAlt, self).__init__(*args, **kwargs)
# self.initial['timezone'] = 2
timezone = forms.ChoiceField(label="Time zone", choices=tzChoices)
day = forms.IntegerField(label="Day", min_value=1, max_value=31)
month = forms.ChoiceField(label='Month', choices=monthChoices)
year = forms.IntegerField(label="Year")
hour = forms.IntegerField(label="Hour", min_value=0, max_value=23)
minute = forms.IntegerField(label="Minute", min_value=0, max_value=59)
class formAltToWorld(forms.Form):
def __init__(self, *args, **kwargs):
super(formAltToWorld, self).__init__(*args, **kwargs)
# self.initial['wtimezone'] = 2
self.initial['wdirection'] = 1
wtimezone = forms.ChoiceField(label="Time zone", choices=tzChoices)
wyear = forms.IntegerField(label="Year")
wmonth = forms.IntegerField(label='Month', min_value=0, max_value=9)
wday = forms.IntegerField(label="Day", min_value=0, max_value=99)
whour = forms.IntegerField(label="Hour", min_value=0, max_value=99)
wminute = forms.IntegerField(label="Minute", min_value=0, max_value=99)
wdirection = forms.ChoiceField(label='Direction', choices=[(-1, 'BST'),
(1, 'FST')])
menuSet = [{'caption': 'Homepage', 'viewName': 'epochTime'},
{'caption': 'My Birthday in the New Epoch', 'viewName': 'birthday'},
{'caption': 'The Bridge between calendars', 'viewName': 'bridge'},
{'caption': 'The New Time', 'viewName': 'calculation'}]
def epochTime(request):
# if not request.session.session_key:
# request.session.create()
# print(request.session.session_key)
footerItems = [menuSet[1], menuSet[2], menuSet[3]]
dt = datetime.utcnow()
epTime = Ep.getSpaceMoment(dt)
return render(request, 'epoch/screen01.html', {'epTime': epTime,
'footerItems': footerItems})
def birthday(request):
dt = datetime.utcnow()
footerItems = [menuSet[0], menuSet[2], menuSet[3]]
if request.POST:
timezone = float(request.POST.get('timezone'))
day = int(request.POST.get('day'))
month = int(request.POST.get('month'))
year = int(request.POST.get('year'))
hour = int(request.POST.get('hour'))
minute = int(request.POST.get('minute'))
event = Ep.getAnnualEventDetails(timezone, dt, year, month, day, hour, minute)
output = dict(epDob=event[0], epAge=event[1], epUntil=event[2],
epNextDob=event[3], wNextDob=event[4].isoformat())
return HttpResponse(
json.dumps(output),
content_type="application/json")
else:
# this is on page load
ini = {}
if 'country' in request.session:
if request.session['country'] == 'RU':
ini = dict(timezone=6)
form = formWorldToAlt(initial=ini)
return render(request, 'epoch/birthday.html',
{'footerItems': footerItems, 'form': form})
def bridge(request):
footerItems = [menuSet[0], menuSet[1], menuSet[3]]
if request.POST:
timezone = float(request.POST.get('timezone'))
day = int(request.POST.get('day'))
month = int(request.POST.get('month'))
year = int(request.POST.get('year'))
hour = int(request.POST.get('hour'))
minute = int(request.POST.get('minute'))
sender = request.POST.get('senderr')
output = {}
if sender == 'world':
output = Ep.getEventSpaceDate(timezone, year, month, day, hour, minute)
if sender == 'alt':
direction = int(request.POST.get('direction'))
output = dict(wtime=Ep.getEventUserDate(timezone, year, month, day,
hour, minute, direction))
return HttpResponse(
json.dumps(output),
content_type="application/json"
)
else:
formW = formWorldToAlt(initial=dict(timezone=3))
formA = formAltToWorld(initial=dict(wtimezone=3))
return render(request, 'epoch/bridge.html',
dict(footerItems=footerItems, formW=formW, formA=formA))
def calculation(request):
footerItems = [menuSet[0], menuSet[1], menuSet[2]]
toAlt = Ep.getToAltConversionRatios()
toWorld = Ep.getToWorldConversionRatios()
units = Ep.getSubUnits()
return render(request, 'epoch/calculation.html',
dict(footerItems=footerItems,
toAlt=toAlt,
toWorld=toWorld,
units=units)
)
def testick(request):
# http://stackoverflow.com/questions/3889769/how-can-i-get-all-the-request-headers-in-django
t = Template(('<b>request.META</b><br>'
'{% for k_meta, v_meta in request.META.items %}'
'<code>{{ k_meta }}</code> : {{ v_meta }} <br>'
'{% endfor %}'))
c = Context(dict(request=request))
return HttpResponse(t.render(c))
| UTF-8 | Python | false | false | 6,304 | py | 23 | views.py | 13 | 0.563293 | 0.538071 | 0 | 153 | 40.202614 | 106 |
AmberJBlue/Project-Euler-Solutions | 18,769,007,084,731 | ac117b6547025f6f59bc260b0c2d947b15b976c8 | 7ed1135be27cbf578d003f922dd15585387b0f91 | /python/1-50/2.py | 740da13fe2620936f546859d73e597aed03faad1 | []
| no_license | https://github.com/AmberJBlue/Project-Euler-Solutions | a83484b29e6fa4c79ac4bf970272ac36bb338b7a | 754730e9e3644ee20523f7dffc6c60b7c0ac33c7 | refs/heads/master | 2020-04-29T08:58:55.941759 | 2019-08-14T19:11:02 | 2019-08-14T19:11:02 | 176,006,651 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def solution():
result = 0
current = 1
nextNum = 2
while current <= 4000000:
if current % 2 == 0:
result += current
current, nextNum = nextNum, current + nextNum
return str(result)
if __name__ == "__main__":
print(solution())
| UTF-8 | Python | false | false | 242 | py | 96 | 2.py | 94 | 0.619835 | 0.570248 | 0 | 13 | 17.538462 | 47 |
nky-uno/es-apps | 7,129,645,728,302 | e3abe67d43c9c42c351dbe628e983e58c792d006 | 847b23ed62e52ff23ccf01da67802db1e5d89ff7 | /pymodules/es_search.py | fef0fe2b60e7ffea9fdc00052b3480287d1d4876 | []
| no_license | https://github.com/nky-uno/es-apps | 68382ae1f3429da55034338fad4f4aef3fcb97bb | a719e559b1875dfc4b366e93612612b0fb2a6fad | refs/heads/master | 2020-12-06T15:07:13.096888 | 2020-01-08T11:16:29 | 2020-01-08T11:16:29 | 232,492,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import date
from dateutil import relativedelta
import calendar
import logging
import pandas as pd
from retry import retry
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
@retry(tries=3, delay=2, backoff=2)
def get_elastic():
try:
# connect to the Elasticsearch cluster
elastic = Elasticsearch([{'host': 'nky.uno', 'port': 9200}])
return elastic
except Exception as ex:
logging.error(ex)
raise
def next_day(dt_now):
year = dt_now.split('-')[0]
month = dt_now.split('-')[1]
d = date(int(year),int(month),1)
nextday = d + relativedelta.relativedelta(months=1)
return nextday.strftime('%Y-%m-%d')
def result(dt_now):
elastic = get_elastic()
response = elastic.search(
index="daily",
body={
"aggs": {
"range": {
"date_range": {
"field": "date",
"format": "yyyy-MM-dd",
"ranges": [
{ "from": "{}-01".format(dt_now), "to": "{}".format(str(next_day(dt_now))) }
]
},
"aggs": {
"histogram": {
"date_histogram": {
"field": "date",
"interval": "1d",
"order": {
"_key": "desc"
}
},
"aggs": {
"by_category": {
"terms": {
"field": "category",
"min_doc_count": 0,
"order": {
"_key":"asc"
}
},
"aggs": {
"count_sum": {
"sum": {
"field": "time"
}
}
}
}
}
}
}
}
}
}
)
es_rows = []
for tag1 in response['aggregations']['range']['buckets']:
for tag2 in tag1['histogram']['buckets']:
if (tag2['doc_count'] > 0):
entry = {"date": tag2['key_as_string']}
for tag3 in tag2['by_category']['buckets']:
entry[str(tag3['key'])] = tag3['count_sum']['value']
es_rows.append(entry)
df = pd.DataFrame(es_rows)
return df;
| UTF-8 | Python | false | false | 2,977 | py | 8 | es_search.py | 5 | 0.331878 | 0.32348 | 0 | 86 | 33.616279 | 105 |
mhema524/Udacity-DataModeling-with-PostgreSQL | 1,795,296,372,804 | 25026b62f3711266cc9094b399b4d293cb4a36b1 | 95b522ce90c439c2c29c39f30dc0a99b79a24c9d | /sql_queries.py | 5aef89ffbe7109d521be76112483f9bf32c82129 | []
| no_license | https://github.com/mhema524/Udacity-DataModeling-with-PostgreSQL | 1934310e8f889c50fdefa0fef2160582c0be4924 | c8ba836bca4fb5dc8f87d264e72882935ca1ddbe | refs/heads/master | 2022-07-05T13:39:46.912380 | 2020-05-15T16:42:55 | 2020-05-15T16:42:55 | 264,006,155 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplay_table"
user_table_drop = "DROP TABLE IF EXISTS user_table_drop"
song_table_drop = "DROP TABLE IF EXISTS song_table_drop"
artist_table_drop = "DROP TABLE IF EXISTS artist_table_drop"
time_table_drop = "DROP TABLE IF EXISTS time_table_drop"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id SERIAL PRIMARY KEY,
start_time time,
user_id int,
level varchar(10),
song_id varchar(25),
artist_id varchar(25),
session_id int,
location text,
user_agent text);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (user_id int, first_name varchar(20), last_name varchar(20), gender char(1), level varchar(20));
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id varchar PRIMARY KEY,
title varchar,
artist_id varchar,
year int,
duration numeric)
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artist (
artist_id varchar PRIMARY KEY,
name varchar,
location text,
latitude float,
longitude float);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (start_time time, hour int, date int, week int, month int, year int, weekday int);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
user_table_insert = ("""
INSERT INTO users (
user_id,
first_name,
last_name,
gender,
level)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
song_table_insert = ("""
INSERT INTO songs (
song_id,
title,
artist_id,
year,
duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
artist_table_insert = ("""
INSERT INTO artist (
artist_id,
name,
location,
latitude,
longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
time_table_insert = ("""
INSERT INTO time (
start_time,
hour,
date,
week,
month,
year,
weekday)
VALUES(%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
# FIND SONGS
song_select = ("""
SELECT
t1.song_id,
t2.artist_id
FROM songs t1
JOIN artist t2
ON t1.artist_id = t2.artist_id
WHERE t1.title = %s
and t2.name = %s
and t1.duration = %s;
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop] | UTF-8 | Python | false | false | 2,956 | py | 2 | sql_queries.py | 1 | 0.581529 | 0.574087 | 0 | 134 | 21.067164 | 129 |
Priyanka328/KnapsackProblem | 386,547,075,240 | 0d3d29cf8183541e9275cc3c70a9428df6222164 | 78cee9c7a0722e3d94120b33615beaa13e2c9bc2 | /algorithm/bound_and_branches.py | 1b8570871dfc265a99463e864023b462d4b243aa | [
"MIT"
]
| permissive | https://github.com/Priyanka328/KnapsackProblem | 59efc0acec82e7ce170e5bdeb44ccf20f3236028 | 5a6e384f70ecc21f8b28b78a93ff352b85519d09 | refs/heads/master | 2022-07-19T14:30:45.352261 | 2020-05-16T15:37:03 | 2020-05-16T15:37:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
import numpy as np
from datetime import datetime
from datetime import timedelta
from utils.simple_queue import SimpleQueue
from utils.simple_queue import Node
from algorithm.base import Algorithm
from ortools.linear_solver import pywraplp
import sys, os
class HiddenPrints:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
class BranchAndBound(Algorithm):
def __init__(self, knapsack):
assert type(knapsack) == dict
self.start_time = 0
self.profits = knapsack['profits']
self.weights = knapsack['weights']
self.n_items = len(knapsack['weights'])
self.bounds = [[0, 1]] * self.n_items
self.capacity = knapsack['capacity'][0]
self.optimal = knapsack['optimal']
self.low_bound, self.fixed_weight, self.ans = 0, 0, 0
self.picks = []
self.check_inputs()
@property
def name(self):
return 'Branch-And-Bound'
def eval(self):
assert len(self.picks) != 0
return self.optimal == self.picks
def solve(self):
self.start_time = datetime.now()
_, self.picks = self.branch_and_bound()
if (_ == -1 and self.picks == -1):
return -1
return list(self.picks)
def branch_and_bound(self):
solver = pywraplp.Solver('simple_mip_program', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
x_dict = np.array([solver.IntVar(self.bounds[i][0], self.bounds[i][1], f'x_{i}') for i in range(self.n_items)])
solver.Add(solver.Sum([self.weights[i]*x_dict[i] for i in range(self.n_items)]) <= self.capacity)
solver.Maximize(solver.Sum([self.profits[i]*x_dict[i] for i in range(self.n_items)]))
status = solver.Solve()
profit = solver.Objective().Value()
end_time = datetime.now() - self.start_time
if (end_time > timedelta(seconds=228)):
return -1, -1
if profit <= self.low_bound:
return 0, 0
X = np.array([x_d.solution_value() for x_d in x_dict])
try:
idx = np.where(X != X.round())[0][0]
except IndexError:
idx = -1
if (idx != -1):
self.bounds[idx] = [0, 0]
new_profit, new_X = self.branch_and_bound()
if (new_profit):
self.low_bound, self.ans = new_profit, new_X
self.bounds[idx] = [1, 1]
self.fixed_weight += self.weights[idx]
if self.fixed_weight <= self.capacity:
new_profit, new_X = self.branch_and_bound()
if (new_profit):
self.low_bound, self.ans = new_profit, new_X
self.bounds[idx] = [0, 1]
self.fixed_weight -= self.weights[idx]
else:
self.low_bound = profit
self.ans = X
return self.low_bound, self.ans
def check_inputs(self):
# check variable type
assert(isinstance(self.profits, list))
assert(isinstance(self.weights, list))
assert(isinstance(self.n_items, int))
assert(isinstance(self.capacity, int))
# check value type
assert(all(isinstance(val, int) or isinstance(val, float) for val in self.profits))
assert(all(isinstance(val, int) for val in self.weights))
# check validity of value
assert(all(val >= 0 for val in self.weights))
assert(self.n_items > 0)
assert(self.capacity > 0)
| UTF-8 | Python | false | false | 3,681 | py | 10 | bound_and_branches.py | 7 | 0.572942 | 0.563977 | 0 | 108 | 33.083333 | 119 |
GhostofAdam/FBGAN | 18,640,158,082,734 | a7d2b971bd82e935f184837f6a399dadc0caa243 | d06ec71db7d7b27d878415610a9f8841182453bd | /loader.py | 3770bfec60aed1437833116fe217597b3d4bba1f | []
| no_license | https://github.com/GhostofAdam/FBGAN | 4b48230d3ada5597aa1154350efb8c6a4d1c09df | 9e002fbd680c65890dd90a7c608e787a42236849 | refs/heads/master | 2022-12-02T14:16:24.059723 | 2020-08-19T15:00:03 | 2020-08-19T15:00:03 | 281,961,691 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
def data_loader(embed_type):
data = pd.read_csv("data.csv",index_col=0)
if embed_type == "NAVIE":
X = np.zeros((len(data),26))
for i, seq in enumerate(data['i']):
for letter in seq:
X[i][ord(letter)-ord('A')] += 1
else:
embedding = pd.read_csv("./iFeature/iFeature/"+embed_type+".tsv",sep='\t')
X = embedding.iloc[:,1:].to_numpy()
Y = data['c'].to_numpy()
rng_state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(rng_state)
np.random.shuffle(Y)
train_X = X
train_Y = Y
df = pd.read_excel("thermo_tyrosineRS.xlsx",index_col=0)
test_Y = df[['Optimal growth temperature']].to_numpy().squeeze()
if embed_type == "NAVIE":
test_X = np.zeros((len(df),26))
for i, seq in enumerate(df['organism']):
for letter in seq:
X[i][ord(letter)-ord('A')] += 1
else:
embedding = pd.read_csv("./iFeature/iFeature/"+embed_type+"_test.tsv",sep='\t')
test_X = embedding.iloc[:,1:].to_numpy()
return train_X,train_Y,test_X,test_Y
if __name__ == "__main__":
Y,X = data_loader()
print(X[0]) | UTF-8 | Python | false | false | 1,222 | py | 20 | loader.py | 12 | 0.549918 | 0.540917 | 0 | 37 | 32.054054 | 88 |
ruanramos/LoL_Key_Pressing_Counter | 9,758,165,702,338 | 2ede1ea715f6c8a98ad4708eba253cf9785e989a | 486be403edc226824254f0b076263cf2ca0fe249 | /Tests/tests.py | b226b9ce2847353eae5b426364c9dfbfd3f716ac | []
| no_license | https://github.com/ruanramos/LoL_Key_Pressing_Counter | e03f66c6a3ed300ab8426331d2c23e0f6ae80ebe | 1ce19608668328a1451d5eed49f71cb3b4051199 | refs/heads/master | 2020-07-14T12:23:32.025420 | 2019-09-06T03:50:42 | 2019-09-06T03:50:42 | 205,317,571 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from key_counter import str_time_2_seconds
class TestStrTime2Seconds(unittest.TestCase):
def test_str_time_2_seconds(self):
self.assertEqual(9945, str_time_2_seconds('2:45:45'))
def test_str_time_2_seconds_1_hour(self):
self.assertEqual(3600, str_time_2_seconds('1:00:00'))
def test_str_time_2_seconds_59_minutes_59_sec(self):
self.assertEqual(3599, str_time_2_seconds('0:59:59'))
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 486 | py | 3 | tests.py | 2 | 0.668724 | 0.58642 | 0 | 17 | 27.588235 | 61 |
yamaz420/PythonIntro-LOLcodeAndShit | 8,194,797,608,452 | 8cdc797b87b0c0d372962aee10a2faf0522e3991 | 4fc4cbf4aa267e76a628b1d03cad20f3f1e7d5f4 | /pythonLOL/StringsAndCasting.py | b8fea2461cca44a53cce48c60742bd4f569939fb | []
| no_license | https://github.com/yamaz420/PythonIntro-LOLcodeAndShit | 99eabe791d94373ac00cd81330bb871898322ee5 | 028d5c767e2c033ad77521fb8dcca0be1507da9e | refs/heads/main | 2023-03-16T00:11:43.645834 | 2021-03-12T22:30:32 | 2021-03-12T22:30:32 | 347,214,330 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | zz = 10 #int
my_name = "Erik" #string
height = 188.5 #float
is_cool = True #boolean
is_ready = False #boolean
print(my_name)
my_name = 40 #int
print(my_name);
p, y, z = "HEJ", 200, 1000
print(p)
print(y)
print(z)
# *, +, -, /,
a = 5**2 #25
b = 3**3 #27
c = 5/ 2 #2.5 float
d = 5//2 #2 int-division
print(a,b,c,d)
print(a + b)
age = 28
name = "Erik"
print("hello my name is {} and i am {} years old.".format(name,age))
print("hello, my name is {1} and i am {0} years old".format(age, name))
print("this awesome guys' name is {1}. {1} is {0} years old".format(age, name))
print("this boring dudes' name is {name} and i think he is {age} years old".format(age = 50, name = "Erik"))
print(f'hello, my name is {name} and i am {age} years old') #by prefixinf the string with an "f". pythin understands it as a formatted string and lets u infuse variables inot the string
a = 10
b = 43
print(f"what is {a} times {b}? it's {a*b} ofcourse!")
# username = input("what is your username?")
# print(username)
# int() - casts to an int
# float() - casts to float
# str() casts to string
age= int(input("what is your age"))
print(type(age))
print(age)
| UTF-8 | Python | false | false | 1,201 | py | 19 | StringsAndCasting.py | 19 | 0.610325 | 0.574521 | 0 | 45 | 24.6 | 186 |
CLCMacTeam/bigfiximport | 4,715,874,108,934 | 8af524b2d90a33937d21680dd55f1cf76daf801f | d98f4e30c8b571c1fcc59dff18781276384061cf | /munkilib/installer.py | 6c40956ac9cdf8294c14ede1bc7b50f1b37f68e4 | []
| no_license | https://github.com/CLCMacTeam/bigfiximport | c4b6712dd3dcb4fdf71fbd58674c23bd5066ca0c | 624719845067ef0bd287eb3e9f341d006241a47c | refs/heads/master | 2021-01-17T05:21:42.337360 | 2016-07-06T15:54:10 | 2016-07-06T15:54:10 | 62,732,287 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# encoding: utf-8
#
# Copyright 2009-2014 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
installer.py
munki module to automatically install pkgs, mpkgs, and dmgs
(containing pkgs and mpkgs) from a defined folder.
"""
import datetime
import os
import pwd
import subprocess
import time
import stat
import adobeutils
import launchd
import munkicommon
import munkistatus
import profiles
import updatecheck
import FoundationPlist
from removepackages import removepackages
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from Foundation import NSDate
# stuff for IOKit/PowerManager, courtesy Michael Lynn, pudquick@github
from ctypes import c_uint32, cdll, c_void_p, POINTER, byref
from CoreFoundation import CFStringCreateWithCString
from CoreFoundation import kCFStringEncodingASCII
from objc import pyobjc_id
# pylint: enable=E0611
# lots of camelCase names
# pylint: disable=C0103
libIOKit = cdll.LoadLibrary('/System/Library/Frameworks/IOKit.framework/IOKit')
libIOKit.IOPMAssertionCreateWithName.argtypes = [
c_void_p, c_uint32, c_void_p, POINTER(c_uint32)]
libIOKit.IOPMAssertionRelease.argtypes = [c_uint32]
def CFSTR(py_string):
'''Returns a CFString given a Python string'''
return CFStringCreateWithCString(None, py_string, kCFStringEncodingASCII)
def raw_ptr(pyobjc_string):
'''Returns a pointer to a CFString'''
return pyobjc_id(pyobjc_string.nsstring())
def IOPMAssertionCreateWithName(assert_name, assert_level, assert_msg):
    '''Creates a PowerManager assertion'''
assertID = c_uint32(0)
p_assert_name = raw_ptr(CFSTR(assert_name))
p_assert_msg = raw_ptr(CFSTR(assert_msg))
errcode = libIOKit.IOPMAssertionCreateWithName(
p_assert_name, assert_level, p_assert_msg, byref(assertID))
return (errcode, assertID)
IOPMAssertionRelease = libIOKit.IOPMAssertionRelease
# end IOKit/PowerManager bindings
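# Illustrative sketch of using the bindings above to hold a power assertion
# for the duration of a long install. The assertion name and level value
# (kIOPMAssertionLevelOn == 255) come from the IOKit headers and are
# assumptions here, not values exercised elsewhere in this module:
#
#   errcode, assertion_id = IOPMAssertionCreateWithName(
#       'NoIdleSleepAssertion', 255, 'munki installer session')
#   if errcode == 0:
#       try:
#           pass  # long-running install work
#       finally:
#           IOPMAssertionRelease(assertion_id)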
# initialize our report fields
# we do this here because appleupdates.installAppleUpdates()
# calls installWithInfo()
munkicommon.report['InstallResults'] = []
munkicommon.report['RemovalResults'] = []
def removeBundleRelocationInfo(pkgpath):
'''Attempts to remove any info in the package
that would cause bundle relocation behavior.
This makes bundles install or update in their
default location.'''
munkicommon.display_debug1("Looking for bundle relocation info...")
if os.path.isdir(pkgpath):
# remove relocatable stuff
tokendefinitions = os.path.join(
pkgpath, "Contents/Resources/TokenDefinitions.plist")
if os.path.exists(tokendefinitions):
try:
os.remove(tokendefinitions)
munkicommon.display_debug1(
"Removed Contents/Resources/TokenDefinitions.plist")
except OSError:
pass
plist = {}
infoplist = os.path.join(pkgpath, "Contents/Info.plist")
if os.path.exists(infoplist):
try:
plist = FoundationPlist.readPlist(infoplist)
except FoundationPlist.NSPropertyListSerializationException:
pass
if 'IFPkgPathMappings' in plist:
del plist['IFPkgPathMappings']
try:
FoundationPlist.writePlist(plist, infoplist)
munkicommon.display_debug1("Removed IFPkgPathMappings")
except FoundationPlist.NSPropertyListWriteException:
pass
def install(pkgpath, choicesXMLpath=None, suppressBundleRelocation=False,
environment=None):
"""
    Uses the Apple installer to install the package or metapackage
    at pkgpath. Prints status messages to STDOUT.
    Returns a tuple of (installer return code, restart needed boolean).
"""
restartneeded = False
installeroutput = []
if os.path.islink(pkgpath):
# resolve links before passing them to /usr/bin/installer
pkgpath = os.path.realpath(pkgpath)
if suppressBundleRelocation:
removeBundleRelocationInfo(pkgpath)
packagename = ''
restartaction = 'None'
pkginfo = munkicommon.getInstallerPkgInfo(pkgpath)
if pkginfo:
packagename = pkginfo.get('display_name')
restartaction = pkginfo.get('RestartAction', 'None')
if not packagename:
packagename = os.path.basename(pkgpath)
#munkicommon.display_status_major("Installing %s..." % packagename)
munkicommon.log("Installing %s from %s" % (packagename,
os.path.basename(pkgpath)))
cmd = ['/usr/sbin/installer', '-query', 'RestartAction', '-pkg', pkgpath]
if choicesXMLpath:
cmd.extend(['-applyChoiceChangesXML', choicesXMLpath])
proc = subprocess.Popen(cmd, shell=False, bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, dummy_err) = proc.communicate()
restartaction = str(output).decode('UTF-8').rstrip("\n")
    if restartaction in ("RequireRestart", "RecommendRestart"):
munkicommon.display_status_minor(
'%s requires a restart after installation.' % packagename)
restartneeded = True
# get the OS version; we need it later when processing installer's output,
# which varies depending on OS version.
os_version = munkicommon.getOsVersion()
cmd = ['/usr/sbin/installer', '-verboseR', '-pkg', pkgpath, '-target', '/']
if choicesXMLpath:
cmd.extend(['-applyChoiceChangesXML', choicesXMLpath])
# set up environment for installer
env_vars = os.environ.copy()
# get info for root
userinfo = pwd.getpwuid(0)
env_vars['USER'] = userinfo.pw_name
env_vars['HOME'] = userinfo.pw_dir
if environment:
# Munki admin has specified custom installer environment
for key in environment.keys():
if key == 'USER' and environment[key] == 'CURRENT_CONSOLE_USER':
# current console user (if there is one) 'owns' /dev/console
userinfo = pwd.getpwuid(os.stat('/dev/console').st_uid)
env_vars['USER'] = userinfo.pw_name
env_vars['HOME'] = userinfo.pw_dir
else:
env_vars[key] = environment[key]
munkicommon.display_debug1(
'Using custom installer environment variables: %s', env_vars)
# run installer as a launchd job
try:
job = launchd.Job(cmd, environment_vars=env_vars)
job.start()
except launchd.LaunchdJobException, err:
munkicommon.display_error(
'Error with launchd job (%s): %s', cmd, str(err))
munkicommon.display_error('Can\'t run installer.')
return (-3, False)
timeout = 2 * 60 * 60
inactive = 0
last_output = None
while True:
installinfo = job.stdout.readline()
if not installinfo:
if job.returncode() is not None:
break
else:
# no data, but we're still running
inactive += 1
if inactive >= timeout:
# no output for too long, kill this installer session
munkicommon.display_error(
"/usr/sbin/installer timeout after %d seconds"
% timeout)
job.stop()
break
# sleep a bit before checking for more output
time.sleep(1)
continue
# we got non-empty output, reset inactive timer
inactive = 0
# Don't bother parsing the stdout output if it hasn't changed since
# the last loop iteration.
if last_output == installinfo:
continue
last_output = installinfo
installinfo = installinfo.decode('UTF-8')
if installinfo.startswith("installer:"):
# save all installer output in case there is
# an error so we can dump it to the log
installeroutput.append(installinfo)
msg = installinfo[10:].rstrip("\n")
if msg.startswith("PHASE:"):
phase = msg[6:]
if phase:
munkicommon.display_status_minor(phase)
elif msg.startswith("STATUS:"):
status = msg[7:]
if status:
munkicommon.display_status_minor(status)
elif msg.startswith("%"):
percent = float(msg[1:])
if os_version == '10.5':
# Leopard uses a float from 0 to 1
percent = int(percent * 100)
if munkicommon.munkistatusoutput:
munkistatus.percent(percent)
else:
munkicommon.display_status_minor(
"%s percent complete" % percent)
elif msg.startswith(" Error"):
munkicommon.display_error(msg)
if munkicommon.munkistatusoutput:
munkistatus.detail(msg)
elif msg.startswith(" Cannot install"):
munkicommon.display_error(msg)
if munkicommon.munkistatusoutput:
munkistatus.detail(msg)
else:
munkicommon.log(msg)
# installer exited
retcode = job.returncode()
if retcode != 0:
# append stdout to our installer output
installeroutput.extend(job.stderr.read().splitlines())
munkicommon.display_status_minor(
"Install of %s failed with return code %s" % (packagename, retcode))
munkicommon.display_error("-"*78)
for line in installeroutput:
munkicommon.display_error(line.rstrip("\n"))
munkicommon.display_error("-"*78)
restartneeded = False
elif retcode == 0:
munkicommon.log("Install of %s was successful." % packagename)
if munkicommon.munkistatusoutput:
munkistatus.percent(100)
return (retcode, restartneeded)
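# Minimal sketch of calling install() directly (the package path below is
# hypothetical); the caller decides what to do with the restart flag:
#
#   (retcode, needs_restart) = install('/tmp/Firefox.pkg')
#   if retcode == 0 and needs_restart:
#       pass  # schedule or prompt for a restart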
def installall(dirpath, choicesXMLpath=None, suppressBundleRelocation=False,
environment=None):
"""
Attempts to install all pkgs and mpkgs in a given directory.
Will mount dmg files and install pkgs and mpkgs found at the
root of any mountpoints.
"""
retcode = 0
restartflag = False
installitems = munkicommon.listdir(dirpath)
for item in installitems:
if munkicommon.stopRequested():
return (retcode, restartflag)
itempath = os.path.join(dirpath, item)
if munkicommon.hasValidDiskImageExt(item):
munkicommon.display_info("Mounting disk image %s" % item)
mountpoints = munkicommon.mountdmg(itempath, use_shadow=True)
if mountpoints == []:
munkicommon.display_error("No filesystems mounted from %s",
item)
return (retcode, restartflag)
if munkicommon.stopRequested():
munkicommon.unmountdmg(mountpoints[0])
return (retcode, restartflag)
for mountpoint in mountpoints:
# install all the pkgs and mpkgs at the root
# of the mountpoint -- call us recursively!
(retcode, needsrestart) = installall(mountpoint,
choicesXMLpath,
suppressBundleRelocation,
environment)
if needsrestart:
restartflag = True
if retcode:
# ran into error; should unmount and stop.
munkicommon.unmountdmg(mountpoints[0])
return (retcode, restartflag)
munkicommon.unmountdmg(mountpoints[0])
if munkicommon.hasValidInstallerItemExt(item):
(retcode, needsrestart) = install(
itempath, choicesXMLpath, suppressBundleRelocation, environment)
if needsrestart:
restartflag = True
if retcode:
# ran into error; should stop.
return (retcode, restartflag)
return (retcode, restartflag)
def copyAppFromDMG(dmgpath):
    '''copies an application from a DMG to /Applications.
    This installer_type ('appdmg') is deprecated; use the more
    generic copy_from_dmg instead.'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = munkicommon.mountdmg(dmgpath)
if mountpoints:
retcode = 0
appname = None
mountpoint = mountpoints[0]
# find an app at the root level, copy it to /Applications
for item in munkicommon.listdir(mountpoint):
itempath = os.path.join(mountpoint, item)
if munkicommon.isApplication(itempath):
appname = item
break
if appname:
# make an itemlist we can pass to copyItemsFromMountpoint
itemlist = []
item = {}
item['source_item'] = appname
item['destination_path'] = "/Applications"
itemlist.append(item)
retcode = copyItemsFromMountpoint(mountpoint, itemlist)
if retcode == 0:
# let the user know we completed successfully
munkicommon.display_status_minor(
"The software was successfully installed.")
else:
munkicommon.display_error(
"No application found on %s" % os.path.basename(dmgpath))
retcode = -2
munkicommon.unmountdmg(mountpoint)
return retcode
else:
munkicommon.display_error("No mountable filesystems on %s",
os.path.basename(dmgpath))
return -1
def copyItemsFromMountpoint(mountpoint, itemlist):
'''copies items from the mountpoint to the startup disk
Returns 0 if no issues; some error code otherwise.
If the 'destination_item' key is provided, items will be copied
as its value.'''
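    # Example itemlist entry (a sketch of the keys consumed below; values
    # are hypothetical, and 'user', 'group', and 'mode' are optional with
    # defaults of root/admin/o-w):
    #
    #   {'source_item': 'Firefox.app',
    #    'destination_path': '/Applications',
    #    'user': 'root', 'group': 'admin', 'mode': 'o-w'}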
for item in itemlist:
# get itemname
source_itemname = item.get("source_item")
dest_itemname = item.get("destination_item")
if not source_itemname:
munkicommon.display_error("Missing name of item to copy!")
return -1
# check source path
source_itempath = os.path.join(mountpoint, source_itemname)
if not os.path.exists(source_itempath):
munkicommon.display_error(
"Source item %s does not exist!" % source_itemname)
return -1
# check destination path
destpath = item.get('destination_path')
if not destpath:
destpath = item.get('destination_item')
if destpath:
# split it into path and name
dest_itemname = os.path.basename(destpath)
destpath = os.path.dirname(destpath)
if not destpath:
munkicommon.display_error("Missing destination path for item!")
return -1
if not os.path.exists(destpath):
munkicommon.display_detail(
"Destination path %s does not exist, will determine "
"owner/permissions from parent" % destpath)
parent_path = destpath
new_paths = []
# work our way back up to an existing path and build a list
while not os.path.exists(parent_path):
new_paths.insert(0, parent_path)
parent_path = os.path.split(parent_path)[0]
# stat the parent, get uid/gid/mode
parent_stat = os.stat(parent_path)
parent_uid, parent_gid = parent_stat.st_uid, parent_stat.st_gid
parent_mode = stat.S_IMODE(parent_stat.st_mode)
# make the new tree with the parent's mode
try:
os.makedirs(destpath, mode=parent_mode)
            except (IOError, OSError):
                munkicommon.display_error(
                    "There was an OS error in creating the path %s!"
                    % destpath)
                return -1
except BaseException:
munkicommon.display_error(
"There was an unknown error in creating the path %s!"
% destpath)
return -1
# chown each new dir
for new_path in new_paths:
os.chown(new_path, parent_uid, parent_gid)
# setup full destination path using 'destination_item', if supplied
if dest_itemname:
full_destpath = os.path.join(
destpath, os.path.basename(dest_itemname))
else:
full_destpath = os.path.join(
destpath, os.path.basename(source_itemname))
# remove item if it already exists
if os.path.exists(full_destpath):
retcode = subprocess.call(["/bin/rm", "-rf", full_destpath])
if retcode:
munkicommon.display_error(
"Error removing existing %s" % full_destpath)
return retcode
# all tests passed, OK to copy
munkicommon.display_status_minor(
"Copying %s to %s" % (source_itemname, full_destpath))
retcode = subprocess.call(["/bin/cp", "-pR",
source_itempath, full_destpath])
if retcode:
munkicommon.display_error(
"Error copying %s to %s" % (source_itempath, full_destpath))
return retcode
# set owner
user = item.get('user', 'root')
munkicommon.display_detail(
"Setting owner for '%s' to '%s'" % (full_destpath, user))
retcode = subprocess.call(
['/usr/sbin/chown', '-R', user, full_destpath])
if retcode:
munkicommon.display_error(
"Error setting owner for %s" % (full_destpath))
return retcode
# set group
group = item.get('group', 'admin')
munkicommon.display_detail(
"Setting group for '%s' to '%s'" % (full_destpath, group))
retcode = subprocess.call(
['/usr/bin/chgrp', '-R', group, full_destpath])
if retcode:
munkicommon.display_error(
"Error setting group for %s" % (full_destpath))
return retcode
# set mode
mode = item.get('mode', 'o-w')
munkicommon.display_detail(
"Setting mode for '%s' to '%s'" % (full_destpath, mode))
retcode = subprocess.call(['/bin/chmod', '-R', mode, full_destpath])
if retcode:
munkicommon.display_error(
"Error setting mode for %s" % (full_destpath))
return retcode
# remove com.apple.quarantine attribute from copied item
cmd = ["/usr/bin/xattr", full_destpath]
proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, dummy_err) = proc.communicate()
if out:
xattrs = str(out).splitlines()
if "com.apple.quarantine" in xattrs:
dummy_result = subprocess.call(
["/usr/bin/xattr", "-d", "com.apple.quarantine",
full_destpath])
# all items copied successfully!
return 0
def copyFromDMG(dmgpath, itemlist):
'''copies items from DMG to local disk'''
if not itemlist:
munkicommon.display_error("No items to copy!")
return -1
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = munkicommon.mountdmg(dmgpath)
if mountpoints:
mountpoint = mountpoints[0]
retcode = copyItemsFromMountpoint(mountpoint, itemlist)
if retcode == 0:
# let the user know we completed successfully
munkicommon.display_status_minor(
"The software was successfully installed.")
munkicommon.unmountdmg(mountpoint)
return retcode
else:
munkicommon.display_error(
"No mountable filesystems on %s" % os.path.basename(dmgpath))
return -1
def removeCopiedItems(itemlist):
'''Removes filesystem items based on info in itemlist.
These items were typically installed via DMG'''
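    # items_to_remove entries use the same keys as items_to_copy at install
    # time; the path removed is destination_path plus destination_item (or
    # source_item when no destination_item was recorded).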
retcode = 0
if not itemlist:
munkicommon.display_error("Nothing to remove!")
return -1
for item in itemlist:
if 'destination_item' in item:
itemname = item.get("destination_item")
else:
itemname = item.get("source_item")
if not itemname:
munkicommon.display_error("Missing item name to remove.")
retcode = -1
break
destpath = item.get("destination_path")
if not destpath:
munkicommon.display_error("Missing path for item to remove.")
retcode = -1
break
path_to_remove = os.path.join(destpath, os.path.basename(itemname))
if os.path.exists(path_to_remove):
munkicommon.display_status_minor('Removing %s' % path_to_remove)
retcode = subprocess.call(['/bin/rm', '-rf', path_to_remove])
if retcode:
munkicommon.display_error(
'Removal error for %s', path_to_remove)
break
else:
# path_to_remove doesn't exist
# note it, but not an error
munkicommon.display_detail("Path %s doesn't exist.", path_to_remove)
return retcode
def itemPrereqsInSkippedItems(item, skipped_items):
'''Looks for item prerequisites (requires and update_for) in the list
of skipped items. Returns a list of matches.'''
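    # 'requires' and 'update_for' entries use munki's "name" or
    # "name-version" string convention (e.g. 'Firefox' or 'Firefox-38.0',
    # hypothetical items); versions are normalized below via
    # updatecheck.trimVersionString.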
# shortcut -- if we have no skipped items, just return an empty list
# also reduces log noise in the common case
if not skipped_items:
return []
munkicommon.display_debug1(
'Checking for skipped prerequisites for %s-%s'
% (item['name'], item.get('version_to_install')))
# get list of prerequisites for this item
prerequisites = item.get('requires', [])
prerequisites.extend(item.get('update_for', []))
if not prerequisites:
munkicommon.display_debug1(
'%s-%s has no prerequisites.'
% (item['name'], item.get('version_to_install')))
return []
munkicommon.display_debug1('Prerequisites: %s' % ", ".join(prerequisites))
# build a dictionary of names and versions of skipped items
skipped_item_dict = {}
for skipped_item in skipped_items:
if skipped_item['name'] not in skipped_item_dict:
skipped_item_dict[skipped_item['name']] = []
normalized_version = updatecheck.trimVersionString(
skipped_item.get('version_to_install', '0.0'))
munkicommon.display_debug1(
'Adding skipped item: %s-%s',
skipped_item['name'], normalized_version)
skipped_item_dict[skipped_item['name']].append(normalized_version)
# now check prereqs against the skipped items
matched_prereqs = []
for prereq in prerequisites:
(name, version) = updatecheck.nameAndVersion(prereq)
munkicommon.display_debug1(
'Comparing %s-%s against skipped items', name, version)
if name in skipped_item_dict:
if version:
version = updatecheck.trimVersionString(version)
if version in skipped_item_dict[name]:
matched_prereqs.append(prereq)
else:
matched_prereqs.append(prereq)
return matched_prereqs
def installWithInfo(
dirpath, installlist, only_unattended=False, applesus=False):
"""
Uses the installlist to install items in the
correct order.
"""
restartflag = False
itemindex = 0
skipped_installs = []
for item in installlist:
# Keep track of when this particular install started.
utc_now = datetime.datetime.utcnow()
itemindex = itemindex + 1
if only_unattended:
if not item.get('unattended_install'):
skipped_installs.append(item)
munkicommon.display_detail(
('Skipping install of %s because it\'s not unattended.'
% item['name']))
continue
elif blockingApplicationsRunning(item):
skipped_installs.append(item)
munkicommon.display_detail(
'Skipping unattended install of %s because '
'blocking application(s) running.'
% item['name'])
continue
skipped_prereqs = itemPrereqsInSkippedItems(item, skipped_installs)
if skipped_prereqs:
# one or more prerequisite for this item was skipped or failed;
# need to skip this item too
skipped_installs.append(item)
if only_unattended:
format_str = ('Skipping unattended install of %s because these '
'prerequisites were skipped: %s')
else:
format_str = ('Skipping install of %s because these '
'prerequisites were not installed: %s')
munkicommon.display_detail(
format_str % (item['name'], ", ".join(skipped_prereqs)))
continue
if munkicommon.stopRequested():
return restartflag, skipped_installs
display_name = item.get('display_name') or item.get('name')
version_to_install = item.get('version_to_install', '')
retcode = 0
if 'preinstall_script' in item:
retcode = munkicommon.runEmbeddedScript('preinstall_script', item)
if retcode == 0 and 'installer_item' in item:
munkicommon.display_status_major(
"Installing %s (%s of %s)"
% (display_name, itemindex, len(installlist)))
installer_type = item.get("installer_type", "")
itempath = os.path.join(dirpath, item["installer_item"])
if installer_type != "nopkg" and not os.path.exists(itempath):
# can't install, so we should stop. Since later items might
# depend on this one, we shouldn't continue
munkicommon.display_error(
"Installer item %s was not found.", item["installer_item"])
return restartflag, skipped_installs
if installer_type.startswith("Adobe"):
retcode = adobeutils.doAdobeInstall(item)
if retcode == 0:
if (item.get("RestartAction") == "RequireRestart" or
item.get("RestartAction") == "RecommendRestart"):
restartflag = True
if retcode == 8:
# Adobe Setup says restart needed.
restartflag = True
retcode = 0
elif installer_type == "copy_from_dmg":
retcode = copyFromDMG(itempath, item.get('items_to_copy'))
if retcode == 0:
if (item.get("RestartAction") == "RequireRestart" or
item.get("RestartAction") == "RecommendRestart"):
restartflag = True
elif installer_type == "appdmg":
munkicommon.display_warning(
"install_type 'appdmg' is deprecated. Use 'copy_from_dmg'.")
retcode = copyAppFromDMG(itempath)
elif installer_type == 'profile':
# profiles.install_profile returns True/False
retcode = 0
identifier = item.get('PayloadIdentifier')
if not profiles.install_profile(itempath, identifier):
retcode = -1
elif installer_type == "nopkg": # Packageless install
if (item.get("RestartAction") == "RequireRestart" or
item.get("RestartAction") == "RecommendRestart"):
restartflag = True
elif installer_type != "":
# we've encountered an installer type
# we don't know how to handle
munkicommon.display_error(
"Unsupported install type: %s" % installer_type)
retcode = -99
else:
# better be Apple installer package
suppressBundleRelocation = item.get(
"suppress_bundle_relocation", False)
munkicommon.display_debug1(
"suppress_bundle_relocation: %s", suppressBundleRelocation)
if 'installer_choices_xml' in item:
choicesXMLfile = os.path.join(munkicommon.tmpdir(),
"choices.xml")
FoundationPlist.writePlist(item['installer_choices_xml'],
choicesXMLfile)
else:
choicesXMLfile = ''
installer_environment = item.get('installer_environment')
if munkicommon.hasValidDiskImageExt(itempath):
munkicommon.display_status_minor(
"Mounting disk image %s" % item["installer_item"])
mountWithShadow = suppressBundleRelocation
# we need to mount the diskimage as read/write to
# be able to modify the package to suppress bundle
# relocation
mountpoints = munkicommon.mountdmg(
itempath, use_shadow=mountWithShadow)
if mountpoints == []:
munkicommon.display_error("No filesystems mounted "
"from %s",
item["installer_item"])
return restartflag, skipped_installs
if munkicommon.stopRequested():
munkicommon.unmountdmg(mountpoints[0])
return restartflag, skipped_installs
retcode = -99 # in case we find nothing to install
needtorestart = False
if munkicommon.hasValidInstallerItemExt(
item.get('package_path', '')):
# admin has specified the relative path of the pkg
# on the DMG
# this is useful if there is more than one pkg on
# the DMG, or the actual pkg is not at the root
# of the DMG
fullpkgpath = os.path.join(
mountpoints[0], item['package_path'])
if os.path.exists(fullpkgpath):
(retcode, needtorestart) = install(
fullpkgpath, choicesXMLfile,
suppressBundleRelocation,
installer_environment)
else:
# no relative path to pkg on dmg, so just install all
# pkgs found at the root of the first mountpoint
# (hopefully there's only one)
(retcode, needtorestart) = installall(
mountpoints[0], choicesXMLfile,
suppressBundleRelocation, installer_environment)
if (needtorestart or
item.get("RestartAction") == "RequireRestart" or
item.get("RestartAction") == "RecommendRestart"):
restartflag = True
munkicommon.unmountdmg(mountpoints[0])
elif (munkicommon.hasValidPackageExt(itempath) or
itempath.endswith(".dist")):
(retcode, needtorestart) = install(
itempath, choicesXMLfile, suppressBundleRelocation,
installer_environment)
if (needtorestart or
item.get("RestartAction") == "RequireRestart" or
item.get("RestartAction") == "RecommendRestart"):
restartflag = True
else:
# we didn't find anything we know how to install
munkicommon.log(
"Found nothing we know how to install in %s"
% itempath)
retcode = -99
if retcode == 0 and 'postinstall_script' in item:
# only run embedded postinstall script if the install did not
# return a failure code
retcode = munkicommon.runEmbeddedScript(
'postinstall_script', item)
if retcode:
# we won't consider postinstall script failures as fatal
# since the item has been installed via package/disk image
# but admin should be notified
munkicommon.display_warning(
'Postinstall script for %s returned %s'
% (item['name'], retcode))
# reset retcode to 0 so we will mark this install
# as successful
retcode = 0
# record install success/failure
        if 'InstallResults' not in munkicommon.report:
munkicommon.report['InstallResults'] = []
if applesus:
message = "Apple SUS install of %s-%s: %s"
else:
message = "Install of %s-%s: %s"
if retcode == 0:
status = "SUCCESSFUL"
else:
status = "FAILED with return code: %s" % retcode
# add this failed install to the skipped_installs list
# so that any item later in the list that requires this
# item is skipped as well.
skipped_installs.append(item)
log_msg = message % (display_name, version_to_install, status)
munkicommon.log(log_msg, "Install.log")
# Calculate install duration; note, if a machine is put to sleep
# during the install this time may be inaccurate.
utc_now_complete = datetime.datetime.utcnow()
duration_seconds = (utc_now_complete - utc_now).seconds
download_speed = item.get('download_kbytes_per_sec', 0)
install_result = {
'display_name': display_name,
'name': item['name'],
'version': version_to_install,
'applesus': applesus,
'status': retcode,
'time': NSDate.new(),
'duration_seconds': duration_seconds,
'download_kbytes_per_sec': download_speed,
'unattended': only_unattended,
}
munkicommon.report['InstallResults'].append(install_result)
# check to see if this installer item is needed by any additional
# items in installinfo
# this might happen if there are multiple things being installed
# with choicesXML files applied to a metapackage or
# multiple packages being installed from a single DMG
foundagain = False
current_installer_item = item['installer_item']
# are we at the end of the installlist?
# (we already incremented itemindex for display
# so with zero-based arrays itemindex now points to the item
# after the current item)
if itemindex < len(installlist):
# nope, let's check the remaining items
for lateritem in installlist[itemindex:]:
if (lateritem.get('installer_item') ==
current_installer_item):
foundagain = True
break
# need to check skipped_installs as well
if not foundagain:
for skipped_item in skipped_installs:
if (skipped_item.get('installer_item') ==
current_installer_item):
foundagain = True
break
# ensure package is not deleted from cache if installation
# fails by checking retcode
if not foundagain and retcode == 0:
# now remove the item from the install cache
# (if it's still there)
itempath = os.path.join(dirpath, current_installer_item)
if os.path.exists(itempath):
if os.path.isdir(itempath):
retcode = subprocess.call(
["/bin/rm", "-rf", itempath])
else:
# flat pkg or dmg
retcode = subprocess.call(["/bin/rm", itempath])
if munkicommon.hasValidDiskImageExt(itempath):
shadowfile = os.path.join(itempath, ".shadow")
if os.path.exists(shadowfile):
retcode = subprocess.call(
["/bin/rm", shadowfile])
return (restartflag, skipped_installs)
def skippedItemsThatRequireThisItem(item, skipped_items):
'''Looks for items in the skipped_items that require or are update_for
the current item. Returns a list of matches.'''
# shortcut -- if we have no skipped items, just return an empty list
# also reduces log noise in the common case
if not skipped_items:
return []
munkicommon.display_debug1(
'Checking for skipped items that require %s' % item['name'])
matched_skipped_items = []
for skipped_item in skipped_items:
# get list of prerequisites for this skipped_item
prerequisites = skipped_item.get('requires', [])
prerequisites.extend(skipped_item.get('update_for', []))
munkicommon.display_debug1(
'%s has these prerequisites: %s'
% (skipped_item['name'], ', '.join(prerequisites)))
for prereq in prerequisites:
(prereq_name, dummy_version) = updatecheck.nameAndVersion(prereq)
if prereq_name == item['name']:
matched_skipped_items.append(skipped_item['name'])
return matched_skipped_items
def processRemovals(removallist, only_unattended=False):
'''processes removals from the removal list'''
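    # uninstall_method values handled below: 'removepackages',
    # 'remove_copied_items', 'remove_app', 'remove_profile',
    # 'uninstall_script', any 'Adobe*' method, or a filesystem path to an
    # executable uninstall script.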
restartFlag = False
index = 0
skipped_removals = []
for item in removallist:
if only_unattended:
if not item.get('unattended_uninstall'):
skipped_removals.append(item)
munkicommon.display_detail(
('Skipping removal of %s because it\'s not unattended.'
% item['name']))
continue
elif blockingApplicationsRunning(item):
skipped_removals.append(item)
munkicommon.display_detail(
'Skipping unattended removal of %s because '
'blocking application(s) running.' % item['name'])
continue
dependent_skipped_items = skippedItemsThatRequireThisItem(
item, skipped_removals)
if dependent_skipped_items:
# need to skip this too
skipped_removals.append(item)
munkicommon.display_detail(
'Skipping removal of %s because these '
'skipped items required it: %s'
% (item['name'], ", ".join(dependent_skipped_items)))
continue
if munkicommon.stopRequested():
return restartFlag, skipped_removals
if not item.get('installed'):
# not installed, so skip it (this shouldn't happen...)
continue
index += 1
display_name = item.get('display_name') or item.get('name')
munkicommon.display_status_major(
"Removing %s (%s of %s)...", display_name, index, len(removallist))
retcode = 0
# run preuninstall_script if it exists
if 'preuninstall_script' in item:
retcode = munkicommon.runEmbeddedScript('preuninstall_script', item)
if retcode == 0 and 'uninstall_method' in item:
uninstallmethod = item['uninstall_method']
if uninstallmethod == "removepackages":
if 'packages' in item:
if item.get('RestartAction') == "RequireRestart":
restartFlag = True
retcode = removepackages(item['packages'],
forcedeletebundles=True)
if retcode:
if retcode == -128:
message = ("Uninstall of %s was "
"cancelled." % display_name)
else:
message = "Uninstall of %s failed." % display_name
munkicommon.display_error(message)
else:
munkicommon.log(
"Uninstall of %s was successful.", display_name)
elif uninstallmethod.startswith("Adobe"):
retcode = adobeutils.doAdobeRemoval(item)
elif uninstallmethod == "remove_copied_items":
retcode = removeCopiedItems(item.get('items_to_remove'))
elif uninstallmethod == "remove_app":
remove_app_info = item.get('remove_app_info', None)
if remove_app_info:
path_to_remove = remove_app_info['path']
munkicommon.display_status_minor(
'Removing %s' % path_to_remove)
retcode = subprocess.call(
["/bin/rm", "-rf", path_to_remove])
if retcode:
munkicommon.display_error(
"Removal error for %s", path_to_remove)
else:
munkicommon.display_error(
"Application removal info missing from %s",
display_name)
elif uninstallmethod == 'remove_profile':
identifier = item.get('PayloadIdentifier')
if identifier:
retcode = 0
if not profiles.remove_profile(identifier):
retcode = -1
munkicommon.display_error(
"Profile removal error for %s", identifier)
else:
munkicommon.display_error(
"Profile removal info missing from %s", display_name)
elif uninstallmethod == 'uninstall_script':
retcode = munkicommon.runEmbeddedScript(
'uninstall_script', item)
if (retcode == 0 and
item.get('RestartAction') == "RequireRestart"):
restartFlag = True
elif os.path.exists(uninstallmethod) and \
os.access(uninstallmethod, os.X_OK):
# it's a script or program to uninstall
retcode = munkicommon.runScript(
display_name, uninstallmethod, 'uninstall script')
if (retcode == 0 and
item.get('RestartAction') == "RequireRestart"):
restartFlag = True
else:
munkicommon.log("Uninstall of %s failed because "
"there was no valid uninstall "
"method." % display_name)
retcode = -99
if retcode == 0 and item.get('postuninstall_script'):
retcode = munkicommon.runEmbeddedScript(
'postuninstall_script', item)
if retcode:
# we won't consider postuninstall script failures as fatal
# since the item has been uninstalled
# but admin should be notified
munkicommon.display_warning(
'Postuninstall script for %s returned %s'
% (item['name'], retcode))
# reset retcode to 0 so we will mark this uninstall
# as successful
retcode = 0
# record removal success/failure
if not 'RemovalResults' in munkicommon.report:
munkicommon.report['RemovalResults'] = []
if retcode == 0:
success_msg = "Removal of %s: SUCCESSFUL" % display_name
munkicommon.log(success_msg, "Install.log")
removeItemFromSelfServeUninstallList(item.get('name'))
else:
failure_msg = "Removal of %s: " % display_name + \
" FAILED with return code: %s" % retcode
munkicommon.log(failure_msg, "Install.log")
# append failed removal to skipped_removals so dependencies
# aren't removed yet.
skipped_removals.append(item)
removal_result = {
'display_name': display_name,
'name': item['name'],
'status': retcode,
'time': NSDate.new(),
'unattended': only_unattended,
}
munkicommon.report['RemovalResults'].append(removal_result)
return (restartFlag, skipped_removals)
def removeItemFromSelfServeUninstallList(itemname):
"""Remove the given itemname from the self-serve manifest's
managed_uninstalls list"""
ManagedInstallDir = munkicommon.pref('ManagedInstallDir')
selfservemanifest = os.path.join(
ManagedInstallDir, "manifests", "SelfServeManifest")
if os.path.exists(selfservemanifest):
# if item_name is in the managed_uninstalls in the self-serve
# manifest, we should remove it from the list
try:
plist = FoundationPlist.readPlist(selfservemanifest)
except FoundationPlist.FoundationPlistException:
pass
else:
plist['managed_uninstalls'] = [
item for item in plist.get('managed_uninstalls', [])
if item != itemname
]
try:
FoundationPlist.writePlist(plist, selfservemanifest)
except FoundationPlist.FoundationPlistException:
pass
def blockingApplicationsRunning(pkginfoitem):
"""Returns true if any application in the blocking_applications list
is running or, if there is no blocking_applications list, if any
application in the installs list is running."""
if 'blocking_applications' in pkginfoitem:
appnames = pkginfoitem['blocking_applications']
else:
# if no blocking_applications specified, get appnames
# from 'installs' list if it exists
appnames = [os.path.basename(item.get('path'))
for item in pkginfoitem.get('installs', [])
if item['type'] == 'application']
munkicommon.display_debug1("Checking for %s" % appnames)
running_apps = [appname for appname in appnames
if munkicommon.isAppRunning(appname)]
if running_apps:
munkicommon.display_detail(
"Blocking apps for %s are running:" % pkginfoitem['name'])
munkicommon.display_detail(
" %s" % running_apps)
return True
return False
def assertNoIdleSleep():
"""Uses IOKit functions to prevent idle sleep"""
# based on code by Michael Lynn, pudquick@github
kIOPMAssertionTypeNoIdleSleep = "NoIdleSleepAssertion"
kIOPMAssertionLevelOn = 255
reason = "Munki is installing software"
dummy_errcode, assertID = IOPMAssertionCreateWithName(
kIOPMAssertionTypeNoIdleSleep,
kIOPMAssertionLevelOn,
reason)
return assertID
def run(only_unattended=False):
"""Runs the install/removal session.
Args:
only_unattended: Boolean. If True, only do unattended_(un)install pkgs.
"""
# hold onto the assertionID so we can release it later
no_idle_sleep_assertion_id = assertNoIdleSleep()
managedinstallbase = munkicommon.pref('ManagedInstallDir')
installdir = os.path.join(managedinstallbase, 'Cache')
removals_need_restart = installs_need_restart = False
if only_unattended:
munkicommon.log("### Beginning unattended installer session ###")
else:
munkicommon.log("### Beginning managed installer session ###")
installinfopath = os.path.join(managedinstallbase, 'InstallInfo.plist')
if os.path.exists(installinfopath):
try:
installinfo = FoundationPlist.readPlist(installinfopath)
except FoundationPlist.NSPropertyListSerializationException:
munkicommon.display_error("Invalid %s" % installinfopath)
return -1
if (munkicommon.munkistatusoutput and
munkicommon.pref('SuppressStopButtonOnInstall')):
munkistatus.hideStopButton()
if "removals" in installinfo:
# filter list to items that need to be removed
removallist = [item for item in installinfo['removals']
if item.get('installed')]
munkicommon.report['ItemsToRemove'] = removallist
if removallist:
if munkicommon.munkistatusoutput:
if len(removallist) == 1:
munkistatus.message("Removing 1 item...")
else:
munkistatus.message("Removing %i items..." %
len(removallist))
munkistatus.detail("")
# set indeterminate progress bar
munkistatus.percent(-1)
munkicommon.log("Processing removals")
(removals_need_restart,
skipped_removals) = processRemovals(
removallist, only_unattended=only_unattended)
# if any removals were skipped, record them for later
installinfo['removals'] = skipped_removals
if "managed_installs" in installinfo:
if not munkicommon.stopRequested():
# filter list to items that need to be installed
installlist = [item for item in
installinfo['managed_installs']
if item.get('installed') == False]
munkicommon.report['ItemsToInstall'] = installlist
if installlist:
if munkicommon.munkistatusoutput:
if len(installlist) == 1:
munkistatus.message("Installing 1 item...")
else:
munkistatus.message(
"Installing %i items..." % len(installlist))
munkistatus.detail("")
# set indeterminate progress bar
munkistatus.percent(-1)
munkicommon.log("Processing installs")
(installs_need_restart, skipped_installs) = installWithInfo(
installdir, installlist,
only_unattended=only_unattended)
# if any installs were skipped record them for later
installinfo['managed_installs'] = skipped_installs
# update optional_installs with new installation/removal status
for removal in munkicommon.report.get('RemovalResults', []):
matching_optional_installs = [
item for item in installinfo.get('optional_installs', [])
if item['name'] == removal['name']]
if len(matching_optional_installs) == 1:
if removal['status'] != 0:
matching_optional_installs[0]['removal_error'] = True
matching_optional_installs[0]['will_be_removed'] = False
else:
matching_optional_installs[0]['installed'] = False
matching_optional_installs[0]['will_be_removed'] = False
for install_item in munkicommon.report.get('InstallResults', []):
matching_optional_installs = [
item for item in installinfo.get('optional_installs', [])
if item['name'] == install_item['name']
and item['version_to_install'] == install_item['version']]
if len(matching_optional_installs) == 1:
if install_item['status'] != 0:
matching_optional_installs[0]['install_error'] = True
matching_optional_installs[0]['will_be_installed'] = False
else:
matching_optional_installs[0]['installed'] = True
matching_optional_installs[0]['needs_update'] = False
matching_optional_installs[0]['will_be_installed'] = False
# write updated installinfo back to disk to reflect current state
try:
FoundationPlist.writePlist(installinfo, installinfopath)
except FoundationPlist.NSPropertyListWriteException:
# not fatal
munkicommon.display_warning(
"Could not write to %s" % installinfopath)
else:
if not only_unattended: # no need to log that no unattended pkgs found.
munkicommon.log("No %s found." % installinfo)
if only_unattended:
munkicommon.log("### End unattended installer session ###")
else:
munkicommon.log("### End managed installer session ###")
munkicommon.savereport()
# release our Power Manager assertion
dummy_errcode = IOPMAssertionRelease(no_idle_sleep_assertion_id)
return removals_need_restart or installs_need_restart
| UTF-8 | Python | false | false | 54,735 | py | 20 | installer.py | 13 | 0.567133 | 0.563844 | 0 | 1,305 | 40.942529 | 80 |
opeadeyomoye/learning-python | 8,005,819,073,330 | fb8b2344d244c9ec06facbcb510cf30fe6a8dc06 | 0ff544e93a68289ecf42fadda24716ff0dae093c | /the-basics/20-functions-and-files.py | 1a9b6d43a05e5e05dbb5e3dde7560b23ccc09c7e | []
| no_license | https://github.com/opeadeyomoye/learning-python | b34b0b1a44c8cc030260ea434413c9c002c737fd | 01727fc9e5060305bacc644781bfc5dfca7e5982 | refs/heads/master | 2021-05-08T00:11:39.204044 | 2017-12-17T23:47:50 | 2017-12-17T23:48:00 | 107,610,807 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import argv
script, inputFile = argv
def printAll(file):
print file.read()
def rewind(file):
file.seek(0)
def printLine(line_count, file):
print line_count, file.readline()
currentFile = open(inputFile)
print "First, we print the whole file:"
printAll(currentFile)
print "Now, we go back to ze beginning"
rewind(currentFile)
print "Then we print three lines:"
line = 1
printLine(line, currentFile)
line += 1
printLine(line, currentFile)
line += 1
printLine(line, currentFile)
| UTF-8 | Python | false | false | 508 | py | 19 | 20-functions-and-files.py | 17 | 0.724409 | 0.716535 | 0 | 31 | 15.387097 | 39 |
Muksam212/Datastructure-and-algorithm | 11,038,065,965,638 | bc9635b898bf49b65e2da9607f3216095d61d00f | a09f72615cd74e7be3db6761e6378db73cbe5999 | /bubblesort.py | cde7411755b29e41b164d2d7c2d0fae352f89780 | []
| no_license | https://github.com/Muksam212/Datastructure-and-algorithm | 671161c36b242670762500338e3ef49d953688e0 | 5e35c511b594da123de55a043aa96920f4c42fe5 | refs/heads/main | 2023-03-21T00:14:18.906562 | 2021-03-18T12:42:25 | 2021-03-18T12:42:25 | 349,069,186 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def bubble_sort(elements):
size=len(elements)
for i in range(size-1):
swapped=False
for j in range(size-i-1):
if elements[j]>=elements[j+1]:
tmp=elements[j]
elements[j]=elements[j+1]
elements[j+1]=tmp
swapped=True
if not swapped:
break
if __name__ == "__main__":
elements=[10,9,6,5,4,3,2,1]
bubble_sort(elements)
print(elements) | UTF-8 | Python | false | false | 477 | py | 8 | bubblesort.py | 7 | 0.486373 | 0.457023 | 0 | 17 | 26.176471 | 42 |
wk-ff/my-leetcode-python | 11,003,706,222,359 | f1739d540f5680a490f318aad5a08d8c5eacc8c5 | 9fa4966955d0cdd7123bec71093fb6b23baeaabf | /17.Letter_Conbinations_of_a_phone_number.py | 571cf05f55b0c7c4b769b68ebc9d698c8652db09 | []
| no_license | https://github.com/wk-ff/my-leetcode-python | 7cca50eabffa04007d086a22bbadfbd35d57f03c | a8d1724fd9340037c6c0dcac3b3d0e31fe04a2d5 | refs/heads/master | 2022-03-26T11:36:47.829692 | 2020-01-08T13:38:04 | 2020-01-08T13:38:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 24ms, 93.58%
# 12.6MB, less than 100%
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
strList = ['abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs',
'tuv', 'wxyz']
out = []
if len(digits) == 0:
return out
out = [s for s in strList[int(digits[0])-2]]
for digit in digits[1:]:
out = [s+d for s in out for d in strList[int(digit)-2]]
return out
| UTF-8 | Python | false | false | 447 | py | 23 | 17.Letter_Conbinations_of_a_phone_number.py | 22 | 0.503356 | 0.465324 | 0 | 13 | 33.384615 | 67 |
wrpearson/fasta36 | 3,934,190,089,799 | 816dc67ca0e7ddeee6d2072d2648f7468e8c8840 | f3878a19f8cd79cac4beb84ce73105f22bc5cc26 | /scripts/ann_pfam_sql.py | f5245317faebe038a0c8c220e45a8398a84a2ff7 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/wrpearson/fasta36 | 8c1a3f233de906b552e2768917cf89dc46f29f49 | 71b899362e2cef2f5a2c975e57f0f7995f93e3ea | refs/heads/v36.3.8 | 2023-06-10T12:00:55.143828 | 2023-05-31T23:54:18 | 2023-05-31T23:54:18 | 24,374,820 | 101 | 17 | Apache-2.0 | false | 2023-05-19T16:57:44 | 2014-09-23T14:30:42 | 2023-05-16T14:26:10 | 2023-05-03T17:24:27 | 6,766 | 90 | 13 | 6 | C | false | false | #!/usr/bin/env python3
################################################################
# copyright (c) 2022 by William R. Pearson and The Rector &
# Visitors of the University of Virginia */
################################################################
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under this License is distributed on an "AS
# IS" BASIS, WITHOUT WRRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
################################################################
# ann_pfam_www0.py takes an annotation file from fasta36 -V with a line of the form:
# sp|P0810|GSTM1_RAT [tab] seqlen
#
# and returns:
# >P08010|GSTM2_RAT
# 3 - 81 GST_N~1
# 105 - 189 GST_C~2
# This version has been re-written in python from ann_pfam_sql.pl
# by default, it shows clan assignments (not possible with ann_pfam_www.py)
# currently it does not implement virtual domains
import fileinput
import sys
import re
import json
import argparse
import mysql.connector
## import urllib.request
## import urllib.error
sql_get_pfam_acc = '''
SELECT seq_start, seq_end, model_start, model_end, model_length, pfamA_acc, pfamA_id, auto_pfamA_reg_full, domain_evalue_score as evalue, length
FROM pfamseq
JOIN pfamA_reg_full_significant using(pfamseq_acc)
JOIN pfamA USING (pfamA_acc)
WHERE in_full = 1
AND pfamseq_acc="%s"
ORDER BY seq_start
'''
sql_get_upfam_acc = '''
SELECT seq_start, seq_end, model_start, model_end, model_length, pfamA_acc, pfamA_id, auto_uniprot_reg_full as auto_pfamA_reg_full, domain_evalue_score as evalue, length
FROM uniprot
JOIN uniprot_reg_full using(uniprot_acc)
JOIN pfamA USING (pfamA_acc)
WHERE in_full = 1
AND uniprot_acc="%s"
ORDER BY seq_start
'''
sql_get_pfam_clan = '''
SELECT clan_acc, clan_id
FROM clan
JOIN clan_membership using(clan_acc)
WHERE pfamA_acc="%s"
'''
def get_seq_acc(seq_id):
if (re.search(r'^gi\|',seq_id)):
(tmp, gi, sdb, acc, id) = seq_id.split('|')
elif (re.search(r'^(sp|tr|up)\|', seq_id)):
(sdb, acc, id) = seq_id.split('|')
else:
acc = re.split(r'\s',seq_id)[0]
acc = re.sub(r'\.\d+$','',acc)
return acc
def get_pfam_clan(pf_acc, pf_id, db_cursor):
db_cursor.execute(sql_get_pfam_clan%(pf_acc,))
row = db_cursor.fetchone()
if (not row):
return (pf_acc, pf_id)
else:
return ("C."+row['clan_acc'],"C."+row['clan_id'])
def get_pfam_sql(acc, db_cursor):
pf_dom_list = []
prot_len = 0
db_cursor.execute(sql_get_pfam_acc%(acc,))
for row in db_cursor:
prot_len = row['length']
pf_dom_list.append(row)
if (len(pf_dom_list) == 0):
db_cursor.execute(sql_get_upfam_acc%(acc,))
for row in db_cursor:
prot_len = row['length']
pf_dom_list.append(row)
return(pf_dom_list, prot_len)
def add_nodoms(pf_dom_list, seq_len, min_nodom=10):
if (len(pf_dom_list) == 0):
return pf_dom_list
prev_dom = {'seq_end':0}
npf_domains = []
for curr_dom in pf_dom_list:
if(curr_dom['seq_start'] - prev_dom['seq_end'] > min_nodom):
new_dom = {'seq_start':prev_dom['seq_end']+1, 'seq_end':curr_dom['seq_start']-1,
'pfamA_acc':'NODOM', 'pfamA_id':'NODOM','clan_acc':'NODOM','clan_id':'NODOM'}
npf_domains.append(new_dom)
npf_domains.append(curr_dom)
prev_dom = {'seq_end':curr_dom['seq_end']}
if (seq_len - pf_dom_list[-1]['seq_end'] > min_nodom):
new_dom = {'seq_start':pf_dom_list[-1]['seq_end']+1, 'seq_end':seq_len,
'pfamA_acc':'NODOM', 'pfamA_id':'NODOM','clan_acc':'NODOM','clan_id':'NODOM'}
npf_domains.append(new_dom)
return npf_domains
def print_doms(seq_id, color_ix, args, dom_colors, dom_names, db_cursor):
this_acc = get_seq_acc(seq_id)
if (args.lav):
## --lav is missing a '\t-\t' field
print_fmt = "%d\t%d\t%s~%s"
else:
## default format
print_fmt = "%d\t-\t%d\t%s~%s"
(pf_dom_list, prot_len) = get_pfam_sql(this_acc, db_cursor)
if (not args.no_clans):
for dom in pf_dom_list:
(dom['clan_acc'], dom['clan_id']) = get_pfam_clan(dom['pfamA_acc'],dom['pfamA_id'],db_cursor)
## add no-doms if requested
if (args.neg_doms):
pf_dom_list = add_nodoms(pf_dom_list, prot_len, args.min_nodom)
for dom in pf_dom_list:
pf_acc = dom['pfamA_acc']
## check if domain has color number
if (pf_acc in dom_colors):
dom_color = dom_colors[pf_acc]
else:
dom_color = dom_colors[pf_acc] = str(color_ix)
color_ix += 1
## display id or acc?
pf_info = dom['clan_id']
if (args.no_clans):
pf_info = dom['pfamA_id']
if (args.pfam_acc):
pf_info = dom['clan_acc']
if (args.no_clans):
pf_info = dom['pfamA_acc']
if (args.acc_comment):
pf_info = "%s{%s}"%(pf_info,pf_acc)
if (args.bound_comment):
dom_color = "%d:%d"%(dom['seq_start'],dom['seq_end'])
print(print_fmt%(dom['seq_start'],dom['seq_end'],pf_info,dom_color))
return color_ix
def read_print_fd(fd, args, db_cursor):
dom_colors = {'NODOM':'0'}
dom_names = {}
color_ix = 1
for line in fd:
line = line.strip('\n')
seq_id = line.split('\t')[0]
print(">%s"%(seq_id))
color_ix = print_doms(seq_id, color_ix, args, dom_colors, dom_names,db_cursor)
def main() :
parser=argparse.ArgumentParser(description='ann_pfam_www.py P12345')
## db parameters
parser.add_argument('--host',dest='host',action='store',default='wrpxdb.bioch.virginia.edu')
parser.add_argument('--user',dest='user',action='store',default='web_user')
parser.add_argument('--passwd',dest='passwd',action='store',default='fasta_www')
parser.add_argument('--db',dest='db',action='store',default='pfam35')
## domain presentation parameters
parser.add_argument('--lav',dest='lav',action='store_true',default=False)
parser.add_argument('--acc_comment',dest='acc_comment',action='store_true',default=False)
parser.add_argument('--bound_comment',dest='bound_comment',action='store_true',default=False)
parser.add_argument('--no_clans',dest='no_clans',action='store_true',default=False)
parser.add_argument('--no-clans',dest='no_clans',action='store_true',default=False)
parser.add_argument('--neg_doms',dest='neg_doms',action='store_true',default=False)
parser.add_argument('--neg-doms',dest='neg_doms',action='store_true',default=False)
parser.add_argument('--min_nodom',dest='min_nodom',action='store',default=10)
parser.add_argument('--no_over',dest='no_over',action='store_true',default=False)
parser.add_argument('--no-over',dest='no_over',action='store_true',default=False)
parser.add_argument('--pfacc',dest='pfam_acc',action='store_true',default=False)
parser.add_argument('--pfam_acc',dest='pfam_acc',action='store_true',default=False)
parser.add_argument('files', metavar='FILE', help='files to read, stdin if empty', nargs='*')
args=parser.parse_args()
pf_db = mysql.connector.connect(db=args.db, host=args.host, user=args.user, passwd=args.passwd)
pf_cur = pf_db.cursor(dictionary=True, buffered=True)
## check for stdin input
if (len(args.files) == 0):
read_print_fd(sys.stdin, args, pf_cur)
return
try:
fd = open(args.files[0],'r')
read_print_fd(fd, args, pf_cur)
fd.close()
except:
color_ix = 1
dom_colors = {'NODOM':'0'}
dom_names = {}
for seq_id in args.files:
print(">%s"%(seq_id))
print_doms(seq_id, color_ix, args, dom_colors, dom_names,pf_cur)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 8,244 | py | 199 | ann_pfam_sql.py | 33 | 0.598253 | 0.590369 | 0 | 255 | 31.329412 | 169 |
andrew-houghton/misc-python-scripts | 5,085,241,305,093 | daf11a190148404aa076c9380fe072747822f230 | 2d3c712d1e836fdfc7e99696bdca214cf4392f1c | /fourier Analysis/myFourier.py | e343a989ff870069bd790ec04be2ede65602a88f | []
| no_license | https://github.com/andrew-houghton/misc-python-scripts | 4bcc8e04e02a2f33121ae929bc416d980dba482e | 691d63aadadd1d2add146bc19803d4fe37328338 | refs/heads/master | 2021-09-02T07:35:20.644367 | 2017-12-31T14:33:44 | 2017-12-31T14:33:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##Manual fourier analysis.
#Import necessary libraries. (do not use numpy.fft)
import numpy as np
#from scipy.io import wavfile
import matplotlib.pyplot as plt
pi=np.pi
#create sample data with frequency of 2pi
nums=np.arange(0,10*pi,pi/50)
data=np.sin(nums)
#show the sample data
plt.plot(nums,data)
waveIntensity=[]
soundArray=[]
waveRange=np.arange(0,4*pi,pi/50)
for curWave in waveRange:
soundArray=data#[:int(50*curWave/pi)] #only use 1 wavelength
#what distance is wavelength?
tempIntensity=sum([tData-np.sin(curWave*t)-np.cos(curWave*t) for tData,t in zip(soundArray,nums)])
waveIntensity.append(tempIntensity)
print tempIntensity
plt.plot(waveIntensity)
plt.show()
##for frequency in freqRange:
## tempIntensity=0
## print sum([np.sin(tData)+np.cos(tData) for tData in data])
## for tData in data:
## #for each data point find sin+cos of that point
## #put this data into an list
## tempIntensity+=np.sin(tData)+np.cos(tData)
## freq.append(tempIntensity)
## print tempIntensity
#display fourier
#remake function
#compare to original
| UTF-8 | Python | false | false | 1,152 | py | 42 | myFourier.py | 33 | 0.689236 | 0.677083 | 0 | 44 | 24.181818 | 102 |
pannal/Kitana | 12,421,045,467,941 | 4e14d425ff1a3cbee6735c31e23a14637d7de0e4 | fe09acf3920c2ae8449ed4d64c6773a8d8f0db1d | /tools/cache.py | 85353ea9bbd99beec9ff5afdfff9a1ee281d4825 | [
"MIT"
]
| permissive | https://github.com/pannal/Kitana | 8eb520ad1bfdf76537442ba743236fc842a206b5 | 287a8b778385d1ced921e0250665883891022951 | refs/heads/master | 2023-08-14T10:58:25.196287 | 2023-04-26T12:36:02 | 2023-04-26T12:36:02 | 152,675,031 | 460 | 39 | NOASSERTION | false | 2023-07-25T20:47:42 | 2018-10-12T01:09:12 | 2023-07-17T21:52:56 | 2023-07-25T20:47:41 | 830 | 447 | 26 | 19 | SCSS | false | false | # coding=utf-8
from cherrypy.lib.caching import MemoryCache
class BigMemoryCache(MemoryCache):
maxobj_size = 500000
maxsize = 50000000
| UTF-8 | Python | false | false | 145 | py | 26 | cache.py | 10 | 0.758621 | 0.655172 | 0 | 7 | 19.714286 | 44 |
alexcatmu/CFGS_DAM | 17,145,509,464,433 | 4d809e535b0214242e770884cdff49933de49dfd | 0362023a283a492733336dbe899714236b9a06ef | /PRIMERO/python/ejercicio52 cuenta2.py | 28584841bd365ecedd4cfd79bd052109c432fefe | []
| no_license | https://github.com/alexcatmu/CFGS_DAM | 205b8bcc6d09f8351894c5f70e1a354ff25c17a3 | 1a4384dee8833b5d8034fdf0909a0774cbe5b1c0 | refs/heads/master | 2020-04-07T18:27:15.225638 | 2019-12-17T19:34:39 | 2019-12-17T19:34:39 | 158,610,709 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
2.9) Feu un programa que llegeixi una frase, i busqui a dins
la paraula “hola”.
versió a) Si la troba, ens dirà la primera posició on
l’ha trobada (només la busca un cop)
versió b) Ens dirà totes les vegades que ha trobat la
paraula ( doncs la paraula pot estar repetida a la frase,
i cal seguir buscant-la ! )
'''
#programa encuentra a
#variables
frase = []
i = 0
aux = 0
compara = "hola"
comp = 0
posi = ""
posicion = ""
#codigo
frase = str(input())
for i in range(len(frase)):
if (frase[i] == compara[comp]):
comp = comp +1
if (comp+1 == len(compara)):
aux = aux + 1
comp = 0
posi = str(i)
posicion = posicion + posi + ","
else:
comp = 0
print("La palabra hola se ha encontrado", aux , "veces en las posiciones", posicion)
| UTF-8 | Python | false | false | 861 | py | 409 | ejercicio52 cuenta2.py | 263 | 0.584615 | 0.572781 | 0 | 39 | 20.615385 | 84 |
sajibhaskaran/Algorithms | 19,430,432,055,702 | 37a4a845f1ea3dedcf6e92e0ad14051463bfcc9f | 2db2f3f7a292c2c364b3353d74061dc2e2a87831 | /Palindroms/palindromes.py | f6a9f83bd989a44d308e5028e249a6a5be068679 | []
| no_license | https://github.com/sajibhaskaran/Algorithms | 477674f3ff7302329e1f520513d440cd12d7b9e4 | 15c2b81fc2a4c6bfbc18126a67b34ec4592fa0c6 | refs/heads/master | 2021-01-23T06:58:33.741369 | 2017-02-24T23:12:36 | 2017-02-24T23:12:36 | 80,497,212 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Purpose : Check for Palindromes
# Note : Need to remove all non-alphanumeric characters (punctuation, spaces and symbols) and
# turn everything lower case in order to check for palindromes
#
# Author : Saji Bhaskaran
import re
def palindrome(str):
str1 = re.sub(r"[^\w\s]", '', str)
str2 = re.sub(r"[\s+\_]", '', str1)
str3 = str2.lower()
str4 = str3[::-1]
if str3 == str4:
print("yes, it is a palindrome")
else:
print("no, it is not a palindrome")
palindrome("A man, a: plan, (a). -canal. Panama_");
palindrome("1 eye for of 1 eye.")
| UTF-8 | Python | false | false | 601 | py | 16 | palindromes.py | 16 | 0.602329 | 0.582363 | 0 | 22 | 26.272727 | 96 |
vforgione/metaltrenches | 14,714,558,002,232 | f07e4b4e09486605779ae387be960e1a91131ba8 | f9870caaaf130cbc73349449217f06833a9e609a | /metaltrenches/settings/common/dirs.py | dfb9a14d7dd7453b0629b8a00ed13fe0194dea25 | []
| no_license | https://github.com/vforgione/metaltrenches | 5a56f0aa5ef05df519d06e265c17971c6e519e6a | 1f95f940aa1fe625a0ec966f7b434569a3724d33 | refs/heads/master | 2021-03-22T03:49:32.071545 | 2018-12-08T15:13:17 | 2018-12-08T15:13:17 | 36,133,731 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
COMMON_SETTINGS_DIR = os.path.dirname(__file__)
SETTINGS_DIR = os.path.dirname(COMMON_SETTINGS_DIR)
PROJECT_DIR = os.path.dirname(SETTINGS_DIR)
BASE_DIR = os.path.dirname(PROJECT_DIR)
| UTF-8 | Python | false | false | 199 | py | 62 | dirs.py | 27 | 0.738693 | 0.738693 | 0 | 10 | 18.9 | 51 |
fgregg/craigslistings | 13,176,959,683,992 | 6a31c472611ce677b9e5d152a0ef8c8eab8a3614 | 550433f24df037ae9967c03e6ba0ae911f3e208b | /craigslistings/download.py | e7fc227366a6d5b5d08ae299b9e9b3484944a5d1 | [
"MIT"
]
| permissive | https://github.com/fgregg/craigslistings | e5dcce997363c672557741cd371092099ae3ac04 | bd90299537c9e34d5bd22d310780f269872f1789 | refs/heads/master | 2021-01-01T03:30:35.898679 | 2016-05-19T19:38:43 | 2016-05-19T19:38:43 | 58,775,492 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import logging
def save_rss(feed, city, cursor):
section, url = feed
url = url % city
logging.info('%s', city)
logging.info('%s', section)
logging.info('%s', url)
listing = requests.get(url).text
cursor.execute("""INSERT INTO rss (section, url, raw, city) """
"""VALUES (%s, %s, %s, %s) """
"""RETURNING rss_id""",
(section, url, listing, city))
if __name__ == '__main__':
import psycopg2
import urllib.error
import time
try:
from raven import Client
from .sentry import DSN
client = Client(DSN)
except ImportError:
pass
from . import config
logging.basicConfig(level=logging.INFO)
db = psycopg2.connect(database="neighborhood")
c = db.cursor()
for city in config.cities :
if city == "newyork" :
feeds = config.ny_feeds
else :
feeds = config.std_feeds
for feed in feeds:
try:
save_rss(feed, city, c)
except requests.HTTPError as e:
logging.info(e)
logging.info(url)
except:
client.captureException()
raise
db.commit()
time.sleep(1)
c.close()
db.close()
| UTF-8 | Python | false | false | 1,349 | py | 7 | download.py | 4 | 0.510007 | 0.507784 | 0 | 58 | 22.241379 | 67 |
ausk/pyco_utils | 13,829,794,704,377 | a35e6b027263984004356b712a32119de1826b90 | 418959edd5d99a8c526c3ba0253eba8e9f74fd36 | /pyco_utils/flask/upload_files.py | e6e73b0aa39a6d70bf6d1b448c0bdcd782f2bdc9 | []
| no_license | https://github.com/ausk/pyco_utils | 49c8c6842555638cc3be22748bc9891033ea34ac | 2e00ae45ddcfa29a8d4496eb63d3041e20d78f35 | refs/heads/master | 2020-04-01T04:39:13.950956 | 2017-11-06T20:44:25 | 2017-11-06T20:44:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import (
app,
request,
json_response,
)
def upload_file(file, path='.'):
filename = file.filename
filepath = '{}/{}'.format(path, filename)
file.save(filepath)
file.seek(0)
return path
@app.route('/upload/images', methods=['POST'])
def upload_images():
files = request.files
data = [upload_file(f) for f in files.values()]
return json_response(True, data=data)
| UTF-8 | Python | false | false | 416 | py | 29 | upload_files.py | 28 | 0.622596 | 0.620192 | 0 | 20 | 19.8 | 51 |
Bala-subu/test | 12,747,462,978,544 | fcc5f8c9853fac5e8d858d87bff6f0895c437f0c | b215139a56d45065986d8adf8646f2eac727069f | /project.py | a1bb9e35adffb2208d1f51e79c1a55a36ce75b92 | []
| no_license | https://github.com/Bala-subu/test | 577107cf68d57fd7f7d16442c8515d44ccb61413 | eaf70c69a56dcbec8f987d9a31c79af293e97455 | refs/heads/main | 2023-07-29T07:25:25.081121 | 2021-09-14T11:06:59 | 2021-09-14T11:06:59 | 385,990,802 | 0 | 0 | null | false | 2021-09-14T11:06:59 | 2021-07-14T15:37:52 | 2021-07-14T15:44:14 | 2021-09-14T11:06:59 | 2 | 0 | 0 | 0 | Python | false | false | from tkinter import *
root= Tk()
root.geometry("500x300")
def bala():
print("form submitted");
Label(root, text="REGISTRATION FORM", font="arial 20 bold").grid(row=0, column=2)
name= Label(root, text="Name")
phone= Label(root, text="Phone")
gender= Label(root, text="Gender")
email= Label(root, text="Email")
password= Label(root, text="Password")
name.grid(row=1, column=1)
phone.grid(row=2, column=1)
gender.grid(row=3, column=1)
email.grid(row=4, column=1)
password.grid(row=5, column=1)
namevalue =StringVar
phonevalue =StringVar
gendervalue =StringVar
emailvalue =StringVar
passwordvalue =StringVar
checkvalue=IntVar
nameentry=Entry(root, textvariable=namevalue).grid(row=1, column=2)
phoneentry=Entry(root, textvariable=phone).grid(row=2, column=2)
genderentry=Entry(root, textvariable=gendervalue).grid(row=3, column=2)
emailentry=Entry(root, textvariable=emailvalue).grid(row=4, column=2)
passwordentry=Entry(root, textvariable=passwordvalue).grid(row=5, column=2)
checkbtn=Checkbutton(text="remember me !!!!!", variable=checkvalue).grid(row=6, column=2)
Button(text="submit", command=bala).grid(row=7, column=2)
root.mainloop() | UTF-8 | Python | false | false | 1,195 | py | 1 | project.py | 1 | 0.723849 | 0.695397 | 0 | 43 | 25.837209 | 89 |
VoTonggg/bookaholic-project | 13,967,233,650,882 | 80ad7782ee4e51ef50105c5cb7fc71f72b1997c4 | 2abe23cdf20f918748832b10ca6465961e1ff533 | /nytbook_crawl.py | 761242b4824ee9a2c45f24e7eee29b399046d4b4 | []
| no_license | https://github.com/VoTonggg/bookaholic-project | a27912b71e0019893dbbf5f8a8346dbccbb49597 | 1a44a23b751402998d38e81bc6b5ee39c6c07fa5 | refs/heads/master | 2020-03-22T21:59:03.262399 | 2018-07-28T23:20:01 | 2018-07-28T23:20:01 | 140,726,240 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from urllib.request import urlopen, urlretrieve
from bs4 import BeautifulSoup
import pyexcel
import sys
# 1. Download Webpage
#1.1 Create a connection
a_list_of_dict = []
for i in range(1,2):
url = "http://www.ntybooks.com/z/nty/top/" + str(i)
#1.2
html_content = urlopen(url).read().decode('utf-8')
# 2. Extract ROI (region of interest)
soup = BeautifulSoup(html_content, "html.parser")
item_list = soup.find_all("div","product-meta")
# print(item_list)
title = []
link = []
for div in item_list:
title.append(div.h3.a.string.strip())
link.append(div.h3.a['href'])
print(link)
print(title)
item_list = soup.find_all("span", "amount")
price = []
for span in item_list:
price.append(span.string.strip())
# print(price)
item_list = soup.find_all("a", "thumb")
image = []
for a in item_list:
image.append(a.img["src"])
# print(image)
# a_list_of_dict = []
for i in range(len(title)):
dic = {}
dic['title'] = title[i]
dic['link'] = link[i]
dic['image'] = image[i]
dic['price'] = price[i]
a_list_of_dict.append(dic)
pyexcel.save_as(records = a_list_of_dict, dest_file_name="ntybooks.xlsx") | UTF-8 | Python | false | false | 1,263 | py | 19 | nytbook_crawl.py | 9 | 0.586698 | 0.577197 | 0 | 52 | 23.307692 | 73 |
aniknagato/CerebralCortex-DataAnalysis | 17,617,955,850,331 | 8ad3e44e7e9e94ef98c19b2cf9b9cef67c151cdf | 0188fc0a8d5b2a70e9c9e19b8c2c677d1498d162 | /core/feature/task_features/utils.py | f091474138ba8d500b0a9e6ee9d68700d824ab04 | []
| no_license | https://github.com/aniknagato/CerebralCortex-DataAnalysis | 32bea4b8eda4756f0b7cc22d36af02b386e96c2b | 78bd309063f0332f19fdd94f7f329e02bed21824 | refs/heads/master | 2020-03-08T09:56:42.219341 | 2018-04-13T18:01:57 | 2018-04-13T18:01:57 | 128,059,192 | 1 | 0 | null | true | 2018-04-09T05:13:27 | 2018-04-04T12:30:51 | 2018-04-09T05:05:55 | 2018-04-09T05:13:27 | 10,905 | 0 | 0 | 0 | Python | false | null | # Copyright (c) 2018, MD2K Center of Excellence
# -Mithun Saha <msaha1@memphis.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.cerebralcortex import CerebralCortex
from pprint import pprint
from scipy.io import savemat
from datetime import timedelta
from collections import OrderedDict
from cerebralcortex.core.util.data_types import DataPoint
from collections import OrderedDict
from typing import List
import math
import datetime
import pandas as pd
import pytz
import numpy as np
posture_stream_name = 'org.md2k.data_analysis.feature.body_posture.wrist.accel_only.10_second'
activity_stream_name = 'org.md2k.data_analysis.feature.activity.wrist.accel_only.10_seconds'
office_stream_name = 'org.md2k.data_analysis.gps_episodes_and_semantic_location_from_model'
beacon_stream_name = 'org.md2k.data_analysis.feature.v6.beacon.work_beacon_context'
def target_in_fraction_of_context(target_total_time,
context_with_time,
offset, context):
"""
This function total time of posture, activity with total time of
office and beacon to find fraction of time spent in posture,
activity per hour.
:param target_total_time: a dictionary of posture/activity total time
:param context_with_time: a dictionary of office/beacon intervals
:param context_type: office/beacon
:return: fraction of total time spent in posture/activity
in office/around beacon
"""
outputstream = [] # list of datapoints for output
total_context_time = timedelta(0)
# context_slot[0] = start time
# context_slot[1] = end time
for context_slot in context_with_time[context]:
total_context_time += context_slot[1] - context_slot[0]
context_with_time[context].sort()
context_start_time = context_with_time[context][0][0]
context_end_time = \
context_with_time[context][len(context_with_time[context]) - 1][1]
for target in target_total_time:
datapoint = DataPoint(context_start_time, context_end_time, offset,
[str(target),
float(format(target_total_time[target]/total_context_time*60,'.3f'))])
outputstream.append(datapoint)
return outputstream
def output_stream(targetconstruct_with_time, context_with_time,
offset):
"""
This function compares time intervals of posture or activity with time
intervals of office or beacon, to find overlapping time windows to
extract time intervals, in which posture/activity occurs in office/around
work beacon.
:param targetconstruct_with_time: a dictionary of posture/activity time intervals
:param context_with_time: a dictionary of office/beacon time intervals
:param offset: offset for time information
:return: a dictionray of total time spent for posture/activity,
a list of datapoints for output stream
"""
target_total_time = {} #total time for posture/activity
outputstream=[] #list of datapoints for output
if targetconstruct_with_time and context_with_time:
for target in targetconstruct_with_time:
#keeps running total time for posture/activity
time_diff = timedelta(0)
if target == 'sitting' or target == 'standing' or target == 'WALKING':
for time_slot in targetconstruct_with_time[target]:
for context in context_with_time:
if context == 'work' or context == '1':
for context_slot in context_with_time[context]:
start_time = max(time_slot[0],context_slot[0])
end_time = min(time_slot[1],context_slot[1])
if end_time > start_time :
datapoint = DataPoint(start_time, end_time,
offset, [target, context])
time_diff += end_time-start_time
outputstream.append(datapoint)
if target == 'sitting'or target == 'standing'or target == 'WALKING':
target_total_time[target] = time_diff
return target_total_time,outputstream
def process_data(data: List[DataPoint]):
"""
This function takes a list of data points of a stream.
For each datapoint, based on sample value, creats a
dictionary of start and end time.
:param user_id:list of datapoints
:return: a dictionary of start time and end time of sample value
"""
dicts = {}
if len(data)==0:
return None
for v in data:
time = []
if v.sample != None:
time.append(v.start_time)
time.append(v.end_time)
if type(v.sample) == list:
if v.sample[0] in dicts:
dicts[v.sample[0]].append(time)
else:
dicts[v.sample[0]] = []
dicts[v.sample[0]].append(time)
elif type(v.sample) == str:
if v.sample in dicts:
dicts[v.sample].append(time)
else:
dicts[v.sample] = []
dicts[v.sample].append(time)
elif type(v.sample) == np.str_:
if v.sample in dicts:
dicts[v.sample].append(time)
else:
dicts[v.sample] = []
dicts[v.sample].append(time)
elif type(v.sample) == int:
v.sample = str(v.sample)
if v.sample in dicts:
dicts[v.sample].append(time)
else:
dicts[v.sample] = []
dicts[v.sample].append(time)
return dicts
| UTF-8 | Python | false | false | 7,120 | py | 39 | utils.py | 28 | 0.630337 | 0.625 | 0 | 172 | 40.395349 | 101 |
luaffjk/AQV.GUI | 18,416,819,780,859 | 1e7872a02be43b42cf194d53dd39c20da9d33ae6 | 4a3b13ab8061e5991e77bbe32989353167f05307 | /metricas.py | 8180ec6ef89d90f7446dc6476aad9095d2873c38 | []
| no_license | https://github.com/luaffjk/AQV.GUI | 22d655266be9033dac6f5fee4fdf3c5b1acba3e0 | 18adc6addc565a138f2e2c6098f464aabe3730cb | refs/heads/master | 2021-01-19T21:29:45.892586 | 2017-01-30T18:32:38 | 2017-01-30T18:32:38 | 88,659,051 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy import misc
import metrica
def metricas (orig, test, arquivo):
arquiv = arquivo
subjectscores = open(arquiv, 'r')
conteudo_texto1 = subjectscores.read()
aa = conteudo_texto1.split('\n')
cc = aa[:-1]
subjectscores.close()
for i in xrange(0, len(cc)):
a = aa[i].split('\t')
dirOrig = orig
imag = a[0]
e = dirOrig+'/'+imag
ref = plt.imread(e)
dirTest = test
imag = a[1]
ee = dirTest + '/'+imag
teste = plt.imread(ee)
psrn = metrica.psnr(ref, teste)
mse = metrica.mse(ref, teste)
j = 0
a = ref[:,:,j]
b = teste[:,:,j]
ssim0 = metrica.msim(a, b)
j = j+1
a = ref[:,:,j]
b = teste[:,:,j]
ssim1 = metrica.msim(a, b)
j = j+1
a = ref[:,:,j]
b = teste[:,:,j]
ssim2 = metrica.msim(a, b)
ssim = (ssim0 + ssim1 + ssim2)/3
psrn2 = str(psrn)
ssim2 = str(ssim)
mse2 = str(mse)
print 'img' + str(i) + ';' + psrn2 + ';' + ssim2 + ';' + mse2
f = open('arquivo.txt','a')
f.write('img' + str(i) + ';' + psrn2 + ';' + ssim2 + ';' + mse2 + '\n' )
f.close()
| UTF-8 | Python | false | false | 1,364 | py | 7 | metricas.py | 3 | 0.440616 | 0.422287 | 0 | 52 | 25.192308 | 80 |
eebmagic/python_turtle_art | 7,851,200,251,618 | 98511df32633f9138558c133001aaa5b0b5b1d24 | aef3aa1f16b6c10f2ee1f05582be92eec6e5af02 | /triangle_wallpaper/triangleWallpaper.py | 89673372dd5a9b109bf30f7a14e069e9435ef33d | []
| no_license | https://github.com/eebmagic/python_turtle_art | b05574865082d475f38d127d9e9149dc7f383067 | d9d16ad47394833617405c85ad22007e7674fcbc | refs/heads/master | 2023-01-29T15:08:21.182173 | 2023-01-17T22:10:19 | 2023-01-17T22:10:19 | 204,382,000 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
def tri(turt, size=100):
for i in range(3):
turt.fd(size)
turt.left(120)
def drawThatShape(turt):
turt.begin_fill()
for i in range(3):
lastDistance = 100 + 20 * i
tri(turt, lastDistance)
turt.end_fill()
turt.fd(lastDistance + 20)
turt.left(120)
turt.fd(lastDistance + 20)
turt.hideturtle()
size = 2.5
t1 = turtle.Turtle()
t1.hideturtle()
t1.pensize(size)
t1.speed('fastest')
t2 = turtle.Turtle()
t2.hideturtle()
t2.pensize(size)
t2.speed('fastest')
colorSetting = (0.9, 0.3, 0.4)
screen = turtle.Screen()
screen.bgcolor(colorSetting)
drawThatShape(t1)
t2.fd(100)
t2.left(180)
drawThatShape(t2)
t1.hideturtle()
turtle.done()
| UTF-8 | Python | false | false | 669 | py | 27 | triangleWallpaper.py | 20 | 0.689088 | 0.618834 | 0 | 48 | 12.9375 | 30 |
ganl2805/Ejercicios_Python | 12,910,671,722,463 | 14a5e3c79f1edd42c65f74953157653154b93be0 | b04773a25d8fad1e0ef9cabcb2f4ac240c61f424 | /Programa3.py | c36b0fe9b0a151086a445cc2b9775b40cc6817bd | []
| no_license | https://github.com/ganl2805/Ejercicios_Python | 248823032cb1228f1176ae9352cde43901b7f6c3 | 79cb8777fb3977bd7840943c9a75daaa71a36ce4 | refs/heads/master | 2021-01-23T02:40:26.435413 | 2017-03-24T02:08:46 | 2017-03-24T02:08:46 | 86,016,294 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def tres():
i=13
n=1
a = int(input("Numero de impares apartir de 13: "))
while n<=a:
n+=1
print(i)
i+=2
tres()
| UTF-8 | Python | false | false | 141 | py | 4 | Programa3.py | 4 | 0.48227 | 0.432624 | 0 | 9 | 14.666667 | 54 |
24jllewis/deep-rl-tensorflow | 5,995,774,365,239 | f91e0656bbf25e330ac2a6da8f6c87627234ab24 | 94fed3e4e964b97cfe9ec1b563d251dc556408e8 | /networks/layers.py | 86d60520d12571b3822a3a4c5831b467663cec5f | [
"MIT"
]
| permissive | https://github.com/24jllewis/deep-rl-tensorflow | 37a2df57c5b77c4a9d45df13626b262b962c062d | 324998a1eb14f24d13bbd72a4ea3f202c92f03d4 | refs/heads/master | 2020-09-06T06:26:34.360365 | 2019-12-13T00:01:52 | 2019-12-13T00:01:52 | 220,350,644 | 1 | 0 | MIT | true | 2019-11-07T23:54:13 | 2019-11-07T23:54:12 | 2019-11-07T07:03:00 | 2018-06-04T07:19:33 | 613 | 0 | 0 | 0 | null | false | false | import tensorflow as tf
from functools import reduce
from tensorflow.contrib.layers.python.layers import initializers
def conv2d(x,
output_dim,
kernel_size,
stride,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer,
activation_fn=tf.nn.relu,
data_format='NHWC',
padding='VALID',
name='conv2d',
trainable=True):
with tf.variable_scope(name):
if data_format == 'NCHW':
stride = [1, 1, stride[0], stride[1]]
kernel_shape = [kernel_size[0], kernel_size[1], x.get_shape()[1], output_dim]
elif data_format == 'NHWC':
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.get_shape()[-1], output_dim]
w = tf.get_variable('w', kernel_shape,
tf.float32, initializer=weights_initializer, trainable=trainable)
conv = tf.nn.conv2d(x, w, stride, padding, data_format=data_format)
b = tf.get_variable('b', [output_dim],
tf.float32, initializer=biases_initializer, trainable=trainable)
out = tf.nn.bias_add(conv, b, data_format)
if activation_fn != None:
out = activation_fn(out)
return out, w, b
def linear(input_,
output_size,
weights_initializer=initializers.xavier_initializer(),
biases_initializer=tf.zeros_initializer,
activation_fn=None,
trainable=True,
name='linear'):
shape = input_.get_shape().as_list()
if len(shape) > 2:
input_ = tf.reshape(input_, [-1, reduce(lambda x, y: x * y, shape[1:])])
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
w = tf.get_variable('w', [shape[1], output_size], tf.float32,
initializer=weights_initializer, trainable=trainable)
b = tf.get_variable('b', [output_size],
initializer=biases_initializer, trainable=trainable)
out = tf.nn.bias_add(tf.matmul(input_, w), b)
if activation_fn != None:
return activation_fn(out), w, b
else:
return out, w, b
def batch_sample(probs, name='batch_sample'):
with tf.variable_scope(name):
uniform = tf.random_uniform(tf.shape(probs), minval=0, maxval=1)
samples = tf.argmax(probs - uniform, dimension=1)
return samples
| UTF-8 | Python | false | false | 2,319 | py | 8 | layers.py | 7 | 0.624838 | 0.611902 | 0 | 66 | 34.136364 | 84 |
ArkadiyLin/Protective-Groups-Reactivity | 8,297,876,839,299 | d3a0ad1431d75378c252563ff35846353ee2558a | 1317c0d8a5f7cd6a9ef5b0375a37c95fe36c0983 | /Smiles_read.py | e97cd00ca1cbfd2ca2f44c0005af8f4d4fef6a4c | []
| no_license | https://github.com/ArkadiyLin/Protective-Groups-Reactivity | 7ea1ec4d47d5f70b570ff79dd403122d7fe2cdda | 1b322c71002f0ceda3c24fba8e3fe0ceee45fa1e | refs/heads/master | 2020-12-24T17:54:51.645122 | 2015-09-04T07:28:03 | 2015-09-04T07:28:03 | 41,902,481 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'arkadii'
import subprocess as sp
def smiles_read(folder, way_smiles_reaction):
f = open(folder + way_smiles_reaction)
SMR = f.readlines()
f.close()
SMILES = []
SMILES_reaction = []
for i in range(len(SMR)):
SMILES.append(SMR[i].split('>>')[0].strip())
out = open(folder + 'query.smarts', 'w')
for i in range(len(SMR)):
SMILES_reaction.append(SMR[i].rstrip('\n'))
SMILES_reaction.append(SMR[i].rstrip('\n').split('>>')[0] + '>>' + SMR[i].rstrip('\n').split('>>')[0])
out.write('\n'.join(SMILES_reaction))
out.close()
sp.call(['molconvert', 'rdf', folder + 'query.smarts', '-o', folder + 'query.mapped.rdf'])
print ('Smiles has been read...')
return SMILES, SMILES_reaction | UTF-8 | Python | false | false | 763 | py | 24 | Smiles_read.py | 23 | 0.585845 | 0.581913 | 0 | 21 | 35.380952 | 110 |
Divi76h/python_first_project | 13,185,549,601,876 | d5988c79dce1d3bb0682a6259101ff119df11558 | 5d99edf0932308b4a87f765e2c9a0797eb721d0a | /gorgiassprial.py | 109cd8be16d0822df4b119882fba551ba0016491 | []
| no_license | https://github.com/Divi76h/python_first_project | 8f141c4bdd8342176e32aa10216cc2c9eb1c57b7 | cf86fd29c06c9b222cfbf5b1898c6df3e0c4ca77 | refs/heads/master | 2021-06-30T22:12:06.370800 | 2020-11-21T11:13:02 | 2020-11-21T11:13:02 | 189,379,998 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
import math
import random
import time
wn = turtle.Screen()
wn.bgcolor('black')
Albert = turtle.Turtle()
Albert.speed(0)
Albert.color('white')
rotate = int(360)
def drawCircles(t, size):
for i in range(10):
t.circle(size)
size = size-4
def drawSpecial(t, size, repeat):
for i in range(repeat):
drawCircles(t, size)
t.right(360/repeat)
drawSpecial(Albert, 100, 10)
Steve = turtle.Turtle()
Steve.speed(0)
Steve.color('yellow')
rotate = int(90)
def drawCircles(t, size):
for i in range(4):
t.circle(size)
size = size-10
def drawSpecial(t, size, repeat):
for i in range(repeat):
drawCircles(t, size)
t.right(360/repeat)
drawSpecial(Steve, 100, 10)
Barry = turtle.Turtle()
Barry.speed(0)
Barry.color('blue')
rotate = int(80)
def drawCircles(t, size):
for i in range(4):
t.circle(size)
size = size-5
def drawSpecial(t, size, repeat):
for i in range(repeat):
drawCircles(t, size)
t.right(360/repeat)
drawSpecial(Barry, 100, 10)
Terry = turtle.Turtle()
Terry.speed(0)
Terry.color('orange')
rotate = int(90)
def drawCircles(t, size):
for i in range(4):
t.circle(size)
size = size-19
def drawSpecial(t, size, repeat):
for i in range(repeat):
drawCircles(t, size)
t.right(360/repeat)
drawSpecial(Terry, 100, 10)
Will = turtle.Turtle()
Will.speed(0)
Will.color('pink')
rotate = int(90)
def drawCircles(t, size):
for i in range(4):
t.circle(size)
size = size-20
def drawSpecial(t, size, repeat):
for i in range(repeat):
drawCircles(t, size)
t.right(360/repeat)
drawSpecial(Will, 100, 10)
| UTF-8 | Python | false | false | 1,718 | py | 81 | gorgiassprial.py | 79 | 0.623981 | 0.583236 | 0 | 101 | 16.009901 | 33 |
ipums/mendeley-python-sdk | 2,791,728,753,841 | c752ac7f7ca5fb161ff5f6b238e08267489d9e23 | 2de5fd54fbf82ce012c6496b1495412b06b8516b | /setup.py | 9a2db7617881bfb794df2853e80d5ebed8d517cb | [
"Apache-2.0"
]
| permissive | https://github.com/ipums/mendeley-python-sdk | 0bc69ab7182ca82645397b067beaef647b40f31a | 0df41fbdafd164d27c42a101f2ed0bba91f3bdfb | refs/heads/master | 2022-12-18T01:12:25.000719 | 2022-12-09T20:19:50 | 2022-12-09T20:19:50 | 94,386,874 | 1 | 1 | Apache-2.0 | true | 2022-12-09T20:19:57 | 2017-06-15T01:25:11 | 2022-11-08T23:45:53 | 2022-12-09T20:19:50 | 445 | 1 | 1 | 0 | Python | false | false | from setuptools import setup
__version__ = None
with open('mendeley/version.py') as f:
exec(f.read())
setup(
name='mendeley',
version=__version__,
packages=['mendeley', 'mendeley.models', 'mendeley.resources'],
url='http://dev.mendeley.com',
license='Apache',
author='Mendeley',
author_email='api@mendeley.com',
description='Python SDK for the Mendeley API',
install_requires=[
'arrow==1.2.3',
'future==0.18.2',
'memoized-property==1.0.3',
'requests==2.28.1',
'requests-oauthlib==1.3.1',
'oauthlib==3.2.2'
],
tests_require=[
'pytest==7.2.0',
'vcrpy==4.2.1'
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
| UTF-8 | Python | false | false | 1,150 | py | 88 | setup.py | 80 | 0.573043 | 0.547826 | 0 | 42 | 26.380952 | 70 |
kaust-cs249-2020/reem-alghamdi | 3,770,981,292,220 | b8e184c15c08097ae6b83d70bfcd3dad9b311e37 | 3a6790f8945f64653b7afb31c6df8bdc05f0966c | /ch5/code/ch5_14.py | 29ecc24fc185b26e95542b594a2b6b7187962ceb | []
| no_license | https://github.com/kaust-cs249-2020/reem-alghamdi | bef6dc5f93d24a6541b7182dd6b98c6d2764d6b2 | ebcb329ab86d58fca734e3cb675af4bc4d7b1fbd | refs/heads/master | 2023-02-05T22:50:18.264405 | 2020-12-24T09:18:48 | 2020-12-24T09:18:48 | 291,866,793 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
@BY: Reem Alghamdi
@DATE: 15-10-2020
"""
import numpy as np
def multiple_longest_common_subsequence(v, w, u, sigma=0, scoring_matrix=None, matches=1, mismatches=0):
len_v = len(v)
len_w = len(w)
len_u = len(u)
s = np.zeros((len_v + 1, len_w + 1, len_u + 1), dtype=int)
backtrack = np.zeros((len_v + 1, len_w + 1, len_u + 1), dtype=int)
for i in range(1, len_v + 1):
for j in range(1, len_w + 1):
for k in range(1, len_u + 1):
m = int(scoring_matrix[v[i - 1]][w[j - 1]][u[k - 1]]) if scoring_matrix else matches if v[i - 1] == w[j - 1] == u[k - 1] else - mismatches
options = [s[i-1][j][k]-sigma, s[i][j-1][k]-sigma, s[i][j][k-1]-sigma,
s[i-1][j][k-1], s[i][j-1][k-1]-sigma, s[i-1][j-1][k],
s[i-1][j-1][k-1] + m]
backtrack[i][j][k], s[i][j][k] = max(enumerate(options), key=lambda p: p[1])
score_max = s[-1][-1][-1]
i = len_v
j = len_w
k = len_u
# print(s, backtrack)
while i != 0 and j != 0 and k != 0:
if backtrack[i][j][k] == 0:
i -= 1
w = w[:j] + "-" + w[j:]
u = u[:k] + "-" + u[k:]
elif backtrack[i][j][k] == 1:
j -= 1
v = v[:i] + "-" + v[i:]
u = u[:k] + "-" + u[k:]
elif backtrack[i][j][k] == 2:
k -= 1
v = v[:i] + "-" + v[i:]
w = w[:j] + "-" + w[j:]
elif backtrack[i][j][k] == 3:
i -= 1
k -= 1
w = w[:j] + "-" + w[j:]
elif backtrack[i][j][k] == 4:
j -= 1
k -= 1
v = v[:i] + "-" + v[i:]
elif backtrack[i][j][k] == 5:
i -= 1
j -= 1
u = u[:k] + "-" + u[k:]
else:
i -= 1
j -= 1
k -= 1
for r in range(i, max(i, j, k)):
v = v[:0] + "-" + v[0:]
for r in range(j, max(i, j, k)):
w = w[:0] + "-" + w[0:]
for r in range(k, max(i, j, k)):
u = u[:0] + "-" + u[0:]
return score_max, v, w, u
if __name__ == "__main__":
# print(multiple_longest_common_subsequence("ATATCCG", "TCCGA", "ATGTACTG"))
# print(multiple_longest_common_subsequence("A", "AT", "A"))
# print(multiple_longest_common_subsequence("AAAAT", "CCCCT", "T"))
# print(multiple_longest_common_subsequence("AT", "ACCT", "AGGGGT"))
# print(multiple_longest_common_subsequence("GGAG", "TT", "CCCC"))
# print(multiple_longest_common_subsequence("T", "T", "T"))
s, v, w, u = multiple_longest_common_subsequence("ACGGAGCT", "TGACTCAC", "GGTGGTTCC")
print(s)
print(v)
print(w)
print(u)
| UTF-8 | Python | false | false | 2,712 | py | 121 | ch5_14.py | 91 | 0.425147 | 0.398599 | 0 | 83 | 31.674699 | 154 |
MB-Huang/FIRA-marathon-arrow | 1,821,066,151,837 | 68988e3fc4e418e1a5473e419f237f6f6c6d135f | c9aa0df66677936df592cf75659ece4740c3d448 | /arrow_predict.py | 6b750370507cf4e5a849346847b0d6d642fcabde | []
| no_license | https://github.com/MB-Huang/FIRA-marathon-arrow | 8c6639ce1b61c70133616faa7b2132a4f3e09eac | cd6c03e22f1d7dd3205d83602e8ec1c0cd62aa54 | refs/heads/master | 2020-07-12T03:42:20.182140 | 2019-08-27T13:36:14 | 2019-08-27T13:36:14 | 204,709,181 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import sys
import os
# from PIL import Image
# from PIL import ImageFont
# from PIL import ImageDraw
import os
import csv
import h5py
import numpy as np
import keras
np.random.seed(1337) # for reproducibility
#from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dropout,Input,Dense,Reshape,Permute, Activation, Convolution2D, MaxPooling2D,GRU,Flatten,LSTM,TimeDistributed ,concatenate
from keras.optimizers import Adam,SGD,RMSprop,Adagrad
# from keras import optimizers
from keras.models import load_model
from keras.models import Model
#from keras.utils import plot_model
from keras.utils.vis_utils import plot_model
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
from keras import backend as K
import numpy as np
# import pyttsx
# from gtts import gTTS
import os
# import pygame
# from pygame import mixer # Load the required library
data_augmentation = True  # training-time option; unused at inference
num_classes = 4           # left, straight, right, none
index = 0
width = 220
height = 220
num_channels = 1          # grayscale input (3 for RGB)
num_pic = 1703
X_Pre = np.zeros((1, height, width, num_channels))          # single-frame batch for predict()
X_Train = np.zeros((num_pic, height, width, num_channels))  # training leftovers, unused below
Y_Train = np.zeros((num_pic, num_classes))                  # training leftovers, unused below
# camera device index: 0, 1 or 2; check attached devices with lsusb
cap = cv2.VideoCapture(1)
cap.set(3,640)
cap.set(4,480)
# time.sleep(2) #need this step
cap.set(15, -8.0)
#160.0 x 120.0
#176.0 x 144.0
#320.0 x 240.0
#352.0 x 288.0
#640.0 x 480.0
#1024.0 x 768.0
#1280.0 x 1024.0
index=0
# size = (int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
# int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# fourcc = cv2.cv.FOURCC(*'CVID')
# out = cv2.VideoWriter(filePath, fourcc, fps, size)
model = load_model('res_model_0811_20.h5')#testmodel
# model = load_model('proposedmodelII.h5')#
# model = load_model('proposedmodel.h5')#
result=np.zeros(4)
while (cap.isOpened()):
ret, frame = cap.read()
if ret == True:
x=100
y=20
w=440
h=440
res = frame[y:y+h, x:x+w]
res=cv2.resize(res,(width,height))
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
X_Pre[0,:,:,0] = np.array(gray)
X_Pre/=255
y_proba=model.predict(X_Pre,64,0)
y_max_pro=y_proba.max(axis=-1)
y_result=y_proba.argmax(axis=-1)
# cv2.putText(frame,printstring,(10,30), font, 1,(0,0,0),1,cv2.LINE_AA)
cv2.namedWindow('camera') # Create a named window
# cv2.moveWindow('camera', 0,300)
#
if y_result ==0:
printstring='left'
elif y_result ==1:
printstring='straight'
elif y_result ==2:
printstring='right'
else:
printstring='none'
# printstring+= str(y_max_pro)
font = cv2.FONT_HERSHEY_TRIPLEX
cv2.putText(frame,printstring,(100,20), font, 0.8,(255,150,0),1,cv2.LINE_AA)
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),1)
cv2.imshow('camera',frame)
# print cap.get(cv2.CAP_PROP_FPS)
# out.write(gray)
        # print(y_result)
        print(y_proba)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
# out.release()
cv2.destroyAllWindows()
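
# A minimal sketch (our assumption, inferred from the loop above rather than
# taken from the original author): inference preprocessing must mirror
# training, i.e. a 440x440 crop resized to 220x220, converted to grayscale
# and scaled to [0, 1]. The helper name `preprocess` is hypothetical.
def preprocess(frame, x=100, y=20, w=440, h=440):
    roi = cv2.resize(frame[y:y+h, x:x+w], (width, height))
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    return gray.astype("float32") / 255.0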
| UTF-8 | Python | false | false | 3,217 | py | 6 | arrow_predict.py | 6 | 0.646876 | 0.587504 | 0 | 119 | 26.033613 | 147 |
kusoof/wprof | 18,769,007,107,959 | 1a95d2c01f20a1007fec1b368d776f81a16163c8 | 8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6 | /WProf/build/masters/master.client.dart/.svn/text-base/slaves.cfg.svn-base | ac8f692ed6e07023bb640605177b99a49647b519 | []
| no_license | https://github.com/kusoof/wprof | ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e | 8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8 | refs/heads/master | 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- python -*-
# ex: set syntax=python:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# See master.experimental/slaves.cfg for documentation.
slaves = [
# MAC
{
'master': 'Dart',
'builder': ['vm-mac-debug'],
'hostname': 'vm602-m3',
'os': 'mac',
'version': '10.6',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['vm-mac-release'],
'hostname': 'vm603-m3',
'os': 'mac',
'version': '10.6',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-mac-inc'],
'hostname': 'vm604-m3',
'os': 'mac',
'version': '10.6',
'bits': '64'
},
{
'master': 'Dart',
'builder': ['dartium-mac-full'],
'hostname': 'vm605-m3',
'os': 'mac',
'version': '10.6',
'bits': '64'
},
{
'master': 'Dart',
'builder': ['dartium-mac-debug'],
'hostname': 'vm606-m3',
'os': 'mac',
'version': '10.6',
'bits': '64'
},
{
'master': 'Dart',
'builder': ['web-chrome-mac'],
'hostname': 'vm607-m3',
'os': 'mac',
'version': '10.6',
'bits': '64'
},
{
'master': 'Dart',
'builder': ['web-safari-mac'],
'hostname': 'vm608-m3',
'os': 'mac',
'version': '10.6',
'bits': '64',
},
# LINUX
{
'master': 'Dart',
'builder': ['vm-linux-debug'],
'hostname': 'vm92-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['vm-linux-release'],
'hostname': 'vm93-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['vm-win32-debug'],
'hostname': 'vm146-m3',
'os': 'win',
'version': 'win7',
'bits': '32',
},
{
'master': 'Dart',
'builder': ['vm-win32-release'],
'hostname': 'vm147-m3',
'os': 'win',
'version': 'win7',
'bits': '32',
},
{
'master': 'Dart',
'builder': ['dartc-linux-debug'],
'hostname': 'vm26-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartc-linux-release'],
'hostname': 'vm27-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart-editor'],
'hostname': 'vm5-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart-editor-win'],
'hostname': 'vm56-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart-editor-mac'],
'hostname': 'vm609-m3',
'os': 'mac',
'version': '10.6',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart-editor-linux'],
'hostname': 'vm54-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-1-4'],
'hostname': 'vm188-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-2-4'],
'hostname': 'vm160-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-3-4'],
'hostname': 'vm161-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-4-4'],
'hostname': 'vm162-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-checked-1-4'],
'hostname': 'vm194-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-checked-2-4'],
'hostname': 'vm164-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-checked-3-4'],
'hostname': 'vm165-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dart2js-linux-release-checked-4-4'],
'hostname': 'vm167-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-lucid64-inc'],
'hostname': 'vm129-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-lucid64-full'],
'hostname': 'vm130-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-lucid64-debug'],
'hostname': 'vm67-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-ff-linux'],
'hostname': 'vm16-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-chrome-linux'],
'hostname': 'vm17-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-opera-linux'],
'hostname': 'vm18-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
# Windows
{
'master': 'Dart',
'builder': ['dartium-win-full'],
'hostname': 'vm139-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-win-inc'],
'hostname': 'vm77-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['dartium-win-debug'],
'hostname': 'vm112-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-ff-win7'],
'hostname': 'vm148-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-chrome-win7'],
'hostname': 'vm192-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-ie-win7-1-2'],
'hostname': 'vm128-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
{
'master': 'Dart',
'builder': ['web-ie-win7-2-2'],
'hostname': 'vm195-m3',
'os': 'win',
'version': 'win7',
'bits': '64',
},
# Trunk
{
'master': 'Dart',
'builder': ['vm-linux-trunk', 'dartium-lucid64-full-trunk'],
'hostname': 'vm83-m3',
'os': 'linux',
'version': 'lucid',
'bits': '64',
},
]
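
# Each entry above binds one or more builder names to a single slave host;
# the 'os', 'version' and 'bits' fields describe the machine so the master
# can match builders to compatible platforms.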
| UTF-8 | Python | false | false | 6,490 | 687 | slaves.cfg.svn-base | 346 | 0.462096 | 0.415562 | 0 | 331 | 18.607251 | 72 |
|
sig49san/kyopro | 8,881,992,391,542 | b3d558f7563d1e0b0cad2fe1451d549a4fe75760 | b98d8899e99696247f2742302eb2501fe5f6a33d | /forWaterBlue/H.py | a1d1c018aa4bfb34b73fca1f7ab55c4052c43dfe | []
| no_license | https://github.com/sig49san/kyopro | bbc36dd082897e48ab5ab9d0854e28d13eb6c6f8 | ea49864b055d3ab996d3841392d5d8c6b1532075 | refs/heads/main | 2023-03-03T06:16:58.597057 | 2021-02-16T12:40:27 | 2021-02-16T12:40:27 | 339,395,608 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
N,M,K = list(map(int, input().split()))
As = list(map(int, input().split()))
Bs = list(map(int, input().split()))
Asdq = deque(As)
Bsdq = deque(Bs)
Asdq.append(10**10)
Bsdq.append(10**10)
#print(Asdq)
#print(Bsdq)
ans = 0
time = 0
A = Asdq.popleft()
B = Bsdq.popleft()
while True:
# print(A)
# print(B)
if(A == 10 ** 10 and B == 10 ** 10):
print(ans)
break
if(A >= B):
time += B
B = Bsdq.popleft()
else:
time += A
A = Asdq.popleft()
if(K >= time):
ans += 1
else:
print(ans)
break
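
# Note: the two deques act as sorted event streams terminated by a 10**10
# sentinel; each iteration consumes the cheaper head (a two-pointer merge),
# accumulating elapsed time and counting events until the budget K runs out.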
| UTF-8 | Python | false | false | 636 | py | 79 | H.py | 74 | 0.492138 | 0.462264 | 0 | 42 | 14.119048 | 40 |
sonyjagatap/MedhaTraining | 10,299,331,597,771 | 7ffc17b8ef2a798b53b4be51ed6b1dfb02b44bbb | ce8616ced58327a0f572a265d46ada5489939e56 | /shoba/pythonscripts/openbrowser.py | ccff5666e95f561e8e72fc855b6b5d2ba0a99259 | []
| no_license | https://github.com/sonyjagatap/MedhaTraining | 503a2f47dac8a71607724e9c1c20258d3921446d | 4e13a14f3d5463365ad03f1dcc5c5f74a4223539 | refs/heads/master | 2020-12-30T18:03:39.371072 | 2015-09-30T06:41:48 | 2015-09-30T06:41:48 | 28,310,297 | 0 | 0 | null | true | 2014-12-21T19:41:36 | 2014-12-21T19:26:29 | 2014-12-21T19:26:29 | 2014-12-21T19:41:36 | 2,054 | 0 | 0 | 0 | Python | null | null | import selenium
from selenium import webdriver
br = webdriver.Firefox()
br.get("https://google.com")
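# A possible cleanup step (an addition, not in the original script):
# br.quit() closes the browser window and shuts down the driver session.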
| UTF-8 | Python | false | false | 100 | py | 135 | openbrowser.py | 110 | 0.78 | 0.78 | 0 | 5 | 19 | 30 |
vassilux/ramses_proxy | 17,678,085,392,874 | c05518a42ac26b6ab804a33c3f9bb69a0776fea3 | 2e72926c888730d69d06c289b97b63d529c68819 | /tests/scripts/twistedUdpClient.py | b304918d5fbf971b5d008b0a1be057971d1d04bf | []
| no_license | https://github.com/vassilux/ramses_proxy | eee9076974a5ef17ccaf72a182ce8c0e1c67e33c | 6cfc9781d0e0cfdd4036c31c7e9c3a7e7d2d302c | refs/heads/master | 2020-05-17T10:49:52.456223 | 2014-09-09T09:19:32 | 2014-09-09T09:19:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Emulator for proxy load test
#
#
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.task import LoopingCall
import sys
from datetime import datetime
import os
import string
import random
host = "192.168.1.3"
port = 2036
my_id="1"
def id_generator(size=7, chars=string.ascii_uppercase + string.digits):
callref = '@'
generated = ''.join(random.choice(chars) for x in range(size))
callref += generated
return callref
class UdpClient(DatagramProtocol):
def startProtocol(self):
self.transport.connect(host, port)
def doPolling(self):
polling = "polling from %s at %s\n" %(my_id, datetime.now())
log.msg("Send polling : [%s]" % (polling))
self.transport.write(polling)
def doAlarm(self):
alarm = "alarm from %s at %s\n" %(my_id,datetime.now())
log.msg("Send alarm : [%s]" % (alarm))
self.transport.write(alarm)
    def datagramReceived(self, data, addr):
        log.msg("Received data from the server : [%r]." % (data,))
if __name__ == "__main__":
log.startLogging(sys.stdout)
my_id=id_generator()
udpclient = UdpClient()
reactor.listenUDP(0, udpclient)
#reactor.callWhenRunning(udpclient.doPolling)
lcPolling = LoopingCall(udpclient.doPolling)
lcPolling.start(1.0)
#
lcAlarm = LoopingCall(udpclient.doAlarm)
lcAlarm.start(5.0)
reactor.run()
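
# Note (an assumption about newer environments, not part of this Python 2
# script): on Python 3 with recent Twisted, transport.write() requires bytes,
# so the payloads above would need encoding first, e.g.
#     self.transport.write(polling.encode("utf-8"))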
| UTF-8 | Python | false | false | 1,409 | py | 44 | twistedUdpClient.py | 27 | 0.701916 | 0.688432 | 0 | 53 | 25.566038 | 71 |
ShivaPariyar/exc | 6,940,667,185,512 | f2e47505158fa80a0de799d0a6e9999becc43bc6 | a4eee56e749520e44c0adfc5fcda087699c09e06 | /Pycharm Sample Project/check/Random.py | 0f86fac30de7594b30df8ba75a6b19526f4a31ad | []
| no_license | https://github.com/ShivaPariyar/exc | e5e681be95f52e77845d8864793361d092f66224 | 7d232d50fdac2a2ba6d93541609eb485ef7a4aa7 | refs/heads/master | 2022-11-20T21:37:03.784189 | 2020-07-27T17:04:51 | 2020-07-27T17:04:51 | 282,962,445 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a_list = ["hero", "alian", "plane", "sky"]

def remove_duplicates(a_list):
    # Keep the first occurrence of each item, preserving order.
    seen = {}
    result = []
    for item in a_list:
        if item not in seen:
            seen[item] = True
            result.append(item)
    return result

print(remove_duplicates(a_list))
zhankq/pythonlearn | 10,110,353,052,485 | 961f467808dcf7cbf724dd7c18e725a0f21505c6 | b4afd14e3b4e9cff0a99906a69587e348b243aeb | /Hello.py | 18ce3c6be3d1da446440085a229bc58c2b18396c | []
| no_license | https://github.com/zhankq/pythonlearn | d694df23826cda6ba662e852e531e96a10ab2092 | cb714fbb8257193029f958e73e0f9bd6a68d77f1 | refs/heads/master | 2021-12-16T13:51:23.381206 | 2021-12-03T01:13:36 | 2021-12-03T01:13:36 | 205,632,135 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
import time
import urllib3
urllib3.disable_warnings()
def ios12_get_rank(keyword,appid):
st = time.time()
cookies = {
'wosid-lite': 'udbGWDV4NWTsCS8xPuGVcg',
'pldfltcid': '37d90499766645efaeffab0e31f26247055',
'ndcd': 'wc1.1.w-855182.1.2.YYTq2gydU37Bp1LD-JCstg%%2C%%2C.4LmgZ5CbRibf6Xj5DaXo9kIAD9o-ZNyXTSkIyF-ltVHOWvAHIipI0NtN9dgxgLE7CBe_ixKwC1M2L0c6TT3Vjjl3uKiL0bmkij-b5CNKHZ4IPrBKs0JHsxigWuYzLlwQoHUarFtPqqWmoyP3IXQD-08EMPbyKziAbMVdCpjKSM0%%2C',
'mz_at_ssl-16322342170': 'AwUAAAF4AAHVJgAAAABdCd/KNn5+rDEOUw2/ePGa9f9ywrHJQgU=',
'mz_at0-16322342170': 'AwQAAAF4AAHVJgAAAABdCd/KJQiA/YPuJHq8TXY2f1CYWDYGkFM=',
'itspod': '55',
'xp_ci': '3zhkROTz6MHz4S5zCwOzXiPqEDD4',
'xp_abc': 'tctfLBP2',
'xt-b-ts-16322342170': '1560928202008',
'X-Dsid': '16322342170',
'vrep': 'CJrajOc8EgQIAhAAEgQIARAAEgQIBBAAEgQIAxAAEgQIBRAAEgQIBhAAEgQICRAAEgQIBxAAEgQICBAAEgQIChAA',
'xt-src': 'b',
'xp_ab': '1#NyCxKBD+471+qnaQUdu0#isj11bm+9100+tctfLBP2',
}
headers = {
'Host': 'api-edge.apps.apple.com',
'User-Agent': 'AppStore/3.0 iOS/12.1.2 model/iPhone8,1 hwp/s8000 build/16C101 (6; dt:120) AMS/1',
'X-Apple-Store-Front': '143441-1,29 t:apps3',
'X-Apple-iAd-Request-Data': 'AAAAHAAAAV0K1gIJAACAx7ZE10ESEmNvbS5hcHBsZS5BcHBTdG9yZRoJaVBob25lOCwxJQAAAEEoAjIBMTodVmVyc2lvbiAxMi4xLjIgKEJ1aWxkIDE2QzEwMSlCCzE0MzQ0MS0xLDI5SgV6aF9DTlIOemhfSGFucy1QaW55aW5SDnpoX0hhbnMtUGlueWluUgVlbW9qaVIFZW5fVVNYAWAAaAGoAQCyASRCODI1RkUyNi1GODNFLTQ1OTgtOTJBOS02NEE0M0I0QjE1QkO6ASQwRDExMzBCOC05N0E0LTQxNEYtQkE1RC04QTI5N0Q3RUYyRTHKASlEUElELTZBMTZBNTkxLTQ5RDktNDlGNS1BQ0UxLTczQTRFNTk5RURBRfABAPoBJDU3OUZFRjcxLUE3MTAtNDEyRS1CREZELTFEN0FFQTJGRDU5M5ICAjAxmgIDNDYwqAICsgICZW64AgEQABgA',
'X-DSID': '16322342170',
'X-Apple-Tz': '28800',
'X-Apple-iAd-Env-Name': 'AAAAAAAAAEQKAlNTEjlodHRwczovL3RyLmlhZHNkay5hcHBsZS5jb20vYWRzZXJ2ZXIvMi42L3Nwb25zb3JlZC9zZWFyY2gaAzIuNg==',
'X-Apple-Client-Application': 'com.apple.AppStore',
'X-Apple-I-TimeZone': 'GMT+8',
'Connection': 'keep-alive',
'X-Apple-I-Client-Time': '2019-06-26T02:40:30Z',
'X-Apple-App-Store-Client-Request-Id': '5B28EEB8-1C6B-485C-ACDE-CE607E0D61A8',
'Authorization': 'Bearer eyJraWQiOiJGNDdEWk4xOEYwIiwiYWxnIjoiRVMyNTYifQ.eyJpc3MiOiJBUzI4UjdHMTdNIiwiaWF0IjoxNTYwNDkyODgyLCJleHAiOjE1NjMwODQ4ODJ9.vPl82SQDbs0o8TFBoEkvjSk3TzDdh5npaQlP2mQspVPv2qYOHscV-jun8o4PZmMSpwanmovCDCFf3bHHlBPZFQ',
'Accept-Language': 'zh-Hans-CN',
'X-Apple-I-MD-RINFO': '17106176',
'X-Apple-ADSID': '000651-08-e8e477c4-1d62-41e0-9236-a84ae8fa2803',
'Accept': '*/*',
'Accept-Encoding': 'br, gzip, deflate',
'X-Apple-I-MD-M': 'wpgb7FOqeEG2INWFLKKHRfSnUCAw15zJcIk8P8OLKR+Tp/1PV+VKMUDkOcTmcAo7dT835g/usrX+EztS',
'X-Apple-I-Locale': 'zh_CN',
'X-Apple-I-MD': 'AAAABQAAABBFuvbzMu+lvc0EVeOntirfAAAAAw==',
}
# proxies = {
# "https": "https://129.28.149.223:16819",
# }
# proxy = '129.28.149.223:16819'
# list = []
# proxies = {
# 'http': 'http://' + proxy,
# 'https': 'https://' + proxy,
# }
response = requests.get('https://api-edge.apps.apple.com/v1/catalog/cn/search?platform=iphone&extend=editorialBadgeInfo,messagesScreenshots,minimumOSVersion,requiredCapabilities,screenshotsByType,supportsFunCamera,videoPreviewsByType&include=apps,top-apps&bubble[search]=apps,developers,groupings,editorial-items,app-bundles,in-apps&term={}&l=zh-Hans-CN'.format(keyword), headers=headers, cookies=cookies, verify=False)
print(response.status_code)
print(response.text)
print(type(json.loads(response.text)))
print(json.loads(response.text)["results"]["search"]["data"])
allapp = json.loads(response.text)["results"]["search"]["data"]
app_id_list =[]
for i in allapp:
if i["type"]=="apps":
app_id_list.append(i["id"])
apprank = 0
    try:
        apprank = app_id_list.index(appid) + 1
    except ValueError:
        apprank = 0
et = time.time()
print("间隔时间",et-st)
return apprank
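
# Note: the rank is the 1-based position of appid among the "apps" entries
# returned for the search term; list.index() raises ValueError when the app
# is absent, which the except branch above maps to a rank of 0.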
if __name__ == '__main__':
rank = ios12_get_rank("卡牌游戏","1356040265")
print(rank)
'''
import time
import requests
import urllib3
urllib3.disable_warnings()
def get(keyword):
headers = {'x-apple-store-front': '143465-19,29 t:apps3','authorization': 'Bearer eyJraWQiOiJGNDdEWk4xOEYwIiwiYWxnIjoiRVMyNTYifQ.eyJpc3MiOiJBUzI4UjdHMTdNIiwiaWF0IjoxNTY3NTc4Mzc2LCJleHAiOjE1NzAxNzAzNzZ9.a9TkggDjSFDJ9YH7gzKsxA0Q2N2GMH8RK-2ZG1uVtOw9j1asG1ujMvH-uKXDynTBP3roNR6zM6i685BC38kITA'}
params = (('platform', 'iphone'),('extend', 'editorialBadgeInfo,messagesScreenshots,minimumOSVersion,requiredCapabilities,screenshotsByType,supportsFunCamera,videoPreviewsByType'),('include', 'apps,top-apps'),('bubble[search]', 'apps,developers,groupings,editorial-items,app-bundles,in-apps'),('term', '{}'.format(keyword)),('l', 'zh-Hans-CN'))
response = requests.get('https://api-edge.apps.apple.com/v1/catalog/cn/search', headers=headers, params=params,verify=False)
print(response.text)
get("快手")
'''
| UTF-8 | Python | false | false | 5,172 | py | 381 | Hello.py | 319 | 0.720303 | 0.62927 | 0 | 95 | 53.231579 | 515 |
willwng/QTClock | 2,422,361,603,063 | 67e60fb5776407b2a156df6300d78e7bb9012ff2 | 8b910815483e77b9289e38c5367883bd41d0c7da | /Old Versions/Clock.py | 2077471c59712fcff52ada0af445bf372b397cc4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/willwng/QTClock | 46482871c0ae93f8319166f94fc2d51fa804d229 | dba3862c38b7c91f20efc075f5294981ceb6aa87 | refs/heads/master | 2022-06-25T06:08:16.737805 | 2017-04-10T23:42:09 | 2017-04-10T23:42:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##Plotting with Turtle##
from turtle import *
import turtle
import random
import numpy
import math
from math import pi
import numpy as np
import csv
from datetime import datetime
import matplotlib.pyplot as plt
import os
import Tkinter
from Tkinter import *
from Tkinter import Tk
from tkFileDialog import askopenfilename
import tkMessageBox
import subprocess
#Turtle Prog
Lines = []
Connections = []
lineColors = []
lineNames = []
startingTimes = []
found = False
csv_location = ""
filename = ""
date_format = ""
def parseConcentrations(data, tformat):
conX = data[0]
conY = data[1]
timePlacements = []
cons = []
    #Split into X and Y
conC = 0
while(conC < len(conX)):
currentTime = conX[conC]
currentTime = currentTime.translate(None,':')
currentTime = datetime.strptime(currentTime,tformat)
timePlacements.append(360 * (float(currentTime.hour) / 24.0))
#print(timePlacements[conC])
conC += 1
cscounter, setCounter, thisCon, previousInterpolate, cic = 0, 0, 0, 0 ,0
nextCon, previousCon = conY[0], conY[0]
while(cscounter < 360):
if(cscounter == timePlacements[setCounter]) & (setCounter < len(timePlacements)):
cons.append(conY[setCounter])
thisCon = conY[setCounter]
previousInterpolate = thisCon
previousCon = conY[setCounter]
if(setCounter < len(timePlacements) - 1):
setCounter += 1
cic = timePlacements[setCounter] - timePlacements[setCounter - 1]
nextCon = conY[setCounter]
else:
thisCon = float(previousInterpolate) - ((float(previousCon) - float(nextCon)) / float(cic))
previousInterpolate = thisCon
cons.append(thisCon)
cscounter += 1
print(cons)
print(len(cons))
return(cons)
#Returns a 360-entry array of concentrations, linearly interpolated between samples
def readData(fileName, header = True):
x = []
y = []
with open(fileName,'r') as csvfile:
reader = csv.reader(csvfile,delimiter = ',')
for row in reader:
x.append(row[0])
y.append(row[1])
if(header == True):
x = x[1:]
y = y[1:]
rData = [x,y]
return(rData)
def parseData(data,dateFormat,length,smoothing = 1, yScale = 1000):
dataX = data[0]
dataY = data[1]
#Get the first day
startingTimes.append(dataX[0])
firstTime = 0
firstDay = 0
percentage = 0
#================#
firstSample = dataX[0]
firstSample = firstSample.translate(None,':')
firstSample = firstSample[:length]
firstSample = datetime.strptime(firstSample, dateFormat)
firstTime = firstSample.time()
firstDay = firstSample.day
#================#
##Making new data set
newX = []
newY = []
dataCounter = 0
while(dataCounter < len(dataX)):
percentage = float(dataCounter) / float(len(dataX))
percentage = percentage * 100
# print("%%%1.2f parsing.. "%percentage)
#convert the x
currentX = dataX[dataCounter]
currentY = dataY[dataCounter]
#Filter the current x to a datetime
currentX = currentX.translate(None,':')
currentX = currentX[:length]
currentX = datetime.strptime(currentX, dateFormat)
if(currentX.day == firstDay) | (firstTime < currentX.time()):
#Convert to 'weird' time
currentX = str(currentX.time())
currentX = currentX.translate(None,':')
currentX = float(currentX)
currentY = int(1000 * float(currentY))
newX.append(currentX)
newY.append(currentY)
dataCounter += 1
newData = [newX,newY]
if(smoothing > 1):
print("Smoothing data.. ")
smoothCounter = 0
smoothX = []
smoothY = []
while(smoothCounter < (len(newX) / smoothing)):
indexT = 0
indexF = smoothCounter * smoothing
indexT = indexF + smoothing
smoothedX = numpy.mean(newX[indexF:indexT])
smoothedY = numpy.mean(newY[indexF:indexT])
smoothX.append(smoothedX)
smoothY.append(smoothedY)
smoothCounter += 1
smoothedData = [smoothX,smoothY]
return(smoothedData)
else:
return(newData)
#return(newData)
#Parses x, y data (QTcB, etc.) into readable time and QTcB values
def everything():
xcounter1 = 0
xcounter2 = 0
xLength1 = random.randint(2,1000)
xLength2 = random.randint(2,1000)
xList = []
xList2 = []
yList = []
yList2 = []
while(xcounter1 < xLength1):
xVal = xcounter1
yVal = random.randint(0,100)
xList.append(xVal)
yList.append(yVal)
xcounter1 += 1
while(xcounter2 < xLength2):
xVal = xcounter1
yVal = random.randint(0,100)
xList2.append(xVal)
yList2.append(yVal)
xcounter2 += 1
print("Retrieving Data...")
theDat = readData(csv_location)
print("Data Received")
print("Parsing Data...")
theDatx = theDat[0]
    # CSV values come back as strings; convert the y series to floats for plotting.
    theDaty = [float(y) for y in theDat[1]]
newDAT = [theDatx[0:1000],theDaty[0:1000]]
someDat = parseData(theDat,date_format,17,smoothing = 1000,yScale = 1000)
first_x = theDatx[1]
blaa = len(theDatx)
ax = plt.subplot(111, polar=True)
first_x = first_x[11:16]
hours_x = float(first_x[:2])
minutes_x = first_x[-2:]
minutes_x = (float(minutes_x))/60
final_x = hours_x + minutes_x
equals = np.linspace(0, 360, blaa, endpoint=False) #np.arange(24)
ones = np.ones(blaa)
plt.plot(np.deg2rad(equals), theDaty)
# Set the circumference labels
ax.set_xticks(np.linspace(0, 2*np.pi, 24, endpoint=False))
ax.set_xticklabels(range(24))
ax.set_yticks((0.300,0.350,0.400,0.450,0.500,0.550,0.600))
ax.set_ylim([0.3,0.6])
ax.set_yticklabels(('300','350','400','450','500','550','600'))
# Make the labels go clockwise
ax.set_theta_direction(-1)
# Place 0 at the top
ax.set_theta_offset(np.pi/2.0)
plt.show()
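
# Note: the plot above spreads sample indices, not timestamps, evenly around
# the 24-hour dial -- np.linspace(0, 360, blaa) assumes uniformly spaced
# samples, so irregular sampling would distort where points land on the clock.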
top = Tkinter.Tk()
top.resizable(width=False, height=False)
top.geometry('{}x{}'.format(500, 500))
def findCSV():
#subprocess.Popen(r'explorer /select,"C:\path\of\folder\file"')
global filename
global found
global csv_location
filename = askopenfilename()
if(filename ==''):
tkMessageBox.showinfo("Error", "Please Specify File")
csv_location = str(filename)
found = True
print(csv_location)
def finished():
global date_format
date_format = E1.get()
if(date_format == ''):
date_format = '%Y-%m-%dT%H%M%S'
if(date_format == 'Date Format'):
date_format = '%Y-%m-%dT%H%M%S'
if(found == True):
everything()
else:
tkMessageBox.showinfo("Error", "Please Specify File")
L1 = Label(top, text="(Leave Blank for Default)")
E1 = Entry(top, bd =5)
E1.insert(0, 'Date Format')
Find = Tkinter.Button(top, text ="Find CSV", command = findCSV)
Done = Tkinter.Button(top, text ="Done", command = finished)
w = Tkinter.Label(top, text="QTClock",font=("Helvetica", 30))
w2 = Tkinter.Label(top, text="By Will Wang and Rowan McNitt", font=("Helvetica", 8))
# Place Everything on Canvas Window
L1.place(relx=0.3625,rely=0.45)
E1.place(relx=0.3625,rely=0.4)
Find.place(relx=0.25,rely=0.4)
Done.place(relx=0.625,rely=0.4)
w.place(relx=0.35,rely=0.1)
w2.place(relx=0.35,rely=0.2)
top.mainloop()
| UTF-8 | Python | false | false | 7,476 | py | 10 | Clock.py | 4 | 0.607678 | 0.573569 | 0 | 239 | 30.280335 | 103 |
KissBalazs/dipterv | 1,949,915,153,838 | e1cd2bdcca44c38e43da7f2ff55053217fe34aeb | 58aafe46b67bee59b078adffbec49915f9fdb0e9 | /LabelerApp/editor/topicmodeller/topicmodellerServices.py | 9ba5815c5fd1d54788fb192bc9f0e5e4ec01f8e4 | []
| no_license | https://github.com/KissBalazs/dipterv | 3cc410d7678378e9c94394692bf438bfa064b3e7 | ba88bb1099f4ac3ef9fd186d72029c1a24dbcde1 | refs/heads/master | 2021-01-19T13:27:10.793596 | 2017-05-12T06:59:14 | 2017-05-12T06:59:14 | 82,391,198 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'forestg'
from gensim import corpora, models, similarities
import os.path
import pickle
import sys
# enable logging:
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def yieldLabels(article,label):
stopwordlist = (u'a az egy be ki le fel meg el át rá ide oda szét össze vissza de hát és vagy hogy van lesz volt csak'+
u'nem igen mint én te õ mi ti õk ön ide volt ő ők ahogy ahol aki akik akkor alatt által általában '+
u'amely amelyek amelyekben amelyeket amelyet amelynek ami amit amolyan amíg amikor abban ahhoz annak '+
u'arra arról azok azon azt azzal azért aztán azután azonban bár belül benne cikk cikkek cikkeket e '+
u'eddig egész egyes egyetlen egyéb egyik egyre ekkor elég ellen elő először előtt első éppen ebben '+
u'ehhez emilyen ennek erre ez ezt ezek ezen ezzel ezért felé hanem hiszen hogyan így illetve ill. ill '+
u'ilyen ilyenkor ismét itt jó jól jobban kell kellett keresztül keressünk kívül között közül legalább '+
u'lehet lehetett legyen lenne lenni lett maga magát majd már más másik még mellett mert mely melyek '+
u'mit míg miért milyen mikor minden mindent mindenki mindig mintha mivel most nagy nagyobb nagyon ne '+
u'néha nekem neki néhány nélkül nincs olyan ott őket pedig persze s saját sem semmi sok sokat sokkal '+
u'számára szemben szerint szinte talán tehát teljes tovább továbbá több úgy ugyanis új újabb újra után '+
u'utána utolsó vagyis valaki valami valamint való vagyok vannak voltam voltak voltunk vele viszont '+
u'volna alá ha is ad szerző helyett amúgy főleg os es szerintem oka hozzászólás soha hozzászólások '+
u'száma kategória feladva komment szia hello üdv')
#
# dictionaryPath = 'documents/dictionary' # -- I do not use this at the moment for anything.
# corpusPath = "documents/corpus" # -- I do not use this at the moment for anything.
# documentsPath = 'documents/labelerAppDocuments'
# labelsPath = 'documents/labelerAppLabels'
dir = os.path.dirname(__file__)
    dictionaryPath = os.path.join(dir, 'documents/dictionary') # -- I do not use this at the moment for anything.
corpusPath = os.path.join(dir, 'documents/corpus') # -- I do not use this at the moment for anything.
documentsPath = os.path.join(dir, 'documents/labelerAppDocuments')
labelsPath = os.path.join(dir, 'documents/labelerAppLabels')
documents_list = []
labels_list = []
print("Initializing... given strings:")
print(article)
print(label)
print("IO actions...")
    #1. Open the stored documents.
if (os.path.isfile(documentsPath)):
print("Opening stored documents list...")
with open(labelsPath,'rb') as f:
labels_list = pickle.load(f)
with open(documentsPath, 'rb') as f:
documents_list = pickle.load(f)
print(documents_list)
else:
print("Creating new documents list fie...")
documents_list.append("dummy article")
labels_list.append("dummy labels")
print(documents_list)
#2. Tokenize and get rid of stop words.
print("--tokenized stopword-filtered:")
tokenizalt = [[word for word in document.lower().split() if word not in stopwordlist]
for document in documents_list]
print(tokenizalt)
    #2.5 TODO - add filtering of words that occur only once. Do I need it?
#3. create dictionary.
dictionary = corpora.Dictionary(tokenizalt)
# dictionary.save(dictionaryPath)
print("-- Dictionary:")
print(dictionary)
print(dictionary.token2id)
#4. transform it to vector space
print("--The corpus transformed:")
corpus = [dictionary.doc2bow(text) for text in tokenizalt]
# corpora.MmCorpus.serialize(corpusPath, corpus) # store to disk, for later use
print(corpus)
#5. Generate TFIDF model - don't need it now
# tfidf = models.TfidfModel(corpus)
#6 generate 2 dimensional space
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
    #7. Now work on the given text, to find the closest neighbor.
    print("--parameter text:")
print(article)
vec_bow = dictionary.doc2bow(article.lower().split())
print("in vec_bow:")
print(vec_bow)
vec_lsi = lsi[vec_bow] # convert the query to LSI space
print("in lsi_vec:")
print(vec_lsi)
#8 generate similarities index
index = similarities.MatrixSimilarity(lsi[corpus])
    #9. compare the query against the corpus.
sims = index[vec_lsi] # perform a similarity query against the corpus
print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples
print("----")
    # interpreting the result: a score between -1 and 1 for how similar each corpus document is to the query.
#10. get it sorted by the similarity score
sims = sorted(enumerate(sims), key=lambda item: -item[1])
print("--- Similarities:")
print(sims)
labels = label
#labels += " " + documents_list[sims[0][0]] # to get the documents instead (for debug)
newLabel = labels_list[sims[0][0]]
labels += u" " + newLabel # this gets the number of the highest scored document
    #11. Store the new data on disk.
documents_list.append(article)
labels_list.append(label)
with open(documentsPath, 'wb') as f:
pickle.dump(documents_list, f)
with open(labelsPath, 'wb') as f:
pickle.dump(labels_list, f)
print("Document list stored.");
print("-------------------------------------------------------Return value:")
return labels
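
# Note: the dictionary, LSI model and similarity index above are rebuilt from
# the full document list on every call, so each call slows down as the corpus
# grows. A sketch of one optimisation (not part of the original flow) would be
# to persist them between calls, e.g.:
#     dictionary.save(dictionaryPath)
#     corpora.MmCorpus.serialize(corpusPath, corpus)
# and reload with corpora.Dictionary.load() / corpora.MmCorpus() on startup.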
# TODO: do not leave these in, because they will run.
# reload(sys)
# sys.setdefaultencoding('utf8')
# Some dummy data for trying things out. It works in principle.
# print (yieldLabels("Ablak Rogán Rogln azasdasdasd Wikc","ElsőDefLabel"))
# print (yieldLabels("Orbán Viktor együtt focizott a Felcsúti kedves bácsikkal, Fidesz pólóban","MásodikDefLabel"))
# print (yieldLabels("Rogán Antal szerint a Fidesz legfőbb feladata, hogy foci stadionokat építsen","HarmadikDefLabel"))
# print (yieldLabels("Senki nem szereti már a facebookot - állapították meg angol kutatók","NegyedikDefLabel"))
# print (yieldLabels( "Bill Clinton újra indul fidesz az Ameriakai elnökválasztásért meg Fidesz.","ÖtödikDefLabel"))
| UTF-8 | Python | false | false | 6,564 | py | 93 | topicmodellerServices.py | 25 | 0.68061 | 0.675789 | 0 | 142 | 44.28169 | 123 |