repo_name stringlengths 7 111 | __id__ int64 16.6k 19,705B | blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 151 | content_id stringlengths 40 40 | detected_licenses list | license_type stringclasses 2 values | repo_url stringlengths 26 130 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 42 | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 14.6k 687M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at timestamp[ns] | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_size int64 0 10.2M ⌀ | gha_stargazers_count int32 0 178k ⌀ | gha_forks_count int32 0 88.9k ⌀ | gha_open_issues_count int32 0 2.72k ⌀ | gha_language stringlengths 1 16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10 2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10 2.95M | extension stringclasses 19 values | num_repo_files int64 1 202k | filename stringlengths 4 112 | num_lang_files int64 1 202k | alphanum_fraction float64 0.26 0.89 | alpha_fraction float64 0.2 0.89 | hex_fraction float64 0 0.09 | num_lines int32 1 93.6k | avg_line_length float64 4.57 103 | max_line_length int64 7 931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
gmsardane/Gens_Python_Codes | 16,106,127,374,353 | 631367a6706eb2b86549f62adf6c0d8fbb319973 | 9925fc7003da8282374cf31e689968b5a1e9ebd4 | /dist.py | 82861798db1ddbe53aa8692c017159f97b5e1664 | []
| no_license | https://github.com/gmsardane/Gens_Python_Codes | 8e88e9c69f692962e34d62b4a9f3ce4a8937934b | 5c88dc054e0695469bf1224b74abc8d7f920c420 | refs/heads/master | 2021-06-29T16:47:30.335005 | 2016-09-12T03:55:16 | 2016-09-12T03:55:16 | 29,333,860 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __all__ = ["d2NdWdz"]
import numpy as np
import math
#def d2NdWdz(data, p):
## Norm:
# Nweak = p[1] * (1.0 + np.array(data[:,0]))**(p[3]) #as a function of z: Here p[1] = Nweak at z=0
## Strong component
# argstr = (1.0 + np.array(data[:,0]))**(-p[6]) / p[2]
# #Nstr = (1.0-p[1]) (1.0 + np.array(data[:,0]))**(p[5])
# term1 = (1.0 - p[1]) * (1.0 + np.array(data[:,0]))**(-p[5]) * np.exp(-data[:,1] * argstr) * argstr
# # Weak component
# argwk = (1.0 + np.array(data[:,0]))**(-p[4]) / p[0]
# term2 = Nweak * np.exp(-data[:,1] * argwk ) * argwk
# return np.array(term1 + term2)
#p = [Wwk, Nratio, Wstr, alp_wk, beta_wk, alpha_str, beta_str]
def d2NdWdz(data, p):
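    # d2N/dW/dz model: the sum of a 'strong' and a 'weak' exponential component,
    # where data[:,0] is redshift z and data[:,1] is the width W; each component
    # has its own W* scale and (1+z) power-law evolution (see the parameter list
    # at the end of the file).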
# Strong component
    # Strong term: W* = p[2], normalisation = p[3], exponents alp_str = p[6], beta_str = p[7]
term1p1 = (1.0 + np.array(data[:,0]))**(p[6] - p[7])
argz1 = (1.0 + np.array(data[:,0]))**(-p[7])
term1p2 = np.exp(-data[:,1]/p[2] * argz1)
term1 = (p[3]) * term1p1 * term1p2/p[2]
# Weak component
    # Weak term: W* = p[0], normalisation = p[1], exponents alp_wk = p[4], beta_wk = p[5]
term2p1 = (1.0 + np.array(data[:,0]))**(p[4] - p[5]) #weak term
#fac = np.zeros(len(data))
#for i in range(len(data)):
argz2 = (1.0 + np.array(data[:,0]))**(-p[5])
term2p2 = np.exp(- data[:,1]/p[0] * argz2)
# fac[i] = term2p2
term2 = p[1] * term2p1 * term2p2 /p[0]#fac/p[0]
#fac[i] = t1 * t2
return (term1 + term2)
#p = [Wwk, Nwk, Wstr, Nstr, alp_wk, beta_wk, alp_str, beta_str]
| UTF-8 | Python | false | false | 1,440 | py | 16 | dist.py | 15 | 0.53125 | 0.4625 | 0 | 40 | 34.95 | 105 |
syurskyi/Python_Topics | 18,150,531,806,524 | de6b14c57c6bb5e86a8d2f188864c17901408bc2 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/003_factories/examples/abstract_factory/8-Abstract Factory Pattern/AbstractFactory/autos/ford/lincoln.py | 008a2d42887135fae4ee314eae84c5c4bdd1093b | []
| no_license | https://github.com/syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | false | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | 2022-11-03T01:22:28 | 2023-02-16T03:08:09 | 198,671 | 2 | 2 | 33 | Python | false | false | from autos.abs_auto import AbsAuto
class LincolnMKS(AbsAuto):
def start(self):
print('Lincoln MKS running smoothly.')
def stop(self):
print('Lincoln MKS shutting down.')
| UTF-8 | Python | false | false | 195 | py | 15,362 | lincoln.py | 14,734 | 0.671795 | 0.671795 | 0 | 7 | 26.857143 | 46 |
TrendingTechnology/facerec-bias-bfw | 11,759,620,496,092 | 487094171dd52567f6907671c9f14575a1ce604a | 0d6e501171f47c1c45c3fb0f037bdf0159f2a344 | /code/facebias/metrics.py | 252b2590c1a478dd19ad560e57e924fc5a9f7753 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/TrendingTechnology/facerec-bias-bfw | ed618c330ef5b951fdcd580c6a729b96bbeb9167 | 66fd920c451bea18ca0bd592b851b51f7308ed6a | refs/heads/master | 2023-05-31T00:07:55.883487 | 2021-06-15T02:46:56 | 2021-06-15T02:46:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from numpy import greater_equal
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
precision_score,
recall_score,
roc_curve,
)
# @author Joseph P. Robinson
# @date 18 January 2020
class Metrics:
"""
Calculate pair-wise metrics.
UPDATE: Now based on sklearn.metrics.
Confusion stats:
TP: true positive, TN: true negative,
FP: false positive, FN: false negative
Predicted Classes
p' n'
___|_____|_____|
Actual p | | |
Classes n | | |
precision = TP / (TP + FP) per class label
recall = TP / (TP + FN) per class label
specificity = TN / (FP + TN) per class label
fscore = 2*TP /(2*TP + FP + FN) per class label
True positives (TP) are documents in the same cluster; True negatives (TN)
are two dissimilar documents in two different clusters. There are two error
types: A (FP) decision is when two dissimilar documents are assumed the
same. A (FN) decision is when two similar documents are in different
classes or not considered the same.
"""
n_samples = None
n_predicted = None
true_labels = None
n_classes = None
predicted_labels = None
confusion_stats = {}
def __init__(self):
self.data_is_loaded = False
def fit(self, true_labels, predicted_labels, set_confusion_stats=True):
self.true_labels = true_labels
self.predicted_labels = predicted_labels
self.n_samples = len(true_labels)
self.n_classes = len(np.unique(self.true_labels))
self.data_is_loaded = True
if set_confusion_stats:
self._set_confusion_stats()
def __repr__(self):
if self.confusion_stats:
stats = self.confusion_stats
tp, tn, fp, fn = stats["tp"], stats["tn"], stats["fp"], stats["fn"]
else:
tp = tn = fp = fn = "Not Set"
return (
"CONFUSION METRICS:\n"
"===============\n"
"TP:\t{}\n"
"TN:\t{}\n"
"FP:\t{}\n"
"FN:\t{}\n"
"N_CLASSES:\t{}\n"
"N_SAMPLES:\t{}".format(tp, tn, fp, fn, self.n_classes, self.n_samples)
)
def _check_state(self, check_stats=False):
if self.n_samples is None or not self.data_is_loaded:
print(
"Data must to set. Return NONE. See self.fit() in {}()".format(
self.__class__
)
)
return False
if check_stats and not self.confusion_stats:
self._set_confusion_stats()
return True
def _set_confusion_stats(self):
"""
Calculate TP, FP, TN, and FN and store in dictionary container.
:return: Confusion stats {TP, FP, TN, FN} (dictionary)
"""
tn, fp, fn, tp = confusion_matrix(
self.true_labels, self.predicted_labels
).ravel()
(
self.confusion_stats["tn"],
self.confusion_stats["fp"],
self.confusion_stats["fn"],
self.confusion_stats["tp"],
) = (tn, fp, fn, tp)
self.confusion_stats["n_neg"] = tn + fn
self.confusion_stats["n_pos"] = tp + fp
def precision(self):
"""
Precision (P): How accurate are the positive predictions.
Precision = TP / (TP + FP) (per class)
:return: Precision value (float)
"""
if not self._check_state():
return None
return precision_score(self.true_labels, self.predicted_labels)
def recall(self):
"""
Recall (R): Coverage of actual positive sample.
R = TP / (TP + FN)
:return: Recall value (float)
"""
if not self._check_state():
return None
return recall_score(self.true_labels, self.predicted_labels)
def accuracy(self):
"""
Accuracy (Acc): Overall performance of model
Acc = (TP + TN) / (TP + FP + FN + TN)
"""
if not self._check_state():
return None
return accuracy_score(self.true_labels, self.predicted_labels)
def specificity(self):
"""
TODO - implement (stats["tn"] / (stats["tn"] + stats["fp"]))
Recall = TN / (TN + FP)
"""
if not self._check_state():
return None
pass
def f1score(self):
"""
Recall = 2TP / (2TP + FP + FN)
"""
if not self._check_state():
return None
return f1_score(self.true_labels, self.predicted_labels)
def calculate_negative_rates(self):
"""
Calculate FMR and FNMR.
:return:
"""
if not self._check_state(check_stats=True):
return None
tn, fn, total_negative = (
self.confusion_stats["tn"],
self.confusion_stats["fn"],
self.confusion_stats["n_neg"],
)
fm_rate = tn / total_negative
fnm_rate = fn / total_negative
return fnm_rate, fm_rate
def calculate_tar_and_far_values(y_true, scores):
"""
Get TAR (TPR) and FAR (FNR) across various thresholds (via roc_curve)
:param y_true: ground truth label, boolean (1 if match; else, 0)
:param scores: scores for each pair.
:return: list of tuples (FAR, TAR, thresholds)
"""
fpr, tar, thresholds = roc_curve(y_true, scores, pos_label=1)
far = 1 - tar
return far, tar, thresholds
def calculate_det_curves(y_true, scores):
"""
Calculate false match rates, both for non-matches and matches
:param y_true: ground truth label, boolean (1 if match; else, 0)
:param scores: scores for each pair.
:return: list of tuples (false-match and false-non-match rates.
"""
# y_pred = threshold_scores(scores, threshold)
fpr, tpr, thresholds = roc_curve(y_true, scores, pos_label=1)
fnr = 1 - tpr
return fpr, fnr, thresholds
def sum_tp(threshold, scores, op=greater_equal):
return sum([1 if op(score, threshold) else 0 for score in list(scores)])
def sum_fn(threshold, scores, op=greater_equal):
return sum([0 if op(score, threshold) else 1 for score in list(scores)])
def sum_tn(threshold, scores, op=greater_equal):
return sum([0 if op(score, threshold) else 1 for score in list(scores)])
def sum_fp(threshold, scores, op=greater_equal):
return sum([1 if op(score, threshold) else 0 for score in list(scores)])
| UTF-8 | Python | false | false | 6,612 | py | 32 | metrics.py | 16 | 0.555656 | 0.55127 | 0 | 226 | 28.256637 | 83 |
reidarwood/trader-joes | 11,836,929,902,108 | 11552d1789f0211105efe5f6dc800babdf724e87 | 36491be4945b9a99bc1708cfb3cd38380a62e573 | /code/historic.py | c3ca55020fe3a0e7f4c8d960dbc72e9256b5696c | []
| no_license | https://github.com/reidarwood/trader-joes | 5d6527d9abd74121ff0a7dcdcd386baca440dd77 | 03f3c242bd6ccd82f1672eea7faf2509028f07a1 | refs/heads/main | 2023-02-01T05:13:02.899497 | 2020-12-10T20:31:39 | 2020-12-10T20:31:39 | 312,739,824 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import tensorflow as tf
class Historic(tf.keras.Model):
def __init__(self):
"""
The Model class predicts future stock market prices given historic data
"""
super(Historic, self).__init__()
self.learning_rate = 0.001
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
self.batch_size = 50
self.window_size = 64
self.num_epochs = 256
self.rnn_size = 128
self.lstm1 = tf.keras.layers.LSTM(self.rnn_size, return_sequences=True, return_state=True, kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.05))
self.D1 = tf.keras.layers.Dense(64, activation="relu", kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.05))
self.D2 = tf.keras.layers.Dense(32, activation="relu", kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.05))
self.D3 = tf.keras.layers.Dense(1, activation="relu", kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.05))
def call(self, inputs, initial_state=None):
"""
Runs the model on inputs where inputs is a tensor and predicts the prices
given the labels
:param inputs: Stock data as tensor (batch_size, window_size, data_size)
:param initial_state: 2-d array of shape (batch_size, rnn_size) as a tensor
:return: the batch predictions as a tensor of size(batch_size, window_size, 1),
final state of the LSTM which is list [state_h, state_c]
"""
# No clue if this actually works
layer1_out, state_h1, state_c1 = self.lstm1(inputs, initial_state=initial_state)
# layer2_out = self.D1(layer1_out)
layer2_out = self.D1(layer1_out)
layer3_out = self.D2(layer2_out)
layer4_out = self.D3(layer3_out)
return layer4_out, (state_h1, state_c1)
def loss(self, outputs, labels):
"""
Calculates average loss across the batch. Uses MAPE so
not biased towards "cheap" stocks
:param outputs: a matrix of shape (batch_size, window_size) as a tensor
:param labels: matrix of shape (batch_size, window_size) containing the labels
:return: the loss of the model as a tensor of size 1
"""
loss = tf.keras.losses.MAPE(labels, outputs)
loss = tf.reduce_mean(loss)
return loss
| UTF-8 | Python | false | false | 2,417 | py | 433 | historic.py | 4 | 0.640877 | 0.617294 | 0 | 55 | 42.945455 | 166 |
HAR0070/Python-Assignment | 661,424,999,649 | 4bd5e4d208db41f00914a55c7bb5567def667a94 | 859e7115ad360786156d5e7a2a5644e7c2c0face | /Assigning elements to different lists.py | c841553bf302f6e092e454ca5a4d7cbf32389883 | []
| no_license | https://github.com/HAR0070/Python-Assignment | 2885d955f637b1d962ef36254050fc6ab765ebcb | 07e2b39f3974d469bf5d76a86437fa77e946f91f | refs/heads/master | 2022-11-12T12:17:04.436562 | 2020-06-26T00:39:33 | 2020-06-26T00:39:33 | 271,654,806 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | if __name__=='__main__' :
list = [1,2,3,4,4,5,5,6]
print(list[2])
Name_list = ['file','edit','format','run']
print(Name_list[2])
List_list = [2,3,4]
List_list.append(list)
List_list.append(Name_list)
List_list.extend(list)
print(List_list)
| UTF-8 | Python | false | false | 290 | py | 5 | Assigning elements to different lists.py | 3 | 0.527586 | 0.482759 | 0 | 12 | 22.166667 | 46 |
Junxieshiguan/python-0515 | 10,411,000,739,896 | 8fca3b01c080442a81619fdad1fa63a57b8bba16 | c170929f60f9a90100ab6730944ec034f60b2201 | /junxieshiguan/day01/print.py | dfec9f6cf2fa10383805c1294532ccf214f2053f | []
| no_license | https://github.com/Junxieshiguan/python-0515 | 22046210ab5c7573a2ad7b541e2d5f9b2ad963de | fd07ccbff640a846ce21ece6cc902909368ede95 | refs/heads/master | 2020-03-17T08:35:32.510419 | 2018-05-15T03:47:03 | 2018-05-15T03:47:03 | 133,443,952 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | hello_world.py
print("Hellow python world")
| UTF-8 | Python | false | false | 44 | py | 20 | print.py | 19 | 0.772727 | 0.772727 | 0 | 2 | 21 | 28 |
bbowles98/michiganseniors | 12,412,455,489,957 | 9b7b411e60c2f98671c5741b1e9dd0774d3151fb | 72fe0bbbef0e5d170abacc7d458115089ecdf0f5 | /django_project/django_project/urls.py | 0322b0c2e2542bcf5df411a28d12f657539233c4 | []
| no_license | https://github.com/bbowles98/michiganseniors | 4535b9f11771ea78fd18bf6f73066d938e949723 | 789df8102791f2c1a5d229b2378bb031f04d1da2 | refs/heads/master | 2022-12-11T05:56:34.235382 | 2019-12-10T15:18:33 | 2019-12-10T15:18:33 | 208,288,231 | 1 | 0 | null | false | 2022-05-25T02:39:23 | 2019-09-13T15:03:31 | 2019-12-10T15:18:47 | 2022-05-25T02:39:22 | 311,525 | 1 | 0 | 6 | Makefile | false | false | """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from elect_api import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('search/', views.SearchViewSet),
url('register/', views.Register),
url('vote/', views.Vote),
url('cast/', views.Cast),
url('results/', views.ViewResults),
url('election/', views.CreateElection),
url('ballot/', views.CreateBallot),
url('live/', views.GoLive),
url('elections/', views.ViewElections),
url('signup/', views.CreateAccount),
url('login/', obtain_jwt_token, name='login'),
url('login-refresh/', refresh_jwt_token, name='login-refresh'),
url('delete/', views.DeleteElection),
url('deleteAll/', views.DeleteAllElections),
url('registeredElections/', views.ViewRegisteredElections),
url('pastElectionsVotedIn/', views.ViewPastElectionsUserVotedIn),
url('canViewResults/', views.CanViewElectionResults),
url('notify/', views.Notify),
url('publicRegister/', views.PublicRegister),
url('getMessage/', views.GetMessage),
url('addElectionRestrictions/', views.AddElectionRestrictions),
url('isPublic/', views.IsPublic)
]
| UTF-8 | Python | false | false | 1,885 | py | 65 | urls.py | 43 | 0.698143 | 0.693369 | 0 | 45 | 40.888889 | 79 |
callhub/ivr-poll | 14,989,435,871,050 | 4a317518a6701dc5b782f22f5eeacb9c0becf430 | 98d45c5858eedaae4d8a7ce952473f0a439c810d | /polls/models.py | 7d9b021126bd1e9f9104e8f16d4dbc41aa1d1e77 | []
| no_license | https://github.com/callhub/ivr-poll | d88e4fdf6f3dd40e8ba1392138357edb714aedb7 | b25b209e545cfdfc7d066dcb4f0329f0ec3d866d | refs/heads/master | 2018-03-20T05:49:35.963771 | 2016-09-03T13:51:36 | 2016-09-03T13:51:36 | 67,189,019 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import uuid
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Poll(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=200, default='unnamed')
class Question(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
description = models.CharField(max_length=500)
class Meta:
ordering = ['created_at']
class Option(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=500)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
class Phonebook(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=200)
number = models.CharField(max_length=15)
class Response(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=True)
subscriber = models.ForeignKey(Phonebook, on_delete=models.CASCADE)
key = models.CharField(max_length=2, blank=True, null=True)
| UTF-8 | Python | false | false | 2,195 | py | 11 | models.py | 6 | 0.692027 | 0.682916 | 0 | 56 | 38.196429 | 79 |
Strongc/python_utils | 12,670,153,549,781 | 5b427db86a2324219d0ee7eecd3083f3e08a52b2 | 1125b680b076f7bb5f4399eb02225c44d5259b95 | /yuncode/match.py | f828b9b3a84cd1dfd03193832c85b53fb399ba4d | []
| no_license | https://github.com/Strongc/python_utils | 4d6a402415951ff37faea66dcdb182650f3e700f | 1ca5da5b901432da635660167a026493318d807a | refs/heads/master | 2021-01-20T03:47:34.182060 | 2017-03-23T09:24:01 | 2017-03-23T09:24:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # conding = utf-8
import yunCode
filepath = u'image/'
filename = u'14713478019.png'
def findCode(filename):
code = yunCode.yunDamaFunction(filename)
return code
| UTF-8 | Python | false | false | 181 | py | 92 | match.py | 88 | 0.685083 | 0.618785 | 0 | 9 | 17.888889 | 44 |
jloehel/xii | 9,818,295,284,604 | fa8376b13ce332a68dacd8a722c4d195e8a2543e | 2742aa8e1953df047c76cd2618f266115ceb940d | /src/xii/builtin/commands/stop/__init__.py | ea6666d87f9069d69ae533a174081080b976da51 | [
"Apache-2.0"
]
| permissive | https://github.com/jloehel/xii | ee9b511bf1c34e0fdd6d02d65b3f519878ee67aa | 6941c08e3948262c8291c5e611932fed3915372b | refs/heads/master | 2021-01-21T06:59:46.029775 | 2017-02-27T12:41:42 | 2017-02-27T12:52:18 | 83,302,324 | 1 | 0 | null | true | 2017-02-27T11:20:54 | 2017-02-27T11:20:54 | 2016-09-14T11:22:11 | 2017-02-27T07:39:39 | 588 | 0 | 0 | 0 | null | null | null | from stop import StopCommand
| UTF-8 | Python | false | false | 29 | py | 51 | __init__.py | 48 | 0.862069 | 0.862069 | 0 | 1 | 28 | 28 |
japeto/secure-file-storage-app | 5,712,306,544,504 | 15e92c722c063256478d344222f29821680605a8 | a7ecee7422ac104e33e0fef2d175d77c66ac2472 | /api/file_manager/user/migrations/0005_alter_userhistory_login_time.py | 7f920eb31a88fa08dcdf095bf670c1e3c0203701 | []
| no_license | https://github.com/japeto/secure-file-storage-app | 023e1f50c91b7e8c5ef19cc4c3c67955f3d53bd0 | 8d1059ad747993cd044e8f84bdb602a433923845 | refs/heads/master | 2023-05-06T05:39:19.441292 | 2021-05-29T09:16:11 | 2021-05-29T09:16:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.2 on 2021-05-02 12:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0004_alter_userhistory_email'),
]
operations = [
migrations.AlterField(
model_name='userhistory',
name='login_time',
field=models.DateTimeField(auto_now_add=True),
),
]
| UTF-8 | Python | false | false | 403 | py | 40 | 0005_alter_userhistory_login_time.py | 29 | 0.600496 | 0.555831 | 0 | 18 | 21.388889 | 58 |
MaryanneNjeri/pythonModules | 16,965,120,851,983 | e1d5dcea56db06f67e54ec24b529539834fc5e19 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coins_20200607122802.py | 097d20449a97a09f723a624fad8bc63a16bae8e9 | []
| no_license | https://github.com/MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def change(amount,coins):
| UTF-8 | Python | false | false | 30 | py | 867 | coins_20200607122802.py | 867 | 0.666667 | 0.666667 | 0 | 1 | 25 | 25 |
paihengxu/Twitter-noisy-self-report | 10,136,122,832,529 | 57d1a902ccb897ba15d1e3d504e43ea56d0ef34a | 2067ef638eccf65c803256d3c90adef7e41ec419 | /scripts/iama/iama.py | 6edbd26d3f2624f676856ed1330a989f65f077e9 | []
| no_license | https://github.com/paihengxu/Twitter-noisy-self-report | b828313382c2c8e3a3a4b227e633572098e2e34b | 67fae81363be0d87a26fdcde8b6642af07885a08 | refs/heads/master | 2020-09-16T16:29:00.107427 | 2020-08-13T07:01:37 | 2020-08-13T07:01:37 | 223,829,548 | 0 | 0 | null | false | 2020-06-01T21:46:23 | 2019-11-25T00:10:59 | 2020-01-25T19:35:56 | 2020-06-01T21:46:22 | 4,148 | 0 | 0 | 0 | Python | false | false | #!/usr/bin/env python
# encoding: utf-8
import json, gzip, nltk, re, io, os
import argparse
import glob
import time
from nltk.tokenize import TweetTokenizer
from collections import defaultdict
QUERY = {'i am', 'i\'m', 'ima'}
COLLECT_TAG = 'adj'
OUTDIR = '/tmp/outdir/'
RESULT_OUTDIR = '/result/outdir/'
# nltk.download('averaged_perceptron_tagger')
def collectIamaFrequency_nltk(filename):
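    # Streams the input file line by line, tokenizes each record's text with
    # TweetTokenizer, POS-tags it, and counts (a) every token and (b) tokens
    # that follow an "i am"-style query phrase, separately for 'description'
    # fields and for all text.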
words_des = defaultdict(int)
swords_des = defaultdict(int)
words_all = defaultdict(int)
swords_all = defaultdict(int)
tknzr = TweetTokenizer()
i = 0
with open(filename, 'r') as inf:
try:
line = inf.readline()
while line: # 176943345 records
try:
if i % 100000 == 0:
print('{} record already at {}'.format(i, time.ctime()))
line = line.strip().split(":", 1)[1]
field, text = line.split(":", 1)
field = field.strip('"')
text = text.strip('"')
text = text.lower().replace('/', ',').replace('|', ',') # lower case
tokens = tknzr.tokenize(text) # preprocess
tags = nltk.tag.pos_tag(tokens)
for t in tokens:
words_all[t] += 1
if field == 'description':
words_des[t] += 1
# TODO: Get multiple words entity later
for q in QUERY:
if q in text:
q_token = tknzr.tokenize(q)
for w in getIamaWords(q_token, tokens, tags):
# filter the test without english words
if bool(re.search('[a-z0-9]', w)):
if field == 'description':
swords_des[w] += 1
swords_all[w] += 1
line = inf.readline()
i += 1
except Exception as er:
print(er)
line = inf.readline()
continue
except Exception as err:
print(err)
print("processed {} lines in this thread".format(str(i)))
return swords_des, words_des, swords_all, words_all
def getIamaWords(q_token, tokens, tags):
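    # Finds each occurrence of the query phrase in the token list and, based on
    # the POS-tag patterns selected by COLLECT_TAG ('noun' or 'adj'), returns
    # the noun or adjective that follows it.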
# potentially multiple I am a in the description.
swords = []
lq = len(q_token)
# (i, e) in the enumerator, only get index for e==first term of q_token
for ind in (i for i, e in enumerate(tokens) if e == q_token[0]):
if tokens[ind:ind + lq] == q_token:
if COLLECT_TAG == 'noun':
if ind + lq + 1 < len(tokens):
# simply matched the following one
# swords.append(tokens[ind+lq])
if tags[ind + lq][1] == 'NN':
swords.append(tokens[ind + lq])
if tags[ind + lq][1] == 'JJ' and tags[ind + lq + 1][1] == 'NN':
swords.append(tokens[ind + lq + 1])
if tags[ind + lq][1] == 'DT' and tags[ind + lq + 1][1] == 'NN':
swords.append(tokens[ind + lq + 1])
if ind + lq + 2 < len(tokens):
if tags[ind + lq][1] == 'RB' and tags[ind + lq + 1][1] == 'DT' and tags[ind + lq + 2][1] == 'NN':
swords.append(tokens[ind + lq + 2])
if tags[ind + lq][1] == 'DT' and tags[ind + lq + 1][1] == 'JJ' and tags[ind + lq + 2][1] == 'NN':
swords.append(tokens[ind + lq + 2])
if ind + lq + 3 < len(tokens):
if tags[ind + lq][1] == 'RB' and tags[ind + lq + 1][1] == 'DT' and tags[ind + lq + 2][1] == 'JJ' and \
tags[ind + lq + 3][1] == 'NN':
swords.append(tokens[ind + lq + 3])
elif COLLECT_TAG == 'adj':
if ind + lq + 1 < len(tokens):
# simply matched the following one
# swords.append(tokens[ind+lq])
if tags[ind + lq][1] == 'JJ' and tags[ind + lq + 1][1] == 'NN':
swords.append(tokens[ind + lq])
if ind + lq + 2 < len(tokens):
if tags[ind + lq][1] == 'DT' and tags[ind + lq + 1][1] == 'JJ' and tags[ind + lq + 2][1] == 'NN':
swords.append(tokens[ind + lq + 1])
if ind + lq + 3 < len(tokens):
if tags[ind + lq][1] == 'RB' and tags[ind + lq + 1][1] == 'DT' and tags[ind + lq + 2][1] == 'JJ' and \
tags[ind + lq + 3][1] == 'NN':
swords.append(tokens[ind + lq + 2])
return swords
def save_json_gz(OUT_DIR, dictionary, outname):
try:
with gzip.open(os.path.join(OUT_DIR + outname + '.json.gz'), 'w') as outf:
outf.write("{}\n".format(json.dumps(dictionary)).encode('utf8'))
except Exception as exp:
print('write {} failed, {}'.format(outname, exp))
def aggregate(file_pattern):
"""
aggregate result dict from parallel tmp files
"""
files = glob.glob(OUTDIR + file_pattern)
data = defaultdict(int)
for fn in files:
print("aggregating {}".format(fn))
with gzip.open(fn, 'r') as inf:
for line in inf:
d = json.loads(line.decode('utf8'))
for key, value in d.items():
data[key] += value
save_json_gz(RESULT_OUTDIR, data, file_pattern.replace("_*.json.gz", ""))
return data
def getFrequency(sfwords, words, filename):
"""
get frequency of selfreporty word, dividing its occurrence as self report by all occurrence
"""
swords_frequency = defaultdict(float)
for (ke, va) in sfwords.items():
swords_frequency[ke] = va / words[ke]
save_json_gz(RESULT_OUTDIR, swords_frequency, filename)
return swords_frequency
def sortDictAndWriteToTxt(dic_count, dic_freq, fn):
swords_list = sorted(dic_count.items(), key=lambda dic_count: dic_count[1], reverse=True)
print('Writing to txt files.')
with io.open(fn, 'w', encoding='UTF-8') as outf_des:
for ele in swords_list:
try:
outf_des.write('\t'.join([ele[0], str(ele[1]), str(dic_freq[ele[0]])]))
except Exception as err:
print(err)
continue
outf_des.write(u'\n')
if __name__ == '__main__':
# sourcefile = '/export/c10/zach/data/demographics/iama.json.gz'
parser = argparse.ArgumentParser()
parser.add_argument("job_name", type=str)
parser.add_argument("job_num", type=int)
parser.add_argument("num_jobs", type=int)
args = parser.parse_args()
num_jobs = args.num_jobs
job_num = args.job_num
job_name = args.job_name
file_num = -1
if job_name == 'collect':
# source files are divided to run the collection parallelly
files = glob.glob('/path/to/source/files/??')
files.sort()
assert len(files) == num_jobs
for fn in files:
file_num += 1
# parallelization purposes
if file_num % num_jobs != args.job_num:
continue
print("processing {}".format(fn))
swords_des, words_des, swords_all, words_all = collectIamaFrequency_nltk(fn)
print('swords count in all text is {}'.format(sum(swords_all.values())))
print('swords count in description only is {}'.format(sum(swords_des.values())))
print('Saving json.gz files.')
save_json_gz(OUTDIR, swords_des,
'selfreporty_{}_{}_in_description_{}'.format(COLLECT_TAG, 'count', job_num))
# save_json_gz(OUTDIR, swords_frequency_des, 'selfreporty_{}_{}_in_description_{}'.format(COLLECT_TAG, 'frequency', job_num))
save_json_gz(OUTDIR, swords_all, 'selfreporty_{}_{}_in_alltext_{}'.format(COLLECT_TAG, 'count', job_num))
save_json_gz(OUTDIR, words_des, 'all_words_count_in_description_{}'.format(job_num))
save_json_gz(OUTDIR, words_all, 'all_words_count_in_alltext_{}'.format(job_num))
# save_json_gz(OUTDIR, swords_frequency_all, 'selfreporty_{}_{}_in_alltext_{}'.format(COLLECT_TAG, 'frequency', job_num))
elif job_name == 'aggregate':
sf_words_des = aggregate('selfreporty_{}_{}_in_{}_*.json.gz'.format(COLLECT_TAG, 'count', 'description'))
words_des = aggregate('all_words_count_in_{}_*.json.gz'.format('description'))
sf_words_all = aggregate('selfreporty_{}_{}_in_{}_*.json.gz'.format(COLLECT_TAG, 'count', 'alltext'))
words_all = aggregate('all_words_count_in_{}_*.json.gz'.format('alltext'))
print("get frequency for words in description")
sf_words_des_freq = getFrequency(sf_words_des, words_des,
filename='selfreporty_{}_{}_in_{}'.format(COLLECT_TAG, 'frequency',
'description'))
print("get frequency for words in all text")
sf_words_all_freq = getFrequency(sf_words_all, words_all,
filename='selfreporty_{}_{}_in_{}'.format(COLLECT_TAG, 'frequency', 'alltext'))
sortDictAndWriteToTxt(sf_words_des, sf_words_des_freq,
fn='selfreporty_{}_count_sorted_in_{}.txt'.format(COLLECT_TAG, 'description'))
sortDictAndWriteToTxt(sf_words_all, sf_words_all_freq,
fn='selfreporty_{}_count_sorted_in_{}.txt'.format(COLLECT_TAG, 'alltext'))
print('ALL DONE!')
| UTF-8 | Python | false | false | 9,792 | py | 15 | iama.py | 12 | 0.509702 | 0.500306 | 0 | 216 | 44.333333 | 137 |
ritalrw/TAF | 2,259,152,817,273 | 77b4f2dc5e844921677971fea741a347857b1e09 | 7568d16711c583440f32a528dbdc91f715a6028b | /pytests/rebalance_new/rebalance_base.py | 69f1656758cda69bd5c9a3d84e0c4432afffc787 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/ritalrw/TAF | f164e960bc0792e8ea4efe160880a08f357512c1 | 7c175b861564fcf5b7c427e6c514c2b737484085 | refs/heads/master | 2022-04-20T11:14:03.513369 | 2020-04-22T12:53:12 | 2020-04-22T16:44:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from Cb_constants import CbServer
from basetestcase import BaseTestCase
from bucket_utils.bucket_ready_functions import BucketUtils
from couchbase_helper.document import View
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
from sdk_exceptions import SDKException
from BucketLib.BucketOperations import BucketHelper
from math import ceil
retry_exceptions = list([SDKException.AmbiguousTimeoutException,
SDKException.DurabilityImpossibleException,
SDKException.DurabilityAmbiguousException])
class RebalanceBaseTest(BaseTestCase):
def setUp(self):
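        # Builds the initial cluster, creates buckets (from a bucket spec or
        # explicitly), optionally creates scopes/collections, performs the
        # initial document load and prepares the doc generators used by tests.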
super(RebalanceBaseTest, self).setUp()
self.rest = RestConnection(self.cluster.master)
self.doc_ops = self.input.param("doc_ops", "create")
self.key_size = self.input.param("key_size", 0)
self.zone = self.input.param("zone", 1)
self.replica_to_update = self.input.param("new_replica", None)
self.default_view_name = "default_view"
self.defaul_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
self.default_view = View(self.default_view_name, self.defaul_map_func,
None)
self.max_verify = self.input.param("max_verify", None)
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
self.flusher_total_batch_limit = self.input.param("flusher_total_batch_limit", None)
self.test_abort_snapshot = self.input.param("test_abort_snapshot",
False)
self.items = self.num_items
node_ram_ratio = self.bucket_util.base_bucket_ratio(self.cluster.servers)
info = self.rest.get_nodes_self()
self.rest.init_cluster(username=self.cluster.master.rest_username,
password=self.cluster.master.rest_password)
self.rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved*node_ram_ratio))
self.check_temporary_failure_exception = False
nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
if nodes_init:
result = self.task.rebalance([self.cluster.master], nodes_init, [])
self.assertTrue(result, "Initial rebalance failed")
self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
self.check_replica = self.input.param("check_replica", False)
self.spec_name = self.input.param("bucket_spec", None)
# If buckets creation and initial data load is to be done by bucket_spec
if self.spec_name is not None:
self.log.info("Creating buckets from spec")
# Create bucket(s) and add rbac user
buckets_spec = self.bucket_util.get_bucket_template_from_package(
self.spec_name)
doc_loading_spec = \
self.bucket_util.get_crud_template_from_package("initial_load")
self.bucket_util.create_buckets_using_json_data(buckets_spec)
self.bucket_util.wait_for_collection_creation_to_complete()
# Create clients in SDK client pool
if self.sdk_client_pool:
self.log.info("Creating required SDK clients for client_pool")
bucket_count = len(self.bucket_util.buckets)
max_clients = self.task_manager.number_of_threads
clients_per_bucket = int(ceil(max_clients / bucket_count))
for bucket in self.bucket_util.buckets:
self.sdk_client_pool.create_clients(
bucket,
[self.cluster.master],
clients_per_bucket,
compression_settings=self.sdk_compression)
self.bucket_util.run_scenario_from_spec(self.task,
self.cluster,
self.bucket_util.buckets,
doc_loading_spec,
mutation_num=0)
self.bucket_util.add_rbac_user()
self.cluster_util.print_cluster_stats()
# Verify initial doc load count
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.validate_docs_per_collections_all_buckets()
self.cluster_util.print_cluster_stats()
self.bucket_util.print_bucket_stats()
self.bucket_helper_obj = BucketHelper(self.cluster.master)
self.log.info("==========Finished rebalance base setup========")
else:
self.bucket_util.add_rbac_user()
if self.standard_buckets > 10:
self.bucket_util.change_max_buckets(self.standard_buckets)
self.create_buckets(self.bucket_size)
# Create Scope/Collection based on inputs given
for bucket in self.bucket_util.buckets:
if self.scope_name != CbServer.default_scope:
self.scope_name = BucketUtils.get_random_name()
BucketUtils.create_scope(self.cluster.master,
bucket,
{"name": self.scope_name})
if self.collection_name != CbServer.default_collection:
self.collection_name = BucketUtils.get_random_name()
BucketUtils.create_collection(self.cluster.master,
bucket,
self.scope_name,
{"name": self.collection_name,
"num_items": self.num_items})
self.log.info("Bucket %s using scope::collection - '%s::%s'"
% (bucket.name,
self.scope_name,
self.collection_name))
# Update required num_items under default collection
bucket.scopes[self.scope_name] \
.collections[self.collection_name] \
.num_items = self.num_items
if self.flusher_total_batch_limit:
self.bucket_util.set_flusher_total_batch_limit(
self.cluster.master,
self.flusher_total_batch_limit,
self.bucket_util.buckets)
self.gen_create = self.get_doc_generator(0, self.num_items)
if self.active_resident_threshold < 100:
self.check_temporary_failure_exception = True
if not self.atomicity:
_ = self._load_all_buckets(self.cluster, self.gen_create,
"create", 0, batch_size=self.batch_size)
self.log.info("Verifying num_items counts after doc_ops")
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.validate_docs_per_collections_all_buckets(
timeout=120)
else:
self.transaction_commit = True
self._load_all_buckets_atomicty(self.gen_create, "create")
self.transaction_commit = self.input.param("transaction_commit",
True)
# Initialize doc_generators
self.active_resident_threshold = 100
self.gen_create = None
self.gen_delete = None
self.gen_update = self.get_doc_generator(0, (self.items / 2))
self.durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
durability=self.durability_level,
replicate_to=self.replicate_to, persist_to=self.persist_to)
self.cluster_util.print_cluster_stats()
self.bucket_util.print_bucket_stats()
self.log.info("==========Finished rebalance base setup========")
def _create_default_bucket(self, bucket_size):
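        # Creates the default bucket, sizing it from the node memory quota
        # when no explicit bucket_size is supplied.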
node_ram_ratio = self.bucket_util.base_bucket_ratio(self.servers)
info = RestConnection(self.cluster.master).get_nodes_self()
available_ram = int(info.memoryQuota * node_ram_ratio)
if bucket_size is not None:
available_ram = bucket_size
elif available_ram < 100 or self.active_resident_threshold < 100:
available_ram = 100
self.bucket_util.create_default_bucket(
ram_quota=available_ram,
bucket_type=self.bucket_type,
replica=self.num_replicas,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
def _create_multiple_buckets(self):
buckets_created = self.bucket_util.create_multiple_buckets(
self.cluster.master,
self.num_replicas,
bucket_count=self.standard_buckets,
bucket_type=self.bucket_type,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
self.assertTrue(buckets_created, "Unable to create multiple buckets")
for bucket in self.bucket_util.buckets:
ready = self.bucket_util.wait_for_memcached(
self.cluster.master,
bucket)
self.assertTrue(ready, msg="Wait_for_memcached failed")
def create_buckets(self, bucket_size):
if self.standard_buckets == 1:
self._create_default_bucket(bucket_size)
else:
self._create_multiple_buckets()
def tearDown(self):
self.cluster_util.print_cluster_stats()
super(RebalanceBaseTest, self).tearDown()
def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
"""
Shuffle the nodes present in the cluster if zone > 1.
Rebalance the nodes in the end.
Nodes are divided into groups iteratively. i.e: 1st node in Group 1,
2nd in Group 2, 3rd in Group 1 & so on, when zone=2
:param to_remove: List of nodes to be removed.
"""
if not to_remove:
to_remove = []
serverinfo = self.servers[0]
rest = RestConnection(serverinfo)
zones = ["Group 1"]
nodes_in_zone = {"Group 1": [serverinfo.ip]}
# Create zones, if not existing, based on params zone in test.
# Shuffle the nodes between zones.
if int(self.zone) > 1:
for i in range(1, int(self.zone)):
a = "Group "
zones.append(a + str(i + 1))
if not rest.is_zone_exist(zones[i]):
rest.add_zone(zones[i])
nodes_in_zone[zones[i]] = []
# Divide the nodes between zones.
nodes_in_cluster = [node.ip for node in self.cluster_util.get_nodes_in_cluster()]
nodes_to_remove = [node.ip for node in to_remove]
for i in range(1, len(self.servers)):
if self.servers[i].ip in nodes_in_cluster \
and self.servers[i].ip not in nodes_to_remove:
server_group = i % int(self.zone)
nodes_in_zone[zones[server_group]].append(self.servers[i].ip)
# Shuffle the nodesS
for i in range(1, self.zone):
node_in_zone = list(set(nodes_in_zone[zones[i]]) -
set([node for node in rest.get_nodes_in_zone(zones[i])]))
rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
otpnodes = [node.id for node in rest.node_statuses()]
nodes_to_remove = [node.id for node in rest.node_statuses()
if node.ip in [t.ip for t in to_remove]]
# Start rebalance and monitor it.
started = rest.rebalance(otpNodes=otpnodes,
ejectedNodes=nodes_to_remove)
if started:
result = rest.monitorRebalance()
self.assertTrue(result, msg="Rebalance failed{}".format(result))
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
# Verify replicas of one node should not be in the same zone
# as active vbuckets of the node.
if self.zone > 1:
self.cluster_util.verify_replica_distribution_in_zones(nodes_in_zone)
def add_remove_servers_and_rebalance(self, to_add, to_remove):
"""
Add and/or remove servers and rebalance.
:param to_add: List of nodes to be added.
:param to_remove: List of nodes to be removed.
"""
serverinfo = self.cluster.master
rest = RestConnection(serverinfo)
for node in to_add:
rest.add_node(user=serverinfo.rest_username,
password=serverinfo.rest_password,
remoteIp=node.ip)
self.shuffle_nodes_between_zones_and_rebalance(to_remove)
self.cluster.nodes_in_cluster = \
list(set(self.cluster.nodes_in_cluster + to_add) - set(to_remove))
def get_doc_generator(self, start, end):
return doc_generator(self.key, start, end,
doc_size=self.doc_size,
doc_type=self.doc_type,
target_vbucket=self.target_vbucket,
vbuckets=self.cluster_util.vbuckets,
key_size=self.key_size,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value,
mix_key_size=self.mix_key_size)
def _load_all_buckets(self, cluster, kv_gen, op_type, exp, flag=0,
only_store_hash=True, batch_size=1000, pause_secs=1,
timeout_secs=30, compression=True):
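        # Synchronously loads documents into every bucket and verifies the
        # resulting task status, retrying on the usual transient SDK exceptions.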
retry_exceptions_local = retry_exceptions \
+ [SDKException.RequestCanceledException]
tasks_info = self.bucket_util.sync_load_all_buckets(
cluster, kv_gen, op_type, exp, flag,
persist_to=self.persist_to, replicate_to=self.replicate_to,
durability=self.durability_level, timeout_secs=timeout_secs,
only_store_hash=only_store_hash, batch_size=batch_size,
pause_secs=pause_secs, sdk_compression=compression,
process_concurrency=self.process_concurrency,
retry_exceptions=retry_exceptions_local,
active_resident_threshold=self.active_resident_threshold,
scope=self.scope_name,
collection=self.collection_name)
if self.active_resident_threshold < 100:
for task, _ in tasks_info.items():
self.num_items = task.doc_index
self.assertTrue(self.bucket_util.doc_ops_tasks_status(tasks_info),
"Doc_ops failed in rebalance_base._load_all_buckets")
return tasks_info
def _load_all_buckets_atomicty(self, kv_gen, op_type):
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, kv_gen, op_type, 0,
batch_size=10,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync)
self.task.jython_task_manager.get_task_result(task)
def start_parallel_cruds_atomicity(self, sync=True,
task_verification=True):
tasks_info = dict()
if "update" in self.doc_ops:
tasks_info.update(
{self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, self.gen_update,
"rebalance_only_update", 0, batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
update_count=self.update_count,
commit=self.transaction_commit,
durability=self.durability_level, sync=sync,
defer=self.defer): None})
if "create" in self.doc_ops:
tasks_info.update(
{self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, self.gen_create,
"create", 0, batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit,
durability=self.durability_level,
sync=sync, defer=self.defer): None})
if "delete" in self.doc_ops:
tasks_info.update(
{self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, self.gen_delete,
"rebalance_delete", 0, batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit,
durability=self.durability_level,
sync=sync, defer=self.defer): None})
if task_verification:
for task in tasks_info.keys():
self.task.jython_task_manager.get_task_result(task)
return tasks_info
def start_parallel_cruds(self, retry_exceptions=[], ignore_exceptions=[],
task_verification=False):
tasks_info = dict()
if "update" in self.doc_ops:
tem_tasks_info = self.bucket_util._async_load_all_buckets(
self.cluster, self.gen_update, "update", 0, batch_size=self.batch_size,
persist_to=self.persist_to, replicate_to=self.replicate_to,
process_concurrency=self.process_concurrency,
durability=self.durability_level, pause_secs=5,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
retry_exceptions=retry_exceptions,
ignore_exceptions=ignore_exceptions,
scope=self.scope_name, collection=self.collection_name)
tasks_info.update(tem_tasks_info.items())
if "create" in self.doc_ops:
tem_tasks_info = self.bucket_util._async_load_all_buckets(
self.cluster, self.gen_create, "create", 0, batch_size=self.batch_size,
persist_to=self.persist_to, replicate_to=self.replicate_to,
process_concurrency=self.process_concurrency,
durability=self.durability_level, pause_secs=5,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
retry_exceptions=retry_exceptions,
ignore_exceptions=ignore_exceptions,
scope=self.scope_name, collection=self.collection_name)
tasks_info.update(tem_tasks_info.items())
self.num_items += (self.gen_create.end - self.gen_create.start)
for bucket in self.bucket_util.buckets:
bucket \
.scopes[self.scope_name] \
.collections[self.collection_name] \
.num_items += (self.gen_create.end - self.gen_create.start)
if "delete" in self.doc_ops:
tem_tasks_info = self.bucket_util._async_load_all_buckets(
self.cluster, self.gen_delete, "delete", 0, batch_size=self.batch_size,
persist_to=self.persist_to, replicate_to=self.replicate_to,
process_concurrency=self.process_concurrency,
durability=self.durability_level, pause_secs=5,
timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
retry_exceptions=retry_exceptions,
ignore_exceptions=ignore_exceptions,
scope=self.scope_name, collection=self.collection_name)
tasks_info.update(tem_tasks_info.items())
for bucket in self.bucket_util.buckets:
bucket \
.scopes[self.scope_name] \
.collections[self.collection_name] \
.num_items -= (self.gen_delete.end - self.gen_delete.start)
self.num_items -= (self.gen_delete.end - self.gen_delete.start)
if task_verification:
# Wait for tasks to complete and then verify
for task in tasks_info:
self.task_manager.get_task_result(task)
self.bucket_util.verify_doc_op_task_exceptions(tasks_info,
self.cluster)
self.bucket_util.log_doc_ops_task_failures(tasks_info)
return tasks_info
def loadgen_docs(self, retry_exceptions=[], ignore_exceptions=[],
task_verification=False):
loaders = []
retry_exceptions = \
list(set(retry_exceptions +
[SDKException.AmbiguousTimeoutException,
SDKException.RequestCanceledException,
SDKException.DurabilityImpossibleException,
SDKException.DurabilityAmbiguousException]))
if self.check_temporary_failure_exception:
retry_exceptions.append(SDKException.TemporaryFailureException)
if self.atomicity:
loaders = self.start_parallel_cruds_atomicity(self.sync,
task_verification)
else:
loaders = self.start_parallel_cruds(retry_exceptions,
ignore_exceptions,
task_verification)
return loaders
def induce_rebalance_test_condition(self, test_failure_condition):
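        # Injects the given failure condition on every node through the
        # ns_server testconditions interface (via diag/eval).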
if test_failure_condition == "verify_replication":
set_command = "testconditions:set(verify_replication, {fail, \"" + "default" + "\"})"
elif test_failure_condition == "backfill_done":
set_command = "testconditions:set(backfill_done, {for_vb_move, \"" + "default\", 1 , " + "fail})"
else:
set_command = "testconditions:set({0}, fail)" \
.format(test_failure_condition)
for server in self.servers:
rest = RestConnection(server)
shell = RemoteMachineShellConnection(server)
shell.enable_diag_eval_on_non_local_hosts()
_, content = rest.diag_eval(set_command)
self.log.debug("Set Command: {0} Return: {1}"
.format(set_command, content))
shell.disconnect()
def start_rebalance(self, rebalance_operation):
self.log.debug("Starting rebalance operation of type: {0}"
.format(rebalance_operation))
if rebalance_operation == "rebalance_out":
task = self.task.async_rebalance(
self.servers[:self.nodes_init], [],
[self.servers[self.nodes_init - 1]])
elif rebalance_operation == "rebalance_in":
task = self.task.async_rebalance(
self.servers[:self.nodes_init],
[self.servers[self.nodes_init]], [])
elif rebalance_operation == "swap_rebalance":
self.rest.add_node(self.cluster.master.rest_username,
self.cluster.master.rest_password,
self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port)
task = self.task.async_rebalance(
self.servers[:self.nodes_init], [],
[self.servers[self.nodes_init - 1]])
elif rebalance_operation == "graceful_failover":
task = self.task.async_failover([self.cluster.master],
failover_nodes=[self.servers[1]],
graceful=True,
wait_for_pending=120)
return task
def delete_rebalance_test_condition(self, test_failure_condition):
delete_command = "testconditions:delete({0})".format(test_failure_condition)
for server in self.servers:
rest = RestConnection(server)
shell = RemoteMachineShellConnection(server)
shell.enable_diag_eval_on_non_local_hosts()
_, content = rest.diag_eval(delete_command)
self.log.debug("Delete Command: {0} Return: {1}"
.format(delete_command, content))
shell.disconnect()
def check_retry_rebalance_succeeded(self):
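        # Polls the pending-rebalance info and waits for the automatic retry
        # to either succeed or exhaust all remaining attempts.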
result = json.loads(self.rest.get_pending_rebalance_info())
self.log.debug("Result from get_pending_rebalance_info: {0}"
.format(result))
retry_after_secs = result["retry_after_secs"]
attempts_remaining = result["attempts_remaining"]
retry_rebalance = result["retry_rebalance"]
self.log.debug("Attempts remaining: {0}, Retry rebalance: {1}"
.format(attempts_remaining, retry_rebalance))
while attempts_remaining:
# wait for the afterTimePeriod for the failed rebalance to restart
self.sleep(retry_after_secs,
message="Waiting for the afterTimePeriod to complete")
try:
result = self.rest.monitorRebalance()
msg = "monitoring rebalance {0}"
self.log.debug(msg.format(result))
self.assertTrue(result, "Retried rebalance did not succeed")
except Exception:
result = json.loads(self.rest.get_pending_rebalance_info())
self.log.debug(result)
try:
attempts_remaining = result["attempts_remaining"]
retry_rebalance = result["retry_rebalance"]
retry_after_secs = result["retry_after_secs"]
except KeyError:
self.fail("Retrying of rebalance still did not help. "
"All the retries exhausted...")
self.log.debug("Attempts remaining: {0}, Retry rebalance: {1}"
.format(attempts_remaining, retry_rebalance))
else:
self.log.info("Retry rebalanced fixed the rebalance failure")
break
def change_retry_rebalance_settings(self, enabled=True,
afterTimePeriod=300, maxAttempts=1):
# build the body
body = dict()
if enabled:
body["enabled"] = "true"
else:
body["enabled"] = "false"
body["afterTimePeriod"] = afterTimePeriod
body["maxAttempts"] = maxAttempts
rest = RestConnection(self.cluster.master)
rest.set_retry_rebalance_settings(body)
result = rest.get_retry_rebalance_settings()
self.log.debug("Retry rebalance settings changed to {0}"
.format(json.loads(result)))
def reset_retry_rebalance_settings(self):
body = dict()
body["enabled"] = "false"
rest = RestConnection(self.cluster.master)
rest.set_retry_rebalance_settings(body)
self.log.debug("Retry Rebalance settings reset ....")
| UTF-8 | Python | false | false | 28,389 | py | 24 | rebalance_base.py | 24 | 0.567227 | 0.563775 | 0 | 554 | 50.243682 | 109 |
LaMemeBete/TP-INFORMATIQUE | 11,957,188,966,771 | c343cc77cb0af392a6eed283d775059a228b2821 | 1027c297b91a90e2d2a4ba45ce5d842e3ac73acd | /tp-5.py | 50adee5156c0b2c24adeff42088d50ae4baf049a | []
| no_license | https://github.com/LaMemeBete/TP-INFORMATIQUE | 293c3ff8bb6ff26eb547895e8d943afc2ba7bff9 | 807fd941cc37a1933e89478995adcc26e7b63042 | refs/heads/master | 2021-01-17T18:43:41.686779 | 2016-11-25T14:46:51 | 2016-11-25T14:46:51 | 71,565,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def suppression_second(c, s):
return s.replace(c, '');
def suppression(c, s):
strSum = '';
for i in range(0,len(s)):
if(s[i] != c):
strSum += s[i];
return strSum;
def suppression_debut(c, s):
strSum = '';
booleanCheck = False;
for i in range(0,len(s)):
if(booleanCheck == True):
strSum += s[i];
else:
if(s[i] != c ):
strSum += s[i];
else:
booleanCheck = True;
return strSum;
def suppression_dernier(c, s):
strSum = '';
booleanCheck = False;
i = len(s) -1
while i >= 0:
if(booleanCheck == True):
            strSum = s[i] + strSum;  # prepend: the loop walks the string backwards
else:
if(s[i] != c ):
                strSum = s[i] + strSum;
else:
booleanCheck = True;
i = i - 1;
return strSum;
#5.7
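# Helpers converting between digit characters/strings and integers via their
# ASCII codes (ord/chr offset by 48).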
def chiffre(c):
return ord(c) - 48;
def entier(s):
finalNum = 0;
for i in range(0, len(s)):
finalNum += chiffre(s[i]) * (10**(len(s)-i-1));
return finalNum;
def caractere(n):
return(chr(n + 48))
def chaine(n):
strFinal = '';
    while n > 0:
        valueToGet = n % 10
        n = n // 10
        # prepend each digit so the most significant digit ends up first
        strFinal = caractere(valueToGet) + strFinal
return strFinal;
print(chaine(421))
| UTF-8 | Python | false | false | 1,316 | py | 14 | tp-5.py | 12 | 0.473404 | 0.452888 | 0 | 57 | 22.087719 | 55 |
BurnFaithful/KW | 10,943,576,704,413 | 4a2182952f4d2fbc83681387894820d10a709f29 | 914ca4921c114c917267214e0987ebecf30b3510 | /Programming_Practice/Python/Base/Bigdata_day1007/PG13.py | 977532c50a68599c25c6d3d5ae8aa92656173d54 | []
| no_license | https://github.com/BurnFaithful/KW | 52535030ea57f1489a0d108d599b66ffee50a1f4 | 15deb50449b8f902f623f20b97448c0f473a9342 | refs/heads/master | 2022-12-20T16:06:01.827398 | 2020-09-12T08:51:23 | 2020-09-12T08:51:23 | 294,897,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # BMI = 몸무게 / 키 * 키
height = int(input("What is your height in cm? "))
weight = int(input("What is your weight in kg? "))
height = height / 100
bmi = weight / (height * height)
if bmi <= 18.5:
    print(f"Your BMI is {bmi:.2f}, which is underweight.")
elif bmi > 18.5 and bmi <= 22.9:
    print(f"Your BMI is {bmi:.2f}, which is normal.")
elif bmi >= 23.0 and bmi <= 24.9:
    print(f"Your BMI is {bmi:.2f}, which is overweight.")
elif bmi >= 25.0 and bmi <= 29.9:
    print(f"Your BMI is {bmi:.2f}, which is obese.")
elif bmi >= 30.0:
    print(f"Your BMI is {bmi:.2f}, which is severely obese.")
Upasna4/Training | 10,831,907,530,794 | 14d3884e012d4ac7776e8edf36781bc17720b360 | 55965f592cb7e915cd68bd371ee1a6ad2a6e0247 | /amit.py | 5a3cabdd64be6b33d236149856b4726d10e06efb | []
| no_license | https://github.com/Upasna4/Training | 2b5b57fc3e5229304860f153db93d912a44472bf | 33c6eeb565c422e40ea88d50af787f58b9f0da6d | refs/heads/master | 2020-08-05T03:50:36.280910 | 2019-10-02T16:36:09 | 2019-10-02T16:36:09 | 212,383,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mysql.connector
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import random
con = mysql.connector.connect(host='localhost', user='root', password='amit', database='db_new1')
obj = con.cursor()
print("Hi welcome to the website")
print("1. Login ")
print("2. Sign up")
n = int(input("What would you like to do?"))
if n == 2:
name = input("Please enter your name")
email = input("Please enter your email id")
pwd = input("Please enter your password")
city = input("Please enter your city")
isactive = 0
otp = random.randint(10000, 100000)
obj.execute("INSERT INTO db_new1(name,emailid,password,city,otp,isactive)values('%s','%s','%s','%s',%s,%s)"%(name,email,pwd,city,otp,isactive))
con.commit()
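    # e-mail the freshly generated OTP to the address the user just registered with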
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login('amitmadaan9595@gmail.com', '8295951610')
msg = MIMEMultipart()
msg['From'] = 'amitmadaan9595@gmail.com'
msg['To'] = email
msg['Subject'] = "Sign up confirmation"
body = "Your otp is:"+str(otp)
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
server.sendmail(msg['From'], msg['To'], text)
server.quit()
o = int(input("Pls enter the otp sent to your mentioned email id to confirm sign up"))
if (o==otp):
        obj.execute("update db_new1 set isactive='2' where emailid='%s'"%(email))  # use the e-mail value itself, not a (non-existent) column named `email`
'''email id='madaanamit367@gmail.com'''
con.commit()
print("Signed up successfully")
else:
print("The otp did not match, pls try again")
if(n==1):
while(True):
email = input("Enter your email id")
pwd = input("Enter your password")
obj.execute("SELECT * from db_new1 where emailid='%s'"%(email))
data = obj.fetchall()
print(data)
if(len(data)>0):
if(data[0][1]==email and data[0][2]==pwd):
print("Login successfull")
print("Welcome, "+data[0][0])
break
else:
print("Data entered is incorrect, pls try again") | UTF-8 | Python | false | false | 2,124 | py | 211 | amit.py | 140 | 0.607345 | 0.582863 | 0 | 61 | 33.836066 | 148 |
phpactor/phpactor | 996,432,436,088 | f70c9b8f86690d4c9c97c4e4bddbe4c5b2adb595 | b625daaf38b69a9a4704e95e9f2d1260ec9e53ab | /doc/_ext/phpactor.py | f08c62950531bf18ad19a1971aee5370bbdcc835 | [
"MIT"
]
| permissive | https://github.com/phpactor/phpactor | cfa238ef2eaed57852d922668eca39d2b31eeed6 | 2f0909c94967fc711ccf43451974e8a51d084a13 | refs/heads/master | 2023-08-17T20:03:53.997351 | 2023-08-12T15:59:21 | 2023-08-12T15:59:21 | 43,064,439 | 1,090 | 147 | MIT | false | 2023-09-11T12:32:19 | 2015-09-24T12:07:36 | 2023-09-08T17:02:00 | 2023-09-11T12:32:19 | 8,484 | 1,041 | 113 | 198 | PHP | false | false | from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class GitHubRepoDirective(Directive):
"""Directive for Github Repositories."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
has_content = False
def run(self):
repo = self.arguments[0]
env = self.state.document.settings.env
repo_link = nodes.reference('', repo, refuri='https://github.com/' + repo)
title = nodes.paragraph(classes=['github-link'])
github_icon = nodes.image(uri=directives.uri("/images/github.svg"),width="15px",height="15px")
title += github_icon,
title += nodes.emphasis(strong=True,text=' Github:')
title += nodes.inline(text=' ')
title += repo_link,
new_nodes = [title]
return new_nodes
def setup(app):
app.add_directive("github-link", GitHubRepoDirective)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| UTF-8 | Python | false | false | 1,078 | py | 2,070 | phpactor.py | 1,884 | 0.62987 | 0.621521 | 0 | 39 | 26.641026 | 102 |
elliotwoods/Rulr-2.0 | 4,647,154,620,212 | 1ae7e5250f3d3843d201c05ff212b952723f701c | f7e5ac230cb265989f6cb113b83db83463996976 | /rulr/Utils/_Exports.py | 914ee52098776694f05c1b726cb186354febedc4 | [
"MIT"
]
| permissive | https://github.com/elliotwoods/Rulr-2.0 | 0c0aa8146e58d448931dd6a6c10a0b536449b2fd | 125503a8d58c4fcfbec9a32ca4a2cc7d9f3ec825 | refs/heads/master | 2022-12-11T12:35:12.678020 | 2020-08-25T13:18:19 | 2020-08-25T13:18:19 | 140,419,572 | 6 | 0 | MIT | false | 2022-06-21T21:43:20 | 2018-07-10T11:04:46 | 2020-08-26T17:02:44 | 2022-06-21T21:43:19 | 1,146 | 5 | 0 | 8 | JavaScript | false | false | import sys
import traceback
import weakref
import numpy as np
BASIC_TYPES = [int, float, dict, list, str, bool]
CUSTOM_EXPORTS = [np.ndarray]
EXPORTABLE_PROPERTY_TYPES = BASIC_TYPES + CUSTOM_EXPORTS
exported_objects = {}
def format_exception(exception):
# Get the traceback info
exc_tb = sys.exc_info()[2]
tracebackList = traceback.extract_tb(exc_tb, 5)
formattedTracebackList = []
for tracebackEntry in tracebackList:
formattedTracebackList.append({
"name" : tracebackEntry.name,
"filename" : tracebackEntry.filename,
"lineNumber" : tracebackEntry.lineno,
"line" : tracebackEntry.line
})
formattedException = {
"type" : type(exception),
"args" : exception.args,
"message" : str(exception),
"traceback" : formattedTracebackList
}
return formattedException
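# export_object registers a Python instance (via weakref) under a numeric object_id and returns
# that id together with the instance's public method and property names, so the object can be
# addressed from the JavaScript side.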
def export_object(instance):
# object is callable, needs wrapping
object_id = None
# Check if we have an existing wrapping for this object
for key, value in exported_objects.items():
if value() == instance:
object_id = key
# If no existing wrapping, create one
if object_id is None:
object_id = len(exported_objects)
exported_objects[object_id] = weakref.ref(instance)
# TODO : Each time the object is 'reexported' we rebuild the property and method names - let's reduce this
# Get callable methods
attributes = dir(instance)
attributes = [x for x in attributes if x[0] != '_'] # Trim 'private' attributes
method_names = [att for att in attributes if callable(getattr(instance, att))]
property_names = [att for att in attributes if not att in method_names]
#TODO : just export everything automatically
return {
"object_id" : object_id,
"object_creation_info" : {
"module" : instance.__module__[len("rulr."):],
"class" : instance.__class__.__name__
},
"method_names" : method_names,
"property_names" : property_names
}
def to_basic_type(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return str(instance)
def set_from_advanced_type(instance, value):
if isinstance(instance, np.ndarray):
instance[:] = value
def return_object(instance, success_callback, success_object_callback):
instance_type = type(instance)
if instance_type in BASIC_TYPES or instance is None:
# return the value directly
success_callback.Call(instance)
elif instance_type in CUSTOM_EXPORTS:
translated_instance = to_basic_type(instance)
success_callback.Call(translated_instance)
else:
# object is callable, needs wrapping
exported_object = export_object(instance)
success_object_callback.Call(exported_object)
def call_exported_object_method(success_callback, success_object_callback, exception_callback, object_id, method_name, *args):
global exported_objects
#TODO : This exception won't succesfully be passed to JS right now
if not object_id in exported_objects:
raise Exception("object_id {0} not found in exported_objects".format(object_id))
instance = exported_objects[object_id]()
method = getattr(instance, method_name)
try:
result = method(*args)
return_object(result, success_callback, success_object_callback)
except Exception as exception:
exception_callback.Call(format_exception(exception))
def call_exported_object_property_get(success_callback, success_object_callback, exception_callback, object_id, property_name):
global exported_objects
try:
if not object_id in exported_objects:
raise Exception("object_id {0} not found in exported_objects".format(object_id))
instance = exported_objects[object_id]()
property_ = getattr(instance, property_name)
return_object(property_, success_callback, success_object_callback)
except Exception as exception:
exception_callback.Call(format_exception(exception))
def call_exported_object_property_set(success_callback, success_object_callback, exception_callback, object_id, property_name, value):
global exported_objects
try:
if not object_id in exported_objects:
raise Exception("object_id {0} not found in exported_objects".format(object_id))
instance = exported_objects[object_id]()
property_ = getattr(instance, property_name)
property_type = type(property_)
if property_type in BASIC_TYPES:
setattr(instance, property_name, value)
elif property_type in CUSTOM_EXPORTS:
set_from_advanced_type(property_, value)
else:
raise Exception("Cannot call set on property of type [{}]".format(str(property_type)))
success_callback.Call(None)
except Exception as exception:
exception_callback.Call(format_exception(exception)) | UTF-8 | Python | false | false | 4,670 | py | 85 | _Exports.py | 67 | 0.718415 | 0.717131 | 0 | 143 | 30.671329 | 134 |
FaisalWant/ObjectOrientedPython | 5,772,436,058,123 | 5894c4d5a41e483c74e63a6a156a621e66b5ca7d | 2ea3e35f00e8044b69d3fb5341394ad33eac62a7 | /regex/OrConditional.py | b7736e8921d424372fb8c8cb3f9f6a7ebae0f195 | []
| no_license | https://github.com/FaisalWant/ObjectOrientedPython | 3a3f28ed038bfb8991fd21b3ac8ebe962a3abf93 | ce38d92bf94fbeca577b4724363e5db6d857ab40 | refs/heads/master | 2023-02-05T03:14:54.926386 | 2019-07-08T09:15:15 | 2019-07-08T09:15:15 | 113,368,669 | 0 | 0 | null | false | 2023-01-25T23:32:31 | 2017-12-06T21:23:28 | 2019-07-08T09:28:40 | 2023-01-25T23:32:31 | 18,803 | 0 | 0 | 6 | Python | false | false | #OrConditional.py
import re
randStr = "1. Dog 2. Cat 3.Turtle"
regex = re.compile(r"\d\.\s(Dog|Cat)")
matches = re.findall(regex, randStr)
for i in matches:
print(i)
| UTF-8 | Python | false | false | 163 | py | 137 | OrConditional.py | 123 | 0.705521 | 0.687117 | 0 | 7 | 22.285714 | 37 |
nursix/drkcm | 9,878,424,805,191 | 7bcc72e665024d6583d4abd7c7a2843f67a6cb58 | 7a3e9d88b21ef7e4b73d0632e08546d65a9df2ca | /modules/templates/locations/BA/config.py | c11b46230373b0dfd3ea6c66e68db240b057e5c3 | [
"MIT"
]
| permissive | https://github.com/nursix/drkcm | 64eeb8ead30784d379d64a0ba2bc2c93bcafb8ca | 7ec4b959d009daf26d5ca6ce91dd9c3c0bd978d6 | refs/heads/master | 2023-09-04T10:07:52.596460 | 2023-09-04T00:43:45 | 2023-09-04T00:43:45 | 97,222,001 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gluon import current
def config(settings):
"""
Template settings for Bosnia and Herzegovina
- designed to be used in a Cascade with an application template
"""
#T = current.T
# Pre-Populate
settings.base.prepopulate.append("locations/BA")
# Restrict to specific country/countries
settings.gis.countries.append("BA")
    # Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False
# L10n (Localization) settings
settings.L10n.languages["bs"] = "Bosnian"
settings.L10n.languages["hr"] = "Croatian"
settings.L10n.languages["sr"] = "Serbian"
# Default Language (put this in custom template if-required)
#settings.L10n.default_language = "bs"
#settings.L10n.default_language = "hr"
#settings.L10n.default_language = "sr"
# Default timezone for users
settings.L10n.timezone = "Europe/Sarajevo"
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 387
settings.fin.currencies["BAM"] = "Convertible Marks"
settings.fin.currency_default = "BAM"
# END =========================================================================
| UTF-8 | Python | false | false | 1,197 | py | 798 | config.py | 617 | 0.650794 | 0.63325 | 0 | 35 | 33.2 | 79 |
tiyd-python-2015-01/freeshelf | 6,030,134,112,179 | e46638f012bd953cfcc46566180343ea4438bd1f | 3474e2251e4b8f00794c0407b10cf8f8e33b2efe | /freeshelf/__init__.py | 8df93fefba23d61d4cb571c05860cab2b1236d56 | []
| no_license | https://github.com/tiyd-python-2015-01/freeshelf | d97b1d80687fd50445e4c46326a8b68357e8409a | 17082f0f1764521118b84fb86aaf538c57ae4fd4 | refs/heads/master | 2016-09-05T18:16:51.135820 | 2015-03-03T18:34:17 | 2015-03-03T18:34:17 | 31,000,557 | 1 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
from flask.ext.wtf import CsrfProtect
from .extensions import (
db,
migrate,
debug_toolbar,
bcrypt,
login_manager,
config,
)
from . import models
from .views.users import users
from .views.books import books
from .views.api import api
SQLALCHEMY_DATABASE_URI = "postgres://localhost/freeshelf"
DEBUG = True
SECRET_KEY = 'development-key'
DEBUG_TB_INTERCEPT_REDIRECTS = False
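# Application factory: builds the Flask app, registers the blueprints and initialises the extensions.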
def create_app():
app = Flask("freeshelf")
app.config.from_object(__name__)
app.register_blueprint(users)
app.register_blueprint(books)
app.register_blueprint(api, url_prefix="/api/v1")
config.init_app(app)
db.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
bcrypt.init_app(app)
login_manager.init_app(app)
login_manager.login_view = "users.login"
return app | UTF-8 | Python | false | false | 863 | py | 29 | __init__.py | 18 | 0.69409 | 0.692932 | 0 | 42 | 19.571429 | 58 |
martinsbalodis/scrapy-couchdb | 9,405,978,413,502 | be3403da09435c5c58e8f6f86a71702f01841eae | 1acf4d1753b15f4ed995c010a426ec284c4fc7c0 | /scrapycouchdb.py | 878bc9a362a1f20aaa220ea362faab86b24657f4 | [
"Apache-2.0"
]
| permissive | https://github.com/martinsbalodis/scrapy-couchdb | 6efee369a13a49b89a0cb3dbd456c2c851f3b47d | d4f7ead1fa8ba6aa2f5323f5fa1215ec5ef34f7f | refs/heads/master | 2021-01-17T22:17:00.495344 | 2012-12-19T20:12:52 | 2012-12-19T20:12:52 | 7,041,749 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import couchdb
from scrapy.conf import settings
from scrapy import log
import datetime
from w3lib.http import headers_dict_to_raw, headers_raw_to_dict
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from urlparse import urlparse
class CouchDBPipeline(object):
def __init__(self):
couch = couchdb.Server(settings['COUCHDB_SERVER'])
self.db = couch[settings['COUCHDB_DB']]
def process_item(self, item, spider):
data = {}
for key in item.keys():
if key in settings['COUCHDB_IGNORE_FIELDS']:
continue
elif isinstance(item[key], datetime.datetime):
data[key] = item[key].isoformat()
else:
data[key] = item[key]
        #Throw exception if unknown type
data['_id'] = data[settings['COUCHDB_UNIQ_KEY']]
try:
old = self.db[data['_id']]
data['_rev'] = old['_rev']
except couchdb.http.ResourceNotFound:
change = True
#Only save the document if new content
if data.has_key('_rev'):
change = False
for key in data.keys():
if not old.has_key(key):
change = True
else:
if old[key] != data[key]:
change = True
if change:
self.db.save(data)
log.msg("Item wrote to CouchDB database %s/%s" %
(settings['COUCHDB_SERVER'], settings['COUCHDB_DB']),
level=log.DEBUG, spider=spider)
return item
class CouchDBCacheStorage(object):
def __init__(self, settings):
couch = couchdb.Server(settings['COUCHDB_SERVER'])
try:
self.db = couch[settings['COUCHDB_DB']]
except couchdb.http.ResourceNotFound:
couch.create(settings['COUCHDB_DB'])
self.db = couch[settings['COUCHDB_DB']]
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def retrieve_response(self, spider, request):
"""Return response if present in cache, or None otherwise."""
try:
document = self.db[self._inverse_url(request.url)]
except couchdb.http.ResourceNotFound:
return
# @TODO expiration
body = document['response_body']
url = document['response_url']
status = document['status']
headers = Headers(headers_raw_to_dict(document['response_headers']))
encoding = document['encoding']
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body,
encoding=encoding)
return response
def store_response(self, spider, request, response):
"""Store the given response in the cache."""
data = {
'_id': self._inverse_url(request.url),
'url': request.url,
'method': request.method,
'status': response.status,
'response_url': response.url,
'timestamp': datetime.datetime.now().strftime("%s"),
'response_body': response.body_as_unicode(),
'response_headers': headers_dict_to_raw(response.headers),
'request_headers': headers_dict_to_raw(request.headers),
'request_body': request.body,
'encoding': response.encoding
}
self.db.save(data)
def _inverse_url(self, url):
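        # Build the cache key from the reversed domain (e.g. "com.example.www") plus scheme, path and query.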
elements = urlparse(url)
return ".".join(elements.netloc.split('.')[::-1])+':'+elements.scheme\
+elements.path+elements.query | UTF-8 | Python | false | false | 3,688 | py | 3 | scrapycouchdb.py | 2 | 0.569414 | 0.568872 | 0 | 102 | 35.166667 | 78 |
GlenMue/polar_curve_scetching_turtle.py | 2,379,411,908,688 | c3320f3b21dc0fa455c8acd217d41cce9816d0bd | 95b17a60616a31a7db59b59f153c7eedf93fb387 | /polar_curve_scetching_turtle.py | 558a44276dffb86d4c98a7e5bc5097d08d042b02 | []
| no_license | https://github.com/GlenMue/polar_curve_scetching_turtle.py | 13288a5832a5b5d48a9270ca21ae519fe1dbca4a | c2dda65696eb55e387020dc39f307c1784e50836 | refs/heads/main | 2023-08-19T12:33:19.520907 | 2021-10-18T18:47:39 | 2021-10-18T18:47:39 | 418,626,981 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from turtle import*
from math import *
import matplotlib.pyplot as plt
ø = 0
speed(0)
hideturtle()
ex = []
why = []
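# Trace the polar curve r(θ) = √2 + 2·sin(θ) for θ from 0° to 360°, converting to Cartesian quadrant by quadrant.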
while ø <= 360:
ç = radians(ø)
const = ç-radians(360)
if ø <= 360:
r = (sqrt(2))+(2*sin(ç))
if ø == 0:
penup()
x = r*cos(ç)
y = r*sin(ç)
else:
pendown()
if ø <= 90:
x = r*cos(ç)
y = r*sin(ç)
elif ø <= 180:
x = -r*cos(radians(180)-ç)
y = r*sin(radians(180)-ç)
elif ø <= 270:
x = -r*cos(ç-radians(180))
y = -r*sin(ç-radians(180))
elif ø <= 360:
x = r*cos(radians(360)-ç)
y = -r*sin(radians(360)-ç)
elif ø <= 720:
r = (cos(const))**2
if const <= 90:
x = -r*cos(const)
y = -r*sin(const)
elif const <= 180:
x = r*cos(radians(180)-const)
y = -r*sin(radians(180)-const)
elif const <= 270:
x = r*cos(const-radians(180))
y = r*sin(const-radians(180))
elif const <= 360:
x = -r*cos(radians(360)-const)
y = r*sin(radians(360)-const)
else:
ø = 0
print(x, ' ', y, ' ', 'angle', ø)
setpos(x, y)
ex.append(x)
why.append(y)
ø += 1
plt.plot(ex, why)
plt.show()
exitonclick()  # keep the turtle window open until it is clicked; onclick() without a handler raises TypeError
| UTF-8 | Python | false | false | 1,439 | py | 1 | polar_curve_scetching_turtle.py | 1 | 0.408351 | 0.353149 | 0 | 66 | 20.409091 | 42 |
kalolad1/cosmos | 6,794,638,288,000 | b95c624134ab81053e8436abf3d46aac6595a30f | 0f13dfcb2484228d4e0d5bfe12f3cfe1f1f51cc6 | /main/migrations/0023_providerprofile.py | 419516a997e6bea3b98eadaa540f644d79aa403a | []
| no_license | https://github.com/kalolad1/cosmos | e00462b6f98aa06ebbfb7818c6f0e6340cc6fed0 | b6d757895132b9b3c8c6682c11efadf993d5905b | refs/heads/master | 2023-04-05T16:30:43.414232 | 2021-03-30T15:01:21 | 2021-03-30T15:01:21 | 266,000,890 | 0 | 0 | null | false | 2021-03-20T00:10:11 | 2020-05-22T02:35:27 | 2020-12-14T16:17:45 | 2021-03-20T00:10:11 | 4,663 | 1 | 0 | 5 | TypeScript | false | false | # Generated by Django 3.0.6 on 2020-07-05 22:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0022_auto_20200703_1828'),
]
operations = [
migrations.CreateModel(
name='ProviderProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(default=None, max_length=60)),
('last_name', models.CharField(default=None, max_length=60)),
('date_of_birth', models.DateField(default=django.utils.timezone.now)),
('sex', models.CharField(choices=[('male', 'Male'), ('female', 'Female')], max_length=60)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='provider_profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| UTF-8 | Python | false | false | 1,063 | py | 148 | 0023_providerprofile.py | 90 | 0.619003 | 0.584196 | 0 | 27 | 38.37037 | 154 |
guimedeiros1/adapt_recommender | 6,751,688,624,565 | c2eb5557664d0ebfabf00f36edde680e7b468f98 | a93bf5489fd98ab84921a04ca8b493db65fcafa3 | /venv/lib/python3.6/abc.py | 9fffc289a0b335cd5d313ee953de7c64b3573538 | []
| no_license | https://github.com/guimedeiros1/adapt_recommender | 702bb2e80966bb14066765179d76afb63b07941f | c57a9df4abae66d77b8316f39e8ff5ee837a511d | refs/heads/master | 2021-03-27T20:53:59.239760 | 2018-01-05T13:39:55 | 2018-01-05T13:39:55 | 102,633,258 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/guilherme/anaconda3/lib/python3.6/abc.py | UTF-8 | Python | false | false | 46 | py | 81 | abc.py | 57 | 0.826087 | 0.76087 | 0 | 1 | 46 | 46 |
daniel-reich/ubiquitous-fiesta | 13,048,110,677,633 | 84412d85468e8ac801419667b2a448714ee43a3d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /J9fCHDa3yYJWnK3A7_18.py | e889484c3fe4ba38fddec6e16f8efd2c806a7f3f | []
| no_license | https://github.com/daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def is_happy(n):
return True if s(n) == 1 else False if s(n) == 4 else is_happy(s(n))
def s(n):
return sum([int(x)**2 for x in str(n)])
| UTF-8 | Python | false | false | 142 | py | 38,088 | J9fCHDa3yYJWnK3A7_18.py | 38,088 | 0.577465 | 0.556338 | 0 | 4 | 34 | 70 |
Dmitri-2/BCPNN-Sim-Python | 21,209 | 7506db452863783961542dbf0dfde5fd1c962b5c | e272b512e8ff6fd4733534b9ad8d083205f9b6b8 | /Neuron.py | a6a175c7add4f7b699cdd712549ce3cbebb3004f | []
| no_license | https://github.com/Dmitri-2/BCPNN-Sim-Python | ebc7cf999119b53d3229b8c2f3f471753ef45266 | 0ca025910daae20fadfd66e33e863c527adb8c64 | refs/heads/master | 2023-01-14T22:37:04.369070 | 2020-11-16T17:58:18 | 2020-11-16T17:58:18 | 273,996,611 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import numpy as np
import math
import random
import sys
class Neuron:
## Only takes the number of inputs it should expect
def __init__(self, numOfInputs, initialProbability):
self.value = 0 # e.g "activity"
self.bias = 0
self.probability = initialProbability
self.previousProbability = 0
self.connectedProbabilities = []
self.beta = 0
self.numOfInputs = numOfInputs
self.input = 0
self.connections = np.array([])
self.isWinningNode = False
self.connectionWeights = np.array([])
self.weights = np.random.uniform(low=0, high=0.05, size=(numOfInputs))
self.tau = 250
self.calcTau = 80
self.weightTau = 1250
self.biasMultiplier = 1.5
self.debug = False
# print("Initialized prop to: "+str(initialProbability))
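        # Optional command-line overrides: argv[4..7] set tau, calcTau, weightTau and biasMultiplier.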
if(len(sys.argv) > 1):
self.tau = int(sys.argv[4])
self.calcTau = int(sys.argv[5])
self.weightTau = int(sys.argv[6])
self.biasMultiplier = float(sys.argv[7])
def isActivated(self):
return True if(self.value > 0.5) else False
def reinitializeWeightMatrix(self):
self.weights = np.random.uniform(low=0, high=0.05, size=(self.numOfInputs))
def reinitializeConWeightMatrix(self):
## Reinitialize the connection weight matrix
self.connectionWeights = np.random.uniform(low=0, high=0.05, size=(len(self.connections)))
## Reinitialize the connected prob array
self.connectedProbabilities = np.repeat(0.5, len(self.connections))
def addConnectionFrom(self, otherNode):
self.connections = np.append(self.connections, otherNode)
self.reinitializeConWeightMatrix()
def addConnectionList(self, connectionList):
self.connections = np.concatenate([self.connections, connectionList])
self.reinitializeConWeightMatrix()
# Function that calculates the INITIAL activation of the neuron
# Inputs:
# - input values
# - weights
# Output:
# - node activation value
def calculate(self, input):
self.input = input
if self.debug:
print("Input is: ")
print(input)
print("Weights are: ")
print(list(self.weights))
print("my probability is: ")
print(self.probability)
# ## Equation 1 - update probability for self
changeInProb = (input - self.probability) / self.calcTau
self.probability += changeInProb
# Calculate the node activation (should get a single value)
# Following formula #5 from Lansner paper
activation = sum(np.dot(input, self.weights)) + self.bias
## Take average of activation
self.value = activation
if self.debug:
print("Value is: ")
print(self.value)
# Function that augments the node's activation by computing the connection weights
# Inputs:
# - node activations
# Output:
# - new node activation for self
def calculateConnectedNodes(self):
# print(list(self.weights))
# if self.debug:
# print("Weights are: ")
# print(list(self.weights))
# print("my probability is: ")
# print(self.probability)
if (self.probability <= 0):
self.probability = 0.00000000000000000000000001
# Equation 3 (Lansner)
self.bias = math.log(self.probability, 10)
# For each connected node, multiply the other node's value by a internally stored weight
# print("Initial value: "+str(self.value))
for index, node in enumerate(self.connections):
## Update the co-activation probability - EQUATION #2
newConnProb = ((self.input*node.input) - self.connectedProbabilities[index])
if (newConnProb == 0):
newConnProb = 0.00000000000000000000000001
if (newConnProb > 1e20):
newConnProb = 1e20
self.connectedProbabilities[index] += newConnProb / self.tau
# self.value = 0
# Equation 5 - taking sum of unit's activity * connected weights
self.value += node.value * self.connectionWeights[index]
# Equation 5 (support value being calculated with bias)
self.value += (self.bias * self.biasMultiplier)
# print("Conn - weights")
# print(self.connectionWeights)
# Taking sigsmoid - otherwise value accelerates away
# self.value = self.sigmoidOfValue(self.value)
if self.debug:
print("FINAL -- My value is: ")
print(self.value)
def sigmoidOfValue(self, value):
sigmoid = 1 / (1 + math.exp(-value))
return sigmoid
# Function to update the weights that the node has control over
# Inputs:
# - self weights
# - input to node
# Output:
# - new node weights for self
def updateWeights(self, target):
# if self.isWinningNode == False:
# self.probability = 0
# return
# Equation 1
## Calculate own
self.previousProbability = self.probability
changeInProb = (target - self.probability)/self.weightTau
self.probability = self.probability + changeInProb
# if self.debug:
# print("My prob: " + str(self.probability) + " | change: " + str(changeInProb)+ " | old: " + str(self.previousProbability ))
if(self.probability <= 0):
self.probability = 0.01
# print("Probability was less than 0 - "+str(self.probability))
# Doing logs in base 10
# for index, weight in enumerate(self.weights):
# # New weight value = log( input - e^(-1 / tau) * (input - a ^ old weight value)
#
# # Verify the connected probability is not 0
# if (self.connectedProbabilities[index] <= 0):
# self.connectedProbabilities[index] = 0.00000000000000000000000001
#
#
# # # Note - if statement avoids divide by 0
# # if((self.probability * self.connections[index].probability) != 0):
# # Weight update rule
# print("Weight length")
# print(len(self.weights))
# print("connectedProbabilities length")
# print(len(self.connectedProbabilities))
# print("connections length")
# print(len(self.connections))
#
# self.weights[index] = math.log(self.connectedProbabilities[index]/(self.probability * self.connections[index].probability), 10)
        # Gather the connected units' activation probabilities as an array (a bare map object
        # cannot be sliced or used in the division below under Python 3).
        connProbs = np.array([conn.probability for conn in self.connections])
        # Bayesian weight rule: w_ij = log10( p_ij / (p_i * p_j) ), scaled down by 8.
        c = self.connectedProbabilities[:len(self.weights)] / (self.probability * connProbs[:len(self.weights)])
        self.weights = (np.log10(c))/8
# print(self.weights)
# print(testweights)
# Update the interconnected nodes
def updateConnectedProbabilities(self):
for index, connNode in enumerate(self.connections):
# print("\nConnected probabilities: \n")
# print(self.connectedProbabilities)
try:
if(connNode.value > 0.5 and self.value > 0.5 and self.connectedProbabilities[index] < 300):
self.connectedProbabilities[index] *= 1.0000005
self.connectionWeights[index] *= 1.0000005
# print("UPDATING")
# print(self.connectedProbabilities[index])
else:
self.connectedProbabilities[index] *= 0.9999995
self.connectionWeights[index] *= 0.9999995
# print("VALUES")
# print(str(connNode.value)+" "+str(self.value))
except RuntimeWarning:
print("Encountered issue!")
print(self.connectedProbabilities[index])
def increaseConnectionWeights(self, percent):
# Check if both nodes are active at the same time
for index, node in enumerate(self.connections):
# Implementing hebbian learning - if
# both nodes active at the same time - strengthen the connection
if(self.isWinningNode and node.isWinningNode):
self.connectionWeights[index] += abs(self.connectionWeights[index] * percent)
def decreaseConnectionWeights(self, percent):
# Check if both nodes are active at the same time
for index, node in enumerate(self.connections):
if (self.isWinningNode or node.isWinningNode):
self.connectionWeights[index] -= abs(self.connectionWeights[index] * percent)
| UTF-8 | Python | false | false | 8,801 | py | 8 | Neuron.py | 6 | 0.600614 | 0.578343 | 0 | 248 | 34.479839 | 141 |
ChrisThor/Gate-To-Gods | 326,417,544,871 | d02d2003b54fad35e1ea8e05f1fe0ccf6f2f0c88 | 82f4a0f00ad629eef5bb6a736ee4e63983a33914 | /readchar/readchar_windows.py | 8637a2efd65fa086df88312ed1d613cf01dae1c2 | []
| no_license | https://github.com/ChrisThor/Gate-To-Gods | 0df5efee2f22427b2b5e089efdec3b44c4137bde | c66e09b38475831673dcd40bc51878b7fd4b43d2 | refs/heads/master | 2020-09-07T16:12:54.382595 | 2019-12-18T13:18:50 | 2019-12-18T13:18:50 | 220,838,620 | 0 | 0 | null | false | 2019-12-03T17:10:11 | 2019-11-10T19:21:26 | 2019-12-03T14:10:50 | 2019-12-03T17:10:10 | 109 | 1 | 0 | 4 | Python | false | false | # -*- coding: utf-8 -*-
# Initially taken from:
# http://code.activestate.com/recipes/134892/#c9
# Thanks to Stephen Chappell
# Licenced under MIT-Licence:
"""Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
import msvcrt
import sys
win_encoding = 'mbcs'
XE0_OR_00 = '\x00\xe0'
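# On Windows, getch() yields '\x00' or '\xe0' as a prefix for extended keys (arrows, function keys);
# the loop in readchar() discards that prefix together with the scan-code byte that follows it.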
def readchar(blocking=False):
"Get a single character on Windows."
while msvcrt.kbhit():
msvcrt.getch()
ch = msvcrt.getch()
# print('ch={}, type(ch)={}'.format(ch, type(ch)))
# while ch.decode(win_encoding) in unicode('\x00\xe0', win_encoding):
while ch.decode(win_encoding) in XE0_OR_00:
# print('found x00 or xe0')
msvcrt.getch()
ch = msvcrt.getch()
return (
ch
if sys.version_info.major > 2
else ch.decode(encoding=win_encoding)
)
| UTF-8 | Python | false | false | 1,787 | py | 34 | readchar_windows.py | 29 | 0.72244 | 0.70901 | 0.002238 | 47 | 37.021277 | 118 |
jbsam2/algo_problem | 2,619,930,071,745 | 9dc0f22f00c95cca8ab6c5df161d0eed636d42ad | cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5 | /kakao/2018/kakao2018_5.py | 5e72f610892dc2e33c1d050a008fa921d6f74bbd | []
| no_license | https://github.com/jbsam2/algo_problem | 141c17003e88a69afdeea93a723e7f27c4626fdc | 18f2cab5a9af2dec57b7fd6f8218badd7de822e4 | refs/heads/master | 2023-05-18T10:03:00.408300 | 2021-06-02T10:36:50 | 2021-06-02T10:36:50 | 282,104,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def solution(str1, str2):
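    # Jaccard similarity of the multisets of two-letter alphabetic bigrams, case-insensitive
    # (ord(ch) & 31 maps 'a'/'A'..'z'/'Z' to 1..26), scaled by 65536 and truncated.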
a=b=0;c=[0]*676;d=[0]*676
for i in range(len(str1)-1):
if str1[i].isalpha() and str1[i+1].isalpha():
c[((ord(str1[i])&31)-1)*26+(ord(str1[i+1])&31)-1]+=1
for i in range(len(str2)-1):
if str2[i].isalpha() and str2[i+1].isalpha():
d[((ord(str2[i])&31)-1)*26+(ord(str2[i+1])&31)-1]+=1
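    # a = size of the multiset intersection, b = size of the multiset union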
for i in range(676):a+=min(c[i],d[i]);b+=max(c[i],d[i])
return a*65536//b if b else 65536 | UTF-8 | Python | false | false | 457 | py | 592 | kakao2018_5.py | 587 | 0.522976 | 0.396061 | 0 | 10 | 44.8 | 64 |
rlworkgroup/garage | 455,266,540,773 | 606a0177b559425a3d1e9aba0cd60553645831ac | 6181fcd4a266d963a0ee85971768c97922ca77cd | /src/garage/examples/tf/te_ppo_point.py | 5afd0f63528bb69172b84de31371a2d0106d0ff8 | [
"MIT"
]
| permissive | https://github.com/rlworkgroup/garage | 5d215bbecb3a4e74b504988d6684a7b04df69a80 | 2d594803636e341660cab0e81343abbe9a325353 | refs/heads/master | 2023-08-21T22:58:49.338034 | 2023-01-04T06:06:27 | 2023-01-04T06:06:27 | 136,846,372 | 1,832 | 363 | MIT | false | 2023-09-11T11:36:40 | 2018-06-10T21:31:23 | 2023-09-10T08:30:15 | 2023-05-04T14:44:22 | 60,358 | 1,729 | 296 | 232 | Python | false | false | #!/usr/bin/env python3
"""This is an example to train Task Embedding PPO with PointEnv."""
# pylint: disable=no-value-for-parameter
import click
import numpy as np
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import PointEnv
from garage.envs.multi_env_wrapper import MultiEnvWrapper, round_robin_strategy
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
from garage.trainer import TFTrainer
def circle(r, n):
"""Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
"""
for t in np.arange(0, 2 * np.pi, 2 * np.pi / n):
yield r * np.sin(t), r * np.cos(t)
N = 4
goals = circle(3.0, N)
TASKS = {
str(i + 1): {
'args': [],
'kwargs': {
'goal': g,
'never_done': False,
'done_bonus': 10.0,
}
}
for i, g in enumerate(goals)
}
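# TASKS maps ids '1'..'4' to PointEnv goals evenly spaced on a circle of radius 3.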
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_pointenv(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
tasks = TASKS
latent_length = 2
inference_window = 6
batch_size = batch_size_per_task * len(TASKS)
policy_ent_coeff = 1e-3
encoder_ent_coeff = 1e-3
inference_ce_coeff = 5e-2
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = 2.0
policy_min_std = None
task_names = sorted(tasks.keys())
task_args = [tasks[t]['args'] for t in task_names]
task_kwargs = [tasks[t]['kwargs'] for t in task_names]
with TFTrainer(snapshot_config=ctxt) as trainer:
task_envs = [
PointEnv(*t_args, **t_kwargs, max_episode_length=100)
for t_args, t_kwargs in zip(task_args, task_kwargs)
]
env = MultiEnvWrapper(task_envs, round_robin_strategy, mode='vanilla')
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=0.1,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=TaskEmbeddingWorker)
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
inference=inference,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
center_adv=True,
stop_ce_gradient=True)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
te_ppo_pointenv()
| UTF-8 | Python | false | false | 5,728 | py | 506 | te_ppo_point.py | 437 | 0.566515 | 0.55412 | 0 | 174 | 31.91954 | 79 |
abdhk383/bitbonn | 5,128,190,989,781 | cf1c67df861d630332c6656ef8cafad3bb19573d | 7496428cdc7a0e51c5720ca449987995ae7b64b7 | /MyCode/features/colorsift.py | 06360167fd263d9202128e5f873f15fa2c30e58e | []
| no_license | https://github.com/abdhk383/bitbonn | 11155c9184e4266abeefb8dcf62662e69b3ecf39 | 82201c53f0d51b3f7988d2ef23a990b6abe471b6 | refs/heads/master | 2016-05-26T02:17:11.680546 | 2012-12-21T16:13:43 | 2012-12-21T16:13:43 | 3,581,614 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cv2
import tempfile,os
from MyCode.features import DescriptorIO
from MyCode.utils.spatial import *
import scipy
def get_colorsift(im_arr,pSize=None,steps=6):
binary_software = '/home/eweiwi/bin/colorDescriptor'
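    # Write the image to a temporary file, run the external colorDescriptor binary on it with
    # dense sampling, and read the descriptors back from its binary output file.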
#import ipdb; ipdb.set_trace()
tmp_im = 'tmp_im.jpg'
cv2.imwrite(tmp_im,im_arr)
descriptor_type = 'sift'
print 'Extract %s from image with shape %s'%(descriptor_type,str(im_arr.shape))
keep_Limited = 1500
extractionConfig = "--detector densesampling --ds_spacing %d --ds_scales 3.2+1.2+1.8+2.4 --descriptor %s --keepLimited %s"%(steps,descriptor_type,keep_Limited)
(f, tempFilename) = tempfile.mkstemp()
#keep_Limited = 2500
os.close(f)
#print "Created temporary file:", tempFilename
#print "Keeping maximum of %d descriptors per image for clustering" % keep_Limited
cmd_line = "%s %s --outputFormat binary --output %s %s" % \
(binary_software, tmp_im, tempFilename, extractionConfig)
return_code = os.system(cmd_line)
if return_code != 0 :
raise Exception("Error when executing '%s': command returned error" % cmd_line)
#import ipdb; ipdb.set_trace()
(points, descriptors) = DescriptorIO.readDescriptors(tempFilename)
print "Number of features extracted: %s \n"%(str(descriptors.shape[0]))
#invert axes to meet numpy representation
points[:,[0,1]] = points[:,[1,0]]
#points_scaled = np.c_[points[:,0]/im_arr.shape[0],points[:,1]/im_arr.shape[1]]
#points_scaled *= 300
#import ipdb; ipdb.set_trace()
#return np.c_[points,points_scaled,descriptors]
#descriptors = normalize2d(descriptors,2,1)
points = scipy.delete(points,[2,3,4],1)
feature = np.c_[points,descriptors]
feature = feature.astype('float16')
return feature
if __name__ == '__main__' :
#sys.exit(main())
pass
| UTF-8 | Python | false | false | 1,890 | py | 102 | colorsift.py | 85 | 0.660847 | 0.639153 | 0 | 54 | 34 | 167 |
Axaxa1/2048 | 1,047,972,046,941 | bff263fc32685305829b695ae48eb87e1e26ed61 | f22696356c32a570a2e590b965d15268567c83ad | /mon_module/mon_module/pool.py | 35c9ec80c8a8d0fb5db5f89078699d1b2123cdc7 | [
"MIT"
]
| permissive | https://github.com/Axaxa1/2048 | 19aea8b2b0113eb28ca088024473f2b2364dc565 | b4a6dd1c4262ad99389abb79de13f217574ef370 | refs/heads/master | 2020-09-03T15:15:51.089249 | 2019-11-12T21:29:59 | 2019-11-12T21:29:59 | 219,495,556 | 0 | 1 | null | false | 2019-11-12T21:30:01 | 2019-11-04T12:23:16 | 2019-11-12T20:53:14 | 2019-11-12T21:30:00 | 58 | 0 | 1 | 0 | Python | false | false | import random
from Game import *
from Ai import *
from multiprocessing import Pool
from statistics import mean
def monteCarloPoolMove(jeu,nsim,pool):
    # For each of the four possible first moves, run nsim random playouts in parallel
    # and keep the move with the best average final score.
averages = [0,0,0,0]
test = game()
for firstMove in range(4):
test.copyGame(jeu)
if not(test.gameOver()):
test.play(firstMove)
li_test = [test]*nsim
li_res = pool.map(auxPool, li_test)
averages[firstMove] += mean(li_res)
attempt = 0
im = 0
sucess = False
while (attempt <=4 and sucess == False):
for i in range(4):
if averages[i] > averages[im]:
im = i
averages[im] = 0
attempt += 1
sucess = jeu.play(im)
def auxPool(jeu):
g = game();
g.copyGame(jeu)
return playRandom(g,-1)
def main():
jeu = game()
i = 0
pool = Pool()
while not(jeu.gameOver()):
monteCarloPoolMove(jeu,1000,pool)
#if i%10 == 0:
jeu.show()
i+=1
    pool.close()
    return jeu.score
if __name__== '__main__':
main()
| UTF-8 | Python | false | false | 1,154 | py | 25 | pool.py | 21 | 0.512132 | 0.493934 | 0 | 50 | 21.08 | 47 |
mutiangua/EIS2020 | 1,872,605,759,400 | af3e3c67b4a4f3d14f40e81b59f309f3d7e17364 | 14b5679d88afa782dc5d6b35878ab043089a060a | /students/LiRuomeng/6.5/PUB1.py | a00187e4b90bc52107fefd4154d3e14688efa77a | []
| no_license | https://github.com/mutiangua/EIS2020 | c541ef32623f67f9277945cd39cff3c02f06e4dd | 92aa2711b763a2c93be238825c445bf2db8da391 | refs/heads/master | 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from MXMqtt import MXMqtt
MQTTHOST = "mqtt.16302.com"
MQTTPORT = 1883
mqtt = MXMqtt(MQTTHOST,MQTTPORT)
mqtt.SUB("lrm1")
mqtt.SUB("zkx")
mqtt.SUB("oys1552")
mqtt.SUB("gch")
mqtt.SUB("wzy")
mqtt.SUB("mooc12345")
while True:
msg = mqtt.returnMsg()
if msg != None:
print(msg)
| UTF-8 | Python | false | false | 316 | py | 967 | PUB1.py | 466 | 0.610759 | 0.550633 | 0 | 17 | 16.352941 | 32 |
mbreyes/spikelearn | 249,108,119,464 | 88cf8b4a33ccbc4150a1f9bb76c185147c53539d | 1d09d529180daaa7167509ee1a5b3087c8e9875a | /spikelearn/models/shuffle_decoding.py | d665702901c281bb1f99ee98d1737daaf3b194d3 | [
"MIT"
]
| permissive | https://github.com/mbreyes/spikelearn | de43d85a101dde11553d8cd760262aa97b16fc8a | 060206558cc37c31493f1c9f01412d90375403cb | refs/heads/master | 2022-03-14T20:15:49.284035 | 2019-11-18T19:07:18 | 2019-11-18T19:07:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from sklearn.model_selection import GroupShuffleSplit, cross_val_predict, GroupKFold
from sklearn.base import clone, TransformerMixin
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# from multiprocessing import Pool
# from contextlib import closing
from numpy.random import permutation
from scipy.stats import pearsonr
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
class MonteCarloFeatures(TransformerMixin):
def __init__(self, n_features):
self.n_features = n_features
self.features_ = None
def fit(self, X, y):
if type(X) is not pd.DataFrame:
X = pd.DataFrame(X)
assert X.shape[1] >= self.n_features
self.features_ = np.random.permutation(X.columns.values)[:self.n_features]
return self
def transform(self, X, y=None):
assert X.shape[1] >= self.n_features
if type(X) is not pd.DataFrame:
X = pd.DataFrame(X)
return X[self.features_].values
else:
return X[self.features_]
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X, y)
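# Example (sketch, hypothetical usage): subsample 20 random units before decoding, e.g.
#     from sklearn.pipeline import Pipeline
#     from sklearn.linear_model import LogisticRegression
#     pipe = Pipeline([('subsample', MonteCarloFeatures(20)), ('clf', LogisticRegression())])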
# TODO support parallelization
# TODO separate cross and simple
class Results_shuffle_val():
"""
Organization dataholder for results from
shuffle_val_predict
Attributes
----------
n_splits, train_size, test_size : int
Parameters used in the analysis.
scoring_function : list of tuples [(name, callable), ]
the scoring function used in the analysis
classes, groups, features : arrays
Characteristics of the dataset
id_vars, fat_vars : list of string
The name of identifier variables
Including 'true_label' and 'group' in fat.
data, proba, weights, predictions, scores, stats: DataFrames
The results of the analysis
"""
def __init__(self, n_splits, train_size,
test_size, scoring_metric,
classes, groups, features, id_vars):
self.n_splits = n_splits
self.train_size = train_size
self.test_size = test_size
self.scoring_metric = scoring_metric
self.classes = classes
self.groups = groups
self.features = features
self.id_vars = id_vars
self.fat_vars = id_vars + ['true_label', 'group']
self.proba = pd.DataFrame(columns=self.id_vars)
self.weights = pd.DataFrame(columns=self.id_vars)
self.predictions = pd.DataFrame(columns=self.id_vars)
self.score = pd.DataFrame()
self.stats = pd.DataFrame()
# Internal
def scoring_function(self, true, pred, metric):
if metric == 'pearson':
return np.nan_to_num(pearsonr(true, pred)[0])
elif metric == 'accuracy':
return accuracy_score(true, pred)
else:
if callable(metric):
return metric(true, pred)
else:
raise NotImplementedError
def _input_id_vars(self, df, **kwargs):
for key in self.id_vars:
df[key] = kwargs[key]
return df
def _thin(self, df, let_labels=False):
if let_labels:
to_drop = self.id_vars
else:
to_drop = self.fat_vars
return df.drop(to_drop, axis=1)
# shuffle_val_predict usage
def append_probas(self, probas,
true_labels, groups, **kwargs):
local = pd.DataFrame(probas, columns=self.classes)
local['true_label'] = true_labels
local['group'] = groups
local = self._input_id_vars(local, **kwargs)
self.proba = self.proba.append(local)
def append_pred(self, pred, true_labels, groups, **kwargs):
local = pd.DataFrame(pred, columns=['prediction'])
local['true_label'] = true_labels
local['group'] = groups
local = self._input_id_vars(local, **kwargs)
self.predictions = self.predictions.append(local)
def append_weights(self, weights, **kwargs):
index = pd.Index(self.classes)
local = pd.DataFrame(weights,
index=self.classes,
columns=self.features)
local = local.reset_index().melt(var_name='feature',
id_vars=[self.classes.name])
local = self._input_id_vars(local, **kwargs)
self.weights = self.weights.append(local)
def calculate_predictions(self):
pred_max = lambda x: x.idxmax()
self.predictions['predictions_max'] = self._thin(self.proba).apply(pred_max, axis=1)
pred_mean = lambda x: np.sum(self._thin(self.proba).columns.values * x.values)
self.predictions['predictions_mean'] = self._thin(self.proba).apply(pred_mean, axis=1)
self.predictions[self.fat_vars] = self.proba[self.fat_vars]
def compute_score(self):
for metric in self.scoring_metric:
if metric in ['pearson']:
for which in ['max', 'mean']:
scoring = lambda df: self.scoring_function(df['true_label'], df['predictions_' + which], metric)
self.score['{}_{}'.format(metric, which)] = self.predictions.groupby(self.id_vars).apply(scoring)
else:
scoring = lambda df: self.scoring_function(df['true_label'], df['predictions_max'], metric)
self.score[metric] = self.predictions.groupby(self.id_vars).apply(scoring)
self.score = self.score.reset_index()
def add_identifiers(self, **kwargs):
for key, val in kwargs.items():
self.id_vars.append(key)
self.fat_vars.append(key)
            self.predictions[key] = val
self.proba[key] = val
self.weights[key] = val
def compute_stats(self):
fields = ['score_max', 'score_mean']
def SEM(df):
return df.std() / np.sqrt(df.shape[0])
def pool_SEM(df):
pool = df.groupby('trained_here').get_group(True)[fields]
gtest = df.groupby('tested_on')
return gtest.apply(lambda df: SEM(pd.concat((df[fields], pool))))
def pool_MEAN(df):
pool = df.groupby('trained_here').get_group(True)[fields]
gtest = df.groupby('tested_on')
return gtest.apply(lambda df: (pd.concat((df[fields], pool))).mean())
def Z_score(df):
df_sem = pool_SEM(df)
df_means = df.groupby('tested_on').mean()[fields]
df_pm = pool_MEAN(df)
return (df_means - df_pm) / df_sem
self.stats = self.score.groupby('trained_on').apply(Z_score)
# Outside usage
def proba_matrix(self, plot=True, grouping=None, **kwargs):
if grouping is not None:
df = self.proba.groupby(grouping[0]).get_group(grouping[1])
else:
df = self.proba
df = df.groupby('true_label').mean().drop('group', axis=1)
if plot:
sns.heatmap(df, **kwargs)
plt.title('Mean probability associated with labels')
plt.xlabel('Possible labels');
plt.ylabel('True label')
return df
def confusion_matrix(self, plot=True, grouping=None, which='max', **kwargs):
if grouping is not None:
df = self.predictions.groupby(grouping[0]).get_group(grouping[1])
else:
df = self.predictions
mat = confusion_matrix(df.true_label, df['predictions_' + which])
if plot:
sns.heatmap(mat, **kwargs)
plt.title('Confusion matrix');
plt.xlabel('Predicted label');
plt.ylabel('True label')
return mat
def save(self, filename):
pickle.dump(self, open(filename, 'wb'))
def shuffle_val_predict(clf, dfs, names=None, X=None, y=None, group=None,
cv='sh', n_splits=5, feature_scaling=None,
train_size=.8, test_size=.2,
balance_feature_number=False,
get_weights=False, score=['pearson', 'accuracy'],
id_kwargs=None, verbose=0, **kwargs):
"""
Trains in each dataset, possibly testing on both, to calculate statistics
about generalization between datasets.
Parameters
----------
clf : sklearn classifier instance
The model which will be fitted and used to predict labels
dfs : DataFrame, or list of DataFrames
The data holders
names : list of strings, optional, default range
The ID variables of each DataFrame
If not given, will default to 0, 1, ..., len(dfs)-1
X, y, group : indices [str[, str] ], optional
The indices of each variable in the dataframes
If not given, will default to
X -> df columns
y -> second index name
group -> first index name
See notes
cv : str or callable, default 'sh'
The splitting method to use.
n_splits : int
Number of splits to be done.
feature_scaling : string
The kind of scaling to apply to features.
Implemented via pipeline (fitted only during training)
One of 'standard', 'minmax', 'robust'.
balance_feature_number : bool, default False
get_weights : bool
Whether to save and return the weights of each model
score : callable
function( true_label, pred_label ) -> number
Defaults to pearson's correlation
Keyword Arguments
-----------------
Extra kwargs will be passed on to the cv function
Notes
-----
While the number of features can differ in some cases,
the variables y and group must be the same for all dfs
Returns
-------
results : Results_shuffle_val
dataholder for this results.
consists in many dataframes for ease of access
proba, weights, predictions, scores, stats
See also
--------
Results_shuffle_val
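
    Examples
    --------
    A minimal sketch (``df_a`` and ``df_b`` are hypothetical spike-count
    DataFrames indexed by (trial, label), one column per unit)::

        from sklearn.linear_model import LogisticRegression
        res = shuffle_val_predict(LogisticRegression(), [df_a, df_b],
                                  names=['a', 'b'], n_splits=10,
                                  feature_scaling='standard')
        res.score   # one row per cv split and training dataset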
"""
# Dealing with other optional variables
if names is None:
names = np.arange(len(dfs))
if type(dfs) == pd.DataFrame:
dfs, names = [dfs], [names]
print(names)
if X is None:
assert group == None and y == None
if get_weights:
assert len(np.unique([len(df.columns) for df in dfs])) == 1
assert all([(df.columns == dfs[0].columns).all() for df in dfs])
X = dfs[0].columns
else:
X = {name: df.columns for df, name in zip(dfs, names)}
y = dfs[0].index.names[1]
group = dfs[0].index.names[0]
dfs = [df.reset_index() for df in dfs]
# Number of training and testing is defined by the smallest dataframe if it is fractional
size_smallest = min([df[group].unique().shape[0] for df in dfs])
if train_size < 1:
n_train = int(size_smallest * train_size)
else:
n_train = train_size
if test_size < 1:
n_test = int(size_smallest * test_size)
else:
n_test = test_size
# Number of features is also defined by the smallest
if balance_feature_number:
n_feats = min([df.shape[1] for df in dfs]) - 2
# Method of cross-validation
if cv == 'kf':
sh = GroupKFold(n_splits=n_splits, **kwargs)
elif cv == 'sh':
sh = GroupShuffleSplit(n_splits=n_splits,
train_size=n_train, test_size=n_test, **kwargs)
elif isinstance(cv, object):
sh = cv
# Scaling
if feature_scaling is None:
pass
elif feature_scaling == 'minmax':
clf = Pipeline([('minmaxscaler', MinMaxScaler((-1, 1))),
('classifier', clf)])
elif feature_scaling == 'standard':
clf = Pipeline([('standardscaler', StandardScaler()),
('classifier', clf)])
elif feature_scaling == 'robust':
clf = Pipeline([('robustscaler', RobustScaler()),
('classifier', clf)])
else:
raise ValueError(
'%s scaling is not accepted.\n Lookup the documentation for accepted scalings' % feature_scaling)
# Define the results format
classes = pd.Index(np.unique(dfs[0][y]), name=y)
id_vars = ['cv',
'trained_on',
'tested_on',
'trained_here',
'n_features']
res = Results_shuffle_val(n_splits=n_splits,
train_size=n_train,
test_size=n_test,
scoring_metric=score,
classes=classes,
groups=dfs[0][group].unique(),
features=pd.Index(X, name='unit'),
id_vars=id_vars)
# Make the cross validation on each dataset
for traindf, name in zip(dfs, names):
if verbose > 0: print('\n-------\nDataset %s' % name)
for i, (train_idx, test_idx) in enumerate(sh.split(traindf[y], traindf[y], traindf[group])):
if verbose > 1: print(i, end=', ')
if get_weights:
shufX = X
elif balance_feature_number:
shufX = np.random.permutation(X[name])[:n_feats]
else:
shufX = X[name]
clf_local = clone(clf)
clf_local.fit(traindf[shufX].values[train_idx], traindf[y].values[train_idx])
if verbose >= 4:
print("Has %d features" % len(X[name]), end=', ')
print('now using %s' % shufX)
trained_here = True
testdf, testname, idx = traindf, name, test_idx
if hasattr(clf_local, 'predict_proba'):
probas = clf_local.predict_proba(testdf[shufX].values[idx])
else:
probas = clf_local.decision_function(testdf[shufX].values[idx])
true_labels = testdf[y].values[idx]
pred_groups = testdf[group].values[idx]
res.append_probas(probas, true_labels,
cv=i, trained_on=name,
tested_on=testname,
trained_here=trained_here,
groups=pred_groups,
n_features=len(shufX))
if get_weights:
res.append_weights(clf_local.coef_,
cv=i, trained_on=name,
tested_on=np.nan,
trained_here=np.nan)
if id_kwargs is not None:
        res.add_identifiers(**id_kwargs)
res.calculate_predictions()
res.compute_score()
# res.compute_stats()
return res
def shuffle_cross_predict(clf, dfs, names=None, X=None, y=None, group=None,
cv='sh', n_splits=5, feature_scaling=None,
train_size=.8, test_size=.2, problem='classification',
balance_feature_number=False,
get_weights=False, score=['pearson', 'accuracy'],
id_kwargs=None, verbose=0, **kwargs):
"""
Trains in each dataset, possibly testing on both, to calculate statistics
about generalization between datasets.
Parameters
----------
clf : sklearn classifier instance
The model which will be fitted and used to predict labels
dfs : DataFrame, or list of DataFrames
The data holders
names : list of strings, optional, default range
The ID variables of each DataFrame
If not given, will default to 0, 1, ..., len(dfs)-1
problem : char, default 'classification'
may be 'classification' or 'regression'
X, y, group : indices [str[, str] ], optional
The indices of each variable in the dataframes
If not given, will default to
X -> df columns
y -> second index name
group -> first index name
See notes
cv : str or callable, default 'sh'
The splitting method to use.
n_splits : int
Number of splits to be done.
feature_scaling : string
The kind of scaling to apply to features.
Implemented via pipeline (fitted only during training)
One of 'standard', 'minmax', 'robust'.
balance_feature_number : bool, default False
get_weights : bool
Whether to save and return the weights of each model
score : callable
function( true_label, pred_label ) -> number
Defaults to pearson's correlation
Keyword Arguments
-----------------
Extra kwargs will be passed on to the cv function
Notes
-----
While the number of features can differ in some cases,
the variables y and group must be the same for all dfs
Returns
-------
results : Results_shuffle_val
dataholder for this results.
consists in many dataframes for ease of access
proba, weights, predictions, scores, stats
See also
--------
Results_shuffle_val
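
    Examples
    --------
    Call pattern is the same as for ``shuffle_val_predict``, but every fitted model is
    additionally tested on the other datasets; with ``problem='regression'`` the raw
    predictions are stored instead of class probabilities.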
"""
# Dealing with other optional variables
if names is None:
names = np.arange(len(dfs))
if type(dfs) == pd.DataFrame:
dfs, names = [dfs], [names]
print(names)
if X is None:
assert group == None and y == None
assert len(np.unique([len(df.columns) for df in dfs])) == 1
assert all([(df.columns == dfs[0].columns).all() for df in dfs])
X = dfs[0].columns
y = dfs[0].index.names[1]
group = dfs[0].index.names[0]
dfs = [df.reset_index() for df in dfs]
# Number of training and testing is defined by the smallest dataframe if it is fractional
size_smallest = min([df[group].unique().shape[0] for df in dfs])
if train_size < 1:
n_train = int(size_smallest * train_size)
else:
n_train = train_size
if test_size < 1:
n_test = int(size_smallest * test_size)
else:
n_test = test_size
# Number of features is also defined by the smallest
if balance_feature_number:
assert not cross_prediction
n_feats = min([df.shape[1] for df in dfs]) - 2
# Method of cross-validation
if cv == 'kf':
sh = GroupKFold(n_splits=n_splits, **kwargs)
elif cv == 'sh':
sh = GroupShuffleSplit(n_splits=n_splits,
train_size=n_train, test_size=n_test, **kwargs)
elif isinstance(cv, object):
sh = cv
# Scaling
if feature_scaling is None:
pass
elif feature_scaling == 'minmax':
clf = Pipeline([('minmaxscaler', MinMaxScaler((-1, 1))),
('classifier', clf)])
elif feature_scaling == 'standard':
clf = Pipeline([('standardscaler', StandardScaler()),
('classifier', clf)])
elif feature_scaling == 'robust':
clf = Pipeline([('robustscaler', RobustScaler()),
('classifier', clf)])
else:
raise ValueError(
'%s scaling is not accepted.\n Lookup the documentation for accepted scalings' % feature_scaling)
# Define the results format
classes = pd.Index(np.unique(dfs[0][y]), name=y)
id_vars = ['cv',
'trained_on',
'tested_on',
'trained_here',
'n_features']
res = Results_shuffle_val(n_splits=n_splits,
train_size=n_train,
test_size=n_test,
scoring_metric=score,
classes=classes,
groups=dfs[0][group].unique(),
features=pd.Index(X, name='unit'),
id_vars=id_vars)
# Make the cross validation on each dataset
for traindf, name in zip(dfs, names):
if verbose > 0: print('\n-------\nDataset %s' % name)
for i, (train_idx, test_idx) in enumerate(sh.split(traindf[y], traindf[y], traindf[group])):
if verbose > 1: print(i, end=', ')
clf_local = clone(clf)
            if verbose > 1:
                print(type(traindf), type(X))
                print(traindf.shape)
clf_local.fit(traindf[X].values[train_idx], traindf[y].values[train_idx])
# also test on each dataset
for testdf, testname in zip(dfs, names):
if testname == name:
trained_here = True
idx = test_idx
else:
trained_here = False
size = len(test_idx)
idx = permutation(testdf.shape[0])[:size]
true_labels = testdf[y].values[idx]
pred_groups = testdf[group].values[idx]
if problem == 'classification':
if hasattr(clf_local, 'predict_proba'):
probas = clf_local.predict_proba(testdf[X].values[idx])
else:
probas = clf_local.decision_function(testdf[X].values[idx])
res.append_probas(probas, true_labels,
cv=i, trained_on=name,
tested_on=testname,
trained_here=trained_here,
groups=pred_groups,
n_features=len(X))
elif problem == 'regression':
pred = clf_local.predict(testdf[X].values[idx])
res.append_pred(pred, true_labels,
cv=i, trained_on=name,
tested_on=testname,
trained_here=trained_here,
groups=pred_groups,
n_features=len(X))
if get_weights:
res.append_weights(clf_local.coef_,
cv=i, trained_on=name,
tested_on=np.nan,
trained_here=np.nan)
if id_kwargs is not None:
res.add_identifiers(id_kwargs)
if problem=='classification':
res.calculate_predictions()
res.compute_score()
return res
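# A minimal usage sketch (not part of the original module; the DataFrame
# names and the classifier below are assumptions for illustration). Any
# DataFrames indexed by (group, label) with matching index names should work:
#
#   from sklearn.svm import LinearSVC
#   # df_a, df_b: hypothetical feature tables indexed by (group, label)
#   res = shuffle_cross_predict(LinearSVC(), [df_a, df_b],
#                               names=['session_a', 'session_b'],
#                               n_splits=5, feature_scaling='standard',
#                               problem='classification')
#   print(res.scores.head())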
| UTF-8 | Python | false | false | 22,446 | py | 129 | shuffle_decoding.py | 70 | 0.550343 | 0.547492 | 0 | 633 | 34.459716 | 117 |
PhysicsOfMobility/ridesharing_topology_dependence | 18,743,237,292,516 | 030b3d1b4ceb0341f1ab9597689021a2e024423e | 67c0b3b04b0929a5f6f36b90707a28d35a67cbf3 | /toysimulations/test_simulator.py | 84d823b8541e4d3506284b2b4ae98b5f6773c280 | [
"BSD-3-Clause"
]
| permissive | https://github.com/PhysicsOfMobility/ridesharing_topology_dependence | 2c93b322a69d0dbe056216d578892c45fcc82720 | 43e78ecd2a23bbe9bcdfc5fe86547dd4662d56d5 | refs/heads/master | 2022-07-28T18:02:20.673591 | 2021-08-23T20:35:52 | 2021-10-12T09:18:09 | 235,116,234 | 1 | 2 | BSD-3-Clause | false | 2022-06-22T00:51:35 | 2020-01-20T14:10:02 | 2022-06-10T09:14:04 | 2022-06-22T00:51:34 | 2,839 | 0 | 0 | 3 | Jupyter Notebook | false | false | import unittest
import networkx as nx
from toysimulations import ZeroDetourBus, Stop, Request
class TestZeroDetourBus(unittest.TestCase):
def setUp(self):
G = nx.cycle_graph(10)
self.bus = ZeroDetourBus(G, req_gen=None, initpos=0)
def test_interpolation(self):
ground_truths = [#curtime, orig, dest, started_at, interp, rem_time
[2, 0, 3, 0, 2, 0],
[1.9, 0, 3, 0, 2, 0.1],
[3.1, 0, 3, 0, 3, 0],
]
for ground_truth in ground_truths:
with self.subTest(ground_truth=ground_truth):
curtime, started_from, going_to, started_at, true_pos,\
true_remaining_time = ground_truth
got_pos, got_rem_time = self.bus.interpolate(
curtime, started_from, going_to, started_at)
self.assertEqual(got_pos, true_pos)
self.assertAlmostEqual(got_rem_time, true_remaining_time)
def load_generator_from_list(self, req_list):
for req_idx, (req_time, origin, dest) in enumerate(req_list):
yield Request(req_idx, req_time, origin, dest)
def test_no_simultaneity_2_requests(self):
# req list is a 3 tuple:
# (time_of_req, origin, dest)
req_list = [(0.2, 1, 3),
(0.8, 2, 4)]
self.bus.req_gen = self.load_generator_from_list(req_list)
self.bus.simulate_all_requests()
output = self.bus.req_data
self.assertDictEqual(
output,
{0: {'req_epoch': 0.2, 'origin': 1, 'destination': 3,
'pickup_epoch': 1.2, 'dropoff_epoch': 3.2},
1: {'req_epoch': 0.8, 'origin': 2, 'destination': 4,
'pickup_epoch': 2.2, 'dropoff_epoch': 4.2},
}
)
def test_simultaneity_2_requests(self):
# req list is a 3 tuple:
# (epoch_of_req, origin, dest)
req_list = [(0.2, 1, 3),
(1.2, 2, 4)]
self.bus.req_gen = self.load_generator_from_list(req_list)
self.bus.simulate_all_requests()
output = self.bus.req_data
self.assertDictEqual(
output,
{0: {'req_epoch': 0.2, 'origin': 1, 'destination': 3,
'pickup_epoch': 1.2, 'dropoff_epoch': 3.2},
1: {'req_epoch': 1.2, 'origin': 2, 'destination': 4,
'pickup_epoch': 2.2, 'dropoff_epoch': 4.2},
}
)
def test_long_involved_test(self):
# req list is a 3 tuple:
# (epoch_of_req, origin, dest)
req_list = [(0.2, 1, 3),
(1.2, 2, 4),
(5, 4, 7), # insertion after idle
(7.8, 6, 4)]
self.bus.req_gen = self.load_generator_from_list(req_list)
self.bus.simulate_all_requests()
output = self.bus.req_data
        # prune unnecessary data from output. we want to match
# only pickup epoch and dropoff-epoch
only_epochs = {req_idx: {'pickup_epoch': data['pickup_epoch'],
'dropoff_epoch': data['dropoff_epoch']}
for req_idx, data in output.items()}
self.assertDictEqual(
only_epochs,
{0: {'pickup_epoch': 1.2, 'dropoff_epoch': 3.2},
1: {'pickup_epoch': 2.2, 'dropoff_epoch': 4.2},
2: {'pickup_epoch': 5, 'dropoff_epoch': 8},
3: {'pickup_epoch': 9, 'dropoff_epoch': 11},
}
)
def test_long_involved_test_inbetween_insert(self):
# req list is a 3 tuple:
# (epoch_of_req, origin, dest)
req_list = [(0.2, 1, 3),
(1.2, 2, 4),
(5, 4, 8), # insertion after idle
(5.2, 6, 7)]
self.bus.req_gen = self.load_generator_from_list(req_list)
self.bus.simulate_all_requests()
output = self.bus.req_data
        # prune unnecessary data from output. we want to match
# only pickup epoch and dropoff-epoch
only_epochs = {req_idx: {'pickup_epoch': data['pickup_epoch'],
'dropoff_epoch': data['dropoff_epoch']}
for req_idx, data in output.items()}
self.assertDictEqual(
only_epochs,
{0: {'pickup_epoch': 1.2, 'dropoff_epoch': 3.2},
1: {'pickup_epoch': 2.2, 'dropoff_epoch': 4.2},
2: {'pickup_epoch': 5, 'dropoff_epoch': 9},
3: {'pickup_epoch': 7, 'dropoff_epoch': 8},
}
)
def test_volume_comp(self):
# req list is a 3 tuple:
# (epoch_of_req, origin, dest)
req_list = [(0.2, 1, 3),
(1.2, 2, 4),
(5, 4, 8), # insertion after idle
(5.2, 6, 7)]
self.bus.req_gen = self.load_generator_from_list(req_list)
self.bus.simulate_all_requests()
output = self.bus.insertion_data
        # prune unnecessary data from output. we want to match
# only pickup epoch and dropoff-epoch
time_len_vol = [(row[0], row[1], row[2]) for row in output]
self.assertListEqual(time_len_vol,
[(0.2, 1, 4), # cpe is counted, but length *before* insertion
(1.2, 2, 4),
(5, 1, 5),
(6, 2, 4) # 6 because jump
]
) | UTF-8 | Python | false | false | 5,559 | py | 20 | test_simulator.py | 9 | 0.487498 | 0.455298 | 0 | 135 | 40.185185 | 90 |
vhsw/CodeMasters_Tourney | 7,275,674,600,794 | cedd4566df1dd7e40af2ec8d0642d3382294b4bf | 9a43d3cc69dc218436d1184e8cab3a4918a72c20 | /Python 3/candies.py | 22230855eda2aa5d45d938399fc464a7ffe9e1f6 | []
| no_license | https://github.com/vhsw/CodeMasters_Tourney | aa584b2fe9b3c1aab939db6fcc811d9e0970e7be | 3c1207e2b76d91cd1eb743a16dd967794a59b60a | refs/heads/master | 2020-03-17T17:34:45.904107 | 2018-05-23T09:29:29 | 2018-05-23T09:29:29 | 133,793,828 | 0 | 0 | null | false | 2018-05-22T11:58:19 | 2018-05-17T09:53:58 | 2018-05-22T11:53:02 | 2018-05-22T11:58:19 | 56 | 0 | 0 | 0 | Python | false | null | # n children have got m pieces of candy.
# They want to eat as much candy as they can,
# but each child must eat exactly the same amount of candy as any other child.
# Determine how many pieces of candy will be eaten by all the children together.
# Individual pieces of candy cannot be split.
def candies(n, m) :
return m // n * n
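# A small illustrative check (added for clarity, not part of the original
# kata): with n = 3 children and m = 10 candies each child eats 3 pieces,
# so 9 pieces are eaten in total.
if __name__ == "__main__":
    assert candies(3, 10) == 9
    assert candies(2, 7) == 6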
| UTF-8 | Python | false | false | 336 | py | 130 | candies.py | 130 | 0.729167 | 0.729167 | 0 | 9 | 36.333333 | 80 |
vinissimus/opencoverage | 18,038,862,643,366 | 7fedd02ca2b5e9af3ee2b971b8b89390d349e9a7 | 660f6959397989922bd63ceecde45c0e049a0972 | /tests/utils.py | 79aa3182d1c90db628017812e0cff004f4da8f03 | [
"MIT"
]
| permissive | https://github.com/vinissimus/opencoverage | 32d681da899b0ad6b9b6d1b048174d4a5706a73f | 38f7ecda318135925f45dbad7465c5fae84ed54d | refs/heads/master | 2023-02-20T03:47:26.170714 | 2021-01-22T11:43:40 | 2021-01-22T11:43:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from opencoverage.parser import parse_raw_coverage_data
_data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
def read_data(name):
with open(os.path.join(_data_dir, name), "rb") as fi:
return fi.read()
async def add_coverage(db, organization, repo, branch, commit, coverage):
coverage = parse_raw_coverage_data(read_data(coverage))
await db.save_coverage(
organization=organization,
repo=repo,
branch=branch,
commit_hash=commit,
coverage=coverage,
)
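# Hedged usage sketch (illustrative only; the `db` object and the coverage
# file name below are assumptions, not part of the original helpers):
#
#   await add_coverage(db, organization="acme", repo="widgets",
#                      branch="main", commit="deadbeef",
#                      coverage="coverage.xml")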
| UTF-8 | Python | false | false | 554 | py | 11 | utils.py | 7 | 0.66065 | 0.66065 | 0 | 21 | 25.380952 | 77 |
jflondonog/ProyectoIntegrador1 | 9,277,129,366,200 | 004eebf40b533cfb6550c06d2ad07c2834aed740 | 31f4acb23698834e0cbbb397aaabb90d3b60e080 | /EverGreen.py | 8fe93c2e331d7dfc527a8ae689e907f7710a443c | []
| no_license | https://github.com/jflondonog/ProyectoIntegrador1 | 371bf9a40af05da6ffc32a859882240832b4161f | 29f1fd4430c814b12ec39a7d28533eab442bc503 | refs/heads/master | 2020-07-08T12:00:23.775515 | 2019-08-22T00:00:13 | 2019-08-22T00:00:13 | 203,666,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, jsonify, request
from flask_cors import CORS
from datetime import datetime
import statistics as stats
app = Flask(__name__)
CORS(app)
tipo_medicion = { 'sensor' : 'DS18B20', 'variable' : 'Temperatura', 'unidades' : 'Centigrados'}
mediciones = [
{'fecha' : '2019-08-20 15:38:43', **tipo_medicion, 'valor' : 100},
{'fecha' : '2019-08-20 15:40:56', **tipo_medicion, 'valor' : 98},
{'fecha' : '2019-08-20 15:41:16', **tipo_medicion, 'valor' : 101},
{'fecha' : '2019-08-20 15:43:18', **tipo_medicion, 'valor' : 101},
{'fecha' : '2019-08-20 15:46:16', **tipo_medicion, 'valor' : 98},
{'fecha' : '2019-08-20 15:50:20', **tipo_medicion, 'valor' : 98},
{'fecha' : '2019-08-20 16:01:16', **tipo_medicion, 'valor' : 99},
{'fecha' : '2019-08-20 16:03:18', **tipo_medicion, 'valor' : 97},
{'fecha' : '2019-08-20 16:08:20', **tipo_medicion, 'valor' : 97},
{'fecha' : '2019-08-21 16:14:16', **tipo_medicion, 'valor' : 97},
{'fecha' : '2019-08-21 16:23:18', **tipo_medicion, 'valor' : 97},
{'fecha' : '2019-08-21 16:26:18', **tipo_medicion, 'valor' : 96}
]
@app.route("/")
def get():
return jsonify(tipo_medicion)
@app.route('/mediciones', methods = ['GET'])
def getAll():
return jsonify(mediciones)
@app.route('/mediciones', methods = ['POST'])
def postOne():
now = datetime.now()
body = request.json
body['fecha'] = datetime.strftime(now, '%Y-%m-%d %H:%M:%S')
mediciones.append({**body, **tipo_medicion})
return jsonify(mediciones)
"""
@app.route('/mediciones/<string:fecha>', methods=['DELETE'])
def deleteOne(fecha):
x = False
for medicion in mediciones:
if (fecha in medicion['fecha']):
x = True
mediciones.remove(medicion)
return 'Eliminado' if x else "No Encontrado"
@app.route('/mediciones/<string:fecha>', methods=['PUT'])
def putOne(fecha):
body = request.json
x = False
for medicion in mediciones:
if(fecha in medicion['fecha']):
x = True
medicion['valor'] = body['valor']
return 'Modificado' if x else 'No Encontrado'
"""
@app.route('/mediciones/moda', methods = ['GET'])
def getModa():
moda = []
x = 0
for medicion in mediciones:
moda.append(medicion['valor'])
x = stats.mode(moda)
return jsonify(x)
app.run(port=5000, debug=True)
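# Illustrative client calls (assumptions for clarity: they expect the server
# above to be running on its default host and port 5000):
#
#   import requests
#   print(requests.get('http://localhost:5000/mediciones').json())
#   requests.post('http://localhost:5000/mediciones', json={'valor': 99})
#   print(requests.get('http://localhost:5000/mediciones/moda').json())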
| UTF-8 | Python | false | false | 2,471 | py | 2 | EverGreen.py | 1 | 0.571429 | 0.488871 | 0 | 79 | 29.227848 | 95 |
alexp25/wdn-model-experiments | 11,269,994,211,226 | bd8aaa4b8b4423dfeb728b4e35fbadbef2a024f8 | 0cf5412f69633a98d0e7b93fa58ea5a86b050c85 | /eval_results_cross_check.py | 6b16604836cb2e90e2088051e48acfc3577751d5 | []
| no_license | https://github.com/alexp25/wdn-model-experiments | ec46bae7e88e4c658116d1fde6a6a4ffd904a516 | 26031c10561743bd0dfd019f6c3aa9c9673c1621 | refs/heads/master | 2023-04-13T08:37:13.954049 | 2020-07-01T17:12:46 | 2020-07-01T17:12:46 | 248,577,407 | 0 | 0 | null | false | 2023-03-25T00:06:02 | 2020-03-19T18:33:33 | 2020-07-01T17:13:01 | 2023-03-25T00:05:58 | 121,900 | 0 | 0 | 1 | Python | false | false | from modules import graph
from modules.graph import Timeseries, CMapMatrixElement
import numpy as np
from os import listdir
from os.path import isfile, join
import json
from typing import List
import yaml
# import copy
with open("config.yml", "r") as f:
    config = yaml.safe_load(f)
elements: List[CMapMatrixElement] = []
rowsdict = {}
colsdict = {}
mode = "deep_1"
mode = "deep_2_rnn"
# mode = "dtree_1"
# mode = "dtree_2_multioutput"
# mode = "svm_1"
# mode = "naive_bayes_1"
mode2 = "train"
mode2 = "test"
input_file = "./data/selected/output/cross_check_" + mode + "_" + mode2 + ".csv"
with open(input_file, "r") as f:
content = f.read().split("\n")
for line in content:
spec = line.split(",")
if len(spec) > 1:
print(spec)
e = CMapMatrixElement()
e.i = int(spec[0])-1
e.j = int(spec[1])-1
e.val = float(spec[4])
elements.append(e)
if spec[0] not in rowsdict:
rowsdict[spec[0]] = True
if spec[0] not in colsdict:
colsdict[spec[0]] = True
print(elements)
labels = ["1-N-80", "1-N-1-80", "1-N-1-50", "GRAY-80"]
labels = ["A", "B", "C", "ABC"]
xlabels = list(rowsdict)
ylabels = list(colsdict)
xlabels = ylabels = labels
xsize = len(rowsdict)
ysize = len(colsdict)
print(xsize)
print(ysize)
# intersection_matrix = np.random.randint(0, 10, size=(max_val, max_val))
intersection_matrix = np.zeros((xsize, ysize))
# print(intersection_matrix)
avg = 0
count = 0
for e in elements:
intersection_matrix[e.i][e.j] = e.val
# if e.val > 0:
avg += e.val
count += 1
print(intersection_matrix)
avg /= count
# avg = np.mean(intersection_matrix)
print(avg)
# quit()
fig = graph.plot_matrix_cmap(elements, len(rowsdict), len(
colsdict), "Model accuracy cross-validation (" + mode2 + ")", "dataset", "model", xlabels, ylabels, (70, 100))
graph.save_figure(fig, "./figs/accuracy_cross_check_" + mode + "_" + mode2)
| UTF-8 | Python | false | false | 1,999 | py | 116 | eval_results_cross_check.py | 28 | 0.605803 | 0.582791 | 0 | 96 | 19.822917 | 114 |
SummerZm/LeafxuTool | 17,892,833,761,272 | 441843cb0cb9ad899126c94db30670858d45567c | 4adab20021c687380f89f125d40f2469c93d4b7b | /Python/Excel/read.py | 4fe03f5e2af518132ff3ece6b10d2678b1044390 | []
| no_license | https://github.com/SummerZm/LeafxuTool | 9d8009a93578e713074946b1dd073272cc021f08 | f162da13ef1666b2d1b74291ab36bb0c20a2da88 | refs/heads/master | 2021-11-23T05:38:41.016030 | 2021-11-12T09:33:13 | 2021-11-12T09:33:13 | 221,664,883 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import xdrlib ,sys
import xlrd
# Open the Excel file
def open_excel(file= 'test.xlsx'):
    data = xlrd.open_workbook(file)
    return data
# Read data from an Excel sheet by name.
# Args: file: path to the Excel file; colnameindex: index of the header row; by_name: name of the sheet
def excel_table_byname(file='C:/Users/LB/Desktop/Python/test.xlsx', colnameindex=0, by_name=u'Sheet1'):
    data = open_excel(file)  # open the Excel file
    table = data.sheet_by_name(by_name)  # get the sheet by its name
    nrows = table.nrows  # number of rows
    colnames = table.row_values(colnameindex)  # values of the header row
    list = []  # list that will hold the rows read
    for rownum in range(0, nrows):  # iterate over every row
        row = table.row_values(rownum)  # fetch the row by its number
        if row:  # if the row exists
            app = []  # contents of one row
            for i in range(len(colnames)):  # read the row column by column
                app.append(row[i])
            list.append(app)  # store the row
    return list
# main function
def main():
tables = excel_table_byname()
for row in tables:
print(row)
if __name__=="__main__":
main()
input() | UTF-8 | Python | false | false | 1,229 | py | 38 | read.py | 5 | 0.614018 | 0.610069 | 0 | 36 | 26.194444 | 103 |
PhonieZ/coding_things | 7,241,314,898,845 | f16b3cce9a188a1628b6111a7dc647976f2d7b3b | cfd4fbc65f63ca209ebd969c6c502fbc6643d8b2 | /Year 9 Computing Code/Spring Exam Things/roll thing.py | e78612bb45477c1fa583e8d7230f5d4f4fe7c87b | [
"MIT"
]
| permissive | https://github.com/PhonieZ/coding_things | e03f5dff307365a617fa6041e12e455309d1f644 | 38e2148a2f607c7793efaa609731d1c066f0be00 | refs/heads/main | 2023-06-12T17:22:44.617329 | 2022-08-03T09:54:31 | 2022-08-03T09:54:31 | 383,204,236 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
roll_total=1
roll=random.randint(1,6)
while roll != 6:
print("You rolled a "+str(roll))
roll_total = roll_total+1
roll = random.randint(1,6)
if roll_total==1:
print("It took you "+str(roll_total)+" roll until you rolled a 6")
else:
print("It took you "+str(roll_total)+" rolls until you rolled a 6")
| UTF-8 | Python | false | false | 334 | py | 96 | roll thing.py | 77 | 0.658683 | 0.628743 | 0 | 11 | 29.363636 | 71 |
mehdibenamorr/entitykb | 13,546,326,889,070 | 61cc2057cf48edd59512276583223a69614648cb | f3248eb4020f60590443778df0c2148cad730445 | /src/entitykb/contrib/email/__init__.py | bb8bdb2dbf47c82d322b8e12098b3f420fdab407 | [
"MIT"
]
| permissive | https://github.com/mehdibenamorr/entitykb | 1b380a94df333253fd9e19653fe1d4f3f9400d1e | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | refs/heads/master | 2023-06-28T02:31:40.074794 | 2021-07-28T14:35:54 | 2021-07-28T14:35:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .model import Email
from .resolvers import EmailResolver
__all__ = ("Email", "EmailResolver")
| UTF-8 | Python | false | false | 100 | py | 153 | __init__.py | 112 | 0.73 | 0.73 | 0 | 4 | 24 | 36 |
SarthakU/DailyProgrammer | 18,932,215,847,912 | 7f6551bd52ea6b3c016843d5a6dc67955554f468 | dff48edafc9562dee11691d7e6c89179c480be54 | /Python/Daily044_difficult/PrimeNumberThingy.py | 0d37670e9bb6bb938f98f8b908400dd3f59c6810 | []
| no_license | https://github.com/SarthakU/DailyProgrammer | 62feb30d3df9ef9e6ca333dda1ccff8794c6e48d | 00e655ef5621f0bce5ebd9de129b1983824271e1 | refs/heads/master | 2023-08-18T21:31:45.147102 | 2023-08-17T21:00:00 | 2023-08-17T21:00:00 | 25,544,739 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## PRIME NUMBER THINGY
##
## challenge #44 (difficult)
## http://www.reddit.com/r/dailyprogrammer/comments/srp5q/4252012_challenge_44_difficult/
##
##
## sarthak7u@gmail.com
##
from sys import argv
lower_limit = int(argv[1]);upper_limit = int(argv[2]) + lower_limit
prime_list = []
prime_sum = 0
count = 0
for i in xrange(lower_limit, upper_limit):
    is_prime = True
    if i < 2:
        is_prime = False
    elif i in (2, 3, 5, 7):
        is_prime = True
    elif i % 2 == 0 or i % 3 == 0 or i % 5 == 0 or i % 7 == 0:
        is_prime = False
    else:
        for j in xrange(11, i, 2):
            if i % j == 0:
                is_prime = False
                break
        count += 1
if is_prime == True:
prime_list.append(i)
prime_sum += i
print ""
print len(prime_list), "Prime numbers between", lower_limit, "and", upper_limit, "are :"
print ""
print prime_list
print ""
print "Sum of these numbers is", prime_sum
| UTF-8 | Python | false | false | 860 | py | 155 | PrimeNumberThingy.py | 87 | 0.572093 | 0.533721 | 0 | 33 | 25.060606 | 90 |
dr-dos-ok/Code_Jam_Webscraper | 317,827,589,133 | 8a7593fbb595651a5f165899d0774c66826ec7e5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/868.py | 5a482ed92b9707937e84ab5069efd79872cd3e9a | []
| no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from math import floor, ceil
MAX_POSSIBLE_PANCAKES = 1000
pancake_holder = [0 for i in range(MAX_POSSIBLE_PANCAKES+1)]
class Diner:
__slots__ = ["eaters", "max_pancake"]
def __init__(self, num_eaters, eaters):
self.eaters = pancake_holder[:]
self.max_pancake = 0
for plate in list(map(int, eaters.split(" "))):
self.eaters[plate] += 1
if plate > self.max_pancake:
self.max_pancake = plate
def simulate_splits(original, hiding_spot, max_pancakes):
eaters = original[:]
splits = 0
splitting = True
time_needed = 0
while splitting:
splitting = False
for j in reversed(range(hiding_spot + 1, max_pancakes + 1)):
if eaters[j] > 0:
# decrement
eaters[j] -= 1
# distribute:
eaters[j - hiding_spot] += 1
eaters[hiding_spot] += 1
splitting = True
splits += 1
break
if max_pancakes > hiding_spot:
return hiding_spot + splits
else:
for j in reversed(range(max_pancakes + 1)):
if eaters[j] > 0:
time_needed = splits + j
break
        return time_needed
def solve_diner(diner):
# find worst plate (the one to split):
max_pancake = diner.max_pancake
# num splits:
min_time = max_pancake
for hiding_spot in range(1, max_pancake + 1):
min_time = min(
simulate_splits(
diner.eaters,
hiding_spot,
max_pancake),
min_time)
return min_time
if __name__ == "__main__":
if len(sys.argv) != 2:
print("usage %s [filename]" % (sys.argv[0]))
exit(1)
filename = sys.argv[1]
read_file = open(filename, "rt").read()
number_of_sols, diners = read_file.split("\n", 1)
diner_sols = []
number_of_sols = int(number_of_sols)
diners = diners.split("\n")
number_of_lines_to_read = 2 * number_of_sols
line_number = 0
problem_number = 1
while line_number < number_of_lines_to_read:
print("Case #%d: %d" % (problem_number, solve_diner(
Diner(
diners[line_number],
diners[line_number+1]
)
)
)
)
problem_number += 1
line_number += 2
| UTF-8 | Python | false | false | 2,463 | py | 60,747 | 868.py | 60,742 | 0.507105 | 0.494113 | 0 | 84 | 28.321429 | 68 |
fyxcc/seleniumPython | 13,134,010,028,511 | 0bcfe6ca5e53cca0f0e8e141d382420783b7c028 | 073692946ee7f532f5d5e5162a0550cbb60810b5 | /case/login_ddt_case.py | 0b7008af21f57b9b26921772409787da0061b31c | []
| no_license | https://github.com/fyxcc/seleniumPython | 9dba3a6a02b2121a580be28ad6a382aba5150839 | 57e20f98d6d8faf595b8eeb105b3b355b9dba7bb | refs/heads/master | 2021-05-18T11:37:30.192541 | 2020-07-03T03:10:56 | 2020-07-03T03:10:56 | 251,228,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import sys
sys.path.append('D:/pythonWork/autoTest')
import ddt
import unittest
import time
import os
import HTMLTestRunner
from selenium import webdriver
from business.login_business import LoginBusiness
from util.excel_util import ExcelUtil
# Load the test data
ex = ExcelUtil(excel_path=r"D:\pythonWork\autoTest\data\loginDdtData.xls")
data = ex.get_data()
# Decorate the test class with @ddt.ddt
@ddt.ddt
# username, password, captcha, locator of the error element, expected error text
class LoginDdtCase(unittest.TestCase):
    # Class-level setup: runs once before all test cases
    @classmethod
    def setUpClass(cls):
        print('Setup before all test cases')
cls.login_url = 'http://localhost:9090/exam-place/login'
cls.driver = webdriver.Firefox()
cls.driver.get(cls.login_url)
cls.driver.maximize_window()
cls.lb = LoginBusiness(cls.driver)
    # Class-level teardown: runs once after all test cases
    @classmethod
    def tearDownClass(cls):
        print('Teardown after all test cases')
cls.driver.close()
    # Setup before each test case
    def setUp(self):
        print('Setup before each test case')
        # self.login_url = 'http://localhost:9090/exam-place/login'
        # self.driver = webdriver.Chrome()
        # self.driver.get(self.login_url)
        # self.driver.maximize_window()
        # self.lb = LoginBusiness(self.driver)
    # Teardown after each test case
    def tearDown(self):
        print('Teardown after each test case')
        # take a screenshot when a case fails
        for method_name, error in self._outcome.errors:
            if error:
                # name of the currently running case
                case_name = self._testMethodName
                # path where the failure screenshot is stored
file_path = os.path.join(os.path.pardir + "/report/" + case_name + ".png")
self.driver.save_screenshot(file_path)
self.driver.refresh()
# self.driver.close()
    # Decorate the case with @ddt.data()
    @ddt.data(*data)
    # Run the test case and check whether it succeeded
    def test_login_case(self, data):
        username, password, file_name, assertCode, assertText = data
        login_error = self.lb.login_function(username, password, file_name, assertCode, assertText)
        if len(assertCode) != 0:
            self.assertTrue(login_error, "Login succeeded unexpectedly, this case fails")
if __name__ == "__main__":
    # path where the report is stored
    # fire_path = os.path.join(os.path.pardir + "/report/" + "login_ddt_case.html")
    fire_path = r"D:\pythonWork\autoTest/report/first_case.html"
    f = open(fire_path, 'wb')
    # add the test cases
    suite = unittest.TestLoader().loadTestsFromTestCase(LoginDdtCase)
    # show the test results as an HTML report
    runner = HTMLTestRunner.HTMLTestRunner(stream=f, title='this is the first ddt report',
                                           description=u'Data-driven test report for the login module',
verbosity=2)
runner.run(suite)
| UTF-8 | Python | false | false | 3,113 | py | 51 | login_ddt_case.py | 48 | 0.624672 | 0.620547 | 0 | 88 | 29.306818 | 99 |
jdad24/Computer-Vision---Hand-Tracking | 9,887,014,747,482 | fa86833253a0dce92986596115b39caa8688289c | dbe8f2f550c3cdf29e1446de33f21c1742023258 | /handdetector.py | cfa052ec6c1f64f7dc6c470ed76b2b8a97e4bd71 | []
| no_license | https://github.com/jdad24/Computer-Vision---Hand-Tracking | 84fbdc3b53b5f1255f59a11617ec3b3146be0b3b | 4b549b15c2f6cb3fa878912572c6bcad1cfd6c68 | refs/heads/master | 2020-06-18T23:59:28.945436 | 2019-07-12T02:56:41 | 2019-07-12T02:56:41 | 196,498,254 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Import OpenCV
import cv2
# Will perform hand detection
class HandDetector:
def __init__(self, handCascadePath):
# Loading the hand detector classifier into memory
self.handCascade = cv2.CascadeClassifier(handCascadePath)
def detect(self, image, scaleFactor = 1.1, minNeighbors = 5, minSize = (30, 30)):
# Detects hands in the image
rects = self.handCascade.detectMultiScale(image,
scaleFactor = scaleFactor, minNeighbors = minNeighbors,
minSize = minSize, flags = cv2.CASCADE_SCALE_IMAGE)
# Returns the bounding boxes
return rects | UTF-8 | Python | false | false | 557 | py | 3 | handdetector.py | 2 | 0.748654 | 0.7307 | 0 | 18 | 30 | 82 |
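# Minimal usage sketch (an illustrative assumption, not part of the original
# module; the cascade XML path and image file are placeholders):
#
#   import cv2
#   detector = HandDetector('cascades/hand.xml')
#   gray = cv2.cvtColor(cv2.imread('frame.jpg'), cv2.COLOR_BGR2GRAY)
#   for (x, y, w, h) in detector.detect(gray, scaleFactor=1.1, minNeighbors=5):
#       cv2.rectangle(gray, (x, y), (x + w, y + h), 255, 2)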
wj2016/py3 | 5,909,875,009,908 | 6d01aa57b658a90aebe797a582845dbb5267bc79 | 1901457fdd8eeb8ec99eb0d0a90977d684cb46f4 | /NetworkProgramming/simple_tcp_client.py | 4b49d9957f3978bed688fecda09574fcdea66a8f | []
| no_license | https://github.com/wj2016/py3 | 6730ab7d9e6d72dbd4d698c500d4320e779698ea | f4cd6aeb98560d797ad4b19ad2a508812f7d20cf | refs/heads/master | 2018-02-07T14:20:40.143725 | 2017-09-07T19:16:27 | 2017-09-07T19:16:27 | 95,907,659 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
def run_simple_tcp_client():
try:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = ("127.0.0.1", 5050)
mysocket.connect(addr)
mysocket.sendall(b"Hello, simple TCP client\r\n")
except ConnectionRefusedError:
print("Can not connect to server, is server running?")
except TypeError:
print("Socket can only send bytes, not str")
    except socket.error as e:
        print(f"Error: {e}")
finally:
print("DONE")
mysocket.close()
if __name__ == '__main__':
run_simple_tcp_client() | UTF-8 | Python | false | false | 599 | py | 94 | simple_tcp_client.py | 82 | 0.614357 | 0.597663 | 0 | 20 | 29 | 68 |
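# A throwaway server to test this client against (an illustrative sketch,
# not part of the original script) -- run it in another terminal before
# starting the client:
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(("127.0.0.1", 5050))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   print(conn.recv(1024))
#   conn.close()
#   srv.close()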
ozcnsimge/physiological-multi-emorec | 6,047,313,982,388 | 91274e42c3b6c9168207684c562044536df8110f | 1392f314b3609ed27af3e2a8f0afe97d217b1b3f | /classifier/fcn.py | aa49de7f6a550f055a78d728e3f7d3160e2352f7 | []
| no_license | https://github.com/ozcnsimge/physiological-multi-emorec | 3683ab406d2a190d2ef7ea94b8e0b7606712c6b5 | 7a115442adeeb4bcc9a188a77cb672e9f8330b9c | refs/heads/master | 2023-08-11T09:26:56.455925 | 2021-09-11T12:45:52 | 2021-09-11T12:45:52 | 403,412,302 | 8 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import keras
from classifier.classifier import get_multipliers, Classifier, focal_loss
class ClassifierFcn(Classifier):
def build_model(self, input_shapes, nb_classes, hyperparameters):
input_layers = []
channel_outputs = []
drop_rate = .25
l2_lambda = .001
filters_multipliers, kernel_size_multipliers = get_multipliers(len(input_shapes), hyperparameters)
for channel_id, input_shape in enumerate(input_shapes):
input_layer = keras.layers.Input(input_shape)
input_layers.append(input_layer)
conv1 = keras.layers.Conv1D(filters=int(filters_multipliers[channel_id] * 128),
kernel_size=int(kernel_size_multipliers[channel_id] * 8), padding='same')(
input_layer)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.Activation(activation='relu')(conv1)
conv2 = keras.layers.Conv1D(filters=int(filters_multipliers[channel_id] * 256),
kernel_size=int(kernel_size_multipliers[channel_id] * 5), padding='same')(
conv1)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv3 = keras.layers.Conv1D(filters=int(filters_multipliers[channel_id] * 128),
kernel_size=int(kernel_size_multipliers[channel_id] * 3), padding='same')(conv2)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
#gap_layer = keras.layers.GlobalAveragePooling1D()(conv3)
channel_out = keras.layers.Flatten()(conv3)
channel_outputs.append(channel_out)
x = keras.layers.concatenate(channel_outputs, axis=-1) if len(channel_outputs) > 1 else channel_outputs[0]
x = keras.layers.Dropout(drop_rate)(x)
x = keras.layers.Dense(64, activation="relu", kernel_regularizer=keras.regularizers.l2(l2_lambda))(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Dropout(drop_rate)(x)
x = keras.layers.Dense(128, activation="relu", kernel_regularizer=keras.regularizers.l2(l2_lambda))(x)
x = keras.layers.Dropout(drop_rate)(x)
output_layer = keras.layers.Dense(nb_classes, activation='softmax')(x)
model = keras.models.Model(inputs=input_layers, outputs=output_layer)
#uncomment to use focal loss
#model.compile(loss=[focal_loss(alpha=.25, gamma=2)], optimizer=self.get_optimizer(), metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', optimizer=self.get_optimizer(), metrics=['accuracy'])
return model
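# Illustrative sketch of calling build_model (the shapes, class count and
# hyperparameters are assumptions; the real values come from the surrounding
# training pipeline and the Classifier base class):
#
#   clf = ClassifierFcn(...)                    # constructor args omitted
#   model = clf.build_model(
#       input_shapes=[(128, 1), (128, 1)],      # one tuple per input channel
#       nb_classes=3,
#       hyperparameters=hp)                     # whatever get_multipliers expects
#   model.summary()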
| UTF-8 | Python | false | false | 2,789 | py | 31 | fcn.py | 27 | 0.629258 | 0.609179 | 0 | 57 | 47.929825 | 120 |
Epic1121/Comp-Sci-Coursework | 12,309,376,292,638 | 2d89586647c9a3cf6aff18bcc71a9927bbde8be4 | 82acee8bad192bc47b528270ee65c7b862ee30e3 | /CPCW Flask/flaskr/blog.py | 9ebf59194f425fe3a747b68972fb9e6b34991b77 | []
| no_license | https://github.com/Epic1121/Comp-Sci-Coursework | 5410852b43a7864cd890aa538a88f1dabcc81c93 | 61f9a230c4851a6f6e4ea03e4abf082e7b45a624 | refs/heads/master | 2023-04-28T18:33:31.624287 | 2021-05-21T10:55:17 | 2021-05-21T10:55:17 | 348,408,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
from flaskr import model
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
"""
finds all posts, and renders them on index.html
:return: template for index.html
"""
db = get_db()
posts = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
"""
This function oversees the creation of a blog post, using create.html
:return: template for create.html
"""
if request.method == 'POST':
title = request.form['title']
body = model.work(title)
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (title, body, author_id)'
' VALUES (?, ?, ?)',
(title, body, g.user['id'])
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
"""
    This function finds a post based on the post id provided, and optionally checks that the current user is its author
    :param id: id of the post to find
    :param check_author: whether to verify that the current user is the author
:return: the post
"""
post = get_db().execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' WHERE p.id = ?',
(id,)
).fetchone()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
"""
Updates an existing post, using update.html
:param id: the id of the post to be updated
:return: the template for the updated post
"""
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = model.work(title)
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'UPDATE post SET title = ?, body = ?'
' WHERE id = ?',
(title, body, id)
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
"""
    Deletes a post based on the post id provided
:param id: the id of the post to be deleted
:return: a redirect to /index
"""
get_post(id)
db = get_db()
db.execute('DELETE FROM post WHERE id = ?', (id,))
db.commit()
return redirect(url_for('blog.index'))
| UTF-8 | Python | false | false | 3,351 | py | 4 | blog.py | 2 | 0.561325 | 0.559236 | 0 | 135 | 23.822222 | 107 |
AdilAdilovich/work-with-strings | 25,769,834,748 | 5ce0792ae62aeded7dec554b0068d2f7e1c4a214 | c76ed30aa19b9bce9d2b6c0da07e2dec8f47b2ce | /Main.py | 1cac7feb73370b1a880244333731b074cabb1e53 | []
| no_license | https://github.com/AdilAdilovich/work-with-strings | 81355366213d8b5ea32e81ef9acf24a57863f9b7 | c849265d97b0f5bff67e310dd47b96cc3c9d1161 | refs/heads/master | 2020-11-27T20:06:47.485607 | 2019-12-22T15:10:05 | 2019-12-22T15:10:05 | 229,586,206 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import re
import sys
from PyQt5 import QtGui
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUi
class Main(QDialog):
def __init__(self):
super(Main, self).__init__()
loadUi('uis/main.ui', self)
        self.setWindowTitle('Working with strings in Python')
self.setWindowIcon(QtGui.QIcon('images/logo.png'))
self.btn_solve.clicked.connect(self.solve)
self.btn_clear.clicked.connect(self.clear)
def solve(self):
        text = self.textEdit_text.toPlainText().strip().replace(',', '').replace('.', '')  # get the input text
        if text == "":
            self.textEdit_words.insertPlainText("No text")
else:
def ret(i):
return i[1]
in_text = text.replace('\n', ' ').split(' ')
popular_words = [[in_text[0], 1]]
            for index, item in enumerate(in_text):
                if index == 0:
                    continue
                found = False
                for elem in popular_words:
                    if elem[0].lower() == item.lower():
                        elem[1] += 1
                        found = True
                        break
                if not found:
                    popular_words.append([item, 1])
            # sort by frequency so the most common words come first
            popular_words.sort(key=ret, reverse=True)
self.textEdit_words.clear()
if len(popular_words) > 5:
for i in range(5):
self.textEdit_words.insertPlainText(str(popular_words[i][0]) + " " + str(popular_words[i][1]) + "\n")
else:
for item in popular_words:
self.textEdit_words.insertPlainText(str(item[0]) + " " + str(item[1]) + "\n")
def clear(self):
self.textEdit_text.clear()
self.textEdit_words.clear()
def main():
app = QApplication(sys.argv)
window = Main()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,050 | py | 3 | Main.py | 1 | 0.508964 | 0.498506 | 0 | 67 | 28.970149 | 121 |
tezmen/yametrika-api | 9,440,338,140,430 | 19d93e51abe72c4add63f0aa6fe8cb71c3ea34b5 | e2a39876afefa50275642a52409bb0135f3ccaad | /yametrikapy/tests/test_yametrikapy.py | d614e96a200e78a354266b40a2499e7a31579fc6 | [
"MIT"
]
| permissive | https://github.com/tezmen/yametrika-api | 845895c771c683cbe43f5d0a2281d99a52253886 | d6537ee3246a9e2c6f43cff1c6710c616595fcf5 | refs/heads/master | 2020-04-13T08:29:46.164975 | 2019-10-25T22:54:36 | 2019-10-25T22:54:36 | 163,082,586 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import unittest
import time
from io import StringIO
from os.path import basename
from yametrikapy import Metrika
try:
from . import secret_settings
except ImportError:
class secret_settings:
MAIN_USER_LOGIN = ''
MAIN_CLIENT_ID = ''
MAIN_CLIENT_SECRET = ''
MAIN_TOKEN = ''
OTHER_USER_LOGIN = ''
OTHER_CLIENT_ID = ''
OTHER_TOKEN = ''
MAIN_USER_LOGIN = secret_settings.MAIN_USER_LOGIN
MAIN_CLIENT_ID = secret_settings.MAIN_CLIENT_ID
MAIN_CLIENT_SECRET = secret_settings.MAIN_CLIENT_SECRET
MAIN_TOKEN = secret_settings.MAIN_TOKEN
OTHER_USER_LOGIN = secret_settings.OTHER_USER_LOGIN
OTHER_CLIENT_ID = secret_settings.OTHER_CLIENT_ID
OTHER_TOKEN = secret_settings.OTHER_TOKEN
class TestMetrikaBase(unittest.TestCase):
def setUp(self):
self.main_user_login = MAIN_USER_LOGIN
self.metrika = Metrika(MAIN_CLIENT_ID, token=MAIN_TOKEN)
self.other_user_login = OTHER_USER_LOGIN
self.other_metrika = Metrika(OTHER_CLIENT_ID, token=OTHER_TOKEN)
class TestMetrikaWithCounter(TestMetrikaBase):
def setUp(self):
super(TestMetrikaWithCounter, self).setUp()
counter = self.metrika.add_counter('Test name of counter', 'test-name-counter.ru')
self.counter_id = counter.counter['id']
self.counter_name = counter.counter['name']
self.assertIsInstance(self.counter_id, int)
self.assertTrue(self.counter_id)
def tearDown(self):
counter = self.metrika.delete_counter(self.counter_id)
self.assertTrue(counter.success)
def test_counters(self):
counters = self.metrika.counters().counters
self.assertIsInstance(counters, list)
counters = self.metrika.counters(permission='own').counters
self.assertIsInstance(counters, list)
# self.assertTrue(len(counters) > 0)
counter = self.metrika.update_counter(self.counter_id, name='New test name of counter')
self.assertEqual(self.counter_id, counter.counter['id'])
counter = self.metrika.counter(self.counter_id)
self.assertEqual(self.counter_id, counter.counter['id'])
counter = self.metrika.delete_counter(self.counter_id)
self.assertTrue(counter.success)
counter = self.metrika.undelete_counter(self.counter_id)
self.assertTrue(counter.success)
def test_goals(self):
goals = self.metrika.goals(self.counter_id).goals
self.assertFalse(goals)
# goal = self.metrika.add_goal(self.counter_id, 'Goal name', 'number', depth=2)
CONDITION_URL = 'indexpage'
CONDITION_TYPE = 'contain'
conditions = [{'url': CONDITION_URL, 'type': CONDITION_TYPE}]
goal = self.metrika.add_goal(self.counter_id, 'Goal name', 'url', conditions=conditions)
goal_id = goal.goal['id']
self.assertIsInstance(goal_id, int)
self.assertTrue(goal_id)
CONDITION_URL = 'indexpage2'
CONDITION_TYPE = 'contain'
conditions = [{'url': CONDITION_URL, 'type': CONDITION_TYPE}]
# goal = self.metrika.update_goal(self.counter_id, goal_id, 'New goal name', 'action', conditions=conditions)
goal = self.metrika.update_goal(self.counter_id, goal_id, 'New goal name', 'url', conditions=conditions)
self.assertEqual(goal_id, goal.goal['id'])
goal = self.metrika.goal(self.counter_id, goal_id).goal
self.assertTrue(goal_id == goal['id'])
self.assertTrue(goal['conditions'][0]['url'] == CONDITION_URL)
self.assertTrue(goal['conditions'][0]['type'] == CONDITION_TYPE)
goal = self.metrika.delete_goal(self.counter_id, goal_id)
self.assertTrue(goal.success)
def test_filters(self):
filters = self.metrika.filters(self.counter_id).filters
self.assertFalse(filters)
filter = self.metrika.add_filter(self.counter_id, 'url', 'contain', 'indexpage')
filter_id = filter.filter['id']
self.assertIsInstance(filter_id, int)
self.assertTrue(filter_id)
filter = self.metrika.update_filter(self.counter_id, filter_id, 'url', 'contain', 'newindexpage')
self.assertEqual(filter_id, filter.filter['id'])
filter = self.metrika.filter(self.counter_id, filter_id).filter
self.assertEqual(filter_id, filter['id'])
filter = self.metrika.delete_filter(self.counter_id, filter_id)
self.assertTrue(filter.success)
def test_operations(self):
operations = self.metrika.operations(self.counter_id)
self.assertTrue(len(operations.operations) == 0)
operation = self.metrika.add_operation(self.counter_id, 'merge_https_and_http', 'url', '', status='active')
operation_id = operation.operation['id']
self.assertIsInstance(operation_id, int)
self.assertTrue(operation_id)
operation = self.metrika.update_operation(self.counter_id, operation_id, 'merge_https_and_http', 'url', '',
status='disabled')
self.assertEqual(operation_id, operation.operation['id'])
operation = self.metrika.operation(self.counter_id, operation_id)
self.assertEqual(operation_id, operation.operation['id'])
operation = self.metrika.delete_operation(self.counter_id, operation_id)
self.assertTrue(operation.success)
def test_grants(self):
grants = self.metrika.grants(self.counter_id).grants
self.assertIsInstance(grants, list)
grant = self.metrika.add_grant(self.counter_id, self.other_user_login, 'view')
grant = self.metrika.grant(self.counter_id, self.other_user_login).grant
grant = self.metrika.update_grant(self.counter_id, '', 'public_stat')
grant = self.metrika.delete_grant(self.counter_id, '')
self.assertTrue(grant.success)
def test_clients(self):
self.assertIsInstance(self.metrika.clients([self.counter_id]).clients, list)
def test_binding_counters_to_labels(self):
LABEL_NAME = 'TEST_LABEL'
self.metrika.add_label(LABEL_NAME)
labels = self.metrika.labels().labels
self.assertTrue(len(labels) > 0)
labels = list(filter(lambda label: label['name'] == LABEL_NAME, labels))
self.assertTrue(len(labels) > 0)
label_id = labels[0]['id']
binding = self.metrika.bind_to_label(self.counter_id, label_id)
self.assertTrue(binding.success)
unbinding = self.metrika.unbind_from_label(self.counter_id, label_id)
self.assertTrue(unbinding.success)
label = self.metrika.delete_label(label_id)
self.assertTrue(label.success)
def test_segments(self):
segments = self.metrika.segments(self.counter_id).segments
self.assertIsInstance(segments, list)
SEGMENT_NAME = 'TEST_SEGMENT'
TEST_EXPRESSION = u"ym:s:regionCityName=='Москва'"
segment = self.metrika.add_segment(self.counter_id, SEGMENT_NAME, TEST_EXPRESSION).segment
self.assertTrue(segment['name'] == SEGMENT_NAME)
segment_id = segment['segment_id']
segment = self.metrika.segment(self.counter_id, segment_id).segment
self.assertTrue(segment['name'] == SEGMENT_NAME)
NEW_SEGMENT_NAME = 'TEST_NEW_SEGMENT'
self.metrika.update_segment(self.counter_id, segment_id, name=NEW_SEGMENT_NAME)
segment = self.metrika.segment(self.counter_id, segment_id).segment
self.assertTrue(segment['name'] == NEW_SEGMENT_NAME)
self.assertTrue(segment['expression'] == TEST_EXPRESSION)
segment = self.metrika.delete_segment(self.counter_id, segment_id)
self.assertTrue(segment.success)
def test_uploadings(self):
uploadings = self.metrika.uploadings(self.counter_id).uploadings
self.assertIsInstance(uploadings, list)
with StringIO('"P12345","age",42\r\n"P12345","name","abc"') as f:
filename = basename(getattr(f, 'file', 'file.csv'))
uploading = self.metrika.upload_uploading(self.counter_id, f).uploading
uploading_id = uploading['id']
COMMENT = u'Файл {}'.format(filename)
uploading = self.metrika.confirm_uploading(self.counter_id, uploading_id, content_id_type='user_id',
action='update', status='is_processed',
comment=COMMENT).uploading
self.assertTrue(uploading['comment'] == COMMENT)
uploading = self.metrika.uploading(self.counter_id, uploading_id).uploading
self.assertTrue(uploading['comment'] == COMMENT)
NEW_COMMENT = 'file'
uploading = self.metrika.update_uploading(self.counter_id, uploading_id, comment=NEW_COMMENT).uploading
self.assertTrue(uploading['comment'] == NEW_COMMENT)
n = 60
while uploading.get('status', '') == 'is_processed' and n > 0:
time.sleep(2)
uploading = self.metrika.uploading(self.counter_id, uploading_id).uploading
n -= 1
self.assertTrue(uploading['status'] == 'linkage_failure')
def test_offline_conversions(self):
extended_threshold = self.metrika.on_extended_threshold(self.counter_id)
self.assertTrue(extended_threshold.success)
extended_threshold = self.metrika.off_extended_threshold(self.counter_id)
self.assertTrue(extended_threshold.success)
data = '''UserId,Target,DateTime,Price,Currency
133591247640966458,GOAL1,1481718166,123.45,RUB
133591247640966458,GOAL2,1481718142,678.90,RUB
133591247640966458,GOAL3,1481718066,123.45,RUB
579124169844706072,GOAL3,1481718116,678.90,RUB
148059425477661429,GOAL2,1481718126,123.45,RUB
148059425477661429,GOAL3,1481714026,678.90,RUB
'''
with StringIO(data) as f:
filename = basename(getattr(f, 'file', 'file.csv'))
uploading = self.metrika.upload_offline_conversions(self.counter_id, f, 'USER_ID', comment=filename).uploading
self.assertTrue(uploading['status'] in ('LINKAGE_FAILURE', 'PROCESSED', 'UPLOADED'))
uploadings = self.metrika.offline_conversions_uploadings(self.counter_id).uploadings
self.assertTrue(len(uploadings) == 1)
uploading_id = uploadings[0]['id']
uploading = self.metrika.offline_conversions_uploading(self.counter_id, uploading_id).uploading
self.assertTrue(uploading['status'] in ('LINKAGE_FAILURE', 'PROCESSED', 'UPLOADED'))
def test_offline_conversions_calls(self):
calls_extended_threshold = self.metrika.on_calls_extended_threshold(self.counter_id)
self.assertTrue(calls_extended_threshold.success)
calls_extended_threshold = self.metrika.off_calls_extended_threshold(self.counter_id)
self.assertTrue(calls_extended_threshold.success)
data = '''UserId, DateTime, Price, Currency, PhoneNumber, TalkDuration, HoldDuration, CallMissed, Tag, FirstTimeCaller, URL, CallTrackerURL
133591247640966458, 1481714026, 678.90, RUB, +71234567890, 136, 17, 0,, 1, https://test.com/, https://test.com/
579124169844706072, 1481718066, 123.45, RUB, +70987654321, 17, 23, 0,, 2, https://test.com/, https://test.com/
148059425477661429, 1481718126, 678.90, RUB, +71234509876, 72, 11, 0,, 0, https://test.com/, https://test.com/
'''
with StringIO(data) as f:
filename = basename(getattr(f, 'file', 'file.csv'))
uploading = self.metrika.upload_calls(self.counter_id, f, 'USER_ID', comment=filename, new_goal_name='GOAL1').uploading
self.assertTrue(uploading['status'] in ('LINKAGE_FAILURE', 'PROCESSED', 'UPLOADED'))
uploadings = self.metrika.calls_uploadings(self.counter_id).uploadings
self.assertTrue(len(uploadings) == 1)
uploading_id = uploadings[0]['id']
uploading = self.metrika.calls_uploading(self.counter_id, uploading_id).uploading
self.assertTrue(uploading['status'] in ('LINKAGE_FAILURE', 'PROCESSED', 'UPLOADED'))
def test_statistics(self):
metrics = ['ym:s:visits', 'ym:s:users']
stat = self.metrika.stat_data(self.counter_id, ','.join(metrics), dimensions='ym:s:searchEngineName',
filters="ym:s:trafficSourceName=='Переходы из поисковых систем'")
self.assertIsInstance(stat.data, list)
self.assertEquals(stat.query['metrics'], metrics)
metrics = ['ym:s:pageviews']
stat = self.metrika.stat_data_drilldown(self.counter_id, ','.join(metrics))
self.assertIsInstance(stat.data, list)
self.assertEquals(stat.query['metrics'], metrics)
stat = self.metrika.stat_data_bytime(self.counter_id, ','.join(metrics))
self.assertIsInstance(stat.data, list)
self.assertEquals(stat.query['metrics'], metrics)
stat = self.metrika.stat_data_comparison(self.counter_id, ','.join(metrics))
self.assertIsInstance(stat.data, list)
self.assertEquals(stat.query['metrics'], metrics)
stat = self.metrika.stat_data_comparison_drilldown(self.counter_id, ','.join(metrics), limit=50)
self.assertIsInstance(stat.data, list)
self.assertEquals(stat.query['metrics'], metrics)
class TestMetrikaWithoutCounter(TestMetrikaBase):
def test_accounts(self):
delegates = self.metrika.add_delegate(self.other_user_login, comment='comments').delegates
self.assertIsInstance(delegates, list)
self.assertTrue(list(filter(lambda item: item['user_login'] == self.other_user_login, delegates)))
accounts = self.other_metrika.accounts().accounts
self.assertIsInstance(accounts, list)
self.assertTrue(list(filter(lambda item: item['user_login'] == self.main_user_login, accounts)))
accounts = self.other_metrika.update_accounts(accounts).accounts
self.assertIsInstance(accounts, list)
self.assertTrue(list(filter(lambda item: item['user_login'] == self.main_user_login, accounts)))
account = self.other_metrika.delete_account(self.main_user_login)
self.assertTrue(account.success)
delegates = self.metrika.delegates().delegates
self.assertIsInstance(delegates, list)
self.assertFalse(list(filter(lambda item: item['user_login'] == self.other_user_login, delegates)))
def test_delegates(self):
delegates = self.metrika.delegates().delegates
self.assertIsInstance(delegates, list)
delegates = self.metrika.add_delegate(self.other_user_login, comment='comments').delegates
self.assertIsInstance(delegates, list)
self.assertTrue(list(filter(lambda item: item['user_login'] == self.other_user_login, delegates)))
delegates = self.metrika.delegates().delegates
self.assertIsInstance(delegates, list)
self.assertTrue(list(filter(lambda item: item['user_login'] == self.other_user_login, delegates)))
delegate = self.metrika.delete_delegate(self.other_user_login)
self.assertTrue(delegate.success)
def test_labels(self):
labels = self.metrika.labels().labels
self.assertIsInstance(labels, list)
LABEL_NAME = 'TEST_LABEL'
self.metrika.add_label(LABEL_NAME)
labels = self.metrika.labels().labels
self.assertTrue(len(labels) > 0)
labels = list(list(filter(lambda label: label['name'] == LABEL_NAME, labels)))
self.assertTrue(len(labels) > 0)
label_id = labels[0]['id']
label = self.metrika.label(label_id).label
self.assertTrue(label['name'] == LABEL_NAME)
NEW_LABEL_NAME = 'TEST_LABEL2'
label = self.metrika.update_label(label_id, NEW_LABEL_NAME).label
self.assertTrue(label['name'] == NEW_LABEL_NAME)
label = self.metrika.delete_label(label_id)
self.assertTrue(label.success)
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 15,928 | py | 6 | test_yametrikapy.py | 4 | 0.664716 | 0.639985 | 0 | 376 | 41.263298 | 147 |
patcoronel/praise | 4,303,557,278,318 | 7c7d789281908e5f745b52d75fbc8ee4ed8829db | fb5d4164474309b43c9099fa14d524f67db37572 | /praise/lyrics/migrations/0003_auto_20150623_0857.py | 140908314730c2c95c9d0501c14d22e4f4bbe16a | []
| no_license | https://github.com/patcoronel/praise | 0e52bbae651a6caacf489c923aa7f308c491b8ab | 404ca098a01dcb522d6b91e35a90a175b7f24436 | refs/heads/master | 2018-01-07T15:38:00.873632 | 2015-07-11T07:17:13 | 2015-07-11T07:17:13 | 37,776,223 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('lyrics', '0002_auto_20150623_0741'),
]
operations = [
migrations.AlterField(
model_name='song',
name='default_lyric_order',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=50), blank=True),
),
migrations.AlterField(
model_name='song',
name='writers',
field=models.ManyToManyField(to='lyrics.Artist', blank=True),
),
]
| UTF-8 | Python | false | false | 713 | py | 15 | 0003_auto_20150623_0857.py | 10 | 0.614306 | 0.587658 | 0 | 25 | 27.52 | 138 |
jacob414/flexirest | 14,834,817,074,486 | 1f816b044fb4c601b2a462109b235ac59f72d2c5 | 6872ab0d2636bde1af1eff36d80c913d20db67d9 | /flexirest/main.py | d6c471febe6c43874cfba11a8a3151fffd44c278 | []
| no_license | https://github.com/jacob414/flexirest | 1b9b3233041b6b56772485c0bd1812f4cb44f948 | 211934a99286cc0465d7692e934b745a373bc124 | refs/heads/master | 2022-02-22T05:51:28.672246 | 2019-10-25T13:51:43 | 2019-10-25T13:51:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import with_statement
import os
import sys
import optparse
import imp
import errno
import platform
from functools import partial
import flexirest
from flexirest.cli import (dispatch, DefaultAction, ShowVersion, SilentExit,
UnknownSubcommand)
from flexirest.io import BufferedFile
from flexirest import rendering, defaults, strategies
from flexirest.util import shellopen, StdoutConsole
def _import(modname, onFailRaise=True):
try:
return __import__(modname)
except ImportError:
if onFailRaise:
raise
return imp.new_module(modname)
class Io(object):
"""
A composite that represents input/output channels that the program
need:
* The user's console (typically `sys.stdout`)
* The error channel (typically `sys.stderr`)
* The source (input file/`sys.stdin`)
* The destination (output file/`sys.stdout`)
"""
def __init__(self, stdin=sys.stdin, stderr=sys.stderr):
self.stderr = stderr
self.console = sys.stdout
self.source = sys.stdin
self.destination = sys.stdout
def line(self, msg):
"""
        Convenience method that formats a line of output.
"""
return '%s%s' % (msg, os.linesep)
def say(self, msg):
"""
Tell the user something (typically on `sys.stdout`)
"""
self.console.write(self.line(msg))
def complain(self, msg):
"""
Inform the user about error conditions (typically on `sys.stderr`)
"""
self.stderr.write(self.line(msg))
def out(self, data):
"""
Output data to destination (output file/`sys.stdout`).
"""
self.destination.write(data)
def show_status(io, args):
"""
Shows a summary of which writers are present and functional on the
user's system. (TODO: with small instructions on how to fix
non-functional writers)
"""
functional, nonfunctional = strategies.check_writers()
linefn = lambda n, desc: (lambda: ' %s\t%s' % (n, desc))().expandtabs(16)
def lines(strategies):
for name in sorted(strategies):
yield linefn(name, strategies[name].description)
import tempita
tmpl = tempita.Template(flexirest.STATUS,
namespace={'functional': lines(functional),
'nonfunctional': lines(nonfunctional)})
io.say(tmpl.substitute())
return 0
def show_info(io):
"""
Writes brief information about the program when it's run with no
parameters.
"""
io.say(flexirest.INFO % flexirest.VERSION)
show_status(io, ())
return 0
def show_version(io):
io.say(flexirest.VERSION)
return 0
def writer_action(io, name, Strategy, options, args):
"""
Called by `commandline()` as a result of sub-command
dispatching. At this point the name of the writer is in `name` and
the strategy that should be send to the `rendering` module is in
the parameter `Strategy`.
The return value of this function will be the command line's
return value.
"""
dest = None
if options.outfile:
dest = shellopen(options.outfile, 'w')
if len(args) == 1:
io.source = shellopen(args[0], 'r')
else:
if len(args) == 1:
# Only infile
io.source = shellopen(args[0], 'r')
elif len(args) > 1:
io.source = shellopen(args[0], 'r')
dest = BufferedFile(os.path.expanduser(args[1]))
if dest:
io.destination = dest
sys.path.append(os.getcwd())
if options.config:
confmod = _import(options.config)
else:
confmod = _import('flexiconf', False)
tpath = getattr(options, 'template', False)
if tpath:
template = shellopen(tpath, 'r').read()
else:
template = defaults.templates[name]
strategy = strategies.from_name(name)
if options.dump_parts:
rendering.dump_parts(strategy,
io,
confmod,
options,
template)
else:
rendering.render(strategy,
io,
confmod,
options,
template)
# XXX Way to simple way to treat return codes
return 0
def options(console, name, Strategy, args):
"""
Calls the strategy to add writer-specific command line options.
"""
parser = optparse.OptionParser(usage='usage',
description='description')
Strategy.add_options(parser)
parser.add_option('-l', '--lang', action='store', dest='lang', default='en',
help='specify language (both input and output)')
parser.add_option('-o', '--outfile', action='store', dest='outfile',
help='write output to this file')
return parser
def commandline(args=None, io=None):
"""
Entry point for the command line script.
"""
if io is None:
io = Io()
if args is None:
args = sys.argv[1:]
actions = {
'st': partial(show_status, io),
'status': partial(show_status, io)
}
for name, Strategy in strategies.possible_writers.iteritems():
actions[name] = (partial(writer_action, io, name, Strategy),
partial(options, io, name, Strategy))
try:
return dispatch(actions, args)
except DefaultAction:
show_info(io)
return 0
except UnknownSubcommand, e:
io.complain("flexirest: '%s' is not a valid writer" % e.subcmd)
return errno.EINVAL
except strategies.NonFunctionalStrategy, e:
io.complain("flexirest: the '%s' writer is not functional on "
"your system" % e.name)
io.complain(" (hint: %s)" % e.hint)
return errno.ENOSYS
except ShowVersion:
show_version(io)
return 0
except SilentExit:
return 0
| UTF-8 | Python | false | false | 6,064 | py | 29 | main.py | 22 | 0.585422 | 0.582619 | 0 | 208 | 28.153846 | 80 |
david-sherman/statelegislators | 14,018,773,302,456 | 40f086eba4d035bb24e9c1b1461e3a555d98933a | add3bd52c2114f8efd259e50ca4f2d21e02e31d5 | /statelegislators.py | 8b4a379019fd436d11fc2f8411b98108ac0e8f8c | [
"MIT"
]
| permissive | https://github.com/david-sherman/statelegislators | cf6b46e498b4d3e6b65514afb2226f7441d9953b | f77e83503bf64dfa6c65d21d93cebd6243dc457b | refs/heads/master | 2021-07-20T17:36:17.830698 | 2018-05-04T17:02:33 | 2018-05-04T17:02:33 | 120,516,720 | 4 | 0 | MIT | false | 2021-06-01T21:49:12 | 2018-02-06T20:05:03 | 2019-01-14T19:01:48 | 2021-06-01T21:49:11 | 148 | 3 | 0 | 2 | Python | false | false | import requests
from urllib.parse import quote
from time import sleep
import json
import sys
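# This script pulls the legislators for every state from the Open States API,
# matches each one to the corresponding Google Civic Information official for the
# same chamber and district (by full name or last name), and writes the matched
# ids together with any social-media accounts to data/<state>.json.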
class openstates():
headers = None
base_url = "https://openstates.org/api/v1/"
def __init__(self, api_key):
self.headers = {'X-API-KEY': api_key}
def fetch(self, url):
result = requests.get(url,headers=self.headers)
return json.loads(result.text)
def stateLegislators(self, state, chamber ):
url = "%s%s?state=%s&chamber=%s" % (self.base_url, "legislators", state, chamber)
return self.fetch(url)
def metatdata(self):
url = "%s/metadata" % (self.base_url)
return self.fetch(url)
class google():
api_key = ""
civic_template = "https://www.googleapis.com/civicinfo/v2/representatives/%s?key=%s&recursive=true"
sleep = 1 # seconds to sleep between calls to the google civi api. The api is definitely rate limited
def __init__(self,api_key):
self.api_key = api_key
def stateLegislators(self, state,chamber,district):
sleep( self.sleep)
oc_id = "ocd-division/country:us/state:%s/sld%s:%s" % (state, chamber[0:1], district)
url = self.civic_template % (quote(oc_id, safe=""), self.api_key)
result = requests.get(url)
payload = json.loads(result.text)
if 'error' in payload:
print( "ERROR")
print ( payload["error"]["errors"][0])
print( "Adjust global sleep parameter ")
sys.exit(1)
return payload
class stateClass():
openstates = None
abbreviation = None
chambers = [ 'upper','lower']
resultset = {}
openstateLegislatorCount = 0
civicLegislatorCount = 0
matched = 0
matched_with_accounts = 0
accounts = 0
def __init__(self,oso,civic,abbreviation):
self.openstates = oso
self.civic = civic
self.abbreviation = abbreviation
self.resultset = { 'state': self.abbreviation , "upper" : [], "lower" : []}
print("Processing %s" % self.abbreviation)
def processDistrict(self, chamber, district, openstatesLegislators):
print( "Processing %s %s %s " % (self.abbreviation, chamber, district))
districtLegislators = self.civic.stateLegislators(self.abbreviation,chamber,district)
if not "officials" in districtLegislators:
print(
"CIVIC API returns no officials found for state %s chamber %s district %s" % (self.abbreviation, chamber, district))
print(districtLegislators)
return None
matched = 0
for official in districtLegislators["officials"]:
self.civicLegislatorCount = self.civicLegislatorCount + 1
# determine the last name. Strip out punctuation and prefixes
name = ''.join(ch for ch in official['name'] if ch not in set( '.,'))
official['adjusted_name'] = name
components = name.lower().split(" ")
components.reverse()
if components[0] in ['jr', 'sr', 'ii', 'iii', 'iv', 'phd', 'esq', 'md']: components.pop(0)
official['last_name'] = components[0]
for person in openstatesLegislators:
openstatesname = person['first_name'] + " " + person['last_name']
for official in districtLegislators["officials"]:
# Two ways names can match. (1) they simply do as in : "Robert Van Wagner" == "Robert Van Wagner"
# or (2) their last names match as in : "Julio E. Rodriguez Jr. == "Rodriguez"
if ( openstatesname == official["adjusted_name"] ) or person['last_name'].lower() == official["last_name"] :
accounts = []
if "channels" in official: accounts = official["channels"]
self.resultset[chamber].append( { "id" : person["id"], "name" : openstatesname, "accounts" : accounts})
self.accounts = self.accounts + len(accounts)
matched = matched + 1
self.matched = self.matched + 1
if (len(accounts) > 0 ) :
self.matched_with_accounts = self.matched_with_accounts + 1
if matched != len(openstatesLegislators):
print("Match issue: %s %s district '%3s' matched %s of %s." % (self.abbreviation, chamber, district, matched, len(openstatesLegislators)))
for official in districtLegislators["officials"]:
print( " --- civic :'%s'" % official['name'] )
for person in openstatesLegislators:
print( " --- openstates : '%s %s'" % (person['first_name'],person['last_name']))
def processChamber(self, chamber, legislators):
chambermap = {}
for person in legislators:
self.openstateLegislatorCount = self.openstateLegislatorCount + 1
district = str(person["district"])
if not district in chambermap:
chambermap[district] = []
bucket = chambermap[district]
bucket.append(person)
for district in chambermap:
if not district.isnumeric():
print("skipping %s %s district '%s'" % (self.abbreviation, chamber, district))
else:
self.processDistrict(chamber,district,chambermap[district])
def process(self):
for chamber in self.chambers:
self.processChamber(chamber, oso.stateLegislators(self.abbreviation,chamber))
filename = "data/%s.json" % self.abbreviation
with open(filename, 'w') as outfile:
json.dump(self.resultset, outfile)
print("----------------------------------------")
print( "%s : %s open state legislators found" % (self.abbreviation, self.openstateLegislatorCount ))
print( "%s : %s google civic API legislators found" % (self.abbreviation, self.civicLegislatorCount ))
print( "%s : %s matched legislators" % (self.abbreviation, self.matched ))
print( "%s : %s matched legislators with social media accounts" % (self.abbreviation, self.matched_with_accounts ))
print( "%s : %s assigned accounts" % (self.abbreviation, self.accounts ))
print( "%s : %s " % ( self.abbreviation, filename ))
###################################
if len(sys.argv) != 3 :
print( "Usage : statelegislators.py openstates_api_key google_civic_api_key" )
oso = openstates( sys.argv[1])
civic = google(sys.argv[2])
states = oso.metatdata()
for item in states:
if item["abbreviation"] == 'pr' : continue
state = stateClass(oso, civic, item["abbreviation"])
state.process()
| UTF-8 | Python | false | false | 6,653 | py | 53 | statelegislators.py | 1 | 0.594018 | 0.589809 | 0 | 163 | 39.809816 | 150 |
zains97/se-project-ainak | 11,330,123,761,147 | 48a7fc4fb8658be783afc136611a49ac4df65681 | 06c5ffef598aaa1ec3f0f56d1a4a51ea47597fec | /website/ainak/recommend_system/migrations/0044_auto_20201006_1645.py | 8a7186556d391a1570f257b75b69faf1cb8b02cb | []
| no_license | https://github.com/zains97/se-project-ainak | 81895ec5ae68de060ab367a0ee0115a0a485190a | de17684a66c3ec3ac8ae5ce968c9745503166a93 | refs/heads/main | 2023-04-08T16:23:25.172505 | 2021-04-12T08:47:55 | 2021-04-12T08:47:55 | 357,113,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.5 on 2020-10-06 11:45
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recommend_system', '0043_auto_20201006_1632'),
]
operations = [
migrations.RemoveField(
model_name='click_item',
name='datetime',
),
migrations.RemoveField(
model_name='comment',
name='rating',
),
migrations.RemoveField(
model_name='search',
name='pid',
),
migrations.RemoveField(
model_name='wish_list',
name='datetime',
),
migrations.AddField(
model_name='search',
name='datetime',
field=models.DateTimeField(default=datetime.datetime.now),
),
migrations.AddField(
model_name='search',
name='post_id',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='recommend_system.Home'),
),
migrations.AlterField(
model_name='search',
name='user_id',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='recommend_system.User'),
),
]
| UTF-8 | Python | false | false | 1,333 | py | 75 | 0044_auto_20201006_1645.py | 55 | 0.55964 | 0.536384 | 0 | 46 | 27.978261 | 121 |
PidronBatol/pythonProjectFriday | 12,498,354,848,251 | 1b31f7d4d14277ab57e36091630a15526fe18d16 | b4944cb28290e959be45648f1766882e801b0ebb | /main.py | 94a4e8957df46191ff2c9c922d245c7361ada1e1 | []
| no_license | https://github.com/PidronBatol/pythonProjectFriday | 6dd816bf89bbbaccfc6bc9ee4e22b00ee7a46c61 | 78be2d022b1b303acae3eb9ca84dea21ad42b562 | refs/heads/master | 2023-04-27T16:18:03.490906 | 2021-05-21T21:35:49 | 2021-05-21T21:35:49 | 369,658,361 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print('Version control')
print(2+3)
| UTF-8 | Python | false | false | 37 | py | 1 | main.py | 1 | 0.702703 | 0.648649 | 0 | 3 | 11.333333 | 24 |
yangboyubyron/DS_Recipes | 10,599,979,289,019 | 7a4eeddf9a40401529bab92c056d6524f0baf9fc | d473a271deb529ed2199d2b7f1c4c07b8625a4aa | /Stats_and_Math/BasicPlottingSlope.py | e42ddb2f00b191a18dbcffe25fc7fd1dcb3b94e8 | []
| no_license | https://github.com/yangboyubyron/DS_Recipes | e674820b9af45bc71852ac0acdeb5199b76c8533 | 5436e42597b26adc2ae2381e2180c9488627f94d | refs/heads/master | 2023-03-06T05:20:26.676369 | 2021-02-19T18:56:52 | 2021-02-19T18:56:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Math for Modelers Session#1 Python Module #2
# Reading assignment "Think Python" either 2nd or 3rd edition:
# 2nd Edition Chapter 2, Chapter 3 (3.1-3.3), and Chapter 8 (8.1-8.2)
# 3rd Edition Chapter 2, Chapter 3 (pages 23-25), and Chapter 8 (pages 85-88)
# Also review the handouts dealing with numpy and Matplotlib
# Module #2 objectives: 1) demonstrate computational capabilities of
# Python and 2) illustrate some of Python's printing and plotting capabilities.
# (This will require importing software that provides the necessary capabilities.
# Matplotlib and numpy are commonly used.)
# Instructions---
# Execute this script as a single program. The results will appear below or
# in a separate window which will reveal the plots. Print statements will be
# used to separate sections.
# matplotlib.pyplot is a library of plotting software for Python.
# numpy is the library of numerical functions and software for Python.
# Note the difference in the import statements. Because of the number of
# functions used for plotting, the asterisk is being used.
import matplotlib.pyplot
from matplotlib.pyplot import *
import numpy
from numpy import linspace
# The first example will demonstrate calculation of the slope for a linear
# equation using Example 5 from Section 1.1 of Lial.
# The use of \n causes a line to be skipped before the text is printed. The
# characters \n are otherwise ignored.
print ('\nlinear equation construction')
x1= 5.0
y1= 4.0
x2= -10.0
y2= -2.0
slope= (y2-y1)/(x2-x1)
print ('\nslope of line = %r') %slope
# The next example will show how to use the calculated slope to form the
# linear equation and calculate a result.
x= 1.0
y= y1 + slope*(x-x1)
print ('\nValue of y if x is 1.0 equals %r') %y
# The next section shows how to assign a string to a variable for printing
# purposes. The variable is called "equation" and it will be printed as a
# string. This is a useful technique that will be used in later modules.
# Note the use of %s in the print statement. The s denotes the output is a
# string. Note the compound print statement for x1 and y1.
equation= str('y = y1 + slope*(x-x1)')
print ('\nEquation of a line is %s') %equation
print ('\nx1 equals %r and y1 equals %r ') % (x1, y1)
# The next example will show how to plot Example 11 from Section 1.1 of Lial.
# Plotting limits need to be set to define the dimensions of the plot. The
# linspace statement divides the interval [-1,8] into 100 points including
# the first, -1, and the last, 8. If you have doubts about the contents or the
# length of x, enter either the statements print x or len(x) in the interpreter
# to see the contents or find out the length.
x= linspace(-1,8,100)
y= 6.0 - 1.5*x
title('Plot of Linear Equation '+equation) # Note how the title appears.
plot(x,y)
show()
# The next example will show how to solve Example 6 Section 1.2 of Lial
# graphically using Python. Note that integers and floating point numbers can
# be combined in the same equation. The result is a floating point number. The
# figure() statement separates the following plot from the previous plot.
# Two windows will appear with separate plots one behind the other.
x= linspace(0,50,100)
y= 20*x+100.0
z= 24*x
# loc=2 places the legend in the upper left corner. The order in which these
# statements appear is important. legend() will associate 'cost' with the
# first statement and 'revenue' with the second. For more information about
# plot(), type the phrase plot? in the interpreter and enter.
figure()
plot (x,y)
plot (x,z)
legend (('cost','revenue'),loc=2)
title ('Breakeven Analysis')
show()
# Exercise #1: Use Python graphically to solve the supply and demand problem
# shown in Example 2 Section 1.2 of Lial. Compare your code and plot to the
# answer sheet.
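# A possible starting point for Exercise #1. The supply and demand equations
# below are assumed for illustration only (they are not the data from Lial's
# Example 2); substitute the textbook's equations and read the equilibrium
# quantity and price from the intersection of the two lines.
q= linspace(0,80,100)
demand= 60.0 - 0.75*q
supply= 0.75*q
figure()
plot (q,demand)
plot (q,supply)
legend (('demand','supply'),loc=1)
title ('Supply and Demand (illustrative data)')
show()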
# Exercise #2: Using Python as a calculator, calculate the correlation
# coefficient in Example 4 of Section 1.3 of Lial. Compare your code and
# computed result with the answer sheet.
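# A possible approach to Exercise #2. The x and y data below are assumed for
# illustration only; replace them with the values from Lial's Example 4. The
# correlation coefficient r is computed from its definition and then checked
# against numpy's built-in corrcoef function.
xd= numpy.array([1.0, 2.0, 3.0, 4.0, 5.0])
yd= numpy.array([2.0, 4.0, 5.0, 4.0, 6.0])
xdev= xd - xd.mean()
ydev= yd - yd.mean()
r= (xdev*ydev).sum()/numpy.sqrt((xdev*xdev).sum()*(ydev*ydev).sum())
print ('\ncorrelation coefficient r = %r' % r)
print (numpy.corrcoef(xd,yd)[0,1])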
| UTF-8 | Python | false | false | 4,016 | py | 496 | BasicPlottingSlope.py | 335 | 0.736803 | 0.708416 | 0 | 102 | 38.323529 | 81 |
shiv4m/hough_transformation | 13,460,427,520,123 | f5f8e27a82f0c5dca7eacc631d2512b797c9d778 | 22f99f5e0705219fca1c695d1bf04e15df6c3fe7 | /task3_bonus.py | 306c231c2e7d85c1c206709f6e6323fec14be252 | []
| no_license | https://github.com/shiv4m/hough_transformation | d16611ba276ce5cca0c59845038fb7d431894cdd | e73180da1877493ace8af02773d8ebc4d5a8f9bd | refs/heads/master | 2020-05-02T16:39:18.107962 | 2019-03-27T21:27:25 | 2019-03-27T21:27:25 | 178,075,686 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import cv2
import math
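# The function below is a Hough circle transform: every edge pixel votes, for each
# candidate radius r in {24, 23, 22, 21, 20}, for the centre positions (a, b) that
# could place a circle of radius r through it; accumulator cells that collect at
# least 170 votes are reported as circles.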
def detect_circles(edgeImage):
circleList = []
sine = []
cosine = []
for theta in range(0, 360):
sine.append(np.sin(theta * np.pi/180))
cosine.append(np.cos(theta * np.pi/180))
r = [i for i in range(24, 19, -1)]
abrSpace = np.zeros([edgeImage.shape[0], edgeImage.shape[1], len(r)])
for radius in range(len(r)):
for x in range(edgeImage.shape[0]):
for y in range(edgeImage.shape[1]):
if(edgeImage[x][y] == 255):
for angle in range(0, 360):
a = np.absolute(x - int(r[radius] * cosine[angle]))
b = np.absolute(y + int(r[radius] * sine[angle]))
if(a < edgeImage.shape[0] and b < edgeImage.shape[1]):
abrSpace[int(a)][int(b)][radius] += 1
print(np.max(abrSpace))
prev = 0
for k in range(len(r)):
for i in range(edgeImage.shape[0]):
for j in range(edgeImage.shape[1]):
if(i > 0 and j > 0 and i < edgeImage.shape[0]-1 and j < edgeImage.shape[1]-1 and abrSpace[i][j][k] >= 170):
circleList.append((i, j, r[k]))
return circleList
img = cv2.imread('original_imgs/original_imgs/hough.jpg', 0)
img_copy = img
blurred_img = cv2.GaussianBlur(img_copy, (5, 5), 0.50)
edgeImage = cv2.Canny(blurred_img, 100, 200)
circleList = detect_circles(edgeImage)
print(circleList)
previous, __ = 0, 0
imgs = cv2.imread('original_imgs/original_imgs/hough.jpg', 1)
for x in circleList:
if(x[2]== 20 or x[2]==21 or x[2]==22 or x[2]==23 or x[2]==24):
if(x[0] - previous > 2):
previous = x[0]
cv2.circle(imgs, (x[1], x[0]), x[2], (28, 28, 255), 2)
if(x[2]==21):
if(x[0] - __ > 8):
__ = x[0]
cv2.circle(imgs, (x[1], x[0]), x[2], (28, 28, 255), 2)
cv2.imwrite('coin.jpg', imgs) | UTF-8 | Python | false | false | 1,803 | py | 3 | task3_bonus.py | 2 | 0.570715 | 0.509706 | 0 | 52 | 32.711538 | 111 |
UWCoffeeNCode/barista | 1,022,202,234,535 | b0d47d73f1fead798bcdec338b23bcb5d0823313 | de94c60833f69229821cae90a9f1a7ef87eb2f5a | /barista/migrations/0006_auto_20201028_1444.py | b5de7adc60ed3eed876f3072d339a17c22fb370a | [
"MIT"
]
| permissive | https://github.com/UWCoffeeNCode/barista | 0d17e9d9d9eec8a1864579686ba7a632bb0c3b87 | edfbfada463d3b2b2e93f2f58673a13541a3e2f8 | refs/heads/master | 2023-01-06T11:20:08.018123 | 2020-10-30T05:27:49 | 2020-10-30T05:47:03 | 307,414,356 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-10-28 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('barista', '0005_auto_20201028_1319'),
]
operations = [
migrations.AlterField(
model_name='user',
name='is_verified',
field=models.BooleanField(default=False, help_text='Designates that this user has verified their email address and set a password.', verbose_name='verified'),
),
]
| UTF-8 | Python | false | false | 509 | py | 20 | 0006_auto_20201028_1444.py | 14 | 0.636542 | 0.575639 | 0 | 18 | 27.277778 | 170 |
qayrat-sultan/FS-mini-project | 3,985,729,662,779 | 1462818bc7ab1908d244e59f2901cf537c5b8e54 | 6739c0e9d566bacb25ff31ff4637506aa4295a76 | /main.py | d316c045f1b1916ecb909d055a2b3f8b031b3349 | [
"MIT"
]
| permissive | https://github.com/qayrat-sultan/FS-mini-project | ed3d28304968c586bc7aec7157f486201094ee3b | c2477305ba521fbdda1fc7d6dee8ba131aa4055e | refs/heads/main | 2023-07-19T23:37:15.109077 | 2021-08-31T08:18:45 | 2021-08-31T08:18:45 | 434,164,317 | 1 | 0 | MIT | true | 2021-12-02T09:50:17 | 2021-12-02T09:50:16 | 2021-08-31T08:18:48 | 2021-09-01T15:43:04 | 6,034 | 0 | 0 | 0 | null | false | false | """
This is the runner file of the application.
It creates the login widget.
"""
import sys
from PySide6.QtWidgets import QApplication
from widgets import LoginWidget
# runner
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle('Fusion')
window = LoginWidget()
window.show()
app.exec()
| UTF-8 | Python | false | false | 324 | py | 35 | main.py | 23 | 0.685185 | 0.682099 | 0 | 17 | 18.058824 | 42 |
rajeshyogeshwar/exchange_calendars | 2,774,548,923,446 | b9e7b2883dfc94eb0c85f833b7c3a6823bbd2768 | 255625c09652dc3b595282f02fe142f9c11adcfc | /tests/test_bvmf_calendar.py | bfe1ba7c3009472c8b7bb0a3ffff1a4dfef32841 | [
"Apache-2.0"
]
| permissive | https://github.com/rajeshyogeshwar/exchange_calendars | 89fce34e136cfcdbc33e4846d02faf31cc986924 | a5f2ca17fc4a9195bc56b4b783420b4e9b999fc5 | refs/heads/master | 2023-08-25T15:19:45.196733 | 2021-10-06T23:05:30 | 2021-10-07T07:38:47 | 415,899,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
from exchange_calendars.exchange_calendar_bvmf import BVMFExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBaseNew
class TestBVMFCalendar(ExchangeCalendarTestBaseNew):
@pytest.fixture(scope="class")
def calendar_cls(self):
yield BVMFExchangeCalendar
@pytest.fixture
def max_session_hours(self):
yield 7
@pytest.fixture
def regular_holidays_sample(self):
yield [
# 2017
"2017-01-25", # Sao Paolo City Anniversary
"2017-02-27", # Carnival
"2017-02-28", # Carnival
"2017-04-14", # Good Friday
"2017-04-21", # Tiradentes Day
"2017-05-01", # Labor Day
"2017-06-15", # Corpus Christi Day
"2017-09-07", # Independence Day
"2017-10-12", # Our Lady of Aparecida Day
"2017-11-02", # All Souls Day
"2017-11-15", # Proclamation of the Republic Day
"2017-11-20", # Black Consciousness Day
"2017-12-25", # Christmas Day
"2017-12-29", # Day before New Years
"2018-01-01", # New Year's Day
#
# First occurrences
"1998-07-09", # First occurrence of Constitutionalist Revolution holiday
"2006-11-20", # Day of Black Awareness
#
# New Year's Eve
# if Jan 1 is Tuesday through Saturday, exchange closed the day before.
# if Jan 1 is Monday or Sunday, exchange closed the Friday before.
"2017-12-29", # 2018: Jan 1 is Monday, so Friday 12/29 should be closed
"2016-12-30", # 2017: Jan 1 is Sunday, so Friday 12/30 should be closed
"2010-12-31", # 2011: Jan 1 is Saturday, so Friday 12/31 should be closed
"2013-12-31", # 2014: Jan 1 is Wednesday, so Tuesday 12/31 should be closed
]
@pytest.fixture
def adhoc_holidays_sample(self):
yield ["2014-06-12"] # world-cup
@pytest.fixture
def non_holidays_sample(self):
yield [
"1997-07-09", # year prior to first Constitutionalist Revolution holiday
"2003-11-20", # year prior to first Day of Black Awareness holiday
]
# FIXME: add back in later (NB late opens not included to calendar)
# @pytest.fixture(scope="class")
# def late_opens_sample(self):
# # Ash Wednesday, 46 days before Easter Sunday
# yield ["2016-02-10", "2017-03-01", "2018-02-14"]
| UTF-8 | Python | false | false | 2,526 | py | 39 | test_bvmf_calendar.py | 32 | 0.585907 | 0.482581 | 0 | 64 | 38.46875 | 88 |
Mallika2000/Face-Recognition | 11,759,620,501,691 | 880bbcde45e1eeb62be0aa7110d0a408d962ed28 | 944c7bd26fc3ef41d04998a7edc9e9c32d203c50 | /Face Recognition.py | 7c18671d9b2205bedcfa44c7cf7086f8c87a4a78 | []
| no_license | https://github.com/Mallika2000/Face-Recognition | 1316f2052ceef7fe0e9e7d987031ccec287543ef | f23aad437c5e2a743346550a88d8d6a5b075cdbe | refs/heads/main | 2023-07-27T21:26:09.380131 | 2021-08-22T10:32:15 | 2021-08-22T10:32:15 | 398,772,325 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import face_recognition
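# Pipeline: load a reference photo and a test photo, convert both to RGB, locate the
# first detected face in each, compute the 128-d face encodings, and compare them;
# compare_faces / face_distance then indicate whether the two faces likely belong to
# the same person (the library's default match tolerance is a distance of 0.6).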
imgV = face_recognition.load_image_file('Images/bts-jungkook.jpg')
imgV = cv2.cvtColor(imgV,cv2.COLOR_BGR2RGB)
imgVTest = face_recognition.load_image_file('Images/BTS-RM.jpg')
imgVTest = cv2.cvtColor(imgVTest,cv2.COLOR_BGR2RGB)
faceLoc = face_recognition.face_locations(imgV)[0]
encodeV = face_recognition.face_encodings(imgV)[0]
cv2.rectangle(imgV,(faceLoc[3],faceLoc[0]),(faceLoc[1],faceLoc[2]),(255,0,255),2)
faceLocTest = face_recognition.face_locations(imgVTest)[0]
encodeVTest = face_recognition.face_encodings(imgVTest)[0]
cv2.rectangle(imgVTest,(faceLocTest[3],faceLocTest[0]),(faceLocTest[1],faceLocTest[2]),(255,0,255),2)
results = face_recognition.compare_faces([encodeV],encodeVTest)
faceDistance= face_recognition.face_distance([encodeV],encodeVTest)
print(results,faceDistance)
cv2.putText(imgVTest,f'{results}{round(faceDistance[0],2)}',(50,50),cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,1,(0,0,255),2)
cv2.imshow('BTS V',imgV)
imgVTest1 = cv2.resize(imgVTest, (960, 540))
cv2.imshow("IMAGEV", imgVTest1)
#cv2.imshow('BTS V TEST',imgVTest)
cv2.waitKey(0)
| UTF-8 | Python | false | false | 1,121 | py | 1 | Face Recognition.py | 1 | 0.763604 | 0.704728 | 0 | 30 | 36.366667 | 114 |
ab-natcap/idb-scripts | 11,330,123,768,655 | da0387b11d198b1c1ea483125a31b98ed62cf281 | 7bf56c0bba5a6b73ca04fda3e51cdd32e24bb29c | /sargassum/reclass_sum_and_nodata_count.py | a8678cc1010ca4d202193f0e640533cefa9d4a62 | []
| no_license | https://github.com/ab-natcap/idb-scripts | b1b0b18053c8966b4b0d9ebf9f68dd67be14835d | 77c630954694c3fe94c30c33312c679b1797b0eb | refs/heads/master | 2022-08-28T17:28:55.339014 | 2022-08-26T23:02:35 | 2022-08-26T23:02:35 | 232,914,050 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ reclass_sum_and_nodata_count.py
Author: Doug Denu
Date: 2022-01-05
From https://gist.github.com/dcdenu4/f0c7c93da397e03768d548162fb2c8f2
"""
import os
import sys
import logging
import glob
import numpy
from osgeo import gdal
from osgeo import osr
import pygeoprocessing
logging.basicConfig(
level=logging.DEBUG,
format=(
'%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
' [%(funcName)s:%(lineno)d] %(message)s'),
stream=sys.stdout)
LOGGER = logging.getLogger(__name__)
TARGET_NODATA = -1
TARGET_DATATYPE = gdal.GDT_Int16
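# Processing pipeline used below: (1) align all input rasters onto a common grid,
# (2) reclassify them to {-1, 0, 1} with -1 as nodata, and then (3) optionally build
# a per-pixel sum (nodata treated as 0) and a per-pixel nodata count across the stack.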
def align_rasters_step(raster_path_list, intermediate_dir):
"""Make sure that all the rasters are aligned for per pixel operations.
Args:
raster_path_list (list): list of strings of raster file paths to align
intermediate_dir (string): location on disk to save the aligned rasters
Returns:
a list of strings for the aligned raster paths
"""
base_raster_info = pygeoprocessing.get_raster_info(raster_path_list[0])
aligned_raster_path_list = []
# create a list of corresponding target paths for the aligned rasters
for raster_path in raster_path_list:
aligned_name = os.path.splitext(os.path.basename(raster_path))[0]
aligned_path = os.path.join(
intermediate_dir, f'{aligned_name}_aligned.tif')
aligned_raster_path_list.append(aligned_path)
# setup a list of resampling methods to use for each aligned raster
resample_method_list = ['near']*len(raster_path_list)
target_pixel_size = base_raster_info['pixel_size']
bounding_box_mode = 'intersection'
pygeoprocessing.align_and_resize_raster_stack(
raster_path_list, aligned_raster_path_list, resample_method_list,
target_pixel_size, bounding_box_mode)
return aligned_raster_path_list
def reclassify_rasters_step(raster_path_list, intermediate_dir):
"""Reclassify rasters.
Args:
raster_path_list (list): list of strings of raster file paths to
reclassify
intermediate_dir (string): location on disk to save reclassed rasters
Returns:
a list of strings for the reclassed raster paths
"""
# reclassification value map
reclass_map = {
-9999: 0,
-1: -1,
0: 0,
1: 1
}
reclassified_raster_list = []
for raster_path in raster_path_list:
target_name = os.path.splitext(os.path.basename(raster_path))[0]
target_path = os.path.join(intermediate_dir, f'{target_name}_reclass.tif')
reclassified_raster_list.append(target_path)
pygeoprocessing.reclassify_raster(
(raster_path, 1), reclass_map, target_path, TARGET_DATATYPE,
TARGET_NODATA)
return reclassified_raster_list
def sum_by_pixel(raster_path_list, out_dir):
"""Pixel sum the rasters treating nodata values as zero.
Args:
raster_path_list (list): list of strings of raster file paths to sum
out_dir (string): directory location on disk to save sum raster
Returns:
Nothing
"""
sum_raster_path = os.path.join(out_dir, 'sum_by_pixel.tif')
def sum_op(*arrays):
"""Computes the per pixel sum of the arrays.
This operation treats nodata values as 0.
Args:
*arrays (list): a list of numpy arrays
Returns:
Per pixel sums.
"""
sum_result = numpy.full(arrays[0].shape, 0, dtype=numpy.int16)
for array in arrays:
valid_mask = ~numpy.isclose(array, TARGET_NODATA)
sum_result[valid_mask] = sum_result[valid_mask] + array[valid_mask]
return numpy.where(sum_result == 0, TARGET_NODATA, sum_result)
# raster calculate expects a list of (raster_path, band) tuples
raster_path_band_list = [(raster_path, 1) for raster_path in raster_path_list]
pygeoprocessing.raster_calculator(
raster_path_band_list, sum_op, sum_raster_path, gdal.GDT_Int16,
TARGET_NODATA)
def nodata_count_by_pixel(raster_path_list, out_dir):
"""A nodata pixel count of rasters.
Args:
raster_path_list (list): list of strings of raster file paths
out_dir (string): directory location on disk to save raster
Returns:
Nothing
"""
nodata_count_raster_path = os.path.join(
out_dir, 'nodata_count_by_pixel.tif')
def nodata_count_op(*arrays):
"""Computes the nodata count per pixel of the arrays.
Args:
*arrays (list): a list of numpy arrays
Returns:
Nodata counts.
"""
nodata_count_result = numpy.full(arrays[0].shape, 0, dtype=numpy.int16)
for array in arrays:
nodata_mask = numpy.isclose(array, TARGET_NODATA)
nodata_count_result[nodata_mask] = nodata_count_result[nodata_mask] + 1
return numpy.where(
nodata_count_result == 0, TARGET_NODATA, nodata_count_result)
# raster calculate expects a list of (raster_path, band) tuples
raster_path_band_list = [(raster_path, 1) for raster_path in raster_path_list]
pygeoprocessing.raster_calculator(
raster_path_band_list, nodata_count_op, nodata_count_raster_path,
gdal.GDT_Int16, TARGET_NODATA)
def create_test_rasters(intermediate_dir):
"""Create 3 rasters for testing purposes.
Args:
intermediate_dir (string): directory location on disk to save raster
Returns:
List of raster paths
"""
srs = osr.SpatialReference()
srs.ImportFromEPSG(32731) # WGS84/UTM zone 31s
wkt = srs.ExportToWkt()
raster_path_list = []
for temp_number in [1,2,3]:
raster_path = os.path.join(
intermediate_dir, f'raster_temp_{temp_number}.tif')
int_array = numpy.ones((4,4), dtype=numpy.int16)
int_array[1,1] = TARGET_NODATA
int_array[temp_number, temp_number] = TARGET_NODATA
pygeoprocessing.numpy_array_to_raster(
int_array, TARGET_NODATA, (2, -2), (2, -2), wkt, raster_path)
raster_path_list.append(raster_path)
return raster_path_list
if __name__ == "__main__":
LOGGER.debug("Starting script execution.")
### Get list of rasters ###
# setup directories
remote_source_dir = '/Users/arbailey/Google Drive/My Drive/sargassum/s2toa_classified_v1'
remote_base_dir = '/Users/arbailey/Google Drive/My Drive/sargassum/paper2022/data/source/s2qr_sargassum' # Remote
local_source_dir = '/Users/arbailey/natcap/idb/data/work/sargassum/s2qr_sargassum/s2toa_classified_v1' # Local
local_base_dir = '/Users/arbailey/natcap/idb/data/work/sargassum/s2qr_sargassum' # Local
# base_dir = os.path.join('Users', 'arbailey', 'natcap', 'idb', 'data', 'work', 'sargassum')
source_dir = local_source_dir
base_dir = local_base_dir
# source_dir = os.path.join(base_dir, 's2qr_sargassum', 's2qr_sargassum')
intermediate_dir = os.path.join(base_dir, 'intermediate')
out_dir = os.path.join(base_dir, 'out')
# create intermediate and output directories if they don't exist
for new_dir in [intermediate_dir, out_dir]:
if not os.path.exists(new_dir):
os.mkdir(new_dir)
# collect the raster paths from the source directory
raster_path_list = [r for r in glob.glob(os.path.join(source_dir, "*mosaic_nd0.vrt"))]
LOGGER.debug(f"Number of source rasters: {len(raster_path_list)}")
### Option to create rasters for testing ###
# uncomment the below code to use three 4x4 test rasters
#raster_path_list = create_test_rasters(intermediate_dir)
### Align rasters ###
aligned_raster_list = align_rasters_step(raster_path_list, intermediate_dir)
### Reclass rasters ###
reclassified_raster_list = reclassify_rasters_step(
aligned_raster_list, intermediate_dir)
### Create output rasters ###
# These steps incorporated into s2_sargassum_metrics.py for different time ranges
# sum_by_pixel(reclassified_raster_list, out_dir)
# nodata_count_by_pixel(reclassified_raster_list, out_dir)
LOGGER.debug("Done.") | UTF-8 | Python | false | false | 8,084 | py | 71 | reclass_sum_and_nodata_count.py | 25 | 0.660317 | 0.647204 | 0 | 219 | 35.917808 | 117 |
codacy-badger/yt-media | 16,303,695,877,318 | 3116aa4ec38fdb4c126c3f9a78a1128a5eaadb2d | 430c308333dcbdc2b0ffd73bee0ff6ecccad0043 | /smorest_sfs/extensions/sqla/softdelete.py | 0fd9534bbe86d44c911530b3ce91a2be72363931 | []
| no_license | https://github.com/codacy-badger/yt-media | 375a2a4833fa6aaa838c0810099fbcc7af9c263a | 05256238fe45cd38b5335fe885e45781deebbfaa | refs/heads/master | 2021-05-20T00:54:32.928724 | 2020-04-01T08:20:44 | 2020-04-01T08:20:44 | 252,116,474 | 0 | 0 | null | true | 2020-04-01T08:29:30 | 2020-04-01T08:29:29 | 2020-04-01T08:20:58 | 2020-04-01T08:20:55 | 599 | 0 | 0 | 0 | null | false | false | # Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides the extended base objects.
"""
from typing import Any, Union
from flask_sqlalchemy import BaseQuery
from sqlalchemy.orm.base import _entity_descriptor
from .db_instance import db
class QueryWithSoftDelete(BaseQuery):
"""
    Soft-delete query module.
    The deleted field decides whether an object is returned by queries.
"""
_with_deleted = False
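    # Illustrative usage (assuming a model whose query_class is QueryWithSoftDelete
    # and which has a boolean deleted column):
    #   Model.query.all()                      -> only rows with deleted == False
    #   Model.query.get(ident)                 -> None if the row was soft-deleted
    #   Model.query.with_deleted().get(ident)  -> the row, even if soft-deleted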
def __new__(cls, *args: Any, **kwargs: Any) -> db.Model:
obj = super(QueryWithSoftDelete, cls).__new__(cls)
obj._with_deleted = kwargs.pop("_with_deleted", False)
if len(args) > 0:
super(QueryWithSoftDelete, obj).__init__(*args, **kwargs)
return obj.filter_by(deleted=False) if not obj._with_deleted else obj
return obj
def __init__(self, *args, **kwargs): # pylint: disable=W0231
pass
def with_deleted(self) -> BaseQuery:
return self.__class__(
db.class_mapper(self._mapper_zero().class_),
session=db.session(),
_with_deleted=True,
)
def _get(self, ident) -> Union[db.Model, None]:
"""提供原本的get方法"""
return super(QueryWithSoftDelete, self).get(ident)
def get(self, ident) -> Union[db.Model, None]:
obj = self.with_deleted()._get(ident) # pylint: disable=W0212
return obj if obj is None or self._with_deleted or not obj.deleted else None
def filter_like_by(self, **kwargs) -> BaseQuery:
"""like方法"""
clauses = [
_entity_descriptor(self._joinpoint_zero(), key).like("%{}%".format(value))
for key, value in kwargs.items()
]
return self.filter(*clauses)
| UTF-8 | Python | false | false | 2,278 | py | 58 | softdelete.py | 55 | 0.645777 | 0.638056 | 0 | 65 | 32.876923 | 86 |
Mishka1012/Lesson-19 | 2,508,260,906,849 | 61975be0dd469d5ae76bb4ec0994e5c90adbfd25 | 58d8b440b9f2a515c07d31db0794b2cca4714fee | /classificator/userflow/views.py | 939dab257bd70a1bee3dcbc3621bc5a9b52cad95 | []
| no_license | https://github.com/Mishka1012/Lesson-19 | 5d38bd2b65303fb4aefaec5bf6f1e8e8c017ab6a | 35f3b90db596f8ab1aa6b555d24a35557871ada0 | refs/heads/master | 2023-06-05T23:49:21.230780 | 2021-06-27T14:12:08 | 2021-06-27T14:12:08 | 380,756,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from .forms import CreateUserForm, UpdateProfileForm, UpdateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from .decorators import unauthenticated_user, allowed_users
@allowed_users(allowed_roles=['user'])
def update_profile_page(request):
if request.method == 'POST':
form1 = UpdateProfileForm(request.POST, request.FILES, instance=request.user.profile)
if form1.is_valid():
form1.save()
form2 = UpdateUserForm(request.POST, instance=request.user)
if form2.is_valid():
form2.save()
return redirect('user')
form = UpdateProfileForm(instance=request.user.profile)
form2 = UpdateUserForm(instance=request.user)
context = {
'userform': form2,
'form': form,
}
return render(request, 'update_user.html', context=context)
# Create your views here.
@allowed_users(allowed_roles=['user'])
def user_page(request):
images = request.user.image_set.all()
context = {
'images': images,
}
return render(request, 'user.html', context)
@unauthenticated_user
def register_page(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Account was created for ' + form.cleaned_data.get('username'))
return redirect('login')
context = {
'form': form,
}
return render(request, 'register.html', context)
@unauthenticated_user
def login_page(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username or Password is incorrect!')
return render(request, 'registration/login.html')
@allowed_users(allowed_roles=['user'])
def logout_page(request):
logout(request)
return redirect('login') | UTF-8 | Python | false | false | 2,205 | py | 15 | views.py | 12 | 0.660317 | 0.656689 | 0 | 66 | 32.424242 | 101 |
dankvul/adjust_test | 3,126,736,206,569 | d9d261e4da9ecb856f2acacd6f0db95668da964b | 118a1bfb5a97b9143b0607ee865bfb797d50ca91 | /backend/main.py | 4a8483aaafe52bbb0a398f7eeec0e7484b263fbf | []
| no_license | https://github.com/dankvul/adjust_test | d1dcc2fe1e4525268906a6255dd04d9c7bf3582f | 5275712a228be348e239675dc59d57c41e0b3628 | refs/heads/master | 2023-04-13T16:11:31.159531 | 2021-04-26T20:58:30 | 2021-04-26T20:58:30 | 361,883,076 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fastapi import FastAPI
from starlette.middleware import cors
from api.routes import api_router
from core.utils import exception_handlers
from database.db_events import close_db_connection, test_db_connection
from config import *
docs_config = (
{
"docs_url": "/api/docs/",
"redoc_url": "/api/redocs/",
"openapi_url": "/api/docs/openapi.json",
}
if not IS_PRODUCTION
else {}
)
app = FastAPI(
title="Adjust Test Task",
exception_handlers=exception_handlers,
on_startup=[test_db_connection],
on_shutdown=[close_db_connection],
**docs_config,
)
#########
# Routes
##########
app.include_router(api_router, prefix="/api")
##########
# Middlewares
##########
app.add_middleware(
cors.CORSMiddleware,
allow_origins=ALLOWED_ORIGINS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
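# Local development note (assumed, not taken from this repository's docs): an ASGI
# server such as uvicorn would typically serve this app, e.g. `uvicorn main:app --reload`.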
| UTF-8 | Python | false | false | 886 | py | 17 | main.py | 15 | 0.638826 | 0.638826 | 0 | 43 | 19.581395 | 70 |
tarpeyd12/pylon | 5,995,774,376,977 | def3fa5949af1c9184b7376f77ed1f4b4d110b99 | 80ce0a1103d437eb3b0b9daf4635ee82a03fff99 | /pylon/archiveCode/Data/code/pylonsupport/classes/simulation.py | fc04e14efed01a0478ba5c44a8fc4bc4a58e8a62 | []
| no_license | https://github.com/tarpeyd12/pylon | 51f1f623fff4a2f6556a257963c16a57e4e6d869 | 9072968c60a0cb855730a633d5f0e017f6fbb2aa | refs/heads/master | 2021-01-10T02:49:18.012035 | 2018-03-02T10:04:28 | 2018-03-02T10:04:28 | 47,670,575 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | try:
import pylon
import _pylon
import _pylon_calc
import _pylon_draw
import math
from pylonclasses import *
from objects import *
from position import *
except ImportError:
print 'Importing failure for the required modules in \"classes.simulation.py\".'
print _pylon.exit(-1)
quit()
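# Simulation is a thin object-oriented wrapper around the name-keyed pylon.sim_* and
# pylon.object_* calls: each method simply forwards to the pylon module using this
# simulation's name.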
class Simulation:
def __init__(self,name,col):
self.name = name
self._collisions = col
pylon.sim_new(self.name,self._collisions)
pylon.sim_weight(self.name,True)
self.start()
def addObjectFromString(self,objstr):
return pylon.sim_add_object_s(self.name,objstr)
def newObject(self,obj):
if pylon.object_new(self.name,obj) or not self.hasObject(obj):
print "Cannot create object:'",obj,"' in sim:'",self.name,"'"
return None
return Object(obj,self.name)
def newObjectFromFile(self,objname,filename,filetype):
if pylon.object_new_fromfile(self.name,objname,filename,filetype) or not pylon.object_check(self.name,objname):
print "Cannot create object:'",objname,"' in sim:'",self.name,"' from file:'",filename,"' of type:'",filetype,"'"
return None
return Object(objname,self.name)
def hasObject(self,objname):
if pylon.object_check(self.name,objname):
return True
return False
def getObject(self,objname):
return Object(objname,self.name)
def clearAllObjects(self):
return pylon.sim_clear(self.name)
def removeObject(self,objname):
if pylon.sim_remove_object(self.name,objname):
print "pylon.sim_remove_object(",self.name,",",objname,")"
return False
return True
def setNewtonianGravity(self,chs):
pylon.sim_weight(self.name,chs)
def newtonianGravityOn(self):
pylon.sim_weight(self.name,True)
def newtonianGravityOff(self):
pylon.sim_weight(self.name,False)
def setGravityVector(self,vect):
pylon.sim_set_gravity_3f( self.name, vect.x, vect.y, vect.z )
def getGravityVector(self):
vect = pylon.sim_get_gravity_3f( self.name )
return Position( vect[0], vect[1], vect[2] )
def canCollide(self):
return self._collisions
def setCollisionIterations(self,iters):
if self._collisions:
pylon.sim_set_itter(self.name,iters)
def getCollisionIterations(self):
if self._collisions:
return pylon.sim_get_itter(self.name)
return 0
def stop(self):
pylon.sim_toggle(self.name,False)
def restart(self):
pylon.sim_toggle(self.name,True)
def start(self):
pylon.sim_toggle(self.name,True)
def halt(self):
pylon.sim_halt(self.name)
def resume(self):
pylon.sim_toggle(self.name,True)
pylon.sim_visibility(self.name,True)
| UTF-8 | Python | false | false | 2,567 | py | 260 | simulation.py | 210 | 0.714453 | 0.711726 | 0 | 107 | 22.953271 | 116 |
eduardonery1/PLIMM-Bank | 2,894,807,957,671 | 186efd5d97d8e2842d77ffc42c1f27bfbd1a2404 | 4b8d49f0a01fdc9a367a188448b1f872c93152a7 | /swagger_server/models/card_block_request.py | 163bf679cedf49e566cee705f69cf0bf60394638 | []
| no_license | https://github.com/eduardonery1/PLIMM-Bank | c2db258073eaf57d13e23940921b84012b05612e | ac2f7cc0d94cb3f3665f45a1cbdc25479fcbd17c | refs/heads/master | 2023-01-30T16:02:24.571895 | 2020-12-13T20:56:58 | 2020-12-13T20:56:58 | 321,154,663 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.source_audit import SourceAudit # noqa: F401,E501
from swagger_server import util
class CardBlockRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, issuer_card_block_id: str=None, block_code: int=None, reason: str=None, source_audit: SourceAudit=None): # noqa: E501
"""CardBlockRequest - a model defined in Swagger
:param issuer_card_block_id: The issuer_card_block_id of this CardBlockRequest. # noqa: E501
:type issuer_card_block_id: str
:param block_code: The block_code of this CardBlockRequest. # noqa: E501
:type block_code: int
:param reason: The reason of this CardBlockRequest. # noqa: E501
:type reason: str
:param source_audit: The source_audit of this CardBlockRequest. # noqa: E501
:type source_audit: SourceAudit
"""
self.swagger_types = {
'issuer_card_block_id': str,
'block_code': int,
'reason': str,
'source_audit': SourceAudit
}
self.attribute_map = {
'issuer_card_block_id': 'issuerCardBlockId',
'block_code': 'blockCode',
'reason': 'reason',
'source_audit': 'sourceAudit'
}
self._issuer_card_block_id = issuer_card_block_id
self._block_code = block_code
self._reason = reason
self._source_audit = source_audit
@classmethod
def from_dict(cls, dikt) -> 'CardBlockRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The CardBlockRequest of this CardBlockRequest. # noqa: E501
:rtype: CardBlockRequest
"""
return util.deserialize_model(dikt, cls)
@property
def issuer_card_block_id(self) -> str:
"""Gets the issuer_card_block_id of this CardBlockRequest.
        Unique identifier of the block request. Generated by the issuer.  # noqa: E501
:return: The issuer_card_block_id of this CardBlockRequest.
:rtype: str
"""
return self._issuer_card_block_id
@issuer_card_block_id.setter
def issuer_card_block_id(self, issuer_card_block_id: str):
"""Sets the issuer_card_block_id of this CardBlockRequest.
        Unique identifier of the block request. Generated by the issuer.  # noqa: E501
:param issuer_card_block_id: The issuer_card_block_id of this CardBlockRequest.
:type issuer_card_block_id: str
"""
self._issuer_card_block_id = issuer_card_block_id
@property
def block_code(self) -> int:
"""Gets the block_code of this CardBlockRequest.
        Code identifying the type of block.  # noqa: E501
:return: The block_code of this CardBlockRequest.
:rtype: int
"""
return self._block_code
@block_code.setter
def block_code(self, block_code: int):
"""Sets the block_code of this CardBlockRequest.
        Code identifying the type of block.  # noqa: E501
:param block_code: The block_code of this CardBlockRequest.
:type block_code: int
"""
if block_code is None:
raise ValueError("Invalid value for `block_code`, must not be `None`") # noqa: E501
self._block_code = block_code
@property
def reason(self) -> str:
"""Gets the reason of this CardBlockRequest.
        Reason for the block.  # noqa: E501
:return: The reason of this CardBlockRequest.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason: str):
"""Sets the reason of this CardBlockRequest.
        Reason for the block.  # noqa: E501
:param reason: The reason of this CardBlockRequest.
:type reason: str
"""
self._reason = reason
@property
def source_audit(self) -> SourceAudit:
"""Gets the source_audit of this CardBlockRequest.
:return: The source_audit of this CardBlockRequest.
:rtype: SourceAudit
"""
return self._source_audit
@source_audit.setter
def source_audit(self, source_audit: SourceAudit):
"""Sets the source_audit of this CardBlockRequest.
:param source_audit: The source_audit of this CardBlockRequest.
:type source_audit: SourceAudit
"""
self._source_audit = source_audit
| UTF-8 | Python | false | false | 4,734 | py | 118 | card_block_request.py | 117 | 0.62336 | 0.612357 | 0 | 149 | 30.718121 | 141 |
Rhytham/face-detection-and-recognition-using-KNN | 10,531,259,844,144 | 692dc0d868bd78ecd7ddcb0e361b3752ec0b1c8f | 307d95f3933b1425b0da2677efc3eefe9a28c83d | /detect.py | 1b8fad5572ee8ca0d6166761e48221661ce859f3 | [
"Apache-2.0"
]
| permissive | https://github.com/Rhytham/face-detection-and-recognition-using-KNN | d974b0796df1ad7fed5f9d85961820cb3f61df91 | 29e1ba8df5f9fca141a88f6246bfe5114d1d8492 | refs/heads/master | 2020-07-25T21:02:36.695224 | 2019-09-14T10:30:40 | 2019-09-14T10:30:40 | 208,423,299 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
from os import path
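# This script collects training data for the recognizer: it grabs webcam frames,
# keeps the largest detected face from each frame, resizes it to 100x100, flattens it
# into a feature vector, and appends 30 such vectors labelled with the entered name
# to face_data.npy (the dataset used for the project's KNN-based recognition).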
cap = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
name = input("Enter your name : ")
counter = 30
face_list = []
while True:
ret, frame = cap.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = classifier.detectMultiScale(gray)
areas = []
for face in faces:
x, y, w, h = face
areas.append((w*h, face))
if len(faces) > 0:
            face = max(areas, key=lambda a: a[0])[1]  # largest face; key avoids comparing arrays on ties
x, y, w, h = face
face_img = gray[y:y+h, x:x+w]
face_img = cv2.resize(face_img, (100, 100))
face_flatten = face_img.flatten()
face_list.append(face_flatten)
counter -= 1
print("loaded with", 30 - counter)
if counter <= 0:
break
cv2.imshow("video", face_img)
key = cv2.waitKey(1)
if key & 0xff == ord('q'):
break
X = np.array(face_list)
y = np.full((len(X), 1), name)
data = np.hstack([y, X])
print(data.shape)
print(data.dtype)
cap.release()
cv2.destroyAllWindows()
if path.exists("face_data.npy"):
face_data = np.load("face_data.npy")
face_data = np.vstack([face_data, data])
np.save("face_data.npy", face_data)
else:
np.save("face_data.npy", data)
| UTF-8 | Python | false | false | 1,439 | py | 3 | detect.py | 2 | 0.529534 | 0.510076 | 0 | 66 | 19.772727 | 73 |
jmgc/pyston | 85,899,395,570 | 2f5f569468ca7126718a4ac3a997aa73550ace5f | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /microbenchmarks/sre_optimize_unicode.py | 8c99da77a79f9dab8f436f98bf584dc0dd124cfc | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | https://github.com/jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | true | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | 2020-07-30T23:21:24 | 2020-09-11T14:38:38 | 23,735 | 0 | 0 | 0 | Python | false | false | import sre_compile
import sre_constants
def identity(o):
return o
charset = [(sre_constants.RANGE, (128, 65535))]
for i in xrange(100):
sre_compile._optimize_unicode(charset, identity)
# print sre_compile._optimize_charset(charset, identity)
| UTF-8 | Python | false | false | 257 | py | 1,040 | sre_optimize_unicode.py | 963 | 0.719844 | 0.677043 | 0 | 11 | 22.363636 | 60 |
804173948/100Days-Public | 6,648,609,385,571 | ea879df8d664a224956a9234054df61414837e47 | 4ddddff9162f1b7ced548e3351c8d4fb7e26bd6c | /Server/QuestionGetterTest/rename.py | dc3fe96af5b41bdb7248404bf1d9e67643998ae5 | []
| no_license | https://github.com/804173948/100Days-Public | 78e804412ae45bb945f7f18dcefb5e32da4bf1c7 | ebbe30d8f88c182c4d1dcfbf73b9695887de4245 | refs/heads/master | 2020-05-22T07:51:12.185206 | 2019-05-13T16:26:48 | 2019-05-13T16:26:48 | 186,271,633 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, re
""" rename pictures
for _, _, files in os.walk('pictures'):
for file in files:
name = os.path.splitext(file)[0]
ext = os.path.splitext(file)[1]
if ext != '.png':
num = int(ext[4:])
new_name = str(num)+name+'.png'
print("file: "+file+" new: "+new_name)
os.system('COPY pictures\\'+file+' pictures\\'+new_name)#+' '+new_name)
os.system('DEL pictures\\'+file)#+' '+new_name)
"""
def repl(matched):
num = str(matched.group('num'))
name = str(matched.group('name'))
return '\\\\'+num+name
reg = r'\\\\(?P<name>(.+?).png)(?P<num>\d+)'
for i in [0,1,2,5,6,7]:
file = 'subject_'+str(i)+'.que'
with open(file, 'r', encoding = 'utf-8') as f1:
data = f1.read()
data = re.sub(reg, repl, data)
with open(file+'.bak', 'w', encoding = 'utf-8') as f2:
f2.write(data)
| UTF-8 | Python | false | false | 821 | py | 131 | rename.py | 20 | 0.565164 | 0.546894 | 0 | 30 | 26.366667 | 74 |
mesosphere/cloudkeeper | 7,541,962,608,966 | f3eb3df11a20a2d71d852bc37a85de58438dbac1 | 516f05f5abd5d92dd84753d34bef704b3dd4dffe | /plugins/gcp/resoto_plugin_gcp/resources/billing.py | c7b723175a6e0dfb0afccbb23e52ffb8a555cac5 | [
"Apache-2.0"
]
| permissive | https://github.com/mesosphere/cloudkeeper | 604a7aa97ba01f8ab6eaae9378a1c26748e74bcd | b06fedc4da7046ebff0ae3f439fb4d71a49ece6a | refs/heads/main | 2023-06-07T09:54:16.909012 | 2023-06-07T08:17:01 | 2023-06-07T08:17:01 | 255,744,821 | 99 | 13 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Type, cast
from attr import define, field
from resoto_plugin_gcp.gcp_client import GcpApiSpec
from resoto_plugin_gcp.resources.base import GcpResource, GcpDeprecationStatus, GraphBuilder
from resotolib.baseresources import ModelReference
from resotolib.json_bender import Bender, S, Bend, ForallBend
from resotolib.types import Json
@define(eq=False, slots=False)
class GcpBillingAccount(GcpResource):
kind: ClassVar[str] = "gcp_billing_account"
reference_kinds: ClassVar[ModelReference] = {
"successors": {"default": ["gcp_project_billing_info"]},
}
api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
service="cloudbilling",
version="v1",
accessors=["billingAccounts"],
action="list",
request_parameter={},
request_parameter_in=set(),
response_path="billingAccounts",
response_regional_sub_path=None,
)
mapping: ClassVar[Dict[str, Bender]] = {
"id": S("name").or_else(S("id")).or_else(S("selfLink")),
"tags": S("labels", default={}),
"name": S("name"),
"ctime": S("creationTimestamp"),
"description": S("description"),
"link": S("selfLink"),
"label_fingerprint": S("labelFingerprint"),
"deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
"display_name": S("displayName"),
"master_billing_account": S("masterBillingAccount"),
"open": S("open"),
}
display_name: Optional[str] = field(default=None)
master_billing_account: Optional[str] = field(default=None)
open: Optional[bool] = field(default=None)
def post_process(self, graph_builder: GraphBuilder, source: Json) -> None:
for info in GcpProjectBillingInfo.collect_resources(graph_builder, name=self.name):
graph_builder.add_edge(self, node=info)
@define(eq=False, slots=False)
class GcpProjectBillingInfo(GcpResource):
kind: ClassVar[str] = "gcp_project_billing_info"
api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
service="cloudbilling",
version="v1",
accessors=["billingAccounts", "projects"],
action="list",
request_parameter={"name": "{name}"},
request_parameter_in={"name"},
response_path="projectBillingInfo",
response_regional_sub_path=None,
)
mapping: ClassVar[Dict[str, Bender]] = {
"id": S("name").or_else(S("id")).or_else(S("selfLink")),
"tags": S("labels", default={}),
"name": S("name"),
"ctime": S("creationTimestamp"),
"description": S("description"),
"link": S("selfLink"),
"label_fingerprint": S("labelFingerprint"),
"deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
"billing_account_name": S("billingAccountName"),
"billing_enabled": S("billingEnabled"),
"project_billing_info_project_id": S("projectId"),
}
billing_account_name: Optional[str] = field(default=None)
billing_enabled: Optional[bool] = field(default=None)
project_billing_info_project_id: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpService(GcpResource):
kind: ClassVar[str] = "gcp_service"
reference_kinds: ClassVar[ModelReference] = {
"successors": {"default": ["gcp_sku"]},
}
api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
service="cloudbilling",
version="v1",
accessors=["services"],
action="list",
request_parameter={},
request_parameter_in=set(),
response_path="services",
response_regional_sub_path=None,
)
mapping: ClassVar[Dict[str, Bender]] = {
"id": S("serviceId"),
"tags": S("labels", default={}),
"name": S("name"),
"display_name": S("displayName"),
"ctime": S("creationTimestamp"),
"description": S("description"),
"link": S("selfLink"),
"label_fingerprint": S("labelFingerprint"),
"deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
"business_entity_name": S("businessEntityName"),
}
business_entity_name: Optional[str] = field(default=None)
display_name: Optional[str] = field(default=None)
@classmethod
def collect(cls: Type[GcpResource], raw: List[Json], builder: GraphBuilder) -> List[GcpResource]:
# Additional behavior: iterate over list of collected GcpService and for each:
# - collect related GcpSku
result: List[GcpResource] = super().collect(raw, builder) # type: ignore
SERVICES_COLLECT_LIST = [
"Compute Engine",
]
service_names = [
service.name for service in cast(List[GcpService], result) if service.display_name in SERVICES_COLLECT_LIST
]
for service_name in service_names:
builder.submit_work(GcpSku.collect_resources, builder, parent=service_name)
return result
def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
def filter(node: GcpResource) -> bool:
return isinstance(node, GcpSku) and node.name is not None and node.name.startswith(self.id)
builder.add_edges(self, filter=filter)
@define(eq=False, slots=False)
class GcpCategory:
kind: ClassVar[str] = "gcp_category"
mapping: ClassVar[Dict[str, Bender]] = {
"resource_family": S("resourceFamily"),
"resource_group": S("resourceGroup"),
"service_display_name": S("serviceDisplayName"),
"usage_type": S("usageType"),
}
resource_family: Optional[str] = field(default=None)
resource_group: Optional[str] = field(default=None)
service_display_name: Optional[str] = field(default=None)
usage_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpGeoTaxonomy:
kind: ClassVar[str] = "gcp_geo_taxonomy"
mapping: ClassVar[Dict[str, Bender]] = {"regions": S("regions", default=[]), "type": S("type")}
regions: List[str] = field(factory=list)
type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpAggregationInfo:
kind: ClassVar[str] = "gcp_aggregation_info"
mapping: ClassVar[Dict[str, Bender]] = {
"aggregation_count": S("aggregationCount"),
"aggregation_interval": S("aggregationInterval"),
"aggregation_level": S("aggregationLevel"),
}
aggregation_count: Optional[int] = field(default=None)
aggregation_interval: Optional[str] = field(default=None)
aggregation_level: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpMoney:
kind: ClassVar[str] = "gcp_money"
mapping: ClassVar[Dict[str, Bender]] = {
"currency_code": S("currencyCode"),
"nanos": S("nanos"),
"units": S("units"),
}
currency_code: Optional[str] = field(default=None)
nanos: Optional[int] = field(default=None)
units: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpTierRate:
kind: ClassVar[str] = "gcp_tier_rate"
mapping: ClassVar[Dict[str, Bender]] = {
"start_usage_amount": S("startUsageAmount"),
"unit_price": S("unitPrice", default={}) >> Bend(GcpMoney.mapping),
}
start_usage_amount: Optional[float] = field(default=None)
unit_price: Optional[GcpMoney] = field(default=None)
@define(eq=False, slots=False)
class GcpPricingExpression:
kind: ClassVar[str] = "gcp_pricing_expression"
mapping: ClassVar[Dict[str, Bender]] = {
"base_unit": S("baseUnit"),
"base_unit_conversion_factor": S("baseUnitConversionFactor"),
"base_unit_description": S("baseUnitDescription"),
"display_quantity": S("displayQuantity"),
"tiered_rates": S("tieredRates", default=[]) >> ForallBend(GcpTierRate.mapping),
"usage_unit": S("usageUnit"),
"usage_unit_description": S("usageUnitDescription"),
}
base_unit: Optional[str] = field(default=None)
base_unit_conversion_factor: Optional[float] = field(default=None)
base_unit_description: Optional[str] = field(default=None)
display_quantity: Optional[float] = field(default=None)
tiered_rates: List[GcpTierRate] = field(factory=list)
usage_unit: Optional[str] = field(default=None)
usage_unit_description: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpPricingInfo:
kind: ClassVar[str] = "gcp_pricing_info"
mapping: ClassVar[Dict[str, Bender]] = {
"aggregation_info": S("aggregationInfo", default={}) >> Bend(GcpAggregationInfo.mapping),
"currency_conversion_rate": S("currencyConversionRate"),
"effective_time": S("effectiveTime"),
"pricing_expression": S("pricingExpression", default={}) >> Bend(GcpPricingExpression.mapping),
"summary": S("summary"),
}
aggregation_info: Optional[GcpAggregationInfo] = field(default=None)
currency_conversion_rate: Optional[float] = field(default=None)
effective_time: Optional[datetime] = field(default=None)
pricing_expression: Optional[GcpPricingExpression] = field(default=None)
summary: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class GcpSku(GcpResource):
kind: ClassVar[str] = "gcp_sku"
api_spec: ClassVar[GcpApiSpec] = GcpApiSpec(
service="cloudbilling",
version="v1",
accessors=["services", "skus"],
action="list",
request_parameter={"parent": "{parent}"},
request_parameter_in={"parent"},
response_path="skus",
response_regional_sub_path=None,
)
mapping: ClassVar[Dict[str, Bender]] = {
"id": S("skuId"),
"tags": S("labels", default={}),
"name": S("name"),
"ctime": S("creationTimestamp"),
"description": S("description"),
"link": S("selfLink"),
"label_fingerprint": S("labelFingerprint"),
"deprecation_status": S("deprecated", default={}) >> Bend(GcpDeprecationStatus.mapping),
"category": S("category", default={}) >> Bend(GcpCategory.mapping),
"geo_taxonomy": S("geoTaxonomy", default={}) >> Bend(GcpGeoTaxonomy.mapping),
"sku_pricing_info": S("pricingInfo", default=[]) >> ForallBend(GcpPricingInfo.mapping),
"service_provider_name": S("serviceProviderName"),
"service_regions": S("serviceRegions", default=[]),
"sku_id": S("skuId"),
}
category: Optional[GcpCategory] = field(default=None)
geo_taxonomy: Optional[GcpGeoTaxonomy] = field(default=None)
sku_pricing_info: List[GcpPricingInfo] = field(factory=list)
service_provider_name: Optional[str] = field(default=None)
service_regions: List[str] = field(factory=list)
usage_unit_nanos: Optional[int] = field(default=None)
def post_process(self, graph_builder: GraphBuilder, source: Json) -> None:
if len(self.sku_pricing_info) > 0:
if not (pricing_expression := self.sku_pricing_info[0].pricing_expression):
return
tiered_rates = pricing_expression.tiered_rates
cost = -1
if len(tiered_rates) == 1:
if tiered_rates[0].unit_price and tiered_rates[0].unit_price.nanos:
cost = tiered_rates[0].unit_price.nanos
else:
for tiered_rate in tiered_rates:
if sua := tiered_rate.start_usage_amount:
if sua > 0:
if tiered_rate.unit_price and tiered_rate.unit_price.nanos:
cost = tiered_rate.unit_price.nanos
break
if cost > -1:
self.usage_unit_nanos = cost
resources = [GcpBillingAccount, GcpService]
| UTF-8 | Python | false | false | 11,876 | py | 805 | billing.py | 481 | 0.633799 | 0.632705 | 0 | 294 | 39.394558 | 119 |
Kotletta-TT/cbot-cm-sim-consumer-megafon | 12,661,563,594,198 | 45eb363eab15d905519e5217c791bf030c0e060d | e66b04814b2f6c4840bea072b4e335fda463c364 | /db/db_helper.py | 7c05b88180eeedee0e00d71b38f74e62a326f054 | []
| no_license | https://github.com/Kotletta-TT/cbot-cm-sim-consumer-megafon | 26a2fb2f1c8ad609dd061544c407c33fde3a7317 | 17bcfe6226e0d8da45983396862f5d1586f21f1c | refs/heads/master | 2023-02-26T11:52:01.905618 | 2021-02-05T15:03:51 | 2021-02-05T15:03:51 | 336,256,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
sys.path.insert(0, '..')
from config.conf import DB_DRIVER, DB_USER, DB_PASS, DB_HOST, DB_PORT, DB_NAME
# Quirk: to create/apply a migration, "config" has to be prefixed with a dot (relative import)
url = URL(drivername=DB_DRIVER, username=DB_USER, password=DB_PASS, host=DB_HOST, port=DB_PORT, database=DB_NAME,
query={"charset": "utf8mb4"})
engine = create_engine(url, echo=True)
Session = sessionmaker(bind=engine) | UTF-8 | Python | false | false | 612 | py | 11 | db_helper.py | 7 | 0.756856 | 0.751371 | 0 | 15 | 35.533333 | 113 |
frankmalcolmkembery/conjure-up | 979,252,566,938 | b989009be61fcc783cef4e33ac57d7340f04e8ee | d41456d49124025f97c1bce47112e9a58d81789d | /test/test_controllers_summary_tui.py | dc44de5a3953dfd1ee301cd9d2adfc1304d0894d | [
"MIT"
]
| permissive | https://github.com/frankmalcolmkembery/conjure-up | 13bad0d4016f72f8efd1f84a73adc9b3c7f5c3f3 | 891f089dbaa0de43c737aed587d1871612dd39a7 | refs/heads/master | 2020-05-30T13:57:00.156316 | 2016-08-30T22:43:42 | 2016-08-30T22:43:42 | 67,026,271 | 1 | 1 | null | true | 2016-08-31T10:04:24 | 2016-08-31T10:04:24 | 2016-08-17T05:21:47 | 2016-08-30T22:43:44 | 1,108 | 0 | 0 | 0 | null | null | null | #!/usr/bin/env python
#
# tests controllers/summary/tui.py
#
# Copyright 2016 Canonical, Ltd.
import unittest
from unittest.mock import patch, MagicMock, sentinel
from conjureup.controllers.summary.tui import SummaryController
class SummaryTUIRenderTestCase(unittest.TestCase):
def setUp(self):
self.utils_patcher = patch(
'conjureup.controllers.summary.tui.utils')
self.mock_utils = self.utils_patcher.start()
self.app_patcher = patch(
'conjureup.controllers.summary.tui.app')
mock_app = self.app_patcher.start()
mock_app.ui = MagicMock(name="app.ui")
self.controller = SummaryController()
self.controller.save_path = sentinel.savepath
def tearDown(self):
self.utils_patcher.stop()
self.app_patcher.stop()
def test_render_empty(self):
"call render with empty results"
with patch("conjureup.controllers.summary.tui.common") as m_c:
self.controller.render({})
m_c.write_results.assert_called_once_with({}, sentinel.savepath)
| UTF-8 | Python | false | false | 1,082 | py | 154 | test_controllers_summary_tui.py | 129 | 0.669131 | 0.665434 | 0 | 37 | 28.243243 | 76 |
bing1100/pinball | 13,950,053,796,694 | 808603b3b60eab3209b5484898dd2b1452b2d9fa | 24870303ba0b04896cc40184d51a4efb82c9a32b | /generate_pinball_poincare.py | d5393b06399792e93037172fcb1659203bb4770c | []
| no_license | https://github.com/bing1100/pinball | 2588d195621cb6910de8fcd88226d2088f9e858c | 022dc73a825f6e7f7f4948811e175d595bd7941e | refs/heads/master | 2021-08-23T18:27:03.396192 | 2017-12-06T02:13:15 | 2017-12-06T02:13:15 | 111,614,163 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pinball as pinball
import poincare as poincare
import mpmath as mp
# Global variables for the cd and cr ratio
CD_CR_RATIO = 2.5
STEP_X = 0.01
STEP_Y = mp.pi / 360
MAX_REFLECTIONS = 2
domain_x = [0, 1]
domain_y = [-mp.pi/2 , mp.pi/2]
global_intervals = []
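# float-step range generator: yields start, start+incr, ... for as long as the value stays <= stop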
def frange (start, stop, incr):
i = start
while i <= stop:
yield i
i += incr
pinball.set_cd_cr_ratio(CD_CR_RATIO)
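# sweep the (x, theta) grid of initial conditions; consecutive points for which pinball.run
# reports success are collected into intervals and passed to the Poincare plot below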
for x_val in frange(domain_x[0], domain_x[1], STEP_X):
interval_x = []
interval_theta = []
for theta in frange(domain_y[0], domain_y[1], STEP_Y):
cline = pinball.create_line(x_val, theta)
res = pinball.run(cline, MAX_REFLECTIONS, False)
if res[0]:
interval_x.append(x_val)
interval_theta.append(theta)
#print([interval_x, interval_theta])
else:
if len(interval_x) != 0:
global_intervals.append([interval_x, interval_theta])
interval_x = []
interval_theta = []
fig = poincare.Poincare(global_intervals)
fig.show_poincare()
| UTF-8 | Python | false | false | 1,096 | py | 4 | generate_pinball_poincare.py | 4 | 0.57573 | 0.558394 | 0 | 53 | 19.45283 | 69 |
kelvincjr/shared | 14,474,039,808,108 | 2a5b0224d0b5830ac0deca8a85c74a5fd009f9d3 | caa06eca3eef2549d5088f6487201f734b35822e | /gaiic2022/ark-nlp-main_nezha/ark_nlp/factory/predictor/base/__init__.py | ddab79b0ef8e5949272ea89d0227e465d7a3c046 | [
"Apache-2.0"
]
| permissive | https://github.com/kelvincjr/shared | f947353d13e27530ba44ea664e27de51db71a5b6 | 4bc4a12b0ab44c6847a67cbd7639ce3c025f38f8 | refs/heads/master | 2023-06-23T19:38:14.801083 | 2022-05-17T09:45:22 | 2022-05-17T09:45:22 | 141,774,490 | 6 | 1 | null | false | 2023-06-12T21:30:07 | 2018-07-21T02:22:34 | 2023-01-30T12:41:49 | 2023-06-12T21:30:06 | 619,370 | 5 | 1 | 5 | Python | false | false | from ark_nlp.factory.predictor.base._predictor import Predictor
from ark_nlp.factory.predictor.base._sequence_classification import SequenceClassificationPredictor
from ark_nlp.factory.predictor.base._token_classification import TokenClassificationPredictor
| UTF-8 | Python | false | false | 258 | py | 488 | __init__.py | 255 | 0.875969 | 0.875969 | 0 | 3 | 85 | 99 |
LordSir101/GameThing | 17,875,653,913,556 | a873a22ed3687e7d84bab796752d34acacee91c5 | dd5649ce310e655d46526ba961242f93569fd071 | /player.py | 4ac88f37243ebbba28fde6be281bd6af23d479e1 | []
| no_license | https://github.com/LordSir101/GameThing | 783820e0d040a82c075c9271c0d3f8c788c316d3 | 97cdd5191cc4f7a64803f57a5fd8cde9be952a19 | refs/heads/master | 2022-11-04T23:10:41.077602 | 2020-06-22T20:25:47 | 2020-06-22T20:25:47 | 257,407,091 | 0 | 0 | null | false | 2020-06-22T20:25:48 | 2020-04-20T21:23:25 | 2020-05-03T19:27:46 | 2020-06-22T20:25:48 | 677 | 0 | 0 | 0 | Python | false | false | from pygame import image, Color
import pygame
class Player:
def __init__(self, width, height):
# load images for pacman animation
self.folder = "animations/pacman_move/"
self.imgs_alive = [pygame.image.load(self.folder+"pacman1.png"),
pygame.image.load(self.folder+"pacman2.png"),
pygame.image.load(self.folder+"pacman3.png"),
pygame.image.load(self.folder+"pacman4.png")]
self.folder = "animations/pacman_death/"
self.imgs_dead = [pygame.image.load(self.folder+"death00.png"),
pygame.image.load(self.folder+"death01.png"),
pygame.image.load(self.folder+"death02.png"),
pygame.image.load(self.folder+"death03.png"),
pygame.image.load(self.folder+"death04.png"),
pygame.image.load(self.folder+"death05.png"),
pygame.image.load(self.folder+"death06.png"),
pygame.image.load(self.folder+"death07.png"),
pygame.image.load(self.folder+"death08.png"),
pygame.image.load(self.folder+"death09.png"),
pygame.image.load(self.folder+"death10.png"),
pygame.image.load(self.folder+"death11.png")]
self.movemap = image.load('movemap.png')
# define screen dimensions
self.scrnW = width
self.scrnH = height
# use an image as reference for pacman dimensions
self.scale = 28
self.sprite = pygame.transform.scale(self.imgs_alive[1], (self.scale, self.scale))
self.rad = (self.sprite.get_width() / 2) + 1
self.width = self.sprite.get_width()
self.height = self.sprite.get_height()
# define pacman's spawn location
self.spawnX = self.scrnW/2 - self.rad + 10
self.spawnY = self.scrnH/2 - self.rad + 54
# state variables
self.isLiving = True
self.pauseDone = False
# animation variables
self.frame_alive = 0
self.frame_dead = 0
self.animationRate = 6 # speed of pacman animation
# current movement variables
self.vel = 2.5
self.x = self.spawnX
self.y = self.spawnY
self.dirX = 0
self.dirY = 0
# previous movement variables
self.prevX = self.x #{ initialized to inital position
self.prevY = self.y #{
self.prevDirX = 0 #{ stores the direction from the previous frame
self.prevDirY = 0 #{
# future movement variables
self.intendedDirX = None #{ initialized as None, since there is no initial intended move
self.intendedDirY = None #{
# counter variables
self.lives = 3
self.score = 0
def move(self):
# store current position and direction for future reference
self.prevX = self.x
self.prevY = self.y
self.prevDirX = self.dirX
self.prevDirY = self.dirY
if self.isMoveValid(self.dirX, self.dirY):
self.x += self.dirX * self.vel
self.y += self.dirY * self.vel
def hasMoved(self):
if self.x == self.prevX and self.y == self.prevY:
return False
else:
return True
def isMoveValid(self, dirX, dirY):
# error handling
if dirX == None or dirY == None:
return False
# this is approximately where pacman's sprite will collide with a wall
nextX = self.x + (dirX * self.vel)
nextY = self.y + (dirY * self.vel)
# out of bounds error handling for the tunnel
if(nextX >= self.movemap.get_width()):
nextX = self.movemap.get_width() - 1
if(nextX < 0):
nextX = 0
if(nextY >= self.movemap.get_height()):
nextY = self.movemap.get_height() - 1
if(nextY < 0):
nextY = 0
# prevents pacman clipping with the wall
buffer = 6
# if going down, check bottom left and bottom right corners of pacman
if (dirX == 0 and dirY > 0 and
self.movemap.get_at((int(nextX + (self.rad - buffer)), int(nextY + (self.rad - buffer)))) == Color(0, 0, 0) and
self.movemap.get_at((int(nextX - (self.rad - buffer)), int(nextY + (self.rad - buffer)))) == Color(0, 0, 0)):
return True
# if going up, check top left and top right corners of pacman
elif (dirX == 0 and dirY < 0 and
self.movemap.get_at((int(nextX + (self.rad - buffer)), int(nextY - (self.rad - buffer)))) == Color(0, 0, 0) and
self.movemap.get_at((int(nextX - (self.rad - buffer)), int(nextY - (self.rad - buffer)))) == Color(0, 0, 0)):
return True
# if going right, check top right and bottom right corners of pacman
elif(dirX > 0 and dirY == 0 and
self.movemap.get_at((int(nextX + (self.rad - buffer)), int(nextY + (self.rad - buffer)))) == Color(0, 0, 0) and
self.movemap.get_at((int(nextX + (self.rad - buffer)), int(nextY - (self.rad - buffer)))) == Color(0, 0, 0)):
return True
# if going left, check top left and bottom left corners of pacman
elif(dirX < 0 and dirY == 0 and
self.movemap.get_at((int(nextX - (self.rad - buffer)), int(nextY + (self.rad - buffer)))) == Color(0, 0, 0) and
self.movemap.get_at((int(nextX - (self.rad - buffer)), int(nextY - (self.rad - buffer)))) == Color(0, 0, 0)):
return True
else:
return False
def draw(self, screen):
if self.isLiving == True:
# choose which frame to use
# all pacman images face left, so various transformations must be applied depending on pacman's direction
if self.dirX > 0:
sprite = pygame.transform.flip(self.imgs_alive[self.frame_alive], True, False)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
elif self.dirX < 0:
sprite = pygame.transform.scale(self.imgs_alive[self.frame_alive], (self.scale, self.scale))
screen.blit(sprite, (self.x - self.rad, self.y - self.rad))
elif self.dirY > 0:
sprite = pygame.transform.rotate(self.imgs_alive[self.frame_alive], 90)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
elif self.dirY < 0:
sprite = pygame.transform.rotate(self.imgs_alive[self.frame_alive], -90)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
else:
# the case where pacman is idle
sprite = pygame.transform.scale(self.imgs_alive[0], (self.scale, self.scale))
screen.blit(sprite, (self.x - self.rad, self.y - self.rad))
else:
# the case where self.isLiving == False and self.pauseDone == False
if self.pauseDone == False:
# during the pause after dying to a ghost, keep the last frame of pacman walking before he died
if self.prevDirX > 0:
sprite = pygame.transform.flip(self.imgs_alive[self.frame_alive], True, False)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
elif self.prevDirX < 0:
sprite = pygame.transform.scale(self.imgs_alive[self.frame_alive], (self.scale, self.scale))
screen.blit(sprite, (self.x - self.rad, self.y - self.rad))
elif self.prevDirY > 0:
sprite = pygame.transform.rotate(self.imgs_alive[self.frame_alive], 90)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
elif self.prevDirY < 0:
sprite = pygame.transform.rotate(self.imgs_alive[self.frame_alive], -90)
scaled = pygame.transform.scale(sprite, (self.scale, self.scale))
screen.blit(scaled, (self.x - self.rad, self.y - self.rad))
else:
sprite = pygame.transform.scale(self.imgs_alive[0], (self.scale, self.scale))
screen.blit(sprite, (self.x - self.rad, self.y - self.rad))
# the case where self.isLiving == False and self.pauseDone == True
else:
sprite = pygame.transform.scale(self.imgs_dead[self.frame_dead], (self.scale, self.scale))
screen.blit(sprite, (self.x - self.rad, self.y - self.rad))
# after death animation is done
if self.frame_dead == len(self.imgs_dead) - 1:
self.respawnEvents()
def findNode(self, nodes, optional_other=None):
# optional_other option is for searching near x, y instead of the player
# optional_other = [x, y]
for row in nodes:
for val in row:
if val != 0:
tolerance = 25 * 25
if optional_other == None:
distSquaredX = (val.x - self.x)**2
distSquaredY = (val.y - self.y)**2
else:
distSquaredX = (val.x - optional_other[0])**2
distSquaredY = (val.y - optional_other[1])**2
if distSquaredX < tolerance and distSquaredY < tolerance:
return val
def deathEvents(self):
if self.isLiving == True:
self.isLiving = False
self.dirX = 0
self.dirY = 0
def respawnEvents(self):
# respawn pacman
self.isLiving = True
self.pauseDone = False
self.frame_alive = 0
self.frame_dead = 0
self.x = self.spawnX
self.y = self.spawnY
self.dirX = 0
self.dirY = 0
self.prevX = None
self.prevY = None
self.prevDirX = 0
self.prevDirY = 0
self.intendedDirX = None
self.intendedDirY = None
# remove a life
self.lives -= 1
# check if no more lives
if self.lives <= 0:
# display game over text
print("GAME OVER")
# resets player attributes for a new game
def reset(self):
self.lives = 3
self.score = 0
| UTF-8 | Python | false | false | 10,756 | py | 7 | player.py | 6 | 0.550205 | 0.538025 | 0 | 254 | 41.346457 | 123 |
zhanglizhijakezlz/spider | 19,499,151,536,239 | ba16c7f2cf9fb3a17940be0e167cbbafecdfa386 | 96c827f306c8cbc5ff166957011d83c177fa3977 | /com/bsandxpath/xpathandbs1.py | c2650a44ff351fe9f93c14afc279acc96fe1d731 | []
| no_license | https://github.com/zhanglizhijakezlz/spider | 012ac18e33c0e42c0758e724d48668072997f966 | 2e025335f051efc38397286cc24bb47b0b727586 | refs/heads/master | 2020-08-01T15:45:49.351162 | 2019-09-26T08:22:56 | 2019-09-26T08:22:56 | 211,037,487 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
from bs4 import BeautifulSoup
from lxml import etree
import requests
class SpiderCompare(object):
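    """Fetch a Baidu Tieba thread and extract poster names/contents with regex, XPath (lxml) and BeautifulSoup for comparison."""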
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
def get_code(self):
response = requests.get('https://tieba.baidu.com/p/5743456239?pn=1', headers=self.headers).text
return response
def use_re(self, code):
pattern = re.compile(r'<li class="d_name".*?<a.*?>(.*?)</a>'
r'.*?<cc.*?class="d_post_content j_d_post_content ">(.*?)</div>', re.S)
result = re.findall(pattern, code)
pattern = re.compile(r'<.*?>|<br>|</a>', re.S)
for name, content in result:
new_name = re.sub(pattern, '', name)
new_name = pattern.sub('', name)
            # two equivalent ways of writing the same substitution
new_content = re.sub(pattern, '', content)
print(new_content)
def use_xpath(self, code):
new_code = etree.HTML(code)
name = new_code.xpath('//li[@class="d_name"]/a/text()')
for new in name:
print(new.replace(' ', '').strip('\n'))
content = new_code.xpath('//cc/div[@class="d_post_content j_d_post_content "]//text()')
for new in content:
print(new)
def use_soup(self, code):
soup = BeautifulSoup(code, 'lxml')
name = soup.select('li.d_name a')
for new in name:
print(new.get_text())
content = soup.select('cc div')
for new in content:
print(new.get_text())
def manager_spider(self):
code = self.get_code()
        # the regex version does not produce results, so the call is left commented out
# self.use_re(code)
# self.use_xpath(code)
self.use_soup(code)
spider = SpiderCompare()
spider.manager_spider() | UTF-8 | Python | false | false | 1,885 | py | 26 | xpathandbs1.py | 25 | 0.540526 | 0.520666 | 0 | 58 | 31.137931 | 103 |
BrandonTheBuilder/robohooker | 19,499,151,563,117 | b9bfa0e4e3c4b968fe4935121255be4ed517646c | 42dd6a165721688964dfed4b21bf7280b2865b51 | /src/track_fish/src/fish_hole.py | 7ca794b57d6098970c5dd319573423a317690f4c | []
| no_license | https://github.com/BrandonTheBuilder/robohooker | 8205cf708cd5752481966227f169f694e387dd3d | 3bd1674888f643f923cf8e43153b16bc479d813d | refs/heads/master | 2016-09-13T11:30:41.843158 | 2016-06-03T13:25:17 | 2016-06-03T13:25:17 | 56,011,153 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from point import Point
from collections import deque
import numpy as np
import cv2
DEQUE_SIZE = 10
CASCADE_CLASSIFIER = '../../fish_detective/training/harr_fish/cascade.xml'
whiteLower =(0,225,50)
whiteUpper =(180,255,255)
HARR_CUTOFF = 1.0
# ANGLE_TOLERANCE = 0.01
LOCATION_TOLERANCE = 100
class FishHole(object):
"""docstring for FishHole"""
def __init__(self):
self.fish = True
self.times_fished = 0
# super(FishHole, self).__init__()
# self.find_fish = cv2.CascadeClassifier(CASCADE_CLASSIFIER)
# self.fishiness = deque()
# def append(self, l, x):
# if len(l) < DEQUE_SIZE:
# l.append(x)
# else:
# l.popleft()
# self.append(l,x)
# def fishy_calibration(self, img):
# # cv2.namedWindow("Fish")
# # cv2.startWindowThread()
# # cv2.imshow('Fish', img)
# # cv2.waitKey(0)
# # fishes = self.find_fish.detectMultiScale(img, 1.01, 1,
# # minSize = (25,25),
# # maxSize=(35,35))
# self.append(self.fishiness, 2)
# # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# # mask_w = cv2.inRange(hsv, whiteLower, whiteUpper)
# # # cv2.imshow("White1", mask_w)
# # #mask_w = cv2.erode(mask_w, None, iterations=2)
# # # cv2.imshow("White2", mask_w)
# # mask_w = cv2.dilate(mask_w, None, iterations=2)
# # # cv2.imshow("White3", mask_w)
# # # find contours in the mask and initialize the current
# # # (x, y) center of the ball
# # cnts = cv2.findContours(mask_w.copy(),
# # cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
# # center = None
# # fishcount=0
# # print len(cnts)
# # self.append(self.fishiness, len(cnts))
# # only proceed if at least one contour was found
# # if len(cnts) > 0:
# # for c in range(0,len(cnts)):
# # # find the largest contour in the mask, then use
# # # it to compute the minimum enclosing circle and
# # # centroid
# # #c = max(cnts, key=cv2.contourArea)
# # ((x, y), radius) = cv2.minEnclosingCircle(cnts[c])
# # M = cv2.moments(cnts[c])
# # center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# # # only proceed if the radius meets a minimum size (default was 10)
# # #print ("Radius:",radius)
# # if radius > 9 and radius <13:
# # fishcount=fishcount+1
# # # draw the circle and centroid on the frame,
# # # then update the list of tracked points
# # #first color is 0,255,255
# # #print "Green"
# # cv2.circle(frame, (int(x), int(y)), int(radius),(0, 0, 0), 2)
# # #fishtext=str(fishcount)
# # #cv2.putText(frame,fishtext,(10,20),font,1,(255,255,255),1)
# # print("Fish COunt: ",fishcount)
def is_fish(self):
return self.fish
# fishy_level = np.mean(self.fishiness)
# if fishy_level < HARR_CUTOFF:
# return False
# else:
# return True
| UTF-8 | Python | false | false | 3,426 | py | 51 | fish_hole.py | 20 | 0.493287 | 0.460595 | 0 | 87 | 38.206897 | 89 |
simonguichandut/GR-envelope | 2,413,771,642,731 | fe7530bd2a13d975a52732679563cfcc882be114 | 531a382c3cdd982315b630db78b19cc72afe6964 | /env_GR_FLD.py | fb9f39c5a2456df7f267ddf894fd296031fc2730 | []
| no_license | https://github.com/simonguichandut/GR-envelope | 31dc8618d2d1b6136defdc503fba4950adcc680f | cdb0286ffded5cd2e8e2aaef7bd77439397bb17d | refs/heads/master | 2021-06-20T15:57:29.193166 | 2021-03-01T18:52:44 | 2021-03-01T18:52:44 | 190,279,912 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ''' Main code to calculate expanded envelopes '''
import sys
from scipy.optimize import brentq
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
from collections import namedtuple
import numpy as np
import IO
import physics
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
# --------------------------------------- Constants and parameters --------------------------------------------
# Constants
kB = 1.380658e-16
arad = 7.5657e-15
c = 2.99792458e10
mp = 1.67e-24
kappa0 = 0.2
sigmarad = 0.25*arad*c
# Parameters
params = IO.load_params()
if params['FLD'] == False:
sys.exit('This script is for FLD calculations')
# Generate EOS class and methods
eos = physics.EOS_FLD(params['comp'])
# Mass-dependent parameters
M,RNS,y_inner = params['M'],params['R'],params['y_inner']
GM = 6.6726e-8*2e33*M
LEdd = 4*np.pi*c*GM/eos.kappa0
ZZ = (1-2*GM/(c**2*RNS*1e5))**(-1/2) # redshift
g = GM/(RNS*1e5)**2 * ZZ
P_inner = g*y_inner
T_inner = 10**8.5
rg = 2*GM/c**2 # gravitationnal radius
# ----------------------------------------- General Relativity ------------------------------------------------
def Swz(r): # Schwartzchild metric term
return (1-2*GM/c**2/r)
def grav(r): # local gravity
return GM/r**2 * Swz(r)**(-1/2)
def Lcr(r,rho,T):
return 4*np.pi*c*GM/eos.kappa(rho,T)*(1-rg/r)**(-1/2)
# ----------------------- Flux-limited diffusion -----------------------------
# Modified version of Pomraning (1983) FLD prescription.
# See Guichandut & Cumming (2020)
def FLD_Lam(L,r,T,return_params=False):
# L is the local luminosity. In envelopes, Linf is constant, L=Linf*(1-rs/r)**(-1)
Flux = L/(4*np.pi*r**2)
alpha = Flux/(c*arad*T**4) # 0 opt thick, 1 opt thin
if isinstance(L, (list,tuple,np.ndarray)):
if len(alpha[alpha>1])>0:
raise Exception("Causality warning F>cE at %d locations"%len(alpha[alpha>1]))
else:
if alpha>1:
# print('causality warning : F>cE')
alpha=1-1e-9
Lam = 1/12 * ( (2-3*alpha) + np.sqrt(-15*alpha**2 + 12*alpha + 4) ) # 1/3 thick , 0 thin
R = alpha/Lam # 0 thick, 1/lam->inf thin
if return_params:
return Lam,alpha,R
else:
return Lam
# ----------------------------------------- Initial conditions ------------------------------------------------
def photosphere(Rphot,f0):
''' Finds photospheric density and temperature (eq 9a-b) for a given luminosity-like parameter f0
Also sets Linf, the luminosity seen by observers at infinity '''
def Teff_eq(T):
return eos.kappa(0.,T) - (GM*c/(Rphot**2*sigmarad) * Swz(Rphot)**(-1/2) * (1-10**f0))/T**4 # Assuming 0 density for opacity, which is no problem at photosphere
Tkeep1, Tkeep2 = 0.0, 0.0
npoints = 10
while Tkeep1 == 0 or Tkeep2 == 0:
logT = np.linspace(6, 8, npoints)
for T in 10**logT:
foo = Teff_eq(T)
if foo < 0.0:
Tkeep1 = T
if foo > 0.0 and Tkeep2 == 0:
Tkeep2 = T
npoints += 10
T = brentq(Teff_eq, Tkeep1, Tkeep2, xtol=1e-10, maxiter=10000)
rho = 2/3 * eos.mu*mp/(kB*T) * grav(Rphot)/eos.kappa(0.,T) * 10**f0
Linf = 4*np.pi*Rphot**2*sigmarad*T**4* Swz(Rphot)
return rho,T,Linf
def get_f0(Rphot,Tphot,Linf): # the opposite of what photosphere does, i.e find f0 based on Linf and values at photosphere
Lph = Linf*Swz(Rphot)**(-1)
Lcrph = 4*np.pi*c*GM/eos.kappa(0.,Tphot)*(1-rg/Rphot)**(-1/2)
return np.log10(1-Lph/Lcrph)
# --------------------------------------- Wind structure equations (v=0) ---------------------------------------
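# Y, Tstar, A, B and C below are the terms of the wind structure equations evaluated in the
# static (v = 0) limit; the *_e variants include the electron contributions from the EOS.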
def Y(r):
return np.sqrt(Swz(r)) # v=0
def Tstar(L, T, r, rho):
return L/LEdd * eos.kappa(rho,T)/eos.kappa0 * GM/(4*r) *\
3*rho/(arad*T**4) * Y(r)**(-1)
def A(T):
return 1+1.5*eos.cs2(T)/c**2
def A_e(rho,T):
pe,_,[alpha1,_,f] = eos.electrons(rho,T)
return 1 + 1.5*eos.cs2_I(T)/c**2 + pe/(rho*c**2)*(f/(f-1) - alpha1)
def B(T):
return eos.cs2(T)
def B_e(rho,T):
pe,_,[alpha1,alpha2,f] = eos.electrons(rho,T)
return eos.cs2_I(T) + pe/rho*(alpha1 + alpha2*f)
def C(L,T,r,rho):
Lam,_,R = FLD_Lam(L,r,T,return_params=True)
b = eos.Beta(rho,T, lam=Lam, R=R)
return 1/Y(r) * L/LEdd * eos.kappa(rho,T)/eos.kappa0 * GM/r * \
(1 + b/(12*Lam*(1-b)))
def C_e(L, T, r, rho):
Lam,_,R = FLD_Lam(L,r,T,return_params=True)
_,_,[alpha1,_,_] = eos.electrons(rho,T)
bi,be = eos.Beta_I(rho, T, lam=Lam, R=R), eos.Beta_e(rho, T, lam=Lam, R=R)
return 1/Y(r) * L/LEdd * eos.kappa(rho,T)/eos.kappa0 * GM/r * \
(1 + (bi + alpha1*be)/(12*Lam*(1-bi-be)))
# -------------------------------------------- Calculate derivatives ---------------------------------------
def derivs(r,y,Linf):
# Version with the wind equations (with A,B,C) and v=0
rho,T = y[:2]
# if rho<0:
# rho=1e-10
L = Linf*Swz(r)**(-1)
Lam = FLD_Lam(L,r,T)
# print('r=%.3e \t rho=%.3e \t T=%.3e \t lam=%.3f'%(r,rho,T,Lam))
dlnT_dlnr = -Tstar(L, T, r, rho) / (3*Lam) - 1/Swz(r) * GM/c**2/r
# dlnrho_dlnr = (-GM/Swz(r)/r * A_e(rho,T) + C_e(L,T,r,rho))/B_e(rho,T)
dlnrho_dlnr = (-GM/Swz(r)/r * A(T) + C(L,T,r,rho))/B(T)
dT_dr = T/r * dlnT_dlnr
drho_dr = rho/r * dlnrho_dlnr
return [drho_dr,dT_dr]
# --------------------------------------- Optically thin limit ---------------------------------------
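# Free-streaming limit (alpha = F/(c*a*T^4) -> 1): T^4 = L_local/(4 pi r^2 a c), and the flux
# limiter tends to its optically thin form lambda ~ kappa*rho*r/(2Y).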
def T_thin(Linf,r):
return ( Linf*Swz(r)**(-1) / (4*np.pi*r**2*arad*c) )**0.25
def drho_thin(r,rho,Linf):
L = Linf*Swz(r)**(-1)
T = T_thin(Linf,r)
Flux = L/(4*np.pi*r**2)
alpha = Flux/(c*arad*T**4)
# Lam = eos.kappa(rho,T)*rho*r/2/Y(r) # optically thin limit of lambda (it's not exactly zero)
Lam = eos.kappa0*rho*r/2/Y(r) # optically thin limit of lambda (it's not exactly zero)
b = eos.Beta(rho,T, lam=Lam, R=alpha/Lam)
C = 1/Y(r) * L/LEdd * eos.kappa(rho,T)/eos.kappa0 * GM/r * (1 + b/(12*Lam*(1-b)))
dlnrho_dlnr = (-GM/Swz(r)/r * A(T) + C)/B(T)
return rho/r * dlnrho_dlnr
# ---------------------------------------------- Integration -----------------------------------------------
def Shoot_in(rspan, rho0, T0, Linf):
''' Integrates in using r as the independent variable, until P=P_inner or
we diverge in the wrong direction. We want to match the location of
p=p_inner to the NS radius '''
inic = [rho0, T0]
def hit_innerPressure(r,Y,*args): return eos.pressure_e(Y[0],Y[1], lam=1/3, R=0)-P_inner
hit_innerPressure.terminal = True # stop integrating at this point
def hit_lowdensity(r,Y,*args):
return Y[0]-rho0/100
hit_lowdensity.terminal = True # stop integrating when hit small density (means about to crash)
sol = solve_ivp(derivs, rspan, inic, args = (Linf,), method='Radau',
events = (hit_innerPressure, hit_lowdensity), dense_output=True,
atol=1e-6, rtol=1e-6, max_step=1e5)
return sol
def Error(r): # Evaluate normalized error on location of the base versus NS radius
return (r[-1]-RNS*1e5)/(RNS*1e5)
def Shoot_out(rspan, rho0, T0, Linf, rtol=1e-6, max_step=1e5):
''' Integrates out using r as the independent variable, until the maximum radius,
or until density goes to zero (or a minimum value).'''
inic = [rho0, T0]
def hit_zero_density(r,y,*args):
return y[0]
hit_zero_density.terminal = True
# Something that can happen that will make my algo fail is that in the outwards integration,
# the bottom branch, the one that should diverge with the density going negative (and stop at zero)
# will not diverge somehow and find a stable region where the temperature stays constant and the
# density goes up and down.
# I want to trigger a stop when this happens, but the condition that I implement must not trigger
# when we're on the upper branch, where density goes up until the step size becomes too small.
# For example, a condition that triggers when drho/dr>0 is not good because it triggers in the other branch.
# I can try and catch the up and down of rho, i.e trigger when drho/dr goes from + to -. That should signal
    # that I'm on the bottom branch, because the top branch fails when drho/dr is + and just keeps increasing
def hit_density_mountains(r,y,*args):
rho,T = y
dlnrho_dlnr = (-GM/Swz(r)/r * A(T) + C(Linf*Swz(r)**(-1),T,r,rho))/B(T)
# if dlnrho_dlnr>0: print(dlnrho_dlnr)
return dlnrho_dlnr
hit_density_mountains.direction = -1
hit_density_mountains.terminal = True
# def hit_optically_thin(r,y,*args):
# T_thin = ( Linf*Swz(r)**(-1) / (4*np.pi*r**2*arad*c) )**0.25
# err = abs(y[1]-T_thin)/T_thin
# return err - rtol
# hit_optically_thin.terminal = True
sol = solve_ivp(derivs, rspan, inic, args=(Linf,), method='Radau',
events = (hit_zero_density,hit_density_mountains), dense_output=True,
atol=1e-6, rtol=rtol, max_step=1e5)
return sol
def Shoot_out_thin(rvec, rho0, Linf, rtol=1e-6, rho_min=1e-10, max_step=1e5):
''' Integrates out using r as the independent variable, using the optically thin
limit to calculate the temperature '''
def hit_minimum_density(r,y,*args):
return y[0]-rho_min
hit_minimum_density.terminal = True
T = ( Linf*Swz(rvec)**(-1) / (4*np.pi*rvec**2*arad*c) )**0.25
sol = solve_ivp(drho_thin, t_span=(rvec[0],rvec[-1]), y0=(rho0,), args=(Linf,),
events = (hit_minimum_density), method='Radau', dense_output=True,
rtol=rtol, max_step = max_step)
rho = sol.sol(rvec)[0]
print('density zero at r=%.4f km'%(sol.t[-1]/1e5))
return rho,T
# ------------------------------------------------- Envelope ---------------------------------------------------
Env = namedtuple('Env',
['rphot','Linf','r','T','rho'])
def get_rhophf0rel(Rphotkm, rend=1e9, tol=1e-6, Verbose=0, f0min=-4.5, f0max=-3.7, npts=40, spacing='linear'):
# find the value of rhoph that allow a solution to go to inf (to tol precision), for each value of f0
if Verbose: print('\nRphot = %.2f km\n'%Rphotkm)
if spacing=='linear':
f0vals = np.linspace(f0min,f0max,npts)
elif spacing=='log':
f0vals = -1*np.logspace(np.log10(abs(f0min)),np.log10(abs(f0max)),npts)
f0vals = np.round(f0vals,8) # 8 decimals
rspan = (Rphotkm*1e5, rend)
for f0 in f0vals:
if Verbose: print('\nFinding rhoph for f0 = %.8f'%f0)
# Start at the initial value given by the approximation for tau=2/3
rhoph,Tph,Linf = photosphere(Rphotkm*1e5, f0)
if Linf/LEdd>1: print('Warning: Linf/LEdd=%.5f'%(Linf/LEdd))
a = rhoph
# sola = solve_ivp(derivs, (Rphotkm*1e5,1e9), (a,Tph), args=(Linf,),
# events=(hit_zero_density), method='Radau', dense_output=True, rtol=tol)
sola = Shoot_out(rspan=rspan, rho0=a, T0=Tph, Linf=Linf)
if sola.status == 1: # hit zero density (intial rhoph is in the bottom branch)
direction = +1
else:
direction = -1
# Step either up or down in rhoph until we find other branch
step = 0.5 # 50% update
b = a
while True:
b *= 1 + direction*step # either *1.5 or *0.5
# solb = solve_ivp(derivs, (Rphotkm*1e5,1e9), (b,Tph), args=(Linf,),
# events=(hit_zero_density), method='Radau', dense_output=True, rtol=tol)
solb = Shoot_out(rspan=rspan, rho0=b, T0=Tph, Linf=Linf)
if solb.status != sola.status:
break
# Bissect to get two values of rhoph close to relative tolerance tol
# print('\nBeginning Bissection')
while abs(b-a)/a>tol:
m = (a+b)/2
# print('%.6e'%m)
# solm = solve_ivp(derivs, (Rphotkm*1e5,1e9), (m,Tph), args=(Linf,),
# events=(hit_zero_density), method='Radau', dense_output=True, rtol=tol)
solm = Shoot_out(rspan=rspan, rho0=m, T0=Tph, Linf=Linf)
if solm.status == sola.status:
a,sola = m,solm
else:
b,solb = m,solm
# a the smaller one just to not get confused
if a>b: (a,b) = (b,a)
if Verbose:
print('\nInitial rhoph based on PA86 formula : \n%.6e'%rhoph)
print('Final bounding values:\n%.6e\n%.6e'%(a,b))
# Save one at a time
IO.save_rhophf0rel(Rphotkm,[f0],[a],[b])
IO.clean_rhophf0relfile(Rphotkm,warning=0)
def OuterBisection(Rphotkm, rho0, T0, Linf, rend=1e9, Verbose=False, tol=1e-4, return_stuff=False):
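    '''Extend the envelope from the photosphere (rho0, T0 at Rphotkm) out to rend by repeated
    bisection between the two diverging outer branches, appending the analytic optically thin
    limit once the temperature has converged to T_thin.'''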
if return_stuff: stuff=[]
a = rho0
sola = Shoot_out(rspan=(Rphotkm*1e5,rend), rho0=a, T0=T0, Linf=Linf)
if sola.status == 0:
import matplotlib.pyplot as plt
fig,(ax1,ax2) = plt.subplots(2,1,figsize=(6,8),sharex=True)
ax1.set_ylabel('T')
ax2.set_ylabel('rho')
ax2.set_xlabel('r (km)')
ax1.semilogy(sola.t/1e5,sola.y[1],'r-',lw=0.8)
ax1.semilogy(sola.t/1e5,T_thin(Linf,sola.t),'r--',lw=0.8)
ax2.semilogy(sola.t/1e5,sola.y[0],'b-',lw=0.8)
fig,ax3=plt.subplots(1,1)
ax3.plot(sola.t/1e5,np.gradient(sola.y[0]))
plt.show()
sys.exit('reached end of integration interval with root!')
if sola.status == 1: # hit zero density (intial rhoph is in the bottom branch)
direction = +1
else:
direction = -1
# Step either up or down in rhoph until we find other branch
step = 1e-6
b = a
i = 0
while True:
logb = np.log10(b) + direction*step
b = 10**logb
solb = Shoot_out(rspan=(Rphotkm*1e5,rend), rho0=b, T0=T0, Linf=Linf)
if solb.status != sola.status:
break
i+=1
if i==200:
if Verbose:
print('Not able to find a solution that diverges in opposite \
direction after changing rhoph by 200 tolerances. \
Problem in the rhoph-f0 interpolation')
break
# if sola was the high rhoph one, switch sola and solb (such that a is bottom branch)
if direction == -1:
(a,sola),(b,solb) = (b,solb),(a,sola)
if Verbose:
print('Two initial solutions. logrho values at photosphere:')
print('sola:%.6f \t solb:%.6f'%(np.log10(a),np.log10(b)))
if return_stuff:
stuff.append([a,sola,b,solb])
stuff.append([]) # will store the intermediate solutions into this list
def check_convergence(sola,solb,rcheck):
""" checks if two solutions are converged (similar rho, T) at some r """
rhoa,Ta = sola.sol(rcheck)
rhob,Tb = solb.sol(rcheck)
if abs(rhoa-rhob)/rhoa < tol and abs(Ta-Tb)/Ta < tol:
return True,rhoa,rhob,Ta,Tb
else:
return False,rhoa,rhob,Ta,Tb
# Create Radius array on which we will save points. Want many points near the photoshere to aid convergence
if Rphotkm > RNS+0.1:
Rlin = np.linspace(Rphotkm*1e5, (Rphotkm+5)*1e5, 1000)
Rlog = np.logspace(np.log10((Rphotkm+5)*1e5), np.log10(rend), 1000)
elif Rphotkm <= RNS+0.1 and Rphotkm > RNS+0.01:
Rlin = np.linspace(Rphotkm*1e5, (Rphotkm+2)*1e5, 2000)
Rlog = np.logspace(np.log10((Rphotkm+1)*1e5), np.log10(rend), 1000)
else:
Rlin = np.linspace(Rphotkm*1e5, (Rphotkm+0.5)*1e5, 3000)
Rlog = np.logspace(np.log10((Rphotkm+0.5)*1e5), np.log10(rend), 1000)
R = np.concatenate((Rlin,Rlog[1:]))
# Start by finding the first point of divergence
# Npts = 5000
# R = np.logspace(np.log10(Rphotkm*1e5),np.log10(rend),Npts)
for i,ri in enumerate(R):
conv = check_convergence(sola,solb,ri)
if conv[0] is False:
i0=i # i0 is index of first point of divergence
if Verbose: print('First divergence at r = %.3e cm'%ri)
break
else:
rhoa,rhob,Ta,Tb = conv[1:]
# Construct initial arrays
rho,T = sola.sol(R[:i0])
def update_arrays(rho,T,sol,R,j0,jf):
# Add new values from rho and T using ODE solution object.
# Radius points to add are from R[j0] to R[jf]
rho_new,T_new = sol(R[j0:jf+1]) # +1 so R[jf] is included
rho,T = np.concatenate((rho,rho_new)), np.concatenate((T,T_new))
return rho,T
# Begin bisection
if Verbose:
print('\nBeginning bisection')
print('rconv (km) \t Step # \t Iter \t m \t dir')
a,b = 0,1
step,count = 0,0
i = i0
rconv = R[i-1] # converged at that radius
rcheck = R[i] # checking if converged at that radius
do_bisect = True
while rconv<rend:
if do_bisect: # Calculate a new solution from interpolated values
m = (a+b)/2
rhom,Tm = rhoa + m*(rhob-rhoa), Ta + m*(Tb-Ta)
max_step = 1e3 if rconv < (RNS+2)*1e5 else 1e5
solm = Shoot_out(rspan=(rconv,10*rend), rho0=rhom, T0=Tm, Linf=Linf, rtol=1e-10, max_step=max_step)
# go further than rmax to give it the chance to diverge either way
if solm.status == 0: # Reached rend - done
# rho,T = update_arrays(rho,T,solm.sol,R,i0,len(R))
#jf=len(R) so that it includes the last point of R
raise Exception('reached end of integration interval without \
reaching optically thin.. probably wrong')
elif solm.status == 1:
a,sola,sdir = m,solm,'^'
elif solm.status == -1:
b,solb,sdir = m,solm,'v'
else:
i += 1
rconv,rcheck = R[i-1],R[i]
conv = check_convergence(sola,solb,rcheck)
if conv[0] is True:
rhoa,rhob,Ta,Tb = conv[1:]
a,b = 0,1 # reset bissection parameters
step += 1 # update step counter
count = 0 # reset iteration counter
# Converged, so on next iteration just look further
do_bisect = False
# Store solutions for demo plot
if return_stuff:
stuff[-1].extend((sola,solb))
else:
count+=1
do_bisect = True
# Before computing new solution, add converged results to array
# (but only if we made at least 1 step progress)
if i-1>i0:
rho,T = update_arrays(rho,T,solm.sol,R,i0,i-1) # i-1 is where we converged last
i0=i # next time we append
# Check if we have reached the optically thin limit of temperature profile
# but only if we've been stuck for a while and can't make progress with the normal equations
if count == 100:
rx,rhox,Tx = R[len(rho)-1], rho[-1], T[-1]
if abs(Tx-T_thin(Linf,rx))/T_thin(Linf,rx) < 1e-3:
if Verbose: print('Reached optically thin limit at r=%.4f km'%(rx/1e5))
# rho3,T3 = Shoot_out_thin(R2[len(rho2)-1:], rho0, Linf)
# stuff.append([R2[len(rho2)-1:],rho3,T3])
# rho2,T2 = np.append(rho2, rho3[1:]) , np.append(T2, T3[1:])
# Append optically thin limit: T \propto r^-1/2, rho\approx 0
# Rthin = R[len(rho)-1:]
Rthin = np.logspace(np.log10(rx), np.log10(rend), 50) # don't need many points if it's analytical
rhothin = 1e-20 * np.ones(len(Rthin))
Tthin = T_thin(Linf,Rthin)
if return_stuff: stuff.append([Rthin,rhothin,Tthin])
R = np.concatenate((R[:len(rho)] , Rthin[1:]))
rho,T = np.append(rho, rhothin[1:]) , np.append(T, Tthin[1:])
assert(len(R)==len(rho))
break
# Exit after stuck at 200 stuff for debugging (with 'stuff' object)
if return_stuff and count==500:
return R[:len(rho)],rho,T,stuff
# Exit if stuck at one step
nitermax=1000
if count==nitermax:
sys.exit("Could not integrate out to rend! Exiting after being \
stuck at the same step for %d iterations"%nitermax)
# End of step
if Verbose: print('%.4e \t %d \t\t %d \t\t %.10f \t\t %s'%(rconv,step,count,m,sdir))
if return_stuff:
return R,rho,T,stuff
else:
return R,rho,T
def MakeEnvelope(Rphotkm, rend=1e9, Verbose=False, tol=1e-4, return_stuff=False):
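    '''Build the full envelope for a photospheric radius Rphotkm (km): bisect on the photospheric
    boundary values until the inward integration lands on the NS radius, then call OuterBisection
    to extend the solution outwards to rend.'''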
global Linf # that way Linf does not have to always be a function input parameter
Rphot = Rphotkm*1e5
rspan = (Rphot , 1.01*rg) # rspan is the integration range
stuff = []
# Load relation between f0 and logrhoph
rel = IO.load_rhophf0rel(Rphotkm)
if rel[0] is False:
if Verbose: print('First need to find relation between f0 and rhoph that allows integration to infinity')
if Rphotkm>=RNS+1:
get_rhophf0rel(Rphotkm, Verbose=Verbose)
elif Rphotkm<RNS+0.02:
get_rhophf0rel(Rphotkm, f0max=-1e-4, f0min=-1, npts=100, Verbose=Verbose, spacing='log')
else:
get_rhophf0rel(Rphotkm, f0max=-1e-3, npts=100, Verbose=Verbose)
rel = IO.load_rhophf0rel(Rphotkm)
if Verbose: print('\nLoaded rhoph-f0 relation from file')
_,f0vals,rhophA,_ = rel
rel_spline = IUS(f0vals[::-1],rhophA[::-1])
# First pass to find border values of f, and their solutions sola (gives r(y8)<RNS) and solb (r(y8)>RNS)
for i,f0 in enumerate(f0vals):
_,T_phot,Linf = photosphere(Rphot,f0)
rho_phot = rhophA[i]
solb = Shoot_in(rspan,rho_phot,T_phot,Linf)
Eb = Error(solb.t)
if i==0 and (Eb<0 or len(solb.t_events[1]) == 1):
# raise Exception('Highest f0 value leads to rb<RNS, rerun get_rhophf0rel() with custom bounds')
print('Highest f0 value leads to rb<RNS, rerun get_rhophf0rel() with custom bounds')
get_rhophf0rel(Rphotkm,f0min=f0vals[0],f0max=min(f0vals[0]+1,-1e-3), Verbose=Verbose, npts=15)
# just restart the whole function at this point
# return MakeEnvelope(Rphotkm, rend=rend, Verbose=Verbose, tol=tol, return_stuff=return_stuff)
# actually doesn't work
raise Exception('Call Again')
if Eb<0 or len(solb.t_events[1]) == 1:
# Either we crossed RNS or density went down and we stopped integration, so we didn't end up
# landing on rb<RNS. We will still keep this solution
a,b = f0vals[i],f0vals[i-1]
Ea,sola = Eb,solb
solb,Eb = solprev,Eprev
break
solprev,Eprev = solb,Eb
if Verbose: print('\nBorder values of f on first pass: %.6f \t %.6f\n'%(a,b))
# In FLD this first pass is more tricky because the density is going to zero in one of the branches.
# We need to do a second pass to find very closer border values for f
while abs((b-a)/b)>1e-6:
a = b - abs(a-b)/2
_,T_phot,Linf = photosphere(Rphot,a)
rho_phot = rel_spline(a)
sola=Shoot_in(rspan,rho_phot,T_phot,Linf)
if len(sola.t_events[0])==1: # hit inner pressure
if Error(sola.t)<0:
Ea,Eb = Error(sola.t),Error(solb.t)
break
else:
b,solb=a,sola
a=b-0.01
Ea,Eb = -1,Error(solb.t) # -1 to have a negative value, the integration cannot get passed R
assert((Ea<0) and (Eb>0))
if Verbose:
print('\nNarrowed down initial values for f at photosphere to:')
if Ea==-1:
print('fa=%.6f -> crashed to low density at r=%.3f km'%(a,sola.t_events[1][0]/1e5))
else:
print('fa=%.6f -> hit inner pressure at at r=%.3f km'%(a,sola.t_events[0][0]/1e5))
print('fb=%.6f -> hit inner pressure at r=%.3f km\n\n'%(b,solb.t_events[0][0]/1e5))
if return_stuff:
stuff.append([a,sola,b,solb])
stuff.append([]) # will store the intermediate solutions into this list
def check_convergence(sola,solb,rcheck_prev):
''' Checks if two solutions have similar parameters rho,T (1 part in tol^-1), some small integration distance in.
If converged, returns the interpolated value of rho,T at that point '''
d = Rphot/100/(count+1) # 1% of photosphere at a time, reduce by count number of current iteration
rcheck = rcheck_prev - d
rhoa,Ta = sola.sol(rcheck)
rhob,Tb = solb.sol(rcheck)
if abs(rhoa-rhob)/rhoa < tol and abs(Ta-Tb)/Ta < tol:
return True,rcheck,rhoa,rhob,Ta,Tb
else:
return False,
# Begin bisection
Em=100
count_iter,count = 0,0
rhoa,rhob,Ta,Tb = [0 for i in range(4)]
f0 = b
r,rho,T = [np.array([]) for i in range(3)]
while abs(Em)>tol: # we can stop when the final radius is the neutron star radius close to one part in 10^5
# middle point. In the first integration, a&b are the f values. In the rest, a&b are between 0 and 1. For interpolation
m = (a+b)/2
if count_iter == 0: # in the first iteration, a&b represent f
_,T_phot,Linf = photosphere(Rphot,m)
rho_phot = rel_spline(m)
solm = Shoot_in(rspan,rho_phot,T_phot, Linf)
rcheck = Rphot
else: # in the other iterations, a&b represent the linear space in [rhoa,rhob] and [Ta,Tb]
rhom,Tm = rhoa + m*(rhob-rhoa) , Ta + m*(Tb-Ta)
solm = Shoot_in(rspan,rhom,Tm,Linf)
if count_iter == 1:
if Verbose:
print('We begin the bissection with values for the photoshere')
print('f = %.3e \t Linf = %.3e \t Tph = %.3e \t rhoph = %.3e'%(f0,Linf,T_phot,rho_phot))
L = Linf*Swz(Rphot)**(-1)
F = L/(4*np.pi*Rphot**2)
alpha = F/(c*arad*T_phot**4)
Lam = FLD_Lam(L,Rphot,T_phot)
if Verbose:
print('alpha = %.3f \t lambda = %.3f'%(alpha,Lam))
print('L/4pir^2sigT^4 = %.3f'%(F/sigmarad/T_phot**4))
print('\nRadius (km) \t Step # \t Iter count \t RNS error')
# Bisection : check which side the new solution lands on and update either a or b
if len(solm.t_events[0])==1: # hit inner pressure
Em = Error(solm.t)
else:
Em = -1 # just need to have a negative value
if Ea*Em>0:
a,sola = m,solm
else:
b,solb = m,solm
conv = check_convergence(sola,solb,rcheck)
# When the two solutions are converging on rho and T, move the starting point inwards and reset a & b
if conv[0] is True:
rcheck,rhoa,rhob,Ta,Tb = conv[1:]
rspan = (rcheck,1.01*rg)
a,b = 0,1
count_iter+=1 # update step counter
count = 0 # reset iteration counter
r, rho, T = np.append(r,rcheck), np.append(rho,(rhoa+rhob)/2), np.append(T,(Ta+Tb)/2)
if return_stuff:
stuff[1].extend((sola,solb))
# End of step
if Verbose: print('%.5f \t %d \t\t %d \t\t %.6e'%(rcheck/1e5,count_iter,count+1,Em))
count+=1
# Exit if stuck at a step
nitermax=1000
if count==nitermax:
sys.exit("Could not arrive at the neutron star radius! Exiting after being stuck at the same step for %d iterations"%nitermax)
# Reached precision criteria for error on radius
if Verbose: print('Reached surface at r=%.5f km!\n'%(solm.t[-1]/1e5))
# Fill out arrays
r,rho,T = np.insert(r,0,Rphot), np.insert(rho,0,rho_phot), np.insert(T,0,T_phot)
ind = solm.t<r[-1]
r,rho,T = np.append(r,solm.t[ind]), np.append(rho,solm.y[0][ind]), np.append(T,solm.y[1][ind])
r,rho,T = np.flip(r),np.flip(rho),np.flip(T)
# Then make a solution to rend by bisection
if return_stuff:
r2,rho2,T2,stuff2 = OuterBisection(Rphotkm, rho0=rho[-1], T0=T[-1], Linf=Linf, rend=rend, Verbose=Verbose, return_stuff=True)
r,rho,T = np.append(r,r2[1:]), np.append(rho,rho2[1:]), np.append(T,T2[1:])
stuff.extend(stuff2)
return Env(Rphot,Linf,r,T,rho),stuff
else:
r2,rho2,T2 = OuterBisection(Rphotkm, rho0=rho[-1], T0=T[-1], Linf=Linf, rend=rend, Verbose=Verbose, return_stuff=False)
r,rho,T = np.append(r,r2[1:]), np.append(rho,rho2[1:]), np.append(T, T2[1:])
return Env(Rphot,Linf,r,T,rho)
| UTF-8 | Python | false | false | 29,051 | py | 234 | env_GR_FLD.py | 14 | 0.556125 | 0.525455 | 0 | 767 | 36.874837 | 168 |
vasanthkumarbalu/nordcloudflaskappsql | 10,514,079,941,083 | 57d01eecb640b1fe5c31d51aeeee5204577e9160 | 82f1d52a9a92e719ffca57819b8687c1eb4a339d | /notejam/__init__.py | b1ea5edcd73c6d8277c78cedc182fa2173fdbf1e | []
| no_license | https://github.com/vasanthkumarbalu/nordcloudflaskappsql | 0310bfaf98cc4df3969eda89aa25dd6760a3950a | 374165dc40665e4117019edb3fc1e1ac0a655885 | refs/heads/master | 2022-02-12T17:46:28.116758 | 2020-02-28T14:38:23 | 2020-02-28T14:38:23 | 243,147,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.parse
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_mail import Mail
from flask_bcrypt import Bcrypt
from notejam.config import (
Config,
DevelopmentConfig,
ProductionConfig,
TestingConfig)
import os
from_env = {'production': ProductionConfig,
'development': DevelopmentConfig,
'testing': TestingConfig,
'dbconfig': Config}
# @TODO use application factory approach
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config.from_object(from_env[os.environ.get('ENVIRONMENT', 'testing')])
params = urllib.parse.quote_plus("DRIVER={SQL Server};SERVER=notejamapp.database.windows.net;DATABASE=notejamappdb;UID=notejamadmin@notejamapp;PWD=123Welcome$;")
app.config['SQLALCHEMY_DATABASE_URI'] = "mssql+pyodbc:///?odbc_connect=%s" % params
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
@app.before_first_request
def create_tables():
db.create_all()
login_manager = LoginManager()
login_manager.login_view = "signin"
login_manager.init_app(app)
mail = Mail()
mail.init_app(app)
from notejam import views
| UTF-8 | Python | false | false | 1,193 | py | 3 | __init__.py | 2 | 0.741827 | 0.739313 | 0 | 40 | 28.825 | 161 |
DesislavaDimitrova/HackBulgaria | 9,320,079,070,655 | cc7db70787ba20436a97b58252cf6b9704200a61 | 4c564648faf5c67bee83950f96aa2d5d71773794 | /week0/1-Python-simple-problems-set /20.Sum_matrix.py | 13ce358f70366eab03083c21823aff4445990fbb | []
| no_license | https://github.com/DesislavaDimitrova/HackBulgaria | 54d3d06dd22b456c6ba430c68adc275844bf2497 | a3dbbba5bf722a956d8a6c64d6cbd4751952f150 | refs/heads/master | 2021-01-01T19:10:45.533168 | 2014-12-05T18:39:01 | 2014-12-05T18:39:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Summing of all numbers in a matrix
def sum_matrix(m):
sum = 0
for index, item in enumerate(m):
for nested_index, nested_item in enumerate(item):
sum += nested_item
return sum
| UTF-8 | Python | false | false | 185 | py | 39 | 20.Sum_matrix.py | 39 | 0.702703 | 0.697297 | 0 | 8 | 22.125 | 51 |
skyu0221/660-iot | 14,199,161,888,373 | a8e116f6f782eb784b5525b16ec53793504f40e7 | d800543b0f9395fa5202767970d138ee25721977 | /backend-server/sensors/migrations/0005_remove_device_create_by.py | ee11e4fa997d24c7a21096bbddc94813fc8896da | [
"Apache-2.0"
]
| permissive | https://github.com/skyu0221/660-iot | 7139479ee0a609ded65b8f143e63e5e51a6f3042 | d31f973c93871bfa8122f1b83364d0147d402e9e | refs/heads/master | 2021-05-23T17:52:55.660400 | 2021-01-19T20:03:24 | 2021-01-19T20:03:24 | 253,407,930 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.4 on 2020-04-02 08:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sensors', '0004_auto_20200402_0246'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='create_by',
),
]
| UTF-8 | Python | false | false | 331 | py | 30 | 0005_remove_device_create_by.py | 22 | 0.58006 | 0.486405 | 0 | 17 | 18.470588 | 47 |
patilanup246/Projects | 8,933,532,022,187 | 5ca3ab6d0552890f67a5685cf7c55f03b162e864 | b8d9bba87ffb1c6945fb1c9268a986587e672785 | /old/Hariharan_Arumugam/temp_sample.py | bd72a6feb76818db043c43342e60e960b8151247 | []
| no_license | https://github.com/patilanup246/Projects | 4f510f5965a2b5c1ca72dd94e70f53e14c7dac59 | b41aaa052a9f211065c184b7a0e167c089aefbc5 | refs/heads/master | 2021-02-28T00:14:01.330374 | 2018-09-01T12:26:29 | 2018-09-01T12:26:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
url = "https://api.skrapp.io/api/v2/find"
headers = {
'host': "api.skrapp.io",
'connection': "keep-alive",
'accept': "*/*",
'origin': "chrome-extension://gklkbifnmojjpmbkojffeamiblineife",
'x-access-key': "1730948352cO9GcaTqbVFTsblIrR6VLpdbGmVsHxJt",
'user-agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.52 Safari/537.36",
'accept-encoding': "gzip, deflate, br",
'accept-language': "en-US,en;q=0.9"
}
domain = 'adroll.com'
fullname = 'Kenny Diep'
querystring = {"fullName":fullname,"domain":domain,"context":"profile","liv":"nli"}
response = requests.request("GET", url, headers=headers, params=querystring, verify = False)
print(response.json())
| UTF-8 | Python | false | false | 765 | py | 333 | temp_sample.py | 333 | 0.67451 | 0.619608 | 0 | 24 | 30.875 | 134 |
skechung/TraffBot | 13,572,096,681,452 | b4246a471f8fbf8e12b214aab0e91b547b2c8686 | d8058fa14df1e3553a334f22d3a27cfa3fb8e9da | /src/server/app/routes/weather.py | cc0bcb5ce31526b2be6342d04fa254f027f04f39 | []
| no_license | https://github.com/skechung/TraffBot | e2eff71ba1cd2746a11ed445575dd8db4a8f3770 | b10d34944eec90b1d8d65d9cb9c9ab34fab5fa6b | refs/heads/main | 2023-04-23T08:38:57.408703 | 2021-05-16T04:51:40 | 2021-05-16T04:51:40 | 360,354,842 | 0 | 0 | null | false | 2021-05-16T04:51:41 | 2021-04-22T01:20:48 | 2021-05-16T03:27:52 | 2021-05-16T04:51:40 | 26,511 | 2 | 5 | 0 | Jupyter Notebook | false | false | from flask import Blueprint, jsonify, request
from config import WEATHER_API_KEY
import requests
weather_blueprint = Blueprint('weather', __name__)
@weather_blueprint.route('/weather', methods=['GET'])
def weather():
"""Fetches weather data for performing predictions
"""
latitude = request.args.get('lat')
longitude = request.args.get('long')
weather_api = f'https://api.openweathermap.org/data/2.5/weather?lat={latitude}&lon={longitude}&appid={WEATHER_API_KEY}&units=imperial'
response = requests.get(weather_api)
data = response.json()
return jsonify(data)
| UTF-8 | Python | false | false | 605 | py | 26 | weather.py | 12 | 0.700826 | 0.697521 | 0 | 19 | 30.842105 | 138 |
Tinsae/Big-Data-Training | 6,098,853,597,316 | 7e24f0eb7b8705cc392f93f758cecc0304bec9d4 | 61adcee473d10f337c1ef60be26c015bef7bacae | /Module 3_ Python Function, OOP, Modules/Case studies/answers/CA1/q (9).py | 255db7398a11d5c7965b106627efe1e66deb2ec1 | []
| no_license | https://github.com/Tinsae/Big-Data-Training | 958e05d00bba79bdd54dc878066bc9b4269f85ba | eef40671ba0d0d7175d7484b1f63498dbf7fbf22 | refs/heads/master | 2020-06-19T07:40:47.938905 | 2019-08-24T20:42:34 | 2019-08-24T20:42:34 | 196,621,366 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #9
rows, cols = input().split(",")
rows = int(rows)
cols = int(cols)
table = [[x * y for y in range(cols)] for x in range(rows)]
print(table) | UTF-8 | Python | false | false | 141 | py | 64 | q (9).py | 33 | 0.624113 | 0.617021 | 0 | 6 | 22.666667 | 59 |
ChenyangTang/bark-ml | 15,925,738,736,996 | ebb6fc308796c22d7766cc59587dd2fc2cb84ce3 | 022f7fd0cb1687bab60d536b411de3405b84ef38 | /configurations/sac_highway_uniform/configuration.py | 4db6b101f1a38d474b878b52293c5ad743fb0845 | [
"MIT"
]
| permissive | https://github.com/ChenyangTang/bark-ml | c73e66145ec1798d151dda06a6de69375a763a19 | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | refs/heads/master | 2020-08-04T06:56:48.425173 | 2020-02-10T15:50:41 | 2020-02-10T15:50:41 | 212,046,188 | 0 | 0 | MIT | true | 2019-10-01T08:23:37 | 2019-10-01T08:23:37 | 2019-09-23T21:36:39 | 2019-09-24T08:41:03 | 222 | 0 | 0 | 0 | null | false | false | from absl import app
from absl import flags
import tensorflow as tf
from tf_agents.environments import tf_py_environment
import sys
sys.path.append("/home/chenyang/bark/modules/runtime/scenario/scenario_generation")
from modules.runtime.scenario.scenario_generation.uniform_vehicle_distribution \
import UniformVehicleDistribution
from modules.runtime.scenario.scenario_generation.deterministic \
import DeterministicScenarioGeneration
from modules.runtime.commons.parameters import ParameterServer
from modules.runtime.viewer.matplotlib_viewer import MPViewer
from modules.runtime.viewer.video_renderer import VideoRenderer
from src.rl_runtime import RuntimeRL
from src.observers.nearest_state_observer import ClosestAgentsObserver
# from src.observers.graph_observer import GraphObserver
from src.wrappers.dynamic_model import DynamicModel
from src.wrappers.tfa_wrapper import TFAWrapper
from src.evaluators.goal_reached import GoalReached
from src.agents.sac_agent import SACAgent
from src.runners.sac_runner import SACRunner
from configurations.base_configuration import BaseConfiguration
# configuration specific evaluator
from configurations.sac_highway_uniform.custom_evaluator import CustomEvaluator
from custom_observer import CustomObserver
FLAGS = flags.FLAGS
flags.DEFINE_enum('mode',
'visualize',
['train', 'visualize', 'evaluate'],
'Mode the configuration should be executed in.')
class SACHighwayConfiguration(BaseConfiguration):
"""Hermetic and reproducible configuration class
"""
def __init__(self,
params):
BaseConfiguration.__init__(
self,
params)
def _build_configuration(self):
"""Builds a configuration using an SAC agent
"""
self._scenario_generator = \
UniformVehicleDistribution(num_scenarios=20,
random_seed=0,
params=self._params)
self._observer = CustomObserver(params=self._params)
self._behavior_model = DynamicModel(params=self._params)
self._evaluator = CustomEvaluator(params=self._params)
self._viewer = MPViewer(params=self._params,
x_range=[-30,30],
y_range=[-60,20],
follow_agent_id=True)
#self._viewer = VideoRenderer(renderer=viewer, world_step_time=0.2)
self._runtime = RuntimeRL(action_wrapper=self._behavior_model,
observer=self._observer,
evaluator=self._evaluator,
step_time=0.2,
viewer=self._viewer,
scenario_generator=self._scenario_generator)
tfa_env = tf_py_environment.TFPyEnvironment(TFAWrapper(self._runtime))
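    # Two SAC agents share the same wrapped environment; both are handed to the runner below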
self._agent_0 = SACAgent(tfa_env, params=self._params)
self._agent_1 = SACAgent(tfa_env, params=self._params)
self._runner = SACRunner(tfa_env,
[self._agent_0, self._agent_1],
params=self._params,
unwrapped_runtime=self._runtime)
def run_configuration(argv):
params = ParameterServer(filename="configurations/sac_highway_uniform/config.json")
configuration = SACHighwayConfiguration(params)
if FLAGS.mode == 'train':
configuration._runner.setup_writer()
configuration.train()
elif FLAGS.mode == 'visualize':
configuration.visualize(10)
elif FLAGS.mode == 'evaluate':
configuration.evaluate()
if __name__ == '__main__':
app.run(run_configuration) | UTF-8 | Python | false | false | 3,606 | py | 33 | configuration.py | 23 | 0.671936 | 0.666112 | 0 | 93 | 37.784946 | 85 |
dzx1026/yzj | 3,204,045,652,868 | 831184a4ef50b37ec2ce8e96613da3ee005b9eaf | 21ad86c915d8a6a5b066a8c52cff1cb828427f91 | /Common.py | d1e67f1e26674427a193431a0c172065d6b47dc3 | []
| no_license | https://github.com/dzx1026/yzj | b761ae7f1dcbbc848bf8d20a55d52bcc932f5b5d | 3298848a15748d442a6df7c67dbaca262b258d52 | refs/heads/master | 2020-05-07T16:52:10.477293 | 2019-04-11T03:05:32 | 2019-04-11T03:05:32 | 180,703,087 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Common variables shared across the project
'''
HOST = 'https://mo.hnrbi.com/'
EID = '15854738'
# Used by the light app
APPID = '500102236'
APPID_SECRET = '2JNjyAIMoD4FZFh6Ubkd'
# Key for fetching personnel information
MODIFYPERSON_SECRET='AxWP9zTxG95WlOYIAYWjSsKbsfEmUNrr'
# Parameters used by the 报表秀秀 report tool
REPORT_APPID='SP15854738'
REPORT_SECRET='sSbvXTG4BffiHIzexc18YzAJPrvoRQ'
AESKEY='f7E1eaTKGluPd7Ty'
| UTF-8 | Python | false | false | 368 | py | 7 | Common.py | 6 | 0.780645 | 0.66129 | 0 | 16 | 18.3125 | 54 |
shashman95/archive | 2,207,613,233,810 | f4507f3ea2966d161208f8234425320828e5096d | 69963c3bb4776940852d6cb2a9dc555a25ffeff7 | /pyweb/src/pyweb/core/request.py | 914b0eb000fecb27f66aca9be07f511b082d5d4f | []
| no_license | https://github.com/shashman95/archive | 5e7ea033169f873482c20602d7f77b522ef056e2 | f043c08e32393725b8d9c50a1ecf24486527a578 | refs/heads/master | 2021-01-17T23:34:42.779622 | 2012-08-19T20:23:39 | 2012-08-19T20:23:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from http.client import HTTPMessage
from urllib.parse import urlparse, parse_qs
class Headers(HTTPMessage):
CGI_HEADERS = ('CONTENT_TYPE', 'CONTENT_LENGTH')
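    # Header names are rebuilt from WSGI environ keys, e.g. 'HTTP_USER_AGENT' -> 'User-Agent'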
def __init__(self, environ):
super().__init__()
self.environ = environ
for key, value in self.environ.items():
if key.startswith('HTTP_'):
self['-'.join([part.capitalize() for part in key[5:].split('_')])] = value
elif key in self.CGI_HEADERS:
                self['-'.join([part.capitalize() for part in key.split('_')])] = value
class Request():
def __init__(self, environ, application):
self.environ = environ
self.application = application
self.headers = Headers(environ)
self.path = self.environ.get('PATH_INFO', '/')
self.query = parse_qs(self.environ.get('QUERY_STRING', ''))
self.method = self.environ.get('REQUEST_METHOD', 'GET').upper()
| UTF-8 | Python | false | false | 1,595 | py | 24 | request.py | 15 | 0.659561 | 0.657053 | 0 | 37 | 42.108108 | 90 |
eunsu-park/solar_magnetogram_denoising | 9,869,834,861,160 | a5770f726f0215911544a7258af1f65e9d82e86f | f06d6fa5ea0a8c3ab2fd5b56d10f02fd3ae2a15e | /OtherVersions/Diffusion/utils/others.py | 555fa7fea35ba50ebd0dafebc98f96fa5c33cd0e | [
"MIT"
]
| permissive | https://github.com/eunsu-park/solar_magnetogram_denoising | d7ef6fd665325a6cd372b6953d5bad367ab2f697 | 09689b2ca2b97cc987937075a5800d0eeaab3953 | refs/heads/master | 2023-04-14T01:14:15.482608 | 2023-03-20T01:50:56 | 2023-03-20T01:50:56 | 204,967,204 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch.nn as nn
def get_loss_function(loss_type):
if loss_type == "l1" :
loss_function = nn.L1Loss()
elif loss_type == "l2" :
loss_function = nn.MSELoss()
elif loss_type == "huber":
loss_function = nn.HuberLoss()
else :
raise NotImplementedError()
return loss_function | UTF-8 | Python | false | false | 340 | py | 31 | others.py | 27 | 0.576471 | 0.567647 | 0 | 13 | 24.307692 | 38 |
iamthedkr/AIVN-Course-2019 | 6,193,342,875,581 | 8db631485f6290d5a78649943dc4ee2ad33ad5e9 | 621ab137d9dff34932360a9b18069bb1e3e96b13 | /Week6/1. Đọc và xử lý dữ liệu dùng Numpy/2.Iris(text-data)/2.Iris.py | d9d24c6b4aef6f208f6f8a90ca8722509b2cd10b | []
| no_license | https://github.com/iamthedkr/AIVN-Course-2019 | edfc3c8e300c688a2f9cb97aa71f8906e69c0243 | 61bd8877d92a6a867f45e9d4b68e45d199af080d | refs/heads/master | 2020-09-14T16:01:54.159863 | 2019-11-17T10:13:06 | 2019-11-17T10:13:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import numpy.core.defchararray as np_f
# The Iris dataset stores the species column as strings, so it has to be converted to numbers
# Extract the features and store them in X
X = np.genfromtxt('iris.csv', delimiter=',', dtype='float', usecols=[0, 1, 2, 3], skip_header=1)
print(X.shape)
# Extract the species column and store it in y
y = np.genfromtxt('iris.csv', delimiter=',', dtype='str', usecols=4, skip_header=1)
# replace the strings with numbers
# Use np.unique() to get the unique string values in a numpy array
categories = np.unique(y)
print(categories)
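# e.g. with the usual Iris labels this prints something like ['setosa' 'versicolor' 'virginica'],
# which are then encoded below as 0, 1, 2 (the exact strings depend on the CSV)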
for i in range(categories.size):
    # np_f.replace() substitutes the string values
y = np_f.replace(y, categories[i], str(i))
# convert to float
y = y.astype('float')
print(y)
| UTF-8 | Python | false | false | 833 | py | 122 | 2.Iris.py | 64 | 0.692308 | 0.682861 | 0 | 24 | 29.875 | 96 |
mike-lloyd03/miCRM_API | 17,377,437,716,953 | 4fcf81fa7e8519bd52db171c886a184de6eea342 | e3bac6ba8448d20f3630e685f21589dfa3600453 | /app/routes.py | 4b063971da751cc45fe4e0a227fedd9ea2634376 | []
| no_license | https://github.com/mike-lloyd03/miCRM_API | a378530a03028768ed84a5161824029ed3d708a3 | 086ef68f1dd24a471eefad0ac47f18cf7d695899 | refs/heads/master | 2022-12-14T07:19:10.156611 | 2020-08-26T07:51:07 | 2020-08-26T07:51:07 | 289,204,608 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import request
from app import app
from app.models import Contact
from app import db
@app.route('/')
def index():
return 'This is a functional app!'
@app.route('/create-contact', methods=['GET'])
def create_contact():
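    # Example request (illustrative values): GET /create-contact?first-name=Ada&last-name=Lovelace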
new_contact = Contact(
first_name=request.args.get('first-name'),
last_name=request.args.get('last-name')
)
db.session.add(new_contact)
db.session.commit()
return request.args.get('first-name') + request.args.get('last-name')
@app.route('/show-db')
def show_db():
all_contacts = Contact.query.all()
    print(all_contacts)
return str(all_contacts)
| UTF-8 | Python | false | false | 630 | py | 10 | routes.py | 9 | 0.666667 | 0.666667 | 0 | 24 | 25.25 | 73 |
Mubai-Liu/Machine_Personality | 19,378,892,465,546 | 31fe31bf20cd5c5a04a4f2c8e7bc87029a0cc650 | ca4319c9db6a9a59db778b85788a80099b62849b | /Gamedata.py | 7ca17f2a4117489e4c700b392a2f1433d9b0ab28 | []
| no_license | https://github.com/Mubai-Liu/Machine_Personality | 0272c1883325e2aa52fd3348703818bdd595ad68 | e1e526031d264e5479920bf93c86569d6cadfe0b | refs/heads/master | 2021-01-09T00:14:02.660970 | 2020-02-28T17:27:34 | 2020-02-28T17:27:34 | 242,185,162 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import csv
from time import sleep
from random import randint
import dota2api
# csv file name
filename = "Dota_buff.csv"
api = dota2api.Initialise("54699EC1DAEC662D994DAADE57148354", raw_mode = True)
rows = []
# reading csv file
with open(filename, 'r') as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
# extracting field names through first row
#fields = csvreader.next()
# extracting each data row one by one
for row in csvreader:
rows.append(row)
# get total number of rows
print("Total no. of rows: %d"%(csvreader.line_num))
def get_match(match_id):
match = pd.DataFrame()
try:
match = api.get_match_details(match_id)
except:
pass
return match
def get_history(id):
match_hist = pd.DataFrame()
try:
match_hist = api.get_match_history(id)
except:
pass
return match_hist
url = 'https://steamcommunity.com/app/570/discussions/'
matches = pd.DataFrame()
history = pd.DataFrame()
Id = pd.DataFrame(rows)[1]
Id = Id.to_list()
Id = Id[1:]
for item in Id:
history = history.append(pd.DataFrame(get_history(item)))
# Player history file
history.to_csv("player_history.csv")
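# Each element of history['matches'] is a dict from the API; pull out its match_id (default 0 if missing)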
match_id =[x.get('match_id',0) for x in history['matches']]
for i in match_id:
matches = matches.append(get_match(i), ignore_index = True)
matches.to_csv("match_details.csv")
player_details = pd.DataFrame()
for i in range(10):
player_details = player_details.append(pd.DataFrame(matches.players.to_list())[i].apply(pd.Series))
player_details.to_csv("player_match_details.csv")
# get_history(Id[1])
# pd.DataFrame(api.get_match_history(76561198140336085))
# !pip install -U dota2
# import dota2
# dota.request_match_details(match_id)
| UTF-8 | Python | false | false | 1,902 | py | 11 | Gamedata.py | 3 | 0.679811 | 0.653523 | 0 | 85 | 21.364706 | 103 |
firezdog/CodingDojoPython | 14,955,076,132,237 | 873d6c4a5f239698e973e8597b8bbe25c0702701 | b77ceed7e5b54783452b681aedcc3364bef0a755 | /PythonFundamentals/drawStars.py | 36422bb2f0ec25f509df7e4c358fc8c29a948b55 | []
| no_license | https://github.com/firezdog/CodingDojoPython | 05ff37f733881cd967d6463ca3b11acd0f8781c5 | 8f3f8387cd4b5027752e78900fdbd4a77b522f02 | refs/heads/master | 2021-04-26T23:36:59.505275 | 2018-03-11T03:22:25 | 2018-03-11T03:22:25 | 123,827,779 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def drawStars(lst):
i = 0
while i < len(lst):
if type(lst[i]) == str:
i += 1
continue
if i + 1 < len(lst) and type(lst[i+1]) == str:
char = lst[i+1].lower()[0]
else:
char = "*"
j = 0
string = ""
while j < lst[i]:
string += char
j += 1
        print(string)
i += 1
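# Expected output for the call below: "tttt", "m", "*****", "jjjjjjj"
# (a number is drawn with the lowercased first letter of the following name, or "*" if none follows)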
drawStars([4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"]) | UTF-8 | Python | false | false | 474 | py | 27 | drawStars.py | 21 | 0.371308 | 0.343882 | 0 | 19 | 23.052632 | 56 |
kuThang/py100 | 16,183,436,773,069 | a2bb7a535c597fa06d861030d0e1627e258d8d6f | 2356a6ce91b7de5134117675f2714f166c7143c9 | /d7_binary_addition.py | d2f261cebf13a9518c2a89d599582620a32ef1da | [
"Apache-2.0"
]
| permissive | https://github.com/kuThang/py100 | 89029dd4ede3900f55374014fdd82bcc64f29b4f | d808b8d8de7a97c4c00fa20e35a8bab1c56bd966 | refs/heads/master | 2020-05-23T01:58:01.344550 | 2019-07-16T00:07:47 | 2019-07-16T00:07:47 | 186,593,259 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import zip_longest
T = [(0,0), (1,0), (1,0), (0,1),
(1,0), (0,1), (0,1), (1,1)]
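# T[carry*4 + a*2 + b] = (sum bit, carry out) for input bits a, b and an incoming carry bit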
def add1(x,y):
x = list(map(int, reversed(x)))
y = list(map(int, reversed(y)))
sum=[]
print(x)
print(y)
v, t = T[0]
for a, b in zip_longest(x, y, fillvalue=0):
v, t = T[t * 4 + a * 2 + b]
sum.append(v)
sum.append(T[t * 4][0])
print(sum)
return ''.join(list(map(str, reversed(sum))))
print(add1('10100100100111', '100100011011'))
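# State-machine variant: each state v<bit>c<carry> is a (output bit, transition dict) pair,
# so feeding the next pair of input bits jumps straight to the state that emits the right sum bit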
v0c0 = 0, {}
v1c0 = 1, {}
v0c1 = 0, {}
v1c1 = 1, {}
no_carry_update = {(0,0): v0c0, (0,1): v1c0, (1,0): v1c0, (1,1): v0c1}
carry_update = {(0,0): v1c0, (0,1): v0c1, (1,0): v0c1, (1,1): v1c1}
v0c0[1].update(no_carry_update)
v1c0[1].update(no_carry_update)
v0c1[1].update(carry_update)
v1c1[1].update(carry_update)
def add2(x, y):
x = list(map(int, reversed(x)))
y = list(map(int, reversed(y)))
sum=[]
print(x)
print(y)
value, transition = v0c0
for a, b in zip_longest(x, y, fillvalue=0):
value, transition = transition[a, b]
sum.append(value)
sum.append(transition[0, 0][0])
print(sum)
return ''.join(list(map(str, reversed(sum))))
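# Note: both versions append the final carry bit, so the result may have a leading '0' when there is no overflow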
print(add2('10100100100111', '100100011011')) | UTF-8 | Python | false | false | 1,228 | py | 16 | d7_binary_addition.py | 13 | 0.55456 | 0.440554 | 0 | 51 | 23.098039 | 70 |
Bagnis-Gabriele/Robotic-hand | 14,912,126,469,766 | d584fe310b30a69dd518d60fd9ddb776ff3830db | e07f790e0b0b31f37a33af6cf2d57c2acd7fd8f9 | /definitive_code/muse/program.py | 0c2e7290e2ca70b533ac82044d64ca22e0ca468b | []
| no_license | https://github.com/Bagnis-Gabriele/Robotic-hand | 77e0b29a2cc95d10542e369926051d81980c0327 | c5a54e5ac765885785222880ed1c9276c67e3079 | refs/heads/master | 2021-06-30T18:25:34.037343 | 2021-05-13T08:52:11 | 2021-05-13T08:52:11 | 232,750,125 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
BAGNIS GABRIELE
PCTO project of ITIS MARIO DELPOZZO CUNEO
"""
import numpy as np
import pandas as pd
from pylsl import StreamInlet, resolve_byprop
import time, utils, csv, winsound
import login_user as user
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
class Band:
Delta = 0
Theta = 1
Alpha = 2
Beta = 3
BUFFER_LENGTH = 5
EPOCH_LENGTH = 1
OVERLAP_LENGTH = 0.8
SHIFT_LENGTH = EPOCH_LENGTH - OVERLAP_LENGTH
INDEX_CHANNEL = [0]
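# Channel 0 is assumed to be the first EEG channel in the Muse LSL stream (typically TP9); adjust if the stream differs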
def readData(eeg_buffer, filter_state, n_win_test, band_buffer):
""" ACQUIRE DATA """
eeg_data, timestamp = inlet.pull_chunk(timeout=1, max_samples=int(SHIFT_LENGTH * fs))
ch_data = np.array(eeg_data)[:, INDEX_CHANNEL]
eeg_buffer, filter_state = utils.update_buffer(eeg_buffer, ch_data, notch=True, filter_state=filter_state)
""" COMPUTE BAND POWERS """
data_epoch = utils.get_last_data(eeg_buffer, EPOCH_LENGTH * fs)
band_powers = utils.compute_band_powers(data_epoch, fs)
return band_powers
def execute(eeg_buffer, filter_state, n_win_test, band_buffer, fileCSV):
data = pd.read_csv(fileCSV)
x = data[['delta','theta','alpha','beta']] #INPUT
y = data['mano']
print("Numero di campioni totali: ",x.shape[0])
model = MLPClassifier(hidden_layer_sizes=(100,100), random_state=1, max_iter=300)
model.fit(x,y)
while (True):
sensor=readData(eeg_buffer, filter_state, n_win_test, band_buffer)
print(model.predict([sensor]))
if __name__ == "__main__":
""" CONNECT TO EEG STREAM """
print('Looking for an EEG stream...')
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
raise RuntimeError('Can\'t find EEG stream.')
print("Start acquiring data")
inlet = StreamInlet(streams[0], max_chunklen=12)
eeg_time_correction = inlet.time_correction()
info = inlet.info()
description = info.desc()
fs = int(info.nominal_srate())
""" INITIALIZE BUFFERS """
eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))
filter_state = None # for use with the notch filter
n_win_test = int(np.floor((BUFFER_LENGTH - EPOCH_LENGTH) / SHIFT_LENGTH + 1))
band_buffer = np.zeros((n_win_test, 4))
""" USE """
nome = user.login()
print("UTENTE SELEZIONATO: " + nome)
fileCSV = "utenti\\" + nome + ".csv"
execute(eeg_buffer, filter_state, n_win_test, band_buffer, fileCSV)
| UTF-8 | Python | false | false | 2,450 | py | 28 | program.py | 20 | 0.655918 | 0.644082 | 0 | 76 | 31.157895 | 110 |
WayupKG/Online_Reception | 3,032,246,936,769 | 4053e1a3187ec6424c0716dc3c049301dd783ec4 | e5a51d6f51a0bc1c21ec1bfa833b7e604a888b75 | /Home/admin.py | eef90cbe387ee337cb72a5565d51bd33893ecc0d | []
| no_license | https://github.com/WayupKG/Online_Reception | 6ac9342a826749981ac908275bc43842d5cdf3df | 56fa3167da966df974439e44c19af08ab5cfdff8 | refs/heads/master | 2023-07-29T01:43:24.055123 | 2020-05-19T14:15:09 | 2020-05-19T14:15:09 | 265,198,745 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Specialty
@admin.register(Specialty)
class SpecialtyAdmin(admin.ModelAdmin):
list_display = ('title', 'publish') | UTF-8 | Python | false | false | 171 | py | 43 | admin.py | 17 | 0.77193 | 0.77193 | 0 | 7 | 23.571429 | 39 |
mrahjoo/Solar-for-Industry-Process-Heat | 1,829,656,112,391 | 17652e0da4917a98edb0aaa6e03aa2e1c762da8d | 5db5bd1bcdfb3d8e7d221bd2d47e29138f0ba3e9 | /Results analysis/tech_opp/extra_elec_emissions.py | d602ca58f101d76aec0059581ce95135a3149b05 | []
| no_license | https://github.com/mrahjoo/Solar-for-Industry-Process-Heat | c848c979db43e8d36ce740f43fcfd25074d06e89 | 84de5160779c5d9727e87d3a44a18c83986650b6 | refs/heads/master | 2023-03-28T08:13:33.207324 | 2021-03-05T05:02:13 | 2021-03-05T05:02:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
class electricity():
"""
Class containing methods used to calculate electricity emission factors
(MTCO2e/MWh) by county from EPA eGRID data.
"""
def __init__(self):
self.data_wd = '../tech_opportunity_analysis/calculation_data'
self.fips_to_zip_file = os.path.join(self.data_wd,
'COUNTY_ZIP_032014.csv')
self.e_grid_file = os.path.join(
self.data_wd, 'power_profiler_zipcode_tool_2014_v7.1_1.xlsx'
)
def format_egrid_data(self):
"""
"""
zip_sub_region_dict = pd.read_excel(
self.e_grid_file, sheet_name=['Zip-subregion',
'Zip_multiple_Subregions']
)
zip_sub_region = pd.concat(
[zip_sub_region_dict[k] for k in zip_sub_region_dict.keys()],
axis=0, ignore_index=True
)
zip_sub_region = pd.melt(zip_sub_region,
id_vars=['ZIP (character)', 'ZIP (numeric)',
'state'], value_name='SUBRGN')
return zip_sub_region
def import_resource_mix(self):
# Extracted from egrid summary tables for 2014 from
# https://www.epa.gov/sites/production/files/2020-01/
# egrid2018_historical_files_since_1996.zip
resource_mix_file = os.path.join(self.data_wd,
'egrid2014_resource_mix.csv')
resource_mix = pd.read_csv(resource_mix_file)
return resource_mix
def import_fips_zips(self):
"""
Returns
-------
fips_zips : dataframe
Mapping of county FIPS codes to Zip codes.
"""
fips_zips = pd.read_csv(self.fips_to_zip_file)
fips_zips.set_index('ZIP', inplace=True)
return fips_zips
def calculate_plant_heatrate(self):
"""
Returns
-------
plant_hr : dataframe
Plant-level data from 2014 eGRID.
"""
# Plant-level heat rate data for estimating weighted average heat rate
# by fuel by eGrid subregion
plant_hr = pd.read_csv(
os.path.join(self.data_wd, 'egrid2014_plant_data.csv'),
usecols=['ORISPL', 'SUBRGN', 'PLFUELCT', 'PLHTRT', 'PLNGENAN']
).dropna()
# Drop negative heat rates
plant_hr = plant_hr.where(plant_hr.PLHTRT > 0).dropna()
return plant_hr
def calc_egrid_emissions_resource_mix(self):
"""
Estimate county grid emissions factor (metric tons CO2e per MWh),
resource mix, heat rate (in MMBtu/MWh), and losses as the mean of
zip code egrid data.
Returns
-------
county_ef_rm : dataframe
Dataframe of of county heat rates (in MMBtu/MWh) by fuel type
"""
subregions = self.format_egrid_data()
subregion_hr = self.calculate_plant_heatrate()
resource_mix = self.import_resource_mix()
fips_zips = self.import_fips_zips()
# Estimate weighted-average heat rate (in by subregion and fuel using
# plant nominal heat rate and annual net generation.
subregion_hr['PLFUELCT'] = subregion_hr.PLFUELCT.apply(
lambda x: x.capitalize()+'_hr'
)
subregion_hr.replace({'Gas_hr': 'Natural_gas_hr',
'Othf_hr': 'Other_hr',
'Ofsl_hr': 'Other_fossil_hr'}, inplace=True)
# Calculate Generation-weighted heat rate by subregion.
# PLHTRT in Btu/kWh
subregion_hr = pd.DataFrame(
subregion_hr.groupby(['SUBRGN', 'PLFUELCT']).apply(
lambda x: np.average(x['PLHTRT'], weights=x['PLNGENAN'])
)
)
# Convert to MMBtu/MWh
subregion_hr = subregion_hr*1000/10**6
subregion_hr.reset_index(inplace=True)
subregion_hr = subregion_hr.pivot(index='SUBRGN', columns='PLFUELCT',
values=0)
hr_cols = subregion_hr.columns
egrid_ef = pd.read_excel(self.e_grid_file,
sheet_name='eGRID Subregion Emission Factor',
skiprows=[0, 1, 2])
subregions_ef_rm = pd.merge(subregions, egrid_ef, on=['SUBRGN'],
how='inner')
subregions_ef_rm = pd.merge(subregions_ef_rm, resource_mix,
on=['SUBRGN'], how='left')
subregions_ef_rm = pd.merge(subregions_ef_rm, subregion_hr,
left_on=['SUBRGN'], right_index=True,
how='left')
new_cols = list(set(['SRCO2RTA', 'SRCH4RTA', 'SRN2ORTA']).union(
resource_mix.columns[1:], subregion_hr.columns
))
subregions_ef_rm = subregions_ef_rm.groupby(
['ZIP (numeric)'], as_index=False
)[new_cols].mean()
subregions_ef_rm.rename(columns={'ZIP (numeric)': 'ZIP'}, inplace=True)
# Convert emissions fractors from lb/MWh to metric tons CO2e per MWh
# (MTCO2e/MWh)
subregions_ef_rm.loc[:, 'MTCO2e_per_MWh'] = (subregions_ef_rm.SRCO2RTA + subregions_ef_rm.SRCH4RTA * 25 +
subregions_ef_rm.SRN2ORTA * 298) * (0.453592 / 10**3)
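        # CH4 and N2O are weighted by their 100-year GWPs (25 and 298); 0.453592/10**3 converts lb to metric tons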
subregions_ef_rm = subregions_ef_rm.set_index('ZIP').join(fips_zips,
how='left')
final_cols = list(set([
'grid_losses', 'MTCO2e_per_MWh', 'Natural_gas', 'Coal', 'Oil',
'Other_fossil', 'Solar', 'Biomass', 'Other', 'Hydro', 'Wind',
'Nuclear', 'Geothermal'
]).union(hr_cols))
county_ef_rm = subregions_ef_rm.reset_index().groupby(
'COUNTY_FIPS', as_index=False
)[final_cols].mean()
county_ef_rm['COUNTY_FIPS'] = county_ef_rm.COUNTY_FIPS.astype('int')
county_ef_rm['MECS_FT'] = 'Net_electricity'
return county_ef_rm
@staticmethod
def calculate_elect_ghgs(county_ef_rm, county_electricity):
"""
Calculate
Parameters
----------
county_electricity : dataframe
Returns
-------
county_elect_ghgs : dataframe
"""
county_elect_ghgs = pd.merge(
county_ef_rm[['MTCO2e_per_MWh', 'COUNTY_FIPS']],
county_electricity, on='COUNTY_FIPS', how='left'
)
county_elect_ghgs.loc[:, 'MTCO2e'] = county_elect_ghgs.MMBtu.multiply(
county_elect_ghgs.MTCO2e_per_MWh * 0.293297222
)
return county_elect_ghgs
@staticmethod
def calculate_elect_fuel(county_ef_rm, county_elect_use):
"""
Calculate the fossil fuel used for grid electricity to meet county
demand, inclusive of grid losses.
Parameters
----------
county_ef_rm : dataframe
County weighted average heat rate by generator type. Calculated
from EPA eGRID data by the `calc_egrid_emissions_resource_mix`
method.
Returns
-------
county_elect_fuel : dataframe
MMBtu of fossil fuels used by grid electricity to meet county
electricity use, defined by NAICS code and employment size class
"""
# Heat rate in MMBtu/MWh
county_elect_fuel = pd.merge(
county_ef_rm[['COUNTY_FIPS', 'Coal_hr', 'Other_hr', 'grid_losses',
'Other_fossil_hr', 'Oil_hr', 'Natural_gas_hr']],
county_elect_use, on='COUNTY_FIPS', how='left'
)
# Account for grid losses
electricity_fuel = county_elect_fuel.MMBtu.multiply(
county_elect_fuel.grid_losses+1
)
# Multiply by heat rate, converting from MMBtu/MWh to MMBtu/MMBtu
electricity_fuel = county_elect_fuel[['Coal_hr', 'Other_hr', 'Other_fossil_hr',
'Oil_hr', 'Natural_gas_hr']].multiply(
electricity_fuel, axis=0
) * 0.293297222 # unit conversion
county_elect_fuel.update(electricity_fuel, overwrite=True)
        county_elect_fuel.rename(columns={
            'Coal_hr': 'Coal_MMBtu', 'Other_hr': 'Other_MMBtu',
            'Other_fossil_hr': 'Other_fossil_MMBtu', 'Oil_hr': 'Oil_MMBtu',
            'Natural_gas_hr': 'Natural_gas_MMBtu'
        }, inplace=True)
county_elect_fuel.drop(['grid_losses', 'fips_matching',
'MECS_NAICS_dummies', 'est_count',
'MECS_NAICS', 'MECS_NAICS_dummies_mecs',
'fipstate', 'fipscty'], axis=1, inplace=True)
return county_elect_fuel
#from calc_elect_ghgs import electricity
electricity_methods = electricity()
county_electricity_use = pd.read_csv('county_elec_estimates.csv.gzip', compression='gzip')
resource_mix = electricity_methods.calc_egrid_emissions_resource_mix()
electricity_fossil_fuel = electricity_methods.calculate_elect_fuel(resource_mix, county_electricity_use)
# get the breakdown of fuel for electricity by county and their heat rates (MMBtu,fuel/MWh,elec) - keep only carbon fuels
fuel_mix = resource_mix.drop(columns=['Nuclear_hr','Hydro','Geothermal','Wind_hr',
'Other','Solar_hr','Wind','Hydro_hr','Solar',
'Nuclear','Other_hr'])
fuel_mix.rename(columns={'Natural_gas':'Natural_gas_mix', 'Coal':'Coal_mix',
'Oil':'Oil_mix','Biomass':'Biomass_mix','Other_fossil':'Other_fossil_mix'},inplace=True)
# read in tech opp files, change h5 files to dfs
from Results_techopp import techopp
files = techopp().tech_opp_files
swh, lf, ptc_notes, ptc_tes, eboiler, res, whrhp = techopp.read_h5(files, 'winter') # or summer
eboiler= techopp.h5_to_df(eboiler, 'ophours_high') #ophours_low or _mean
res= techopp.h5_to_df(res, 'ophours_high')
whrhp= techopp.h5_to_df(whrhp, 'ophours_high')
def get_extra_elec(sol_tech_df, fuel_mix_df):
# add a column for extra electricity (MWh) needed when hourly load is not met by PV
for i in list(sol_tech_df.index.values):
sol_tech_df.loc[i,'extra_elec'] = ((1-np.where(sol_tech_df.tech_opp[i]>=1, 1, sol_tech_df.tech_opp[i]))*
sol_tech_df.total_load[i]).sum()
# merge county and extra electricity with the grid mix of fuel and fuel heat rates (MMBtu/MWh)
sol_tech_elec = pd.merge(sol_tech_df[['COUNTY_FIPS','extra_elec']],fuel_mix_df,on='COUNTY_FIPS',how='left')
sol_tech_elec.fillna(value=0,inplace=True)
# calculate the amount of fuel required to provide the extra electricity for each cty based on grid mix
sol_tech_elec.loc[:,'extra_fuels'] = sol_tech_elec['extra_elec']*(
(sol_tech_elec['Natural_gas_mix']*sol_tech_elec['Natural_gas_hr'])+
(sol_tech_elec['Coal_mix']*sol_tech_elec['Coal_hr'])+
(sol_tech_elec['Oil_mix']*sol_tech_elec['Oil_hr'])+
(sol_tech_elec['Biomass_mix']*sol_tech_elec['Biomass_hr'])+
(sol_tech_elec['Other_fossil_mix']*sol_tech_elec['Other_fossil_hr']))
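    # extra electricity (MWh) times the mix-weighted heat rate (MMBtu/MWh) gives the fuel input in MMBtu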
return sol_tech_elec
eboiler_elec = get_extra_elec(eboiler,fuel_mix)
res_elec = get_extra_elec(res,fuel_mix)
whrhp_elec = get_extra_elec(whrhp,fuel_mix)
print(eboiler_elec.extra_fuels.sum()/(10**6))  # TBtu
print(res_elec.extra_fuels.sum()/(10**6))  # TBtu
print(whrhp_elec.extra_fuels.sum()/(10**6))  # TBtu
| UTF-8 | Python | false | false | 11,736 | py | 167 | extra_elec_emissions.py | 82 | 0.560838 | 0.549761 | 0 | 324 | 35.216049 | 125 |
kishipro/EmptyClassroomMap | 12,893,491,848,508 | 6a855b28f12d169e21a3c87f136ea71989cc7b46 | 826fc320232bb435753ef58b4b24a921f52b602c | /map/migrations/0001_initial.py | d22a95c934f046d784340a18d77367dd09f6cd01 | []
| no_license | https://github.com/kishipro/EmptyClassroomMap | 22826201eb4a4241695fc2116c336b5cdae9fb4e | 13ce3a931049a2be6e7fbd189d4625bf424db2a7 | refs/heads/master | 2020-07-28T16:02:43.703295 | 2019-11-05T12:49:08 | 2019-11-05T12:49:08 | 209,457,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.5 on 2019-09-24 06:42
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Building',
fields=[
('building_id', models.AutoField(primary_key=True, serialize=False)),
('building_name', models.CharField(max_length=255, verbose_name='building_name')),
('building_svg', models.CharField(max_length=255, verbose_name='building_svg')),
],
options={
'verbose_name': '建物',
'verbose_name_plural': '建物一覧',
},
),
migrations.CreateModel(
name='Course',
fields=[
('course_id', models.AutoField(primary_key=True, serialize=False)),
('class_name', models.CharField(max_length=255, verbose_name='授業')),
],
options={
'verbose_name': '科目',
'verbose_name_plural': '科目一覧',
},
),
migrations.CreateModel(
name='Period',
fields=[
('period_id', models.CharField(max_length=1, primary_key=True, serialize=False)),
('start_time', models.TimeField(verbose_name='start_time')),
('end_time', models.TimeField(verbose_name='end_time')),
],
options={
'verbose_name': '時限',
'verbose_name_plural': '時限一覧',
},
),
migrations.CreateModel(
name='Teacher',
fields=[
('teacher_id', models.AutoField(primary_key=True, serialize=False)),
('teacher_name', models.CharField(max_length=255, verbose_name='teacher_name')),
('department', models.IntegerField(choices=[(0, '英語英文学科'), (1, '国際関係学科'), (2, '多文化・国際協力学科'), (3, '数学科'), (4, '情報科学科'), (5, '文学研究科'), (6, '国際関係学研究科'), (7, '理学研究科')], verbose_name='department')),
],
options={
'verbose_name': '教員',
'verbose_name_plural': '教員一覧',
},
),
migrations.CreateModel(
name='Term',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('term_id', models.IntegerField(choices=[(1, '第1ターム'), (2, '第2ターム'), (3, '第3ターム'), (4, '第4ターム')], verbose_name='term')),
('start_day', models.DateTimeField(verbose_name='start_day')),
('end_day', models.DateTimeField(verbose_name='end_day')),
],
options={
'verbose_name': 'ターム',
'verbose_name_plural': 'ターム一覧',
},
),
migrations.CreateModel(
name='Floor',
fields=[
('floor_id', models.AutoField(primary_key=True, serialize=False)),
('foor_name', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)], verbose_name='floor')),
('floor_path', models.CharField(max_length=255, verbose_name='floor_path')),
('building_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Building')),
],
options={
'verbose_name': 'フロア',
'verbose_name_plural': 'フロア一覧',
},
),
migrations.CreateModel(
name='Classroom',
fields=[
('classroom_id', models.AutoField(primary_key=True, serialize=False)),
('classroom_name', models.CharField(max_length=255, verbose_name='classroom_name')),
('classroom_path', models.CharField(max_length=255, verbose_name='classroom_svg')),
('building_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Building')),
('foor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Floor')),
],
options={
'verbose_name': '教室',
'verbose_name_plural': '教室一覧',
},
),
migrations.CreateModel(
name='Class',
fields=[
('class_id', models.AutoField(primary_key=True, serialize=False)),
('year', models.IntegerField(choices=[(2019, 2019)], verbose_name='year')),
('day_name', models.IntegerField(choices=[(0, '日'), (1, '月'), (2, '火'), (3, '水'), (4, '木'), (5, '金'), (6, '土')], verbose_name='day_name')),
('classroom_path', models.CharField(max_length=255)),
('classroom_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Classroom')),
('course_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Course')),
('period_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Period')),
('teacher_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Teacher')),
('term_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Term')),
],
options={
'verbose_name': '授業',
'verbose_name_plural': '授業一覧',
},
),
]
| UTF-8 | Python | false | false | 5,792 | py | 18 | 0001_initial.py | 9 | 0.522916 | 0.509744 | 0 | 121 | 44.801653 | 209 |