hexsha (stringlengths 40–40) | size (int64 1–1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–239) | max_stars_repo_name (stringlengths 5–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–239) | max_issues_repo_name (stringlengths 5–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–239) | max_forks_repo_name (stringlengths 5–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 1–1.03M) | avg_line_length (float64 1–958k) | max_line_length (int64 1–1.03M) | alphanum_fraction (float64 0–1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
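Each row below describes one source file with the columns listed in the header. As a minimal sketch of consuming such rows, assuming this preview comes from a Hugging Face-style dataset (the dataset id "user/python-files" is a placeholder, not taken from this page):

from datasets import load_dataset

# Stream rows so the full dataset never has to fit in memory.
rows = load_dataset("user/python-files", split="train", streaming=True)
for row in rows:
    # Each row carries repo metadata plus the raw file text in "content".
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:80])
    break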
4a1993a7bb57d8770057e855f94ed217a2399335 | 612 | py | Python | exercises/high-scores/example.py | xarxziux/exercism-python | c98184d08cda663267681d18a321d99e68599079 | ["MIT"] | null | null | null | exercises/high-scores/example.py | xarxziux/exercism-python | c98184d08cda663267681d18a321d99e68599079 | ["MIT"] | null | null | null | exercises/high-scores/example.py | xarxziux/exercism-python | c98184d08cda663267681d18a321d99e68599079 | ["MIT"] | null | null | null |
class HighScores(object):
def __init__(self, scores):
self.scores = scores
def latest(self):
return self.scores[-1]
def personal_best(self):
return max(self.scores)
def personal_top(self):
return sorted(self.scores, reverse=True)[:3]
def report(self):
difference = self.personal_best() - self.latest()
result_qualifier = (
"" if difference <= 0 else "{} short of ".format(difference)
)
return "Your latest score was {}. That's {}your personal best!".format(
self.latest(), result_qualifier
)
| 27.818182 | 79 | 0.593137 |
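The three trailing cells of every row (27.818182 | 79 | 0.593137 above) are the derived columns avg_line_length, max_line_length and alphanum_fraction from the header, computed from the content cell. Their exact definitions are not stated in this dump, so the sketch below is only one plausible way to recompute them; in particular, dividing the character count by the line count for avg_line_length is an assumption.

def content_stats(content: str):
    """Sketch of the derived columns; definitions assumed, not confirmed by the dump."""
    lines = content.splitlines() or [""]
    avg_line_length = len(content) / len(lines)           # assumed: total characters / line count
    max_line_length = max(len(line) for line in lines)    # longest single line, newlines excluded
    alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction

Note that this preview strips blank lines from content, so recomputing the statistics from the text as shown here will not exactly reproduce the stored values.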
4a1993c5cdbb235b8723083ca6311d41876ade30 | 1,842 | py | Python | predict.py | emanuel1025/Elo-Ratings-Match-Forecasting | 75428675a29049294a449736ac0340d7e5cc37d1 | ["Apache-2.0"] | null | null | null | predict.py | emanuel1025/Elo-Ratings-Match-Forecasting | 75428675a29049294a449736ac0340d7e5cc37d1 | ["Apache-2.0"] | null | null | null | predict.py | emanuel1025/Elo-Ratings-Match-Forecasting | 75428675a29049294a449736ac0340d7e5cc37d1 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
import numpy as np
from tester import test
from sklearn.ensemble import RandomForestRegressor
validation_path = "validation/features/"
features_path = "training/features/"
labels_path = "training/labels/sum.lab"
results_path = "testing/"
features = []
#features.append( "checkmate_move_done")
#features.append( "is_draw")
features.append( "last_scores")
features.append( "match_len")
features.append( "mean")
features.append( "no_of_checks_scaled")
features.append( "no_of_mistakes_scaled")
features.append( "no_of_piece_taken_scaled")
features.append( "std_points")
features.append( "castle")
features.append( "queen")
features.append( "score_dif")
feature_list = []
for feature in features:
feature_list.append( np.loadtxt( features_path + feature + ".fea"))
X = np.asarray( feature_list[0])
for i in xrange(1, len(feature_list)):
X = np.column_stack( (X, feature_list[i]))
Y = np.loadtxt( labels_path)
regressor = RandomForestRegressor(n_estimators=20)
regressor.fit(X,Y)
training_result = regressor.predict(X)
np.savetxt( results_path + "training", training_result, fmt='%.1f')
#Validation Data
feature_list = []
for feature in features:
feature_list.append( np.loadtxt( validation_path + feature + ".fea"))
X = np.asarray( feature_list[0])
for i in xrange(1, len(feature_list)):
X = np.column_stack( (X, feature_list[i]))
validation_result = regressor.predict(X)
np.savetxt( results_path + "validation", validation_result, fmt='%.1f')
#Errors
training_err = test( labels_path, results_path + "training")
validation_err = test( "validation/labels/sum.lab", results_path + "validation")
print "Training Error"
print training_err
print "Validation Error"
print validation_err
#Feature Importance
for i in xrange( len(features)):
print features[i] + " -> " + str( regressor.feature_importances_[i])
| 25.943662 | 80 | 0.753529 |
4a1993fba4739b0289df86b6e07497a2593ae9b0 | 16,789 | py | Python | util/reggen/register.py | esshiu/opentitan | 72fc87331f4a5350d475d4ebf7556ac72495ab19 | ["Apache-2.0"] | null | null | null | util/reggen/register.py | esshiu/opentitan | 72fc87331f4a5350d475d4ebf7556ac72495ab19 | ["Apache-2.0"] | null | null | null | util/reggen/register.py | esshiu/opentitan | 72fc87331f4a5350d475d4ebf7556ac72495ab19 | ["Apache-2.0"] | null | null | null |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Optional
from .access import SWAccess, HWAccess
from .field import Field
from .lib import (check_keys, check_str, check_name, check_bool,
check_list, check_str_list, check_int)
from .params import ReggenParams
from .reg_base import RegBase
import re
REQUIRED_FIELDS = {
'name': ['s', "name of the register"],
'desc': ['t', "description of the register"],
'fields': ['l', "list of register field description groups"]
}
OPTIONAL_FIELDS = {
'swaccess': [
's',
"software access permission to use for "
"fields that don't specify swaccess"
],
'hwaccess': [
's',
"hardware access permission to use for "
"fields that don't specify hwaccess"
],
'hwext': [
's',
"'true' if the register is stored outside "
"of the register module"
],
'hwqe': [
's',
"'true' if hardware uses 'q' enable signal, "
"which is latched signal of software write pulse."
],
'hwre': [
's',
"'true' if hardware uses 're' signal, "
"which is latched signal of software read pulse."
],
'regwen': [
's',
"if register is write-protected by another register, that "
"register name should be given here. empty-string for no register "
"write protection"
],
'resval': [
'd',
"reset value of full register (default 0)"
],
'tags': [
's',
"tags for the register, following the format 'tag_name:item1:item2...'"
],
'shadowed': [
's',
"'true' if the register is shadowed"
],
'update_err_alert': [
's',
"alert that will be triggered if "
"this shadowed register has update error"
],
'storage_err_alert': [
's',
"alert that will be triggered if "
"this shadowed register has storage error"
]
}
class Register(RegBase):
'''Code representing a register for reggen'''
def __init__(self,
offset: int,
name: str,
desc: str,
hwext: bool,
hwqe: bool,
hwre: bool,
regwen: Optional[str],
tags: List[str],
resval: Optional[int],
shadowed: bool,
fields: List[Field],
update_err_alert: Optional[str],
storage_err_alert: Optional[str]):
super().__init__(offset)
self.name = name
self.desc = desc
self.hwext = hwext
self.hwqe = hwqe
self.hwre = hwre
if self.hwre and not self.hwext:
raise ValueError('The {} register specifies hwre but not hwext.'
.format(self.name))
self.regwen = regwen
self.tags = tags
self.shadowed = shadowed
pattern = r'^[a-z0-9_]+_shadowed(?:_[0-9]+)?'
sounds_shadowy = re.match(pattern, self.name.lower())
if self.shadowed and not sounds_shadowy:
raise ValueError("Register {} has the shadowed flag but its name "
"doesn't end with the _shadowed suffix."
.format(self.name))
elif sounds_shadowy and not self.shadowed:
raise ValueError("Register {} has a name ending in _shadowed, but "
"the shadowed flag is not set."
.format(self.name))
# Take a copy of fields and then sort by bit index
assert fields
self.fields = fields.copy()
self.fields.sort(key=lambda field: field.bits.lsb)
# Index fields by name and check for duplicates
self.name_to_field = {} # type: Dict[str, Field]
for field in self.fields:
if field.name in self.name_to_field:
raise ValueError('Register {} has duplicate fields called {}.'
.format(self.name, field.name))
self.name_to_field[field.name] = field
# Check that fields have compatible access types if we are hwext
if self.hwext:
for field in self.fields:
if field.hwaccess.key == 'hro' and field.sw_readable():
raise ValueError('The {} register has hwext set, but '
'field {} has hro hwaccess and the '
'field value is readable by software '
'mode ({}).'
.format(self.name,
field.name,
field.swaccess.key))
if not self.hwqe and field.sw_writable():
raise ValueError('The {} register has hwext set and field '
'{} is writable by software (mode {}), '
'so the register must also enable hwqe.'
.format(self.name,
field.name,
field.swaccess.key))
# Check that field bits are disjoint
bits_used = 0
for field in self.fields:
field_mask = field.bits.bitmask()
if bits_used & field_mask:
raise ValueError('Register {} has non-disjoint fields: '
'{} uses bits {:#x} used by other fields.'
.format(self.name, field.name,
bits_used & field_mask))
# Compute a reset value and mask from our constituent fields.
self.resval = 0
self.resmask = 0
for field in self.fields:
self.resval |= (field.resval or 0) << field.bits.lsb
self.resmask |= field.bits.bitmask()
# If the register defined a reset value, make sure it matches. We've
# already checked that each field matches, but we still need to make
# sure there weren't any bits unaccounted for.
if resval is not None and self.resval != resval:
raise ValueError('Register {} specifies a reset value of {:#x} but '
'collecting reset values across its fields yields '
'{:#x}.'
.format(self.name, resval, self.resval))
self.update_err_alert = update_err_alert
self.storage_err_alert = storage_err_alert
@staticmethod
def from_raw(reg_width: int,
offset: int,
params: ReggenParams,
raw: object) -> 'Register':
rd = check_keys(raw, 'register',
list(REQUIRED_FIELDS.keys()),
list(OPTIONAL_FIELDS.keys()))
name = check_name(rd['name'], 'name of register')
desc = check_str(rd['desc'], 'desc for {} register'.format(name))
swaccess = SWAccess('{} register'.format(name),
rd.get('swaccess', 'none'))
hwaccess = HWAccess('{} register'.format(name),
rd.get('hwaccess', 'hro'))
hwext = check_bool(rd.get('hwext', False),
'hwext flag for {} register'.format(name))
hwqe = check_bool(rd.get('hwqe', False),
'hwqe flag for {} register'.format(name))
hwre = check_bool(rd.get('hwre', False),
'hwre flag for {} register'.format(name))
raw_regwen = rd.get('regwen', '')
if not raw_regwen:
regwen = None
else:
regwen = check_name(raw_regwen,
'regwen for {} register'.format(name))
tags = check_str_list(rd.get('tags', []),
'tags for {} register'.format(name))
raw_resval = rd.get('resval')
if raw_resval is None:
resval = None
else:
resval = check_int(raw_resval,
'resval for {} register'.format(name))
if not 0 <= resval < (1 << reg_width):
raise ValueError('resval for {} register is {}, '
'not an unsigned {}-bit number.'
.format(name, resval, reg_width))
shadowed = check_bool(rd.get('shadowed', False),
'shadowed flag for {} register'
.format(name))
raw_fields = check_list(rd['fields'],
'fields for {} register'.format(name))
if not raw_fields:
raise ValueError('Register {} has no fields.'.format(name))
fields = [Field.from_raw(name,
idx,
len(raw_fields),
swaccess,
hwaccess,
resval,
reg_width,
params,
rf)
for idx, rf in enumerate(raw_fields)]
raw_uea = rd.get('update_err_alert')
if raw_uea is None:
update_err_alert = None
else:
update_err_alert = check_name(raw_uea,
'update_err_alert for {} register'
.format(name))
raw_sea = rd.get('storage_err_alert')
if raw_sea is None:
storage_err_alert = None
else:
storage_err_alert = check_name(raw_sea,
'storage_err_alert for {} register'
.format(name))
return Register(offset, name, desc,
hwext, hwqe, hwre, regwen,
tags, resval, shadowed, fields,
update_err_alert, storage_err_alert)
def next_offset(self, addrsep: int) -> int:
return self.offset + addrsep
def get_n_bits(self, bittype: List[str]) -> int:
return sum(field.get_n_bits(self.hwext, self.hwqe, self.hwre, bittype)
for field in self.fields)
def get_field_list(self) -> List[Field]:
return self.fields
def is_homogeneous(self) -> bool:
return len(self.fields) == 1
def is_hw_writable(self) -> bool:
'''Returns true if any field in this register can be modified by HW'''
for fld in self.fields:
if fld.hwaccess.allows_write():
return True
return False
def get_width(self) -> int:
'''Get the width of the fields in the register in bits
This counts dead space between and below fields, so it's calculated as
one more than the highest msb.
'''
# self.fields is ordered by (increasing) LSB, so we can find the MSB of
# the register by taking the MSB of the last field.
return 1 + self.fields[-1].bits.msb
def make_multi(self,
reg_width: int,
offset: int,
creg_idx: int,
creg_count: int,
regwen_multi: bool,
compact: bool,
min_reg_idx: int,
max_reg_idx: int,
cname: str) -> 'Register':
'''Generate a numbered, packed version of the register'''
assert 0 <= creg_idx < creg_count
assert 0 <= min_reg_idx <= max_reg_idx
assert compact or (min_reg_idx == max_reg_idx)
new_name = ('{}_{}'.format(self.name, creg_idx)
if creg_count > 1
else self.name)
if self.regwen is None or not regwen_multi or creg_count == 1:
new_regwen = self.regwen
else:
new_regwen = '{}_{}'.format(self.regwen, creg_idx)
strip_field = creg_idx > 0
if compact:
# Compacting multiple registers into a single "compacted" register.
# This is only supported if we have exactly one field (checked at
# the call-site)
assert len(self.fields) == 1
new_fields = self.fields[0].make_multi(reg_width,
min_reg_idx, max_reg_idx,
cname, creg_idx,
strip_field)
else:
# No compacting going on, but we still choose to rename the fields
# to match the registers
assert creg_idx == min_reg_idx
new_fields = [field.make_suffixed('_{}'.format(creg_idx),
cname, creg_idx, strip_field)
for field in self.fields]
# Don't specify a reset value for the new register. Any reset value
# defined for the original register will have propagated to its fields,
# so when we combine them here, the Register constructor can compute a
# reset value for us (which might well be different from self.resval if
# we've replicated fields).
new_resval = None
return Register(offset, new_name, self.desc,
self.hwext, self.hwqe, self.hwre, new_regwen,
self.tags, new_resval, self.shadowed, new_fields,
self.update_err_alert, self.storage_err_alert)
def check_valid_regwen(self) -> None:
'''Check that this register is valid for use as a REGWEN'''
# A REGWEN register should have a single field that's just bit zero.
if len(self.fields) != 1:
raise ValueError('One or more registers use {} as a '
'write-enable so it should have exactly one '
'field. It actually has {}.'
.format(self.name, len(self.fields)))
wen_fld = self.fields[0]
if wen_fld.bits.width() != 1:
raise ValueError('One or more registers use {} as a '
'write-enable so its field should be 1 bit wide, '
'not {}.'
.format(self.name, wen_fld.bits.width()))
if wen_fld.bits.lsb != 0:
raise ValueError('One or more registers use {} as a '
'write-enable so its field should have LSB 0, '
'not {}.'
.format(self.name, wen_fld.bits.lsb))
# If the REGWEN bit is SW controlled, check that the register
# defaults to enabled. If this bit is read-only by SW and hence
# hardware controlled, we do not enforce this requirement.
if wen_fld.swaccess.key != "ro" and not self.resval:
raise ValueError('One or more registers use {} as a '
'write-enable. Since it is SW-controlled '
'it should have a nonzero reset value.'
.format(self.name))
if wen_fld.swaccess.key == "rw0c":
# The register is software managed: all good!
return
if wen_fld.swaccess.key == "ro" and wen_fld.hwaccess.key == "hwo":
# The register is hardware managed: that's fine too.
return
raise ValueError('One or more registers use {} as a write-enable. '
'However, its field has invalid access permissions '
'({} / {}). It should either have swaccess=RW0C '
'or have swaccess=RO and hwaccess=HWO.'
.format(self.name,
wen_fld.swaccess.key,
wen_fld.hwaccess.key))
def _asdict(self) -> Dict[str, object]:
rd = {
'name': self.name,
'desc': self.desc,
'fields': self.fields,
'hwext': str(self.hwext),
'hwqe': str(self.hwqe),
'hwre': str(self.hwre),
'tags': self.tags,
'shadowed': str(self.shadowed),
}
if self.regwen is not None:
rd['regwen'] = self.regwen
if self.update_err_alert is not None:
rd['update_err_alert'] = self.update_err_alert
if self.storage_err_alert is not None:
rd['storage_err_alert'] = self.storage_err_alert
return rd
| 40.069212 | 80 | 0.506582 |
4a19948513f70ca7d7b09a40a4de7c8b141fdbd3 | 3,224 | py | Python | simpmods/user.py | IsmaelRLG/simpbot | 7d4288334f637e0a7774ef1acda933a668c60181 | ["MIT"] | 3 | 2017-05-08T14:53:40.000Z | 2021-12-18T22:15:14.000Z | simpmods/user.py | IsmaelRLG/simpbot | 7d4288334f637e0a7774ef1acda933a668c60181 | ["MIT"] | null | null | null | simpmods/user.py | IsmaelRLG/simpbot | 7d4288334f637e0a7774ef1acda933a668c60181 | ["MIT"] | 1 | 2017-05-15T23:28:56.000Z | 2017-05-15T23:28:56.000Z |
# -*- coding: utf-8 -*-
# Simple Bot (SimpBot)
# Copyright 2016-2017, Ismael Lugo (kwargs)
import simpbot
module = simpbot.get_module(sys=True)
loader = module.loader()
@loader('register user', 'register user',
need=[
'requires nickserv',
'unregistered user'],
i18n={
'loader': simpbot.localedata.simplocales,
'module': 'simpmods.user',
'syntax': 'syntax register',
'help': 'help register'})
def register(irc, ev, result, target, channel, _, locale):
user = _['user']
def check_max():
ok = True
no = False
if irc.dbstore.max_users == 0:
return ok
total = len(irc.dbstore.store_request['chan'])
total += irc.dbstore.total_user()
if total < irc.dbstore.max_users:
return ok
elif (total + 1) == irc.dbstore.max_users:
irc.verbose('request', locale['max users'])
irc.error(target, locale['registration disabled'])
return no
if irc.dbstore.userregister == 'allow':
if not check_max():
return
irc.dbstore.register_user(user.account)
irc.notice(target, _(locale['user registered']))
irc.verbose('new user', _(locale['verbose: user registered']))
elif irc.dbstore.userregister == 'request':
if irc.dbstore.has_request('user', user.account):
irc.error(target, locale['you already requested this'])
return
if not check_max():
return
irc.dbstore.request('user', user.account)
code = irc.dbstore.get_request(user.account)[0]
irc.verbose('request', _(locale['user request'], code=code))
irc.notice(target, _(locale['request sent']))
elif irc.dbstore.userregister == 'deny':
irc.error(target, locale['registration disabled'])
@loader('drop user', 'drop user',
need=[
'requires nickserv',
'registered user'],
i18n={
'loader': simpbot.localedata.simplocales,
'module': 'simpmods.user',
'syntax': 'syntax drop',
'help': 'help drop'})
def drop(irc, ev, result, target, channel, _, locale):
user = _['user']
if irc.dbstore.has_drop('user', user.account):
_['hash'] = irc.dbstore.get_hash(user.account)
else:
_['hash'] = irc.dbstore.drop('user', user.account)
irc.notice(user.nick, _(locale['confirm drop']))
@loader('confirm drop:user', 'confirm drop:user !{code}',
syntax="",
need=[
'requires nickserv',
'registered user'],
i18n={
'loader': simpbot.localedata.simplocales,
'module': 'simpmods.user',
'syntax': 'syntax confirm drop',
'help': 'help confirm drop'})
def confirm(irc, ev, result, target, channel, _, locale):
user = _['user']
code = _['code']
if len(code) != 32 or not irc.dbstore.has_drop('user', user.account) or \
irc.dbstore.get_hash(user.account) != code:
irc.error(user.nick, locale['invalid code'])
return
irc.dbstore.del_drop('user', user.account)
irc.dbstore.drop_user(user.account)
irc.notice(user.nick, _(locale['user dropped']))
irc.verbose('drop user', _(locale['verbose: user droppped']))
| 33.237113 | 77 | 0.600496 |
4a1994e530505dc71239893b304a96875bd23f38 | 19,659 | py | Python | main.py | D9veth/Tg_bot_parcer | 68cc18f5fb9b18093f63fc74936940a31e743d34 | ["MIT"] | null | null | null | main.py | D9veth/Tg_bot_parcer | 68cc18f5fb9b18093f63fc74936940a31e743d34 | ["MIT"] | null | null | null | main.py | D9veth/Tg_bot_parcer | 68cc18f5fb9b18093f63fc74936940a31e743d34 | ["MIT"] | null | null | null |
import telebot
import config
from telebot import types
import time
from main_pars import pars_vv,pars_gor,pars_tts,pars_asna,pars_mag,pars_puma,pars_metro,pars_perek,pars_mvideo,pars_nike,pars_lamoda,pars_sokolov,pars_eladarado,pars_restore
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands = ['start'], content_types=['text'])
def welcome(message):
    '''
    Sends the welcome message to the user, where they can choose a store category.
    '''
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Продуктовые магазины', callback_data='food')
item2 = types.InlineKeyboardButton('Магазины одежды',callback_data='clothes')
item3 = types.InlineKeyboardButton('Магазины электроники',callback_data='electrik')
item4 = types.InlineKeyboardButton('Ювелирные магазины',callback_data='jewellery')
item5 = types.InlineKeyboardButton('Аптеки',callback_data='drugs')
markup.add(item1,item2,item3,item4,item5)
bot.send_message(message.chat.id,'Добро пожаловать,{0.first_name}!\nЗдесь вы можете узнать, наиболее продоваемые товары, товары со скидками'.format(
message.from_user, bot.get_me()),reply_markup=markup)
@bot.callback_query_handler(func=lambda call:True)
def reply(call):
    '''
    Sends the store-selection message, sends the discounts from the store chosen by the user,
    and sends the message offering to pick a store category again.
    '''
try:
if call.message :
if call.data == 'food':
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('ВкусВилл', callback_data='VV')
item2 = types.InlineKeyboardButton('Перекресток', callback_data='PR')
item4 = types.InlineKeyboardButton('Магнолия', callback_data='MAGA')
item5 = types.InlineKeyboardButton('METRO', callback_data='METRO')
markup.add(item1, item2, item4, item5)
bot.send_message(chat_id=call.message.chat.id,text = 'Выберете магазин:', reply_markup= markup)
elif call.data == 'clothes':
markup = types.InlineKeyboardMarkup(row_width=1)
item5 = types.InlineKeyboardButton('Puma', callback_data='kot')
item6 = types.InlineKeyboardButton('Nike', callback_data='ekin')
item7 = types.InlineKeyboardButton('Ламода', callback_data='lam')
markup.add(item5, item6, item7)
bot.send_message(chat_id=call.message.chat.id, text='Выберете магазин:', reply_markup=markup)
elif call.data == 'electrik':
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('МВидео', callback_data='MV')
item4 = types.InlineKeyboardButton('re:Store', callback_data='res')
item2 = types.InlineKeyboardButton('Eldorado', callback_data='old')
markup.add(item1, item4, item2)
bot.send_message(chat_id=call.message.chat.id, text='Выберете магазин:', reply_markup=markup)
elif call.data == 'jewellery':
markup = types.InlineKeyboardMarkup(row_width=1)
item3 = types.InlineKeyboardButton('Sokolov', callback_data='Skolkovo')
markup.add(item3)
bot.send_message(chat_id=call.message.chat.id, text='Выберете магазин:', reply_markup=markup)
elif call.data == 'drugs':
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('36.6', callback_data='36.6')
item2 = types.InlineKeyboardButton('asna', callback_data='asna')
item3 = types.InlineKeyboardButton('Горздрав', callback_data='gor')
markup.add(item1,item2,item3)
bot.send_message(chat_id=call.message.chat.id, text='Выберете магазин:', reply_markup=markup)
elif call.data == 'VV':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
vkusvill_prod,vkusvill_html,vkusvill_old_price,vkusvill_new_price = pars_vv()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти во ВкусВилле:')
for i in range(len(vkusvill_prod)):
txt=str(vkusvill_prod[i])+'\n'+'⛔'+'Старая цена:'+' '+'\u0336'.join(str(vkusvill_old_price[i]))+'\u0336'+'\n'+'✅'+'Новая цена:'+' '+str(vkusvill_new_price[i])+'\n'+'🌐'+'Ссылка:'+' '+str(vkusvill_html[i])
bot.send_message(chat_id=call.message.chat.id,text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'MV':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
mvideo_prod,mvideo_html,mvideo_old_price,mvideo_new_price = pars_mvideo()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти во МВидео:')
for i in range(len(mvideo_prod)):
txt=str(mvideo_prod[i])+'\n'+'⛔'+'Старая цена:'+' '+'\u0336'.join(str(mvideo_old_price[i]))+'\u0336'+'\n'+'✅'+'Новая цена:'+' '+str(mvideo_new_price[i])+'\n'+'🌐'+'Ссылка:'+' '+str(mvideo_html[i])
bot.send_message(chat_id=call.message.chat.id,text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'PR':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
perek_prod,perek_html,perek_old_price,perek_new_price = pars_perek()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Перекрестке:')
for i in range(len(perek_prod)):
txt = str(perek_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(perek_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(perek_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(perek_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'MAGA':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
mag_prod,mag_html,mag_old_price,mag_new_price = pars_mag()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Магнолии:')
for i in range(len(mag_prod)):
txt = str(mag_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(mag_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(mag_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(mag_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'METRO':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
metro_prod,metro_html,metro_old_price,metro_new_price = pars_metro()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в METRO:')
for i in range(len(metro_prod)):
txt = str(metro_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(metro_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(metro_new_price[i])+'\n'+'🌐'+'Ссылка:'+' '+str(metro_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'kot':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
puma_prod, puma_html, puma_old_price, puma_new_price = pars_puma()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Puma:')
for i in range(len(puma_prod)):
txt = str(puma_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(puma_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(puma_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(puma_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'ekin':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
nike_prod, nike_html, nike_old_price, nike_new_price = pars_nike()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Nike:')
for i in range(len(nike_prod)):
txt = str(nike_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(nike_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(nike_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(nike_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == '36.6':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
tts_prod, tts_html, tts_old_price, tts_new_price = pars_tts()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Аптека 36.6:')
for i in range(len(tts_prod)):
txt = str(tts_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(tts_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(tts_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(tts_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'asna':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
asna_prod, asna_html, asna_old_price, asna_new_price = pars_asna()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Аптека asna:')
for i in range(len(asna_prod)):
txt = str(asna_prod[i]) + '\n' + '✅' + 'Цена:' + ' ' + ''.join(str(asna_old_price[i])) + '' + '\n' + '💳' + 'Начисляемые баллы:' + ' ' + str(asna_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(asna_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'gor':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
gor_prod, gor_html, gor_old_price, gor_new_price = pars_gor()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Аптека Горздрав:')
for i in range(len(gor_prod)):
txt = str(gor_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(gor_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(gor_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(gor_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'lam':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
lamoda_prod, lamoda_html, lamoda_old_price, lamoda_new_price = pars_lamoda()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Ламоде:')
for i in range(len(lamoda_prod)):
txt = str(lamoda_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(lamoda_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(lamoda_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(lamoda_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'res':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
restore_prod, restore_html, restore_old_price, restore_new_price = pars_restore()
bot.send_message(chat_id=call.message.chat.id,text='Вот какие скидки нам удалось найти в re:Store:')
for i in range(len(restore_prod)):
txt = str(restore_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(restore_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(restore_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(restore_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'old':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
eladarado_prod, eladarado_html, eldarado_old_price, eldarado_new_price = pars_eladarado()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Эльдарадо:')
for i in range(len(eladarado_prod)):
txt = str(eladarado_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(eldarado_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(eldarado_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(eladarado_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == 'Skolkovo':
bot.send_message(chat_id=call.message.chat.id, text='Подождите загружаем скидки:')
sokolov_prod, sokolov_html, sokolov_old_price, sokolov_new_price = pars_sokolov()
bot.send_message(chat_id=call.message.chat.id, text='Вот какие скидки нам удалось найти в Эльдарадо:')
for i in range(len(sokolov_prod)):
txt = str(sokolov_prod[i]) + '\n' + '⛔' + 'Старая цена:' + ' ' + '\u0336'.join(str(sokolov_old_price[i])) + '\u0336' + '\n' + '✅' + 'Новая цена:' + ' ' + str(sokolov_new_price[i]) + '\n' + '🌐' + 'Ссылка:' + ' ' + str(sokolov_html[i])
bot.send_message(chat_id=call.message.chat.id, text=txt)
time.sleep(0.5)
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Хочу ещё!', callback_data='/restart')
markup.add(item1)
bot.send_message(chat_id=call.message.chat.id, text='Хотите больше скидок?', reply_markup=markup)
elif call.data == '/restart':
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Продуктовые магазины', callback_data='food')
item2 = types.InlineKeyboardButton('Магазины одежды', callback_data='clothes')
item3 = types.InlineKeyboardButton('Магазины электроники', callback_data='electrik')
item4 = types.InlineKeyboardButton('Ювелирные магазины', callback_data='jewellery')
item5 = types.InlineKeyboardButton('Аптеки', callback_data='drugs')
markup.add(item1, item2, item3, item4, item5)
bot.send_message(chat_id = call.message.chat.id,text = 'Выберите категорию в которой мы будем искать вам скидки:'.format(call.message.from_user, bot.get_me()), reply_markup=markup)
except Exception as e:
print(repr(e))
bot.polling(none_stop=True)
| 75.034351 | 259 | 0.603184 |
4a199639ef5e9424ccecca03d2d1fc479e11f182 | 2,437 | py | Python | Face-recognition/face_recognition.py | sanjar-techie/face-recognition | 9c9cff3113a54bf80fce60710181d4ae96e1f765 | ["MIT"] | null | null | null | Face-recognition/face_recognition.py | sanjar-techie/face-recognition | 9c9cff3113a54bf80fce60710181d4ae96e1f765 | ["MIT"] | null | null | null | Face-recognition/face_recognition.py | sanjar-techie/face-recognition | 9c9cff3113a54bf80fce60710181d4ae96e1f765 | ["MIT"] | null | null | null |
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
# from PIL import ImageGrab
path = 'Images'
images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cl in myList:
curImg = cv2.imread(f'{path}/{cl}')
images.append(curImg)
classNames.append(os.path.splitext(cl)[0])
print(classNames)
def findEncodings(images):
encodeList = []
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
encode = face_recognition.face_encodings(img)[0]
encodeList.append(encode)
return encodeList
def markAttendance(name):
with open('details.csv', 'r+') as f:
myDataList = f.readlines()
nameList = []
for line in myDataList:
entry = line.split(',')
nameList.append(entry[0])
if name not in nameList:
now = datetime.now()
dtString = now.strftime('%H:%M:%S')
            f.writelines(f'\n{name},{dtString}')
#### FOR CAPTURING SCREEN RATHER THAN WEBCAM
# def captureScreen(bbox=(300,300,690+300,530+300)):
# capScr = np.array(ImageGrab.grab(bbox))
# capScr = cv2.cvtColor(capScr, cv2.COLOR_RGB2BGR)
# return capScr
encodeListKnown = findEncodings(images)
print('Encoding Complete')
cap = cv2.VideoCapture(0)
while True:
success, img = cap.read()
# img = captureScreen()
imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
facesCurFrame = face_recognition.face_locations(imgS)
encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
# print(faceDis)
matchIndex = np.argmin(faceDis)
if matches[matchIndex]:
name = classNames[matchIndex].upper()
# print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
markAttendance(name)
cv2.imshow('Webcam', img)
cv2.waitKey(1)
| 30.848101 | 101 | 0.630693 |
4a199696f911639c32f74ff74a5cf0a405af8ba9 | 2,056 | py | Python | typed_python/Codebase_test.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 105 | 2019-12-02T01:44:46.000Z | 2022-03-28T20:27:38.000Z | typed_python/Codebase_test.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 173 | 2019-10-08T19:37:06.000Z | 2022-01-24T18:43:42.000Z | typed_python/Codebase_test.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 1 | 2020-01-23T00:06:42.000Z | 2020-01-23T00:06:42.000Z |
# Copyright 2020 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import typed_python
from typed_python.Codebase import Codebase
class CodebaseTest(unittest.TestCase):
def test_instantiated_codebase(self):
codebase = Codebase.FromFileMap({
'codebase_test_test_module/__init__.py': '',
'codebase_test_test_module/inner.py': 'f = lambda: 10',
})
codebase.instantiate()
codebase.instantiate()
self.assertEqual(codebase.getClassByName('codebase_test_test_module.inner.f')(), 10)
codebase2 = Codebase.FromRootlevelModule(codebase.getModuleByName("codebase_test_test_module"))
self.assertTrue(codebase2.isInstantiated())
self.assertEqual(codebase2.getClassByName('codebase_test_test_module.inner.f')(), 10)
self.assertEqual(codebase.filesToContents, codebase2.filesToContents)
vals = list(codebase.allModuleLevelValues())
vals = [v[0] for v in vals if "__" not in v[0]]
self.assertEqual(vals, ['codebase_test_test_module.inner', 'codebase_test_test_module.inner.f'])
codebaseAlternativeCode = Codebase.FromFileMap(
{'codebase_test_test_module/__init__.py': ""}
)
with self.assertRaisesRegex(Exception, "Module codebase_test_test_module is"):
codebaseAlternativeCode.instantiate()
def test_grab_native_codebase(self):
codebase = Codebase.FromRootlevelModule(typed_python)
assert codebase.isInstantiated()
| 38.792453 | 104 | 0.718385 |
4a1996ff67d3474da06f96b21126b60a6c0cf580 | 550 | py | Python | manage.py | robot-lab/main-web-service | 28b7aa5dc6a27a3819e12abb61a78626858279f8 | ["Apache-2.0"] | 1 | 2019-09-23T13:36:29.000Z | 2019-09-23T13:36:29.000Z | manage.py | robot-lab/main-web-service | 28b7aa5dc6a27a3819e12abb61a78626858279f8 | ["Apache-2.0"] | 137 | 2018-10-12T11:12:08.000Z | 2021-06-10T20:54:32.000Z | manage.py | robot-lab/judyst-main-web-service | 28b7aa5dc6a27a3819e12abb61a78626858279f8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'judyst_web_service.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.375 | 82 | 0.692727 |
4a19982f1420c333af1d481a36198e7c55a9bb28 | 9,974 | py | Python | check/validate/client.py | siddhya/gstwebrtc-demos | 9de598332ecc6b9e86cc3b77391b941c61a75879 | ["BSD-2-Clause"] | 451 | 2017-10-21T17:00:18.000Z | 2022-03-31T07:58:36.000Z | check/validate/client.py | siddhya/gstwebrtc-demos | 9de598332ecc6b9e86cc3b77391b941c61a75879 | ["BSD-2-Clause"] | 215 | 2018-02-04T12:58:50.000Z | 2021-04-28T09:39:29.000Z | check/validate/client.py | siddhya/gstwebrtc-demos | 9de598332ecc6b9e86cc3b77391b941c61a75879 | ["BSD-2-Clause"] | 208 | 2017-11-22T12:15:02.000Z | 2022-03-07T18:25:16.000Z |
# Copyright (c) 2020, Matthew Waters <matthew@centricular.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
import threading
import copy
from observer import Signal, WebRTCObserver, DataChannelObserver, StateObserver
from enums import NegotiationState, DataChannelState
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
gi.require_version("GstWebRTC", "1.0")
from gi.repository import GstWebRTC
gi.require_version("GstSdp", "1.0")
from gi.repository import GstSdp
gi.require_version("GstValidate", "1.0")
from gi.repository import GstValidate
class WebRTCBinObserver(WebRTCObserver):
"""
Observe a webrtcbin element.
"""
def __init__(self, element):
WebRTCObserver.__init__(self)
self.element = element
self.signal_handlers = []
self.signal_handlers.append(element.connect("on-negotiation-needed", self._on_negotiation_needed))
self.signal_handlers.append(element.connect("on-ice-candidate", self._on_ice_candidate))
self.signal_handlers.append(element.connect("pad-added", self._on_pad_added))
self.signal_handlers.append(element.connect("on-new-transceiver", self._on_new_transceiver))
self.signal_handlers.append(element.connect("on-data-channel", self._on_data_channel))
self.negotiation_needed = 0
self._negotiation_needed_observer = StateObserver(self, "negotiation_needed", threading.Condition())
self.on_negotiation_needed = Signal()
self.on_ice_candidate = Signal()
self.on_pad_added = Signal()
self.on_new_transceiver = Signal()
def _on_negotiation_needed(self, element):
self.negotiation_needed += 1
self._negotiation_needed_observer.update(self.negotiation_needed)
self.on_negotiation_needed.fire()
def _on_ice_candidate(self, element, mline, candidate):
self.on_ice_candidate.fire(mline, candidate)
def _on_pad_added(self, element, pad):
self.on_pad_added.fire(pad)
def _on_description_set(self, promise, desc):
new_state = self._update_negotiation_from_description_state(desc)
if new_state == NegotiationState.OFFER_SET:
self.on_offer_set.fire (desc)
elif new_state == NegotiationState.ANSWER_SET:
self.on_answer_set.fire (desc)
def _on_new_transceiver(self, element, transceiver):
self.on_new_transceiver.fire(transceiver)
def _on_data_channel(self, element, channel):
observer = WebRTCBinDataChannelObserver(channel, channel.props.label, 'remote')
self.add_channel(observer)
def _update_negotiation_from_description_state(self, desc):
new_state = None
if desc.type == GstWebRTC.WebRTCSDPType.OFFER:
new_state = NegotiationState.OFFER_SET
elif desc.type == GstWebRTC.WebRTCSDPType.ANSWER:
new_state = NegotiationState.ANSWER_SET
assert new_state is not None
self._update_negotiation_state(new_state)
return new_state
def _deepcopy_session_description(self, desc):
# XXX: passing 'offer' to both a promise and an action signal without
# a deepcopy will segfault...
new_sdp = GstSdp.SDPMessage.new()[1]
GstSdp.sdp_message_parse_buffer(bytes(desc.sdp.as_text().encode()), new_sdp)
return GstWebRTC.WebRTCSessionDescription.new(desc.type, new_sdp)
def _on_offer_created(self, promise, element):
self._update_negotiation_state(NegotiationState.OFFER_CREATED)
reply = promise.get_reply()
offer = reply['offer']
new_offer = self._deepcopy_session_description(offer)
promise = Gst.Promise.new_with_change_func(self._on_description_set, new_offer)
new_offer = self._deepcopy_session_description(offer)
self.element.emit('set-local-description', new_offer, promise)
self.on_offer_created.fire(offer)
def _on_answer_created(self, promise, element):
self._update_negotiation_state(NegotiationState.ANSWER_CREATED)
reply = promise.get_reply()
offer = reply['answer']
new_offer = self._deepcopy_session_description(offer)
promise = Gst.Promise.new_with_change_func(self._on_description_set, new_offer)
new_offer = self._deepcopy_session_description(offer)
self.element.emit('set-local-description', new_offer, promise)
self.on_answer_created.fire(offer)
def create_offer(self, options=None):
promise = Gst.Promise.new_with_change_func(self._on_offer_created, self.element)
self.element.emit('create-offer', options, promise)
def create_answer(self, options=None):
promise = Gst.Promise.new_with_change_func(self._on_answer_created, self.element)
self.element.emit('create-answer', options, promise)
def set_remote_description(self, desc):
promise = Gst.Promise.new_with_change_func(self._on_description_set, desc)
self.element.emit('set-remote-description', desc, promise)
def add_ice_candidate(self, mline, candidate):
self.element.emit('add-ice-candidate', mline, candidate)
def add_data_channel(self, ident):
channel = self.element.emit('create-data-channel', ident, None)
observer = WebRTCBinDataChannelObserver(channel, ident, 'local')
self.add_channel(observer)
def wait_for_negotiation_needed(self, generation):
self._negotiation_needed_observer.wait_for ((generation,))
class WebRTCStream(object):
"""
An stream attached to a webrtcbin element
"""
def __init__(self):
self.bin = None
def set_description(self, desc):
assert self.bin is None
self.bin = Gst.parse_bin_from_description(desc, True)
def add_and_link(self, parent, link):
assert self.bin is not None
self.bin.set_locked_state(True)
parent.add(self.bin)
src = self.bin.get_static_pad("src")
sink = self.bin.get_static_pad("sink")
assert src is None or sink is None
if src:
self.bin.link(link)
if sink:
link.link(self.bin)
self.bin.set_locked_state(False)
self.bin.sync_state_with_parent()
def add_and_link_to(self, parent, link, pad):
assert self.bin is not None
self.bin.set_locked_state(True)
parent.add(self.bin)
src = self.bin.get_static_pad("src")
sink = self.bin.get_static_pad("sink")
assert src is None or sink is None
if pad.get_direction() == Gst.PadDirection.SRC:
assert sink is not None
pad.link(sink)
if pad.get_direction() == Gst.PadDirection.SINK:
assert src is not None
src.link(pad)
self.bin.set_locked_state(False)
self.bin.sync_state_with_parent()
class WebRTCClient(WebRTCBinObserver):
"""
Client for performing webrtc operations. Controls the pipeline that
contains a webrtcbin element.
"""
def __init__(self):
self.pipeline = Gst.Pipeline(None)
self.webrtcbin = Gst.ElementFactory.make("webrtcbin")
super().__init__(self.webrtcbin)
self.pipeline.add(self.webrtcbin)
self._streams = []
def stop(self):
self.pipeline.set_state (Gst.State.NULL)
def add_stream(self, desc):
stream = WebRTCStream()
stream.set_description(desc)
stream.add_and_link (self.pipeline, self.webrtcbin)
self._streams.append(stream)
def add_stream_with_pad(self, desc, pad):
stream = WebRTCStream()
stream.set_description(desc)
stream.add_and_link_to (self.pipeline, self.webrtcbin, pad)
self._streams.append(stream)
def set_options (self, opts):
if opts.has_field("local-bundle-policy"):
self.webrtcbin.props.bundle_policy = opts["local-bundle-policy"]
class WebRTCBinDataChannelObserver(DataChannelObserver):
"""
Data channel observer for a webrtcbin data channel.
"""
def __init__(self, target, ident, location):
super().__init__(ident, location)
self.target = target
self.signal_handlers = []
self.signal_handlers.append(target.connect("on-open", self._on_open))
self.signal_handlers.append(target.connect("on-close", self._on_close))
self.signal_handlers.append(target.connect("on-error", self._on_error))
self.signal_handlers.append(target.connect("on-message-data", self._on_message_data))
self.signal_handlers.append(target.connect("on-message-string", self._on_message_string))
self.signal_handlers.append(target.connect("on-buffered-amount-low", self._on_buffered_amount_low))
def _on_open(self, channel):
self._update_state (DataChannelState.OPEN)
def _on_close(self, channel):
self._update_state (DataChannelState.CLOSED)
def _on_error(self, channel):
self._update_state (DataChannelState.ERROR)
def _on_message_data(self, channel, data):
        self.data.append(data)
def _on_message_string(self, channel, msg):
self.got_message (msg)
def _on_buffered_amount_low(self, channel):
pass
def close(self):
self.target.emit('close')
def send_string (self, msg):
self.target.emit('send-string', msg)
| 39.896 | 108 | 0.700722 |
4a1998b1139b3238acce2a05b8a8764a35aaf075
| 8,234 |
py
|
Python
|
code/main.py
|
Brian-ZhenLiu/Chatbot
|
29080279b126e80f58fcee1de5243b9526791bbe
|
[
"MIT"
] | null | null | null |
code/main.py
|
Brian-ZhenLiu/Chatbot
|
29080279b126e80f58fcee1de5243b9526791bbe
|
[
"MIT"
] | null | null | null |
code/main.py
|
Brian-ZhenLiu/Chatbot
|
29080279b126e80f58fcee1de5243b9526791bbe
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from data.twitter import data
from sklearn.utils import shuffle
import tensorflow as tf
import numpy as np
import time
replyNum = 5
def getReplying(y, w2idx, idx2w, decode_seqs2, encode_seqs2, start_id, end_id, sess, net_rnn, question):
question = question.lower()
seed_id = []
for word in question.split(" "):
if word not in w2idx:
print("There is no'", word, "'corpus in dataset index list. Please input the sentence again")
return
else:
seed_id.append(w2idx[word])
print("The input words to index are:", seed_id)
for _ in range(replyNum):
state = sess.run(net_rnn.final_state_encode,
{encode_seqs2: [seed_id]})
o, state = sess.run([y, net_rnn.final_state_decode],
{net_rnn.initial_state_decode: state,
decode_seqs2: [[start_id]]})
w_id = tl.nlp.sample_top(o[0], top_k=3)
w = idx2w[w_id]
sentence = [w]
for _ in range(50):
o, state = sess.run([y, net_rnn.final_state_decode],
{net_rnn.initial_state_decode: state,
decode_seqs2: [[w_id]]})
w_id = tl.nlp.sample_top(o[0], top_k=2)
w = idx2w[w_id]
if w_id == end_id:
break
sentence = sentence + [w]
print("Someone>", ' '.join(sentence))
def getDataset(idx_q, idx_a):
(trainX, trainY), (testX, testY), (validX, validY) = data.split_dataset(idx_q, idx_a)
trainX = trainX.tolist()
trainY = trainY.tolist()
testX = testX.tolist()
testY = testY.tolist()
validX = validX.tolist()
validY = validY.tolist()
trainX = tl.prepro.remove_pad_sequences(trainX)
trainY = tl.prepro.remove_pad_sequences(trainY)
testX = tl.prepro.remove_pad_sequences(testX)
testY = tl.prepro.remove_pad_sequences(testY)
validX = tl.prepro.remove_pad_sequences(validX)
validY = tl.prepro.remove_pad_sequences(validY)
return trainX, trainY, testX, testY, validX, validY
def model(encode_seqs, decode_seqs, xvocab_size, is_train=True, reuse=False):
with tf.variable_scope("model", reuse=reuse):
with tf.variable_scope("embedding") as vs:
net_encode = EmbeddingInputlayer(
inputs = encode_seqs,
vocabulary_size = xvocab_size,
embedding_size = 1024,
name = 'seq_embedding')
vs.reuse_variables()
tl.layers.set_name_reuse(True)
net_decode = EmbeddingInputlayer(
inputs = decode_seqs,
vocabulary_size = xvocab_size,
embedding_size = 1024,
name = 'seq_embedding')
net_rnn = Seq2Seq(net_encode, net_decode,
cell_fn = tf.contrib.rnn.BasicLSTMCell,
n_hidden = 1024,
initializer = tf.random_uniform_initializer(-0.1, 0.1),
encode_sequence_length = retrieve_seq_length_op2(encode_seqs),
decode_sequence_length = retrieve_seq_length_op2(decode_seqs),
initial_state_encode = None,
dropout = (0.5 if is_train else None),
n_layer = 3,
return_seq_2d = True,
name = 'seq2seq')
net_out = DenseLayer(net_rnn, n_units=xvocab_size, act=tf.identity, name='output')
return net_out, net_rnn
def main():
metadata, idx_q, idx_a = data.load_data(PATH='data/twitter/')
trainX, trainY, testX, testY, validX, validY = getDataset(idx_q, idx_a)
xseq_len = len(trainX)
yseq_len = len(trainY)
assert xseq_len == yseq_len
batch_size = 32
n_step = int(xseq_len/batch_size)
xvocab_size = len(metadata['idx2w'])
emb_dim = 1024
w2idx = metadata['w2idx']
idx2w = metadata['idx2w']
unk_id = w2idx['unk']
pad_id = w2idx['_']
start_id = xvocab_size
end_id = xvocab_size+1
w2idx.update({'start_id': start_id})
w2idx.update({'end_id': end_id})
idx2w = idx2w + ['start_id', 'end_id']
xvocab_size = yvocab_size = xvocab_size + 2
target_seqs = tl.prepro.sequences_add_end_id([trainY[10]], end_id=end_id)[0]
decode_seqs = tl.prepro.sequences_add_start_id([trainY[10]], start_id=start_id, remove_last=False)[0]
target_mask = tl.prepro.sequences_get_mask([target_seqs])[0]
encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs")
decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs")
target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs")
target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask")
net_out, _ = model(encode_seqs, decode_seqs, xvocab_size, is_train=True, reuse=False)
encode_seqs2 = tf.placeholder(dtype=tf.int64, shape=[1, None], name="encode_seqs")
decode_seqs2 = tf.placeholder(dtype=tf.int64, shape=[1, None], name="decode_seqs")
net, net_rnn = model(encode_seqs2, decode_seqs2, xvocab_size, is_train=False, reuse=True)
y = tf.nn.softmax(net.outputs)
loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost')
net_out.print_params(False)
lr = 0.0001
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
gpu_option = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_option))
tl.layers.initialize_global_variables(sess)
load_parameter = tl.files.load_and_assign_npz(sess=sess, name='twitter.npz', network=net)
if not load_parameter:
print("Loading npz fail, starting to train.")
n_epoch = 50
for epoch in range(n_epoch):
epoch_time = time.time()
from sklearn.utils import shuffle
trainX, trainY = shuffle(trainX, trainY, random_state=0)
total_err, n_iter = 0, 0
for X, Y in tl.iterate.minibatches(inputs=trainX, targets=trainY, batch_size=batch_size, shuffle=False):
step_time = time.time()
X = tl.prepro.pad_sequences(X)
_target_seqs = tl.prepro.sequences_add_end_id(Y, end_id=end_id)
_target_seqs = tl.prepro.pad_sequences(_target_seqs)
_decode_seqs = tl.prepro.sequences_add_start_id(Y, start_id=start_id, remove_last=False)
_decode_seqs = tl.prepro.pad_sequences(_decode_seqs)
_target_mask = tl.prepro.sequences_get_mask(_target_seqs)
_, err = sess.run([train_op, loss],
{encode_seqs: X,
decode_seqs: _decode_seqs,
target_seqs: _target_seqs,
target_mask: _target_mask})
if n_iter % 200 == 0:
print("Epoch[%d/%d] step:[%d/%d] loss:%f took:%.5fs" % (epoch, n_epoch, n_iter, n_step, err, time.time() - step_time))
total_err += err; n_iter += 1
if n_iter% 1000 == 0:
print("Query> happy birthday to you")
getReplying(y,w2idx, idx2w, decode_seqs2, encode_seqs2, start_id, end_id, sess, net_rnn, "happy birthday to you")
print("Query> help me to do the exam")
getReplying(y,w2idx, idx2w, decode_seqs2, encode_seqs2, start_id, end_id, sess, net_rnn, "help me to do the exam")
print("Query> ny is so cold now")
getReplying(y,w2idx, idx2w, decode_seqs2, encode_seqs2, start_id, end_id, sess, net_rnn, "ny is so cold now")
print("Epoch[%d/%d] averaged loss:%f took:%.5fs" % (epoch, n_epoch, total_err/n_iter, time.time()-epoch_time))
tl.files.save_npz(net.all_params, name='n.npz', sess=sess)
while(True):
getReplying(y,w2idx, idx2w, decode_seqs2, encode_seqs2, start_id, end_id, sess, net_rnn, input("You>"))
main()
| 45.491713 | 154 | 0.62752 |
4a1999558e2edc3ec3c1d7522f65c8bc8f19ecd9
| 576 |
py
|
Python
|
apps/diary/migrations/0005_data_migration.py
|
deniskrumko/deniskrumko
|
613c0c3eac953d2e8482a2e66fce7d3570770b2c
|
[
"MIT"
] | 2 |
2019-07-09T01:42:04.000Z
|
2020-04-09T16:44:59.000Z
|
apps/diary/migrations/0005_data_migration.py
|
deniskrumko/deniskrumko
|
613c0c3eac953d2e8482a2e66fce7d3570770b2c
|
[
"MIT"
] | 5 |
2019-12-30T22:16:38.000Z
|
2020-09-11T18:13:14.000Z
|
apps/diary/migrations/0005_data_migration.py
|
deniskrumko/deniskrumko
|
613c0c3eac953d2e8482a2e66fce7d3570770b2c
|
[
"MIT"
] | 1 |
2019-07-09T01:42:07.000Z
|
2019-07-09T01:42:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Denis on 2046-08-15 10:15
from __future__ import unicode_literals
from django.db import migrations
from ..models import DiaryEntry
def add_tags(apps, schema_editor):
"""Add tags for all exisiting diary entries."""
for entry in DiaryEntry.objects.all():
entry.populate_tags()
class Migration(migrations.Migration):
dependencies = [
('diary', '0004_auto_20181106_1838'),
]
operations = [
migrations.RunPython(
add_tags,
migrations.RunPython.noop
),
]
| 20.571429 | 51 | 0.647569 |
4a199a2cf1dd18120e86558af3f5925738c47fec
| 1,893 |
py
|
Python
|
codecarbon/core/units.py
|
nikolaskaris/codecarbon
|
d6b47bf29c1e216e43e708ec49981c3dad9142af
|
[
"MIT"
] | 1 |
2020-12-23T11:44:25.000Z
|
2020-12-23T11:44:25.000Z
|
codecarbon/core/units.py
|
nikolaskaris/codecarbon
|
d6b47bf29c1e216e43e708ec49981c3dad9142af
|
[
"MIT"
] | null | null | null |
codecarbon/core/units.py
|
nikolaskaris/codecarbon
|
d6b47bf29c1e216e43e708ec49981c3dad9142af
|
[
"MIT"
] | null | null | null |
"""
Provides functionality for unit conversions
"""
from dataclasses import dataclass
@dataclass
class EmissionsPerKwh:
"""
Measured in kg/kwh
"""
LBS_MWH_TO_KG_KWH = 0.00045359237
G_KWH_TO_KG_KWH = 0.001
kgs_per_kwh: float
@classmethod
def from_lbs_per_mwh(cls, lbs_per_mwh: float) -> "EmissionsPerKwh":
return cls(kgs_per_kwh=lbs_per_mwh * EmissionsPerKwh.LBS_MWH_TO_KG_KWH)
@classmethod
def from_g_per_kwh(cls, g_per_kwh: float) -> "EmissionsPerKwh":
return cls(kgs_per_kwh=g_per_kwh * EmissionsPerKwh.G_KWH_TO_KG_KWH)
@classmethod
def from_kgs_per_kwh(cls, kgs_per_kwh: float) -> "EmissionsPerKwh":
return cls(kgs_per_kwh=kgs_per_kwh)
@dataclass
class Energy:
"""
Measured in kwh
"""
kwh: float
@classmethod
def from_power_and_time(cls, *, power: "Power", time: "Time") -> "Energy":
return cls(kwh=power.kw * time.hours)
@classmethod
def from_energy(cls, kwh: float) -> "Energy":
return cls(kwh=kwh)
def __add__(self, other: "Energy") -> "Energy":
return Energy(self.kwh + other.kwh)
def __iadd__(self, other: "Energy") -> "Energy":
return Energy(self.kwh + other.kwh)
@dataclass
class Power:
"""
Measured in kw
"""
MILLI_WATTS_TO_WATTS = 0.001
WATTS_TO_KILO_WATTS = 0.001
kw: float
@classmethod
    def from_milli_watts(cls, milli_watts: float) -> "Power":
        return cls(
            kw=milli_watts * Power.MILLI_WATTS_TO_WATTS * Power.WATTS_TO_KILO_WATTS
)
@dataclass
class Time:
"""
Measured in seconds
"""
SECONDS_TO_HOURS = 0.00027777778
seconds: float
@property
def hours(self) -> float:
return self.seconds * Time.SECONDS_TO_HOURS
@classmethod
def from_seconds(cls, seconds: float) -> "Time":
return cls(seconds=seconds)
| 21.033333 | 82 | 0.651875 |
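A minimal usage sketch for the unit classes above, assuming the dataclasses are importable; the input values are illustrative and the computed figures approximate:

power = Power.from_milli_watts(125000)                                           # 0.125 kW
energy = Energy.from_power_and_time(power=power, time=Time.from_seconds(3600))   # ~0.125 kWh
intensity = EmissionsPerKwh.from_lbs_per_mwh(900)                                # ~0.408 kg/kWh
emissions_kg = energy.kwh * intensity.kgs_per_kwh                                # ~0.051 kg CO2eq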
4a199ada9492d28139b4a33c3b6306863215c796
| 2,926 |
py
|
Python
|
sender/Messages/ACK.py
|
schc-over-sigfox/schc-over-sigfox
|
d03e26cf5524ebd6bd64b4ca33a83149eec5e59f
|
[
"MIT"
] | null | null | null |
sender/Messages/ACK.py
|
schc-over-sigfox/schc-over-sigfox
|
d03e26cf5524ebd6bd64b4ca33a83149eec5e59f
|
[
"MIT"
] | null | null | null |
sender/Messages/ACK.py
|
schc-over-sigfox/schc-over-sigfox
|
d03e26cf5524ebd6bd64b4ca33a83149eec5e59f
|
[
"MIT"
] | 1 |
2022-03-19T12:36:30.000Z
|
2022-03-19T12:36:30.000Z
|
from Messages.ACKHeader import ACKHeader
from schc_utils import bitstring_to_bytes, is_monochar, zfill
class ACK:
PROFILE = None
BITMAP = None
HEADER = None
PADDING = None
def __init__(self, profile, rule_id, dtag, w, c, bitmap, padding=''):
self.PROFILE = profile
self.BITMAP = bitmap
self.PADDING = padding
# Bitmap may or may not be carried
self.HEADER = ACKHeader(profile, rule_id, dtag, w, c)
while len(self.HEADER.to_string() + self.BITMAP + self.PADDING) < profile.DOWNLINK_MTU:
self.PADDING += '0'
def to_string(self):
return self.HEADER.to_string() + self.BITMAP + self.PADDING
def to_bytes(self):
return bitstring_to_bytes(self.to_string())
def length(self):
return len(self.to_string())
def is_receiver_abort(self):
ack_string = self.to_string()
l2_word_size = self.PROFILE.L2_WORD_SIZE
header_length = len(self.HEADER.RULE_ID + self.HEADER.DTAG + self.HEADER.W + self.HEADER.C)
header = ack_string[:header_length]
padding = ack_string[header_length:ack_string.rfind('1') + 1]
padding_start = padding[:-l2_word_size]
padding_end = padding[-l2_word_size:]
if padding_end == "1" * l2_word_size:
if padding_start != '' and len(header) % l2_word_size != 0:
return is_monochar(padding_start) and padding_start[0] == '1'
else:
return len(header) % l2_word_size == 0
else:
return False
@staticmethod
def parse_from_hex(profile, h):
ack = zfill(bin(int(h, 16))[2:], profile.DOWNLINK_MTU)
ack_index_dtag = profile.RULE_ID_SIZE
ack_index_w = ack_index_dtag + profile.T
ack_index_c = ack_index_w + profile.M
ack_index_bitmap = ack_index_c + 1
ack_index_padding = ack_index_bitmap + profile.BITMAP_SIZE
return ACK(profile,
ack[:ack_index_dtag],
ack[ack_index_dtag:ack_index_w],
ack[ack_index_w:ack_index_c],
ack[ack_index_c],
ack[ack_index_bitmap:ack_index_padding],
ack[ack_index_padding:])
@staticmethod
def parse_from_bytes(profile, b):
ack = ''.join("{:08b}".format(int(byte)) for byte in b)
ack_index_dtag = profile.RULE_ID_SIZE
ack_index_w = ack_index_dtag + profile.T
ack_index_c = ack_index_w + profile.M
ack_index_bitmap = ack_index_c + 1
ack_index_padding = ack_index_bitmap + profile.BITMAP_SIZE
return ACK(profile,
ack[:ack_index_dtag],
ack[ack_index_dtag:ack_index_w],
ack[ack_index_w:ack_index_c],
ack[ack_index_c],
ack[ack_index_bitmap:ack_index_padding],
ack[ack_index_padding:])
| 35.682927 | 99 | 0.612098 |
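A rough usage sketch, commented out: the field widths are invented, and ACKHeader.to_string() is assumed to concatenate RULE_ID + DTAG + W + C, which is what is_receiver_abort() above implies.

# from types import SimpleNamespace
# profile = SimpleNamespace(RULE_ID_SIZE=3, T=0, M=2, BITMAP_SIZE=7,
#                           DOWNLINK_MTU=13, L2_WORD_SIZE=8)
# ack = ACK(profile, '000', '', '01', '1', '1111111')
# ack.to_string()   # 13 bits: 6 header bits followed by the 7-bit bitmap, no padding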
4a199b8a38f796fce6cdf4258cc16eb646034a98
| 252 |
py
|
Python
|
tests/basics/try_continue.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 13,648 |
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/basics/try_continue.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 7,092 |
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/basics/try_continue.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 4,942 |
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
# test continue within exception handler
def f():
lst = [1, 2, 3]
for x in lst:
print('a', x)
try:
if x == 2:
raise Exception
except Exception:
continue
print('b', x)
f()
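# For reference (not part of the original test), this prints:
#   a 1
#   b 1
#   a 2
#   a 3
#   b 3
# because the continue in the handler skips the 'b' print only for x == 2.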
| 18 | 40 | 0.444444 |
4a199cd517aff88ee1a844eef0b77faa326b851b
| 4,535 |
py
|
Python
|
examples/machine_translation/transformer/faster_transformer/encoder_decoding_predict.py
|
XieYunshen/PaddleNLP
|
c3d1df6e9e9b77df7af74b7f8a819182ee9c3a7f
|
[
"Apache-2.0"
] | 1 |
2021-07-22T08:33:53.000Z
|
2021-07-22T08:33:53.000Z
|
examples/machine_translation/transformer/faster_transformer/encoder_decoding_predict.py
|
ZHUI/PaddleNLP
|
ddbae3d2baa7f6072d70e5e0dd90251c9149a36e
|
[
"Apache-2.0"
] | null | null | null |
examples/machine_translation/transformer/faster_transformer/encoder_decoding_predict.py
|
ZHUI/PaddleNLP
|
ddbae3d2baa7f6072d70e5e0dd90251c9149a36e
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
import numpy as np
from attrdict import AttrDict
import argparse
import time
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import yaml
from pprint import pprint
from paddlenlp.transformers import TransformerModel
from paddlenlp.transformers import position_encoding_init
from paddlenlp.ops import FasterTransformer
sys.path.append("../")
import reader
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default="../configs/transformer.base.yaml",
type=str,
help="Path of the config file. ")
parser.add_argument(
"--decoding_lib",
default="../../../../paddlenlp/ops/build/lib/libdecoding_op.so",
type=str,
help="Path of libdecoding_op.so. ")
parser.add_argument(
"--use_fp16_decoding",
action="store_true",
help="Whether to use fp16 decoding to predict. ")
parser.add_argument(
"--decoding_strategy",
default="beam_search",
type=str,
choices=["beam_search", "topk_sampling", "topp_sampling"],
help="Decoding strategy. Can be one of ['beam_search', 'topk_sampling', 'topp_sampling']. "
)
parser.add_argument("--beam_size", default=5, type=int, help="Beam size. ")
parser.add_argument(
"--topk",
default=4,
type=int,
help="The k value for topk_sampling. Default is 4. ")
parser.add_argument(
"--topp",
default=0.0,
type=float,
help="The probability threshold for topp_sampling. Default is 0.0 which means it won't go through topp_sampling. "
)
args = parser.parse_args()
return args
def post_process_seq(seq, bos_idx, eos_idx, output_bos=False, output_eos=False):
"""
Post-process the decoded sequence.
"""
eos_pos = len(seq) - 1
for i, idx in enumerate(seq):
if idx == eos_idx:
eos_pos = i
break
seq = [
idx for idx in seq[:eos_pos + 1]
if (output_bos or idx != bos_idx) and (output_eos or idx != eos_idx)
]
return seq
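# Worked example (illustrative, not in the original script): with bos_idx=0 and
# eos_idx=1, post_process_seq([0, 5, 7, 1, 9], 0, 1) truncates at the first eos
# and strips both markers, returning [5, 7].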
def do_predict(args):
place = "gpu"
paddle.set_device(place)
# Define data loader
test_loader, to_tokens = reader.create_infer_loader(args)
# Define model
transformer = FasterTransformer(
src_vocab_size=args.src_vocab_size,
trg_vocab_size=args.trg_vocab_size,
max_length=args.max_length + 1,
n_layer=args.n_layer,
n_head=args.n_head,
d_model=args.d_model,
d_inner_hid=args.d_inner_hid,
dropout=args.dropout,
weight_sharing=args.weight_sharing,
bos_id=args.bos_idx,
eos_id=args.eos_idx,
decoding_strategy=args.decoding_strategy,
beam_size=args.beam_size,
max_out_len=args.max_out_len,
decoding_lib=args.decoding_lib,
use_fp16_decoding=args.use_fp16_decoding)
# Set evaluate mode
transformer.eval()
# Load checkpoint.
transformer.load(init_from_params=os.path.join(args.init_from_params,
"transformer.pdparams"))
f = open(args.output_file, "w")
with paddle.no_grad():
for (src_word, ) in test_loader:
finished_seq = transformer(src_word=src_word)
if args.decoding_strategy == "beam_search":
finished_seq = finished_seq.numpy().transpose([1, 2, 0])
elif args.decoding_strategy == "topk_sampling" or args.decoding_strategy == "topp_sampling":
finished_seq = np.expand_dims(
finished_seq.numpy().transpose([1, 0]), axis=1)
for ins in finished_seq:
for beam_idx, beam in enumerate(ins):
if beam_idx >= args.n_best:
break
id_list = post_process_seq(beam, args.bos_idx, args.eos_idx)
word_list = to_tokens(id_list)
sequence = " ".join(word_list) + "\n"
f.write(sequence)
if __name__ == "__main__":
ARGS = parse_args()
yaml_file = ARGS.config
with open(yaml_file, 'rt') as f:
args = AttrDict(yaml.safe_load(f))
pprint(args)
args.decoding_lib = ARGS.decoding_lib
args.use_fp16_decoding = ARGS.use_fp16_decoding
args.decoding_strategy = ARGS.decoding_strategy
args.beam_size = ARGS.beam_size
args.topk = ARGS.topk
args.topp = ARGS.topp
args.benchmark = False
do_predict(args)
| 31.275862 | 122 | 0.626681 |
4a199ebd0ca5c9ce152e9987e8fbb25890c26714
| 703 |
py
|
Python
|
thai_nner/utils/util.py
|
vistec-AI/Thai-NNER
|
31c7112b194bb26f19296f32df7f3550fdfc4b88
|
[
"MIT"
] | 16 |
2022-03-05T07:07:55.000Z
|
2022-03-30T06:16:32.000Z
|
thai_nner/utils/util.py
|
vistec-AI/Thai-NNER
|
31c7112b194bb26f19296f32df7f3550fdfc4b88
|
[
"MIT"
] | 1 |
2022-03-12T04:10:27.000Z
|
2022-03-24T09:09:17.000Z
|
thai_nner/utils/util.py
|
vistec-AI/Thai-NNER
|
31c7112b194bb26f19296f32df7f3550fdfc4b88
|
[
"MIT"
] | null | null | null |
import json
import pandas as pd
from pathlib import Path
from itertools import repeat
from collections import OrderedDict
def ensure_dir(dirname):
dirname = Path(dirname)
if not dirname.is_dir():
dirname.mkdir(parents=True, exist_ok=False)
def read_json(fname):
fname = Path(fname)
with fname.open('rt') as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, fname):
fname = Path(fname)
with fname.open('wt') as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(data_loader):
''' wrapper function for endless data loader. '''
for loader in repeat(data_loader):
yield from loader
| 26.037037 | 61 | 0.705548 |
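A small usage sketch of the helpers above (the paths are illustrative):

# ensure_dir('output')
# write_json({'f1': 0.78, 'epoch': 3}, 'output/metrics.json')
# read_json('output/metrics.json')   # -> OrderedDict([('f1', 0.78), ('epoch', 3)])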
4a199eca2d99d41445e8eeddd7adefa07abbb85b
| 3,409 |
py
|
Python
|
membership-manager.py
|
buraktokman/Reddit-Jokes
|
3945e2fd986d3ed81459ae813a9ee8eadb50de47
|
[
"MIT"
] | 1 |
2021-08-09T07:10:36.000Z
|
2021-08-09T07:10:36.000Z
|
membership-manager.py
|
buraktokman/Reddit-Jokes
|
3945e2fd986d3ed81459ae813a9ee8eadb50de47
|
[
"MIT"
] | null | null | null |
membership-manager.py
|
buraktokman/Reddit-Jokes
|
3945e2fd986d3ed81459ae813a9ee8eadb50de47
|
[
"MIT"
] | null | null | null |
#! usr/bin/env python3
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
Project : Project JaaS
Module : membership_manager
Purpose : Add new users & check membership status of existing ones
Version : 0.1.1 beta
Status : Development
Modified : 2020 Mar 04
Created : 2020 Mar 04
Author : Burak Tokman
Email : buraktokman@hotmail.com
Copyright : 2020, Bulrosa OU
Licence : EULA
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
#-------------------------------------------------------------------------------
'''
from pathlib import Path
from psaw import PushshiftAPI
from datetime import datetime as dt
from colorama import Fore, Back, Style
import os
import sys
import time
import json
import requests
import random
import praw
sys.path.insert(0, str(Path(Path(__file__).parents[0] / 'lib')))
import logz
import postgres
CONFIG = {'refresh-interval': 10 # mins
}
def check_membership_status(user):
"""
"""
time_now_unix = int(time.time())
time_membership_end_unix = int(time.mktime(dt.strptime(user['membership_time_end'], "%Y-%m-%d %H:%M:%S").timetuple()))
if time_membership_end_unix > time_now_unix:
return True
else:
return False
def main():
# Connect to DB
postgres.connect_db()
while True:
# Start
time_start = time.time()
# ------ FETCH USERS ---------------------------
print(f"{logz.timestamp()}{Fore.GREEN} MEMBERSHIP → INIT → {Style.RESET_ALL}Fething users...")
# From database
users_database = postgres.get_user_all()
# From Shopify (or ?)
# ----------------------------------------------
# # # # # # # # # # #
# #
# MANAGE #
# #
# # # # # # # # # # #
# ------ OLD USERS -----------------------------
print(f"{logz.timestamp()}{Fore.GREEN} MEMBERSHIP → EXISTING → {Style.RESET_ALL}Checking expired memberships")
for user in users_database:
# Check if membership of existing users
if check_membership_status(user) == False:
print(f"{logz.timestamp()}{Fore.GREEN} MEMBERSHIP → CAUTION → {Style.RESET_ALL}User {user['id']} membership expired")
r = postgres.set_user_membership_status(user_id=user['id'],
status=False)
# ------ NEW USERS -----------------------------
#
# INCOMPLETE - FETCH FROM WHERE?
#
# users_remote = shopify.get_orders()
# for user in users_remote:
# for user_local in users_database:
# if user_local['email'] == user['email']:
# # Add user to database
# # Send Welcome joke
# # Mark joke as sent
# break
# ------ SLEEP ---------------------------------
print(f"{logz.timestamp()}{Fore.GREEN} MEMBERSHIP → COMPLETED → {Style.RESET_ALL}Sleeping {CONFIG['refresh-interval'] * 60}mins")
time.sleep(CONFIG['refresh-interval'] * 60)
if __name__ == '__main__':
main()
| 30.711712 | 137 | 0.50132 |
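An illustrative check of the helper above (the date is made up; any end date in the past is reported as expired):

# check_membership_status({'membership_time_end': '2019-01-01 00:00:00'})   # -> False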
4a199edc19378961945f9c03f292c9261890e6df
| 3,974 |
py
|
Python
|
libs/backtest.py
|
illi4/Crypto_trading_robot
|
14d3021909232765ebb8c4d16d3ba0064c6bd93d
|
[
"MIT"
] | 19 |
2017-12-08T01:36:29.000Z
|
2021-12-06T06:59:54.000Z
|
libs/backtest.py
|
illi4/Crypto_trading_robot
|
14d3021909232765ebb8c4d16d3ba0064c6bd93d
|
[
"MIT"
] | null | null | null |
libs/backtest.py
|
illi4/Crypto_trading_robot
|
14d3021909232765ebb8c4d16d3ba0064c6bd93d
|
[
"MIT"
] | 10 |
2017-12-02T08:14:40.000Z
|
2021-11-04T16:29:19.000Z
|
################################ Libraries ############################################
import exceptions
import time as t
from datetime import datetime
import config
from collections import deque
from sys import exit
## Backtest class
class backtesting(object):
def __init__(self):
self.backtesting, self.exchange_abbr, self.market = False, None, None
self._curr_time, self._curr_price, self._end_time, self._curr_pricelog_line_no = None, None, None, None
self.finished = False
self._price_history_queue = deque()
# For proper timezone and DST handling
self.pytz_timezone = config.pytz_timezone
self.td_price_base_constant = config.td_price_base_constant
self.timedelta, self.timedelta_current = config.timedelta, config.timedelta
self.timedelta_str = ":".join([str(self.timedelta_current), '00', '00'])
def init_testing(self, datetime_in, datetime_until, exchange_abbr_in, market_in):
if config.backtesting_enabled:
self.backtesting = True
self._curr_time = t.mktime(datetime_in.timetuple())
self._end_time = t.mktime(datetime_until.timetuple())
self.exchange_abbr = exchange_abbr_in
self.market = market_in
if self.backtesting:
with open('price_log/' + self.market + '_' + self.exchange_abbr.lower() + '.csv') as file:
start_line_no = 0
self._curr_pricelog_line_no = -1
for line in file:
strs = line.split(',')
self._curr_pricelog_line_no = self._curr_pricelog_line_no + 1
# If starting earlier than available - finish
if (float(strs[0]) > self._curr_time) and (self._curr_pricelog_line_no == 0):
raise exceptions.IndexError('Backtesting time earlier than available')
if not start_line_no and float(strs[0]) >= self._curr_time:
start_line_no = self._curr_pricelog_line_no
if start_line_no:
if float(strs[0]) > self._end_time:
break
else:
pair = (float(strs[0])), float(strs[1])
self._price_history_queue.append(pair)
self._curr_pricelog_line_no = start_line_no
self._find_current_price()
def get_market_price(self, exchange_abbr_in, market_in, logger = None):
#print exchange_abbr_in.lower(), self.exchange_abbr.lower(), market_in.lower() , self.market.lower()
if exchange_abbr_in.lower() != self.exchange_abbr.lower() or market_in.lower() != self.market.lower():
raise exceptions.InputError('exchange_abbr_in or market_in is incorrect')
return self._curr_price
def _find_current_price(self):
while len(self._price_history_queue) > 0:
if self._price_history_queue[0][0] >= self._curr_time:
self._curr_price = self._price_history_queue[0][1]
return
else:
                self._price_history_queue.popleft()
self._curr_pricelog_line_no = self._curr_pricelog_line_no + 1
print('Price history finished')
self.finished = True
exit(0)
def time(self):
if not self.backtesting:
return t.time()
else:
return self._curr_time
def strftime(self, format):
return t.strftime(format, datetime.fromtimestamp(self.time()).timetuple())
def sleep(self, seconds):
if not self.backtesting:
t.sleep(seconds)
else:
self._curr_time += seconds
if self._curr_time <= self._end_time:
self._find_current_price()
else:
self.finished = True
def not_supported(self):
raise exceptions.Exception('not supported in backtesting')
| 40.55102 | 111 | 0.599144 |
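A rough usage sketch, commented out. It assumes config.backtesting_enabled is set and that a price_log/<market>_<exchange>.csv file with unix-timestamp,price rows exists; the market and exchange names are made up.

# bt = backtesting()
# bt.init_testing(datetime(2018, 1, 1), datetime(2018, 2, 1), 'btrx', 'USDT-BTC')
# while not bt.finished:
#     price = bt.get_market_price('btrx', 'USDT-BTC')
#     bt.sleep(300)   # advance simulated time by five minutes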
4a19a081d1274a69cad1ace66f25f9e6275f7284
| 3,086 |
py
|
Python
|
benchmarks/postgres/zzzeek_asyncio_greenlet_part_11.py
|
moriyoshi/greenletio
|
75fc944cefe58ee491e5b8ff951a64f13ae0104d
|
[
"MIT"
] | 131 |
2020-07-17T22:36:25.000Z
|
2022-03-06T21:29:23.000Z
|
benchmarks/postgres/zzzeek_asyncio_greenlet_part_11.py
|
moriyoshi/greenletio
|
75fc944cefe58ee491e5b8ff951a64f13ae0104d
|
[
"MIT"
] | 7 |
2020-07-18T07:36:13.000Z
|
2021-07-16T08:28:47.000Z
|
benchmarks/postgres/zzzeek_asyncio_greenlet_part_11.py
|
moriyoshi/greenletio
|
75fc944cefe58ee491e5b8ff951a64f13ae0104d
|
[
"MIT"
] | 5 |
2020-07-18T01:08:40.000Z
|
2021-05-12T09:17:17.000Z
|
"""
Source: https://gist.github.com/zzzeek/a63254eedac043b3c233a0de5352f9c5
This is a simpler version of the greenlet
example at https://gist.github.com/zzzeek/4e89ce6226826e7a8df13e1b573ad354
Instead of the "await" keyword, we use the "await_()" function to interact with
the greenlet context. the greenlet context itself is 23 lines of code right
here.
"""
import asyncio
import random
import sys
import asyncpg
import greenlet
def await_(coroutine):
current = greenlet.getcurrent()
try:
spawning = current.spawning_greenlet
except AttributeError:
raise Exception(
"not running inside a greenlet right now, "
"can't use await_() function"
)
else:
return spawning.switch(coroutine)
async def greenlet_spawn(__fn, *args, **kw):
target = greenlet.greenlet(__fn)
target.spawning_greenlet = greenlet.getcurrent()
target_return = target.switch(*args, **kw)
try:
while True:
if not target:
return target_return
task = asyncio.create_task(target_return)
try:
await task
except:
target_return = target.throw(*sys.exc_info())
else:
target_return = target.switch(task.result())
finally:
target.spawning_greenlet = None
if __name__ == "__main__":
def add_and_select_data(conn, data):
row = await_(
conn.fetchrow(
"insert into mytable(data) values ($1) returning id", data
)
)
id_ = row[0]
result = await_(
conn.fetchrow("select data from mytable where id=($1)", id_)
)
return result[0]
async def setup_database():
conn = await (
asyncpg.connect(
user="postgres", password="postgres", host="localhost", database="test",
)
)
await (conn.execute("drop table if exists mytable"))
await (
conn.execute(
"create table if not exists "
"mytable (id serial primary key, data varchar)"
)
)
await conn.close()
concurrent_requests = 40
num_recs = 1000
async def run_request():
conn = await (
asyncpg.connect(
user="postgres", password="postgres", host="localhost", database="test",
)
)
for i in range(num_recs):
random_data = "random %d" % (random.randint(1, 1000000))
retval = await greenlet_spawn(
add_and_select_data, conn, random_data
)
assert retval == random_data, "%s != %s" % (retval, random_data)
await (conn.close())
async def main():
await setup_database()
await asyncio.gather(
*[run_request() for j in range(concurrent_requests)]
)
import time
now = time.perf_counter()
asyncio.run(main())
print(
"%f"
% (
(time.perf_counter() - now),
)
)
| 24.688 | 88 | 0.568049 |
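A minimal sketch of the same pattern without a database, assuming the await_ and greenlet_spawn helpers above are importable; asyncio.sleep stands in for the asyncpg calls:

def sync_worker(label):
    # Runs inside a greenlet; await_ suspends only this greenlet, not the event loop.
    await_(asyncio.sleep(0.01))
    return label.upper()

async def demo():
    results = await asyncio.gather(
        *[greenlet_spawn(sync_worker, "task%d" % i) for i in range(3)])
    print(results)  # ['TASK0', 'TASK1', 'TASK2']

# asyncio.run(demo())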
4a19a0a0411b965bd35eae068e8473b515e31a7e
| 14,775 |
py
|
Python
|
tensorflow_federated/python/core/impl/tensorflow_serialization.py
|
xueyuuu/federated
|
ad617401324c133eb838e4e0af7442a4dfa71d6e
|
[
"Apache-2.0"
] | 2 |
2019-07-09T10:04:39.000Z
|
2019-10-02T05:10:16.000Z
|
tensorflow_federated/python/core/impl/tensorflow_serialization.py
|
xueyuuu/federated
|
ad617401324c133eb838e4e0af7442a4dfa71d6e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/impl/tensorflow_serialization.py
|
xueyuuu/federated
|
ad617401324c133eb838e4e0af7442a4dfa71d6e
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for serializing TensorFlow computations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import shutil
import tempfile
import types
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import context_stack_base
from tensorflow_federated.python.core.impl import function_utils
from tensorflow_federated.python.core.impl import graph_utils
from tensorflow_federated.python.core.impl import tf_computation_context
from tensorflow_federated.python.core.impl import type_serialization
from tensorflow_federated.python.tensorflow_libs import graph_keys
def finalize_binding(binding, tensor_info_map):
"""Mutates binding by filling in actual tensor names.
Args:
binding: A `pb.Binding` or one of its submessages.
tensor_info_map: A dict mapping the placeholder `tensor_name`s found in
`binding` to final tensor names.
"""
if not binding:
if tensor_info_map:
raise ValueError('Empty binding, but non-empty tensor_info_map {}:\n' +
str(tensor_info_map))
return
if isinstance(binding, pb.TensorFlow.Binding):
sub_binding = getattr(binding, binding.WhichOneof('binding'))
finalize_binding(sub_binding, tensor_info_map)
elif isinstance(binding, pb.TensorFlow.TensorBinding):
name = binding.tensor_name
if name not in tensor_info_map:
raise ValueError(
'Did not find tensor_name {} in provided tensor_info_map with keys {}'
.format(name, list(tensor_info_map.keys())))
binding.tensor_name = tensor_info_map[name].name
elif isinstance(binding, pb.TensorFlow.NamedTupleBinding):
for sub_binding in binding.element:
finalize_binding(sub_binding, tensor_info_map)
else:
raise ValueError('Unsupported binding type {}'.format(
py_typecheck.type_string(type(binding))))
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
"""Serializes the 'target' as a TF computation with a given parameter type.
Args:
target: The entity to convert into and serialize as a TF computation. This
can currently only be a Python function or `tf.function`, with arguments
matching the 'parameter_type'.
parameter_type: The parameter type specification if the target accepts a
parameter, or `None` if the target doesn't declare any parameters. Either
an instance of `types.Type`, or something that's convertible to it by
`types.to_type()`.
unpack: Whether to always unpack the parameter_type. Necessary for support
of polymorphic tf2_computations.
Returns:
The constructed `pb.Computation` instance with the `pb.TensorFlow` variant
set.
Raises:
TypeError: If the arguments are of the wrong types.
ValueError: If the signature of the target is not compatible with the given
parameter type.
"""
py_typecheck.check_callable(target)
parameter_type = computation_types.to_type(parameter_type)
argspec = function_utils.get_argspec(target)
if argspec.args and parameter_type is None:
raise ValueError(
'Expected the target to declare no parameters, found {}.'.format(
repr(argspec.args)))
# In the codepath for TF V1 based serialization (tff.tf_computation),
# we get the "wrapped" function to serialize. Here, target is the
# raw function to be wrapped; however, we still need to know if
# the parameter_type should be unpacked into multiple args and kwargs
# in order to construct the TensorSpecs to be passed in the call
# to get_concrete_fn below.
unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
arg_typespecs, kwarg_typespecs, parameter_binding = (
graph_utils.get_tf_typespec_and_binding(
parameter_type, arg_names=argspec.args, unpack=unpack))
# Pseudo-global to be appended to once when target_poly below is traced.
type_and_binding_slot = []
# N.B. To serialize a tf.function or eager python code,
# the return type must be a flat list, tuple, or dict. However, the
# tff.tf_computation must be able to handle structured inputs and outputs.
# Thus, we intercept the result of calling the original target fn, introspect
# its structure to create a result_type and bindings, and then return a
# flat dict output. It is this new "unpacked" tf.function that we will
# serialize using tf.saved_model.save.
#
# TODO(b/117428091): The return type limitation is primarily a limitation of
# SignatureDefs and therefore of the signatures argument to
# tf.saved_model.save. tf.functions attached to objects and loaded back with
# tf.saved_model.load can take/return nests; this might offer a better
# approach to the one taken here.
@tf.function(autograph=False)
def target_poly(*args, **kwargs):
result = target(*args, **kwargs)
result_dict, result_type, result_binding = (
graph_utils.get_tf2_result_dict_and_binding(result))
assert not type_and_binding_slot
# A "side channel" python output.
type_and_binding_slot.append((result_type, result_binding))
return result_dict
# Triggers tracing so that type_and_binding_slot is filled.
cc_fn = target_poly.get_concrete_function(*arg_typespecs, **kwarg_typespecs)
assert len(type_and_binding_slot) == 1
result_type, result_binding = type_and_binding_slot[0]
# N.B. Note that cc_fn does *not* accept the same args and kwargs as the
# Python target_poly; instead, it must be called with **kwargs based on the
# unique names embedded in the TensorSpecs inside arg_typespecs and
# kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
# between these tensor names and the components of the (possibly nested) TFF
# input type. When cc_fn is serialized, concrete tensors for each input are
# introduced, and the call finalize_binding(parameter_binding,
# sigs['serving_default'].inputs) updates the bindings to reference these
# concrete tensors.
# Associate vars with unique names and explicitly attach to the Checkpoint:
var_dict = {
'var{:02d}'.format(i): v for i, v in enumerate(cc_fn.graph.variables)
}
saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)
try:
# TODO(b/122081673): All we really need is the meta graph def, we could
# probably just load that directly, e.g., using parse_saved_model from
# tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
# depend on that presumably non-public symbol. Perhaps TF can expose a way
# to just get the MetaGraphDef directly without saving to a tempfile? This
# looks like a small change to v2.saved_model.save().
outdir = tempfile.mkdtemp('savedmodel')
tf.saved_model.save(saveable, outdir, signatures=cc_fn)
graph = tf.Graph()
with tf.compat.v1.Session(graph=graph) as sess:
mgd = tf.saved_model.loader.load(
sess, tags=[tf.saved_model.tag_constants.SERVING], export_dir=outdir)
finally:
shutil.rmtree(outdir)
sigs = mgd.signature_def
# TODO(b/123102455): Figure out how to support the init_op. The meta graph def
# contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
# probably won't do what we want, because it will want to read from
# Checkpoints, not just run Variable initializerse (?). The right solution may
# be to grab the target_poly.get_initialization_function(), and save a sig for
# that.
  # Now, traverse the signature from the MetaGraphDef to find the actual
  # tensor names and write them into the bindings.
finalize_binding(parameter_binding, sigs['serving_default'].inputs)
finalize_binding(result_binding, sigs['serving_default'].outputs)
annotated_type = computation_types.FunctionType(parameter_type, result_type)
return pb.Computation(
type=pb.Type(
function=pb.FunctionType(
parameter=type_serialization.serialize_type(parameter_type),
result=type_serialization.serialize_type(result_type))),
tensorflow=pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(mgd.graph_def),
parameter=parameter_binding,
result=result_binding)), annotated_type
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
"""Serializes the 'target' as a TF computation with a given parameter type.
See also `serialize_tf2_as_tf_computation` for TensorFlow 2
serialization.
Args:
target: The entity to convert into and serialize as a TF computation. This
can currently only be a Python function. In the future, we will add here
support for serializing the various kinds of non-eager and eager
functions, and eventually aim at full support for and compliance with TF
2.0. This function is currently required to declare either zero parameters
if `parameter_type` is `None`, or exactly one parameter if it's not
`None`. The nested structure of this parameter must correspond to the
structure of the 'parameter_type'. In the future, we may support targets
with multiple args/keyword args (to be documented in the API and
referenced from here).
parameter_type: The parameter type specification if the target accepts a
parameter, or `None` if the target doesn't declare any parameters. Either
an instance of `types.Type`, or something that's convertible to it by
`types.to_type()`.
context_stack: The context stack to use.
Returns:
A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
the instance with the `pb.TensorFlow` variant set, and the type is an
instance of `tff.Type`, potentially including Python container annotations,
for use by TensorFlow computation wrappers.
Raises:
TypeError: If the arguments are of the wrong types.
ValueError: If the signature of the target is not compatible with the given
parameter type.
"""
# TODO(b/113112108): Support a greater variety of target type signatures,
# with keyword args or multiple args corresponding to elements of a tuple.
# Document all accepted forms with examples in the API, and point to there
# from here.
py_typecheck.check_type(target, types.FunctionType)
py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
parameter_type = computation_types.to_type(parameter_type)
argspec = inspect.getargspec(target) # pylint: disable=deprecated-method
with tf.Graph().as_default() as graph:
args = []
if parameter_type is not None:
if len(argspec.args) != 1:
raise ValueError(
'Expected the target to declare exactly one parameter, '
'found {}.'.format(repr(argspec.args)))
parameter_name = argspec.args[0]
parameter_value, parameter_binding = graph_utils.stamp_parameter_in_graph(
parameter_name, parameter_type, graph)
args.append(parameter_value)
else:
if argspec.args:
raise ValueError(
'Expected the target to declare no parameters, found {}.'.format(
repr(argspec.args)))
parameter_binding = None
context = tf_computation_context.TensorFlowComputationContext(graph)
with context_stack.install(context):
result = target(*args)
# TODO(b/122081673): This needs to change for TF 2.0. We may also
# want to allow the person creating a tff.tf_computation to specify
# a different initializer; e.g., if it is known that certain
# variables will be assigned immediately to arguments of the function,
# then it is wasteful to initialize them before this.
#
# The following is a bit of a work around: the collections below may
# contain variables more than once, hence we throw into a set. TFF needs
# to ensure all variables are initialized, but not all variables are
# always in the collections we expect. tff.learning._KerasModel tries to
# pull Keras variables (that may or may not be in GLOBAL_VARIABLES) into
# TFF_MODEL_VARIABLES for now.
all_variables = set(tf.compat.v1.global_variables() +
tf.compat.v1.local_variables() +
tf.compat.v1.get_collection(
graph_keys.GraphKeys.VARS_FOR_TFF_TO_INITIALIZE))
if all_variables:
# Use a readable but not-too-long name for the init_op.
name = 'init_op_for_' + '_'.join(
[v.name.replace(':0', '') for v in all_variables])
if len(name) > 50:
name = 'init_op_for_{}_variables'.format(len(all_variables))
with tf.control_dependencies(context.init_ops):
# Before running the main new init op, run any initializers for sub-
# computations from context.init_ops. Variables from import_graph_def
# will not make it into the global collections, and so will not be
# initialized without this code path.
init_op_name = tf.compat.v1.initializers.variables(
all_variables, name=name).name
elif context.init_ops:
init_op_name = tf.group(
*context.init_ops, name='subcomputation_init_ops').name
else:
init_op_name = None
result_type, result_binding = graph_utils.capture_result_from_graph(
result, graph)
annotated_type = computation_types.FunctionType(parameter_type, result_type)
return pb.Computation(
type=pb.Type(
function=pb.FunctionType(
parameter=type_serialization.serialize_type(parameter_type),
result=type_serialization.serialize_type(result_type))),
tensorflow=pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding,
initialize_op=init_op_name)), annotated_type
| 46.171875 | 80 | 0.733401 |
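An illustrative call of the TF 2 path, commented out: these are internal helpers, and the argument types shown are assumptions based on the docstring above.

# comp_pb, annotated_type = serialize_tf2_as_tf_computation(
#     tf.function(lambda x: x + 1), computation_types.TensorType(tf.int32))
# comp_pb is a pb.Computation with the `tensorflow` variant set, and
# annotated_type is the corresponding (int32 -> int32) function type.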
4a19a290ec506c74b55a09daec35b2e88ca3f96b
| 2,078 |
py
|
Python
|
integration/experiment/monitor/run_monitor_minife.py
|
asmaalrawi/geopm
|
e93548dfdd693a17c81163787ba467891937356d
|
[
"BSD-3-Clause"
] | null | null | null |
integration/experiment/monitor/run_monitor_minife.py
|
asmaalrawi/geopm
|
e93548dfdd693a17c81163787ba467891937356d
|
[
"BSD-3-Clause"
] | null | null | null |
integration/experiment/monitor/run_monitor_minife.py
|
asmaalrawi/geopm
|
e93548dfdd693a17c81163787ba467891937356d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
Run MiniFE with the monitor agent.
'''
import argparse
from experiment.monitor import monitor
from apps.minife import minife
if __name__ == '__main__':
parser = argparse.ArgumentParser()
monitor.setup_run_args(parser)
args, extra_args = parser.parse_known_args()
app_conf = minife.MinifeAppConf(args.node_count)
monitor.launch(app_conf=app_conf, args=args,
experiment_cli_args=extra_args)
| 40.745098 | 74 | 0.744466 |
4a19a482adc8aeb451c7eed504dd3e88036bea31
| 4,891 |
py
|
Python
|
docs/conf.py
|
fastscape-lem/fastscape-litho
|
0ce9c7c056b197a2f558343597af7c7375ff6f20
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
fastscape-lem/fastscape-litho
|
0ce9c7c056b197a2f558343597af7c7375ff6f20
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
fastscape-lem/fastscape-litho
|
0ce9c7c056b197a2f558343597af7c7375ff6f20
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# fastscape_litho documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import fastscape_litho
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'fastscape_litho'
copyright = "2021, Boris Gailleton"
author = "Boris Gailleton"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = fastscape_litho.__version__
# The full version, including alpha/beta/rc tags.
release = fastscape_litho.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'fastscape_lithodoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fastscape_litho.tex',
'fastscape_litho Documentation',
'Boris Gailleton', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fastscape_litho',
'fastscape_litho Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fastscape_litho',
'fastscape_litho Documentation',
author,
'fastscape_litho',
'One line description of project.',
'Miscellaneous'),
]
| 30.006135 | 77 | 0.69127 |
4a19a4e77607c50196e4c29a1a054a73dcdc20a3
| 8,072 |
py
|
Python
|
packages/Python/lldbsuite/test/functionalities/plugins/python_os_plugin/TestPythonOSPlugin.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 2 |
2019-05-24T14:10:24.000Z
|
2019-05-24T14:27:38.000Z
|
packages/Python/lldbsuite/test/functionalities/plugins/python_os_plugin/TestPythonOSPlugin.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | null | null | null |
packages/Python/lldbsuite/test/functionalities/plugins/python_os_plugin/TestPythonOSPlugin.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | null | null | null |
"""
Test that the Python operating system plugin works correctly
"""
from __future__ import print_function
import os
import time
import re
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class PluginPythonOSPlugin(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_python_os_plugin(self):
"""Test that the Python operating system plugin works correctly"""
self.build()
        self.run_python_os_functionality()
    def test_python_os_step(self):
"""Test that the Python operating system plugin works correctly when single stepping a virtual thread"""
self.build()
self.run_python_os_step()
def verify_os_thread_registers(self, thread):
frame = thread.GetFrameAtIndex(0)
registers = frame.GetRegisters().GetValueAtIndex(0)
reg_value = thread.GetThreadID() + 1
for reg in registers:
self.assertTrue(
reg.GetValueAsUnsigned() == reg_value,
"Verify the registers contains the correct value")
reg_value = reg_value + 1
    def run_python_os_functionality(self):
"""Test that the Python operating system plugin works correctly"""
# Set debugger into synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
python_os_plugin_path = os.path.join(self.getSourceDir(),
"operating_system.py")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set breakpoints inside and outside methods that take pointers to the
# containing struct.
lldbutil.run_break_set_by_source_regexp(self, "// Set breakpoint here")
# Register our shared libraries for remote targets so they get
# automatically uploaded
arguments = None
environment = None
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
arguments, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Make sure there are no OS plug-in created thread when we first stop
# at our breakpoint in main
thread = process.GetThreadByID(0x111111111)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x111111111 before we load the python OS plug-in")
thread = process.GetThreadByID(0x222222222)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x222222222 before we load the python OS plug-in")
thread = process.GetThreadByID(0x333333333)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x333333333 before we load the python OS plug-in")
# Now load the python OS plug-in which should update the thread list and we should have
# OS plug-in created threads with the IDs: 0x111111111, 0x222222222,
# 0x333333333
command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
self.dbg.HandleCommand(command)
# Verify our OS plug-in threads showed up
thread = process.GetThreadByID(0x111111111)
self.assertTrue(
thread.IsValid(),
"Make sure there is a thread 0x111111111 after we load the python OS plug-in")
self.verify_os_thread_registers(thread)
thread = process.GetThreadByID(0x222222222)
self.assertTrue(
thread.IsValid(),
"Make sure there is a thread 0x222222222 after we load the python OS plug-in")
self.verify_os_thread_registers(thread)
thread = process.GetThreadByID(0x333333333)
self.assertTrue(
thread.IsValid(),
"Make sure there is a thread 0x333333333 after we load the python OS plug-in")
self.verify_os_thread_registers(thread)
# Now clear the OS plug-in path to make the OS plug-in created threads
        # disappear
self.dbg.HandleCommand(
"settings clear target.process.python-os-plugin-path")
# Verify the threads are gone after unloading the python OS plug-in
thread = process.GetThreadByID(0x111111111)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x111111111 after we unload the python OS plug-in")
thread = process.GetThreadByID(0x222222222)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x222222222 after we unload the python OS plug-in")
thread = process.GetThreadByID(0x333333333)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x333333333 after we unload the python OS plug-in")
def run_python_os_step(self):
"""Test that the Python operating system plugin works correctly and allows single stepping of a virtual thread that is backed by a real thread"""
# Set debugger into synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
python_os_plugin_path = os.path.join(self.getSourceDir(),
"operating_system2.py")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set breakpoints inside and outside methods that take pointers to the
# containing struct.
lldbutil.run_break_set_by_source_regexp(self, "// Set breakpoint here")
# Register our shared libraries for remote targets so they get
# automatically uploaded
arguments = None
environment = None
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
arguments, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Make sure there are no OS plug-in created thread when we first stop
# at our breakpoint in main
thread = process.GetThreadByID(0x111111111)
self.assertFalse(
thread.IsValid(),
"Make sure there is no thread 0x111111111 before we load the python OS plug-in")
# Now load the python OS plug-in which should update the thread list and we should have
# OS plug-in created threads with the IDs: 0x111111111, 0x222222222,
# 0x333333333
command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
self.dbg.HandleCommand(command)
# Verify our OS plug-in threads showed up
thread = process.GetThreadByID(0x111111111)
self.assertTrue(
thread.IsValid(),
"Make sure there is a thread 0x111111111 after we load the python OS plug-in")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
frame.IsValid(),
"Make sure we get a frame from thread 0x111111111")
line_entry = frame.GetLineEntry()
self.assertTrue(
line_entry.GetFileSpec().GetFilename() == 'main.c',
"Make sure we stopped on line 5 in main.c")
self.assertTrue(
line_entry.GetLine() == 5,
"Make sure we stopped on line 5 in main.c")
# Now single step thread 0x111111111 and make sure it does what we need
# it to
thread.StepOver()
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
frame.IsValid(),
"Make sure we get a frame from thread 0x111111111")
line_entry = frame.GetLineEntry()
self.assertTrue(
line_entry.GetFileSpec().GetFilename() == 'main.c',
"Make sure we stepped from line 5 to line 6 in main.c")
self.assertTrue(line_entry.GetLine() == 6,
"Make sure we stepped from line 5 to line 6 in main.c")
| 40.767677 | 153 | 0.647547 |
4a19a50fcd79f4841e1f12a4054599180e3be67c
| 11,621 |
py
|
Python
|
src/robusta/runner/config_loader.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
src/robusta/runner/config_loader.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
src/robusta/runner/config_loader.py
|
kandahk/robusta
|
61a2001cb1c4e90e8a74b810463ec99e6cb80787
|
[
"MIT"
] | null | null | null |
import importlib.util
import logging
import os
import pkgutil
import subprocess
import sys
import threading
import yaml
from typing import Optional, List, Dict
from inspect import getmembers
from ..cli.utils import get_package_name
from ..integrations.receiver import ActionRequestReceiver
from ..integrations.scheduled.trigger import ScheduledTriggerEvent
from ..core.playbooks.playbooks_event_handler import PlaybooksEventHandler
from ..core.model.runner_config import RunnerConfig, PlaybookRepo
from ..core.playbooks.actions_registry import ActionsRegistry, Action
from ..core.model.env_vars import (
INTERNAL_PLAYBOOKS_ROOT,
PLAYBOOKS_CONFIG_FILE_PATH,
PLAYBOOKS_ROOT, DEFAULT_PLAYBOOKS_ROOT, CUSTOM_PLAYBOOKS_ROOT,
)
from ..integrations.git.git_repo import (
GitRepoManager,
GitRepo,
GIT_URL_PREFIX,
LOCAL_PATH_URL_PREFIX,
)
from ..utils.file_system_watcher import FileSystemWatcher
from ..model.playbook_definition import PlaybookDefinition
from ..model.config import (
Registry,
SinksRegistry,
PlaybooksRegistryImpl,
PlaybooksRegistry,
)
from ..integrations.scheduled.playbook_scheduler_manager_impl import (
PlaybooksSchedulerManagerImpl,
)
class ConfigLoader:
# the structure on disk is:
# root_playbook_path/
# |- playbook_dir1
# |--- playbook1.py
# |--- playbook2.py
# |--- requirements.txt
# |- playbook_dir2
# |--- ...
def __init__(
self,
registry: Registry,
event_handler: PlaybooksEventHandler,
):
self.config_file_path = PLAYBOOKS_CONFIG_FILE_PATH
self.registry = registry
self.event_handler = event_handler
self.root_playbook_path = PLAYBOOKS_ROOT
self.reload_lock = threading.RLock()
self.watcher = FileSystemWatcher(
self.root_playbook_path, self.__reload_playbook_packages
)
self.conf_watcher = FileSystemWatcher(
self.config_file_path, self.__reload_playbook_packages
)
self.__reload_playbook_packages("initialization")
def close(self):
self.watcher.stop_watcher()
self.conf_watcher.stop_watcher()
def reload(self, description: str):
self.__reload_playbook_packages(description)
def __reload_scheduler(self, playbooks_registry: PlaybooksRegistry):
scheduler = self.registry.get_scheduler()
if not scheduler: # no scheduler yet, initialization
scheduler = PlaybooksSchedulerManagerImpl(event_handler=self.event_handler)
self.registry.set_scheduler(scheduler)
scheduler.update(playbooks_registry.get_playbooks(ScheduledTriggerEvent()))
def __reload_receiver(self):
receiver = self.registry.get_receiver()
if not receiver: # no existing receiver, just start one
self.registry.set_receiver(ActionRequestReceiver(self.event_handler))
return
current_account_id = self.event_handler.get_global_config().get("account_id")
current_cluster_name = self.event_handler.get_global_config().get(
"cluster_name"
)
if (
current_account_id != receiver.account_id
or current_cluster_name != receiver.cluster_name
):
# need to re-create the receiver
receiver.stop()
self.registry.set_receiver(ActionRequestReceiver(self.event_handler))
@classmethod
def __get_package_name(cls, local_path) -> str:
package_name = get_package_name(local_path)
if not package_name:
raise Exception(f"Illegal playbooks package {local_path}. Package name not found")
return package_name
def __load_playbooks_repos(
self,
actions_registry: ActionsRegistry,
playbooks_repos: Dict[str, PlaybookRepo],
):
playbook_packages = []
for playbook_package, playbooks_repo in playbooks_repos.items():
try:
if (
playbooks_repo.pip_install
): # skip playbooks that are already in site-packages
if playbooks_repo.url.startswith(GIT_URL_PREFIX):
repo = GitRepo(
playbooks_repo.url,
playbooks_repo.key.get_secret_value(),
)
local_path = repo.repo_local_path
elif playbooks_repo.url.startswith(LOCAL_PATH_URL_PREFIX):
local_path = playbooks_repo.url.replace(
LOCAL_PATH_URL_PREFIX, ""
)
else:
raise Exception(
f"Illegal playbook repo url {playbooks_repo.url}. "
f"Must start with '{GIT_URL_PREFIX}' or '{LOCAL_PATH_URL_PREFIX}'"
)
if not os.path.exists(
local_path
): # in case the repo url was defined before it was actually loaded
logging.error(
f"Playbooks local path {local_path} does not exist. Skipping"
)
continue
# Adding to pip the playbooks repo from local_path
subprocess.check_call(
[sys.executable, "-m", "pip", "install", local_path]
)
playbook_package = self.__get_package_name(local_path=local_path)
playbook_packages.append(playbook_package)
except Exception as e:
                logging.error(f"Failed to add playbooks repo {playbook_package} {e}")
for package_name in playbook_packages:
self.__import_playbooks_package(actions_registry, package_name)
@classmethod
def __import_playbooks_package(
cls, actions_registry: ActionsRegistry, package_name: str
):
logging.info(f"Importing actions package {package_name}")
pkg = importlib.import_module(package_name)
playbooks_modules = [
name for _, name, _ in pkgutil.walk_packages(path=pkg.__path__)
]
for playbooks_module in playbooks_modules:
try:
module_name = ".".join([package_name, playbooks_module])
logging.info(f"importing actions from {module_name}")
m = importlib.import_module(module_name)
playbook_actions = getmembers(m, Action.is_action)
for (action_name, action_func) in playbook_actions:
actions_registry.add_action(action_func)
except Exception as e:
logging.error(f"error loading module {playbooks_module}. exception={e}")
def __reload_playbook_packages(self, change_name):
logging.info(f"Reloading playbook packages due to change on {change_name}")
with self.reload_lock:
try:
runner_config = self.__load_runner_config(self.config_file_path)
if runner_config is None:
return
action_registry = ActionsRegistry()
# reordering playbooks repos, so that the internal and default playbooks will be loaded first
                # This allows overriding them with playbooks loaded afterwards
playbook_repos: Dict[str, PlaybookRepo] = {}
playbook_repos[
"robusta.core.playbooks.internal"
] = PlaybookRepo(url=INTERNAL_PLAYBOOKS_ROOT, pip_install=False)
# order matters! Loading the default first, allows overriding it if adding package with the same name
# since python 3.7, iteration order is identical to insertion order, if dict didn't change
# default playbooks
playbook_repos[
self.__get_package_name(DEFAULT_PLAYBOOKS_ROOT)
] = PlaybookRepo(url=f"file://{DEFAULT_PLAYBOOKS_ROOT}")
for url, repo in runner_config.playbook_repos.items():
playbook_repos[url] = repo
# saving the ordered playbooks repo into runner config
runner_config.playbook_repos = playbook_repos
# custom playbooks
if os.path.exists(CUSTOM_PLAYBOOKS_ROOT):
for custom_playbooks_location in os.listdir(CUSTOM_PLAYBOOKS_ROOT):
location = os.path.join(CUSTOM_PLAYBOOKS_ROOT, custom_playbooks_location)
runner_config.playbook_repos[
self.__get_package_name(location)
] = PlaybookRepo(url=f"file://{location}")
else:
logging.info(f"No custom playbooks defined at {CUSTOM_PLAYBOOKS_ROOT}")
self.__load_playbooks_repos(
action_registry, runner_config.playbook_repos
)
(sinks_registry, playbooks_registry) = self.__prepare_runtime_config(
runner_config, self.registry.get_sinks(), action_registry
)
# clear git repos, so it would be re-initialized
GitRepoManager.clear_git_repos()
self.__reload_scheduler(playbooks_registry)
self.registry.set_actions(action_registry)
self.registry.set_playbooks(playbooks_registry)
self.registry.set_sinks(sinks_registry)
self.__reload_receiver()
except Exception as e:
logging.exception(
f"unknown error reloading playbooks. will try again when they next change. exception={e}"
)
@classmethod
def __prepare_runtime_config(
cls,
runner_config: RunnerConfig,
sinks_registry: SinksRegistry,
actions_registry: ActionsRegistry,
) -> (SinksRegistry, PlaybooksRegistry):
existing_sinks = sinks_registry.get_all() if sinks_registry else {}
new_sinks = SinksRegistry.construct_new_sinks(
runner_config.sinks_config, existing_sinks, runner_config.global_config
)
sinks_registry = SinksRegistry(new_sinks)
# TODO we will replace it with a more generic mechanism, as part of the triggers separation task
# First, we load the internal playbooks, then add the user activated playbooks
# Order matters. Internal playbooks, should be added first, and run first
active_playbooks = [
PlaybookDefinition(
triggers=[{"on_kubernetes_any_resource_all_changes": {}}],
actions=[{"cluster_discovery_updates": {}}],
)
]
active_playbooks.extend(runner_config.active_playbooks)
playbooks_registry = PlaybooksRegistryImpl(
active_playbooks,
actions_registry,
runner_config.global_config,
sinks_registry.default_sinks,
)
return sinks_registry, playbooks_registry
@classmethod
def __load_runner_config(cls, config_file_path) -> Optional[RunnerConfig]:
if not os.path.exists(config_file_path):
logging.warning(
f"config file not found at {config_file_path} - not configuring any playbooks."
)
return None
logging.info(f"loading config {config_file_path}")
with open(config_file_path) as file:
yaml_content = yaml.safe_load(file)
return RunnerConfig(**yaml_content)
| 41.063604 | 117 | 0.627313 |
4a19a674871ad1c4f7c20a5efda809c835f8157b
| 2,581 |
py
|
Python
|
events/worldcon75/migrations/0001_initial.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
events/worldcon75/migrations/0001_initial.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
events/worldcon75/migrations/0001_initial.py
|
Siikakala/kompassi
|
14cdcd966ab689d762cc885e28b6d15465c216f0
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-17 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import labour.models.signup_extras
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0023_auto_20160704_2155'),
('enrollment', '0003_auto_20170417_2259'),
]
operations = [
migrations.CreateModel(
name='SignupExtra',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('special_diet_other', models.TextField(blank=True, help_text='Jos noudatat erikoisruokavaliota, jota ei ole yllä olevassa listassa, ilmoita se tässä. Tapahtuman järjestäjä pyrkii ottamaan erikoisruokavaliot huomioon, mutta kaikkia erikoisruokavalioita ei välttämättä pystytä järjestämään.', verbose_name='Muu erikoisruokavalio')),
('shift_wishes', models.TextField(blank=True, help_text='Miten olet käytettävissä työvuoroihin tapahtuman aikana? Jos tiedät, ettet pääse paikalle johonkin tiettyyn aikaan tai haluat esimerkiksi osallistua johonkin tiettyyn ohjelmanumeroon, mainitse siitä tässä.', verbose_name='Työvuorotoiveet')),
('prior_experience', models.TextField(blank=True, help_text='Kerro aikaisemmasta työkokemuksestasi tapahtuman työvoimana tai muusta kokemuksesta, josta koet olevan hyötyä haetussa/haetuissa työtehtävissä.', verbose_name='Työkokemus')),
('free_text', models.TextField(blank=True, help_text='Tässä kentässä voit kertoa jotain minkä koet tarpeelliseksi, jota ei ole vielä mainittu.', verbose_name='Lisätietoja')),
('is_attending_member', models.BooleanField(verbose_name='Olen Worldcon 75:n <em>attending member</em>')),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='worldcon75_signup_extras', to='core.Event')),
('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='worldcon75_signup_extra', to='core.Person')),
('special_diet', models.ManyToManyField(blank=True, related_name='worldcon75_signupextra', to='enrollment.SpecialDiet', verbose_name='Erikoisruokavalio')),
],
options={
'abstract': False,
},
bases=(labour.models.signup_extras.SignupExtraMixin, models.Model),
),
]
| 64.525 | 347 | 0.707865 |
4a19a73d2fe70f6252960102e8b65f9c9d610e8a
| 4,680 |
py
|
Python
|
youtube_dl/extractor/cbs.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 3,001 |
2020-10-24T05:24:18.000Z
|
2022-03-31T06:45:32.000Z
|
youtube_dl/extractor/cbs.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 274 |
2020-10-24T04:57:21.000Z
|
2022-03-22T01:34:56.000Z
|
youtube_dl/extractor/cbs.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 541 |
2020-10-24T03:32:09.000Z
|
2022-01-12T23:49:30.000Z
|
from __future__ import unicode_literals
from .theplatform import ThePlatformFeedIE
from ..utils import (
ExtractorError,
int_or_none,
find_xpath_attr,
xpath_element,
xpath_text,
update_url_query,
)
class CBSBaseIE(ThePlatformFeedIE):
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
subtitles = {}
for k, ext in [('sMPTE-TTCCURL', 'tt'), ('ClosedCaptionURL', 'ttml'), ('webVTTCaptionURL', 'vtt')]:
cc_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', k)
if cc_e is not None:
cc_url = cc_e.get('value')
if cc_url:
subtitles.setdefault(subtitles_lang, []).append({
'ext': ext,
'url': cc_url,
})
return subtitles
class CBSIE(CBSBaseIE):
_VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)'
_TESTS = [{
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
'info_dict': {
'id': '_u7W953k6la293J7EPTd9oHkSPs6Xn6_',
'ext': 'mp4',
'title': 'Connect Chat feat. Garth Brooks',
'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
'duration': 1495,
'timestamp': 1385585425,
'upload_date': '20131127',
'uploader': 'CBSI-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}, {
'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/',
'only_matching': True,
}, {
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
'only_matching': True,
}]
def _extract_video_info(self, content_id, site='cbs', mpx_acc=2198311517):
items_data = self._download_xml(
'http://can.cbs.com/thunder/player/videoPlayerService.php',
content_id, query={'partner': site, 'contentId': content_id})
video_data = xpath_element(items_data, './/item')
title = xpath_text(video_data, 'videoTitle', 'title', True)
tp_path = 'dJ5BDC/media/guid/%d/%s' % (mpx_acc, content_id)
tp_release_url = 'http://link.theplatform.com/s/' + tp_path
asset_types = []
subtitles = {}
formats = []
last_e = None
for item in items_data.findall('.//item'):
asset_type = xpath_text(item, 'assetType')
if not asset_type or asset_type in asset_types or 'HLS_FPS' in asset_type or 'DASH_CENC' in asset_type:
continue
asset_types.append(asset_type)
query = {
'mbr': 'true',
'assetTypes': asset_type,
}
if asset_type.startswith('HLS') or asset_type in ('OnceURL', 'StreamPack'):
query['formats'] = 'MPEG4,M3U'
elif asset_type in ('RTMP', 'WIFI', '3G'):
query['formats'] = 'MPEG4,FLV'
try:
tp_formats, tp_subtitles = self._extract_theplatform_smil(
update_url_query(tp_release_url, query), content_id,
'Downloading %s SMIL data' % asset_type)
except ExtractorError as e:
last_e = e
continue
formats.extend(tp_formats)
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
if last_e and not formats:
raise last_e
self._sort_formats(formats)
info = self._extract_theplatform_metadata(tp_path, content_id)
info.update({
'id': content_id,
'title': title,
'series': xpath_text(video_data, 'seriesTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'duration': int_or_none(xpath_text(video_data, 'videoLength'), 1000),
'thumbnail': xpath_text(video_data, 'previewImageURL'),
'formats': formats,
'subtitles': subtitles,
})
return info
def _real_extract(self, url):
content_id = self._match_id(url)
return self._extract_video_info(content_id)
| 41.415929 | 224 | 0.583761 |
4a19a784a00b3106bb124ae94b6f4f81605b9cc4
| 194 |
py
|
Python
|
tests/processinst/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
tests/processinst/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
tests/processinst/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_all_contains_only_valid_names():
import pycamunda.processinst
for name in pycamunda.processinst.__all__:
getattr(pycamunda.processinst, name)
| 21.555556 | 46 | 0.721649 |
4a19aa613744ba26d2c394f47c21ca765a63befa
| 9,504 |
py
|
Python
|
tissue_specific_analysis/SampleSpecificIsoforms.py
|
Xinglab/espresso
|
a68b29178079be320f74f62a5cda5bbfa1934edb
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
tissue_specific_analysis/SampleSpecificIsoforms.py
|
Xinglab/espresso
|
a68b29178079be320f74f62a5cda5bbfa1934edb
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
tissue_specific_analysis/SampleSpecificIsoforms.py
|
Xinglab/espresso
|
a68b29178079be320f74f62a5cda5bbfa1934edb
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#!/usr/bin/env python3
'''
This is a script to identify sample-specific isoforms based on
an isoform read count matrix generated by ESPRESSO (rows are
detected isoforms and columns are samples)
'''
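# Example invocation (hypothetical file paths, shown only for illustration;
# the flags match the arguments parsed in main() below):
#   python SampleSpecificIsoforms.py \
#       -i /path/to/isoform_read_count_matrix.esp \
#       -t 8 \
#       -c 0.05 \
#       -o /path/to/sample_specific_isoforms.tsv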
# Load required libraries
import argparse
import numpy as np
import pandas as pd
import concurrent.futures as cf
from scipy.stats import binom, chi2, chi2_contingency
from statsmodels.stats.multitest import multipletests
def ParseMatrix(infile):
# Read infile as a pandas dataframe
matrix = pd.read_csv(infile, sep='\t', header=0)
# Drop all entries with NA gene ID (corresponds to unknown genes)
matrix = matrix[matrix['gene_ID'].notna()]
# Remove any entries where assignment of gene-isoform assignment is not one-to-one
# The gene_ID column will harbor at least one comma
matrix = matrix[~matrix['gene_ID'].str.contains(',')]
# Sort rows by gene ID (and reset row indices); also round all counts to the nearest integer
matrix = matrix.sort_values(by = ['gene_ID', 'transcript_ID']).reset_index(drop=True).round()
# Extract set of gene IDs
genelist = matrix['gene_ID'].unique().tolist()
return matrix, genelist
def FirstPass(chunk, matrix, cutoff):
# Initialize an output dataframe for FirstPass
colNames = ['gene_ID', 'p_value']
outputDF = pd.DataFrame(columns = colNames)
# Iterate over gene IDs in given chunk
for geneID in chunk:
# Extract dataframe associated with geneID
rcMatrix = matrix[matrix['gene_ID'] == geneID]
# Drop isoforms where total count is 0.0
rcMatrix = rcMatrix[rcMatrix.iloc[:,3:].sum(axis=1) > 0.0]
nrow = rcMatrix.shape[0]
# Only work with genes with more than 1 isoform (nrow > 1)
if nrow > 1:
# Remove columns where sum of total read counts is 0
geneCounts = rcMatrix.iloc[:,3:].sum(axis=0)
# Identify tissues where geneCounts == 0 and drop them from rcMatrix
rcMatrix = rcMatrix.drop(geneCounts[geneCounts == 0.0].index.tolist(), axis=1)
ncol = rcMatrix.shape[1]
# If only one tissue remains, ncol = 4 (only proceed if ncol > 4)
if ncol > 4:
# Determine total read count threshold using Cohen's w formula
# Use significance level of cutoff% (and assume effect size of 0.5)
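                # Cohen's w relates the chi-square statistic to sample size via chi2 = N * w^2,
                # so the minimum total read count for effect size w = 0.5 is
                # N = chi2_critical / w^2 = 4 * chi2_critical, as computed below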
threshold = 4*chi2.ppf(1-cutoff,df=(nrow-1)*(ncol-4))
totalRC = rcMatrix.iloc[:,3:].to_numpy().sum()
if totalRC > threshold:
# Run a chi-square test of homogeneity
pval = chi2_contingency(rcMatrix.iloc[:,3:])[1]
# Update outputDF
outputDF = outputDF.append({'gene_ID': geneID, 'p_value': pval}, ignore_index = True)
return outputDF
def SecondPass(chunk, matrix):
# Initialize an output dataframe for SecondPass
colNames = ['gene_ID', 'transcript_id', 'sample', 'raw_read_count', 'isoform_proportion_global', 'isoform_proportion_sample', 'p_value']
outputDF = pd.DataFrame(columns = colNames)
# Iterate over gene IDs in given chunk
for geneID in chunk:
# Extract dataframe associated with geneID
# All of these genes should have at least one isoform, at least two tissues with nonzero gene-level read counts,
# and a sufficiently large number of total reads (do not need to run filters again)
rcMatrix = matrix[matrix['gene_ID'] == geneID]
# Drop isoforms where total count is 0.0
rcMatrix = rcMatrix[rcMatrix.iloc[:,3:].sum(axis=1) > 0.0]
# Remove columns where sum of total read counts is 0
geneCounts = rcMatrix.iloc[:,3:].sum(axis=0)
# Identify tissues where geneCounts == 0 and drop them from rcMatrix
rcMatrix = rcMatrix.drop(geneCounts[geneCounts == 0.0].index.tolist(), axis=1)
nrow, ncol = rcMatrix.shape
# Retrieve global isoform proportions from rcMatrix
rowCounts = rcMatrix.iloc[:,3:].sum(axis=1)
expected = (rowCounts/rowCounts.sum()).to_list()
# Re-compute geneCounts (after dropping out zero-count columns)
geneCounts = rcMatrix.iloc[:,3:].sum(axis=0)
# One-tailed binomial test p-values
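        # For each isoform/sample cell, binom.sf(k) = P(X > k) and binom.pmf(k) = P(X = k),
        # so sf(k) + pmf(k) = P(X >= k): the upper-tail p-value of observing at least k reads
        # under the global isoform proportion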
pvalMatrix = np.array([binom.sf(rcMatrix.iloc[i,3:].to_list(), geneCounts.to_list(), expected[i]) +
binom.pmf(rcMatrix.iloc[i,3:].to_list(), geneCounts.to_list(), expected[i]) for i in range(nrow)])
## Build output matrix
# Flatten pvalMatrix and rcMatrix row-wise
pvalArr, rcArr = pvalMatrix.flatten(), rcMatrix.iloc[:,3:].to_numpy().flatten()
# Generate a matrix of isoform proportions (flattened)
propArr = (rcMatrix.iloc[:,3:]/geneCounts).to_numpy().flatten()
# Repeat gene ID, transcript ID, tissue, and expected isoform proportions
geneIDArr, transcriptIDArr = np.repeat(geneID, nrow*(ncol-3)), np.repeat(rcMatrix['transcript_ID'].to_numpy(),ncol-3)
tissueArr, expectedArr = np.tile(rcMatrix.columns[3:],nrow), np.repeat(expected, ncol-3)
resultDF = pd.DataFrame(list(zip(geneIDArr, transcriptIDArr, tissueArr, rcArr, expectedArr, propArr, pvalArr)), columns = colNames)
outputDF = outputDF.append(resultDF, ignore_index = True)
return outputDF
def SampleSpecificIsoforms(infile, threads, cutoff, outfile):
# Parse isoform read count matrix and extract list of genes
print('Parsing isoform read count matrix...', flush=True)
matrix, genelist = ParseMatrix(infile)
# First pass test: Run a chi-square test of homogeneity on the isoform read count matrix
# and return a dataframe with each processed gene and corresponding p-value
# This test will identify genes in which there exists at least one tissue whose isoform
# proportions deviate from the global expected proportion (averaged across all tissues)
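    # For each gene, the contingency table passed to chi2_contingency has one row per
    # isoform and one column per sample, with raw read counts as cell values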
print('Running first-pass test...', flush=True)
    # Split genelist into chunks for parallel processing across worker threads
genechunks = [x.tolist() for x in np.array_split(genelist, threads)]
with cf.ThreadPoolExecutor(max_workers=threads) as executor:
processes = [executor.submit(FirstPass, chunk, matrix, cutoff) for chunk in genechunks]
# Merge together individual dataframes generated from running FirstPass on chunks of gene IDs
# df1 is a dataframe with two columns: (i) gene_ID, (ii) p-value
results1 = [p.result() for p in processes]
df1 = pd.concat(results1, ignore_index=True)
# Perform FDR-adjustment of p-values in df1 and keep genes with FDR < cutoff%
pAdj1 = multipletests(df1['p_value'].tolist(), method='fdr_bh', is_sorted=False, returnsorted=False)[1].tolist()
df1['p_adj'] = pAdj1
# Keep gene IDs for which FDR < cutoff% from FirstPass
filteredGenes = df1[df1['p_adj'] < cutoff]['gene_ID'].to_list()
# Second pass test: Run a one-tailed binomial test on each entry of the isoform read
# count matrix. This test will identify isoform-tissue pairs in which the isoform
# proportion in the given tissue is significantly higher than expected
# Expected proportions are taken as the global isoform proportions across all samples
print('Running second-pass test...', flush=True)
    # Split filteredGenes into chunks for parallel processing across worker threads
filterChunks = [x.tolist() for x in np.array_split(filteredGenes, threads)]
with cf.ThreadPoolExecutor(max_workers=threads) as executor:
processes = [executor.submit(SecondPass, chunk, matrix) for chunk in filterChunks]
# Merge together individual dataframes generated from running SecondPass on chunks of gene IDs
results2 = [p.result() for p in processes]
outDF = pd.concat(results2, ignore_index=True)
# Perform FDR-adjustment of p-values in outDF and keep isoform-tissue pairs with FDR < cutoff%
pAdj = multipletests(outDF['p_value'].tolist(), method='fdr_bh', is_sorted=False, returnsorted=False)[1].tolist()
outDF['p_adj'] = pAdj
# Drop entries where pAdj < cutoff
outDF = outDF[outDF['p_adj'] < cutoff]
# Sort dataframe by adjusted p-value
outDF = outDF.sort_values(by = 'p_adj')
# Print outDF to outfile
outDF.to_csv(outfile, sep='\t', index=False)
def main():
moduleSummary = 'This is a script to call sample-specific isoforms from an isoform read count matrix generated by ESPRESSO'
parser = argparse.ArgumentParser(description=moduleSummary)
# Add arguments
parser.add_argument('-i', metavar='/path/to/read/count/matrix', required=True,
help='path to isoform read count matrix generated by ESPRESSO')
parser.add_argument('-t', metavar='###', required=True,
help='number of worker threads')
parser.add_argument('-c', metavar='###', required=True,
help='FDR threshold (between 0 and 1)')
parser.add_argument('-o', metavar='/path/to/output/file', required=True,
help='path to output file')
# Parse command-line arguments
args = parser.parse_args()
infile, threads, cutoff, outfile = args.i, int(args.t), float(args.c), args.o
print('Isoform read count matrix: ' + infile, flush=True)
print('Number of threads: ' + str(threads), flush=True)
print('FDR cutoff: ' + str(cutoff), flush=True)
print('Output file: ' + outfile, flush=True)
# Run SampleSpecificIsoforms
SampleSpecificIsoforms(infile, threads, cutoff, outfile)
if __name__ == '__main__':
main()
| 45.042654 | 140 | 0.680029 |
4a19aac285fb925bdd3e2249d82f93c66d103c33
| 3,487 |
py
|
Python
|
neural-network.py
|
benediktaugenstein/simple-neural-network
|
af58617e7f4fbbd222c60daf678981bf369dd838
|
[
"Apache-2.0"
] | null | null | null |
neural-network.py
|
benediktaugenstein/simple-neural-network
|
af58617e7f4fbbd222c60daf678981bf369dd838
|
[
"Apache-2.0"
] | null | null | null |
neural-network.py
|
benediktaugenstein/simple-neural-network
|
af58617e7f4fbbd222c60daf678981bf369dd838
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
# Can be set to make sure the random weights and biases generated are going to be the same for each time the code runs
np.random.seed(0)
# suppress scientific notation
np.set_printoptions(suppress=True)
# input data
input = np.array([[0.5, 0.3, 10.0, 0.2],
[0.7, 0.6, 0.25, 0.5],
[0.6, 0.5, 12.0, 0.3],
[0.5, 0.1, 0.5, 0.12],
[0.25, 0.8, 0.6, 0.7]])
# target output
target = np.array([[1,0,1,0,0]]).T
# activation function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# sigmoid derivative
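# note: x is expected to already be a sigmoid output, so s'(z) = s(z) * (1 - s(z)) = x * (1 - x)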
def sigmoid_derivative(x):
return x * (1 - x)
class Neural_Network:
def __init__(self, n_inputs, n_neurons):
# random weights created, biases = 0
self.weights = np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1, n_neurons))
def forward(self, inputs):
# multiplies input matrix with weights and adds biases
self.output = sigmoid(np.dot(inputs, self.weights)+self.biases)
def backpropagation_hidden_layer(self):
# backpropagation for hidden layer
# application of the chain rule to find the partial derivative of the loss function with respect to corresponding weight
# slope of loss function determines change in weights and biases
        # error at the output layer, propagated back through layer2's weights (chain rule)
        delta_output = (layer2.output - target) * sigmoid_derivative(layer2.output)
        delta_hidden = np.dot(delta_output, layer2.weights.T) * sigmoid_derivative(self.output)
        d_weights = (-1) * np.dot(input.T, delta_hidden)
        d_biases = (-1) * np.sum(delta_hidden, axis=0, keepdims=True)
self.weights += d_weights
self.biases += d_biases
def backpropagation_output_layer(self):
# backpropagation for output layer
d_weights = (-1) * np.dot(layer1.output.T, ((self.output - target) * sigmoid_derivative(self.output)))
d_biases = (-1) * np.sum((self.output - target) * sigmoid_derivative(self.output))
self.weights += d_weights
self.biases += d_biases
# Neural_Network(number of inputs, number of neurons);
# The number of inputs for layer2 has to be the same as the number of neurons in layer1
layer1 = Neural_Network(4, 5)
layer2 = Neural_Network(5, 1)
# calls forward function with the original input as input
layer1.forward(input)
# calls forward function with the output of layer1 as input
layer2.forward(layer1.output)
# first try with random weights and biases = 0
print("first try")
print(layer2.output)
# backpropagation to fit weights and biases
layer2.backpropagation_output_layer()
layer1.backpropagation_hidden_layer()
# inputs forwarded with new weights and biases
layer1.forward(input)
layer2.forward(layer1.output)
# second try with updated weights and biases from one backpropagation
print("second try")
print(layer2.output)
# repeat the learning process
for i in range(5000):
layer2.backpropagation_output_layer()
layer1.backpropagation_hidden_layer()
layer1.forward(input)
layer2.forward(layer1.output)
print(i)
print(layer2.output)
# PREDICTIONS
# input to be checked in the end
input = np.array([[0.3, 0.4, 11.0, 0.3],
[0.5, 0.35, 0.2, 0.7],
[0.45, 0.7, 9.0, 0.6],
[0.5, 0.1, 0.7, 0.25]])
# forward input (output is going to be calculated with updated weights and biases)
layer1.forward(input)
layer2.forward(layer1.output)
print("PREDICTIONS")
print("input:")
print(input)
print("output (prediction):")
print(layer2.output)
| 31.7 | 166 | 0.675939 |
4a19ab0c65051a972a96af4ff8c92e674c716057
| 2,915 |
py
|
Python
|
research/slim/convert_tiff_to_jpeg.py
|
tathey1/models
|
46a2289b224ec627c3c6dffe65ff3b268fdbba66
|
[
"Apache-2.0"
] | null | null | null |
research/slim/convert_tiff_to_jpeg.py
|
tathey1/models
|
46a2289b224ec627c3c6dffe65ff3b268fdbba66
|
[
"Apache-2.0"
] | null | null | null |
research/slim/convert_tiff_to_jpeg.py
|
tathey1/models
|
46a2289b224ec627c3c6dffe65ff3b268fdbba66
|
[
"Apache-2.0"
] | 1 |
2018-09-07T18:29:48.000Z
|
2018-09-07T18:29:48.000Z
|
"""
author: Thomas Athey
date: 7/30/18
Significant code is borrowed from
tensorflow/models/research/slim/datasets/download_and_convert+flowers.py
Convert a particular directory of tiff files to jpegs.
The directory should have subdirectories corresponding to the different classes.
Jpegs will be found in a directory at the same level of the input
directory, with the same name + "tiff" appended at the end
Usage:
$ python convert_tiff_to_jpeg.py \
--dir=/workspace/data/pathology
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import os
from PIL import Image
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'directory',
None,
'The directory that contains .tiff images')
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names
Args: dataset_dir a directory with subdirectories representing class names
Each subdirectory should contain tiff images
    Returns: a list of tiff subdirectories and the enclosed tiff image file paths,
    along with the corresponding jpeg subdirectories and jpeg file paths
    that should be produced
"""
jpeg_parent = dataset_dir[:-1] + "jpeg"
jpeg_directories = []
tiff_directories = []
for filename in os.listdir(dataset_dir):
path = os.path.join(dataset_dir,filename)
jpeg_path = os.path.join(jpeg_parent,filename)
if os.path.isdir(path):
tiff_directories.append(path)
jpeg_directories.append(jpeg_path)
tiff_filenames = []
jpeg_filenames = []
for i in range(len(tiff_directories)):
tiff_directory = tiff_directories[i]
jpeg_directory = jpeg_directories[i]
for filename in os.listdir(tiff_directory):
path = os.path.join(tiff_directory, filename)
tiff_filenames.append(path)
            # handle both .tif and .tiff extensions when building the jpeg filename
            jpeg_filename = os.path.splitext(filename)[0] + ".jpeg"
jpeg_path = os.path.join(jpeg_directory,jpeg_filename)
jpeg_filenames.append(jpeg_path)
return tiff_directories, jpeg_directories, tiff_filenames, jpeg_filenames
def _make_jpeg_dirs(jpeg_dirs):
for directory in jpeg_dirs:
try:
if not os.path.exists(directory):
os.makedirs(directory)
else:
print('Directory: ' + directory + ', already exists')
except:
print('Error: Creating directory: ' + directory)
def _convert_tiff(tiffs, jpegs):
for i in range(len(tiffs)):
tiff = tiffs[i]
jpeg = jpegs[i]
if not os.path.exists(jpeg):
im = Image.open(tiff)
print('Generating jpeg for %s' % tiff)
im.save(jpeg)
else:
print('File: ' + jpeg + ', already exists')
def main(_):
if not FLAGS.directory:
raise ValueError('You must supply the directory with --directory')
dataset_dir = FLAGS.directory
tiff_dir, jpeg_dir, tiff_files, jpeg_files = _get_filenames_and_classes(dataset_dir)
_make_jpeg_dirs(jpeg_dir)
_convert_tiff(tiff_files, jpeg_files)
if __name__ == '__main__':
tf.app.run()
| 26.5 | 85 | 0.754717 |
4a19ab85b6b1e83c679f8a0d696e9185ed480b13
| 564 |
py
|
Python
|
foundation.examples/single_demo/sim3d.py
|
PowerRocker/rpi_spark_examples
|
bc264cbc780ef1640f9caa59ad1e6e2a30c59ceb
|
[
"MIT"
] | 1 |
2018-11-18T03:07:50.000Z
|
2018-11-18T03:07:50.000Z
|
foundation.examples/single_demo/sim3d.py
|
PowerRocker/rpi_spark_examples
|
bc264cbc780ef1640f9caa59ad1e6e2a30c59ceb
|
[
"MIT"
] | 1 |
2020-12-25T16:30:54.000Z
|
2020-12-25T16:31:50.000Z
|
foundation.examples/single_demo/sim3d.py
|
mobinrg/rpi_spark_examples
|
bc264cbc780ef1640f9caa59ad1e6e2a30c59ceb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# RPi-Spark Single Demo
#
# Show mode: Button A
# Field of vision: Joy Up and Down
# Exit: Button A + Joy Up
#
# Author: Kunpeng Zhang
# 2018.6.07
#
# See LICENSE for details.
import sys
from JMRPiFoundations.Skeleton.RPiSparkProvider import initSpark
from JMRPiFoundations.Devices.rpi_spark_z_1_0_0 import RPiSparkConfig as mySparkConfig
from modules.Sim3D import Sim3D
def main(argv):
mySpark = initSpark()
mySingleApp = Sim3D( mySparkConfig, mySpark )
mySingleApp.run()
if __name__ == "__main__":
main(sys.argv[1:])
| 22.56 | 86 | 0.72695 |
4a19abd56cfda186f1973f338d985881e4712ba6
| 1,407 |
py
|
Python
|
t00_chocolate.py
|
2019-fall-csc-226/a01-breaking-bad-concepta-abe
|
287c25fe7ee9447694d84e26c867ae9677767982
|
[
"MIT"
] | null | null | null |
t00_chocolate.py
|
2019-fall-csc-226/a01-breaking-bad-concepta-abe
|
287c25fe7ee9447694d84e26c867ae9677767982
|
[
"MIT"
] | null | null | null |
t00_chocolate.py
|
2019-fall-csc-226/a01-breaking-bad-concepta-abe
|
287c25fe7ee9447694d84e26c867ae9677767982
|
[
"MIT"
] | null | null | null |
######################################################################
# Author: Abraham Moreno
# username: TheOneAndOnlyAbe
#
# Purpose: Designed to compute the total chocolate desired by the user
#
######################################################################
# Acknowledgements:
#
# Modified from original code written by Dr. Jan Pearce
#
# Licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
######################################################################
# Section 1: User Input Section
print("")
entered_name = input("Please enter your name: ")
print("Hello " + entered_name + "! \n")
num_boxes = int(input("How many chocolate boxes you would like? "))
num_lbs = int(input("How many lbs of chocolate in a box? \n"))
# Section 2: Computation Section
lbs_choc = num_boxes * num_lbs
oz_per_lb = 16
oz_choc = lbs_choc * oz_per_lb  # 1 lb = 16 oz, so multiply to convert pounds to ounces
# Section 3: Conditional (Decision-making) Section
if oz_choc > 500:
print("Wow, you must really like chocolate!")
else:
print("Not a choc-o-holic, I guess! ")
# Section 4: Displaying Results Section
print(str(num_boxes) + " boxes\nwith " + str(num_lbs) + " lbs of chocolate per box\nis" + " " + str(oz_choc) + " ounces of chocolate.\n" )
print("Studies have shown there are health benefits from eating 1.5 to 3 ounces of dark chocolate daily!")
# To run this code, hit the green run button.
| 31.977273 | 138 | 0.612651 |
4a19acba899d2170e1b5b99de44a471cca691ca9
| 602 |
py
|
Python
|
testing/urls.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
testing/urls.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
testing/urls.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding:utf-8 -*-
from django.conf.urls.defaults import patterns, url, include
from satchless.cart.tests import cart_app
from satchless.category.tests import category_app
from satchless.product.tests import product_app
from satchless.order.tests import order_app
urlpatterns = patterns('',
url(r'^category/', include(category_app.urls)),
url(r'^product/', include(product_app.urls)),
url(r'^cart/', include(cart_app.urls)),
url(r'^contact/', include('satchless.contact.urls')),
url(r'^order/', include(order_app.urls)),
url(r'^image/', include('satchless.image.urls')),
)
| 37.625 | 60 | 0.722591 |
4a19ae7c2282236d231028483bd66345ada03f08
| 1,009 |
py
|
Python
|
examples/add_vectors_image.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 1,345 |
2019-03-03T21:14:14.000Z
|
2022-03-31T19:46:39.000Z
|
examples/add_vectors_image.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 3,904 |
2019-03-02T01:30:24.000Z
|
2022-03-31T20:17:27.000Z
|
examples/add_vectors_image.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 306 |
2019-03-29T17:09:10.000Z
|
2022-03-30T09:54:11.000Z
|
"""
This example generates an image of vectors
Vector data is an array of shape (N, M, 2)
Each vector position is defined by an (x-proj, y-proj) element
where x-proj and y-proj are the vector projections at each center
where each vector is centered on a pixel of the NxM grid
"""
import napari
import numpy as np
# create the viewer and window
viewer = napari.Viewer()
n = 20
m = 40
image = 0.2 * np.random.random((n, m)) + 0.5
layer = viewer.add_image(image, contrast_limits=[0, 1], name='background')
# sample vector image-like data
# n x m grid of slanted lines
# random data on the open interval (-1, 1)
pos = np.zeros(shape=(n, m, 2), dtype=np.float32)
rand1 = 2 * (np.random.random_sample(n * m) - 0.5)
rand2 = 2 * (np.random.random_sample(n * m) - 0.5)
# assign projections for each vector
pos[:, :, 0] = rand1.reshape((n, m))
pos[:, :, 1] = rand2.reshape((n, m))
# add the vectors
vect = viewer.add_vectors(pos, edge_width=0.2, length=2.5)
print(image.shape, pos.shape)
napari.run()
| 25.871795 | 74 | 0.684836 |
4a19aeba30fddfe04195e97f95db1be284ca8e18
| 33,187 |
py
|
Python
|
python_modules/dagster/dagster/core/definitions/pipeline.py
|
chasleslr/dagster
|
88907f9473fb8e7a9b1af9a0a8b349d42f4b8153
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/definitions/pipeline.py
|
chasleslr/dagster
|
88907f9473fb8e7a9b1af9a0a8b349d42f4b8153
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/definitions/pipeline.py
|
chasleslr/dagster
|
88907f9473fb8e7a9b1af9a0a8b349d42f4b8153
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import warnings
from dagster import check
from dagster.core.definitions.solid import NodeDefinition
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidSubsetError,
DagsterInvariantViolationError,
)
from dagster.core.storage.output_manager import IOutputManagerDefinition
from dagster.core.storage.root_input_manager import IInputManagerDefinition
from dagster.core.types.dagster_type import DagsterTypeKind
from dagster.core.utils import str_format_set
from dagster.utils.backcompat import experimental_arg_warning
from .config import ConfigMapping
from .dependency import (
DependencyDefinition,
MultiDependencyDefinition,
SolidHandle,
SolidInvocation,
)
from .graph import GraphDefinition
from .hook import HookDefinition
from .mode import ModeDefinition
from .preset import PresetDefinition
from .solid import NodeDefinition
from .utils import validate_tags
def _anonymous_pipeline_name():
return "__pipeline__" + str(uuid.uuid4()).replace("-", "")
class PipelineDefinition(GraphDefinition):
"""Defines a Dagster pipeline.
A pipeline is made up of
- Solids, each of which is a single functional unit of data computation.
- Dependencies, which determine how the values produced by solids as their outputs flow from
one solid to another. This tells Dagster how to arrange solids, and potentially multiple
aliased instances of solids, into a directed, acyclic graph (DAG) of compute.
- Modes, which can be used to attach resources, custom loggers, custom system storage
options, and custom executors to a pipeline, and to switch between them.
- Presets, which can be used to ship common combinations of pipeline config options in Python
code, and to switch between them.
Args:
solid_defs (List[SolidDefinition]): The set of solids used in this pipeline.
name (Optional[str]): The name of the pipeline. Must be unique within any
:py:class:`RepositoryDefinition` containing the pipeline.
description (Optional[str]): A human-readable description of the pipeline.
dependencies (Optional[Dict[Union[str, SolidInvocation], Dict[str, DependencyDefinition]]]):
A structure that declares the dependencies of each solid's inputs on the outputs of
other solids in the pipeline. Keys of the top level dict are either the string names of
solids in the pipeline or, in the case of aliased solids,
:py:class:`SolidInvocations <SolidInvocation>`. Values of the top level dict are
themselves dicts, which map input names belonging to the solid or aliased solid to
:py:class:`DependencyDefinitions <DependencyDefinition>`.
mode_defs (Optional[List[ModeDefinition]]): The set of modes in which this pipeline can
operate. Modes are used to attach resources, custom loggers, custom system storage
options, and custom executors to a pipeline. Modes can be used, e.g., to vary available
resource and logging implementations between local test and production runs.
preset_defs (Optional[List[PresetDefinition]]): A set of preset collections of configuration
options that may be used to execute a pipeline. A preset consists of an environment
dict, an optional subset of solids to execute, and a mode selection. Presets can be used
to ship common combinations of options to pipeline end users in Python code, and can
be selected by tools like Dagit.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution run of the pipeline.
Values that are not strings will be json encoded and must meet the criteria that
`json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag
values provided at invocation time.
hook_defs (Optional[Set[HookDefinition]]): A set of hook definitions applied to the
pipeline. When a hook is applied to a pipeline, it will be attached to all solid
instances within the pipeline.
_parent_pipeline_def (INTERNAL ONLY): Used for tracking pipelines created using solid subsets.
Examples:
.. code-block:: python
@lambda_solid
def return_one():
return 1
@solid(input_defs=[InputDefinition('num')], required_resource_keys={'op'})
def apply_op(context, num):
return context.resources.op(num)
@resource(config_schema=Int)
def adder_resource(init_context):
return lambda x: x + init_context.resource_config
add_mode = ModeDefinition(
name='add_mode',
resource_defs={'op': adder_resource},
description='Mode that adds things',
)
add_three_preset = PresetDefinition(
name='add_three_preset',
run_config={'resources': {'op': {'config': 3}}},
mode='add_mode',
)
pipeline_def = PipelineDefinition(
name='basic',
solid_defs=[return_one, apply_op],
dependencies={'apply_op': {'num': DependencyDefinition('return_one')}},
mode_defs=[add_mode],
preset_defs=[add_three_preset],
)
"""
def __init__(
self,
solid_defs,
name=None,
description=None,
dependencies=None,
mode_defs=None,
preset_defs=None,
tags=None,
hook_defs=None,
input_mappings=None,
output_mappings=None,
config_mapping=None,
positional_inputs=None,
_parent_pipeline_def=None, # https://github.com/dagster-io/dagster/issues/2115
):
if not name:
warnings.warn(
"Pipeline must have a name. Names will be required starting in 0.10.0 or later."
)
name = _anonymous_pipeline_name()
# For these warnings they check truthiness because they get changed to [] higher
# in the stack for the decorator case
if input_mappings:
experimental_arg_warning("input_mappings", "PipelineDefinition")
if output_mappings:
experimental_arg_warning("output_mappings", "PipelineDefinition")
if config_mapping is not None:
experimental_arg_warning("config_mapping", "PipelineDefinition")
if positional_inputs:
experimental_arg_warning("positional_inputs", "PipelineDefinition")
super(PipelineDefinition, self).__init__(
name=name,
description=description,
dependencies=dependencies,
node_defs=solid_defs,
tags=check.opt_dict_param(tags, "tags", key_type=str),
positional_inputs=positional_inputs,
input_mappings=input_mappings,
output_mappings=output_mappings,
config_mapping=config_mapping,
)
self._current_level_node_defs = solid_defs
self._tags = validate_tags(tags)
mode_definitions = check.opt_list_param(mode_defs, "mode_defs", of_type=ModeDefinition)
if not mode_definitions:
mode_definitions = [ModeDefinition()]
self._mode_definitions = mode_definitions
seen_modes = set()
for mode_def in mode_definitions:
if mode_def.name in seen_modes:
raise DagsterInvalidDefinitionError(
(
'Two modes seen with the name "{mode_name}" in "{pipeline_name}". '
"Modes must have unique names."
).format(mode_name=mode_def.name, pipeline_name=self._name)
)
seen_modes.add(mode_def.name)
self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)
self._preset_defs = check.opt_list_param(preset_defs, "preset_defs", PresetDefinition)
self._preset_dict = {}
for preset in self._preset_defs:
if preset.name in self._preset_dict:
raise DagsterInvalidDefinitionError(
(
'Two PresetDefinitions seen with the name "{name}" in "{pipeline_name}". '
"PresetDefinitions must have unique names."
).format(name=preset.name, pipeline_name=self._name)
)
if preset.mode not in seen_modes:
raise DagsterInvalidDefinitionError(
(
'PresetDefinition "{name}" in "{pipeline_name}" '
'references mode "{mode}" which is not defined.'
).format(name=preset.name, pipeline_name=self._name, mode=preset.mode)
)
self._preset_dict[preset.name] = preset
# Validate solid resource dependencies
_validate_resource_dependencies(
self._mode_definitions,
self._current_level_node_defs,
self._dagster_type_dict,
self._solid_dict,
self._hook_defs,
)
# Validate unsatisfied inputs can be materialized from config
_validate_inputs(self._dependency_structure, self._solid_dict, self._mode_definitions)
# Recursively explore all nodes in the this pipeline
self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)
self._parent_pipeline_def = check.opt_inst_param(
_parent_pipeline_def, "_parent_pipeline_def", PipelineDefinition
)
self._cached_run_config_schemas = {}
self._cached_external_pipeline = None
def copy_for_configured(self, name, description, config_schema, config_or_config_fn):
if not self.has_config_mapping:
raise DagsterInvalidDefinitionError(
"Only pipelines utilizing config mapping can be pre-configured. The pipeline "
'"{graph_name}" does not have a config mapping, and thus has nothing to be '
"configured.".format(graph_name=self.name)
)
return PipelineDefinition(
solid_defs=self._solid_defs,
name=name,
description=description or self.description,
dependencies=self._dependencies,
mode_defs=self._mode_definitions,
preset_defs=self.preset_defs,
hook_defs=self.hook_defs,
input_mappings=self._input_mappings,
output_mappings=self._output_mappings,
config_mapping=ConfigMapping(
self._config_mapping.config_fn, config_schema=config_schema
),
positional_inputs=self.positional_inputs,
_parent_pipeline_def=self._parent_pipeline_def,
)
def get_run_config_schema(self, mode=None):
check.str_param(mode, "mode")
mode_def = self.get_mode_definition(mode)
if mode_def.name in self._cached_run_config_schemas:
return self._cached_run_config_schemas[mode_def.name]
self._cached_run_config_schemas[mode_def.name] = _create_run_config_schema(self, mode_def)
return self._cached_run_config_schemas[mode_def.name]
@property
def mode_definitions(self):
return self._mode_definitions
@property
def preset_defs(self):
return self._preset_defs
def _get_mode_definition(self, mode):
check.str_param(mode, "mode")
for mode_definition in self._mode_definitions:
if mode_definition.name == mode:
return mode_definition
return None
def get_default_mode(self):
return self._mode_definitions[0]
@property
def is_single_mode(self):
return len(self._mode_definitions) == 1
@property
def is_multi_mode(self):
return len(self._mode_definitions) > 1
def has_mode_definition(self, mode):
check.str_param(mode, "mode")
return bool(self._get_mode_definition(mode))
def get_default_mode_name(self):
return self._mode_definitions[0].name
def get_mode_definition(self, mode=None):
check.opt_str_param(mode, "mode")
if mode is None:
check.invariant(self.is_single_mode)
return self.get_default_mode()
mode_def = self._get_mode_definition(mode)
check.invariant(
mode_def is not None,
"Could not find mode {mode} in pipeline {name}".format(mode=mode, name=self._name),
)
return mode_def
@property
def available_modes(self):
return [mode_def.name for mode_def in self._mode_definitions]
@property
def display_name(self):
"""str: Display name of pipeline.
Name suitable for exception messages, logging etc. If pipeline
is unnamed the method will return "<<unnamed>>".
"""
return self._name if self._name else "<<unnamed>>"
@property
def tags(self):
return self._tags
def has_dagster_type(self, name):
check.str_param(name, "name")
return name in self._dagster_type_dict
def dagster_type_named(self, name):
check.str_param(name, "name")
return self._dagster_type_dict[name]
@property
def all_solid_defs(self):
return list(self._all_node_defs.values())
@property
def top_level_solid_defs(self):
return self._current_level_node_defs
def solid_def_named(self, name):
check.str_param(name, "name")
check.invariant(name in self._all_node_defs, "{} not found".format(name))
return self._all_node_defs[name]
def has_solid_def(self, name):
check.str_param(name, "name")
return name in self._all_node_defs
def get_pipeline_subset_def(self, solids_to_execute):
return (
self if solids_to_execute is None else _get_pipeline_subset_def(self, solids_to_execute)
)
def get_presets(self):
return list(self._preset_dict.values())
def has_preset(self, name):
check.str_param(name, "name")
return name in self._preset_dict
def get_preset(self, name):
check.str_param(name, "name")
if name not in self._preset_dict:
raise DagsterInvariantViolationError(
(
'Could not find preset for "{name}". Available presets '
'for pipeline "{pipeline_name}" are {preset_names}.'
).format(
name=name, preset_names=list(self._preset_dict.keys()), pipeline_name=self._name
)
)
return self._preset_dict[name]
def get_pipeline_snapshot(self):
return self.get_pipeline_index().pipeline_snapshot
def get_pipeline_snapshot_id(self):
return self.get_pipeline_index().pipeline_snapshot_id
def get_pipeline_index(self):
from dagster.core.snap import PipelineSnapshot
from dagster.core.host_representation import PipelineIndex
return PipelineIndex(
PipelineSnapshot.from_pipeline_def(self), self.get_parent_pipeline_snapshot()
)
def get_config_schema_snapshot(self):
return self.get_pipeline_snapshot().config_schema_snapshot
@property
def is_subset_pipeline(self):
return False
@property
def parent_pipeline_def(self):
return None
def get_parent_pipeline_snapshot(self):
return None
@property
def solids_to_execute(self):
return None
@property
def hook_defs(self):
return self._hook_defs
def get_all_hooks_for_handle(self, handle):
"""Gather all the hooks for the given solid from all places possibly attached with a hook.
A hook can be attached to any of the following objects
* Solid (solid invocation)
* PipelineDefinition
Args:
handle (SolidHandle): The solid's handle
Returns:
            FrozenSet[HookDefinition]
"""
check.inst_param(handle, "handle", SolidHandle)
hook_defs = set()
current = handle
lineage = []
while current:
lineage.append(current.name)
current = current.parent
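        # lineage now holds the handle names from the innermost solid up to the top-level solid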
# hooks on top-level solid
name = lineage.pop()
solid = self.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
# hooks on non-top-level solids
while lineage:
name = lineage.pop()
solid = solid.definition.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
# hooks applied to a pipeline definition will run on every solid
hook_defs = hook_defs.union(self.hook_defs)
return frozenset(hook_defs)
def with_hooks(self, hook_defs):
"""Apply a set of hooks to all solid instances within the pipeline."""
hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
return PipelineDefinition(
solid_defs=self.top_level_solid_defs,
name=self.name,
description=self.description,
dependencies=self.dependencies,
mode_defs=self.mode_definitions,
preset_defs=self.preset_defs,
tags=self.tags,
hook_defs=hook_defs.union(self.hook_defs),
_parent_pipeline_def=self._parent_pipeline_def,
)
class PipelineSubsetDefinition(PipelineDefinition):
@property
def solids_to_execute(self):
return frozenset(self._solid_dict.keys())
@property
def solid_selection(self):
# we currently don't pass the real solid_selection (the solid query list) down here.
# so in the short-term, to make the call sites cleaner, we will convert the solids to execute
# to a list
return list(self._solid_dict.keys())
@property
def parent_pipeline_def(self):
return self._parent_pipeline_def
def get_parent_pipeline_snapshot(self):
return self._parent_pipeline_def.get_pipeline_snapshot()
@property
def is_subset_pipeline(self):
return True
def get_pipeline_subset_def(self, solids_to_execute):
raise DagsterInvariantViolationError("Pipeline subsets may not be subset again.")
def _dep_key_of(solid):
return SolidInvocation(solid.definition.name, solid.name)
def _get_pipeline_subset_def(pipeline_def, solids_to_execute):
"""
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solids_to_execute.
"""
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.set_param(solids_to_execute, "solids_to_execute", of_type=str)
for solid_name in solids_to_execute:
if not pipeline_def.has_solid_named(solid_name):
raise DagsterInvalidSubsetError(
"Pipeline {pipeline_name} has no solid named {name}.".format(
pipeline_name=pipeline_def.name, name=solid_name
),
)
solids = list(map(pipeline_def.solid_named, solids_to_execute))
deps = {_dep_key_of(solid): {} for solid in solids}
for solid in solids:
for input_handle in solid.input_handles():
if pipeline_def.dependency_structure.has_singular_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_singular_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
elif pipeline_def.dependency_structure.has_multi_deps(input_handle):
output_handles = pipeline_def.dependency_structure.get_multi_deps(input_handle)
deps[_dep_key_of(solid)][input_handle.input_def.name] = MultiDependencyDefinition(
[
DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
for output_handle in output_handles
if output_handle.solid.name in solids_to_execute
]
)
try:
sub_pipeline_def = PipelineSubsetDefinition(
name=pipeline_def.name, # should we change the name for subsetted pipeline?
solid_defs=list({solid.definition for solid in solids}),
mode_defs=pipeline_def.mode_definitions,
dependencies=deps,
_parent_pipeline_def=pipeline_def,
tags=pipeline_def.tags,
hook_defs=pipeline_def.hook_defs,
)
return sub_pipeline_def
except DagsterInvalidDefinitionError as exc:
# This handles the case when you construct a subset such that an unsatisfied
# input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
# we re-raise a DagsterInvalidSubsetError.
raise DagsterInvalidSubsetError(
f"The attempted subset {str_format_set(solids_to_execute)} for pipeline "
f"{pipeline_def.name} results in an invalid pipeline"
) from exc
def _validate_resource_dependencies(
mode_definitions, node_defs, dagster_type_dict, solid_dict, pipeline_hook_defs
):
"""This validation ensures that each pipeline context provides the resources that are required
by each solid.
"""
check.list_param(mode_definitions, "mode_definitions", of_type=ModeDefinition)
check.list_param(node_defs, "node_defs", of_type=NodeDefinition)
check.dict_param(dagster_type_dict, "dagster_type_dict")
check.dict_param(solid_dict, "solid_dict")
check.set_param(pipeline_hook_defs, "pipeline_hook_defs", of_type=HookDefinition)
for mode_def in mode_definitions:
mode_resources = set(mode_def.resource_defs.keys())
for node_def in node_defs:
for required_resource in node_def.required_resource_keys:
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by solid def {node_def_name}, but is not '
'provided by mode "{mode_name}".'
).format(
resource=required_resource,
node_def_name=node_def.name,
mode_name=mode_def.name,
)
)
_validate_type_resource_deps_for_mode(mode_def, mode_resources, dagster_type_dict)
for intermediate_storage in mode_def.intermediate_storage_defs or []:
for required_resource in intermediate_storage.required_resource_keys:
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
"Resource '{resource}' is required by intermediate storage "
"'{storage_name}', but is not provided by mode '{mode_name}'."
).format(
resource=required_resource,
storage_name=intermediate_storage.name,
mode_name=mode_def.name,
)
)
for solid in solid_dict.values():
for hook_def in solid.hook_defs:
for required_resource in hook_def.required_resource_keys:
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by hook "{hook_name}", but is not '
'provided by mode "{mode_name}".'
).format(
resource=required_resource,
hook_name=hook_def.name,
mode_name=mode_def.name,
)
)
for hook_def in pipeline_hook_defs:
for required_resource in hook_def.required_resource_keys:
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by hook "{hook_name}", but is not '
'provided by mode "{mode_name}".'
).format(
resource=required_resource,
hook_name=hook_def.name,
mode_name=mode_def.name,
)
)
def _validate_type_resource_deps_for_mode(mode_def, mode_resources, dagster_type_dict):
for dagster_type in dagster_type_dict.values():
for required_resource in dagster_type.required_resource_keys:
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by type "{type_name}", but is not '
'provided by mode "{mode_name}".'
).format(
resource=required_resource,
type_name=dagster_type.display_name,
mode_name=mode_def.name,
)
)
if dagster_type.loader:
for required_resource in dagster_type.loader.required_resource_keys():
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by the loader on type '
'"{type_name}", but is not provided by mode "{mode_name}".'
).format(
resource=required_resource,
type_name=dagster_type.display_name,
mode_name=mode_def.name,
)
)
if dagster_type.materializer:
for required_resource in dagster_type.materializer.required_resource_keys():
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by the materializer on type '
'"{type_name}", but is not provided by mode "{mode_name}".'
).format(
resource=required_resource,
type_name=dagster_type.display_name,
mode_name=mode_def.name,
)
)
for plugin in dagster_type.auto_plugins:
used_by_storage = set(
[
intermediate_storage_def.name
for intermediate_storage_def in mode_def.intermediate_storage_defs
if plugin.compatible_with_storage_def(intermediate_storage_def)
]
)
if used_by_storage:
for required_resource in plugin.required_resource_keys():
if required_resource not in mode_resources:
raise DagsterInvalidDefinitionError(
(
'Resource "{resource}" is required by the plugin "{plugin_name}"'
' on type "{type_name}" (used with storages {storages}), '
'but is not provided by mode "{mode_name}".'
).format(
resource=required_resource,
type_name=dagster_type.display_name,
plugin_name=plugin.__name__,
mode_name=mode_def.name,
storages=used_by_storage,
)
)
def _validate_inputs(dependency_structure, solid_dict, mode_definitions):
for solid in solid_dict.values():
for handle in solid.input_handles():
if dependency_structure.has_deps(handle):
for mode_def in mode_definitions:
for source_output_handle in dependency_structure.get_deps_list(handle):
output_manager_key = source_output_handle.output_def.io_manager_key
output_manager_def = mode_def.resource_defs[output_manager_key]
# TODO: remove the IOutputManagerDefinition check when asset store
# API is removed.
if isinstance(
output_manager_def, IOutputManagerDefinition
) and not isinstance(output_manager_def, IInputManagerDefinition):
raise DagsterInvalidDefinitionError(
f'Input "{handle.input_def.name}" of solid "{solid.name}" is '
f'connected to output "{source_output_handle.output_def.name}" '
f'of solid "{source_output_handle.solid.name}". In mode '
f'"{mode_def.name}", that output does not have an output '
f"manager that knows how to load inputs, so we don't know how "
f"to load the input. To address this, assign an IOManager to "
f"the upstream output."
)
else:
if (
not handle.input_def.dagster_type.loader
and not handle.input_def.dagster_type.kind == DagsterTypeKind.NOTHING
and not handle.input_def.root_manager_key
):
raise DagsterInvalidDefinitionError(
'Input "{input_name}" in solid "{solid_name}" is not connected to '
"the output of a previous solid and can not be loaded from configuration, "
"creating an impossible to execute pipeline. "
"Possible solutions are:\n"
' * add a dagster_type_loader for the type "{dagster_type}"\n'
' * connect "{input_name}" to the output of another solid\n'.format(
solid_name=solid.name,
input_name=handle.input_def.name,
dagster_type=handle.input_def.dagster_type.display_name,
)
)
def _build_all_node_defs(node_defs):
all_defs = {}
for current_level_node_def in node_defs:
for node_def in current_level_node_def.iterate_node_defs():
if node_def.name in all_defs:
if all_defs[node_def.name] != node_def:
raise DagsterInvalidDefinitionError(
'Detected conflicting solid definitions with the same name "{name}"'.format(
name=node_def.name
)
)
else:
all_defs[node_def.name] = node_def
return all_defs
def _create_run_config_schema(pipeline_def, mode_definition):
from .environment_configs import (
EnvironmentClassCreationData,
construct_config_type_dictionary,
define_environment_cls,
)
from .run_config_schema import RunConfigSchema
# When executing with a subset pipeline, include the missing solids
# from the original pipeline as ignored to allow execution with
# run config that is valid for the original
if pipeline_def.is_subset_pipeline:
ignored_solids = [
solid
for solid in pipeline_def.parent_pipeline_def.solids
if not pipeline_def.has_solid_named(solid.name)
]
else:
ignored_solids = []
environment_type = define_environment_cls(
EnvironmentClassCreationData(
pipeline_name=pipeline_def.name,
solids=pipeline_def.solids,
dependency_structure=pipeline_def.dependency_structure,
mode_definition=mode_definition,
logger_defs=mode_definition.loggers,
ignored_solids=ignored_solids,
)
)
config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(
pipeline_def.all_solid_defs, environment_type
)
return RunConfigSchema(
environment_type=environment_type,
config_type_dict_by_name=config_type_dict_by_name,
config_type_dict_by_key=config_type_dict_by_key,
)
| 40.87069 | 105 | 0.614096 |
4a19af00c057406e55b8edd3ba462a812924fbdb
| 101 |
py
|
Python
|
python/ql/test/library-tests/stmts/with_stmt/test.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 4,036 |
2020-04-29T00:09:57.000Z
|
2022-03-31T14:16:38.000Z
|
python/ql/test/library-tests/stmts/with_stmt/test.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 2,970 |
2020-04-28T17:24:18.000Z
|
2022-03-31T22:40:46.000Z
|
python/ql/test/library-tests/stmts/with_stmt/test.py
|
ScriptBox99/github-codeql
|
2ecf0d3264db8fb4904b2056964da469372a235c
|
[
"MIT"
] | 794 |
2020-04-29T00:28:25.000Z
|
2022-03-30T08:21:46.000Z
|
def f():
with a:
call()
def g():
with x:
with y:
call()
| 11.222222 | 18 | 0.306931 |
4a19af1a871b983209cb106764f46e1e2eed5cb9
| 2,530 |
py
|
Python
|
taggle/models/heads/saol.py
|
tattaka/Taggle
|
d78e7f76c65cd69336a0347299939eeb3a184d4e
|
[
"MIT"
] | 7 |
2020-08-06T08:53:26.000Z
|
2021-04-17T12:03:28.000Z
|
taggle/models/heads/saol.py
|
tattaka/Taggle
|
d78e7f76c65cd69336a0347299939eeb3a184d4e
|
[
"MIT"
] | null | null | null |
taggle/models/heads/saol.py
|
tattaka/Taggle
|
d78e7f76c65cd69336a0347299939eeb3a184d4e
|
[
"MIT"
] | null | null | null |
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn
class SpatiallyAttentiveOutputHead(nn.Module):
__name__ = 'SpatiallyAttentiveOutputHead'
def __init__(self, encoder_channels, mid_channel=512, p=0.2, last_activation=None, num_class=10):
super(SpatiallyAttentiveOutputHead, self).__init__()
self.sa_layers = nn.Sequential(
nn.Conv2d(encoder_channels[-1], mid_channel, 3, 1, 1),
nn.BatchNorm2d(mid_channel),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channel, 1, 3, 1, 1))
self.interpolate = partial(
F.interpolate, mode='bilinear', align_corners=True)
self.conv1 = nn.Sequential(
nn.Conv2d(encoder_channels[-1], mid_channel, 3, 1, 1),
nn.BatchNorm2d(mid_channel),
nn.ReLU(inplace=True),)
self.conv2 = nn.Sequential(
nn.Conv2d(encoder_channels[-2], mid_channel, 3, 1, 1),
nn.BatchNorm2d(mid_channel),
nn.ReLU(inplace=True),)
self.conv3 = nn.Sequential(
nn.Conv2d(encoder_channels[-3], mid_channel, 3, 1, 1),
nn.BatchNorm2d(mid_channel),
nn.ReLU(inplace=True),)
self.conv4 = nn.Conv2d(mid_channel * 3, num_class, 3, 1, 1)
self.softmax2d = nn.Softmax(2)
if last_activation is None:
self.activation = last_activation
elif last_activation == 'LogSoftmax':
self.activation = nn.LogSoftmax(dim=1)
elif last_activation == 'Softmax':
self.activation = nn.Softmax(dim=1)
elif last_activation == 'Sigmoid':
            self.activation = nn.Sigmoid()
else:
raise ValueError(
'Activation should be "LogSoftmax"/"Softmax"/"Sigmoid"/None')
def forward(self, feats):
_, _, h, w = feats[-1].size()
spatial_attention_map = self.sa_layers(feats[-1])
spatial_attention_map = self.softmax2d(spatial_attention_map.view(
*spatial_attention_map.size()[:2], -1)).view_as(spatial_attention_map)
feat1 = self.conv1(feats[-1])
feat2 = self.conv2(self.interpolate(feats[-2], size=(h, w)))
feat3 = self.conv3(self.interpolate(feats[-3], size=(h, w)))
spatial_logits = self.conv4(torch.cat([feat1, feat2, feat3], dim=1))
logits = (spatial_attention_map * spatial_logits).sum(axis=(-1, -2))
if self.activation:
logits = self.activation(logits)
return logits
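# Illustrative smoke test (added for clarity; not part of the original module).
# The batch size and the channel/spatial sizes below are assumptions; any FPN-style
# feature list whose last three channel counts match encoder_channels[-3:] works.
if __name__ == "__main__":
    head = SpatiallyAttentiveOutputHead(encoder_channels=(64, 128, 256, 512), num_class=10)
    feats = [torch.randn(2, c, s, s)
             for c, s in zip((64, 128, 256, 512), (64, 32, 16, 8))]
    logits = head(feats)  # attention-weighted sum over the 8x8 grid of feats[-1]
    print(logits.shape)   # expected: torch.Size([2, 10])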
| 40.806452 | 101 | 0.617787 |
4a19afae5fc5290b1be0bfeb6269ec21c0ed595e
| 2,624 |
py
|
Python
|
src/bin/pilprint.py
|
alempedroso/mxnet-lambda
|
05af0db5e2a6fcc6c8d8c8c3821ed989ef520085
|
[
"Apache-2.0"
] | 145 |
2017-01-19T23:33:03.000Z
|
2021-06-05T05:34:55.000Z
|
mxnet_lambda/src/bin/pilprint.py
|
Nuvoola/oreilly-ai-nyc
|
22a683c63b7d0153cc2249a94d76b3c8969b1972
|
[
"MIT"
] | 17 |
2017-02-03T20:51:39.000Z
|
2020-05-21T11:33:52.000Z
|
mxnet_lambda/src/bin/pilprint.py
|
Nuvoola/oreilly-ai-nyc
|
22a683c63b7d0153cc2249a94d76b3c8969b1972
|
[
"MIT"
] | 44 |
2017-02-04T19:40:03.000Z
|
2020-10-01T19:24:19.000Z
|
#!/home/ec2-user/mx-lambda-nocv/bin/python2.7
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
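# Example invocation (illustrative; the printer name is hypothetical):
#   ./pilprint.py -c -P office_lw photo1.jpg photo2.png
# converts each image to PostScript and pipes it to "lpr -Poffice_lw".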
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
    except Exception:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| 25.475728 | 68 | 0.568979 |
4a19afc0bfa75ab250f0d2fb7ba96945b595236a
| 229 |
py
|
Python
|
data/xor.py
|
wenyuzhao/Multilayer-Perceptron
|
fcecc761424f7fd0c54b36d325735d25e2e24f14
|
[
"Apache-2.0"
] | 2 |
2017-05-13T15:18:33.000Z
|
2017-11-09T02:33:27.000Z
|
data/xor.py
|
wenyuzhao/Multilayer-Perceptron
|
fcecc761424f7fd0c54b36d325735d25e2e24f14
|
[
"Apache-2.0"
] | null | null | null |
data/xor.py
|
wenyuzhao/Multilayer-Perceptron
|
fcecc761424f7fd0c54b36d325735d25e2e24f14
|
[
"Apache-2.0"
] | 1 |
2018-10-29T12:07:27.000Z
|
2018-10-29T12:07:27.000Z
|
import numpy as np
train_data = [
(np.array([0., 0.]), np.array([0.])),
(np.array([0., 1.]), np.array([1.])),
(np.array([1., 0.]), np.array([1.])),
(np.array([1., 1.]), np.array([0.])),
]
test_data = train_data
| 20.818182 | 41 | 0.489083 |
4a19b000acb146e25136d6f3e9ac58bffffef4ee
| 4,855 |
py
|
Python
|
tests/commit/math/test__extrapolation.py
|
oguzziya/PhiFlow
|
55d9834051e8a52c19a0e6d62e1c7c2f0a42cc34
|
[
"MIT"
] | null | null | null |
tests/commit/math/test__extrapolation.py
|
oguzziya/PhiFlow
|
55d9834051e8a52c19a0e6d62e1c7c2f0a42cc34
|
[
"MIT"
] | null | null | null |
tests/commit/math/test__extrapolation.py
|
oguzziya/PhiFlow
|
55d9834051e8a52c19a0e6d62e1c7c2f0a42cc34
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from phi.math import NUMPY_BACKEND
from phi.math.extrapolation import *
from phi import math, tf
from phi.tf import TF_BACKEND
from phi.torch import TORCH_BACKEND
class TestExtrapolation(TestCase):
def test_pad(self):
test_in_func_out = [
(math.zeros(x=3, y=4, z=5, a=1),
lambda tensor: ConstantExtrapolation(0).pad(tensor, dict(x=[1, 1], y=[1, 0], z=[0, 1], a=[0, 0])),
math.zeros(x=5, y=5, z=6, a=1)),
(math.ones(x=3, y=4, z=5, a=1),
lambda tensor: ConstantExtrapolation(1).pad(tensor, dict(x=[1, 1], y=[1, 0], z=[0, 1], a=[0, 0])),
math.ones(x=5, y=5, z=6, a=1)),
(-math.ones(x=3, y=4, z=5, a=1),
lambda tensor: ConstantExtrapolation(-1).pad(tensor, dict(x=[1, 1], y=[1, 0], z=[0, 1], a=[0, 0])),
- math.ones(x=5, y=5, z=6, a=1)),
]
for val_in, func, val_out in test_in_func_out:
try:
math.assert_close(val_out, func(val_in))
# TypeError('__bool__ should return bool, returned NotImplementedType')
# self.assertEqual(val_out, func(val_in))
except Exception as e:
raise BaseException(AssertionError(e, val_in, func, val_out))
class TestExtrapolationOperators(TestCase):
"""ensures that proper propagation of extrapolation occurs (for Field arithmetics)"""
def test_constant(self):
self.assertEqual(ConstantExtrapolation(2), ONE + ONE)
self.assertEqual(ZERO, ONE - ONE)
self.assertEqual(ONE, ONE * ONE)
self.assertEqual(ONE, ONE / ONE)
self.assertEqual(ZERO, ZERO / ONE)
def test_constant_periodic_working(self):
self.assertEqual(PERIODIC, PERIODIC + ZERO)
self.assertEqual(PERIODIC, PERIODIC - ZERO)
self.assertEqual(PERIODIC, ZERO + PERIODIC)
self.assertEqual(PERIODIC, PERIODIC / ONE)
self.assertEqual(PERIODIC, PERIODIC * ONE)
self.assertEqual(ZERO, PERIODIC * ZERO)
def test_periodic_periodic(self):
self.assertEqual(PERIODIC, PERIODIC + PERIODIC)
self.assertEqual(PERIODIC, PERIODIC - PERIODIC)
self.assertEqual(PERIODIC, PERIODIC * PERIODIC)
self.assertEqual(PERIODIC, PERIODIC / PERIODIC)
def test_cross_errors(self):
try:
PERIODIC + BOUNDARY
assert False
except TypeError:
pass
try:
PERIODIC + ONE
assert False
except TypeError:
pass
def test_pad_tensor(self):
for backend in (NUMPY_BACKEND, TF_BACKEND, TORCH_BACKEND):
with backend:
a = math.meshgrid(x=4, y=3)
# 0
p = math.pad(a, {'x': (1, 2), 'y': (0, 1)}, ZERO)
self.assertEqual((7, 4, 2), p.shape.sizes) # dimension check
math.assert_close(p.x[1:-2].y[:-1], a) # copy inner
math.assert_close(p.x[0], 0)
# 1
p = math.pad(a, {'x': (1, 2), 'y': (0, 1)}, ONE)
self.assertEqual((7, 4, 2), p.shape.sizes) # dimension check
math.assert_close(p.x[1:-2].y[:-1], a) # copy inner
math.assert_close(p.x[0], 1)
# periodic
p = math.pad(a, {'x': (1, 2), 'y': (0, 1)}, PERIODIC)
self.assertEqual((7, 4, 2), p.shape.sizes) # dimension check
math.assert_close(p.x[1:-2].y[:-1], a) # copy inner
math.assert_close(p.x[0].y[:-1], a.x[-1])
math.assert_close(p.x[-2:].y[:-1], a.x[:2])
# boundary
p = math.pad(a, {'x': (1, 2), 'y': (0, 1)}, BOUNDARY)
self.assertEqual((7, 4, 2), p.shape.sizes) # dimension check
math.assert_close(p.x[1:-2].y[:-1], a) # copy inner
math.assert_close(p.x[0].y[:-1], a.x[0])
math.assert_close(p.x[-2:].y[:-1], a.x[-1])
# mixed
p = math.pad(a, {'x': (1, 2), 'y': (0, 1)}, combine_sides({'x': PERIODIC, 'y': (ONE, REFLECT)}))
math.print(p)
self.assertEqual((7, 4, 2), p.shape.sizes) # dimension check
math.assert_close(p.x[1:-2].y[:-1], a) # copy inner
math.assert_close(p.x[0].y[:-1], a.x[-1]) # periodic
math.assert_close(p.x[-2:].y[:-1], a.x[:2]) # periodic
def test_pad_collapsed(self):
a = math.zeros(b=2, x=10, y=10, batch=10)
p = math.pad(a, {'x': (1, 2)}, ZERO)
self.assertIsInstance(p, CollapsedTensor)
self.assertEqual((10, 2, 13, 10), p.shape.sizes)
p = math.pad(a, {'x': (1, 2)}, PERIODIC)
self.assertIsInstance(p, CollapsedTensor)
self.assertEqual((10, 2, 13, 10), p.shape.sizes)
| 43.348214 | 112 | 0.536148 |
4a19b00ac0b8c2d8e450aa66fa2bcbbafcf0bafb
| 25,852 |
py
|
Python
|
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
# We don't import cache.create_cache directly as the facebook
# specific import below may monkey patch it, and we want to
# observe the patched version of this function!
import getdeps.cache as cache_module
from getdeps.buildopts import setup_build_options
from getdeps.dyndeps import create_dyn_dep_munger
from getdeps.errors import TransientFailure
from getdeps.load import ManifestLoader
from getdeps.manifest import ManifestParser
from getdeps.platform import HostType
from getdeps.subcmd import SubCmd, add_subcommands, cmd
try:
import getdeps.facebook # noqa: F401
except ImportError:
# we don't ship the facebook specific subdir,
# so allow that to fail silently
pass
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "getdeps"))
class UsageError(Exception):
pass
@cmd("validate-manifest", "parse a manifest and validate that it is correct")
class ValidateManifest(SubCmd):
def run(self, args):
try:
ManifestParser(file_name=args.file_name)
print("OK", file=sys.stderr)
return 0
except Exception as exc:
print("ERROR: %s" % str(exc), file=sys.stderr)
return 1
def setup_parser(self, parser):
parser.add_argument("file_name", help="path to the manifest file")
@cmd("show-host-type", "outputs the host type tuple for the host machine")
class ShowHostType(SubCmd):
def run(self, args):
host = HostType()
print("%s" % host.as_tuple_string())
return 0
class ProjectCmdBase(SubCmd):
def run(self, args):
opts = setup_build_options(args)
ctx_gen = opts.get_context_generator(facebook_internal=args.facebook_internal)
if args.test_dependencies:
ctx_gen.set_value_for_all_projects("test", "on")
if args.enable_tests:
ctx_gen.set_value_for_project(args.project, "test", "on")
else:
ctx_gen.set_value_for_project(args.project, "test", "off")
loader = ManifestLoader(opts, ctx_gen)
self.process_project_dir_arguments(args, loader)
manifest = loader.load_manifest(args.project)
self.run_project_cmd(args, loader, manifest)
def process_project_dir_arguments(self, args, loader):
def parse_project_arg(arg, arg_type):
parts = arg.split(":")
if len(parts) == 2:
project, path = parts
elif len(parts) == 1:
project = args.project
path = parts[0]
else:
raise UsageError(
"invalid %s argument; too many ':' characters: %s" % (arg_type, arg)
)
return project, os.path.abspath(path)
for arg in args.src_dir:
project, path = parse_project_arg(arg, "--src-dir")
loader.set_project_src_dir(project, path)
for arg in args.build_dir:
project, path = parse_project_arg(arg, "--build-dir")
loader.set_project_build_dir(project, path)
for arg in args.install_dir:
project, path = parse_project_arg(arg, "--install-dir")
loader.set_project_install_dir(project, path)
def setup_parser(self, parser):
parser.add_argument(
"project",
help=(
"name of the project or path to a manifest "
"file describing the project"
),
)
parser.add_argument(
"--no-tests",
action="store_false",
dest="enable_tests",
default=True,
help="Disable building tests for this project.",
)
parser.add_argument(
"--test-dependencies",
action="store_true",
help="Enable building tests for dependencies as well.",
)
parser.add_argument(
"--src-dir",
default=[],
action="append",
help="Specify a local directory to use for the project source, "
"rather than fetching it.",
)
parser.add_argument(
"--build-dir",
default=[],
action="append",
help="Explicitly specify the build directory to use for the "
"project, instead of the default location in the scratch path. "
"This only affects the project specified, and not its dependencies.",
)
parser.add_argument(
"--install-dir",
default=[],
action="append",
help="Explicitly specify the install directory to use for the "
"project, instead of the default location in the scratch path. "
"This only affects the project specified, and not its dependencies.",
)
self.setup_project_cmd_parser(parser)
def setup_project_cmd_parser(self, parser):
pass
class CachedProject(object):
""" A helper that allows calling the cache logic for a project
from both the build and the fetch code """
def __init__(self, cache, loader, m):
self.m = m
self.inst_dir = loader.get_project_install_dir(m)
self.project_hash = loader.get_project_hash(m)
self.ctx = loader.ctx_gen.get_context(m.name)
self.loader = loader
self.cache = cache
self.cache_file_name = "-".join(
(
m.name,
self.ctx.get("os"),
self.ctx.get("distro") or "none",
self.ctx.get("distro_vers") or "none",
self.project_hash,
"buildcache.tgz",
)
)
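        # e.g. (illustrative) "boost-linux-ubuntu-18-<project_hash>-buildcache.tgz";
        # the distro fields fall back to "none" on platforms that do not report them.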
def is_cacheable(self):
""" We only cache third party projects """
return self.cache and not self.m.shipit_fbcode_builder
def download(self):
if self.is_cacheable() and not os.path.exists(self.inst_dir):
print("check cache for %s" % self.cache_file_name)
dl_dir = os.path.join(self.loader.build_opts.scratch_dir, "downloads")
if not os.path.exists(dl_dir):
os.makedirs(dl_dir)
try:
target_file_name = os.path.join(dl_dir, self.cache_file_name)
if self.cache.download_to_file(self.cache_file_name, target_file_name):
tf = tarfile.open(target_file_name, "r")
print(
"Extracting %s -> %s..." % (self.cache_file_name, self.inst_dir)
)
tf.extractall(self.inst_dir)
return True
except Exception as exc:
print("%s" % str(exc))
return False
def upload(self):
if self.cache and not self.m.shipit_fbcode_builder:
# We can prepare an archive and stick it in LFS
tempdir = tempfile.mkdtemp()
tarfilename = os.path.join(tempdir, self.cache_file_name)
print("Archiving for cache: %s..." % tarfilename)
tf = tarfile.open(tarfilename, "w:gz")
tf.add(self.inst_dir, arcname=".")
tf.close()
try:
self.cache.upload_from_file(self.cache_file_name, tarfilename)
except Exception as exc:
print(
"Failed to upload to cache (%s), continue anyway" % str(exc),
file=sys.stderr,
)
shutil.rmtree(tempdir)
@cmd("fetch", "fetch the code for a given project")
class FetchCmd(ProjectCmdBase):
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="fetch the transitive deps also",
action="store_true",
default=False,
)
parser.add_argument(
"--host-type",
help=(
"When recursively fetching, fetch deps for "
"this host type rather than the current system"
),
)
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
projects = loader.manifests_in_dependency_order()
else:
projects = [manifest]
cache = cache_module.create_cache()
for m in projects:
cached_project = CachedProject(cache, loader, m)
if cached_project.download():
continue
inst_dir = loader.get_project_install_dir(m)
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
if os.path.exists(built_marker):
with open(built_marker, "r") as f:
built_hash = f.read().strip()
project_hash = loader.get_project_hash(m)
if built_hash == project_hash:
continue
# We need to fetch the sources
fetcher = loader.create_fetcher(m)
fetcher.update()
@cmd("list-deps", "lists the transitive deps for a given project")
class ListDepsCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
for m in loader.manifests_in_dependency_order():
print(m.name)
return 0
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--host-type",
help=(
"Produce the list for the specified host type, "
"rather than that of the current system"
),
)
def clean_dirs(opts):
for d in ["build", "installed", "extracted", "shipit"]:
d = os.path.join(opts.scratch_dir, d)
print("Cleaning %s..." % d)
if os.path.exists(d):
shutil.rmtree(d)
@cmd("clean", "clean up the scratch dir")
class CleanCmd(SubCmd):
def run(self, args):
opts = setup_build_options(args)
clean_dirs(opts)
@cmd("show-inst-dir", "print the installation dir for a given project")
class ShowInstDirCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
manifests = loader.manifests_in_dependency_order()
else:
manifests = [manifest]
for m in manifests:
inst_dir = loader.get_project_install_dir(m)
print(inst_dir)
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="print the transitive deps also",
action="store_true",
default=False,
)
@cmd("show-source-dir", "print the source dir for a given project")
class ShowSourceDirCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
manifests = loader.manifests_in_dependency_order()
else:
manifests = [manifest]
for m in manifests:
fetcher = loader.create_fetcher(m)
print(fetcher.get_src_dir())
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="print the transitive deps also",
action="store_true",
default=False,
)
@cmd("build", "build a given project")
class BuildCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.clean:
clean_dirs(loader.build_opts)
print("Building on %s" % loader.ctx_gen.get_context(args.project))
projects = loader.manifests_in_dependency_order()
cache = cache_module.create_cache()
# Accumulate the install directories so that the build steps
# can find their dep installation
install_dirs = []
for m in projects:
fetcher = loader.create_fetcher(m)
if args.clean:
fetcher.clean()
build_dir = loader.get_project_build_dir(m)
inst_dir = loader.get_project_install_dir(m)
if m == manifest or not args.no_deps:
print("Assessing %s..." % m.name)
project_hash = loader.get_project_hash(m)
ctx = loader.ctx_gen.get_context(m.name)
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
cached_project = CachedProject(cache, loader, m)
reconfigure, sources_changed = self.compute_source_change_status(
cached_project, fetcher, m, built_marker, project_hash
)
if sources_changed or reconfigure or not os.path.exists(built_marker):
if os.path.exists(built_marker):
os.unlink(built_marker)
src_dir = fetcher.get_src_dir()
builder = m.create_builder(
loader.build_opts, src_dir, build_dir, inst_dir, ctx
)
builder.build(install_dirs, reconfigure=reconfigure)
with open(built_marker, "w") as f:
f.write(project_hash)
# Only populate the cache from continuous build runs
if args.schedule_type == "continuous":
cached_project.upload()
install_dirs.append(inst_dir)
def compute_source_change_status(
self, cached_project, fetcher, m, built_marker, project_hash
):
reconfigure = False
sources_changed = False
if not cached_project.download():
check_fetcher = True
if os.path.exists(built_marker):
check_fetcher = False
with open(built_marker, "r") as f:
built_hash = f.read().strip()
if built_hash == project_hash:
if cached_project.is_cacheable():
# We can blindly trust the build status
reconfigure = False
sources_changed = False
else:
# Otherwise, we may have changed the source, so let's
# check in with the fetcher layer
check_fetcher = True
else:
# Some kind of inconsistency with a prior build,
# let's run it again to be sure
os.unlink(built_marker)
reconfigure = True
sources_changed = True
if check_fetcher:
change_status = fetcher.update()
reconfigure = change_status.build_changed()
sources_changed = change_status.sources_changed()
return reconfigure, sources_changed
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--clean",
action="store_true",
default=False,
help=(
"Clean up the build and installation area prior to building, "
"causing the projects to be built from scratch"
),
)
parser.add_argument(
"--no-deps",
action="store_true",
default=False,
help=(
"Only build the named project, not its deps. "
"This is most useful after you've built all of the deps, "
"and helps to avoid waiting for relatively "
"slow up-to-date-ness checks"
),
)
parser.add_argument(
"--schedule-type", help="Indicates how the build was activated"
)
@cmd("fixup-dyn-deps", "Adjusts dynamic dependencies for packaging purposes")
class FixupDeps(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
projects = loader.manifests_in_dependency_order()
# Accumulate the install directories so that the build steps
# can find their dep installation
install_dirs = []
for m in projects:
inst_dir = loader.get_project_install_dir(m)
install_dirs.append(inst_dir)
if m == manifest:
dep_munger = create_dyn_dep_munger(loader.build_opts, install_dirs)
dep_munger.process_deps(args.destdir, args.final_install_prefix)
def setup_project_cmd_parser(self, parser):
parser.add_argument("destdir", help=("Where to copy the fixed up executables"))
parser.add_argument(
"--final-install-prefix", help=("specify the final installation prefix")
)
@cmd("test", "test a given project")
class TestCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
projects = loader.manifests_in_dependency_order()
# Accumulate the install directories so that the test steps
# can find their dep installation
install_dirs = []
for m in projects:
inst_dir = loader.get_project_install_dir(m)
if m == manifest or args.test_dependencies:
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
if not os.path.exists(built_marker):
print("project %s has not been built" % m.name)
# TODO: we could just go ahead and build it here, but I
# want to tackle that as part of adding build-for-test
# support.
return 1
fetcher = loader.create_fetcher(m)
src_dir = fetcher.get_src_dir()
ctx = loader.ctx_gen.get_context(m.name)
build_dir = loader.get_project_build_dir(m)
builder = m.create_builder(
loader.build_opts, src_dir, build_dir, inst_dir, ctx
)
builder.run_tests(
install_dirs,
schedule_type=args.schedule_type,
owner=args.test_owner,
)
install_dirs.append(inst_dir)
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--schedule-type", help="Indicates how the build was activated"
)
parser.add_argument("--test-owner", help="Owner for testpilot")
@cmd("generate-github-actions", "generate a GitHub actions configuration")
class GenerateGitHubActionsCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
platforms = [
HostType("linux", "ubuntu", "18"),
HostType("darwin", None, None),
HostType("windows", None, None),
]
with open(args.output_file, "w") as out:
# Deliberate line break here because the @ and the generated
# symbols are meaningful to our internal tooling when they
# appear in a single token
out.write("# This file was @")
out.write("generated by getdeps.py\n")
out.write(
"""
name: CI
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
"""
)
for p in platforms:
build_opts = setup_build_options(args, p)
self.write_job_for_platform(out, args, build_opts)
def write_job_for_platform(self, out, args, build_opts):
ctx_gen = build_opts.get_context_generator()
loader = ManifestLoader(build_opts, ctx_gen)
manifest = loader.load_manifest(args.project)
manifest_ctx = loader.ctx_gen.get_context(manifest.name)
# Some projects don't do anything "useful" as a leaf project, only
# as a dep for a leaf project. Check for those here; we don't want
# to waste the effort scheduling them on CI.
# We do this by looking at the builder type in the manifest file
# rather than creating a builder and checking its type because we
# don't know enough to create the full builder instance here.
if manifest.get("build", "builder", ctx=manifest_ctx) == "nop":
return None
if build_opts.is_linux():
job_name = "linux"
runs_on = "ubuntu-18.04"
elif build_opts.is_windows():
# We're targeting the windows-2016 image because it has
# Visual Studio 2017 installed, and at the time of writing,
# the version of boost in the manifests (1.69) is not
# buildable with Visual Studio 2019
job_name = "windows"
runs_on = "windows-2016"
else:
job_name = "mac"
runs_on = "macOS-latest"
out.write(" %s:\n" % job_name)
out.write(" runs-on: %s\n" % runs_on)
out.write(" steps:\n")
out.write(" - uses: actions/checkout@v1\n")
projects = loader.manifests_in_dependency_order()
for m in projects:
if m != manifest:
out.write(" - name: Fetch %s\n" % m.name)
out.write(
" run: python build/fbcode_builder/getdeps.py fetch "
"--no-tests %s\n" % m.name
)
for m in projects:
if m != manifest:
out.write(" - name: Build %s\n" % m.name)
out.write(
" run: python build/fbcode_builder/getdeps.py build "
"--no-tests %s\n" % m.name
)
out.write(" - name: Build %s\n" % manifest.name)
out.write(
" run: python build/fbcode_builder/getdeps.py build --src-dir=. %s\n"
% manifest.name
)
out.write(" - name: Test %s\n" % manifest.name)
out.write(
" run: python build/fbcode_builder/getdeps.py test --src-dir=. %s\n"
% manifest.name
)
def setup_project_cmd_parser(self, parser):
parser.add_argument("--output-file", help="The name of the yaml file")
def get_arg_var_name(args):
for arg in args:
if arg.startswith("--"):
return arg[2:].replace("-", "_")
raise Exception("unable to determine argument variable name from %r" % (args,))
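# e.g. get_arg_var_name(("--scratch-path",)) -> "scratch_path"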
def parse_args():
# We want to allow common arguments to be specified either before or after
# the subcommand name. In order to do this we add them to the main parser
# and to subcommand parsers. In order for this to work, we need to tell
# argparse that the default value is SUPPRESS, so that the default values
# from the subparser arguments won't override values set by the user from
# the main parser. We maintain our own list of desired defaults in the
# common_defaults dictionary, and manually set those if the argument wasn't
# present at all.
common_args = argparse.ArgumentParser(add_help=False)
common_defaults = {}
def add_common_arg(*args, **kwargs):
var_name = get_arg_var_name(args)
default_value = kwargs.pop("default", None)
common_defaults[var_name] = default_value
kwargs["default"] = argparse.SUPPRESS
common_args.add_argument(*args, **kwargs)
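    # Illustrative effect: "getdeps.py --num-jobs 4 build proj" and
    # "getdeps.py build --num-jobs 4 proj" both yield args.num_jobs == 4, while
    # omitting the flag leaves args.num_jobs at the common default (None here).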
add_common_arg("--scratch-path", help="Where to maintain checkouts and build dirs")
add_common_arg(
"--vcvars-path", default=None, help="Path to the vcvarsall.bat on Windows."
)
add_common_arg(
"--install-prefix",
help=(
"Where the final build products will be installed "
"(default is [scratch-path]/installed)"
),
)
add_common_arg(
"--num-jobs",
type=int,
help=(
"Number of concurrent jobs to use while building. "
"(default=number of cpu cores)"
),
)
add_common_arg(
"--use-shipit",
help="use the real ShipIt instead of the simple shipit transformer",
action="store_true",
default=False,
)
add_common_arg(
"--facebook-internal",
help="Setup the build context as an FB internal build",
action="store_true",
default=False,
)
ap = argparse.ArgumentParser(
description="Get and build dependencies and projects", parents=[common_args]
)
sub = ap.add_subparsers(
# metavar suppresses the long and ugly default list of subcommands on a
# single line. We still render the nicer list below where we would
# have shown the nasty one.
metavar="",
title="Available commands",
help="",
)
add_subcommands(sub, common_args)
args = ap.parse_args()
for var_name, default_value in common_defaults.items():
if not hasattr(args, var_name):
setattr(args, var_name, default_value)
return ap, args
def main():
ap, args = parse_args()
if getattr(args, "func", None) is None:
ap.print_help()
return 0
try:
return args.func(args)
except UsageError as exc:
ap.error(str(exc))
return 1
except TransientFailure as exc:
print("TransientFailure: %s" % str(exc))
# This return code is treated as a retryable transient infrastructure
# error by Facebook's internal CI, rather than eg: a build or code
# related error that needs to be fixed before progress can be made.
return 128
except subprocess.CalledProcessError as exc:
print("%s" % str(exc), file=sys.stderr)
print("!! Failed", file=sys.stderr)
return 1
if __name__ == "__main__":
sys.exit(main())
| 35.077341 | 88 | 0.585255 |
4a19b0273ac2746fa2cd253d05341f3532c5d127
| 10,400 |
py
|
Python
|
src/ope/deepiv/learner.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | 1 |
2020-09-05T01:25:39.000Z
|
2020-09-05T01:25:39.000Z
|
src/ope/deepiv/learner.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | null | null | null |
src/ope/deepiv/learner.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | null | null | null |
# Lint as: python3
# pylint: disable=bad-indentation,line-too-long
"""DeepIV Learner implementation."""
import datetime
from typing import Dict, List
import acme
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
from src.utils.tf_linear_reg_utils import fit_linear, linear_reg_loss, linear_reg_pred
# Default Acme checkpoint TTL is 5 days.
_CHECKPOINT_TTL = int(datetime.timedelta(days=30).total_seconds())
class DeepIVLearner(acme.Learner, tf2_savers.TFSaveable):
"""DeepIVLearner.
  This is the learning component of a DeepIV learner, i.e. it takes a dataset as
input and implements update functionality to learn from this dataset.
Optionally it takes a replay client as well to allow for updating of
priorities.
"""
def __init__(self,
value_func: snt.Module,
mixture_density: snt.Module,
policy_net: snt.Module,
discount: float,
value_learning_rate: float,
density_learning_rate: float,
n_sampling: int,
density_iter: int,
dataset: tf.data.Dataset,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True,
               checkpoint_interval_minutes: float = 10.0):
"""Initializes the learner.
Args:
value_func: value function network
mixture_density: mixture density function network.
policy_net: policy network.
discount: global discount.
value_learning_rate: learning rate for the treatment_net update.
density_learning_rate: learning rate for the mixture_density update.
      n_sampling: number of samples generated in stage 2.
      density_iter: number of iterations for the mixture_density function.
dataset: dataset to learn from.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
checkpoint_interval_minutes: checkpoint interval in minutes.
"""
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
self.density_iter = density_iter
self.n_sampling = n_sampling
self.discount = discount
# Get an iterator over the dataset.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self.value_func = value_func
self.mixture_density = mixture_density
self.policy = policy_net
self._value_func_optimizer = snt.optimizers.Adam(value_learning_rate)
self._mixture_density_optimizer = snt.optimizers.Adam(density_learning_rate)
self._variables = [
value_func.trainable_variables,
mixture_density.trainable_variables,
]
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._mse = tf.keras.losses.MeanSquaredError()
# Create a checkpointer object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
objects_to_save=self.state,
time_delta_minutes=checkpoint_interval_minutes,
checkpoint_ttl_seconds=_CHECKPOINT_TTL)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'value_func': value_func,
'mixture_density': mixture_density,
}, time_delta_minutes=60.)
# @tf.function
def _step(self) -> Dict[str, tf.Tensor]:
stage1_loss = None
stage2_loss = None
# Pull out the data needed for updates/priorities.
if self._num_steps < self.density_iter:
sample = next(self._iterator)
o_tm1, a_tm1, _, d_t, o_t = sample.data[:5]
stage1_loss, obs_loss, discount_loss = self.update_density(
o_tm1, a_tm1, d_t, o_t)
stage2_loss = tf.constant(0.0)
else:
stage1_loss = tf.constant(0.0)
obs_loss = tf.constant(0.0)
discount_loss = tf.constant(0.0)
sample = next(self._iterator)
o_tm1, a_tm1, r_t = sample.data[:3]
stage2_loss = self.update_value(o_tm1, a_tm1, r_t)
self._num_steps.assign_add(1)
fetches = {'stage1_loss': stage1_loss, 'stage2_loss': stage2_loss,
'obs_loss': obs_loss, 'discount_loss': discount_loss,
'num_steps': tf.convert_to_tensor(self._num_steps)}
return fetches
def _density_loss(self, current_obs, action, discount, next_obs):
target = tf2_utils.batch_concat(next_obs)
# density = self.mixture_density(current_obs, action)
obs_distr, discount_distr = self.mixture_density(current_obs, action)
obs_log_prob = obs_distr.log_prob(target)
obs_loss = tf.reduce_mean(-obs_log_prob)
discount_log_prob = discount_distr.log_prob(discount)
discount_loss = tf.reduce_mean(-discount_log_prob)
loss = obs_loss + discount_loss
return loss, obs_loss, discount_loss
def update_density(self, current_obs, action, discount, next_obs):
with tf.GradientTape() as tape:
loss, obs_loss, discount_loss = self._density_loss(
current_obs, action, discount, next_obs)
gradient = tape.gradient(loss, self.mixture_density.trainable_variables)
self._mixture_density_optimizer.apply(
gradient, self.mixture_density.trainable_variables)
return loss, obs_loss, discount_loss
def obtain_one_sampled_value_function(self, current_obs, action):
obs_distr, discount_distr = self.mixture_density(current_obs, action)
sampled_next_obs = obs_distr.sample()
# sampled_next_obs = tf.reshape(sampled_next_obs, current_obs.shape)
sampled_action = self.policy(sampled_next_obs)
sampled_value = self.value_func(sampled_next_obs, sampled_action)
sampled_discount = discount_distr.sample()
sampled_discount = tf.expand_dims(sampled_discount, axis=-1)
if sampled_discount.shape != sampled_value.shape:
raise ValueError(
f'Unmatched shape sampled_discount.shape '
f'({sampled_discount.shape}) != value.shape ({sampled_value.shape})')
sampled_discount = tf.cast(sampled_discount, sampled_value.dtype)
sampled_value = sampled_discount * sampled_value
return sampled_value
def obtain_sampled_value_function(self, current_obs, action):
# res_list = []
# for i in range(self.n_sampling):
# sampled_value = self.mixture_density.obtain_sampled_value_function(current_obs, action, self.policy,
# self.value_func)
# res_list.append(sampled_value)
# return tf.reduce_mean(tf.concat(res_list, axis=0), axis=0)
sampled_value = 0.
for _ in range(self.n_sampling):
sampled_value += self.obtain_one_sampled_value_function(
current_obs, action)
return sampled_value / self.n_sampling
def _value_loss(self, current_obs, action, reward):
next_value = self.obtain_sampled_value_function(current_obs, action)
current_value = self.value_func(current_obs, action)
pred = current_value - self.discount * next_value
loss = self._mse(y_pred=pred, y_true=reward)
return loss
def update_value(self, current_obs, action, reward):
with tf.GradientTape() as tape:
loss = self._value_loss(current_obs, action, reward)
gradient = tape.gradient(loss, self.value_func.trainable_variables)
self._value_func_optimizer.apply(
gradient, self.value_func.trainable_variables)
return loss
def dev_loss(self, dev_dataset):
"""Return state prediction loss and reward mse on the dev dataset."""
stage1_loss_sum = tf.constant(0.0)
obs_loss_sum = tf.constant(0.0)
discount_loss_sum = tf.constant(0.0)
stage2_loss_sum = tf.constant(0.0)
count = tf.constant(0.0)
for sample in dev_dataset:
if self._num_steps < self.density_iter:
o_tm1, a_tm1, _, d_t, o_t = sample.data[:5]
stage1_loss, obs_loss, discount_loss = self._density_loss(
o_tm1, a_tm1, d_t, o_t)
stage1_loss_sum += stage1_loss
obs_loss_sum += obs_loss
discount_loss_sum += discount_loss
else:
o_tm1, a_tm1, r_t = sample.data[:3]
stage2_loss = self._value_loss(o_tm1, a_tm1, r_t)
stage2_loss_sum += stage2_loss
count += 1.
return {
'dev_stage1_loss': stage1_loss_sum / count,
'dev_obs_loss': obs_loss_sum / count,
'dev_discount_loss': discount_loss_sum / count,
'dev_stage2_loss': stage2_loss_sum / count,
}
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'value_function': self.value_func,
'mixture_density': self.mixture_density,
'value_opt': self._value_func_optimizer,
'density_opt': self._mixture_density_optimizer,
'num_steps': self._num_steps,
'counter': self._counter,
}
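# Illustrative construction sketch (added for clarity; not part of the original
# module). `value_net`, `density_net`, `policy_net` and `demo_dataset` are
# hypothetical stand-ins for the networks/dataset built elsewhere in this project:
#
#     learner = DeepIVLearner(
#         value_func=value_net, mixture_density=density_net, policy_net=policy_net,
#         discount=0.99, value_learning_rate=1e-4, density_learning_rate=1e-4,
#         n_sampling=10, density_iter=10000, dataset=demo_dataset, checkpoint=False)
#     for _ in range(20000):
#         learner.step()  # density updates run first, then value-function updates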
| 40.466926 | 114 | 0.639904 |
4a19b0821db4161d2a0db44e203017b4fcb2b489
| 978 |
py
|
Python
|
ibsng/handler/user/search_user.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 6 |
2018-03-06T10:16:36.000Z
|
2021-12-05T12:43:10.000Z
|
ibsng/handler/user/search_user.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3 |
2018-03-06T10:27:08.000Z
|
2022-01-02T15:21:27.000Z
|
ibsng/handler/user/search_user.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3 |
2018-01-06T16:28:31.000Z
|
2018-09-17T19:47:19.000Z
|
"""Search user API method."""
from ibsng.handler.handler import Handler
class searchUser(Handler):
"""Search user method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.conds, dict)
self.is_valid(self.from_, int)
self.is_valid(self.to_, int)
self.is_valid(self.order_by, str)
self.is_valid(self.desc, bool)
def setup(self, conds, from_, to_, order_by, desc):
"""Setup required parameters.
:param dict conds: conditions
        :param int from_: start index, for pagination
        :param int to_: end index, for pagination
:param choice order_by: order by field
:param bool desc: descending order
:return: None
:rtype: None
"""
self.conds = conds
self.from_ = from_
self.to_ = to_
self.order_by = order_by
self.desc = desc
| 26.432432 | 55 | 0.59407 |
4a19b16e606f47ee40cc8d1fec2eab0d16b2d91a
| 708 |
py
|
Python
|
setup.py
|
mihi-r/numba_timer
|
997b6d305d26983ff51d2a592e356e4edf58b053
|
[
"MIT"
] | 1 |
2020-10-01T02:08:30.000Z
|
2020-10-01T02:08:30.000Z
|
setup.py
|
mihi-r/numba_timer
|
997b6d305d26983ff51d2a592e356e4edf58b053
|
[
"MIT"
] | null | null | null |
setup.py
|
mihi-r/numba_timer
|
997b6d305d26983ff51d2a592e356e4edf58b053
|
[
"MIT"
] | null | null | null |
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name="numba_timer",
version="0.1.2",
author="Mihir Patel",
author_email="abc55abc55@gmail.com",
description="A helper package to easily time Numba CUDA GPU events",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mihi-r/numba_timer",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['numba>=0.51.0']
)
| 30.782609 | 72 | 0.666667 |
4a19b1a49d7c9479c630a458445de80d754a092b
| 30,761 |
py
|
Python
|
exchangelib/folders/base.py
|
kraglik/exchangelib
|
3d4aad35f5e24bc282f02e5fa8d67ee683fda64e
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/folders/base.py
|
kraglik/exchangelib
|
3d4aad35f5e24bc282f02e5fa8d67ee683fda64e
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/folders/base.py
|
kraglik/exchangelib
|
3d4aad35f5e24bc282f02e5fa8d67ee683fda64e
|
[
"BSD-2-Clause"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from fnmatch import fnmatch
import logging
from operator import attrgetter
from future.utils import python_2_unicode_compatible, PY2
from six import string_types
from ..errors import ErrorAccessDenied, ErrorFolderNotFound, ErrorCannotEmptyFolder, ErrorCannotDeleteObject, \
ErrorDeleteDistinguishedFolder
from ..fields import IntegerField, CharField, FieldPath, EffectiveRightsField, PermissionSetField, EWSElementField, \
Field
from ..items import CalendarItem, RegisterMixIn, Persona, ITEM_CLASSES, ITEM_TRAVERSAL_CHOICES, SHAPE_CHOICES, \
ID_ONLY, DELETE_TYPE_CHOICES, HARD_DELETE
from ..properties import Mailbox, FolderId, ParentFolderId, InvalidField, DistinguishedFolderId
from ..queryset import QuerySet, SearchableMixIn, DoesNotExist
from ..restriction import Restriction
from ..services import CreateFolder, UpdateFolder, DeleteFolder, EmptyFolder, FindPeople
from ..util import TNS
from ..version import EXCHANGE_2007_SP1, EXCHANGE_2010
from .collections import FolderCollection
from .queryset import SingleFolderQuerySet, SHALLOW
log = logging.getLogger(__name__)
class BaseFolder(RegisterMixIn, SearchableMixIn):
"""
MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/folder
"""
ELEMENT_NAME = 'Folder'
NAMESPACE = TNS
# See https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/distinguishedfolderid
DISTINGUISHED_FOLDER_ID = None
# Default item type for this folder. See
# https://docs.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxosfld/68a85898-84fe-43c4-b166-4711c13cdd61
CONTAINER_CLASS = None
supported_item_models = ITEM_CLASSES # The Item types that this folder can contain. Default is all
# Marks the version from which a distinguished folder was introduced. A possibly authoritative source is:
# https://github.com/OfficeDev/ews-managed-api/blob/master/Enumerations/WellKnownFolderName.cs
supported_from = None
# Whether this folder type is allowed with the GetFolder service
get_folder_allowed = True
LOCALIZED_NAMES = dict() # A map of (str)locale: (tuple)localized_folder_names
ITEM_MODEL_MAP = {cls.response_tag(): cls for cls in ITEM_CLASSES}
ID_ELEMENT_CLS = FolderId
LOCAL_FIELDS = [
EWSElementField('parent_folder_id', field_uri='folder:ParentFolderId', value_cls=ParentFolderId,
is_read_only=True),
CharField('folder_class', field_uri='folder:FolderClass', is_required_after_save=True),
CharField('name', field_uri='folder:DisplayName'),
IntegerField('total_count', field_uri='folder:TotalCount', is_read_only=True),
IntegerField('child_folder_count', field_uri='folder:ChildFolderCount', is_read_only=True),
IntegerField('unread_count', field_uri='folder:UnreadCount', is_read_only=True),
]
FIELDS = RegisterMixIn.FIELDS + LOCAL_FIELDS
__slots__ = tuple(f.name for f in LOCAL_FIELDS) + ('is_distinguished',)
# Used to register extended properties
INSERT_AFTER_FIELD = 'child_folder_count'
def __init__(self, **kwargs):
self.is_distinguished = kwargs.pop('is_distinguished', False)
super(BaseFolder, self).__init__(**kwargs)
@property
def account(self):
raise NotImplementedError()
@property
def root(self):
raise NotImplementedError()
@property
def parent(self):
raise NotImplementedError()
@property
def is_deleteable(self):
return not self.is_distinguished
def clean(self, version=None):
# pylint: disable=access-member-before-definition
super(BaseFolder, self).clean(version=version)
# Set a default folder class for new folders. A folder class cannot be changed after saving.
if self.id is None and self.folder_class is None:
self.folder_class = self.CONTAINER_CLASS
@property
def children(self):
# It's dangerous to return a generator here because we may then call methods on a child that result in the
# cache being updated while it's iterated.
return FolderCollection(account=self.account, folders=self.root.get_children(self))
@property
def parts(self):
parts = [self]
f = self.parent
while f:
parts.insert(0, f)
f = f.parent
return parts
@property
def absolute(self):
return ''.join('/%s' % p.name for p in self.parts)
def _walk(self):
for c in self.children:
yield c
for f in c.walk():
yield f
def walk(self):
return FolderCollection(account=self.account, folders=self._walk())
def _glob(self, pattern):
split_pattern = pattern.rsplit('/', 1)
head, tail = (split_pattern[0], None) if len(split_pattern) == 1 else split_pattern
if head == '':
# We got an absolute path. Restart globbing at root
for f in self.root.glob(tail or '*'):
yield f
elif head == '..':
# Relative path with reference to parent. Restart globbing at parent
if not self.parent:
raise ValueError('Already at top')
for f in self.parent.glob(tail or '*'):
yield f
elif head == '**':
# Match anything here or in any subfolder at arbitrary depth
for c in self.walk():
if fnmatch(c.name, tail or '*'):
yield c
else:
# Regular pattern
for c in self.children:
if not fnmatch(c.name, head):
continue
if tail is None:
yield c
continue
for f in c.glob(tail):
yield f
def glob(self, pattern):
return FolderCollection(account=self.account, folders=self._glob(pattern))
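    # Illustrative patterns (folder names are hypothetical):
    #   some_folder.glob('foo*')      -> direct children whose name starts with "foo"
    #   some_folder.glob('**/Review') -> any descendant named "Review"
    #   some_folder.glob('../Other')  -> a sibling folder named "Other"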
def tree(self):
"""
Returns a string representation of the folder structure of this folder. Example:
root
├── inbox
│ └── todos
└── archive
├── Last Job
├── exchangelib issues
└── Mom
"""
tree = '%s\n' % self.name
children = list(self.children)
for i, c in enumerate(sorted(children, key=attrgetter('name')), start=1):
nodes = c.tree().split('\n')
for j, node in enumerate(nodes, start=1):
if i != len(children) and j == 1:
# Not the last child, but the first node, which is the name of the child
tree += '├── %s\n' % node
elif i != len(children) and j > 1:
# Not the last child, and not name of child
tree += '│ %s\n' % node
elif i == len(children) and j == 1:
                    # The last child, and the first node, which is the name of the child
tree += '└── %s\n' % node
else: # Last child, and not name of child
tree += ' %s\n' % node
return tree.strip()
@classmethod
def supports_version(cls, version):
# 'version' is a Version instance, for convenience by callers
if not cls.supported_from or not version:
return True
return version.build >= cls.supported_from
@property
def has_distinguished_name(self):
return self.name and self.DISTINGUISHED_FOLDER_ID and self.name.lower() == self.DISTINGUISHED_FOLDER_ID.lower()
@classmethod
def localized_names(cls, locale):
# Return localized names for a specific locale. If no locale-specific names exist, return the default names,
# if any.
return tuple(s.lower() for s in cls.LOCALIZED_NAMES.get(locale, cls.LOCALIZED_NAMES.get(None, [])))
@staticmethod
def folder_cls_from_container_class(container_class):
"""Returns a reasonable folder class given a container class, e.g. 'IPF.Note'. Don't iterate WELLKNOWN_FOLDERS
because many folder classes have the same CONTAINER_CLASS.
"""
from .known_folders import Messages, Tasks, Calendar, ConversationSettings, Contacts, GALContacts, Reminders, \
RecipientCache, RSSFeeds
for folder_cls in (
Messages, Tasks, Calendar, ConversationSettings, Contacts, GALContacts, Reminders, RecipientCache,
RSSFeeds):
if folder_cls.CONTAINER_CLASS == container_class:
return folder_cls
raise KeyError()
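    # Example: an 'IPF.Note' container class resolves to the Messages folder class
    # (whose CONTAINER_CLASS is 'IPF.Note'); unknown container classes raise KeyError.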
@classmethod
def item_model_from_tag(cls, tag):
try:
return cls.ITEM_MODEL_MAP[tag]
except KeyError:
raise ValueError('Item type %s was unexpected in a %s folder' % (tag, cls.__name__))
@classmethod
def allowed_item_fields(cls, version):
# Return non-ID fields of all item classes allowed in this folder type
fields = set()
for item_model in cls.supported_item_models:
fields.update(
set(item_model.supported_fields(version=version))
)
return fields
def validate_item_field(self, field):
# Takes a fieldname, Field or FieldPath object pointing to an item field, and checks that it is valid
# for the item types supported by this folder.
version = self.account.version if self.account else None
# For each field, check if the field is valid for any of the item models supported by this folder
for item_model in self.supported_item_models:
try:
item_model.validate_field(field=field, version=version)
break
except InvalidField:
continue
else:
raise InvalidField("%r is not a valid field on %s" % (field, self.supported_item_models))
def normalize_fields(self, fields):
# Takes a list of fieldnames, Field or FieldPath objects pointing to item fields. Turns them into FieldPath
# objects and adds internal timezone fields if necessary. Assume fields are already validated.
fields = list(fields)
has_start, has_end = False, False
for i, field_path in enumerate(fields):
# Allow both Field and FieldPath instances and string field paths as input
if isinstance(field_path, string_types):
field_path = FieldPath.from_string(field_path=field_path, folder=self)
fields[i] = field_path
elif isinstance(field_path, Field):
field_path = FieldPath(field=field_path)
fields[i] = field_path
if not isinstance(field_path, FieldPath):
raise ValueError("Field %r must be a string or FieldPath instance" % field_path)
if field_path.field.name == 'start':
has_start = True
elif field_path.field.name == 'end':
has_end = True
# For CalendarItem items, we want to inject internal timezone fields. See also CalendarItem.clean()
if CalendarItem in self.supported_item_models:
meeting_tz_field, start_tz_field, end_tz_field = CalendarItem.timezone_fields()
if self.account.version.build < EXCHANGE_2010:
if has_start or has_end:
fields.append(FieldPath(field=meeting_tz_field))
else:
if has_start:
fields.append(FieldPath(field=start_tz_field))
if has_end:
fields.append(FieldPath(field=end_tz_field))
return fields
@classmethod
def get_item_field_by_fieldname(cls, fieldname):
for item_model in cls.supported_item_models:
try:
return item_model.get_field_by_fieldname(fieldname)
except InvalidField:
pass
raise InvalidField("%r is not a valid field name on %s" % (fieldname, cls.supported_item_models))
def get(self, *args, **kwargs):
return FolderCollection(account=self.account, folders=[self]).get(*args, **kwargs)
def all(self):
return FolderCollection(account=self.account, folders=[self]).all()
def none(self):
return FolderCollection(account=self.account, folders=[self]).none()
def filter(self, *args, **kwargs):
return FolderCollection(account=self.account, folders=[self]).filter(*args, **kwargs)
def exclude(self, *args, **kwargs):
return FolderCollection(account=self.account, folders=[self]).exclude(*args, **kwargs)
def people(self):
return QuerySet(
folder_collection=FolderCollection(account=self.account, folders=[self]),
request_type=QuerySet.PERSONA,
)
def find_people(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None, order_fields=None, page_size=None,
max_items=None, offset=0):
"""
Private method to call the FindPeople service
:param q: a Q instance containing any restrictions
        :param shape: controls whether to return (id, changekey) tuples or Persona objects. If additional_fields is
          non-null, we always return Persona objects.
        :param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties.
:param order_fields: the SortOrder fields, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned personas
"""
if shape not in SHAPE_CHOICES:
raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
if depth not in ITEM_TRAVERSAL_CHOICES:
raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
if additional_fields:
for f in additional_fields:
Persona.validate_field(field=f, version=self.account.version)
if f.field.is_complex:
raise ValueError("find_people() does not support field '%s'" % f.field.name)
# Build up any restrictions
if q.is_empty():
restriction = None
query_string = None
elif q.query_string:
restriction = None
query_string = Restriction(q, folders=[self], applies_to=Restriction.ITEMS)
else:
restriction = Restriction(q, folders=[self], applies_to=Restriction.ITEMS)
query_string = None
personas = FindPeople(account=self.account, chunk_size=page_size).call(
folder=self,
additional_fields=additional_fields,
restriction=restriction,
order_fields=order_fields,
shape=shape,
query_string=query_string,
depth=depth,
max_items=max_items,
offset=offset,
)
for p in personas:
if isinstance(p, Exception):
raise p
yield p
def bulk_create(self, items, *args, **kwargs):
return self.account.bulk_create(folder=self, items=items, *args, **kwargs)
def save(self, update_fields=None):
if self.id is None:
# New folder
if update_fields:
raise ValueError("'update_fields' is only valid for updates")
res = list(CreateFolder(account=self.account).call(parent_folder=self.parent, folders=[self]))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
self.id, self.changekey = res[0].id, res[0].changekey
self.root.add_folder(self) # Add this folder to the cache
return self
# Update folder
if not update_fields:
            # The fields to update were not specified explicitly. Update all fields where an update is possible
update_fields = []
for f in self.supported_fields(version=self.account.version):
if f.is_read_only:
# These cannot be changed
continue
if f.is_required or f.is_required_after_save:
if getattr(self, f.name) is None or (f.is_list and not getattr(self, f.name)):
# These are required and cannot be deleted
continue
update_fields.append(f.name)
res = list(UpdateFolder(account=self.account).call(folders=[(self, update_fields)]))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
folder_id, changekey = res[0].id, res[0].changekey
if self.id != folder_id:
raise ValueError('ID mismatch')
# Don't check changekey value. It may not change on no-op updates
self.changekey = changekey
self.root.update_folder(self) # Update the folder in the cache
return None
def delete(self, delete_type=HARD_DELETE):
if delete_type not in DELETE_TYPE_CHOICES:
raise ValueError("'delete_type' %s must be one of %s" % (delete_type, DELETE_TYPE_CHOICES))
res = list(DeleteFolder(account=self.account).call(folders=[self], delete_type=delete_type))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
self.root.remove_folder(self) # Remove the updated folder from the cache
self.id, self.changekey = None, None
def empty(self, delete_type=HARD_DELETE, delete_sub_folders=False):
if delete_type not in DELETE_TYPE_CHOICES:
raise ValueError("'delete_type' %s must be one of %s" % (delete_type, DELETE_TYPE_CHOICES))
res = list(EmptyFolder(account=self.account).call(
folders=[self], delete_type=delete_type, delete_sub_folders=delete_sub_folders)
)
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
if delete_sub_folders:
# We don't know exactly what was deleted, so invalidate the entire folder cache to be safe
self.root.clear_cache()
def wipe(self):
# Recursively deletes all items in this folder, and all subfolders and their content. Attempts to protect
# distinguished folders from being deleted. Use with caution!
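        # Illustrative example (assumption, not from the original source): a call such as
        # account.junk.wipe() would empty that folder and then recursively wipe and, where
        # allowed, delete its non-distinguished subfolders.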
log.warning('Wiping %s', self)
has_distinguished_subfolders = any(f.is_distinguished for f in self.children)
try:
if has_distinguished_subfolders:
self.empty(delete_sub_folders=False)
else:
self.empty(delete_sub_folders=True)
except (ErrorAccessDenied, ErrorCannotEmptyFolder):
try:
if has_distinguished_subfolders:
raise # We already tried this
self.empty(delete_sub_folders=False)
except (ErrorAccessDenied, ErrorCannotEmptyFolder):
log.warning('Not allowed to empty %s. Trying to delete items instead', self)
try:
self.all().delete()
except (ErrorAccessDenied, ErrorCannotDeleteObject):
log.warning('Not allowed to delete items in %s', self)
for f in self.children:
f.wipe()
# Remove non-distinguished children that are empty and have no subfolders
if f.is_deleteable and not f.children:
log.warning('Deleting folder %s', f)
try:
f.delete()
except ErrorDeleteDistinguishedFolder:
log.warning('Tried to delete a distinguished folder (%s)', f)
def test_access(self):
"""
Does a simple FindItem to test (read) access to the folder. Maybe the account doesn't exist, maybe the
service user doesn't have access to the calendar. This will throw the most common errors.
"""
list(self.filter(subject='DUMMY').values_list('subject'))
return True
@classmethod
def _kwargs_from_elem(cls, elem, account):
folder_id, changekey = cls.id_from_xml(elem)
kwargs = dict(id=folder_id, changekey=changekey)
        # Check for 'DisplayName' element before collecting kwargs, because collecting kwargs clears the elements
has_name_elem = elem.find(cls.get_field_by_fieldname('name').response_tag()) is not None
kwargs.update({f.name: f.from_xml(elem=elem, account=account) for f in cls.supported_fields()})
if has_name_elem and not kwargs['name']:
# When we request the 'DisplayName' property, some folders may still be returned with an empty value.
# Assign a default name to these folders.
kwargs['name'] = cls.DISTINGUISHED_FOLDER_ID
return kwargs
def to_xml(self, version):
if self.is_distinguished:
# Don't add the changekey here. When modifying folder content, we usually don't care if others have changed
# the folder content since we fetched the changekey.
if self.account:
return DistinguishedFolderId(
id=self.DISTINGUISHED_FOLDER_ID,
mailbox=Mailbox(email_address=self.account.primary_smtp_address)
).to_xml(version=version)
return DistinguishedFolderId(id=self.DISTINGUISHED_FOLDER_ID).to_xml(version=version)
if self.id:
return FolderId(id=self.id, changekey=self.changekey).to_xml(version=version)
return super(BaseFolder, self).to_xml(version=version)
@classmethod
def supported_fields(cls, version=None):
return tuple(f for f in cls.FIELDS if f.name not in ('id', 'changekey') and f.supports_version(version))
@classmethod
def resolve(cls, account, folder):
# Resolve a single folder
folders = list(FolderCollection(account=account, folders=[folder]).resolve())
if not folders:
raise ErrorFolderNotFound('Could not find folder %r' % folder)
if len(folders) != 1:
raise ValueError('Expected result length 1, but got %s' % folders)
f = folders[0]
if isinstance(f, Exception):
raise f
if f.__class__ != cls:
raise ValueError("Expected folder %r to be a %s instance" % (f, cls))
return f
def refresh(self):
if not self.account:
raise ValueError('%s must have an account' % self.__class__.__name__)
if not self.id:
raise ValueError('%s must have an ID' % self.__class__.__name__)
fresh_folder = self.resolve(account=self.account, folder=self)
if self.id != fresh_folder.id:
raise ValueError('ID mismatch')
# Apparently, the changekey may get updated
for f in self.FIELDS:
setattr(self, f.name, getattr(fresh_folder, f.name))
def __floordiv__(self, other):
"""Same as __truediv__ but does not touch the folder cache.
This is useful if the folder hierarchy contains a huge number of folders and you don't want to fetch them all"""
if other == '..':
raise ValueError('Cannot get parent without a folder cache')
if other == '.':
return self
# Assume an exact match on the folder name in a shallow search will only return at most one folder
try:
return SingleFolderQuerySet(account=self.account, folder=self).depth(SHALLOW).get(name=other)
except DoesNotExist:
raise ErrorFolderNotFound("No subfolder with name '%s'" % other)
def __truediv__(self, other):
# Support the some_folder / 'child_folder' / 'child_of_child_folder' navigation syntax
if other == '..':
if not self.parent:
raise ValueError('Already at top')
return self.parent
if other == '.':
return self
for c in self.children:
if c.name == other:
return c
raise ErrorFolderNotFound("No subfolder with name '%s'" % other)
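    # Illustrative navigation sketch (folder names are placeholder assumptions):
    #     child = some_folder / 'Subfolder'    # walks the cached children
    #     child = some_folder // 'Subfolder'   # shallow server-side search, no cache
    # Both raise ErrorFolderNotFound if no child with that name exists.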
if PY2:
# Python 2 requires __div__
__div__ = __truediv__
def __repr__(self):
return self.__class__.__name__ + \
repr((self.root, self.name, self.total_count, self.unread_count, self.child_folder_count,
self.folder_class, self.id, self.changekey))
def __str__(self):
return '%s (%s)' % (self.__class__.__name__, self.name)
@python_2_unicode_compatible
class Folder(BaseFolder):
LOCAL_FIELDS = [
PermissionSetField('permission_set', field_uri='folder:PermissionSet', supported_from=EXCHANGE_2007_SP1),
EffectiveRightsField('effective_rights', field_uri='folder:EffectiveRights', is_read_only=True,
supported_from=EXCHANGE_2007_SP1),
]
FIELDS = BaseFolder.FIELDS + LOCAL_FIELDS
__slots__ = tuple(f.name for f in LOCAL_FIELDS) + ('_root',)
def __init__(self, **kwargs):
self._root = kwargs.pop('root', None) # This is a pointer to the root of the folder hierarchy
parent = kwargs.pop('parent', None)
if parent:
if self.root:
if parent.root != self.root:
raise ValueError("'parent.root' must match 'root'")
else:
self.root = parent.root
if 'parent_folder_id' in kwargs:
if parent.id != kwargs['parent_folder_id']:
raise ValueError("'parent_folder_id' must match 'parent' ID")
kwargs['parent_folder_id'] = ParentFolderId(id=parent.id, changekey=parent.changekey)
super(Folder, self).__init__(**kwargs)
@property
def account(self):
if self.root is None:
return None
return self.root.account
@property
def root(self):
return self._root
@root.setter
def root(self, value):
self._root = value
@classmethod
def get_distinguished(cls, root):
"""Gets the distinguished folder for this folder class"""
try:
return cls.resolve(
account=root.account,
folder=cls(root=root, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
)
except ErrorFolderNotFound:
raise ErrorFolderNotFound('Could not find distinguished folder %r' % cls.DISTINGUISHED_FOLDER_ID)
@property
def parent(self):
if not self.parent_folder_id:
return None
if self.parent_folder_id.id == self.id:
# Some folders have a parent that references itself. Avoid circular references here
return None
return self.root.get_folder(self.parent_folder_id.id)
@parent.setter
def parent(self, value):
if value is None:
self.parent_folder_id = None
else:
if not isinstance(value, BaseFolder):
raise ValueError("'value' %r must be a Folder instance" % value)
self.root = value.root
self.parent_folder_id = ParentFolderId(id=value.id, changekey=value.changekey)
def clean(self, version=None):
# pylint: disable=access-member-before-definition
from .roots import RootOfHierarchy
super(Folder, self).clean(version=version)
if self.root and not isinstance(self.root, RootOfHierarchy):
raise ValueError("'root' %r must be a RootOfHierarchy instance" % self.root)
@classmethod
def from_xml(cls, elem, account):
raise NotImplementedError('Use from_xml_with_root() instead')
@classmethod
def from_xml_with_root(cls, elem, root):
kwargs = cls._kwargs_from_elem(elem=elem, account=root.account)
cls._clear(elem)
folder_cls = cls
if cls == Folder:
# We were called on the generic Folder class. Try to find a more specific class to return objects as.
#
# The "FolderClass" element value is the only indication we have in the FindFolder response of which
# folder class we should create the folder with. And many folders share the same 'FolderClass' value, e.g.
# Inbox and DeletedItems. We want to distinguish between these because otherwise we can't locate the right
            # folder types for e.g. Account.inbox and Account.trash.
#
# We should be able to just use the name, but apparently default folder names can be renamed to a set of
# localized names using a PowerShell command:
# https://docs.microsoft.com/en-us/powershell/module/exchange/client-access/Set-MailboxRegionalConfiguration
#
# Instead, search for a folder class using the localized name. If none are found, fall back to getting the
# folder class by the "FolderClass" value.
#
# The returned XML may contain neither folder class nor name. In that case, we default to the generic
# Folder class.
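            # Illustrative resolution order (example values are hypothetical):
            #   a localized display name match (e.g. a renamed Inbox) wins first,
            #   then a container class match such as 'IPF.Appointment' -> Calendar,
            #   and only then do we fall back to the generic Folder class.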
if kwargs['name']:
try:
# TODO: fld_class.LOCALIZED_NAMES is most definitely neither complete nor authoritative
folder_cls = root.folder_cls_from_folder_name(folder_name=kwargs['name'],
locale=root.account.locale)
log.debug('Folder class %s matches localized folder name %s', folder_cls, kwargs['name'])
except KeyError:
pass
if kwargs['folder_class'] and folder_cls == Folder:
try:
folder_cls = cls.folder_cls_from_container_class(container_class=kwargs['folder_class'])
log.debug('Folder class %s matches container class %s (%s)', folder_cls, kwargs['folder_class'],
kwargs['name'])
except KeyError:
pass
if folder_cls == Folder:
log.debug('Fallback to class Folder (folder_class %s, name %s)', kwargs['folder_class'], kwargs['name'])
return folder_cls(root=root, **kwargs)
| 44.775837 | 122 | 0.626898 |
4a19b246b756ba19492546aaef1e1dd3477de823
| 1,269 |
py
|
Python
|
tests/test_switch.py
|
unlhcc/shipwright
|
ef2381758c12ad9fd1644d1095602e9abc33dc41
|
[
"Apache-2.0"
] | 107 |
2015-01-01T03:40:58.000Z
|
2022-01-13T21:15:57.000Z
|
tests/test_switch.py
|
unlhcc/shipwright
|
ef2381758c12ad9fd1644d1095602e9abc33dc41
|
[
"Apache-2.0"
] | 62 |
2015-01-02T00:24:14.000Z
|
2020-09-23T14:59:39.000Z
|
tests/test_switch.py
|
unlhcc/shipwright
|
ef2381758c12ad9fd1644d1095602e9abc33dc41
|
[
"Apache-2.0"
] | 21 |
2015-01-22T20:51:01.000Z
|
2018-04-04T17:46:20.000Z
|
from __future__ import absolute_import
from shipwright._lib import cli
def switch(event):
return cli.switch(event, True)
aux = {
u'aux': {
u'Digest': u'sha256:redacted',
u'Size': 1337,
u'Tag': u'redacted',
},
'event': 'push',
'image': u'redacted/redacted',
u'progressDetail': {},
}
unhandled = {
'event': 'unhandled',
}
def test_aux_record():
assert switch(aux) is None
def test_unhandled_record():
assert switch(unhandled) == '{"event": "unhandled"}'
def test_status():
assert switch({
'status': 'Not Downloading xyz',
'id': 'eg',
}) == '[STATUS] eg: Not Downloading xyz'
def test_progress():
evt = {
'status': 'Downloading xyz',
'id': 'eg',
'progressDetail': {'current': 10, 'total': 100},
}
assert cli.switch(evt, True) == '[STATUS] eg: Downloading xyz 10/100\r'
def test_hide_progress():
evt = {
'status': 'Downloading xyz',
'id': 'eg',
'progressDetail': {'current': 10, 'total': 100},
}
assert cli.switch(evt, False) is None
def test_error():
assert switch({
'error': None,
'errorDetail': {
'message': 'I AM ERROR',
},
}) == '[ERROR] I AM ERROR'
| 19.227273 | 75 | 0.552403 |
4a19b2650909dca46e45fb9e5a9a0147b10845bc
| 556 |
py
|
Python
|
tests/components/smarttub/test_entity.py
|
AdmiralStipe/core
|
e9334347eb8354795cdb17f1401a80ef3abfb269
|
[
"Apache-2.0"
] | 4 |
2016-06-22T12:00:41.000Z
|
2018-06-11T20:31:25.000Z
|
tests/components/smarttub/test_entity.py
|
AdmiralStipe/core
|
e9334347eb8354795cdb17f1401a80ef3abfb269
|
[
"Apache-2.0"
] | 54 |
2016-06-15T04:52:32.000Z
|
2022-03-22T06:02:16.000Z
|
tests/components/smarttub/test_entity.py
|
AdmiralStipe/core
|
e9334347eb8354795cdb17f1401a80ef3abfb269
|
[
"Apache-2.0"
] | 1 |
2016-04-17T09:39:48.000Z
|
2016-04-17T09:39:48.000Z
|
"""Test SmartTubEntity."""
from homeassistant.components.smarttub.entity import SmartTubEntity
async def test_entity(coordinator, spa):
"""Test SmartTubEntity."""
entity = SmartTubEntity(coordinator, spa, "entity1")
assert entity.device_info
assert entity.name
coordinator.data[spa.id] = {}
assert entity.get_spa_status("foo") is None
coordinator.data[spa.id]["status"] = {"foo": "foo1", "bar": {"baz": "barbaz1"}}
assert entity.get_spa_status("foo") == "foo1"
assert entity.get_spa_status("bar.baz") == "barbaz1"
| 29.263158 | 83 | 0.688849 |
4a19b290fe8bafc70f5d6212a728e05fcfeeec1d
| 1,045 |
py
|
Python
|
tests/common/test_debug.py
|
jonathanslenders/edgedb
|
35ad66c4bd525cd9966f8029e5b385e888323f82
|
[
"Apache-2.0"
] | 1 |
2021-12-15T09:34:48.000Z
|
2021-12-15T09:34:48.000Z
|
tests/common/test_debug.py
|
jonathanslenders/edgedb
|
35ad66c4bd525cd9966f8029e5b385e888323f82
|
[
"Apache-2.0"
] | null | null | null |
tests/common/test_debug.py
|
jonathanslenders/edgedb
|
35ad66c4bd525cd9966f8029e5b385e888323f82
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from edgedb.lang.common import debug
class DebugTests(unittest.TestCase):
def test_common_debug_flags(self):
flags = {flag.name: flag for flag in debug.flags}
self.assertIn('edgeql_compile', flags)
self.assertIn('EdgeQL', flags['edgeql_compile'].doc)
self.assertIsInstance(debug.flags.edgeql_compile, bool)
| 32.65625 | 74 | 0.744498 |
4a19b2af7101bf70ed9721b336cc9585f1dac9a9
| 436 |
py
|
Python
|
travel_recommand/travel/migrations/0017_hotel_booking_ending_date.py
|
nirvasangani/travoyager-travel-recommendation
|
bb0c041630929276a695343b9a25e4e7f5a9e321
|
[
"MIT"
] | 2 |
2021-05-13T17:14:50.000Z
|
2021-05-18T07:59:56.000Z
|
travel_recommand/travel/migrations/0017_hotel_booking_ending_date.py
|
nirvasangani/travoyager-travel-recommendation
|
bb0c041630929276a695343b9a25e4e7f5a9e321
|
[
"MIT"
] | null | null | null |
travel_recommand/travel/migrations/0017_hotel_booking_ending_date.py
|
nirvasangani/travoyager-travel-recommendation
|
bb0c041630929276a695343b9a25e4e7f5a9e321
|
[
"MIT"
] | 3 |
2021-05-14T03:24:32.000Z
|
2021-05-18T08:01:23.000Z
|
# Generated by Django 3.0.6 on 2021-04-07 09:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travel', '0016_auto_20210407_1500'),
]
operations = [
migrations.AddField(
model_name='hotel_booking',
name='ending_date',
field=models.DateField(default='1111-1-1'),
preserve_default=False,
),
]
| 21.8 | 55 | 0.600917 |
4a19b2c605ef6ebdd4f3ecf498e24e36bbcac1e5
| 8,423 |
py
|
Python
|
rotkehlchen/assets/spam_assets.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137 |
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/assets/spam_assets.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385 |
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/assets/spam_assets.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59 |
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import logging
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, Set
import requests
from eth_utils import to_checksum_address
from rotkehlchen.assets.asset import EthereumToken
from rotkehlchen.assets.utils import get_or_create_ethereum_token
from rotkehlchen.chain.ethereum.types import string_to_ethereum_address
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.errors.misc import NotERC20Conformant, RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import SPAM_PROTOCOL, ChecksumEthAddress
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
MISSING_NAME_SPAM_TOKEN = 'Autodetected spam token'
MISSING_SYMBOL_SPAM_TOKEN = 'SPAM-TOKEN'
KNOWN_ETH_SPAM_TOKENS: Dict[ChecksumEthAddress, Dict[str, Any]] = {
# khex.net and said to be spam by etherscan
string_to_ethereum_address('0x4AF9ab04615cB91e2EE8cbEDb43fb52eD205041B'): {
'name': MISSING_NAME_SPAM_TOKEN,
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
# yLiquidate (YQI) seems to be a scam
string_to_ethereum_address('0x3d3d5cCE38afb7a379B2C3175Ee56e2dC72CD7C8'): {
'name': 'yLiquidate',
'symbol': 'YQI',
'decimals': 18,
},
# Old kick token
string_to_ethereum_address('0xC12D1c73eE7DC3615BA4e37E4ABFdbDDFA38907E'): {
'name': 'KICK TOKEN OLD',
'symbol': 'KICK',
'decimals': 18,
},
# kick token. Only can be withdrawn from their exchange
string_to_ethereum_address('0x824a50dF33AC1B41Afc52f4194E2e8356C17C3aC'): {
'name': 'KICK TOKEN',
'symbol': 'KICK',
'decimals': 18,
},
# Fake gear token warned by etherscan
string_to_ethereum_address('0x6D38b496dCc9664C6908D8Afba6ff926887Fc359'): {
'name': 'FAKE gear token',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
# EthTrader Contribution (CONTRIB) few txs and all failed
string_to_ethereum_address('0xbe1fffB262a2C3e65c5eb90f93caf4eDC7d28c8d'): {
'name': 'EthTrader Contribution',
'symbol': 'CONTRIB',
'decimals': 18,
},
string_to_ethereum_address('0x1412ECa9dc7daEf60451e3155bB8Dbf9DA349933'): {
'name': 'a68.net',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
string_to_ethereum_address('0x82dfDB2ec1aa6003Ed4aCBa663403D7c2127Ff67'): {
'name': 'akswap.io',
'symbol': 'akswap.io',
'decimals': 18,
},
string_to_ethereum_address('0x43661F4b1c67dd6c1e48C6Faf2887b22AeE3dDf5'): {
'name': 'akswap.io',
'symbol': 'akswap.io',
'decimals': 18,
},
string_to_ethereum_address('0xF9d25EB4C75ed744596392cf89074aFaA43614a8'): {
'name': 'UP1.org',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
string_to_ethereum_address('0x01454cdC3FAb2a026CC7d1CB2aEa9B909D5bA0EE'): {
'name': 'deApy.org',
'symbol': 'deApy.org',
'decimals': 18,
},
string_to_ethereum_address('0x73885eb0dA4ba8B061acF1bfC5eA7073B07ccEA2'): {
'name': 'Adidas fake token',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
string_to_ethereum_address('0xc85E0474068dbA5B49450c26879541EE6Cc94554'): {
'name': 'KyDy.org',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
string_to_ethereum_address('0x1412ECa9dc7daEf60451e3155bB8Dbf9DA349933'): {
'name': 'A68.net',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
# Apple spam/scam token
string_to_ethereum_address('0x3c4f8Fe3Cf50eCA5439F8D4DE5BDf40Ae71860Ae'): {
'name': 'Apple 29',
'symbol': MISSING_SYMBOL_SPAM_TOKEN,
'decimals': 18,
},
# Blizzard spam/scam token
string_to_ethereum_address('0xbb97a6449A6f5C53b7e696c8B5b6E6A53CF20143'): {
'name': 'Activision Blizzard DAO',
'symbol': 'BLIZZARD',
'decimals': 18,
},
# Audi spam/scam token
string_to_ethereum_address('0x9b9090DfA2cEbBef592144EE01Fe508f0c817B3A'): {
'name': 'Audi Metaverse',
'symbol': 'Audi',
'decimals': 18,
},
}
def query_token_spam_list(db: 'DBHandler') -> Set[EthereumToken]:
"""Generate a set of assets that can be ignored combining information of cryptoscamdb
and the list of spam assets KNOWN_ETH_SPAM_TOKENS. This function also makes sure to get the
bad assets in the list of cryptoscamdb and ensures that they exists in the globaldb before
trying to add them.
TODO
This function tries to add as assets to the globaldb the tokens listed in
KNOWN_ETH_SPAM_TOKENS and not the ones coming from cryptoscamdb. The reason is that until the
v2 of the API the response contains both spam addresses and tokens and there is no way to know
if the address is for a contract or not. Checking if the address is a contract takes too much
time. When V2 gets released this can be fixed.
May raise:
- RemoteError
"""
try:
response = requests.get(
url='https://api.cryptoscamdb.org/v1/addresses',
timeout=DEFAULT_TIMEOUT_TUPLE,
)
data = response.json()
success, tokens_info = data['success'], data['result']
except requests.exceptions.RequestException as e:
raise RemoteError(f'Failed to retrieve information from cryptoscamdb. {str(e)}') from e
except (DeserializationError, JSONDecodeError) as e:
raise RemoteError(f'Failed to deserialize data from cryptoscamdb. {str(e)}') from e
except KeyError as e:
raise RemoteError(
f'Response from cryptoscamdb doesn\'t contain expected key. {str(e)}',
) from e
if success is False:
log.error(f'Failed to deserialize data from cryptoscamdb. {data}')
raise RemoteError(
'Failed to deserialize data from cryptoscamdb. Check the logs '
'to get more information',
)
tokens_to_ignore = set()
for token_addr, token_data in tokens_info.items():
if not token_addr.startswith('0x') or token_data[0]['type'] != 'scam':
continue
try:
checksumed_address = to_checksum_address(token_addr)
except ValueError as e:
log.debug(f'Failed to read address from cryptoscamdb. {str(e)}')
continue
try:
token = EthereumToken(checksumed_address)
except UnknownAsset:
continue
if token is not None:
tokens_to_ignore.add(token)
# Try to add custom list
for token_address, info in KNOWN_ETH_SPAM_TOKENS.items():
try:
own_token = get_or_create_ethereum_token(
userdb=db,
ethereum_address=token_address,
protocol=SPAM_PROTOCOL,
form_with_incomplete_data=True,
decimals=info.get('decimals', 18),
name=info.get('name', MISSING_NAME_SPAM_TOKEN),
symbol=info.get('symbol', MISSING_SYMBOL_SPAM_TOKEN),
)
except (RemoteError, NotERC20Conformant) as e:
            log.debug(f'Skipping {token_address} due to {str(e)}')
continue
if own_token is not None:
tokens_to_ignore.add(own_token)
return tokens_to_ignore
def update_spam_assets(db: 'DBHandler') -> int:
"""
Update the list of ignored assets using query_token_spam_list and avoiding
the addition of duplicates. It returns the amount of assets that were added
to the ignore list
"""
spam_tokens = query_token_spam_list(db)
    # Order matters here. Make sure ignored_assets are queried after the spam tokens creation,
    # since it's possible for a token to exist in the ignored assets but not in the global DB;
    # in that case query_token_spam_list adds it to the global DB.
ignored_assets = {asset.identifier for asset in db.get_ignored_assets()}
assets_added = 0
for token in spam_tokens:
if token.identifier in ignored_assets:
continue
db.add_to_ignored_assets(token)
assets_added += 1
return assets_added
| 38.461187 | 98 | 0.678381 |
4a19b2d7c424917fb52162cbd441483b6c60c269
| 3,985 |
py
|
Python
|
page/views.py
|
guvenaltunsoyy/django-kaft-clone
|
be4bfee0b7e07113866f36d452b225580d177bb4
|
[
"MIT"
] | null | null | null |
page/views.py
|
guvenaltunsoyy/django-kaft-clone
|
be4bfee0b7e07113866f36d452b225580d177bb4
|
[
"MIT"
] | null | null | null |
page/views.py
|
guvenaltunsoyy/django-kaft-clone
|
be4bfee0b7e07113866f36d452b225580d177bb4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import (Carousel, Page)
from .forms import CarouselModelForm, PageModelForm
from django.utils.text import slugify
from django.contrib.admin.views.decorators import staff_member_required
from product.models import Category, Product
STATUS = 'published'
# user
def index(request):
context = dict()
context['images'] = Carousel.objects.filter(
status=STATUS).exclude(cover_image='')
context['products'] = Product.objects.filter(
status=STATUS
)[:9]
# context['images'] = images
# context['categories'] = Category.objects.filter(
# status=STATUS
# ).order_by('title')
return render(request, 'home/index.html', context)
def manage_list(request):
context = dict()
return render(request, 'manage/manage.html', context)
@staff_member_required
def page_list(request):
context = dict()
context['items'] = Page.objects.all().order_by('-pk')
return render(request, 'manage/page_list.html', context)
def page_create(request):
context = dict()
context['title'] = 'Page Form'
context['form'] = PageModelForm()
if request.method == 'POST':
form = PageModelForm(request.POST, request.FILES)
if form.is_valid():
item = form.save(commit=False)
item.slug = slugify(item.title.replace('ı', 'i'))
item.save()
            messages.success(request, 'Page created.')
return render(request, 'manage/form.html', context)
def page_update(request, pk):
context = dict()
item = Page.objects.get(pk=pk)
context['title'] = f"title : '{item.title}' - pk : {item.pk} Carousel Create Form"
context['form'] = PageModelForm(instance=item)
if request.method == 'POST':
form = PageModelForm(request.POST, request.FILES, instance=item)
if form.is_valid():
item = form.save(commit=False)
if item.slug == '':
item.slug = slugify(item.title.replace('ı', 'i'))
item.save()
            messages.success(request, 'Page updated.')
return redirect('page_update', pk)
return render(request, 'manage/form.html', context)
def page_delete(request, pk):
item = Page.objects.get(pk=pk)
item.status = 'deleted'
item.save()
return redirect('page_list')
# admin
@staff_member_required
def carousel_list(request):
context = dict()
context['carousel'] = Carousel.objects.all().order_by('-pk')
return render(request, 'manage/carousel_list.html', context)
def carousel_update(request, pk):
context = dict()
# kaft_clone.com/manage=carousel/1/edit
# show
item = Carousel.objects.get(pk=pk)
context['title'] = f"title : '{item.title}' - pk : {item.pk} Carousel Create Form"
context['form'] = CarouselModelForm(instance=item)
if request.method == 'POST':
form = CarouselModelForm(request.POST, request.FILES, instance=item)
if form.is_valid():
form.save()
            messages.success(request, 'Carousel updated.')
return redirect('carousel_update', pk)
return render(request, 'manage/form.html', context)
# stuff not checked
# def carousel_form_save(request=None, instance=None):
# if request:
# form = CarouselModelForm(request.POST,
# request.FILES,
# instance=instance
# )
# else:
# form = CarouselModelForm(instance=instance)
# return form
def carousel_form(request):
context = dict()
context['form'] = CarouselModelForm()
if request.method == 'POST':
form = CarouselModelForm(request.POST, request.FILES)
print(form)
if form.is_valid():
form.save()
            messages.success(request, 'Something was added, but it is unclear what happened')
return render(request, 'manage/form.html', context)
| 31.377953 | 86 | 0.637139 |
4a19b364453ee2580495c4e1068f9a54102a8bcf
| 8,700 |
py
|
Python
|
cfgov/scripts/http_smoke_test.py
|
cfpb/cfgov-refresh
|
e948e1f589e44999e4744911a73d06e5ff5905cc
|
[
"CC0-1.0"
] | 156 |
2015-01-16T15:16:46.000Z
|
2020-08-04T04:48:01.000Z
|
cfgov/scripts/http_smoke_test.py
|
cfpb/cfgov-refresh
|
e948e1f589e44999e4744911a73d06e5ff5905cc
|
[
"CC0-1.0"
] | 3,604 |
2015-01-05T22:09:12.000Z
|
2020-08-14T17:09:19.000Z
|
cfgov/scripts/http_smoke_test.py
|
cfpb/cfgov-refresh
|
e948e1f589e44999e4744911a73d06e5ff5905cc
|
[
"CC0-1.0"
] | 102 |
2015-01-28T14:51:18.000Z
|
2020-08-10T00:00:39.000Z
|
#!/usr/bin/env python
import argparse
import logging
import sys
import time
import requests
logger = logging.getLogger("http_smoke_tests")
logger.setLevel(logging.FATAL)
shell_log = logging.StreamHandler()
shell_log.setLevel(logging.INFO)
logger.addHandler(shell_log)
parser = argparse.ArgumentParser()
parser.add_argument(
"--base", help="choose a server base other than www.consumerfinance.gov"
)
parser.add_argument(
"--url_list",
type=str,
nargs="+",
help=(
"You can provide a space-separated custom list "
"of relative URLs to check."
),
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set logging level to info to see all message output.",
)
parser.add_argument(
"-t",
"--timeout",
type=str,
help="Set a timeout level, in seconds; the default is 30.",
)
TIMEOUT = 30
ALLOWED_TIMEOUTS = 1
FULL = False
BASE = "https://www.consumerfinance.gov"
S3_URI = "https://files.consumerfinance.gov/build/smoketests/smoketest_urls.json" # noqa: B950
# Fall-back list of top 25 URLs, as of July 2, 2020, from hubcap/wiki
# All URLs in the list should be canonical locations of the given pages,
# not redirects.
TOP = [
"/", # home page
"/about-us/blog/guide-covid-19-economic-stimulus-checks/",
"/about-us/blog/guide-coronavirus-mortgage-relief-options/",
"/find-a-housing-counselor/",
"/complaint/",
"/learnmore/",
"/ask-cfpb/what-is-the-best-way-to-negotiate-a-settlement-with-a-debt-collector-en-1447/", # noqa: B950
"/coronavirus/",
"/about-us/blog/guide-covid-19-economic-stimulus-checks/#qualify/",
"/consumer-tools/prepaid-cards/",
"/coronavirus/cares-act-mortgage-forbearance-what-you-need-know/",
"/about-us/blog/economic-impact-payment-prepaid-card/",
"/about-us/blog/what-you-need-to-know-about-student-loans-and-coronavirus-pandemic/", # noqa: B950
"/complaint/getting-started/",
"/coronavirus/mortgage-and-housing-assistance/",
"/ask-cfpb/what-is-forbearance-en-289/",
"/about-us/blog/guide-covid-19-economic-stimulus-checks/#when/",
"/ask-cfpb/what-should-i-do-when-a-debt-collector-contacts-me-en-1695/",
"/about-us/blog/protect-yourself-financially-from-impact-of-coronavirus/",
"/about-us/contact-us/",
"/about-us/blog/guide-coronavirus-mortgage-relief-options/#relief-options/", # noqa: B950
"/coronavirus/managing-your-finances/economic-impact-payment-prepaid-debit-cards/", # noqa: B950
"/ask-cfpb/how-can-i-tell-who-owns-my-mortgage-en-214/",
"/rules-policy/regulations/",
"/ask-cfpb/what-is-a-debt-to-income-ratio-why-is-the-43-debt-to-income-ratio-important-en-1791/", # noqa: B950
]
# URLs for cfgov sub-apps that are expected to be present
# All URLs in the list should be canonical locations of the given pages,
# not redirects.
APPS = [
"/about-us/budget-strategy/",
"/enforcement/payments-harmed-consumers/",
"/about-us/blog/",
"/about-us/newsroom/",
"/about-us/events/",
"/about-us/careers/",
"/about-us/careers/current-openings/",
"/about-us/doing-business-with-us/",
"/rules-policy/innovation/",
"/activity-log/",
"/ask-cfpb/",
"/your-story/",
"/es/",
"/es/obtener-respuestas/",
"/students/",
"/consumer-tools/educator-tools/servicemembers/",
"/know-before-you-owe/",
"/fair-lending/",
"/paying-for-college/",
"/paying-for-college2/understanding-your-financial-aid-offer/about-this-tool/", # noqa: B950
"/retirement/before-you-claim/",
"/retirement/before-you-claim/es/",
"/consumer-tools/auto-loans/",
"/consumer-tools/credit-reports-and-scores/",
"/consumer-tools/debt-collection/",
"/consumer-tools/prepaid-cards/",
"/consumer-tools/sending-money/",
"/mortgagehelp/",
"/consumer-tools/educator-tools/your-money-your-goals/",
"/consumer-tools/educator-tools/adult-financial-education/",
"/consumer-tools/educator-tools/youth-financial-education/",
"/consumer-tools/educator-tools/library-resources/",
"/consumer-tools/educator-tools/resources-for-tax-preparers/",
"/consumer-tools/money-as-you-grow/",
"/empowerment/",
"/consumer-tools/educator-tools/resources-for-older-adults/",
"/consumer-tools/educator-tools/youth-financial-education/", # TDP
"/data-research/",
"/data-research/research-reports/",
"/data-research/cfpb-research-conference/",
"/data-research/consumer-complaints/",
"/data-research/hmda/",
"/data-research/hmda/for-filers",
"/data-research/consumer-credit-trends/",
"/data-research/credit-card-data/",
"/data-research/cfpb-researchers/",
"/data-research/mortgage-performance-trends/",
"/policy-compliance/",
"/rules-policy/",
"/compliance/",
"/compliance/implementation-guidance/",
"/enforcement/",
"/rules-policy/notice-opportunities-comment/",
"/compliance/amicus/",
"/compliance/implementation-guidance/hmda-implementation/",
"/compliance/implementation-guidance/mortserv/",
"/compliance/implementation-guidance/tila-respa-disclosure-rule/",
]
# call `set` on the combined list to weed out dupes
FALLBACK_URLS = sorted(set(TOP + APPS))
def get_full_list():
"""Fetch a list of URLs to test from s3, or fall back to local default."""
try:
url_data = requests.get(S3_URI).json()
except Exception as e:
logger.warning(
"Using fallback because request for S3 list failed: {}".format(e)
)
url_list = FALLBACK_URLS
else:
url_list = sorted(set(url_data.get("top") + url_data.get("apps")))
return url_list
def check_urls(base, url_list=None):
"""
A smoke test to make sure the main cfgov URLs are returning status 200.
Providing no `url_list` will test a standard list of important site URLs,
which includes megamenu links, main apps, and our 25 most popular pages.
Passing no base value will run the tests against production.
To run the full suite against production, and see its progress:
./cfgov/scripts/http_smoke_test.py -v
You can test a custom set of URLs by passing relative URL strings
(relative to the provided base) as the `url_list` value.
This example tests two URLs against a local cfgov instance:
./cfgov/scripts/http_smoke_test.py -v --base 'http://localhost:8000' --url_list '/' '/retirement/before-you-claim/' # noqa: B950
"""
count = 0
timeouts = []
failures = []
starter = time.time()
if not url_list:
url_list = get_full_list()
for url_suffix in url_list:
logger.info(url_suffix)
count += 1
url = "{}{}".format(base, url_suffix)
try:
response = requests.get(url, timeout=TIMEOUT)
code = response.status_code
if code == 200:
pass
else:
logger.info("{} failed with status code {}".format(url, code))
failures.append((url, code))
except requests.exceptions.Timeout:
logger.info("{} timed out".format(url))
timeouts.append(url)
except requests.exceptions.ConnectionError as e:
logger.info("{} returned a connection error".format(url))
failures.append((url, e))
except requests.exceptions.RequestException as e:
logger.info("{} failed for '{}'".format(url, e))
failures.append((url, e))
timer = int(time.time() - starter)
logger.info(
"\n{} took {} seconds to check {} URLs at {}\n "
"{} failed\n "
"{} timed out".format(
sys.argv[0], timer, count, base, len(failures), len(timeouts)
)
)
if failures:
logger.error("These URLs failed: {}".format(failures))
if len(timeouts) > ALLOWED_TIMEOUTS:
logger.error(
"These URLs timed out after {} seconds: "
"{}".format(TIMEOUT, timeouts)
)
elif timeouts:
logger.info(
"{} allowed timeouts occurred:\n"
"{}".format(len(timeouts), "\n".join(timeouts))
)
if failures or len(timeouts) > ALLOWED_TIMEOUTS:
logger.error("FAIL")
return False
logger.info("\x1B[32mAll URLs return 200. No smoke!\x1B[0m")
return True
if __name__ == "__main__": # pragma: nocover
url_list = None
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.INFO)
if args.base:
BASE = args.base
if args.url_list:
url_list = args.url_list
if args.timeout:
TIMEOUT = int(args.timeout)
if not check_urls(BASE, url_list=url_list):
sys.exit(1)
| 34.939759 | 133 | 0.65046 |
4a19b62b158214634ec63c7d321baf5a3f8b53ae
| 3,790 |
py
|
Python
|
race.py
|
TyPh00nCdr/agent-hallucination
|
05532b57a05e9b73d7d3883e9ce67e5449ddf3a6
|
[
"Apache-2.0"
] | null | null | null |
race.py
|
TyPh00nCdr/agent-hallucination
|
05532b57a05e9b73d7d3883e9ce67e5449ddf3a6
|
[
"Apache-2.0"
] | null | null | null |
race.py
|
TyPh00nCdr/agent-hallucination
|
05532b57a05e9b73d7d3883e9ce67e5449ddf3a6
|
[
"Apache-2.0"
] | null | null | null |
from os import sched_getaffinity
from pathlib import Path
import gym
import numpy as np
from pyvirtualdisplay import Display
ENV = 'CarRacing-v0'
AVAILABLE_CORES = len(sched_getaffinity(0))
ROLLOUTS_PER_CORE = 100 // AVAILABLE_CORES
def key_press(k, mod):
from pyglet.window import key
if k == key.ENTER:
global restart
restart = True
if k == key.LEFT:
a[0] = -1.0
if k == key.RIGHT:
a[0] = +1.0
if k == key.UP:
a[1] = +1.0
if k == key.DOWN:
        a[2] = +0.8  # set to 1.0 to lock the wheels at zero rotation
def key_release(k, mod):
from pyglet.window import key
if k == key.LEFT and a[0] == -1.0:
a[0] = 0
if k == key.RIGHT and a[0] == +1.0:
a[0] = 0
if k == key.UP:
a[1] = 0
if k == key.DOWN:
a[2] = 0
def gauss_rand_walk(action_space, dt, seq_len=None):
"""
Gaussian Random Walk simulating a Wiener process (Brownian motion):
See: https://de.wikipedia.org/wiki/Wienerprozess#Gau%C3%9Fscher_Random_Walk
"""
sqrt_dt = np.sqrt(dt)
action = action_space.sample()
rng = np.random.default_rng()
cnt = 0
while seq_len is None or cnt < seq_len:
cnt += 1
yield action
action = (action + sqrt_dt * rng.standard_normal(size=action_space.shape)
).clip(action_space.low, action_space.high, dtype=action_space.dtype)
def rollout(index):
race_env = gym.make(ENV)
fps = race_env.metadata['video.frames_per_second']
# observation_space = race_env.observation_space
dir = Path('observations') / f'thread_{index}'
dir.mkdir(exist_ok=True)
with Display(visible=False, size=(1400, 900)):
for rollout in range(ROLLOUTS_PER_CORE):
race_env.reset()
total_reward = 0.0
step = 0
            # maybe: rollout_observations = np.empty((1000,) + observation_space.shape)
            # and at the end: observations=rollout_observations[:step]
rollout_observations = []
rollout_rewards = []
rollout_terminals = []
rollout_actions = []
for action in gauss_rand_walk(race_env.action_space, 1. / fps):
s, r, done, info = race_env.step(action)
# append() vs += []
# https://stackoverflow.com/a/725882
rollout_observations.append(s)
rollout_rewards.append(r)
rollout_terminals.append(done)
rollout_actions.append(action)
total_reward += r
step += 1
if done or step == 1000:
print(f'shape: {np.array(rollout_observations).shape}')
np.savez_compressed(dir / f'rollout_{rollout}',
observations=np.array(
rollout_observations),
rewards=np.array(rollout_rewards),
actions=np.array(rollout_actions),
terminals=np.array(rollout_terminals))
break
race_env.close()
# if __name__ == "__main__":
# race_env = gym.make(ENV)
# fps = race_env.metadata['video.frames_per_second']
# render_modes = race_env.metadata['render.modes']
# print(f'FPS: {fps}')
# print(f'Render modes: {render_modes}')
# print(f'Available threads: {AVAILABLE_CORES}')
# print(f'Rollouts per thread: {ROLLOUTS_PER_CORE}')
# # race_env.render()
# # race_env.viewer.window.on_key_press = key_press
# # race_env.viewer.window.on_key_release = key_release
# with Pool(AVAILABLE_CORES) as pool:
# pool.map(rollout, range(AVAILABLE_CORES))
| 32.956522 | 92 | 0.573087 |
4a19b70172058021780501843f914722c831e6e2
| 3,798 |
py
|
Python
|
tests/test_validate.py
|
pachterlab/kb_python
|
61eb657a1bb28a7cefd2fbafaaf6f1d0bd757b7d
|
[
"BSD-2-Clause"
] | 71 |
2019-11-05T21:37:21.000Z
|
2022-03-28T14:33:54.000Z
|
tests/test_validate.py
|
pachterlab/kb_python
|
61eb657a1bb28a7cefd2fbafaaf6f1d0bd757b7d
|
[
"BSD-2-Clause"
] | 111 |
2019-11-08T19:27:25.000Z
|
2022-03-29T00:06:03.000Z
|
tests/test_validate.py
|
pachterlab/kb_python
|
61eb657a1bb28a7cefd2fbafaaf6f1d0bd757b7d
|
[
"BSD-2-Clause"
] | 13 |
2019-11-04T22:06:46.000Z
|
2021-12-01T20:34:32.000Z
|
from unittest import mock, TestCase
from unittest.mock import call
import kb_python.validate as validate
from tests.mixins import TestMixin
@validate.validate_files()
def dummy(*args, **kwargs):
return
@validate.validate_files()
def dummy_str(*args, **kwargs):
return 'test'
@validate.validate_files()
def dummy_dict(*args, **kwargs):
return {'test': 'testfile'}
@validate.validate_files()
def dummy_tuple(*args, **kwargs):
return 'test1', 'test2'
class TestValidate(TestMixin, TestCase):
def test_validate_bus(self):
validate.validate_bus(self.bus_path)
def test_validate_bus_failed_parse(self):
with mock.patch('kb_python.validate.run_executable') as run_executable:
run_executable().stdout.read.return_value = ''
with self.assertRaises(validate.FileVerificationFailed):
validate.validate_bus('path')
def test_validate_bus_no_records(self):
with mock.patch('kb_python.validate.run_executable') as run_executable:
run_executable().stdout.read.return_value = 'Read in 0 BUS records'
with self.assertRaises(validate.FileVerificationFailed):
validate.validate_bus('path')
def test_validate_mtx(self):
validate.validate_mtx(self.matrix_path)
def test_validate_mtx_raises_on_error(self):
with mock.patch('kb_python.validate.scipy.io.mmread') as mmread:
mmread.side_effect = ValueError('test')
with self.assertRaises(validate.FileVerificationFailed):
validate.validate_mtx('path')
def test_validate(self):
with mock.patch('kb_python.validate.VALIDATORS'):
validate.validate('path/to/bus.bus')
def validate_files(self):
with mock.patch('kb_python.validate.validate') as v,\
mock.patch('kb_python.validate.os.path.exists') as exists:
exists.return_value = True
self.assertIsNone(dummy('f1', 1, kwarg1='f2', kwarg2=2))
self.assertEqual(2, v.call_count)
v.assert_has_calls([call('f1'), call('f2')])
def validate_files_str(self):
with mock.patch('kb_python.validate.validate') as v,\
mock.patch('kb_python.validate.os.path.exists') as exists:
exists.return_value = True
self.assertEqual('test', dummy_str('f1', 1, kwarg1='f2', kwarg2=2))
self.assertEqual(3, v.call_count)
v.assert_has_calls([call('f1'), call('f2'), call('test')])
def validate_files_dict(self):
with mock.patch('kb_python.validate.validate') as v,\
mock.patch('kb_python.validate.os.path.exists') as exists:
exists.return_value = True
self.assertEqual({'test': 'testfile'},
dummy_str('f1', 1, kwarg1='f2', kwarg2=2))
self.assertEqual(3, v.call_count)
v.assert_has_calls([call('f1'), call('f2'), call('testfile')])
def validate_files_tuple(self):
with mock.patch('kb_python.validate.validate') as v,\
mock.patch('kb_python.validate.os.path.exists') as exists:
exists.return_value = True
self.assertEqual(('test1', 'test2'),
dummy_str('f1', 1, kwarg1='f2', kwarg2=2))
self.assertEqual(4, v.call_count)
v.assert_has_calls([
call('f1'),
call('f2'),
call('test1'),
call('test2')
])
def test_validate_off(self):
with mock.patch('kb_python.validate.is_validate') as is_validate,\
mock.patch('kb_python.validate.validate_bus') as v:
is_validate.return_value = False
validate.validate('path/to/bus.bus')
v.assert_not_called()
| 36.873786 | 79 | 0.628489 |
4a19b80aa863c01a72879b1a25276baffece16c8
| 1,856 |
py
|
Python
|
change-liwc-scores-to-personality.py
|
sarajaksa/IS2020
|
db6b7cdc804465dd4142f27a8c05c3a2e87c1076
|
[
"MIT"
] | null | null | null |
change-liwc-scores-to-personality.py
|
sarajaksa/IS2020
|
db6b7cdc804465dd4142f27a8c05c3a2e87c1076
|
[
"MIT"
] | null | null | null |
change-liwc-scores-to-personality.py
|
sarajaksa/IS2020
|
db6b7cdc804465dd4142f27a8c05c3a2e87c1076
|
[
"MIT"
] | null | null | null |
import pandas
import sqlite3
database_file_name = "data.db"
filename = "LIWC2015 Results (users (52235 files)).csv"
def number(n):
return float(n.replace(",", "."))
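# Illustrative example (not in the original script): number("3,14") returns 3.14,
# since the LIWC export uses comma decimal separators.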
def put_personality_to_database(row, cursor):
sql_query = f'INSERT INTO personality (author, WC, C, A, O, E, N) values ("{row.Filename}", {row.WC}, {row.C}, {row.A}, {row.O}, {row.E}, {row.N})'
cursor.execute(sql_query)
with open(filename) as f:
data = f.readlines()
data_split = [v.replace(",", ";", 2).replace("\n", "").split(";") for v in data]
pandas_data = pandas.DataFrame(data_split[1:], columns=data_split[0])
pandas_data["C"] = pandas_data.apply(
lambda row: number(row.achieve)
- number(row.anger)
- number(row.negemo)
- number(row.negate),
axis=1,
)
pandas_data["A"] = pandas_data.apply(
lambda row: -number(row.swear)
+ number(row.home)
+ number(row.leisure)
+ number(row.motion)
+ number(row.space)
- number(row.anger)
- number(row.negemo)
+ number(row.posemo),
axis=1,
)
pandas_data["O"] = pandas_data.apply(
lambda row: +number(row.death)
- number(row.home)
- number(row.leisure)
- number(row.motion)
- number(row.time)
- number(row.family)
- number(row.social)
- number(row.posemo)
+ number(row.prep)
+ number(row.article)
- number(row.i)
- number(row.pronoun),
axis=1,
)
pandas_data["E"] = pandas_data.apply(
lambda row: +number(row.sexual)
+ number(row.friend)
+ number(row.social)
+ number(row.you),
axis=1,
)
pandas_data["N"] = pandas_data.apply(
lambda row: number(row.anx) + number(row.negemo) - number(row.you), axis=1
)
database = sqlite3.connect(database_file_name)
cursor = database.cursor()
pandas_data.apply(lambda row: put_personality_to_database(row, cursor), axis=1)
database.commit()
database.close()
| 25.777778 | 151 | 0.647629 |
4a19b94deb80c06c0e90d0bbd889ff6918d5f96a
| 22,845 |
py
|
Python
|
py/ztools/mtp/mtpxci_remote.py
|
HerrTrigger/NSC_BUILDER
|
e9083e83383281bdd9e167d3141163dcc56b6710
|
[
"MIT"
] | 828 |
2018-11-05T02:43:40.000Z
|
2022-03-27T08:49:56.000Z
|
py/ztools/mtp/mtpxci_remote.py
|
HerrTrigger/NSC_BUILDER
|
e9083e83383281bdd9e167d3141163dcc56b6710
|
[
"MIT"
] | 141 |
2018-11-05T19:59:23.000Z
|
2022-01-10T01:17:32.000Z
|
py/ztools/mtp/mtpxci_remote.py
|
HerrTrigger/NSC_BUILDER
|
e9083e83383281bdd9e167d3141163dcc56b6710
|
[
"MIT"
] | 119 |
2018-11-05T06:57:37.000Z
|
2022-03-25T18:10:33.000Z
|
import aes128
import Print
import os
import shutil
import json
import listmanager
from Fs import Nsp as squirrelNSP
from Fs import Xci as squirrelXCI
from Fs.Nca import NcaHeader
from Fs import Nca
from Fs.File import MemoryFile
import sq_tools
from Fs import Type as FsType
import Keys
from binascii import hexlify as hx, unhexlify as uhx
import subprocess
import sys
from mtp.wpd import is_switch_connected
from python_pick import pick
from python_pick import Picker
import csv
from tqdm import tqdm
from Drive import Private as DrivePrivate
from Drive import DriveTools
def check_connection():
if not is_switch_connected():
sys.exit("Switch device isn't connected.\nCheck if mtp responder is running!!!")
bucketsize = 81920
# SET ENVIRONMENT
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
NSCB_dir=squirrel_dir
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
ztools_dir=os.path.join(NSCB_dir,'ztools')
squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
squirrel_dir=squirrel_dir
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
testroute1=os.path.join(squirrel_dir, "squirrel.py")
testroute2=os.path.join(squirrel_dir, "squirrel.exe")
urlconfig=os.path.join(zconfig_dir,'NUT_DB_URL.txt')
isExe=False
if os.path.exists(testroute1):
squirrel=testroute1
isExe=False
elif os.path.exists(testroute2):
squirrel=testroute2
isExe=True
bin_folder=os.path.join(ztools_dir, 'bin')
nscb_mtp=os.path.join(bin_folder, 'nscb_mtp.exe')
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
games_installed_cache=os.path.join(cachefolder, 'games_installed.txt')
valid_saves_cache=os.path.join(cachefolder, 'valid_saves.txt')
mtp_source_lib=os.path.join(zconfig_dir,'mtp_source_libraries.txt')
mtp_internal_lib=os.path.join(zconfig_dir,'mtp_SD_libraries.txt')
storage_info=os.path.join(cachefolder, 'storage.csv')
download_lib_file = os.path.join(zconfig_dir, 'mtp_download_libraries.txt')
remote_lib_file = os.path.join(zconfig_dir, 'remote_libraries.txt')
cache_lib_file= os.path.join(zconfig_dir, 'remote_cache_location.txt')
_1fichier_token=os.path.join((os.path.join(zconfig_dir, 'credentials')),'_1fichier_token.tk')
remote_lib_cache=os.path.join(zconfig_dir, 'remote_lib_cache')
def libraries(tfile):
db={}
try:
with open(tfile,'rt',encoding='utf8') as csvfile:
readCSV = csv.reader(csvfile, delimiter='|')
i=0
for row in readCSV:
if i==0:
csvheader=row
i=1
else:
dict_={}
for j in range(len(csvheader)):
try:
if row[j]==None or row[j]=='':
dict_[csvheader[j]]=None
else:
dict_[csvheader[j]]=row[j]
except:
dict_[csvheader[j]]=None
db[row[0]]=dict_
return db
except BaseException as e:
Print.error('Exception: ' + str(e))
return False
def get_library_from_path(tfile=None,filename=None):
if tfile==None:
db=libraries(remote_lib_file)
else:
db=libraries(tfile)
TD=None;lib=None;path="null"
for entry in db:
path=db[entry]['path']
if filename.startswith(path):
TD=db[entry]['TD_name']
lib=entry
libpath=path
break
else:
pass
if lib==None:
db=libraries(cache_lib_file)
TD=None;lib=None;path="null"
for entry in db:
path=db[entry]['path']
if filename.startswith(path):
TD=db[entry]['TD_name']
lib=entry
libpath=path
break
else:
pass
if TD=='':
TD=None
return lib,TD,libpath
def install_xci_csv(filepath=None,remote=None,destiny="SD",cachefolder=None,override=False,keypatch=False):
if filepath=="":
filepath=None
if remote=="":
remote=None
if remote==None:
test=filepath.split('|');TD=None
if len(test)<2:
filepath=test[0]
lib,TD,libpath=get_library_from_path(remote_lib_file,filepath)
else:
filepath=test[0]
TD=test[1]
if str(TD).upper()=="NONE":
TD=None
ID,name,type,size,md5,remote=DrivePrivate.get_Data(filepath,TD=TD,Print=False)
check_connection()
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
files_list=DriveTools.get_files_from_head(remote,remote.name)
remote.rewind()
print(f"Installing {remote.name} by content")
print('- Parsing headers...')
files=list();filesizes=list()
fplist=list()
counter=0
for k in range(len(files_list)):
entry=files_list[k]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca'):
counter+=1
print(f"- Detected {counter} content ids")
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0];
if cnmtfile.endswith('.cnmt.nca'):
target_cnmt=cnmtfile
nspname=gen_xci_parts_spec0(remote=remote,target_cnmt=target_cnmt,cachefolder=cachefolder,keypatch=keypatch)
if (remote.name).endswith('xcz'):
nspname=nspname[:-1]+'z'
files_csv=os.path.join(cachefolder, 'remote_files.csv')
process=subprocess.Popen([nscb_mtp,"GDInstallfromCSV","-cs",files_csv,"-nm",nspname,"-dst",destiny])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
counter-=1
print('\n- Still '+str(counter)+' subitems to process')
if counter>0:
print("")
if os.path.exists(cachefolder):
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
def gen_xci_parts_spec0(filepath=None,remote=None,target_cnmt=None,cachefolder=None,keypatch=False,files_list=None):
if filepath=="":
filepath=None
if remote=="":
remote=None
if remote==None:
test=filepath.split('|');TD=None
if len(test)<2:
filepath=test[0]
lib,TD,libpath=get_library_from_path(remote_lib_file,filepath)
else:
filepath=test[0]
TD=test[1]
if str(TD).upper()=="NONE":
TD=None
ID,name,type,size,md5,remote=DrivePrivate.get_Data(filepath,TD=TD,Print=False)
if keypatch!=False:
try:
keypatch=int(keypatch)
except: keypatch=False
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
else:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
if files_list==None:
files_list=DriveTools.get_files_from_head(remote,remote.name)
files=list();filesizes=list()
fplist=list()
for k in range(len(files_list)):
entry=files_list[k]
fplist.append(entry[0])
if target_cnmt==None:
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca'):
target_cnmt=cnmtfile
break
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca') and target_cnmt==cnmtfile:
metadict,d1,d2=DriveTools.get_cnmt_data(target=cnmtfile,file=remote)
ncadata=metadict['ncadata']
content_type=metadict['ctype']
if content_type!="DLC":
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']!='Meta' and row['NCAtype']!='Program' and row['NCAtype']!='DeltaFragment':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']=='Meta':
# print(str(row['NcaId'])+'.cnmt.nca')
files.append(str(row['NcaId'])+'.cnmt.nca')
filesizes.append(int(row['Size']))
for j in range(len(ncadata)):
row=ncadata[j]
# print(row)
if row['NCAtype']=='Program':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
else:
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']!='Meta' and row['NCAtype']!='Data':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']=='Meta':
# print(str(row['NcaId'])+'.cnmt.nca')
files.append(str(row['NcaId'])+'.cnmt.nca')
filesizes.append(int(row['Size']))
for j in range(len(ncadata)):
row=ncadata[j]
# print(row)
if row['NCAtype']=='Data':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
break
remote.rewind()
outheader = sq_tools.gen_nsp_header(files,filesizes)
properheadsize=len(outheader)
# print(properheadsize)
# print(bucketsize)
i=0;sum=properheadsize;
outfile=os.path.join(cachefolder, "0")
outf = open(outfile, 'w+b')
outf.write(outheader)
written=0
nca_program=''
for fi in files:
if fi.endswith('nca') or fi.endswith('ncz') :
for i in range(len(files_list)):
if str(files_list[i][0]).lower() == str(fi).lower():
nca_name=files_list[i][0]
off1=files_list[i][1]
off2=files_list[i][2]
nca_size=files_list[i][3]
break
ncaHeader = NcaHeader()
ncaHeader.open(MemoryFile(remote.read_at(off1,0x400), FsType.Crypto.XTS, uhx(Keys.get('header_key'))))
crypto1=ncaHeader.getCryptoType()
crypto2=ncaHeader.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), ncaHeader.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=ncaHeader.getCryptoType()
crypto2=ncaHeader.getCryptoType2()
if ncaHeader.getRightsId() != 0:
ncaHeader.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
rightsId=metadict['rightsId']
titleKeyDec = DriveTools.get_titlekey(remote,rightsId,masterKeyRev,files_list=files_list)
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < ncaHeader.getCryptoType2():
encKeyBlock,crypto1,crypto2=get_new_cryptoblock(ncaHeader,keypatch,encKeyBlock,t)
t.close()
if ncaHeader.getRightsId() == 0:
ncaHeader.rewind()
encKeyBlock = ncaHeader.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < ncaHeader.getCryptoType2():
encKeyBlock,crypto1,crypto2=get_new_cryptoblock(ncaHeader,keypatch,encKeyBlock,t)
t.close()
ncaHeader.rewind()
i=0
newheader=get_newheader(MemoryFile(remote.read_at(off1,0xC00)),encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
if content_type!="DLC":
if (str(ncaHeader.contentType) != 'Content.PROGRAM'):
nca = Nca()
nca.open(MemoryFile(remote.read_at(off1,nca_size)))
nca.seek(0xC00)
data=nca.read()
outf.write(data)
written+=len(data)
# print(nca_name)
# print(len(newheader)+len(data))
else:
nca_program=nca_name
# print(nca_name)
# print(len(newheader))
else:
if (str(ncaHeader.contentType) != 'Content.PUBLIC_DATA'):
nca = Nca()
nca.open(MemoryFile(remote.read_at(off1,nca_size)))
nca.seek(0xC00)
data=nca.read()
outf.write(data)
written+=len(data)
# print(nca_name)
# print(len(newheader)+len(data))
else:
nca_program=nca_name
# print(nca_name)
# print(len(newheader))
else:pass
outf.flush()
outf.close()
tfile=os.path.join(cachefolder, "remote_files.csv")
with open(tfile,'w') as csvfile:
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format("step","filepath","size","targetsize","off1","off2","token"))
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format(0,outfile,os.path.getsize(outfile),os.path.getsize(outfile),0,os.path.getsize(outfile),"False"))
k=0;
for j in files_list:
if j[0]==nca_program:
# print(j[0])
off1=j[1]+0xC00
off2=j[2]
targetsize=j[3]-0xC00
URL='https://www.googleapis.com/drive/v3/files/'+remote.ID+'?alt=media'
token=remote.access_token
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format(k+1,URL,remote.size,targetsize,off1,off2,token))
k+=1
break
nspname="test.nsp"
try:
g=remote.name
g0=[pos for pos, char in enumerate(g) if char == '[']
g0=(g[0:g0[0]]).strip()
titleid=metadict['titleid']
titleversion=metadict['version']
ctype=metadict['ctype']
nspname=f"{g0} [{titleid}] [v{titleversion}] [{ctype}].nsp"
except BaseException as e:
Print.error('Exception: ' + str(e))
pass
return nspname
def gen_xci_parts_spec1(filepath=None,remote=None,target_cnmt=None,cachefolder=None,keypatch=False,files_list=None):
if filepath=="":
filepath=None
if remote=="":
remote=None
if remote==None:
test=filepath.split('|');TD=None
if len(test)<2:
filepath=test[0]
lib,TD,libpath=get_library_from_path(remote_lib_file,filepath)
else:
filepath=test[0]
TD=test[1]
if str(TD).upper()=="NONE":
TD=None
ID,name,type,size,md5,remote=DrivePrivate.get_Data(filepath,TD=TD,Print=False)
if keypatch!=False:
try:
keypatch=int(keypatch)
except: keypatch=False
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
else:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
if files_list==None:
files_list=DriveTools.get_files_from_head(remote,remote.name)
files=list();filesizes=list()
fplist=list()
for k in range(len(files_list)):
entry=files_list[k]
fplist.append(entry[0])
if target_cnmt==None:
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca'):
target_cnmt=cnmtfile
break
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca') and target_cnmt==cnmtfile:
metadict,d1,d2=DriveTools.get_cnmt_data(target=cnmtfile,file=remote)
ncadata=metadict['ncadata']
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']!='Meta' and row['NCAtype']!='Program':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
for j in range(len(ncadata)):
row=ncadata[j]
if row['NCAtype']=='Meta':
# print(str(row['NcaId'])+'.cnmt.nca')
files.append(str(row['NcaId'])+'.cnmt.nca')
filesizes.append(int(row['Size']))
for j in range(len(ncadata)):
row=ncadata[j]
# print(row)
if row['NCAtype']=='Program':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist:
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
elif test2 in fplist:
files.append(str(row['NcaId'])+'.ncz')
for k in range(len(files_list)):
entry=files_list[k]
if entry[0]==test2:
filesizes.append(int(entry[3]))
break
break
remote.rewind()
outheader = sq_tools.gen_nsp_header(files,filesizes)
properheadsize=len(outheader)
# print(properheadsize)
# print(bucketsize)
i=0;sum=properheadsize;
outfile=os.path.join(cachefolder, "0")
outf = open(outfile, 'w+b')
outf.write(outheader)
written=0
for fi in files:
if fi.endswith('nca') or fi.endswith('ncz') :
for i in range(len(files_list)):
if str(files_list[i][0]).lower() == str(fi).lower():
nca_name=files_list[i][0]
off1=files_list[i][1]
off2=files_list[i][2]
nca_size=files_list[i][3]
break
data=remote.read_at(off1,nca_size)
ncaHeader = NcaHeader()
ncaHeader.open(MemoryFile(remote.read_at(off1,0x400), FsType.Crypto.XTS, uhx(Keys.get('header_key'))))
crypto1=ncaHeader.getCryptoType()
crypto2=ncaHeader.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), ncaHeader.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=ncaHeader.getCryptoType()
crypto2=ncaHeader.getCryptoType2()
if ncaHeader.getRightsId() != 0:
ncaHeader.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
				rightsId=metadict['rightsId']
				titleKeyDec = DriveTools.get_titlekey(remote,rightsId,masterKeyRev,files_list=files_list)
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < ncaHeader.getCryptoType2():
encKeyBlock,crypto1,crypto2=get_new_cryptoblock(ncaHeader,keypatch,encKeyBlock,t)
t.close()
if ncaHeader.getRightsId() == 0:
ncaHeader.rewind()
encKeyBlock = ncaHeader.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < ncaHeader.getCryptoType2():
encKeyBlock,crypto1,crypto2=get_new_cryptoblock(ncaHeader,keypatch,encKeyBlock,t)
t.close()
ncaHeader.rewind()
i=0
newheader=get_newheader(MemoryFile(remote.read_at(off1,0xC00)),encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
break
else:pass
outf.flush()
outf.close()
tfile=os.path.join(cachefolder, "remote_files.csv")
with open(tfile,'w') as csvfile:
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format("step","filepath","size","targetsize","off1","off2","token"))
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format(0,outfile,properheadsize+written,properheadsize,0,properheadsize,"False"))
k=0;l=0
for fi in files:
for j in files_list:
if j[0]==fi:
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format(k+1,outfile,properheadsize+written,0xC00,(properheadsize+l*0xC00),(properheadsize+(l*0xC00)+0xC00),"False"))
off1=j[1]+0xC00
off2=j[2]
targetsize=j[3]-0xC00
URL='https://www.googleapis.com/drive/v3/files/'+remote.ID+'?alt=media'
token=remote.access_token
csvfile.write("{}|{}|{}|{}|{}|{}|{}\n".format(k+2,URL,remote.size,targetsize,off1,off2,token))
break
k+=2;l+=1
nspname="test.nsp"
try:
g=remote.name
g0=[pos for pos, char in enumerate(g) if char == '[']
g0=(g[0:g0[0]]).strip()
titleid=metadict['titleid']
titleversion=metadict['version']
ctype=metadict['ctype']
nspname=f"{g0} [{titleid}] [v{titleversion}] [{ctype}].nsp"
except:pass
return nspname
def get_new_cryptoblock(ncaHeader, newMasterKeyRev,encKeyBlock,t):
indent = 1
tabs = '\t' * indent
indent2 = 2
tabs2 = '\t' * indent2
masterKeyRev = ncaHeader.getCryptoType2()
	if isinstance(ncaHeader, NcaHeader):
if ncaHeader.getCryptoType2() != newMasterKeyRev:
t.write(tabs + '-----------------------------------')
t.write(tabs + 'Changing keygeneration from %d to %s' % ( ncaHeader.getCryptoType2(), str(newMasterKeyRev)))
t.write(tabs + '-----------------------------------')
if sum(encKeyBlock) != 0:
key = Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev),ncaHeader.keyIndex)
t.write(tabs2 + '+ decrypting with %s (%d, %d)' % (str(hx(key)), Keys.getMasterKeyIndex(masterKeyRev), ncaHeader.keyIndex))
crypto = aes128.AESECB(key)
decKeyBlock = crypto.decrypt(encKeyBlock)
key = Keys.keyAreaKey(Keys.getMasterKeyIndex(newMasterKeyRev),ncaHeader.keyIndex)
t.write(tabs2 + '+ encrypting with %s (%d, %d)' % (str(hx(key)), Keys.getMasterKeyIndex(newMasterKeyRev), ncaHeader.keyIndex))
crypto = aes128.AESECB(key)
reEncKeyBlock = crypto.encrypt(decKeyBlock)
encKeyBlock = reEncKeyBlock
if newMasterKeyRev >= 3:
crypto1=2
crypto2=newMasterKeyRev
if newMasterKeyRev == 2:
crypto1=2
crypto2=0
if newMasterKeyRev < 2:
crypto1=newMasterKeyRev
crypto2=0
return encKeyBlock,crypto1,crypto2
return encKeyBlock,ncaHeader.getCryptoType(),ncaHeader.getCryptoType2()
def get_newheader(ncaHeader,encKeyBlock,crypto1,crypto2,hcrypto,gc_flag):
ncaHeader.rewind()
rawhead=ncaHeader.read(0xC00)
rawhead=hcrypto.decrypt(rawhead)
header = b''
header += rawhead[0x00:0x00+0x204]
#isgamecard 0x204
GC=bytes.fromhex(gc_flag)
header += GC
#contentType 0x205
header += rawhead[0x205:0x206]
#crypto 1 0x206
c1=crypto1.to_bytes(1, byteorder='big')
header += c1
#########
header += rawhead[0x207:0x220]
#crypto 1 0x220
c2=crypto2.to_bytes(1, byteorder='big')
header += c2
#########
header += rawhead[0x221:0x230]
tr='00'*0x10
tr=bytes.fromhex(tr)
header += tr
header += rawhead[0x240:0x240+0xC0]
header += encKeyBlock
header += rawhead[0x340:]
newheader=hcrypto.encrypt(header)
return newheader
| 33.645066 | 160 | 0.660495 |
4a19b9af52382aa1f22954d36a80f6805f492a64
| 515 |
py
|
Python
|
pagarmecoreapi/decorators.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 6 |
2021-09-02T19:55:04.000Z
|
2022-03-16T14:06:15.000Z
|
pagarmecoreapi/decorators.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 2 |
2021-10-11T22:48:15.000Z
|
2022-01-24T18:24:23.000Z
|
pagarmecoreapi/decorators.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 2 |
2021-09-12T21:43:32.000Z
|
2022-03-07T16:58:54.000Z
|
# -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class lazy_property(object):
"""A decorator class for lazy instantiation."""
def __init__(self, fget):
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return None
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
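# Illustrative usage sketch, not part of the generated SDK: the example class
# and attribute below are assumptions for demonstration only. On first access
# the decorated method runs once; its result then replaces the descriptor via
# setattr, so later reads hit the cached instance attribute directly.
if __name__ == '__main__':
    class _ExampleConfig(object):
        @lazy_property
        def settings(self):
            print("computing settings once")
            return {'timeout': 30}
    cfg = _ExampleConfig()
    print(cfg.settings)  # runs the method, prints the marker, caches the dict
    print(cfg.settings)  # served from the cached attribute, no recomputation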
| 22.391304 | 84 | 0.574757 |
4a19b9be63a6369bd9006da2c6f4af951d23b4b2
| 7,482 |
py
|
Python
|
youtube_dl/extractor/tv2.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 3,001 |
2020-10-24T05:24:18.000Z
|
2022-03-31T06:45:32.000Z
|
youtube_dl/extractor/tv2.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 274 |
2020-10-24T04:57:21.000Z
|
2022-03-22T01:34:56.000Z
|
youtube_dl/extractor/tv2.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 541 |
2020-10-24T03:32:09.000Z
|
2022-01-12T23:49:30.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
float_or_none,
js_to_json,
parse_iso8601,
remove_end,
strip_or_none,
try_get,
)
class TV2IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tv2\.no/v/(?P<id>\d+)'
_TEST = {
'url': 'http://www.tv2.no/v/916509/',
'info_dict': {
'id': '916509',
'ext': 'flv',
'title': 'Se Frode Gryttens hyllest av Steven Gerrard',
'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.',
'timestamp': 1431715610,
'upload_date': '20150515',
'duration': 156.967,
'view_count': int,
'categories': list,
},
}
_API_DOMAIN = 'sumo.tv2.no'
_PROTOCOLS = ('HDS', 'HLS', 'DASH')
_GEO_COUNTRIES = ['NO']
def _real_extract(self, url):
video_id = self._match_id(url)
api_base = 'http://%s/api/web/asset/%s' % (self._API_DOMAIN, video_id)
formats = []
format_urls = []
for protocol in self._PROTOCOLS:
try:
data = self._download_json(
api_base + '/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % protocol,
video_id, 'Downloading play JSON')['playback']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
error = self._parse_json(e.cause.read().decode(), video_id)['error']
error_code = error.get('code')
if error_code == 'ASSET_PLAYBACK_INVALID_GEO_LOCATION':
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
elif error_code == 'SESSION_NOT_AUTHENTICATED':
self.raise_login_required()
raise ExtractorError(error['description'])
raise
items = try_get(data, lambda x: x['items']['item'])
if not items:
continue
if not isinstance(items, list):
items = [items]
for item in items:
if not isinstance(item, dict):
continue
video_url = item.get('url')
if not video_url or video_url in format_urls:
continue
format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat'))
if not self._is_valid_url(video_url, video_id, format_id):
continue
format_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id=format_id, fatal=False))
elif ext == 'm3u8':
if not data.get('drmProtected'):
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
video_url, video_id, format_id, fatal=False))
elif ext == 'ism' or video_url.endswith('.ism/Manifest'):
pass
else:
formats.append({
'url': video_url,
'format_id': format_id,
'tbr': int_or_none(item.get('bitrate')),
'filesize': int_or_none(item.get('fileSize')),
})
if not formats and data.get('drmProtected'):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
asset = self._download_json(
api_base + '.json', video_id,
'Downloading metadata JSON')['asset']
title = asset['title']
thumbnails = [{
'id': thumbnail.get('@type'),
'url': thumbnail.get('url'),
} for _, thumbnail in (asset.get('imageVersions') or {}).items()]
return {
'id': video_id,
'url': video_url,
'title': title,
'description': strip_or_none(asset.get('description')),
'thumbnails': thumbnails,
'timestamp': parse_iso8601(asset.get('createTime')),
'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')),
'view_count': int_or_none(asset.get('views')),
'categories': asset.get('keywords', '').split(','),
'formats': formats,
}
class TV2ArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tv2\.no/(?:a|\d{4}/\d{2}/\d{2}(/[^/]+)+)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542',
'info_dict': {
'id': '6930542',
'title': 'Russen hetses etter pingvintyveri - innrømmer å ha åpnet luken på buret',
'description': 'De fire siktede nekter fortsatt for å ha stjålet pingvinbabyene, men innrømmer å ha åpnet luken til de små kyllingene.',
},
'playlist_count': 2,
}, {
'url': 'http://www.tv2.no/a/6930542',
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
# Old embed pattern (looks unused nowadays)
assets = re.findall(r'data-assetid=["\'](\d+)', webpage)
if not assets:
# New embed pattern
for v in re.findall(r'(?s)TV2ContentboxVideo\(({.+?})\)', webpage):
video = self._parse_json(
v, playlist_id, transform_source=js_to_json, fatal=False)
if not video:
continue
asset = video.get('assetId')
if asset:
assets.append(asset)
entries = [
self.url_result('http://www.tv2.no/v/%s' % asset_id, 'TV2')
for asset_id in assets]
title = remove_end(self._og_search_title(webpage), ' - TV2.no')
description = remove_end(self._og_search_description(webpage), ' - TV2.no')
return self.playlist_result(entries, playlist_id, title, description)
class KatsomoIE(TV2IE):
_VALID_URL = r'https?://(?:www\.)?(?:katsomo|mtv)\.fi/(?:#!/)?(?:[^/]+/[0-9a-z-]+-\d+/[0-9a-z-]+-|[^/]+/\d+/[^/]+/)(?P<id>\d+)'
_TEST = {
'url': 'https://www.mtv.fi/sarja/mtv-uutiset-live-33001002003/lahden-pelicans-teki-kovan-ratkaisun-ville-nieminen-pihalle-1181321',
'info_dict': {
'id': '1181321',
'ext': 'mp4',
'title': 'MTV Uutiset Live',
'description': 'Päätöksen teki Pelicansin hallitus.',
'timestamp': 1575116484,
'upload_date': '20191130',
'duration': 37.12,
'view_count': int,
'categories': list,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
_API_DOMAIN = 'api.katsomo.fi'
_PROTOCOLS = ('HLS', 'MPD')
_GEO_COUNTRIES = ['FI']
| 38.766839 | 148 | 0.528067 |
4a19bb0c6b615a904dfc5d37069be87d1ce91eeb
| 1,102 |
py
|
Python
|
examples/video_channel_create.py
|
Dawolee/jwplatform-py
|
0a0c69e9c6a2eece9b0392f2e9ba5d8cc24675a4
|
[
"MIT"
] | 2 |
2020-03-09T21:59:37.000Z
|
2020-09-15T10:24:27.000Z
|
examples/video_channel_create.py
|
Dawolee/jwplatform-py
|
0a0c69e9c6a2eece9b0392f2e9ba5d8cc24675a4
|
[
"MIT"
] | null | null | null |
examples/video_channel_create.py
|
Dawolee/jwplatform-py
|
0a0c69e9c6a2eece9b0392f2e9ba5d8cc24675a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
import jwplatform
def create_channel(api_key, api_secret, channel_type='manual', **kwargs):
"""
Function which creates a new channel. Channels serve as containers of video/media objects.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param channel_type: <string> REQUIRED Acceptable values include 'manual','dynamic','trending','feed','search'
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/channels/create.html
:return: <dict> Dict which represents the JSON response.
"""
jwplatform_client = jwplatform.Client(api_key, api_secret)
logging.info("Creating new channel with keyword args.")
try:
response = jwplatform_client.channels.create(type=channel_type, **kwargs)
except jwplatform.errors.JWPlatformError as e:
logging.error("Encountered an error creating new channel.\n{}".format(e))
sys.exit(e.message)
return response
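# Hedged usage sketch: the credentials and channel title below are placeholder
# values (assumptions for illustration); the extra kwarg simply passes through
# to the channels/create parameters referenced in the docstring above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    result = create_channel(
        api_key='REPLACE_WITH_API_KEY',        # placeholder credential
        api_secret='REPLACE_WITH_API_SECRET',  # placeholder credential
        channel_type='manual',
        title='Example channel'                # hypothetical channel metadata kwarg
    )
    logging.info("Channel create response: %s", result)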
| 39.357143 | 145 | 0.727768 |
4a19bbbe580950f00b55f2ba0d3ad03df7b460c2
| 5,186 |
py
|
Python
|
aalh_iit_industry_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_industry_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_industry_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_industry_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 464
iterationrow = 7
targetcol = 9
placecol = 13
subjectholland = 'Images in time photographic collection. (Toledo Lucas County Public Library); Holland (Ohio). History. Photographs.'
subjectwaterville = 'Images in time photographic collection. (Toledo Lucas County Public Library); Waterville (Ohio). History. Photographs.'
subjectoregon = 'Images in time photographic collection. (Toledo Lucas County Public Library); Oregon (Ohio). History. Photographs.'
subjectmaumee = 'Images in time photographic collection. (Toledo Lucas County Public Library); Maumee (Ohio). History. Photographs.'
subjectsylvania = 'Images in time photographic collection. (Toledo Lucas County Public Library); Sylvania (Ohio). History. Photographs.'
subjecttoledo = 'Images in time photographic collection. (Toledo Lucas County Public Library); Toledo (Ohio). History. Photographs.'
subjectnonlucascounty = 'Images in time photographic collection. (Toledo Lucas County Public Library)'
semicolonspace = '; '
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=targetcol).value
placevar = ws.cell(row=iterationrow, column=placecol).value
if testvar == None:
if placevar == None:
ws.cell(row=iterationrow, column=targetcol).value = subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Toledo (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjecttoledo
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Sylvania (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectsylvania
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Maumee (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectmaumee
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Oregon (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectoregon
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Waterville (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectwaterville
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Holland (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectholland
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
ws.cell(row=iterationrow, column=targetcol).value = subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
if placevar == None:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Toledo') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjecttoledo
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Sylvania (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectsylvania
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Maumee (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectmaumee
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Oregon (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectoregon
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Waterville (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectwaterville
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Holland (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectholland
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_industry_001.xlsx')
| 64.024691 | 141 | 0.655226 |
4a19bc08629df13f806b1003649588b63064da13
| 566 |
py
|
Python
|
plotly/validators/splom/marker/colorbar/_ticks.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12 |
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/splom/marker/colorbar/_ticks.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27 |
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/splom/marker/colorbar/_ticks.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6 |
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='ticks',
parent_name='splom.marker.colorbar',
**kwargs
):
super(TicksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop('values', ['outside', 'inside', '']),
**kwargs
)
| 28.3 | 71 | 0.597173 |
4a19bc0ff47921cf68440da12d17d09e2706a5d1
| 5,627 |
py
|
Python
|
backtest/algos/algo_prometheus_v1.py
|
block1o1/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 4 |
2021-10-14T21:22:25.000Z
|
2022-03-12T19:58:48.000Z
|
backtest/algos/algo_prometheus_v1.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | null | null | null |
backtest/algos/algo_prometheus_v1.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 1 |
2022-03-15T22:52:53.000Z
|
2022-03-15T22:52:53.000Z
|
import json
import sys
sys.dont_write_bytecode = True
import numpy as np
import datetime
import random
import math
import core
def run(debug):
base = "BTC"
base = "ETH"
#base = "LTC"
quote = "USDT"
historymins = 60*24*30*1 #60*24*30*4
interval = 60
dtend = datetime.datetime.strptime('2018-04-26 15:00', '%Y-%m-%d %H:%M')
# dtend = datetime.datetime.strptime('2018-05-17 12:00', '%Y-%m-%d %H:%M')
dtstart = dtend - datetime.timedelta(minutes=historymins)
inp = core.getPriceExchange_v1('binance', interval, base, quote, historymins, dtend)
#inp = json.load(open('misc/json_BTCUSDT_60min.json'))
uncertainty_margin = 0.001
def sig(prev_len, prevPrice, price):
multiplier = (2 / float(1 + prev_len))
v = price*multiplier + prevPrice*(1-multiplier)
return v
def normalize(arr):
a = arr[:]
mi = min(a)
ma = max(a)
if (ma-mi) == 0: return [0.0]
for i,v in enumerate(a):
a[i] = (a[i]-mi)/(ma-mi)
return a
def work(_1, _2, _3):
portfolio = {}
dtit = dtstart
traceD = core.createNewScatterTrace("traceD", "y2")
usage = {
'canBuy': True,
'canSell': False,
'buyPrice': None,
'sellPrice': None,
'prevPrice': None,
}
bucket=[]
bucketstd = []
while dtit <= dtend:
idx = datetime.datetime.strftime(dtit, '%Y-%m-%dT%H:%M')
if idx in inp:
c = inp[idx]['close']
o = inp[idx]['open']
l = inp[idx]['low']
h = inp[idx]['high']
price = (o+c+l+h)/4 # ok
#price = c # ok
#price = o + (c-o)*random.randint(0,10)/10 # ok
#price = random.uniform(o, c) if c > o else random.uniform(c, o)
price = random.uniform(l, h) # reality
core.portfolioPriceEntry(portfolio, dtit, price, o, c, l, h)
def buyF():
if len(traceD['y']) < 2:
return False
if traceD['y'][-2] == 1 and traceD['y'][-1] == -1:
return True
#if traceD['y'][-1] == 1 and traceD['y'][-2] == -1:
# return True
def sellF():
if price > usage['buyPrice']*_1:
return True
if price < usage['buyPrice']*_2:
return True
if len(bucket) > 2:
pD = np.average(bucket[-_3:])
pD = 1 if pD > 1 else -1
core.addToScatterTrace(traceD, dtit, pD)
if usage['canBuy'] and buyF():
core.portfolioBuy(portfolio, dtit, price, uncertainty_margin)
usage['canSell'] = True
usage['canBuy'] = False
usage['buyPrice'] = price
elif usage['canSell'] and sellF():
core.portfolioSell(portfolio, dtit, price, uncertainty_margin)
usage['canSell'] = False
usage['canBuy'] = True
usage['sellPrice'] = price
usage['countSinceSell'] = 0
if usage['prevPrice'] != None:
bucket.append( price/ usage['prevPrice'] )
usage['prevPrice'] = price
dtit += datetime.timedelta(minutes=interval)
proc = core.processPortfolio(portfolio, 1)
return (proc, portfolio, [traceD ])
if debug == 0: # computing ROI
A = 1.03
B = 0.96
C = 16
avgs = []
for x in range(100):
(proc, portfolio, traces) = work(A,B,C)
print("%s ROI \t %f" % (str(x), proc['_']['ROI%']))
avgs.append(proc['_']['ROI%'])
print("avg ROI%: " + str(sum(avgs)/len(avgs)))
std = np.std(avgs)
print("std ROI%: " + str(std))
elif debug == 1: # brute-force searching for optimal parameters (A,B,C,D)
dct = {}
for A in [1+x/100 for x in range(1, 6)]:
for B in [0.95+x/100 for x in range(0, 5)]:
for C in range(1, 20):
avgs = []
for x in range(20):
(proc, portfolio, traces) = work(A,B,C)
#print("%s ROI \t %f" % (str(x), proc['_']['ROI%']))
avgs.append(proc['_']['ROI%'])
print("%f %f %f" % (A,B,C))
print("avg ROI%: " + str(sum(avgs)/len(avgs)))
std = np.std(avgs)
print("std ROI%: " + str(std))
if not str(sum(avgs)/len(avgs)) in dct:
dct [ str(sum(avgs)/len(avgs)) ] = str(A)+"_"+str(B)+"_"+str(C)
print("--------")
print(base)
print("--------")
print(json.dumps(dct))
print("--------")
print(base)
else: # computing and plotting out
# A = 1.02
# B = 0.98
# C = 9
A = 1.03
B = 0.97
C = 16
(proc, portfolio, traces) = work(A, B, C)
print("ROI: (%f %f %i) %f" % (A, B, C, proc['_']['ROI%']))
core.portfolioToChart_OHLC(portfolio, traces)
if __name__ == '__main__':
debug = 0
run(debug)
| 32.526012 | 90 | 0.435756 |
4a19bdd90d28bc297f4aa11e8f790a7f83d5dede
| 1,478 |
py
|
Python
|
docs/src/automobile.py
|
VictorOmondi1997/Automobile
|
4ab74b5607ad275073d9c717748accff511f8670
|
[
"Apache-2.0"
] | null | null | null |
docs/src/automobile.py
|
VictorOmondi1997/Automobile
|
4ab74b5607ad275073d9c717748accff511f8670
|
[
"Apache-2.0"
] | null | null | null |
docs/src/automobile.py
|
VictorOmondi1997/Automobile
|
4ab74b5607ad275073d9c717748accff511f8670
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 13:10:41 2019
@author: Victor Omondi
"""
class Automobile:
def __init__(self, vehicle_id, make, model, color, year, mileage):
self.vehicle_id = vehicle_id
        self.__make = make
self.__model = model
self.__color = color
self.__year = year
self.__mileage = mileage
def set_make(self, make):
self.__make = make
def set_model(self, model):
self.__model = model
def set_color(self, color):
self.__color = color
def set_year(self, year):
self.__year = year
def set_mileage(self, mileage):
self.__mileage = mileage
def get_make(self):
return self.__make
def get_model(self):
return self.__model
def get_color(self):
return self.__color
def get_year(self):
return self.__year
def get_mileage(self):
return self.__mileage
def __str__(self):
out_str = "Make:" + self.get_make() + " Color:" + self.get_color() + " Model:" + self.get_model() + " Year: " + str(self.get_year()) + " Mileage:" + str(self.get_mileage())
return out_str
def __repr__(self):
out_str = "Make:" + self.get_make() + " Color:" + self.get_color() + " Model:" + self.get_model() + " Year: " + str(self.get_year()) + " Mileage:" + str(self.get_mileage())
return out_str
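# Brief usage sketch with made-up example values: constructs an Automobile,
# updates one field through its setter, and prints it via __str__.
if __name__ == '__main__':
    car = Automobile(1, "Toyota", "Corolla", "Blue", 2015, 42000)
    car.set_mileage(43500)  # setters update the name-mangled private attributes
    print(car)              # __str__ reports make, color, model, year and mileage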
| 26.872727 | 181 | 0.562923 |
4a19bdfd97e1bb6313d9cb3b4360ca34a65cebf4
| 3,321 |
py
|
Python
|
bounty-program/stellar-docs/python/claimable-balance.py
|
apaldiwal/stellar-quest
|
14f12dd38d837008c4181e61295c19ab6d1bfd96
|
[
"MIT"
] | 1 |
2021-07-19T17:55:41.000Z
|
2021-07-19T17:55:41.000Z
|
bounty-program/stellar-docs/python/claimable-balance.py
|
apaldiwal/stellar-quest
|
14f12dd38d837008c4181e61295c19ab6d1bfd96
|
[
"MIT"
] | null | null | null |
bounty-program/stellar-docs/python/claimable-balance.py
|
apaldiwal/stellar-quest
|
14f12dd38d837008c4181e61295c19ab6d1bfd96
|
[
"MIT"
] | null | null | null |
import time
from stellar_sdk.xdr import TransactionResult, OperationType
from stellar_sdk.exceptions import NotFoundError, BadResponseError, BadRequestError
from stellar_sdk import (
Keypair,
Network,
Server,
TransactionBuilder,
Transaction,
Asset,
Operation,
Claimant,
ClaimPredicate,
CreateClaimableBalance,
ClaimClaimableBalance
)
server = Server("https://horizon-testnet.stellar.org")
A = Keypair.from_secret("SBDLSAHWZHQZG6ZQDJY63XQORETH5X5Q5BM66JUW4E6S7CVHXGX373GO")
B = Keypair.from_public_key("GBS6YWU5NAFZFZTRYMVVRBGE4IREF67AYVC3EFYMKS5NZVCHX2NXFB6L")
# NOTE: Proper error checks are omitted for brevity; always validate things!
try:
aAccount = server.load_account(A.public_key)
except NotFoundError:
raise Exception(f"Failed to load {A.public_key}")
# Create a claimable balance with our two above-described conditions.
soon = int(time.time() + 60)
bCanClaim = ClaimPredicate.predicate_before_relative_time(60)
aCanClaim = ClaimPredicate.predicate_not(
ClaimPredicate.predicate_before_absolute_time(soon)
)
# Create the operation and submit it in a transaction.
claimableBalanceEntry = CreateClaimableBalance(
asset = Asset.native(),
amount = "420",
claimants = [
Claimant(destination = B.public_key, predicate = bCanClaim),
Claimant(destination = A.public_key, predicate = aCanClaim)
]
)
tx = (
TransactionBuilder (
source_account = aAccount,
network_passphrase = Network.TESTNET_NETWORK_PASSPHRASE,
base_fee = server.fetch_base_fee()
)
.append_operation(claimableBalanceEntry)
.set_timeout(180)
.build()
)
tx.sign(A)
try:
txResponse = server.submit_transaction(tx)
print("Claimable balance created!")
except (BadRequestError, BadResponseError) as err:
print(f"Tx submission failed: {err}")
# Method 1: Not available in the Python SDK yet.
# Method 2: Suppose `txResponse` comes from the transaction submission
# above.
txResult = TransactionResult.from_xdr(txResponse["result_xdr"])
results = txResult.result.results
# We look at the first result since our first (and only) operation
# in the transaction was the CreateClaimableBalanceOp.
operationResult = results[0].tr.create_claimable_balance_result
balanceId = operationResult.balance_id.to_xdr_bytes().hex()
print(f"Balance ID (2): {balanceId}")
# Method 3: Account B could alternatively do something like:
try:
balances = (
server
.claimable_balances()
.for_claimant(B.public_key)
.limit(1)
.order(desc = True)
.call()
)
except (BadRequestError, BadResponseError) as err:
print(f"Claimable balance retrieval failed: {err}")
balanceId = balances["_embedded"]["records"][0]["id"]
print(f"Balance ID (3): {balanceId}")
claimBalance = ClaimClaimableBalance(balance_id = balanceId)
print(f"{A.public_key} claiming {balanceId}")
tx = (
TransactionBuilder (
source_account = aAccount,
network_passphrase = Network.TESTNET_NETWORK_PASSPHRASE,
base_fee = server.fetch_base_fee()
)
.append_operation(claimBalance)
.set_timeout(180)
.build()
)
tx.sign(A)
try:
txResponse = server.submit_transaction(tx)
except (BadRequestError, BadResponseError) as err:
print(f"Tx submission failed: {err}")
| 29.389381 | 87 | 0.733815 |
4a19be00bd5e873191ac7f07cdbae80bc9aa726f
| 7,373 |
py
|
Python
|
instagram/views.py
|
fkinyae/kins-stage
|
f924e5a0393d38ed2976ac4a1dec00b97380a559
|
[
"MIT"
] | null | null | null |
instagram/views.py
|
fkinyae/kins-stage
|
f924e5a0393d38ed2976ac4a1dec00b97380a559
|
[
"MIT"
] | null | null | null |
instagram/views.py
|
fkinyae/kins-stage
|
f924e5a0393d38ed2976ac4a1dec00b97380a559
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from .models import Image, Profile, Follow
from .forms import UploadForm, ProfileForm, UpdateUserForm, UpdateUserProfileForm, CommentForm, UserSignUpForm
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.urls import reverse, reverse_lazy
from .email import send_welcome_email
from .token_generator import account_activation_token
def usersignup(request):
if request.method == 'POST':
form = UserSignUpForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
uidb64 = urlsafe_base64_encode(force_bytes(user.pk))
domain = get_current_site(request).domain
link=reverse('activate',kwargs={'uidb64':uidb64,'token':account_activation_token.make_token(user)})
activate_url='http://'+domain+link
email_body='Hi ' +user.username+ ' Please use this link to verify your account\n' +activate_url
email_subject = 'Activate Your Account'
to_email = form.cleaned_data.get('email')
email = EmailMessage(email_subject, email_body, 'francis.kinyae@student.moringaschool.com',[to_email])
email.send()
return render(request, "email/invitation.html")
else:
form = UserSignUpForm()
return render(request, 'registration/signup.html', {'form': form})
def activate_account(request, uidb64, token):
try:
        uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
return render(request, "email/welcomeemail.html")
else:
return render(request, "email/invalid.html")
# Create your views here.
@login_required(login_url='/accounts/login/')
def index(request):
images = Image.images()
users = User.objects.exclude(id=request.user.id)
return render(request,"index.html", {"images":images[::1],"users":users})
def post(request):
if request.method == 'POST':
form = UploadForm(request.POST,request.FILES)
print(form.errors)
if form.is_valid():
post = form.save(commit=False)
post.user = request.user.profile
post.save()
return redirect('index')
else:
form = UploadForm()
return render(request, 'post_image.html', {"form":form})
@login_required(login_url='/accounts/login/')
def profile(request, username):
images = request.user.profile.images.all()
print(images)
if request.method == 'POST':
user_form = UpdateUserForm(request.POST, instance=request.user)
prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
if user_form.is_valid() and prof_form.is_valid():
user_form.save()
prof_form.save()
return HttpResponseRedirect(request.path_info)
else:
user_form = UpdateUserForm(instance=request.user)
prof_form = UpdateUserProfileForm(instance=request.user.profile)
params = {
'user_form': user_form,
'prof_form': prof_form,
}
return render(request, 'profile.html', params)
@login_required(login_url='/accounts/login/')
def update_profile(request):
if request.method == 'POST':
form = ProfileForm(request.POST,request.FILES)
print(form.errors)
if form.is_valid():
post = form.save(commit=False)
post.save()
return redirect('profile')
else:
form = UploadForm()
return render(request,'edit_profile.html',{"form":form})
@login_required(login_url='/accounts/login/')
def search_profile(request):
if 'search_user' in request.GET and request.GET['search_user']:
name = request.GET.get("search_user")
results = Profile.search_profile(name)
print(results)
        message = f'{name}'
params = {
'results': results,
'message': message
}
return render(request, 'results.html', params)
else:
message = "You did not make a selection"
return render(request, 'results.html', {'message': message})
@login_required(login_url='/accounts/login/')
def user_profile(request, username):
user_prof = get_object_or_404(User, username=username)
if request.user == user_prof:
return redirect('profile', username=request.user.username)
user_posts = user_prof.profile.images.all()
followers = Follow.objects.filter(followed=user_prof.profile)
follow_status = None
for follower in followers:
if request.user.profile == follower.follower:
follow_status = True
else:
follow_status = False
params = {
'user_prof': user_prof,
'user_posts': user_posts,
'followers': followers,
'follow_status': follow_status
}
return render(request, 'user_profile.html', params)
@login_required(login_url='/accounts/login/')
def follow(request, to_follow):
if request.method == 'GET':
user_three_profile = Profile.objects.get(pk=to_follow)
follow_s = Follow(follower=request.user.profile, followed=user_three_profile)
follow_s.save()
return redirect('user_profile', user_three_profile.user.username)
@login_required(login_url='/accounts/login/')
def unfollow(request, to_unfollow):
if request.method == 'GET':
user_two_profile = Profile.objects.get(pk=to_unfollow)
unfollow_d = Follow.objects.filter(follower=request.user.profile, followed=user_two_profile)
unfollow_d.delete()
return redirect('user_profile', user_two_profile.user.username)
@login_required(login_url='/accounts/login/')
def comment(request, id):
image = get_object_or_404(Image, pk=id)
comments = image.comment.all()
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.photo = image
comment.user = request.user.profile
comment.save()
return HttpResponseRedirect(request.path_info)
else:
form = CommentForm()
params = {
'image': image,
'form': form,
'comments':comments,
}
return render(request, 'post.html', params)
| 36.5 | 114 | 0.667435 |
4a19be9bc4b5f52b616b53f96459d44e158b3ac7
| 534 |
py
|
Python
|
hackerearth/Algorithms/Different queries/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4 |
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Different queries/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Different queries/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'5 3',
'1 2 3 4 5',
'1 3 4 2',
'2 1 2 3',
'1 4 5 -6',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '3 3 5 0 -1\n')
if __name__ == '__main__':
unittest.main()
| 22.25 | 62 | 0.597378 |
4a19bff2b554083729260726443698d084a607c4
| 16,306 |
py
|
Python
|
code/Test/TextROI.py
|
tpsatish95/OCR-on-Indus-Seals
|
3c66a1be11117297d75275c3b099b2f108860ecb
|
[
"Apache-2.0"
] | 2 |
2016-11-26T03:35:01.000Z
|
2022-01-06T08:50:03.000Z
|
code/Test/TextROI.py
|
dataarch/OCR-on-Indus-Seals
|
3c66a1be11117297d75275c3b099b2f108860ecb
|
[
"Apache-2.0"
] | null | null | null |
code/Test/TextROI.py
|
dataarch/OCR-on-Indus-Seals
|
3c66a1be11117297d75275c3b099b2f108860ecb
|
[
"Apache-2.0"
] | 1 |
2018-10-20T02:33:22.000Z
|
2018-10-20T02:33:22.000Z
|
# -*- coding: utf-8 -*-
import skimage.io
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import selectivesearch
import numpy as np
import skimage.transform
import os
import shutil
import caffe
from PIL import Image
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
text_cut_final = set()
def getClass(FileList):
caffe.set_mode_gpu()
classifier = caffe.Classifier("../ROIs_Indus/deploy.prototxt","../ROIs_Indus/Models/bvlc_googlenet_indusnet_iter_20000.caffemodel" ,
image_dims=[224,224], raw_scale=255.0, channel_swap = [2,1,0])
inputs = [caffe.io.load_image(im_f) for im_f in FileList]
print("Classifying %d inputs." % len(inputs))
predictions = classifier.predict(inputs)
return predictions
def texbox_ext():
global text
global both_text
global text_cut_final
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in both_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1']
aw = A['x1'] + A['w'] - B['x1']
overf = 1
# if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
if A['x1'] < B['x1']: # B is right to A
aw = B['x1']+B['w'] - A['x1']
overf = 1
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
				# REPLACE by Cohen-Sutherland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
text_cut_final = text_cut_final - both_text # CHANGE THIS LINE
def texbox_cut():
global no_text
no_text = no_text.union(both_text)
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in no_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1'] + B['w']
overf = 1
if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
ah = A['h'] - (A['y1']+A['h'] - B['y1'])
overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
# if A['x1'] < B['x1']: # B is right to A
# aw = A['w'] - (A['x1']+A['w'] - B['x1'])
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
				# REPLACE by Cohen-Sutherland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
def extend_text_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
def draw_textbox():
global width, height
thresh = ((width+height)/2)*(0.25)
tempc = set()
for x, y, w, h in text_boxes:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
f = 0
for x1, y1, w1, h1 in text_boxes:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
f = 1
if f == 0:
text.add((x, y, w, h))
text.add(extend_text_rect(temp))
def contains(p):  # p is a candidate box (x, y, w, h) checked for containment against existing candidates
x1, y1, w1, h1 = p
for x, y, w, h in candidates:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
return True
if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
candidates.remove((x, y, w, h))
return False
return False
def extend_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
def extend_superbox():
global width, height
thresh = ((width+height)/2)*(0.06)
tempc = set()
for x, y, w, h in final:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in final:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
final_extended.add(extend_rect(temp))
def draw_superbox(finals=[]):
noover = []
refinedT = []
global final
final = set()
# (x1,y1) top-left coord, (x2,y2) bottom-right coord, (w,h) size
if finals != []:
refinedT = finals
else:
refinedT = refined
remp = set(refinedT)
ref = list(refinedT)
while len(ref) > 0:
x1, y1, w1, h1 = ref[0]
if len(ref) == 1: # final box
final.add((x1, y1, w1, h1))
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
else:
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
over = set()
for x2, y2, w2, h2 in remp:
A = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
B = {'x1': x2, 'y1': y2, 'x2': x2+w2, 'y2': y2+h2, 'w': w2, 'h': h2}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overlap_A = float(SI) / float(SA)
overlap_B = float(SI) / float(SB)
# print(overlap_AB)
#
if overlap_A >= 0.40 or overlap_B >= 0.40:
over.add((B['x1'],B['y1'],B['w'],B['h']))
# print(len(over))
if len(over) != 0: #Overlap
remp = remp - over
for i in over: ref.remove(i)
over.add((A['x1'],A['y1'],A['w'],A['h']))
# print(over)
final.add((min([i[0] for i in over]), min([i[1] for i in over]), max([i[0]+i[2] for i in over]) - min([i[0] for i in over]), max([i[1]+i[3] for i in over]) - min([i[1] for i in over])))
# final.add((np.mean([i[0] for i in over]), np.mean([i[1] for i in over]), np.mean([i[2] for i in over]), np.mean([i[3] for i in over])))
noover.append(False)
else: #No overlap
final.add((x1,y1,w1,h1))
noover.append(True)
if all(noover):
return
else:
draw_superbox(final)
return
def contains_remove():
for x, y, w, h in merged_candidates:
f = False
temp = set(merged_candidates)
temp.remove((x, y, w, h))
for x1, y1, w1, h1 in temp:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
f = False
break
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
else:
f = True
if f == True:
refined.add((x, y, w, h))
# def contains_remove():
# for x, y, w, h in merged_candidates:
# temp = set(merged_candidates)
# temp.remove((x, y, w, h))
# test = []
# for x1, y1, w1, h1 in temp:
# A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
# B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# # overlap between A and B
# SA = A['w']*A['h']
# SB = B['w']*B['h']
# SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
# SU = SA + SB - SI
# overlap_AB = float(SI) / float(SU)
# if overlap_AB > 0.0:
# # if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
# test.append(False)
# else:
# test.append(True)
# else:
# test.append(True)
# if all(test):
# refined.add((x, y, w, h))
def mean_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[1]+i[3] for i in l]) - min([i[1] for i in l]))
def merge():
global width, height
thresh = int(((width+height)/2)*(0.14))
tempc = set()
for x, y, w, h in candidates:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in candidates:
if abs(x1-x) <= thresh and abs(y1-y) <= thresh and abs(w1-w) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
merged_candidates.add(mean_rect(temp))
contains_remove()
for name in os.listdir("./Images"):
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
print("Processing Image " + name.split(".")[0])
fname = "./Images/" + name
print(fname)
img = skimage.io.imread(fname)
width = len(img[0])
height = len(img)
# new_size = 256
# height = int(new_size * height / width)
# width = new_size
if width*height < 256*256*(0.95) and abs(width-height) <= 3 :
new_size = 512
height = int(new_size * height / width)
width = new_size
print("A")
elif width*height < 220*220*(1.11):
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B")
elif width*height < 256*256:
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B1")
elif width*height > 512*512*(0.99) and width < 800 and height < 800:
new_size = 512
height = int(new_size * height / width)
width = new_size
print("C")
elif width*height < 512*512*(0.95) and width*height > 256*256*(1.15):
new_size = 512
height = int(new_size * height / width)
width = new_size
print("D")
tried = []
while True:
tried.append(width)
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
stage = 1
text_cut_final = set()
for sc in [350,450,500]:
for sig in [0.8]:
for mins in [30,60,120]: # important
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]):
pass
else:
img = skimage.transform.resize(img, (height, width))
img_lbl, regions = selectivesearch.selective_search(
img, scale=sc, sigma= sig,min_size = mins)
for r in regions:
# excluding same rectangle (with different segments)
if r['rect'] in candidates:
continue
# excluding regions smaller than 2000 pixels
if r['size'] < 2000:
continue
# distorted rects
x, y, w, h = r['rect']
if w / h > 1.2 or h / w > 1.2:
continue
if w >= (img.shape[0]-1)*(0.7) and h >= (img.shape[1]-1)*(0.7):
continue
candidates.add(r['rect'])
print("Stage " + str(stage) + " Complete.")
stage+=1
print(candidates)
merge()
print(refined)
draw_superbox()
print(final)
extend_superbox()
print(final_extended)
os.makedirs("Regions/"+name.split(".")[0])
# draw rectangles on the original image
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in final_extended:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Regions/"+name.split(".")[0]+"/FinalRegions.png")
plt.close('all')
img1 = skimage.io.imread(fname)[:,:,:3]
if height == len(img1) and width == len(img1[0]): pass
else: img1 = skimage.transform.resize(img1, (height, width))
# imgT = Image.open(fname).convert('L')
# w, h = imgT.size
# if height == h and width == w:
# pass
# else:
# # img1 = skimage.transform.resize(img1, (height, width))
# imgT = imgT.resize((width,height), Image.ANTIALIAS)
ij = 1
fList = []
box_list = []
for x, y, w, h in final_extended:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg", img1[y:y+h,x:x+w])
# imgT.crop((x,y,x+w,y+h)).save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
# imgT = Image.open("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.png").convert('L')
# imgT.save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
fList.append("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg")
box_list.append((x, y, w, h))
ij+=1
# classify text no text
text_boxes=set()
text = set()
no_text = set()
both_text = set()
text_cut_final = set()
i = 0
try:
a = getClass(fList)
l = np.array([0,1,2])
for pred in a:
idx = list((-pred).argsort())
pred = l[np.array(idx)]
if pred[0] == 1 or pred[0] == 2:
text_boxes.add(box_list[i])
elif pred[0] == 0:
no_text.add(box_list[i])
if pred[0] == 2:
both_text.add(box_list[i])
print(pred)
i+=1
        except Exception:
print("No Text Regions")
draw_textbox()
print(text)
texbox_cut()
print(text_cut)
texbox_ext()
print(text_cut_final)
# draw rectangles on the original image
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]): pass
else: img = skimage.transform.resize(img, (height, width))
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in text_cut_final:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Result/final_"+name.split(".")[0]+".png")
plt.close('all')
ij = 1
for x, y, w, h in text_cut_final:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_text.png", img[y:y+h,x:x+w])
ij+=1
# min area check
minf = 0
for x, y, w, h in text_cut_final:
if w*h < width*height*0.20 and (w < width*0.20 or h < height*0.20):
minf = 1
if (len(text_cut_final) == 0 or minf == 1) and len(tried) < 3:
print(tried)
print("New size being tried.")
shutil.rmtree("Regions/"+name.split(".")[0]+"/")
img = skimage.io.imread(fname)
twidth = len(img[0])
theight = len(img)
new_size = list(set([256,512,twidth]) - set(tried))[0]
height = int(new_size * theight / twidth)
width = new_size
else:
break
| 33.075051 | 197 | 0.49307 |
4a19c099e02f6cd663defc5c5f18d6f9bbbd51b8
| 474 |
py
|
Python
|
lib/aggregate.py
|
deepu-james/sast-scan
|
3e6c6da5e981cf51c39312ed0665f3d6d49ee042
|
[
"MIT"
] | 131 |
2020-01-08T21:09:32.000Z
|
2022-02-24T21:30:25.000Z
|
lib/aggregate.py
|
deepu-james/sast-scan
|
3e6c6da5e981cf51c39312ed0665f3d6d49ee042
|
[
"MIT"
] | 27 |
2020-01-10T20:25:15.000Z
|
2020-09-04T12:21:30.000Z
|
lib/aggregate.py
|
deepu-james/sast-scan
|
3e6c6da5e981cf51c39312ed0665f3d6d49ee042
|
[
"MIT"
] | 19 |
2020-01-10T08:00:40.000Z
|
2021-08-06T01:41:35.000Z
|
# -*- coding: utf-8 -*-
import json
def jsonl_aggregate(run_data_list, out_file_name):
"""Produce aggregated report in jsonl format
:param run_data_list: List of run data after parsing the sarif files
:param out_file_name: Output filename
"""
if not run_data_list or not out_file_name:
return
with open(out_file_name, "w") as outfile:
for data in run_data_list:
json.dump(data, outfile)
outfile.write("\n")
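# A minimal usage sketch (the run data and output path below are illustrative):
#
#   runs = [{"tool": "bandit", "findings": 3}, {"tool": "gosec", "findings": 0}]
#   jsonl_aggregate(runs, "reports/aggregate.jsonl")
#
# Each entry of run_data_list is written as one JSON object per line.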
| 27.882353 | 72 | 0.664557 |
4a19c1b6e503e4d9f636cbc4208160a71157c42c
| 3,199 |
py
|
Python
|
awake_guardian/awake_guardian.py
|
kamil-cy/awake-guardian
|
a03d03d3980c6dcc4355ded3d9ed218dceeb5271
|
[
"MIT"
] | null | null | null |
awake_guardian/awake_guardian.py
|
kamil-cy/awake-guardian
|
a03d03d3980c6dcc4355ded3d9ed218dceeb5271
|
[
"MIT"
] | null | null | null |
awake_guardian/awake_guardian.py
|
kamil-cy/awake-guardian
|
a03d03d3980c6dcc4355ded3d9ed218dceeb5271
|
[
"MIT"
] | null | null | null |
from PySide6.QtCore import QTimer
from datetime import datetime
from .config import Config, Icon
from .hold_dialog import HoldDialog
from .lang import L
from .power_management import SYSTEM_COMMANDS
from .settings_dialog import SettingsDialog
from .sounds import Sound
from .system_tray_icon import SystemTrayIcon
from .user_activity import UserActivity
from .volume_control import VolumeControl
class AwakeGurdian:
def __init__(self, app):
self.cfg = Config()
self.hold_timer = QTimer()
self.main_timer = QTimer()
self.main_timer.setInterval(1000)
self.main_timer.timeout.connect(self.loop)
self.main_timer.start()
self.app = app
self.last_state = 0
self.dialog_settings = SettingsDialog(self)
self.dialog_hold = HoldDialog(self)
self.tray_icon = SystemTrayIcon(self)
self.tray_icon.show()
def timer_toggle(self):
if self.main_timer.isActive():
self.hold()
else:
self.resume()
def resume(self, text=L.PAUSE):
self.hold_timer.stop()
self.tray_icon.setIcon(Icon.eyes)
self.main_timer.start()
self.tray_icon.systray_menu_main.setText(text)
self.tray_icon.systray_menu_main.setIcon(Icon.inactive)
def hold(self, text=L.RESUME):
self.dialog_hold.show()
self.tray_icon.setIcon(Icon.inactive)
self.main_timer.stop()
self.tray_icon.systray_menu_main.setText(text)
self.tray_icon.systray_menu_main.setIcon(Icon.eyes)
def loop(self):
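        # Periodic poll: skip during the configured quiet hours, optionally trigger
        # a power-management action, then escalate from remind to nag (raising the
        # volume if configured) based on how long the user has been idle.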
if self.cfg.t_range:
t = datetime.now().time()
tf = self.cfg.t_range_f.toPython()
tt = self.cfg.t_range_t.toPython()
if tf > t > tt:
self.last_state = -1
self.tray_icon.setIcon(Icon.clock)
return
idle_secs = UserActivity.check_idle()
if self.cfg.power_management:
if idle_secs >= self.cfg.t_to_event_m * 60 + self.cfg.t_to_event_s:
UserActivity.idle_secs = 0
list(SYSTEM_COMMANDS.values())[self.cfg.power_management_action]()
remind_seconds = self.cfg.t_to_remind_m * 60 + self.cfg.t_to_remind_s
nag_seconds = self.cfg.t_to_nag_m * 60 + self.cfg.t_to_nag_s
if self.cfg.remind and idle_secs >= remind_seconds:
if self.cfg.inc_volume_remind:
VolumeControl.raise_volume(1)
self.last_state = 1
self.remind()
if self.cfg.nag and idle_secs >= nag_seconds:
if self.cfg.inc_volume_nag:
VolumeControl.raise_volume()
self.last_state = 2
self.nag()
if idle_secs < remind_seconds and idle_secs < nag_seconds:
if self.last_state:
self.tray_icon.setIcon(Icon.eyes)
VolumeControl.restore_volume()
self.last_state = 0
def remind(self):
self.tray_icon.setIcon(Icon.beep)
self.main_timer.setInterval(1000)
Sound.remind()
def nag(self):
self.tray_icon.setIcon(Icon.shout)
self.main_timer.setInterval(2000)
Sound.nag()
| 32.979381 | 82 | 0.632698 |
4a19c1ed48a6908fc3192e2bad9eccc4065b0c60
| 1,166 |
py
|
Python
|
scripts/xor_6_6f/xor_6_6f_prepare_data.py
|
NRuf77/proset
|
101d491e05c2423faddca31029232982f46d8831
|
[
"MIT"
] | null | null | null |
scripts/xor_6_6f/xor_6_6f_prepare_data.py
|
NRuf77/proset
|
101d491e05c2423faddca31029232982f46d8831
|
[
"MIT"
] | null | null | null |
scripts/xor_6_6f/xor_6_6f_prepare_data.py
|
NRuf77/proset
|
101d491e05c2423faddca31029232982f46d8831
|
[
"MIT"
] | null | null | null |
"""Prepare 'continuous XOR' problem with 6 relevant and 6 irrelevant features as benchmark case.
Copyright by Nikolaus Ruf
Released under the MIT license - see LICENSE file for details
"""
import gzip
import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from proset.benchmarks import create_continuous_xor
print("* Apply user settings")
random_state = np.random.RandomState(12345)
output_path = "scripts/results"
output_file = "xor_6_6f_data.gz"
print("* Generate data")
X, y = create_continuous_xor(random_state=random_state)
X = np.hstack([X, random_state.uniform(low=-1.0, high=1.0, size=X.shape)])
print("* Make train-test split")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=random_state, stratify=y)
print("* Save data")
data = {
"X_train": X_train,
"X_test": X_test,
"y_train": y_train,
"y_test": y_test,
"feature_names": tuple(["F{}".format(i + 1) for i in range(X_train.shape[1])])
}
with gzip.open(os.path.join(output_path, output_file), mode="wb") as file:
pickle.dump(data, file)
print("* Done")
| 28.439024 | 112 | 0.708405 |
4a19c1f194eb275f9469fcfcf85a74b889dbb3e7
| 9,651 |
py
|
Python
|
nemo/collections/nlp/models/language_modeling/bert_lm_model.py
|
vinayphadnis/NeMo
|
9dc7773c48e164b8a82051bb558a728c6eeb85ec
|
[
"Apache-2.0"
] | 2 |
2020-10-08T13:38:46.000Z
|
2020-10-14T15:09:34.000Z
|
nemo/collections/nlp/models/language_modeling/bert_lm_model.py
|
vinayphadnis/NeMo
|
9dc7773c48e164b8a82051bb558a728c6eeb85ec
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/models/language_modeling/bert_lm_model.py
|
vinayphadnis/NeMo
|
9dc7773c48e164b8a82051bb558a728c6eeb85ec
|
[
"Apache-2.0"
] | 1 |
2020-12-18T14:23:37.000Z
|
2020-12-18T14:23:37.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss, SmoothedCrossEntropyLoss
from nemo.collections.nlp.data.language_modeling.lm_bert_dataset import (
BertPretrainingDataset,
BertPretrainingPreprocessedDataloader,
)
from nemo.collections.nlp.metrics.perplexity import Perplexity
from nemo.collections.nlp.modules.common import BertPretrainingTokenClassifier, SequenceClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes import typecheck
from nemo.core.classes.modelPT import ModelPT
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
__all__ = ['BERTLMModel']
class BERTLMModel(ModelPT):
"""
BERT language model pretraining.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
output_types_dict = {'mlm_logits': self.mlm_classifier.output_types['logits']}
if not self.only_mlm_loss:
output_types_dict['nsp_logits'] = self.nsp_classifier.output_types['logits']
return output_types_dict
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if cfg.tokenizer is not None:
self._setup_tokenizer(cfg.tokenizer)
else:
self.tokenizer = None
super().__init__(cfg=cfg, trainer=trainer)
self.bert_model = get_lm_model(
pretrained_model_name=cfg.language_model.pretrained_model_name,
config_file=cfg.language_model.config_file,
config_dict=OmegaConf.to_container(cfg.language_model.config) if cfg.language_model.config else None,
checkpoint_file=cfg.language_model.lm_checkpoint,
)
self.hidden_size = self.bert_model.config.hidden_size
self.vocab_size = self.bert_model.config.vocab_size
self.only_mlm_loss = cfg.only_mlm_loss
self.mlm_classifier = BertPretrainingTokenClassifier(
hidden_size=self.hidden_size,
num_classes=self.vocab_size,
num_layers=cfg.num_tok_classification_layers,
activation='gelu',
log_softmax=True,
use_transformer_init=True,
)
self.mlm_loss = SmoothedCrossEntropyLoss()
if not self.only_mlm_loss:
self.nsp_classifier = SequenceClassifier(
hidden_size=self.hidden_size,
num_classes=2,
num_layers=cfg.num_seq_classification_layers,
log_softmax=False,
activation='tanh',
use_transformer_init=True,
)
self.nsp_loss = CrossEntropyLoss()
self.agg_loss = AggregatorLoss(num_inputs=2)
        # tie weights of MLM softmax layer and embedding layer of the encoder
if (
self.mlm_classifier.mlp.last_linear_layer.weight.shape
!= self.bert_model.embeddings.word_embeddings.weight.shape
):
raise ValueError("Final classification layer does not match embedding layer.")
self.mlm_classifier.mlp.last_linear_layer.weight = self.bert_model.embeddings.word_embeddings.weight
# create extra bias
# setup to track metrics
self.perplexity_metric = Perplexity()
self.setup_optimization(cfg.optim)
@typecheck()
def forward(self, input_ids, token_type_ids, attention_mask):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
mlm_logits = self.mlm_classifier(hidden_states=hidden_states)
if self.only_mlm_loss:
return (mlm_logits,)
nsp_logits = self.nsp_classifier(hidden_states=hidden_states)
return mlm_logits, nsp_logits
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_loss = self.mlm_loss(logits=logits[0], labels=output_ids, output_mask=output_mask)
if self.only_mlm_loss:
loss = mlm_loss
else:
nsp_loss = self.nsp_loss(logits=logits[1], labels=labels)
loss = self.agg_loss(loss_1=mlm_loss, loss_2=nsp_loss)
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_loss = self.mlm_loss(logits=logits[0], labels=output_ids, output_mask=output_mask)
if self.only_mlm_loss:
loss = mlm_loss
else:
nsp_loss = self.nsp_loss(logits=logits[1], labels=labels)
loss = self.agg_loss(loss_1=mlm_loss, loss_2=nsp_loss)
perplexity = self.perplexity_metric(mlm_loss)
tensorboard_logs = {'val_loss': loss, 'perplexity': perplexity}
return {'val_loss': loss, 'log': tensorboard_logs}
def validation_epoch_end(self, outputs):
"""Called at the end of validation to aggregate outputs.
Args:
outputs (list): The individual outputs of each validation step.
Returns:
dict: Validation loss and tensorboard logs.
"""
if outputs:
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
perplexity = torch.stack([x['log']['perplexity'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss, 'perplexity': perplexity}
logging.info(f"evaluation perplexity {perplexity.item()}")
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = (
self._setup_preprocessed_dataloader(train_data_config)
if self.tokenizer is None
else self._setup_dataloader(train_data_config)
)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = (
self._setup_preprocessed_dataloader(val_data_config)
if self.tokenizer is None
else self._setup_dataloader(val_data_config)
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
pass
def _setup_preprocessed_dataloader(self, cfg: Optional[DictConfig]):
dataset = cfg.data_file
max_predictions_per_seq = cfg.max_predictions_per_seq
batch_size = cfg.batch_size
if os.path.isdir(dataset):
files = [os.path.join(dataset, f) for f in os.listdir(dataset) if os.path.isfile(os.path.join(dataset, f))]
else:
files = [dataset]
files.sort()
dl = BertPretrainingPreprocessedDataloader(
data_files=files, max_predictions_per_seq=max_predictions_per_seq, batch_size=batch_size
)
return dl
def _setup_tokenizer(self, cfg: DictConfig):
tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
tokenizer_model=cfg.tokenizer_model,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
vocab_file=cfg.vocab_file,
)
self.tokenizer = tokenizer
def _setup_dataloader(self, cfg: DictConfig):
dataset = BertPretrainingDataset(
tokenizer=self.tokenizer,
data_file=cfg.data_file,
max_seq_length=cfg.max_seq_length,
mask_prob=cfg.mask_prob,
short_seq_prob=cfg.short_seq_prob,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.get('drop_last', False),
shuffle=cfg.shuffle,
num_workers=cfg.get('num_workers', 0),
)
return dl
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
pass
| 38.915323 | 119 | 0.678064 |
4a19c23ad1ce091c440fad5ca485ed19e4f22627
| 639 |
py
|
Python
|
Python/car.py
|
rgomeztinoco/poo_uber
|
c705268820408dc5ee60a8a23e2cd05967642988
|
[
"MIT"
] | null | null | null |
Python/car.py
|
rgomeztinoco/poo_uber
|
c705268820408dc5ee60a8a23e2cd05967642988
|
[
"MIT"
] | null | null | null |
Python/car.py
|
rgomeztinoco/poo_uber
|
c705268820408dc5ee60a8a23e2cd05967642988
|
[
"MIT"
] | null | null | null |
from account import Account
class Car:
id = int
license = str
driver = Account("","")
__passenger = int
def __init__(self, license, driver):
self.license = license
self.driver = driver
@property
def passenger(self):
return self.__passenger
@passenger.setter
def passenger(self, passengers):
if passengers != 4:
print("Debes tener 4 pasajeros")
else:
self.__passenger = passengers
def printDataCar(self):
print("Licencia: " + self.license)
print("Nombre: " + self.driver.name)
print("Documento: " + self.driver.document)
print("Pasajeros: " + str(self.passenger))
| 22.821429 | 47 | 0.655712 |
4a19c249f6d780e8b77925d273e70375c0ff9254
| 60 |
py
|
Python
|
flask_app/views/__init__.py
|
BlakeC97/gcloud-flask-app
|
0f2b3c49cf4a771b2656639720b71c649ecef5d1
|
[
"MIT"
] | null | null | null |
flask_app/views/__init__.py
|
BlakeC97/gcloud-flask-app
|
0f2b3c49cf4a771b2656639720b71c649ecef5d1
|
[
"MIT"
] | null | null | null |
flask_app/views/__init__.py
|
BlakeC97/gcloud-flask-app
|
0f2b3c49cf4a771b2656639720b71c649ecef5d1
|
[
"MIT"
] | null | null | null |
from .homepage import homepage
from .pdf_ocr import pdf_ocr
| 20 | 30 | 0.833333 |
4a19c3ba5933afb087bc273e475acb8df51a9517
| 1,770 |
py
|
Python
|
setup.py
|
a-ws-m/unlockGNN
|
128b192770925220e24761173c9c373d90088868
|
[
"MIT"
] | 6 |
2020-09-20T11:38:46.000Z
|
2020-10-21T14:13:35.000Z
|
setup.py
|
a-ws-m/unlockGNN
|
128b192770925220e24761173c9c373d90088868
|
[
"MIT"
] | 18 |
2021-09-27T17:24:26.000Z
|
2022-02-02T03:25:35.000Z
|
setup.py
|
a-ws-m/unlockGNN
|
128b192770925220e24761173c9c373d90088868
|
[
"MIT"
] | 1 |
2021-08-05T11:05:53.000Z
|
2021-08-05T11:05:53.000Z
|
"""Setup script for package."""
import pathlib
from setuptools import find_namespace_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="unlockNN",
version="2.0.2",
description="Uncertainty quantification for neural network models of chemical systems.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/a-ws-m/unlockNN",
author="Alexander Moriarty",
author_email="amoriarty14@gmail.com",
license="MIT",
keywords=[
"keras",
"tensorflow",
"megnet",
"machine learning",
"uncertainty quantification",
],
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Chemistry",
],
packages=find_namespace_packages(include=["unlocknn*"]),
include_package_data=False,
install_requires=[
"numpy<=1.19.5",
"pymatgen<=2021.2.8",
"megnet>=1.1.4",
"requests",
"pyarrow>=1.0.1",
"tensorflow>=2.2",
"tensorflow-probability>=0.10.1",
"typish; python_version < '3.8'",
],
python_requires=">=3.6",
extras_require={
"Compatible matminer version": ["matminer==0.6.5"],
},
)
| 31.052632 | 92 | 0.619209 |
4a19c3d5507dcda3ee354d4a661b0e60ddcd42d5
| 6,307 |
py
|
Python
|
test/programytest/clients/restful/flask/line/test_client.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 2 |
2018-06-16T09:32:22.000Z
|
2019-07-21T13:16:00.000Z
|
test/programytest/clients/restful/flask/line/test_client.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 3 |
2020-07-16T04:00:42.000Z
|
2021-03-31T18:52:22.000Z
|
test/programytest/clients/restful/flask/line/test_client.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 4 |
2018-06-29T23:50:44.000Z
|
2020-11-05T08:13:47.000Z
|
import unittest.mock
from linebot.models import TextSendMessage
from linebot import LineBotApi, WebhookParser
from linebot.webhook import SignatureValidator
from programy.clients.restful.flask.line.client import LineBotClient
from programy.clients.restful.flask.line.config import LineConfiguration
from programytest.clients.arguments import MockArgumentParser
class MockLineApi(LineBotApi):
def __init__(self, channel_access_token):
LineBotApi.__init__(self, channel_access_token)
self._messages = []
def reply_message(self, reply_token, messages, timeout=None):
self._messages = messages
class MockSignatureValidator(SignatureValidator):
def __init__(self, valid=True):
self._valid = valid
def validate(self, body, signature):
return self._valid
class MockWebhookParser(WebhookParser):
def __init__(self, channel_secret):
self.signature_validator = MockSignatureValidator(channel_secret)
class MockLineBotClient(LineBotClient):
def __init__(self, argument_parser=None, line_bot=None, parser=None):
self._line_bot_api = line_bot
self._parser = parser
self.test_question = None
LineBotClient.__init__(self, argument_parser)
def set_question(self, question):
self.test_question = question
def get_license_keys(self):
self._channel_secret = "LINE_CHANNEL_SECRET"
self._channel_access_token = "LINE_ACCESS_TOKEN"
def ask_question(self, sessionid, question):
if self.test_question is not None:
return self.test_question
return super(MockLineBotClient, self).ask_question(sessionid, question)
def create_line_bot(self):
if self._line_bot_api is None:
self._line_bot_api = LineBotApi(self._channel_access_token)
if self._parser is None:
self._parser = WebhookParser(self._channel_secret)
class LineBotClientTests(unittest.TestCase):
def test_line_client_init(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments)
self.assertIsNotNone(client)
self.assertEquals("LINE_CHANNEL_SECRET", client._channel_secret)
self.assertEquals("LINE_ACCESS_TOKEN", client._channel_access_token)
self.assertIsInstance(client.get_client_configuration(), LineConfiguration)
self.assertEquals('ProgramY AIML2.0 Line Client', client.get_description())
def test_handle_text_message(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments, line_bot=MockLineApi("TOKEN"))
self.assertIsNotNone(client)
event = unittest.mock.Mock()
        event.message = unittest.mock.Mock()
event.message.text = "Hello"
event.source = unittest.mock.Mock()
event.source.user_id = "User123"
client.test_question = "Hi there"
client.handle_text_message(event)
self.assertIsNotNone(client._line_bot_api)
self.assertIsNotNone(client._line_bot_api._messages)
self.assertIsInstance(client._line_bot_api._messages, TextSendMessage)
self.assertEquals("Hi there", client._line_bot_api._messages.text)
def test_handle_unknown_message(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments, line_bot=MockLineApi("TOKEN"))
self.assertIsNotNone(client)
event = unittest.mock.Mock()
        event.message = unittest.mock.Mock()
event.message.text = "Hello"
event.source = unittest.mock.Mock()
event.source.user_id = "User123"
client.test_question = "Unknown command"
client.handle_unknown_message(event)
self.assertIsNotNone(client._line_bot_api)
self.assertIsNotNone(client._line_bot_api._messages)
self.assertIsInstance(client._line_bot_api._messages, TextSendMessage)
self.assertEquals("Unknown command", client._line_bot_api._messages.text)
def test_handle_unknown_event(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments, line_bot=MockLineApi("TOKEN"))
self.assertIsNotNone(client)
event = unittest.mock.Mock()
        event.message = unittest.mock.Mock()
event.message.text = "Hello"
event.source = unittest.mock.Mock()
event.source.user_id = "User123"
client.test_question = "Unknown command"
client.handle_unknown_message(event)
self.assertIsNotNone(client._line_bot_api)
self.assertIsNotNone(client._line_bot_api._messages)
self.assertIsInstance(client._line_bot_api._messages, TextSendMessage)
self.assertEquals("Unknown command", client._line_bot_api._messages.text)
def test_handle_message_request(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments, line_bot=MockLineApi("TOKEN"), parser=MockWebhookParser("SECRET"))
self.assertIsNotNone(client)
body = '{"events": [{"type": "message", "source": {"source_id": "test", "type": "text", "user": {"user_id": "User123"}}}]}'
signature = "SIGNATURE"
client.handle_message_request(body, signature)
self.assertIsNotNone(client._line_bot_api)
self.assertIsNotNone(client._line_bot_api._messages)
self.assertIsInstance(client._line_bot_api._messages, TextSendMessage)
self.assertEquals("Unknown command", client._line_bot_api._messages.text)
def test_receive_message(self):
arguments = MockArgumentParser()
client = MockLineBotClient(arguments, line_bot=MockLineApi("TOKEN"), parser=MockWebhookParser("SECRET"))
self.assertIsNotNone(client)
client.test_question = "Hi there"
request = unittest.mock.Mock()
request.headers = {'X-Line-Signature': "SECRET"}
request.get_data.return_value = '{"events": [{"type": "message", "source": {"source_id": "test", "type": "text", "user": {"user_id": "User123"}}}]}'
client.receive_message(request)
self.assertIsNotNone(client._line_bot_api)
self.assertIsNotNone(client._line_bot_api._messages)
self.assertIsInstance(client._line_bot_api._messages, TextSendMessage)
self.assertEquals("Unknown command", client._line_bot_api._messages.text)
| 37.319527 | 156 | 0.711432 |
4a19c66d88cdb8861482a7528b44619ad7525dc9
| 16,044 |
py
|
Python
|
madminer/fisherinformation/geometry.py
|
siyuchen95/madminer
|
dfcbd7ee26c47dd294610c195fafce15f74c10eb
|
[
"MIT"
] | 2 |
2020-09-09T20:58:31.000Z
|
2020-09-10T01:35:35.000Z
|
madminer/fisherinformation/geometry.py
|
siyuchen95/madminer
|
dfcbd7ee26c47dd294610c195fafce15f74c10eb
|
[
"MIT"
] | null | null | null |
madminer/fisherinformation/geometry.py
|
siyuchen95/madminer
|
dfcbd7ee26c47dd294610c195fafce15f74c10eb
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import random
from scipy.interpolate import griddata, LinearNDInterpolator, CloughTocher2DInterpolator
from scipy.stats import chi2
from ..utils.various import load_and_check
logger = logging.getLogger(__name__)
class InformationGeometry:
"""
Functions to calculate limits using Information Geometry.
After initializing the `InformationGeometry` class, a Fisher Information needs to be provided using
one of the following functions
* `InformationGeometry.information_from_formula()` defines the Fisher Information
      explicitly as a function of the theory parameters `theta`.
* `InformationGeometry.information_from_grid()` loads a grid of Fisher Informations
which is then interpolated.
Using information geometrical methods, the function `InformationGeometry.distance_contours()` then
calculates the distance contours and equivalently the p-values throughout parameter space.
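
    A minimal usage sketch (the 2-D formula, starting point and grid ranges below
    are purely illustrative):

        infogeo = InformationGeometry()
        infogeo.information_from_formula(
            formula="np.array([[1+theta[0],1],[1,2*theta[1]**2]])", dimension=2
        )
        theta_grid, p_values, distances = infogeo.distance_contours(
            theta0=np.array([1.0, 1.0]),
            grid_ranges=[[0.8, 1.2], [0.8, 1.2]],
            grid_resolutions=[25, 25],
        )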
"""
def __init__(self):
self.infotype = None
self.dimension = 0
self.information_formula = None
self.inverse = "exact"
def information_from_formula(self, formula, dimension):
"""
        Explicitly defines the Fisher Information as a function of the theory parameter `theta`
        through a formula that can be evaluated using `eval()`.
Parameters
----------
formula : str
Explicit definition of the Fisher Information as ndarray, which can be a function of
the n-dimensional theory parameter `theta`.
Example: formula="np.array([[1+theta[0],1],[1,2*theta[1]**2]])"
dimension : int
Dimensionality of the theory parameter space.
"""
self.infotype = "formula"
self.dimension = dimension
self.information_formula = formula
def information_from_grid(self, theta_grid, fisherinformation_grid, option="smooth", inverse="exact"):
"""
Loads a grid of coordinates and corresponding Fisher Information, which is then interpolated.
Parameters
----------
theta_grid : ndarray
            List of parameter points `theta` at which the Fisher information matrices `I_ij(theta)`
            are evaluated. Shape (n_gridpoints, n_dimension).
fisherinformation_grid : ndarray
            List of Fisher information matrices `I_ij(theta)`. Shape (n_gridpoints, n_dimension, n_dimension).
option : {"smooth", "linear"}
Defines if the Fisher Information is interpolated smoothly using the function
CloughTocher2DInterpolator() or piecewise linear using LinearNDInterpolator().
Default = 'smooth'.
inverse : {"exact", "interpolate"}
Defines if the inverse Fisher Information is obtained by either first interpolating
the Fisher Information and then inverting it ("exact") or by first inverting the grid
of Fisher Informations and then interpolating the inverse ("interpolate"). Default = 'exact'.
"""
self.infotype = "grid"
self.inverse = inverse
# load from file
theta_grid = load_and_check(theta_grid)
fisherinformation_grid = load_and_check(fisherinformation_grid)
self.dimension = len(fisherinformation_grid[0])
# Interpolate Information
if option == "linear":
self.infofunction = LinearNDInterpolator(points=theta_grid, values=np.array(fisherinformation_grid))
elif option == "smooth":
self.infofunction = CloughTocher2DInterpolator(points=theta_grid, values=np.array(fisherinformation_grid))
else:
RuntimeError("Option %s unknown", option)
# Interpolate inverse information
if self.inverse == "interpolate":
inv_fisherinformation_grid = np.array([np.linalg.inv(info) for info in fisherinformation_grid])
if option == "linear":
self.infofunction_inv = LinearNDInterpolator(points=theta_grid, values=inv_fisherinformation_grid)
elif option == "smooth":
self.infofunction_inv = CloughTocher2DInterpolator(points=theta_grid, values=inv_fisherinformation_grid)
def _information(self, theta):
"""
        Low level function that calculates the Fisher Information as a function of
the theory parameter `theta`
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
Returns
-------
fisher_information : ndarray
Fisher information matrix with shape `(n_dimension, n_dimension)`.
"""
# check input format
assert len(theta) == self.dimension, "theta should have length %r, not %r" % (self.dimension, len(theta))
# calculate information
if self.infotype == "formula":
information = eval(self.information_formula)
elif self.infotype == "grid":
information = self.infofunction(tuple(theta))
else:
raise RuntimeError("Information not defined yet")
# check output format
assert np.shape(information) == (self.dimension, self.dimension), "information should have shape %r, not %r" % (
(self.dimension, self.dimension),
np.shape(information),
)
return information
def _information_inv(self, theta):
"""
        Low level function that calculates the inverse Fisher Information as a function of
the theory parameter `theta`.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the inverse Fisher information
matrix `I^{-1}_ij(theta)` is evaluated.
Returns
-------
inverse_fisher_information : ndarray
Inverse Fisher information matrix with shape `(n_dimension, n_dimension)`.
"""
if self.inverse == "interpolate":
return self.infofunction_inv(tuple(theta))
else:
return np.linalg.inv(self._information(theta))
def _information_derivative(self, theta):
"""
Low level function that calculates the derivative of Fisher Information
`\partial_k I_{ij}` at the theory parameter `theta`.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the derivative of the Fisher information
matrix is evaluated.
Returns
-------
fisher_information_derivative : ndarray
Derivative of Fisher information matrix with shape `(n_dimension, n_dimension, n_dimension)`.
"""
epsilon = 10 ** -3
dtheta = np.identity(len(theta)) * epsilon
return np.array(
[(self._information(theta + dtheta[k]) - self._information(theta)) / epsilon for k in range(len(theta))]
)
def _christoffel(self, theta):
"""
Low level function that calculates the Christoffel symbol (2nd kind) Gamma^i_jk at
the theory parameter `theta`. Here Gamma^i_jk=0.5*I^{im}(\partial_k I_{mj}
+ \partial_j I_{mk} - \partial_m I_{jk})
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Christoffel symbol is evaluated.
Returns
-------
christoffel_symbol : ndarray
Christoffel symbol with shape `(n_dimension, n_dimension, n_dimension)`.
"""
term1 = np.einsum("ad,cdb->abc", self._information_inv(theta), self._information_derivative(theta))
term2 = np.einsum("ad,bdc->abc", self._information_inv(theta), self._information_derivative(theta))
term3 = np.einsum("ad,bcd->abc", self._information_inv(theta), self._information_derivative(theta))
return 0.5 * (term1 + term2 - term3)
def find_trajectory(self, theta0, dtheta0, limits, stepsize=1):
"""
Finds the geodesic trajectory starting at a parameter point theta0 going in the
initial direction dtheta0.
Parameters
----------
theta0 : ndarray
Parameter point `theta0` at which the geodesic trajectory starts.
dtheta0 : ndarray
Initial direction `dtheta0` of the geodesic
limits : list of (tuple of float)
Specifies the boundaries of the parameter grid in which the trajectory
is evaulated. It should be `[[min, max], [min, max], ..., [min, max]`,
where the list goes over all parameters and `min` and `max` are float.
stepsize : int, optional
Maximal stepsize `|Delta theta|` during numerical integration in parameter space.
            Default: 1
Returns
-------
list_of_theta : ndarray
List of parameter points theta `(n_points, n_dimension)`.
list_of_distance : ndarray
            List of distances from the starting point theta0 `(n_points, )`.
"""
# initiate starting point
theta = 1.0 * theta0
dtheta = 1.0 * dtheta0
dist = 0
output_theta = [1.0 * theta]
output_dist = [0]
# calculate free-fall trajectory
counter = 0
in_grid = True
while in_grid and counter < 200:
counter += 1
# normalize dtheta to stepsize
dtheta = dtheta / np.linalg.norm(dtheta)
# calculate ddtheta and distance
ddtheta = -1.0 * np.einsum("abc,b,c->a", self._christoffel(theta), dtheta, dtheta)
ddist = np.sqrt(np.einsum("ab,a,b", self._information(theta), dtheta, dtheta))
# determine stepsize to be used
max_stepsize = 0.05 * np.linalg.norm(dtheta) / np.linalg.norm(ddtheta)
use_stepsize = min(max_stepsize, stepsize)
# update theta,dtheta, dist
theta += dtheta * use_stepsize
dtheta += ddtheta * use_stepsize
dist += ddist * use_stepsize
# save
theta = np.array(theta)
if np.isnan(dist):
break
output_theta.append(theta * 1.0)
output_dist.append(dist * 1.0)
# check if outside range
for th, lim in zip(theta, limits):
if th < lim[0] or th > lim[1]:
in_grid = False
return np.array(output_theta), output_dist
def distance_contours(
self,
theta0,
grid_ranges,
grid_resolutions,
stepsize=None,
ntrajectories=None,
continous_sampling=False,
return_trajectories=False,
):
"""
Finds the distance values from the point theta0 and the corresponding p-value
within the parameter space bounded by `grid_ranges`.
Parameters
----------
theta0 : ndarray
Parameter point `theta0` at which the geodesic trajectory starts.
grid_ranges : list of (tuple of float)
Specifies the boundaries of the parameter grid in which the trajectory
            is evaluated. It should be `[[min, max], [min, max], ..., [min, max]]`,
where the list goes over all parameters and `min` and `max` are float.
grid_resolutions : list of int
Resolution of the parameter space grid on which the p-values are evaluated.
The individual entries specify the number of points along each parameter individually.
stepsize : float or None, optional
Maximal stepsize `|Delta theta|` during numerical integration in parameter space.
If None, stepsize = min([(max-min)/20 for (min,max) in grid_ranges]). Default: None
ntrajectories : int or None, optional
Number of sampled trajectories. If None, ntrajectories = 20 times the
number of dimensions. Default: None
continous_sampling : bool, optional
            If n_dimension is 2, the trajectories are sampled continuously in the angular
direction. Default: False
return_trajectories : bool, optional
Returns the trajectories (parameter points and distances). Default: False
Returns
-------
theta_grid : ndarray
Parameter points at which the p-values are evaluated with shape `(n_grid_points, n_dimension)`.
p_values : ndarray
Observed p-values for each parameter point on the grid, with shape `(n_grid_points,)`.
        distance_grid : ndarray
Interpolated distance from theta0 for each parameter point on the grid,
with shape `(n_grid_points,)`.
(list_of_theta, list_of_distance) : (ndarray,ndarray)
Only returned if return_trajectories is True. List of parameter points
            theta `(n_points, n_dimension)` and list of distances from the
            starting point theta0 `(n_points, )`.
"""
# automatic setting of stepsize and ntrajectories
        if stepsize is None:
            stepsize = min([(limit[1] - limit[0]) / 20.0 for limit in grid_ranges])
        if ntrajectories is None:
            ntrajectories = 20 * self.dimension
        if self.dimension != 2:
            continous_sampling = False
limits = (1.0 + 2.0 * stepsize) * np.array(grid_ranges)
# determine trajectories
thetas = []
distances = []
for i in range(ntrajectories):
if continous_sampling:
angle = 2.0 * np.pi / float(ntrajectories) * i
dth0 = np.array([np.cos(angle), np.sin(angle)])
else:
dth0 = np.array([random.uniform(-1, 1) for _ in range(self.dimension)])
logger.debug("Calculate Trajectory Number %s with dtheta0=%s", i, dth0)
ths, ds = self.find_trajectory(theta0, dth0, limits, stepsize)
for th in ths:
thetas.append(th)
for d in ds:
distances.append(d)
thetas = np.array(thetas)
# Create Theta Grid
theta_grid_each = self._make_theta_grid_each(grid_ranges, grid_resolutions)
theta_grid = self._make_theta_grid(grid_ranges, grid_resolutions)
# Create Distance Grid
distance_grid = griddata(thetas, distances, (theta_grid_each[0], theta_grid_each[1]), method="linear")
# Create p-value Grid
p_value_grid = self._asymptotic_p_value(distance_grid)
# return
if return_trajectories:
return theta_grid, p_value_grid, distance_grid, (thetas, distances)
else:
return theta_grid, p_value_grid, distance_grid
def _make_theta_grid_each(self, grid_ranges, grid_resolutions):
theta_each = []
for resolution, (theta_min, theta_max) in zip(grid_resolutions, grid_ranges):
theta_each.append(np.linspace(theta_min, theta_max, resolution))
theta_grid_each = np.meshgrid(*theta_each, indexing="ij")
return theta_grid_each
def _make_theta_grid(self, grid_ranges, grid_resolutions):
theta_grid_each = self._make_theta_grid_each(grid_ranges, grid_resolutions)
theta_grid_each = [theta.flatten() for theta in theta_grid_each]
theta_grid = np.vstack(theta_grid_each).T
return theta_grid
def _asymptotic_p_value(self, dist):
"""
Low level function to convert distances in p-values
"""
p_value = chi2.sf(x=dist * dist, df=self.dimension)
return p_value
| 39.614815 | 120 | 0.613064 |
4a19c6b3e7cf767e730ec41b37d772fb9ad3836c
| 543 |
py
|
Python
|
RabbitMqUdn/client/command_args.py
|
clearpal7/ChaosTestingCode
|
5fc029372dc68064f99ac494618061211f51204d
|
[
"MIT"
] | 24 |
2018-03-23T02:50:52.000Z
|
2021-08-17T18:50:58.000Z
|
RabbitMqUdn/client/command_args.py
|
clearpal7/ChaosTestingCode
|
5fc029372dc68064f99ac494618061211f51204d
|
[
"MIT"
] | 3 |
2018-03-11T17:00:25.000Z
|
2019-10-07T21:19:43.000Z
|
RabbitMqUdn/client/command_args.py
|
clearpal7/ChaosTestingCode
|
5fc029372dc68064f99ac494618061211f51204d
|
[
"MIT"
] | 24 |
2017-10-03T09:45:37.000Z
|
2022-03-28T12:55:18.000Z
|
def get_args(args):
args_dict = dict()
index = 1
while index < len(args):
key = args[index]
value = args[index+1]
args_dict[key] = value
index += 2
return args_dict
def get_mandatory_arg(args_dict, key):
if key in args_dict:
return args_dict[key]
else:
print(f"Missing mandatory argument {key}")
exit(1)
def get_optional_arg(args_dict, key, default_value):
if key in args_dict:
return args_dict[key]
else:
return default_value
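# A minimal usage sketch (the argument names below are illustrative):
#
#   import sys
#   args = get_args(sys.argv)  # e.g. script.py --queue q1 --count 10
#   queue = get_mandatory_arg(args, "--queue")
#   count = int(get_optional_arg(args, "--count", "5"))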
| 21.72 | 52 | 0.598527 |
4a19c9414211c8c62b98f93060f744f5ef2c71ea
| 2,099 |
py
|
Python
|
cogs/_setup.py
|
GoldenJayz/sexybot
|
77d5938240f5f3d8c6fbf4eab6ba3aada40fb6d7
|
[
"MIT"
] | null | null | null |
cogs/_setup.py
|
GoldenJayz/sexybot
|
77d5938240f5f3d8c6fbf4eab6ba3aada40fb6d7
|
[
"MIT"
] | null | null | null |
cogs/_setup.py
|
GoldenJayz/sexybot
|
77d5938240f5f3d8c6fbf4eab6ba3aada40fb6d7
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import datetime
import json
from PIL import Image
from io import BytesIO
client = discord.Client()
class _setup(commands.Cog):
def __init__(self, client):
self.client = client
async def editimage(self, target):
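        # Paste the target's avatar (resized to 110x110, RGB) onto the "amoogus.jpg"
        # template and save the composite as "sus.jpg".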
IMAGE = Image.open("amoogus.jpg")
TARGET_PFP = target.avatar_url_as(size=128)
TARGET_DATA = BytesIO(await TARGET_PFP.read())
TPFP = Image.open(TARGET_DATA)
TPFP = TPFP.resize((110, 110))
TPFP = TPFP.convert("RGB")
IMAGE = IMAGE.convert("RGB")
IMAGE.paste(TPFP, (150, 84))
IMAGE.save("sus.jpg")
async def load_guild_config(self, guildid: int) -> bool:
with open("./cogs/guildconfig.json") as jasonfile:
jason = json.load(jasonfile)
for data in jason["Guilds"]:
if data[f"{guildid}"]:
return jason["Guilds"][0][f"{guildid}"]["Among"]
@commands.Cog.listener()
async def on_guild_join(self, guild):
with open("./cogs/guildconfig.json") as jasonfile:
jason = json.load(jasonfile)
jason["Guilds"][0].update({f"{guild.id}": {"prefix": "-", "Among": False}})
with open("./cogs/guildconfig.json", "w") as jasonfile:
json.dump(jason, jasonfile)
for channel in guild.text_channels:
if channel.permissions_for(guild.me).send_messages:
await channel.send("Thank you for inviting me into your server! You can change my configuration by doing ```-setup```")
break
@commands.Cog.listener()
async def on_message(self, msg):
result = await self.load_guild_config(msg.guild.id)
if msg.author == self.client.user:
return
else:
if result == True:
if "sus" in msg.content:
await self.editimage(msg.author)
file = discord.File("sus.jpg", filename="sus.jpg")
await msg.channel.send("impostor?", file=file)
def setup(client):
client.add_cog(_setup(client))
| 35.576271 | 135 | 0.597904 |
4a19c97a25f7dd551c4f43594cdd2a626416ee63
| 8,338 |
py
|
Python
|
independent_vector_analysis/tests/test_iva_g.py
|
SSTGroup/independent_vector_analysis
|
a5fa085bfb3bb8e9a4283bb90beaa8e0132596b6
|
[
"MIT"
] | 1 |
2022-03-09T14:52:14.000Z
|
2022-03-09T14:52:14.000Z
|
independent_vector_analysis/tests/test_iva_g.py
|
SSTGroup/independent_vector_analysis
|
a5fa085bfb3bb8e9a4283bb90beaa8e0132596b6
|
[
"MIT"
] | null | null | null |
independent_vector_analysis/tests/test_iva_g.py
|
SSTGroup/independent_vector_analysis
|
a5fa085bfb3bb8e9a4283bb90beaa8e0132596b6
|
[
"MIT"
] | null | null | null |
# Copyright (c) <2021> <University of Paderborn>
# Signal and System Theory Group, Univ. of Paderborn, https://sst-group.org/
# https://github.com/SSTGroup/independent_vector_analysis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify and
# merge the Software, subject to the following conditions:
#
# 1.) The Software is used for non-commercial research and
# education purposes.
#
# 2.) The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# 3.) Publication, Distribution, Sublicensing, and/or Selling of
# copies or parts of the Software requires special agreements
# with the University of Paderborn and is in general not permitted.
#
# 4.) Modifications or contributions to the software must be
# published under this license. The University of Paderborn
# is granted the non-exclusive right to publish modifications
# or contributions in future versions of the Software free of charge.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Persons using the Software are encouraged to notify the
# Signal and System Theory Group at the University of Paderborn
# about bugs. Please reference the Software in your publications
# if it was used for them.
import numpy as np
import matplotlib.pyplot as plt
from ..iva_g import iva_g
def test_iva_g():
"""
test iva_g
"""
N = 10 # number of sources
T = 1000 # sample size
K = 10 # number of groups
# generate the mixtures
S = np.zeros((N, T, K))
for n in range(N):
temp1 = np.random.randn(K, T)
temp = np.zeros((K, T))
B = np.random.randn(K, K, 3)
for p in range(2):
for t in range(2, T):
# introduce nonwhiteness and spatial correlation
temp[:, t] += B[:, :, p] @ temp1[:, t - p]
for k in range(K):
S[n, :, k] = temp[k, :]
S[n, :, k] -= np.mean(S[n, :, k])
S[n, :, k] = S[n, :, k] / np.std(S[n, :, k], ddof=1)
A = np.random.randn(N, N, K)
X = np.zeros((N, T, K))
for k in range(K):
X[:, :, k] = A[:, :, k] @ S[:, :, k]
# separation
W, _, _, isi = iva_g(X, A=A, jdiag_initW=True)
np.testing.assert_array_less(isi, 0.05)
# show results
T1 = np.zeros((N, N))
for k in range(K):
T_k = W[:, :, k] @ A[:, :, k]
T_k = np.abs(T_k)
for n in range(N):
T_k[n, :] /= np.amax(np.abs(T_k[n, :]))
T1 += T_k / K
P = np.zeros((N, N))
imax = np.argmax(T1, axis=0)
P[np.arange(N), imax] = 1
T1 = P @ T1
plt.figure()
plt.imshow(T1, extent=[0, N, 0, N], cmap='bone')
plt.title('joint global matrix')
plt.colorbar()
plt.show()
print('Ideally, image is identity matrix.')
def test_iva_real_function_calls():
"""
Make sure that function calls do not raise errors. Final value is not that important, therefore
max-iter is set to 4.
"""
N = 20 # number of sources
T = 1000 # sample size
K = 40 # number of groups
# generate the mixtures
S = np.zeros((N, T, K))
for n in range(N):
temp1 = np.random.randn(K, T)
temp = np.zeros((K, T))
B = np.random.randn(K, K, 3)
for p in range(2):
for t in range(2, T):
# introduce nonwhiteness and spatial correlation
temp[:, t] += B[:, :, p] @ temp1[:, t - p]
for k in range(K):
S[n, :, k] = temp[k, :]
S[n, :, k] -= np.mean(S[n, :, k])
S[n, :, k] = S[n, :, k] / np.std(S[n, :, k], ddof=1)
A = np.random.randn(N, N, K)
X = np.zeros((N, T, K))
for k in range(K):
X[:, :, k] = A[:, :, k] @ S[:, :, k]
# initialize with multi-set diagonalization
W, _, _, isi = iva_g(X, verbose=True, A=A, jdiag_initW=True, max_iter=4)
# CCA init (for 2 datasets)
W, _, _, isi = iva_g(X[:, :, 0:2], verbose=True, A=A[:, :, 0:2], jdiag_initW=True, max_iter=4)
# W_init is given
W_init = np.zeros_like(A)
for k in range(K):
W_init[:, :, k] = np.linalg.inv(A[:, :, k]) + np.random.randn(N, N) * 0.1
W, _, _, isi = iva_g(X, verbose=True, A=A, W_init=W_init, max_iter=4)
# same W_init for each dataset
W, _, _, isi = iva_g(X, verbose=True, A=A, W_init=W_init[:, :, 0], max_iter=4)
# random init
W, _, _, isi = iva_g(X, verbose=True, A=A, max_iter=4)
# gradient optimization approach
W, _, _, isi = iva_g(X, opt_approach='gradient', verbose=True, A=A, max_iter=4)
# quasi optimization approach
W, _, _, isi = iva_g(X, opt_approach='quasi', verbose=True, A=A, max_iter=4)
# use int step size
W, _, _, isi = iva_g(X, verbose=True, A=A, max_iter=4, alpha0=2)
# complex call with real-valued data
W, _, _, isi = iva_g(X, complex_valued=True, verbose=True, A=A, max_iter=4)
# circular call with real-valued data
W, _, _, isi = iva_g(X, verbose=True, circular=True, A=A, max_iter=4)
# No whitening
W, _, _, isi = iva_g(X, whiten=False, verbose=True, A=A, max_iter=4)
def test_iva_complex_function_calls():
"""
Make sure that function calls do not raise errors. Final value is not that important, therefore
max-iter is set to 4.
"""
N = 20 # number of sources
T = 1000 # sample size
K = 40 # number of groups
# generate the mixtures
S = np.zeros((N, T, K), dtype=complex)
for n in range(N):
temp1 = np.random.randn(K, T) + 1j * np.random.randn(K, T)
temp = np.zeros((K, T), dtype=complex)
B = np.random.randn(K, K, 3) + 1j * np.random.randn(K, K, 3)
for p in range(2):
for t in range(2, T):
# introduce nonwhiteness and spatial correlation
temp[:, t] += B[:, :, p] @ np.conj(temp1[:, t - p])
for k in range(K):
S[n, :, k] = temp[k, :]
S[n, :, k] -= np.mean(S[n, :, k])
S[n, :, k] = S[n, :, k] / np.std(S[n, :, k], ddof=1)
A = np.random.randn(N, N, K) + 1j * np.random.randn(N, N, K)
X = np.zeros((N, T, K), dtype=complex)
for k in range(K):
X[:, :, k] = A[:, :, k] @ S[:, :, k]
# initialize with multi-set diagonalization
W, _, _, isi = iva_g(X, verbose=True, A=A, jdiag_initW=True, max_iter=4)
# CCA init (for 2 datasets)
W, _, _, isi = iva_g(X[:, :, 0:2], verbose=True, A=A[:, :, 0:2], jdiag_initW=True, max_iter=4)
# W_init is given
W_init = np.zeros_like(A)
for k in range(K):
W_init[:, :, k] = np.linalg.inv(A[:, :, k]) + \
np.random.randn(N, N) * 0.1 + 1j * np.random.randn(N, N) * 0.1
W, _, _, isi = iva_g(X, verbose=True, A=A, W_init=W_init, max_iter=4)
# same W_init for each dataset
W, _, _, isi = iva_g(X, verbose=True, A=A, W_init=W_init[:, :, 0], max_iter=4)
# random init
W, _, _, isi = iva_g(X, verbose=True, A=A, max_iter=4)
# gradient optimization approach
W, _, _, isi = iva_g(X, opt_approach='gradient', verbose=True, A=A, max_iter=4)
# quasi optimization approach
W, _, _, isi = iva_g(X, opt_approach='quasi', verbose=True, A=A, max_iter=4)
# use int step size
W, _, _, isi = iva_g(X, verbose=True, A=A, max_iter=4, alpha0=2)
# circular
W, _, _, isi = iva_g(X, circular=True, verbose=True, A=A, max_iter=4)
W, _, _, isi = iva_g(X, opt_approach='gradient', circular=True, verbose=True, A=A, max_iter=4)
W, _, _, isi = iva_g(X, opt_approach='quasi', circular=True, verbose=True, A=A, max_iter=4)
| 34.454545 | 99 | 0.578076 |
4a19c98350cd691b55b37a0d50c1fcbd6ab69303
| 8,411 |
py
|
Python
|
docs/conf.py
|
Ramon5/dj-chartjs
|
eaa1d24a43f1d66637686c2620408a9890a1eb2b
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Ramon5/dj-chartjs
|
eaa1d24a43f1d66637686c2620408a9890a1eb2b
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Ramon5/dj-chartjs
|
eaa1d24a43f1d66637686c2620408a9890a1eb2b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import dj_chartjs
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.autosummary','sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dj-chartjs'
copyright = u'2020, Ramon dos Santos Rodrigues'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = dj_chartjs.__version__
# The full version, including alpha/beta/rc tags.
release = dj_chartjs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dj-chartjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dj-chartjs.tex', u'dj-chartjs Documentation',
u'Ramon dos Santos Rodrigues', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dj-chartjs', u'dj-chartjs Documentation',
[u'Ramon dos Santos Rodrigues'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dj-chartjs', u'dj-chartjs Documentation',
u'Ramon dos Santos Rodrigues', 'dj-chartjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.474903 | 83 | 0.714065 |
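A brief usage note for the Sphinx configuration above (not part of the original conf.py): these settings are read by sphinx-build, which is normally pointed at the directory containing conf.py. The docs/ layout below is only an assumed project structure.
# sphinx-build -b html docs docs/_build/html   # HTML output using the settings above
# sphinx-build -b man docs docs/_build/man     # man pages via the man_pages entry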
4a19caed8fdcdaed886c69e559ae6ea2629df3e8
| 5,041 |
py
|
Python
|
captum/attr/_utils/stat.py
|
ashkan-software/captum
|
b8b7d4b10a9646e4da827635d6947499fbde3326
|
[
"BSD-3-Clause"
] | 1 |
2020-02-02T19:23:01.000Z
|
2020-02-02T19:23:01.000Z
|
captum/attr/_utils/stat.py
|
ashkan-software/captum
|
b8b7d4b10a9646e4da827635d6947499fbde3326
|
[
"BSD-3-Clause"
] | null | null | null |
captum/attr/_utils/stat.py
|
ashkan-software/captum
|
b8b7d4b10a9646e4da827635d6947499fbde3326
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import torch
class Stat:
"""
The Stat class represents a statistic that can be updated and retrieved
at any point in time.
The basic functionality this class provides is:
1. A update/get method to actually compute the statistic
2. A statistic store/cache to retrieve dependent information
(e.g. other stat values that are required for computation)
3. The name of the statistic that is used for the user to refer to
"""
def __init__(self, name=None, **kwargs):
self.params = kwargs
self._name = name
self._other_stats = None
def init(self):
pass
def _get_stat(self, stat):
return self._other_stats.get(stat)
def update(self, x):
raise NotImplementedError()
def get(self):
raise NotImplementedError()
def __hash__(self):
return hash((self.__class__, frozenset(self.params.items())))
def __eq__(self, other):
return self.__class__ == other.__class__ and frozenset(
self.params.items()
) == frozenset(other.params.items())
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""
The name of the statistic. i.e. it is the key in a .summary
See Summarizer or SummarizerSingleTensor
"""
default_name = self.__class__.__name__.lower()
if len(self.params) > 0:
default_name += f"({self.params})"
return default_name if self._name is None else self._name
class Count(Stat):
def __init__(self, name=None):
super().__init__(name=name)
self.n = None
def get(self):
return self.n
def update(self, x):
if self.n is None:
self.n = 0
self.n += 1
class Mean(Stat):
def __init__(self, name=None):
super().__init__(name=name)
self.rolling_mean = None
self.n = None
def get(self):
return self.rolling_mean
def init(self):
self.n = self._get_stat(Count())
def update(self, x):
n = self.n.get()
if self.rolling_mean is None:
self.rolling_mean = x
else:
delta = x - self.rolling_mean
self.rolling_mean += delta / n
class MSE(Stat):
def __init__(self, name=None):
super().__init__(name=name)
self.prev_mean = None
self.mse = None
def init(self):
self.mean = self._get_stat(Mean())
def get(self):
if self.mse is None and self.prev_mean is not None:
return torch.zeros_like(self.prev_mean)
return self.mse
def update(self, x):
mean = self.mean.get()
if mean is not None and self.prev_mean is not None:
rhs = (x - self.prev_mean) * (x - mean)
if self.mse is None:
self.mse = rhs
else:
self.mse += rhs
        # clone so later in-place updates to the rolling mean do not alias prev_mean
self.prev_mean = mean.clone()
class Var(Stat):
def __init__(self, name=None, order=0):
if name is None:
if order == 0:
name = "variance"
elif order == 1:
name = "sample_variance"
else:
name = f"variance({order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.mse = self._get_stat(MSE())
self.n = self._get_stat(Count())
def update(self, x):
pass
def get(self):
mse = self.mse.get()
n = self.n.get()
if mse is None:
return None
if n <= self.order:
return torch.zeros_like(mse)
return mse / (n - self.order)
class StdDev(Stat):
def __init__(self, name=None, order=0):
if name is None:
if order == 0:
name = "std_dev"
elif order == 1:
name = "sample_std_dev"
else:
name = f"std_dev{order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.var = self._get_stat(Var(order=self.order))
def update(self, x):
pass
def get(self):
var = self.var.get()
return var ** 0.5 if var is not None else None
class GeneralAccumFn(Stat):
def __init__(self, fn, name=None):
super().__init__(name=name)
self.result = None
self.fn = fn
def get(self):
return self.result
def update(self, x):
if self.result is None:
self.result = x
else:
self.result = self.fn(self.result, x)
class Min(GeneralAccumFn):
def __init__(self, name=None, min_fn=torch.min):
super().__init__(name=name, fn=min_fn)
class Max(GeneralAccumFn):
def __init__(self, name=None, max_fn=torch.max):
super().__init__(name=name, fn=max_fn)
class Sum(GeneralAccumFn):
def __init__(self, name=None):
super().__init__(name=name, fn=torch.add)
| 24.004762 | 75 | 0.565364 |
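A minimal usage sketch for the statistics classes above (not part of the original file). In captum the cross-references between stats are normally wired by a Summarizer; the StatStore class and the direct assignment to _other_stats below are hypothetical stand-ins for that wiring.
import torch
class StatStore:
    # hypothetical registry: resolves a Stat (via its __hash__/__eq__) to the live instance
    def __init__(self, stats):
        self._stats = {s: s for s in stats}
    def get(self, stat):
        return self._stats.get(stat)
count, mean, mse, var = Count(), Mean(), MSE(), Var(order=1)
stats = [count, mean, mse, var]
store = StatStore(stats)
for s in stats:
    s._other_stats = store  # assumption: the attribute read by Stat._get_stat
    s.init()
for x in (torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0]), torch.tensor([5.0, 6.0])):
    for s in stats:  # keep Count before Mean before MSE so each update sees current counts
        s.update(x)
print(mean.get())  # tensor([3., 4.])
print(var.get())   # sample variance: tensor([4., 4.])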
4a19cc20d0bf5bdd9463da2bb54d4435b9a315c4
| 6,898 |
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/tarantool/TarantoolFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/JumpscaleLibs/clients/tarantool/TarantoolFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/JumpscaleLibs/clients/tarantool/TarantoolFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
import os
from Jumpscale import j
try:
import tarantool
except ImportError:
j.builders.db.tarantool.install()
import tarantool
from .TarantoolClient import TarantoolClient
from .TarantoolDB import TarantoolDB
JSConfigBaseFactory = j.application.JSFactoryBaseClass
class TarantoolFactory(JSConfigBaseFactory):
"""
#server_start
kosmos 'j.clients.tarantool.server_start()'
#start test
kosmos 'j.clients.tarantool.test()'
"""
__jslocation__ = "j.clients.tarantool"
_CHILDCLASS = TarantoolClient
def _init(self, **kwargs):
self.__imports__ = "tarantool"
if j.core.platformtype.myplatform.platform_is_osx:
self.cfgdir = "/usr/local/etc/tarantool/instances.enabled"
else:
self.cfgdir = "/etc/tarantool/instances.enabled"
self._tarantoolq = {}
def install(self):
j.builders.db.tarantool.install()
# def client_configure(self, name="main", ipaddr="localhost", port=3301, login="root", password="admin007"):
# """
# add a configuration for the tarantool instance 'name' into the jumpscale state config
# :param name: name of the tarantool instance to connect to
# :name type: str
# :param ipaddr: ip address of the tarantool instance
# :type ipaddr: str
# :param port: port of the tarantool instance
# :type port: int
# :param login: user use to connect to tarantool
# :type login: str
# :param password: password use to connect to tarantool
# :type password: str
# """
# cfg = j.core.state.clientConfigGet("tarantool", name)
# cfg.data["ipaddr"] = ipaddr
# cfg.data["port"] = port
# cfg.data["login"] = login
# cfg.data["password"] = password
# cfg.save()
# def client_get(self, name="main", fromcache=True):
# """
# Get a instance of a tarantool client for the instance `name`
# :param name: name of the tarantool instance to connect to. Need to have been configured with client_configure
# :name type: str
# :param fromcache: if false don't try to re-use a client instance from the client cache
# :type fromcache: bool
# """
# cfg = j.core.state.clientConfigGet("tarantool", instance=name)
# # if client for this instance is not configured yet, we generate default config
# if "ipaddr" not in cfg.data.keys():
# self.client_configure(name=name)
# cfg = j.core.state.clientConfigGet("tarantool", instance=name)
# # return client instance from cache or create new one
# cfg = cfg.data
# key = "%s_%s" % (cfg["ipaddr"], cfg["port"])
# if key not in self._tarantool or fromcache is False:
# client = tarantool.connect(cfg["ipaddr"], user=cfg["login"], port=cfg["port"], password=cfg["password"])
# self._tarantool[key] = TarantoolClient(client=client)
# return self._tarantool[key]
def server_get(self, name="main", path="$DATADIR/tarantool/$NAME", adminsecret="admin007", port=3301):
"""
Get a TarantoolDB object, this object provides you with some method to deal with tarantool server
:param name: name of the tarantool instance
:type name: str
:param path: working directory were the file of the database will be saved
:type path:str
:param adminsecret:
"""
return TarantoolDB(name=name, path=path, adminsecret=adminsecret, port=port)
def server_start(
self, name="main", path="$DATADIR/tarantool/$NAME", adminsecret="admin007", port=3301, configTemplatePath=None
):
db = self.server_get(name=name, path=path, adminsecret=adminsecret, port=port)
db.configTemplatePath = configTemplatePath
db.start()
def testmodels(self):
""" WARNING - XXX this is a destructive test that REMOVES code
from the actual git repository (or, the deployed system).
either the code being destroyed should never have been
checked in in the first place, or this test needs to be
modified to either not be destructive, or to clean up
properly after itself
issue #79
"""
# remove the generated code
todel = j.sal.fs.getDirName(os.path.abspath(__file__)) + "models/user/"
j.sal.fs.remove(todel + "/model.lua")
j.sal.fs.remove(todel + "/UserCollection.py")
tt = self.get()
tt.addScripts() # will add the system scripts
tt.reloadSystemScripts()
tt.addModels()
tt.models.UserCollection.destroy()
num_user = 1
for i in range(num_user):
d = tt.models.UserCollection.new()
d.dbobj.name = "name_%s" % i
d.dbobj.description = "this is some description %s" % i
d.dbobj.region = 10
d.dbobj.epoch = j.data.time.getTimeEpoch()
d.save()
d2 = tt.models.UserCollection().get(key=d.key)
assert d.dbobj.name == d2.dbobj.name
assert d.dbobj.description == d2.dbobj.description
assert d.dbobj.region == d2.dbobj.region
assert d.dbobj.epoch == d2.dbobj.epoch
self._log_debug("list of users")
users = tt.models.UserCollection.list()
assert len(users) == num_user
def test_find(self):
cl = self.get()
cl.addScripts() # will add the system scripts
cl.addModels()
user = cl.models.UserCollection.new()
user.dbobj.name = "zaibon"
user.dbobj.description = "this is a description"
user.dbobj.region = 10
user.dbobj.epoch = j.data.time.getTimeEpoch()
user.save()
self._log_debug("user {} created".format(user))
def test(self):
tt = self.get()
tt.addScripts()
tt.reloadSystemScripts()
tt.addModels()
self._log_debug(1)
for i in range(1000):
bytestr = j.data.hash.hex2bin(j.data.hash.sha512_string("%s" % i))
md5hex = j.data.hash.md5_string(bytestr)
md5hex2 = tt.call("binarytest", (bytestr))[0][0]
assert md5hex == md5hex2
self._log_debug(2)
C = """
function echo3(name)
return name
end
"""
tt.eval(C)
self._log_debug("return:%s" % tt.call("echo3", "testecho"))
# capnpSchema = """
# @0x9a7562d859cc7ffa;
# struct User {
# id @0 :UInt32;
# name @1 :Text;
# }
# """
# lpath = j.dirs.TMPDIR + "/test.capnp"
# j.sal.fs.writeFile(lpath, capnpSchema)
# res = j.data.capnp.schema_generate_lua(lpath)
# # tt.scripts_execute()
# self._log_debug(test)
# from IPython import embed
# embed(colors='Linux')
| 34.148515 | 119 | 0.603653 |
4a19cc63ac71d6039dad92b42d72c80f8ea54408
| 10,978 |
py
|
Python
|
neutron/notifiers/nova.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/notifiers/nova.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/notifiers/nova.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as exc
from neutron_lib.plugins import directory
from novaclient import api_versions
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import attributes as sql_attr
from neutron._i18n import _LE, _LI, _LW
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)
VIF_UNPLUGGED = 'network-vif-unplugged'
VIF_PLUGGED = 'network-vif-plugged'
VIF_DELETED = 'network-vif-deleted'
NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
constants.PORT_STATUS_ERROR: 'failed',
constants.PORT_STATUS_DOWN: 'completed'}
NOVA_API_VERSION = "2.1"
@registry.has_registry_receivers
class Notifier(object):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova')
session = ks_loading.load_session_from_conf_options(
cfg.CONF,
'nova',
auth=auth)
extensions = [
ext for ext in nova_client.discover_extensions(NOVA_API_VERSION,
only_contrib=True)
if ext.name == "server_external_events"]
self.nclient = nova_client.Client(
api_versions.APIVersion(NOVA_API_VERSION),
session=session,
region_name=cfg.CONF.nova.region_name,
endpoint_type=cfg.CONF.nova.endpoint_type,
extensions=extensions)
self.batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self.send_events)
def _is_compute_port(self, port):
try:
if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
and port['device_owner'].startswith(
constants.DEVICE_OWNER_COMPUTE_PREFIX)):
return True
except (KeyError, AttributeError):
pass
return False
def _get_network_changed_event(self, port):
return {'name': 'network-changed',
'server_uuid': port['device_id'],
'tag': port['id']}
def _get_port_delete_event(self, port):
return {'server_uuid': port['device_id'],
'name': VIF_DELETED,
'tag': port['id']}
@registry.receives(resources.PORT, [events.BEFORE_RESPONSE])
@registry.receives(resources.FLOATING_IP, [events.BEFORE_RESPONSE])
def _send_nova_notification(self, resource, event, trigger,
action=None, original=None, data=None,
**kwargs):
self.send_network_change(action, original, data)
def send_network_change(self, action, original_obj,
returned_obj):
"""Called when a network change is made that nova cares about.
:param action: the event that occurred.
:param original_obj: the previous value of resource before action.
:param returned_obj: the body returned to client as result of action.
"""
if not cfg.CONF.notify_nova_on_port_data_changes:
return
# When neutron re-assigns floating ip from an original instance
# port to a new instance port without disassociate it first, an
# event should be sent for original instance, that will make nova
# know original instance's info, and update database for it.
if (action == 'update_floatingip'
and returned_obj['floatingip'].get('port_id')
and original_obj.get('port_id')):
disassociate_returned_obj = {'floatingip': {'port_id': None}}
event = self.create_port_changed_event(action, original_obj,
disassociate_returned_obj)
self.batch_notifier.queue_event(event)
event = self.create_port_changed_event(action, original_obj,
returned_obj)
self.batch_notifier.queue_event(event)
def create_port_changed_event(self, action, original_obj, returned_obj):
port = None
if action in ['update_port', 'delete_port']:
port = returned_obj['port']
elif action in ['update_floatingip', 'create_floatingip',
'delete_floatingip']:
# NOTE(arosen) if we are associating a floatingip the
# port_id is in the returned_obj. Otherwise on disassociate
# it's in the original_object
port_id = (returned_obj['floatingip'].get('port_id') or
original_obj.get('port_id'))
if port_id is None:
return
ctx = context.get_admin_context()
try:
port = directory.get_plugin().get_port(ctx, port_id)
except exc.PortNotFound:
LOG.debug("Port %s was deleted, no need to send any "
"notification", port_id)
return
if port and self._is_compute_port(port):
if action == 'delete_port':
return self._get_port_delete_event(port)
else:
return self._get_network_changed_event(port)
def _can_notify(self, port):
if not port.id:
LOG.warning(_LW("Port ID not set! Nova will not be notified of "
"port status change."))
return False
# If there is no device_id set there is nothing we can do here.
if not port.device_id:
LOG.debug("device_id is not set on port %s yet.", port.id)
return False
# We only want to notify about nova ports.
if not self._is_compute_port(port):
return False
return True
def record_port_status_changed(self, port, current_port_status,
previous_port_status, initiator):
"""Determine if nova needs to be notified due to port status change.
"""
# clear out previous _notify_event
port._notify_event = None
if not self._can_notify(port):
return
# We notify nova when a vif is unplugged which only occurs when
# the status goes from ACTIVE to DOWN.
if (previous_port_status == constants.PORT_STATUS_ACTIVE and
current_port_status == constants.PORT_STATUS_DOWN):
event_name = VIF_UNPLUGGED
# We only notify nova when a vif is plugged which only occurs
# when the status goes from:
# NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR.
elif (previous_port_status in [sql_attr.NO_VALUE,
constants.PORT_STATUS_DOWN,
constants.PORT_STATUS_BUILD]
and current_port_status in [constants.PORT_STATUS_ACTIVE,
constants.PORT_STATUS_ERROR]):
event_name = VIF_PLUGGED
# All the remaining state transitions are of no interest to nova
else:
LOG.debug("Ignoring state change previous_port_status: "
"%(pre_status)s current_port_status: %(cur_status)s"
" port_id %(id)s",
{'pre_status': previous_port_status,
'cur_status': current_port_status,
'id': port.id})
return
port._notify_event = (
{'server_uuid': port.device_id,
'name': event_name,
'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status),
'tag': port.id})
def send_port_status(self, mapper, connection, port):
event = getattr(port, "_notify_event", None)
self.batch_notifier.queue_event(event)
port._notify_event = None
def notify_port_active_direct(self, port):
"""Notify nova about active port
Used when port was wired on the host other than port's current host
according to port binding. This happens during live migration.
In this case ml2 plugin skips port status update but we still we need
to notify nova.
"""
if not self._can_notify(port):
return
port._notify_event = (
{'server_uuid': port.device_id,
'name': VIF_PLUGGED,
'status': 'completed',
'tag': port.id})
self.send_port_status(None, None, port)
def send_events(self, batched_events):
LOG.debug("Sending events: %s", batched_events)
try:
response = self.nclient.server_external_events.create(
batched_events)
except nova_exceptions.NotFound:
LOG.debug("Nova returned NotFound for event: %s",
batched_events)
except Exception:
LOG.exception(_LE("Failed to notify nova on events: %s"),
batched_events)
else:
if not isinstance(response, list):
LOG.error(_LE("Error response returned from nova: %s"),
response)
return
response_error = False
for event in response:
try:
code = event['code']
except KeyError:
response_error = True
continue
if code != 200:
LOG.warning(_LW("Nova event: %s returned with failed "
"status"), event)
else:
LOG.info(_LI("Nova event response: %s"), event)
if response_error:
LOG.error(_LE("Error response returned from nova: %s"),
response)
| 40.065693 | 79 | 0.600656 |
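An illustrative sketch (not part of the module above) of the transition logic in record_port_status_changed. Notifier.__init__ needs keystone and nova configuration, so it is bypassed with __new__ here purely for demonstration; FakePort is a hypothetical stand-in for a neutron port object, which supports both attribute and item access.
import uuid
class FakePort(dict):
    # hypothetical helper: allows both port.id and port['device_owner'] lookups
    def __init__(self, **kwargs):
        dict.__init__(self, **kwargs)
        self.__dict__.update(kwargs)
port = FakePort(id=str(uuid.uuid4()),
                device_id=str(uuid.uuid4()),
                device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova')
notifier = Notifier.__new__(Notifier)  # skip __init__: no nova client is needed on this path
notifier.record_port_status_changed(port,
                                    current_port_status=constants.PORT_STATUS_ACTIVE,
                                    previous_port_status=constants.PORT_STATUS_DOWN,
                                    initiator=None)
print(port._notify_event['name'])  # 'network-vif-plugged'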
4a19ce5e051b034d478e38ec2777c4a8778d5603
| 20,713 |
py
|
Python
|
intersight/models/iam_role.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21 |
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/iam_role.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14 |
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/iam_role.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18 |
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamRole(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'ancestors': 'list[MoBaseMoRef]',
'parent': 'MoBaseMoRef',
'permission_resources': 'list[MoBaseMoRef]',
'description': 'str',
'name': 'str',
'privilege_names': 'list[str]',
'account': 'IamAccountRef',
'privilege_sets': 'list[IamPrivilegeSetRef]',
'system': 'IamSystemRef'
}
attribute_map = {
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'description': 'Description',
'name': 'Name',
'privilege_names': 'PrivilegeNames',
'account': 'Account',
'privilege_sets': 'PrivilegeSets',
'system': 'System'
}
def __init__(self, account_moid=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, description=None, name=None, privilege_names=None, account=None, privilege_sets=None, system=None):
"""
IamRole - a model defined in Swagger
"""
self._account_moid = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._ancestors = None
self._parent = None
self._permission_resources = None
self._description = None
self._name = None
self._privilege_names = None
self._account = None
self._privilege_sets = None
self._system = None
if account_moid is not None:
self.account_moid = account_moid
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if ancestors is not None:
self.ancestors = ancestors
if parent is not None:
self.parent = parent
if permission_resources is not None:
self.permission_resources = permission_resources
if description is not None:
self.description = description
if name is not None:
self.name = name
if privilege_names is not None:
self.privilege_names = privilege_names
if account is not None:
self.account = account
if privilege_sets is not None:
self.privilege_sets = privilege_sets
if system is not None:
self.system = system
@property
def account_moid(self):
"""
Gets the account_moid of this IamRole.
The Account ID for this managed object.
:return: The account_moid of this IamRole.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this IamRole.
The Account ID for this managed object.
:param account_moid: The account_moid of this IamRole.
:type: str
"""
self._account_moid = account_moid
@property
def create_time(self):
"""
Gets the create_time of this IamRole.
The time when this managed object was created.
:return: The create_time of this IamRole.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this IamRole.
The time when this managed object was created.
:param create_time: The create_time of this IamRole.
:type: datetime
"""
self._create_time = create_time
@property
def domain_group_moid(self):
"""
Gets the domain_group_moid of this IamRole.
The DomainGroup ID for this managed object.
:return: The domain_group_moid of this IamRole.
:rtype: str
"""
return self._domain_group_moid
@domain_group_moid.setter
def domain_group_moid(self, domain_group_moid):
"""
Sets the domain_group_moid of this IamRole.
The DomainGroup ID for this managed object.
:param domain_group_moid: The domain_group_moid of this IamRole.
:type: str
"""
self._domain_group_moid = domain_group_moid
@property
def mod_time(self):
"""
Gets the mod_time of this IamRole.
The time when this managed object was last modified.
:return: The mod_time of this IamRole.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this IamRole.
The time when this managed object was last modified.
:param mod_time: The mod_time of this IamRole.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this IamRole.
The unique identifier of this Managed Object instance.
:return: The moid of this IamRole.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamRole.
The unique identifier of this Managed Object instance.
:param moid: The moid of this IamRole.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamRole.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:return: The object_type of this IamRole.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamRole.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:param object_type: The object_type of this IamRole.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this IamRole.
The array of owners which represent effective ownership of this object.
:return: The owners of this IamRole.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this IamRole.
The array of owners which represent effective ownership of this object.
:param owners: The owners of this IamRole.
:type: list[str]
"""
self._owners = owners
@property
def shared_scope(self):
"""
Gets the shared_scope of this IamRole.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:return: The shared_scope of this IamRole.
:rtype: str
"""
return self._shared_scope
@shared_scope.setter
def shared_scope(self, shared_scope):
"""
Sets the shared_scope of this IamRole.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:param shared_scope: The shared_scope of this IamRole.
:type: str
"""
self._shared_scope = shared_scope
@property
def tags(self):
"""
Gets the tags of this IamRole.
The array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this IamRole.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this IamRole.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IamRole.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this IamRole.
The versioning info for this managed object.
:return: The version_context of this IamRole.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this IamRole.
The versioning info for this managed object.
:param version_context: The version_context of this IamRole.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def ancestors(self):
"""
Gets the ancestors of this IamRole.
The array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this IamRole.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this IamRole.
The array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this IamRole.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def parent(self):
"""
Gets the parent of this IamRole.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this IamRole.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this IamRole.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this IamRole.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def permission_resources(self):
"""
Gets the permission_resources of this IamRole.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:return: The permission_resources of this IamRole.
:rtype: list[MoBaseMoRef]
"""
return self._permission_resources
@permission_resources.setter
def permission_resources(self, permission_resources):
"""
Sets the permission_resources of this IamRole.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:param permission_resources: The permission_resources of this IamRole.
:type: list[MoBaseMoRef]
"""
self._permission_resources = permission_resources
@property
def description(self):
"""
Gets the description of this IamRole.
Informative description about each role.
:return: The description of this IamRole.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this IamRole.
Informative description about each role.
:param description: The description of this IamRole.
:type: str
"""
self._description = description
@property
def name(self):
"""
Gets the name of this IamRole.
The name of the role which has to be granted to user.
:return: The name of this IamRole.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this IamRole.
The name of the role which has to be granted to user.
:param name: The name of this IamRole.
:type: str
"""
self._name = name
@property
def privilege_names(self):
"""
Gets the privilege_names of this IamRole.
Names of the privileges in the role.
:return: The privilege_names of this IamRole.
:rtype: list[str]
"""
return self._privilege_names
@privilege_names.setter
def privilege_names(self, privilege_names):
"""
Sets the privilege_names of this IamRole.
Names of the privileges in the role.
:param privilege_names: The privilege_names of this IamRole.
:type: list[str]
"""
self._privilege_names = privilege_names
@property
def account(self):
"""
Gets the account of this IamRole.
A collection of references to the [iam.Account](mo://iam.Account) Managed Object. When this managed object is deleted, the referenced [iam.Account](mo://iam.Account) MO unsets its reference to this deleted MO.
:return: The account of this IamRole.
:rtype: IamAccountRef
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this IamRole.
A collection of references to the [iam.Account](mo://iam.Account) Managed Object. When this managed object is deleted, the referenced [iam.Account](mo://iam.Account) MO unsets its reference to this deleted MO.
:param account: The account of this IamRole.
:type: IamAccountRef
"""
self._account = account
@property
def privilege_sets(self):
"""
Gets the privilege_sets of this IamRole.
Reference to the privilege sets. Privilege set is a collection of privileges. Privilege sets are assigned to a user using roles.
:return: The privilege_sets of this IamRole.
:rtype: list[IamPrivilegeSetRef]
"""
return self._privilege_sets
@privilege_sets.setter
def privilege_sets(self, privilege_sets):
"""
Sets the privilege_sets of this IamRole.
Reference to the privilege sets. Privilege set is a collection of privileges. Privilege sets are assigned to a user using roles.
:param privilege_sets: The privilege_sets of this IamRole.
:type: list[IamPrivilegeSetRef]
"""
self._privilege_sets = privilege_sets
@property
def system(self):
"""
Gets the system of this IamRole.
A collection of references to the [iam.System](mo://iam.System) Managed Object. When this managed object is deleted, the referenced [iam.System](mo://iam.System) MO unsets its reference to this deleted MO.
:return: The system of this IamRole.
:rtype: IamSystemRef
"""
return self._system
@system.setter
def system(self, system):
"""
Sets the system of this IamRole.
A collection of references to the [iam.System](mo://iam.System) Managed Object. When this managed object is deleted, the referenced [iam.System](mo://iam.System) MO unsets its reference to this deleted MO.
:param system: The system of this IamRole.
:type: IamSystemRef
"""
self._system = system
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamRole):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 32.877778 | 738 | 0.629363 |
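A short usage sketch (not part of the generated module above) of the round trip these swagger models support; the attribute values are purely illustrative.
role = IamRole(name="read-only",
               description="Grants read-only access",
               privilege_names=["account-read", "server-read"])
print(role.to_dict()["name"])             # 'read-only' (keys follow swagger_types, not attribute_map)
print(role == IamRole(name="read-only"))  # False: __eq__ compares every attribute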
4a19cfbdf75336fa2492068e8926d75fa5aa954d
| 6,880 |
py
|
Python
|
tests/test_utilities/test_extended_tag_list.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 240 |
2015-07-17T16:27:54.000Z
|
2022-03-29T13:53:06.000Z
|
tests/test_utilities/test_extended_tag_list.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 400 |
2015-07-23T05:37:52.000Z
|
2022-03-29T12:32:30.000Z
|
tests/test_utilities/test_extended_tag_list.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 143 |
2015-07-17T18:22:27.000Z
|
2022-03-22T01:21:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Utilities Extended Tag List
--------------------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.primitivedata import Tag, OpeningTag, ClosingTag, \
Null, Boolean, Unsigned, Integer, Real, Double, OctetString, \
CharacterString, BitString, Enumerated, Date, Time, ObjectIdentifier
from ..extended_tag_list import statement_to_tag, ExtendedTagList
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
def tag_encode(obj, context=None):
"""Encode an atomic object into a tag."""
if _debug: tag_encode._debug("tag_encode %r", obj)
# encode it normally
tag = Tag()
obj.encode(tag)
# check for context encoding
if context is not None:
tag = tag.app_to_context(context)
if _debug: tag_encode._debug(" - tag: %r", tag)
return tag
@bacpypes_debugging
def compare_tag_list(tag_string, *tags):
"""Compare the extended tag list interpretation of the string with
the list of other tags provided."""
if _debug: compare_tag_list._debug("compare_tag_list %r %r", tag_string, tags)
tag_list = ExtendedTagList(tag_string)
if _debug: compare_tag_list._debug(" - tag_list: %r", tag_list)
# make sure they encode the same number of tags
assert len(tag_list) == len(tags)
# check each tag
for x, y in zip(tag_list.tagList, tags):
assert x == y
@bacpypes_debugging
class TestExtendedTagStatements(unittest.TestCase):
def test_opening_closing_statements(self):
if _debug: TestExtendedTagStatements._debug("test_opening_closing_statements")
# test individual statements
assert statement_to_tag("opening tag 1") == OpeningTag(1)
assert statement_to_tag("closing tag 1") == ClosingTag(1)
def test_null_statement(self):
if _debug: TestExtendedTagStatements._debug("test_null_statement")
# test atomic tags
assert statement_to_tag("null") == tag_encode(Null())
assert statement_to_tag("null context 1") == tag_encode(Null(), context=1)
def test_boolean_statement(self):
if _debug: TestExtendedTagStatements._debug("test_boolean_statement")
assert statement_to_tag("boolean false") == tag_encode(Boolean(False))
assert statement_to_tag("boolean true") == tag_encode(Boolean(True))
assert statement_to_tag("boolean true context 2") == tag_encode(Boolean(True), context=2)
def test_unsigned_statement(self):
if _debug: TestExtendedTagStatements._debug("test_unsigned_statement")
assert statement_to_tag("unsigned 0") == tag_encode(Unsigned(0))
assert statement_to_tag("unsigned 1") == tag_encode(Unsigned(1))
assert statement_to_tag("unsigned 1 context 3") == tag_encode(Unsigned(1), context=3)
def test_integer_statement(self):
if _debug: TestExtendedTagStatements._debug("test_integer_statement")
assert statement_to_tag("integer 0") == tag_encode(Integer(0))
assert statement_to_tag("integer 1") == tag_encode(Integer(1))
assert statement_to_tag("integer -1") == tag_encode(Integer(-1))
assert statement_to_tag("integer 1 context 4") == tag_encode(Integer(1), context=4)
def test_real_statement(self):
if _debug: TestExtendedTagStatements._debug("test_real_statement")
assert statement_to_tag("real 0.0") == tag_encode(Real(0.0))
assert statement_to_tag("real 72.5") == tag_encode(Real(72.5))
assert statement_to_tag("real 3.14 context 5") == tag_encode(Real(3.14), context=5)
def test_double_statement(self):
if _debug: TestExtendedTagStatements._debug("test_double_statement")
assert statement_to_tag("double 0.0") == tag_encode(Double(0.0))
assert statement_to_tag("double 75.2") == tag_encode(Double(75.2))
assert statement_to_tag("double 6.28 context 6") == tag_encode(Double(6.28), context=6)
def test_octet_string_statement(self):
if _debug: TestExtendedTagStatements._debug("test_octet_string_statement")
assert statement_to_tag("octet string 0102") == tag_encode(OctetString(xtob("0102")))
assert statement_to_tag("octet string 01020304 context 7") == tag_encode(OctetString(xtob("01020304")), context=7)
def test_character_string_statement(self):
if _debug: TestExtendedTagStatements._debug("test_character_string_statement")
assert statement_to_tag("character string ''") == tag_encode(CharacterString(""))
assert statement_to_tag("character string 'hello'") == tag_encode(CharacterString("hello"))
assert statement_to_tag("character string 'hi' context 8") == tag_encode(CharacterString("hi"), context=8)
def test_bit_string_statement(self):
if _debug: TestExtendedTagStatements._debug("test_bit_string_statement")
assert statement_to_tag("bit string 101") == tag_encode(BitString([1, 0, 1]))
assert statement_to_tag("bit string 10111 context 9") == tag_encode(BitString([1, 0, 1, 1, 1]), context=9)
def test_enumerated_statement(self):
if _debug: TestExtendedTagStatements._debug("test_enumerated_statement")
assert statement_to_tag("enumerated 5") == tag_encode(Enumerated(5))
assert statement_to_tag("enumerated 5 context 10") == tag_encode(Enumerated(5), context=10)
def test_date_statement(self):
if _debug: TestExtendedTagStatements._debug("test_date_statement")
assert statement_to_tag("date 1/1/70") == tag_encode(Date((70, 1, 1, 4)))
# note that the day of the week is not optional for date statements with a context
assert statement_to_tag("date 2015-8-31 1 context 11") == tag_encode(Date((115, 8, 31, 1)), context=11)
def test_time_statement(self):
if _debug: TestExtendedTagStatements._debug("test_time_statement")
assert statement_to_tag("time 1:2") == tag_encode(Time((1, 2, 0, 0)))
assert statement_to_tag("time 1:2:3.4 context 12") == tag_encode(Time((1, 2, 3, 40)), context=12)
def test_object_identifier_statement(self):
if _debug: TestExtendedTagStatements._debug("test_object_identifier_statement")
assert statement_to_tag("object identifier analogInput 1") == tag_encode(ObjectIdentifier('analogInput', 1))
assert statement_to_tag("object identifier 99 1 context 13") == tag_encode(ObjectIdentifier(99, 1), context=13)
@bacpypes_debugging
class TestExtendedTagList(unittest.TestCase):
def test_tag_list(self):
if _debug: TestExtendedTagList._debug("test_tag_list")
compare_tag_list("""
opening tag 1
integer 2
closing tag 3
""",
OpeningTag(1),
tag_encode(Integer(2)),
ClosingTag(3),
)
| 40.233918 | 122 | 0.699128 |
4a19d0477499bc406e852a8927cf5ea9063ee380
| 6,479 |
py
|
Python
|
pex/resolve/lockfile/__init__.py
|
alexey-tereshenkov-oxb/pex
|
2e2d1e50e604fdee48b0d51aea482ca255521ff0
|
[
"Apache-2.0"
] | null | null | null |
pex/resolve/lockfile/__init__.py
|
alexey-tereshenkov-oxb/pex
|
2e2d1e50e604fdee48b0d51aea482ca255521ff0
|
[
"Apache-2.0"
] | null | null | null |
pex/resolve/lockfile/__init__.py
|
alexey-tereshenkov-oxb/pex
|
2e2d1e50e604fdee48b0d51aea482ca255521ff0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex import resolver
from pex.common import pluralize, safe_open
from pex.requirements import LocalProjectRequirement, VCSRequirement
from pex.resolve import resolvers
from pex.resolve.locked_resolve import LockConfiguration
from pex.resolve.lockfile.lockfile import Lockfile as Lockfile # For re-export.
from pex.resolve.requirement_configuration import RequirementConfiguration
from pex.resolve.resolver_configuration import PipConfiguration
from pex.result import Error
from pex.targets import Targets
from pex.third_party.pkg_resources import Requirement
from pex.typing import TYPE_CHECKING
from pex.variables import ENV
from pex.version import __version__
if TYPE_CHECKING:
from typing import List, Text, Union
class ParseError(Exception):
"""Indicates an error parsing a Pex lock file."""
def load(lockfile_path):
# type: (str) -> Lockfile
"""Loads the Pex lock file stored at the given path.
:param lockfile_path: The path to the Pex lock file to load.
:return: The parsed lock file.
:raises: :class:`ParseError` if there was a problem parsing the lock file.
"""
from pex.resolve.lockfile import json_codec
return json_codec.load(lockfile_path=lockfile_path)
def loads(
lockfile_contents, # type: Text
source="<string>", # type: str
):
# type: (...) -> Lockfile
"""Parses the given Pex lock file contents.
:param lockfile_contents: The contents of a Pex lock file.
:param source: A descriptive name for the source of the lock file contents.
:return: The parsed lock file.
:raises: :class:`ParseError` if there was a problem parsing the lock file.
"""
from pex.resolve.lockfile import json_codec
return json_codec.loads(lockfile_contents=lockfile_contents, source=source)
def store(
lockfile, # type: Lockfile
path, # type: str
):
# type: (...) -> None
"""Stores the given lock file at the given path.
Any missing parent directories in the path will be created and any pre-existing file at the
    path will be overwritten.
:param lockfile: The lock file to store.
:param path: The path to store the lock file at.
"""
import json
from pex.resolve.lockfile import json_codec
with safe_open(path, "w") as fp:
json.dump(json_codec.as_json_data(lockfile), fp, sort_keys=True)
def create(
lock_configuration, # type: LockConfiguration
requirement_configuration, # type: RequirementConfiguration
targets, # type: Targets
pip_configuration, # type: PipConfiguration
):
# type: (...) -> Union[Lockfile, Error]
"""Create a lock file for the given resolve configurations."""
network_configuration = pip_configuration.network_configuration
requirements = [] # type: List[Requirement]
projects = [] # type: List[str]
for parsed_requirement in requirement_configuration.parse_requirements(network_configuration):
if isinstance(parsed_requirement, LocalProjectRequirement):
projects.append("local project at {path}".format(path=parsed_requirement.path))
elif isinstance(parsed_requirement, VCSRequirement):
projects.append(
"{vcs} project {project_name} at {url}".format(
vcs=parsed_requirement.vcs,
project_name=parsed_requirement.requirement.project_name,
url=parsed_requirement.url,
)
)
else:
requirements.append(parsed_requirement.requirement)
if projects:
return Error(
"Cannot create a lock for project requirements built from local or version "
"controlled sources. Given {count} such {projects}:\n{project_descriptions}".format(
count=len(projects),
projects=pluralize(projects, "project"),
project_descriptions="\n".join(
"{index}.) {project}".format(index=index, project=project)
for index, project in enumerate(projects, start=1)
),
)
)
constraints = tuple(
constraint.requirement
for constraint in requirement_configuration.parse_constraints(network_configuration)
)
try:
downloaded = resolver.download(
targets=targets,
requirements=requirement_configuration.requirements,
requirement_files=requirement_configuration.requirement_files,
constraint_files=requirement_configuration.constraint_files,
allow_prereleases=pip_configuration.allow_prereleases,
transitive=pip_configuration.transitive,
indexes=pip_configuration.repos_configuration.indexes,
find_links=pip_configuration.repos_configuration.find_links,
resolver_version=pip_configuration.resolver_version,
network_configuration=network_configuration,
cache=ENV.PEX_ROOT,
build=pip_configuration.allow_builds,
use_wheel=pip_configuration.allow_wheels,
prefer_older_binary=pip_configuration.prefer_older_binary,
use_pep517=pip_configuration.use_pep517,
build_isolation=pip_configuration.build_isolation,
max_parallel_jobs=pip_configuration.max_jobs,
lock_configuration=lock_configuration,
# We're just out for the lock data and not the distribution files downloaded to produce
# that data.
dest=None,
)
except resolvers.ResolveError as e:
return Error(str(e))
return Lockfile.create(
pex_version=__version__,
style=lock_configuration.style,
requires_python=lock_configuration.requires_python,
resolver_version=pip_configuration.resolver_version,
requirements=requirements,
constraints=constraints,
allow_prereleases=pip_configuration.allow_prereleases,
allow_wheels=pip_configuration.allow_wheels,
allow_builds=pip_configuration.allow_builds,
prefer_older_binary=pip_configuration.prefer_older_binary,
use_pep517=pip_configuration.use_pep517,
build_isolation=pip_configuration.build_isolation,
transitive=pip_configuration.transitive,
locked_resolves=downloaded.locked_resolves,
)
| 39.03012 | 99 | 0.702886 |
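A minimal sketch (not part of the module above) of the read/write helpers it exposes; both file paths are placeholders.
from pex.resolve import lockfile
lock = lockfile.load("lock.json")          # raises lockfile.ParseError on malformed input
lockfile.store(lock, "backups/lock.json")  # creates missing parent dirs and overwrites the file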
4a19d07b2410b88ffe24afb557943f9d2d3d100e
| 599 |
py
|
Python
|
demo4.py
|
KINGMJ/python-demo
|
1876ab9912353e40494892d72c5cd72dd322cad4
|
[
"MIT"
] | null | null | null |
demo4.py
|
KINGMJ/python-demo
|
1876ab9912353e40494892d72c5cd72dd322cad4
|
[
"MIT"
] | null | null | null |
demo4.py
|
KINGMJ/python-demo
|
1876ab9912353e40494892d72c5cd72dd322cad4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Looping techniques
# items() yields both the key and the value
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for k, v in knights.items():
print(k, v)
# enumerate() yields both the index and the value
for i, v in enumerate(['tic', 'tac', 'toe']):
print(i, v)
# When looping over several sequences at once, zip() pairs their elements one to one
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print('What is your {0}? It is {1}.'.format(q, a))
# Sort plus de-duplication
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
for f in sorted(set(basket)):
print(f)
| 24.958333 | 65 | 0.60601 |
4a19d2726e1cb77c7f72a471110caa2d87f24fb8
| 5,730 |
py
|
Python
|
kornia/losses/ssim.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 10 |
2021-01-26T05:25:01.000Z
|
2022-02-08T06:10:41.000Z
|
kornia/losses/ssim.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 3 |
2021-05-03T10:34:15.000Z
|
2022-02-17T04:25:26.000Z
|
kornia/losses/ssim.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 4 |
2021-04-30T01:51:38.000Z
|
2022-01-27T05:06:04.000Z
|
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.filters import get_gaussian_kernel2d
class SSIM(nn.Module):
r"""Creates a criterion that measures the Structural Similarity (SSIM)
index between each element in the input `x` and target `y`.
The index can be described as:
.. math::
\text{SSIM}(x, y) = \frac{(2\mu_x\mu_y+c_1)(2\sigma_{xy}+c_2)}
{(\mu_x^2+\mu_y^2+c_1)(\sigma_x^2+\sigma_y^2+c_2)}
where:
- :math:`c_1=(k_1 L)^2` and :math:`c_2=(k_2 L)^2` are two variables to
stabilize the division with weak denominator.
- :math:`L` is the dynamic range of the pixel-values (typically this is
:math:`2^{\#\text{bits per pixel}}-1`).
the loss, or the Structural dissimilarity (DSSIM) can be finally described
as:
.. math::
\text{loss}(x, y) = \frac{1 - \text{SSIM}(x, y)}{2}
Arguments:
window_size (int): the size of the kernel.
max_val (float): the dynamic range of the images. Default: 1.
reduction (str, optional): Specifies the reduction to apply to the
output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of elements
in the output, 'sum': the output will be summed. Default: 'none'.
Returns:
Tensor: the ssim index.
Shape:
- Input: :math:`(B, C, H, W)`
- Target :math:`(B, C, H, W)`
- Output: scale, if reduction is 'none', then :math:`(B, C, H, W)`
Examples::
>>> input1 = torch.rand(1, 4, 5, 5)
>>> input2 = torch.rand(1, 4, 5, 5)
>>> ssim = kornia.losses.SSIM(5, reduction='none')
>>> loss = ssim(input1, input2) # 1x4x5x5
"""
def __init__(
self,
window_size: int,
reduction: str = 'none',
max_val: float = 1.0) -> None:
super(SSIM, self).__init__()
self.window_size: int = window_size
self.max_val: float = max_val
self.reduction: str = reduction
self.window: torch.Tensor = get_gaussian_kernel2d(
(window_size, window_size), (1.5, 1.5))
self.padding: int = self.compute_zero_padding(window_size)
self.C1: float = (0.01 * self.max_val) ** 2
self.C2: float = (0.03 * self.max_val) ** 2
@staticmethod
def compute_zero_padding(kernel_size: int) -> int:
"""Computes zero padding."""
return (kernel_size - 1) // 2
def filter2D(
self,
input: torch.Tensor,
kernel: torch.Tensor,
channel: int) -> torch.Tensor:
return F.conv2d(input, kernel, padding=self.padding, groups=channel)
def forward( # type: ignore
self,
img1: torch.Tensor,
img2: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(img1):
raise TypeError("Input img1 type is not a torch.Tensor. Got {}"
.format(type(img1)))
if not torch.is_tensor(img2):
raise TypeError("Input img2 type is not a torch.Tensor. Got {}"
.format(type(img2)))
if not len(img1.shape) == 4:
raise ValueError("Invalid img1 shape, we expect BxCxHxW. Got: {}"
.format(img1.shape))
if not len(img2.shape) == 4:
raise ValueError("Invalid img2 shape, we expect BxCxHxW. Got: {}"
.format(img2.shape))
if not img1.shape == img2.shape:
raise ValueError("img1 and img2 shapes must be the same. Got: {}"
.format(img1.shape, img2.shape))
if not img1.device == img2.device:
raise ValueError("img1 and img2 must be in the same device. Got: {}"
.format(img1.device, img2.device))
if not img1.dtype == img2.dtype:
raise ValueError("img1 and img2 must be in the same dtype. Got: {}"
.format(img1.dtype, img2.dtype))
# prepare kernel
b, c, h, w = img1.shape
tmp_kernel: torch.Tensor = self.window.to(img1.device).to(img1.dtype)
kernel: torch.Tensor = tmp_kernel.repeat(c, 1, 1, 1)
# compute local mean per channel
mu1: torch.Tensor = self.filter2D(img1, kernel, c)
mu2: torch.Tensor = self.filter2D(img2, kernel, c)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
# compute local sigma per channel
sigma1_sq = self.filter2D(img1 * img1, kernel, c) - mu1_sq
sigma2_sq = self.filter2D(img2 * img2, kernel, c) - mu2_sq
sigma12 = self.filter2D(img1 * img2, kernel, c) - mu1_mu2
ssim_map = ((2 * mu1_mu2 + self.C1) * (2 * sigma12 + self.C2)) / \
((mu1_sq + mu2_sq + self.C1) * (sigma1_sq + sigma2_sq + self.C2))
loss = torch.clamp(torch.tensor(1.) - ssim_map, min=0, max=1) / 2.
if self.reduction == 'mean':
loss = torch.mean(loss)
elif self.reduction == 'sum':
loss = torch.sum(loss)
elif self.reduction == 'none':
pass
return loss
######################
# functional interface
######################
def ssim(
img1: torch.Tensor,
img2: torch.Tensor,
window_size: int,
reduction: str = 'none',
max_val: float = 1.0) -> torch.Tensor:
r"""Function that measures the Structural Similarity (SSIM) index between
each element in the input `x` and target `y`.
See :class:`~kornia.losses.SSIM` for details.
"""
return SSIM(window_size, reduction, max_val)(img1, img2)
| 35.590062 | 80 | 0.564049 |
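A minimal usage sketch for the SSIM module above, assuming a kornia version that ships this file and that PyTorch is installed; the tensor sizes and window size are illustrative only.

import torch
from kornia.losses import SSIM, ssim

# two random batches of 4-channel 5x5 images in [0, 1]
img_a = torch.rand(1, 4, 5, 5)
img_b = torch.rand(1, 4, 5, 5)

# class interface: returns the per-pixel DSSIM/2 map with the same shape as the inputs
criterion = SSIM(window_size=5, reduction='none')
loss_map = criterion(img_a, img_b)  # 1x4x5x5

# functional interface with a scalar reduction
mean_loss = ssim(img_a, img_b, window_size=5, reduction='mean')
print(loss_map.shape, mean_loss.item())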
4a19d31d451a6813b730865f97f331e3c82514cd
| 4,648 |
py
|
Python
|
tests/jupyterlab_hubshare/test_handlers.py
|
lydian/jupyterlab_hubshre
|
18e3f270569d9f0d7c377bb3e26678300c19d22b
|
[
"BSD-3-Clause"
] | 3 |
2021-03-29T07:21:27.000Z
|
2021-03-29T15:48:02.000Z
|
tests/jupyterlab_hubshare/test_handlers.py
|
lydian/jupyterlab_hubshre
|
18e3f270569d9f0d7c377bb3e26678300c19d22b
|
[
"BSD-3-Clause"
] | 5 |
2021-04-28T08:48:00.000Z
|
2021-06-07T06:47:10.000Z
|
tests/jupyterlab_hubshare/test_handlers.py
|
lydian/jupyterlab_hubshre
|
18e3f270569d9f0d7c377bb3e26678300c19d22b
|
[
"BSD-3-Clause"
] | 3 |
2021-05-06T00:43:59.000Z
|
2021-06-06T06:08:36.000Z
|
import base64
import datetime
import urllib.parse
from unittest import mock
import tornado
import pytest
from jupyter_server.services.contents.filemanager import FileContentsManager
from jupyterlab_hubshare.handlers import create_manager
from jupyterlab_hubshare.handlers import BaseMixin
from jupyterlab_hubshare.handlers import get_share_path
@pytest.mark.parametrize(
"cls_name",
[
"jupyter_server.services.contents.filemanager.FileContentsManager",
FileContentsManager,
],
)
def test_create_manager(cls_name):
kwargs = {"root_dir": "/tmp"}
manager = create_manager(cls_name, kwargs)
assert isinstance(manager, FileContentsManager)
assert manager.root_dir == "/tmp"
class DummyCls(object):
def __init__(self, config=None):
self.config = config or {}
self.contents_manager = mock.Mock()
class DummyHandler(BaseMixin, DummyCls):
pass
class TestBaseMixin(object):
@pytest.fixture
def mock_create_manager(self):
with mock.patch("jupyterlab_hubshare.handlers.create_manager") as m:
yield m
@pytest.mark.parametrize(
"config,use_internal_cm",
[
# didn't configure manager, use built-in
({}, True),
# self defined contents manager
({"contents_manager": {"manager_cls": "test_cls"}}, False),
],
)
def test_init(self, config, use_internal_cm, mock_create_manager):
dummy_handler = DummyHandler(config={"HubShare": config})
assert dummy_handler.hub_share_config == config
if use_internal_cm:
assert dummy_handler._cm == dummy_handler.contents_manager
else:
assert dummy_handler._cm == mock_create_manager.return_value
@pytest.fixture
def dummy_handler(self):
dummy_handler = DummyHandler()
dummy_handler._cm = mock.Mock()
return dummy_handler
def test_get_notebook_file_not_found(self, dummy_handler):
encoded_path = base64.b64encode("/path/to/notebook.ipynb".encode("utf-8"))
dummy_handler._cm.file_exists.return_value = False
with pytest.raises(tornado.web.HTTPError):
dummy_handler.get_notebook(encoded_path)
def test_get_notebook_wrong_file_type(self, dummy_handler):
encoded_path = base64.b64encode("/path/to/someformat".encode("utf-8"))
with pytest.raises(tornado.web.HTTPError):
dummy_handler.get_notebook(encoded_path)
def test_get_notebook_success(self, dummy_handler):
encoded_path = base64.b64encode("/path/to/notebook.ipynb".encode("utf-8"))
fake_content = {"name": "notebook.ipynb"}
dummy_handler._cm.get.return_value = fake_content
assert dummy_handler.get_notebook(encoded_path) == fake_content
def test_to_json(self, dummy_handler):
assert (
dummy_handler.to_json({"dt": datetime.datetime(2021, 2, 3, 4, 5, 6)})
== '{"dt": "2021-02-03T04:05:06"}'
)
@pytest.mark.parametrize(
"use_jupyterhub_redirect,expected_first_url_component",
[(True, "/user-redirect/"), (False, "/")],
)
@pytest.mark.parametrize(
"use_preview,expected_second_url_component",
[(True, "?hubshare-preview={FINAL_PATH}"), (False, "lab/tree/{FINAL_PATH}")],
)
@pytest.mark.parametrize(
"path_template,path_func,input_path,expected_file_path",
[
("{path}", None, "path/to/file", "path/to/file"),
("{user}/{path}", None, "path/to/file", "test_user/path/to/file"),
("{user}/{path}", lambda p: f"new/{p}", "path/to/file", "new/path/to/file"),
],
)
@pytest.mark.parametrize(
"base_url", ["", "https://example.com/", "https://example.com"]
)
def test_get_share_path(
use_jupyterhub_redirect,
use_preview,
base_url,
path_template,
path_func,
input_path,
expected_first_url_component,
expected_second_url_component,
expected_file_path,
monkeypatch,
):
monkeypatch.setenv("JUPYTERHUB_USER", "test_user")
if use_preview:
expected_file_path = urllib.parse.quote(
base64.b64encode(expected_file_path.encode("utf-8"))
)
expected_final_path = (
expected_first_url_component
+ expected_second_url_component.format(FINAL_PATH=expected_file_path)
)
if base_url:
expected_final_path = "https://example.com" + expected_final_path
assert (
get_share_path(
use_jupyterhub_redirect,
use_preview,
base_url,
path_template,
path_func,
{"path": input_path},
)
== expected_final_path
)
| 31.405405 | 84 | 0.66932 |
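The parametrized test above pins down how share links are laid out: for preview links the notebook path is base64-encoded and URL-quoted, and the result is mounted under /user-redirect/ when JupyterHub redirection is enabled. A standalone sketch of that layout, using a hypothetical build_share_path helper that is not part of the package:

import base64
import urllib.parse

def build_share_path(path, use_preview=True, use_redirect=True, base_url=""):
    # hypothetical helper mirroring the expectations asserted in test_get_share_path
    prefix = "/user-redirect/" if use_redirect else "/"
    if use_preview:
        encoded = urllib.parse.quote(base64.b64encode(path.encode("utf-8")))
        tail = "?hubshare-preview={}".format(encoded)
    else:
        tail = "lab/tree/{}".format(path)
    return base_url.rstrip("/") + prefix + tail

print(build_share_path("path/to/notebook.ipynb", base_url="https://example.com"))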
4a19d3be1d6e589062b189f612b316a4285300f4
| 3,932 |
py
|
Python
|
dydx/perp_orders.py
|
petioptrv/dydx-python
|
7f82bb6f73f7850152268e08ad1534789831d450
|
[
"Apache-2.0"
] | 99 |
2019-07-30T01:37:06.000Z
|
2022-03-15T05:09:05.000Z
|
dydx/perp_orders.py
|
GibraltarCapital/dydx-python
|
7f82bb6f73f7850152268e08ad1534789831d450
|
[
"Apache-2.0"
] | 25 |
2019-07-31T23:09:47.000Z
|
2022-02-01T00:22:48.000Z
|
dydx/perp_orders.py
|
GibraltarCapital/dydx-python
|
7f82bb6f73f7850152268e08ad1534789831d450
|
[
"Apache-2.0"
] | 38 |
2019-09-15T09:17:01.000Z
|
2022-03-24T10:26:20.000Z
|
from web3 import Web3
import dydx.constants as consts
import dydx.util as utils
EIP712_ORDER_STRUCT_STRING = \
'Order(' + \
'bytes32 flags,' + \
'uint256 amount,' + \
'uint256 limitPrice,' + \
'uint256 triggerPrice,' + \
'uint256 limitFee,' + \
'address maker,' + \
'address taker,' + \
'uint256 expiration' + \
')'
EIP712_DOMAIN_STRING = \
'EIP712Domain(' + \
'string name,' + \
'string version,' + \
'uint256 chainId,' + \
'address verifyingContract' + \
')'
EIP712_CANCEL_ORDER_STRUCT_STRING = \
'CancelLimitOrder(' + \
'string action,' + \
'bytes32[] orderHashes' + \
')'
EIP712_CANCEL_ACTION = 'Cancel Orders'
def get_domain_hash(pair):
contract_name = ''
contract_address = ''
if pair == consts.PAIR_PBTC_USDC:
contract_name = 'P1Orders'
contract_address = consts.BTC_P1_ORDERS_ADDRESS
elif pair == consts.PAIR_PLINK_USDC:
contract_name = 'P1Orders'
contract_address = consts.LINK_P1_ORDERS_ADDRESS
elif pair == consts.PAIR_WETH_PUSD:
contract_name = 'P1InverseOrders'
contract_address = consts.ETH_P1_ORDERS_ADDRESS
else:
raise ValueError('Invalid perpetual pair')
return Web3.solidityKeccak(
[
'bytes32',
'bytes32',
'bytes32',
'uint256',
'bytes32'
],
[
utils.hash_string(EIP712_DOMAIN_STRING),
utils.hash_string(contract_name),
utils.hash_string('1.0'),
consts.NETWORK_ID,
utils.address_to_bytes32(contract_address)
]
).hex()
def get_order_hash(order, pair):
'''
Returns the final signable EIP712 hash for an order.
'''
struct_hash = Web3.solidityKeccak(
[
'bytes32',
'bytes32',
'uint256',
'uint256',
'uint256',
'uint256',
'bytes32',
'bytes32',
'uint256'
],
[
utils.hash_string(EIP712_ORDER_STRUCT_STRING),
get_order_flags(order['salt'], order['isBuy'], order['limitFee']),
int(order['amount']),
int(order['limitPrice'] * consts.BASE_DECIMAL),
int(order['triggerPrice'] * consts.BASE_DECIMAL),
int(abs(order['limitFee']) * consts.BASE_DECIMAL),
utils.address_to_bytes32(order['maker']),
utils.address_to_bytes32(order['taker']),
int(order['expiration'])
]
).hex()
return utils.get_eip712_hash(get_domain_hash(pair), struct_hash)
def get_cancel_order_hash(order_hash):
'''
Returns the final signable EIP712 hash for a cancel order API call.
'''
action_hash = Web3.solidityKeccak(
['string'],
[EIP712_CANCEL_ACTION]
).hex()
orders_hash = Web3.solidityKeccak(
['bytes32'],
[order_hash]
).hex()
struct_hash = Web3.solidityKeccak(
[
'bytes32',
'bytes32',
'bytes32',
],
[
utils.hash_string(EIP712_CANCEL_ORDER_STRUCT_STRING),
action_hash,
orders_hash,
]
).hex()
return utils.get_eip712_hash(get_domain_hash(
consts.PAIR_PBTC_USDC, # Use BTC Market. Orderbook should accept it.
), struct_hash)
def sign_order(order, pair, private_key):
order_hash = get_order_hash(order, pair)
return utils.sign_hash(order_hash, private_key)
def sign_cancel_order(order_hash, private_key):
cancel_order_hash = get_cancel_order_hash(order_hash)
return utils.sign_hash(cancel_order_hash, private_key)
def get_order_flags(salt, isBuy, limitFee):
salt_string = utils.strip_hex_prefix(hex(salt))[-63:]
salt_int = 0
salt_int += 1 if isBuy else 0
salt_int += 4 if (limitFee < 0) else 0
salt_string += str(salt_int)
return '0x' + salt_string.rjust(64, '0')
| 27.117241 | 78 | 0.600203 |
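A usage sketch for the signing helpers above, assuming the dydx-python package (and web3) is installed; every order field, address and key below is a placeholder, not real trading data.

import dydx.constants as consts
from dydx.perp_orders import get_order_flags, sign_order

order = {
    'salt': 1234,
    'isBuy': True,
    'limitFee': 0.0015,
    'amount': 10000,
    'limitPrice': 50000.0,
    'triggerPrice': 0.0,
    'maker': '0x0000000000000000000000000000000000000001',
    'taker': '0x0000000000000000000000000000000000000002',
    'expiration': 1700000000,
}

# packed flags word combining salt, side and fee sign
print(get_order_flags(order['salt'], order['isBuy'], order['limitFee']))

# signing additionally needs a real private key:
# signature = sign_order(order, consts.PAIR_PBTC_USDC, private_key)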
4a19d5414c828db42e1973192e9de148723831ae
| 505 |
py
|
Python
|
disco.py
|
evanessa234/selenium-tasks
|
369f48bcb9fbba856074088886b8f1551f795b1f
|
[
"MIT"
] | null | null | null |
disco.py
|
evanessa234/selenium-tasks
|
369f48bcb9fbba856074088886b8f1551f795b1f
|
[
"MIT"
] | null | null | null |
disco.py
|
evanessa234/selenium-tasks
|
369f48bcb9fbba856074088886b8f1551f795b1f
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('https://discord.com/login')
user = driver.find_element_by_xpath('//input[@name=\'email\']').send_keys("your_email_here")
pwd = driver.find_element_by_xpath('//input[@name=\'password\']')
pwd.send_keys("your_password_here")
login = driver.find_element_by_xpath('//button[@class=\'marginBottom8-AtZOdT button-3k0cO7 button-38aScr lookFilled-1Gx00P colorBrand-3pXr91 sizeLarge-1vSeWK fullWidth-1orjjo grow-q77ONN\']')
login.click()
| 36.071429 | 191 | 0.774257 |
4a19d7c40f746d64de800d7bf78a387bb2ba7db2
| 1,651 |
py
|
Python
|
ex39.py
|
JeffreyAsuncion/PythonTheHardWay
|
97488d99c9feff8b7737fc6910d0db03ee406550
|
[
"MIT"
] | null | null | null |
ex39.py
|
JeffreyAsuncion/PythonTheHardWay
|
97488d99c9feff8b7737fc6910d0db03ee406550
|
[
"MIT"
] | null | null | null |
ex39.py
|
JeffreyAsuncion/PythonTheHardWay
|
97488d99c9feff8b7737fc6910d0db03ee406550
|
[
"MIT"
] | null | null | null |
# Exercise 39 - Dictionaries, Oh Lovely Dictionaries
# Create a mapping of state to abbreviation
states = {
'Oregon': 'OR',
'Florida': 'FL',
'California': 'CA',
'New York': 'NY',
'Michigan': 'MI'
}
# create a basic set of states and some cities in them
cities = {
'CA': 'San Francisco',
'MI': 'Detroit',
'FL': 'Jacksonville'
}
# add some more cities
cities['NY'] = 'New York'
cities['OR'] = 'Portland'
# print out some cities
print("-"*10)
print("NY State has: ", cities['NY'])
print("OR State has: ", cities['OR'])
# print some states
print("-"*10)
print("Michigan's abbreviation is: ", states['Michigan'])
print("Florida's abbreviation is: ", states['Florida'])
# do it by using the state then the cities dict
print("-"*10)
print("Michigan has: ", cities[states['Michigan']])
print("Florida has: ", cities[states['Florida']])
# print every state abbreviation
print("-"*10)
for state, abbrev in states.items():
print("%s is abbreviated %s" % (state, abbrev))
# print every city in state
print("-"*10)
for abbrev, city in cities.items():
print("%s has the city %s" % (abbrev, city))
# now do both at the same time
print("-"*10)
for state, abbrev in states.items():
print("%s state is abbreviated %s and has city %s" % (state, abbrev, cities[abbrev]))
print("-"*10)
#safely get an abbreviation by state that might not be there
state = states.get('Texas', None)
if not state:
print("Sorry, no Texas.")
# get a city with a default value
city = cities.get('TX', 'Does Not Exist')
print("The city for the state 'TX' is: %s" % city)
| 24.641791 | 89 | 0.623864 |
4a19d7c4c2ea5d703c7051c86f9d7041808d44c5
| 20,586 |
py
|
Python
|
pyfolio/utils.py
|
QuantInsti/pyfolio
|
906d49e566d1af260aacec109943016cf8258919
|
[
"Apache-2.0"
] | 3 |
2020-11-28T18:12:36.000Z
|
2021-06-09T11:31:28.000Z
|
pyfolio/utils.py
|
QuantInsti/pyfolio
|
906d49e566d1af260aacec109943016cf8258919
|
[
"Apache-2.0"
] | null | null | null |
pyfolio/utils.py
|
QuantInsti/pyfolio
|
906d49e566d1af260aacec109943016cf8258919
|
[
"Apache-2.0"
] | 3 |
2019-07-25T03:09:44.000Z
|
2020-07-02T18:39:26.000Z
|
#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import warnings
from itertools import cycle
from matplotlib.pyplot import cm
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import empyrical.utils
from os import environ
from .deprecate import deprecated
from . import pos
from . import txn
APPROX_BDAYS_PER_MONTH = 21
APPROX_BDAYS_PER_YEAR = 252
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
MM_DISPLAY_UNIT = 1000000.
DAILY = 'daily'
WEEKLY = 'weekly'
MONTHLY = 'monthly'
YEARLY = 'yearly'
ANNUALIZATION_FACTORS = {
DAILY: APPROX_BDAYS_PER_YEAR,
WEEKLY: WEEKS_PER_YEAR,
MONTHLY: MONTHS_PER_YEAR
}
DEPRECATION_WARNING = ("Data loaders have been moved to empyrical and will "
"be removed from pyfolio in a future release. Please "
"use e.g. empyrical.utils.get_symbol_rets() instead "
"of pyfolio.utils.get_symbol_rets()")
COLORMAP = 'Paired'
COLORS = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231',
'#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#fabebe',
'#008080', '#e6beff', '#aa6e28', '#800000', '#aaffc3',
'#808000', '#ffd8b1', '#000080', '#808080']
def one_dec_places(x, pos):
"""
Adds 1/10th decimal to plot ticks.
"""
return '%.1f' % x
def two_dec_places(x, pos):
"""
Adds 1/100th decimal to plot ticks.
"""
return '%.2f' % x
def percentage(x, pos):
"""
Adds percentage sign to plot ticks.
"""
return '%.0f%%' % x
def format_asset(asset):
"""
If zipline asset objects are used, we want to print them out prettily
within the tear sheet. This function should only be applied directly
before displaying.
"""
try:
import zipline.assets
except ImportError:
return asset
if isinstance(asset, zipline.assets.Asset):
return asset.symbol
else:
return asset
def vectorize(func):
"""
Decorator so that functions can be written to work on Series but
may still be called with DataFrames.
"""
def wrapper(df, *args, **kwargs):
if df.ndim == 1:
return func(df, *args, **kwargs)
elif df.ndim == 2:
return df.apply(func, *args, **kwargs)
return wrapper
def extract_rets_pos_txn_from_zipline(backtest):
"""
Extract returns, positions, transactions and leverage from the
backtest data structure returned by zipline.TradingAlgorithm.run().
The returned data structures are in a format compatible with the
rest of pyfolio and can be directly passed to
e.g. tears.create_full_tear_sheet().
Parameters
----------
backtest : pd.DataFrame
DataFrame returned by zipline.TradingAlgorithm.run()
Returns
-------
returns : pd.Series
Daily returns of strategy.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
Example (on the Quantopian research platform)
---------------------------------------------
>>> backtest = my_algo.run()
>>> returns, positions, transactions =
>>> pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
>>> pyfolio.tears.create_full_tear_sheet(returns,
>>> positions, transactions)
"""
backtest.index = backtest.index.normalize()
if backtest.index.tzinfo is None:
backtest.index = backtest.index.tz_localize('UTC')
returns = backtest.returns
raw_positions = []
no_overnight_position = True
for dt, pos_row in backtest.positions.iteritems():
no_overnight_position = no_overnight_position and (not pos_row)
df = pd.DataFrame(pos_row)
df.index = [dt] * len(df)
raw_positions.append(df)
if not raw_positions:
raise ValueError("The backtest does not have any positions.")
if no_overnight_position:
positions = pd.DataFrame({})
else:
positions = pd.concat(raw_positions)
positions = pos.extract_pos(positions, backtest.ending_cash)
transactions = txn.make_transaction_frame(backtest.transactions)
if transactions.index.tzinfo is None:
transactions.index = transactions.index.tz_localize('utc')
return returns, positions, transactions
def print_table(table,
name=None,
float_format=None,
formatters=None,
header_rows=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pandas.Series or pandas.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
float_format : function, optional
Formatter to use for displaying table elements, passed as the
`float_format` arg to pd.Dataframe.to_html.
E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.
formatters : list or dict, optional
Formatters to use by column, passed as the `formatters` arg to
pd.Dataframe.to_html.
header_rows : dict, optional
Extra rows to display at the top of the table.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if name is not None:
table.columns.name = name
html = table.to_html(float_format=float_format, formatters=formatters)
if header_rows is not None:
# Count the number of columns for the text to span
n_cols = html.split('<thead>')[1].split('</thead>')[0].count('<th>')
# Generate the HTML for the extra rows
rows = ''
for name, value in header_rows.items():
rows += ('\n <tr style="text-align: right;"><th>%s</th>' +
'<td colspan=%d>%s</td></tr>') % (name, n_cols, value)
# Inject the new HTML
html = html.replace('<thead>', '<thead>' + rows)
display(HTML(html))
def standardize_data(x):
"""
Standardize an array with mean and standard deviation.
Parameters
----------
x : np.array
Array to standardize.
Returns
-------
np.array
Standardized array.
"""
return (x - np.mean(x)) / np.std(x)
def detect_intraday(positions, transactions, threshold=0.25):
"""
Attempt to detect an intraday strategy. Get the number of
positions held at the end of the day, and divide that by the
number of unique stocks transacted every day. If the average quotient
is below a threshold, then an intraday strategy is detected.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
boolean
True if an intraday strategy is detected.
"""
daily_txn = transactions.copy()
daily_txn.index = daily_txn.index.date
txn_count = daily_txn.groupby(level=0).symbol.nunique().sum()
daily_pos = positions.drop('cash', axis=1).replace(0, np.nan)
return daily_pos.count(axis=1).sum() / txn_count < threshold
def check_intraday(estimate, returns, positions, transactions):
"""
Logic for checking if a strategy is intraday and processing it.
Parameters
----------
estimate: boolean or str, optional
Approximate returns for intraday strategies.
See description in tears.create_full_tear_sheet.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, adjusted for intraday movement.
"""
if estimate == 'infer':
if positions is not None and transactions is not None:
if detect_intraday(positions, transactions):
warnings.warn('Detected intraday strategy; inferring positi' +
'ons from transactions. Set estimate_intraday' +
'=False to disable.')
return estimate_intraday(returns, positions, transactions)
else:
return positions
else:
return positions
elif estimate:
if positions is not None and transactions is not None:
return estimate_intraday(returns, positions, transactions)
else:
raise ValueError('Positions and txns needed to estimate intraday')
else:
return positions
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
"""
Intraday strategies will often not hold positions at the day end.
This attempts to find the point in the day that best represents
the activity of the strategy on that day, and effectively resamples
the end-of-day positions with the positions at this point of day.
The point of day is found by detecting when our exposure in the
market is at its maximum point. Note that this is an estimate.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, resampled for intraday behavior.
"""
# Construct DataFrame of transaction amounts
txn_val = transactions.copy()
txn_val.index.names = ['date']
txn_val['value'] = txn_val.amount * txn_val.price
txn_val = txn_val.reset_index().pivot_table(
index='date', values='value',
columns='symbol').replace(np.nan, 0)
# Cumulate transaction amounts each day
txn_val['date'] = txn_val.index.date
txn_val = txn_val.groupby('date').cumsum()
# Calculate exposure, then take peak of exposure every day
txn_val['exposure'] = txn_val.abs().sum(axis=1)
condition = (txn_val['exposure'] == txn_val.groupby(
pd.TimeGrouper('24H'))['exposure'].transform(max))
txn_val = txn_val[condition].drop('exposure', axis=1)
# Compute cash delta
txn_val['cash'] = -txn_val.sum(axis=1)
# Shift EOD positions to positions at start of next trading day
positions_shifted = positions.copy().shift(1).fillna(0)
starting_capital = positions.iloc[0].sum() / (1 + returns[0])
positions_shifted.cash[0] = starting_capital
# Format and add start positions to intraday position changes
txn_val.index = txn_val.index.normalize()
corrected_positions = positions_shifted.add(txn_val, fill_value=0)
corrected_positions.index.name = 'period_close'
corrected_positions.columns.name = 'sid'
return corrected_positions
def to_utc(df):
"""
For use in tests; applied UTC timestamp to DataFrame.
"""
try:
df.index = df.index.tz_localize('UTC')
except TypeError:
df.index = df.index.tz_convert('UTC')
return df
def to_series(df):
"""
For use in tests; converts DataFrame's first column to Series.
"""
return df[df.columns[0]]
@deprecated(msg=DEPRECATION_WARNING)
def default_returns_func(symbol, start=None, end=None):
"""
Gets returns for a symbol.
Queries Yahoo Finance. Attempts to cache SPY.
Parameters
----------
symbol : str
Ticker symbol, e.g. APPL.
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pd.Series
Daily returns for the symbol.
- See full explanation in tears.create_full_tear_sheet (returns).
"""
    return empyrical.utils.default_returns_func(symbol, start=start, end=end)
@deprecated(msg=DEPRECATION_WARNING)
def get_fama_french():
"""
Retrieve Fama-French factors via pandas-datareader
Returns
-------
pandas.DataFrame
Percent change of Fama-French factors
"""
return empyrical.utils.get_fama_french()
@deprecated(msg=DEPRECATION_WARNING)
def get_returns_cached(filepath, update_func, latest_dt, **kwargs):
"""
Get returns from a cached file if the cache is recent enough,
otherwise, try to retrieve via a provided update function and
update the cache file.
Parameters
----------
filepath : str
Path to cached csv file
update_func : function
Function to call in case cache is not up-to-date.
latest_dt : pd.Timestamp (tz=UTC)
Latest datetime required in csv file.
**kwargs : Keyword arguments
Optional keyword arguments will be passed to update_func()
Returns
-------
pandas.DataFrame
DataFrame containing returns
"""
return empyrical.utils.get_returns_cached(filepath,
update_func,
latest_dt,
**kwargs)
@deprecated(msg=DEPRECATION_WARNING)
def get_symbol_returns_from_yahoo(symbol, start=None, end=None):
"""
Wrapper for pandas.io.data.get_data_yahoo().
Retrieves prices for symbol from yahoo and computes returns
based on adjusted closing prices.
Parameters
----------
symbol : str
Symbol name to load, e.g. 'SPY'
start : pandas.Timestamp compatible, optional
Start date of time period to retrieve
end : pandas.Timestamp compatible, optional
End date of time period to retrieve
Returns
-------
pandas.DataFrame
Returns of symbol in requested period.
"""
    return empyrical.utils.get_symbol_returns_from_yahoo(symbol, start=start, end=end)
@deprecated(msg=DEPRECATION_WARNING)
def get_treasury_yield(start=None, end=None, period='3MO'):
"""
Load treasury yields from FRED.
Parameters
----------
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
    period : {'1MO', '3MO', '6MO', '1', '5', '10'}, optional
Which maturity to use.
Returns
-------
pd.Series
Annual treasury yield for every day.
"""
    return empyrical.utils.get_treasury_yield(start=start,
                                              end=end,
                                              period=period)
@deprecated(msg=DEPRECATION_WARNING)
def get_utc_timestamp(dt):
"""
Returns the Timestamp/DatetimeIndex
with either localized or converted to UTC.
Parameters
----------
dt : Timestamp/DatetimeIndex
the date(s) to be converted
Returns
-------
same type as input
date(s) converted to UTC
"""
return empyrical.utils.get_utc_timestamp(dt)
@deprecated(msg=DEPRECATION_WARNING)
def cache_dir(environ=environ):
return empyrical.utils.cache_dir(environ=environ)
@deprecated(msg=DEPRECATION_WARNING)
def ensure_directory(path):
"""
Ensure that a directory named "path" exists.
"""
    return empyrical.utils.ensure_directory(path)
@deprecated(msg=DEPRECATION_WARNING)
def data_path(name):
    return empyrical.utils.data_path(name)
@deprecated(msg=DEPRECATION_WARNING)
def _1_bday_ago():
    return empyrical.utils._1_bday_ago()
@deprecated(msg=DEPRECATION_WARNING)
def load_portfolio_risk_factors(filepath_prefix=None, start=None, end=None):
"""
Load risk factors Mkt-Rf, SMB, HML, Rf, and UMD.
Data is stored in HDF5 file. If the data is more than 2
days old, redownload from Dartmouth.
Returns
-------
five_factors : pd.DataFrame
Risk factors timeseries.
"""
    return empyrical.utils.load_portfolio_risk_factors(filepath_prefix=filepath_prefix,
                                                       start=start,
                                                       end=end)
# Settings dict to store functions/values that may
# need to be overridden depending on the users environment
SETTINGS = {
'returns_func': default_returns_func
}
def register_return_func(func):
"""
Registers the 'returns_func' that will be called for
retrieving returns data.
Parameters
----------
func : function
A function that returns a pandas Series of asset returns.
The signature of the function must be as follows
>>> func(symbol)
Where symbol is an asset identifier
Returns
-------
None
"""
SETTINGS['returns_func'] = func
def get_symbol_rets(symbol, start=None, end=None):
"""
Calls the currently registered 'returns_func'
Parameters
----------
symbol : object
An identifier for the asset whose return
series is desired.
e.g. ticker symbol or database ID
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pandas.Series
Returned by the current 'returns_func'
"""
return SETTINGS['returns_func'](symbol,
start=start,
end=end)
def configure_legend(ax, autofmt_xdate=True, change_colors=False,
rotation=30, ha='right'):
"""
Format legend for perf attribution plots:
- put legend to the right of plot instead of overlapping with it
- make legend order match up with graph lines
- set colors according to colormap
"""
chartBox = ax.get_position()
ax.set_position([chartBox.x0, chartBox.y0,
chartBox.width * 0.75, chartBox.height])
# make legend order match graph lines
handles, labels = ax.get_legend_handles_labels()
handles_and_labels_sorted = sorted(zip(handles, labels),
key=lambda x: x[0].get_ydata()[-1],
reverse=True)
handles_sorted = [h[0] for h in handles_and_labels_sorted]
labels_sorted = [h[1] for h in handles_and_labels_sorted]
if change_colors:
for handle, color in zip(handles_sorted,
cycle(COLORS)):
handle.set_color(color)
ax.legend(handles=handles_sorted,
labels=labels_sorted,
frameon=True,
framealpha=0.5,
loc='upper left',
bbox_to_anchor=(1.05, 1),
fontsize='large')
# manually rotate xticklabels instead of using matplotlib's autofmt_xdate
# because it disables xticklabels for all but the last plot
if autofmt_xdate:
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
def sample_colormap(cmap_name, n_samples):
"""
Sample a colormap from matplotlib
"""
colors = []
colormap = cm.cmap_d[cmap_name]
for i in np.linspace(0, 1, n_samples):
colors.append(colormap(i))
return colors
| 29.965066 | 78 | 0.640581 |
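The SETTINGS/register_return_func hook at the end of the module is the intended way to plug in a custom data source. A minimal sketch, assuming pyfolio and its dependencies are installed; the returns below are made-up placeholder data:

import pandas as pd
import pyfolio.utils as pf_utils

def my_returns_func(symbol, start=None, end=None):
    # stand-in data source: three days of flat 1% daily returns for any symbol
    idx = pd.date_range('2021-01-04', periods=3, tz='UTC')
    return pd.Series([0.01, 0.01, 0.01], index=idx, name=symbol)

pf_utils.register_return_func(my_returns_func)
print(pf_utils.get_symbol_rets('SPY'))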
4a19d8baff7b4aae42c41bc8a5cd421e99074c40
| 8,134 |
py
|
Python
|
apps/personal/clock/main.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
apps/personal/clock/main.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | 2 |
2020-01-17T00:44:53.000Z
|
2020-01-19T21:10:48.000Z
|
apps/personal/clock/main.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | 1 |
2020-01-14T22:44:27.000Z
|
2020-01-14T22:44:27.000Z
|
from __future__ import division
import math
import os
import time
from datetime import datetime, timedelta
from dateutil.zoneinfo import getzoneinfofile_stream, ZoneInfoFile
from subprocess import check_output, CalledProcessError
from apps import ZeroApp
from actions import FirstBootAction
from ui import Menu, Refresher, Canvas, IntegerAdjustInput, Listbox, LoadingBar, PrettyPrinter as Printer
from helpers import read_or_create_config, local_path_gen, setup_logger
logger = setup_logger(__name__, "warning")
local_path = local_path_gen(__name__)
class ClockApp(ZeroApp, Refresher):
def __init__(self, i, o, *args, **kwargs):
super(ClockApp, self).__init__(i, o)
self.menu_name = "Clock"
self.countdown = None
self.refresher = Refresher(self.on_refresh, i, o, keymap={"KEY_RIGHT":self.countdown_settings, "KEY_DOWN":self.force_sync_time})
default_config = '{}'
config_filename = "config.json"
self.config = read_or_create_config(local_path(config_filename), default_config, self.menu_name+" app")
def set_context(self, c):
self.context = c
c.register_firstboot_action(FirstBootAction("set_timezone", self.set_timezone, depends=None, not_on_emulator=True))
c.register_firstboot_action(FirstBootAction("force_sync_time", self.force_sync_time, depends=["set_timezone", "check_connectivity"], not_on_emulator=True))
def force_sync_time(self):
Printer("Syncing time", self.i, self.o, 0)
try:
output = check_output(["sntp", "-S", "pool.ntp.org"])
except CalledProcessError:
logger.exception("Failed to sync time!")
Printer("Failed to sync time!", self.i, self.o, 1)
return False
except OSError:
logger.exception("Failed to sync time - sntp not installed!")
Printer("Failed to sync time (no sntp)!", self.i, self.o, 1)
return False
else:
Printer("Synced time successfully!", self.i, self.o, 1)
return True
def format_countdown(self):
if not self.countdown: return None
h, m, s, sign = self.get_countdown_time_left()
if sign: return None
return "{}m".format(h*60+m)
def get_countdown_time_left(self):
delta = self.countdown["time"]-datetime.now()
seconds = delta.seconds
sign = None
if delta.days < 0:
seconds = -seconds
sign = "+"
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if sign == "+":
hours = hours+24
return hours, minutes, seconds, sign
def countdown_settings(self):
# Setting an absolute countdown is not yet possible
# because we don't yet have a TimePicker UI element
def gmc(): #get menu contents
countdown_label = self.format_countdown()
contents = []
if countdown_label: contents.append(["Countdown: {}".format(countdown_label)])
#contents.append(["Set absolute", lambda: self.set_countdown(absolute=True)])
contents.append(["Set relative", self.set_countdown])
# Add an option for setting the timezone
contents.append(["Set timezone", self.set_timezone])
return contents
Menu([], self.i, self.o, "Countdown settings menu", contents_hook=gmc).activate()
def set_countdown(self, absolute=False):
if absolute: raise NotImplementedError # Needs a TimePicker or something like that
rel_start = 0
message = "After (in minutes):"
if self.countdown:
# A countdown is already active
# Using it as a starting point
h, m, s, _ = self.get_countdown_time_left()
rel_start = h*60+m
offset = IntegerAdjustInput(rel_start, self.i, self.o, message=message).activate()
if offset is not None:
countdown = {"time": datetime.now()+timedelta(minutes=offset)}
self.countdown = countdown
# Shows a menu of available timezones, accept new TZ by pressing ENTER
def set_timezone(self):
try:
with open('/etc/timezone', "r") as f:
current_timezone = f.readline().strip()
except:
logger.exception("Can't get current timezone!")
current_timezone = None
else:
logger.info("Current timezone: {}".format(repr(current_timezone)))
lc = []
with LoadingBar(self.i, self.o, message="Getting timezones"):
for k in ZoneInfoFile(getzoneinfofile_stream()).zones.keys():
lc.append([k])
lc = sorted(lc)
choice = Listbox(lc, self.i, self.o, "Timezone selection listbox", selected=current_timezone).activate()
if choice:
# Setting timezone using timedatectl
try:
check_output(["timedatectl", "set-timezone", choice])
except CalledProcessError as e:
logger.exception("Can't set timezone using timedatectl! Return code: {}, output: {}".format(e.returncode, repr(e.output)))
return False
else:
logger.info("Set timezone successfully")
return True
else:
return None
def draw_analog_clock(self, c, time, radius="min(*c.size) / 3", clock_x = "center_x+32", clock_y = "center_y+5", h_len = "radius / 2", m_len = "radius - 5", s_len = "radius - 3", **kwargs):
"""Draws the analog clock, with parameters configurable through config.json."""
center_x, center_y = c.get_center()
clock_x = eval(clock_x)
clock_y = eval(clock_y)
radius = eval(radius)
c.ellipse((clock_x - radius, clock_y - radius, clock_x + radius, clock_y + radius), fill=False, outline="white")
self.draw_needle(c, 60 - time.second / 60, eval(s_len), clock_x, clock_y, 1)
self.draw_needle(c, 60 - time.minute / 60, eval(m_len), clock_x, clock_y, 1)
self.draw_needle(c, 24 - time.hour / 24, eval(h_len), clock_x, clock_y, 1)
def draw_countdown(self, c, countdown_x="(center_x/2)-10", countdown_y="center_y/2*3", **kwargs):
"""Draws the digital clock, with parameters configurable through config.json."""
h, m, s, sign = self.get_countdown_time_left()
hz, mz, sz = map(lambda x:str(x).zfill(2), (h, m, s))
string = "{}:{}".format(mz, sz)
if h: string = hz+":"+string
if sign: string = sign+string
center_x, center_y = c.get_center()
centered_coords = c.get_centered_text_bounds(string)
x = eval(countdown_x)
y = eval(countdown_y)
c.text((x, y), string, fill="white")
def draw_text(self, c, time, text_x="10", text_y="center_y-5", time_format = "%H:%M:%S", **kwargs):
"""Draws the digital clock, with parameters configurable through config.json."""
time_str = time.strftime(time_format)
center_x, center_y = c.get_center()
centered_coords = c.get_centered_text_bounds(time_str)
x = eval(text_x)
y = eval(text_y)
c.text(time_str, (x, y))
def on_refresh(self):
current_time = datetime.now()
return self.render_clock(current_time, **self.config)
def render_clock(self, time, **kwargs):
c = Canvas(self.o)
width, height = c.size
self.draw_text(c, time, **kwargs)
self.draw_analog_clock(c, time, **kwargs)
if self.countdown:
self.draw_countdown(c, **kwargs)
return c.get_image()
def draw_needle(self, c, progress, radius, x, y, width):
# type: (Canvas, float, float, float, float, int) -> None
hour_angle = math.pi * 2 * progress + math.pi
c.line(
(
int(x),
int(y),
int(x + radius * math.sin(hour_angle)),
int(y + radius * math.cos(hour_angle))
),
width=width,
fill=True
)
def on_start(self):
self.refresher.activate()
| 41.927835 | 193 | 0.615441 |
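The needle geometry in draw_needle is plain trigonometry and can be checked in isolation. A standalone sketch, using made-up centre coordinates and radii; the progress expressions match the ones render_clock passes in:

import math

def needle_end(progress, radius, cx, cy):
    # same angle convention as ClockApp.draw_needle above
    angle = math.pi * 2 * progress + math.pi
    return (cx + radius * math.sin(angle), cy + radius * math.cos(angle))

hour, minute = 3, 0
print("hour hand   ->", needle_end(24 - hour / 24, 10, 64, 37))
print("minute hand ->", needle_end(60 - minute / 60, 15, 64, 37))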
4a19d9171d127868a27361c52d0953ac4c0a3826
| 9,765 |
py
|
Python
|
flex-templates/python/regional_dlp_de_identification/pubsub_dlp_bigquery.py
|
marklindsey11/Google-Cloud-DataWarehouse
|
987aaf34ae899edde82b7854530519ea9231e519
|
[
"Apache-2.0"
] | 16 |
2021-12-16T21:09:32.000Z
|
2022-03-17T08:00:06.000Z
|
flex-templates/python/regional_dlp_de_identification/pubsub_dlp_bigquery.py
|
marklindsey11/Google-Cloud-DataWarehouse
|
987aaf34ae899edde82b7854530519ea9231e519
|
[
"Apache-2.0"
] | 25 |
2021-12-16T23:28:15.000Z
|
2022-03-31T18:55:40.000Z
|
flex-templates/python/regional_dlp_de_identification/pubsub_dlp_bigquery.py
|
marklindsey11/Google-Cloud-DataWarehouse
|
987aaf34ae899edde82b7854530519ea9231e519
|
[
"Apache-2.0"
] | 16 |
2021-12-16T21:17:00.000Z
|
2022-03-16T13:56:32.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import json
import logging
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.options.pipeline_options import (GoogleCloudOptions,
PipelineOptions)
from apache_beam.transforms import DoFn, ParDo, PTransform, BatchElements
from apache_beam.utils.annotations import experimental
def run(argv=None, save_main_session=True):
"""Build and run the pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--output_table',
required=True,
help=(
'Output BigQuery table for results specified as: '
'PROJECT:DATASET.TABLE or DATASET.TABLE.'
)
)
parser.add_argument(
'--bq_schema',
required=True,
help=(
'Output BigQuery table schema specified as string with format: '
'FIELD_1:STRING,FIELD_2:STRING,...'
)
)
parser.add_argument(
'--dlp_project',
required=True,
help=(
'ID of the project that holds the DLP template.'
)
)
parser.add_argument(
'--dlp_location',
required=False,
help=(
'The Location of the DLP template resource.'
)
)
parser.add_argument(
'--deidentification_template_name',
required=True,
help=(
'Name of the DLP Structured De-identification Template '
'of the form "projects/<PROJECT>/locations/<LOCATION>'
'/deidentifyTemplates/<TEMPLATE_ID>"'
)
)
parser.add_argument(
"--window_interval_sec",
default=30,
type=int,
help=(
'Window interval in seconds for grouping incoming messages.'
)
)
parser.add_argument(
"--batch_size",
default=1000,
type=int,
help=(
'Number of records to be sent in a batch in '
'the call to the Data Loss Prevention (DLP) API.'
)
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--input_topic',
help=(
'Input PubSub topic of the form '
'"projects/<PROJECT>/topics/<TOPIC>".'
'A temporary subscription will be created from '
'the specified topic.'
)
)
group.add_argument(
'--input_subscription',
help=(
'Input PubSub subscription of the form '
'"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>."'
)
)
known_args, pipeline_args = parser.parse_known_args(argv)
options = PipelineOptions(
pipeline_args,
        save_main_session=save_main_session,
streaming=True
)
with beam.Pipeline(options=options) as p:
# Read from PubSub into a PCollection.
# If input_subscription provided, it will be used.
# If input_subscription not provided, input_topic will be used.
# If input_topic provided, a temporary subscription will be created
# from the specified topic.
if known_args.input_subscription:
messages = (
p
| 'Read from Pub/Sub' >>
beam.io.ReadFromPubSub(
subscription=known_args.input_subscription
).with_output_types(bytes)
| 'UTF-8 bytes to string' >>
beam.Map(lambda msg: msg.decode("utf-8"))
| 'Parse JSON payload' >>
beam.Map(json.loads)
| 'Flatten lists' >>
beam.FlatMap(normalize_data)
| 'Apply window' >> beam.WindowInto(
window.FixedWindows(known_args.window_interval_sec, 0)
)
)
else:
messages = (
p
| 'Read from Pub/Sub' >>
beam.io.ReadFromPubSub(
topic=known_args.input_topic
).with_output_types(bytes)
| 'UTF-8 bytes to string' >>
beam.Map(lambda msg: msg.decode("utf-8"))
| 'Parse JSON payload' >>
beam.Map(json.loads)
| 'Flatten lists' >>
beam.FlatMap(normalize_data)
| 'Apply window' >> beam.WindowInto(
window.FixedWindows(known_args.window_interval_sec, 0)
)
)
de_identified_messages = (
messages
| "Batching" >> BatchElements(
min_batch_size=known_args.batch_size,
max_batch_size=known_args.batch_size
)
| 'Convert dicts to table' >>
beam.Map(from_list_dicts_to_table)
| 'Call DLP de-identification' >>
MaskDetectedDetails(
project=known_args.dlp_project,
location=known_args.dlp_location,
template_name=known_args.deidentification_template_name
)
| 'Convert table to dicts' >>
beam.FlatMap(from_table_to_list_dict)
)
# Write to BigQuery.
de_identified_messages | 'Write to BQ' >> beam.io.WriteToBigQuery(
known_args.output_table,
schema=known_args.bq_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED
)
def normalize_data(data):
"""
The template reads a json from PubSub that can be a single object
or a List of objects. This function is used by a FlatMap transformation
to normalize the input in to individual objects.
See:
- https://beam.apache.org/documentation/transforms/python/elementwise/flatmap/
""" # noqa
if isinstance(data, list):
return data
return [data]
def from_list_dicts_to_table(list_item):
"""
Converts a Python list of dict object to a DLP API v2
ContentItem with value Table.
See:
- https://cloud.google.com/dlp/docs/reference/rest/v2/ContentItem#Table
- https://cloud.google.com/dlp/docs/inspecting-structured-text
"""
headers = []
rows = []
for key in sorted(list_item[0]):
headers.append({"name": key})
for item in list_item:
row = {"values": []}
for item_key in sorted(item):
row["values"].append({"string_value": item[item_key]})
rows.append(row)
table_item = {"table": {"headers": headers, "rows": rows}}
return table_item
def from_table_to_list_dict(content_item):
"""
Converts a DLP API v2 ContentItem of type Table with a single row
to a Python dict object.
See:
- https://cloud.google.com/dlp/docs/reference/rest/v2/ContentItem#Table
- https://cloud.google.com/dlp/docs/inspecting-structured-text
"""
result = []
for row in content_item.table.rows:
new_item = {}
for index, val in enumerate(content_item.table.headers):
new_item[val.name] = row.values[index].string_value
result.append(new_item)
return result
@experimental()
class MaskDetectedDetails(PTransform):
def __init__(
self,
project=None,
location="global",
template_name=None,
deidentification_config=None,
timeout=None):
self.config = {}
self.project = project
self.timeout = timeout
self.location = location
if template_name is not None:
self.config['deidentify_template_name'] = template_name
else:
self.config['deidentify_config'] = deidentification_config
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(
GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified '
'in "project" pipeline option')
return (
pcoll
| ParDo(_DeidentifyFn(
self.config,
self.timeout,
self.project,
self.location
)))
class _DeidentifyFn(DoFn):
def __init__(
self,
config=None,
timeout=None,
project=None,
location=None,
client=None
):
self.config = config
self.timeout = timeout
self.client = client
self.project = project
self.location = location
self.params = {}
def setup(self):
from google.cloud import dlp_v2
if self.client is None:
self.client = dlp_v2.DlpServiceClient()
self.params = {
'timeout': self.timeout,
'parent': "projects/{}/locations/{}".format(
self.project,
self.location
)
}
self.params.update(self.config)
def process(self, element, **kwargs):
operation = self.client.deidentify_content(
item=element, **self.params)
yield operation.item
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| 31.5 | 83 | 0.582796 |
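from_list_dicts_to_table only reshapes plain Python data, so its output can be inspected without Apache Beam or the DLP client installed. A standalone sketch with the conversion inlined and made-up records:

records = [
    {"name": "alice", "email": "alice@example.com"},
    {"name": "bob", "email": "bob@example.com"},
]

# same layout the function builds: sorted headers plus one row of string_values per record
headers = [{"name": key} for key in sorted(records[0])]
rows = [
    {"values": [{"string_value": item[key]} for key in sorted(item)]}
    for item in records
]
table_item = {"table": {"headers": headers, "rows": rows}}
print(table_item)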
4a19d91f68f448a1fb524c8bc4d3b3c304f8feab
| 854 |
py
|
Python
|
components/collector/collector/collectors/gitlab.py
|
WMeijbeek/quality-time
|
325366a36b1f54f91b748e5e9a7bedee2d2a5ee7
|
[
"Apache-2.0"
] | null | null | null |
components/collector/collector/collectors/gitlab.py
|
WMeijbeek/quality-time
|
325366a36b1f54f91b748e5e9a7bedee2d2a5ee7
|
[
"Apache-2.0"
] | null | null | null |
components/collector/collector/collectors/gitlab.py
|
WMeijbeek/quality-time
|
325366a36b1f54f91b748e5e9a7bedee2d2a5ee7
|
[
"Apache-2.0"
] | null | null | null |
"""Gitlab metric source."""
import requests
from collector.collector import Collector
from collector.type import Measurement, URL
class GitlabJobs(Collector):
"""Collector class to get job counts from Gitlab."""
def api_url(self, **parameters) -> URL:
return URL(f"{parameters.get('url')}/api/v4/projects/{parameters.get('project')}/"
f"jobs?private_token={parameters.get('private_token')}")
def parse_source_response(self, response: requests.Response, **parameters) -> Measurement:
return str(len(response.json()))
class GitlabFailedJobs(GitlabJobs):
"""Collector class to get failed job counts from Gitlab."""
def parse_source_response(self, response: requests.Response, **parameters) -> Measurement:
return str(len([job for job in response.json() if job["status"] == "failed"]))
| 34.16 | 94 | 0.696721 |
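Both collectors simply count entries in the JSON list returned by the GitLab jobs API. A standalone sketch of the counting logic with a made-up response:

jobs = [
    {"id": 1, "status": "success"},
    {"id": 2, "status": "failed"},
    {"id": 3, "status": "failed"},
]
print(str(len(jobs)))                                                # GitlabJobs -> "3"
print(str(len([job for job in jobs if job["status"] == "failed"])))  # GitlabFailedJobs -> "2"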
4a19d9299eae16ed2a0f4bfadf0a639d309c16e7
| 8,255 |
py
|
Python
|
update-query.py
|
suin/update-query
|
51d1ceca8888bb516d145d741ac4a029071f175d
|
[
"MIT"
] | 3 |
2016-01-10T02:03:31.000Z
|
2020-04-19T15:29:18.000Z
|
update-query.py
|
suin/update-query
|
51d1ceca8888bb516d145d741ac4a029071f175d
|
[
"MIT"
] | null | null | null |
update-query.py
|
suin/update-query
|
51d1ceca8888bb516d145d741ac4a029071f175d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Update query management tool
#
import os
import argparse
import commands
import sys
import re
import time
import glob
class TokenCollection:
"""
Token collection object
"""
def __init__(self, tokens = []):
"""
Returns new TokenCollection object
"""
self.tokens = tokens
self.index = 0
def __len__(self):
return len(self.tokens)
def __iter__(self):
"""
Returns this instance
"""
return self
def append(self, token):
"""
Appends a token object to this collection
"""
self.tokens.append(token)
return self
def next(self):
"""
Returns current token object
"""
if self.index >= len(self.tokens):
self.index = 0
raise StopIteration
result = self.tokens[self.index]
self.index += 1
return result
def has(self, environment_name):
"""
Determines if this has the token
"""
if self.find(environment_name) is False:
return False
else:
return True
def find(self, environment_name):
"""
Returns token object
"""
for token in self.tokens:
if token.get_environment_name() == environment_name:
return token
return False
class Token:
"""
Token file object
"""
def __init__(self, filename):
"""
Returns new Token object
"""
if self._isValidFilename(filename) is False:
raise Exception("Token file name is not valid: %s" % filename)
self.filename = filename
self.original_filename = filename
def get_filename(self):
"""
Returns file name
"""
return self.filename
def get_original_filename(self):
"""
Returns original file name
"""
return self.original_filename
def get_datetime(self):
"""
Returns datetime
"""
return self.filename.split('~')[0].replace('_', '')
def get_environment_name(self):
"""
Returns environment name
"""
return self.filename.split('~')[1].split('.')[0]
def log(self, message):
"""
Logs message
"""
now = time.strftime('%Y-%m-%d %H:%M:%S')
file = open(self.filename, 'a')
file.write("-- [%s] %s\n" % (now, message))
def update_time(self):
"""
Updates time
"""
info = self.filename.split('~')
info[0] = time.strftime('%Y%m%d_%H%M')
new_filename = '~'.join(info)
self._rename(new_filename)
@staticmethod
def _isValidFilename(filename):
"""
Determines if the file name is valid
"""
if re.match(r'^[0-9]{8}_[0-9]{4}~.+\.apply_token$', filename):
return True
else:
return False
def _rename(self, new):
"""
Renames filename
"""
old = self.filename
os.rename(old, new)
self.original_filename = old
self.filename = new
def init(args):
"""
Initialize with new token
"""
if args.name is None:
args.name = get_new_file_name("Environment name")
tokens = get_all_tokens()
if tokens.has(args.name):
print "Environment '%s' already exists." % (args.name)
print "Please try other name."
exit(1)
now = time.strftime('%Y%m%d_%H%M')
filename = "%s~%s.apply_token" % (now, args.name)
if os.path.exists(filename):
print "Token %s already exists" % (filename)
exit(1)
file = open(filename, 'w')
file.write("-- [%s] update-query create '%s'\n" % (time.strftime('%Y-%m-%d %H:%M:%S'), args.name))
file.close()
print "New token '%s' was created." % (filename)
def create(args):
"""
Create new file
"""
if args.name is None:
args.name = get_new_file_name("New file name")
now = time.strftime('%Y%m%d_%H%M')
filename = "%s_%s.sql" % (now, args.name)
if os.path.exists(filename):
print "File %s already exists" % (filename)
exit(1)
file = open(filename, 'w')
file.write('')
file.close()
print "New file '%s' was created." % (filename)
print "Please edit it."
def tokens(args):
"""
List up all tokens
"""
tokens = get_all_tokens()
for token in tokens:
print " " + token.get_filename()
def get_new_file_name(question, error_message = None):
"""
fetch new file name via interactive
"""
if error_message:
message = "%s (%s): " % (question, error_message)
else:
message = "%s: " % (question)
new_file_name = raw_input(message)
new_file_name = new_file_name.replace(' ', '_')
if new_file_name[-4:].lower() == '.sql':
new_file_name = get_new_file_name(question = question, error_message = "don't end with .sql")
if re.match(r'^[a-zA-Z0-9_]+$', new_file_name) is None:
new_file_name = get_new_file_name(question = question, error_message = "you can use alphabet, number, space and underscore")
return new_file_name
def apply(args):
"""
Concatenate all update queries together
"""
tokens = get_all_tokens()
if len(tokens) < 1:
print "Sorry, there is no environments."
print ""
print "If you use this first, initialize environment tokens:"
print " $ %s init" % (os.path.basename(__file__))
exit(1)
if args.name is None:
print "We know these environments:"
for token in tokens:
print " * " + token.get_environment_name()
args.name = fetch_environment_name()
if tokens.has(args.name) is False:
print "Sorry, such an environment not found: %s" % (args.name)
exit(1)
selected_token = tokens.find(args.name)
candidates = glob.glob('*.sql')
candidates.sort()
candidates = [candidate for candidate in candidates if re.match(r'^[0-9]{8}_[0-9]{4}_.+\.sql$', candidate)]
apply_query_files = []
for candidate in candidates:
candiadte_datetime = ''.join(candidate.split('_', 2)[0:2])
if candiadte_datetime > selected_token.get_datetime():
apply_query_files.append(candidate)
if len(apply_query_files) < 1:
print "There is no update for %s" % (args.name)
exit()
contents = []
for filename in apply_query_files:
file = open(filename, 'r')
content = "-- %(filename)s\n%(contents)s" % {'filename': filename, 'contents': file.read().strip()}
file.close()
contents.append(content)
patch_contents = "\n\n".join(contents)
patch_filename = 'patch.%s.%s.sql' % (args.name, time.strftime('%Y%m%d_%H%M'))
if os.path.exists(patch_filename):
print "Patch file already exists: %s" % (patch_filename)
exit(1)
for filename in apply_query_files:
selected_token.log('update-query cat "%s" >> "%s"' % (filename, patch_filename))
selected_token.update_time()
original_filename = selected_token.get_original_filename()
new_filename = selected_token.get_filename()
selected_token.log('update-query mv %s %s' % (original_filename, new_filename))
print "Token renamed %s -> %s" % (original_filename, new_filename)
file = open(patch_filename, 'w')
file.write(patch_contents)
file.close()
print "------------------------------"
print "Patch file was created!"
print ""
print " * " + patch_filename
print ""
print "Please apply the patch update queries in above file to the SQL server."
print "After you apply, please delete patch file from here."
def get_all_tokens():
"""
Returns all tokens
"""
tokens = TokenCollection()
for name in glob.glob('*.apply_token'):
tokens.append(Token(name))
return tokens
def fetch_environment_name():
"""
Fetch environment name via interactive
"""
environment_name = raw_input("Environment name: ")
if environment_name == '':
environment_name = fetch_environment_name()
return environment_name
def main():
"""
Main
"""
parser = argparse.ArgumentParser(description='Update query management tool')
subparsers = parser.add_subparsers(title='commands', metavar='command')
parser_init = subparsers.add_parser('init', help='initialize with new token')
parser_init.set_defaults(func=init)
parser_init.add_argument('name', type=str, nargs='?', help='environment name', default=None)
parser_create = subparsers.add_parser('create', help='create new update query')
parser_create.set_defaults(func=create)
parser_create.add_argument('name', type=str, nargs='?', help='new file name', default=None)
parser_apply = subparsers.add_parser('apply', help='concatenate update queries together and create a patch')
parser_apply.set_defaults(func=apply)
parser_apply.add_argument('name', type=str, nargs='?', help='environment name to apply update queries', default=None)
parser_tokens = subparsers.add_parser('tokens', help='list up all tokens')
parser_tokens.set_defaults(func=tokens)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 23.721264 | 126 | 0.67983 |
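The whole tool hinges on the token file naming convention <YYYYMMDD>_<HHMM>~<environment>.apply_token. A standalone sketch of how Token parses it, using a made-up file name:

filename = "20210101_0930~production.apply_token"

datetime_part = filename.split('~')[0].replace('_', '')  # Token.get_datetime() -> '202101010930'
environment = filename.split('~')[1].split('.')[0]       # Token.get_environment_name() -> 'production'
print(datetime_part, environment)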
4a19d9e84d3e4cc3dace7c2dcfd9df6cacf5db38
| 174 |
py
|
Python
|
dovesoft/__init__.py
|
srahul07/dovesoft
|
aabba703009af331f3d04c550693bd706400957b
|
[
"MIT"
] | null | null | null |
dovesoft/__init__.py
|
srahul07/dovesoft
|
aabba703009af331f3d04c550693bd706400957b
|
[
"MIT"
] | 3 |
2021-03-20T00:43:11.000Z
|
2022-01-06T22:33:23.000Z
|
dovesoft/__init__.py
|
srahul07/dovesoft
|
aabba703009af331f3d04c550693bd706400957b
|
[
"MIT"
] | null | null | null |
from .version import __version__
from .dovesoft import DoveSoftClient
from .exceptions import DoveSoftError, DoveSoftClientError, DoveSoftServerError
from .message import Sms
| 43.5 | 79 | 0.867816 |
4a19d9f233c6a2f5ab82b85f33c46904666111fd | 5,947 | py | Python | xinmei/tools/tensorflow_scripts/list_kernels/print_kernel_src_list.py | lh-ycx/tensorflow | fbb024db725f462de6a991541fed568ad4f75eb5 | ["Apache-2.0"] | null | null | null | xinmei/tools/tensorflow_scripts/list_kernels/print_kernel_src_list.py | lh-ycx/tensorflow | fbb024db725f462de6a991541fed568ad4f75eb5 | ["Apache-2.0"] | null | null | null | xinmei/tools/tensorflow_scripts/list_kernels/print_kernel_src_list.py | lh-ycx/tensorflow | fbb024db725f462de6a991541fed568ad4f75eb5 | ["Apache-2.0"] | null | null | null |
import os
import re
import sys

import click

from tensorflow.python.tools.selective_registration_header_lib import get_header_from_ops_and_kernels, \
    get_ops_and_kernels


class IncScanner:
    inc_reg = re.compile(r'^\s*#\s*include\s*["<](tensorflow/core/kernels/)?(?P<name>[^/]+)[>"].*$')
    special_cases = {
        'gather_nd_op_cpu_impl.h': 'gather_nd_op_cpu_impl_',
        'mirror_pad_op_cpu_impl.h': 'mirror_pad_op_cpu_impl_',
        'scatter_nd_op_cpu_impl.h': 'scatter_nd_op_cpu_impl_',
        'slice_op_cpu_impl.h': 'slice_op_cpu_impl_',
        'strided_slice_op_impl.h': 'strided_slice_op_inst_',
        'split_op.cc': 'split_lib_cpu.cc',
        'tile_ops.cc': 'tile_ops_cpu',
    }
    name_suffix = {'h', 'cc'}
    name_reg = re.compile(f'(?P<name>[^.]+)\\.(?P<suffix>[^.]+)')
    black_list = {'ops_util.cc'}

    def __init__(self, tf_dir):
        self._inc = None
        self._tf_dir = tf_dir
        self._tf_kernel_dir = None

    def _add(self, name):
        if self._inc is None:
            self._inc = {name}
        else:
            self._inc.add(name)

    @property
    def _kernel_dir(self):
        if self._tf_kernel_dir is None:
            self._tf_kernel_dir = os.path.join(self._tf_dir, 'tensorflow', 'core', 'kernels')
        return self._tf_kernel_dir

    def _contains(self, name):
        return self._inc is not None and name in self._inc

    @property
    def all(self):
        if self._inc is None:
            return
        for name in self._inc:
            yield name

    def clear(self):
        self._inc = None

    def scan_all_suffix(self, name):
        for suffix in IncScanner.name_suffix:
            self.scan(f'{name}.{suffix}')

    def scan(self, name):
        # check dup
        if self._contains(name) or name in IncScanner.black_list:
            return
        # check exist
        path = os.path.join(self._kernel_dir, name)
        if not os.path.isfile(path):
            return
        # add self
        self._add(name)
        # special case
        if name in IncScanner.special_cases:
            for f in os.listdir(self._kernel_dir):
                if f.startswith(IncScanner.special_cases[name]):
                    self.scan(f)
        # header <-> source
        name_match = IncScanner.name_reg.match(name)
        if name_match:
            for suffix in IncScanner.name_suffix:
                if suffix == name_match.group('suffix'):
                    continue
                self.scan(f'{name_match.group("name")}.{suffix}')
            if name_match.group('suffix') == 'h':
                self.scan_all_suffix(f'{name_match.group("name")}_cpu')
            if name_match.group('name').endswith('_op'):
                self.scan_all_suffix(f'{name_match.group("name")[:-3]}_functor')
        # find include statement
        with open(path) as fp:
            for line in fp:
                match = IncScanner.inc_reg.match(line)
                if not match:
                    continue
                self.scan(match.group('name'))


@click.command()
@click.option('--all_ops', type=click.Path(), default=os.path.join(os.path.dirname(__file__), 'all_ops'))
@click.option('--default_op', type=str, default='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp,_Arg:ArgOp,_Retval:RetvalOp')
@click.option('--graph', type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True), multiple=True)
@click.option('--header_out', type=click.File(mode='w'), default=sys.stdout)
@click.option('--kernels_out', type=click.File(mode='w'), default=sys.stderr)
@click.option('--tf_dir', type=click.Path(exists=True, dir_okay=True, file_okay=False, readable=True), default=None)
def cli(all_ops, default_op, graph, header_out, kernels_out, tf_dir):
    op_def = {}
    with open(all_ops) as fp:
        for op_line in fp:  # type: str
            op_line_parts = op_line.split('"')
            if len(op_line_parts) != 3:
                continue
            stripped_class_name = op_line_parts[0].replace(' ', '')
            src = op_line_parts[1]
            op_def[stripped_class_name] = src

    ops_and_kernels = get_ops_and_kernels('rawproto', graph, default_op)
    header_content = get_header_from_ops_and_kernels(ops_and_kernels, False)
    header_out.write(header_content)

    src_dict = {}
    inc_dict = {}
    inc_scanner = None if tf_dir is None else IncScanner(tf_dir)
    for op, kernel in ops_and_kernels:
        stripped_class_name = kernel.replace(' ', '')
        src = op_def[stripped_class_name]  # type: str
        last_sep = src.rfind('/')
        src_name = f'//{src[:last_sep]}:{src[last_sep+1:]}'
        if src_name in src_dict:
            op_set, class_name_set = src_dict[src_name]
            op_set.add(op)
            class_name_set.add(stripped_class_name)
        else:
            src_dict[src_name] = ({op}, {stripped_class_name})
        if inc_scanner is not None:
            inc_scanner.clear()
            inc_scanner.scan(os.path.basename(src))
            for inc in inc_scanner.all:
                if inc in inc_dict:
                    inc_dict[inc].add(stripped_class_name)
                else:
                    inc_dict[inc] = {stripped_class_name}

    src_list = list(src_dict.items())
    src_list.sort(key=lambda _: _[0])
    avoid_dup_set = set()
    for src_name, (op_set, class_name_set) in src_list:
        if src_name in avoid_dup_set:
            continue
        avoid_dup_set.add(src_name)
        print(f'"{src_name}", # Op"{",".join(op_set)}", Class"{",".join(class_name_set)}"', file=kernels_out)

    inc_list = list(inc_dict.items())
    inc_list.sort(key=lambda _: _[0])
    for inc, clz in inc_list:
        src_name = f'//tensorflow/core/kernels:{inc}'
        if src_name in avoid_dup_set:
            continue
        avoid_dup_set.add(src_name)
        print(f'"{src_name}", # dep by: {",".join(clz)}', file=kernels_out)


if __name__ == '__main__':
    cli()
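# Illustrative check (not part of the script above): IncScanner.inc_reg drives the
# dependency walk by pulling the bare kernel file name out of an #include line so
# scan() can recurse into it. The header names below are just example strings.
import re as _re

_inc_reg = _re.compile(r'^\s*#\s*include\s*["<](tensorflow/core/kernels/)?(?P<name>[^/]+)[>"].*$')
print(_inc_reg.match('#include "tensorflow/core/kernels/concat_lib.h"').group('name'))  # concat_lib.h
print(_inc_reg.match('#include "bounds_check.h"').group('name'))  # bounds_check.h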
| 34.982353 | 116 | 0.604843 |
4a19db7ccdd4c25d66ea1299e07f2c3d9c7e0e6b
| 424 | py | Python | src/fast_solution_using_deque.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | ["MIT"] | null | null | null | src/fast_solution_using_deque.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | ["MIT"] | null | null | null | src/fast_solution_using_deque.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | ["MIT"] | null | null | null |
from collections import deque


def count_number_of_fish_after_n_days(laternfish_list, no_of_days):
    # init counts
    basket = deque([0] * 9)
    for laternfish in laternfish_list:
        basket[laternfish] += 1
    # run through days
    for each_day in range(no_of_days):
        basket[7] += basket[0]
        basket.rotate(-1)
    return sum(basket)


print(count_number_of_fish_after_n_days([3, 4, 3, 1, 2], 256))
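# Illustrative check (not part of the original file): the deque above is a bucket
# count -- basket[i] holds how many fish currently have timer i -- so each simulated
# day costs O(1) regardless of population size. A naive per-fish simulation agrees
# with it on the small 18-day example from the puzzle statement:
def _naive_count(fish, days):
    fish = list(fish)
    for _ in range(days):
        spawned = []
        for i, timer in enumerate(fish):
            if timer == 0:
                fish[i] = 6           # parent resets to 6
                spawned.append(8)     # newborn starts at 8
            else:
                fish[i] = timer - 1
        fish.extend(spawned)
    return len(fish)

assert _naive_count([3, 4, 3, 1, 2], 18) == count_number_of_fish_after_n_days([3, 4, 3, 1, 2], 18) == 26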
| 24.941176 | 67 | 0.676887 |