repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---|
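Each row below stores one source file split into prefix, middle and suffix fields that concatenate back into the original file text (a fill-in-the-middle layout); the remaining columns are repository metadata. As a quick orientation, here is a minimal sketch of rejoining a row, assuming each row is available as a plain Python dict keyed by the column names above. The sample values are adapted from the first record below; the exact whitespace at the split boundaries is approximate.

def reconstruct(row):
    # Concatenating the three fields recovers the original file contents.
    return row["prefix"] + row["middle"] + row["suffix"]

# Illustrative row adapted from the first record below (split whitespace is approximate).
sample_row = {
    "repo_name": "guneysus/packathon2016",
    "path": "packathon2016/__init__.py",
    "language": "Python",
    "license": "bsd-3-clause",
    "size": 85,
    "score": 0,
    "prefix": "#!/usr/bin/env pyt",
    "middle": "hon\n# coding=utf-8\n__author__ = u'Ahmed Şeref GÜNEYSU'\nimport",
    "suffix": " ui\n",
}

print(reconstruct(sample_row))  # prints the reassembled packathon2016/__init__.py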
guneysus/packathon2016 | packathon2016/__init__.py | Python | bsd-3-clause | 85 | 0 |
#!/usr/bin/env python
# coding=utf-8
__author__ = u'Ahmed Şeref GÜNEYSU'
import ui
|
sanaldavis/Google-Python-Exercies | basic/mimic.py | Python | apache-2.0 | 2,511 | 0.010753 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
# +++your code here+++
word=open(filename).read().split()
mimic_dict={}
prev=''
for words in word:
if not prev in mimic_dict:
mimic_dict[prev]=[words]
else:
mimic_dict[prev].append(words)
prev=words
return mimic_dict
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
# +++your code here+++
for i in range(200):
print word,
nexts=mimic_dict.get(word)
if not nexts:
nexts=mimic_dict['']
word=random.choice(nexts)
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print dict
print_mimic(dict, '')
if __name__ == '__main__':
main()
|
retrography/scancode-toolkit | tests/cluecode/data/ics/markdown-markdown-extensions/html_tidy.py | Python | apache-2.0 | 225 | 0.031111 |
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
|
mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py | Python | apache-2.0 | 1,879 | 0.001597 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class PostDiffForCommit(AbstractStep):
def run(self, state):
self._tool.bugs.add_patch_to_bug(
state["bug_id"],
self.cached_lookup(state, "diff"),
"Patch for landing",
mark_for_review=False,
mark_for_landing=True)
|
willharris/django | tests/template_backends/test_utils.py | Python | bsd-3-clause | 1,198 | 0 |
from django.core.exceptions import ImproperlyConfigured
from django.template import engines
from django.test import SimpleTestCase, override_settings
class TemplateStringsTests(SimpleTestCase):
@override_settings(TEMPLATES=[{
'BACKEND': 'raise.import.error',
}])
def test_backend_import_error(self):
"""
Failing to import a backend keeps raising the original import error.
Regression test for #24265.
"""
with self.assertRaises(ImportError):
engines.all()
with self.assertRaises(ImportError):
engines.all()
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Incorrect: APP_DIRS and loaders are mutually incompatible.
'APP_DIRS': True,
'OPTIONS': {'loaders': []},
}])
def test_backend_improperly_configured(self):
"""
Failing to initialize a backend keeps raising the original exception.
Regression test for #24265.
"""
with self.assertRaises(ImproperlyConfigured):
engines.all()
with self.assertRaises(ImproperlyConfigured):
engines.all()
|
ptroja/spark2014 | testsuite/gnatprove/tests/O824-010__flow_no_computed_calls_for_ll_subprogram/test.py | Python | gpl-3.0 | 199 | 0.015075 |
from test_support import *
import re, os
do_flow()
hand = open(os.path.join("gnatprove", "main.ali"))
for line in hand :
line = line.rstrip()
if re.search('^F ', line) :
print line
|
tmaiwald/OSIM | OSIM/Modeling/Components/NPN_Vertical_Bipolar_Intercompany_Model/VBIC_Currents/IBC.py | Python | bsd-2-clause | 4,274 | 0.007253 |
import numpy as np
from numba import jit
import OSIM.Simulation.Utils as u
from OSIM.Modeling.AbstractComponents.NonlinearComponent import NonlinearComponent
class IBC(NonlinearComponent):
def __init__(self, nodes, name, value, superComponent, **kwargs):
super(IBC, self).__init__(nodes, name, value, superComponent, **kwargs)
if(self.COMPONENT_PRINT_WARNINGS):
print (name + "VBIC Current IBC-Warning no avalanche effect implemented yet")
self.bi = nodes[0]
self.ci = nodes[1]
#self.ei = nodes[2]
'''
TODO: adjust default values
'''
for v in self.variableDict:
variableExpr = "".join((v, "=", self.variableDict[v]))
exec(variableExpr)
self.UT = eval(self.paramDict.get("ut", "0.026"))
self.IBCI = eval(self.paramDict.get("ibci", "1.5E-18"))
self.IBCN = eval(self.paramDict.get("ibcn", "1E-15"))
self.NCN = eval(self.paramDict.get("ncn", "1.7"))
self.NCI = eval(self.paramDict.get("nci", "1.05"))
self.AVC1 = eval(self.paramDict.get("avc1", "2.4"))
self.AVC2 = eval(self.paramDict.get("avc2", "11.5"))
self.MC = eval(self.paramDict.get("mc", "0.12"))
self.PC = eval(self.paramDict.get("pc", "0.62"))
self.IS = eval(self.paramDict.get("is", "1e-16"))
self.ISSR = eval(self.paramDict.get("issr", "1"))
self.NF = eval(self.paramDict.get("nf", "1.0"))
self.NR = eval(self.paramDict.get("nr", "1.0"))
self.Udlim = 0.8
def performCalculations(self):
self.current,self.gd = self.getCharacterisitcs()
def getCharacterisitcs(self):
ubi = (self.sys.getSolutionAt(self.bi).real)
uci = (self.sys.getSolutionAt(self.ci).real)
#uei = (self.sys.getSolutionAt(self.ei).real)
ibcn = self.IBCN * (u.exp((ubi - uci), 1 / (self.NCN * self.UT), self.Udlim) - 1.0)
ibci = self.IBCI * (u.exp((ubi - uci), 1 / (self.NCI * self.UT), self.Udlim) - 1.0)
igc = 0 #self.igc(ubi, uci,uei,ibcn + ibci) # fehlt noch
dig = 0 #(self.igc(ubi+0.000001, uei, uci,ibcn + ibci)-igc)/0.000001
return ibcn + ibci - igc , ibcn / (self.NCN * self.UT) + ibci / (self.NCI * self.UT)+dig + self.sys.GMIN
def avalm(self, V, P, M, AV1, AV2):
# aus http://www.designers-guide.org/VBIC/release1.1.5/vbic1.1.5_pseudoCode.html
# Kloosterman/de Graaff weak avalanche model
vl = 0.5 * (np.sqrt((P - V) ** 2 + 0.01) + (P - V))
return AV1 * vl * np.exp(- AV2 * vl ** (M - 1.0))
def igc(self, ubi, uci,uei,ibc):
#TODO: implement !
Itzf = self._ITF(ubi,uei)
Itzr = self._ITR(ubi,uci)
return (Itzf - Itzr - ibc )*self.avalm(ubi-uci,self.PC,self.MC,self.AVC1,self.AVC2)
def reloadParams(self):
for v in self.variableDict:
variableExpr = "".join((v, "=", self.variableDict[v]))
exec (variableExpr)
for v in self.variableDict:
variableExpr = "".join((v, "=", self.variableDict[v]))
exec(variableExpr)
self.UT = eval(self.paramDict.get("ut", "0.026"))
self.IBCI = eval(self.paramDict.get("ibci", "1.5E-18"))
self.IBCN = eval(self.paramDict.get("ibcn", "1E-15"))
self.NCN = eval(self.paramDict.get("ncn", "1.7"))
self.NCI = eval(self.paramDict.get("nci", "1.05"))
self.AVC1 = eval(self.paramDict.get("avc1", "2.4"))
self.AVC2 = eval(self.paramDict.get("avc2", "11.5"))
self.MC = eval(self.paramDict.get("mc", "0.12"))
self.PC = eval(self.paramDict.get("pc", "0.62"))
self.IS = eval(self.paramDict.get("is", "1e-16"))
self.ISSR = eval(self.paramDict.get("issr", "1"))
self.NF = eval(self.paramDict.get("nf", "1.0"))
self.NR = eval(self.paramDict.get("nr", "1.0"))
@jit
def _ITF(self, BI, EI):
if (BI < 1.6):
lim = BI
else:
lim = 1.6
return self.IS * (u.exp(BI - EI, 1 / (self.NF * self.UT), lim) - 1.0)
@jit
def _ITR(self, BI, CI):
if (BI < 1.6):
lim = BI
else:
lim = 1.6
return self.IS * self.ISSR * (u.exp(BI - CI, 1 / (self.NR * self.UT), lim) - 1.0)
|
kiddinn/plaso | tests/parsers/winreg_plugins/typedurls.py | Python | apache-2.0 | 4,083 | 0.00147 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MSIE typed URLs Windows Registry plugin."""
import unittest
from plaso.parsers.winreg_plugins import typedurls
from tests.parsers.winreg_plugins import test_lib
class MsieTypedURLsPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the MSIE typed URLs Windows Registry plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = typedurls.TypedURLsPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\'
'TypedURLs')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\TypedPaths')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\'
'TypedURLs')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = typedurls.TypedURLsPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_entries = (
'url1: http://cnn.com/ '
'url2: http://twitter.com/ '
'url3: http://linkedin.com/ '
'url4: http://tweetdeck.com/ '
'url5: mozilla '
'url6: http://google.com/ '
'url7: http://controller.shieldbase.local/certsrv/ '
'url8: http://controller.shieldbase.local/ '
'url9: http://www.stark-research-labs.com/ '
'url10: http://www.adobe.com/ '
'url11: http://www.google.com/ '
'url12: http://www.firefox.com/ '
'url13: http://go.microsoft.com/fwlink/?LinkId=69157')
expected_event_values = {
'date_time': '2012-03-12 21:23:53.3077499',
'data_type': 'windows:registry:typedurls',
'entries': expected_entries,
'key_path': key_path,
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
'parser': plugin.plugin_name}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TypedPathsPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the typed paths Windows Registry plugin."""
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\TypedPaths')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = typedurls.TypedURLsPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_entries = (
'url1: \\\\controller')
expected_event_values = {
'date_time': '2010-11-10 07:58:15.8116250',
'data_type': 'windows:registry:typedurls',
'entries': expected_entries,
'key_path': key_path,
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
'parser': plugin.plugin_name}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
CarltonShepherd/political-tweet-classifier | data/usermapping.py | Python | gpl-2.0 | 1,522 | 0.032852 |
# Dict of Twitter handles and known political views
data_tuples = {
"jeremycorbyn" : "Labour",
"ken4london" : "Labour",
"Imran_HussainMP" : "Labour",
"GloriaDePiero" : "Labour",
"tom_watson" : "Labour",
"JonAshworth" : "Labour",
"UKLabour" : "Labour",
"RupaHuq" : "Labour",
"heidi_mp" : "Labour",
"Conservatives" : "Tory",
"claire4devizes" : "Tory",
"Davi
|
d_Cameron" : "Tory",
"NickyMorgan01" : "Tory",
"Freeman_George" : "Tory",
"lucyallan" : "Tory",
"edvaizey" : "Tory",
"ChrisWhite_MP" : "Tory",
"BrandonLewis" : "Tory",
"NicolaSturgeon" : "SNP",
"theSNP" : "SNP",
"StewartHosieSNP" : "SNP",
"DougChapmanSNP" : "SNP",
"AngusMacNeilSNP" : "SNP",
"RobertJenrick" : "Tory",
"JulieElliottMP" : "Labour",
"IanMearnsMP" : "Labour",
"
|
SDoughtyMP" : "Labour",
"Keith_Vaz" : "Labour",
"CWhittakerMP" : "Tory",
"Owen_PatersonMP" : "Tory",
"NigelFarage" : "UKIP",
"DouglasCarswell" : "UKIP",
"paulnuttallukip" : "UKIP",
"Steven_Woolfe" : "UKIP",
"RogerHelmerMEP" : "UKIP",
"oflynnmep" : "UKIP",
"rog_ukip" : "UKIP",
"SimonDanczuk" : "Labour",
"WalkerWorcester" : "Tory",
"NickBolesMP" : "Tory",
"tcunninghammp1" : "Labour",
"KateHoeyMP" : "Labour",
"HelenJonesMP" : "Labour",
"SarahChampionMP" : "Labour",
"JustineGreening" : "Tory",
"PeterBoneMP" : "Tory",
"Tim_Aker" : "UKIP",
"JohnBickleyUKIP" : "UKIP",
"SuzanneEvans1" : "UKIP"
}
|
ezequielpereira/Time-Line | autopilot/autopilotlib/wrappers/messagedialog.py | Python | gpl-3.0 | 1,500 | 0.003333 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from autopilotlib.app.logger import Logger
from autopilotlib.wrappers.wrapper import Wrapper
from autopilotlib.app.constants import TIME_TO_WAIT_FOR_DIALOG_TO_SHOW_IN_MILLISECONDS
wxMessageDialog = wx.MessageDialog
class MessageDialog(wxMessageDialog, Wrapper):
def __init__(self, *args, **kw):
wxMessageDialog.__init__(self, *args, **kw)
def ShowModal(self):
Logger.add_result("MessageDialog opened")
wx.CallLater(TIME_TO_WAIT_FOR_DIALOG_TO_SHOW_IN_MILLISECONDS,
self._explore, MessageDialog.listener)
super(MessageDialog, self).ShowModal()
@classmethod
def wrap(self, listener):
wx.MessageDialog = MessageDialog
MessageDialog.listener = listener
|
sreenathmenon/billing-dashboard | billingdashboard/dashboards/project/cust_invoice/views.py | Python | apache-2.0 | 1,151 | 0.004344 |
from django.views import generic
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon import tables
from billingdashboard.common import get_user_invoices
from billingdashboard.dashboards.project.cust_invoice \
import tables as invoice_table
from astutedashboard.common import get_invoices, get_invoice
class IndexView(tables.DataTableView):
table_class = invoice_table.UserInvoiceListingTable
template_name = 'project/cust_invoice/index.html'
page_title = _("Invoices")
def get_data(self):
return get_user_invoices(self.request, verbose=True)
class UserInvoiceDetailsView(generic.TemplateView):
template_name = 'project/cust_invoice/invoice.html'
def get_context_data(self, **kwargs):
context = super(UserInvoiceDetailsView, self).get_context_data(**kwargs)
id = self.kwargs['invoice_id']
context['invoice'] = get_invoice(self.request, id, verbose=True)
return context
|
hryamzik/ansible | lib/ansible/modules/network/nxos/nxos_interface_ospf.py | Python | gpl-3.0 | 17,076 | 0.001464 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_interface_ospf
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an OSPF interface instance.
description:
- Manages configuration of an OSPF interface instance.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default, where supported, restores params default value.
- To remove an existing authentication configuration you should use
C(message_digest_key_id=default) plus all other options matching their
existing values.
- C(state=absent) removes the whole OSPF interface configuration.
options:
interface:
description:
- Name of this cisco_interface resource. Valid value is a string.
required: true
ospf:
description:
- Name of the ospf instance.
required: true
area:
description:
- Ospf area associated with this cisco_interface_ospf instance.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer.
required: true
cost:
description:
- The cost associated with this cisco_interface_ospf instance.
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer or the keyword 'default'.
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer or the keyword 'default'.
passive_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets.
type: bool
message_digest:
description:
- Enables or disables the usage of message digest authentication.
type: bool
message_digest_key_id:
description:
- Md5 authentication key-id associated with the ospf instance.
If this is present, message_digest_encryption_type,
message_digest_algorithm_type and message_digest_password are
mandatory. Valid value is an integer and 'default'.
message_digest_algorithm_type:
description:
- Algorithm used for authentication among neighboring routers
within an area. Valid values are 'md5' and 'default'.
choices: ['md5', 'default']
message_digest_encryption_type:
description:
- Specifies the scheme used for encrypting message_digest_password.
Valid values are '3des' or 'cisco_type_7' encryption or 'default'.
choices: ['cisco_type_7','3des', 'default']
message_digest_password:
description:
- Specifies the message_digest password. Valid value is a string.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_interface_ospf:
interface: ethernet1/32
ospf: 1
area: 1
cost: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface Ethernet1/32", "ip router ospf 1 area 0.0.0.1"]
'''
import re
import struct
import socket
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'passive_interface',
'message_digest'
]
PARAM_TO_COMMAND_KEYMAP = {
'interface': '',
'cost': 'ip ospf cost',
'ospf': 'ip router ospf',
'area': 'ip router ospf',
'hello_interval': 'ip ospf hello-interval',
'dead_interval': 'ip ospf dead-interval',
'passive_interface': 'ip ospf passive-interface',
'message_digest': 'ip ospf authentication message-digest',
'message_digest_key_id': 'ip ospf message-digest-key',
'message_digest_algorithm_type': 'ip ospf message-digest-key',
'message_digest_encryption_type': 'ip ospf message-digest-key',
'message_digest_password': 'ip ospf message-digest-key',
}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.search(r'\s+{0}\s*$'.format(command), config, re.M)
has_command_val = re.search(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
if command == 'ip router ospf':
value = ''
if has_command_val:
value_list = has_command_val.group('value').split()
if arg == 'ospf':
value = value_list[0]
elif arg == 'area':
value = value_list[2]
value = normalize_area(value, module)
elif command == 'ip ospf message-digest-key':
value = ''
if has_command_val:
value_list = has_command_val.group('value').split()
if arg == 'message_digest_key_id':
value = value_list[0]
elif arg == 'message_digest_algorithm_type':
value = value_list[1]
elif arg == 'message_digest_encryption_type':
value = value_list[2]
if value == '3':
value = '3des'
elif value == '7':
value = 'cisco_type_7'
elif arg == 'message_digest_password':
value = value_list[3]
elif arg == 'passive_interface':
has_no_command = re.search(r'\s+no\s+{0}\s*$'.format(command), config, re.M)
value = False
if has_command and not has_no_command:
value = True
elif arg in BOOL_PARAMS:
value = bool(has_command)
else:
value = ''
if has_command_val:
value = has_command_val.group('value')
return value
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
if module.params['interface'].startswith('loopback') or module.params['interface'].startswith('port-channel'):
parents = ['interface {0}'.format(module.params['interface'])]
else:
parents = ['interface {0}'.format(module.params['interface'].capitalize())]
config = netcfg.get_section(parents)
if 'ospf' in config:
for arg in args:
if arg not in ['interface']:
existing[arg] = get_value(arg, config, module)
existing['interface'] = module.params['interface']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = value
return new_dict
def get_default_commands(existing, proposed, existing_commands, key, module):
commands = list()
existing_value = existing_commands.get(key)
if key.startswith('ip ospf message-digest-key'):
check = False
for param in ['message_digest_encryption_type',
'message_digest_algorithm_type',
'message_digest_password']:
if existing[param] == proposed[param]:
check = True
if check:
if existing['message_digest_encryption_type'] == '3des':
encryption_type = '3'
elif existing['message_digest_encryption_type'] == 'cisco_type_7':
encryption_type = '7'
|
astagi/twitterene | twitterene/tweepy/cgi.py | Python | gpl-3.0 | 34,465 | 0.002234 |
#! /usr/bin/python2.6
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import urllib
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
if sys.py3kwarning:
filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning)
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
URL encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
qs = fp.read(clength)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return parse_qs(qs, keep_blank_values, strict_parsing)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs \
instead", PendingDeprecationWarning, 2)
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError, ('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if
|
1a1a11a/mimircache | PyMimircache/cacheReader/csvReader.py | Python | gpl-3.0 | 8,745 | 0.003316 |
# coding=utf-8
"""
a csv trace reader
Author: Jason Yang <peter.waynechina@gmail.com> 2016/06
"""
import string
from PyMimircache.const import ALLOW_C_MIMIRCACHE, INSTALL_PHASE
from PyMimircache.utils.printing import *
if ALLOW_C_MIMIRCACHE and not INSTALL_PHASE:
import PyMimircache.CMimircache.CacheReader as c_cacheReader
from PyMimircache.cacheReader.abstractReader import AbstractReader
class CsvReader(AbstractReader):
"""
CsvReader class
"""
all = ["read_one_req", "read_complete_req", "lines_dict",
"lines", "read_time_req", "reset", "copy", "get_params"]
def __init__(self, file_loc,
data_type='c',
init_params=None,
block_unit_size=0,
disk_sector_size=0,
open_c_reader=True,
**kwargs):
"""
:param file_loc: location of the file
:param data_type: type of data, can be "l" for int/long, "c" for string
:param init_params: the init_params for opening csv
:param block_unit_size: block size for storage system, 0 when disabled
:param disk_sector_size: size of disk sector
:param open_c_reader: bool for whether open reader in C backend
:param kwargs: not used now
"""
super(CsvReader, self).__init__(file_loc, data_type, block_unit_size, disk_sector_size,
open_c_reader, kwargs.get("lock", None))
assert init_params is not None, "please provide init_param for csvReader"
assert "label" in init_params, "please provide label for csv reader"
self.trace_file = open(file_loc, 'rb')
# self.trace_file = open(file_loc, 'r', encoding='utf-8', errors='ignore')
self.init_params = init_params
self.label_column = init_params['label']
self.time_column = init_params.get("real_time", )
self.size_column = init_params.get("size", )
if self.time_column != -1:
self.support_real_time = True
if self.size_column != -1:
self.support_size = True
if block_unit_size != 0:
assert "size" in init_params, "please provide size_column option to consider request size"
self.header_bool = init_params.get('header', )
self.delimiter = init_params.get('delimiter', ",")
if "delimiter" not in init_params:
INFO("open {} using default delimiter \",\" for CsvReader".format(file_loc))
if self.header_bool:
self.headers = [i.strip(string.whitespace) for i in
self.trace_file.readline().decode().split(self.delimiter)]
# self.trace_file.readline()
if ALLOW_C_MIMIRCACHE and open_c_reader:
self.c_reader = c_cacheReader.setup_reader(file_loc, 'c', data_type=data_type,
block_unit_size=block_unit_size,
disk_sector_size=disk_sector_size,
init_params=init_params)
def read_one_req(self):
"""
read one request, return the lbn/objID
:return:
"""
super().read_one_req()
line = self.trace_file.readline().decode('utf-8', 'ignore')
while line and len(line.strip()) == 0:
line = self.trace_file.readline().decode()
if line:
ret = line.split(self.delimiter)[self.label_column - 1].strip()
if self.data_type == 'l':
ret = int(ret)
if self.block_unit_size != 0 and self.disk_sector_size != 0:
ret = ret * self.disk_sector_size // self.block_unit_size
return ret
else:
return None
def read_complete_req(self):
"""
read the complete line, including request and its all related info
:return: a list of all info of the request
"""
super().read_one_req()
line = self.trace_file.readline().decode()
while line and len(line.strip()) == 0:
line = self.trace_file.readline().decode()
if line:
line_split = line.strip().split(self.delimiter)
if self.block_unit_size != 0 and self.disk_sector_size != 0:
line_split[self.label_column - 1] = line_split[self.label_column - 1] * \
self.disk_sector_size // self.block_unit_size
return line_split
else:
return None
def lines_dict(self):
"""
return a dict with column header->data
note this function does not convert lbn even if disk_sector_size and block_unit_size are set
:return:
"""
line = self.trace_file.readline().decode()
while line and len(line.strip()) == 0:
line = self.trace_file.readline().decode()
while line:
line_split = line.split(self.delimiter)
d = {}
if self.header_bool:
for i in range(len(self.headers)):
d[self.headers[i]] = line_split[i].strip(string.whitespace)
else:
for key, value in enumerate(line_split):
d[key] = value
line = self.trace_file.readline()
yield d
# raise StopIteration
def lines(self):
"""
a generator for reading all the information of current request/line
:return: a tuple of current request
"""
line = self.trace_file.readline().decode()
while line and len(line.strip()) == 0:
line = self.trace_file.readline().decode()
while line:
line_split = tuple(line.split(self.delimiter))
line = self.trace_file.readline()
yield line_split
# raise StopIteration
def read_time_req(self):
"""
return real_time information for the request in the form of (time, request)
:return:
"""
super().read_one_req()
line = self.trace_file.readline().strip().decode()
while line and len(line.strip()) == 0:
line = self.trace_file.readline().decode()
if line:
line = line.split(self.delimiter)
try:
time = float(line[self.time_column - 1].strip())
lbn = line[self.label_column - 1].strip()
if self.data_type == 'l':
lbn = int(lbn)
if self.block_unit_size != 0 and self.disk_sector_size != 0:
lbn = lbn * self.disk_sector_size // self.block_unit_size
return time, lbn
except Exception as e:
print("ERROR csvReader reading data: {}, current line: {}".format(e, line))
else:
return None
def skip_n_req(self, n):
"""
skip N requests from current position
:param n: the number of requests to skip
"""
for i in range(n):
self.read_one_req()
def reset(self):
"""
reset reader to initial state
:return:
"""
super().reset()
if self.header_bool:
self.trace_file.readline()
def copy(self, open_c_reader=False):
"""
return a deep copy of the current reader with everything reset to initial state,
the returned reader should not interfere with current reader
:param open_c_reader: whether open_c_reader_or_not, default not open
:return: a copied reader
"""
return CsvReader(self.file_loc, self.data_type, self.init_params,
self.block_unit_size, self.disk_sector_size, open_c_reader, lock=self.lock)
def get_params(self):
"""
return all the parameters for this reader instance in a dictionary
:return: a dictionary containing all parameters
"""
return {
"file_loc": self.file_loc,
|
pepitogithub/PythonScripts | crypto.py | Python | gpl-2.0 | 5,608 | 0.023894 |
import Aplicacion
import Probabilidades as pr
from Menu import *
from Variable import *
from validador import *
#------------------------------------------------
#--------------- TODO ---------------------------
#------------------------------------------------
# 1) List of pending tasks to implement.
# 2) If you don't like it, take it out :).
#------------------------------------------------
#------------------------------------------------
#------------------------------------------------
class Crypto(Aplicacion.Aplicacion):
"""
FILLME
"""
#-----------------------
#--- inicializacion ----
#-----------------------
def iniciar(self,**args):
#variables de programa
self.probs = pr.Probabilidad()
self.probs.cargarDatos("0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z")
#variables de usuario
self.vars["semilla"] = Variable(0,self.modifSemilla,orden=0)
self.vars["longitudSemilla"] = Variable(16,self.modifLongitudSemilla,orden=1)
#Items del Menu
self.agregarMenu(0,Leaf("Encriptar","",self.encriptar))
self.agregarMenu(1,Leaf("Desencriptar","",self.desencriptar))
self.modifSemilla("semilla")
self.vars["semilla"].valor = self.generarSemilla(self.vars["longitudSem
|
illa"].valor)
#-----------------------
#--- Funciones ---------
#-----------------------
def base36encode(self,number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def encriptar(self):
self.espaciador()
print "Ingrese la clave a encriptar:"
clave = int(validador.ingresar(str),36)
print "Ingrese la semilla a utilizar:"
semilla = int(validador.ingresar(str),36)
print "codigo encriptado: (ANOTATELO)"
print self.doEncriptar(clave,semilla)
self.espaciador()
def doEncriptar(self,clave,semilla):
return self.base36encode(clave + semilla)
def desencriptar(self):
self.espaciador()
print "Ingrese el codigo encriptado:"
criptado = validador.ingresar(str)
print "Ingrese la semilla utilizada:"
semilla = validador.ingresar(str)
self.espaciador()
print "el codigo descencriptado es:"
print self.doDesencriptar(criptado,semilla)
def doDesencriptar(self,criptado,semilla):
return self.base36encode(int(criptado,36) - int(semilla,36))
def generarSemilla(self,longitud):
crypto = ""
for i in range(0,longitud):
crypto += self.probs.generar()
return crypto
#-----------------------
#--- modificadores -----
#-----------------------
# Create all the modifiers with this structure, ALWAYS keeping the header (self,key,*params):
def modifSemilla(self,key,*params):
print "Se genera una nueva Semilla:"
self.vars["semilla"].valor = self.generarSemilla(self.vars["longitudSemilla"].valor)
print self.vars["semilla"].valor
def modifLongitudSemilla(self,key,*params):
print "Ingrese la nueva longitud (entre 5 y 32)"
longitud = validador.ingresar(int,validador.entre,5,32)
self.vars["longitudSemilla"].valor = longitud
self.modifSemilla("semilla")
#-----------------------
#--- otros -------------
#-----------------------
# Optional function. To display some kind of information (or run a command)
# in the main menu (above the application name), override this method.
# This method shows whatever you want; the idea is to explain how the program is used.
def ayuda(self):
print "Para encriptar: \n"
print "1) Ingresar la clave que se quiere encriptar."
print "2) Ingresar una semilla. " + self.appNombre+ " " + self.version + " ofrece una semilla generada de forma aleatoria, de ancho configurable. Su uso es opcional."
print "3) (RECOMENDABLE) Guardar el codigo encriptado."
print ""
print "Para desencriptar:\n"
print "1) Ingresar el codigo encriptado."
print "2) Ingresar la semilla utilizada para la encriptacion.\n"
print "3) Se mostrara en pantalla el codigo desencriptado."
# Custom exit text.
def salirPrint(self):
pass# self.doEncriptar("Hasta la vista Baby")
# this part always goes here.
# you have to invoke your main class; the first three parameters are the name, the version and whether or not it uses a graphical screen.
# then you pass it all the parameters you want, separated by commas.
if __name__ == '__main__':
a = Crypto("Crypto","1.0.0",False)
a.menuPrincipal()
|
WatchPeopleCode/WatchPeopleCode | migrations/versions/177b55430b61_.py | Python | mit | 679 | 0.013255 |
"""empty message
Revision ID: 177b55430b61
Revises: None
Create Date: 2015-01-29 22:29:18.963249
"""
# revision identifiers, used by Alembic.
revision = '177b55430b61'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stream',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stream')
### end Alembic commands ###
|
jfillmore/hoops | tests/models_tests/test_model_language.py | Python | mit | 1,525 | 0.001311 |
from tests.models_tests import ModelsTestBase
from test_models.core import Language
from tests import dbhelper
from sqlalchemy.exc import IntegrityError
class TestLanguageModel(ModelsTestBase):
def test_00_init(self):
assert Language
def test_01_populate(self):
dbhelper.add(Language(lang='en', name='English'), self.db)
dbhelper.add(Language(lang='en-us', name='English US'), self.db)
dbhelper.add(Language(lang='es', name='Espanol'), self.db)
dbhelper.add(Language(lang='fr', name='French'), self.db)
# Duplicate entry
try:
dbhelper.add(Language(lang='fr', name='French'), self.db)
assert False, 'Expected IntegrityError'
except IntegrityError:
pass
assert Language.query.count() == 4
def test_02_repr_method(self):
languages = Language.query.all()
for language in languages:
assert str(language.id) in str(language)
assert language.lang in str(language)
assert language.name in str(language)
def test_04_languages_lang_name_present(self):
for language in Language.query.all():
assert (language.lang is not None and language.lang is not '')
assert (language.name is not None and language.name is not '')
def test_05_languages_unique(self):
assert Language.query.filter(Language.lang == 'en').count() == 1
assert Language.query.filter(Language.lang == 'en').first().name == 'English'
|
naziris/HomeSecPi | venv/lib/python2.7/site-packages/gunicorn/workers/gthread.py | Python | apache-2.0 | 10,785 | 0.000649 |
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# design:
# a threaded worker accepts connections in the main loop; accepted
# connections are added to the thread pool as a connection job. On
# keepalive, connections are put back in the loop waiting for an event.
# If no event happens after the keep alive timeout, the connection is
# closed.
from collections import deque
from datetime import datetime
import errno
from functools import partial
import os
import operator
import socket
import ssl
import sys
import time
from .. import http
from ..http import wsgi
from .. import util
from . import base
from .. import six
try:
import concurrent.futures as futures
except ImportError:
raise RuntimeError("""
You need 'concurrent' installed to use this worker with this python
version.
""")
try:
from asyncio import selectors
except ImportError:
try:
from trollius import selectors
except ImportError:
raise RuntimeError("""
You need 'trollius' installed to use this worker with this python
version.
""")
class TConn(object):
def __init__(self, cfg, listener, sock, addr):
self.cfg = cfg
self.listener = listener
self.sock = sock
self.addr = addr
self.timeout = None
self.parser = None
# set the socket to non blocking
self.sock.setblocking(False)
def init(self):
self.sock.setblocking(True)
if self.parser is None:
# wrap the socket if needed
if self.cfg.is_ssl:
self.sock = ssl.wrap_socket(client, server_side=True,
**self.cfg.ssl_options)
# initialize the parser
self.parser = http.RequestParser(self.cfg, self.sock)
return True
return False
def set_timeout(self):
# set the timeout
self.timeout = time.time() + self.cfg.keepalive
def __lt__(self, other):
return self.timeout < other.timeout
__cmp__ = __lt__
class ThreadWorker(base.Worker):
def __init__(self, *args, **kwargs):
super(ThreadWorker, self).__init__(*args, **kwargs)
self.worker_connections = self.cfg.worker_connections
# initialise the pool
self.tpool = None
self.poller = None
self.futures = deque()
self._keep = deque()
def _wrap_future(self, fs, conn):
fs.conn = conn
self.futures.append(fs)
fs.add_done_callback(self.finish_request)
def init_process(self):
self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
self.poller = selectors.DefaultSelector()
super(ThreadWorker, self).init_process()
def accept(self, listener):
try:
client, addr = listener.accept()
conn = TConn(self.cfg, listener, client, addr)
# wait for the read event to handle the connection
self.poller.register(client, selectors.EVENT_READ,
partial(self.handle_client, conn))
except socket.error as e:
if e.args[0] not in (errno.EAGAIN,
errno.ECONNABORTED, errno.EWOULDBLOCK):
raise
def handle_client(self, conn, client):
# unregister the client from the poller
self.poller.unregister(client)
# submit the connection to a worker
fs = self.tpool.submit(self.handle, conn)
self._wrap_future(fs, conn)
def murder_keepalived(self):
now = time.time()
while True:
try:
# remove the connection from the queue
conn = self._keep.popleft()
except IndexError:
break
delta = conn.timeout - now
if delta > 0:
# add the connection back to the queue
self._keep.appendleft(conn)
break
else:
# remove the socket from the poller
self.poller.unregister(conn.sock)
# close the socket
util.close(conn.sock)
def run(self):
# init listeners, add them to the event loop
for s in self.sockets:
s.setblocking(False)
self.poller.register(s, selectors.EVENT_READ, self.accept)
timeout = self.cfg.timeout or 0.5
while self.alive:
# If our parent changed then we shut down.
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s", self)
return
# notify the arbiter we are alive
self.notify()
events = self.poller.select(0.2)
for key, mask in events:
callback = key.data
callback(key.fileobj)
# handle keepalive timeouts
self.murder_keepalived()
# if we have more connections than the max number of connections
# accepted on a worker, wait until some complete or exit.
if len(self.futures) >= self.worker_connections:
res = futures.wait(self.futures, timeout=timeout)
if not res:
self.log.info("max requests achieved")
break
# shutdown the pool
self.poller.close()
self.tpool.shutdown(False)
# wait for the workers
futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
# if we still have futures running, try to close them
while True:
try:
fs = self.futures.popleft()
except IndexError:
break
sock = fs.conn.sock
# the future is not running, cancel it
if not fs.done() and not fs.running():
fs.cancel()
# make sure we close the sockets after the graceful timeout
util.close(sock)
def finish_request(self, fs):
try:
(keepalive, conn) = fs.result()
# if the connection should be kept alived add it
# to the eventloop and record it
if keepalive:
# flag the socket as non blocked
conn.sock.setblocking(False)
# register the connection
conn.set_timeout()
self._keep.append(conn)
# add the socket to the event loop
self.poller.register(conn.sock, selectors.EVENT_READ,
partial(self.handle_client, conn))
else:
util.close(conn.sock)
except:
# an exception happened, make sure to close the
# socket.
util.close(fs.conn.sock)
finally:
# remove the future from our list
try:
self.futures.remove(fs)
except ValueError:
pass
def handle(self, conn):
if not conn.init():
# connection kept alive
try:
self._keep.remove(conn)
except ValueError:
pass
keepalive = False
req = None
try:
req = six.next(conn.parser)
if not req:
return (False, conn)
# handle the request
keepalive = self.handle_request(req, conn)
if keepalive:
return (keepalive, conn)
except http.errors.NoMoreData as e:
self.log.debug("Ignored premature client disconnection. %s", e)
except StopIteration as e:
self.log.debug("Closing connection. %s", e)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_EOF:
self.log.debug("ssl connection closed")
conn.sock.close()
else:
self.log.debug("Error processing SSL request.")
self.handle_error(req, conn.sock, conn.addr, e)
except socket.error as e:
if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
self.log.exception("Socket
|
rjw57/streamkinect2 | setup.py | Python | bsd-2-clause | 1,148 | 0.016551 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
import streamkinect2.version as meta
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = meta.__project__,
version = meta.__version__,
author = meta.__author__,
author_email = meta.__author_email__,
description = "A simple network streamer for kinect2 data.",
license = "BSD",
keywords = "kinect kinect2 zeroconf bonjour",
url = "https://github.com/rjw57/stramkinect
|
2",
packages=find_packages(exclude='test'),
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
install_requires=[
'blinker',
'enum34',
'lz4',
'numpy',
'pillow',
'pyzmq',
'tornado',
'zeroconf',
],
setup_requires=[
'nose',
],
tests_require=[
'coverage'
],
extras_require={
'docs': [ 'sphinx', 'docutils', ],
},
)
|
koyadovic/Dia | predictive/systems/statistical/analysis/tools/property.py | Python | gpl-2.0 | 271 | 0.00369 |
def propertycached(fn):
attr_name = "_cached_" + fn.__name__
@property
def _propertycached(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _propertycached
|
yuhaozhang/nnjm-global | code/train_nnjm_gplus.py | Python | mit | 15,583 | 0.005134 |
#!/usr/bin/env python
"""
Train a NNJM (with global context and extended architecture) model.
"""
usage = 'To train NNJM (with global context and extended architecture) using Theano'
import cPickle
import gzip
import os
import sys
import time
import re
import codecs
import argparse
import datetime
import numpy as np
import theano
import theano.tensor as T
# our libs
import model_gplus
import io_vocab
import io_read_ngram
import io_model
from train_util import *
def process_command_line():
"""
Return a 1-tuple: (args list).
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
parser = argparse.ArgumentParser(description=usage) # add description
# positional arguments
parser.add_argument(
'train_file', metavar='train_file', type=str, help='train file')
parser.add_argument(
'valid_file', metavar='valid_file', type=str, help='valid file')
parser.add_argument(
'test_file', metavar='test_file', type=str, help='test file')
parser.add_argument(
'ngram_size', metavar='ngram_size', type=int, help='ngram size')
parser.add_argument('sentence_vector_length',
metavar='sentence_vector_length', type=int, help='sentence vector length')
parser.add_argument(
'vocab_size', metavar='vocab_size', type=int, help='vocab size')
# plus model
parser.add_argument(
'num_section', metavar='num_section', type=int, help='global vector section number')
parser.add_argument(
'vocab_file', metavar='vocab_file', type=str, help='vocab file')
# optional arguments
parser.add_argument('--model_file', dest='model_file', type=str,
default='', help='load model from a file (default=\'\')')
parser.add_argument('--emb_dim', dest='emb_dim', type=int,
default=128, help='embedding dimension (default=128)')
parser.add_argument('--hidden_layers', dest='hidden_layers', type=str,
default='512', help='hidden layers, e.g. 512-512 (default=512)')
parser.add_argument('--learning_rate', dest='learning_rate',
type=float, default=0.1, help='learning rate (default=0.1)')
parser.add_argument('--chunk', dest='chunk', type=int, default=2000,
help='each time consider batch_size*chunk ngrams (default=2000)')
parser.add_argument('--valid_freq', dest='valid_freq',
type=int, default=1000, help='valid freq (default=1000)')
parser.add_argument('--option', dest='opt', type=int, default=0,
help='option: 0 -- predict last word, 1 -- predict middle word (default=0)')
parser.add_argument('--act_func', dest='act_func', type=str, default='relu',
help='non-linear function: \'tanh\' or \'relu\' (default=\'relu\')')
parser.add_argument('--finetune', dest='finetune', type=int, default=1,
help='after training for this number of epoches, start halving learning rate(default: 1)')
parser.add_argument('--n_epochs', dest='n_epochs', type=int, default=5,
help='number of epochs, i.e. how many times to go throught the training data (default: 5)')
# joint model
parser.add_argument('--src_window', dest='src_window', type=int,
default=5, help='src window for joint model (default=5)')
parser.add_argument('--src_lang', dest='src_lang',
type=str, default='', help='src lang (default=\'\')')
parser.add_argument('--tgt_lang', dest='tgt_lang',
type=str, default='', help='tgt_lang (default=\'\')')
# load pretrain model
parser.add_argument('--load_model', dest='load_model_file', type=str, default=None, help='Load model parameters from a pre-trained model')
parser.add_argument('--fix_emb', dest='fix_emb', action='store_true', default=False, help='Use pretrain model and fix the embedding matrix during the training process')
# global non-linearity
parser.add_argument('--global_nonlinear', dest='global_nonlinear', type=int, default=None, help="Add a non-linear layer after the global mean sum")
# remove stopwords
parser.add_argument('--rm_stopwords', dest='stopword_cutoff', type=int, default=-1, help="Remove stopwords from the global sentence vector")
# adaptive section length splitting
parser.add_argument('--ada_split', dest='ada_split', action='store_true', default=False, help="Use adaptive section length splitting")
args = parser.parse_args()
return args
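# Editor's sketch (not part of the original script): illustrates how the
# argparse.Namespace returned by process_command_line() is typically consumed.
# The field names mirror the arguments defined above; the helper itself is
# hypothetical and only meant as documentation.
def _example_show_config():
    args = process_command_line()
    hidden_layer_sizes = [int(h) for h in args.hidden_layers.split('-')]
    print 'ngram_size=%d, emb_dim=%d, hidden_layers=%s, learning_rate=%g' % (
        args.ngram_size, args.emb_dim, hidden_layer_sizes, args.learning_rate)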
class TrainGlobalPlusModel(TrainModel):
def loadModelParams(self, ngram_size, src_window, model, max_src_sent_length):
self.ngram_size = ngram_size
self.src_window = src_window
self.model = model
self.max_src_sent_length = max_src_sent_length
self.model_param_loaded = True
def loadGlobalModelParams(self, stopword_cutoff):
self.stopword_cutoff = stopword_cutoff
def loadValidSet(self, valid_data_package):
self.valid_set_x, self.valid_set_y, self.valid_set_sm = valid_data_package
self.shared_valid_set_x, self.shared_valid_set_y, self.shared_valid_set_sm = io_read_ngram.shared_dataset(valid_data_package)
self.shared_valid_set_y = T.cast(self.shared_valid_set_y, 'int32')
self.valid_set_loaded = True
def loadTestSet(self, test_data_package):
self.test_set_x, self.test_set_y, self.test_set_sm = test_data_package
self.shared_test_set_x, self.shared_test_set_y, self.shared_test_set_sm = io_read_ngram.shared_dataset(test_data_package)
        self.shared_test_set_y = T.cast(self.shared_test_set_y, 'int32')
self.test_set_loaded = True
def loadBatchData(self, isInitialLoad=False):
src_lang = self.src_lang
tgt_lang = self.tgt_lang
tgt_vocab_size = self.tgt_vocab_size
ngram_size = self.ngram_size
chunk_size = self.chunk_size
src_window = self.src_window
opt = self.opt
        (self.data_x, self.data_y, self.data_sm) = io_read_ngram.get_joint_ngrams_with_src_global_matrix(self.src_f, self.tgt_f, self.align_f, \
            self.max_src_sent_length, tgt_vocab_size, ngram_size, src_window, opt, num_read_lines=chunk_size, stopword_cutoff=self.stopword_cutoff)
if isInitialLoad == False:
assert(type(self.model) == model_gplus.ModelGlobalPlus)
return self.model.updateTrainModelInput(self.data_x, self.data_y, self.data_sm)
def displayFirstNExamples(self, n):
if self.src_window < 0:
return
src_vocab, src_vocab_size = io_vocab.load_vocab(self.src_vocab_file)
tgt_vocab, tgt_vocab_size = io_vocab.load_vocab(self.tgt_vocab_file)
src_inverse_vocab = io_vocab.inverse_vocab(src_vocab)
tgt_inverse_vocab = io_vocab.inverse_vocab(tgt_vocab)
assert(n <= self.chunk_size)
for i in xrange(n):
example_x = self.data_x[i]
example_y = self.data_y[i]
sent_idx = example_x[-1]
src_sent_vector = self.data_sm[sent_idx]
src_sent_length = src_sent_vector[0]
src_sent_vector = src_sent_vector[1:src_sent_length+1]
src_window_vector = example_x[:self.src_window*2 + 1]
tgt_gram_vector = example_x[self.src_window*2 + 1:-1]
src_sent_words = io_vocab.getWordsFromIndeces(src_sent_vector, src_inverse_vocab, self.tgt_vocab_size)
src_window_words = io_vocab.getWordsFromIndeces(src_window_vector, src_inverse_vocab, self.tgt_vocab_size)
tgt_gram_words = io_vocab.getWordsFromIndeces(tgt_gram_vector, tgt_inverse_vocab, 0)
output = ""
count = 0
for w in src_window_words:
count += 1
if count == self.src_window + 1:
output += "[" + w + "] "
else:
output += w + " "
output += "|| "
output += " ".join(tgt_gram_words) + " "
output += "===> " + tgt_inverse_vocab[example_y]
output += " |||| "
|
jjscarafia/CUPS-Cloud-Print
|
ccputils.py
|
Python
|
gpl-3.0
| 10,118 | 0.000297 |
# CUPS Cloudprint - Print via Google Cloud Print
# Copyright (C) 2014 Simon Cadman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os
import logging
import sys
import grp
import base64
import fcntl
import termios
import struct
class Utils(object):
logpath = '/var/log/cups/cloudprint_log'
# Countries where letter sized paper is used, according to:
# http://en.wikipedia.org/wiki/Letter_(paper_size)
_LETTER_COUNTRIES = set(('US', 'CA', 'MX', 'BO', 'CO', 'VE', 'PH', 'CL'))
PROTOCOL_NAME = 'gcp'
GUI = False
PROTOCOL = PROTOCOL_NAME + '://'
OLD_PROTOCOL_NAME = 'cloudprint'
OLD_PROTOCOL = OLD_PROTOCOL_NAME + '://'
_MIMETYPES_JOBTYPES = {'pdf': 'application/pdf',
'other': 'application/octet-stream',
'jpg': 'image/jpeg',
'png': 'image/png'}
@staticmethod
def FixFilePermissions(filename):
filePermissions = True
fileOwnerships = True
currentStat = None
if os.path.exists(filename):
currentStat = os.stat(filename)
if currentStat is None or currentStat.st_mode != 0o100660:
try:
os.chmod(filename, 0o100660)
except Exception:
filePermissions = False
sys.stderr.write(
"DEBUG: Cannot alter " +
filename +
" file permissions\n")
if currentStat is None or currentStat.st_gid != Utils.GetLPID():
try:
os.chown(filename, -1, Utils.GetLPID())
except Exception:
fileOwnerships = False
sys.stderr.write(
"DEBUG: Cannot alter " +
filename +
" file ownership\n")
return filePermissions, fileOwnerships
@staticmethod
def SetupLogging(logpath=None):
returnValue = True
logformat = "%(asctime)s|%(levelname)s|%(message)s"
dateformat = "%Y-%m-%d %H:%M:%S"
if logpath is None:
logpath = Utils.logpath
try:
logging.basicConfig(
filename=logpath,
level=logging.INFO,
format=logformat,
datefmt=dateformat)
Utils.FixFilePermissions(logpath)
except Exception:
logging.basicConfig(
level=logging.INFO,
format=logformat,
datefmt=dateformat)
logging.error("Unable to write to log file " + logpath)
returnValue = False
return returnValue
@staticmethod
def fileIsPDF(filedata):
"""Check if a file is or isnt a PDF
Args:
filename: string, name of the file to check
Returns:
boolean: True = is a PDF, False = not a PDF.
"""
p = subprocess.Popen(["file", '-'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = p.communicate(filedata)[0]
logging.debug("File output was: " + output)
return "PDF document" in output
@staticmethod
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
@staticmethod
def which(program):
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if Utils.is_exe(exe_file):
return exe_file
return None
@staticmethod
def GetLPID(default='lp', alternative='cups', useFiles=True,
blacklistedGroups=None,
useFilesOnly=False):
if blacklistedGroups is None:
blacklistedGroups = ['adm', 'wheel', 'root']
blacklistedGroupIds = []
for group in blacklistedGroups:
try:
blacklistedGroupIds.append(grp.getgrnam(group).gr_gid)
except Exception:
logging.debug("Group " + group + " not found")
if useFiles:
# check files in order
for cupsConfigFile in ['/var/log/cups/access_log',
'/etc/cups/ppd',
'/usr/local/etc/cups/ppd']:
if os.path.exists(cupsConfigFile):
configGid = os.stat(cupsConfigFile).st_gid
if configGid not in blacklistedGroupIds:
return configGid
else:
logging.debug(
"Group " +
str(configGid) +
" excluded as blacklisted")
if useFilesOnly:
return None
# try lp first, then cups
lpgrp = None
try:
lpgrp = grp.getgrnam(default)
except Exception:
try:
lpgrp = grp.getgrnam(alternative)
except Exception:
pass
if lpgrp is None:
return None
else:
return lpgrp.gr_gid
@staticmethod
def ShowVersion(CCPVersion):
if len(sys.argv) == 2 and sys.argv[1] == 'version':
print "CUPS Cloud Print Version " + CCPVersion
sys.exit(0)
return False
@staticmethod
def ReadFile(pathname):
"""Read contents of a file and return content.
Args:
pathname: string, (path)name of file.
Returns:
string: contents of file.
"""
try:
f = open(pathname, 'rb')
s = f.read()
return s
except IOError as e:
            print 'ERROR: Error opening %s\n%s' % (pathname, e)
return None
@staticmethod
def WriteFile(file_name, data):
"""Write contents of data to a file_name.
Args:
file_name: string, (path)name of file.
data: string, contents to write to file.
Returns:
boolean: True = success, False = errors.
"""
status = True
try:
f = open(file_name, 'wb')
f.write(data)
f.close()
except IOError:
            status = False
return status
@staticmethod
def Base64Encode(data, jobtype):
"""Convert a file to a base64 encoded file.
Args:
pathname: data to base64 encode
jobtype: job type being encoded - pdf, jpg etc
Returns:
string, base64 encoded string.
|
For more info on data urls, see:
http://en.wikipedia.org/wiki/Data_URI_scheme
"""
# Convert binary data to base64 encoded data.
mimetype = Utils._MIMETYPES_JOBTYPES['other']
if jobtype in Utils._MIMETYPES_JOBTYPES:
mimetype = Utils._MIMETYPES_JOBTYPES[jobtype]
header = 'data:%s;base64,' % mimetype
return header + base64.b64encode(data)
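    # Hedged illustration (editor's addition): the returned string is a data
    # URI whose prefix depends on the job type, e.g.
    #   Utils.Base64Encode(b'\xff\xd8\xff', 'jpg')
    #   # -> 'data:image/jpeg;base64,/9j/'
    # Unknown job types fall back to 'data:application/octet-stream;base64,...'.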
@staticmethod
def GetLanguage(locale, cupshelper=None):
newlocale = None
if cupshelper is not None:
newlocale = cupshelper.getServerSetting('DefaultLanguage')
if newlocale is None:
if len(locale) < 1 or locale[0] is None:
return ('en', 'en')
defaultlocale = locale[0]
newlocale = defaultlocale
language = newlocale
if '_' in newlocale:
language = newlocale.split("_")[0]
return (language, newlocale)
@staticmethod
def GetDefaultPaperType(locale):
defaultpapertype = "Letter"
if len(locale.split('_')) > 1
|
Acidity/PyPermissions
|
pypermissions/permission.py
|
Python
|
mit
| 8,509 | 0.004231 |
class Permission(object):
"""This class represents the most basic permission possible. It has any number of segments, but is fully defined by
it's name. It may have wildcards, allowing for easily giving multiple permissions of the same form to users,
especially when the number of permissions is large, infinite, or undetermined. Note: Permissions with different
delimiters and wildcards are treated as the same, so don't use multiple delimiters or wildcards unless you know
completely what you're doing.
"""
def __init__(self, name, description=None, delimiter=".", wildcard="*"):
"""Create a Permission object with the specified name and optional description.
:param name: The string representing the name of the permission. This indicates what the permission grants.
:param description: A human-readable string describing the abilities this permission grants.
:param delimiter: The character to be used as the delimiter for segments. Default: "."
:param wildcard: The character to be used as the wildcard. Default: "*"
:rtype: :py:class`Permission` representing the supplied values.
"""
self.delimiter = delimiter
self.segments = name.split(self.delimiter)
self.description = description
self.wildcard = wildcard
self.state = dict()
@property
def is_wildcard(self):
"""Determines whether the permission is a wildcard permission or a simple permission.
:rtype: True or False
"""
return self.wildcard in self.segments
@property
def is_end_wildcard(self):
"""Returns whether this permission ends in a wildcard. Terminating wildcards are treated differently from other
wildcards, as they may represent an infinite number of segments rather than just the typical single segment.
:rtype: True or False
"""
return self.segments[len(self.segments)-1] == self.wildcard
def grants_permission(self, other_permission):
"""Checks whether this permission grants the supplied permission.
:param other_permission: The permission that we're checking
:type other_permission: :py:class:`Permission` or :py:class:`basestring`
:rtype: True or False
"""
if other_permission is None:
return True
if isinstance(other_permission, basestring):
other_permission = Permission(name=other_permission)
if len(self.segments) < len(other_permission.segments) and not self.is_end_wildcard:
return False
if len(self.segments) > len(other_permission.segments):
return False
for s, o in zip(self.segments, other_permission.segments):
if s != o and s != self.wildcard:
return False
return True
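    # Hedged examples (editor's addition, not original code) of the wildcard
    # semantics implemented above:
    #   Permission("forum.*").grants_permission("forum.read")          # True
    #   Permission("forum.*").grants_permission("forum.read.private")  # True, trailing wildcard spans the rest
    #   Permission("forum.*.edit").grants_permission("forum.thread.edit")  # True
    #   Permission("forum.read").grants_permission("forum.write")      # False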
def grants_any_permission(self, permission_set):
"""Checks whether this permission grants access to any permission in the supplied set.
:param permission_set: The set of Permissions that we are checking
:rtype: True or False
"""
return any(self.grants_permission(perm) for perm in permission_set)
@property
def name(self):
"""Returns the name of this permission.
:rtype: :py:class:`str`
"""
return self.delimiter.join(self.segments)
def __eq__(self, other):
if not hasattr(other, "name"):
return False
if not self.description:
return self.name == other.name
if not hasattr(other, "description"):
return False
return self.name == other.name and self.description == other.description
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "{cls}({name}, {desc})".format(cls=self.__class__.__name__, name=self.name, desc=self.description)
def __hash__(self):
return 17 * self.name.__hash__() + 19 * self.description.__hash__()
@staticmethod
def meets_requirements(permission, **kwargs):
if permission:
return True
return False
class DynamicPermission(Permission):
"""Dynamic permissions are used for cases where you want to grant permissions that require state. These permissions
require additional information in order to be evaluated (such as access to a database). This class serves as the
base for dynamic permissions."""
# The list of templates that this dynamic permission uses to match other permissions.
templates = []
def grants_permission(self, other_permission):
"""Checks whether this permission grants the supplied permission.
:param other_permission: The permission that we're checking
:type other_permission: :py:class:`Permission` or :py:class:`basestring`
:rtype: True or False
"""
other = other_permission.name if hasattr(other_permission, 'name') else other_permission
if other == self.name:
return True
for template in self.templates:
matches, m = template.matches_format(other_permission)
if matches:
return self._grants_permission(m, template)
return False
def _grants_permission(self, components, template):
"""This method is where you define the stateful logic of your dynamic permission. Only permissions that match
the formats you specified with your templates will reach this code, and only the wildcard portion of the
template is returned. The template is supplied so that you know how to parse the components.
:param components: A :py:class:`list` containing the portions of the other permission that matched the template
:param template: The :py:class:`PermissionTemplate` that matched the permission.
        :rtype: True or False
"""
raise NotImplementedError()
def create_stateful_permission(self, state):
if self.state:
raise Exception("You cannot create a stateful permission from a stateful permission")
new_perm = self.__class__(name=self.name, description=self.description,
delimiter=self.delimiter, wildcard=self.wildcard)
new_perm.state = state
return new_perm
class PermissionSet(set):
def grants_permission(self, other_permission):
"""Checks whether this permission set has a permission that grants the supplied permission.
:param other_permission: The permission that we're checking
:type other_permission: :py:class:`Permission` or :py:class:`basestring`
:rtype: True or False
"""
return any(perm.grants_permission(other_permission) for perm in self)
def grants_any_permission(self, permission_set):
"""Checks whether this permission set has any permission that grants access to any permission in the supplied
set.
:param permission_set: The set of Permissions that we are checking
:rtype: True or False
"""
"""O(n^2) :( Can be done faster."""
return any(self.grants_permission(perm) for perm in permission_set)
def has_any_permission(self, other_permission):
"""Checks whether any permission in this permission set is of the form other_permission. Strictly speaking, this
checks whether any permission in the set is granted by other_permission.
:param other_permission: The permission whose form we're checking for
:rtype: True or False
"""
if isinstance(other_permission, basestring):
other_permission = Permission(name=other_permission)
return other_permission.grants_any_permission(self)
def statefulize(self, state=None):
"""Returns a new PermissionSet, with all DynamicPermissions having their state set to the provided state.
:param state: The state to be added to the permissions in the set
:type state: :py:class:`dict`
:rtype: :py:class:`PermissionSet`
"""
ret = PermissionSet()
for perm in self:
if hasattr(perm, 'create_sta
|
enj/origin
|
vendor/github.com/heketi/heketi/extras/tools/comparison.py
|
Python
|
apache-2.0
| 5,562 | 0.001079 |
#!/usr/bin/env python
#
# Copyright (c) 2018 The heketi Authors
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
import argparse
import json
import sys
import yaml
DESC = """
Compare outputs of gluster and/or heketi and/or openshift/k8s.
Prints lists of volumes where sources differ.
"""
EXAMPLE= """
Example:
$ python3 comparison.py
--gluster-info gluster-volume-info.txt
--heketi-json heketi-db.json
--pv-yaml openshift-pv-yaml.yaml
"""
def main():
parser = argparse.ArgumentParser(description=DESC, epilog=EXAMPLE)
parser.add_argument(
'--gluster-info', '-g',
help='Path to a file containing gluster volume info')
parser.add_argument(
'--heketi-json', '-j',
help='Path to a file containing Heketi db json export')
parser.add_argument(
'--pv-yaml', '-y',
help='Path to a file containing PV yaml data')
parser.add_argument(
'--skip-ok', '-K', action='store_true',
help='Exclude matching items from output')
parser.add_argument(
'--pending', action='store_true',
help='Show heketi pending status (best effort)')
parser.add_argument(
'--no-header', '-H', action='store_true',
help='Do not print column header')
parser.add_argument(
'--ignore', '-I', action='append',
        help='Exclude given volume name (multiple allowed)')
cli = parser.parse_args()
check = []
gvinfo = heketi = pvdata = None
if cli.gluster_info:
check.append('gluster')
gvinfo = parse_gvinfo(cli.gluster_info)
if cli.heketi_json:
check.append('heketi')
heketi = parse_heketi(cli.heketi_json)
if cli.pv_yaml:
check.append('pvs')
pvdata = parse_oshift(cli.pv_yaml)
if not check:
parser.error(
"Must provide: --gluster-info OR --heketi-json OR --pv-yaml")
summary = compile_summary(gvinfo, heketi, pvdata)
for ign in (cli.ignore or []):
if summary.pop(ign, None):
sys.stderr.write('ignoring: {}\n'.format(ign))
compare(summary, check, cli.skip_ok,
header=(not cli.no_header),
show_pending=(cli.pending))
return
def parse_heketi(h_json):
with open(h_json) as fh:
return json.load(fh)
def parse_oshift(yf):
with open(yf) as fh:
return yaml.safe_load(fh)
def parse_gvlist(gvl):
vols = {}
with open(gvl) as fh:
for line in fh:
vols[line.strip()] = []
return vols
def parse_gvinfo(gvi):
vols = {}
volume = None
with open(gvi) as fh:
for line in fh:
l = line.strip()
if l.startswith("Volume Name:"):
volume = l.split(":", 1)[-1].strip()
vols[volume] = []
if l.startswith('Brick') and l != "Bricks:":
if volume is None:
raise ValueError("Got Brick before volume: %s" % l)
vols[volume].append(l.split(":", 1)[-1].strip())
return vols
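# Editor's sketch (illustrative, not part of the original tool): given
# `gluster volume info` output such as
#
#   Volume Name: vol_demo
#   Bricks:
#   Brick1: host-1:/bricks/brick1/brick
#
# parse_gvinfo() returns {'vol_demo': ['host-1:/bricks/brick1/brick']},
# i.e. a mapping from volume name to its list of bricks.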
def compile_heketi(summary, heketi):
for vid, v in heketi['volumeentries'].items():
n = v['Info']['name']
summary[n] = {'id': vid, 'heketi': True}
if v['Pending']['Id']:
summary[n]['heketi-pending'] = True
def compile_gvinfo(summary, gvinfo):
for vn in gvinfo:
if vn in summary:
summary[vn]['gluster'] = True
else:
summary[vn] = {'gluster': True}
def compile_pvdata(summary, pvdata):
for elem in pvdata['items']:
g = elem.get('spec', {}).get('glusterfs', {})
if not g:
continue
vn = g['path']
if vn in summary:
summary[vn]['pvs'] = True
else:
summary[vn] = {'pvs': True}
def compile_summary(gvinfo, heketi, pvdata):
summary = {}
if heketi:
compile_heketi(summary, heketi)
if gvinfo:
compile_gvinfo(summary, gvinfo)
if pvdata:
compile_pvdata(summary, pvdata)
return summary
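# Editor's note (illustrative, not original code): compile_summary() keys the
# result by volume name; a volume known to every requested source looks like
#   {'vol_demo': {'id': 'abc123', 'heketi': True, 'gluster': True, 'pvs': True}}
# compare() then prints 'ok' for such entries, or the subset of sources that
# did report the volume otherwise.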
def compare(summary, check, skip_ok=False, header=True, show_pending=False):
if header:
_print = Printer(['Volume-Name', 'Match', 'Volume-ID'])
else:
_print = Printer([])
for vn, vs in summary.items():
ok = all(vs.get(c) for c in check)
if ok and skip_ok:
continue
heketi_info = vs.get('id', '')
if show_pending and vs.get('heketi-pending'):
heketi_info += '/pending'
if ok:
_print.line(vn, 'ok', heketi_info)
else:
matches = ','.join(
sorted(k for k in check if vs.get(k)))
            _print.line(vn, matches, heketi_info)
class Printer(object):
"""Utility class for printing columns w/ headers
|
."""
def __init__(self, header):
self._did_header = False
self.header = header or []
def line(self, *columns):
if self.header and not self._did_header:
self._print_header(columns)
self._did_header = True
print (' '.join(columns))
def _print_header(self, columns):
parts = []
for idx, hdr in enumerate(self.header):
pad = max(0, len(columns[idx]) - len(hdr))
parts.append('{}{}'.format(hdr, ' ' * pad))
print (' '.join(parts))
if __name__ == '__main__':
main()
|
jtoppins/beaker
|
IntegrationTests/src/bkr/inttest/server/selenium/test_distros.py
|
Python
|
gpl-2.0
| 9,272 | 0.001294 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import xmlrpclib
import requests
from turbogears.database import session
from bkr.inttest.server.selenium import WebDriverTestCase, XmlRpcTestCase
from bkr.inttest.server.webdriver_utils import login, delete_and_confirm
from bkr.inttest import data_setup, with_transaction, get_server_base
from bkr.server.model import Permission, User
def go_to_distro_view(browser, distro):
browser.get(get_server_base() + 'distros/view?id=%s' % distro.id)
class DistroViewTest(WebDriverTestCase):
@with_transaction
def setUp(self):
self.distro = data_setup.create_distro()
self.distro.tags.append(u'SAD')
self.user = data_setup.create_user(password=u'distro')
self.browser = self.get_browser()
def test_can_add_tag_to_distro(self):
b = self.browser
login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
go_to_distro_view(b, self.distro)
b.find_element_by_id('tags_tag_text').send_keys('HAPPY')
b.find_element_by_link_text('Add').click()
self.assertEquals(b.find_element_by_class_name('flash').text,
'Added Tag HAPPY')
b.find_element_by_xpath(
'//td[normalize-space(text())="HAPPY"]')
with session.begin():
session.refresh(self.distro)
activity = self.distro.activity[0]
self.assertEquals(activity.field_name, u'Tag')
self.assertEquals(activity.service, u'WEBUI')
self.assertEquals(activity.action, u'Added')
self.assertEquals(activity.old_value, None)
self.assertEquals(activity.new_value, u'HAPPY')
def test_can_remove_tag_from_distro(self):
b = self.browser
login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
go_to_distro_view(b, self.distro)
delete_and_confirm(b, '//td[normalize-space(preceding-sibling::td[1]/text())="SAD"]')
self.assertEquals(b.find_element_by_class_name('flash').text,
'Removed Tag SAD')
b.find_element_by_xpath('//div[@class="tags"]//table[not('
'.//td[normalize-space(text())="SAD"])]')
with session.begin():
session.refresh(self.distro)
self.assert_(u'SAD' not in self.distro.tags)
with session.begin():
session.refresh(self.distro)
activity = self.distro.activity[0]
self.assertEquals(activity.field_name, u'Tag')
self.assertEquals(activity.service, u'WEBUI')
self.assertEquals(activity.action, u'Removed')
self.assertEquals(activity.old_value, u'SAD')
self.assertEquals(activity.new_value, None)
def test_non_admin_user_cannot_add_tag(self):
b = self.browser
login(b, self.user.user_name, 'distro')
go_to_distro_view(b, self.distro)
b.find_element_by_xpath('//div[@class="tags" and not(.//a)]')
response = requests.get(get_server_base() +
'distros/save_tag?id=%s&tag.text=HAPPY' % self.distro.id)
self.assertEquals(response.status_code, 403)
def test_non_admin_user_cannot_remove_tag(self):
b = self.browser
login(b, self.user.user_name, 'distro')
go_to_distro_view(b, self.distro)
b.find_element_by_xpath('//div[@class="tags" and not(.//a)]')
response = requests.get(get_server_base() +
'distros/tag_remove?id=%s&tag=SAD' % self.distro.id)
self.assertEquals(response.status_code, 403)
# https://bugzilla.redhat.com/show_bug.cgi?id=830940
def test_provision_links_arent_shown_for_expired_trees(self):
with session.begin():
not_expired_tree = data_setup.create_distro_tree(
distro=self.distro, variant=u'Client')
expired_tree = data_setup.create_distro_tree(
distro=self.distro, variant=u'Server')
session.flush()
expired_tree.lab_controller_assocs[:] = []
b = self.browser
login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
go_to_distro_view(b, self.distro)
self.assertEquals(b.find_element_by_xpath(
'//table//tr[td[1]/a/text()="%s"]/td[4]'
% not_expired_tree.id).text,
'Provision')
self.assertEquals(b.find_element_by_xpath(
'//table//tr[td[1]/a/text()="%s"]/td[4]'
% expired_tree.id).text,
'')
class DistroExpireXmlRpcTest(XmlRpcTestCase):
@with_transaction
def setUp(self):
self.group = data_setup.create_group()
# grant the group distro_expire permission
self.group.permissions.append(Permission.by_name('distro_expire'))
self.user = data_setup.create_user(password=u'password')
self.group.add_member(self.user)
self.lc = data_setup.create_labcontroller(user=self.user)
self.distro = data_setup.create_distro()
self.distro_tree = data_setup.create_distro_tree(distro=self.distro,
arch='x86_64', lab_controllers=[self.lc])
self.server = self.get_server()
def test_activity_created_with_expire(self):
self.server.auth.login_password(self.user.user_name, u'password')
self.server.distros.expire(self.distro.name, 'CUSTOMSERVICE')
session.expire_all()
with session.begin():
activity = self.distro_tree.activity[0]
self.assertEquals(activity.service, u'CUSTOMSERVICE')
class DistroEditVersionXmlRpcTest(XmlRpcTestCase):
@with_transaction
def setUp(self):
self.distro = data_setup.create_distro()
self.server = self.get_server()
# https://bugzilla.redhat.com/show_bug.cgi?id=1173368
def test_empty_version(self):
self.server.auth.login_password(data_setup.ADMIN_USER,
data_setup.ADMIN_PASSWORD)
try:
            self.server.distros.edit_version(self.distro.name, '')
self.fail('should raise')
except xmlrpclib.Fault, e:
self.assertIn('OSMajor cannot be empty', e.faultString)
class DistroTaggingXmlRpcTest(XmlRpcTestCase):
@with_transaction
def setUp(self):
self.distro = data_setup.create_distro()
self.distro.tags.append(u'SAD')
self.user = data_setup.create_user(password=u'distro')
self.server = self.get_server()
def test_can_add_tag_to_distro(self):
self.server.auth.login_password(
data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
self.server.distros.tag(self.distro.name, 'HAPPY')
with session.begin():
session.refresh(self.distro)
self.assert_(u'HAPPY' in self.distro.tags)
def test_can_remove_tag_from_distro(self):
self.server.auth.login_password(
data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
self.server.distros.untag(self.distro.name, 'SAD')
with session.begin():
session.refresh(self.distro)
self.assert_(u'SAD' not in self.distro.tags)
def test_non_admin_user_cannot_add_tag(self):
self.server.auth.login_password(self.user.user_name, 'distro')
try:
self.server.distros.tag(self.distro.name, 'HAPPY')
self.fail('should raise')
except xmlrpclib.Fault, e:
self.assert_('IdentityFailure' in e.faultString)
def test_non_admin_user_cannot_remove_tag(self):
self.server.auth.login_password(self.user.user_name, 'distro')
try:
self.server.distros.untag(self.distro.name, 'SAD')
self.fail('should raise')
except xmlrpclib.Fault, e:
self.assert_('IdentityFailure' in e.faultString)
def test_adding_tag_is_recorded_in_distro_activity(self):
self.server.auth.login_password(
data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
self.ser
|
acrazing/dbapi
|
scripts/join_group.py
|
Python
|
mit
| 1,512 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 acrazing <joking.young@gmail.com>. All rights reserved.
# @since 2018-12-03 00:03:40
import time
from dbapi.DoubanAPI import DoubanAPI
class GroupAPI:
def __init__(self):
self.api = DoubanAPI(flush=False)
self._applied = {}
self._users = {}
def run(self):
self.api.flush()
groups = self.api.group.list_joined_groups()['results']
for group in groups:
self._applied[group['alias']] = True
self.handle_user(self.api.user_alias)
def handle_user(self, user_alias):
self.join_user_groups(user_alias)
users = self.api.people.list_contacts()['results']
for user in users:
if self._users.get(user['alias'], None) is None:
self.handle_user(user['alias'])
self._users[user['alias']] = True
time.sleep(30)
else:
print('skip user: %s' % (user['alias']))
    def join_user_groups(self, user_alias):
groups = self.api.group.list_joined_groups(user_alias)['results']
for group in groups:
if self._applied.get(group['alias'], None) is None:
self.api.group.join_group(group['alias'], 'Hello ~')
self._applied[group['alias']] = True
time.sleep(30)
else:
print('skip group: %s' % (group['alias']))
if __name__ == '__main__':
group = GroupAPI()
group.run()
|
mrkm4ntr/incubator-airflow
|
tests/dags/test_scheduler_dags.py
|
Python
|
apache-2.0
| 1,500 | 0.001333 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
DEFAULT_DATE = datetime(2016, 1, 1)
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(dag_id='test_start_date_scheduling', start_date=datetime.utcnow() + timedelta(days=1))
dag1_task1 = DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_task_start_date_scheduling', start_date=DEFAULT_DATE)
dag2_task1 = DummyOperator(
    task_id='dummy1', dag=dag2, owner='airflow', start_date=DEFAULT_DATE + timedelta(days=3)
)
dag2_task2 = DummyOperator(task_id='dummy2', dag=dag2, owner='airflow')
|
napsternxg/gensim
|
gensim/test/utils.py
|
Python
|
gpl-3.0
| 5,981 | 0.002508 |
#!/usr/bin/env python
# encoding: utf-8
"""Module contains common utilities used in automated code tests for Gensim modules.
Attributes:
-----------
module_path : str
Full path to this module directory.
common_texts : list of list of str
Toy dataset.
common_dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary of toy dataset.
common_corpus : list of list of (int, int)
Corpus of toy dataset.
Examples:
---------
It's easy to keep objects in temporary folder and reuse'em if needed:
.. sourcecode:: pycon
>>> from gensim.models import word2vec
>>> from gensim.test.utils import get_tmpfile, common_texts
>>>
>>> model = word2vec.Word2Vec(common_texts, min_count=1)
>>> temp_path = get_tmpfile('toy_w2v')
>>> model.save(temp_path)
>>>
>>> new_model = word2vec.Word2Vec.load(temp_path)
>>> result = new_model.wv.most_similar("human", topn=1)
Let's print first document in toy dataset and then recreate it using its corpus and dictionary.
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts, common_dictionary, common_corpus
>>> print(common_texts[0])
['human', 'interface', 'computer']
>>> assert common_dictionary.doc2bow(common_texts[0]) == common_corpus[0]
We can find our toy set in test data directory.
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> with open(datapath("testcorpus.txt")) as f:
... texts = [line.strip().split() for line in f]
>>> print(texts[0])
['computer', 'human', 'interface']
If you don't need to keep temporary objects on disk use :func:`~gensim.test.utils.temporary_file`:
.. sourcecode:: pycon
>>> from gensim.test.utils import temporary_file, common_corpus, common_dictionary
>>> from gensim.models import LdaModel
>>>
>>> with temporary_file("temp.txt") as tf:
... lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=3)
... lda.save(tf)
"""
import contextlib
import tempfile
import os
import shutil
from gensim.corpora import Dictionary
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
def datapath(fname):
"""Get full path for file `fname` in test data directory placed in this module directory.
Usually used to place corpus to test_data directory.
Parameters
----------
fname : str
Name of file.
Returns
-------
str
Full path to `fname` in test_data folder.
Example
-------
Let's get path of test GloVe data file and check if it exits.
.. sourcecode:: pycon
>>> from gensim.corpora import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath("testcorpus.mm"))
>>> for document in corpus:
... pass
"""
return os.path.join(module_path, 'test_data', fname)
def get_tmpfile(suffix):
"""Get full path to file `suffix` in temporary folder.
This function doesn't creates file (only generate unique name).
Also, it may return different paths in consecutive calling.
Parameters
----------
suffix : str
Suffix of file.
Returns
-------
str
Path to `suffix` file in temporary folder.
Examples
--------
Using this function we may get path to temporary file and use it, for example, to store temporary model.
.. sourcecode:: pycon
>>> from gensim.models import LsiModel
>>> from gensim.test.utils import get_tmpfile, common_dictionary, common_corpus
>>>
>>> tmp_f = get_tmpfile("toy_lsi_model")
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary)
>>> model.save(tmp_f)
>>>
>>> loaded_model = LsiModel.load(tmp_f)
"""
return os.path.join(tempfile.gettempdir(), suffix)
@contextlib.contextmanager
def temporary_file(name=""):
"""This context manager creates file `name` in temporary directory and returns its full path.
Temporary directory with included files will deleted at the end of context. Note, it won't create file.
Parameters
----------
name : str
Filename.
Yields
------
str
Path to file `name` in temporary directory.
Examples
--------
This example demonstrates that created temporary directory (and included
files) will deleted at the end of context.
.. sourcecode:: pycon
>>> import os
>>> from gensim.test.utils import temporary_file
>>> with temporary_file("temp.txt") as tf, open(tf, 'w') as outfile:
... outfile.write("my extremely useful information")
... print("Is this file exists? {}".format(os.path.exists(tf)))
... print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
Is this file exists? True
Is this folder exists? True
>>>
>>> print("Is this file exists? {}".format(os.path.exists(tf)))
Is this file exists? False
>>> print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
Is this folder exists? False
"""
# note : when dropping python2.7 support, we can use tempfile.TemporaryDirectory
tmp = tempfile.mkdtemp()
try:
yield os.path.join(tmp, name)
finally:
shutil.rmtree(tmp, ignore_errors=True)
# set up vars used in testing ("Deerwester" from the web tutorial)
common_texts = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
common_dictionary = Dictionary(common_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
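# Editor's note (hedged, not part of the original module): each entry of
# common_corpus is a bag-of-words list of (token_id, count) pairs, e.g.
# common_corpus[0] is expected to equal [(0, 1), (1, 1), (2, 1)] for the first
# toy document; the exact ids depend on the Dictionary's token ordering.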
|
RawEvan/sharephotos
|
site-packages/sinastorage/__init__.py
|
Python
|
gpl-3.0
| 605 | 0.02314 |
from __future__ import absolute_import
__version__ = "1.1.6"
from .bucket import SCSFile, SCSBucket, SCSError, KeyNotFound
SCSFile, SCSBucket, SCSError, KeyNotFound
__all__ = "SCSFile", "SCSBucket", "SCSError"
class appinfo(object):
def __init__(self,access_key,secret_key,secure):
self.access_key=access_key
self.secret_key=secret_key
self.secure = secure
def getDefaultAppInfo():
pass
def setDefaultAppInfo(access_key,secret_key,secure=False):
default = appinfo(access_key,secret_key,secure)
global getDefaultAppInfo
getDefaultAppInfo = lambda: default
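# Hedged usage sketch (editor's addition): credentials are configured once per
# process and read back through the rebound getDefaultAppInfo, e.g.
#   setDefaultAppInfo('my-access-key', 'my-secret-key')
#   info = getDefaultAppInfo()
#   # info.access_key == 'my-access-key'; info.secure defaults to False
# The key values above are placeholders, not real credentials.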
|
sh01/gonium
|
src/pid_filing.py
|
Python
|
gpl-2.0
| 2,162 | 0.019889 |
#!/usr/bin/env python
#Copyright 2004,2008 Sebastian Hagen
# This file is part of gonium.
#
# gonium is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# gonium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import fcntl
class PidFile:
def __init__(self, filename:bytes=None):
"""Open pid-file."""
if (filename is None):
argv0 = sys.argv[0]
if (isinstance(argv0, str)):
# Get rid of silly unicode names
|
argv0 = argv0.encode()
|
filename = os.path.basename(argv0) + b'.pid'
if (os.path.exists(filename)):
mode = 'r+b'
else:
mode = 'wb'
# The feature allowing for calling open() on bytes filenames was added
# somewhere between CPython 3.0-rc1 and -rc3. This version is written
# for 3.0 final, so using it should be fine.
self.filename = filename
self.file = open(filename, mode)
def lock(self, else_die:bool=False):
"""Acquire lock on pid file; if successful, write our pid to it. If
the optional argument is specified and True, any IOErrors will
be caught and turned into SystemExits."""
try:
fcntl.lockf(self.file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
if (else_die):
print('Our pid-file {0} is already locked, aborting.'.format(self.filename,))
sys.exit(0)
raise
self.file.seek(0)
self.file.write(ascii(os.getpid()).encode('ascii'))
self.file.truncate()
def unlock(self):
"""Release lock on pid file."""
fcntl.lockf(self.file.fileno(), fcntl.LOCK_UN)
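# Hedged usage sketch (editor's addition, not part of gonium): a daemon would
# typically take the lock at startup and abort if another instance holds it:
#   pf = PidFile()            # defaults to <script name>.pid in the cwd
#   pf.lock(else_die=True)    # writes our pid on success, exits if locked
#   ...                       # run; call pf.unlock() on shutdown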
|
twitter/pants
|
src/python/pants/scm/git.py
|
Python
|
apache-2.0
| 22,734 | 0.011261 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import binascii
import io
import logging
import os
import traceback
from builtins import bytes, object, open
from contextlib import contextmanager
from pants.scm.scm import Scm
from pants.util.contextutil import pushd
from pants.util.memo import memoized_method
from pants.util.process_handler import subprocess
from pants.util.strutil import ensure_binary, ensure_text
# 40 is Linux's hard-coded limit for total symlinks followed when resolving a path.
MAX_SYMLINKS_IN_REALPATH = 40
GIT_HASH_LENGTH = 20
# Precompute these because ensure_binary is slow and we'll need them a lot
SLASH = ensure_binary('/')
NUL = ensure_binary('\0')
SPACE = ensure_binary(' ')
NEWLINE = ensure_binary('\n')
EMPTY_STRING = ensure_binary("")
logger = logging.getLogger(__name__)
class Git(Scm):
"""An Scm implementation backed by git."""
@classmethod
def detect_worktree(cls, binary='git', subdir=None):
"""Detect the git working tree above cwd and return it; else, return None.
:param string binary: The path to the git binary to use, 'git' by default.
:param string subdir: The path to start searching for a git repo.
:returns: path to the directory where the git working tree is rooted.
:rtype: string
"""
# TODO(John Sirois): This is only used as a factory for a Git instance in
# pants.base.build_environment.get_scm, encapsulate in a true factory method.
cmd = [binary, 'rev-parse', '--show-toplevel']
try:
if subdir:
with pushd(subdir):
process, out = cls._invoke(cmd)
else:
process, out = cls._invoke(cmd)
cls._check_result(cmd, process.returncode, raise_type=Scm.ScmException)
except Scm.ScmException:
return None
return cls._cleanse(out)
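  # Hedged example (editor's addition, not pants code): called from inside a
  # checkout, detect_worktree() returns the repository root, e.g.
  #   Git.detect_worktree(subdir='/home/user/repo/src')  # -> '/home/user/repo'
  # and returns None when no enclosing git working tree is found.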
@classmethod
def clone(cls, repo_url, dest, binary='git'):
"""Clone the repo at repo_url into dest.
:param string binary: The path to the git binary to use, 'git' by default.
:returns: an instance of this class representing the cloned repo.
:rtype: Git
"""
cmd = [binary, 'clone', repo_url, dest]
process, out = cls._invoke(cmd)
cls._check_result(cmd, process.returncode)
return cls(binary=binary, worktree=dest)
@classmethod
def _invoke(cls, cmd):
"""Invoke the given command, and return a tuple of process and raw binary output.
stderr flows to wherever its currently mapped for the parent process - generally to
the terminal where the user can see the error.
:param list cmd: The command in the form of a list of strings
:returns: The completed process object and its standard output.
:raises: Scm.LocalException if there was a problem exec'ing the command at all.
"""
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as e:
# Binary DNE or is not executable
raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), e))
out, _ = process.communicate()
return process, out
@classmethod
def _cleanse(cls, output, errors='strict'):
return output.strip().decode('utf-8', errors=errors)
@classmethod
def _check_result(cls, cmd, result, failure_msg=None, raise_type=Scm.ScmException):
if result != 0:
raise raise_type(failure_msg or '{} failed with exit code {}'.format(' '.join(cmd), result))
def __init__(self, binary='git', gitdir=None, worktree=None, remote=None, branch=None):
"""Creates a git scm proxy that assumes the git repository is in the cwd by default.
binary: The path to the git binary to use, 'git' by default.
gitdir: The path to the repository's git metadata directory (typically '.git').
worktree: The path to the git repository working tree directory (typically '.').
remote: The default remote to use.
branch: The default remote branch to use.
"""
super(Scm, self).__init__()
self._gitcmd = binary
self._worktree = os.path.realpath(worktree or os.getcwd())
self._gitdir = os.path.realpath(gitdir) if gitdir else os.path.join(self._worktree, '.git')
self._remote = remote
self._branch = branch
def current_rev_identifier(self):
return 'HEAD'
@property
def worktree(self):
return self._worktree
@property
def commit_id(self):
return self._check_output(['rev-parse', 'HEAD'], raise_type=Scm.LocalException)
@property
def server_url(self):
git_output = self._check_output(['remote', '--verbose'], raise_type=Scm.LocalException)
def origin_urls():
for line in git_output.splitlines():
name, url, action = line.split()
if name == 'origin' and action == '(push)':
yield url
origins = list(origin_urls())
if len(origins) != 1:
raise Scm.LocalException("Unable to find remote named 'origin' that accepts pushes "
"amongst:\n{}".format(git_output))
return origins[0]
@property
def tag_name(self):
# Calls to git describe can have bad performance on large repos. Be aware
# of the performance hit if you use this property.
tag = self._check_output(['describe', '--tags', '--always'], raise_type=Scm.LocalException)
return None if 'cannot' in tag else tag
@property
def branch_name(self):
branch = self._check_output(['rev-parse', '--abbrev-ref', 'HEAD'],
raise_type=Scm.LocalException)
return None if branch == 'HEAD' else branch
def fix_git_relative_path(self, worktree_path, relative_to):
return os.path.relpath(os.path.join(self._worktree, worktree_path), relative_to)
def changed_files(self, from_commit=None, include_untracked=False, relative_to=None):
relative_to = relative_to or self._worktree
rel_suffix = ['--', relative_to]
uncommitted_changes = self._check_output(['diff', '--name-only', 'HEAD'] + rel_suffix,
raise_type=Scm.LocalException)
files = set(uncommitted_changes.splitlines())
if from_commit:
# Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
# the changes that have occurred on the current branch.
committed_cmd = ['diff', '--name-only', from_commit + '...HEAD'] + rel_suffix
committed_changes = self._check_output(committed_
|
cmd,
raise_type=Scm.LocalException)
files.update(committed_changes.split())
if include_untracked:
untracked_
|
cmd = ['ls-files', '--other', '--exclude-standard', '--full-name'] + rel_suffix
untracked = self._check_output(untracked_cmd,
raise_type=Scm.LocalException)
files.update(untracked.split())
# git will report changed files relative to the worktree: re-relativize to relative_to
return {self.fix_git_relative_path(f, relative_to) for f in files}
def changes_in(self, diffspec, relative_to=None):
relative_to = relative_to or self._worktree
cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', diffspec]
files = self._check_output(cmd, raise_type=Scm.LocalException).split()
return {self.fix_git_relative_path(f.strip(), relative_to) for f in files}
def changelog(self, from_commit=None, files=None):
# We force the log output encoding to be UTF-8 here since the user may have a git config that
# overrides the git UTF-8 default log output encoding.
args = ['log', '--encoding=UTF-8', '--no-merges', '--stat', '--find-renames', '--find-copies']
if from_commit:
args.append(from_commit + '..HEAD')
if files:
args.append('--')
args.extend(files)
# There are various circumstances that can lead to git logs that are not transcodeable to utf-8,
# for example: http://comments.gmane.org/gmane.comp.version-control.git/262685
# Git will not error in these cases and we do not wish to either. Here we direct byte sequences
# that can not be utf-8 decoded to be replaced
|
CCI-MOC/GUI-Backend
|
core/migrations/0045_allow_blank_membership_AND_rename_project_links.py
|
Python
|
apache-2.0
| 583 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0044_cm_d
|
efaults_for_allocation_and_quota'),
]
operations = [
migrations.AlterField(
model_name='machinerequest',
name='new_version_membership',
field=models.ManyToManyField(to='core.Group', blank=True),
),
migrations.AlterModelTable(
name='projectexternall
|
ink',
table='project_links',
),
]
|
rodluger/planetplanet
|
planetplanet/photo/trappist1.py
|
Python
|
gpl-3.0
| 7,549 | 0.015511 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
trappist1.py |github|
---------------------
This module hosts TRAPPIST-1-specific routines.
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/planetplanet/photo/trappist1.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from ..constants import *
from .ppo import Star, Planet, System
from . import theta
import numpy as np
import matplotlib.pyplot as pl
import os
from tqdm import tqdm
__all__ = ['Trappist1']
def Trappist1(sample = True, distance = 12, seed = None, **kwargs):
'''
Returns an instance of :py:obj:`planetplanet.photo.System` for the full
    TRAPPIST-1 system. Star and planet parameters are drawn from their
respective prior distributions, which are based on the observed values
from Gillon et al. (2017), Luger et al. (2017), and
Burgasser & Mamajek (2017). Longitudes of ascending node are
drawn from the :math:`\\theta` distribution derived in the paper.
:param bool sample: Draw a random sample from the full prior? \
If :py:obj:`False`,returns the mean values for all parameters. \
Default :py:obj:`True`
:param float distance: Distance to the system in parsecs. \
Default :py:obj:`12`
:param int seed: Random number generator seed. Default :py:obj:`None`
:param kwargs: Any other :py:obj:`kwargs` to be passed to \
:py:func:`planetplanet.Star`, \
:py:func:`planetplanet.Planet`, and :py:func:`planetplanet.System`.
.. plot::
:align: center
from planetplanet.photo.trappist1 import Trappist1
from planetplanet.constants import MINUTE
import matplotlib.pyplot as pl
import numpy as np
system = Trappist1()
system.compute(np.arange(0, 10, 1 * MINUTE))
system.plot_lightcurve()
pl.show()
'''
# Randomizer seed
if seed is not None:
np.random.seed(seed)
# Account for the uncertainty?
if not sample:
N = lambda mu, sigma: mu
# Fix the inclinations at their mean values
inclinations = [89.65, 89.67, 89.75, 89.86, 89.680, 89.710, 89.80]
else:
N = lambda mu, sigma: mu + sigma * np.random.randn()
# Draw from the joint inclination distribution
PATH = os.path.dirname(os.path.abspath(__file__))
samples = np.loadtxt(os.path.join(PATH, "inclination.dat"))
inclinations = samples[np.random.randint(len(samples))]
# Instantiate the star; radius from Burgasser & Mamajek (2017)
mstar = N(0.0802, 0.0073)
rstar = N(0.121, 0.003)
teff = (N(0.000524, 0.000034)
* LSUN / (4 * np.pi * (rstar * RSUN) ** 2 * SBOLTZ)) ** 0.25
star = Star('A', m = mstar, r = rstar, teff = teff, color = 'k', **kwargs)
# Parameters from Gillon et al. (2017) and Luger et al. (2017)
# Mass for `h` is currently unconstrained, so basing it loosely on
# the mass distribution for `d`, which has a similar radius.
planets = [None for i in range(7)]
names = ['b', 'c', 'd', 'e', 'f', 'g', 'h']
periods = [(1.51087081, 0.60e-6),
(2.4218233, 0.17e-5),
(4.049610, 0.63e-4),
(6.099615, 0.11e-4),
(9.206690, 0.15e-4),
(12.35294, 0.12e-3),
(18.767, 0.004)]
# Transit times, t0 − 2,450,000 (BJD_{TDB})
# These were taken from the Excel source data corresponding
# to Extended Data Figure 4 of Gillon et al. (2017), downloaded from
# http://www.nature.com/nature/journal/v542/n7642/source_data/nature21360-sf4.xlsx
# These are the *last* transit times measured in the discovery paper.
# Note that photodynamical integrations will only be accurate for
# integrations starting close to this time (7670 corresponds to
# 12:00:00 UT October 8, 2016). We will update these ephemerides as
# more TTV data becomes available.
transits = [(7671.52876, 0.00033),
(7670.29869, 0.00035),
(7670.14198, 0.00066),
(7672.5793, 0.0026),
(7671.39279, 0.00072),
(7665.35151, 0.00028),
(7662.55463, 0.00056)]
masses = [(0.85, 0.72),
(1.38, 0.61),
(0.41, 0.27),
(0.62, 0.58),
(0.68, 0.18),
(1.34, 0.88),
(0.4, 0.3)]
depths = [(0.7266, 0.0088),
(0.687, 0.010),
(0.367, 0.017),
(0.519, 0.026),
(0.673, 0.023),
(0.782, 0.027),
(0.346, 0.032)]
# These are approximated from Supplementary Figure 6 in
# Luger et al. (2017). These can certainly be improved with better TTV
# data and more dynamical modeling.
eccentricities = [(0.0005, 0.0001),
(0.004, 0.001),
(0.0004, 0.0003),
(0.007, 0.0005),
(0.009, 0.001),
(0.004, 0.001),
(0.003, 0.001)]
# These we're just going to fix for now. We have no prior
# constraints on them. Let's assume the most optimistic albedos.
albedos = [(0., 0), (0., 0), (0., 0), (0., 0),
(0., 0), (0., 0), (0., 0)]
tnights = [(40., 0), (40., 0), (40., 0), (40., 0),
(40., 0), (40., 0), (40., 0)]
# Colors for plotting
colors = ['firebrick', 'coral', 'gold', 'mediumseagreen', 'turquoise',
'cornflowerblue', 'midnightblue']
# Compute the polar angle scatter
sig_theta = theta.sample()
# Instantiate the planets
for i in range(7):
# Period and time of transit
per = N(*periods[i])
t0 = N(*transits[i])
# Positive mass
m = 0
while m <= 0:
m = N(*masses[i])
# Inclination in range [0, 90]
inc = inclinations[i]
if inc > 90:
inc = 180 - inc
# Longitude of ascending node in degrees
if (i == 0) or (not sample):
Omega = 0
else:
Omega = N(0, sig_theta)
# Longitude of pericenter (uniform over [0-360 deg])
if sample:
w = 360. * np.random.rand()
else:
w = 0.
# Eccentricity
ecc = 1
while (ecc < 0) or (ecc >= 1):
ecc = N(*eccentricities[i])
# Radius from Rp / Rstar
mu = np.sqrt(depths[i][0] / 100)
sig = 0.5 * depths[i][1] / 100 / mu
RpRs = N(mu, sig)
r = RpRs * rstar * RSUN / REARTH
# Albedo, night side temperature, effective temperature
albedo = N(*albedos[i])
tnight = N(*tnights[i])
# Instantiate!
planets[i] = Planet(names[i], m = m, per = per, inc = inc, r = r,
t0 = t0, Omega = Omega, w = w, ecc = ecc,
color = colors[i], tnight = tnight,
albedo = albedo, **kwargs)
# Return the system
system = System(star, distance = distance, *planets, **kwargs)
return system
|
loli/semisupervisedforests
|
sklearn/feature_selection/univariate_selection.py
|
Python
|
bsd-3-clause
| 18,609 | 0 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = stats.fprob(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the Anova F-value for the provided sample
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
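# --- Added illustrative sketch (not part of the original module) ---
# Hedged usage example with invented data: the first feature separates the
# two classes well, the second one barely does, and the F scores reflect it.
def _example_f_classif():
    X = np.array([[1.0, 10.0],
                  [1.2, 11.0],
                  [3.0, 10.5],
                  [3.1, 9.5]])
    y = np.array([0, 0, 1, 1])
    F, pval = f_classif(X, y)
    return F, pval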
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared statistic for each class/feature combination.
This score can be used to select the n_feat
|
ures features with the
highest values for the test chi-squared statistic from X, which must
contain booleans or frequencies (e.g., term counts in document
classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Para
|
meters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
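# --- Added illustrative sketch (not part of the original module) ---
# Hedged usage example with invented term counts: chi2 expects non-negative
# features (e.g. term frequencies) and scores their dependence on the class.
def _example_chi2():
    X = np.array([[3, 0],
                  [4, 1],
                  [0, 5],
                  [1, 4]])
    y = np.array([0, 0, 1, 1])
    chi2_scores, pval = chi2(X, y)
    return chi2_scores, pval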
def f_regression(X, y, center=True):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector.
    center : bool, default=True
        If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F =
|
JulienMcJay/eclock
|
windows/kivy/doc/autobuild.py
|
Python
|
gpl-2.0
| 6,361 | 0.008018 |
'''
Script to generate Kivy API from source code.
Code is messy, but working.
Be careful if you change anything in it!
'''
ignore_list = (
'kivy._event',
'kivy.factory_registers',
'kivy.graphics.buffer',
'kivy.graphics.vbo',
'kivy.graphics.vertex',
'kivy.lib.osc'
)
import os
import sys
from glob import glob
import kivy
# force loading of kivy modules
import kivy.app
import kivy.metrics
import kivy.atlas
import kivy.context
import kivy.core.audio
import kivy.core.camera
import kivy.core.clipboard
import kivy.core.gl
import kivy.core.image
import kivy.core.spelling
import kivy.core.text
import kivy.core.text.markup
import kivy.core.video
import kivy.core.window
import kivy.ext
import kivy.geometry
import kivy.graphics
import kivy.graphics.shader
import kivy.animation
import kivy.modules.keybinding
import kivy.modules.monitor
import kivy.modules.touchring
import kivy.modules.inspector
import kivy.modules.recorder
import kivy.modules.screen
import kivy.storage
import kivy.storage.dictstore
import kivy.storage.jsonstore
import kivy.storage.redisstore
import kivy.network.urlrequest
import kivy.modules.webdebugger
import kivy.support
import kivy.input.recorder
import kivy.interactive
import kivy.garden
from kivy.factory import Factory
# force loading of all classes from factory
for x in list(Factory.classes.keys())[:]:
getattr(Factory, x)
# Directory of doc
base_dir = os.path.dirname(__file__)
dest_dir = os.path.join(base_dir, 'sources')
examples_framework_dir = os.path.join(base_dir, '..', 'examples', 'framework')
def writefile(filename, data):
global dest_dir
# avoid to rewrite the file if the content didn't change
f = os.path.join(dest_dir, filename)
print('write', filename)
if os.path.exists(f):
with open(f) as fd:
if fd.read() == data:
return
h = open(f, 'w')
h.write(data)
h.close()
# Activate Kivy modules
'''
for k in kivy.kivy_modules.list().keys():
kivy.kivy_modules.import_module(k)
'''
# Search all kivy module
l = [(x, sys.modules[x], os.path.basename(sys.modules[x].__file__).rsplit('.', 1)[0]) for x in sys.modules if x.startswith('kivy') and sys.modules[x]]
# Extract packages from modules
packages = []
modules = {}
api_modules = []
for name, module, filename in l:
if name in ignore_list:
continue
if not any([name.startswith(x) for x in ignore_list]):
api_modules.append(name)
if filename == '__init__':
packages.append(name)
else:
if hasattr(module, '__all__'):
modules[name] = module.__all__
else:
modules[name] = [x for x in dir(module) if not x.startswith('__')]
packages.sort()
# Create index
api_index = \
'''API Reference
-------------
The API reference is a lexicographic list of all the different classes,
methods and features that Kivy offers.
.. toctree::
:maxdepth: 1
'''
api_modules.sort()
for package in api_modules:
api_index += " api-%s.rst\n" % package
writefile('api-index.rst', api_index)
# Create index for all packages
template = \
'''==========================================================================================================
$SUMMARY
==========================================================================================================
$EXAMPLES_REF
.. automodule:: $PACKAGE
:members:
:show-inheritance:
.. toctree::
$EXAMPLES
'''
template_examples = \
'''.. _example-reference%d:
Examples
--------
%s
'''
template_examples_ref = \
'''# :ref:`Jump directly to Examples <example-reference%d>`'''
def extract_summary_line(doc):
if doc is None:
return
for line in doc.split('\n'):
line = line.strip()
# don't take empty line
if len(line) < 1:
continue
# ref mark
if line.startswith('.. _'):
continue
return line
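# --- Added illustrative sketch (not part of the original script) ---
# Hedged example: extract_summary_line() returns the first non-empty line of
# a docstring that is not a reStructuredText reference mark.
def _example_extract_summary_line():
    doc = ".. _some-ref:\n\nWidget that does something.\nMore detail here."
    return extract_summary_line(doc)  # -> 'Widget that does something.'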
for package in packages:
summary = extract_summary_line(sys.modules[package].__doc__)
if summary is None:
summary = 'NO DOCUMENTATION (package %s)' % package
t = template.replace('$SUMMARY', summary)
t = t.replace('$PACKAGE', package)
t = t.replace('$EXAMPLES_REF', '')
t = t.replace('$EXAMPLES', '')
# search packages
for subpackage in packages:
packagemodule = subpackage.rsplit('.', 1)[0]
if packagemodule != package or len(subpackage.split('.')) <= 2:
continue
t += " api-%s.rst\n" % subpackage
# search modules
m = list(modules.keys())
m.sort(key=lambda x: extract_summary_line(sys.modules[x].__doc__))
for module in m:
packagemodule = module.rsplit('.', 1)[0]
if packagemodule != package:
continue
t += " api-%s.rst\n" % module
writefile('api-%s.rst' % package, t)
# Create index for all module
m = list(modules.keys())
m.sort()
refid = 0
for module in m:
summary = extract_summary_line(sys.modules[module].__doc__)
if summary is None:
summary = 'NO DOCUMENTATION (module %s)' % package
# search examples
example_output = []
example_prefix = module
if module.startswith('kivy.'):
example_prefix = module[5:]
example_prefix = example_prefix.replace('.', '_')
    # try to find any example in the framework directory
list_examples = glob('%s*.py' % os.path.join(examples_framework_dir, example_prefix))
for x in list_examples:
# extract filename without directory
xb = os.path.basename(x)
# add a section !
example_output.append('File :download:`%s <%s>` ::' % (
xb, os.path.join('..', x)))
# put the file in
with open(x,
|
'r') as fd:
d = fd.read().strip()
d = '\t' + '\n\t'.join(d.split('\n'))
example_output.append(d)
t = template.replace('$SUMMARY', summary)
t = t.replace('$PACKAGE', module)
if len(example_output):
refid += 1
example_output = template_examples % (refid, '\n\n\n'.join(example_output))
t = t.replace('$EXAMPLES_REF', template_examples_ref % refid)
t = t.replace('$EXAMPLES', example_output)
|
else:
t = t.replace('$EXAMPLES_REF', '')
t = t.replace('$EXAMPLES', '')
writefile('api-%s.rst' % module, t)
# Generation finished
print('Generation finished, do make html')
|
googlegenomics/pipelines-api-examples
|
set_vcf_sample_id/set_vcf_sample_id.py
|
Python
|
bsd-3-clause
| 2,263 | 0.011931 |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
# set_vcf_sample_id.py
#
# This script processes a single sample VCF file and replaces the
# sample ID in the header line.
#
# This could be replaced (almost) with a one-line sed script:
#
# sed -e 's/\(^#CHROM\t.*\t\)original$/\1new/' \
#
# What this script adds is a little more control, notably with error
# handling. sed will not report the number of changes, so to determine
# if a change was made, you'd need to make a second pass over the file.
#
# This script reads from stdin and writes to stdout.
#
# Usage:
# python set_vcf_sample_id.py original_id new_id
#
# If the original_id is specified, it will be verified before making the change.
# If the original_id is set to "", verification will be skipped.
import sys
def main():
"""Entry point to the script."""
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s original_id new_id" % sys.argv[0]
sys.exit(1)
original_id = sys.argv[1]
new_id = sys.argv[2]
lines_processed = 0
lines_changed = 0
for line in sys.stdin:
lines_processed = lines_processed + 1
    # The only line we care about is the #CHROM header line
if line.startswith('#CHROM\t'):
fields = line.rstrip('\n').split('\t')
# If an "original_id" was specified, verify that is what is in the file
if original_id:
curr_id = fields[-1]
if curr_id != original_id:
print >> sys.stderr, \
|
"ERROR: Current sample ID does not match expected: %s != %s\n" % (
curr_id, original_id)
sys.exit(1)
# Set the new value into the fields array and recreate the line
fi
|
elds[-1] = new_id
line = '\t'.join(fields) + '\n'
lines_changed = lines_changed + 1
# Emit the current line
sys.stdout.write(line)
# Emit some statistics to stderr
print >> sys.stderr, "Total lines: %d" % lines_processed
print >> sys.stderr, "Changed lines: %d" % lines_changed
if lines_changed != 1:
print >> sys.stderr, "ERROR: Changed lines is not 1"
sys.exit(1)
if __name__ == "__main__":
main()
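# --- Added illustrative sketch (not part of the original script) ---
# Hedged example of the core rewrite: only the last tab-separated field of
# the "#CHROM" header line is replaced.  The sample IDs below are invented.
def _example_header_rewrite():
    header = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tOLD_SAMPLE\n"
    fields = header.rstrip('\n').split('\t')
    fields[-1] = "NEW_SAMPLE"
    return '\t'.join(fields) + '\n'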
|
cberry777/dd-agent
|
utils/kubernetes/kubeutil.py
|
Python
|
bsd-3-clause
| 8,888 | 0.002025 |
# (C) Datadog, Inc. 2015-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import logging
import os
from urlparse import urljoin
# project
from util import check_yaml
from utils.checkfiles import get_conf_path
from utils.http import retrieve_json
from utils.singleton import Singleton
from utils.dockerutil import DockerUtil
import requests
log = logging.getLogger('collector')
KUBERNETES_CHECK_NAME = 'kubernetes'
class KubeUtil:
__metaclass__ = Singleton
DEFAULT_METHOD = 'http'
MACHINE_INFO_PATH = '/api/v1.3/machine/'
METRICS_PATH = '/api/v1.3/subcontainers/'
PODS_LIST_PATH = '/pods/'
DEFAULT_CADVISOR_PORT = 4194
DEFAULT_KUBELET_PORT = 10255
DEFAULT_MASTER_PORT = 8080
DEFAULT_MASTER_NAME = 'kubernetes' # DNS name to reach the master from a pod.
CA_CRT_PATH = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'
AUTH_TOKEN_PATH = '/run/secrets/kubernetes.io/serviceaccount/token'
POD_NAME_LABEL = "io.kubernetes.pod.name"
NAMESPACE_LABEL = "io.kubernetes.pod.namespace"
def __init__(self, instance=None):
self.docker_util = DockerUtil()
if instance is None:
try:
config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
check_config = check_yaml(config_file_path)
instance = check_config['instances'][0]
# kubernetes.yaml was not found
except IOError as ex:
log.error(ex.message)
instance = {}
except Exception:
log.error('Kubernetes configuration file is invalid. '
'Trying connecting to kubelet with default settings anyway...')
instance = {}
self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
self.host = instance.get("host") or self.docker_util.get_hostname()
self._node_ip = self._node_name = None # lazy evaluation
self.host_name = os.environ.get('HOSTNAME')
self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
self.kubelet_port = instance.get('kubelet_port', KubeUtil.DEFAULT_KUBELET_PORT)
self.kubelet_api_url = '%s://%s:%d' % (self.method, self.host, self.kubelet_port)
self.cadvisor_url = '%s://%s:%d' % (self.method, self.host, self.cadvisor_port)
self.kubernetes_api_url = 'https://%s/api/v1' % (os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME)
self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
self.machine_info_url = urljoin(self.cadvisor_url, KubeUtil.MACHINE_INFO_PATH)
self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
self.kube_health_url = urljoin(self.kubelet_api_url, 'healthz')
# keep track of the latest k8s event we collected and posted
# default value is 0 but TTL for k8s events is one hour anyways
self.last_event_collection_ts = 0
def get_kube_labels(self, excluded_keys=None):
pods = self.retrieve_pods_list()
return self.extract_kube_labels(pods, excluded_keys=excluded_keys)
def extract_kube_labels(self, pods_list, excluded_keys=None):
"""
Extract labels from a list of pods coming from
the kubelet API.
"""
excluded_keys = excluded_keys or []
kube_labels = defaultdict(list)
pod_items = pods_list.get("items") or []
for pod in pod_items:
metadata = pod.get("metadata", {})
name = metadata.get("name")
namespace = metadata.get("namespace")
labels = metadata.get("labels")
if name and labels and namespace:
key = "%s/%s" % (namespace, name)
for k, v in labels.iteritems():
if k in excluded_keys:
continue
kube_labels[key].append(u"kube_%s:%s" % (k, v))
return kube_labels
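    # --- Added illustrative sketch (not part of the original class) ---
    # Hedged example with an invented pod: one pod "web-1" in namespace
    # "default" labelled app=nginx yields {"default/web-1": [u"kube_app:nginx"]}.
    def _example_extract_kube_labels(self):
        pods_list = {"items": [{"metadata": {"name": "web-1",
                                             "namespace": "default",
                                             "labels": {"app": "nginx"}}}]}
        return self.extract_kube_labels(pods_list)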
def extract_meta(self, pods_list, field_name):
"""
        Extract fields like `uid` or `name` from the `metadata` section of a
        list of pods coming from the kubelet API.
        TODO: currently not in use; was added to support events filtering, consider removing it.
"""
uids = []
pods = pods_list.get("items") or []
for p in pods:
value = p.get('metadata', {}).get(field_name)
if value is not None:
uids.append(value)
return uids
def retrieve_pods_list(self):
"""
Retrieve the list of pods for this cluster querying the kubelet API.
TODO: the list of pods could be cached with some policy to be decided.
"""
return retrieve_json(self.pods_list_url)
def retrieve_machine_info(self):
"""
Retrieve machine info from Cadvisor.
"""
return retrieve_json(self.machine_info_url)
def retrieve_metrics(self):
"""
Retrieve metrics from Cadvisor.
"""
return retrieve_json(self.metrics_url)
def filter_pods_list(self, pods_list, host_ip):
"""
Filter out (in place) pods that are not running on the given host.
        TODO: currently not in use; was added to support events filtering, consider removing it.
"""
pod_items = pods_list.get('items') or []
log.debug('Found {} pods to filter'.format(len(pod_items)))
filtered_pods = []
for pod in pod_items:
status = pod.get('status', {})
if status.get('hostIP') == host_ip:
filtered_pods.append(pod)
log.debug('Pods after filtering: {}'.format(len(filtered_pods)))
pods_list['items'] = filtered_pods
return po
|
ds_list
def retrieve_json_auth(self, url, auth_token, timeout=10):
"""
Kubernetes API
|
requires authentication using a token available in
every pod.
We try to verify ssl certificate if available.
"""
verify = self.CA_CRT_PATH if os.path.exists(self.CA_CRT_PATH) else False
log.debug('ssl validation: {}'.format(verify))
headers = {'Authorization': 'Bearer {}'.format(auth_token)}
r = requests.get(url, timeout=timeout, headers=headers, verify=verify)
r.raise_for_status()
return r.json()
def get_node_info(self):
"""
Return the IP address and the hostname of the node where the pod is running.
"""
if None in (self._node_ip, self._node_name):
self._fetch_host_data()
return self._node_ip, self._node_name
def _fetch_host_data(self):
"""
        Retrieve the host name and IP address from the payload returned by the
        pod-listing endpoint of the kubelet or Kubernetes API.
The host IP address is different from the default router for the pod.
"""
try:
pod_items = self.retrieve_pods_list().get("items") or []
except Exception as e:
log.warning("Unable to retrieve pod list %s. Not fetching host data", str(e))
return
for pod in pod_items:
metadata = pod.get("metadata", {})
name = metadata.get("name")
if name == self.host_name:
status = pod.get('status', {})
spec = pod.get('spec', {})
# if not found, use an empty string - we use None as "not initialized"
self._node_ip = status.get('hostIP', '')
self._node_name = spec.get('nodeName', '')
break
def extract_event_tags(self, event):
"""
Return a list of tags extracted from an event object
"""
tags = []
if 'reason' in event:
tags.append('reason:%s' % event.get('reason', '').lower())
if 'namespace' in event.get('metadata', {}):
tags.append('namespace:%s' % event['metadata']['namespace'])
if 'host' in event.get('source', {}):
tags.append('node_name:%s' % event['source']['host'])
if 'kind' in event.get('
|
OpenWinCon/OpenWinNet
|
web-gui/AP/urls.py
|
Python
|
apache-2.0
| 119 | 0 |
from django.conf.urls impor
|
t url
from . import views
urlpatterns = [
url(r'^
|
$', views.AP_list, name='AP_list'),
]
|
scaphilo/koalixcrm
|
koalixcrm/crm/documents/contract.py
|
Python
|
bsd-3-clause
| 11,821 | 0.003215 |
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.plugin import *
from koalixcrm.crm.contact.phone_address import PhoneAddress
from koalixcrm.crm.contact.email_address import EmailAddress
from koalixcrm.crm.contact.postal_address import PostalAddress
from koalixcrm.crm.documents.invoice import Invoice
from koalixcrm.crm.documents.quote import Quote
from koalixcrm.crm.documents.purchase_order import PurchaseOrder
from koalixcrm.global_support_functions import xstr
from koalixcrm.crm.const.purpose import *
from koalixcrm.crm.documents.invoice import InlineInvoice
from koalixcrm.crm.documents.quote import InlineQuote
from koalixcrm.crm.reporting.generic_project_link import InlineGenericProjectLink
from koalixcrm.crm.exceptions import *
from koalixcrm.djangoUserExtension.models import UserExtension
import koalixcrm.crm.documents.calculations
import koalixcrm.crm.documents.pdf_export
from rest_framework import serializers
class PostalAddressForContract(PostalAddress):
purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
contract = models.ForeignKey('Contract')
class Meta:
app_label = "crm"
verbose_name = _('Postal Address For Contracts')
verbose_name_plural = _('Postal Address For Contracts')
def __str__(self):
return xstr(self.prename) + ' ' + xstr(self.name) + ' ' + xstr(self.addressline1)
class PhoneAddressForContract(PhoneAddress):
purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
contract = models.ForeignKey('Contract')
class Meta:
app_label = "crm"
verbose_name = _('Phone Address For Contracts')
verbose_name_plural = _('Phone Address For Contracts')
def __str__(self):
return str(self.phone)
class EmailAddressForContract(EmailAddress):
purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
contract = models.ForeignKey('Contract')
class Meta:
app_label = "crm"
verbose_name = _('Email Address For Contracts')
verbose_name_plural = _('Email Address For Contracts')
def __str__(self):
return str(self.email)
class ContractPostalAddress(admin.StackedInline):
model = PostalAddressForContract
extra = 1
classes = ['collapse']
fieldsets = (
('Basics', {
'fields': ('prefix',
'pre_name',
'name',
'address_line_1',
'address_line_2',
'address_line_3',
'address_line_4',
'zip_code',
'town',
'state',
'country',
'purpose'),
}),
)
allow_add = True
class ContractPhoneAddress(admin.TabularInline):
model = PhoneAddressForContract
extra = 1
classes = ['collapse']
fieldsets = (
('Basics', {
'fields': ('phone', 'purpose',)
}),
)
allow_add = True
class ContractEmailAddress(admin.TabularInline):
model = EmailAddressForContract
extra = 1
classes = ['collapse']
fieldsets = (
('Basics', {
'fields': ('email',
'purpose',)
}),
)
allow_add = True
class Contract(models.Model):
staff = models.ForeignKey('auth.User',
limit_choices_to={'is_staff': True},
verbose_name=_("Staff"),
related_name="db_relcontractstaff",
blank=True,
null=True)
description = models.TextField(verbose_name=_("Description"))
default_customer = models.ForeignKey("Customer",
verbose_name=_("Default Customer"),
null=True,
blank=True)
default_supplier = models.ForeignKey("Supplier",
verbose_name=_("Default Supplier"),
null=True,
blank=True)
default_currency = models.ForeignKey("Currency",
verbose_name=_("Default Currency"),
blank=False,
null=False)
default_template_set = models.ForeignKey("djangoUserExtension.TemplateSet",
verbose_name=_("Default Template Set"), null=True, blank=True)
date_of_creation = models.DateTimeField(verbose_name=_("Created at"),
auto_now_add=True)
last_modification = models.DateTimeField(verbose_name=_("Last modified"),
auto_now=True)
last_modified_by = models.ForeignKey('auth.User',
limit_choices_to={'is_staff': True},
verbose_name=_("Last modified by"),
related_name="db_contractlstmodified")
class Meta:
app_label = "crm"
verbose_name = _('Contract')
verbose_name_plural = _('Contracts')
def get_template_set(self, calling_model):
if self.default_template_set:
required_template_set = str(type(calling_model).__name__)
return self.default_template_set.get_template_set(required_template_set)
else:
raise TemplateSetMissingInContract("The Contract has no Default Template Set selected")
def create_from_reference(self, calling_model, staff):
staff_user_extension = UserExtension.get_user_extension(staff.id)
self.default_customer = calling_model
self.default_currency = staff_user_extension.defaultCurrency
self.default_template_set = staff_user_extension.defaultTemplateSet
self.last_modified_by = staff
self.staff = staff
self.save()
return self
def create_invoice(self):
invoice = Invoice()
invoice.create_from_reference(self)
return invoice
def create_quote(self):
quote = Quote()
quote.create_from_reference(self)
return quote
def create_purchase_order(self):
purchase_order = PurchaseOrder()
purchase_order.create_from_reference(self)
return purchase_order
def __str__(self):
return _("Contract") + " " + str(self.id)
class OptionContract(admin.ModelAdmin):
list_display = ('id',
'descript
|
ion',
'default_customer',
'default_supplier',
'staff',
'default_currency',
'date_of_creation',
'last_modification',
'last_modified_by')
list_display_links = ('id',)
|
list_filter = ('default_customer',
'default_supplier',
'staff',
'default_currency')
ordering = ('id', )
search_fields = ('id',
'contract')
fieldsets = (
(_('Basics'), {
'fields': ('description',
'default_customer',
'staff',
'default_supplier',
'default_currency',
'default_template_set')
}),
)
inlines = [ContractPostalAddress,
ContractPhoneAddress,
ContractEmailAddress,
InlineQuote,
InlineInvoice,
InlineGenericProjectLink]
pluginProcessor = PluginProcessor()
inlines.extend(pluginProcessor.getPluginAdditions("contractInlines"))
def create_quote(self, request, queryset):
from koalixcrm.crm.views.newdocument import CreateNewDocumentView
for obj in queryset:
|
jhazelwo/python-awscli
|
python2awscli/model/securitygroup.py
|
Python
|
mit
| 6,235 | 0.001123 |
""" -*- coding: utf-8 -*- """
from python2awscli import bin_aws
from python2awscli.error import AWSNotFound, ParseError, AWSDuplicate
from python2awscli import must
class BaseSecurityGroup(object):
def __init__(self, name, region, vpc, description, inbound=None, outbound=None):
"""
:param name: String, name of SG
:param region: String, AWS region
        :param vpc: String, ID of the VPC this SG belongs to
:param description: String
:param inbound: List of dicts, IP Permissions that should exist
:param outbound: List of dicts, IP Permissions that should exist
"""
self.id = None
self.name = name
self.region = region
self.vpc = vpc
self.description = description
self.IpPermissions = []
self.IpPermissionsEgress = []
self.owner = None
self.changed = False
try:
self._get()
except AWSNotFound:
self._create()
self._merge_rules(must.be_list(inbound), self.IpPermissions)
self._merge_rules(must.be_list(outbound), self.IpPermissionsEgress, egress=True)
if self.changed:
self._get()
def _break_out(self, existing):
"""
Undo AWS's rule flattening so we can do simple 'if rule in existing' logic later.
:param existing: List of SG rules as dicts.
:return: List of SG rules as dicts.
"""
spool = list()
for rule in existing:
for ip in rule['IpRanges']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = [ip]
copy_of_rule['UserIdGroupPairs'] = []
spool.append(copy_of_rule)
for group in rule['UserIdGroupPairs']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = []
copy_of_rule['UserIdGroupPairs'] = [group]
spool.append(copy_of_rule)
return spool
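    # --- Added illustrative sketch (not part of the original class) ---
    # Hedged example with invented addresses: one flattened rule covering two
    # CIDR ranges is split into two single-range rules, so later
    # "rule in existing" checks stay simple.
    def _example_break_out(self):
        flattened = [{'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443,
                      'IpRanges': [{'CidrIp': '10.0.0.0/8'},
                                   {'CidrIp': '192.168.0.0/16'}],
                      'UserIdGroupPairs': []}]
        return self._break_out(flattened)  # -> two rules, one CidrIp each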
def _merge_rules(self, requested, active, egress=False):
"""
:param requested: List of dicts, IP Permissions that should exist
:param active: List of dicts, IP Permissions that already exist
:param egress: Bool, addressing outbound rules or not?
:return: Bool
"""
if not isinstance(requested, list):
raise ParseError(
'SecurityGroup {0}, need a list of dicts, instead got "{1}"'.format(self.name, requested))
for rule in requested:
if rule not in active:
self._add_rule(rule, egress)
for active_rule in active:
if active_rule not in requested:
self._rm_rule(active_rule, egress)
return True
def _add_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'authorize-security-group-ingress'
if egress:
direction = 'authorize-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Authorized: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _rm_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'revoke-security-group-ingress'
if egress:
direction = 'revoke-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Revoked: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _create(self):
"""
Create a Security Group
:return:
"""
# AWS grants all new SGs this default outbound rule "This is pro-human & anti-machine behavior."
default_egress = {
'Ipv6Ranges': [],
'PrefixListIds': [],
'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
'UserIdGroupPairs': [], 'IpProtocol': '-1'
}
command = [
'ec2', 'create-security-group',
'--region', self.region,
'--group-name', self.name,
'--description', self.description,
'--vpc-id', self.vpc
]
try:
self.id = bin_aws(command, key='GroupId')
except AWSDuplicate:
return False # OK if it already exists.
print('Created {0}'.format(command)) # TODO: Log(...)
self.IpPermissions = []
self.IpPermissionsEgress = [default_egress]
self.changed = True
return True
def _get(self):
"""
Get information about Security Group from AWS and update self
:return: Bool
"""
command = ['ec2', 'describe-security-groups', '--region', self.region, '--group-names', self.name]
result =
|
bin_aws(command, key='SecurityGroups
|
', max=1) # will raise NotFound if empty
me = result[0]
self.id = me['GroupId']
self.owner = me['OwnerId']
self.IpPermissions = self._break_out(me['IpPermissions'])
self.IpPermissionsEgress = self._break_out(me['IpPermissionsEgress'])
print('Got {0}'.format(command)) # TODO: Log(...)
return True
def _delete(self):
"""
Delete myself by my own id.
As of 20170114 no other methods call me. You must do `foo._delete()`
:return:
"""
command = ['ec2', 'delete-security-group', '--region', self.region,
# '--dry-run',
'--group-id', self.id
]
bin_aws(command, decode_output=False)
print('Deleted {0}'.format(command)) # TODO: Log(...)
return True
|
Tristan79/ComicStreamer
|
comicstreamerlib/server.py
|
Python
|
apache-2.0
| 80,525 | 0.009637 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
import tornado.escape
import tornado.ioloop
import tornado.web
import urllib
import mimetypes
import re
import threading
from urllib2 import quote
from sqlalchemy import desc
from sqlalchemy.orm import joinedload,subqueryload,aliased
from sqlalchemy.sql.expression import func, select
import json
import pprint
import mimetypes
from PIL import Image
try:
from PIL import WebPImagePlugin
except:
pass
import StringIO
import gzip
import dateutil.parser
import logging
import logging.handlers
import imghdr
import random
import signal
import sys
import socket
import webbrowser
import time
from comicapi.comicarchive import *
import csversion
import utils
from database import *
from monitor import Monitor
from config import ComicStreamerConfig
from folders import AppFolders
from options import Options
from bonjour import BonjourThread
from bookmark import Bookmark
from blacklist import Blacklist
from library import Library
# add webp test to imghdr in case it isn't there already
def my_test_webp(h, f):
if h.startswith(b'RIFF') and h[8:12] == b'WEBP':
return 'webp'
imghdr.tests.append(my_test_webp)
# to allow a blank username
def fix_username(username):
return username + "ComicStreamer"
def custom_get_current_user(handler):
user = handler.get_secure_cookie("user")
if user:
user = fix_username(user)
return user
# you can change default root here :-)
def deviceroot(s):
if(re.search('(iPhone|iPod).*', s.request.headers["User-Agent"])):
return "default/"
elif(re.search('(Android).*', s.request.headers["User-Agent"])):
return "default/"
elif(re.search('(iPad).*', s.request.headers["User-Agent"])):
return "default/"
else:
return "default/"
class BaseHandler(tornado.web.RequestHandler):
@property
def webroot(self):
return self.application.webroot
@property
def library(self):
return self.application.library
@property
def port(self):
return self.application.port
def get_current_user(self):
return custom_get_current_user(self)
class GenericAPIHandler(BaseHandler):
def validateAPIKey(self):
if self.application.config['security']['use_api_key']:
api_key = self.get_argument(u"api_key", default="")
if api_key == self.application.config['security']['api_key']:
return True
else:
raise tornado.web.HTTPError(400)
return False
class JSONResultAPIHandler(GenericAPIHandler)
|
:
def setContentType(self):
self.add_header("Content-type","application/json; charset=UTF-8")
def processPagingArgs(self, query):
per_page = self.get_argument(u"per_page", default=None)
offset = self.get_argument(u"offset", default=None)
# offset and max_results should be processed last
total_results
|
= None
if per_page is not None:
total_results = query.distinct().count()
try:
max = 0
max = int(per_page)
if total_results > max:
query = query.limit(max)
except:
pass
if offset is not None:
try:
off = 0
off = int(offset)
query = query.offset(off)
except:
pass
return query, total_results
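    # --- Added illustrative sketch (not part of the original handler) ---
    # Hedged usage example: `query` is assumed to be a SQLAlchemy query; a
    # request carrying ?per_page=10&offset=20 would trim it to ten rows
    # starting at row 20 and report the untrimmed count for pagination links.
    def _example_paging(self, query):
        query, total_results = self.processPagingArgs(query)
        return query, total_results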
def processComicQueryArgs(self, query):
def hasValue(obj):
return obj is not None and obj != ""
keyphrase_filter = self.get_argument(u"keyphrase", default=None)
series_filter = self.get_argument(u"series", default=None)
path_filter = self.get_argument(u"path", default=None)
folder_filter = self.get_argument(u"folder", default="")
title_filter = self.get_argument(u"title", default=None)
start_filter = self.get_argument(u"start_date", default=None)
end_filter = self.get_argument(u"end_date", default=None)
added_since = self.get_argument(u"added_since", default=None)
modified_since = self.get_argument(u"modified_since", default=None)
lastread_since = self.get_argument(u"lastread_since", default=None)
order = self.get_argument(u"order", default=None)
character = self.get_argument(u"character", default=None)
team = self.get_argument(u"team", default=None)
location = self.get_argument(u"location", default=None)
storyarc = self.get_argument(u"storyarc", default=None)
alternateseries = self.get_argument(u"alternateseries", default=None)
volume = self.get_argument(u"volume", default=None)
publisher = self.get_argument(u"publisher", default=None)
language = self.get_argument(u"language", default=None)
credit_filter = self.get_argument(u"credit", default=None)
tag = self.get_argument(u"tag", default=None)
genre = self.get_argument(u"genre", default=None)
if folder_filter != "":
folder_filter = os.path.normcase(os.path.normpath(folder_filter))
#print folder_filter
person = None
role = None
if hasValue(credit_filter):
credit_info = credit_filter.split(":")
if len(credit_info[0]) != 0:
person = credit_info[0]
if len(credit_info) > 1:
role = credit_info[1]
if hasValue(person):
query = query.join(Credit).filter(Person.name.ilike(person.replace("*","%"))).filter(Credit.person_id==Person.id)
if role is not None:
query = query.filter(Credit.role_id==Role.id).filter(Role.name.ilike(role.replace("*","%")))
#query = query.filter( Comic.persons.contains(unicode(person).replace("*","%") ))
if hasValue(keyphrase_filter):
keyphrase_filter = unicode(keyphrase_filter).replace("*","%")
keyphrase_filter = "%" + keyphrase_filter + "%"
query = query.filter( Comic.series.ilike(keyphrase_filter)
| Comic.alternateseries_raw.any(AlternateSeries.name.ilike(keyphrase_filter))
| Comic.title.ilike(keyphrase_filter)
| Comic.publisher.ilike(keyphrase_filter)
| Comic.language.ilike(keyphrase_filter)
| Comic.path.ilike(keyphrase_filter)
| Comic.comments.ilike(keyphrase_filter)
| Comic.characters_raw.any(Character.name.ilike(keyphrase_filter))
| Comic.teams_raw.any(Team.name.ilike(keyphrase_filter))
| Comic.generictags_raw.any(GenericTag.name.ilike(keyphrase_filter))
| Comic.locations_raw.any(Location.name.ilike(keyphrase_filter))
| Comic.storyarcs_raw.any(StoryArc.name.ilike(keyphrase_filter))
| Comic.persons_raw.any(Person.name.ilike(keyphrase_filter))
)
def addQueryOnScalar(query, obj_prop, filt):
if hasValue(filt):
filt = unicode(filt).replace("*","%")
return query.filter( obj_prop.ilike(filt))
else:
return query
def addQueryOnList(query, obj_list, list_prop, filt):
if hasValue(filt):
filt = unicode(filt).replace("*","%")
return query.filter( obj_list.any(list_prop.ilike(filt)))
else:
return query
query = addQueryOnScalar(query, Comic.series, series_filter)
query = addQueryOnScalar(query, Comic.title, title_filter)
query = addQueryOnScalar(query, Comic.path, path_filter)
query = addQueryOnScalar(query, Comic.folder, folder_filter)
query = addQueryOnScalar(query, Comic.publisher, publisher)
query = addQue
|
pforret/python-for-android
|
python3-alpha/python-libs/pyxmpp2/sasl/gssapi.py
|
Python
|
apache-2.0
| 3,008 | 0.003324 |
#
# (C) Copyright 2008 Jelmer Vernooij <jelmer@samba.org>
# (C) Copyright 2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""GSSAPI authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 4752 <http://www.ietf.org/rfc/rfc4752.txt>`__
"""
__docformat__ = "restructuredtext en"
import base64
import kerberos
import logging
from .core import ClientAuthenticator, Response, Success
from .core import sasl_mechanism
logger = logging.getLogger("pyxmpp2.sasl.gssapi")
@sasl_mechanism("GSSAPI", 75)
class GSSAPIClientAuthenticator(ClientAuthenticator):
"""Provides client-side GSSAPI SASL (Kerberos 5) authentication."""
def __init__(self, password_manager):
ClientAuthenticator.__init__(self, password_manager)
self.password_manager = password_manager
self
|
.username = None
self._gss = None
self.step = None
self.authzid = None
def start(self, username, authzid):
self.username = username
self.authzid = authzid
_unused, self._gss = kerberos.authGSSClientInit(authzid or
"{0}@{1}".format("xmpp",
self.password_manager.get_serv_host()))
self.step = 0
return self.challenge("")
def challenge(self, challenge):
|
if self.step == 0:
ret = kerberos.authGSSClientStep(self._gss,
base64.b64encode(challenge))
if ret != kerberos.AUTH_GSS_CONTINUE:
self.step = 1
elif self.step == 1:
ret = kerberos.authGSSClientUnwrap(self._gss,
base64.b64encode(challenge))
response = kerberos.authGSSClientResponse(self._gss)
ret = kerberos.authGSSClientWrap(self._gss, response, self.username)
response = kerberos.authGSSClientResponse(self._gss)
if response is None:
return Response("")
else:
return Response(base64.b64decode(response))
def finish(self, data):
self.username = kerberos.authGSSClientUserName(self._gss)
logger.debug("Authenticated as {0!r}".format(
kerberos.authGSSClientUserName(self._gss)))
return Success(self.username, None, self.authzid)
# vi: sts=4 et sw=4
|
sunmont/textclassifier
|
tokenizer.py
|
Python
|
apache-2.0
| 1,809 | 0.007756 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import six
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
re.UNICODE)
class Tokenizer(object):
def __init__(self):
pass
def tokenizer0(self, iter):
for str in iter:
yield TOKENIZER_RE.findall(str)
def tokenizer1(self, iter):
for str in iter:
#tokens = re.sub(r"[^a-z0-9]+", " ", str).split()
tokens = re.sub(r"(?!)[^a-z0-9]+", " ", str).split()
yield tokens
raw_doc = [
" Abbott of Farnham E D Abbott Limited was a British coachbuilding business based in Farnham Surrey trading under that name from 1929. A major part of their output was under sub-contract to motor vehicle manufacturers. Their business closed in 1972."
," Schwan-STABILO is a German maker of pens for writing colouring and cosmetics as well as markers and highlighters for office use. It is the world's largest manufacturer of highlighter pens Stabilo Boss."
" Q-workshop is a Polish company located in Poznań that specializes in designand production of polyhedral dice and dice accessories for use in various games (role-playing gamesboard games and tabletop wargames).
|
They also run an online retail store and maintainan active forum community.Q-workshop was established in 2001 by Patryk Strzelewicz – a student from Poznań. Initiallythe company sold its products via online auction services but in 2005 a website and online store wereestablished."
]
# test
i
|
f __name__ == '__main__':
tokenizer = Tokenizer()
tokenizer_ = tokenizer.tokenizer1
for tokens in tokenizer_(raw_doc):
for token in tokens:
print(token)
|
takluyver/Love
|
love/__main__.py
|
Python
|
mit
| 147 | 0.020408 |
fro
|
m __future__ import absolute_import
from .love import main
import s
|
ys
argv = sys.argv
if len(argv)>=2:
main(argv[1])
else:
main()
|
rachmansenpai/rach-devp
|
chivasbot.py
|
Python
|
gpl-3.0
| 83,839 | 0.010302 |
# -*- coding: utf-8 -*-
import LineAlpha
from LineAlpha.Gen.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob
cl = LineAlpha.LINE()
cl.login(qr=True)
cl.loginResult()
kk = LineAlpha.LINE()
kk.login(qr=True)
kk.loginResult()
ki = LineAlpha.LINE()
ki.login(qr=True)
ki.loginResult()
kc = LineAlpha.LINE()
kc.login(qr=True)
kc.loginResult()
print "login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" Rach Bot
[Id︎]
[Mid]
[Me︎]
[TL︎:「Text」]
[Mc 「mid」]
[K on/off]
[Join︎ on/off]
[Gcancel:︎「Number of people」]
[Group cancelalll︎]
[Leave︎ on/off]
[Add on/off]
[Share on/off]
[Message change:「text」]
[Message check]
[Confirm]
[Jam on/off]
[Change clock:「name」]
[Up]
[Cv join]
[*] Command in the groups [*]
[Curl]
[Ourl]
[url]
[url:「Group ID」]
[Invite:「mid」]
[Kick:「mid」]
[Ginfo]
[Cancel]
[Gn 「group name」]
[Nk 「name」]
[*] Command kicker only [*]
[Bye]
[Kill ban]
[Kill 「@」]
[Ban 「@」] By Tag
[Unban 「@」] By Tag
[Ban︎] Share Contact
[Unban︎] Share Contact
[Banlist︎]
[Cek ban]
[Cv mid]
[Cv ︎invite:「mid」]
[Cv ︎rename:「name」]
[Cv ︎gift]
[Respo︎n]
[Bot cancel]
[Title:]
"""
KAC=[cl,ki,kk,kc]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid]
admin=["u3b2f7586e70571fd1f35b9ba58c91c96"]
wait = {
'contact':True,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"Thanks for add me, Bitch",
"lang":"JP",
"comment":"Thanks for add me, Bitch",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":True,
"cName":"Pepepepe ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protectionOn":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
setTime = wait2['setTime']
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmi
|
d:
|
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.
|
misaki-nyanya/MyPieces
|
kancolle/buildCalc/run.py
|
Python
|
gpl-3.0
| 840 | 0.018519 |
# -*- coding: cp936 -*-
import sys
default_encoding = 'utf-8'
if sy
|
s.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
inf = open("F:/workspace/kancolle/log.txt",'r')
dahe=open("F:/workspace/kancolle/5-13-dahe.txt",'w')
dafeng=open("F:/workspace/kancolle/5-13-dafeng.txt",'w')
bsm=open("F:/workspace/kancolle/5-13-bsm.txt",'w')
maruyu=open("F:/workspace/kancolle/5-13-maruyu.txt",'w')
line = inf.readline()
while line:
if line.find(u'結果:大和')>0:
dahe.write(line)
elif line.find(u'結
|
果:大鳳')>0:
dafeng.write(line)
elif line.find(u'結果:Bismarck')>0:
bsm.write(line)
elif line.find(u'結果:まるゆ')>0:
maruyu.write(line)
line = inf.readline()
inf.close()
dahe.close()
dafeng.close()
bsm.close()
maruyu.close()
|
philanthropy-u/edx-platform
|
lms/djangoapps/onboarding/email_utils.py
|
Python
|
agpl-3.0
| 3,876 | 0.003354 |
import base64
from django.conf import settings
from crum import get_current_request
from openedx.core.lib.request_utils import safe_get_host
from common.lib.mandrill_client.client import MandrillClient
def send_admin_activation_email(first_name, org_id, org_name, claimed_by_name, claimed_by_email, dest_addr, hash_key):
"""
Send an admin activation email.
"""
request = get_current_request()
max_retries = settings.RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS
encoded_org_id = base64.b64encode(str(org_id))
message_context = {
"first_name": first_name,
"key": hash_key.activation_hash,
"org_id": encoded_org_id,
"org_name": org_name,
"referring_user": hash_key.suggested_by.username,
"claimed_by_name": claimed_by_name,
"claimed_by_email": claimed_by_email,
}
admin_activation_link = '{protocol}://{site}/onboarding/admin_activate/{activation_key}?admin_activation=True'.format(
protocol='https' if request.is_secure() else 'http',
site=safe_get_host(request),
org_id=encoded_org_id,
activation_key=hash_key.activation_hash
)
message_context["admin_activation_link"] = admin_activation_link
while max_retries > 0:
try:
MandrillClient().send_mail(MandrillClient.ORG_ADMIN_ACTIVATION_TEMPLATE, dest_addr, message_context)
max_retries = 0
except:
max_retries -= 1
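# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of how the activation link is assembled; the host, hash and
# organization id below are invented.
def _example_activation_link(org_id=42, activation_hash="abc123"):
    encoded_org_id = base64.b64encode(str(org_id))
    link = 'https://lms.example.com/onboarding/admin_activate/{key}?admin_activation=True'.format(
        key=activation_hash)
    return link, encoded_org_id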
def send_admin_update_email(org_id, org_name, dest_addr, org_admin_name, hash_key, claimed_by_email, claimed_by_name):
"""
    Send an email to the current admin, notifying them that this user claims to be the admin.
"""
request = get_current_request()
admin_activation_link = '{protocol}://{site}/onboarding/admin_activate/{claimed_by_key}'.format(
protocol='https' if request.is_secure() else 'http',
site=safe_get_host(request),
claimed_by_key=hash_key.activation_hash
)
message_context = {
"org_name": org_name,
"first_name": org_admin_name,
"claimed_by_name": claimed_by_name,
"claimed_by_email": claimed_by_email,
"admin_activation_link": admin_activation_link
}
MandrillClient().send_mail(MandrillClient.ORG_ADMIN_CHANGE_TEMPLATE, dest_addr, message_context)
def send_admin_update_confirmation_email(org_name, current_admin, new_admin, confirm):
"""
Send an email to the claimed admin, that h
|
e is either accepted as admin or rejected
Arguments:
org_name -- the name of the organization
current_admin -- the current admin of the organization
new_admin -- the new admin of the organization
confirm -- 1 if the current_admin has confirmed resignation else 0
"""
if confirm == 1:
MandrillClient().send_ma
|
il(MandrillClient.ORG_ADMIN_CLAIM_CONFIRMATION, current_admin.email, {
"first_name": current_admin.first_name,
"org_name": org_name,
"claimed_by_name": new_admin.email,
})
MandrillClient().send_mail(MandrillClient.NEW_ADMIN_CLAIM_CONFIRMATION, new_admin.email, {
"first_name": new_admin.first_name,
"org_name": org_name,
"confirm": confirm,
})
else:
MandrillClient().send_mail(MandrillClient.ORG_ADMIN_GET_IN_TOUCH, current_admin.email, {
"first_name": current_admin.first_name,
"org_name": org_name,
"claimed_by_name": "{first_name} {last_name}".format(
first_name=new_admin.first_name, last_name=new_admin.last_name
),
"claimed_by_email": new_admin.email,
})
MandrillClient().send_mail(MandrillClient.NEW_ADMIN_GET_IN_TOUCH, new_admin.email, {
"first_name": new_admin.first_name,
"org_name": org_name,
"current_admin": current_admin.email,
})
|
ubidiscordbot/ubi
|
src/lib/modules/clear.py
|
Python
|
mit
| 108 | 0.009259 |
#Short module amiright
def
|
main(message)
|
:
return [["purgeText", int(message.content.split(' ', 1)[1])]]
|
zepheira/exhibit
|
src/webapp/api/extensions/curate/files/admin/simplejson/__init__.py
|
Python
|
bsd-3-clause
| 10,786 | 0.002781 |
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.7.4'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed wit
|
h that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON repres
|
entation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
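# --- Added illustrative sketch (not part of the original module) ---
# Hedged usage example mirroring the doctest in the module docstring above.
def _example_dump():
    from StringIO import StringIO
    io = StringIO()
    dump(['streaming API'], io)
    return io.getvalue()  # '["streaming API"]'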
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
**kw).encode(obj)
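# Illustrative usage (added for clarity; not part of the original module).
# Only keyword arguments documented above are used; output shown for the
# default encoder settings.
#
#   >>> dumps([1, 2, 3], separators=(',', ':'))
#   '[1,2,3]'
#   >>> print dumps({'a': 1}, indent=4)
#   {
#       "a": 1
#   }
#   >>> from StringIO import StringIO
#   >>> buf = StringIO(); dump([1, 2, 3], buf); buf.getvalue()
#   '[1, 2, 3]'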
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
""
|
lewischeng-ms/pox
|
pox/core.py
|
Python
|
gpl-3.0
| 11,128 | 0.012491 |
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Some of POX's core API and functionality is here, largely in the POXCore
class (an instance of which is available as pox.core.core).
This includes things like component rendezvous, logging, system status
(up and down events), etc.
"""
# Set up initial log state
import logging
import inspect
import time
import os
_path = inspect.stack()[0][1]
_ext_path = _path[0:_path.rindex(os.sep)]
_ext_path = os.path.dirname(_ext_path) + os.sep
_path = os.path.dirname(_path) + os.sep
SQUELCH_TIME = 5
_squelch = ''
_squelchTime = 0
_squelchCount = 0
def getLogger (name=None, moreFrames=0):
"""
In general, you don't need to call this directly, and will use
core.getLogger() instead.
"""
if name is None:
s = inspect.stack()[1+moreFrames]
name = s[1]
if name.endswith('.py'):
name = name[0:-3]
elif name.endswith('.pyc'):
name = name[0:-4]
if name.startswith(_path):
name = name[len(_path):]
elif name.startswith(_ext_path):
name = name[len(_ext_path):]
name = name.replace('/', '.').replace('\\', '.') #FIXME: use os.path or whatever
# Remove double names ("topology.topology" -> "topology")
if name.find('.') != -1:
n = name.split('.')
if len(n) >= 2:
if n[-1] == n[-2]:
del n[-1]
name = '.'.join(n)
if name.endswith(".__init__"):
name = name.rsplit(".__init__",1)[0]
l = logging.getLogger(name)
g=globals()
if not hasattr(l, "print"):
def printmsg (*args, **kw):
#squelch = kw.get('squelch', True)
msg = ' '.join((str(s) for s in args))
s = inspect.stack()[1]
o = '['
if 'self' in s[0].f_locals:
o += s[0].f_locals['self'].__class__.__name__ + '.'
o += s[3] + ':' + str(s[2]) + '] '
o += msg
if o == _squelch:
if time.time() >= _squelchTime:
l.debug("[Previous message repeated %i more times]" % (g['_squelchCount']+1,))
g['_squelchCount'] = 0
g['_squelchTime'] = time.time() + SQUELCH_TIME
else:
g['_squelchCount'] += 1
else:
g['_squelch'] = o
if g['_squelchCount'] > 0:
l.debug("[Previous message repeated %i more times]" % (g['_squelchCount'],))
g['_squelchCount'] = 0
g['_squelchTime'] = time.time() + SQUELCH_TIME
l.debug(o)
setattr(l, "print", printmsg)
setattr(l, "msg", printmsg)
return l
log = (lambda : getLogger())()
from pox.lib.revent import *
# Now use revent's exception hook to put exceptions in event handlers into
# the log...
def _revent_exception_hook (source, event, args, kw, exc_info):
try:
c = source
t = event
if hasattr(c, "__class__"): c = c.__class__.__name__
if isinstance(t, Event): t = t.__class__.__name__
elif issubclass(t, Event): t = t.__name__
except:
pass
log.exception("Exception while handling %s!%s...\n" % (c,t))
import pox.lib.revent.revent
pox.lib.revent.revent.handleEventException = _revent_exception_hook
class GoingUpEvent (Event):
""" Fired when system is going up. """
pass
class GoingDownEvent (Event):
""" Fired when system is going down. """
pass
class UpEvent (Event):
""" Fired when system is up. """
pass
class DownEvent (Event):
""" Fired when system is down. """
pass
class ComponentRegistered (Event):
"""
This is raised by core whenever a new component is registered.
By watching this, a component can monitor whether other components it
depends on are available.
"""
def __init__ (self, name, component):
Event.__init__(self)
self.name = name
self.component = component
import pox.lib.recoco as recoco
class POXCore (EventMixin):
"""
A nexus of the POX API.
pox.core.core is a reference to an instance of this class. This class
serves a number of functions.
An important one is that it can serve as a rendezvous point for
components. A component can register objects on core, and they can
then be accessed on the core object (e.g., if you register foo, then
there will then be a pox.core.core.foo). In many cases, this means you
won't need to import a module.
Another purpose to the central registration is that it decouples
functionality from a specific module. If myL2Switch and yourL2Switch
both register as "switch" and both provide the same API, then it doesn't
matter. Doing this with imports is a pain.
Additionally, a number of common API functions are available here.
"""
_eventMixin_events = set([
UpEvent,
DownEvent,
GoingUpEvent,
GoingDownEvent,
ComponentRegistered
])
def __init__ (self):
self.debug = False
self.running = True
self.components = {}
self.version = (0,0,0)
print "{0} / Copyright 2011 James McCauley".format(self.version_string)
self.scheduler = recoco.Scheduler(daemon=True)
@property
def version_string (self):
return "POX " + '.'.join(map(str, self.version))
def callDelayed (_self, _seconds, _func, *args, **kw):
"""
Calls the function at a later time.
This is just a wrapper around a recoco timer.
"""
t = recoco.Timer(_seconds, _func, args=args, kw=kw,
scheduler = _self.scheduler)
return t
def callLater (_self, _func, *args, **kw):
# first arg is `_self` rather than `self` in case the user wants
# to specify self as a keyword argument
"""
Call the given function with the given arguments within the context
of the co-operative threading environment.
It actually calls it sooner rather than later. ;)
Much of POX is written without locks because it's all thread-safe
with respect to itself, as it's written using the recoco co-operative
threading library. If you have a real thread outside of the
co-operative thread context, you need to be careful about calling
things within it. This function provides a rather simple way that
works for most situations: you give it a callable (like a method)
and some arguments, and it will call that callable with those
arguments from within the co-operative threader, taking care of
synchronization for you.
"""
_self.scheduler.callLater(_func, *args, **kw)
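# Illustrative sketch (added; not in the original source): how code running
# outside the recoco context hands work to it.  "my_switch" and its
# "send_discovery" method are hypothetical names used only for this example.
#
#   >>> from pox.core import core
#   >>> core.callLater(my_switch.send_discovery)       # runs inside the co-operative context
#   >>> core.callDelayed(5, my_switch.send_discovery)  # same, but ~5 seconds from now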
def raiseLater (_self, _obj, *args, **kw):
# first arg is `_self` rather than `self` in case the user wants
# to specify self as a keyword argument
"""
This is similar to callLater(), but provides an easy way to raise a
revent event from outside the co-operative context.
Rather than foo.raiseEvent(BarEvent, baz, spam), you just do
core.raiseLater(foo, BarEvent, baz, spam).
"""
_self.scheduler.callLater(_obj.raiseEvent, *args, **kw)
def getLogger (self, *args, **kw):
"""
Returns a logger. Pass it the name you want if you'd like to specify
one (e.g., core.getLogger("foo")). If you don't specify a name, it
will make one up based on the module name it is called from.
"""
return getLogger(moreFrames=1,*args, **kw)
def quit (self):
"""
Shut down POX.
"""
if self.running:
self.running = False
log.info("Going down...")
import gc
gc.collect()
self.raiseEvent(GoingDownEvent())
self.callLater(self.scheduler.quit)
for i in range(50):
if self.scheduler._hasQuit: break
gc.collect()
time.sleep(.1)
if not self.scheduler._allDone:
log.warning("Scheduler didn't quit in time")
self.raiseEvent(DownEvent())
log.info("
|
alcides/rdflib
|
rdflib/sparql/graphPattern.py
|
Python
|
bsd-3-clause
| 15,809 | 0.013349 |
# -*- coding: utf-8 -*-
#
#
# $Date: 2005/11/04 14:06:36 $, by $Author: ivan $, $Revision: 1.1 $
#
"""
Graph pattern class used by the SPARQL implementation
"""
import sys, os, time, datetime
from rdflib.term import Literal, BNode, URIRef, Variable
from types import *
from rdflib.namespace import NamespaceManager
from rdflib.graph import Graph
from rdflib.sparql import _questChar, Debug, SPARQLError
def _createResource(v) :
"""Create an RDFLib Literal instance with the corresponding XML
Schema datatype set. If the variable is already an RDFLib
resource, it simply returns the resource; otherwise the
corresponding Literal. A SPARQLError Exception is raised if the
type is not implemented.
The Literal contains the string representation of the variable (as
Python does it by default) with the corresponding XML Schema URI
set.
@param v: Python variable
@return: either an RDFLib Literal (if 'v' is not an RDFLib Resource), or the same variable if it is already
an RDFLib resource (ie, Literal, BNode, or URIRef)
@raise SPARQLError: if the type of 'v' is not implemented
"""
if isinstance(v,Literal) or isinstance(v,BNode) or isinstance(v,URIRef) :
# just do nothing
return v
else :
return Literal(v) # Literal now does the datatype bits
def _isResQuest(r) :
"""
Is 'r' a request string (ie, of the form "?XXX")?
@rtype: Boolean
"""
if r and isinstance(r,basestring) and r[0] == _questChar :
return True
return False
class GraphPattern :
"""
Storage of one Graph Pattern, ie, the pattern tuples and the
possible (functional) constraints (filters)
"""
def __init__(self,patterns=[]) :
"""
@param patterns: an initial list of graph pattern tuples
"""
self.patterns = []
self.constraints = []
self.unbounds = []
self.bnodes = {}
if type(patterns) == list :
self.addPatterns(patterns)
elif type(patterns) == tuple :
self.addPattern(patterns)
else :
raise SPARQLError("illegal argument, pattern must be a tuple or a list of tuples")
def _generatePattern(self,tupl) :
"""
Append a tuple to the local patterns. Possible type literals
are converted to real literals on the fly. Each tuple should
contain either 3 elements (for an RDF Triplet pattern) or
four, where the fourth element is a per-pattern constraint
(filter). (The general constraint of SPARQL can be optimized
by assigning a constraint to a specific pattern; because it
stops the graph expansion, its usage might be much more
optimal than the 'global' constraint).
@param tupl: either a three or four element tuple
"""
if type(tupl) != tuple :
raise SPARQLError("illegal argument, pattern must be a tuple, got %s" % type(tupl))
if len(tupl) != 3 and len(tupl) != 4 :
raise SPARQLError("illegal argument, pattern must be a tuple of 3 or 4 element, got %s" % len(tupl))
if len(tupl) == 3 :
(s,p,o) = tupl
f = None
else :
(s,p,o,f) = tupl
final=[]
for c in (s,p,o) :
if _isResQuest(c) :
if not c in self.unbounds :
self.unbounds.append(c)
final.append(c)
elif isinstance(c, BNode):
#Do nothing - BNode name management is handled by SPARQL parser
# if not c in self.bnodes :
# self.bnodes[c] = BNode()
final.append(c)
else :
final.append(_createResource(c))
final.append(f)
return tuple(final)
def addPattern(self,tupl) :
"""
Append a tuple to the local patterns. Possible type literals
are converted to real literals on the fly. Each tuple should
contain either 3 elements (for an RDF Triplet pattern) or
four, where the fourth element is a per-pattern constraint
(filter). (The general constraint of SPARQL can be optimized
by assigning a constraint to a specific pattern; because it
stops the graph expansion, its usage might be much more
optimal than the 'global' constraint).
@param tupl: either a three or four element tuple
"""
self.patterns.append(self._generatePattern(tupl))
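# Illustrative sketch (added; not in the original source): building a small
# pattern from a three-element tuple and a four-element tuple that carries a
# per-pattern filter.  "ns" is a hypothetical Namespace and the filter
# signature shown (a binding dictionary) is illustrative only.
#
#   >>> pattern = GraphPattern()
#   >>> pattern.addPattern(("?person", ns.name, "?name"))
#   >>> pattern.addPattern(("?person", ns.age, "?age",
#   ...                     lambda binding: int(binding["?age"]) > 18))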
def insertPattern(self,tupl) :
"""
Insert a tuple to to the start of local patterns. Possible
type literals are converted to real literals on the fly. Each
tuple should contain either 3 elements (for an RDF Triplet
pattern) or four, where the fourth element is a per-pattern
constraint (filter). (The general constraint of SPARQL can be
optimized by assigning a constraint to a specific pattern;
because it stops the graph expansion, its usage might be much
more optimal than the 'global' constraint).
Semantically, the behaviour induced by a graphPattern does not
depend on the order of the patterns. However, due to the
behaviour of the expansion algorithm, users may control the
speed somewhat by adding patterns that would 'cut' the
expansion tree soon (ie, patterns that reduce the available
triplets significantly). API users may be able to do that,
hence this additional method.
@param tupl: either a three or four element tuple
"""
self.patterns.insert(0,self._generatePattern(tupl))
def addPatterns(self,lst) :
"""
Append a list of tuples to the local patterns. Possible type
literals are converted to real literals on the fly. Each
tuple should contain either three elements (for an RDF
Triplet pattern) or four, where the fourth element is a
per-pattern constraint. (The general constraint of SPARQL can
be optimized by assigning a constraint to a specific pattern;
because it stops the graph expansion, its usage might be much
more optimal than the 'global' constraint).
@param lst: list consisting of either a three or four element tuples
"""
for l in lst:
self.addPattern(l)
def insertPatterns(self,lst) :
"""
Insert a list of tuples to the start of the local
patterns. Possible type literals are converted to real
literals on the fly. Each tuple should be contain either
three elements (for an RDF Triplet pattern) or four, where the
fourth element is a per-pattern constraint. (The general
constraint of SPARQL can be optimized by assigning a
constraint to a specific pattern; because it stops the graph
expansion, its usage might be much more optimal than the
'global' constraint).
Semantically, the behaviour induced by a graphPattern does not
depend on the order of the patterns. However, due to the
behaviour of the expansion algorithm, users may control the
speed somewhat by adding patterns that would 'cut' the
expansion tree soon (ie, patterns that reduce the available
triplets significantly). API users may be able to do that,
hence this additional method.
@param lst: list consisting of either a three or four element tuples
"""
for i in xrange(len(lst)-1,-1,-1) :
self.insertPattern(lst[i])
def addConstraint(self,func) :
"""
Add a global filter constraint to the graph pattern. 'func'
must be a method with a single input parameter (a dictionary)
returning a boolean. This method is I{added} to previously
added methods, ie, I{all} methods must return True to accept a
binding.
@param func: filter function
"""
if type(func) == FunctionType :
self.constraints.append(func)
else :
raise SPARQLError("illegal argument, constraint must be
|
lliendo/SimpleFSM
|
simplefsm/__init__.py
|
Python
|
lgpl-3.0
| 8,624 | 0.000464 |
# -*- coding: utf-8 -*-
"""
This file is part of SimpleFSM.
SimpleFSM is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SimpleFSM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with SimpleFSM. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014 Lucas Liendo.
"""
from abc import ABCMeta, abstractmethod
from exceptions import *
class State(object):
"""
The State class models a defined state.
To create a new state an id must be supplied to identify it among other
states. Two other keyword arguments can be supplied to identify if the
state is a start state and/or a final state.
Note that at least one final state is needed and exactly one start
state must be established among all states.
"""
def __init__(self, id, start_state=False, final_state=False):
self._id = id
self._start_state = start_state
self._final_state = final_state
@property
def id(self):
"""Returns the id of the state."""
return self._id
@property
def start_state(self):
"""Returns True if the state is marked as a start state."""
return self._start_state
@start_state.setter
def start_state(self, start_state):
self._start_state = start_state
@property
def final_state(self):
"""Returns True if the state is marked as a final state."""
return self._final_state
@final_state.setter
def final_state(self, final_state):
self._final_state = final_state
def transit(self, fsm):
"""
This method is automatically called from SimpleFSM and performs
the transition from one state to another provided that a transition
match applies otherwise a FSMRejectedInput is raised.
"""
symbol = fsm.read_symbol()
try:
transition = [t for t in fsm.transitions if t.from_state.id == self.id and t.accepts(symbol)].pop()
except IndexError:
raise FSMRejectedInput([symbol])
fsm.current_state = transition.to_state
return symbol
def __eq__(self, other):
return self.id == other.id
class Transition(object):
"""
The Transition class models a transition between two given states.
To create a new transition three mandatory arguments must be supplied :
from_state : The state from which you want to transit.
to_state : The state you want to transit to.
transition_function : The function used to actually test if a symbol matches
the transition. This function must take only the symbol to be tested.
"""
def __init__(self, from_state, to_state, transition_function):
self._from_state = from_state
self._to_state = to_state
self._transition_function = transition_function
@property
def from_state(self):
"""Returns the state from which this transition should transit."""
return self._from_state
@property
def to_state(self):
"""Returns the state from which this transition should transit to."""
return self._to_state
@property
def transition_function(self):
"""Returns the transition function used by a Transition object."""
return self._transition_function
def accepts(self, symbol):
"""
Returns True if the read symbol is accepted by the transition function.
"""
return self._transition_function(symbol)
def __eq__(self, other):
return self.from_state == other.from_state \
and self.to_state == other.to_state \
and self.transition_function == other.transition_function
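# Illustrative sketch (added; not in the original source): wiring State and
# Transition objects into a subclass of the SimpleFSM class defined below.
# FSMEndOfInput is assumed to come from the exceptions module imported above.
#
#   >>> class StringFSM(SimpleFSM):
#   ...     def __init__(self, word):
#   ...         super(StringFSM, self).__init__()
#   ...         self._symbols = iter(word)
#   ...     def read_symbol(self):
#   ...         try:
#   ...             return next(self._symbols)
#   ...         except StopIteration:
#   ...             raise FSMEndOfInput
#   >>> start = State('start', start_state=True)
#   >>> end = State('end', final_state=True)
#   >>> fsm = StringFSM("ab")
#   >>> fsm.add_states([start, end])
#   >>> fsm.add_transitions([Transition(start, end, lambda s: s == 'a'),
#   ...                      Transition(end, end, lambda s: s == 'b')])
#   >>> fsm.run()       # returns the accepted symbols, e.g. ['a', 'b']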
class SimpleFSM(object):
"""
The SimpleFSM class models a finite state machine. To use this class
you must create a custom class that inherits from SimpleFSM and implement
the read_symbol() method. This method is responsible for returning a symbol
each time it is called. This symbol is then tested to check if it's actually
accepted by the FSM.
Typically you would instantiate a set of States and Transitions. After
this is done you instantiate your custom-implemented FSM and add all the
states and transitions.
After your custom-implemented FSM is built you should call the run()
method. If the word is recognized a list with all the accepted symbols
is returned otherwise a FSMRejectedInput is raised.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._states = []
self._transitions = []
self._accepted_symbols = []
self._final_states = None
self._current_state = None
self._remaining_input = True
@property
def transitions(self):
"""Returns a list containing all the defined transitions for this FSM."""
return self._transitions
@property
def current_state(self):
return self._current_state
@current_state.setter
def current_state(self, state):
self._current_state = state
def add_state(self, state):
"""
Adds a new state to the FSM. If the supplied state already exists
a FSMDuplicatedState exception is raised.
"""
if state in self._states:
raise FSMDuplicatedState(state)
self._states.append(state)
def add_states(self, states):
"""
Adds a set of states to the FSM. If one of the states is already
present a FSMDuplicatedState exception is raised.
"""
[self.add_state(s) for s in states]
def add_transition(self, transition):
"""
Adds a new transition to this FSM. If the supplied transition already
exists a FSMDuplicatedTransition exception is raised.
"""
if transition in self._transitions:
raise FSMDuplicatedTransition(transition)
self._transitions.append(transition)
def add_transitions(self, transitions):
"""
Adds a set of transitions to the FSM. If one of the transitions is
already present a FSMDuplicatedTransition exception is raised.
"""
[self.add_transition(t) for t in transitions]
def pre_transit(self):
"""
This method is called just before a transition is performed.
You may optionally implement this method.
"""
pass
@abstractmethod
def read_symbol(self):
"""
Abstract method that must be implemented by the user. When there
is no more input a FSMEndOfInput exception should be raised
to notify the FSM that no more input is available.
"""
raise FSMNotImplementedInput()
def post_transit(self):
"""
This method is called after a successful transition between two
states is performed. You may optionally implement this method.
"""
pass
def _set_initial_state(self):
start_state = [s for s in self._states if s.start_state]
if len(start_state) > 1:
raise FSMStartStatesError()
try:
self._current_state = start_state.pop()
except IndexError:
raise FSMNoStartStateError()
def _set_final_states(self):
self._final_states = [s for s in self._states if s.final_state]
if not self._final_states:
raise FSMFinalStateError()
def _set_states(self):
self._accepted_symbols = []
self._remaining_input = True
self._set_initial_state()
self._set_final_states()
def run(self):
"""
Starts the FSM. Returns a list containing the accepted symbols
otherwise a FSMRejectedInput exception is raised.
"""
self._set_states()
while
|
stackforge/solum
|
solum/tests/api/controllers/v1/test_trigger.py
|
Python
|
apache-2.0
| 14,612 | 0 |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from oslo_config import cfg
from solum.api.controllers.v1 import trigger
from solum.tests import base
from solum.tests import fakes
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.controllers.v1.trigger.app_handler'
'.AppHandler')
class TestTriggerController(base.BaseTestCase):
def test_trigger_get_workflow_with_empty_body(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
workflow = obj._get_workflow({})
self.assertIsNone(workflow)
def test_trigger_get_workflow_with_deploy(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['deploy'], list(workflow))
def test_trigger_get_workflow_with_build_deploy(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'build+deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['build', 'deploy'], list(workflow))
def test_trigger_get_workflow_with_all(self, assem_mock, resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'unittest+build+deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['unittest', 'build', 'deploy'], list(workflow))
|
def test_trigger_get_workflow_with_invalid_stage(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'unittest+unitunitunittest'}
workflow = obj._get_workflow(query)
self.assertEqual(['unittest'], list(workflow))
def test_trigger_process_request_private_repo(self, assem_mock,
resp_mock, request_mock):
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'comment': {'commit_id': 'asdf',
'body': ' SOLUM retry tests ',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': True}}
obj = trigger.TriggerController()
commit_sha, collab_url = obj._process_request(body_dict)
self.assertIsNone(collab_url)
self.assertEqual('asdf', commit_sha)
def test_trigger_process_request_on_valid_pub_repo(self,
assem_mock, resp_mock,
request_mock):
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'comment': {'commit_id': 'asdf',
'body': 'solum retry tests',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': False}}
obj = trigger.TriggerController()
commit_sha, collab_url = obj._process_request(body_dict)
self.assertEqual('https://api.github.com/repos/u/r/collaborators/u',
collab_url)
self.assertEqual('asdf', commit_sha)
@mock.patch('solum.common.policy.check')
def test_trigger_post_with_empty_body(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(400, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_github_webhook(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'opened',
'pull_request': {'head': {'sha': 'asdf'}},
'repository': {'statuses_url': status_url}}
expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(202, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
workflow=None)
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_github_comment_webhook(self, mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'created',
'comment': {'commit_id': 'asdf',
'body': ' SOLUM retry tests ',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': True}}
expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(202, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
workflow=None)
@mock.patch('httplib2.Http.request')
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_mismatch_comment_pub_repo(self, http_mock,
mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/opaque_keys/edx/tests/test_course_locators.py
|
Python
|
agpl-3.0
| 10,010 | 0.001598 |
"""
Tests of CourseKeys and CourseLocators
"""
import ddt
from bson.objectid import ObjectId
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from opaque_keys.edx.tests import LocatorBaseTest, TestDeprecated
@ddt.ddt
class TestCourseKeys(LocatorBaseTest, TestDeprecated):
"""
Tests of :class:`.CourseKey` and :class:`.CourseLocator`
"""
@ddt.data(
"foo/bar/baz",
)
def test_deprecated_roundtrip(self, course_id):
self.assertEquals(
course_id,
unicode(CourseKey.from_string(course_id))
)
@ddt.data(
"foo!/bar/baz",
)
def test_invalid_chars_in_ssck_string(self, course_id):
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(course_id)
@ddt.data(
"org/course/run/foo",
"org/course",
"org+course+run+foo",
"org+course",
)
def test_invalid_format_location(self, course_id):
with self.assertRaises(InvalidKeyError):
CourseLocator.from_string(course_id)
def test_make_usage_key(self):
depr_course = CourseKey.from_string('org/course/run')
self.assertEquals(
unicode(BlockUsageLocator(depr_course, 'category', 'name', deprecated=True)),
unicode(depr_course.make_usage_key('category', 'name'))
)
course = CourseKey.from_string('course-v1:org+course+run')
self.assertEquals(
unicode(BlockUsageLocator(course, 'block_type', 'block_id')),
unicode(course.make_usage_key('block_type', 'block_id'))
)
def test_convert_deprecation(self):
depr_course = CourseKey.from_string('org/course/run')
course = CourseKey.from_string('course-v1:org+course+run')
self.assertEquals(unicode(depr_course.replace(deprecated=False)), unicode(course))
self.assertEquals(unicode(course.replace(deprecated=True)), unicode(depr_course))
def test_course_constructor_underspecified(self):
with self.assertRaises(InvalidKeyError):
CourseLocator()
with self.assertRaises(InvalidKeyError):
CourseLocator(branch='published')
def test_course_constructor_bad_version_guid(self):
with self.assertRaises(ValueError):
CourseLocator(version_guid="012345")
with self.assertRaises(InvalidKeyError):
CourseLocator(version_guid=None)
def test_course_constructor_version_guid(self):
# pylint: disable=no-member,protected-access
# generate a random location
test_id_1 = ObjectId()
test_id_1_loc = str(test_id_1)
testobj_1 = CourseLocator(version_guid=test_id_1)
self.check_course_locn_fields(testobj_1, version_guid=test_id_1)
self.assertEqual(str(testobj_1.version_guid), test_id_1_loc)
testobj_1_string = u'@'.join((testobj_1.VERSION_PREFIX, test_id_1_loc))
self.assertEqual(testobj_1._to_string(), testobj_1_string)
self.assertEqual(str(testobj_1), u'course-v1:' + testobj_1_string)
self.assertEqual(testobj_1.html_id(), u'course-v1:' + testobj_1_string)
self.assertEqual(testobj_1.version, test_id_1)
# Test using a given string
test_id_2_loc = '519665f6223ebd6980884f2b'
test_id_2 = ObjectId(test_id_2_loc)
testobj_2 = CourseLocator(version_guid=test_id_2)
self.check_course_locn_fields(testobj_2, version_guid=test_id_2)
self.assertEqual(str(testobj_2.version_guid), test_id_2_loc)
testobj_2_string = u'@'.join((testobj_2.VERSION_PREFIX, test_id_2_loc))
self.assertEqual(testobj_2._to_string(), testobj_2_string)
self.assertEqual(str(testobj_2), u'course-v1:' + testobj_2_string)
self.assertEqual(testobj_2.html_id(), u'course-v1:' + testobj_2_string)
self.assertEqual(testobj_2.version, test_id_2)
@ddt.data(
' mit.eecs',
'mit.eecs ',
CourseLocator.VERSION_PREFIX + '@mit.eecs',
BlockUsageLocator.BLOCK_PREFIX + '@black+mit.eecs',
'mit.ee cs',
'mit.ee,cs',
'mit.ee+cs',
'mit.ee&cs',
'mit.ee()cs',
CourseLocator.BRANCH_PREFIX + '@this',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX,
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX + '@that',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX,
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this ',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@th%is ',
u'\ufffd',
)
def test_course_constructor_bad_package_id(self, bad_id):
"""
Test all sorts of badly-formed package_ids (and urls with those package_ids)
"""
with self.assertRaises(InvalidKeyError):
CourseLocator(org=bad_id, course='test', run='2014_T2')
with self.assertRaises(InvalidKeyError):
CourseLocator(org='test', course=bad_id, run='2014_T2')
with self.assertRaises(InvalidKeyError):
CourseLocator(org='test', course='test', run=bad_id)
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(u'course-v1:test+{}+2014_T2'.format(bad_id))
@ddt.data(
'course-v1:',
'course-v1:/mit.eecs',
'http:mit.eecs',
'course-v1:mit+course+run{}@branch'.format(CourseLocator.BRANCH_PREFIX),
'course-v1:mit+course+run+',
)
def test_course_constructor_bad_url(self, bad_url):
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(bad_url)
def test_course_constructor_url(self):
# Test parsing a url when it starts with a version ID and there is also a block ID.
# This hits the parsers parse_guid method.
test_id_loc = '519665f6223ebd6980884f2b'
testobj = CourseKey.from_string("course-v1:{}@{}+{}@hw3".format(
CourseLocator.VERSION_PREFIX, test_id_loc, CourseLocator.BLOCK_PREFIX
))
self.check_course_locn_fields(
testobj,
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_url_package_id_and_version_guid(self):
test_id_loc = '519665f6223ebd6980884f2b'
testobj = CourseKey.from_string(
'course-v1:mit.eecs+honors.6002x+2014_T2+{}@{}'.format(CourseLocator.VERSION_PREFIX, test_id_loc)
)
self.check_course_locn_fields(
testobj,
org='mit.eecs',
course='honors.6002x',
run='2014_T2',
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_url_package_id_branch_and_version_guid(self):
test_id_loc = '519665f6223ebd6980884f2b'
org = 'mit.eecs'
course = '~6002x'
run = '2014_T2'
testobj = CourseKey.from_string('course-v1:{}+{}+{}+{}@draft-1+{}@{}'.format(
org, course, run, CourseLocator.BRANCH_PREFIX, CourseLocator.VERSION_PREFIX, test_id_loc
))
self.check_course_locn_fields(
testobj,
org=org,
course=course,
run=run,
branch='draft-1',
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_package_id_no_branch(self):
org = 'mit.eecs'
course = '6002x'
run = '2014_T2'
testurn = '{}+{}+{}'.format(org, course, run)
testobj = CourseLocator(org=org, course=course, run=run)
self.check_course_locn_fields(testobj, org=org, course=course, run=run)
# Allow access to _to_string
# pylint: disable=protected-access
self.assertEqual(testobj._to_string(), testurn)
def test_course_constructor_package_id_separate_branch(self):
org = 'mit.eecs'
course = '6002x'
run = '2014_T2'
test_branch = 'published'
expected_urn = '{}+{}+{}+{}@{}'.format(org, course, run, CourseLocator.BRANCH_PREFIX, test_branch)
testobj = CourseLocator(org=org, course=course, run=run, branch=test_branch)
se
|
hyperspy/hyperspy
|
hyperspy/misc/utils.py
|
Python
|
gpl-3.0
| 51,574 | 0.000856 |
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
import warnings
import inspect
import copy
import types
from io import StringIO
import codecs
from collections.abc import Iterable, Mapping
import unicodedata
from contextlib import contextmanager
import importlib
import logging
import numpy as np
from hyperspy.misc.signal_tools import broadcast_signals
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.docstrings.utils import STACK_METADATA_ARG
_logger = logging.getLogger(__name__)
def attrsetter(target, attrs, value):
"""Sets attribute of the target to specified value, supports nested
attributes. Only creates a new attribute if the object supports such
behaviour (e.g. DictionaryTreeBrowser does)
Parameters
----------
target : object
attrs : string
attributes, separated by periods (e.g.
'metadata.Signal.Noise_parameters.variance' )
value : object
Example
-------
First create a signal and model pair:
>>> s = hs.signals.Signal1D(np.arange(10))
>>> m = s.create_model()
>>> m.signal.data
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Now set the data of the model with attrsetter
>>> attrsetter(m, 'signal1D.data', np.arange(10)+2)
>>> m.signal.data
array([ 2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
The behaviour is identical to
>>> m.signal.data = np.arange(10) + 2
"""
where = attrs.rfind(".")
if where != -1:
target = attrgetter(attrs[:where])(target)
setattr(target, attrs[where + 1 :], value)
@contextmanager
def stash_active_state(model):
active_state = []
for component in model:
if component.active_is_multidimensional:
active_state.append(component._active_array)
else:
active_state.append(component.active)
yield
for component in model:
active_s = active_state.pop(0)
if isinstance(active_s, bool):
component.active = active_s
else:
if not component.active_is_multidimensional:
component.active_is_multidimensional = True
component._active_array[:] = active_s
@contextmanager
def dummy_context_manager(*args, **kwargs):
yield
def str2num(string, **kargs):
"""Transform a a table in string form into a numpy array
Parameters
----------
string : string
Returns
-------
numpy array
"""
stringIO = StringIO(string)
return np.loadtxt(stringIO, **kargs)
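# Example (added for illustration): a whitespace-separated table parsed into
# a numpy array.
#
#   >>> str2num("1 2\n3 4")        # -> array([[1., 2.], [3., 4.]])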
def parse_quantity(quantity, opening="(", closing=")"):
"""Parse quantity of the signal outputting quantity and units separately.
It looks for the last matching opening and closing separator.
Parameters
----------
quantity : string
opening : string
Separator used to define the beginning of the units
closing : string
Separator used to define the end of the units
Returns
-------
quantity_name : string
quantity_units : string
"""
# open_bracket keeps track of the currently open brackets
open_bracket = 0
for index, c in enumerate(quantity.strip()[::-1]):
if c == closing:
# we find a closing bracket, increment open_bracket
open_bracket += 1
if c == opening:
# we find an opening bracket, decrement open_bracket
open_bracket -= 1
if open_bracket == 0:
# we found the matching bracket and we will use the index
break
if index + 1 == len(quantity):
return quantity, ""
else:
quantity_name = quantity[: -index - 1].strip()
quantity_units = quantity[-index:-1].strip()
return quantity_name, quantity_units
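# Example (added for illustration): the last matching bracket pair is taken
# as the units.
#
#   >>> parse_quantity("Intensity (Counts)")
#   ('Intensity', 'Counts')
#   >>> parse_quantity("Energy")
#   ('Energy', '')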
_slugify_strip_re_data = "".join(
c for c in map(chr, np.delete(np.arange(256), [95, 32])) if not c.isalnum()
).encode()
def slugify(value, valid_variable_name=False):
"""
Normalizes the string, removes non-alphanumeric characters,
and converts spaces to underscores.
Adapted from Django's "django/template/defaultfilters.py".
"""
if not isinstance(value, str):
try:
# Convert to unicode using the default encoding
value = str(value)
except BaseException:
# Try latin1. If this does not work an exception is raised.
value = str(value, "latin1")
value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore")
value = value.translate(None, _slugify_strip_re_data).decode().strip()
value = value.replace(" ", "_")
if valid_variable_name and not value.isidentifier():
value = "Number_" + value
return value
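# Example (added for illustration): spaces become underscores,
# non-alphanumeric characters are dropped, and valid_variable_name=True
# prefixes names that are not valid Python identifiers.
#
#   >>> slugify("EDS map (Cu)")
#   'EDS_map_Cu'
#   >>> slugify("1st map", valid_variable_name=True)
#   'Number_1st_map'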
class DictionaryTreeBrowser:
"""A class to comfortably browse a dictionary using a CLI.
In addition to accessing the values using dictionary syntax
the class enables navigating a dictionary that contains
nested dictionaries as attributes of nested classes.
Also it is an iterator over the (key, value) items. The
`__repr__` method provides pretty tree printing. Private
keys, i.e. keys that starts with an underscore, are not
printed, counted when calling len nor iterated.
Methods
-------
export : saves the dictionary in pretty tree printing format in a text
file.
keys : returns a list of non-private keys.
as_dictionary : returns a dictionary representation of the object.
set_item : easily set items, creating any necessary nodes on the way.
has_item: given a path, or part of a path, checks if the item exists.
get_item : given a path, or part of a path, returns the value of the item.
add_node : add all non existing nodes in a given path.
add_dictionary: add new items from dictionary.
Examples
--------
>>> tree = DictionaryTreeBrowser()
>>> tree.set_item("Branch.Leaf1.color", "green")
>>> tree.set_item("Branch.Leaf2.color", "brown")
>>> tree.set_item("Branch.Leaf2.caterpillar", True)
>>> tree.set_item("Branch.Leaf1.caterpillar", False)
>>> tree
└── Branch
├── Leaf1
│ ├── caterpillar = False
│ └── color = green
└── Leaf2
├── caterpillar = True
└── color = brown
>>> tree.Branch
├── Leaf1
│ ├── caterpillar = False
│ └── color = green
└── Leaf2
├── caterpillar = True
└── color = brown
>>> for label, leaf in tree.Branch:
... print("%s is %s" % (label, leaf.color))
Leaf1 is green
Leaf2 is brown
>>> tree.Branch.Leaf2.caterpillar
True
>>> "Leaf1" in tree.Branch
True
>>> "Leaf3" in tree.Branch
False
>>>
"""
def __init__(self, dictionary=None, double_lines=False, lazy=True):
"""When creating a DictionaryTreeBrowser lazily, the dictionary is
added to the `_lazy_attributes` attribute. The first time a lazy
attribute is called or the DictionaryTreeBrowser is printed, the
DictionaryTreeBrowser processes the lazy attributes with the
`process_lazy_attributes` method.
DictionaryTreeBrowser is lazy by default, using non-lazy instances
can be useful for debugging purposes.
"""
self._lazy_attributes = {}
self._double_lines = double_lines
if dictionary is None:
dictionary = {}
if lazy:
self._lazy_attributes.upda
|
Danyc0/boatd
|
boatd/__init__.py
|
Python
|
lgpl-3.0
| 3,800 | 0 |
from __future__ import print_function
import argparse
import logging
import imp
import os
import sys
from . import logger
from . import plugin
from . import nmea # noqa
from .api import BoatdHTTPServer, BoatdRequestHandler
from .behaviour import Behaviour
from .behaviour import BehaviourManager
from .boat import Boat
from .color import color
from .config import Config
from .driver import BaseBoatdDriver # noqa
from .base_plugin import BasePlugin # noqa
__version__ = '2.0.0'
log = logging.getLogger()
def load_conf(conf_file):
'''
Return the configuration object. Reads from the first argument by default,
otherwise falls back to 'boatd-config.yaml'.
'''
_, ext = os.path.splitext(conf_file)
if ext == '.json':
conf = Config.from_json(conf_file)
else:
conf = Config.from_yaml(conf_file)
conf.filename = conf_file
return conf
def load_driver(conf):
'''
Return the driver module from the filename specified in the configuration
file with key configuration.scripts.driver.
'''
expanded_path = os.path.expanduser(conf.driver.file)
directory, name = os.path.split(expanded_path)
sys.path.append(os.path.dirname(directory))
if hasattr(conf, 'filename'):
conf_directory, _ = os.path.split(conf.filename)
search_dirs = [directory, conf_directory]
else:
search_dirs = [directory]
module_name = os.path.splitext(name)[0]
try:
found_module = imp.find_module(module_name, search_dirs)
_, filename, _ = found_module
log.info('Loading boat driver from {}'.format(color(filename, 37)))
driver_module = imp.load_module('driver_module', *found_module)
log.info('Using \'{}\' as boat driver'.format(
color(type(driver_module.driver).__name__, 33)))
except Exception:
log.exception('Exception raised in boat driver module')
raise
finally:
found_module[0].close()
if not isinstance(driver_module.driver, BaseBoatdDriver):
log.error('Driver module does not instantiate BaseBoatdDriver')
sys.exit(1)
return driver_module.driver
def load_behaviours(conf):
behaviour_manager = BehaviourManager()
for behaviour in conf.behaviours:
name = list(behaviour.keys())[0]
behaviour_conf = behaviour.get(name)
filename = behaviour_conf.get('file')
b = Behaviour(name, filename)
behaviour_manager.add(b)
return behaviour_manager
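# Illustrative configuration sketch (added; not in the original source).  The
# file paths and values are hypothetical; the keys mirror what load_driver(),
# load_behaviours() and run() read from the Config object:
#
#   boatd:
#     interface: 127.0.0.1
#     port: 2222
#   driver:
#     file: ~/boat/driver.py
#   behaviours:
#     - waypoints:
#         file: ~/boat/waypoint_behaviour.py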
def parse_args():
description = '''\
Experimental robotic sailing boat daemon.
'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('config', metavar='CONFIG FILE',
default='boatd-config.yaml',
nargs='?',
help='a path to a configuration file')
parser.add_argument('--version',
action='version',
version='boatd {}'.format(__version__))
return parser.parse_args()
def run():
'''Run the main server.'''
args = parse_args()
conf = load_conf(args.config)
logger.setup_logging()
driver = load_driver(conf)
boat = Boat(driver)
plugins = plugin.load_plugins(conf, boat)
behaviour_manager = load_behaviours(conf)
httpd = BoatdHTTPServer(boat, behaviour_manager,
(conf.boatd.interface, conf.boatd.port),
BoatdRequestHandler)
while httpd.running:
try:
httpd.handle_request()
except (KeyboardInterrupt, SystemExit):
log.info('Quitting and requesting plugins end...')
behaviour_manager.stop()
for p in plugins:
p.running = False
sys.exit()
|
nickjhughes/polyominohs
|
generator.py
|
Python
|
mit
| 7,968 | 0.005146 |
""" generator.py: Contains the Generator class. """
import random
import copy
import graphics
from helpers import *
# Just to check we have generated the correct number of polyominoes
# {order: number of omiones}
counts = {1: 1, 2: 1, 3: 2, 4: 7, 5: 18, 6: 60}
class Generator:
""" A class for generating polyominoes. Call the generate function with the
polyomino order wanted. Please Note: This class has not been tested for
orders greater than 6. """
def generate(self, order):
""" Return a list of all the one-sided polyominoes of the given order.
Objects in returned list are 2D square lists representing the shape of
the polyominoes by boolean values.
generate(int) -> list<list<list<bool>>>
"""
self._order = order
ominoes = []
if order == 1:
ominoes = [[[True]]]
return ominoes
# This is the 'growth method' algorithm for generating polyominoes.
# A order * order grid is made, then the bottom-left block filled.
# The squares adjacent to that block are numbered, and one of them
# is randomly picked. This continues till order blocks are filled.
# Check to see if generated polyomino is a repeat, and continue
# till we've generated enough.
while len(ominoes) < counts[order]:
free_squares = {}
pick = 0
max_number = 0
omino = rect_list(order, order, False)
if order > 4:
# A different starting point for orders > 4
# This is so crosses and similar shapes can be generated
row, col = order - 2, 0
else:
row, col = order - 1, 0
omino[row][col] = True
for s in xrange(order - 1):
free_squares, max_number = self._number_adjacent_squares(omino,
(row, col), free_squares, max_number)
possible = [n for n in free_squares.keys() if n > pick]
pick = random.choice(possible)
row, col = free_squares[pick]
free_squares.pop(pick)
omino[row][col] = True
omino = self._normalise(omino)
if not [n for n in ominoes if n == omino]:
ominoes.append(omino)
return ominoes
def generate_colours(self, n):
""" Generate n unique colours and return as a list of RGB triples.
Colours are as contrasted as possible.
generate_colours(int) -> list<(int, int, int)>
"""
# This divides the 360 degrees of hue in the HSV colour space by n,
# and so chooses n colours with equally spaced hues.
colours = []
degrees = 360 / n
for i in xrange(n):
hsv = (degrees * i, 1.0, 0.78)
rgb = graphics.hsv2rgb(hsv)
colours.append(rgb)
return colours
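# Example (added for illustration): the seven one-sided tetrominoes paired
# with seven evenly-spaced colours.
#
#   >>> g = Generator()
#   >>> shapes = g.generate(4)
#   >>> colours = g.generate_colours(len(shapes))
#   >>> len(shapes), len(colours)
#   (7, 7)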
def _normalise(self, polyomino):
""" Return a copy of the given polyomino with its rotation and position
normalised. That is, in its left- and bottom-most position and rotation.
_normalise(list<list<bool>>) -> list<list<bool>>
"""
# Bottom- and left-most rotation and position is defined here as the
# position in which the most bottom row and left column squares are
# filled.
adjusted = copy.deepcopy(polyomino)
rowfractions = {} # Fraction of bottom row filled
colfractions = {} # Fraction of left column filled
for rotation in xrange(4):
adjusted = self._move(adjusted)
rowfilled = adjusted[self._order - 1].count(True)
rowfraction = float(rowfilled) / self._order
rowfractions.update({rotation: rowfraction})
colfilled = [adjusted[row][0] for row in xrange(self._order)].count(True)
colfraction = float(colfilled) / self._order
colfractions.update({rotation: colfraction})
adjusted = self._rotate(adjusted)
# Pick the rotation with the largest fractions
rowpick = max(rowfractions.values())
rowpicked_rotations = [k for k, v in rowfractions.iteritems() \
if v == rowpick]
if len(rowpicked_rotations) > 1:
colpick = max([v for k, v in colfractions.iteritems() \
if k in rowpicked_rotations])
colpicked_rotations = [k for k, v in colfractions.iteritems() \
if v == colpick and k in rowpicked_rotations]
if len(colpicked_rotations) == 0:
rotations = rowpicked_rotations[0]
else:
rotations = colpicked_rotations[0]
else:
rotations = rowpicked_rotations[0]
normalised = copy.deepcopy(polyomino)
for rotation in xrange(rotations):
normalised = self._rotate(normalised)
normalised = self._move(normalised)
return normalised
def _move(self, polyomino):
""" Return a copy of the given polyomino pushed into the bottom left
corner of its grid.
_move(list<list<bool>>) -> list<list<bool>>
"""
moved = copy.deepcopy(polyomino)
while moved[self._order - 1].count(True) == 0:
# While bottom row is empty, move down
for row in xrange(self._order - 1, 0, -1):
for col in xrange(self._order):
moved[row][col] = moved[row - 1][col]
moved[0] = [False] * self._order
while [moved[row][0] for row in xrange(self._order)].count(True) == 0:
# While left column is empty, move left
for row in xrange(self._order):
for col in xrange(self._order - 1):
moved[row][col] = moved[row][col + 1]
for row in xrange(self._order):
moved[row][self._order - 1] = False
return moved
def _rotate(self, polyomino):
""" Return a copy of the given polyomino rotated clockwise 90 degrees.
_rotate(list<list<bool>>) -> list<list<bool>>
"""
rotated = rect_list(self._order, self._order, False)
for row in xrange(self._order):
for col in xrange(self._order):
rotated[col][self._order - 1 - row] = polyomino[row][col]
return rotated
def _number_adjacent_squares(self, polyomino, coordinates, \
numbered_squares, max_number):
""" Return a pair with a dictionary of all the adjacent squares in the
given polyomino, keyed by their number, where they are numbered
clockwise from the top, and the highest numbered square. Numbering will
start from max_number and any previously numbered squares in
numbered_squares will be included.
_number_adjacent_squares(list<list<bool>>, (int,int),
dict<int:(int,int)>, int) ->
(dict<int:(int, int)>, int)
"""
row, col = coordinates
possible_squares = [(row - 1, col), (row, col + 1),
(row + 1, col), (row, col - 1)]
adjacents = copy.deepcopy(numbered_squares)
n = max_number
for row, col in possible_squares:
if row in range(self._order) and col in range(self._order) \
and not polyomino[row][col] \
and not (row, col) in numbered_squares.values():
# Number the square only if its in the grid, not already
# numbered and not already filled
n += 1
adjacents.update({n: (row, col)})
return adjacents, n
|
jelmer/isso
|
isso/utils/html.py
|
Python
|
mit
| 3,145 | 0.000954 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import html
import bleach
import misaka
from configparser import NoOptionError
class Sanitizer(object):
def __init__(self, elements, attributes):
# attributes found in Sundown's HTML serializer [1]
# except for <img> tag,
# because images are not generated anyways.
#
# [1] https://github.com/vmg/sundown/blob/master/html/html.c
self.elements = ["a", "p", "hr", "br", "ol", "ul", "li",
"pre", "code", "blockquote",
"del", "ins", "strong",
|
"em",
"h1", "h2", "h3", "h4", "h5", "h6",
"table", "thead", "tbody", "th", "td"] + elements
# href for <a> and align for <table>
self.attributes = ["align", "href"] + attributes
def sanitize(self, text):
clean_html = bleach.clean(text, tags=self.elements, attributes=self.attributes, strip=True)
def set_links(attrs, new=False):
href_key = (None, u'href')
if href_key not in attrs:
return attrs
if attrs[href_key].startswith(u'mailto:'):
return attrs
rel_key = (None, u'rel')
rel_values = [val for val in attrs.get(rel_key, u'').split(u' ') if val]
for value in [u'nofollow', u'noopener']:
if value not in [rel_val.lower() for rel_val in rel_values]:
rel_values.append(value)
attrs[rel_key] = u' '.join(rel_values)
return attrs
linker = bleach.linkifier.Linker(callbacks=[set_links])
return linker.linkify(clean_html)
def Markdown(extensions=("strikethrough", "superscript", "autolink",
"fenced-code"), flags=[]):
renderer = Unofficial(flags=flags)
md = misaka.Markdown(renderer, extensions=extensions)
def inner(text):
rv = md(text).rstrip("\n")
if rv.startswith("<p>") or rv.endswith("</p>"):
return rv
return "<p>" + rv + "</p>"
return inner
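# Illustrative usage (added; not part of the original module): render first,
# then sanitize, the same order Markup.render() below uses.
#
#   >>> render = Markdown(extensions=("fenced-code",))
#   >>> Sanitizer(elements=[], attributes=[]).sanitize(render("Hello *world*"))
#   '<p>Hello <em>world</em></p>'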
class Unofficial(misaka.HtmlRenderer):
"""A few modifications to process "common" Markdown.
For instance, fenced code blocks (~~~ or ```) are just wrapped in <code>
which does not preserve line breaks. If a language is given, it is added
to <code class="$lang">, compatible with Highlight.js.
"""
def blockcode(self, text, lang):
lang = ' class="{0}"'.format(html.escape(lang)) if lang else ''
return "<pre><code{1}>{0}</code></pre>\n".format(html.escape(text, False), lang)
class Markup(object):
def __init__(self, conf):
try:
conf_flags = conf.getlist("flags")
except NoOptionError:
conf_flags = []
parser = Markdown(extensions=conf.getlist("options"), flags=conf_flags)
sanitizer = Sanitizer(
conf.getlist("allowed-elements"),
conf.getlist("allowed-attributes"))
self._render = lambda text: sanitizer.sanitize(parser(text))
def render(self, text):
return self._render(text)
|
FrozenPigs/Taigabot
|
plugins/_broken/chatbot.py
|
Python
|
gpl-3.0
| 874 | 0.006865 |
import urllib
import urllib2
import xml.dom.minidom
import re
import socket
from util import hook
chatbot_re = (r'(^.*\b(taiga|taigabot)\b.*$)', re.I)
@hook.regex(*chatbot_re)
@hook.command
def chatbot(inp, reply=None, nick=None, conn=None):
inp = inp.group(1).lower().replace('taigabot', '').replace('taiga', '').replace(':', '')
args = {'bot_id': '6', 'say': inp.strip(), 'convo_id': conn.nick, 'format': 'xml'}
data = urllib.urlencode(args)
resp = False
url_response = urllib2.urlopen('http://api.program-o.com/v2/chatbot/?', data)
response = url_response.read()
response_dom = xml.dom.minidom.parseString(response)
text = response_dom.getElementsByTagName('response')[0].childNodes[0].data.strip()
return nick + ': ' + str(text.lower().replace('programo', 'taiga').replace('program-o', 'taigabot').replace('elizabeth', 'wednesday'))
|
quozl/sugar-toolkit-gtk3
|
examples/ticket2855.py
|
Python
|
lgpl-2.1
| 1,719 | 0 |
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', pixel_size=style.STANDARD_ICON_SIZE)
toggle.set_image(icon)
box.pack_start(toggle, False, False, 0)
toggle.show()
radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', pixel_size=style.STANDARD_ICON_SIZE)
radio.set_image(icon)
radio.set_mode(False)
box.pack_start(radio, False, False, 0)
radio.show()
palette.set_content(box)
box.show()
if __name__ == '__main__':
common.main(test)
|
cleverhans-lab/cleverhans
|
cleverhans_v3.1.0/cleverhans/experimental/certification/nn.py
|
Python
|
mit
| 11,199 | 0.001429 |
"""This file defines the neural network class, where a network is reinitialized from configuration files.
The class also has a forward propagation method.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
import tensorflow as tf
class NeuralNetwork(object):
"""NeuralNetwork is a class that interfaces the verification code with
the neural net parameters (weights).
"""
def __init__(
self,
net_weights,
net_biases,
net_layer_types,
input_shape=None,
cnn_params=None,
):
"""Function to initialize NeuralNetParams class.
Args:
net_weights: list of numpy matrices of weights of each layer
[convention: x[i+1] = W[i] x[i]
net_biases: list of numpy arrays of biases of each layer
net_layer_types: type of each layer ['ff' or 'ff_relu' or 'ff_conv' or
'ff_conv_relu']
'ff': Simple feedforward layer with no activations
'ff_relu': Simple feedforward layer with ReLU activations
'ff_conv': Convolution layer with no activation
'ff_conv_relu': Convolution layer with ReLU activation
input_shape: [num_rows, num_columns, num_channels] at the input layer
cnn_params: list of dictionaries containing stride and padding for
each layer
Raises:
ValueError: the input lists of net params are not of the same length
"""
if (len(net_weights) != len(net_biases)) or len(net_biases) != len(
net_layer_types
):
raise ValueError("Inputs of net params are not of same length ....")
if net_layer_types[len(net_layer_types) - 1] != "ff":
raise ValueError("Final layer is not linear")
self.num_hidden_layers = len(net_weights) - 1
self.weights = []
self.biases = []
self.layer_types = []
self.sizes = []
self.input_shapes = []
self.output_shapes = []
self.has_conv = False
if input_shape is not None:
current_num_rows = input_shape[0]
current_num_columns = input_shape[1]
current_num_channels = input_shape[2]
self.cnn_params = cnn_params
# Setting the sizes of the layers of the network
# sizes[i] contains the size of x_i
for i in range(self.num_hidden_layers):
shape = np.shape(net_weights[i])
self.weights.append(tf.convert_to_tensor(net_weights[i], dtype=tf.float32))
self.layer_types.append(net_layer_types[i])
if self.layer_types[i] in {"ff", "ff_relu"}:
self.sizes.append(int(shape[1]))
# For feedforward networks, no unraveling the bias terms
small_bias = tf.convert_to_tensor(net_biases[i], dtype=tf.float32)
self.biases.append(tf.reshape(small_bias, [-1, 1]))
# Assumes that x^{i+1} = W_i x^i
self.input_shapes.append([int(shape[1]), 1])
self.output_shapes.append([int(shape[0]), 1])
# Convolution type
else:
self.has_conv = True
num_filters = shape[3]
self.input_shapes.append(
[1, current_num_rows, current_num_columns, current_num_channels]
)
self.sizes.append(
current_num_rows * current_num_columns * current_num_channels
)
current_num_channels = num_filters
# For propagating across multiple conv layers
if self.cnn_params[i]["padding"] == "SAME":
current_num_rows = int(
current_num_rows / self.cnn_params[i]["stride"]
)
current_num_columns = int(
current_num_columns / self.cnn_params[i]["stride"]
)
self.output_shapes.append(
[1, current_num_rows, current_num_columns, current_num_channels]
)
# For conv networks, unraveling the bias terms
small_bias = tf.convert_to_tensor(net_biases[i], dtype=tf.float32)
large_bias = tf.tile(
tf.reshape(small_bias, [-1, 1]),
[current_num_rows * current_num_columns, 1],
)
self.biases.append(large_bias)
# Last layer shape: always ff
if self.has_conv:
final_dim = int(np.shape(net_weights[self.num_hidden_layers])[1])
self.input_shapes.append([final_dim, 1])
else:
final_dim = int(np.shape(net_weights[self.num_hidden_layers - 1])[0])
self.sizes.append(final_dim)
self.final_weights = tf.convert_to_tensor(
net_weights[self.num_hidden_layers], dtype=tf.float32
)
self.final_bias = tf.convert_to_tensor(
net_biases[self.num_hidden_layers], dtype=tf.float32
)
def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
"""Performs forward pass through the layer weights at layer_index.
        Args:
          vector: vector that has to be passed through in forward pass
layer_index: index of the layer
is_transpose: whether the weights of the layer have to be transposed
is_abs: whether to take the absolute value of the weights
Returns:
tensor that corresponds to the forward pass through the layer
Raises:
ValueError: if the layer_index is negative or more than num hidden layers
"""
if layer_index < 0 or layer_index > self.num_hidden_layers:
raise ValueError("Invalid layer index")
layer_type = self.layer_types[layer_index]
weight = self.weights[layer_index]
if is_abs:
weight = tf.abs(weight)
if is_transpose:
vector = tf.reshape(vector, self.output_shapes[layer_index])
else:
vector = tf.reshape(vector, self.input_shapes[layer_index])
if layer_type in {"ff", "ff_relu"}:
if is_transpose:
weight = tf.transpose(weight)
return_vector = tf.matmul(weight, vector)
elif layer_type in {"conv", "conv_relu"}:
if is_transpose:
return_vector = tf.nn.conv2d_transpose(
vector,
weight,
output_shape=self.input_shapes[layer_index],
strides=[
1,
self.cnn_params[layer_index]["stride"],
self.cnn_params[layer_index]["stride"],
1,
],
padding=self.cnn_params[layer_index]["padding"],
)
else:
return_vector = tf.nn.conv2d(
vector,
weight,
strides=[
1,
self.cnn_params[layer_index]["stride"],
self.cnn_params[layer_index]["stride"],
1,
],
padding=self.cnn_params[layer_index]["padding"],
)
else:
raise NotImplementedError("Unsupported layer type: {0}".format(layer_type))
if is_transpose:
return tf.reshape(return_vector, (self.sizes[layer_index], 1))
return tf.reshape(return_vector, (self.sizes[layer_index + 1], 1))
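# Illustrative only (not part of the original file): wrapping a tiny feedforward
# network and pushing a vector through its first layer.
#
#   weights = [np.random.rand(4, 3), np.random.rand(2, 4)]   # x1 = W0 x0, logits = W1 x1
#   biases = [np.random.rand(4), np.random.rand(2)]
#   net = NeuralNetwork(weights, biases, ["ff_relu", "ff"])
#   out = net.forward_pass(tf.ones([3, 1]), layer_index=0)   # tensor of shape (4, 1)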
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None):
"""Function to read the weights from checkpoint based on json description.
Args:
checkpoint: tensorflow checkpoint with trained model to
verify
model_json: path of json file with model description of
the network list of dictionary items for each layer
containing 'type', 'weight_var', 'bias_var' and
'is_transpose' 'type'is one of {'ff', 'ff_relu' or
|
dmacvicar/spacewalk
|
backend/server/rhnServer/server_kickstart.py
|
Python
|
gpl-2.0
| 28,246 | 0.002868 |
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Kickstart-related operations
#
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnException
from spacewalk.server import rhnSQL, rhnAction, rhnLib, rhnChannel
def update_kickstart_session(server_id, action_id, action_status,
kickstart_state, next_action_type):
log_debug(3, server_id, action_id, action_status, kickstart_state, next_action_type)
# Is this a kickstart-related action?
ks_session_id = get_kickstart_session_id(server_id, action_id)
if ks_session_id is None:
# Nothing more to do
log_debug(4, "Kickstart session not found")
return None
# Check the current action state
if action_status == 2:
# Completed
ks_status = kickstart_state
# Get the next action - it has to be of the right type
next_action_id = get_next_action_id(action_id, next_action_type)
elif action_status == 3:
# Failed
ks_status = 'failed'
next_action_id = None
else:
raise rhnException("Invalid action state %s" % action_status)
update_ks_session_table(ks_session_id, ks_status, next_action_id,
server_id)
return ks_session_id
_query_update_ks_session_table = rhnSQL.Statement("""
update rhnKickstartSession
set action_id = :action_id,
state_id = :ks_status_id,
new_server_id = :server_id
where id = :ks_session_id
""")
def update_ks_session_table(ks_session_id, ks_status, next_action_id,
server_id):
log_debug(4, ks_session_id, ks_status, next_action_id, server_id)
ks_table = rhnSQL.Table('rhnKickstartSessionState', 'label')
ks_status_id = ks_table[ks_status]['id']
h = rhnSQL.prepare(_query_update_ks_session_table)
h.execute(ks_session_id=ks_session_id, ks_status_id=ks_status_id,
action_id=next_action_id, server_id=server_id)
if ks_status == 'complete':
delete_guests(server_id)
_query_lookup_guests_for_host = rhnSQL.Statement("""
select virtual_system_id from rhnVirtualInstance
where host_system_id = :server_id
""")
_query_delete_virtual_instances = rhnSQL.Statement("""
delete from rhnVirtualInstance where host_system_id = :server_id
""")
def delete_guests(server_id):
"""
Callback used after a successful kickstart to remove any guest virtual
instances, as well as their associated servers.
"""
# First delete all the guest server objects:
h = rhnSQL.prepare(_query_lookup_guests_for_host)
h.execute(server_id=server_id)
delete_server = rhnSQL.Procedure("delete_server")
log_debug(4, "Deleting guests")
while 1:
row = h.fetchone_dict()
if not row:
break
guest_id = row['virtual_system_id']
log_debug(4, 'Deleting guest server: %s'% guest_id)
try:
if guest_id != None:
delete_server(guest_id)
except rhnSQL.SQLError:
log_error("Error deleting server: %s" % guest_id)
# Finally delete all the virtual instances:
log_debug(4, "Deleting all virtual instances for host")
h = rhnSQL.prepare(_query_delete_virtual_instances)
h.execute(server_id=server_id)
# Commit all changes:
try:
rhnSQL.commit()
except rhnSQL.SQLError, e:
log_error("Error committing transaction: %s" % e)
rhnSQL.rollback()
_query_get_next_action_id = rhnSQL.Statement("""
select a.id
from rhnAction a, rhnActionType at
where a.prerequisite = :action_id
and a.action_type = at.id
and at.label = :next_action_type
""")
def get_next_action_id(action_id, next_action_type = None):
if not next_action_type:
return None
    h = rhnSQL.prepare(_query_get_next_action_id)
h.execute(action_id=action_id, next_action_type=next_action_type)
row = h.fetchone_dict()
if not row:
return None
return row['id']
_query_lookup_kickstart_session_id = rhnSQL.Statement("""
select ks.id
from rhnKickstartSession ks
where (
    (ks.old_server_id = :server_id and ks.new_server_id is NULL)
or ks.new_server_id = :server_id
or ks.host_server_id = :server_id
)
and ks.action_id = :action_id
""")
def get_kickstart_session_id(server_id, action_id):
h = rhnSQL.prepare(_query_lookup_kickstart_session_id)
h.execute(server_id=server_id, action_id=action_id)
row = h.fetchone_dict()
if not row:
# Nothing to do
return None
return row['id']
_query_insert_package_delta = rhnSQL.Statement("""
insert into rhnPackageDelta (id, label)
values (:package_delta_id, 'ks-delta-' || :package_delta_id)
""")
_query_insert_action_package_delta = rhnSQL.Statement("""
insert into rhnActionPackageDelta (action_id, package_delta_id)
values (:action_id, :package_delta_id)
""")
_query_insert_package_delta_element = rhnSQL.Statement("""
insert into rhnPackageDeltaElement
(package_delta_id, transaction_package_id)
values
(:package_delta_id,
lookup_transaction_package(:operation, :n, :e, :v, :r, :a))
""")
def schedule_kickstart_delta(server_id, kickstart_session_id,
installs, removes):
log_debug(3, server_id, kickstart_session_id)
row = get_kickstart_session_info(kickstart_session_id, server_id)
org_id = row['org_id']
scheduler = row['scheduler']
action_id = rhnAction.schedule_server_action(
server_id,
action_type='packages.runTransaction', action_name="Package delta",
delta_time=0, scheduler=scheduler, org_id=org_id,
)
package_delta_id = rhnSQL.Sequence('rhn_packagedelta_id_seq').next()
h = rhnSQL.prepare(_query_insert_package_delta)
h.execute(package_delta_id=package_delta_id)
h = rhnSQL.prepare(_query_insert_action_package_delta)
h.execute(action_id=action_id, package_delta_id=package_delta_id)
h = rhnSQL.prepare(_query_insert_package_delta_element)
col_names = [ 'n', 'v', 'r', 'e']
__execute_many(h, installs, col_names, operation='insert', a=None,
package_delta_id=package_delta_id)
__execute_many(h, removes, col_names, operation='delete', a=None,
package_delta_id=package_delta_id)
update_ks_session_table(kickstart_session_id, 'package_synch_scheduled',
action_id, server_id)
return action_id
def schedule_kickstart_sync(server_id, kickstart_session_id):
row = get_kickstart_session_info(kickstart_session_id, server_id)
org_id = row['org_id']
scheduler = row['scheduler']
# Create a new action
action_id = rhnAction.schedule_server_action(
server_id,
action_type='kickstart.schedule_sync',
action_name="Schedule a package sync",
delta_time=0, scheduler=scheduler, org_id=org_id,
)
return action_id
def _get_ks_virt_type(type_id):
_query_kickstart_virt_type = rhnSQL.Statement("""
select label
from rhnKickstartVirtualizationType kvt
where kvt.id = :id
""")
prepared_query = rhnSQL.prepare(_query_kickstart_virt_type)
prepared_query.execute(id=type_id)
row = prepared_query.fetchone_dict()
# XXX: we should have better constraints on the db so this doesn't happen.
if not row:
kstype = 'auto'
else:
kstype = row['label']
log_debug(1, "KS_TYPE: %s" % kstype)
return kstype
def get_kickstart_session_type(server_id, action_id):
ks_session_id = get_kickstart_
|
MuckRock/muckrock
|
muckrock/communication/importers.py
|
Python
|
agpl-3.0
| 2,224 | 0 |
"""
Custom importers for addresses
"""
# Django
from django.conf import settings
# Standard Library
import csv
import re
# Third Party
from localflavor.us.us_states import STATE_CHOICES
from smart_open.smart_open_lib import smart_open
# MuckRock
from muckrock.communication.models import Address
# columns
AGENCY_PK = 0
AGENCY_NAME = 1
ADDRESS_TYPE = 2
ADDRESS_PK = 3
ORIG_ADDRESS = 4
STREET = 5
CITY = 6
STATE = 7
ZIP = 8
LONG = 9
LAT = 10
SUITE = 11
AGENCY_OVERRIDE = 12
ATTN_OVERRIDE = 13
STATES = {s[0] for s in list(STATE_CHOICES)}
p_zip = re.compile(r"^\d{5}(?:-\d{4})?$")
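# Illustrative only: p_zip accepts "02139" and "02139-1234" but rejects "2139";
# STATES is the set of two-letter USPS codes used to validate row[STATE] below.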
# pylint: disable=broad-except
def import_addresses(file_name):
"""Import addresses from spreadsheet"""
# pylint: disable=too-many-locals
s3_path = f"s3://{settings.AWS_MEDIA_BUCKET_NAME}/{file_name}"
with smart_open(s3_path) as tmp_file:
reader = csv.reader(tmp_file)
# discard header row
next(reader)
for i, row in enumerate(reader):
if i % 1000 == 0:
print(i)
if row[STATE] and row[STATE] not in STATES:
print('Illegal State "{}"'.format(row[STATE]))
if row[ZIP] and not p_zip.match(row[ZIP]):
print('Illegal Zip "{}"'.format(row[ZIP]))
try:
address = Address.objects.get(pk=row[ADDRESS_PK])
except Address.DoesNotExist:
print("Address {} does not exist".format(row[ADDRESS_PK]))
else:
address.street = row[STREET].strip()
address.suite = row[SUITE].strip()
address.city = row[CITY].strip()
address.state = row[STATE].strip()
address.zip_code = row[ZIP].strip()
address.point = {
"type": "Point",
"coordinates": [row[LONG].strip(), row[LAT].strip()],
}
address.agency_override = row[AGENCY_OVERRIDE].strip()
address.attn_override = row[ATTN_OVERRIDE].strip()
try:
address.save()
except Exception as exc:
                    print("Data Error", exc, row[ADDRESS_PK])
print(row)
|
mansonul/events
|
events/contrib/plugins/form_elements/fields/ip_address/__init__.py
|
Python
|
mit
| 382 | 0 |
__title__ = 'fobi.contrib.plugins.form_elements.fields.ip_address'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('default_app_config', 'UID',)
default_app_config = 'fobi.contrib.plugins.form_elements.fields.' \
'ip_address.apps.Config'
UID = 'ip_address'
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/RandomReg_500/ws_RandomReg_500_GradientBoostingRegressor_sqlite_code_gen.py
|
Python
|
bsd-3-clause
| 146 | 0.013699 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("GradientBoostingRegressor" , "RandomReg_500" , "sqlite")
|
pattisdr/osf.io
|
website/files/utils.py
|
Python
|
apache-2.0
| 1,720 | 0.002907 |
def copy_files(src, target_node, parent=None, name=None):
"""Copy the files from src to the target node
:param Folder src: The source to copy children from
:param Node target_node: The node to copy files to
:param Folder parent: The parent of to attach the clone of src to, if applicable
"""
assert not parent or not parent.is_file, 'Parent must be a folder'
cloned = src.clone()
cloned.parent = parent
cloned.target = target_node
cloned.name = name or cloned.name
cloned.copied_from = src
cloned.save()
if src.is_file and src.versions.exists():
fileversions = src.versions.select_related('region').order_by('-created')
most_recent_fileversion = fileversions.first()
if most_recent_fileversion.region and most_recent_fileversion.region != target_node.osfstorage_region:
            # add all original versions except the most recent
            cloned.versions.add(*fileversions[1:])
# create a new most recent version and update the region before adding
new_fileversion = most_recent_fileversion.clone()
new_fileversion.region = target_node.osfstorage_region
new_fileversion.save()
cloned.versions.add(new_fileversion)
else:
cloned.versions.add(*src.versions.all())
# copy over file metadata records
if cloned.provider == 'osfstorage':
for record in cloned.records.all():
record.metadata = src.records.get(schema__name=record.schema.name).metadata
record.save()
if not src.is_file:
for child in src.children:
copy_files(child, target_node, parent=cloned)
return cloned
|
JaneliaSciComp/osgpyplusplus
|
examples/rough_translated1/osgwidgetprogress.py
|
Python
|
bsd-3-clause
| 2,695 | 0.014842 |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgwidgetprogress"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osgDB
from osgpypp import osgWidget
# Translated from file 'osgwidgetprogress.cpp'
# -*-c++-*- osgWidget - Code by: Jeremy Moles (cubicool) 2007-2008
# $Id$
#include <osgDB/ReadFile>
#include <osgWidget/Util>
#include <osgWidget/WindowManager>
#include <osgWidget/Canvas>
MASK_2D = 0xF0000000
class UpdateProgressNode (osg.NodeCallback) :
start = float()
done = float()
UpdateProgressNode():
start (0.0),
done (5.0)
virtual void operator()(osg.Node* node, osg.NodeVisitor* nv)
fs = nv.getFrameStamp()
t = fs.getSimulationTime()
if start == 0.0 : start = t
width = ((t - start) / done) * 512.0
percent = (width / 512.0) * 100.0
if width < 1.0 or width > 512.0 : return
window = dynamic_cast<osgWidget.Window*>(node)
if not window : return
w = window.getByName("pMeter")
l = dynamic_cast<osgWidget.Label*>(window.getByName("pLabel"))
if not w or not l : return
w.setWidth(width)
w.setTexCoordRegion(0.0, 0.0, width, 64.0)
ss = std.ostringstream()
ss, osg.round(percent), "% Done"
l.setLabel(ss.str())
def main(argv):
viewer = osgViewer.Viewer()
wm = osgWidget.WindowManager(
viewer,
1280.0,
1024.0,
MASK_2D,
osgWidget.WindowManager.WM_PICK_DEBUG
)
canvas = osgWidget.Canvas("canvas")
pOutline = osgWidget.Widget("pOutline", 512.0, 64.0)
pMeter = osgWidget.Widget("pMeter", 0.0, 64.0)
pLabel = osgWidget.Label("pLabel", "0% Done")
pOutline.setImage("osgWidget/progress-outline.png", True)
pOutline.setLayer(osgWidget.Widget.LAYER_MIDDLE, 2)
pMeter.setImage("osgWidget/progress-meter.png")
pMeter.setColor(0.7, 0.1, 0.1, 0.7)
pMeter.setLayer(osgWidget.Widget.LAYER_MIDDLE, 1)
pLabel.setFont("fonts/VeraMono.ttf")
pLabel.setFontSize(20)
pLabel.setFontColor(1.0, 1.0, 1.0, 1.0)
pLabel.setSize(512.0, 64.0)
pLabel.setLayer(osgWidget.Widget.LAYER_MIDDLE, 3)
canvas.setOrigin(300.0, 300.0)
canvas.addWidget(pMeter, 0.0, 0.0)
canvas.addWidget(pOutline, 0.0, 0.0)
canvas.addWidget(pLabel, 0.0, 0.0)
canvas.getBackground().setColor(0.0, 0.0, 0.0, 0.0)
canvas.setUpdateCallback(UpdateProgressNode())
wm.addChild(canvas)
return osgWidget.createExample(viewer, wm, osgDB.readNodeFile("cow.osgt"))
if __name__ == "__main__":
main(sys.argv)
|
labordus/cornice
|
common.py
|
Python
|
gpl-2.0
| 8,653 | 0.008321 |
# common.py: global variables
# arch-tag: global variables
# author: Alberto Griggio <agriggio@users.sourceforge.net>
# license: GPL
import wx
import os, sys, locale, Image
import threading
__version__ = '0.6.1'
if os.path.expanduser('~') != '~':
bookmarks_file = os.path.expanduser('~/.cornice/bookmarks')
config_file = os.path.expanduser('~/.cornice/config')
confdir = os.path.expanduser('~/.cornice')
if not os.path.exists(confdir):
try: os.mkdir(confdir)
except (IOError, OSError): pass # this is not fatal...
else:
confdir = os.path.dirname(sys.argv[0])
bookmarks_file = os.path.join(confdir, 'bookmarks')
config_file = os.path.join(confdir, 'config')
config = None # ConfigParser instance used to load/store options
try:
interpolations = [ Image.NEAREST, Image.BILINEAR,
Image.BICUBIC, Image.ANTIALIAS ]
except AttributeError:
# probably PIL < 1.1.3
interpolations = [ Image.NEAREST, Image.BILINEAR,
Image.BICUBIC, Image.BICUBIC ]
icons_and_colors = {
'GIF': ('file_gif.xpm', (208, 232, 208)),
'ICO': ('file_ico.xpm', (249, 240, 208)),
'JPEG': ('file_jpg.xpm', (224, 232, 192)),
'PCX': ('file_pcx.xpm', (216, 231, 216)),
'PNG': ('file_png.xpm', (224, 216, 208)),
'PNM': ('file_pnm.xpm', (218, 237, 192)),
'PSD': ('file_psd.xpm', (255, 255, 223)),
'TIF': ('file_tif.xpm', (200, 200, 213)),
'XBM': ('file_xbm.xpm', (224, 224, 224)),
'XCF': ('file_xcf.xpm', (191, 239, 233)),
'XPM': ('file_xpm.xpm', (222, 217, 234)),
'BMP': ('file_bmp.xpm', (229, 213, 213)),
}
default_icon_and_color = ('file_image.xpm', (240, 240, 240))
unknown_icon_and_color = ('file_unknown.xpm', (255, 255, 255))
# sort indexes
SORT_NAME = 0
SORT_DATE = 1
SORT_SIZE = 2
SORT_TYPE = 3
sort_index = 0
reverse_sort = False
def format_size_str(number):
sf = ['bytes', 'KB', 'MB', 'GB']
i = 0
while number > 1000 and i < 4:
number = number / 1024.0
i += 1
return '%s %s' % (locale.format('%.1f', number), sf[i])
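# Illustrative only (exact strings depend on the active locale):
#   format_size_str(500)  -> '500.0 bytes'
#   format_size_str(2048) -> '2.0 KB'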
has_alpha = wx.VERSION[:3] >= (2, 5, 2) and 'gtk1' not in wx.PlatformInfo
if wx.Platform == '__WXGTK__':
_mask_table = [0]*128 + [255]*128
else:
_mask_table = [255]*128 + [0]*128
def get_mask(pil_image):
"""\
If the image has some transparency, returns a wx.Mask object used to mask
the transparent pixels, otherwise returns None
The function should always be called with only one argument
"""
if pil_image.mode == 'RGBA' and not has_alpha:
alpha = pil_image.split()[3]
mask = wx.EmptyImage(*alpha.size)
#mask.SetData(alpha.convert('1').convert('RGB').tostring())
mask.SetData(alpha.point(_mask_table, '1').convert('RGB').tostring())
return wx.Mask(wx.BitmapFromImage(mask, 1))
elif pil_image.mode == 'P':
# let's try to get the transparency value...
transparency = pil_image.info.get('transparency')
if transparency:
## mode, data = pil_image.palette.getdata()
## if 0: #mode[:3] == 'RGB':
## if mode == 'RGBA': n = 4
## else: n = 3
## rgb = data[transparency*n : transparency*n + n]
            ## mask = wx.EmptyImage(*pil_image.size)
            ## mask.SetData(pil_image.convert('RGB').tostring())
## color = wx.Colour(*[ord(c) for c in rgb[:3]])
## if wx.VERSION[:3] >= (2, 5, 2):
## return wx.Mask(mask.ConvertToBitmap(), color)
## else:
            ## return wx.MaskColour(mask.ConvertToBitmap(), color)
## else:
if wx.Platform != '__WXGTK__': c1, c2 = 255, 0
else: c1, c2 = 0, 255
palette = [c1] * 768 #[255] * 768
palette[transparency*3 : transparency*3 + 3] = [c2, c2, c2]#[0, 0, 0]
pil_image = pil_image.copy()
pil_image.putpalette(palette)
mask = wx.EmptyImage(*pil_image.size)
mask.SetData(pil_image.convert('1').convert('RGB').tostring())
return wx.Mask(wx.BitmapFromImage(mask, 1))
return None
# custom event to update the menubar when the sorting of the PictureList
# changes
_SORT_CHANGED_EVENT = wx.NewEventType()
class _SortChangedEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(_SORT_CHANGED_EVENT)
self.sort_index = sort_index
self.reverse_sort = reverse_sort
# end of class _SortChangedEvent
_win_to_post = None
def EVT_SORT_CHANGED(win, func):
global _win_to_post; _win_to_post = win
win.Connect(-1, -1, _SORT_CHANGED_EVENT, func)
def send_sort_changed_event():
wx.PostEvent(_win_to_post, _SortChangedEvent())
_exiting_lock = threading.RLock()
_is_exiting = False
def exiting(val=None):
global _is_exiting
_exiting_lock.acquire()
if val is not None: _is_exiting = val
retval = _is_exiting
_exiting_lock.release()
return retval
exit_app = None # reference to a function called to exit the app nicely
really_exit_app = None
_theme_dir = None
def load_from_theme(resource):
global _theme_dir
if _theme_dir is None:
_theme_dir = config.get('cornice', 'theme', '')
d = os.path.join(confdir, _theme_dir)
if not os.path.isdir(d):
d = os.path.join(os.getcwd(), 'icons', _theme_dir)
if not os.path.isdir(d) or \
not os.path.isfile(os.path.join(d, 'toolbars.xrc')):
d = os.path.join(os.getcwd(), 'icons')
old = os.getcwd()
#resource = os.path.abspath(resource)
os.chdir(d)
res = wx.xrc.XmlResource_Get()
res.Load(resource)
os.chdir(old)
def get_theme_icon():
global _theme_dir
if _theme_dir is None:
_theme_dir = config.get('cornice', 'theme', '')
if wx.Platform == '__WXMSW__':
name = 'icon.ico'
else:
name = 'icon.png'
d = os.path.join(confdir, _theme_dir)
if not os.path.isdir(d):
d = os.path.join(os.getcwd(), 'icons', _theme_dir)
if not os.path.isdir(d) or \
not os.path.isfile(os.path.join(d, name)):
d = os.path.join(os.getcwd(), 'icons')
if wx.Platform == '__WXMSW__':
icon = wx.Icon(os.path.join(d, name), wx.BITMAP_TYPE_ICO)
else:
icon = wx.EmptyIcon()
bmp = wx.Bitmap(os.path.join(d, name), wx.BITMAP_TYPE_PNG)
icon.CopyFromBitmap(bmp)
return icon
def get_bitmap_for_theme(imagepath):
global _theme_dir
if _theme_dir is None:
_theme_dir = config.get('cornice', 'theme', '')
name, ext = os.path.splitext(imagepath)
if ext: extensions = [ext]
else: extensions = ['.png', '.xpm']
paths = [os.path.join(os.getcwd(), 'icons', _theme_dir),
os.path.join(os.getcwd(), 'icons')]
log_null = wx.LogNull()
for path in paths:
for ext in extensions:
imagepath = os.path.join(path, name + ext)
try:
bmp = wx.Bitmap(imagepath, wx.BITMAP_TYPE_ANY)
if bmp.Ok():
return bmp
except:
pass
return None
if wx.Platform != '__WXMSW__' or wx.VERSION[:2] >= (2, 5):
def delete_dc(dc):
pass
else:
def delete_dc(dc):
dc.Destroy()
def get_image_info(path):
import fileops, time
pi = fileops.get_path_info(path)
im = Image.open(fileops.open(path))
w, h = im.size
if im.mode == '1': sdepth = '1'
elif im.mode == 'P': sdepth = '256'
else: sdepth = '16M'
info = [
fileops.basename(path),
time.strftime('%Y/%m/%d %H:%M',
time.localtime(pi.mtime)),
pi.size,
'%sx%sx%s %s' % (w, h, sdepth, im.format)
]
return info
def create_thumbnail(pil_image, thumb_size):
"""\
Returns a bitmap with the thumbnail
"""
pil_image.thumbnail(thumb_size, Image.NEAREST)
mask = get_mask(pil_image)
img = wx.EmptyImage(*pil_image.size)
if has_alpha and pil_image.mode == 'RGBA':
alpha = pil_image.split()[3].tostring()
img.SetData(pil_image.convert('RGB').tostring
|
nextgis-extra/tests
|
lib_gdal/gcore/hfa_read.py
|
Python
|
gpl-2.0
| 2,508 | 0.005183 |
#!/usr/bin/env python
###############################################################################
# $Id: hfa_read.py 32166 2015-12-13 19:29:52Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic read support for all datatypes from a HFA file.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# When imported build a list of units based on the files available.
gdaltest_list = []
init_list = [
('byte.img', 1, 4672, None),
('int16.img', 1, 4672, None),
('uint16.img', 1, 4672, None),
('int32.img', 1, 4672, None),
('uint32.img', 1, 4672, None),
('float32.img', 1, 4672, None),
('float64.img', 1, 4672, None),
('utmsmall.img', 1, 50054, None),
('2bit_compressed.img', 1, 11918, None)]
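# Each entry above is (filename, band, expected checksum) plus a fourth field that
# the loop below does not use; the first three are handed to gdaltest.GDALTest.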
for item in init_list:
    ut = gdaltest.GDALTest( 'HFA', item[0], item[1], item[2] )
if ut is None:
print( 'HFA tests skipped' )
sys.exit()
gdaltest_list.append( (ut.testOpen, item[0]) )
if __name__ == '__main__':
gdaltest.setup_run( 'hfa_read' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
numenta/nupic.vision
|
src/nupic/vision/data/OCR/characters/parseJPG.py
|
Python
|
agpl-3.0
| 7,772 | 0.019429 |
#!/usr/bin/python2
'''
This script parses JPEG images of text documents to isolate and save images
of individual characters. The size of these output images in pixels is
specified by the parameters desired_height and desired_width.
The JPEG images are converted to grey scale using a parameter called
luminance_threshold to distinguish between light and dark pixels. Lines of
text are found by searching for rows that contain dark pixels, and
characters are found by searching for columns that contain dark pixels. Once
a character is found it is padded with blank rows and columns to obtain the
desired size. The images are saved using the filenames given in the XML file.
'''
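# A rough sketch of the row scan described above (illustrative only; the real
# loops below operate on the full page array):
#
#   import numpy as np
#   page = np.array([[255, 255], [40, 255]])          # second row holds a dark pixel
#   dark_rows = np.where(page.min(axis=1) < 100)[0]   # -> array([1])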
# Set desired output image height and width in pixels
desired_height = 32
desired_width = 32
DEBUG = False
import matplotlib.pyplot as plot
import numpy as np
import operator
import sys
import re
import os
from PIL import Image
from xml.dom import minidom
jpg_list = [ 'characters-0.jpg', 'characters-1.jpg', 'characters-2.jpg',
'characters-3.jpg', 'characters-4.jpg', 'characters-5.jpg',
'characters-6.jpg', 'characters-7.jpg', 'characters-8.jpg',
'characters-9.jpg', 'characters-10.jpg', 'characters-11.jpg',
'characters-12.jpg', 'characters-13.jpg', 'characters-14.jpg',
'characters-15.jpg', 'characters-16.jpg', 'characters-17.jpg',
'characters-18.jpg', 'characters-19.jpg' ]
#jpg_list = [ 'debug_doc.jpg' ]
# Parse XML file for filenames to use when saving each character image
xmldoc = minidom.parse('characters.xml')
#xmldoc = minidom.parse('debug_doc.xml')
filelist = xmldoc.getElementsByTagName('image')
print len(filelist)
#for i in range(145):
#print filelist[62*i].attributes['file'].value
# this counter gets used to select file names from an xml file
output_files_saved = 0
for jpg in jpg_list:
print jpg
im = Image.open(jpg)
width, length = im.size
if DEBUG:
print "image size: ", im.size
print "image mode: ", im.mode
print im.size[1],im.size[0]
# read pixel data from image into a numpy array
if im.mode == 'L':
    pixels = np.array(list(im.getdata())).reshape(im.size[1],im.size[0])
  elif im.mode == 'RGB':
pixels = np.array(list(im.convert('L').getdata())).reshape(im.size[1],
im.size[0])
#im.show()
##############################################################################
# Removed all logic for determining the value to use to distinguish between
# light and dark pixels because this is a non-trivial challenge of its own and
# I want to get to generating a data set for OCR which I can do much faster by
# choosing the threshold manually.
##############################################################################
luminance_threshold = 100
##############################################################################
# parse document for lines of text
##############################################################################
row = 0
while row < length:
# Find the first row of pixels in next line of text by ignoring blank rows
# of pixels which will have a non-zero product since white pixels have a
# luminance value of 255
#row_data = pixels[row * width : row * width + width]
while (row < length and pixels[row,:].min() > luminance_threshold):
row += 1
first_row = row
if DEBUG:
print "the first row of pixels in the line of text is ", first_row
# Find the last row of pixels in this line of text by counting rows with
# dark pixels. These rows have a product of zero since the luminance value
# of all dark pixels was set to zero
while (row < length and pixels[row:row + 2,:].min() < luminance_threshold):
row += 1
last_row = row
#if row < length:
#last_row = row + 2 # this is a hack for Cochin font Q
#row += 5 # this is a hack for Cochin font Q
if DEBUG:
print "the last row of pixels in the line of text is ", last_row
##############################################################################
# parse line of text for characters
##############################################################################
if first_row < last_row:
col = 0
while col < width:
# find first column of pixels in the next character by ignoring blank
# cols of pixels
while col < width and pixels[first_row:last_row,col].min() > luminance_threshold:
col += 1
first_col = col
# find last column of pixels in the next character by counting columns
# with dark pixels
while col < width and \
pixels[first_row:last_row,col:col + 5].min() < luminance_threshold:
col += 1
last_col = col
##############################################################################
# remove blank rows from the top and bottom of characters
##############################################################################
if first_col < last_col:
# remove blank rows from the top of the character
r = first_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r + 1;
char_first_row = r;
# remove blank rows from the bottom of the character
r = last_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r - 1;
char_last_row = r + 1;
if DEBUG:
# isolate an image of this character
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
print "Character size after whitespace removal", character.size
print first_col, first_row, last_col, last_row
#character.show()
# pad character width out to desired_width
char_width = last_col - first_col
if char_width > desired_width:
print "Character is wider than ", desired_width
else:
# add the same number of blank columns to the left and right
first_col = first_col - (desired_width - char_width) / 2
last_col = last_col + (desired_width - char_width) / 2
# if the difference was odd we'll be short one column
char_width = last_col - first_col
if char_width < desired_width:
last_col = last_col + 1
# pad character height out to desired_height
char_height = char_last_row - char_first_row
if char_height > desired_height:
print "Character is taller than ", desired_height
else:
# add the same number of blank rows to the left and right
char_first_row = char_first_row - (desired_height - char_height) / 2
char_last_row = char_last_row + (desired_height - char_height) / 2
# if the difference was odd we'll be short one row
char_height = char_last_row - char_first_row
if char_height < desired_height:
char_last_row = char_last_row + 1
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
if DEBUG:
print "Character size after padding", character.size
print first_col, char_first_row, last_col, char_last_row
#character.show()
#garbage = raw_input()
# save image to filename specified in ground truth file
filename = filelist[output_files_saved].attributes['file'].value
directory = filename.split('/')[0]
if not os.path.exists(directory):
os.makedirs(directory)
character.save(filename, "JPEG", quality=80)
output_files_saved = output_files_saved + 1
print output_files_saved
|
trhongbinwang/data_science_journey
|
deep_learning/keras/examples/imdb_cnn.py
|
Python
|
apache-2.0
| 2,519 | 0.004367 |
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.models import Model
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.layers import Input, Embedding
from tensorflow.contrib.keras.python.keras.layers import Conv1D, GlobalMaxPooling1D
from tensorflow.contrib.keras.python.keras.datasets import imdb
# set hyperparameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 5
def load_data():
'''
'''
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
return [x_train, y_train, x_test, y_test]
def cnn_model_fn():
''' '''
print('Build model...')
inputs = Input(shape=(maxlen,), dtype='int32') # a index sequence with lenght = maxlen
x = Embedding( max_features,
embedding_dims,
input_length=maxlen)(inputs)
x = Dropout(0.2)(x)
x = Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(hidden_dims)(x)
x = Dropout(0.2)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('sigmoid')(x)
model = Model(inputs=inputs, outputs=x)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def train(data, model):
''' '''
x_train, y_train, x_test, y_test = data[0], data[1], data[2], data[3]
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
if __name__ == '__main__':
''' '''
data = load_data()
model = cnn_model_fn()
train(data, model)
|
flipcoder/litebot
|
plugins/highfive.py
|
Python
|
mit
| 268 | 0.029851 |
def highfive(ctx, serv, nick, dest, msg):
if msg=="\o":
serv.say(dest, "o/")
elif msg=="o/":
serv.say(dest, "\o")
elif msg=="o'":
serv.say(dest, "'o")
elif msg=="'o":
serv.say(dest, "o'")
serv.on_msg.connect(highfive)
|
Suwmlee/XX-Net
|
gae_proxy/server/lib/google/appengine/datastore/document_pb.py
|
Python
|
bsd-2-clause
| 55,713 | 0.021144 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import _dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class FieldValue_Geo(ProtocolBuffer.ProtocolMessage):
has_lat_ = 0
lat_ = 0.0
has_lng_ = 0
lng_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def lat(self): return self.lat_
def set_lat(self, x):
self.has_lat_ = 1
self.lat_ = x
def clear_lat(self):
if self.has_lat_:
self.has_lat_ = 0
self.lat_ = 0.0
def has_lat(self): return self.has_lat_
def lng(self): return self.lng_
def set_lng(self, x):
self.has_lng_ = 1
self.lng_ = x
def clear_lng(self):
if self.has_lng_:
self.has_lng_ = 0
self.lng_ = 0.0
def has_lng(self): return self.has_lng_
def MergeFrom(self, x):
assert x is not self
if (x.has_lat()): self.set_lat(x.lat())
if (x.has_lng()): self.set_lng(x.lng())
def Equals(self, x):
if x is self: return 1
if self.has_lat_ != x.has_lat_: return 0
if self.has_lat_ and self.lat_ != x.lat_: return 0
if self.has_lng_ != x.has_lng_: return 0
if self.has_lng_ and self.lng_ != x.lng_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_lat_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lat not set.')
if (not self.has_lng_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lng not set.')
return initialized
def ByteSize(self):
n = 0
return n + 18
def ByteSizePartial(self):
n = 0
if (self.has_lat_):
n += 9
if (self.has_lng_):
n += 9
return n
def Clear(self):
self.clear_lat()
self.clear_lng()
def OutputUnchecked(self, out):
out.putVarInt32(41)
out.putDouble(self.lat_)
out.putVarInt32(49)
out.putDouble(self.lng_)
def OutputPartial(self, out):
if (self.has_lat_):
out.putVarInt32(41)
out.putDouble(self.lat_)
if (self.has_lng_):
out.putVarInt32(49)
out.putDouble(self.lng_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 36: break
if tt == 41:
self.set_lat(d.getDouble())
continue
if tt == 49:
self.set_lng(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_lat_: res+=prefix+("lat: %s\n" % self.DebugFormat(self.lat_))
if self.has_lng_: res+=prefix+("lng: %s\n" % self.DebugFormat(self.lng_))
return res
class FieldValue(ProtocolBuffer.ProtocolMessage):
TEXT = 0
HTML = 1
ATOM = 2
DATE = 3
NUMBER = 4
GEO = 5
UNTOKENIZED_PREFIX = 6
TOKENIZED_PREFIX = 7
_ContentType_NAMES = {
0: "TEXT",
1: "HTML",
2: "ATOM",
3: "DATE",
4: "NUMBER",
5: "GEO",
6: "UNTOKENIZED_PREFIX",
7: "TOKENIZED_PREFIX",
}
def ContentType_Name(cls, x): return cls._ContentType_NAMES.get(x, "")
ContentType_Name = classmethod(ContentType_Name)
has_type_ = 0
type_ = 0
has_language_ = 0
language_ = "en"
has_string_value_ = 0
string_value_ = ""
has_geo_ = 0
geo_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = 0
def has_type(self): return self.has_type_
def language(self): return self.language_
def set_language(self, x):
self.has_language_ = 1
self.language_ = x
def clear_language(self):
if self.has_language_:
self.has_language_ = 0
self.language_ = "en"
def has_language(self): return self.has_language_
def string_value(self): return self.string_value_
def set_string_value(self, x):
self.has_string_value_ = 1
self.string_value_ = x
def clear_string_value(self):
if self.has_string_value_:
self.has_string_value_ = 0
self.string_value_ = ""
def has_string_value(self): return self.has_string_value_
def geo(self):
if self.geo_ is None:
self.lazy_init_lock_.acquire()
try:
if self.geo_ is None: self.geo_ = FieldValue_Geo()
finally:
self.lazy_init_lock_.release()
return self.geo_
def mutable_geo(self): self.has_geo_ = 1; return self.geo()
def clear_geo(self):
if self.has_geo_:
self.has_geo_ = 0;
if self.geo_ is not None: self.geo_.Clear()
def has_geo(self): return self.has_geo_
def MergeFrom(self, x):
assert x is not self
if (x.has_type()): self.set_type(x.type())
if (x.has_language()): self.set_language(x.language())
if (x.has_string_value()): self.set_string_value(x.string_value())
if (x.has_geo()): self.mutable_geo().MergeFrom(x.geo())
def Equals(self, x):
if x is self: return 1
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_language_ != x.has_language_: return 0
if self.has_language_ and self.language_ != x.language_: return 0
if self.has_string_value_ != x.has_string_value_: return 0
if self.has_string_value_ and self.string_value_ != x.string_value_: return 0
if self.has_geo_ != x.has_geo_: return 0
if self.has_geo_ and self.geo_ != x.geo_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_geo_ and not self.geo_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
if (self.has_geo_): n += 2 + self.geo_.ByteSize()
return n
def ByteSizePartial(self):
n = 0
if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
if (self.has_geo_): n += 2 + self.geo_.ByteSizePartial()
return n
def Clear(self):
self.clear_type()
self.clear_language()
self.clear_string_value()
self.clear_geo()
def OutputUnchecked(self, out):
if (self.has_type_):
out.putVarInt32(8)
out.putVarInt32(self.type_)
if (self.has_language_):
out.putVarInt32(18)
out.putPrefixedString(self.language_)
if (self.has_string_value_):
out.putVarInt32(26)
out.putPrefixedString(self.string_value_)
if (self.has_geo_):
out.putVarInt32(35)
self.geo_.OutputUnchecked(out)
out.putVarInt32(36)
def OutputPartial(self, out):
if (self.has_type_):
out.putVarInt32(8)
out.putVarInt32(self.type_)
if (self.h
|
doudz/checkfeedmail
|
icon.py
|
Python
|
bsd-2-clause
| 9,632 | 0.012978 |
import cStringIO
import zlib
import wx
#----------------------------------------------------------------------
def getMailData():
return zlib.decompress(
"x\xda\x01M\x01\xb2\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x01\x04IDATX\x85\xed\x941\x0e\x82@\x10E\x9f\xc6`,\x88\xad\
\x8d\x8d\x89r\x02B\xc1\t\xbc\x94\x857\xf04\x9e\xc0C\x00\x95\xb1\xb1\xa52\xda\
h\xc1N\xe1\xc8f5j\x9cD^Ev\x98\x81\xffv\x01::\xfe\x9d^\x91e\xd7\xb6\xc2d\xb9\
\x04`\xb8X\xbc\xf5\x80sY\x02p\xdcn[\xeb\xfd\xb7\xa6\x7f\x80\x81\xaf o<O\xd3f\
\xc1\x19y\x1a\xd7\xbf\xf7$\x17\xec\x19\x90\xbd?\x15\x05\x00\xd5z\r\xc0\\n\
\x08\x99p\x89\xa5o<\x9b\x010J\x12\xe0\xf1,\xd83\x10\xafV\xcd\x85K \x04M\x04\
\x92\xcb\\\xfb\x06\x84\xa7M\xa8u_r\x1fv\r\x08\xb1\xfc\x07\x14\x952\xf3\x90\
\xdc\xd3\xa71l\xe0p\x00\xe0R\xd7@8\x91N.}\x91\x9b\xc3t\xda\xdag\xd0\x80$\xdf\
\xed\x00\x88\xf2\xbcYw\tb\xf9\xfe\xd5\x19\xd0\xa7=\xf2\xcdQ\xd83\xe0K\xae\t}\
\xdf\xd2'sd\xae\xc6\x9e\x81P\xf2\x97Q&\xd8l\xee\xca\xf6\x0c\xf8\xf6\xea[\xfc\
\xdc@G\xc7\rv\x18V\xd3#+\xef\x8c\x00\x00\x00\x00IEND\xaeB`\x82\xb38\x8e\xb0"\
)
def getMailBitmap():
return wx.BitmapFromImage(getMailImage())
def getMailImage():
stream = cStringIO.StringIO(getMailData())
return wx.ImageFromStream(stream)
def getMailIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getMailBitmap())
return icon
#----------------------------------------------------------------------
def getNoMailData():
return zlib.decompress(
'x\xda\x01G\x04\xb8\xfb\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x03\xfeIDATX\x85\xed\x97[o\xdb8\x10F\x0fu\xa1$\xeb\x96(A\
\x92\x1a}\xe8\xcf\xdc\xdd?\xeb\xa0h\x12\'\xa9#;\xba\x8b\x12\xb5\x0f\x81\x88\
\xba\xb6w\xb37\xf4a;\x80!\x98\xb09gf8\xdfPBX6?\xd2\xac\x1f\xea\xfd\'\xc0O\
\x00\xc0\xf9\xed\xd7_\xa6i\x9a\xf6\x16\xb3,\xe3\xea\xea\x8a8\x8eY,\x16X\xd6\
\xdf\xe3\x1c\xc7\x91\xba\xae\xa9\xaa\x8a\xa7\xa7\'6\x9b\xcd!@\x92$\x07\x8b\
\xbe\xef\x9b\xe7\xe5\xe5%a\x18"\xa5\xc4\xb6\xdf\xd7\xb2\xe38\xd2\xf7=UU\xf1\
\xf8\xf8HUUx\x9eG\x9a\xa6\x87\x00\xc76\xa8\xeb\x9a\xae\xeb\xf0}\x9f\xeb\xebk\
\xc20$MS\\\xd7}\x17\x80R\x8a\xddnG]\xd7\x94e\xc9\xd3\xd3\x13\xe38\x1e\xfd\
\xed\x1e\x80\x94\x12\xdf\xf7\xd1Z3\x0c\x03M\xd3\xb0^\xaf\x11B\xe0\xba.q\x1c#\
\xa5\xc4q\x8er3\x0c\x03}\xdfS\xd75_\xbf~e\xbd^\xd34\r\x8e\xe3\xe0\xfb>\xb6m\
\xd3\xb6-]\xd7\x1d\x07\x08\xc3\x90\x8b\x8b\x0b\x94R4MC\xd7u\xacV+\xba\xae\
\xc3q\x1c\x84\x10\xa4iz\x12`\x1cG\xca\xb2\xe4\xf9\xf9\x99\xdb\xdb[\xee\xef\
\xef\rx\x10\x04x\x9e\xc7f\xb39\r\x90$\t\x1f?~\xa4\xaek6\x9b\rEQ\xd0u\x1d\xbb\
\xdd\x8e\xbb\xbb;\xc6qd\x9a\xa6\x83L\xcc\x91\x17E\xc1z\xbdf\xbd^\xb3\xdb\xed\
\xd0Z\x1b\x80,\xcb\x88\xa2\x08\xa5\x14///\xc7\x01\xd24\xe5\xd3\xa7O\xbc\xbc\
\xbc\xd0\xf7=sw\xf4}\xcf\xed\xed-M\xd3`Y\x16B\x08\x92$\xd9\x03\x98k\xbdZ\xad\
x||\xc4\xb2,\xa2("\x0cC\x92$\xe1\xc3\x87\x0fdY\xb6\xe7\xfc\x00\xc0\xf3<\xe28\
6N]\xd7\xc5\xb2,^__)\xcb\x92\xedv\xcb\xfd\xfd=Zk\xa6ib\x18\x06\x00\xaa\xaa2\
\x91o\xb7[\xfa\xbe\'\x8a"\x13\xf9\xe5\xe5%Y\x96\x99\xcc\x9d\x04\xf8\xb6\x14R\
J\xa4\x94\x0c\xc3\x80\xd6\xdaD\xfa\xf9\xf3g\x9a\xa6A\x08\xc1\xf9\xf99\x00y\
\x9e\xb3Z\xadx~~F\x08A\x14EDQD\x9a\xa6,\x97Knnn\xf0<\x8f\xef\xf5\xe6$\x80\
\xef\xfb\xf8\xbeO\xd34\xa6\x96\x00eYR\x96%y\x9e\xf3\xf0\xf0@Q\x14f=\xcfs\xba\
\xae\xdbK{\x92$\xa4ij\xfa\xbfi\x9a\xf7\x01\xcc&\xa5$I\x12\x93\xf2\xd9\x94R|\
\xf9\xf2\x05!\x04\x00\xd34\xa1\xb5&\x0cC\xe3<MS\xe28\xfeS\xed8\n0\x9f\xf6\
\xb9\xff\x83 `\x1cG\xe3\xb0(\n\xaa\xaa\xa2\xef{\x03\x1a\x86!q\x1c\x13\xc71Q\
\x14\xe1\xfb>\xae\xeb"\x84`\x18\x06\xf3\xdfw\x01h\xad\xe9\xfb\x9e\xae\xebPJa\
Y\x16q\x1cc\xdb\xb6\xc9\x84\x10\xe2(@\x9a\xa6\x04A\x80\x10\x02\xa5\x14]\xd7\
\xd1u\xdd\xc9L\xec\x01h\xad\x19\xc7\x11\xad5u]\x1b\xe7s4\xf3SJ\x89eY\xb4m\
\x0b\xbcu\xcf\xd9\xd9\x19gggDQ\x84\x94\x12\xa5\x14\xd34\xa1\x94\xa2\xaek\x82\
0>N\x02\xccCd\x18\x06^__\xb1m\x9b0\x0c\xf1<\x0f\xd7u\x99\xa6\x89\xf3\xf3s\
\xf2<\x07\xde\x0e\x1f@\x14E,\x97K...L\xa4s\xf4\xf3\\\x98\xa6\t\xc7q\x0ef\xc2\
\x1e\xc0L\xab\xb5F)\x85\xeb\xba,\x16\x0b\x82 \xc0u]#<\x8e\xe3\xd0\xb6-\x9e\
\xe7\x01\x10\xc71WWWdY\x06\xbc\xb5\xabR\n\xdb\xb6)\x8a\x82\xb6mi\xdb\x16\xcb\
\xb2PJ\x9d\x06\x98ew\xb1X\x18\xfd\x0e\x82\xc0\xcc\x81\xd9\x82 `\xb9\\\x9a\
\xcd\xa4\x94&\xc5\xf0v>\x1c\xc7!\x08\x02\xa6i\xc2\xb6m\x94RF\xdaO\x02\xcc\
\x9a>\x0b\x89\xe7yx\x9ewp!\x99\xc1N\x99m\xdb\xe63\x7f\xdf\xedv\xf4}\xff\xc7%\
\xf0}\x9f4MM\xddOM\xbd\xbfb\xf3\x1eQ\x141\x8e\xa3)\xdbQ\x80yn\xcf\xa7\xfc[\
\xbd\xff\'fY\x96\xb9k|\x1f\xd4\xd130\xcf\xff\x7f\xd3\xc6q4w\x8c=\x80\xa6i\
\x8c\xb8\xe4yn.\x11\xff\x85)\xa5\xd8n\xb7\xd4um\xd6\xc4\xcfw\xc3\xff=\xc0\
\xefa\x89?u1\xd3\xf5 \x00\x00\x00\x00IEND\xaeB`\x82\xc4\x1f\x08\x9f' )
def getNoMailBitmap():
return wx.BitmapFromImage(getNoMailImage())
def getNoMailImage():
stream = cStringIO.StringIO(getNoMailData())
return wx.ImageFromStream(stream)
def getNoMailIcon():
icon = wx.EmptyIcon()
icon.CopyFromBitmap(getNoMailBitmap())
return icon
#----------------------------------------------------------------------
def getErrMailData():
return zlib.decompress(
'x\xda\x01W\x05\xa8\xfa\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x05\x0eIDATX\x85\xcd\x97\xcf\x8f\xdb\xd4\x16\xc7?v\xae\x7f\
\xc5N&\x8e\xd3L\x92\xceL%T\x15\rbQQ!\xe8\x0e\xc4\x92\xff\x80%H\xac\xdeC\xf0\
\xfe\x94\x07\xdb\xf7\x96\xac\xfa\x1f TT\t\x06\x90\xa0,*UB#\x90f:i"\'\x99L\
\xec\xd8\xf1\xaf\x98\xc5LLC\x92\x8aH\xa0r$/|t\xef9\x1f\xdf\xfb\xbd\xe7\\K\
\x92\\\xe2E\x9a\xfcB\xb3\x03b\xdb\t\x9f}\xfa\xdf\xfc\xf5\xd1\x88\x83\xcf?\
\xa7\xf2\xf81\x00\xde\xe1!\xa7\xe
f\xbd\xc7\xf7\xf5:\xff\xfa\xf7G\xd2\xdf\n\
\xb0w\xff>\xd7\x83\x80\xeah\x84q\xe5\x93F#:GG\xec\x95\xcb\xdb\x86C\xdaV\x03\
\xdfjj\xfeZ\x9e#\xc71\xf2|\x0e\xc0\\\x96\x99\xab*?J\x12oF\xf1V+\xb0\xb5\x06\
\x1cUE\xccfEr\x00y>G\xccf8\xaa\xbam8\xc4\x7f>\xf98\xcf\xf3|\xc9\xd9n\xb7\xd9\
\xdb\xdbCQ\x94%\xff\xf5\xef\xbe\xa3~\xef\x1e\\\\\xac\rV\xaf\xd7\xf9\xe6\xc3\
\x0f\xf3\xb37\xdeX\xf2\'I\xc2\x93\'Ox\
|
xfa\xf4\xe9*@\xa5RYu\nA\x92$\xe8\xba\
\x8eeY\xc5cw\xbb\xe8\xba\xbe\xf1kt]g\x7f\x7f\x1f\xeb\xe5\x97\xf1}\xbfx\x82 @\
\x08A\xb5Z]\xcd\xb5.\x90\xe7y\x84a\xc8\xee\xee.\x86a`\x9a&\xedv\x1b\xab\xd1@\
<g\x99UU\xa5\xd1h\xa0\xb7\xdbt\xbb]...\x18\x8dF\xf4\xfb}\xd24];g\t`\x91L\x92\
.u\x94\xe79\xc3\xe1\x10UU)\x97\xcb\x94\xc2\x90r\x96\xb1I\xb6Y\x96\x11\x86!\
\xe3\xf1\x98\xc1`\xc0p8$\xcfsvvv\x8ax\xd3\xe9\x940\x0c\xd7\x03T\xabU:\x9d\
\x0e\xa5\xd2e\x8a\xf3\xf3s\xfa\xfd>I\x92\x000w]\xdaq\xcc\xa65\x88\xe3\x18\
\xd7uyrr\xc2\xc9\xc9\t\xa3\xd1\x88k\xd7\xae\xd1j\xb5\n\xc0n\xb7\xfb|\x80\xfd\
\xfd}\xd24%\x08\x02\xe28&\x08\x02\x92$\xa1\xd7\xeb\xa1\xb9.N\x1coH\xff;@\xaf\
\xd7#I\x12L\xd3\xc44M,\xcb\xa2\\.#\x84\xc0\xf7}\xfa\xfd\xfef\x80\xbd\xbd=&\
\x93\tQ\x14aY\x16\xaa\xaa2\x1e\x8fq]\x97\xb2\xeb\xf2\xd2\x9f\x00p]\x17\xc7q\
\xa8\xd5j\xa8\xaaJ\xa9T\xa2^\xafS\xadV9;;[\x9a\xb3\x04\xa0\xaa*\x96e!I\x12Q\
\x14\x15\xfb\x15\xc71\xbe\xef#\x84(\xf4\xb1\xce$IB\x08\x81\xa6i\x94\xcbe*\
\x95J\xa1\xabj\xb5Z|\xd0F\x80\x85U*\x15TUe0\x18\xd0\xeb\xf50M\x93N\xa7C\xb3\
\xd9D\xd3\xb4\x8d\x00\x9a\xa6\xd1l6\x99w:h\x9a\x86\x10\x02\xc7qh4\x1a\xa8\
\xaa\xca\x1f\xeb\xcdF\x00M\xd3\xd04\x8d\xe9t\x8a,\xcb\xc5\xbbh\xb7\x99\xbe\
\xf2\n%IB\xef\xf5P\xa6S\x00\x12\xd3d\xd6j1=<D\xb4\xdb\xc5y\x97e\x19\xc30\x8a\
\xf7g\xc5\xf7\\\x80M\x16\x1c\x1c\xd0{\xf7]f\xad\x16\xbb_|Q\x00D\x8d\x06\xee\
\xdbos~\xe7\x0e\xb3+\xc5\xffY\xdb\n \xb5m|\xdbF\xb9\xb8 ;:*\xfc\x99e1\xbdy\
\x13\xff\xf0p\xab\xe4\xf0O\xbd\x90DQD\x1c\xc7dY\x86a\x18\x08\xb1<Lq\x1c\xa2\
\x1b7\x98\\\x1d\xc9\xe8\xc6\r\x84\xe3`\x9a\xe6\xf28E!\xcb2<\xcf[Q\xffs\x01|\
\xdf\xc7u]\x84\x104\x9b\xcd\xa22.,\x06\xce\xb3\x8c\xe4\xaa\xa0(\xbb\xbbX\xb7\
o\xe3\x1c\x1c,\x8d\xcb\xb2\x8c\xe9t\x8a\xef\xfb4\x1a\x8d\x15\xc0\x15\x80$I\
\x08\x82\xa0xj\xb5\x1a\xb6m\xaft\xc0sE\xe1\xc20\x08\xaeDh\x9a&V\xa7\x83m\xdb\
K\xe3f\xb3\x19a\x18\x16\xf1$I*\xca\xfaZ\x80\xc9d\xc2\xe9\xe9)\x95J\x85V\xab\
\x85i\x9a+\xcb\x0f\x97M
|
Clarity-89/clarityv2
|
src/clarityv2/utils/storages.py
|
Python
|
mit
| 685 | 0 |
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.utils.functional import LazyObject
class PrivateMediaFileSystemStorage(FileSystemStorage):
"""
Storage that puts files in the private media folder that isn't
globally available.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('location', settings.PRIVATE_MEDIA_ROOT)
kwargs.setdefault('base_url', settings.PRIVATE_MEDIA_URL)
super().__init__(*args, **kwargs)
class PrivateMediaStorage(LazyObject):
def _setup(self):
self._wrapped = PrivateMediaFileSystemStorage()
private_media_storage = PrivateMediaStorage()
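# Illustrative usage (assumes PRIVATE_MEDIA_ROOT / PRIVATE_MEDIA_URL are configured):
#
#   from django.core.files.base import ContentFile
#   name = private_media_storage.save("reports/draft.txt", ContentFile(b"hello"))
#   url = private_media_storage.url(name)   # resolved under PRIVATE_MEDIA_URL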
|
fbradyirl/home-assistant
|
homeassistant/components/zha/core/const.py
|
Python
|
apache-2.0
| 5,498 | 0.000182 |
"""All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NWK = "nwk"
ATTR_POWER_SOURCE = "power_source"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_VALUE = "value"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONF
|
IGURATION = "power"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (BINARY_SENSOR, DEVICE_TRACKER, FAN, LIGHT, LOCK, SENSOR, SWITCH)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVI
|
CE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONTROLLER = "controller"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
"""Possible options for radio type."""
ezsp = "ezsp"
xbee = "xbee"
deconz = "deconz"
zigate = "zigate"
@classmethod
def list(cls):
"""Return list of enum's values."""
return [e.value for e in RadioType]
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = "electrical_measurement"
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = "humidity"
SENSOR_ILLUMINANCE = "illuminance"
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = "occupancy"
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = "pressure"
SENSOR_TEMPERATURE = "temperature"
SENSOR_TYPE = "sensor_type"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_RADIO = "radio"
ZHA_GW_RADIO_DESCRIPTION = "radio_description"
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/rainmachine/__init__.py
|
Python
|
apache-2.0
| 10,913 | 0 |
"""Support for RainMachine devices."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD,
CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL,
CONF_MONITORED_CONDITIONS, CONF_SWITCHES)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from .config_flow import configured_instances
from .const import (
DATA_CLIENT, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DOMAIN)
REQUIREMENTS = ['regenmaschine==1.2.0']
_LOGGER = logging.getLogger(__name__)
DATA_LISTENER = 'listener'
PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN)
SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN)
CONF_CONTROLLERS = 'controllers'
CONF_PROGRAM_ID = 'program_id'
CONF_SECONDS = 'seconds'
CONF_ZONE_ID = 'zone_id'
CONF_ZONE_RUN_TIME = 'zone_run_time'
DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC'
DEFAULT_ICON = 'mdi:water'
DEFAULT_ZONE_RUN = 60 * 10
TYPE_FREEZE = 'freeze'
TYPE_FREEZE_PROTECTION = 'freeze_protection'
TYPE_FREEZE_TEMP = 'freeze_protect_temp'
TYPE_HOT_DAYS = 'extra_water_on_hot_days'
TYPE_HOURLY = 'hourly'
TYPE_MONTH = 'month'
TYPE_RAINDELAY = 'raindelay'
TYPE_RAINSENSOR = 'rainsensor'
TYPE_WEEKDAY = 'weekday'
BINARY_SENSORS = {
TYPE_FREEZE: ('Freeze Restrictions', 'mdi:cancel'),
TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
SENSORS = {
TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_PAUSE_WATERING = vol.Schema({
vol.Required(CONF_SECONDS): cv.positive_int,
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
CONTROLLER_SCHEMA = vol.Schema({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
cv.time_period,
vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_CONTROLLERS):
vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]),
}),
}, extra=vol.ALLOW_EXTRA)
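# Illustrative YAML sketch (hypothetical values) of a configuration.yaml entry
# this schema would accept; only ip_address and password are required, the
# remaining keys fall back to the defaults declared above.
#
#     rainmachine:
#       controllers:
#         - ip_address: 192.168.1.100
#           password: !secret rainmachine_password
#           port: 8080
#           ssl: true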
async def async_setup(hass, config):
"""Set up the RainMachine component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
for controller in conf[CONF_CONTROLLERS]:
if controller[CONF_IP_ADDRESS] in configured_instances(hass):
continue
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data=controller))
return True
async def async_setup_entry(hass, config_entry):
"""Set up RainMachine as config entry."""
from regenmaschine import login
from regenmaschine.errors import RainMachineError
websession = aiohttp_client.async_get_clientsession(hass)
try:
client = await login(
config_entry.data[CONF_IP_ADDRESS],
config_entry.data[CONF_PASSWORD],
websession,
port=config_entry.data[CONF_PORT],
ssl=config_entry.data[CONF_SSL])
rainmachine = RainMachine(
client,
config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)),
config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN))
await rainmachine.async_update()
except RainMachineError as err:
_LOGGER.error('An error occurred: %s', err)
raise ConfigEntryNotReady
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine
for component in ('binary_sensor', 'sensor', 'switch'):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
config_entry, component))
async def refresh(event_time):
"""Refresh RainMachine sensor data."""
_LOGGER.debug('Updating RainMachine sensor data')
await rainmachine.async_update()
async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id] = async_track_time_interval(
hass,
refresh,
timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]))
async def pause_watering(service):
"""Pause watering for a set number of seconds."""
await rainmachine.client.watering.pause_all(service.data[CONF_SECONDS])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def start_program(service):
"""Start a particular program."""
await rainmachine.client.programs.start(service.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def start_zone(service):
"""Start a particular zone for a certain amount of time."""
await rainmachine.client.zones.start(
service.data[CONF_ZONE_ID], service.data[CONF_ZONE_RUN_TIME])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
async def stop_all(service):
"""Stop all watering."""
await rainmachine.client.watering.stop_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def stop_program(service):
"""Stop a program."""
await rainmachine.client.programs.stop(service.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def stop_zone(service):
"""Stop a zone."""
await rainmachine.client.zones.stop(service.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
async def unpause_watering(service):
"""Unpause watering."""
await rainmachine.client.watering.unpause_all()
asyn
|
c_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
for service, method, schema in [
('pause_watering', pause_watering, SERVICE_PAUSE_WATERING),
('start_program', start_program, SERVICE_START_PROGRAM_SCHEMA),
('start_zone', start_zone, SERVICE_START_ZO
|
NE_SCHEMA),
('stop_all', stop_all, {}),
('stop_program', stop_program, SERVICE_STOP_PRO
|
AccelAI/accel.ai
|
flask-aws/lib/python2.7/site-packages/docker/api/image.py
|
Python
|
mit
| 9,342 | 0 |
import logging
import six
import warnings
from ..auth import auth
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
from .. import utils
from .. import errors
log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource
def get_image(self, image):
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@utils.check_resource
def history(self, image):
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
def import_image(self, src=None, repository=None, tag=None, image=None):
if src:
if isinstance(src, six.string_types):
try:
result = self.import_image_from_file(
src, repository=repository, tag=tag)
except IOError:
result = self.import_image_from_url(
src, repository=repository, tag=tag)
else:
result = self.import_image_from_data(
src, repository=repository, tag=tag)
elif image:
result = self.import_image_from_image(
image, repository=repository, tag=tag)
else:
raise Exception("Must specify a src or image")
return result
def import_image_from_data(self, data, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
return self._result(
self._post(u, data=data, params=params, headers=headers))
def import_image_from_file(self, filename, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
with open(filename, 'rb') as f:
return self._result(
self._post(u, data=f, params=params, headers=headers,
timeout=None))
def import_image_from_stream(self, stream, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
'Transfer-Encoding': 'chunked',
}
return self._result(
self._post(u, data=stream, params=params, headers=headers))
def import_image_from_url(self, url, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': url,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
def import_image_from_image(self, image, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromImage': image,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
@utils.check_resource
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
@utils.check_resource
def inspect_image(self, image):
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data):
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if auth_config is None:
log.debug('Looking for auth config')
if not self._auth_configs:
log.debug(
"No auth config in memory - loading from filesystem"
)
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py username , password, serveraddress, email
headers['X-Registry-Auth'] = auth.encode_header(
authcfg
)
else:
log.debug('No auth config found')
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response)
return self._result(response)
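    # Illustrative usage sketch (hypothetical host and image). This mixin is
    # composed into the low-level client class (docker.Client in older
    # docker-py releases, docker.APIClient in newer ones):
    #
    #     client = docker.Client(base_url='unix://var/run/docker.sock')
    #     for chunk in client.pull('alpine', tag='3.4', stream=True):
    #         print(chunk)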
def push(self, repository, tag=None, stream=False,
insecure_registry=False):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(r
|
epository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if uti
|
ls.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs:
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this specific
# registry as we can have a readonly pull. Just put the header if
# we
|
lukecwik/incubator-beam
|
website/append_index_html_to_internal_links.py
|
Python
|
apache-2.0
| 4,461 | 0.008518 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Script to fix the links in the staged website.
Finds all internal links which do not have index.html at the end and appends
index.html in the appropriate place (preserving anchors, etc).
Usage:
From root directory, after running the jekyll build, execute
'python .jenkins/append_index_html_to_internal_links.py'.
Dependencies:
beautifulsoup4
Installable via pip as 'sudo pip install beautifulsoup4' or apt via
'sudo apt-get install python-beautifulsoup4'.
"""
from __future__ import print_function
import argparse
import fnmatch
import os
import re
from bs4 import BeautifulSoup
try:
unicode # pylint: disable=unicode-builtin
except NameError:
unicode = str
# Original link match. Matches any string which starts with '/' and doesn't
# have a file extension.
linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$'
# Regex which matches strings of type /internal/link/#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch1 = r'(.+\/)(#[^\/]+$)'
# Regex which matches strings of type /internal/link#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)'
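# Illustrative matches (hypothetical paths, for reference only):
#   linkMatch    matches '/get-started/' and '/documentation' but not
#                '/images/logo.png' (trailing file extension).
#   anchorMatch1 splits '/internal/link/#anchor' into ('/internal/link/', '#anchor').
#   anchorMatch2 splits '/internal/link#anchor' into ('/internal/link', '#anchor').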
parser = argparse.ArgumentParser(description='Fix links in the staged website.')
parser.add_argument('content_dir', help='Generated content directory to fix links in')
args = parser.parse_args()
matches = []
# Rec
|
ursively walk content directory and find all html files.
for root,
|
dirnames, filenames in os.walk(args.content_dir):
for filename in fnmatch.filter(filenames, '*.html'):
# Javadoc does not have the index.html problem, so omit it.
if 'javadoc' not in root:
matches.append(os.path.join(root, filename))
print('Matches: ' + str(len(matches)))
# Iterates over each matched file looking for link matches.
for match in matches:
print('Fixing links in: ' + match)
mf = open(match)
soup = BeautifulSoup(mf)
# Iterates over every <meta> which is used for aliases - redirected links
for meta in soup.findAll('meta'):
try:
content = meta['content']
alias = content.replace('0; url=', '')
if re.match(linkMatch, alias) is not None:
if alias.endswith('/'):
# /internal/link/
meta['content'] = content + 'index.html'
else:
# /internal/link
meta['content'] = content + '/index.html'
mf.close()
html = unicode(soup).encode('utf-8')
# Write back to the file.
with open(match, "wb") as f:
print('Replacing ' + content + ' with: ' + meta['content'])
f.write(html)
except KeyError as e:
# Some <meta> tags don't have url.
continue
# Iterates over every <a>
for a in soup.findAll('a'):
try:
hr = a['href']
if re.match(linkMatch, hr) is not None:
if hr.endswith('/'):
# /internal/link/
a['href'] = hr + 'index.html'
elif re.match(anchorMatch1, hr) is not None:
# /internal/link/#anchor
mat = re.match(anchorMatch1, hr)
a['href'] = mat.group(1) + 'index.html' + mat.group(2)
elif re.match(anchorMatch2, hr) is not None:
# /internal/link#anchor
mat = re.match(anchorMatch2, hr)
a['href'] = mat.group(1) + '/index.html' + mat.group(2)
else:
# /internal/link
a['href'] = hr + '/index.html'
mf.close()
html = unicode(soup).encode('utf-8')
# Write back to the file.
with open(match, "wb") as f:
print('Replacing ' + hr + ' with: ' + a['href'])
f.write(html)
except KeyError as e:
# Some <a> tags don't have an href.
continue
|
cliffton/localsecrets
|
offers/migrations/0001_initial.py
|
Python
|
mit
| 3,179 | 0.004404 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 16:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('shops', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=300)),
('expiration', models.DateTimeField()),
('count', models.IntegerField(default=0)),
('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers', to='shops.Shop')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OfferHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offer_history', to='offers.Offer')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers_used', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OfferReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('review', models.TextField()),
('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offer_reviews', to='offers.Offer')),
('user', mode
|
ls.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related
|
_name='offers_reviewed', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
aluchies/impedance_map
|
impedance_map/tests/test_sphere.py
|
Python
|
bsd-3-clause
| 6,422 | 0.001246 |
from impedance_map.sphere import correlation_coefficient, form_factor, \
pair_distribution_function_PY, structure_factor_function_PY, \
cross_section_dimension, fit_correlation_coefficient_nls, \
fit_form_factor_nls
import numpy as np
import math
import unittest
class TestCode(unittest.TestCase):
def test1_sphere_cross_section_dimension(self):
a = 1
q = 0
A = cross_section_dimension(a, q)
A_real = 1
self.assertTrue(A == A_real)
def test2_sphere_cross_section_dimension(self):
a = 1
q = 1
A = cross_section_dimension(a, q)
A_real = 0
self.assertTrue(A == A_real)
def test3_sphere_cross_section_dimension(self):
a = 1
q = 0.5
A = cross_section_dimension(a, q)
A_real = np.sqrt(1 - 0.5 ** 2)
self.assertTrue(A == A_real)
def test1_sphere_form_factor(self):
ndim = 1
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.70807342, 0.20670545, 0.00221276, 0.03579688,
0.03678143, 0.0021687])
self.assertTrue(np.allclose(H, Hr))
def test2_sphere_form_factor(self):
ndim = 2
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.77457807, 0.3326115, 0.05109377, 0.00109043,
0.01716929, 0.008506])
self.assertTrue(np.allclose(H, Hr))
def test3_sphere_form_factor(self):
ndim = 3
a = 1
k = np.linspace(0, 6, 7)
H = form_factor(k=k, a=a, ndim=ndim)
Hr = np.array([1., 0.81632316, 0.42653525, 0.11949293, 0.00758346,
0.00325512, 0.00703836])
self.assertTrue(np.allclose(H, Hr))
def test1_sphere_corr_coeff(self):
ndim = 1
a = 1.
r = np.linspace(0., 2., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test2_sphere_corr_coeff(self):
ndim = 1
a = 1.
r = np.linspace(0., 3., 16)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(len(r))
b_real[0:11] = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test3_sphere_corr_coeff(self):
ndim = 1
a = 3.
r = np.linspace(0., 6., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.linspace(1., 0., 11)
self.assertTrue(np.allclose(b, b_real))
def test4_sphere_corr_coeff(self):
ndim = 1
a = 0.0
r = np.linspace(0., 6., 11)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(11)
self.assertTrue(np.allclose(b, b_real))
def test5_sphere_corr_coeff(self):
ndim = 2
a = 1.
r = np.linspace(0., 2., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0.])
self.assertTrue(np.allclose(b, b_real))
def test6_sphere_corr_coeff(self):
ndim = 2
a = 1.
r = np.linspace(0., 3.2, 9)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0., 0., 0., 0.])
self.assertTrue(np.allclose(b, b_real))
def test7_sphere_corr_coeff(self):
ndim = 2
a = 3.
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
0.])
self.assertTrue(np.allclose(b, b_real))
def test8_sphere_corr_coeff(self):
ndim = 2
a = 0.0
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(6)
self.assertTrue(np.allclose(b, b_real))
def test9_sphere_corr_coeff(self):
ndim = 3
a = 1.
r = np.linspace(0., 2., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
self.assertTrue(np.allclose(b, b_real))
def test10_sphere_corr_coeff(self):
ndim = 3
a = 1.
r = np.linspace(0., 3.2, 9)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0., 0., 0., 0.])
self.assertTrue(np.allclose(b, b_real))
def test11_sphere_corr_coeff(self):
ndim = 3
a = 3.
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
self.assertTrue(np.allclose(b, b_real))
def test13_sphere_corr_coeff(self):
ndim = 3
a = 0.0
r = np.linspace(0., 6., 6)
b = correlation_coefficient(ndim=ndim, a=a, r=r)
b_real = np.zeros(6)
self.assertTrue(np.allclose(b, b_real))
def test1_pair_distribution_function_PY(self):
f = 0.4
a = 0.01
rb = np.linspace(1, 5, 5)
r = rb * 2 * a
g = pair_distribution_function_PY(r, a, f)
g_real = np.asarray([ 1.665534 , 1.14167826, 1.04312259, 1.01389934, 1.00453527])
self.assertTrue(np.allclose(g, g_real))
def test1_structure_factor_function_PY(self):
k = np.linspace(0, 10, 5)
f = 0.15
a = 0.01
S = structure_factor_function_PY(k/a, a=0.01, f=0.15)
S_real = np.asarray([ 0.30887944, 1.03988757, 0.95564256, 0.98177134, 1.00532684])
self.assertTrue(np.allclose(S, S_real))
def test1_fit_correlation_coefficient_nls(self):
a = 0.75
r = np.linspace(0, 3., 10)
y = correlation_coefficient(a=a, r=r)
a_guess = fit_correlation_coefficient_nls(r, y)
self.assertTrue(np.allclose(a, a_guess))
def test1_fit_form_factor_nls(self):
a = 0.75
k = np.linspace
|
(0.01, 3, 10)
y = form_factor(a=a, k=k)
a_guess = fit_form_factor_nls(k, y)
self.assertTrue(np.allclose(a, a_guess))
if __name__ == '__main__':
    print('Running unit tests for impedance_map.sphere')
un
|
ittest.main()
|
abc612008/datatoaster
|
setup.py
|
Python
|
mit
| 474 | 0.004219 |
from
|
setuptools import setup, find_packages
with open('README.md', encoding = "utf-8") as f:
readme = f.read()
setup(
name='datatoaster',
version='0.1.0',
description='A Python library that can convert raw data to chart data',
long_description=readme,
author='Harry Yu',
author_email='harryyunull@gmail.com',
url='https://github.com/abc612008/datatoaster',
license="MIT",
packages=find_p
|
ackages(exclude=('tests', 'docs', 'demo'))
)
|
AerisCloud/AerisCloud
|
aeriscloud/cli/aeris/destroy.py
|
Python
|
mit
| 271 | 0 |
#!/usr/bin/env python
import click
from aerisclou
|
d.cli.helpers import standard_options, Command
@click.command(cls=Command)
@standard_options(start_prompt=False)
def cli(box):
"""
Destroy a box
"""
box.
|
destroy()
if __name__ == '__main__':
cli()
|
Fokko/incubator-airflow
|
airflow/gcp/example_dags/example_spanner.py
|
Python
|
apache-2.0
| 8,612 | 0.002206 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that creates, updates, queries and deletes a Cloud Spanner instance.
This DAG relies on the following environment variables
* GCP_PROJECT_ID - Google Cloud Platform project for the Clou
|
d Spanner instance.
* GC
|
P_SPANNER_INSTANCE_ID - Cloud Spanner instance ID.
* GCP_SPANNER_DATABASE_ID - Cloud Spanner database ID.
* GCP_SPANNER_CONFIG_NAME - The name of the instance's configuration. Values are of the
form ``projects/<gcp_project>/instanceConfigs/<configuration>``. See also:
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs#InstanceConfig
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list#google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs
* GCP_SPANNER_NODE_COUNT - Number of nodes allocated to the instance.
* GCP_SPANNER_DISPLAY_NAME - The descriptive name for this instance as it appears in UIs.
Must be unique per project and between 4 and 30 characters in length.
"""
import os
import airflow
from airflow import models
from airflow.gcp.operators.spanner import (
CloudSpannerInstanceDatabaseDeleteOperator, CloudSpannerInstanceDatabaseDeployOperator,
CloudSpannerInstanceDatabaseQueryOperator, CloudSpannerInstanceDatabaseUpdateOperator,
CloudSpannerInstanceDeleteOperator, CloudSpannerInstanceDeployOperator,
)
# [START howto_operator_spanner_arguments]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCP_SPANNER_INSTANCE_ID = os.environ.get('GCP_SPANNER_INSTANCE_ID', 'testinstance')
GCP_SPANNER_DATABASE_ID = os.environ.get('GCP_SPANNER_DATABASE_ID', 'testdatabase')
GCP_SPANNER_CONFIG_NAME = os.environ.get('GCP_SPANNER_CONFIG_NAME',
'projects/example-project/instanceConfigs/eur3')
GCP_SPANNER_NODE_COUNT = os.environ.get('GCP_SPANNER_NODE_COUNT', '1')
GCP_SPANNER_DISPLAY_NAME = os.environ.get('GCP_SPANNER_DISPLAY_NAME', 'Test Instance')
# OPERATION_ID should be unique per operation
OPERATION_ID = 'unique_operation_id'
# [END howto_operator_spanner_arguments]
default_args = {
'start_date': airflow.utils.dates.days_ago(1)
}
with models.DAG(
'example_gcp_spanner',
default_args=default_args,
schedule_interval=None # Override to match your needs
) as dag:
# Create
# [START howto_operator_spanner_deploy]
spanner_instance_create_task = CloudSpannerInstanceDeployOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
configuration_name=GCP_SPANNER_CONFIG_NAME,
node_count=int(GCP_SPANNER_NODE_COUNT),
display_name=GCP_SPANNER_DISPLAY_NAME,
task_id='spanner_instance_create_task'
)
spanner_instance_update_task = CloudSpannerInstanceDeployOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
configuration_name=GCP_SPANNER_CONFIG_NAME,
node_count=int(GCP_SPANNER_NODE_COUNT) + 1,
display_name=GCP_SPANNER_DISPLAY_NAME + '_updated',
task_id='spanner_instance_update_task'
)
# [END howto_operator_spanner_deploy]
# [START howto_operator_spanner_database_deploy]
spanner_database_deploy_task = CloudSpannerInstanceDatabaseDeployOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
ddl_statements=[
"CREATE TABLE my_table1 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
"CREATE TABLE my_table2 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
],
task_id='spanner_database_deploy_task'
)
spanner_database_deploy_task2 = CloudSpannerInstanceDatabaseDeployOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
ddl_statements=[
"CREATE TABLE my_table1 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
"CREATE TABLE my_table2 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
],
task_id='spanner_database_deploy_task2'
)
# [END howto_operator_spanner_database_deploy]
# [START howto_operator_spanner_database_update]
spanner_database_update_task = CloudSpannerInstanceDatabaseUpdateOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
ddl_statements=[
"CREATE TABLE my_table3 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
],
task_id='spanner_database_update_task'
)
# [END howto_operator_spanner_database_update]
# [START howto_operator_spanner_database_update_idempotent]
spanner_database_update_idempotent1_task = CloudSpannerInstanceDatabaseUpdateOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
operation_id=OPERATION_ID,
ddl_statements=[
"CREATE TABLE my_table_unique (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
],
task_id='spanner_database_update_idempotent1_task'
)
spanner_database_update_idempotent2_task = CloudSpannerInstanceDatabaseUpdateOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
operation_id=OPERATION_ID,
ddl_statements=[
"CREATE TABLE my_table_unique (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
],
task_id='spanner_database_update_idempotent2_task'
)
# [END howto_operator_spanner_database_update_idempotent]
# [START howto_operator_spanner_query]
spanner_instance_query_task = CloudSpannerInstanceDatabaseQueryOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
query=["DELETE FROM my_table2 WHERE true"],
task_id='spanner_instance_query_task'
)
spanner_instance_query_task2 = CloudSpannerInstanceDatabaseQueryOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
query=["DELETE FROM my_table2 WHERE true"],
task_id='spanner_instance_query_task2'
)
# [END howto_operator_spanner_query]
# [START howto_operator_spanner_database_delete]
spanner_database_delete_task = CloudSpannerInstanceDatabaseDeleteOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
task_id='spanner_database_delete_task'
)
spanner_database_delete_task2 = CloudSpannerInstanceDatabaseDeleteOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
database_id=GCP_SPANNER_DATABASE_ID,
task_id='spanner_database_delete_task2'
)
# [END howto_operator_spanner_database_delete]
# [START howto_operator_spanner_delete]
spanner_instance_delete_task = CloudSpannerInstanceDeleteOperator(
project_id=GCP_PROJECT_ID,
instance_id=GCP_SPANNER_INSTANCE_ID,
task_id='spanner_instance_delete_task'
)
spanner_instance_delete_task2 = CloudSpannerInstanceDeleteOperator(
instance_id=GCP_SPANNER_INSTANCE_ID,
task_id='spanner_instance_delete_task2'
)
# [END howto_operator_spanner_delete]
spanner_instance_create_task \
>> spanner_instance_update_task \
>> spanner_database_deploy_task \
>> s
|
vernnobile/Gnu_Free_Font
|
tools/test/ranges/Arabic/unicode_joining.py
|
Python
|
gpl-3.0
| 8,257 | 0.016108 |
#!/usr/bin/python
from __future__ import print_function, unicode_literals
__license__ = """
This file is part of GNU FreeFont.
GNU FreeFont is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
GNU FreeFont is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
GNU FreeFont. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Emmanuel Vallois"
__email__ = "vallois@polytech.unice.fr"
__copyright__ = "Copyright 2011 Emmanuel Vallois"
__date__ = "$Date$"
__version__ = "$Revision$"
__doc__ = """
Writes in the file named by the first argument an HTML page comprising a table
for testing joining cursive script characters.
Runs under normal Python, version 2.7 or above.
Typical usage:
unicode_joining.py "Unicode joining test page.html"
"""
import sys
from codecs import open
from string import Template
from collections import OrderedDict
from itertools import chain
_module_missing_msg = """Please run
generate_arabic_shapin
|
g.py
to generate
arabic_shaping.py"""
try:
from arabic_shaping import arabic_shapings, joining_type
except:
print( _module_missing_msg, file=sys.stderr)
sys.exit( 1 )
if len(sys.argv) > 1:
outfile = sys.argv[1]
else:
outfile = 'Unicode joining test page.html'
sys.stdout = open(outfile, 'w', 'utf-8')
class OrderedDef
|
aultDict(OrderedDict):
def __missing__(self, key):
self[key] = rv = []
return rv
def move_to_end(self, key):
tmp = self[key]
del self[key]
self[key] = tmp
arabic_ranges = tuple(chain(range(0x600, 0x6FF +1), range(0x750, 0x77F +1), range(0x8A0, 0x8FF)))
unicode61_new_ranges = [0x604, 0x8A0]
unicode61_new_ranges.extend(range(0x8A2, 0x8AC + 1))
unicode61_new_ranges.extend(range(0x8E4, 0x8FE + 1))
unicode62_new_ranges = [0x605, 0x8A1]
unicode62_new_ranges.extend(range(0x8AD, 0x8B1 + 1))
unicode62_new_ranges.append(0x8FF)
shapings = filter(lambda s: s.joining_type in 'RD' and (s.joining_group != 'No_Joining_Group' or s.code_point not in arabic_ranges), arabic_shapings.values())
jg_shapings_arabic = OrderedDefaultDict()
jg_shapings_other_scripts = OrderedDefaultDict()
for s in shapings:
if s.code_point in arabic_ranges:
jg_shapings_arabic[s.joining_group].append(s)
else:
jg_shapings_other_scripts[s.joining_group].append(s)
if s.code_point == 0x62B:
jg_shapings_arabic.move_to_end('TEH MARBUTA')
jg_shapings_arabic['TEH MARBUTA GOAL']
elif s.code_point == 0x642:
jg_shapings_arabic.move_to_end('GAF')
jg_shapings_arabic['SWASH KAF']
elif s.code_point == 0x646:
jg_shapings_arabic['NYA']
elif s.code_point == 0x647:
jg_shapings_arabic['KNOTTED HEH']
jg_shapings_arabic['HEH GOAL']
elif s.code_point == 0x64A:
jg_shapings_arabic.move_to_end('FARSI YEH')
elif s.code_point in chain(range(0x627, 0x63A + 1), range(0x641, 0x64A + 1)):
jg_shapings_arabic.move_to_end(s.joining_group)
#for jg, ls in jg_shapings_arabic.items():
# for s in ls:
# print(jg, ls, file=sys.stderr)
table_head = '''
<table frame="box" rules="rows">
{}
<colgroup><col/><col/><col/></colgroup>
<colgroup id="characterCols"><col/><col/><col/><col/></colgroup>
<colgroup><col/></colgroup>'''
table_internal_title = '''<tr><td colspan="8"><h2>{}</h2></td></tr>
<tr>
<th rowspan="2">Joining Group</th>
<th rowspan="2">Code Point</th>
<th rowspan="2">Short Name</th>
<th colspan="5">Contextual Forms</th>
</tr>
<tr><th>Isolated</th><th>Final</th><th>Medial</th><th>Initial</th><th>Joined</th></tr>'''
def print_table():
contextual_form_formats = { 'isolat':'{}', 'final>':'‍{}', 'medial':'‍{}‍', 'initia':'{}‍' }
contextual_forms = 'isolat', 'final>', 'medial', 'initia'
def print_shaping(shaping, rowspan):
# print('print_shaping', shaping, file=sys.stderr)
cp = shaping.code_point
char = unichr(cp)
print('<tr{}>'.format(' class="nextVersion"' if cp in unicode61_new_ranges else ' class="furtherFuture"' if cp in unicode62_new_ranges else ''))
if rowspan: print('<td rowspan="{}">{}</td>'.format(rowspan, shaping.joining_group))
print('<td>{:04X}</td>'.format(cp))
print('<td>{}</td>'.format(shaping.short_name))
i = 0
for form in contextual_forms:
print('<td class="ch">{}</td>'.format(contextual_form_formats[form].format(char)))
i += 1
if { 'R':'final>', 'D':'' }[joining_type(cp)] == form:
break
if i < 4:
print('<td colspan="{}"></td>'.format(4 - i))
print('<td class="ch">{}</td>'.format('\u0640' * (4 - i) + char * (i - 1) + ' ' + char))
print('</tr>')
print(table_head.format(caption))
print(table_internal_title.format('Arabic'))
for shaping_list in jg_shapings_arabic.values():
rowspan = len(shaping_list)
for shaping in shaping_list:
print_shaping(shaping, rowspan)
rowspan = None
print(table_internal_title.format('Syriac, Nko and Mandaic'))
for shaping_list in jg_shapings_other_scripts.values():
rowspan = len(shaping_list)
for shaping in shaping_list:
print_shaping(shaping, rowspan)
rowspan = None
print('</table>')
html_heading = Template('''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
<title>$title</title>
<style type="text/css">
.captionSquare { float: left; width: 2em; height: 1em; margin-right: 0.5em }
caption { width: 60em; text-align: left }
table { text-align: center; font-family: FreeSerif, FreeSans }
td { padding: 10px }
small { font-size: small }
#characterCols { border-left: medium double black; border-right: medium double black }
.nextVersion { background-color: #CCFF99 }
.furtherFuture { background-color: #FFFFCC }
.name { width: 10em }
.ch { vertical-align: baseline; line-height: 75%; font-size: 250%; direction: rtl }
.empty { background:#EEEEEE }
</style>
</head>
<body>
<h1>$title</h1>
<p>Choose the font to test: <select onchange="changefont(this)"><option>FreeSerif</option><option>FreeSerif, bold</option><option>FreeSans</option><option>FreeMono</option></select></p>
<script type="text/javascript">//<![CDATA[
function changefont(select) {
var font = select.options.item(select.selectedIndex).value.split(', ');
var bold = font.length > 1 ? font[1] == 'bold' : false;
font = font[0];
var elementsToStyle = document.getElementsByClassName("ch");
for (i = 0; i < elementsToStyle.length; i++) {
elementsToStyle[i].style.fontFamily = font;
elementsToStyle[i].style.fontWeight = bold ? 'bold' : 'normal';
}
}//]]></script>''')
caption='''<caption><span class="captionSquare nextVersion"> </span> New characters in Unicode 6.1, which will be published in February 2012.
These can be relied upon and will not change or be removed. See <a href="http://www.unicode.org/Public/6.1.0/charts/blocks//U08A0.pdf">the
Unicode chart for the new block <b>Arabic Extended-A</b></a>, and for more about these characters, see <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3734.pdf">N3734</a>
for U+0604, <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3882.pdf">the complete
proposal</a> for most characters, <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3791.pdf">N3791</a> for U+08F0-U+08F3.<br/>
<span class="captionSquare furtherFuture"> </span> Future new characters in Unicode 6.2. These can will probably be standardized this way,
but could in principle still change or be removed. See <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3990.pdf">N3990, in 4.2 Orthography</a> for U+0605,
<a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n4072.pdf">N4072 proposal</a> about U+08AD-U+08B1, and
<a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3989.pdf">N3989 proposal</a> about U+08FF.</caption>'''
def print_arabic_test_page():
print(html_heading.substitute(title='Test of Joining Characters From Unicode Cursive Scripts'))
|
per7inac1ousQ/Directories
|
DirectoriesBackupFiles/models.py
|
Python
|
apache-2.0
| 7,043 | 0.015618 |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines for those models you wish to give write DB access
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class Attributes(models.Model):
attr_id = models.AutoField(primary_key=True)
descr = models.CharField(max_length=140)
descr_en = models.CharField(max_length=140)
notes = models.CharField(max_length=250, blank=True)
class Meta:
db_table = 'attributes'
verbose_name= 'Attributes'
# mayb
|
e instantiate a def _unicode_ function that will return model's name?
class Dep
|
artment(models.Model):
id = models.AutoField(primary_key=True)
tmima_per = models.TextField()
proedros = models.IntegerField(default='0')
pr_spoudwn = models.TextField()
pr_spoudwn_en = models.TextField()
tmima_per_en = models.TextField()
tmima_en = models.CharField(max_length=5)
homepage = models.TextField()
homepage_en = models.TextField()
lastupdate = models.DateTimeField()
class Meta:
db_table = 'department'
verbose_name= 'Department'
class Instructors(models.Model):
instr_id = models.AutoField(primary_key=True, default='0')
cv = models.TextField(blank=True)
cv_en = models.TextField(blank=True)
research = models.TextField(blank=True)
research_en = models.TextField(blank=True)
subject = models.CharField(max_length=255, blank=True)
subject_en = models.CharField(max_length=255, blank=True)
lastupdate = models.DateTimeField()
class Meta:
db_table = 'instructors'
verbose_name= 'Instructors'
class Katefth(models.Model):
kat_id = models.AutoField(primary_key=True)
perigrafi_kat = models.CharField(max_length=100)
perigrafi_kat_en = models.CharField(max_length=100)
class Meta:
db_table = 'katefth'
verbose_name= 'Katefthnsh'
class KatefthKykloi(models.Model):
kat_id = models.IntegerField(primary_key=True)
kyklos_id = models.IntegerField()
class Meta:
db_table = 'katefth_kykloi'
unique_together = ("kat_id", "kyklos_id")
verbose_name= 'Katefthnsh kykloi'
class Kykloi(models.Model):
kyklos_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, blank=True)
name_en = models.CharField(max_length=255, blank=True)
notes = models.TextField(blank=True)
notes_en = models.TextField(blank=True)
dept_id = models.IntegerField(blank=True, null=True)
examina = models.IntegerField()
indexing = models.IntegerField(default='0000')
class Meta:
db_table = 'kykloi'
verbose_name= 'Kykloi'
class KykloiExamina(models.Model):
id = models.AutoField(primary_key=True)
examina = models.TextField()
notes = models.CharField(max_length=255, blank=True)
notes_en = models.CharField(max_length=255, blank=True)
comments = models.TextField(blank=True)
class Meta:
db_table = 'kykloi_examina'
verbose_name= 'Kykloi examina'
class ModuleKykloi(models.Model):
module_id = models.IntegerField(primary_key=True, default='0')
kyklos_id = models.IntegerField(default='0')
semester = models.IntegerField(default='0')
indexing = models.IntegerField(default='99')
class Meta:
db_table = 'module_kykloi'
unique_together = (("module_id", "kyklos_id", "semester"),)
verbose_name= 'Modules Kyklwn'
class Modules(models.Model):
id = models.AutoField(primary_key=True)
module = models.CharField(max_length=255, default='')
description = models.TextField()
choice = models.IntegerField(default='0')
module_en = models.CharField(max_length=255, default='')
description_en = models.TextField()
notes = models.CharField(max_length=255, default='')
notes_en = models.CharField(max_length=255)
class Meta:
db_table = 'modules'
verbose_name= 'Modules'
class ModulesTutors(models.Model):
module_id = models.IntegerField(primary_key=True, default='0')
tutor_id = models.IntegerField(default='0')
last_update = models.DateTimeField()
class Meta:
db_table = 'modules_tutors'
unique_together = (("module_id", "tutor_id"),)
verbose_name= 'Modules tutors'
class PubInstr(models.Model):
pubid = models.IntegerField(primary_key=True, default='0')
instrid = models.IntegerField(default='0')
cduom = models.IntegerField(default='1')
lastupdate = models.DateTimeField()
class Meta:
db_table = 'pub_instr'
unique_together = (("pubid", "instrid"),)
verbose_name= 'Publication instr'
class PubTypes(models.Model):
id = models.AutoField(primary_key=True, default='0')
type_description = models.CharField(max_length=255)
lastupdate = models.DateTimeField()
class Meta:
db_table = 'pub_types'
verbose_name= 'Publication types'
class Publications(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField()
year = models.CharField(max_length=4, default='')
typeid = models.IntegerField(default='0')
filelink = models.CharField(max_length=255, blank=True)
pubdate = models.DateField(blank=True, null=True)
lastupdate = models.DateTimeField()
class Meta:
db_table = 'publications'
verbose_name= 'Publications'
class Ranks(models.Model):
rank_id = models.AutoField(primary_key=True)
idiotita_per = models.CharField(max_length=150, default='')
idiotita_per_en = models.CharField(max_length=150, default='')
notes = models.CharField(max_length=250, blank=True)
class Meta:
db_table = 'ranks'
verbose_name= 'Ranks'
class Service(models.Model):
service_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=150, blank=True)
name_en = models.CharField(max_length=150, blank=True)
notes = models.TextField(blank=True)
notes_en = models.TextField(blank=True)
is_academic = models.IntegerField(default='0')
class Meta:
db_table = 'service'
verbose_name= 'Service'
class Users(models.Model):
username = models.CharField(primary_key=True, max_length=20, default='')
password = models.CharField(max_length=4, default='ldap')
status = models.IntegerField(default='1')
class Meta:
db_table = 'users'
verbose_name= 'Users'
class Works(models.Model):
emp_id = models.IntegerField(primary_key=True)
service_id = models.IntegerField()
attribute_id = models.IntegerField(default='44')
phone = models.CharField(max_length=36, blank=True)
primary_academic = models.IntegerField()
lastupdate = models.DateTimeField()
class Meta:
db_table = 'works'
unique_together = (("emp_id", "service_id", "attribute_id"),)
verbose_name= 'Works'
|
ankanaan/chimera
|
src/chimera/util/vizquery.py
|
Python
|
gpl-2.0
| 3,452 | 0.000579 |
from chimera.util.votable import VOTable
from httplib import HTTPConnection
import tempfile
import os
import urllib
class VizQuery(object):
"""
    Queries a catalog in Vizier
within a given radius or box of the zenith
"""
def __init__(self):
self.args = {}
self.args["-mime"] = "xml"
self.columns = None
def useCat(self, catName):
"""
@param catName: the catalog's name in Vizier
@type catName: str
Simply sets the catalog's name
"""
self.args["-source"] = catName
def useColumns(self, columns, sortBy, reverse=False):
"""
@param columns: list of catalog's columns to use
@type columns: list
@param sortBy: define which column to sort by
@type sortBy: str
        @param reverse: decide to reverse sort
        @type reverse: bool
Define which columns will be fetched and which column will be used
for sorting.
"""
self.columns = columns.split(",")
self.args["-out"] = columns
if reverse:
self.args["-sort"] = "-" + sortBy
else:
self.args["-sort"] = sortBy
def sortBy(self, column):
"""
One sets here which column to sort by
@param column: name of column to sort by
@type column: str
"""
def constrainColumns(self, columns):
"""
Use this to add constraints to any of the columns
@param columns: list of dictionaries {COLUMN:condition}
@type columns: list
"""
self.args.update(columns)
def useTarget(self, center, radius=None, box=None):
"""
@param center: center of search in catalog
@type center: L{Position}
@param radius: radius of search
@type radius: float
@param box: box size, if you want a square use an integer
if you want a rectangle use a tuple (ww,hh)
@type box: int | tuple
"""
self.args["-c"] = str(center)
self.args["-c.eq"] = "J2000"
if radius:
self.args["-c.rd"] = radius
elif box:
try:
self.args["-c.bd"] = "=%fx%f" % radius
except:
self.args["-c.bd"] = radius
else:
raise TypeError("You must specify either radius or box size")
def find(self, limit=9999):
"""
@param limit: Number of stars to return from Vizier
@type limit: int
"""
assert "-c.rd" in self.args or "-c.bd" in self.args, "No target selected, use useTarget method first."
self.args["-out.max"] = limit
resu
|
lts = tempfile.NamedTemporaryFile(mode='w+',
prefix="chimera.vizquery",
dir=
|
tempfile.gettempdir())
# query the catalog in Vizier's database
conn = HTTPConnection("webviz.u-strasbg.fr")
s = urllib.urlencode(self.args)
conn.request("POST", "/viz-bin/votable", s)
resp = conn.getresponse()
ret = resp.read()
f = open(results.name, "w")
f.write(ret)
f.close()
obj = []
votable = VOTable(results.name)
for linha in votable.getDataRows():
v = [c.getContent() for c in linha.getNodeList()]
obj.append(dict(zip(self.columns, v)))
return obj
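# Illustrative usage sketch (hypothetical catalogue and coordinates; the
# Position import and its fromRaDec helper are assumed from chimera.util.position):
#
#     from chimera.util.position import Position
#
#     viz = VizQuery()
#     viz.useCat("II/246")  # e.g. the 2MASS point source catalogue
#     viz.useColumns("RAJ2000,DEJ2000,Jmag", sortBy="Jmag")
#     viz.useTarget(Position.fromRaDec("10:00:00", "-20:00:00"), radius=0.5)
#     stars = viz.find(limit=100)  # list of dicts keyed by the chosen columns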
|
fametrano/BitcoinBlockchainTechnology
|
tests/test_hashes.py
|
Python
|
mit
| 1,276 | 0.000784 |
#!/usr/bin/env python3
# Copyright (C) 2017-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for the `btclib.hashes` module."
from btclib.hashes import hash160, hash256
from tests.test_to_key import (
net_unaware_compressed_pub_keys,
net_unaware_uncompressed_pub_keys,
plain_prv_keys
|
,
)
def test_hash160_hash256() -> None:
test_vectors = (
plain_prv_keys
+ net_unaware_compressed_pub_keys
+ net_unaware_uncompressed_pub_keys
)
for hexstring in test_vectors:
hash160(hexstring)
hash256(hexstrin
|
g)
# def test_fingerprint() -> None:
#
# seed = "bfc4cbaad0ff131aa97fa30a48d09ae7df914bcc083af1e07793cd0a7c61a03f65d622848209ad3366a419f4718a80ec9037df107d8d12c19b83202de00a40ad"
# xprv = rootxprv_from_seed(seed)
# pf = fingerprint(xprv) # xprv is automatically converted to xpub
# child_key = derive(xprv, 0x80000000)
# pf2 = BIP32KeyData.b58decode(child_key).parent_fingerprint
# assert pf == pf2
|
tucbill/manila
|
manila/scheduler/filter_scheduler.py
|
Python
|
apache-2.0
| 8,816 | 0.000113 |
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating shares.
You can customize this scheduler by specifying your own share Filters and
Weighing Functions.
"""
import operator
from manila import exception
from manila.openstack.common import importutils
from manila.openstack.common import log as logging
from manila.scheduler import driver
from manila.scheduler import scheduler_options
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
"""Add additional information to the filter properties after a host has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend. In the event that
the request gets re-scheduled, this entry will signal that the given
backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
msg = _("Invalid value for 'scheduler_max_attempts', "
"must be >=1")
raise exception.InvalidParameterValue(err=msg)
return max_attempts
def schedule_create_share(self, context, request_spec, filter_properties):
weighed_host = self._schedule_share(context,
request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason="")
host = weighed_host.obj.host
share_id = request_spec['share_id']
snapshot_id = request_spec['snapshot_id']
updated_share = driver.share_update_db(context, share_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.share_rpcapi.create_share(context, updated_share, host,
request_spec=request_spec,
filter_properties=filter_properties,
snapshot_id=snapshot_id)
def _schedule_share(self, context, request_spec, filter_properties=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
share_properties = request_spec['share_properties']
# Since Manila is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
share_type = request_spec.get("share_type", {})
resource_type = request_spec.get("share_type", {})
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
        if filter_properties is None:
            filter_properties = {}
self._populate_retry_share(filter_properties, resource_properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
                                  'config_options': config_options,
                                  'share_type': share_type,
                                  'resource_type': resource_type
                                  })
self.populate_filter_properties_share(request_spec, filter_properties)
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states_share(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return None
LOG.debug(_("Filtered share %(hosts)s") % locals())
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
best_host = weighed_hosts[0]
LOG.debug(_("Choosing for share: %(best_host)s") % locals())
#NOTE(rushiagr): updating the available space parameters at same place
best_host.obj.consume_from_volume(share_properties)
return best_host
def _populate_retry_share(self, filter_properties, properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of share service hosts tried
}
filter_properties['retry'] = retry
share_id = properties.get('share_id')
self._log_share_error(share_id, retry)
if retry['num_attempts'] > max_attempts:
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"share %(share_id)s") % locals()
raise exception.NoValidHost(reason=msg)
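    # Illustrative shape of the retry bookkeeping built above (values are made
    # up for the example, not taken from a real request):
    #
    #   filter_properties['retry'] = {
    #       'num_attempts': 2,               # bumped on every reschedule
    #       'hosts': ['hostA@backend1'],     # backends already attempted
    #   }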
def _log_share_error(self, share_id, retry):
"""If the request contained an exception from a previous share
create operation, log it to aid debugging.
"""
exc = retry.pop('exc', None) # string-ified exception from share
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
msg = _("Error scheduling %(share_id)s f
|
djo938/GPSPythonSharingWithPyro
|
pysharegps/gpsSharedData.py
|
Python
|
gpl-3.0
| 1,548 | 0.016796 |
import sys
class sharedGpsData(object):
def __init__(self):
        self.pos = (0.0,0.0,None,) # float latitude (from the equator), float longitude (from Greenwich), datetime (Universal Time Coordinated)
self.alt = (0.0, "M", None,) # float altitude, string scale unit, datetime (Universal Time Coordinated)
self.place = ("",sys.maxint,"M", "", None, ) # string place name, distance from this point, datetime (Universal Time Coordinated)
self.point_to_print = []
self.gpsShareDaemonId = -1
###
def setGpsLogId(self, process_id):
self.gpsShareDaemonId = process_id
def getGpsLogId(self):
        return self.gpsShareDaemonId
###
def setPosition(self, latitude, longitude, dtime):
        self.pos = (latitude, longitude, dtime,)
    def getPosition(self):
return self.pos
###
def setAltitude(self, altitude, dtime, unit = "M"):
self.alt = (altitude, unit, dtime,)
def getAltitude(self):
return self.alt
###
def setPlace(self, placename, distance, distanceType, dtime, unit = "M"):
self.place = (placename, distance, unit, distanceType, dtime,)
def getPlace(self):
return self.place
###
def addPointOfInterest(self, lat, lon, key, descr=None):
self.point_to_print.append( (lat, lon, key, descr,) )
def getAndResetPointOfInterest(self):
toRet = self.point_to_print
self.point_to_print = []
return toRet
|
YeoLab/gscripts
|
gscripts/general/downsample_bam.py
|
Python
|
mit
| 4,356 | 0.008494 |
__author__ = 'gpratt'
__author__ = 'gpratt'
import argparse
import subprocess
import os
def wrap_wait_error(wait_result):
if wait_result != 0:
raise NameError("Failed to execute command correctly {}".format(wait_result))
def pre_process_bam(bam, bam01, bam02, bam03, bam04, bam05, bam06, bam07, bam08, bam09, no_shuffle, no_sort):
#split bam file into two, return file handle for the two bam files
print "word counting"
p = subprocess.Popen("samtools view {} | wc -l".format(bam), shell=True, stdout=subprocess.PIPE) # Number of reads in the tagAlign file
stdout, stderr = p.communicate()
nlines = int(stdout)
print "header counting"
p = subprocess.Popen("samtools view -H {} | wc -l".format(bam), shell=True, stdout=subprocess.PIPE) # Number of header lines (for when we've got a lot of chromosomes)
stdout, stderr = p.communicate()
n_header_lines = int(stdout)
if no_shuffle:
shuffled_bam = os.path.splitext(bam)[0]
else: #shuffle
shuffled_bam = os.path.splitext(os.path.basename(bam))[0] + "_shuff"
p = subprocess.Popen("samtools bamshuf {0} {1}".format(bam, shuffled_bam), shell=True) # This will shuffle the lines in the file and split it into two parts
wrap_wait_error(p.wait())
bam_and_percent = [(bam01, int(nlines * .1) + n_header_lines),
(bam02, int(nlines * .2) + n_header_lines),
(bam03, int(nlines * .3) + n_header_lines),
(bam04, int(nlines * .4) + n_header_lines),
(bam05, int(nlines * .5) + n_header_lines),
(bam06, int(nlines * .6) + n_header_lines),
(bam07, int(nlines * .7) + n_header_lines),
(bam08, int(nlines * .8) + n_header_lines),
(bam09, int(nlines * .9) + n_header_lines),]
cmds = []
for bam_file, percent in bam_and_percent:
if percent % 2 == 1:
percent -= 1
        if no_sort:  # if we aren't sorting, just subset the reads without sorting
#Make sure I select pairs of reads
cmd = "samtools view -h {0}.bam | head -n {1} | samtools view -bS - -o {2}".format(shuffled_
|
bam, percent, bam_file)
else: #sort
cmd = "samtools view -h {0}.bam | head -n {1} | samtools view -bS - | samtools sort - -o {2} && samtools index {2}".format(shuffled_bam,
|
percent, bam_file)
print cmd
p1 = subprocess.Popen(cmd, shell=True)
wrap_wait_error(p1.wait())
    if not no_shuffle:  # only remove the temporary shuffled bam if we actually created it
p1 = subprocess.Popen("rm {0}.bam".format(shuffled_bam), shell=True)
wrap_wait_error(p1.wait())
return bam01, bam02
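# Worked example of the line-count math above (illustrative numbers): for a bam
# with 1,000,000 reads and 25 header lines, the 50% output keeps
# int(1000000 * .5) + 25 = 500025 lines of "samtools view -h" output; since that
# is odd it is decremented to 500024 so mate pairs are not split across the cut.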
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Downsamples bam to a given number of reads')
parser.add_argument(
'--bam', required=True, help='bam file to split')
parser.add_argument(
'--bam01', required=True, help='name of first output bam')
parser.add_argument(
'--bam02', required=True, help='name of second output bam')
parser.add_argument(
'--bam03', required=True, help='name of third output bam')
parser.add_argument(
'--bam04', required=True, help='name of fourth output bam')
parser.add_argument(
'--bam05', required=True, help='name of fith output bam')
parser.add_argument(
'--bam06', required=True, help='name of sixth output bam')
parser.add_argument(
'--bam07', required=True, help='name of seventh output bam')
parser.add_argument(
'--bam08', required=True, help='name of eighth output bam')
parser.add_argument(
'--bam09', required=True, help='name of ninth output bam')
parser.add_argument("--no_shuffle", action="store_true", help="Don't shuffle input bam file, only use this if input bam is already somehow shuffled")
parser.add_argument("--no_sort", action="store_true", help="Don't sort the resulting bam files")
args = parser.parse_args()
bam01, bam02 = pre_process_bam(args.bam, args.bam01, args.bam02, args.bam03,
args.bam04, args.bam05, args.bam06, args.bam07,
args.bam08, args.bam09, args.no_shuffle, args.no_sort)
|
Arcbot-Org/Arcbot
|
tests/discord/test_permission.py
|
Python
|
gpl-3.0
| 1,719 | 0 |
import unittest
from bolt.discord.permissions import Permission
class TestPermission(unittest.TestCase):
def test_permission_from_list_to_list(self):
expected = ['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS']
permission = Permission(['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS'])
actual = permission.to_list()
self.assertListEqual(sorted(actual), sorted(expected))
def test_permission_from_int_to_list(self):
expected = ['ADMINISTRATOR', 'SEND_MESSAGES']
permission = Permission(2056)
actual = permission.to_list()
self.assertListEqual(sorted(actual), sorted(expected))
def test_permission_in_permission(self):
self.assertTrue("ADMINISTRATOR" in Permission(2056))
def test_permissions_in_permission(self):
self.assertTrue(["ADMINISTRATOR", "SEND_MESSAGES"] in Permission(2056))
def test_permission_not_in_permission(self):
self.assertTrue("USE_VAD" not in Permission(2056))
def test_permissions_not_in_permission(self):
self.assertTrue(["SPEAK", "MANAGE_EMOJIS"] not in Permission(2056))
def test_permission_add(self):
        permission = Permission(2056)
self.assertTrue(permission.allows("ADMINISTRATOR"))
self.assertFalse(permission.allows("MENTION_EVERYONE"))
permission.add("
|
MENTION_EVERYONE")
self.assertTrue(permission.allows("MENTION_EVERYONE"))
def test_permission_remove(self):
permission = Permission(2056)
self.assertTrue(permission.allows("ADMINISTRATOR"))
self.assertTrue(permission.allows("SEND_MESSAGES"))
permission.remove("SEND_MESSAGES")
self.assertFalse(permission.allows("SEND_MESSAGES"))
|
tanglei528/glance
|
glance/tests/unit/v2/test_registry_client.py
|
Python
|
apache-2.0
| 24,618 | 0.000081 |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
This tests are temporary and will be removed once
the registry's driver tests will be added.
"""
import copy
import datetime
import os
import uuid
import mox
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())
#NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""
Test proper actions made for both valid and invalid requests
against a Registry service
"""
# Registry server to user
# in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(len(images), 2)
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='name', sort_dir='asc')
        self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
                               unjsonify=False)
def test_get_index_sort_status_desc(self):
"""
Tests that the registry API returns list of
        public images sorted alphabetically by status in
        descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='status', sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by disk_format in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='disk_format',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by container_format in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='container_format',
sort_dir='desc')
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""
Tests that the registry API returns list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='size', sort_dir='asc')
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=Fa
|
thomasanderson9000/ipredator_btt
|
transmission/healthcheck.py
|
Python
|
apache-2.0
| 1,116 | 0.005376 |
#!/usr/bin/env python
from __future__ import print_function
from time import sleep
from subprocess import call
import os
from drive import get_active_torrents_count, get_vpn_ip, WAIT_CYCLE
MAX_FAIL=2
def transmission_down():
try:
_ = get_active_torrents_count()
return False
except Exception as exc:
print("Problem getting active torrent count: {}".format(exc))
return True
def vpn_down():
try:
_ = get_vpn_ip()
return False
except Exception as exc:
print("Problem g
|
etting vpn IP: {}".format(exc))
return True
def suicide():
"""Kill tini which will cause everything to restart properly."""
print("Something went wrong, comitting suicide.")
call("pkill -f tini", shell=True)
if __name__ == "__main__":
fail_count = 0
while True:
sleep(WAIT_CYCLE)
print("Health checking...")
if transmission_down() or vpn_down():
            fail_count += 1
            print("Fail count: {}".format(fail_count))
else:
fail_count = 0
if fail_count >= MAX_FAIL:
suicide()
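            # With MAX_FAIL = 2 this fires only after two consecutive failed
            # checks, i.e. roughly 2 * WAIT_CYCLE seconds of sustained outage
            # (WAIT_CYCLE is imported from drive and not shown in this file).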
|
wikimedia/pywikibot-core
|
pywikibot/families/meta_family.py
|
Python
|
mit
| 564 | 0 |
"""Family module for Meta Wiki."
|
""
#
# (C) Pywikibot team, 2005-2020
#
# Distributed under the terms of the MIT license.
#
from pywikibot import family
# The Wikimedia Meta-Wiki family
class Family(family.WikimediaOrgFamily):
"""Family class for Meta Wiki."""
|
name = 'meta'
interwiki_forward = 'wikipedia'
cross_allowed = ['meta', ]
category_redirect_templates = {
'meta': (
'Category redirect',
),
}
# Subpages for documentation.
doc_subpages = {
'_default': (('/doc',), ['meta']),
}
|
selfcommit/gaedav
|
pyxml/sax/saxutils.py
|
Python
|
lgpl-2.1
| 24,606 | 0.005486 |
"""
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.35 2004/03/20 07:46:04 fdrake Exp $
"""
import os, urlparse, urllib2, types
import handler
import xmlreader
import sys, _exceptions, saxlib
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError: # 1.5 compatibility:UnicodeType not defined
_StringTypes = [types.StringType]
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("&", "&")
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
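# Behaviour sketch for the helpers above (worked out from the code, not quoted
# from the original documentation):
#   escape('<a & b>')             -> '&lt;a &amp; b&gt;'
#   unescape('&lt;a &amp; b&gt;') -> '<a & b>'
#   quoteattr('say "hi"')         -> the value wrapped in single quotes, because
#                                    it contains a double quote but no single quote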
# --- DefaultHandler
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
handler.ContentHandler, handler.ErrorHandler):
"""Default base class for SAX2 event handlers. Implements empty
methods for all callback methods, which can be overridden by
application implementors. Replaces the deprecated SAX1 HandlerBase
class."""
# --- Location
class Location:
"""Represents a location in an XML entity. Initialized by being passed
a locator, from which it reads off the current location, which is then
stored internally."""
def __init__(self, locator):
self.__col = locator.getColumnNumber()
self.__line = locator.getLineNumber()
self.__pubid = locator.getPublicId()
self.__sysid = locator.getSystemId()
def getColumnNumber(self):
return self.__col
def getLineNumber(self):
return self.__line
def getPublicId(self):
return self.__pubid
def getSystemId(self):
return self.__sysid
def __str__(self):
if self.__line is None:
line = "?"
else:
line = self.__line
if self.__col is None:
col = "?"
else:
col = self.__col
return "%s:%s:%s" % (
self.__sysid or self.__pubid or "<unknown>",
line, col)
# --- ErrorPrinter
class ErrorPrinter:
"A simple class that just prints error messages to standard out."
def __init__(self, level=0, outfile=sys.stderr):
self._level = level
self._outfile = outfile
def warning(self, exception):
if self._level <= 0:
self._outfile.write("WARNING in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def error(self, exception):
if self._level <= 1:
self._outfile.write("ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def fatalError(self, exception):
if self._level <= 2:
self._outfile.write("FATAL ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def __getpos(self, exception):
if isinstance(exception, _exceptions.SAXParseException):
return "%s:%s:%s" % (exception.getSystemId(),
exception.getLineNumber(),
exception.getColumnNumber())
else:
return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
"A simple class that just raises the exceptions it is passed."
def __init__(self, level = 0):
self._level = level
def error(self, exception):
if self._level <= 1:
raise exception
def fatalError(self, exception):
if self._level <= 2:
raise exception
def warning(self, exception):
if self._level <= 0:
raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
import codecs
def _outputwrapper(stream,encoding):
writerclass = codecs.lookup(encoding)[3]
return writerclass(stream)
if hasattr(codecs, "register_error"):
def writetext(stream, text, entities={}):
stream.errors = "xmlcharrefreplace"
stream.write(escape(text, entities))
stream.errors = "strict"
else:
def writetext(stream, text, entities={}):
text = escape(text, entities)
try:
stream.write(text)
except UnicodeError:
for c in text:
try:
stream.write(c)
except UnicodeError:
stream.write(u"&#%d;" % ord(c))
def writeattr(stream, text):
countdouble = text.count('"')
if countdouble:
countsingle = text.count("'")
if countdouble <= countsingle:
entities = {'"': """}
quote = '"'
else:
entities = {"'": "'"}
quote = "'"
else:
entities = {}
quote = '"'
stream.write(quote)
writetext(stream, text, entities)
stream.write(quote)
class XMLGenerator(handler.ContentHandler):
GENERATED_PREFIX = "pyxml.sax.saxutils.prefix%s"
def __init__(self, out=None, encoding="iso-8859-1"):
if out is None:
import sys
out = sys.stdout
handler.ContentHandler.__init__(self)
self._out = _outputwrapper(out,encoding)
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._generated_prefix_ctr = 0
return
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s=' % name)
writeattr(self._out, value)
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
if name[0] is None:
name = name[1]
elif self._current_context[name[0]] is None:
|
tensorflow/tfx
|
tfx/dsl/compiler/testdata/foreach_pipeline.py
|
Python
|
apache-2.0
| 2,580 | 0.003488 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample pipeline with ForEach context."""
from tfx.components import CsvExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.dsl.components.common import resolver
from tfx.dsl.control_flow import for_each
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.orchestration import pipeline
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
def create_test_pipeline():
"""Creates a sample pipeline with ForEach context."""
example_gen = CsvExampleGen(input_base='/data/mydummy_dataset')
with for_each.ForEach(example_gen.outputs['examples']) as each_example:
statistics_gen = StatisticsGen(examples=each_example)
latest_stats_resolver = resolver.Resolver(
statistics=statistics_gen.outputs['statistics'],
strategy_class=latest_artifact_strategy.LatestArtifactStrategy,
    ).with_id('latest_stats_resolver')
    schema_gen = SchemaGen(statistics=latest_stats_resolver.outputs['statistics'])
with for_each.ForEach(example_gen.outputs['examples']) as each_example:
trainer = Trainer(
module_file='/src/train.py',
examples=each_example,
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(num_steps=2000),
)
with for_each.ForEach(trainer.outputs['model']) as each_model:
pusher = Pusher(
model=each_model,
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory='/models')),
)
return pipeline.Pipeline(
pipeline_name='foreach',
pipeline_root='/tfx/pipelines/foreach',
components=[
example_gen,
statistics_gen,
latest_stats_resolver,
schema_gen,
trainer,
pusher,
],
enable_cache=True,
execution_mode=pipeline.ExecutionMode.SYNC,
)
|
goodmami/penman
|
penman/transform.py
|
Python
|
mit
| 13,014 | 0 |
"""
Tree and graph transformations.
"""
from typing import Optional, Dict, Set, List, Tuple
import logging
from penman.types import (Variable, Target, BasicTriple, Node)
from penman.exceptions import ModelError
from penman.epigraph import (Epidatum, Epidata)
from penman.surface import (Alignment, RoleAlignment, alignments)
from penman.tree import (Tree, is_atomic)
from penman.graph import (Graph, CONCEPT_ROLE)
from penman.model import Model
from penman.layout import (
Push,
Pop,
POP,
appears_inverted,
get_pushed_variable,
)
logger = logging.getLogger(__name__)
def canonicalize_roles(t: Tree, model: Model) -> Tree:
"""
Normalize roles in *t* so they are canonical according to *model*.
This is a tree transformation instead of a graph transformation
because the orientation of the pure graph's triples is not decided
until the graph is configured into a tree.
Args:
t: a :class:`~penman.tree.Tree` object
model: a model defining role normalizations
Returns:
A new :class:`~penman.tree.Tree` object with canonicalized
roles.
Example:
>>> from penman.codec import PENMANCodec
>>> from penman.models.amr import model
>>> from penman.transform import canonicalize_roles
>>> codec = PENMANCodec()
>>> t = codec.parse('(c / chapter :domain-of 7)')
>>> t = canonicalize_roles(t, model)
>>> print(codec.format(t))
(c / chapter
:mod 7)
"""
if model is None:
model = Model()
tree = Tree(_canonicalize_node(t.node, model), metadata=t.metadata)
logger.info('Canonicalized roles: %s', tree)
return tree
def _canonicalize_node(node: Node, model: Model) -> Node:
var, edges = node
canonical_edges = []
for i, edge in enumerate(edges):
role, tgt = edge
# alignments aren't parsed off yet, so handle them superficially
role, tilde, alignment = role.partition('~')
if not is_atomic(tgt):
tgt = _canonicalize_node(tgt, model)
canonical_role = model.canonicalize_role(role) + tilde + alignment
canonical_edges.append((canonical_role, tgt))
return (var, canonical_edges)
def reify_edges(g: Graph, model: Model) -> Graph:
"""
Reify all edges in *g* that have reifications in *model*.
Args:
g: a :class:`~penman.graph.Graph` object
model: a model defining reifications
Returns:
A new :class:`~penman.graph.Graph` object with reified edges.
Example:
>>> from penman.codec import PENMANCodec
>>> from penman.models.amr import model
>>> from penman.transform import reify_edges
>>> codec = PENMANCodec(model=model)
>>> g = codec.decode('(c / chapter :mod 7)')
>>> g = reify_edges(g, model)
>>> print(codec.encode(g))
(c / chapter
:ARG1-of (_ / have-mod-91
:ARG2 7))
"""
vars = g.variables()
if model is None:
model = Model()
new_epidata = dict(g.epidata)
new_triples: List[BasicTriple] = []
for triple in g.triples:
if model.is_role_reifiable(triple[1]):
in_triple, node_triple, out_triple = model.reify(triple, vars)
if appears_inverted(g, triple):
in_triple, out_triple = out_triple, in_triple
new_triples.extend((in_triple, node_triple, out_triple))
var = node_triple[0]
vars.add(var)
# manage epigraphical markers
new_epidata[in_triple] = [Push(var)]
old_epis = new_epidata.pop(triple) if triple in new_epidata else []
node_epis, out_epis = _edge_markers(old_epis)
new_epidata[node_triple] = node_epis
            new_epidata[out_triple] = out_epis
# we don't know where to put the final POP without configuring
# the tree; maybe this should be a tree operation?
else:
new_triples.append(triple)
g = Graph(new_triples,
epidata=new_epidata,
metadata=g.metadata)
logger.info('Reified edges: %s', g)
return g
def dereify_edges(g: Graph, model: Model) -> Graph:
"""
    Dereify edges in *g* that have reifications in *model*.
Args:
g: a :class:`~penman.graph.Graph` object
Returns:
A new :class:`~penman.graph.Graph` object with dereified
edges.
Example:
>>> from penman.codec import PENMANCodec
>>> from penman.models.amr import model
>>> from penman.transform import dereify_edges
>>> codec = PENMANCodec(model=model)
>>> g = codec.decode(
... '(c / chapter'
... ' :ARG1-of (_ / have-mod-91'
... ' :ARG2 7))')
>>> g = dereify_edges(g, model)
>>> print(codec.encode(g))
(c / chapter
:mod 7)
"""
if model is None:
model = Model()
agenda = _dereify_agenda(g, model)
new_epidata = dict(g.epidata)
new_triples: List[BasicTriple] = []
for triple in g.triples:
var = triple[0]
if var in agenda:
first, dereified, epidata = agenda[var]
# only insert at the first triple so the dereification
# appears in the correct location
if triple == first:
new_triples.append(dereified)
new_epidata[dereified] = epidata
if triple in new_epidata:
del new_epidata[triple]
else:
new_triples.append(triple)
g = Graph(new_triples,
epidata=new_epidata,
metadata=g.metadata)
logger.info('Dereified edges: %s', g)
return g
def reify_attributes(g: Graph) -> Graph:
"""
Reify all attributes in *g*.
Args:
g: a :class:`~penman.graph.Graph` object
Returns:
A new :class:`~penman.graph.Graph` object with reified
attributes.
Example:
>>> from penman.codec import PENMANCodec
>>> from penman.models.amr import model
>>> from penman.transform import reify_attributes
>>> codec = PENMANCodec(model=model)
>>> g = codec.decode('(c / chapter :mod 7)')
>>> g = reify_attributes(g)
>>> print(codec.encode(g))
(c / chapter
:mod (_ / 7))
"""
variables = g.variables()
new_epidata = dict(g.epidata)
new_triples: List[BasicTriple] = []
i = 2
for triple in g.triples:
source, role, target = triple
if role != CONCEPT_ROLE and target not in variables:
# get unique var for new node
var = '_'
while var in variables:
var = f'_{i}'
i += 1
variables.add(var)
role_triple = (source, role, var)
node_triple = (var, CONCEPT_ROLE, target)
new_triples.extend((role_triple, node_triple))
# manage epigraphical markers
old_epis = new_epidata.pop(triple) if triple in new_epidata else []
role_epis, node_epis = _attr_markers(old_epis)
new_epidata[role_triple] = role_epis + [Push(var)]
new_epidata[node_triple] = node_epis + [POP]
else:
new_triples.append(triple)
g = Graph(new_triples,
epidata=new_epidata,
metadata=g.metadata)
logger.info('Reified attributes: %s', g)
return g
def indicate_branches(g: Graph, model: Model) -> Graph:
"""
Insert TOP triples in *g* indicating the tree structure.
Note:
This depends on *g* containing the epigraphical layout markers
from parsing; it will not work with programmatically
constructed Graph objects or those whose epigraphical data
were removed.
Args:
g: a :class:`~penman.graph.Graph` object
model: a model defining the TOP role
Returns:
A new :class:`~penman.graph.Graph` object with TOP roles
indicating tree branches.
Example:
>>> from penman.codec import PENMANCodec
>>> from penman.models.amr import model
>>> from penman.transfor
|
jawilson/home-assistant
|
homeassistant/components/ovo_energy/__init__.py
|
Python
|
apache-2.0
| 3,757 | 0.000799 |
"""Support for OVO Energy."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
import aiohttp
import async_timeout
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up OVO Energy from a config entry."""
client = OVOEnergy()
try:
authenticated = await client.authenticate(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
)
except aiohttp.ClientError as exception:
_LOGGER.warning(exception)
raise ConfigEntryNotReady from exception
if not authenticated:
raise ConfigEntryAuthFailed
async def async_update_data() -> OVODailyUsage:
"""Fetch data from OVO Energy."""
async with async_timeout.timeout(10):
try:
authenticated = await client.authenticate(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
)
except aiohttp.ClientError as exception:
raise UpdateFailed(exception) from exception
if not authenticated:
raise ConfigEntryAuthFailed("Not authenticated with OVO Energy")
return await client.get_daily_usage(datetime.utcnow().strftime("%Y-%m"))
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="sensor",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=3600),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_CLIENT: client,
DATA_COORDINATOR: coordinator,
}
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
# Setup components
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigType) -> bool:
"""Unload OVO Energy config entry."""
# Unload sensors
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
del hass.data[DOMAIN][entry.entry_id]
return unload_ok
class OVOEnergyEntity(CoordinatorEntity):
"""Defines a base OVO Energy entity."""
def __init__(
        self,
coordinator: DataUpdateCoordinator,
client: OVOEnergy,
    ) -> None:
"""Initialize the OVO Energy entity."""
super().__init__(coordinator)
self._client = client
class OVOEnergyDeviceEntity(OVOEnergyEntity):
"""Defines a OVO Energy device entity."""
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this OVO Energy instance."""
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, self._client.account_id)},
manufacturer="OVO Energy",
name=self._client.username,
)
|
edenhuangSH/STA663_Final_Project
|
setup.py
|
Python
|
gpl-3.0
| 1,203 | 0.010806 |
"""Hidden Markov Models implemented in linear memory/running time"""
from distutils.core import setup
from distutils.extension import Extension
import numpy
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = { }
ext_modules = [ ]
if use_cython:
ext_modules += [
Extension("linearhmm.score", [ "linearhmm/score.pyx" ], include_dirs=[numpy.get_include()]),
]
cmdclass.update({ 'build_ext': build_ext })
else:
ext_modules += [
Extension("line
|
arhmm.score", [ "linearhmm/score.c" ],include_dirs=[numpy.get_include()]),
]
import linearhmm
VERSION = linearhmm.__version__
# MAINTAINER = "Sergei Lebedev"
# MAINTAINER_EMAIL = "superbobry@gmail.com"
install_requires = ["numpy"]
tests_require = install_requires + ["pytest"]
setup_options = dict(
name="linearhmm",
version=VERSION,
# maintainer=MAINTAINER,
# maintainer_email=MAINTAINER_EMAIL,
url="https://github.com/hmmlearn/hmmlearn",
packages=["linearhmm"],
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=install_requires,
)
if __name__ == "__main__":
setup(**setup_options)
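# Typical build invocation (assumed usage, not part of the original file): with
# Cython installed the extension is compiled from linearhmm/score.pyx, otherwise
# from the pre-generated linearhmm/score.c, e.g.
#
#   python setup.py build_ext --inplace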
|