Dataset columns:

    text        string    lengths 6 to 947k
    repo_name   string    lengths 5 to 100
    path        string    lengths 4 to 231
    language    string    1 class
    license     string    15 classes
    size        int64     6 to 947k
    score       float64   0 to 0.34
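Each record that follows supplies one value per column, in the order listed above. As a minimal sketch (the dict-per-record layout and the filtering example are illustrative assumptions, not something this dump specifies), the first record below could be represented like this:

# First record of this dump as a plain Python dict; the values are taken from
# the record that follows, and the "text" field is truncated for readability.
record = {
    "text": "import os\nimport sys\nimport json\nimport pdb\n...",
    "repo_name": "latteBot/piRAT",
    "path": "piRAT/core.py",
    "language": "Python",
    "license": "apache-2.0",
    "size": 1729,        # presumably the length of "text" in bytes
    "score": 0.039329,
}

# The numeric columns allow simple filtering, e.g. keeping small, permissively
# licensed files:
keep = record["size"] < 5000 and record["license"] in {"mit", "apache-2.0", "bsd-2-clause"}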
import os
import sys
import json
import pdb


class client:
    def __init__(self):
        self.defaultEncoding = ""
        self.system = ""
        self.deviceName = ""
        self.pythonVersion = ""
        self.username = ""
        self.cwd = ""
        self.filesInCurrentDirectory = ""
        self.currentUserID = ""
        self.OS = ""
        self.serverPort = 0
        self.serverIP = ""

    def create_beacon(self, serverIP, serverPort):
        self.serverIP = serverIP
        self.serverPort = serverPort

    def send_beacon(self):
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serverConnection = s.connect_ex(self.serverIP, self.serverPort)

    def enumerate(self):
        self.defaultEncoding = sys.getdefaultencoding()
        self.system = sys.platform
        # Determine operating system based on sys.platform
        if self.system != None:
            self.determineSystem()
        self.deviceName = os.uname()
        self.pythonVersion = sys.version
        #self.username = os.getlogin()
        self.cwd = os.getcwd()
        self.filesInCurrentDirectory = os.listdir(os.getcwd())
        self.currentUserID = os.getuid()

    def determineSystem(self):
        # System Dict based upon sys.platform responses in Python
        systemDict = {'linux2': 'Linux (2.x and 3.x)', 'win32': 'Windows', 'cygwin': 'Windows/Cygwin',
                      'darwin': 'Mac OS X', 'os2': 'OS/2', 'os2emx': 'OS/2 EMX', 'riscos': 'RiscOS',
                      'atheos': 'AtheOS', 'freebsd7': 'FreeBSD 7', 'freebsd8': 'FreeBSD 8',
                      'freebsd9': 'FreeBSD 9', 'freebsd10': 'FreeBSD10', 'freebsd11': 'FreeBSD 11'}
        # Assign the correct sys.platform response to the self.OS value
        for key in systemDict:
            if key == self.system:
                self.OS = systemDict[key]

    def get_client_info(self):
        return json.dumps(self.__dict__)

    def get_server_info(self):
        return str(self.serverIP) + ':' + str(self.serverPort)
latteBot/piRAT
piRAT/core.py
Python
apache-2.0
1,729
0.039329
# Generated by Django 3.0.3 on 2020-02-06 10:24

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("example", "0007_artproject_description"),
    ]

    operations = [
        migrations.CreateModel(
            name="LabResults",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date", models.DateField()),
                ("measurements", models.TextField()),
                (
                    "research_project",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="lab_results",
                        to="example.ResearchProject",
                    ),
                ),
            ],
        ),
    ]
django-json-api/rest_framework_ember
example/migrations/0008_labresults.py
Python
bsd-2-clause
1,087
0
"""Anchore Jenkins plugin source up-to-dateness collector.""" from base_collectors import JenkinsPluginSourceUpToDatenessCollector class AnchoreJenkinsPluginSourceUpToDateness(JenkinsPluginSourceUpToDatenessCollector): """Collector for the up to dateness of the Anchore Jenkins plugin security report."""
ICTU/quality-time
components/collector/src/source_collectors/anchore_jenkins_plugin/source_up_to_dateness.py
Python
apache-2.0
312
0.00641
import itertools def topological_order(graph): global current_label current_label = len(graph) global label_offset label_offset = -1 global ordered_graph ordered_graph = {} explored_nodes = [] for node in graph: if node not in explored_nodes: explored_nodes.extend(dfs(graph, node, explored_nodes=explored_nodes)) return ordered_graph def dfs(graph, start_node, explored_nodes=None, stack=None): """ Given a graph as a dict of lists and a start node (a key in the graph dict) perform a depth first search """ #print("dfs(g, start_node: {}, explored_nodes: {}, stack: {})".format(start_node, explored_nodes, stack)) if not explored_nodes: explored_nodes = [] if not stack: stack = [] stack.append(start_node) tail = start_node explored_nodes.append(tail) #print("added start node {} to stack and explored nodes".format(start_node)) #print("stack: {}, en: {}".format(stack, explored_nodes)) try: for head in graph[tail]: #print("head: {}, en: {}".format(head, explored_nodes)) if head not in explored_nodes: dfs(graph, head, explored_nodes, stack) except KeyError: # tail has no outgoing edges pass #print("stack pre-pop: {}".format(stack)) stack.pop() #print("stack post-pop: {}".format(stack)) #print("start_node: {}".format(start_node)) # If current_label is set, we want to compute topological ordering global current_label global label_offset global ordered_graph if 'current_label' in globals(): #print("setting {} to {}".format(start_node, current_label)) ordered_graph[start_node] = current_label current_label += label_offset return explored_nodes def reverse_graph(graph): reversed_graph = {} for vertex in graph: for head in graph[vertex]: if head not in reversed_graph: reversed_graph[head] = [] reversed_graph[head].append(vertex) return reversed_graph def get_graph_finishing_times(graph): reversed_graph = reverse_graph(graph) reversed_graph_explored_nodes = [] global current_label current_label = 1 global label_offset label_offset = 1 global ordered_graph ordered_graph = {} for node in reversed(list(reversed_graph.keys())): if node not in reversed_graph_explored_nodes: reversed_graph_explored_nodes.extend( dfs(reversed_graph, node, explored_nodes=reversed_graph_explored_nodes ) ) return ordered_graph def get_strongly_connected_components(graph): # Kosaraju two-pass graph_finishing_times = get_graph_finishing_times(graph) rgft = dict([(v,k) for k,v in graph_finishing_times.items()]) explored_nodes = [] leader_nodes = [] for n in range(len(rgft), 0, -1): node = rgft[n] if node not in explored_nodes: leader_nodes.append(node) explored_nodes.extend( dfs(graph, node, explored_nodes=explored_nodes ) ) print(leader_nodes) sccs = [] scc_explored_nodes = [] last_dfsr = [] for node in leader_nodes: print("N: {}".format(node)) dfs_result = dfs(graph, node, explored_nodes=scc_explored_nodes) print(" dfsr: {}, last_dfsr: {}".format(dfs_result, last_dfsr)) scc = list(set(dfs_result) - set(last_dfsr)) scc_explored_nodes.extend(scc) scc_explored_nodes = list(set(scc_explored_nodes)) print(" explored: {}".format(scc_explored_nodes)) print(" scc: {}".format(scc)) last_dfsr = dfs_result sccs.append(scc) print(" sccs: {}".format(sccs)) return sccs
Preston4tw/elearning
coursera/algo-pt1/week4/graph_primitives.py
Python
mit
3,909
0.005116
# oppia/gamification/models.py from django.contrib.auth.models import User from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils import timezone from oppia.models import Course, Activity, Media class DefaultGamificationEvent(models.Model): GLOBAL = 'global' COURSE = 'course' ACTIVITY = 'activity' QUIZ = 'quiz' MEDIA = 'media' LEVELS = ( (GLOBAL, 'Global'), (COURSE, 'Course'), (ACTIVITY, 'Activity'), (QUIZ, 'Quiz'), (MEDIA, 'Media') ) event = models.CharField(max_length=100) points = models.IntegerField() level = models.CharField(max_length=20, choices=LEVELS) label = models.CharField(max_length=100) helper_text = models.TextField(null=True, default=None) class Meta: verbose_name = _(u'Default Gamification Event') verbose_name_plural = _(u'Default Gamification Events') def __str__(self): return self.event class GamificationEvent(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField('date created', default=timezone.now) event = models.CharField(max_length=100) points = models.IntegerField() class Meta: abstract = True def __init__(self, *args, **kwargs): super(GamificationEvent, self).__init__(*args, **kwargs) self.__default_event = None def __str__(self): return self.event @property def default_event(self): if not self.__default_event: self.__default_event = DefaultGamificationEvent.objects \ .get(event=self.event) return self.__default_event def get_label(self): return self.default_event.label def get_helper_text(self): return self.default_event.helper_text class CourseGamificationEvent(GamificationEvent): course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='gamification_events') class Meta: verbose_name = _(u'Course Gamification Event') verbose_name_plural = _(u'Course Gamification Events') class ActivityGamificationEvent(GamificationEvent): activity = models.ForeignKey(Activity, on_delete=models.CASCADE, related_name='gamification_events') class Meta: verbose_name = _(u'Activity Gamification Event') verbose_name_plural = _(u'Activity Gamification Events') class MediaGamificationEvent(GamificationEvent): media = models.ForeignKey(Media, on_delete=models.CASCADE, related_name='gamification_events') class Meta: verbose_name = _(u'Media Gamification Event') verbose_name_plural = _(u'Media Gamification Events')
DigitalCampus/django-oppia
gamification/models.py
Python
gpl-3.0
2,912
0
''' Colour code '''
import webcolors
from operator import itemgetter

# Replacing Matplotlib code with webcolors
CNAMES = webcolors.css3_names_to_hex

# Special gradients
gradients = {}


# Colours from images
def top_colours(image, n):
    ''' Return top-n colours in the image with counts. '''
    size = image.size[0] * image.size[1]
    counts = image.getcolors(size)
    counts = sorted(counts, key=itemgetter(0), reverse=True)[:n]
    return [(x, y) for y, x in counts]


def common_colours(image, min_prop=0.01):
    ''' Return all colours in image above a certain proportion threshold'''
    size = image.size[0] * image.size[1]
    counts = image.getcolors(size)
    min_num = size * min_prop
    counts = sorted(counts, key=itemgetter(0), reverse=True)
    counts = [(y, float(x)/size) for x, y in counts if x >= min_num]
    return counts
snorecore/MincePi
mince/colours.py
Python
mit
856
0.004673
from django import template

from .. import forms

register = template.Library()


@register.filter
def dyn_form(forms, pk):
    return forms[pk]
ainterr/scoring_engine
engine/templatetags/dyn_form.py
Python
mit
144
0.006944
import math
import numpy as np
from firedrake import *
from pyop2 import MPI
from pyop2.profiling import Timer

parameters["pyop2_options"]["profiling"] = True


def measure(name, thunk):
    if MPI.comm.rank == 0:
        print "name:", name
    mesh = thunk()
    mesh.init()

    timer = Timer("Mesh: cell_closure (quadrilateral)")
    runtime = timer._timings[-1]

    sendbuf = np.array([runtime, runtime * runtime], dtype=float)
    recvbuf = MPI.comm.reduce(sendbuf)
    if MPI.comm.rank == 0:
        M1, M2 = recvbuf
        m = M1 / MPI.comm.size
        s = math.sqrt((M2 - M1*M1 / MPI.comm.size) / (MPI.comm.size - 1))
        print "cell_closure seconds %s: %g +- %g" % (name, m, s)


if __name__ == "__main__":
    measure("s_square", lambda: UnitSquareMesh(512, 512, quadrilateral=True))
    measure("s_sphere", lambda: UnitCubedSphereMesh(9))
    measure("u_square", lambda: Mesh("square.msh"))
    measure("u_sphere", lambda: Mesh("sphere.msh"))
    measure("t10", lambda: Mesh("t10.msh"))
    measure("t11", lambda: Mesh("t11.msh"))
miklos1/watton
measure.py
Python
mit
1,052
0
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.pipeline import ClientRawResponse import uuid from .. import models class ParameterGroupingOperations(object): def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def post_required( self, parameter_grouping_post_required_parameters, custom_headers={}, raw=False, **operation_config): """ Post a bunch of required parameters grouped :param parameter_grouping_post_required_parameters: Additional parameters for the operation :type parameter_grouping_post_required_parameters: ParameterGroupingPostRequiredParameters :param dict custom_headers: headers that will be added to the request :param boolean raw: returns the direct response alongside the deserialized response :rtype: None or (None, requests.response) or concurrent.futures.Future """ body = None if parameter_grouping_post_required_parameters is not None: body = parameter_grouping_post_required_parameters.body custom_header = None if parameter_grouping_post_required_parameters is not None: custom_header = parameter_grouping_post_required_parameters.custom_header query = None if parameter_grouping_post_required_parameters is not None: query = parameter_grouping_post_required_parameters.query path = None if parameter_grouping_post_required_parameters is not None: path = parameter_grouping_post_required_parameters.path # Construct URL url = '/parameterGrouping/postRequired/{path}' path_format_arguments = { 'path': self._serialize.url("path", path, 'str') } url = url.format(**path_format_arguments) # Construct parameters query_parameters = {} if query is not None: query_parameters['query'] = self._serialize.query("query", query, 'int') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if custom_header is not None: header_parameters['customHeader'] = self._serialize.header("custom_header", custom_header, 'str') # Construct body body_content = self._serialize.body(body, 'int') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def post_optional( self, parameter_grouping_post_optional_parameters=None, custom_headers={}, raw=False, **operation_config): """ Post a bunch of optional parameters grouped :param parameter_grouping_post_optional_parameters: Additional parameters for the operation :type parameter_grouping_post_optional_parameters: 
ParameterGroupingPostOptionalParameters or None :param dict custom_headers: headers that will be added to the request :param boolean raw: returns the direct response alongside the deserialized response :rtype: None or (None, requests.response) or concurrent.futures.Future """ custom_header = None if parameter_grouping_post_optional_parameters is not None: custom_header = parameter_grouping_post_optional_parameters.custom_header query = None if parameter_grouping_post_optional_parameters is not None: query = parameter_grouping_post_optional_parameters.query # Construct URL url = '/parameterGrouping/postOptional' # Construct parameters query_parameters = {} if query is not None: query_parameters['query'] = self._serialize.query("query", query, 'int') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if custom_header is not None: header_parameters['customHeader'] = self._serialize.header("custom_header", custom_header, 'str') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def post_multiple_parameter_groups( self, first_parameter_group=None, parameter_grouping_post_multiple_parameter_groups_second_parameter_group=None, custom_headers={}, raw=False, **operation_config): """ Post parameters from multiple different parameter groups :param first_parameter_group: Additional parameters for the operation :type first_parameter_group: FirstParameterGroup or None :param parameter_grouping_post_multiple_parameter_groups_second_parameter_group: Additional parameters for the operation :type parameter_grouping_post_multiple_parameter_groups_second_parameter_group: ParameterGroupingPostMultipleParameterGroupsSecondParameterGroup or None :param dict custom_headers: headers that will be added to the request :param boolean raw: returns the direct response alongside the deserialized response :rtype: None or (None, requests.response) or concurrent.futures.Future """ header_one = None if first_parameter_group is not None: header_one = first_parameter_group.header_one query_one = None if first_parameter_group is not None: query_one = first_parameter_group.query_one header_two = None if parameter_grouping_post_multiple_parameter_groups_second_parameter_group is not None: header_two = parameter_grouping_post_multiple_parameter_groups_second_parameter_group.header_two query_two = None if parameter_grouping_post_multiple_parameter_groups_second_parameter_group is not None: query_two = parameter_grouping_post_multiple_parameter_groups_second_parameter_group.query_two # Construct URL url = '/parameterGrouping/postMultipleParameterGroups' # Construct parameters query_parameters = {} if query_one is not None: query_parameters['query-one'] = self._serialize.query("query_one", query_one, 'int') if query_two is not None: query_parameters['query-two'] = self._serialize.query("query_two", query_two, 'int') # Construct headers 
header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if header_one is not None: header_parameters['header-one'] = self._serialize.header("header_one", header_one, 'str') if header_two is not None: header_parameters['header-two'] = self._serialize.header("header_two", header_two, 'str') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def post_shared_parameter_group_object( self, first_parameter_group=None, custom_headers={}, raw=False, **operation_config): """ Post parameters with a shared parameter group object :param first_parameter_group: Additional parameters for the operation :type first_parameter_group: FirstParameterGroup or None :param dict custom_headers: headers that will be added to the request :param boolean raw: returns the direct response alongside the deserialized response :rtype: None or (None, requests.response) or concurrent.futures.Future """ header_one = None if first_parameter_group is not None: header_one = first_parameter_group.header_one query_one = None if first_parameter_group is not None: query_one = first_parameter_group.query_one # Construct URL url = '/parameterGrouping/sharedParameterGroupObject' # Construct parameters query_parameters = {} if query_one is not None: query_parameters['query-one'] = self._serialize.query("query_one", query_one, 'int') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if header_one is not None: header_parameters['header-one'] = self._serialize.header("header_one", header_one, 'str') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response
vulcansteel/autorest
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureParameterGrouping/auto_rest_parameter_grouping_test_service/operations/parameter_grouping_operations.py
Python
mit
11,840
0.00228
"""Helper functions and base classes for restapi module""" from __future__ import print_function import requests import fnmatch import datetime import collections import mimetypes import urllib import tempfile import time import codecs import json import copy import os import sys import munch from itertools import izip_longest from collections import namedtuple, OrderedDict from requests.packages.urllib3.exceptions import InsecureRequestWarning, InsecurePlatformWarning, SNIMissingWarning from ._strings import * # python 3 compat try: basestring except NameError: basestring = str # disable ssl warnings (we are not verifying SSL certificates at this time...future ehnancement?) for warning in [SNIMissingWarning, InsecurePlatformWarning, InsecureRequestWarning]: requests.packages.urllib3.disable_warnings(warning) class IdentityManager(object): """Identity Manager for secured services. This will allow the user to only have to sign in once (until the token expires) when accessing a services directory or individual service on an ArcGIS Server Site""" def __init__(self): self.tokens = {} self.proxies = {} def findToken(self, url): """returns a token for a specific domain from token store if one has been generated for the ArcGIS Server resource Required: url -- url for secured resource """ if self.tokens: if '/admin/' in url: url = url.split('/admin/')[0] + '/admin/services' else: url = url.lower().split('/rest/services')[0] + '/rest/services' if url in self.tokens: if not self.tokens[url].isExpired: return self.tokens[url] else: raise RuntimeError('Token expired at {}! Please sign in again.'.format(token.expires)) return None def findProxy(self, url): """returns a proxy url for a specific domain from token store if one has been used to access the ArcGIS Server resource Required: url -- url for secured resource """ if self.proxies: url = url.lower().split('/rest/services')[0] + '/rest/services' if url in self.proxies: return self.proxies[url] return None # initialize Identity Manager ID_MANAGER = IdentityManager() # temp dir for json outputs TEMP_DIR = os.environ['TEMP'] if not os.access(TEMP_DIR, os.W_OK| os.X_OK): TEMP_DIR = None def namedTuple(name, pdict): """creates a named tuple from a dictionary Required: name -- name of namedtuple object pdict -- parameter dictionary that defines the properties """ class obj(namedtuple(name, sorted(pdict.keys()))): """class to handle {}""".format(name) __slots__ = () def __new__(cls, **kwargs): return super(obj, cls).__new__(cls, **kwargs) def asJSON(self): """return object as JSON""" return {f: getattr(self, f) for f in self._fields} o = obj(**pdict) o.__class__.__name__ = name return o def Round(x, base=5): """round to nearest n""" return int(base * round(float(x)/base)) def tmp_json_file(): """returns a valid path for a temporary json file""" global TEMP_DIR if TEMP_DIR is None: TEMP_DIR = tempfile.mkdtemp() return os.path.join(TEMP_DIR, 'restapi_{}.json'.format(time.strftime('%Y%m%d%H%M%S'))) def do_post(service, params={F: JSON}, ret_json=True, token='', cookies=None, proxy=None): """Post Request to REST Endpoint through query string, to post request with data in body, use requests.post(url, data={k : v}). Required: service -- full path to REST endpoint of service Optional: params -- parameters for posting a request ret_json -- return the response as JSON. Default is True. 
token -- token to handle security (only required if security is enabled) cookies -- cookie object {'agstoken': 'your_token'} proxy -- option to use proxy page to handle security, need to provide full path to proxy url. """ global PROTOCOL if PROTOCOL != '': service = '{}://{}'.format(PROTOCOL, service.split('://')[-1]) if not cookies and not proxy: if not token: token = ID_MANAGER.findToken(service) if token and isinstance(token, Token):# and token.domain.lower() in service.lower(): if isinstance(token, Token) and token.isExpired: raise RuntimeError('Token expired at {}! Please sign in again.'.format(token.expires)) if not token.isAGOL and not token.isAdmin: cookies = {AGS_TOKEN: str(token)} else: if TOKEN not in params: params[TOKEN] = str(token) elif token: if not token.isAGOL and not token.isAdmin: cookies = {AGS_TOKEN: str(token)} else: if TOKEN not in params: params[TOKEN] = str(token) # auto fill in geometry params if a restapi.Geometry object is passed in (derived from BaseGeometry) if params.get(GEOMETRY) and isinstance(params.get(GEOMETRY), BaseGeometry): geometry = params.get(GEOMETRY) if not GEOMETRY_TYPE in params and hasattr(geometry, GEOMETRY_TYPE): params[GEOMETRY_TYPE] = getattr(geometry, GEOMETRY_TYPE) if not IN_SR in params: params[IN_SR] = geometry.getWKID() or geometry.getWKT() for pName, p in params.iteritems(): if isinstance(p, dict) or hasattr(p, 'json'): params[pName] = json.dumps(p, cls=RestapiEncoder) if not F in params: params[F] = JSON if not token and not proxy: proxy = ID_MANAGER.findProxy(service) if token: if isinstance(token, Token): if token.isAGOL or token.isAdmin: params[TOKEN] = str(token) cookies = None if proxy: r = do_proxy_request(proxy, service, params) ID_MANAGER.proxies[service.split('/rest')[0].lower() + '/rest/services'] = proxy else: r = requests.post(service, params, headers={'User-Agent': USER_AGENT}, cookies=cookies, verify=False) # make sure return if r.status_code != 200: raise NameError('"{0}" service not found!\n{1}'.format(service, r.raise_for_status())) else: if ret_json is True: _json = r.json() RequestError(_json) return munch.munchify(_json) else: return r def do_proxy_request(proxy, url, params={}): """make request against ArcGIS service through a proxy. This is designed for a proxy page that stores access credentials in the configuration to handle authentication. It is also assumed that the proxy is a standard Esri proxy, i.e. retrieved from their repo on GitHub @: https://github.com/Esri/resource-proxy Required: proxy -- full url to proxy url -- service url to make request against Optional: params -- query parameters, user is responsible for passing in the proper parameters """ frmat = params.get(F, JSON) if F in params: del params[F] p = '&'.join('{}={}'.format(k,v) for k,v in params.iteritems()) # probably a better way to do this... 
return requests.post('{}?{}?f={}&{}'.format(proxy, url, frmat, p).rstrip('&'), verify=False, headers={'User-Agent': USER_AGENT}) def guess_proxy_url(domain): """grade school level hack to see if there is a standard esri proxy available for a domain Required: domain -- url to domain to check for proxy """ domain = domain.lower().split('/arcgis')[0] if not domain.startswith('http'): domain = 'http://' + domain types = ['.ashx', '.jsp', '.php'] for ptype in types: proxy_url = '/'.join([domain, 'proxy' + ptype]) r = requests.get(proxy_url) # should produce an error in JSON if using esri proxy out of the box try: if r.status_code == 400 or 'error' in r.json(): return r.url except: pass # try again looking to see if it is in a folder called "proxy" for ptype in types: proxy_url = '/'.join([domain, PROXY, PROXY + ptype]) r = requests.get(proxy_url) try: if r.status_code == 400 or r.content: return r.url except: pass return None def validate_name(file_name): """validates an output name by removing special characters""" import string path = os.sep.join(file_name.split(os.sep)[:-1]) #forward slash in name messes up os.path.split() name = file_name.split(os.sep)[-1] root, ext = os.path.splitext(name) d = {s: '_' for s in string.punctuation} for f,r in d.iteritems(): root = root.replace(f,r) return os.path.join(path, '_'.join(root.split()) + ext) def guess_wkid(wkt): """attempts to guess a well-known ID from a well-known text imput (WKT) Required: wkt -- well known text spatial reference """ if wkt in PRJ_STRINGS: return PRJ_STRINGS[wkt] if 'PROJCS' in wkt: name = wkt.split('PROJCS["')[1].split('"')[0] elif 'GEOGCS' in wkt: name = wkt.split('GEOGCS["')[1].split('"')[0] if name in PRJ_NAMES: return PRJ_NAMES[name] return 0 def assign_unique_name(fl): """assigns a unique file name Required: fl -- file name """ if not os.path.exists(fl): return fl i = 1 head, tail = os.path.splitext(fl) new_name = '{}_{}{}'.format(head, i, tail) while os.path.exists(new_name): i += 1 new_name = '{}_{}{}'.format(head, i, tail) return new_name def mil_to_date(mil): """date items from REST services are reported in milliseconds, this function will convert milliseconds to datetime objects Required: mil -- time in milliseconds """ if isinstance(mil, basestring): mil = long(mil) if mil == None: return None elif mil < 0: return datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(seconds=(mil/1000)) else: # safely cast, to avoid being out of range for platform local time try: struct = time.gmtime(mil /1000.0) return datetime.datetime.fromtimestamp(time.mktime(struct)) except Exception as e: print(mil) raise e def date_to_mil(date=None): """converts datetime.datetime() object to milliseconds date -- datetime.datetime() object""" if isinstance(date, datetime.datetime): epoch = datetime.datetime.utcfromtimestamp(0) return long((date - epoch).total_seconds() * 1000.0) def generate_token(url, user, pw, expiration=60): """Generates a token to handle ArcGIS Server Security, this is different from generating a token from the admin side. Meant for external use. Required: url -- url to services directory or individual map service user -- username credentials for ArcGIS Server pw -- password credentials for ArcGIS Server Optional: expiration -- time (in minutes) for token lifetime. Max is 100. 
""" suffix = '/rest/info' isAdmin = False if '/admin/' in url: isAdmin = True if '/rest/admin/' in url: infoUrl = url.split('/rest/')[0] + suffix else: infoUrl = url.split('/admin/')[0] + suffix else: infoUrl = url.split('/rest/')[0] + suffix infoResp = do_post(infoUrl) is_agol = False if AUTH_INFO in infoResp and TOKEN_SERVICES_URL in infoResp[AUTH_INFO]: base = infoResp[AUTH_INFO][TOKEN_SERVICES_URL] is_agol = AGOL_BASE in base if is_agol: base = AGOL_TOKEN_SERVICE global PROTOCOL PROTOCOL = base.split('://')[0] print('set PROTOCOL to "{}" from generate token'.format(PROTOCOL)) try: shortLived = infoResp[AUTH_INFO][SHORT_LIVED_TOKEN_VALIDITY] except KeyError: shortLived = 100 else: base = url.split('/rest/')[0] + '/tokens' shortLived = 100 params = {F: JSON, USER_NAME: user, PASSWORD: pw, CLIENT: REQUEST_IP, EXPIRATION: max([expiration, shortLived])} if is_agol: params[REFERER] = AGOL_BASE del params[CLIENT] resp = do_post(base, params) if is_agol: # now call portal sharing portal_params = {TOKEN: resp.get(TOKEN)} org_resp = do_post(AGOL_PORTAL_SELF,portal_params) org_referer = org_resp.get(URL_KEY) + ORG_MAPS params[REFERER]= org_referer resp = do_post(AGOL_TOKEN_SERVICE, params) if '/services/' in url: resp[DOMAIN] = url.split('/services/')[0] + '/services' elif '/admin/' in url: resp[DOMAIN] = url.split('/admin/')[0] + '/admin' else: resp[DOMAIN] = url resp[IS_AGOL] = is_agol resp[IS_ADMIN] = isAdmin token = Token(resp) ID_MANAGER.tokens[token.domain] = token return token class RestapiEncoder(json.JSONEncoder): """encoder for restapi objects to make serializeable for JSON""" def default(self, o): if isinstance(o, datetime.datetime): return date_to_mil(o) if hasattr(o, JSON): return getattr(o, JSON) elif isinstance(o, (dict, list)): return o try: return o.__dict__ except: return {} class JsonGetter(object): """override getters to also check its json property""" json = {} def get(self, name, default=None): """gets an attribute from json""" return self.json.get(name, default) def dump(self, out_json_file, indent=2, **kwargs): """dump as JSON file""" if hasattr(out_json_file, 'read'): json.dump(self.json, out_json_file, indent=indent, **kwargs) elif isinstance(out_json_file, basestring): head, tail = os.path.splitext(out_json_file) if not tail == '.json': out_json_file = head + '.json' with open(out_json_file, 'w') as f: json.dump(self.json, f, indent=indent, **kwargs) return out_json_file def dumps(self): """dump as string""" return json.dumps(self.json) def __getitem__(self, name): """dict like access to json definition""" if name in self.json: return self.json[name] def __getattr__(self, name): """get normal class attributes and those from json response""" try: # it is a class attribute return object.__getattribute__(self, name) except AttributeError: # it is in the json definition, abstract it to the class level if name in self.json: return self.json[name] else: raise AttributeError(name) def __str__(self): return json.dumps(self.json, sort_keys=True, indent=2, ensure_ascii=False) class RESTEndpoint(JsonGetter): """Base REST Endpoint Object to handle credentials and get JSON response Required: url -- service url Optional (below params only required if security is enabled): usr -- username credentials for ArcGIS Server pw -- password credentials for ArcGIS Server token -- token to handle security (alternative to usr and pw) proxy -- option to use proxy page to handle security, need to provide full path to proxy url. 
""" url = None raw_response = None response = None token = None elapsed = None json = {} _cookie = None _proxy = None def __init__(self, url, usr='', pw='', token='', proxy=None): if PROTOCOL: self.url = PROTOCOL + '://' + url.split('://')[-1].rstrip('/') if not url.startswith(PROTOCOL) else url.rstrip('/') else: self.url = 'http://' + url.rstrip('/') if not url.startswith('http') else url.rstrip('/') if not fnmatch.fnmatch(self.url, BASE_PATTERN): _plus_services = self.url + '/arcgis/rest/services' if fnmatch.fnmatch(_plus_services, BASE_PATTERN): self.url = _plus_services else: RequestError({'error':{'URL Error': '"{}" is an invalid ArcGIS REST Endpoint!'.format(self.url)}}) params = {F: JSON} self.token = token self._cookie = None self._proxy = proxy if not self.token and not self._proxy: if usr and pw: self.token = generate_token(self.url, usr, pw) else: self.token = ID_MANAGER.findToken(self.url) if isinstance(self.token, Token) and self.token.isExpired: raise RuntimeError('Token expired at {}! Please sign in again.'.format(self.token.expires)) elif isinstance(self.token, Token) and not self.token.isExpired: pass else: self.token = None else: if isinstance(self.token, Token) and self.token.isExpired and self.token.domain in self.url.lower(): raise RuntimeError('Token expired at {}! Please sign in again.'.format(self.token.expires)) if self.token: if isinstance(self.token, Token) and self.token.domain.lower() in url.lower(): self._cookie = self.token._cookie else: self._cookie = {AGS_TOKEN: self.token.token if isinstance(self.token, Token) else self.token} if (not self.token or not self._cookie) and not self._proxy: if self.url in ID_MANAGER.proxies: self._proxy = ID_MANAGER.proxies[self.url] self.raw_response = do_post(self.url, params, ret_json=False, token=self.token, cookies=self._cookie, proxy=self._proxy) self.elapsed = self.raw_response.elapsed self.response = self.raw_response.json() self.json = munch.munchify(self.response) RequestError(self.json) def compatible_with_version(self, version): """checks if ArcGIS Server version is compatible with input version. 
A service is compatible with the version if it is greater than or equal to the input version Required: version -- minimum version compatibility as float (ex: 10.3 or 10.31) """ def validate_version(ver): if isinstance(ver, (float, int)): return ver elif isinstance(ver, basestring): try: ver = float(ver) except: # we want an exception here if it does not match the format whole, dec = ver.split('.') ver = float('.'.join([whole, ''.join([i for i in dec if i.isdigit()])])) try: return validate_version(self.currentVersion) >= validate_version(version) except AttributeError: return False def refresh(self): """refreshes the service""" self.__init__(self.url, token=self.token) @classmethod def __get_cls(cls): return cls def __dir__(self): atts = [] bases = self.__get_cls().__bases__ while bases: for base in bases: atts.extend(base.__dict__.keys()) bases = base.__bases__ return sorted(list(set(self.__class__.__dict__.keys() + self.json.keys() + atts))) class SpatialReferenceMixin(object): """mixin to allow convenience methods for grabbing the spatial reference from a service""" json = {} @property def _spatialReference(self): """gets the spatial reference dict""" resp_d = {} if SPATIAL_REFERENCE in self.json: resp_d = self.json[SPATIAL_REFERENCE] elif EXTENT in self.json and SPATIAL_REFERENCE in self.json[EXTENT]: resp_d = self.json[EXTENT][SPATIAL_REFERENCE] return munch.munchify(resp_d) def getSR(self): """return the spatial reference""" resp_d = self._spatialReference for key in [LATEST_WKID, WKID, WKT]: if key in resp_d: return resp_d[key] def getWKID(self): """returns the well known id for service spatial reference""" resp_d = self._spatialReference for key in [LATEST_WKID, WKID]: if key in resp_d: return resp_d[key] def getWKT(self): """returns the well known text (if it exists) for a service""" return self._spatialReference.get(WKT, '') class BaseService(RESTEndpoint, SpatialReferenceMixin): """base class for all services""" def __init__(self, url, usr='', pw='', token='', proxy=None): super(BaseService, self).__init__(url, usr, pw, token, proxy) if NAME not in self.json: self.name = self.url.split('/')[-2] self.name = self.name.split('/')[-1] def __repr__(self): """string representation with service name""" try: qualified_name = '/'.join(filter(None, [self.url.split('/services/')[-1].split('/' + self.name)[0], self.name])) except: qualified_name = self.name return '<{}: {}>'.format(self.__class__.__name__, qualified_name) class Feature(JsonGetter): def __init__(self, feature): """represents a single feature Required: feature -- input json for feature """ self.json = munch.munchify(feature) self.geometry = self.json.get(GEOMETRY) def get(self, field): """gets an attribute from the feature Required: field -- name of field for which to get attribute """ return self.json[ATTRIBUTES].get(field) def __repr__(self): return str(self) class RelatedRecords(JsonGetter, SpatialReferenceMixin): def __init__(self, in_json): """related records response Required: in_json -- json response for query related records operation """ self.json = munch.munchify(in_json) self.geometryType = self.json.get(GEOMETRY_TYPE) self.spatialReference = self.json.get(SPATIAL_REFERENCE) def list_related_OIDs(self): """returns a list of all related object IDs""" return [f.get('objectId') for f in iter(self)] def get_related_records(self, oid): """gets the related records for an object id Required: oid -- object ID for related records """ for group in iter(self): if oid == group.get('objectId'): return [Feature(f) for f in 
group[RELATED_RECORDS]] def __iter__(self): for group in self.json[RELATED_RECORD_GROUPS]: yield group class FeatureSet(JsonGetter, SpatialReferenceMixin): def __init__(self, in_json): """class to handle feature set Required: in_json -- input json response from request """ super(FeatureSet, self).__init__() if isinstance(in_json, basestring): in_json = json.loads(in_json) elif isinstance(in_json, self.__class__): self.json = in_json.json else: self.json = munch.munchify(in_json) if not all([self.json.get(k) for k in (FIELDS, FEATURES)]): raise ValueError('Not a valid Feature Set!') @property def OID(self): """OID field object""" try: return [f for f in self.fields if f.type == OID][0] except: return None @property def SHAPE(self): """SHAPE field object""" try: return [f for f in self.fields if f.type == SHAPE][0] except: return None @property def hasGeometry(self): """boolean for if it has geometry""" if self.count: if self.features[0].get(GEOMETRY): return True return False @property def count(self): """returns total number of records in Cursor (user queried)""" return len(self) def list_fields(self): """returns a list of field names""" return [f.name for f in self.fields] def __getattr__(self, name): """get normal class attributes and those from json response""" try: # it is a class attribute return object.__getattribute__(self, name) except AttributeError: # it is in the json definition, abstract it to the class level if name in self.json: return self.json[name] else: raise AttributeError(name) def __getitem__(self, key): """supports grabbing feature by index and json keys by name""" if isinstance(key, int): return Feature(self.json.features[key]) else: return Feature(self.json.get(key)) def __iter__(self): for feature in self.features: yield Feature(feature) def __len__(self): return len(self.features) def __bool__(self): return bool(len(self)) def __dir__(self): return sorted(self.__class__.__dict__.keys() + self.json.keys()) class OrderedDict2(OrderedDict): """wrapper for OrderedDict""" def __init__(self, *args, **kwargs): super(OrderedDict2, self).__init__(*args, **kwargs) def __repr__(self): """we want it to look like a dictionary""" return json.dumps(self, indent=2, ensure_ascii=False) class Token(JsonGetter): """class to handle token authentication""" def __init__(self, response): """response JSON object from generate_token""" self.json = munch.munchify(response) self._cookie = {AGS_TOKEN: self.token} self.isAGOL = self.json.get(IS_AGOL, False) self.isAdmin = self.json.get(IS_ADMIN, False) @property def time_expires(self): return mil_to_date(self.expires) @property def isExpired(self): """boolean value for expired or not""" if datetime.datetime.now() > self.time_expires: return True else: return False def __str__(self): """return token as string representation""" return self.token class RequestError(object): """class to handle restapi request errors""" def __init__(self, err): if 'error' in err: raise RuntimeError(json.dumps(err, indent=2)) class Folder(RESTEndpoint): """class to handle ArcGIS REST Folder""" @property def name(self): """returns the folder name""" return self.url.split('/')[-1] def list_services(self): """method to list services""" return ['/'.join([s.name, s.type]) for s in self.services] def __len__(self): """return number of services in folder""" return len(self.services) def __bool__(self): """return True if services are present""" return bool(len(self)) class GPResult(object): """class to handle GP Result""" def __init__(self, response): """handler for GP 
result res_dict -- JSON response from GP Task execution """ self.response = response RequestError(self.response) @property def results(self): if RESULTS in self.response: return [namedTuple('Result', r) for r in self.response[RESULTS]] return [] @property def value(self): """returns a value (if any) from results""" if VALUE in self.response: return self.response[VALUE] return None @property def messages(self): """return messages as JSON""" if 'messages' in self.response: return [namedTuple('Message', d) for d in self.response['messages']] return [] def print_messages(self): """prints all the GP messages""" for msg in self.messages: print('Message Type: {}'.format(msg.type)) print('\tDescription: {}\n'.format(msg.description)) def __len__(self): """return length of results""" return len(self.results) def __getitem__(self, i): """return result at index, usually will only be 1""" return self.results[i] def __bool__(self): """return True if results""" return bool(len(self)) class GeocodeResult(object): """class to handle Reverse Geocode Result""" __slots__ = [RESPONSE, SPATIAL_REFERENCE, TYPE, CANDIDATES, LOCATIONS, ADDRESS, RESULTS, 'result', 'Result'] def __init__(self, res_dict, geo_type): """geocode response object Required: res_dict -- JSON response from geocode request geo_type -- type of geocode operation (reverseGeocode|findAddressCandidates|geocodeAddresses) """ RequestError(res_dict) self.response = res_dict self.type = 'esri_' + geo_type self.candidates = [] self.locations = [] self.address = [] try: sr_dict = self.response[LOCATION][SPATIAL_REFERENCE] wkid = sr_dict.get(LATEST_WKID, None) if wkid is None: wkid = sr_dict.get(WKID, None) self.spatialReference = wkid except: self.spatialReference = None if self.type == 'esri_reverseGeocode': addr_dict = {} addr_dict[LOCATION] = self.response[LOCATION] addr_dict[ATTRIBUTES] = self.response[ADDRESS] address = self.response[ADDRESS].get('Address', None) if address is None: add = self.response[ADDRESS] addr_dict[ADDRESS] = ' '.join(filter(None, [add.get('Street'), add.get('City'), add.get('ZIP')])) else: addr_dict[ADDRESS] = address addr_dict[SCORE] = None self.address.append(addr_dict) # legacy response from find? <- deprecated? 
# http://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/find #still works elif self.type == 'esri_find': # format legacy results for res in self.response[LOCATIONS]: ref_dict = {} for k,v in res.iteritems(): if k == NAME: ref_dict[ADDRESS] = v elif k == FEATURE: atts_dict = {} for att, val in res[k].iteritems(): if att == GEOMETRY: ref_dict[LOCATION] = val elif att == ATTRIBUTES: for att2, val2 in res[k][att].iteritems(): if att2.lower() == SCORE: ref_dict[SCORE] = val2 else: atts_dict[att2] = val2 ref_dict[ATTRIBUTES] = atts_dict self.locations.append(ref_dict) else: if self.type == 'esri_findAddressCandidates': self.candidates = self.response[CANDIDATES] elif self.type == 'esri_geocodeAddresses': self.locations = self.response[LOCATIONS] defaults = 'address attributes location score' self.Result = collections.namedtuple('GeocodeResult_result', defaults) @property def results(self): """returns list of result objects""" gc_results = self.address + self.candidates + self.locations results = [] for res in gc_results: results.append(self.Result(*[v for k,v in sorted(res.items())])) return results @property def result(self): """returns the top result""" try: return self.results[0] except IndexError: return None def __getitem__(self, index): """allows for indexing of results""" return self.results[index] def __len__(self): """get count of results""" return len(self.results) def __iter__(self): """return an iterator for results (as generator)""" for r in self.results: yield r def __bool__(self): """returns True if results are returned""" return bool(len(self)) class EditResult(object): """class to handle Edit operation results""" __slots__ = [ADD_RESULTS, UPDATE_RESULTS, DELETE_RESULTS, ADD_ATTACHMENT_RESULT, SUMMARY, AFFECTED_OIDS, FAILED_OIDS, RESPONSE, JSON] def __init__(self, res_dict, feature_id=None): RequestError(res_dict) self.response = munch.munchify(res_dict) self.failedOIDs = [] self.addResults = [] self.updateResults = [] self.deleteResults = [] self.addAttachmentResult = {} for key, value in res_dict.iteritems(): if isinstance(value, dict): value = [value] for v in value: res_id = v.get(RESULT_OBJECT_ID) if res_id is None: res_id = v.get(RESULT_GLOBAL_ID) if v[SUCCESS_STATUS] in (True, TRUE): if key == ADD_ATTACHMENT_RESULT: self.addAttachmentResult[feature_id] = res_id else: getattr(self, key).append(res_id) else: self.failedOIDs.append(res_id) self.affectedOIDs = self.addResults + self.updateResults + self.deleteResults + self.addAttachmentResult.keys() self.json = munch.munchify(res_dict) def summary(self): """print summary of edit operation""" if self.affectedOIDs: if self.addResults: print('Added {} feature(s)'.format(len(self.addResults))) if self.updateResults: print('Updated {} feature(s)'.format(len(self.updateResults))) if self.deleteResults: print('Deleted {} feature(s)'.format(len(self.deleteResults))) if self.addAttachmentResult: try: k,v = self.addAttachmentResult.items()[0] print("Added attachment '{}' for feature {}".format(v, k)) except IndexError: # should never happen? 
print('Added 1 attachment') if self.failedOIDs: print('Failed to edit {0} feature(s)!\n{1}'.format(len(self.failedOIDs), self.failedOIDs)) def __len__(self): """return count of affected OIDs""" return len(self.affectedOIDs) class BaseGeometry(SpatialReferenceMixin): """base geometry obect""" def dumps(self): """retuns JSON as a string""" return json.dumps(self.json) class BaseGeometryCollection(object): """Base Geometry Collection""" geometries = [] json = {GEOMETRIES: []} geometryType = None @property def count(self): return len(self) def dumps(self): """retuns JSON as a string""" return json.dumps(self.json) def __len__(self): return len(self.geometries) def __iter__(self): for geometry in self.geometries: yield geometry def __getitem__(self, index): return self.geometries[index] def __bool__(self): return bool(len(self.geometries)) def __repr__(self): return '<restapi.GeometryCollection ({}): [{}]>'.format(self.count, self.geometryType) class GeocodeService(RESTEndpoint): """class to handle Geocode Service""" def geocodeAddresses(self, recs, outSR=4326, address_field=''): """geocode a list of addresses. If there is a singleLineAddress field present in the geocoding service, the only input required is a list of addresses. Otherwise, a record set an be passed in for the "recs" parameter. See formatting example at bottom. Required: recs -- JSON object for fields as record set if no SingleLine field available. If singleLineAddress is present a list of full addresses can be passed in. Optional: outSR -- output spatial refrence for geocoded addresses address_field -- name of address field or Single Line address field # recs param examples # preferred option as record set (from esri help docs): recs = { "records": [ { "attributes": { "OBJECTID": 1, "STREET": "440 Arguello Blvd", "ZONE": "94118" } }, { "attributes": { "OBJECTID": 2, "STREET": "450 Arguello Blvd", "ZONE": "94118" } } ] } # full address list option if singleLineAddressField is present recs = ['100 S Riverfront St, Mankato, MN 56001',..] 
""" geo_url = self.url + '/geocodeAddresses' if isinstance(recs, (list, tuple)): addr_list = recs[:] recs = {RECORDS: []} if not address_field: if hasattr(self, 'singleLineAddressField'): address_field = self.singleLineAddressField.name else: address_field = self.addressFields[0].name print('Warning, no singleLineAddressField found...Using "{}" field'.format(address_field)) for i, addr in enumerate(addr_list): recs[RECORDS].append({ATTRIBUTES: {"OBJECTID": i+1, address_field: addr}}) # validate recs, make sure OBECTID is present elif isinstance(recs, dict) and RECORDS in recs: for i, atts in enumerate(recs[RECORDS]): if not OBJECTID in atts[ATTRIBUTES]: atts[ATTRIBUTES][OBJECTID] = i + 1 #do not start at 0 else: raise ValueError('Not a valid input for "recs" parameter!') params = {ADDRESSES: json.dumps(recs), OUT_SR: outSR, F: JSON} return GeocodeResult(do_post(geo_url, params, token=self.token, cookies=self._cookie), geo_url.split('/')[-1]) def reverseGeocode(self, location, distance=100, outSR=4326, returnIntersection=False, langCode='eng'): """reverse geocodes an address by x, y coordinates Required: location -- input point object as JSON distance -- distance in meters from given location which a matching address will be found outSR -- wkid for output address Optional: langCode -- optional language code, default is eng (only used for StreMap Premium locators) """ geo_url = self.url + '/reverseGeocode' params = {LOCATION: location, DISTANCE: distance, OUT_SR: outSR, RETURN_INTERSECTION: returnIntersection, F: JSON} return GeocodeResult(do_post(geo_url, params, token=self.token, cookies=self._cookie), geo_url.split('/')[-1]) def findAddressCandidates(self, address='', outSR=4326, outFields='*', returnIntersection=False, **kwargs): """finds address candidates for an anddress Required: address -- full address (380 New York Street, Redlands, CA 92373) outFields -- list of fields for output. Default is * for all fields. Will accept either list of fields [], or comma separated string. outSR -- wkid for output address **kwargs -- key word arguments to use for Address, City, State, etc fields if no SingleLine field """ geo_url = self.url + '/findAddressCandidates' params = {OUT_SR: outSR, OUT_FIELDS: outFields, RETURN_INTERSECTION: returnIntersection, F: JSON} if address: if hasattr(self, 'singleLineAddressField'): params[self.singleLineAddressField.name] = address else: params[self.addressFields[0].name] = address if kwargs: for fld_name, fld_query in kwargs.iteritems(): params[fld_name] = fld_query return GeocodeResult(do_post(geo_url, params, token=self.token, cookies=self._cookie), geo_url.split('/')[-1]) def __repr__(self): """string representation with service name""" return '<GeocodeService: {}>'.format('/'.join(self.url.split('/services/')[-1].split('/')[:-1]))
CalebM1987/serverAdminTools
serverAdminTools/restapi/rest_utils.py
Python
gpl-3.0
41,101
0.003358
""" Prepare Sparse Matrix for Sparse Affinity Propagation Clustering (SAP) """ # Authors: Huojun Cao <bioinfocao at gmail.com> # License: BSD 3 clause import numpy as np import pandas as pd import sparseAP_cy # cython for calculation ############################################################################################ # def copySym(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleRowInds): """ For single col items or single row items, copy sym minimal value For example if for sample 'A', the only datapoint of [s(A,A),s(A,B),s(A,C)...] is s(A,B), then we copy the minimal value of [s(A,A),s(C,A),s(D,A)...] (except s(B,A), because if we copy s(B,A), for 'A' we still only have one data point) """ copy_row_array,copy_col_array,copy_data_array=sparseAP_cy.copySingleRows(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleRowInds) #if symCopy=='all': #rowBased_row_array=np.concatenate((rowBased_row_array,copy_col_array)) #rowBased_col_array=np.concatenate((rowBased_col_array,copy_row_array)) #rowBased_data_array=np.concatenate((rowBased_data_array,copy_data_array)) #else:# symCopy=='min' or others will be treated as 'min' df = pd.DataFrame(zip(copy_row_array,copy_col_array,copy_data_array), columns=['row', 'col', 'data']) copy_row_list,copy_col_list,copy_data_list=[],[],[] for ind in singleRowInds: copyData=df[(df.col==ind) & (df.row!=ind)].sort_values(['data']).copy() copyData_min=copyData[0:1] copy_row_list+=list(copyData_min.col) copy_col_list+=list(copyData_min.row) copy_data_list+=list(copyData_min.data) rowBased_row_array=np.concatenate((rowBased_row_array,copy_row_list)) rowBased_col_array=np.concatenate((rowBased_col_array,copy_col_list)) rowBased_data_array=np.concatenate((rowBased_data_array,copy_data_list)) return rowBased_row_array,rowBased_col_array,rowBased_data_array def rmSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,nSamplesOri): """ Affinity/similarity matrix does not need be symmetric, that is s(A,B) does not need be same as s(B,A). Also since Affinity/similarity matrix is sparse, it could be that s(A,B) exist but s(B,A) does not exist in the sparse matrix. For the FSAPC to work, specifically in computation of R and A matrix, each row/column of Affinity/similarity matrix should have at least two datapoints. So in FSAPC, we first remove samples that do not have affinity/similarity with other samples, that is samples that only have affinity/similarity with itself And we remove samples only have one symmetric datapoint, for example for sample 'B' only s(B,C) exist and for sample 'C' only s(C,B) exist In these two cases, these samples are removed from FSAPC computation and their examplers are set to themself. For samples that only have one data (affinity/similarity) with others, For example if for sample 'A', the only datapoint of [s(A,A),s(A,B),s(A,C)...] is s(A,B), and there exist at least one value in [s(A,A),s(C,A),s(D,A)...] (except s(B,A), because if we copy s(B,A), for 'A' we still only have one data point) then we copy the minimal value of [s(A,A),s(C,A),s(D,A)...] 
nSamplesOri is the number of samples of orignail input data """ # find rows and cols that only have one datapoint singleRowInds=set(sparseAP_cy.singleItems(rowBased_row_array)) singleColInds=set(sparseAP_cy.singleItems(rowBased_col_array)) # samples that have one datapoint in row and col are samples only have affinity/similarity with itself singleSampleInds=singleRowInds & singleColInds # in case every col/row have more than one datapoint, just return original data if len(singleRowInds)==0 and len(singleColInds)==0: return rowBased_row_array,rowBased_col_array,rowBased_data_array,None,None,nSamplesOri # remove samples that only have affinity/similarity with itself # or only have one symmetric datapoint, for example for sample 'B' only s(B,C) exist and for sample 'C' only s(C,B) exist # in these two cases, these samples are removed from FSAPC computation and their examplers are set to themself. if len(singleSampleInds)>0: # row indexs that left after remove single samples rowLeft=sorted(list(set(range(nSamplesOri))-singleSampleInds)) # map of original row index to current row index(after remove rows/cols that only have single item) rowOriLeftDict={ori:left for left,ori in enumerate(rowLeft)} rowLeftOriDict={left:ori for ori,left in rowOriLeftDict.items()} rowBased_row_array,rowBased_col_array,rowBased_data_array=sparseAP_cy.removeSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleSampleInds) else: # no samples are removed rowLeftOriDict=None #if len(singleSampleInds)>0: #rowBased_row_array,rowBased_col_array,rowBased_data_array=sparseAP_cy.removeSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleSampleInds) # for samples that need copy a minimal value to have at least two datapoints in row/column # for samples that row have single data point, copy minimal value of this sample's column singleRowInds=singleRowInds-singleSampleInds if len(singleRowInds)>0: rowBased_row_array,rowBased_col_array,rowBased_data_array=copySym(rowBased_row_array.astype(np.int),rowBased_col_array.astype(np.int),rowBased_data_array,singleRowInds) # for samples that col have single data point, copy minimal value of this sample's row singleColInds=singleColInds-singleSampleInds if len(singleColInds)>0: rowBased_col_array,rowBased_row_array,rowBased_data_array=copySym(rowBased_col_array.astype(np.int),rowBased_row_array.astype(np.int),rowBased_data_array,singleColInds) # change row, col index if there is any sample removed if len(singleSampleInds)>0: changeIndV=np.vectorize(lambda x:rowOriLeftDict[x]) rowBased_row_array=changeIndV(rowBased_row_array) rowBased_col_array=changeIndV(rowBased_col_array) #rearrange based on new row index and new col index, print ('{0}, sort by row,col'.format(datetime.now())) sortedLeftOriInd = np.lexsort((rowBased_col_array,rowBased_row_array)).astype(np.int) rowBased_row_array=sparseAP_cy.npArrRearrange_int_para(rowBased_row_array.astype(np.int),sortedLeftOriInd) rowBased_col_array=sparseAP_cy.npArrRearrange_int_para(rowBased_col_array.astype(np.int),sortedLeftOriInd) rowBased_data_array=sparseAP_cy.npArrRearrange_float_para(rowBased_data_array,sortedLeftOriInd) return rowBased_row_array,rowBased_col_array,rowBased_data_array,rowLeftOriDict,singleSampleInds,nSamplesOri-len(singleSampleInds) def preCompute(rowBased_row_array,rowBased_col_array,S_rowBased_data_array): """ format affinity/similarity matrix """ # Get parameters data_len=len(S_rowBased_data_array) row_indptr=sparseAP_cy.getIndptr(rowBased_row_array) if 
row_indptr[-1]!=data_len: row_indptr=np.concatenate((row_indptr,np.array([data_len]))) row_to_col_ind_arr=np.lexsort((rowBased_row_array,rowBased_col_array)) colBased_row_array=sparseAP_cy.npArrRearrange_int_para(rowBased_row_array,row_to_col_ind_arr) colBased_col_array=sparseAP_cy.npArrRearrange_int_para(rowBased_col_array,row_to_col_ind_arr) col_to_row_ind_arr=np.lexsort((colBased_col_array,colBased_row_array)) col_indptr=sparseAP_cy.getIndptr(colBased_col_array) if col_indptr[-1]!=data_len: col_indptr=np.concatenate((col_indptr,np.array([data_len]))) kk_col_index=sparseAP_cy.getKKIndex(colBased_row_array,colBased_col_array) #Initialize matrix A, R A_rowbased_data_array=np.array([0.0]*data_len) R_rowbased_data_array=np.array([0.0]*data_len) #Add random samll value to remove degeneracies random_state=np.random.RandomState(0) S_rowBased_data_array+=1e-12*random_state.randn(data_len)*(np.amax(S_rowBased_data_array)-np.amin(S_rowBased_data_array)) #Convert row_to_col_ind_arr/col_to_row_ind_arr data type to np.int datatype so it is compatible with cython code row_to_col_ind_arr=row_to_col_ind_arr.astype(np.int) col_to_row_ind_arr=col_to_row_ind_arr.astype(np.int) return S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array,col_indptr,row_indptr,row_to_col_ind_arr,col_to_row_ind_arr,kk_col_index
bioinfocao/pysapc
pysapc/sparseMatrixPrepare.py
Python
bsd-3-clause
8,514
0.024078
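The docstrings in the record above describe two preparatory steps: locating samples whose row (or column) holds only a single stored similarity, and adding a tiny random perturbation to remove degeneracies before affinity propagation. The sketch below illustrates both ideas on a plain COO-style triplet representation using only NumPy; it is a simplified illustration of the idea, not the Cython-backed implementation pysapc uses, and the toy arrays are hypothetical.

import numpy as np

def single_entry_indices(index_array):
    """Return the set of indices that appear exactly once in a row or col index array."""
    values, counts = np.unique(index_array, return_counts=True)
    return set(values[counts == 1])

def add_tie_breaking_jitter(data_array, seed=0):
    """Add a tiny perturbation, scaled by the data range, to break exact ties."""
    rng = np.random.RandomState(seed)
    spread = np.amax(data_array) - np.amin(data_array)
    return data_array + 1e-12 * rng.randn(len(data_array)) * spread

# Toy sparse similarity matrix in triplet (row, col, data) form.
rows = np.array([0, 0, 1, 2, 2, 3])
cols = np.array([0, 1, 0, 0, 2, 3])
data = np.array([-1.0, -2.0, -2.5, -3.0, -1.0, -1.0])

print(single_entry_indices(rows))    # rows with only one stored similarity: {1, 3}
print(add_tie_breaking_jitter(data))

The jitter mirrors the 1e-12 * random_state.randn(...) term in preCompute above; scaling it by the data range keeps the perturbation negligible relative to the real similarity values.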
# Copyright (C) 2017 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com> # # config_tests.py - Boom report API tests. # # This file is part of the boom project. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import unittest import logging from os.path import abspath, join from sys import stdout import shutil try: # Python2 from ConfigParser import SafeConfigParser as ConfigParser, ParsingError except: # Python3 from configparser import ConfigParser, ParsingError log = logging.getLogger() log.level = logging.DEBUG log.addHandler(logging.FileHandler("test.log")) # Test suite paths from tests import * from boom import * from boom.config import * BOOT_ROOT_TEST = abspath("./tests") set_boot_path(BOOT_ROOT_TEST) class ConfigBasicTests(unittest.TestCase): """Basic tests for the boom.config sub-module. """ def test_sync_config(self): """Test that the internal _sync_config() helper works. """ import boom.config # for _sync_config() cfg = ConfigParser() bc = BoomConfig() cfg.add_section("global") cfg.add_section("legacy") boot_path = "/boot" boom_path = "/boot/boom" legacy_format = "grub1" bc.legacy_enabled = False bc.legacy_sync = False bc.legacy_format = legacy_format bc.boot_path = boot_path bc.boom_path = boom_path boom.config._sync_config(bc, cfg) self.assertEqual(cfg.get("legacy", "enable"), "no") self.assertEqual(cfg.get("legacy", "sync"), "no") self.assertEqual(cfg.get("legacy", "format"), legacy_format) self.assertEqual(cfg.get("global", "boot_root"), boot_path) self.assertEqual(cfg.get("global", "boom_root"), boom_path) class ConfigTests(unittest.TestCase): # The set of configuration files to use for this test class conf_path = join(BOOT_ROOT_TEST, "boom_configs/default/boot") # The path to the boot directory in the test sandbox boot_path = join(SANDBOX_PATH, "boot") # The path to the sandbox boom.conf configuration file boom_conf = join(boot_path, "boom/boom.conf") def setUp(self): """Set up a test fixture for the ConfigTests class. """ reset_sandbox() # Sandbox paths shutil.copytree(self.conf_path, join(SANDBOX_PATH, "boot")) # Set boom paths set_boot_path(self.boot_path) def tearDown(self): rm_sandbox() reset_boom_paths() def test_get_boom_config_path(self): """Test that the correct boom.conf path is returned from a call to the `get_boom_config_path()` function. """ conf_path = self.boom_conf self.assertEqual(get_boom_config_path(), conf_path) def test_set_boom_config_path_abs(self): """Test that the correct boom.conf path is returned from a call to the `get_boom_config_path()` function when an absolute path is given. """ conf_dir = join(SANDBOX_PATH, "boot/boom") conf_path = join(conf_dir, "boom.conf") set_boom_config_path(conf_dir) self.assertEqual(get_boom_config_path(), conf_path) def test_load_boom_config_default(self): """Test the `load_boom_config()` function with the default configuration file. """ load_boom_config() class BadConfigTests(ConfigTests): # The set of configuration files to use for this test class conf_path = join(BOOT_ROOT_TEST, "boom_configs/badconfig/boot") def test_load_boom_config_default(self): """Test the `load_boom_config()` function with the default configuration file. 
""" with self.assertRaises(ValueError) as cm: load_boom_config() # vim: set et ts=4 sw=4 :
bmr-cymru/boom
tests/config_tests.py
Python
gpl-2.0
4,151
0.001686
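test_sync_config above exercises a helper that copies attributes of a config object into named ConfigParser sections. As a rough, hedged illustration of that pattern (the real _sync_config lives in boom.config and may differ; SimpleBoomConfig is a hypothetical stand-in, and the Python 3 configparser spelling is used):

from configparser import ConfigParser

class SimpleBoomConfig(object):
    """Hypothetical minimal config object mirroring the attributes used in the test."""
    def __init__(self):
        self.legacy_enabled = False
        self.legacy_sync = False
        self.legacy_format = "grub1"
        self.boot_path = "/boot"
        self.boom_path = "/boot/boom"

def sync_config(bc, cfg):
    """Copy attributes from a config object into ConfigParser sections."""
    yes_no = lambda flag: "yes" if flag else "no"
    for section in ("global", "legacy"):
        if not cfg.has_section(section):
            cfg.add_section(section)
    cfg.set("global", "boot_root", bc.boot_path)
    cfg.set("global", "boom_root", bc.boom_path)
    cfg.set("legacy", "enable", yes_no(bc.legacy_enabled))
    cfg.set("legacy", "sync", yes_no(bc.legacy_sync))
    cfg.set("legacy", "format", bc.legacy_format)

cfg = ConfigParser()
sync_config(SimpleBoomConfig(), cfg)
assert cfg.get("legacy", "enable") == "no"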
""" Formtools Preview application. """ try: import cPickle as pickle except ImportError: import pickle from django.conf import settings from django.http import Http404 from django.shortcuts import render_to_response from django.template.context import RequestContext from django.utils.crypto import constant_time_compare from django.contrib.formtools.utils import form_hmac AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter. class FormPreview(object): preview_template = 'formtools/preview.html' form_template = 'formtools/form.html' # METHODS SUBCLASSES SHOULDN'T OVERRIDE ################################### def __init__(self, form): # form should be a Form class, not an instance. self.form, self.state = form, {} def __call__(self, request, *args, **kwargs): stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview') self.parse_params(*args, **kwargs) try: method = getattr(self, stage + '_' + request.method.lower()) except AttributeError: raise Http404 return method(request) def unused_name(self, name): """ Given a first-choice name, adds an underscore to the name until it reaches a name that isn't claimed by any field in the form. This is calculated rather than being hard-coded so that no field names are off-limits for use in the form. """ while 1: try: f = self.form.base_fields[name] except KeyError: break # This field name isn't being used by the form. name += '_' return name def preview_get(self, request): "Displays the form" f = self.form(auto_id=self.get_auto_id(), initial=self.get_initial(request)) return render_to_response(self.form_template, self.get_context(request, f), context_instance=RequestContext(request)) def preview_post(self, request): "Validates the POST data. If valid, displays the preview page. Else, redisplays form." f = self.form(request.POST, auto_id=self.get_auto_id()) context = self.get_context(request, f) if f.is_valid(): self.process_preview(request, f, context) context['hash_field'] = self.unused_name('hash') context['hash_value'] = self.security_hash(request, f) return render_to_response(self.preview_template, context, context_instance=RequestContext(request)) else: return render_to_response(self.form_template, context, context_instance=RequestContext(request)) def _check_security_hash(self, token, request, form): expected = self.security_hash(request, form) return constant_time_compare(token, expected) def post_post(self, request): "Validates the POST data. If valid, calls done(). Else, redisplays form." f = self.form(request.POST, auto_id=self.get_auto_id()) if f.is_valid(): if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''), request, f): return self.failed_hash(request) # Security hash failed. return self.done(request, f.cleaned_data) else: return render_to_response(self.form_template, self.get_context(request, f), context_instance=RequestContext(request)) # METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ######################## def get_auto_id(self): """ Hook to override the ``auto_id`` kwarg for the form. Needed when rendering two form previews in the same template. """ return AUTO_ID def get_initial(self, request): """ Takes a request argument and returns a dictionary to pass to the form's ``initial`` kwarg when the form is being created from an HTTP get. """ return {} def get_context(self, request, form): "Context for template rendering." 
return {'form': form, 'stage_field': self.unused_name('stage'), 'state': self.state} def parse_params(self, *args, **kwargs): """ Given captured args and kwargs from the URLconf, saves something in self.state and/or raises Http404 if necessary. For example, this URLconf captures a user_id variable: (r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)), In this case, the kwargs variable in parse_params would be {'user_id': 32} for a request to '/contact/32/'. You can use that user_id to make sure it's a valid user and/or save it for later, for use in done(). """ pass def process_preview(self, request, form, context): """ Given a validated form, performs any extra processing before displaying the preview page, and saves any extra data in context. """ pass def security_hash(self, request, form): """ Calculates the security hash for the given HttpRequest and Form instances. Subclasses may want to take into account request-specific information, such as the IP address. """ return form_hmac(form) def failed_hash(self, request): "Returns an HttpResponse in the case of an invalid security hash." return self.preview_post(request) # METHODS SUBCLASSES MUST OVERRIDE ######################################## def done(self, request, cleaned_data): """ Does something with the cleaned_data and returns an HttpResponseRedirect. """ raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
skevy/django
django/contrib/formtools/preview.py
Python
bsd-3-clause
5,860
0.003072
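FormPreview is used by subclassing it, overriding done(), and mounting an instance directly in a URLconf, as the parse_params docstring hints. A minimal, hedged usage sketch (the form fields and redirect target are hypothetical):

from django import forms
from django.http import HttpResponseRedirect
from django.contrib.formtools.preview import FormPreview

class ContactForm(forms.Form):
    name = forms.CharField(max_length=100)
    message = forms.CharField(widget=forms.Textarea)

class ContactFormPreview(FormPreview):
    """Shows the form, then a preview page, then calls done() on the confirmed POST."""
    def done(self, request, cleaned_data):
        # Persist or send cleaned_data here, then redirect (required by the base class).
        return HttpResponseRedirect('/thanks/')

# In the URLconf (hypothetical), the preview instance itself is the view:
#   (r'^contact/$', ContactFormPreview(ContactForm)),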
"""Constants for Google Assistant.""" from homeassistant.components import ( binary_sensor, camera, climate, cover, fan, group, input_boolean, light, lock, media_player, scene, script, switch, vacuum, ) DOMAIN = 'google_assistant' GOOGLE_ASSISTANT_API_ENDPOINT = '/api/google_assistant' CONF_EXPOSE = 'expose' CONF_ENTITY_CONFIG = 'entity_config' CONF_EXPOSE_BY_DEFAULT = 'expose_by_default' CONF_EXPOSED_DOMAINS = 'exposed_domains' CONF_PROJECT_ID = 'project_id' CONF_ALIASES = 'aliases' CONF_API_KEY = 'api_key' CONF_ROOM_HINT = 'room' CONF_ALLOW_UNLOCK = 'allow_unlock' CONF_SECURE_DEVICES_PIN = 'secure_devices_pin' DEFAULT_EXPOSE_BY_DEFAULT = True DEFAULT_EXPOSED_DOMAINS = [ 'climate', 'cover', 'fan', 'group', 'input_boolean', 'light', 'media_player', 'scene', 'script', 'switch', 'vacuum', 'lock', 'binary_sensor', 'sensor' ] PREFIX_TYPES = 'action.devices.types.' TYPE_CAMERA = PREFIX_TYPES + 'CAMERA' TYPE_LIGHT = PREFIX_TYPES + 'LIGHT' TYPE_SWITCH = PREFIX_TYPES + 'SWITCH' TYPE_VACUUM = PREFIX_TYPES + 'VACUUM' TYPE_SCENE = PREFIX_TYPES + 'SCENE' TYPE_FAN = PREFIX_TYPES + 'FAN' TYPE_THERMOSTAT = PREFIX_TYPES + 'THERMOSTAT' TYPE_LOCK = PREFIX_TYPES + 'LOCK' TYPE_BLINDS = PREFIX_TYPES + 'BLINDS' TYPE_GARAGE = PREFIX_TYPES + 'GARAGE' TYPE_OUTLET = PREFIX_TYPES + 'OUTLET' TYPE_SENSOR = PREFIX_TYPES + 'SENSOR' TYPE_DOOR = PREFIX_TYPES + 'DOOR' TYPE_TV = PREFIX_TYPES + 'TV' TYPE_SPEAKER = PREFIX_TYPES + 'SPEAKER' SERVICE_REQUEST_SYNC = 'request_sync' HOMEGRAPH_URL = 'https://homegraph.googleapis.com/' REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + 'v1/devices:requestSync' # Error codes used for SmartHomeError class # https://developers.google.com/actions/reference/smarthome/errors-exceptions ERR_DEVICE_OFFLINE = "deviceOffline" ERR_DEVICE_NOT_FOUND = "deviceNotFound" ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange" ERR_NOT_SUPPORTED = "notSupported" ERR_PROTOCOL_ERROR = 'protocolError' ERR_UNKNOWN_ERROR = 'unknownError' ERR_FUNCTION_NOT_SUPPORTED = 'functionNotSupported' ERR_CHALLENGE_NEEDED = 'challengeNeeded' ERR_CHALLENGE_NOT_SETUP = 'challengeFailedNotSetup' ERR_TOO_MANY_FAILED_ATTEMPTS = 'tooManyFailedAttempts' ERR_PIN_INCORRECT = 'pinIncorrect' ERR_USER_CANCELLED = 'userCancelled' # Event types EVENT_COMMAND_RECEIVED = 'google_assistant_command' EVENT_QUERY_RECEIVED = 'google_assistant_query' EVENT_SYNC_RECEIVED = 'google_assistant_sync' DOMAIN_TO_GOOGLE_TYPES = { camera.DOMAIN: TYPE_CAMERA, climate.DOMAIN: TYPE_THERMOSTAT, cover.DOMAIN: TYPE_BLINDS, fan.DOMAIN: TYPE_FAN, group.DOMAIN: TYPE_SWITCH, input_boolean.DOMAIN: TYPE_SWITCH, light.DOMAIN: TYPE_LIGHT, lock.DOMAIN: TYPE_LOCK, media_player.DOMAIN: TYPE_SWITCH, scene.DOMAIN: TYPE_SCENE, script.DOMAIN: TYPE_SCENE, switch.DOMAIN: TYPE_SWITCH, vacuum.DOMAIN: TYPE_VACUUM, } DEVICE_CLASS_TO_GOOGLE_TYPES = { (cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE, (cover.DOMAIN, cover.DEVICE_CLASS_DOOR): TYPE_DOOR, (switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH, (switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET, (binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_DOOR): TYPE_DOOR, (binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_GARAGE_DOOR): TYPE_GARAGE, (binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_LOCK): TYPE_SENSOR, (binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_OPENING): TYPE_SENSOR, (binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_WINDOW): TYPE_SENSOR, (media_player.DOMAIN, media_player.DEVICE_CLASS_TV): TYPE_TV, (media_player.DOMAIN, media_player.DEVICE_CLASS_SPEAKER): TYPE_SPEAKER, } CHALLENGE_ACK_NEEDED = 'ackNeeded' 
CHALLENGE_PIN_NEEDED = 'pinNeeded' CHALLENGE_FAILED_PIN_NEEDED = 'challengeFailedPinNeeded'
aequitas/home-assistant
homeassistant/components/google_assistant/const.py
Python
apache-2.0
3,816
0
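The two dictionaries at the end of this constants module suggest a lookup order: a (domain, device_class) pair is checked first, falling back to the plain domain mapping. The helper below is a hypothetical illustration of that lookup written against the constants defined above; the real resolution logic lives elsewhere in the integration.

def google_type_for(domain, device_class=None,
                    domain_map=None, device_class_map=None):
    """Resolve a Google Assistant device type, preferring the device-class mapping."""
    domain_map = domain_map or {}
    device_class_map = device_class_map or {}
    if device_class is not None and (domain, device_class) in device_class_map:
        return device_class_map[(domain, device_class)]
    return domain_map.get(domain)

# Hypothetical usage with the module's dictionaries:
#   google_type_for('switch', 'outlet',
#                   DOMAIN_TO_GOOGLE_TYPES, DEVICE_CLASS_TO_GOOGLE_TYPES)
#   -> 'action.devices.types.OUTLET'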
# -*- coding: utf-8 -*- """ *************************************************************************** doSpatialIndex.py - build spatial index for vector layers or files -------------------------------------- Date : 11-Nov-2011 Copyright : (C) 2011 by Alexander Bruy Email : alexander dot bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from PyQt4.QtCore import * from PyQt4.QtGui import * from qgis.core import * from qgis.gui import * import ftools_utils from ui_frmSpatialIndex import Ui_Dialog class Dialog( QDialog, Ui_Dialog ): def __init__( self, iface ): QDialog.__init__( self, iface.mainWindow() ) self.setupUi( self ) self.iface = iface self.workThread = None self.btnOk = self.buttonBox.button( QDialogButtonBox.Ok ) self.btnClose = self.buttonBox.button( QDialogButtonBox.Close ) QObject.connect( self.chkExternalFiles, SIGNAL( "stateChanged( int )" ), self.toggleExternalFiles ) QObject.connect( self.btnSelectFiles, SIGNAL( "clicked()" ), self.selectFiles ) QObject.connect( self.lstLayers, SIGNAL( "itemSelectionChanged()" ), self.updateLayerList ) QObject.connect( self.btnSelectAll, SIGNAL( "clicked()" ), self.selectAll ) QObject.connect( self.btnSelectNone, SIGNAL( "clicked()" ), self.selectNone ) QObject.connect( self.btnClearList, SIGNAL( "clicked()" ), self.clearList ) self.manageGui() def manageGui( self ): self.btnSelectFiles.setEnabled( False ) self.btnClearList.setEnabled( False ) self.fillLayersList() def fillLayersList( self ): self.lstLayers.clear() layers = ftools_utils.getLayerNames( [ QGis.Line, QGis.Point, QGis.Polygon ] ) for lay in layers: source = ftools_utils.getVectorLayerByName( lay ).source() item = QListWidgetItem( lay, self.lstLayers ) item.setData( Qt.UserRole, source ) item.setData( Qt.ToolTipRole, source ) def toggleExternalFiles( self ): if self.chkExternalFiles.isChecked(): self.btnSelectFiles.setEnabled( True ) self.btnClearList.setEnabled( True ) self.btnSelectAll.setEnabled( False ) self.btnSelectNone.setEnabled( False ) self.lstLayers.clear() self.lstLayers.setSelectionMode( QAbstractItemView.NoSelection ) self.layers = [] else: self.btnSelectFiles.setEnabled( False ) self.btnClearList.setEnabled( False ) self.btnSelectAll.setEnabled( True ) self.btnSelectNone.setEnabled( True ) self.fillLayersList() self.lstLayers.setSelectionMode( QAbstractItemView.ExtendedSelection ) self.updateLayerList() def updateLayerList( self ): self.layers = [] selection = self.lstLayers.selectedItems() for item in selection: self.layers.append( item.text() ) def selectFiles( self ): filters = QgsProviderRegistry.instance().fileVectorFilters() ( files, self.encoding ) = ftools_utils.openDialog( self, filtering = filters, dialogMode = "MultipleFiles" ) if files is None: return self.layers.extend( [ unicode( f ) for f in files ] ) self.lstLayers.addItems( files ) def selectAll( self ): self.lstLayers.selectAll() def selectNone( self ): self.lstLayers.clearSelection() def clearList( self ): self.layers = [] self.lstLayers.clear() def accept( self ): self.btnOk.setEnabled( False ) self.workThread = SpatialIdxThread( self.layers, self.chkExternalFiles.isChecked() ) self.progressBar.setRange( 0, 
len( self.layers ) ) QObject.connect( self.workThread, SIGNAL( "layerProcessed()" ), self.layerProcessed ) QObject.connect( self.workThread, SIGNAL( "processFinished( PyQt_PyObject )" ), self.processFinished ) QObject.connect( self.workThread, SIGNAL( "processInterrupted()" ), self.processInterrupted ) self.btnClose.setText( self.tr( "Cancel" ) ) QObject.disconnect( self.buttonBox, SIGNAL( "rejected()" ), self.reject ) QObject.connect( self.btnClose, SIGNAL( "clicked()" ), self.stopProcessing ) self.workThread.start() def layerProcessed( self ): self.progressBar.setValue( self.progressBar.value() + 1 ) def processInterrupted( self ): self.restoreGui() def processFinished( self, errors ): self.stopProcessing() self.restoreGui() if not errors.isEmpty(): msg = self.tr( "Processing of the following layers/files ended with error:<br><br>" ) + "<br>".join(errors) QErrorMessage( self ).showMessage( msg ) QMessageBox.information( self, self.tr( "Finished" ), self.tr( "Processing completed." ) ) def stopProcessing( self ): if self.workThread != None: self.workThread.stop() self.workThread = None def restoreGui( self ): self.progressBar.setValue( 0 ) QObject.connect( self.buttonBox, SIGNAL( "rejected()" ), self.reject ) self.btnClose.setText( self.tr( "Close" ) ) self.btnOk.setEnabled( True ) if self.chkExternalFiles.isChecked(): self.clearList() class SpatialIdxThread( QThread ): def __init__( self, layers, isFiles ): QThread.__init__( self, QThread.currentThread() ) self.layers = layers self.isFiles = isFiles self.mutex = QMutex() self.stopMe = 0 self.errors = [] def run( self ): self.mutex.lock() self.stopMe = 0 self.mutex.unlock() interrupted = False if self.isFiles: for layer in self.layers: vl = QgsVectorLayer( layer, "tmp", "ogr" ) provider = vl.dataProvider() if provider.capabilities() & QgsVectorDataProvider.CreateSpatialIndex: if not provider.createSpatialIndex(): self.errors.append( layer ) else: self.errors.append( layer ) self.emit( SIGNAL( "layerProcessed()" ) ) self.mutex.lock() s = self.stopMe self.mutex.unlock() if s == 1: interrupted = True break else: for layer in self.layers: vl = ftools_utils.getVectorLayerByName( layer ) provider = vl.dataProvider() if provider.capabilities() & QgsVectorDataProvider.CreateSpatialIndex: if not provider.createSpatialIndex(): self.errors.append( layer ) else: self.errors.append( layer ) self.emit( SIGNAL( "layerProcessed()" ) ) self.mutex.lock() s = self.stopMe self.mutex.unlock() if s == 1: interrupted = True break if not interrupted: self.emit( SIGNAL( "processFinished( PyQt_PyObject )" ), self.errors ) else: self.emit( SIGNAL( "processInterrupted()" ) ) def stop( self ): self.mutex.lock() self.stopMe = 1 self.mutex.unlock() QThread.wait( self )
innotechsoftware/Quantum-GIS
python/plugins/fTools/tools/doSpatialIndex.py
Python
gpl-2.0
7,344
0.03908
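Stripped of the Qt dialog and worker thread, the core of SpatialIdxThread.run is a capability check followed by createSpatialIndex() on each layer's data provider. A hedged sketch of that inner loop, assuming it runs in an environment where qgis.core is importable (the file paths are hypothetical):

from qgis.core import QgsVectorLayer, QgsVectorDataProvider

def build_spatial_indexes(paths):
    """Try to build a spatial index for each OGR file; return the paths that failed."""
    failed = []
    for path in paths:
        layer = QgsVectorLayer(path, "tmp", "ogr")
        provider = layer.dataProvider()
        if provider.capabilities() & QgsVectorDataProvider.CreateSpatialIndex:
            if not provider.createSpatialIndex():
                failed.append(path)
        else:
            failed.append(path)
    return failed

# Hypothetical usage: build_spatial_indexes(["/data/roads.shp", "/data/parcels.shp"])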
#!/usr/bin/env python
# Compare a ground-truth CSV against a test CSV: rows are keyed on columns 1 and 2,
# and the assignment in column 0 is checked for each key.
import csv, sys

mapping = {}
totalTruth, totalTesting, hit, miss, errors = (0, 0, 0, 0, 0)

# Build a lookup of (key1, key2) -> assignment from the ground-truth file.
with open(sys.argv[1], 'rb') as groundtruth:
    reader = csv.reader(groundtruth)
    for row in reader:
        totalTruth += 1
        mapping[(row[1], row[2])] = row[0]

# Score the test file against the ground truth; keys missing from the truth count as errors.
with open(sys.argv[2], 'rb') as testing:
    reader = csv.reader(testing)
    for row in reader:
        totalTesting += 1
        try:
            if (mapping[(row[1], row[2])] == row[0]):
                hit += 1
            else:
                miss += 1
        except KeyError:
            errors += 1

print "Total size: ", totalTruth, " and testing size: ", totalTesting
print "Correct assignments: ", hit, " and failed assignments: ", miss
print "Errors: ", errors
print "Accuracy: ", float(hit) / float(totalTruth)
whoww/peel-flink-kmeans
VarianceBenchmarkResults/AccuracyMeasure.py
Python
apache-2.0
815
0.001227
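The script above expects both CSV files to carry the assignment in the first column and a two-part key in the next two columns, with the ground-truth file given first on the command line. The condensed restatement below computes the same accuracy figure in a single function; the file names are hypothetical and miss/error counting is folded away for brevity.

import csv

def accuracy(truth_path, pred_path):
    """Recompute the script's accuracy for two CSVs of (assignment, key1, key2) rows."""
    with open(truth_path) as fh:
        truth = {(r[1], r[2]): r[0] for r in csv.reader(fh)}
    hits = 0
    with open(pred_path) as fh:
        for r in csv.reader(fh):
            if truth.get((r[1], r[2])) == r[0]:
                hits += 1
    return float(hits) / len(truth)

# Hypothetical usage: accuracy("groundtruth.csv", "predictions.csv")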
MODULES = ['main', 'config'] DEPS = [] NAME = 'Notepad' PLATFORMS = ['any'] DESCRIPTION = 'Configuration files editor' VERSION = '0.1' AUTHOR = 'Ajenti team' HOMEPAGE = 'http://ajenti.org'
DmZ/ajenti
plugins/notepad/__init__.py
Python
lgpl-3.0
192
0.005208
""" For Django < 3.1, rely on django-jsonfield-backport for JSONField functionality https://github.com/laymonage/django-jsonfield-backport#installation https://github.com/laymonage/django-jsonfield-backport#why-create-another-one """ try: from django.db.models import JSONField # noqa except ImportError: from django_jsonfield_backport.models import JSONField # noqa
pinax/pinax-eventlog
pinax/eventlog/compat.py
Python
mit
379
0
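Because this compat module re-exports whichever JSONField is available, model code can import it from one place and stay agnostic about the Django version. A minimal, hedged sketch (the app and model names are hypothetical, and the import path is assumed from the file's location in the package):

from django.db import models

from pinax.eventlog.compat import JSONField  # assumed import path

class AuditEntry(models.Model):
    """Hypothetical model that stores arbitrary structured data in a JSONField."""
    action = models.CharField(max_length=64)
    extra = JSONField(blank=True, default=dict)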
# Copyright 2013 Viewfinder Inc. All Rights Reserved. """Viewfinder UpdateFollowerOperation. This operation update's follower metadata for a user. """ __authors__ = ['mike@emailscrubbed.com (Mike Purtell)', 'andy@emailscrubbed.com (Andy Kimball)'] import json import logging from tornado import gen from viewfinder.backend.base.exceptions import PermissionError from viewfinder.backend.db.follower import Follower from viewfinder.backend.db.operation import Operation from viewfinder.backend.db.viewpoint import Viewpoint from viewfinder.backend.op.notification_manager import NotificationManager from viewfinder.backend.op.viewfinder_op import ViewfinderOperation class UpdateFollowerOperation(ViewfinderOperation): """The UpdateFollower operation follows the four phase pattern described in the header of operation_map.py, except that there is no ACCOUNT phase, since this operation does not affect accounting. """ def __init__(self, client, user_id, foll_dict): super(UpdateFollowerOperation, self).__init__(client) self._foll_dict = foll_dict self._user_id = user_id self._viewpoint_id = foll_dict['viewpoint_id'] @classmethod @gen.coroutine def Execute(cls, client, user_id, follower): """Entry point called by the operation framework.""" yield UpdateFollowerOperation(client, user_id, follower)._UpdateFollower() @gen.coroutine def _UpdateFollower(self): """Orchestrates the update follower operation by executing each of the phases in turn.""" lock = yield gen.Task(Viewpoint.AcquireLock, self._client, self._viewpoint_id) try: yield self._Check() self._client.CheckDBNotModified() yield self._Update() yield Operation.TriggerFailpoint(self._client) yield self._Notify() finally: yield gen.Task(Viewpoint.ReleaseLock, self._client, self._viewpoint_id, lock) @gen.coroutine def _Check(self): """Gathers pre-mutation information: 1. Queries for follower. 2. Queries for viewpoint. Validates the following: 1. Permission to update follower metadata. 2. Certain labels cannot be set. """ self._follower = yield gen.Task(Follower.Query, self._client, self._user_id, self._viewpoint_id, None, must_exist=False) if self._follower is None: raise PermissionError('User %d does not have permission to update follower "%s", or it does not exist.' % (self._user_id, self._viewpoint_id)) self._viewpoint = yield gen.Task(Viewpoint.Query, self._client, self._viewpoint_id, None) if 'labels' in self._foll_dict: self._follower.SetLabels(self._foll_dict['labels']) @gen.coroutine def _Update(self): """Updates the database: 1. Updates the follower metadata. """ # Labels should have been set in the _Check step. assert 'labels' not in self._foll_dict or set(self._follower.labels) == set(self._foll_dict['labels']), \ (self._foll_dict, self._follower.labels) if 'viewed_seq' in self._foll_dict: # Don't allow viewed_seq to exceed update_seq. if self._foll_dict['viewed_seq'] > self._viewpoint.update_seq: self._foll_dict['viewed_seq'] = self._viewpoint.update_seq # Ratchet up viewed_seq so that it's guaranteed to monotonically increase. if self._foll_dict['viewed_seq'] > self._follower.viewed_seq: self._follower.viewed_seq = self._foll_dict['viewed_seq'] else: # Map to final value which will be used in the notification. self._foll_dict['viewed_seq'] = self._follower.viewed_seq yield gen.Task(self._follower.Update, self._client) @gen.coroutine def _Notify(self): """Creates notifications: 1. Notify all of the user's devices that the follower has been updated. """ yield NotificationManager.NotifyUpdateFollower(self._client, self._foll_dict)
0359xiaodong/viewfinder
backend/op/update_follower_op.py
Python
apache-2.0
4,084
0.008815
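The _Update phase above clamps viewed_seq to update_seq and only ever ratchets it upward. That invariant is easy to state in isolation; the helper below is a hypothetical restatement for clarity, not code from the operation itself.

def next_viewed_seq(requested, current_viewed, update_seq):
    """Clamp the requested viewed_seq to update_seq and never let it move backwards."""
    clamped = min(requested, update_seq)
    return max(clamped, current_viewed)

assert next_viewed_seq(10, 3, 7) == 7   # clamped down to update_seq
assert next_viewed_seq(2, 5, 7) == 5    # never decreases below the current value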
""" In-memory treq returns stubbed responses. """ from functools import partial from inspect import getmembers, isfunction from mock import ANY from six import text_type, binary_type from twisted.web.client import ResponseFailed from twisted.web.error import SchemeNotSupported from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET from twisted.python.compat import _PY3 import treq from treq.test.util import TestCase from treq.testing import ( HasHeaders, RequestSequence, StringStubbingResource, StubTreq ) class _StaticTestResource(Resource): """Resource that always returns 418 "I'm a teapot""" isLeaf = True def render(self, request): request.setResponseCode(418) request.setHeader(b"x-teapot", b"teapot!") return b"I'm a teapot" class _NonResponsiveTestResource(Resource): """Resource that returns NOT_DONE_YET and never finishes the request""" isLeaf = True def render(self, request): return NOT_DONE_YET class _EventuallyResponsiveTestResource(Resource): """ Resource that returns NOT_DONE_YET and stores the request so that something else can finish the response later. """ isLeaf = True def render(self, request): self.stored_request = request return NOT_DONE_YET class StubbingTests(TestCase): """ Tests for :class:`StubTreq`. """ def test_stubtreq_provides_all_functions_in_treq_all(self): """ Every single function and attribute exposed by :obj:`treq.__all__` is provided by :obj:`StubTreq`. """ treq_things = [(name, obj) for name, obj in getmembers(treq) if name in treq.__all__] stub = StubTreq(_StaticTestResource()) api_things = [(name, obj) for name, obj in treq_things if obj.__module__ == "treq.api"] content_things = [(name, obj) for name, obj in treq_things if obj.__module__ == "treq.content"] # sanity checks - this test should fail if treq exposes a new API # without changes being made to StubTreq and this test. msg = ("At the time this test was written, StubTreq only knew about " "treq exposing functions from treq.api and treq.content. If " "this has changed, StubTreq will need to be updated, as will " "this test.") self.assertTrue(all(isfunction(obj) for name, obj in treq_things), msg) self.assertEqual(set(treq_things), set(api_things + content_things), msg) for name, obj in api_things: self.assertTrue( isfunction(getattr(stub, name, None)), "StubTreq.{0} should be a function.".format(name)) for name, obj in content_things: self.assertIs( getattr(stub, name, None), obj, "StubTreq.{0} should just expose treq.{0}".format(name)) def test_providing_resource_to_stub_treq(self): """ The resource provided to StubTreq responds to every request no matter what the URI or parameters or data. 
""" verbs = ('GET', 'PUT', 'HEAD', 'PATCH', 'DELETE', 'POST') urls = ( 'http://supports-http.com', 'https://supports-https.com', 'http://this/has/a/path/and/invalid/domain/name', 'https://supports-https.com:8080', 'http://supports-http.com:8080', ) params = (None, {}, {b'page': [1]}) headers = (None, {}, {b'x-random-header': [b'value', b'value2']}) data = (None, b"", b'some data', b'{"some": "json"}') stub = StubTreq(_StaticTestResource()) combos = ( (verb, {"url": url, "params": p, "headers": h, "data": d}) for verb in verbs for url in urls for p in params for h in headers for d in data ) for combo in combos: verb, kwargs = combo deferreds = (stub.request(verb, **kwargs), getattr(stub, verb.lower())(**kwargs)) for d in deferreds: resp = self.successResultOf(d) self.assertEqual(418, resp.code) self.assertEqual([b'teapot!'], resp.headers.getRawHeaders(b'x-teapot')) self.assertEqual(b"" if verb == "HEAD" else b"I'm a teapot", self.successResultOf(stub.content(resp))) def test_handles_invalid_schemes(self): """ Invalid URLs errback with a :obj:`SchemeNotSupported` failure, and does so even after a successful request. """ stub = StubTreq(_StaticTestResource()) self.failureResultOf(stub.get(""), SchemeNotSupported) self.successResultOf(stub.get("http://url.com")) self.failureResultOf(stub.get(""), SchemeNotSupported) def test_files_are_rejected(self): """ StubTreq does not handle files yet - it should reject requests which attempt to pass files. """ stub = StubTreq(_StaticTestResource()) self.assertRaises( AssertionError, stub.request, 'method', 'http://url', files=b'some file') def test_passing_in_strange_data_is_rejected(self): """ StubTreq rejects data that isn't list/dictionary/tuple/bytes/unicode. """ stub = StubTreq(_StaticTestResource()) self.assertRaises( AssertionError, stub.request, 'method', 'http://url', data=object()) self.successResultOf(stub.request('method', 'http://url', data={})) self.successResultOf(stub.request('method', 'http://url', data=[])) self.successResultOf(stub.request('method', 'http://url', data=())) self.successResultOf( stub.request('method', 'http://url', data=binary_type(b""))) self.successResultOf( stub.request('method', 'http://url', data=text_type(""))) def test_handles_failing_asynchronous_requests(self): """ Handle a resource returning NOT_DONE_YET and then canceling the request. """ stub = StubTreq(_NonResponsiveTestResource()) d = stub.request('method', 'http://url', data=b"1234") self.assertNoResult(d) d.cancel() self.failureResultOf(d, ResponseFailed) def test_handles_successful_asynchronous_requests(self): """ Handle a resource returning NOT_DONE_YET and then later finishing the response. """ rsrc = _EventuallyResponsiveTestResource() stub = StubTreq(rsrc) d = stub.request('method', 'http://example.com/', data=b"1234") self.assertNoResult(d) rsrc.stored_request.finish() stub.flush() resp = self.successResultOf(d) self.assertEqual(resp.code, 200) def test_handles_successful_asynchronous_requests_with_response_data(self): """ Handle a resource returning NOT_DONE_YET and then sending some data in the response. 
""" rsrc = _EventuallyResponsiveTestResource() stub = StubTreq(rsrc) d = stub.request('method', 'http://example.com/', data=b"1234") self.assertNoResult(d) chunks = [] rsrc.stored_request.write(b'spam ') rsrc.stored_request.write(b'eggs') stub.flush() resp = self.successResultOf(d) d = stub.collect(resp, chunks.append) self.assertNoResult(d) self.assertEqual(b''.join(chunks), b'spam eggs') rsrc.stored_request.finish() stub.flush() self.successResultOf(d) def test_handles_successful_asynchronous_requests_with_streaming(self): """ Handle a resource returning NOT_DONE_YET and then streaming data back gradually over time. """ rsrc = _EventuallyResponsiveTestResource() stub = StubTreq(rsrc) d = stub.request('method', 'http://example.com/', data="1234") self.assertNoResult(d) chunks = [] rsrc.stored_request.write(b'spam ') rsrc.stored_request.write(b'eggs') stub.flush() resp = self.successResultOf(d) d = stub.collect(resp, chunks.append) self.assertNoResult(d) self.assertEqual(b''.join(chunks), b'spam eggs') del chunks[:] rsrc.stored_request.write(b'eggs\r\nspam\r\n') stub.flush() self.assertNoResult(d) self.assertEqual(b''.join(chunks), b'eggs\r\nspam\r\n') rsrc.stored_request.finish() stub.flush() self.successResultOf(d) class HasHeadersTests(TestCase): """ Tests for :obj:`HasHeaders`. """ def test_equality_and_strict_subsets_succeed(self): """ The :obj:`HasHeaders` returns True if both sets of headers are equivalent, or the first is a strict subset of the second. """ self.assertEqual(HasHeaders({'one': ['two', 'three']}), {'one': ['two', 'three']}, "Equivalent headers do not match.") self.assertEqual(HasHeaders({'one': ['two', 'three']}), {'one': ['two', 'three', 'four'], 'ten': ['six']}, "Strict subset headers do not match") def test_partial_or_zero_intersection_subsets_fail(self): """ The :obj:`HasHeaders` returns False if both sets of headers overlap but the first is not a strict subset of the second. It also returns False if there is no overlap. """ self.assertNotEqual(HasHeaders({'one': ['two', 'three']}), {'one': ['three', 'four']}, "Partial value overlap matches") self.assertNotEqual(HasHeaders({'one': ['two', 'three']}), {'one': ['two']}, "Missing value matches") self.assertNotEqual(HasHeaders({'one': ['two', 'three']}), {'ten': ['six']}, "Complete inequality matches") def test_case_insensitive_keys(self): """ The :obj:`HasHeaders` equality function ignores the case of the header keys. """ self.assertEqual(HasHeaders({b'A': [b'1'], b'b': [b'2']}), {b'a': [b'1'], b'B': [b'2']}) def test_case_sensitive_values(self): """ The :obj:`HasHeaders` equality function does care about the case of the header value. """ self.assertNotEqual(HasHeaders({b'a': [b'a']}), {b'a': [b'A']}) def test_bytes_encoded_forms(self): """ The :obj:`HasHeaders` equality function compares the bytes-encoded forms of both sets of headers. """ self.assertEqual(HasHeaders({b'a': [b'a']}), {u'a': [u'a']}) self.assertEqual(HasHeaders({u'b': [u'b']}), {b'b': [b'b']}) def test_repr(self): """ :obj:`HasHeaders` returns a nice string repr. """ if _PY3: reprOutput = "HasHeaders({b'a': [b'b']})" else: reprOutput = "HasHeaders({'a': ['b']})" self.assertEqual(reprOutput, repr(HasHeaders({b'A': [b'b']}))) class StringStubbingTests(TestCase): """ Tests for :obj:`StringStubbingResource`. """ def _get_response_for(self, expected_args, response): """ Make a :obj:`IStringResponseStubs` that checks the expected args and returns the given response. 
""" method, url, params, headers, data = expected_args def get_response_for(_method, _url, _params, _headers, _data): self.assertEqual((method, url, params, data), (_method, _url, _params, _data)) self.assertEqual(HasHeaders(headers), _headers) return response return get_response_for def test_interacts_successfully_with_istub(self): """ The :obj:`IStringResponseStubs` is passed the correct parameters with which to evaluate the response, and the response is returned. """ resource = StringStubbingResource(self._get_response_for( (b'DELETE', 'http://what/a/thing', {b'page': [b'1']}, {b'x-header': [b'eh']}, b'datastr'), (418, {b'x-response': b'responseheader'}, b'response body'))) stub = StubTreq(resource) d = stub.delete('http://what/a/thing', headers={b'x-header': b'eh'}, params={b'page': b'1'}, data=b'datastr') resp = self.successResultOf(d) self.assertEqual(418, resp.code) self.assertEqual([b'responseheader'], resp.headers.getRawHeaders(b'x-response')) self.assertEqual(b'response body', self.successResultOf(stub.content(resp))) class RequestSequenceTests(TestCase): """ Tests for :obj:`RequestSequence`. """ def setUp(self): """ Set up a way to report failures asynchronously. """ self.async_failures = [] def test_mismatched_request_causes_failure(self): """ If a request is made that is not expected as the next request, causes a failure. """ sequence = RequestSequence( [((b'get', 'https://anything/', {b'1': [b'2']}, HasHeaders({b'1': [b'1']}), b'what'), (418, {}, b'body')), ((b'get', 'http://anything', {}, HasHeaders({b'2': [b'1']}), b'what'), (202, {}, b'deleted'))], async_failure_reporter=self.async_failures.append) stub = StubTreq(StringStubbingResource(sequence)) get = partial(stub.get, 'https://anything?1=2', data=b'what', headers={b'1': b'1'}) resp = self.successResultOf(get()) self.assertEqual(418, resp.code) self.assertEqual(b'body', self.successResultOf(stub.content(resp))) self.assertEqual([], self.async_failures) resp = self.successResultOf(get()) self.assertEqual(500, resp.code) self.assertEqual(1, len(self.async_failures)) self.assertIn("Expected the next request to be", self.async_failures[0]) self.assertFalse(sequence.consumed()) def test_unexpected_number_of_request_causes_failure(self): """ If there are no more expected requests, making a request causes a failure. """ sequence = RequestSequence( [], async_failure_reporter=self.async_failures.append) stub = StubTreq(StringStubbingResource(sequence)) d = stub.get('https://anything', data=b'what', headers={b'1': b'1'}) resp = self.successResultOf(d) self.assertEqual(500, resp.code) self.assertEqual(1, len(self.async_failures)) self.assertIn("No more requests expected, but request", self.async_failures[0]) # the expected requests have all been made self.assertTrue(sequence.consumed()) def test_works_with_mock_any(self): """ :obj:`mock.ANY` can be used with the request parameters. 
""" sequence = RequestSequence( [((ANY, ANY, ANY, ANY, ANY), (418, {}, b'body'))], async_failure_reporter=self.async_failures.append) stub = StubTreq(StringStubbingResource(sequence)) with sequence.consume(sync_failure_reporter=self.fail): d = stub.get('https://anything', data=b'what', headers={b'1': b'1'}) resp = self.successResultOf(d) self.assertEqual(418, resp.code) self.assertEqual(b'body', self.successResultOf(stub.content(resp))) self.assertEqual([], self.async_failures) # the expected requests have all been made self.assertTrue(sequence.consumed()) def test_consume_context_manager_fails_on_remaining_requests(self): """ If the `consume` context manager is used, if there are any remaining expecting requests, the test case will be failed. """ sequence = RequestSequence( [((ANY, ANY, ANY, ANY, ANY), (418, {}, b'body'))] * 2, async_failure_reporter=self.async_failures.append) stub = StubTreq(StringStubbingResource(sequence)) consume_failures = [] with sequence.consume(sync_failure_reporter=consume_failures.append): self.successResultOf(stub.get('https://anything', data=b'what', headers={b'1': b'1'})) self.assertEqual(1, len(consume_failures)) self.assertIn( "Not all expected requests were made. Still expecting:", consume_failures[0]) self.assertIn( "{0}(url={0}, params={0}, headers={0}, data={0})".format( repr(ANY)), consume_failures[0]) # no asynchronous failures (mismatches, etc.) self.assertEqual([], self.async_failures)
mithrandi/treq
src/treq/test/test_testing.py
Python
mit
17,263
0
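Outside the test suite, the pattern these tests exercise is: wrap a twisted.web resource in StubTreq and call the treq-style verbs on it with no real network I/O. A hedged minimal sketch, assuming the testing helpers are importable as in the module above (the URL is deliberately a non-routable placeholder):

from twisted.web.resource import Resource
from treq.testing import StubTreq

class Hello(Resource):
    """Tiny in-memory resource used instead of a live server."""
    isLeaf = True

    def render_GET(self, request):
        return b"hello"

stub = StubTreq(Hello())

# The Deferreds fire against the in-memory resource; no network traffic happens.
bodies = []
d = stub.get("http://example.invalid/anything")
d.addCallback(stub.content)
d.addCallback(bodies.append)
# After the callbacks run, bodies == [b"hello"]; in a test you would instead unwrap the
# Deferred with successResultOf(...) as the cases above do.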
#------------------------------------------------------------------------ # # Register the Addon # #------------------------------------------------------------------------ register(GENERAL, id="libwebconnect", name="libwebconnect", description = _("Library for web site collections"), status = STABLE, # not yet tested with python 3 version = '1.0.29', gramps_target_version = "5.1", fname="libwebconnect.py", load_on_reg = True, )
sam-m888/addons-source
libwebconnect/libwebconnect.gpr.py
Python
gpl-2.0
516
0.027132
import json import glob import logging import os config = {} log = logging.getLogger(__name__) def get(key, default_value=None): path = key.split(".") value = config.copy() for i in path: if i not in value: value = None break value = value[i] return value or default_value def getint(key, default=None): r = get(key, default) if r: return int(r) return default def getfloat(key, default=None): return float(get(key, default)) def deep_merge(d1, d2): res = d1 for key, value in d2.iteritems(): if key in d1: if isinstance(value, dict) and isinstance(d1[key], dict): d1[key] = deep_merge(d1[key], value) continue d1[key] = value return d1 def read_config(*folders): global config files = [] if not folders: folders = ["conf.d/*.json"] for folder in folders: files.extend(glob.glob(folder)) filtered = [] [filtered.append(i) for i in files if not filtered.count(i)] files = filtered for f in files: with open(f, "r") as config_file: try: data = json.load(config_file) except ValueError, e: log.exception("Failed to read config file %s", f) continue config = deep_merge(config, data) return config
Unix4ever/spike
common/config.py
Python
mit
1,403
0.003564
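Here get() walks a dotted key path through the merged dict and deep_merge() lets later config files override earlier ones key by key. One behavior worth noting: because get() ends with "value or default_value", falsy stored values such as 0, False, or "" fall back to the default, so callers may need an explicit key check for those cases. The usage sketch below seeds the module-level dict directly instead of reading JSON files, purely for illustration, and assumes the module is importable as config.

import config

# Normally config.read_config("conf.d/*.json") populates this from JSON files;
# seeding it directly here is only for illustration.
config.config = {
    "server": {"host": "127.0.0.1", "port": "8080"},
    "features": {"enabled": False},
}

print(config.get("server.host"))              # '127.0.0.1'
print(config.getint("server.port"))           # 8080
print(config.get("missing.key", "n/a"))       # 'n/a'
print(config.get("features.enabled", True))   # True: the falsy value falls back to the default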
from django.conf import settings from django.core.handlers.base import get_path_info from django.core.handlers.wsgi import WSGIHandler from django.utils.six.moves.urllib.parse import urlparse from django.utils.six.moves.urllib.request import url2pathname from django.contrib.staticfiles import utils from django.contrib.staticfiles.views import serve class StaticFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to the static files directory, as defined by the STATIC_URL setting, and serves those files. """ def __init__(self, application, base_dir=None): self.application = application if base_dir: self.base_dir = base_dir else: self.base_dir = self.get_base_dir() self.base_url = urlparse(self.get_base_url()) super(StaticFilesHandler, self).__init__() def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): utils.check_settings() return settings.STATIC_URL def _should_handle(self, path): """ Checks if the path should be handled. Ignores the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """ Returns the relative path to the media file on disk for the given URL. """ relative_url = url[len(self.base_url[2]):] return url2pathname(relative_url) def serve(self, request): """ Actually serves the request path. """ return serve(request, self.file_path(request.path), insecure=True) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404 as e: if settings.DEBUG: from django.views import debug return debug.technical_404_response(request, e) return super(StaticFilesHandler, self).get_response(request) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super(StaticFilesHandler, self).__call__(environ, start_response)
edisonlz/fruit
web_project/base/site-packages/django/contrib/staticfiles/handlers.py
Python
apache-2.0
2,440
0.00082
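StaticFilesHandler wraps another WSGI application and only intercepts paths under STATIC_URL; every other request falls through to the wrapped application. A hedged sketch of how a project might wrap its WSGI application during development (the settings module name is hypothetical):

import os

from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # hypothetical

# Requests under settings.STATIC_URL are served from disk by the handler;
# all other paths are passed straight to the normal Django application.
application = StaticFilesHandler(get_wsgi_application())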
# -*- coding: utf-8 -*- ''' Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,urllib,urlparse from resources.lib.modules import cleantitle from resources.lib.modules import cache from resources.lib.modules import client from resources.lib.modules import proxy class source: def __init__(self): self.domains = ['projectfreetv.im'] self.base_link = 'http://projectfreetv.im' self.search_link = '/watch-series/' def tvshow(self, imdb, tvdb, tvshowtitle, year): try: t = cleantitle.get(tvshowtitle) r = cache.get(self.pftv_tvcache, 120) r = [i[0] for i in r if t == i[1]] for i in r[:2]: try: m = proxy.request(urlparse.urljoin(self.base_link, i), 'Episodes') m = re.sub('\s|<.+?>|</.+?>', '', m) m = re.findall('Year:(%s)' % year, m)[0] url = i ; break except: pass return url except: return def pftv_tvcache(self): try: url = urlparse.urljoin(self.base_link, self.search_link) r = proxy.request(url, 'A-Z') r = client.parseDOM(r, 'li') m = [] for i in r: try: title = client.parseDOM(i, 'a')[0] title = client.replaceHTMLCodes(title) title = cleantitle.get(title) title = title.encode('utf-8') url = client.parseDOM(i, 'a', ret='href')[0] url = client.replaceHTMLCodes(url) try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = urlparse.urljoin(self.base_link, url) url = re.findall('(?://.+?|)(/.+)', url)[0] url = url.encode('utf-8') m.append((url, title)) except: pass return m except: return def episode(self, url, imdb, tvdb, title, premiered, season, episode): try: if url == None: return url = [i for i in url.split('/') if not i == ''][-1] url = '/episode/%s-season-%01d-episode-%01d/' % (url, int(season), int(episode)) return url except: return def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) r = proxy.request(url, 'add links') links = client.parseDOM(r, 'tr') for i in links: try: host = client.parseDOM(i, 'a')[0] host = [x.strip() for x in host.strip().split('\n') if not x == ''][-1] if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') url = client.parseDOM(i, 'a', ret='href')[0] url = client.replaceHTMLCodes(url) try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = urlparse.urljoin(self.base_link, url) url = url.encode('utf-8') sources.append({'source': host, 'quality': 'SD', 'provider': 'PFTV', 'url': url, 'direct': False, 'debridonly': False}) except: pass return sources except: return sources def resolve(self, url): try: r = proxy.request(url, 'nofollow') url = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'}) url = [i for i in url if not urlparse.urlparse(self.base_link).netloc in i] url = client.replaceHTMLCodes(url[0]) try: 
url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = url.encode('utf-8') return url except: return
felipenaselva/repo.felipe
plugin.video.exodus/resources/lib/sources/pftv_tv.py
Python
gpl-2.0
5,225
0.010718
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2013 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Machine Specifications: the rows of this table represent the default values of machine "models" so that users don't need to manaully enter the low level details of each one since this is mostly repeated data in large grid deployments, such as Saphire """ from datetime import datetime from sqlalchemy import (Table, Column, Integer, DateTime, Sequence, String, ForeignKey, UniqueConstraint) from sqlalchemy.orm import relation, backref from aquilon.aqdb.model import Base, Model, Vendor, Cpu from aquilon.aqdb.model.disk import disk_types, controller_types from aquilon.aqdb.column_types import Enum class MachineSpecs(Base): """ Captures the configuration hardware components for a given model """ #TODO: Maybe this entire table is in fact a part of the model "subtype" _def_cpu_cnt = { 'workstation':1, 'blade': 2, 'rackmount' : 4 } _def_nic_cnt = { 'workstation':1, 'blade': 2, 'rackmount' : 2 } _def_memory = { 'workstation': 2048, 'blade': 8192, 'rackmount': 16384 } __tablename__ = 'machine_specs' id = Column( Integer, Sequence('mach_specs_id_seq'), primary_key=True) model_id = Column(Integer, ForeignKey('model.id', name='mach_spec_model_fk'), nullable=False) cpu_id = Column(Integer, ForeignKey('cpu.id', name='mach_spec_cpu_fk'), nullable=False) cpu_quantity = Column(Integer, nullable=False) #Constrain to below 512? memory = Column(Integer, nullable=False, default=0) disk_type = Column(Enum(64, disk_types), nullable=False) disk_capacity = Column(Integer, nullable=False, default=36) controller_type = Column(Enum(64, controller_types), nullable=False) nic_count = Column(Integer, nullable=False, default=2) creation_date = Column('creation_date', DateTime, default=datetime.now) comments = Column('comments', String(255), nullable=True) model = relation(Model, backref=backref('machine_specs', uselist=False)) cpu = relation(Cpu) machine_specs = MachineSpecs.__table__ machine_specs.primary_key.name='machine_specs_pk' #for now, need a UK on model_id. WILL be a name AND a model_id as UK. 
machine_specs.append_constraint( UniqueConstraint('model_id', name='machine_specs_model_uk')) table = machine_specs def populate(sess, *args, **kw): if len(sess.query(MachineSpecs).all()) < 1: from sqlalchemy import insert specs = [["hs20-884345u", "xeon_2660", 2, 8192, 'scsi', 36, 2], ["hs21-8853l5u", "xeon_2660", 2, 8192, 'scsi', 68, 2], ["poweredge_6650", "xeon_3000", 4, 16384, 'scsi', 36, 2], ["bl45p", "opteron_2600", 2, 32768, 'scsi', 36, 2], ["bl260c", "xeon_2500", 2, 24576, 'scsi', 36, 2], ["vb1205xm", "xeon_2500", 2, 24576, 'scsi', 36, 2], ["aurora_model", "aurora_cpu", 0, 0, 'scsi', 0, 0]] for ms in specs: try: dbmodel = sess.query(Model).filter_by(name=ms[0]).one() dbcpu = sess.query(Cpu).filter_by(name=ms[1]).one() cpu_quantity = ms[2] memory = ms[3] disk_type = 'local' controller_type = ms[4] disk_capacity = ms[5] nic_count = ms[6] dbms = MachineSpecs(model=dbmodel, cpu=dbcpu, cpu_quantity=cpu_quantity, memory=memory, disk_type=disk_type, controller_type=controller_type, disk_capacity=disk_capacity, nic_count=nic_count) sess.add(dbms) except Exception,e: sess.rollback() print 'Creating machine specs: %s' % e continue try: sess.commit() except Exception,e: sess.rollback() print 'Commiting ',e continue
stdweird/aquilon
upgrade/1.4.5/aquilon/aqdb/model/machine_specs.py
Python
apache-2.0
4,639
0.006036
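The populate() helper above shows the intended write path: look up the existing Model and Cpu rows, then attach a MachineSpecs row describing the default hardware. The condensed sketch below restates that pattern as a hypothetical helper; session setup is omitted, the values come from the specs list above, and importing MachineSpecs from aquilon.aqdb.model is an assumption about how the package re-exports its models.

from aquilon.aqdb.model import Model, Cpu, MachineSpecs  # re-export assumed

def add_default_specs(sess, model_name, cpu_name, cpus, memory_mb, disk_gb, nics):
    """Attach a MachineSpecs row to an existing Model/Cpu pair (hypothetical helper)."""
    dbmodel = sess.query(Model).filter_by(name=model_name).one()
    dbcpu = sess.query(Cpu).filter_by(name=cpu_name).one()
    sess.add(MachineSpecs(model=dbmodel, cpu=dbcpu, cpu_quantity=cpus,
                          memory=memory_mb, disk_type='local',
                          controller_type='scsi', disk_capacity=disk_gb,
                          nic_count=nics))

# e.g. add_default_specs(sess, "hs21-8853l5u", "xeon_2660", 2, 8192, 68, 2); sess.commit()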
from __future__ import absolute_import, unicode_literals ###################### # MEZZANINE SETTINGS # ###################### # The following settings are already defined with default values in # the ``defaults.py`` module within each of Mezzanine's apps, but are # common enough to be put here, commented out, for convenient # overriding. Please consult the settings documentation for a full list # of settings Mezzanine implements: # http://mezzanine.jupo.org/docs/configuration.html#default-settings # Controls the ordering and grouping of the admin menu. # # ADMIN_MENU_ORDER = ( # ("Content", ("pages.Page", "blog.BlogPost", # "generic.ThreadedComment", ("Media Library", "fb_browse"),)), # ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")), # ("Users", ("auth.User", "auth.Group",)), # ) # A three item sequence, each containing a sequence of template tags # used to render the admin dashboard. # # DASHBOARD_TAGS = ( # ("blog_tags.quick_blog", "mezzanine_tags.app_list"), # ("comment_tags.recent_comments",), # ("mezzanine_tags.recent_actions",), # ) # A sequence of templates used by the ``page_menu`` template tag. Each # item in the sequence is a three item sequence, containing a unique ID # for the template, a label for the template, and the template path. # These templates are then available for selection when editing which # menus a page should appear in. Note that if a menu template is used # that doesn't appear in this setting, all pages will appear in it. # PAGE_MENU_TEMPLATES = ( # (1, "Top navigation bar", "pages/menus/dropdown.html"), # (2, "Left-hand tree", "pages/menus/tree.html"), # (3, "Footer", "pages/menus/footer.html"), # ) # A sequence of fields that will be injected into Mezzanine's (or any # library's) models. Each item in the sequence is a four item sequence. # The first two items are the dotted path to the model and its field # name to be added, and the dotted path to the field class to use for # the field. The third and fourth items are a sequence of positional # args and a dictionary of keyword args, to use when creating the # field instance. When specifying the field class, the path # ``django.models.db.`` can be omitted for regular Django model fields. # # EXTRA_MODEL_FIELDS = ( # ( # # Dotted path to field. # "mezzanine.blog.models.BlogPost.image", # # Dotted path to field class. # "somelib.fields.ImageField", # # Positional args for field class. # ("Image",), # # Keyword args for field class. # {"blank": True, "upload_to": "blog"}, # ), # # Example of adding a field to *all* of Mezzanine's content types: # ( # "mezzanine.pages.models.Page.another_field", # "IntegerField", # 'django.db.models.' is implied if path is omitted. # ("Another name",), # {"blank": True, "default": 1}, # ), # ) # Setting to turn on featured images for blog posts. Defaults to False. # # BLOG_USE_FEATURED_IMAGE = True # If True, the south application will be automatically added to the # INSTALLED_APPS setting. USE_SOUTH = True ######################## # MAIN DJANGO SETTINGS # ######################## # People who get code error notifications. # In the format (('Full Name', 'email@example.com'), # ('Full Name', 'anotheremail@example.com')) ADMINS = ( ('administrator', 'administrator@example.com'), ) MANAGERS = ADMINS # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['127.0.0.1'] # Local time zone for this installation. 
Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = "Europe/Rome" # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = True # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = "en" # Supported languages _ = lambda s: s LANGUAGES = ( ('en', _('English')), ) # A boolean that turns on/off debug mode. When set to ``True``, stack traces # are displayed for error pages. Should always be set to ``False`` in # production. Best set to ``True`` in local_settings.py DEBUG = True # Whether a user's session cookie expires when the Web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = True SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = False # Tuple of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = ("127.0.0.1",) # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ) AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # The numeric mode to set newly-uploaded files to. The value should be # a mode you'd pass directly to os.chmod. FILE_UPLOAD_PERMISSIONS = 0o644 ############# # DATABASES # ############# DATABASES = { "default": { # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle". "ENGINE": "django.db.backends.postgresql_psycopg2", # DB name or path to database file if using sqlite3. "NAME": "mezzanine_mailchimper", # Not used with sqlite3. "USER": "", # Not used with sqlite3. "PASSWORD": "", # Set to empty string for localhost. Not used with sqlite3. "HOST": "localhost", # Set to empty string for default. Not used with sqlite3. "PORT": "", "ATOMIC_REQUESTS": True, } } ######### # PATHS # ######### import os # Full filesystem path to the project. PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) # Name of the directory for the project. PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1] # Every cache key will get prefixed with this value - here we set it to # the name of the directory the project is in to try and use something # project specific. CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = "/static/" # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/")) # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. 
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = STATIC_URL + "media/" # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/")) # Package/module name to import the root urlpatterns from for the project. ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME # Put strings here, like "/home/html/django_templates" # or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),) ########### # LOGGING # ########### LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, }, 'handlers': { 'console': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', }, 'null': { 'class': 'django.utils.log.NullHandler', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler', # 'email_backend': 'django.core.mail.backends.console.' # 'EmailBackend', } }, 'loggers': { 'django': { 'handlers': ['console'], }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'django.security': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': False, }, 'py.warnings': { 'handlers': ['console'], }, } } ################ # APPLICATIONS # ################ INSTALLED_APPS = ( "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.redirects", "django.contrib.sessions", "django.contrib.sites", "django.contrib.sitemaps", "django.contrib.staticfiles", "mailchimper", "mezzanine.boot", "mezzanine.conf", "mezzanine.core", "mezzanine.generic", "mezzanine.pages", "mezzanine.accounts", # "django_pdb", "crispy_forms", # "functional_tests", ) # List of processors used by RequestContext to populate the context. # Each one should be a callable that takes the request object as its # only parameter and returns a dictionary to add to the context. TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.static", "django.core.context_processors.media", "django.core.context_processors.request", "django.core.context_processors.tz", "mezzanine.conf.context_processors.settings", ) # List of middleware classes to use. Order is important; in the request phase, # these middleware classes will be applied in the order given, and in the # response phase the middleware will be applied in reverse order. 
MIDDLEWARE_CLASSES = ( "mezzanine.core.middleware.UpdateCacheMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.locale.LocaleMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "mezzanine.core.request.CurrentRequestMiddleware", "mezzanine.core.middleware.RedirectFallbackMiddleware", "mezzanine.core.middleware.TemplateForDeviceMiddleware", "mezzanine.core.middleware.TemplateForHostMiddleware", "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware", "mezzanine.core.middleware.SitePermissionMiddleware", # Uncomment the following if using any of the SSL settings: # "mezzanine.core.middleware.SSLRedirectMiddleware", "mezzanine.pages.middleware.PageMiddleware", "mezzanine.core.middleware.FetchFromCacheMiddleware", # "django_pdb.middleware.PdbMiddleware", ) # Store these package names here as they may change in the future since # at the moment we are using custom forks of them. PACKAGE_NAME_FILEBROWSER = "filebrowser_safe" PACKAGE_NAME_GRAPPELLI = "grappelli_safe" ######################### # OPTIONAL APPLICATIONS # ######################### # These will be added to ``INSTALLED_APPS``, only if available. OPTIONAL_APPS = ( "debug_toolbar", "django_extensions", "compressor", PACKAGE_NAME_FILEBROWSER, PACKAGE_NAME_GRAPPELLI, ) DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False} ################### # DEPLOY SETTINGS # ################### # These settings are used by the default fabfile.py provided. # Check fabfile.py for defaults. # FABRIC = { # "SSH_USER": "", # SSH username # "SSH_PASS": "", # SSH password (consider key-based authentication) # "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth # "HOSTS": [], # List of hosts to deploy to # "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs # "PROJECT_NAME": "", # Unique identifier for project # "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project # "GUNICORN_PORT": 8000, # Port gunicorn will listen on # "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8" # "LIVE_HOSTNAME": "www.example.com", # Host for public site. # "REPO_URL": "", # Git or Mercurial remote repo URL for the project # "DB_PASS": "", # Live database password # "ADMIN_PASS": "", # Live admin user password # "SECRET_KEY": SECRET_KEY, # "NEVERCACHE_KEY": NEVERCACHE_KEY, # } ################## # LOCAL SETTINGS # ################## # Allow any settings to be defined in local_settings.py which should be # ignored in your version control system allowing for settings to be # defined per machine. try: from local_settings import * except ImportError: pass # Make these unique, and don't share it with anybody. SECRET_KEY = "%(SECRET_KEY)s" NEVERCACHE_KEY = "%(NEVERCACHE_KEY)s" CRISPY_TEMPLATE_PACK = 'bootstrap' # for functional tests INSTALLED_APPS = list(INSTALLED_APPS) + [ PACKAGE_NAME_GRAPPELLI, PACKAGE_NAME_FILEBROWSER, 'django.contrib.redirects'] from django import get_version if int(get_version().split('.')[1]) <= 5: TEST_RUNNER = 'discover_runner.DiscoverRunner' TEST_DISCOVER_PATTERN = "test_*.py" else: TEST_RUNNER = 'django.test.runner.DiscoverRunner' #################### # DYNAMIC SETTINGS # #################### # set_dynamic_settings() will rewrite globals based on what has been # defined so far, in order to provide some better defaults where # applicable. 
We also allow this settings module to be imported # without Mezzanine installed, as the case may be when using the # fabfile, where setting the dynamic settings below isn't strictly # required. try: from mezzanine.utils.conf import set_dynamic_settings except ImportError: pass else: set_dynamic_settings(globals())
simodalla/mezzanine_mailchimper
project_template/settings.py
Python
bsd-3-clause
14,784
0.000271
import marshal import os import subprocess import urlparse import dxr.plugins """Omniglot - Speaking all commonly-used version control systems. At present, this plugin is still under development, so not all features are fully implemented. Omniglot first scans the project directory looking for the hallmarks of a VCS (such as the .hg or .git directory). It also looks for these in parent directories in case DXR is only parsing a fraction of the repository. Once this information is found, it attempts to extract upstream information about the repository. From this information, it builds the necessary information to reproduce the links. Currently supported VCSes and upstream views: - git (github) - mercurial (hgweb) Todos: - add gitweb support for git - add cvs, svn, bzr support - produce in-DXR blame information using VCSs - check if the mercurial paths are specific to Mozilla's customization or not. """ # Global variables tree = None source_repositories = {} class VCS(object): """A class representing an abstract notion of a version-control system. In general, all path arguments to query methods should be normalized to be relative to the root directory of the VCS. """ def __init__(self, root): self.root = root self.untracked_files = set() def get_root_dir(self): """Return the directory that is at the root of the VCS.""" return self.root def get_vcs_name(self): """Return a recognizable name for the VCS.""" return type(self).__name__ def invoke_vcs(self, args): """Return the result of invoking said command on the repository, with the current working directory set to the root directory. """ return subprocess.check_output(args, cwd=self.get_root_dir()) def is_tracked(self, path): """Does the repository track this file?""" return path not in self.untracked_files def get_rev(self, path): """Return a human-readable revision identifier for the repository.""" raise NotImplemented def generate_log(self, path): """Return a URL for a page that lists revisions for this file.""" raise NotImplemented def generate_blame(self, path): """Return a URL for a page that lists source annotations for lines in this file. """ raise NotImplemented def generate_diff(self, path): """Return a URL for a page that shows the last change made to this file. """ raise NotImplemented def generate_raw(self, path): """Return a URL for a page that returns a raw copy of this file.""" raise NotImplemented class Mercurial(VCS): def __init__(self, root): super(Mercurial, self).__init__(root) # Find the revision self.revision = self.invoke_vcs(['hg', 'id', '-i']).strip() # Sometimes hg id returns + at the end. 
if self.revision.endswith("+"): self.revision = self.revision[:-1] # Make and normalize the upstream URL upstream = urlparse.urlparse(self.invoke_vcs(['hg', 'paths', 'default']).strip()) recomb = list(upstream) if upstream.scheme == 'ssh': recomb[0] == 'http' recomb[1] = upstream.hostname # Eliminate any username stuff recomb[2] = '/' + recomb[2].lstrip('/') # strip all leading '/', add one back if not upstream.path.endswith('/'): recomb[2] += '/' # Make sure we have a '/' on the end recomb[3] = recomb[4] = recomb[5] = '' # Just those three self.upstream = urlparse.urlunparse(recomb) # Find all untracked files self.untracked_files = set(line.split()[1] for line in self.invoke_vcs(['hg', 'status', '-u', '-i']).split('\n')[:-1]) @staticmethod def claim_vcs_source(path, dirs): if '.hg' in dirs: dirs.remove('.hg') return Mercurial(path) return None def get_rev(self, path): return self.revision def generate_log(self, path): return self.upstream + 'filelog/' + self.revision + '/' + path def generate_blame(self, path): return self.upstream + 'annotate/' + self.revision + '/' + path def generate_diff(self, path): return self.upstream + 'diff/' + self.revision + '/' + path def generate_raw(self, path): return self.upstream + 'raw-file/' + self.revision + '/' + path class Git(VCS): def __init__(self, root): super(Git, self).__init__(root) self.untracked_files = set(line for line in self.invoke_vcs(['git', 'ls-files', '-o']).split('\n')[:-1]) self.revision = self.invoke_vcs(['git', 'rev-parse', 'HEAD']) source_urls = self.invoke_vcs(['git', 'remote', '-v']).split('\n') for src_url in source_urls: name, url, _ = src_url.split() if name == 'origin': self.upstream = self.synth_web_url(url) break @staticmethod def claim_vcs_source(path, dirs): if '.git' in dirs: dirs.remove('.git') return Git(path) return None def get_rev(self, path): return self.revision[:10] def generate_log(self, path): return self.upstream + "/commits/" + self.revision + "/" + path def generate_blame(self, path): return self.upstream + "/blame/" + self.revision + "/" + path def generate_diff(self, path): # I really want to make this anchor on the file in question, but github # doesn't seem to do that nicely return self.upstream + "/commit/" + self.revision def generate_raw(self, path): return self.upstream + "/raw/" + self.revision + "/" + path def synth_web_url(self, repo): if repo.startswith("git@github.com:"): self._is_github = True return "https://github.com/" + repo[len("git@github.com:"):] elif repo.startswith("git://github.com/"): self._is_github = True if repo.endswith(".git"): repo = repo[:-len(".git")] return "https" + repo[len("git"):] raise Exception("I don't know what's going on") class Perforce(VCS): def __init__(self, root): super(Perforce, self).__init__(root) have = self._p4run(['have']) self.have = dict((x['path'][len(root) + 1:], x) for x in have) try: self.upstream = tree.plugin_omniglot_p4web except AttributeError: self.upstream = "http://p4web/" @staticmethod def claim_vcs_source(path, dirs): if 'P4CONFIG' not in os.environ: return None if os.path.exists(os.path.join(path, os.environ['P4CONFIG'])): return Perforce(path) return None def _p4run(self, args): ret = [] env = os.environ env["PWD"] = self.root proc = subprocess.Popen(['p4', '-G'] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=self.root, env=env) while True: try: x = marshal.load(proc.stdout) except EOFError: break ret.append(x) return ret def is_tracked(self, path): return path in self.have def get_rev(self, path): info = self.have[path] 
return '#' + info['haveRev'] def generate_log(self, path): info = self.have[path] return self.upstream + info['depotFile'] + '?ac=22#' + info['haveRev'] def generate_blame(self, path): info = self.have[path] return self.upstream + info['depotFile'] + '?ac=193' def generate_diff(self, path): info = self.have[path] haveRev = info['haveRev'] prevRev = str(int(haveRev) - 1) return (self.upstream + info['depotFile'] + '?ac=19&rev1=' + prevRev + '&rev2=' + haveRev) def generate_raw(self, path): info = self.have[path] return self.upstream + info['depotFile'] + '?ac=98&rev1=' + info['haveRev'] every_vcs = [Mercurial, Git, Perforce] # Load global variables def load(tree_, conn): global tree, lookup_order tree = tree_ # Find all of the VCS's in the source directory for cwd, dirs, files in os.walk(tree.source_folder): for vcs in every_vcs: attempt = vcs.claim_vcs_source(cwd, dirs) if attempt is not None: source_repositories[attempt.root] = attempt # It's possible that the root of the tree is not a VCS by itself, so walk up # the hierarchy until we find a parent folder that is a VCS. If we can't # find any, than no VCSs exist for the top-level of this repository. directory = tree.source_folder while directory != '/' and directory not in source_repositories: directory = os.path.dirname(directory) for vcs in every_vcs: attempt = vcs.claim_vcs_source(directory, os.listdir(directory)) if attempt is not None: source_repositories[directory] = attempt # Note: we want to make sure that we look up source repositories by deepest # directory first. lookup_order = source_repositories.keys() lookup_order.sort(key=len, reverse=True) def find_vcs_for_file(path): """Given an absolute path, find a source repository we know about that claims to track that file. """ for directory in lookup_order: # This seems to be the easiest way to find "is path in the subtree # rooted at directory?" if os.path.relpath(path, directory).startswith('..'): continue vcs = source_repositories[directory] if vcs.is_tracked(os.path.relpath(path, vcs.get_root_dir())): return vcs return None class LinksHtmlifier(object): """Htmlifier which adds blame and external links to VCS web utilities.""" def __init__(self, path): if not os.path.isabs(path): path = os.path.join(tree.source_folder, path) self.vcs = find_vcs_for_file(path) if self.vcs is not None: self.path = os.path.relpath(path, self.vcs.get_root_dir()) self.name = self.vcs.get_vcs_name() def refs(self): return [] def regions(self): return [] def annotations(self): return [] def links(self): if self.vcs is None: yield 5, 'Untracked file', [] return def items(): yield 'log', "Log", self.vcs.generate_log(self.path) yield 'blame', "Blame", self.vcs.generate_blame(self.path) yield 'diff', "Diff", self.vcs.generate_diff(self.path) yield 'raw', "Raw", self.vcs.generate_raw(self.path) yield 5, '%s (%s)' % (self.name, self.vcs.get_rev(self.path)), items() def htmlify(path, text): return LinksHtmlifier(path) __all__ = dxr.plugins.htmlifier_exports()
nrc/dxr
dxr/plugins/omniglot/htmlifier.py
Python
mit
10,987
0.001547
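The Omniglot plugin above claims a repository by spotting its metadata directory and, in load(), walks up parent folders when the indexed tree is only a slice of the checkout. A minimal, self-contained sketch of that detection idea follows; the marker directory names are the standard VCS ones, while the function and constant names are illustrative and not part of the plugin.

import os

VCS_MARKERS = {'.git': 'Git', '.hg': 'Mercurial'}

def find_vcs_root(path):
    """Return (root_dir, vcs_name) for the closest enclosing checkout, or None."""
    directory = os.path.abspath(path)
    while True:
        for marker, name in VCS_MARKERS.items():
            if os.path.isdir(os.path.join(directory, marker)):
                return directory, name
        parent = os.path.dirname(directory)
        if parent == directory:   # reached the filesystem root without a match
            return None
        directory = parent

if __name__ == '__main__':
    print(find_vcs_root(os.getcwd()))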
# -*- Mode: Python -*- # vi:si:et:sw=4:sts=4:ts=4 # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L. # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # # This file may be distributed and/or modified under the terms of # the GNU Lesser General Public License version 2.1 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.LGPL" in the source distribution for more information. # # Headers in this file shall remain intact. from gettext import gettext as _ import gtk import os # import custom glade handler from flumotion.ui import glade from flumotion.component.base.effectsnode import EffectAdminGtkNode __version__ = "$Rev$" class VideoscaleAdminGtkNode(EffectAdminGtkNode): logCategory = 'videoscale-admin' gladeFile = os.path.join('flumotion', 'component', 'effects', 'videoscale', 'videoscale.glade') uiStateHandlers = None def haveWidgetTree(self): self.widget = self.wtree.get_widget('videoscale-vbox') self._height = self.wtree.get_widget('videoscale-height') self._width = self.wtree.get_widget('videoscale-width') self._par_n = self.wtree.get_widget('videoscale-par_n') self._par_d = self.wtree.get_widget('videoscale-par_d') self._is_square = self.wtree.get_widget('videoscale-is_square') self._add_borders = self.wtree.get_widget('videoscale-add_borders') self._apply = self.wtree.get_widget('videoscale-apply') # do the callbacks for the mode setting self._height.connect('value-changed', self._cb_height) self._width.connect('value-changed', self._cb_width) self._par_n.connect('value-changed', self._cb_par) self._par_d.connect('value-changed', self._cb_par) self._is_square.connect('toggled', self._cb_is_square) self._add_borders.connect('toggled', self._cb_add_borders) self._apply.connect('clicked', self._cb_apply) def setUIState(self, state): EffectAdminGtkNode.setUIState(self, state) if not self.uiStateHandlers: uiStateHandlers = {'videoscale-width': self.widthSet, 'videoscale-height': self.heightSet, 'videoscale-is-square': self.isSquareSet, 'videoscale-add-borders': self.addBordersSet} self.uiStateHandlers = uiStateHandlers for k, handler in self.uiStateHandlers.items(): handler(state.get(k)) def stateSet(self, state, key, value): handler = self.uiStateHandlers.get(key, None) if handler: handler(value) def addBordersSet(self, add_borders): if add_borders is not None: self._add_borders.set_active(add_borders) def isSquareSet(self, is_square): if is_square is not None: self._is_square.set_active(is_square) def widthSet(self, width): if width is not None: self._width.handler_block_by_func(self._cb_width) self._width.set_value(width) self._width.handler_unblock_by_func(self._cb_width) def heightSet(self, height): if height is not None: self._height.handler_block_by_func(self._cb_height) self._height.set_value(height) self._height.handler_unblock_by_func(self._cb_height) def _cb_height(self, widget): height = widget.get_value_as_int() d = self.effectCallRemote("setHeight", height) d.addErrback(self.setErrback) def _cb_width(self, widget): width = widget.get_value_as_int() d = self.effectCallRemote("setWidth", width) d.addErrback(self.setErrback) def _cb_par(self, _): par_n = self._par_n.get_value_as_int() par_d = self._par_d.get_value_as_int() d = self.effectCallRemote("setPAR", (par_n, par_d)) d.addErrback(self.setErrback) def _cb_is_square(self, widget): is_square = 
self._is_square.get_active() d = self.effectCallRemote("setIsSquare", is_square) d.addErrback(self.setErrback) def _cb_add_borders(self, widget): add_borders = self._add_borders.get_active() d = self.effectCallRemote("setAddBorders", add_borders) d.addErrback(self.setErrback) def _cb_apply(self, widget): d = self.effectCallRemote("apply") d.addErrback(self.setErrback) def setErrback(self, failure): self.warning("Failure %s setting property: %s" % ( failure.type, failure.getErrorMessage())) return None
flumotion-mirror/flumotion
flumotion/component/effects/videoscale/admin_gtk.py
Python
lgpl-2.1
4,782
0.000209
# -*- coding: utf-8 -*- import unicodecsv from collections import Counter from django.http import HttpResponse from django.shortcuts import render from .models import Disciplina, Docente, Pesquisa, Extensao, Administrativo def query_estudantes(query): result = [ ['1-6', query.filter(estudantes__gte=1, estudantes__lte=6).count()], ['7-15', query.filter(estudantes__gte=7, estudantes__lte=15).count()], ['16-25', query.filter(estudantes__gte=16, estudantes__lte=25).count()], ['26-50', query.filter(estudantes__gte=26, estudantes__lte=50).count()], ['51-70', query.filter(estudantes__gte=51, estudantes__lte=70).count()], ['Mais que 70', query.filter(estudantes__gt=70).count()], ] return result def RelatorioEnsino(request): if 'centro' in request.GET and request.GET['centro']: centro = request.GET['centro'] turmas = Disciplina.objects.filter(docente__centro=centro) else: centro = '' turmas = Disciplina.objects.all() if 'semestre' in request.GET and request.GET['semestre']: semestre = request.GET['semestre'] turmas = turmas.filter(semestre=semestre) else: semestre = '' # número de turmas por tipo teoricas = turmas.filter(tipo='teorica') praticas = turmas.filter(tipo='pratica') estagio = turmas.filter(tipo='estagio') turmas_tipo = [ ('Turmas Teóricas', teoricas.count()), ('Turmas Práticas', praticas.count()), ('Turmas de Estágio', estagio.count()) ] turmas_multicampi = turmas.filter(multicampia=True).count() multicampia = [ ('Turmas sem Multicampia', turmas.count() - turmas_multicampi), ('Turmas Multicampi', turmas_multicampi) ] turmas_nivel = [ ('Turmas de Graduacao', turmas.filter(nivel='graduacao').count()), ('Turmas de Pós-Graduação', turmas.filter(nivel='pos').count()) ] estudantes_turmas = query_estudantes(turmas) estudantes_turmas_teoricas = query_estudantes(teoricas) estudantes_turmas_praticas = query_estudantes(praticas) estudantes_turmas_estagio = query_estudantes(estagio) return render(request, 'relatorio_ensino.html', { 'centro': centro, 'semestre': semestre, 'turmas_tipo': turmas_tipo, 'multicampia': multicampia, 'turmas_nivel': turmas_nivel, 'estudantes_turmas': estudantes_turmas, 'estudantes_turmas_teoricas': estudantes_turmas_teoricas, 'estudantes_turmas_praticas': estudantes_turmas_praticas, 'estudantes_turmas_estagio': estudantes_turmas_estagio, }) def RelatorioDocente(request): if 'centro' in request.GET and request.GET['centro']: centro = request.GET['centro'] docentes_ensino = Disciplina.objects.filter(docente__centro=centro) docentes_pesquisa = Pesquisa.objects.filter(docente__centro=centro) docentes_extensao = Extensao.objects.filter(docente__centro=centro) docentes_admin = Administrativo.objects.filter(docente__centro=centro) num_docentes = Docente.objects.filter(centro=centro).count() else: centro = '' docentes_ensino = Disciplina.objects.all() docentes_pesquisa = Pesquisa.objects.all() docentes_extensao = Extensao.objects.all() docentes_admin = Administrativo.objects.all() num_docentes = Docente.objects.all().count() if 'semestre' in request.GET and request.GET['semestre']: semestre = request.GET['semestre'] docentes_ensino = docentes_ensino.filter(semestre=semestre) docentes_pesquisa = docentes_pesquisa.filter(semestre=semestre) docentes_extensao = docentes_extensao.filter(semestre=semestre) docentes_admin = docentes_admin.filter(semestre=semestre) else: semestre = '' docentes_ensino = [disciplina.docente for disciplina in docentes_ensino.distinct('docente')] docentes_pesquisa = [projeto.docente for projeto in docentes_pesquisa.distinct('docente')] docentes_extensao = 
[projeto.docente for projeto in docentes_extensao.distinct('docente')] docentes_ens_pes = [docente for docente in docentes_pesquisa if docente in docentes_ensino] docentes_ens_ext = [docente for docente in docentes_extensao if docente in docentes_ensino] docentes_ens_pes_ext = [docente for docente in docentes_ens_pes if docente in docentes_ens_ext] num_docentes_ensino = len(docentes_ensino) num_docentes_pesquisa = len(docentes_pesquisa) num_docentes_extensao = len(docentes_extensao) num_docentes_afastados = docentes_admin.filter(afastamento=True) \ .distinct('docente').count() docentes_admin = docentes_admin \ .filter(cargo__in=['fg', 'cd', 'fuc']) ensino = [ ['Com atividades de ensino', num_docentes_ensino], ['Sem atividades de ensino', num_docentes - num_docentes_ensino] ] pesquisa = [ ['Com atividades de pesquisa', num_docentes_pesquisa], ['Sem atividades de pesquisa', num_docentes - num_docentes_pesquisa] ] extensao = [ ['Com atividades de extensão', num_docentes_extensao], ['Sem atividades de extensão', num_docentes - num_docentes_extensao] ] ens_pes_ext = [ ['Sim', len(docentes_ens_pes_ext)], ['Não', num_docentes - len(docentes_ens_pes_ext)] ] ens_pes = [ ['Sim', len(docentes_ens_pes)], ['Não', num_docentes - len(docentes_ens_pes)] ] ens_ext = [ ['Sim', len(docentes_ens_ext)], ['Não', num_docentes - len(docentes_ens_ext)] ] administrativo = [ ['Com atividades administrativas', docentes_admin .distinct('docente').count()], ['Sem atividades administrativas', num_docentes - docentes_admin .distinct('docente').count()] ] admin_detalhes = [ ['FG', docentes_admin.filter(cargo='fg').distinct('docente').count()], ['CD', docentes_admin.filter(cargo='cd').distinct('docente').count()], ['Coordenação de Colegiado', docentes_admin.filter(cargo='fuc') .distinct('docente').count()], ] afastamento = [ ['Docentes afastados', num_docentes_afastados], ['Docentes em exercício', num_docentes - num_docentes_afastados] ] return render(request, 'relatorio_docente.html', { 'centro': centro, 'semestre': semestre, 'ensino': ensino, 'ens_pes_ext': ens_pes_ext, 'ens_pes': ens_pes, 'ens_ext': ens_ext, 'pesquisa': pesquisa, 'extensao': extensao, 'administrativo': administrativo, 'admin_detalhes': admin_detalhes, 'afastamento': afastamento }) def filtro_por_centro(data): result = [ ['CAHL', data.filter(docente__centro='cahl').count()], ['CCAAB', data.filter(docente__centro='ccaab').count()], ['CCS', data.filter(docente__centro='ccs').count()], ['CETEC', data.filter(docente__centro='cetec').count()], ['CFP', data.filter(docente__centro='cfp').count()], ] return result def RelatorioProjetos(request): return render(request, 'relatorio_projetos.html', { 'pesquisa': filtro_por_centro(Pesquisa.objects.all()), 'extensao': filtro_por_centro(Extensao.objects.all()), 'pesquisa_20131': filtro_por_centro(Pesquisa.sem_20131.all()), 'pesquisa_20132': filtro_por_centro(Pesquisa.sem_20132.all()), 'extensao_20131': filtro_por_centro(Extensao.sem_20131.all()), 'extensao_20132': filtro_por_centro(Extensao.sem_20132.all()), }) def valores_ch(data): result = [ ['Menos que 8', sum([item[1] for item in data.items() if item[0]<136])], ['8h', sum([item[1] for item in data.items() if item[0]>=136 and item[0]<153])], ['9h', sum([item[1] for item in data.items() if item[0]>=153 and item[0]<170])], ['10h', sum([item[1] for item in data.items() if item[0]>=170 and item[0]<187])], ['11h', sum([item[1] for item in data.items() if item[0]>=187 and item[0]<204])], ['12h', sum([item[1] for item in data.items() if item[0]>=204 and item[0]<221])], 
['13h', sum([item[1] for item in data.items() if item[0]>=221 and item[0]<238])], ['14h', sum([item[1] for item in data.items() if item[0]>=238 and item[0]<255])], ['15h', sum([item[1] for item in data.items() if item[0]>=255 and item[0]<272])], ['16h', sum([item[1] for item in data.items() if item[0]>=272 and item[0]<289])], ['Mais que 16h', sum([item[1] for item in data.items() if item[0]>289])], ] return result def RelatorioCargaHoraria(request): if 'centro' in request.GET and request.GET['centro']: centro = request.GET['centro'] docentes = Docente.objects.filter(centro=centro) else: centro = '' docentes = Docente.objects.all() ensino_20131 = valores_ch(Counter([i.ch_ensino('20131') for i in docentes])) ensino_20132 = valores_ch(Counter([i.ch_ensino('20132') for i in docentes])) return render(request, 'relatorio_ch.html', { 'centro': centro, 'ensino_20131': ensino_20131, 'ensino_20132': ensino_20132, }) def RelatorioGeral(request): if 'centro' in request.GET and request.GET['centro']: centro = request.GET['centro'] docentes = Docente.objects.filter(centro=centro) else: centro = '' docentes = Docente.objects.all() if 'semestre' in request.GET and request.GET['semestre']: semestre = request.GET['semestre'] else: semestre = '20131' return render(request, 'relatorio_geral.html',{ 'docentes': docentes, 'centro': centro, 'semestre': semestre }) def ExportarDisciplina(request): # Create the HttpResponse object with the appropriate CSV header. response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="disciplinas.csv"' writer = unicodecsv.writer(response, encoding='utf-8') writer.writerow(['Centro', 'Código', 'Nome', 'Docente', 'Semestre', 'Tipo', 'Nível', 'Multicampia', 'Carga horária', 'Estudantes']) for disciplina in Disciplina.objects.all(): writer.writerow([disciplina.docente.centro, disciplina.codigo, disciplina.nome, disciplina.docente, disciplina.semestre, disciplina.tipo, disciplina.nivel, disciplina.multicampia, disciplina.cargahoraria, disciplina.estudantes]) return response def ExportarPesquisa(request): # Create the HttpResponse object with the appropriate CSV header. response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="pesquisa.csv"' writer = unicodecsv.writer(response, encoding='utf-8') writer.writerow(['Centro', 'Nome', 'Docente', 'Semestre', 'Área', 'Financiador', 'Carga horária', 'Estudantes de Graduação', 'Estudantes de Pós', 'Bolsistas PIBIC/PIBITI', 'Bolsistas PPQ', 'Voluntários', 'Parceria Institucional', 'Parceria Interinstitucional']) for projeto in Pesquisa.objects.all(): writer.writerow([projeto.docente.centro, projeto.nome, projeto.docente, projeto.semestre, projeto.area, projeto.financiador, projeto.cargahoraria, projeto.estudantes_graduacao, projeto.estudantes_pos, projeto.bolsistas_pibic, projeto.bolsistas_ppq, projeto.voluntarios, projeto.parceria, projeto.parceria_inter]) return response def ExportarExtensao(request): # Create the HttpResponse object with the appropriate CSV header. 
response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="extensao.csv"' writer = unicodecsv.writer(response, encoding='utf-8') writer.writerow(['Centro', 'Nome', 'Docente', 'Semestre', 'Área', 'Financiador', 'Carga horária', 'Estudantes de Graduação', 'Estudantes de Pós', 'Bolsistas PIBEX', 'Bolsistas PPQ', 'Voluntários', 'Parceria Institucional', 'Parceria Interinstitucional']) for projeto in Extensao.objects.all(): writer.writerow([projeto.docente.centro, projeto.nome, projeto.docente, projeto.semestre, projeto.area, projeto.financiador, projeto.cargahoraria, projeto.estudantes_graduacao, projeto.estudantes_pos, projeto.bolsistas_pibex, projeto.bolsistas_ppq, projeto.voluntarios, projeto.parceria, projeto.parceria_inter]) return response def ExportarAdministrativo(request): # Create the HttpResponse object with the appropriate CSV header. response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="administrativo.csv"' writer = unicodecsv.writer(response, encoding='utf-8') writer.writerow(['Centro', 'Docente', 'Semestre', 'Afastamento', 'Cargo', 'Comissões']) for atividade in Administrativo.objects.all(): writer.writerow([atividade.docente.centro, atividade.docente, atividade.semestre, atividade.afastamento, atividade.cargo, atividade.comissoes]) return response
UFRB/chdocente
cadastro/views.py
Python
agpl-3.0
13,491
0.005275
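The bucket boundaries hard-coded in valores_ch() above are all multiples of 17, so the stored carga horária appears to count roughly 17 units per weekly hour (136-152 maps to 8h, 153-169 to 9h, and so on); note that a total of exactly 289 satisfies neither the 16h test (v < 289) nor the final test (v > 289) and is silently dropped. A small self-contained check of that bucketing, using invented Counter data rather than real Docente records:

from collections import Counter

sample_ch = Counter([120, 136, 140, 170, 290])   # hypothetical per-docente totals
buckets = [
    ('Menos que 8', lambda v: v < 136),
    ('8h', lambda v: 136 <= v < 153),
    ('10h', lambda v: 170 <= v < 187),
    ('Mais que 16h', lambda v: v > 289),
]
for label, test in buckets:
    total = sum(count for value, count in sample_ch.items() if test(value))
    print(label, total)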
print ("ssimiliar to develop")
Dan-Donovan/cs3240-labdemo
helperMaster.py
Python
mit
30
0.066667
from plugins.external.sergio_proxy.plugins.plugin import Plugin


class CacheKill(Plugin):
    name = "CacheKill Plugin"
    optname = "cachekill"
    desc = "Kills page caching by modifying headers."
    implements = ["handleHeader", "connectionMade"]
    has_opts = True
    bad_headers = ['if-none-match', 'if-modified-since']

    def add_options(self, options):
        options.add_argument("--preserve-cookies", action="store_true",
                             help="Preserve cookies (will allow caching in some situations).")

    def handleHeader(self, request, key, value):
        '''Handles all response headers'''
        request.client.headers['Expires'] = "0"
        request.client.headers['Cache-Control'] = "no-cache"

    def connectionMade(self, request):
        '''Handles outgoing request'''
        request.headers['Pragma'] = 'no-cache'
        for h in self.bad_headers:
            if h in request.headers:
                request.headers[h] = ""
P0cL4bs/3vilTwinAttacker
plugins/external/sergio_proxy/plugins/CacheKill.py
Python
gpl-3.0
952
0.015756
from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE

VARCHAR2 = VARCHAR


class CBondSpecialConditions(BaseModel):
    """
    4.127 China bond special provisions

    Attributes
    ----------
    object_id: VARCHAR2(100)
        Object ID
    s_info_windcode: VARCHAR2(40)
        Wind code
    b_info_provisiontype: VARCHAR2(100)
        Provision type
    b_info_callbkorputbkprice: NUMBER(20,4)
        Call price / put-back price (CNY)
    b_info_callbkorputbkdate: VARCHAR2(8)
        Call / put-back date
    b_info_redemporrepurcdate: VARCHAR2(8)
        Call / put-back notification deadline
    b_info_maturityembedded: VARCHAR2(40)
        Embedded-option term description
    b_info_execmaturityembedded: NUMBER(20,4)
        Exercise term
    b_info_couponadj_max: NUMBER(20,4)
        Coupon rate adjustment upper limit
    b_info_couponadj_min: NUMBER(20,4)
        Coupon rate adjustment lower limit
    b_info_content: VARCHAR2(3000)
        Provision content
    opdate: DATETIME
        opdate
    opmode: VARCHAR(1)
        opmode
    """
    __tablename__ = "CBondSpecialConditions"
    object_id = Column(VARCHAR2(100), primary_key=True)
    s_info_windcode = Column(VARCHAR2(40))
    b_info_provisiontype = Column(VARCHAR2(100))
    b_info_callbkorputbkprice = Column(NUMBER(20,4))
    b_info_callbkorputbkdate = Column(VARCHAR2(8))
    b_info_redemporrepurcdate = Column(VARCHAR2(8))
    b_info_maturityembedded = Column(VARCHAR2(40))
    b_info_execmaturityembedded = Column(NUMBER(20,4))
    b_info_couponadj_max = Column(NUMBER(20,4))
    b_info_couponadj_min = Column(NUMBER(20,4))
    b_info_content = Column(VARCHAR2(3000))
    opdate = Column(DATETIME)
    opmode = Column(VARCHAR(1))
SnowWalkerJ/quantlib
quant/data/wind/tables/cbondspecialconditions.py
Python
gpl-3.0
1,754
0.011139
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org> # All rights reserved. # # See LICENSE file for full license. import warnings from . import AWSHelperFn, AWSObject, AWSProperty warnings.warn("This module is outdated and will be replaced with " "troposphere.dynamodb2. Please see the README for " "instructions on how to prepare for this change.") class AttributeDefinition(AWSHelperFn): def __init__(self, name, type): self.data = { 'AttributeName': name, 'AttributeType': type, } def JSONrepr(self): return self.data class Key(AWSProperty): def __init__(self, AttributeName, KeyType): self.data = { 'AttributeName': AttributeName, 'KeyType': KeyType, } def JSONrepr(self): return self.data class ProvisionedThroughput(AWSHelperFn): def __init__(self, ReadCapacityUnits, WriteCapacityUnits): self.data = { 'ReadCapacityUnits': ReadCapacityUnits, 'WriteCapacityUnits': WriteCapacityUnits, } def JSONrepr(self): return self.data class Projection(AWSHelperFn): def __init__(self, ProjectionType, NonKeyAttributes=None): self.data = { 'ProjectionType': ProjectionType } if NonKeyAttributes is not None: self.data['NonKeyAttributes'] = NonKeyAttributes def JSONrepr(self): return self.data class GlobalSecondaryIndex(AWSHelperFn): def __init__(self, IndexName, KeySchema, Projection, ProvisionedThroughput): self.data = { 'IndexName': IndexName, 'KeySchema': KeySchema, 'Projection': Projection, 'ProvisionedThroughput': ProvisionedThroughput, } def JSONrepr(self): return self.data class LocalSecondaryIndex(AWSHelperFn): def __init__(self, IndexName, KeySchema, Projection, ProvisionedThroughput): self.data = { 'IndexName': IndexName, 'KeySchema': KeySchema, 'Projection': Projection, } def JSONrepr(self): return self.data class StreamSpecification(AWSProperty): props = { 'StreamViewType': (basestring, True), } class Table(AWSObject): resource_type = "AWS::DynamoDB::Table" props = { 'AttributeDefinitions': ([AttributeDefinition], True), 'GlobalSecondaryIndexes': ([GlobalSecondaryIndex], False), 'KeySchema': ([Key], True), 'LocalSecondaryIndexes': ([LocalSecondaryIndex], False), 'ProvisionedThroughput': (ProvisionedThroughput, True), 'StreamSpecification': (StreamSpecification, False), 'TableName': (basestring, False), }
Yipit/troposphere
troposphere/dynamodb.py
Python
bsd-2-clause
2,797
0.000358
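Since this deprecated troposphere.dynamodb module still ships the helper classes shown above, a short usage sketch may help; it assumes Python 2 (the props reference basestring) and a troposphere release that still exposes this module, and the table and attribute names are invented.

from troposphere import Template
from troposphere.dynamodb import AttributeDefinition, Key, ProvisionedThroughput, Table

template = Template()
template.add_resource(Table(
    "UsersTable",                                    # logical resource name (example)
    TableName="users",
    AttributeDefinitions=[AttributeDefinition("id", "S")],
    KeySchema=[Key(AttributeName="id", KeyType="HASH")],
    ProvisionedThroughput=ProvisionedThroughput(5, 5),
))
print(template.to_json())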
import glob import os import re import yaml from argparse import ArgumentParser from .runner import Runner from .cli import CLI class GhcRunner(Runner): IMPORT_REGEX = re.compile(r'^\s*import\s+(.*?)$') def reset(self): self.required = [] self.incdirs = [] def make_code(self, file, filepath, filename): files = dict() code = '' for line in file: m = self.IMPORT_REGEX.match(line) if m: files.update(self.on_import(m.group(1))) code += line files[filename] = code return files def set_search_path(self, paths): self.search_path = paths def add_search_path(self, path): self.search_path.append(path) def on_import(self, path): files = dict() # TODO return files def build_compiler_options(self, options): super(GhcRunner, self).build_compiler_options(options) self.add_commandline_options('-dynamic') class GhcCLI(CLI): def __init__(self, compiler=None): super(GhcCLI, self).__init__('Haskell', compiler) def get_runner(self, args, options): return GhcRunner(args.language, args.compiler, args.save, args.encoding, args.retry, args.retry_wait) class HaskellStackCLI: class InnerCLI(GhcCLI): def __init__(self, compiler=None): self.libdirs = [] super(HaskellStackCLI.InnerCLI, self).__init__(compiler) def get_runner(self, args, options): runner = super(HaskellStackCLI.InnerCLI, self).get_runner(args, options) runner.set_search_path(self.libdirs) return runner def __init__(self, compiler=None): self.setup(compiler) # command line option def setup(self, compiler): self.parser = ArgumentParser(add_help=False) self.parser.add_argument( '-c', '--compiler', default=compiler ) self.parser.add_argument( '-n', '--dryrun', action='store_true', help='dryrun' ) subparser = self.parser.add_subparsers() run_cmd = subparser.add_parser( 'run', prefix_chars='+', description='build and run command', help='build and run command. see `run +h`' ) build_cmd = subparser.add_parser( 'build', prefix_chars='+', description='build and run command (run command alias)', help='build and run command (run command alias). 
see `build +h`' ) passthrough_cmds = [run_cmd, build_cmd] for passthrough_cmd in passthrough_cmds: passthrough_cmd.set_defaults(handler=self.command_run) passthrough_cmd.add_argument( 'options', metavar='OPTIONS', nargs='*', help='options' ) def parse_command_line(self, argv): opts, args = self.parser.parse_known_args(argv) if 'WANDBOX_DRYRUN' in os.environ: opts.dryrun = True return opts, args def print_help(self): self.parser.print_help() def execute(self): self.execute_with_args() def execute_with_args(self, args=None): opts, args = self.parse_command_line(args) if hasattr(opts, 'handler'): opts.handler(opts, args) else: self.print_help() def command_run(self, opts, args): cmd = HaskellStackCLI.InnerCLI(opts.compiler) run_options = ['run'] cli_options = args if opts.dryrun: cli_options.append('--dryrun') with open('package.yaml', 'r') as yml: config = yaml.safe_load(yml) exec_config = config['executables']['haskell-stack-exe'] main = exec_config['main'] main_dir = exec_config['source-dirs'] run_options.append(os.path.join(main_dir, main)) options = exec_config['ghc-options'] run_options.extend(options) dirs = config['library']['source-dirs'] if isinstance(dirs, str): dirs = [dirs] for dir in dirs: cmd.libdirs.append(dir) for x in glob.glob(os.path.join(dir, '*.hs')): run_options.append(x) cmd.execute_with_args(cli_options + run_options) def ghc(compiler=None): cli = GhcCLI(compiler) cli.execute() def haskell_stack(compiler=None): cli = HaskellStackCLI(compiler) cli.execute() def main(): ghc() if __name__ == '__main__': main()
srz-zumix/wandbox-api
wandbox/__ghc__.py
Python
mit
4,680
0.000427
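HaskellStackCLI.command_run() above expects a stack-style package.yaml with an executable named haskell-stack-exe. A minimal sketch of the fields it reads; only the key names come from the code, the concrete values are invented.

import yaml

example_package_yaml = """
executables:
  haskell-stack-exe:
    main: Main.hs
    source-dirs: app
    ghc-options:
      - -threaded
library:
  source-dirs: src
"""

config = yaml.safe_load(example_package_yaml)
exec_config = config['executables']['haskell-stack-exe']
print(exec_config['main'])
print(exec_config['ghc-options'])
print(config['library']['source-dirs'])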
from django import forms

from .widgets import MarkdownxWidget


class MarkdownxFormField(forms.CharField):
    """
    Used in FormFields as a Markdown enabled replacement for ``CharField``.
    """

    def __init__(self, *args, **kwargs):
        """
        Arguments are similar to Django's default ``CharField``.

        See Django's `documentations on CharField`_ for additional information.

        .. _docs on Charfield:
           https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.CharField
        """
        super(MarkdownxFormField, self).__init__(*args, **kwargs)

        if issubclass(self.widget.__class__, forms.widgets.MultiWidget):
            is_markdownx_widget = any(
                issubclass(item.__class__, MarkdownxWidget)
                for item in getattr(self.widget, 'widgets', list())
            )
            if not is_markdownx_widget:
                self.widget = MarkdownxWidget()
        elif not issubclass(self.widget.__class__, MarkdownxWidget):
            self.widget = MarkdownxWidget()
wuga214/Django-Wuga
env/lib/python2.7/site-packages/markdownx/fields.py
Python
apache-2.0
1,070
0.004673
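For context, MarkdownxFormField is meant to drop into an ordinary Django form wherever forms.CharField would otherwise be used. A minimal sketch follows; the form and field names are illustrative, and a configured Django project with django-markdownx installed is assumed.

from django import forms
from markdownx.fields import MarkdownxFormField

class PostForm(forms.Form):
    title = forms.CharField(max_length=100)
    body = MarkdownxFormField()   # rendered with MarkdownxWidget automatically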
#!/usr/local/bin/python2.7 from sys import exit, stdout, argv from os import environ, system environ['KERAS_BACKEND'] = 'tensorflow' import numpy as np import utils import signal from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate from keras.models import Model from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard from keras.optimizers import Adam, SGD from keras.utils import np_utils from keras import backend as K K.set_image_data_format('channels_last') from adversarial import Adversary import obj import config #config.DEBUG = True #config.n_truth = 5 #config.truth = 'resonanceType' #config.adversary_mask = 0 ''' some global definitions ''' #obj.limit = 50 LEARNMASS = True LEARNRHO = False LEARNPT = True DECORRMASS = True DECORRRHO = False DECORRPT = False adv_loss_weights = [0.0001, 100] ADV = 0 NEPOCH = 10 APOSTLE = 'panda_3' system('cp %s models/train_%s.py'%(argv[0], APOSTLE)) ''' instantiate data loaders ''' def make_coll(fpath): coll = obj.PFSVCollection() coll.add_categories(['singletons', 'pf'], fpath) return coll top = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/ZpTT_*_CATEGORY.npy') qcd = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/QCD_*_CATEGORY.npy') data = [top, qcd] # preload some data just to get the dimensions if obj.limit is None: data[0].objects['train']['pf'].load(memory=False) dims = data[0].objects['train']['pf'].data.data.shape else: dims = (None, obj.limit, 9) # override ''' first build the classifier! ''' # set up data opts = {'learn_mass':LEARNMASS, 'learn_pt':LEARNPT, 'learn_rho':LEARNRHO, 'normalize':False} classifier_train_gen = obj.generatePF(data, partition='train', batch=501, **opts) classifier_validation_gen = obj.generatePF(data, partition='validate', batch=1001, **opts) classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts) test_i, test_o, test_w = next(classifier_test_gen) #print test_i inputs = Input(shape=(dims[1], dims[2]), name='input') mass_inputs = Input(shape=(1,), name='mass_input') rho_inputs = Input(shape=(1,), name='rho_input') pt_inputs = Input(shape=(1,), name='pt_input') norm = BatchNormalization(momentum=0.6, name='input_bnorm') (inputs) conv = Conv1D(32, 2, activation='relu', name='conv0', kernel_initializer='lecun_uniform', padding='same')(norm) norm = BatchNormalization(momentum=0.6, name='conv0_bnorm') (conv) conv = Conv1D(16, 4, activation='relu', name='conv1', kernel_initializer='lecun_uniform', padding='same')(norm) norm = BatchNormalization(momentum=0.6, name='conv1_bnorm') (conv) lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm') (norm) norm = BatchNormalization(momentum=0.6, name='lstm_norm') (lstm) #drop = Dropout(0.1) (norm) drop = norm dense = Dense(100, activation='relu',name='lstmdense',kernel_initializer='lecun_uniform') (drop) norm = BatchNormalization(momentum=0.6,name='lstmdense_norm') (dense) for i in xrange(1,5): dense = Dense(50, activation='relu',name='dense%i'%i)(norm) norm = BatchNormalization(momentum=0.6,name='dense%i_norm'%i)(dense) if LEARNMASS or LEARNPT or LEARNRHO: to_merge = [norm] if LEARNMASS: to_merge.append(mass_inputs) if LEARNRHO: to_merge.append(rho_inputs) if LEARNPT: to_merge.append(pt_inputs) merge = concatenate(to_merge) dense = Dense(50, activation='tanh', name='dense5a')(merge) norm = BatchNormalization(momentum=0.6,name='dense5a_norm')(dense) # dense = Dense(50, activation='tanh', name='dense5')(norm) # norm = 
BatchNormalization(momentum=0.6,name='dense5_norm')(dense) else: dense = Dense(50, activation='tanh',name='dense5')(norm) norm = BatchNormalization(momentum=0.6,name='dense5_norm')(dense) y_hat = Dense(config.n_truth, activation='softmax') (norm) i = [inputs] if LEARNMASS: i.append(mass_inputs) if LEARNRHO: i.append(rho_inputs) if LEARNPT: i.append(pt_inputs) classifier = Model(inputs=i, outputs=y_hat) classifier.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy']) # print '########### CLASSIFIER ############' # classifier.summary() # print '###################################' pred = classifier.predict(test_i) # ctrl+C now triggers a graceful exit def save_classifier(name='classifier_conv', model=classifier): model.save('models/%s_%s.h5'%(name, APOSTLE)) def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier): save_classifier(name, model) flog.close() exit(1) signal.signal(signal.SIGINT, save_and_exit) ''' now build the adversarial setup ''' # set up data opts = {'decorr_mass':DECORRMASS, 'decorr_rho':DECORRRHO, 'decorr_pt':DECORRPT, 'learn_mass':LEARNMASS, 'learn_pt':LEARNPT, 'learn_rho':LEARNRHO} train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False, **opts) validation_gen = obj.generatePF(data, partition='validate', batch=1000, normalize=True, **opts) test_gen = obj.generatePF(data, partition='test', batch=1, **opts) # build the model kin_hats = Adversary(config.n_decorr_bins, n_outputs=(int(DECORRMASS)+int(DECORRPT)+int(DECORRRHO)), scale=0.0001)(y_hat) # kin_hats = Adversary(config.n_decorr_bins, n_outputs=2, scale=0.01)(y_hat) i = [inputs] if LEARNMASS: i.append(mass_inputs) if LEARNRHO: i.append(rho_inputs) if LEARNPT: i.append(pt_inputs) pivoter = Model(inputs=i, outputs=[y_hat]+kin_hats) pivoter.compile(optimizer=Adam(lr=0.001), loss=['categorical_crossentropy'] + ['categorical_crossentropy' for _ in kin_hats], loss_weights=adv_loss_weights) print '############# ARCHITECTURE #############' pivoter.summary() print '###################################' ''' Now we train both models ''' if ADV > 0: print 'TRAINING ADVERSARIAL NETWORK' system('mv logs/train_conv_adv.log logs/train_conv_adv.log.old') flog = open('logs/train_conv_adv.log','w') callback = LambdaCallback( on_batch_end=lambda batch, logs: flog.write('%i,%f,%f,%f,%f\n'%(batch,logs['loss'],logs['dense_6_loss'],logs['dense_7_loss'],logs['dense_1_loss'])), on_epoch_end=lambda epoch, logs: save_classifier(name='regularized_conv') ) tb = TensorBoard( log_dir = './logs/conv_logs', write_graph = True, write_images = True ) print ' -Pre-training the classifier' # bit of pre-training to get the classifer in the right place classifier.fit_generator(classifier_train_gen, steps_per_epoch=1000, epochs=2) save_classifier(name='pretrained_conv') # np.set_printoptions(threshold='nan') # print test_o # print classifier.predict(test_i) def save_and_exit(signal=None, frame=None, name='regularized_conv', model=classifier): save_classifier(name, model) flog.close() exit(1) signal.signal(signal.SIGINT, save_and_exit) print ' -Training the adversarial stack' # now train the model for real pivoter.fit_generator(train_gen, steps_per_epoch=5000, epochs=NEPOCH*2, callbacks = [ModelCheckpoint('models/regularized_conv_%s_{epoch:02d}_{val_loss:.5f}.h5'%APOSTLE)], validation_data=validation_gen, validation_steps=100 ) save_classifier(name='regularized_conv') save_classifier(name='pivoter_conv', model=pivoter) flog.close() if ADV % 2 == 0: print 'TRAINING CLASSIFIER 
ONLY' system('mv logs/train_conv.log logs/train_conv.log.old') flog = open('logs/train_conv.log','w') callback = LambdaCallback( on_batch_end=lambda batch, logs: flog.write('%i,%f\n'%(batch,logs['loss'])), on_epoch_end=lambda epoch, logs: save_classifier(name='classifier_conv') ) tb = TensorBoard( log_dir = './logs/lstmnoreg_logs', write_graph = True, write_images = True ) n_epochs = 1 if (ADV == 2) else 2 # fewer epochs if network is pretrained n_epochs *= NEPOCH def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier): save_classifier(name, model) flog.close() exit(1) signal.signal(signal.SIGINT, save_and_exit) system('rm models/classifier_conv_%s_*_*.h5'%(APOSTLE)) # clean checkpoints classifier.fit_generator(classifier_train_gen, steps_per_epoch=1000, epochs=n_epochs, callbacks = [ModelCheckpoint('models/classifier_conv_%s_{epoch:02d}_{val_loss:.5f}.h5'%APOSTLE)], validation_data=classifier_validation_gen, validation_steps=100 ) save_classifier(name='classifier_conv')
sidnarayanan/BAdNet
train/pf/adv/models/train_panda_3.py
Python
mit
9,427
0.017079
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Django settings for bmcodelab project. Generated by 'django-admin startproject' using Django 2.1.1. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ ''' import os import pymysql # noqa: 402 pymysql.version_info = (1, 4, 6, 'final', 0) # change mysqlclient version pymysql.install_as_MySQLdb() # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # Update the secret key to a value of your own before deploying the app. SECRET_KEY = '!@#PUT-YOUR-SECRET-KEY-HERE!@#' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True # SECURITY WARNING: App Engine's security features ensure that it is safe to # have ALLOWED_HOSTS = ['*'] when the app is deployed. If you deploy a Django # app not on App Engine, make sure to set an appropriate host here. # See https://docs.djangoproject.com/en/2.1/ref/settings/ ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'bopis.apps.BopisConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'bmcodelab.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'bmcodelab.wsgi.application' # [START db_setup] if os.getenv('GAE_APPLICATION', None): # Running on production App Engine, so connect to Google Cloud SQL using # the unix socket at /cloudsql/<your-cloudsql-connection string> DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'HOST': 'Place/your/CloudSQL/hostname/here', 'NAME': 'bonjour_meal', 'USER': 'bmdbuser', 'PASSWORD': 'bmdbpassword', } } else: # Running locally so connect to either a local MySQL instance or connect to # Cloud SQL via the proxy. 
To start the proxy via command line: # # $ cloud_sql_proxy -instances=[INSTANCE_CONNECTION_NAME]=tcp:3306 # # See https://cloud.google.com/sql/docs/mysql-connect-proxy DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'HOST': '127.0.0.1', 'PORT': '3306', 'NAME': 'bonjour_meal', 'USER': 'bmdbuser', 'PASSWORD': 'bmdbpassword', } } # [END db_setup] # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Los_Angeles' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_ROOT = 'static' STATIC_URL = '/static/'
google-business-communications/bm-bonjour-meal-django-starter-code
bonjourmeal-codelab/full-sample/bmcodelab/settings.py
Python
apache-2.0
5,084
0.000787
#!/usr/bin/python2 # core.py # aoneill - 04/10/17 import sys import random import time import pauschpharos as PF import lumiversepython as L SEQ_LIM = 200 def memoize(ignore = None): if(ignore is None): ignore = set() def inner(func): cache = dict() def wrapper(*args): memo = tuple(filter(lambda x: x, map(lambda (i, e): e if (i not in ignore) else None, enumerate(args)))) if(memo not in cache): cache[memo] = func(*args) return cache[memo] return wrapper return inner def blank(): p = PF.PauschPharos() p.SetBlank() p.Trigger(PF.DEFAULT_ID, None) def fireplace(rig): # Warm up cache for seq in xrange(SEQ_LIM): query(rig, '$sequence=%d' % seq) def init(upload = True, run = True, wipe = True, fire = True): rig = L.Rig("/home/teacher/Lumiverse/PBridge.rig.json") rig.init() # Upload the blank template if(upload): blank() # Run if requested if(run): rig.run() # Wipe if requested if(wipe): for seq in xrange(SEQ_LIM): query(rig, '$sequence=%d' % seq).setRGBRaw(0, 0, 0) # Heat up the cache if(fire and not wipe): fireplace(rig) return rig @memoize(ignore = set([0])) def query(rig, text): return rig.select(text) def seq(rig, num): return query(rig, '$sequence=%d' % num) def rand_color(): func = lambda: random.randint(0, 255) / 255.0 return (func(), func(), func())
alexoneill/15-love
game/core.py
Python
mit
1,573
0.031786
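The memoize(ignore=...) helper in core.py above caches query() results keyed on the selector string while skipping the rig handle at positional index 0. Below is a simplified, self-contained illustration of that behaviour; it is not the original decorator (whose filter() call also drops falsy arguments from the key), and all names are invented.

def memoize_ignoring_first(func):
    cache = {}
    def wrapper(*args):
        key = args[1:]              # build the cache key without the ignored arg
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    return wrapper

calls = []

@memoize_ignoring_first
def fake_query(rig, text):
    calls.append(text)              # stands in for an expensive rig.select(text)
    return 'selection:%s' % text

fake_query('rig-a', '$sequence=1')
fake_query('rig-b', '$sequence=1')  # cache hit even though the rig handle differs
assert calls == ['$sequence=1']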
import os
import sys

import django
from django.conf import settings


def runtests():
    settings_file = 'django_ethereum_events.settings.test'
    if not settings.configured:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_file)
    django.setup()

    from django.test.runner import DiscoverRunner
    runner_class = DiscoverRunner
    test_args = ['django_ethereum_events']
    failures = runner_class(
        verbosity=1, interactive=True, failfast=False).run_tests(test_args)
    sys.exit(failures)


if __name__ == '__main__':
    runtests()
artemistomaras/django-ethereum-events
runtests.py
Python
mit
570
0
################################################################################### # # Copyright (c) 2017-2019 MuK IT GmbH. # # This file is part of MuK Documents Attachment # (see https://mukit.at). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################### import os import re import base64 import hashlib import itertools import logging import mimetypes import textwrap from collections import defaultdict from odoo import api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import AccessError, ValidationError from odoo.tools import config, human_size, ustr, html_escape from odoo.tools.mimetypes import guess_mimetype _logger = logging.getLogger(__name__) class DocumentIrAttachment(models.Model): _inherit = 'ir.attachment' #---------------------------------------------------------- # Database #---------------------------------------------------------- store_document = fields.Many2one( comodel_name='muk_dms.file', string="Document File", auto_join=False, index=True, copy=False) store_document_directory = fields.Many2one( related="store_document.directory", string="Document Directory", readonly=True) is_store_document_link = fields.Boolean( string="Is Document Link", default=False, help=textwrap.dedent("""\ There are two possible ways in which a file and an attachment can be related. - True: The attachment is a link to a file. A file can have any number of links. - False: The attachment stores its contents in a file. This is a one to one relationship. 
""")) #---------------------------------------------------------- # Helper #---------------------------------------------------------- @api.model def _get_datas_inital_vals(self): vals = super(DocumentIrAttachment, self)._get_datas_inital_vals() vals.update({'store_document': False}) return vals @api.model def _get_datas_clean_vals(self, attach): vals = super(DocumentIrAttachment, self)._get_datas_clean_vals(attach) if self._storage() != 'document' and attach.store_document: vals['store_document'] = attach.store_document return vals @api.model def _clean_datas_after_write(self, vals): super(DocumentIrAttachment, self)._clean_datas_after_write(vals) if 'store_document' in vals: vals['store_document'].unlink() @api.model def _get_attachment_directory(self, attach): params = self.env['ir.config_parameter'].sudo() attachment_directory_id = params.get_param( 'muk_dms_attachment.attachment_directory', None ) if attachment_directory_id: model = self.env['muk_dms.directory'].sudo() directory = model.browse(int(attachment_directory_id)) if directory.exists(): return directory raise ValidationError(_('A directory has to be defined.')) #---------------------------------------------------------- # Function #---------------------------------------------------------- @api.model def storage_locations(self): locations = super(DocumentIrAttachment, self).storage_locations() locations.append('document') return locations @api.model def force_storage(self): if not self.env.user._is_admin(): raise AccessError(_('Only administrators can execute this action.')) if self._storage() != 'document': return super(DocumentIrAttachment, self).force_storage() else: storage_domain = { 'document': ('store_document', '=', False), } record_domain = [ '&', ('type', '=', 'binary'), '&', storage_domain[self._storage()], '&', ('is_store_document_link', '=', False), '|', ('res_field', '=', False), ('res_field', '!=', False) ] self.search(record_domain).migrate(batch_size=100) return True @api.multi def migrate(self, batch_size=None): if self._storage() != 'document': self.with_context(migration=True).write({ 'is_store_document_link': False }) return super(DocumentIrAttachment, self).migrate(batch_size=batch_size) #---------------------------------------------------------- # Read #---------------------------------------------------------- def _compute_datas(self): for attach in self: if attach.store_document: attach.datas = attach.sudo().store_document.content else: super(DocumentIrAttachment, attach)._compute_datas() #---------------------------------------------------------- # Constrains #---------------------------------------------------------- @api.constrains('store_document', 'is_store_document_link') def _check_store_document(self): for attach in self: if attach.store_document and attach.store_document.id: attachments = attach.sudo().search([ '&', ('is_store_document_link', '=', False), '&', ('store_document', '=', attach.store_document.id), '|', ('res_field', '=', False), ('res_field', '!=', False)]) if len(attachments) >= 2: raise ValidationError(_('The file is already referenced by another attachment.')) @api.constrains('store_document', 'is_store_document_link') def _check_is_store_document_link(self): for attach in self: if attach.is_store_document_link and not attach.store_document: raise ValidationError(_('A linked attachments has to be linked to a file.')) #---------------------------------------------------------- # Create, Write, Delete #---------------------------------------------------------- @api.multi def 
_inverse_datas(self): location = self._storage() for attach in self: if location == 'document': if attach.is_store_document_link: raise ValidationError(_('The data of an attachment created by a file cannot be changed.')) value = attach.datas bin_data = base64.b64decode(value) if value else b'' vals = self._get_datas_inital_vals() vals = self._update_datas_vals(vals, attach, bin_data) if value: directory = self._get_attachment_directory(attach) if attach.store_document: attach.store_document.sudo().write({ 'directory': directory and directory.id, 'content': value }) store_document = attach.store_document else: store_document = self.env['muk_dms.file'].sudo().create({ 'name': "[A-%s] %s" % (attach.id, attach.datas_fname or attach.name), 'directory': directory and directory.id, 'content': value }) vals['store_document'] = store_document and store_document.id elif not value and attach.store_document: attach.store_document.unlink() clean_vals = self._get_datas_clean_vals(attach) models.Model.write(attach.sudo(), vals) self._clean_datas_after_write(clean_vals) else: super(DocumentIrAttachment, attach)._inverse_datas() @api.multi def copy(self, default=None): self.ensure_one() default = dict(default or []) if not 'store_document' in default and self.store_document: default.update({'store_document': False}) file = self.store_document.sudo() copy = super(DocumentIrAttachment, self).copy(default) store_document = self.env['muk_dms.file'].sudo().create({ 'name': "[A-%s] %s" % (copy.id, copy.datas_fname or copy.name), 'directory': file.directory.id, 'content': file.content}) copy.write({'store_document': store_document.id}) return copy else: return super(DocumentIrAttachment, self).copy(default) @api.multi def write(self, vals): result = super(DocumentIrAttachment, self).write(vals) if 'datas_fname' in vals and vals['datas_fname']: for attach in self: if attach.store_document and not attach.is_store_document_link: attach.store_document.sudo().write({ 'name': "[A-%s] %s" % (attach.id, vals['datas_fname']) }) return result @api.multi def unlink(self): files = self.env['muk_dms.file'] for attach in self.sudo(): if attach.store_document and not attach.is_store_document_link: files |= attach.store_document result = super(DocumentIrAttachment, self).unlink() if files: files.sudo().unlink() return result
muk-it/muk_dms
muk_dms_attachment/models/ir_attachment.py
Python
lgpl-3.0
10,186
0.005007
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import getdate, validate_email_add, today from frappe.model.naming import make_autoname from frappe import throw, _ import frappe.permissions from frappe.model.document import Document from frappe.model.mapper import get_mapped_doc from erpnext.utilities.transaction_base import delete_events class EmployeeUserDisabledError(frappe.ValidationError): pass class Employee(Document): def onload(self): self.get("__onload").salary_structure_exists = frappe.db.get_value("Salary Structure", {"employee": self.name, "is_active": "Yes", "docstatus": ["!=", 2]}) def autoname(self): naming_method = frappe.db.get_value("HR Settings", None, "emp_created_by") if not naming_method: throw(_("Please setup Employee Naming System in Human Resource > HR Settings")) else: if naming_method == 'Naming Series': self.name = make_autoname(self.naming_series + '.####') elif naming_method == 'Employee Number': self.name = self.employee_number self.employee = self.name def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, ["Active", "Left"]) self.employee = self.name self.validate_date() self.validate_email() self.validate_status() self.validate_employee_leave_approver() self.validate_reports_to() if self.user_id: self.validate_for_enabled_user_id() self.validate_duplicate_user_id() else: existing_user_id = frappe.db.get_value("Employee", self.name, "user_id") if existing_user_id: frappe.permissions.remove_user_permission( "Employee", self.name, existing_user_id) def on_update(self): if self.user_id: self.update_user() self.update_user_permissions() def update_user_permissions(self): frappe.permissions.add_user_permission("Employee", self.name, self.user_id) frappe.permissions.set_user_permission_if_allowed("Company", self.company, self.user_id) def update_user(self): # add employee role if missing user = frappe.get_doc("User", self.user_id) user.flags.ignore_permissions = True if "Employee" not in user.get("user_roles"): user.add_roles("Employee") # copy details like Fullname, DOB and Image to User if self.employee_name and not (user.first_name and user.last_name): employee_name = self.employee_name.split(" ") if len(employee_name) >= 3: user.last_name = " ".join(employee_name[2:]) user.middle_name = employee_name[1] elif len(employee_name) == 2: user.last_name = employee_name[1] user.first_name = employee_name[0] if self.date_of_birth: user.birth_date = self.date_of_birth if self.gender: user.gender = self.gender if self.image: if not user.user_image: user.user_image = self.image try: frappe.get_doc({ "doctype": "File", "file_name": self.image, "attached_to_doctype": "User", "attached_to_name": self.user_id }).insert() except frappe.DuplicateEntryError: # already exists pass user.save() def validate_date(self): if self.date_of_birth and getdate(self.date_of_birth) > getdate(today()): throw(_("Date of Birth cannot be greater than today.")) if self.date_of_birth and self.date_of_joining and getdate(self.date_of_birth) >= getdate(self.date_of_joining): throw(_("Date of Joining must be greater than Date of Birth")) elif self.date_of_retirement and self.date_of_joining and (getdate(self.date_of_retirement) <= getdate(self.date_of_joining)): throw(_("Date Of Retirement must be greater than Date of Joining")) elif self.relieving_date and self.date_of_joining and 
(getdate(self.relieving_date) <= getdate(self.date_of_joining)): throw(_("Relieving Date must be greater than Date of Joining")) elif self.contract_end_date and self.date_of_joining and (getdate(self.contract_end_date) <= getdate(self.date_of_joining)): throw(_("Contract End Date must be greater than Date of Joining")) def validate_email(self): if self.company_email: validate_email_add(self.company_email, True) if self.personal_email: validate_email_add(self.personal_email, True) def validate_status(self): if self.status == 'Left' and not self.relieving_date: throw(_("Please enter relieving date.")) def validate_for_enabled_user_id(self): if not self.status == 'Active': return enabled = frappe.db.sql("""select name from `tabUser` where name=%s and enabled=1""", self.user_id) if not enabled: throw(_("User {0} is disabled").format( self.user_id), EmployeeUserDisabledError) def validate_duplicate_user_id(self): employee = frappe.db.sql_list("""select name from `tabEmployee` where user_id=%s and status='Active' and name!=%s""", (self.user_id, self.name)) if employee: throw(_("User {0} is already assigned to Employee {1}").format( self.user_id, employee[0]), frappe.DuplicateEntryError) def validate_employee_leave_approver(self): for l in self.get("leave_approvers")[:]: if "Leave Approver" not in frappe.get_roles(l.leave_approver): frappe.get_doc("User", l.leave_approver).add_roles("Leave Approver") def validate_reports_to(self): if self.reports_to == self.name: throw(_("Employee cannot report to himself.")) def on_trash(self): delete_events(self.doctype, self.name) @frappe.whitelist() def get_retirement_date(date_of_birth=None): import datetime ret = {} if date_of_birth: try: dt = getdate(date_of_birth) + datetime.timedelta(21915) ret = {'date_of_retirement': dt.strftime('%Y-%m-%d')} except ValueError: # invalid date ret = {} return ret @frappe.whitelist() def make_salary_structure(source_name, target=None): target = get_mapped_doc("Employee", source_name, { "Employee": { "doctype": "Salary Structure", "field_map": { "name": "employee", } } }) target.make_earn_ded_table() return target def validate_employee_role(doc, method): # called via User hook if "Employee" in [d.role for d in doc.get("user_roles")]: if not frappe.db.get_value("Employee", {"user_id": doc.name}): frappe.msgprint(_("Please set User ID field in an Employee record to set Employee Role")) doc.get("user_roles").remove(doc.get("user_roles", {"role": "Employee"})[0]) def update_user_permissions(doc, method): # called via User hook if "Employee" in [d.role for d in doc.get("user_roles")]: employee = frappe.get_doc("Employee", {"user_id": doc.name}) employee.update_user_permissions() def send_birthday_reminders(): """Send Employee birthday reminders if no 'Stop Birthday Reminders' is not set.""" if int(frappe.db.get_single_value("HR Settings", "stop_birthday_reminders") or 0): return from frappe.utils.user import get_enabled_system_users users = None birthdays = get_employees_who_are_born_today() if birthdays: if not users: users = [u.email_id or u.name for u in get_enabled_system_users()] for e in birthdays: frappe.sendmail(recipients=filter(lambda u: u not in (e.company_email, e.personal_email, e.user_id), users), subject=_("Birthday Reminder for {0}").format(e.employee_name), message=_("""Today is {0}'s birthday!""").format(e.employee_name), reply_to=e.company_email or e.personal_email or e.user_id, bulk=True) def get_employees_who_are_born_today(): """Get Employee properties whose birthday is today.""" return 
frappe.db.sql("""select name, personal_email, company_email, user_id, employee_name
	from tabEmployee where day(date_of_birth) = day(%(date)s) and month(date_of_birth) = month(%(date)s) and status = 'Active'""",
	{"date": today()}, as_dict=True)

def get_holiday_list_for_employee(employee, raise_exception=True):
	if employee:
		holiday_list, company = frappe.db.get_value("Employee", employee, ["holiday_list", "company"])
	else:
		holiday_list = ''
		company = frappe.db.get_value("Global Defaults", None, "default_company")

	if not holiday_list:
		holiday_list = frappe.db.get_value("Company", company, "default_holiday_list")

	if not holiday_list and raise_exception:
		frappe.throw(_('Please set a default Holiday List for Employee {0} or Company {1}').format(employee, company))

	return holiday_list
ShashaQin/erpnext
erpnext/hr/doctype/employee/employee.py
Python
agpl-3.0
8,327
0.023778
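One detail worth calling out from the Employee controller above: get_retirement_date() adds a hard-coded datetime.timedelta(21915) to the date of birth, and 21915 days is 60 * 365.25, i.e. a retirement age of roughly 60 years. A standalone sketch of that arithmetic, using a made-up date of birth:

import datetime

date_of_birth = datetime.date(1980, 5, 17)
retirement = date_of_birth + datetime.timedelta(days=21915)   # 60 * 365.25 days
print(retirement.strftime('%Y-%m-%d'))   # 2040-05-17, the 60th birthday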
# -*- coding: UTF-8 -*- __author__ = 'Jeffrey'
duanhun/apk_for_linux
settings.py
Python
apache-2.0
47
0
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Canal para cuevana # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import re import urlparse from channelselector import get_thumbnail_path from core import config from core import logger from core import scrapertools from core import servertools from core.item import Item from core.tmdb import Tmdb from core import servertools from servers import expurl DEBUG = config.get_setting("debug") def agrupa_datos(data): ## Agrupa los datos data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->','',data) data = re.sub(r'\s+',' ',data) data = re.sub(r'>\s<','><',data) return data def mainlist(item): logger.info("[pelisadicto.py] mainlist") thumb_buscar = get_thumbnail_path()+ "thumb_buscar.png" itemlist = [] itemlist.append( Item(channel=item.channel, title="Últimas agregadas" , action="agregadas", url="http://www.descargacineclasico.net/", viewmode="movie_with_plot")) itemlist.append( Item(channel=item.channel, title="Listado por género" , action="porGenero", url="http://www.descargacineclasico.net/")) itemlist.append( Item(channel=item.channel, title="Buscar" , action="search", url="http://www.descargacineclasico.net/", thumbnail=thumb_buscar) ) return itemlist def porGenero(item): logger.info("[descargacineclasico.py] porGenero") itemlist = [] data = scrapertools.cache_page(item.url) logger.info("data="+data) patron = '<ul class="columnas">(.*?)</ul>' data = re.compile(patron,re.DOTALL).findall(data) patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data[0]) for url,genero in matches: itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot")) return itemlist def search(item,texto): logger.info("[descargacineclasico.py] search") ''' texto_get = texto.replace(" ","%20") texto_post = texto.replace(" ","+") item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post) ''' item.url = "http://www.descargacineclasico.net/?s=" + texto try: return agregadas(item) # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error( "%s" % line ) return [] return agregadas(item) def agregadas(item): logger.info("[descargacineclasico.py] agregadas") itemlist = [] ''' # Descarga la pagina if "?search=" in item.url: url_search = item.url.split("?search=") data = scrapertools.cache_page(url_search[0], url_search[1]) else: data = scrapertools.cache_page(item.url) logger.info("data="+data) ''' data = scrapertools.cache_page(item.url) logger.info("data="+data) # Extrae las entradas fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<div class="review-box-container">(.*?)wp-pagenavi')) #<a href="http://www.descargacineclasico.net/ciencia-ficcion/quatermass-2-1957/" #title="Quatermass II (Quatermass 2) (1957) Descargar y ver Online"> #<img style="border-radius:6px;" #src="//www.descargacineclasico.net/wp-content/uploads/2015/12/Quatermass-II-2-1957.jpg" #alt="Quatermass II (Quatermass 2) (1957) Descargar y ver Online Gratis" height="240" width="160"> patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url patron+= 'title="([^"]+)".*?' # title patron+= 'src="([^"]+).*?' 
# thumbnail patron+= '<p>([^<]+)' # plot matches = re.compile(patron,re.DOTALL).findall(fichas) for url,title,thumbnail,plot in matches: title=title[0:title.find("Descargar y ver Online")] url=urlparse.urljoin(item.url,url) thumbnail = urlparse.urljoin(url,thumbnail) itemlist.append( Item(channel=item.channel, action="findvideos", title=title+" ", fulltitle=title , url=url , thumbnail=thumbnail, plot=plot, show=title) ) # Paginación try: #<ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul> patron_nextpage = r'<a class="nextpostslink" rel="next" href="([^"]+)' next_page = re.compile(patron_nextpage,re.DOTALL).findall(data) itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page[0], viewmode="movie_with_plot") ) except: pass return itemlist def findvideos(item): logger.info("[pelisadicto.py] findvideos") itemlist = [] data = scrapertools.cache_page(item.url) data = scrapertools.unescape(data) titulo = item.title titulo_tmdb = re.sub("([0-9+])", "", titulo.strip()) oTmdb= Tmdb(texto_buscado=titulo_tmdb, idioma_busqueda="es") item.fanart=oTmdb.get_backdrop() # Descarga la pagina # data = scrapertools.cache_page(item.url) patron = '#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)' #Añado calidad matches = re.compile(patron,re.DOTALL).findall(data) for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches: title = titulo + "_" + scrapedidioma + "_"+ scrapedserver + "_" + scrapedcalidad itemlist.append( Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, show=item.show, fanart=item.fanart) ) return itemlist def play(item): logger.info("[descargacineclasico.py] play") video = expurl.expand_url(item.url) itemlist = [] itemlist = servertools.find_video_items(data=video) for videoitem in itemlist: videoitem.title = item.title videoitem.fulltitle = item.fulltitle videoitem.thumbnail = item.thumbnail videoitem.channel = item.channel return itemlist
ChopChopKodi/pelisalacarta
python/main-classic/channels/descargacineclasico.py
Python
gpl-3.0
6,362
0.017781
import os from ctypes import CDLL from ctypes.util import find_library from django.conf import settings # Creating the settings dictionary with any settings, if needed. GEOIP_SETTINGS = dict((key, getattr(settings, key)) for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY') if hasattr(settings, key)) lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None) # The shared library for the GeoIP C API. May be downloaded # from http://www.maxmind.com/download/geoip/api/c/ if lib_path: lib_name = None else: # TODO: Is this really the library name for Windows? lib_name = 'GeoIP' # Getting the path to the GeoIP library. if lib_name: lib_path = find_library(lib_name) if lib_path is None: raise RuntimeError('Could not find the GeoIP library (tried "%s"). ' 'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name) lgeoip = CDLL(lib_path) # Getting the C `free` for the platform. if os.name == 'nt': libc = CDLL('msvcrt') else: libc = CDLL(None) free = libc.free
912/M-new
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/geoip/libgeoip.py
Python
gpl-2.0
1,094
0.001828
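A standalone sketch of the loading pattern the module above depends on: resolve a shared library by name with ctypes.util.find_library() and open it with CDLL. Because the GeoIP C library is often not installed, libm is used here purely as a stand-in that exists on most Unix systems; the names and messages are illustrative only.

from ctypes import CDLL
from ctypes.util import find_library

lib_path = find_library('m')   # stand-in for find_library('GeoIP')
if lib_path is None:
    # Mirrors the error path above: fall back to an explicitly configured library path.
    print('library not found; an explicit path (cf. GEOIP_LIBRARY_PATH) would be needed')
else:
    libm = CDLL(lib_path)
    print('loaded', lib_path)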
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2015-2017 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import configparser import logging import os from unittest.mock import patch import testtools from testtools.matchers import ( Contains, Equals, FileContains, FileExists, HasLength, Not ) import fixtures import yaml from snapcraft.internal.meta import ( CommandError, create_snap_packaging, _SnapPackaging ) from snapcraft.internal import common from snapcraft.internal.errors import MissingGadgetError from snapcraft import ProjectOptions, tests class CreateBaseTestCase(tests.TestCase): def setUp(self): super().setUp() self.config_data = { 'architectures': ['amd64'], 'name': 'my-package', 'version': '1.0', 'description': 'my description', 'summary': 'my summary', 'confinement': 'devmode', } patcher = patch( 'snapcraft.internal.project_loader.get_snapcraft_yaml') self.mock_get_yaml = patcher.start() self.mock_get_yaml.return_value = os.path.join( 'snap', 'snapcraft.yaml') self.addCleanup(patcher.stop) # Ensure the ensure snapcraft.yaml method has something to copy. os.makedirs('snap') open(os.path.join('snap', 'snapcraft.yaml'), 'w').close() self.meta_dir = os.path.join(self.prime_dir, 'meta') self.hooks_dir = os.path.join(self.meta_dir, 'hooks') self.snap_yaml = os.path.join(self.meta_dir, 'snap.yaml') self.project_options = ProjectOptions() def generate_meta_yaml(self): create_snap_packaging(self.config_data, self.project_options) self.assertTrue( os.path.exists(self.snap_yaml), 'snap.yaml was not created') with open(self.snap_yaml) as f: return yaml.load(f) class CreateTestCase(CreateBaseTestCase): def test_create_meta(self): y = self.generate_meta_yaml() expected = {'architectures': ['amd64'], 'confinement': 'devmode', 'description': 'my description', 'summary': 'my summary', 'name': 'my-package', 'version': '1.0'} self.assertEqual(y, expected, expected) def test_create_meta_with_epoch(self): self.config_data['epoch'] = '1*' y = self.generate_meta_yaml() self.assertTrue( 'epoch' in y, 'Expected "epoch" property to be copied into snap.yaml') self.assertEqual(y['epoch'], '1*') def test_create_meta_with_assumes(self): self.config_data['assumes'] = ['feature1', 'feature2'] y = self.generate_meta_yaml() self.assertTrue( 'assumes' in y, 'Expected "assumes" property to be copied into snap.yaml') self.assertEqual(y['assumes'], ['feature1', 'feature2']) def test_create_gadget_meta_with_gadget_yaml(self): gadget_yaml = 'stub entry: stub value' with open(os.path.join('gadget.yaml'), 'w') as f: f.write(gadget_yaml) self.config_data['type'] = 'gadget' create_snap_packaging(self.config_data, self.project_options) expected_gadget = os.path.join(self.meta_dir, 'gadget.yaml') self.assertTrue(os.path.exists(expected_gadget)) with open(expected_gadget) as f: self.assertEqual(f.read(), gadget_yaml) def test_create_gadget_meta_with_missing_gadget_yaml_raises_error(self): self.config_data['type'] = 'gadget' self.assertRaises( MissingGadgetError, 
create_snap_packaging, self.config_data, self.project_options) def test_create_meta_with_declared_icon(self): open(os.path.join(os.curdir, 'my-icon.png'), 'w').close() self.config_data['icon'] = 'my-icon.png' y = self.generate_meta_yaml() self.assertTrue( os.path.exists(os.path.join(self.meta_dir, 'gui', 'icon.png')), 'icon.png was not setup correctly') self.assertFalse('icon' in y, 'icon found in snap.yaml {}'.format(y)) def test_create_meta_with_declared_icon_and_setup(self): fake_logger = fixtures.FakeLogger(level=logging.INFO) self.useFixture(fake_logger) gui_path = os.path.join('setup', 'gui') os.makedirs(gui_path) setup_icon_content = b'setup icon' with open(os.path.join(gui_path, 'icon.png'), 'wb') as f: f.write(setup_icon_content) declared_icon_content = b'declared icon' with open('my-icon.png', 'wb') as f: f.write(declared_icon_content) self.config_data['icon'] = 'my-icon.png' y = self.generate_meta_yaml() expected_icon = os.path.join(self.meta_dir, 'gui', 'icon.png') self.assertTrue(os.path.exists(expected_icon), 'icon.png was not setup correctly') with open(expected_icon, 'rb') as f: self.assertEqual(f.read(), declared_icon_content) self.assertFalse('icon' in y, 'icon found in snap.yaml {}'.format(y)) def test_create_meta_with_declared_icon_and_setup_ran_twice_ok(self): gui_path = os.path.join('setup', 'gui') os.makedirs(gui_path) icon_content = b'this is the icon' with open(os.path.join(gui_path, 'icon.png'), 'wb') as f: f.write(icon_content) open(os.path.join(os.curdir, 'my-icon.png'), 'w').close() self.config_data['icon'] = 'my-icon.png' create_snap_packaging(self.config_data, self.project_options) # Running again should be good create_snap_packaging(self.config_data, self.project_options) def test_create_meta_with_icon_in_setup(self): gui_path = os.path.join('setup', 'gui') os.makedirs(gui_path) icon_content = b'this is the icon' with open(os.path.join(gui_path, 'icon.png'), 'wb') as f: f.write(icon_content) y = self.generate_meta_yaml() expected_icon = os.path.join(self.meta_dir, 'gui', 'icon.png') self.assertTrue(os.path.exists(expected_icon), 'icon.png was not setup correctly') with open(expected_icon, 'rb') as f: self.assertEqual(f.read(), icon_content) self.assertFalse('icon' in y, 'icon found in snap.yaml {}'.format(y)) def test_create_meta_with_app(self): os.mkdir(self.prime_dir) open(os.path.join(self.prime_dir, 'app.sh'), 'w').close() self.config_data['apps'] = { 'app1': {'command': 'app.sh'}, 'app2': {'command': 'app.sh', 'plugs': ['network']}, 'app3': {'command': 'app.sh', 'plugs': ['network-server']} } self.config_data['plugs'] = { 'network-server': {'interface': 'network-bind'}} y = self.generate_meta_yaml() for app in ['app1', 'app2', 'app3']: app_wrapper_path = os.path.join( self.prime_dir, 'command-{}.wrapper'.format(app)) self.assertTrue( os.path.exists(app_wrapper_path), 'the wrapper for {!r} was not setup correctly'.format(app)) expected = { 'architectures': ['amd64'], 'apps': { 'app1': { 'command': 'command-app1.wrapper', }, 'app2': { 'command': 'command-app2.wrapper', 'plugs': ['network'], }, 'app3': { 'command': 'command-app3.wrapper', 'plugs': ['network-server'], }, }, 'description': 'my description', 'summary': 'my summary', 'name': 'my-package', 'version': '1.0', 'confinement': 'devmode', 'plugs': { 'network-server': { 'interface': 'network-bind', } } } self.assertEqual(y, expected) def test_create_meta_with_app_desktop_key(self): os.mkdir(self.prime_dir) open(os.path.join(self.prime_dir, 'app.sh'), 'w').close() with open(os.path.join(self.prime_dir, 
'app1.desktop'), 'w') as f: f.write('[Desktop Entry]\nExec=app1.exe\nIcon=app1.png') icon_dir = os.path.join(self.prime_dir, 'usr', 'share') os.makedirs(icon_dir) open(os.path.join(icon_dir, 'app2.png'), 'w').close() with open(os.path.join(self.prime_dir, 'app2.desktop'), 'w') as f: f.write('[Desktop Entry]\nExec=app2.exe\nIcon=/usr/share/app2.png') with open(os.path.join(self.prime_dir, 'app3.desktop'), 'w') as f: f.write('[Desktop Entry]\nExec=app3.exe\nIcon=app3.png') self.config_data['apps'] = { 'app1': {'command': 'app.sh', 'desktop': 'app1.desktop'}, 'app2': {'command': 'app.sh', 'desktop': 'app2.desktop'}, 'my-package': {'command': 'app.sh', 'desktop': 'app3.desktop'} } self.generate_meta_yaml() desktop_file = os.path.join(self.meta_dir, 'gui', 'app1.desktop') self.assertThat(desktop_file, FileExists()) contents = configparser.ConfigParser(interpolation=None) contents.read(desktop_file) section = 'Desktop Entry' self.assertTrue(section in contents) self.assertEqual(contents[section].get('Exec'), 'my-package.app1 %U') self.assertEqual(contents[section].get('Icon'), 'app1.png') desktop_file = os.path.join(self.meta_dir, 'gui', 'app2.desktop') self.assertThat(desktop_file, FileExists()) contents = configparser.ConfigParser(interpolation=None) contents.read(desktop_file) section = 'Desktop Entry' self.assertTrue(section in contents) self.assertEqual(contents[section].get('Exec'), 'my-package.app2 %U') self.assertEqual(contents[section].get('Icon'), '${SNAP}/usr/share/app2.png') desktop_file = os.path.join(self.meta_dir, 'gui', 'my-package.desktop') self.assertThat(desktop_file, FileExists()) contents = configparser.ConfigParser(interpolation=None) contents.read(desktop_file) section = 'Desktop Entry' self.assertTrue(section in contents) self.assertEqual(contents[section].get('Exec'), 'my-package %U') snap_yaml = os.path.join('prime', 'meta', 'snap.yaml') self.assertThat(snap_yaml, Not(FileContains('desktop: app1.desktop'))) self.assertThat(snap_yaml, Not(FileContains('desktop: app2.desktop'))) self.assertThat(snap_yaml, Not(FileContains('desktop: app3.desktop'))) self.assertThat(snap_yaml, Not(FileContains('desktop: my-package.desktop'))) def test_create_meta_with_hook(self): hooksdir = os.path.join(self.snap_dir, 'hooks') os.makedirs(hooksdir) open(os.path.join(hooksdir, 'foo'), 'w').close() open(os.path.join(hooksdir, 'bar'), 'w').close() os.chmod(os.path.join(hooksdir, 'foo'), 0o755) os.chmod(os.path.join(hooksdir, 'bar'), 0o755) self.config_data['hooks'] = { 'foo': {'plugs': ['plug']}, 'bar': {} } y = self.generate_meta_yaml() self.assertThat( y, Contains('hooks'), "Expected generated YAML to contain 'hooks'") for hook in ('foo', 'bar'): generated_hook_path = os.path.join( self.prime_dir, 'meta', 'hooks', hook) self.assertThat( generated_hook_path, FileExists(), 'The {!r} hook was not setup correctly'.format(hook)) self.assertThat( y['hooks'], Contains(hook), 'Expected generated hooks to contain {!r}'.format(hook)) self.assertThat( y['hooks']['foo'], Contains('plugs'), "Expected generated 'foo' hook to contain 'plugs'") self.assertThat(y['hooks']['foo']['plugs'], HasLength(1)) self.assertThat(y['hooks']['foo']['plugs'][0], Equals('plug')) self.assertThat( y['hooks']['bar'], Not(Contains('plugs')), "Expected generated 'bar' hook to not contain 'plugs'") class WriteSnapDirectoryTestCase(CreateBaseTestCase): scenarios = ( ('with build artifacts', dict(build_info='yes')), ('without build artifacts', dict(build_info='')), ) def test_write_snap_directory(self): if self.build_info: 
self.useFixture(fixtures.EnvironmentVariable( 'SNAPCRAFT_BUILD_INFO', self.build_info)) # Setup a snap directory containing a few things. _create_file(os.path.join(self.snap_dir, 'snapcraft.yaml')) _create_file( os.path.join(self.snap_dir, 'hooks', 'test-hook'), executable=True) # Now write the snap directory, and verify everything was migrated, as # well as the hook making it into meta/. self.generate_meta_yaml() prime_snap_dir = os.path.join(self.prime_dir, 'snap') if self.build_info: self.assertThat(os.path.join(prime_snap_dir, 'snapcraft.yaml'), FileExists()) else: self.assertThat(os.path.join(prime_snap_dir, 'snapcraft.yaml'), Not(FileExists())) self.assertThat( os.path.join(prime_snap_dir, 'hooks', 'test-hook'), FileExists()) self.assertThat( os.path.join(self.hooks_dir, 'test-hook'), FileExists()) # The hook should be empty, because the one in snap/hooks is empty, and # no wrapper is generated (i.e. that hook is copied to both locations). self.assertThat( os.path.join(self.hooks_dir, 'test-hook'), FileContains('')) def test_snap_hooks_overwrite_part_hooks(self): # Setup a prime/snap directory containing a hook. part_hook = os.path.join(self.prime_dir, 'snap', 'hooks', 'test-hook') _create_file(part_hook, content='from part', executable=True) # Setup a snap directory containing the same hook snap_hook = os.path.join(self.snap_dir, 'hooks', 'test-hook') _create_file(snap_hook, content='from snap', executable=True) # Now write the snap directory, and verify that the snap hook overwrote # the part hook in both prime/snap/hooks and prime/meta/hooks. self.generate_meta_yaml() prime_snap_dir = os.path.join(self.prime_dir, 'snap') self.assertThat( os.path.join(prime_snap_dir, 'hooks', 'test-hook'), FileExists()) self.assertThat( os.path.join(self.hooks_dir, 'test-hook'), FileExists()) # Both hooks in snap/hooks and meta/hooks should contain 'from snap' as # that one should have overwritten the other (and its wrapper). self.assertThat( os.path.join(self.prime_dir, 'snap', 'hooks', 'test-hook'), FileContains('from snap')) self.assertThat( os.path.join(self.prime_dir, 'meta', 'hooks', 'test-hook'), FileContains('from snap')) def test_snap_hooks_not_executable_raises(self): # Setup a snap directory containing a few things. _create_file(os.path.join(self.snap_dir, 'snapcraft.yaml')) _create_file(os.path.join(self.snap_dir, 'hooks', 'test-hook')) # Now write the snap directory. This process should fail as the hook # isn't executable. with testtools.ExpectedException(CommandError, "hook 'test-hook' is not executable"): self.generate_meta_yaml() class GenerateHookWrappersTestCase(CreateBaseTestCase): def test_generate_hook_wrappers(self): # Set up the prime directory to contain a few hooks in snap/hooks snap_hooks_dir = os.path.join(self.prime_dir, 'snap', 'hooks') hook1_path = os.path.join(snap_hooks_dir, 'test-hook1') hook2_path = os.path.join(snap_hooks_dir, 'test-hook2') for path in (hook1_path, hook2_path): _create_file(path, executable=True) # Now generate hook wrappers, and verify that they're correct self.generate_meta_yaml() for hook in ('test-hook1', 'test-hook2'): hook_path = os.path.join(self.hooks_dir, hook) self.assertThat(hook_path, FileExists()) self.assertThat(hook_path, tests.IsExecutable()) # The hook in meta/hooks should exec the one in snap/hooks, as it's # a wrapper generated by snapcraft. 
self.assertThat( hook_path, FileContains(matcher=Contains( 'exec "$SNAP/snap/hooks/{}"'.format(hook)))) def test_generate_hook_wrappers_not_executable_raises(self): # Set up the prime directory to contain a hook in snap/hooks that is # not executable. snap_hooks_dir = os.path.join(self.prime_dir, 'snap', 'hooks') _create_file(os.path.join(snap_hooks_dir, 'test-hook')) # Now attempt to generate hook wrappers. This should fail, as the hook # itself is not executable. with testtools.ExpectedException(CommandError, "hook 'test-hook' is not executable"): self.generate_meta_yaml() class CreateWithConfinementTestCase(CreateBaseTestCase): scenarios = [(confinement, dict(confinement=confinement)) for confinement in ['strict', 'devmode', 'classic']] def test_create_meta_with_confinement(self): self.config_data['confinement'] = self.confinement y = self.generate_meta_yaml() self.assertTrue( 'confinement' in y, 'Expected "confinement" property to be in snap.yaml') self.assertEqual(y['confinement'], self.confinement) class CreateWithGradeTestCase(CreateBaseTestCase): scenarios = [(grade, dict(grade=grade)) for grade in ['stable', 'devel']] def test_create_meta_with_grade(self): self.config_data['grade'] = self.grade y = self.generate_meta_yaml() self.assertTrue( 'grade' in y, 'Expected "grade" property to be in snap.yaml') self.assertEqual(y['grade'], self.grade) # TODO this needs more tests. class WrapExeTestCase(tests.TestCase): def setUp(self): super().setUp() # TODO move to use outer interface self.packager = _SnapPackaging({'confinement': 'devmode'}, ProjectOptions()) @patch('snapcraft.internal.common.assemble_env') def test_wrap_exe_must_write_wrapper(self, mock_assemble_env): mock_assemble_env.return_value = """\ PATH={0}/part1/install/usr/bin:{0}/part1/install/bin """.format(self.parts_dir) relative_exe_path = 'test_relexepath' open(os.path.join(self.prime_dir, relative_exe_path), 'w').close() # Check that the wrapper is created even if there is already a file # with the same name. open(os.path.join( self.prime_dir, 'test_relexepath.wrapper'), 'w').close() relative_wrapper_path = self.packager._wrap_exe(relative_exe_path) wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) expected = ('#!/bin/sh\n' 'PATH=$SNAP/usr/bin:$SNAP/bin\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "$SNAP/test_relexepath" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) @patch('snapcraft.internal.common.assemble_env') def test_wrap_exe_writes_wrapper_with_basename(self, mock_assemble_env): mock_assemble_env.return_value = """\ PATH={0}/part1/install/usr/bin:{0}/part1/install/bin """.format(self.parts_dir) relative_exe_path = 'test_relexepath' open(os.path.join(self.prime_dir, relative_exe_path), 'w').close() relative_wrapper_path = self.packager._wrap_exe( relative_exe_path, basename='new-name') wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) self.assertEqual(relative_wrapper_path, 'new-name.wrapper') expected = ('#!/bin/sh\n' 'PATH=$SNAP/usr/bin:$SNAP/bin\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "$SNAP/test_relexepath" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) def test_snap_shebangs_extracted(self): """Shebangs pointing to the snap's install dir get extracted. 
If the exe has a shebang that points to the snap's install dir, the wrapper script will execute it directly rather than relying on the shebang. The shebang needs to be an absolute path, and we don't know in which directory the snap will be installed. Executing it in the wrapper script allows us to use the $SNAP environment variable. """ relative_exe_path = 'test_relexepath' shebang_path = os.path.join( self.parts_dir, 'testsnap', 'install', 'snap_exe') exe_contents = '#!{}\n'.format(shebang_path) with open(os.path.join(self.prime_dir, relative_exe_path), 'w') as exe: exe.write(exe_contents) relative_wrapper_path = self.packager._wrap_exe(relative_exe_path) wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) expected = ( '#!/bin/sh\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "$SNAP/snap_exe"' ' "$SNAP/test_relexepath" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) with open(os.path.join(self.prime_dir, relative_exe_path), 'r') as exe: # The shebang wasn't changed, since we don't know what the # path will be on the installed system. self.assertEqual(exe_contents, exe.read()) def test_non_snap_shebangs_ignored(self): """Shebangs not pointing to the snap's install dir are ignored. If the shebang points to a system executable, there's no need to interfere. """ relative_exe_path = 'test_relexepath' exe_contents = '#!/bin/bash\necho hello\n' with open(os.path.join(self.prime_dir, relative_exe_path), 'w') as exe: exe.write(exe_contents) relative_wrapper_path = self.packager._wrap_exe(relative_exe_path) wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) expected = ('#!/bin/sh\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "$SNAP/test_relexepath" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) with open(os.path.join(self.prime_dir, relative_exe_path), 'r') as exe: self.assertEqual(exe_contents, exe.read()) def test_non_shebang_binaries_ignored(self): """Native binaries are ignored. If the executable is a native binary, and thus not have a shebang, it's ignored. """ relative_exe_path = 'test_relexepath' # Choose a content which can't be decoded with utf-8, to make # sure no decoding errors happen. 
exe_contents = b'\xf0\xf1' path = os.path.join(self.prime_dir, relative_exe_path) with open(path, 'wb') as exe: exe.write(exe_contents) relative_wrapper_path = self.packager._wrap_exe(relative_exe_path) wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) expected = ('#!/bin/sh\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "$SNAP/test_relexepath" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) with open(path, 'rb') as exe: self.assertEqual(exe_contents, exe.read()) @patch('snapcraft.internal.common.run') def test_exe_is_in_path(self, run_mock): app_path = os.path.join(self.prime_dir, 'bin', 'app1') os.mkdir(os.path.dirname(app_path)) open(app_path, 'w').close() relative_wrapper_path = self.packager._wrap_exe('app1') wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path) expected = ('#!/bin/sh\n' '\n\n' 'LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:$LD_LIBRARY_PATH\n' 'exec "app1" "$@"\n') with open(wrapper_path) as wrapper_file: wrapper_contents = wrapper_file.read() self.assertEqual(expected, wrapper_contents) def test_command_does_not_exist(self): common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)] apps = {'app1': {'command': 'command-does-not-exist'}} raised = self.assertRaises( EnvironmentError, self.packager._wrap_apps, apps) self.assertEqual( "The specified command 'command-does-not-exist' defined in the " "app 'app1' does not exist or is not executable", str(raised)) def test_command_is_not_executable(self): common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)] apps = {'app1': {'command': 'command-not-executable'}} cmd_path = os.path.join(self.prime_dir, 'bin', apps['app1']['command']) os.mkdir(os.path.dirname(cmd_path)) open(cmd_path, 'w').close() raised = self.assertRaises( EnvironmentError, self.packager._wrap_apps, apps) self.assertEqual( "The specified command 'command-not-executable' defined in the " "app 'app1' does not exist or is not executable", str(raised)) def test_command_found(self): common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)] apps = {'app1': {'command': 'command-executable'}} cmd_path = os.path.join(self.prime_dir, 'bin', apps['app1']['command']) os.mkdir(os.path.dirname(cmd_path)) open(cmd_path, 'w').close() os.chmod(cmd_path, 0o755) wrapped_apps = self.packager._wrap_apps(apps) self.assertEqual(wrapped_apps, {'app1': {'command': 'command-app1.wrapper'}}) def _create_file(path, *, content='', executable=False): os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'w') as f: f.write(content) if executable: os.chmod(path, 0o755)
3v1n0/snapcraft
snapcraft/tests/test_meta.py
Python
gpl-3.0
27,949
0
# This file is part of the musicbrainzngs library # Copyright (C) Alastair Porter, Adrian Sampson, and others # This file is distributed under a BSD-2-Clause type license. # See the COPYING file for more information. import sys import locale import xml.etree.ElementTree as ET from . import compat def _unicode(string, encoding=None): """Try to decode byte strings to unicode. This can only be a guess, but this might be better than failing. It is safe to use this on numbers or strings that are already unicode. """ if isinstance(string, compat.unicode): unicode_string = string elif isinstance(string, compat.bytes): # use given encoding, stdin, preferred until something != None is found if encoding is None: encoding = sys.stdin.encoding if encoding is None: encoding = locale.getpreferredencoding() unicode_string = string.decode(encoding, "ignore") else: unicode_string = compat.unicode(string) return unicode_string.replace('\x00', '').strip() def bytes_to_elementtree(bytes_or_file): """Given a bytestring or a file-like object that will produce them, parse and return an ElementTree. """ if isinstance(bytes_or_file, compat.basestring): s = bytes_or_file else: s = bytes_or_file.read() if compat.is_py3: s = _unicode(s, "utf-8") f = compat.StringIO(s) tree = ET.ElementTree(file=f) return tree
hzlf/openbroadcast
website/apps/musicbrainzngs/util.py
Python
gpl-3.0
1,428
0.010504
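For the common case, bytes_to_elementtree() above boils down to: decode UTF-8 bytes to text, wrap the text in a StringIO, and hand it to xml.etree.ElementTree. A minimal standalone equivalent, with a made-up XML snippet:

import io
import xml.etree.ElementTree as ET

xml_bytes = b'<metadata><artist id="1"><name>Example Artist</name></artist></metadata>'
tree = ET.ElementTree(file=io.StringIO(xml_bytes.decode('utf-8')))
print(tree.getroot().tag)              # metadata
print(tree.find('artist/name').text)   # Example Artist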
""" This file is meant to try the idea of having the diffraction pattern data appear in one window, and then give the user the option of simply having """
cduff4464/2016_summer_XPD
out_of_date/reverse_engineer/display2.py
Python
bsd-2-clause
155
0.006452
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide a table of data regarding bachelor's degrees earned by women. The data is broken down by field for any given year. This module contains one pandas Dataframe: ``data``. .. rubric:: ``data`` :bokeh-dataframe:`bokeh.sampledata.degrees.data` ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import annotations import logging # isort:skip log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Bokeh imports from ..util.sampledata import package_csv #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'data', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- data = package_csv('degrees', 'percent-bachelors-degrees-women-usa.csv')
bokeh/bokeh
bokeh/sampledata/degrees.py
Python
bsd-3-clause
2,054
0.008763
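A hedged usage sketch for the sampledata module above; it assumes bokeh and pandas are installed and that the degrees CSV is available on the local machine (depending on the bokeh version, sample data may need to be fetched first, see bokeh.sampledata).

from bokeh.sampledata.degrees import data

print(list(data.columns)[:5])   # typically 'Year' plus individual degree fields
print(data.head())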
import os import sys from setuptools import setup version = '1.6.2.dev0' if sys.argv[-1] == 'publish': os.system('python setup.py sdist bdist_wheel upload') print("You probably want to also tag the version now:") print(" git tag -a %s -m 'version %s'" % (version, version)) print(" git push --tags") sys.exit() readme = open('README.rst').read() history = open('HISTORY.rst').read() setup( name='django-richtextfield', version=version, description='A Django model field and widget that renders a customizable WYSIWYG/rich text editor', long_description=readme + '\n\n' + history, author='Jaap Roes', author_email='jaap.roes@gmail.com', url='https://github.com/jaap3/django-richtextfield', packages=[ 'djrichtextfield', ], include_package_data=True, install_requires=[], python_requires='>=3.6', license='MIT', zip_safe=False, keywords='django-richtextfield, djrichtextfield django wywiwyg field', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], )
jaap3/django-richtextfield
setup.py
Python
mit
1,486
0.000673
from django.conf.urls import patterns, include, url from django.contrib import admin from counter.api import router admin.autodiscover() urlpatterns = patterns('', # Examples: # url(r'^$', 'tdc.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r'^', include(admin.site.urls)), url(r'^', include("massadmin.urls")), url(r'^api/v1/', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) )
tdf/tdc
tdc/urls.py
Python
gpl-3.0
489
0.010225
# This is a hack to disable GDAL support in GeoDjango. It seems there is no # easier way due to the GDAL_LIBRARY_PATH setting not behaving as documented # (i.e. setting it to a non-existent file will disable GDAL). Disabling GDAL is # unfortunately needed, because it breaks TIFF support for pgmagick, which is # required by e.g. the cropping back-end. GeoDjango loads GDAL into memory # which a) is a waste of memory if we don't use it and b) on at least Ubuntu # GDAL seems to be compiled in a way that it takes over TIFF I/O. And this # causes pgmagick to segfault. This patch is added here, because the database # engine is the first thing that is loaded which loads GeoDjango's GDAL module. # The patch below is based on the GeoDjango's GDAL module for Django 1.6: # https://github.com/django/django/blob/stable/1.6.x/django/contrib/gis/gdal/libgdal.py import logging import os import sys original_os_name = os.name os.name = "GDAL blocking OS" from django.contrib.gis import gdal os.name = original_os_name if not gdal.HAS_GDAL: logging.getLogger(__name__).warn("GeoDjango's GDAL support was disabled by " "CATMAID, because it breaks TIFF support in pgmagick. See " "https://github.com/catmaid/CATMAID/issues/1218 for more details.")
catsop/CATMAID
django/lib/custom_postgresql_psycopg2/__init__.py
Python
gpl-3.0
1,268
0.003155
# !/usr/bin/env python # # Hornet - SSH Honeypot # # Copyright (C) 2015 Aniket Panse <aniketpanse@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import gevent.monkey gevent.monkey.patch_all() import paramiko import re import os import hornet from hornet.main import Hornet from hornet.tests.commands.base import BaseTestClass LS_L_REGEX = r"[-d][rwx-]{9}(.*)" class HornetTests(BaseTestClass): def test_basic_ls(self): """ Test basic 'ls' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('etc' in command_output) self.assertTrue('var' in command_output) self.assertTrue('bin' in command_output) self.assertTrue('initrd.img' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_version_string(self): """ Test basic 'ls --version' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls --version' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] expected_output = [] version_file_path = os.path.join(os.path.dirname(hornet.__file__), 'data', 'commands', 'ls', 'version') with open(version_file_path) as 
version_file: for line in version_file: line = line.strip() expected_output.append(line) self.assertEquals(command, ls_command) self.assertEquals(command_output, '\r\n'.join(expected_output)) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_help_string(self): """ Test basic 'ls --help' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls --help' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] expected_output = [] help_file_path = os.path.join(os.path.dirname(hornet.__file__), 'data', 'commands', 'ls', 'help') with open(help_file_path) as help_file: for line in help_file: line = line.strip() expected_output.append(line) self.assertEquals(command, ls_command) self.assertEquals(command_output, '\r\n'.join(expected_output)) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_long(self): """ Test basic 'ls -l' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -l' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) actual_list = command_output.split('\r\n')[1:] # Ignore the first "total" entry expected_list = ['initrd.img', 'var', 'etc', 'bin'] self.verify_long_list(actual_list, expected_list) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_dir_args(self): """ Test basic 'ls etc var' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') 
channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls etc var' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) dir_outputs = sorted(command_output.split('\r\n\r\n')) self.assertTrue('etc:\r\n' in dir_outputs[0]) self.assertTrue('passwd' in dir_outputs[0]) self.assertTrue('init.d' in dir_outputs[0]) self.assertTrue('sysctl.conf' in dir_outputs[0]) self.assertTrue("var:" in dir_outputs[1]) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_long_dir_args(self): """ Test basic 'ls -l etc var' with multiple directory arguments """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -l etc var' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) dir_outputs = sorted(command_output.split('\r\n\r\n')) self.assertTrue(dir_outputs[0].startswith('etc:')) self.assertTrue('total ' in dir_outputs[0]) self.assertTrue('passwd' in dir_outputs[0]) self.assertTrue('sysctl.conf' in dir_outputs[0]) # No carriage return here, because it was split before self.assertTrue('init.d' in dir_outputs[0]) # No carriage return here, because it was split before self.assertEquals(len(dir_outputs[0].split('\r\n')), 5) self.assertTrue(dir_outputs[1].startswith('var:')) self.assertTrue('total 0' in dir_outputs[1]) self.assertTrue(len(dir_outputs[1].split('\r\n')) == 2) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_long_file_args(self): """ Test basic 'ls -l etc/passwd etc/sysctl.conf' with multiple file arguments """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] 
self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -l etc/passwd etc/sysctl.conf' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) actual_list = command_output.split('\r\n') expected_list = ['passwd', 'sysctl.conf'] self.verify_long_list(actual_list, expected_list) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_d_with_dir_argument(self): """ Test basic 'ls -d bin' with single directory argument """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -d bin' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertEquals(command_output, 'bin') self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_d_with_multiple_dir_argument(self): """ Test basic 'ls -d bin var' with multiple directory arguments """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -d bin var' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('bin' in command_output) self.assertTrue('var' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_d_non_existant_path(self): """ Test basic 'ls -d nonexistantpath' with non-existant path argument """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() 
client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -d nonexistantpath' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertEquals(command_output, 'ls: cannot access nonexistantpath: No such file or directory') self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_l_non_existant_path(self): """ Test basic 'ls -l nonexistantpath' with non-existant path argument """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -l nonexistantpath' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertEquals(command_output, 'ls: cannot access nonexistantpath: No such file or directory') self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_ld(self): """ Test basic 'ls -ld var bin etc/passwd initrd.img' with files as well as directories """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -ld var bin etc/passwd initrd.img' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) actual_list = command_output.split('\r\n') expected_list = 
['initrd.img', 'var', 'passwd', 'bin'] self.verify_long_list(actual_list, expected_list) self.assertTrue('total' not in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_with_backref_directory_argument(self): """ Test basic 'ls etc/..' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls etc/..' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('etc' in command_output) self.assertTrue('var' in command_output) self.assertTrue('bin' in command_output) self.assertTrue('initrd.img' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_long_backref(self): """ Test basic 'ls -l .. var' with multiple directory arguments """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -l .. var' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) dir_outputs = sorted(command_output.split('\r\n\r\n')) self.assertTrue(dir_outputs[0].startswith('..:')) self.assertTrue('total ' in dir_outputs[0]) self.assertTrue('var' in dir_outputs[0]) self.assertTrue('bin' in dir_outputs[0]) self.assertTrue('initrd.img' in dir_outputs[0]) self.assertTrue('etc' in dir_outputs[0]) self.assertEquals(len(dir_outputs[0].split('\r\n')), 6) self.assertTrue(dir_outputs[1].startswith('var:')) self.assertTrue('total 0' in dir_outputs[1]) self.assertEquals(len(dir_outputs[1].split('\r\n')), 2) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_backref_overflow(self): """ Test basic 'ls ../../..' 
""" honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('etc' in command_output) self.assertTrue('var' in command_output) self.assertTrue('bin' in command_output) self.assertTrue('initrd.img' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all(self): """ Test basic 'ls -a' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -a' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('etc' in command_output) self.assertTrue('var' in command_output) self.assertTrue('bin' in command_output) self.assertTrue('initrd.img' in command_output) self.assertTrue('. ' in command_output) self.assertTrue('..' 
in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_long_after_cd(self): """ Test basic 'cd var; ls -al ../etc' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) cd_command = 'cd var' channel.send(cd_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] next_prompt = lines[-1] self.assertEquals(command, cd_command) self.assertTrue(next_prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -la ../etc' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] actual_list = command_output.split('\r\n')[1:] # Ignore the first "total" entry expected_list = ['init.d', 'passwd', 'sysctl.conf', '..', '.'] self.assertEquals(command, ls_command) self.verify_long_list(actual_list, expected_list) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_ls_long_dir_with_backref(self): """ Test basic 'cd var; ls -ld ../etc' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) cd_command = 'cd var' channel.send(cd_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] next_prompt = lines[-1] self.assertEquals(command, cd_command) self.assertTrue(next_prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -ld ../etc' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue(command_output.endswith('etc')) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_long_with_multiple_args(self): """ Test basic 'ls -la etc initrd.img' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) 
while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -la etc initrd.img' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] dir_outputs = sorted(command_output.split('\r\n\r\n')) self.assertEquals(command, ls_command) actual_list = dir_outputs[1].split('\r\n')[1:] # Ignore the first "total" entry expected_list = ['.config', 'init.d', 'passwd', 'sysctl.conf', '..', '.'] self.assertTrue(dir_outputs[0].endswith('initrd.img')) self.verify_long_list(actual_list, expected_list) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_long_dir(self): """ Test basic 'ls -lda' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -lda' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue(command_output.endswith('.')) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_with_multiple_dir_args(self): """ Test basic 'ls -a etc var' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -a etc var' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = 
output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] dir_outputs = sorted(command_output.split('\r\n\r\n')) self.assertEquals(command, ls_command) self.assertTrue('passwd' in dir_outputs[0]) self.assertTrue('.config' in dir_outputs[0]) self.assertTrue('. ' in dir_outputs[0]) self.assertTrue('..' in dir_outputs[0]) self.assertTrue('init.d' in dir_outputs[0]) self.assertTrue('sysctl.conf' in dir_outputs[0]) self.assertTrue("var:" in dir_outputs[1]) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_dir(self): """ Test basic 'ls -a etc' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -ad etc' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('etc' in command_output) self.assertFalse('var' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_hidden_arg(self): """ Test basic 'ls -a .hidden' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -a .hidden' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('. ' in command_output) self.assertTrue('..' 
in command_output) self.assertTrue('.rcconf' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_dir_hidden_arg(self): """ Test basic 'ls -da .hidden' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -ad .hidden' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('.hidden' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def test_basic_ls_all_file_hidden_arg(self): """ Test basic 'ls -a .hidden/.rcconf' """ honeypot = Hornet(self.working_dir) honeypot.start() self.create_filesystem(honeypot) while honeypot.server.server_port == 0: # wait until the server is ready gevent.sleep(0) port = honeypot.server.server_port client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # If we log in properly, this should raise no errors client.connect('127.0.0.1', port=port, username='testuser', password='testpassword') channel = client.invoke_shell() while not channel.recv_ready(): gevent.sleep(0) # :-( welcome = '' while channel.recv_ready(): welcome += channel.recv(1) lines = welcome.split('\r\n') prompt = lines[-1] self.assertTrue(prompt.endswith('$ ')) # Now send the ls command ls_command = 'ls -a .hidden/.rcconf' channel.send(ls_command + '\r\n') while not channel.recv_ready(): gevent.sleep(0) # :-( output = '' while not output.endswith('$ '): output += channel.recv(1) lines = output.split('\r\n') command = lines[0] command_output = '\r\n'.join(lines[1:-1]) next_prompt = lines[-1] self.assertEquals(command, ls_command) self.assertTrue('.rcconf' in command_output) self.assertFalse('. ' in command_output) self.assertFalse('..' in command_output) self.assertTrue(next_prompt.endswith('$ ')) honeypot.stop() def verify_long_list(self, actual_list, expected_list): for exp in expected_list: found = False regex = LS_L_REGEX + r'{}'.format(exp) for act in actual_list: if re.match(regex, act): found = True break self.assertTrue(found)
czardoz/hornet
hornet/tests/commands/test_ls.py
Python
gpl-3.0
45,918
0.001503
# vim: fileencoding=UTF-8 filetype=python ff=unix expandtab sw=4 sts=4 tw=120 # maintainer: Christer Sjöholm -- goobook AT furuvik DOT net # authors: Marcus Nitzschke -- marcus.nitzschke AT gmx DOT com # # Copyright (C) 2009 Carlos José Barroso # Copyright (C) 2010 Christer Sjöholm # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. '''\ The idea is make an interface to google contacts that mimics the behaviour of abook for mutt. It's developed in python and uses the fine google data api (gdata). ''' import codecs import email.parser import email.header import gdata.service import itertools import logging import os import pickle import re import sys import time import xml.etree.ElementTree as ET import unicodedata from storage import Storage, storageify, unstorageify log = logging.getLogger(__name__) CACHE_FORMAT_VERSION = '4.0' G_MAX_SRESULTS = 9999 # Maximum number of entries to ask google for. GDATA_VERSION = '3' ATOM_NS = '{http://www.w3.org/2005/Atom}' G_NS = '{http://schemas.google.com/g/2005}' GC_NS = '{http://schemas.google.com/contact/2008}' class GooBook(object): '''This class can't be used as a library as it looks now, it uses sys.stdin print, sys.exit() and getpass().''' def __init__(self, config): self.__config = config self.cache = Cache(config) self.cache.load() def query(self, query): """Do the query, and print it out in """ # query contacts matching_contacts = sorted(self.__query_contacts(query), key=lambda c: c.title) # query groups matching_groups = sorted(self.__query_groups(query), key=lambda g: g.title) # mutt's query_command expects the first line to be a message, # which it discards. print "\n", for contact in matching_contacts: if contact.emails: emailaddrs = sorted(contact.emails) groups = set(self.cache.get_group(gid).title for gid in contact.groups) groups = groups.difference(('System Group: My Contacts',)) groups_str = ', '.join(('"' + g + '"' for g in groups)) for (emailaddr, kind) in emailaddrs: title = contact.title or contact.nickname or emailaddr extra_str = kind if groups_str: extra_str = extra_str + ' groups: ' + groups_str print (u'\t'.join((emailaddr, title, extra_str))).encode(self.__config.encoding, errors='replace') for group in matching_groups: emails = ['%s <%s>' % (c.title, c.emails[0][0]) for c in group.contacts if c.emails] emails = ', '.join(emails) if not emails: continue print (u'%s\t%s (group)' % (emails, group.title)).encode(self.__config.encoding, errors='replace') def query_details(self, query): """ Method for querying the contacts and printing a detailed view. 
""" out = codecs.getwriter(self.__config.encoding)(sys.stdout, errors='replace') # query contacts matching_contacts = sorted(self.__query_contacts(query), key=lambda c: c.title) # query groups matching_groups = sorted(self.__query_groups(query), key=lambda g: g.title) for group in matching_groups: matching_contacts += group.contacts for contact in matching_contacts: print >> out, "-------------------------" print >> out, contact.title if contact.birthday: print >> out, "Birthday: ", contact.birthday if contact.phonenumbers: print >> out, "Phone:" for (number, kind) in contact.phonenumbers: print >> out, "\t", number, " (" + kind + ")" if contact.emails: print >> out, "EMail:" emailaddrs = sorted(contact.emails) for (emailaddr, kind) in emailaddrs: print >> out, "\t", emailaddr, " (" + kind + ")" if contact.im: print >> out, "IM:" for (nick, protocol) in contact.im: print >> out, "\t", nick, " (", protocol, ")" if contact.addresses: print >> out, "Address:" for (address, kind) in contact.addresses: lines = address.splitlines() lines[0] = '%s ( %s )' % (lines[0], kind) print >> out, "\t" + '\n\t'.join(lines) if contact.groups: print >> out, "Groups:" groups = set(self.cache.get_group(gid).title for gid in contact.groups) groups = groups.difference(('System Group: My Contacts',)) groups_str = '\n\t'.join(groups) print >> out, "\t" + groups_str def __query_contacts(self, query): tokens = [Cache._normalize(s) for s in query.split()] for contact in self.cache.contacts: if self.__config.filter_groupless_contacts and not contact.groups: continue # Skip contacts without groups matching_addrs = [email for (email, full) in itertools.izip(contact.emails, contact.fullemails) if all(token in full for token in tokens)] if matching_addrs: contact.emails = matching_addrs # only show matching yield contact def __query_groups(self, query): match = re.compile(query, re.I).search # create a match function for group in self.cache.groups: # Collect all values to match against all_values = (group.title,) if any(itertools.imap(match, all_values)): group.contacts = list(self.__get_group_contacts(group.id)) yield group def __get_group_contacts(self, group_id): for contact in self.cache.contacts: if group_id in contact.groups: yield contact def add_mail_contact(self, name, mailaddr): entry = ET.Element(ATOM_NS + 'entry') ET.SubElement(entry, ATOM_NS + 'category', scheme='http://schemas.google.com/g/2005#kind', term='http://schemas.google.com/contact/2008#contact') fullname_e = ET.Element(G_NS + 'fullName') fullname_e.text = name ET.SubElement(entry, G_NS + 'name').append(fullname_e) ET.SubElement(entry, G_NS + 'email', rel='http://schemas.google.com/g/2005#other', primary='true', address=mailaddr) group_id = self.cache.get_group_by_title('System Group: My Contacts').id ET.SubElement(entry, GC_NS + 'groupMembershipInfo', deleted='false', href=group_id) if self.__config.default_group: group_id2 = self.cache.get_group_by_title(self.__config.default_group).id ET.SubElement(entry, GC_NS + 'groupMembershipInfo', deleted='false', href=group_id2) gcont = GoogleContacts(self.__config) log.debug('Going to create contact name: %s email: %s' % (name, mailaddr)) gcont.create_contact(entry) log.info('Created contact name: %s email: %s' % (name, mailaddr)) def add_email_from(self, lines): """Add an address from From: field of a mail. This assumes a single mail file is supplied through. Args: lines: A generator of lines, usually a open file. 
""" parser = email.parser.HeaderParser() headers = parser.parse(lines) if 'From' not in headers: print "Not a valid mail file!" sys.exit(2) (name, mailaddr) = email.utils.parseaddr(headers['From']) if not name: name = mailaddr else: # This decodes headers like "=?iso-8859-1?q?p=F6stal?=" values = email.header.decode_header(name) if len(values) == 0: # Can't this be possible? name = mailaddr else: # There should be only one element anyway (name, encoding) = values[0] if encoding is not None: name = name.decode(encoding) self.add_mail_contact(name, mailaddr) class Cache(object): def __init__(self, config): self.__config = config self.contacts = None # list of Storage self.groups = None # list of Storage def load(self, force_update=False): """Load the cached addressbook feed, or fetch it (again) if it is old or missing or invalid or anyting Args: force_update: force update of cache """ cache = {} # if cache newer than cache_expiry_hours if not force_update and (os.path.exists(self.__config.cache_filename) and ((time.time() - os.path.getmtime(self.__config.cache_filename)) < (int(self.__config.cache_expiry_hours) * 60 * 60))): try: log.debug('Loading cache: ' + self.__config.cache_filename) cache = pickle.load(open(self.__config.cache_filename, 'rb')) if cache.get('goobook_cache') != CACHE_FORMAT_VERSION: log.info('Detected old cache format') cache = None # Old cache format except StandardError, err: log.info('Failed to read the cache file: %s', err) raise if cache: self.contacts = storageify(cache.get('contacts')) self.groups = storageify(cache.get('groups')) else: self.update() if not self.contacts: raise Exception('Failed to find any contacts') # TODO def update(self): log.info('Retrieving contact data from Google.') gc = GoogleContacts(self.__config) self.contacts = list(self._parse_contacts(gc.fetch_contacts())) self.groups = list(self._parse_groups(gc.fetch_contact_groups())) self.save() def save(self): """Pickle the addressbook and a timestamp """ if self.contacts: # never write a empty addressbook cache = {'contacts': unstorageify(self.contacts), 'groups': unstorageify(self.groups), 'goobook_cache': CACHE_FORMAT_VERSION} pickle.dump(cache, open(self.__config.cache_filename, 'wb')) def get_group(self, id_): for group in self.groups: if group.id == id_: return group raise KeyError('Group: ' + id_) def get_group_by_title(self, title): for group in self.groups: if group.title == title: return group raise KeyError('Group: ' + title) @staticmethod def _parse_contact(entry): '''Extracts interesting contact info from cache.''' contact = Storage() # ID contact.id = entry.findtext(ATOM_NS + 'id') # title contact.title = entry.findtext(ATOM_NS + 'title') # nickname contact.nickname = entry.findtext(GC_NS + 'nickname', default='') # emails contact.emails = [] for ent in entry.findall(G_NS + 'email'): label = ent.get('label') or ent.get('rel').split('#')[-1] contact.emails.append((ent.get('address'), label)) # groups contact.groups = [e.get('href') for e in entry.findall(GC_NS + 'groupMembershipInfo') if e.get('deleted') == 'false'] # phone contact.phonenumbers = [] for ent in entry.findall(G_NS + 'phoneNumber'): label = ent.get('label') or ent.get('rel').split('#')[-1] contact.phonenumbers.append((ent.text, label)) # birthday contact.birthday = entry.find(GC_NS + 'birthday').get('when') if entry.findall(GC_NS + 'birthday') else None # address contact.addresses = [] for address in entry.findall(G_NS + 'structuredPostalAddress'): label = address.get('label') or address.get('rel').split('#')[-1] 
contact.addresses.append((address.findtext(G_NS + 'formattedAddress'), label)) # IM contact.im = [] for ent in entry.findall(G_NS + 'im'): protocol = ent.get('protocol') # Default protocol is GOOGLE_TALK protocol = ent.get('protocol').split('#')[-1] if protocol else "GOOGLE_TALK" contact.im.append((ent.get('address'), protocol)) # email fulltext search contact.fullemails = [Cache._normalize('%s "%s" <%s> (%s)' % (contact.title, contact.nickname, email, kind)) for (email, kind) in contact.emails] log.debug('Parsed contact %s', contact) return contact @staticmethod def _normalize(string): '''Dummy casefold with normalization.''' return ''.join(c for c in unicodedata.normalize('NFKD', unicode(string)) if not unicodedata.combining(c)).lower() @staticmethod def _parse_group(entry): '''Extracts interesting group info from cache.''' group = Storage() group.id = entry.findtext(ATOM_NS + 'id') group.title = entry.findtext(ATOM_NS + 'title') log.debug('Parsed group %s', group) return group def _parse_contacts(self, raw_contacts): for entry in raw_contacts.findall(ATOM_NS + 'entry'): yield self._parse_contact(entry) def _parse_groups(self, raw_groups): for entry in raw_groups.findall(ATOM_NS + 'entry'): yield self._parse_group(entry) class GoogleContacts(object): def __init__(self, config): self.__email = config.email self.__client = self.__get_client(config.password()) def __get_client(self, password): '''Login to Google and return a ContactsClient object. ''' client = gdata.service.GDataService(additional_headers={'GData-Version': GDATA_VERSION}) client.ssl = True # TODO verify that this works # client.debug = True client.ClientLogin(username=self.__email, password=password, service='cp', source='goobook') log.debug('Authenticated client') return client def _get(self, query): res = self.__client.Get(str(query), converter=ET.fromstring) # TODO check not failed return res def _post(self, data, query): '''data is a ElementTree''' data = ET.tostring(data) log.debug('POSTing to: %s\n%s', query, data) res = self.__client.Post(data, str(query)) log.debug('POST returned: %s', res) # res = self.__client.Post(data, str(query), converter=str) # TODO check not failed return res def fetch_contacts(self): query = gdata.service.Query('http://www.google.com/m8/feeds/contacts/default/full') query.max_results = G_MAX_SRESULTS res = self._get(query) return res def fetch_contact_groups(self): query = gdata.service.Query('http://www.google.com/m8/feeds/groups/default/full') query.max_results = G_MAX_SRESULTS res = self._get(query) return res def create_contact(self, entry): query = gdata.service.Query('http://www.google.com/m8/feeds/contacts/default/full') self._post(entry, query)
mh21/goobook
goobook/goobook.py
Python
gpl-3.0
15,990
0.002252
"""Configuration definitions """ import numpy from astropy import units as u from astropy.coordinates import EarthLocation from processing_library.util.coordinate_support import xyz_at_latitude from data_models.memory_data_models import Configuration from data_models.parameters import arl_path, get_parameter from processing_components.simulation.testing_support import log from processing_library.util.coordinate_support import xyz_to_uvw, uvw_to_xyz def create_configuration_from_file(antfile: str, location: EarthLocation = None, mount: str = 'azel', names: str = "%d", diameter=35.0, rmax=None, name='') -> Configuration: """ Define from a file :param names: Antenna names :param antfile: Antenna file name :param location: :param mount: mount type: 'azel', 'xy', 'equatorial' :param diameter: Effective diameter of station or antenna :return: Configuration """ antxyz = numpy.genfromtxt(antfile, delimiter=",") assert antxyz.shape[1] == 3, ("Antenna array has wrong shape %s" % antxyz.shape) latitude = location.geodetic[1].to(u.rad).value antxyz = xyz_at_latitude(antxyz, latitude) antxyz += [location.geocentric[0].to(u.m).value, location.geocentric[1].to(u.m).value, location.geocentric[2].to(u.m).value] nants = antxyz.shape[0] diameters = diameter * numpy.ones(nants) anames = [names % ant for ant in range(nants)] mounts = numpy.repeat(mount, nants) antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax) fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, diameter=diameters, name=name) return fc def create_configuration_from_SKAfile(antfile: str, mount: str = 'azel', names: str = "%d", rmax=None, name='', location=None) -> Configuration: """ Define from a file :param names: Antenna names :param antfile: Antenna file name :param location: :param mount: mount type: 'azel', 'xy', 'equatorial' :param diameter: Effective diameter of station or antenna :return: Configuration """ antdiamlonglat = numpy.genfromtxt(antfile, usecols=[0, 1, 2], delimiter="\t") assert antdiamlonglat.shape[1] == 3, ("Antenna array has wrong shape %s" % antdiamlonglat.shape) antxyz = numpy.zeros([antdiamlonglat.shape[0] - 1, 3]) diameters = numpy.zeros([antdiamlonglat.shape[0] - 1]) for ant in range(antdiamlonglat.shape[0] - 1): loc = EarthLocation(lon=antdiamlonglat[ant, 1], lat=antdiamlonglat[ant, 2], height=0.0).geocentric antxyz[ant] = [loc[0].to(u.m).value, loc[1].to(u.m).value, loc[2].to(u.m).value] diameters[ant] = antdiamlonglat[ant, 0] nants = antxyz.shape[0] anames = [names % ant for ant in range(nants)] mounts = numpy.repeat(mount, nants) antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax) fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, diameter=diameters, name=name) return fc def create_configuration_from_MIDfile(antfile: str, location=None, mount: str = 'azel', rmax=None, name='') -> Configuration: """ Define from a file :param names: Antenna names :param antfile: Antenna file name :param mount: mount type: 'azel', 'xy' :return: Configuration """ # X Y Z Diam Station # 9.36976 35.48262 1052.99987 13.50 M001 antxyz = numpy.genfromtxt(antfile, skip_header=5, usecols=[0, 1, 2], delimiter=" ") antxyz = xyz_at_latitude(antxyz, location.lat.rad) antxyz += [location.geocentric[0].to(u.m).value, location.geocentric[1].to(u.m).value, location.geocentric[2].to(u.m).value] nants = antxyz.shape[0] assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape anames = 
numpy.genfromtxt(antfile, dtype='str', skip_header=5, usecols=[4], delimiter=" ") mounts = numpy.repeat(mount, nants) diameters = numpy.genfromtxt(antfile, dtype='str', skip_header=5, usecols=[3], delimiter=" ") antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax) fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, diameter=diameters, name=name) return fc def limit_rmax(antxyz, diameters, names, mounts, rmax): """ Select antennas with radius from centre < rmax :param antxyz: :param diameters: :param names: :param mounts: :param rmax: :return: """ if rmax is not None: lantxyz = antxyz - numpy.average(antxyz, axis=0) r = numpy.sqrt(lantxyz[:, 0] ** 2 + lantxyz[:, 1] ** 2 + lantxyz[:, 2] ** 2) antxyz = antxyz[r < rmax] log.debug('create_configuration_from_file: Maximum radius %.1f m includes %d antennas/stations' % (rmax, antxyz.shape[0])) diameters = diameters[r < rmax] names = numpy.array(names)[r < rmax] mounts = numpy.array(mounts)[r<rmax] else: log.debug('create_configuration_from_file: %d antennas/stations' % (antxyz.shape[0])) return antxyz, diameters, names, mounts def create_LOFAR_configuration(antfile: str, location, rmax=1e6) -> Configuration: """ Define from the LOFAR configuration file :param antfile: :return: Configuration """ antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3], delimiter=",") nants = antxyz.shape[0] assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0], delimiter=",") mounts = numpy.repeat('XY', nants) diameters = numpy.repeat(35.0, nants) antxyz, diameters, mounts, anames = limit_rmax(antxyz, diameters, anames, mounts, rmax) fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz, diameter=diameters, name='LOFAR') return fc def create_named_configuration(name: str = 'LOWBD2', **kwargs) -> Configuration: """ Standard configurations e.g. 
LOWBD2, MIDBD2 :param name: name of Configuration LOWBD2, LOWBD1, LOFAR, VLAA, ASKAP :param rmax: Maximum distance of station from the average (m) :return: For LOWBD2, setting rmax gives the following number of stations 100.0 13 300.0 94 1000.0 251 3000.0 314 10000.0 398 30000.0 476 100000.0 512 """ if name == 'LOWBD2': location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD2.csv"), location=location, mount='xy', names='LOWBD2_%d', diameter=35.0, name=name, **kwargs) elif name == 'LOWBD1': location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD1.csv"), location=location, mount='xy', names='LOWBD1_%d', diameter=35.0, name=name, **kwargs) elif name == 'LOWBD2-CORE': location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD2-CORE.csv"), location=location, mount='xy', names='LOWBD2_%d', diameter=35.0, name=name, **kwargs) elif (name == 'LOW') or (name == 'LOWR3'): location = EarthLocation(lon="116.76444824", lat="-26.824722084", height=300.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_MIDfile(antfile=arl_path("data/configurations/ska1low_local.cfg"), mount='xy', name=name, location=location, **kwargs) elif (name == 'MID') or (name == "MIDR5"): location = EarthLocation(lon="21.443803", lat="-30.712925", height=0.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_MIDfile(antfile=arl_path("data/configurations/ska1mid_local.cfg"), mount='azel', name=name, location=location, **kwargs) elif name == 'ASKAP': location = EarthLocation(lon="+116.6356824", lat="-26.7013006", height=377.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_file(antfile=arl_path("data/configurations/A27CR3P6B.in.csv"), mount='equatorial', names='ASKAP_%d', diameter=12.0, name=name, location=location, **kwargs) elif name == 'LOFAR': location = EarthLocation(x=[3826923.9] * u.m, y=[460915.1] * u.m, z=[5064643.2] * u.m) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) assert get_parameter(kwargs, "meta", False) is False fc = create_LOFAR_configuration(antfile=arl_path("data/configurations/LOFAR.csv"), location=location) elif name == 'VLAA': location = EarthLocation(lon="-107.6184", lat="34.0784", height=2124.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = create_configuration_from_file(antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"), location=location, mount='azel', names='VLA_%d', diameter=25.0, name=name, **kwargs) elif name == 'VLAA_north': location = EarthLocation(lon="-107.6184", lat="90.000", height=0.0) log.info("create_named_configuration: %s\n\t%s\n\t%s" % (name, location.geocentric, location.geodetic)) fc = 
create_configuration_from_file(antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"), location=location, mount='azel', names='VLA_%d', diameter=25.0, name=name, **kwargs) else: raise ValueError("No such Configuration %s" % name) return fc
SKA-ScienceDataProcessor/algorithm-reference-library
processing_components/simulation/configurations.py
Python
apache-2.0
11,417
0.006131
from vsg import parser


class with_keyword(parser.keyword):
    '''
    unique_id = selected_waveform_assignment : with_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class select_keyword(parser.keyword):
    '''
    unique_id = selected_waveform_assignment : select_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class question_mark(parser.question_mark):
    '''
    unique_id = selected_waveform_assignment : question_mark
    '''

    def __init__(self, sString='?'):
        parser.question_mark.__init__(self)


class target(parser.target):
    '''
    unique_id = selected_waveform_assignment : target
    '''

    def __init__(self, sString):
        parser.target.__init__(self, sString)


class assignment(parser.assignment):
    '''
    unique_id = selected_waveform_assignment : assignment
    '''

    def __init__(self, sString):
        parser.assignment.__init__(self, sString)


class semicolon(parser.semicolon):
    '''
    unique_id = selected_waveform_assignment : semicolon
    '''

    def __init__(self, sString=';'):
        parser.semicolon.__init__(self)
jeremiah-c-leary/vhdl-style-guide
vsg/token/selected_waveform_assignment.py
Python
gpl-3.0
1,188
0
#
# zurllib.py
#
# This is (hopefully) a drop-in for urllib which will request gzip/deflate
# compression and then decompress the output if a compressed response is
# received while maintaining the API.
#
# by Robert Stone 2/22/2003
# extended by Matt Chisholm
#

from BitTorrent.platform import user_agent

import urllib2
OldOpenerDirector = urllib2.OpenerDirector

class MyOpenerDirector(OldOpenerDirector):
    def __init__(self):
        OldOpenerDirector.__init__(self)
        server_version = user_agent
        self.addheaders = [('User-agent', server_version)]

urllib2.OpenerDirector = MyOpenerDirector

del urllib2

from urllib import *
from urllib2 import *
from gzip import GzipFile
from StringIO import StringIO
import pprint

DEBUG=0


class HTTPContentEncodingHandler(HTTPHandler):
    """Inherit and add gzip/deflate/etc support to HTTP gets."""
    def http_open(self, req):
        # add the Accept-Encoding header to the request
        # support gzip encoding (identity is assumed)
        req.add_header("Accept-Encoding","gzip")
        if DEBUG:
            print "Sending:"
            print req.headers
            print "\n"
        fp = HTTPHandler.http_open(self,req)
        headers = fp.headers
        if DEBUG:
            pprint.pprint(headers.dict)
        url = fp.url
        resp = addinfourldecompress(fp, headers, url)
        if hasattr(fp, 'code'):
            resp.code = fp.code
        if hasattr(fp, 'msg'):
            resp.msg = fp.msg
        return resp


class addinfourldecompress(addinfourl):
    """Do gzip decompression if necessary. Do addinfourl stuff too."""
    def __init__(self, fp, headers, url):
        # we need to do something more sophisticated here to deal with
        # multiple values? What about other weird crap like q-values?
        # basically this only works for the most simplistic case and will
        # break in some other cases, but for now we only care about making
        # this work with the BT tracker so....
        if headers.has_key('content-encoding') and headers['content-encoding'] == 'gzip':
            if DEBUG:
                print "Contents of Content-encoding: " + headers['Content-encoding'] + "\n"
            self.gzip = 1
            self.rawfp = fp
            fp = GzipStream(fp)
        else:
            self.gzip = 0
        return addinfourl.__init__(self, fp, headers, url)

    def close(self):
        self.fp.close()
        if self.gzip:
            self.rawfp.close()

    def iscompressed(self):
        return self.gzip


class GzipStream(StringIO):
    """Magically decompress a file object.

       This is not the most efficient way to do this but GzipFile() wants
       to seek, etc, which won't work for a stream such as that from a socket.
       So we copy the whole shebang info a StringIO object, decompress that
       then let people access the decompressed output as a StringIO object.

       The disadvantage is memory use and the advantage is random access.

       Will mess with fixing this later.
    """

    def __init__(self,fp):
        self.fp = fp

        # this is nasty and needs to be fixed at some point
        # copy everything into a StringIO (compressed)
        compressed = StringIO()
        r = fp.read()
        while r:
            compressed.write(r)
            r = fp.read()

        # now, unzip (gz) the StringIO to a string
        compressed.seek(0,0)
        gz = GzipFile(fileobj = compressed)
        str = ''
        r = gz.read()
        while r:
            str += r
            r = gz.read()

        # close our utility files
        compressed.close()
        gz.close()

        # init our stringio selves with the string
        StringIO.__init__(self, str)
        del str

    def close(self):
        self.fp.close()
        return StringIO.close(self)


def test():
    """Test this module.

       At the moment this is lame.
    """

    print "Running unit tests.\n"

    def printcomp(fp):
        try:
            if fp.iscompressed():
                print "GET was compressed.\n"
            else:
                print "GET was uncompressed.\n"
        except:
            print "no iscompressed function! this shouldn't happen"

    print "Trying to GET a compressed document...\n"
    fp = urlopen('http://a.scarywater.net/hng/index.shtml')
    print fp.read()
    printcomp(fp)
    fp.close()

    print "Trying to GET an unknown document...\n"
    fp = urlopen('http://www.otaku.org/')
    print fp.read()
    printcomp(fp)
    fp.close()


#
# Install the HTTPContentEncodingHandler that we've defined above.
#
install_opener(build_opener(HTTPContentEncodingHandler, ProxyHandler({})))

if __name__ == '__main__':
    test()
rays/ipodderx-core
BitTorrent/zurllib.py
Python
mit
4,720
0.005297
import string

# The lexer knows two classes of symbols:
# - any number of meta-symbols, which match a single meta-character each.
#   this argument is a dictionary mapping meta-characters to symbol classes.
#
# - exactly one terminal symbol, which matches a run of any characters
#   but ends as soon as a meta-character or a whitespace character
#   (not escaped with "\") is encountered.
#   whitespace characters delimit a sequence of multiple terminal symbols.
#
#   this argument is a single symbol class.
#
# - optional: char_filter (character -> bool) determines if a character is
#   allowed in a terminal symbol. A character that neither matches a
#   meta-symbol, nor is whitespace, nor allowed in terminals, will cause the
#   lexical analysis to fail.
#
# - optional: with letters=True, each terminal character becomes a terminal
#   symbol. This has the same effect as adding spaces between every character
#   of the input.


class lexer:
    def __init__(self, meta, terminal, char_filter=None, letters=False):
        assert not (set(meta.keys()) & set(string.whitespace)), 'Whitespace characters cannot be meta characters.'
        self.meta = meta
        self.terminal = terminal
        self.filter = char_filter
        self.letters = letters

    def lex(self, s):
        tokens = []
        term = ''   # The current terminal character run.
        bs = False  # The last character was a backslash.

        # This space terminates a terminal symbol at the end of the input.
        s += ' '

        for i, c in enumerate(s):
            # We found an unescaped backslash.
            if not bs and c == '\\':
                bs = True
            # We found an unescaped meta or whitespace character.
            elif not bs and (c in self.meta or c in string.whitespace):
                # Append the current terminal symbol (if any):
                if term:
                    tokens.append(self.terminal(term))
                    term = ''
                # Append the meta symbol (if any):
                if c in self.meta:
                    tokens.append(self.meta[c]())
            # found another character, or one escaped by a backslash:
            elif self.filter == None or self.filter(c):
                bs = False
                # Append it to the current terminal symbol:
                term += c
                if self.letters:
                    # If terminals are single characters, append it immediately:
                    tokens.append(self.terminal(term))
                    term = ''
            else:
                raise LexError(i, c)

        return tokens


class LexError(ValueError):
    def __init__(self, i, c):
        self.i, self.c = i, c

    def __str__(self):
        return 'Lexical Error at #{}: {} is not allowed.'.format(self.i, self.c)
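# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original module).
# It shows how the lexer above might be driven; the symbol classes LParen,
# RParen and Name are hypothetical stand-ins for modod's real grammar
# classes, which live elsewhere in the package.
if __name__ == '__main__':
    class LParen:
        def __repr__(self):
            return 'LParen'

    class RParen:
        def __repr__(self):
            return 'RParen'

    class Name:
        def __init__(self, text):
            self.text = text

        def __repr__(self):
            return 'Name({!r})'.format(self.text)

    lx = lexer({'(': LParen, ')': RParen}, Name)
    # The backslash escapes the meta character, so ')' ends up inside a
    # terminal symbol instead of producing an RParen token.
    print(lx.lex(r'(foo \) bar)'))
    # -> [LParen, Name('foo'), Name(')'), Name('bar'), RParen]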
cburschka/modod
src/modod/lexer.py
Python
mit
2,843
0.003166
import logging
import sys

import tkinter as tk

from src.app import Labeler

LOGGER = logging.getLogger(__name__)

# parser = argparse.ArgumentParser(description='Some arguments for path connector')
# parser.add_argument('-m', '--max', type=int, default=300, help='maximum frame for displaying path')
# parser.add_argument('-t', '--tolerance', type=int, default=38, help='maximum tolerance of distance')
# args = vars(parser.parse_args())


def log_handler(*loggers):
    formatter = logging.Formatter(
        '%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    # stream handler
    sh = logging.StreamHandler(sys.stdout)
    sh.setLevel(logging.INFO)
    sh.setFormatter(formatter)

    for logger in loggers:
        logger.addHandler(sh)
        logger.setLevel(logging.DEBUG)


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        stream=sys.stdout
    )
    log_handler(LOGGER)

    labeler = Labeler()
    labeler.run()
kai06046/labeler
main.py
Python
apache-2.0
1,067
0.02343
import unittest import socket from time import sleep import subprocess TESTMSG = 'Hello World' MSGLEN = len(TESTMSG) PORT = 3000 class MySocket: """demonstration class only - coded for clarity, not efficiency """ def close(self): self.sock.close() def __init__(self, sock=None): if sock is None: self.sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM) else: self.sock = sock def connect(self, host, port): self.sock.connect((host, port)) def mysend(self, msg): totalsent = 0 while totalsent < MSGLEN: sent = self.sock.send(msg[totalsent:]) if sent == 0: raise RuntimeError("socket connection broken") totalsent = totalsent + sent def myreceive(self): self.sock.setblocking(False) chunks = [] bytes_recd = 0 while bytes_recd < MSGLEN: chunk = self.sock.recv(min(MSGLEN - bytes_recd, 4096)) if chunk == b'': raise RuntimeError("socket connection broken") chunks.append(chunk) bytes_recd = bytes_recd + len(chunk) return b''.join(chunks) class TestBasicNetwork(unittest.TestCase): def setUp(self): self.p = subprocess.Popen(['/bin/bash', '-c', './tests/smtp_server/test_echo/bin/test_smtp_echo '+str(PORT)], stderr=subprocess.DEVNULL, stdout=subprocess.PIPE, universal_newlines=True) sleep(.01) def tearDown(self): self.p.stdout.close() def test_server_rcv(self): s = MySocket() s.connect('localhost', PORT) s.mysend(str.encode(TESTMSG)) sleep(.005) s.close() self.p.kill() self.assertTrue(TESTMSG in self.p.stdout.read()) def test_server_send(self): s = MySocket() s.connect('localhost', PORT) s.mysend(str.encode(TESTMSG)) sleep(.1) msg = s.myreceive() s.close() self.p.kill() self.assertEqual(msg.decode(),TESTMSG) if __name__ == '__main__': unittest.main()
koolkt/mendoza
tests/smtp_server/test_echo/test_echo.py
Python
gpl-3.0
2,153
0.002322
import argparse from phocnet.training.phocnet_trainer import PHOCNetTrainer if __name__ == '__main__': parser = argparse.ArgumentParser() # required training parameters parser.add_argument('--doc_img_dir', action='store', type=str, required=True, help='The location of the document images.') parser.add_argument('--train_annotation_file', action='store', type=str, required=True, help='The file path to the READ-style XML file for the training partition of the dataset to be used.') parser.add_argument('--test_annotation_file', action='store', type=str, required=True, help='The file path to the READ-style XML file for the testing partition of the dataset to be used.') parser.add_argument('--proto_dir', action='store', type=str, required=True, help='Directory where to save the protobuffer files generated during the training.') parser.add_argument('--lmdb_dir', action='store', type=str, required=True, help='Directory where to save the LMDB databases created during training.') # IO parameters parser.add_argument('--save_net_dir', '-snd', action='store', type=str, help='Directory where to save the final PHOCNet. If unspecified, the net is not saved after training') parser.add_argument('--recreate_lmdbs', '-rl', action='store_true', default=False, help='Flag indicating to delete existing LMDBs for this dataset and recompute them.') parser.add_argument('--debug_mode', '-dm', action='store_true', default=False, help='Flag indicating to run the PHOCNet training in debug mode.') # Caffe parameters parser.add_argument('--learning_rate', '-lr', action='store', type=float, default=0.0001, help='The learning rate for SGD training. Default: 0.0001') parser.add_argument('--momentum', '-mom', action='store', type=float, default=0.9, help='The momentum for SGD training. Default: 0.9') parser.add_argument('--step_size', '-ss', action='store', type=int, default=70000, help='The step size at which to reduce the learning rate. Default: 70000') parser.add_argument('--display', action='store', type=int, default=500, help='The number of iterations after which to display the loss values. Default: 500') parser.add_argument('--test_interval', action='store', type=int, default=500, help='The number of iterations after which to periodically evaluate the PHOCNet. Default: 500') parser.add_argument('--max_iter', action='store', type=int, default=80000, help='The maximum number of SGD iterations. Default: 80000') parser.add_argument('--batch_size', '-bs', action='store', type=int, default=10, help='The batch size after which the gradient is computed. Default: 10') parser.add_argument('--weight_decay', '-wd', action='store', type=float, default=0.00005, help='The weight decay for SGD training. Default: 0.00005') parser.add_argument('--gamma', '-gam', action='store', type=float, default=0.1, help='The value with which the learning rate is multiplied after step_size iteraionts. Default: 0.1') parser.add_argument('--gpu_id', '-gpu', action='store', type=int, help='The ID of the GPU to use. If not specified, training is run in CPU mode.') # PHOCNet parameters parser.add_argument('--phoc_unigram_levels', '-pul', action='store', type=lambda x: [int(elem) for elem in x.split(',')], default='2,3,4,5', help='Comma seperated list of PHOC unigram levels to be used. 
Default: 2,3,4,5') parser.add_argument('--use_bigrams', '-ub', action='store_true', help='Flag indicating to build the PHOC with bigrams') parser.add_argument('--n_train_images', '-nti', action='store', type=int, default=500000, help='The number of images to be generated for the training LMDB. Default: 500000') parser.add_argument('--metric', '-met', action='store', type=str, default='braycurtis', help='The metric with which to compare the PHOCNet predicitions (possible metrics are all scipy metrics). Default: braycurtis') parser.add_argument('--annotation_delimiter', '-ad', action='store', type=str, help='If the annotation in the XML is separated by special delimiters, it can be specified here.') parser.add_argument('--use_lower_case_only', '-ulco', action='store_true', default=False, help='Flag indicating to convert all annotations from the XML to lower case before proceeding') params = vars(parser.parse_args()) PHOCNetTrainer(**params).train_phocnet()
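# ---------------------------------------------------------------------------
# Hedged invocation sketch (editor's addition): only the flag names come from
# the argparse definitions above; every path below is a placeholder, not a
# file shipped with PHOCNet.
#
#   python tools/train_phocnet.py \
#       --doc_img_dir /data/gw20/pages \
#       --train_annotation_file /data/gw20/train.xml \
#       --test_annotation_file /data/gw20/test.xml \
#       --proto_dir /tmp/phocnet/proto \
#       --lmdb_dir /tmp/phocnet/lmdb \
#       --gpu_id 0 -pul 2,3,4,5 -bs 10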
ssudholt/phocnet
tools/train_phocnet.py
Python
bsd-3-clause
4,914
0.014449
# Copyright (C) 2001-2017 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" import random from io import StringIO import struct import dns.exception import dns.rdatatype import dns.rdataclass import dns.rdata import dns.set from ._compat import string_types # define SimpleSet here for backwards compatibility SimpleSet = dns.set.Set class DifferingCovers(dns.exception.DNSException): """An attempt was made to add a DNS SIG/RRSIG whose covered type is not the same as that of the other rdatas in the rdataset.""" class IncompatibleTypes(dns.exception.DNSException): """An attempt was made to add DNS RR data of an incompatible type.""" class Rdataset(dns.set.Set): """A DNS rdataset.""" __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0): """Create a new rdataset of the specified class and type. *rdclass*, an ``int``, the rdataclass. *rdtype*, an ``int``, the rdatatype. *covers*, an ``int``, the covered rdatatype. *ttl*, an ``int``, the TTL. """ super(Rdataset, self).__init__() self.rdclass = rdclass self.rdtype = rdtype self.covers = covers self.ttl = ttl def _clone(self): obj = super(Rdataset, self)._clone() obj.rdclass = self.rdclass obj.rdtype = self.rdtype obj.covers = self.covers obj.ttl = self.ttl return obj def update_ttl(self, ttl): """Perform TTL minimization. Set the TTL of the rdataset to be the lesser of the set's current TTL or the specified TTL. If the set contains no rdatas, set the TTL to the specified TTL. *ttl*, an ``int``. """ if len(self) == 0: self.ttl = ttl elif ttl < self.ttl: self.ttl = ttl def add(self, rd, ttl=None): """Add the specified rdata to the rdataset. If the optional *ttl* parameter is supplied, then ``self.update_ttl(ttl)`` will be called prior to adding the rdata. *rd*, a ``dns.rdata.Rdata``, the rdata *ttl*, an ``int``, the TTL. Raises ``dns.rdataset.IncompatibleTypes`` if the type and class do not match the type and class of the rdataset. Raises ``dns.rdataset.DifferingCovers`` if the type is a signature type and the covered type does not match that of the rdataset. """ # # If we're adding a signature, do some special handling to # check that the signature covers the same type as the # other rdatas in this rdataset. If this is the first rdata # in the set, initialize the covers field. 
# if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: raise IncompatibleTypes if ttl is not None: self.update_ttl(ttl) if self.rdtype == dns.rdatatype.RRSIG or \ self.rdtype == dns.rdatatype.SIG: covers = rd.covers() if len(self) == 0 and self.covers == dns.rdatatype.NONE: self.covers = covers elif self.covers != covers: raise DifferingCovers if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: self.clear() super(Rdataset, self).add(rd) def union_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).union_update(other) def intersection_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).intersection_update(other) def update(self, other): """Add all rdatas in other to self. *other*, a ``dns.rdataset.Rdataset``, the rdataset from which to update. """ self.update_ttl(other.ttl) super(Rdataset, self).update(other) def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' def __str__(self): return self.to_text() def __eq__(self, other): if not isinstance(other, Rdataset): return False if self.rdclass != other.rdclass or \ self.rdtype != other.rdtype or \ self.covers != other.covers: return False return super(Rdataset, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): """Convert the rdataset into DNS master file format. See ``dns.name.Name.choose_relativity`` for more information on how *origin* and *relativize* determine the way names are emitted. Any additional keyword arguments are passed on to the rdata ``to_text()`` method. *name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with *name* as the owner name. *origin*, a ``dns.name.Name`` or ``None``, the origin for relative names. *relativize*, a ``bool``. If ``True``, names will be relativized to *origin*. """ if name is not None: name = name.choose_relativity(origin, relativize) ntext = str(name) pad = ' ' else: ntext = '' pad = '' s = StringIO() if override_rdclass is not None: rdclass = override_rdclass else: rdclass = self.rdclass if len(self) == 0: # # Empty rdatasets are used for the question section, and in # some dynamic updates, so we don't need to print out the TTL # (which is meaningless anyway). # s.write(u'%s%s%s %s\n' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype))) else: for rd in self: s.write(u'%s%s%d %s %s %s\n' % (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw))) # # We strip off the final \n for the caller's convenience in printing # return s.getvalue()[:-1] def to_wire(self, name, file, compress=None, origin=None, override_rdclass=None, want_shuffle=True): """Convert the rdataset to wire format. *name*, a ``dns.name.Name`` is the owner name to use. *file* is the file where the name is emitted (typically a BytesIO file). *compress*, a ``dict``, is the compression table to use. If ``None`` (the default), names will not be compressed. *origin* is a ``dns.name.Name`` or ``None``. If the name is relative and origin is not ``None``, then *origin* will be appended to it. *override_rdclass*, an ``int``, is used as the class instead of the class of the rdataset. This is useful when rendering rdatasets associated with dynamic updates. 
*want_shuffle*, a ``bool``. If ``True``, then the order of the Rdatas within the Rdataset will be shuffled before rendering. Returns an ``int``, the number of records emitted. """ if override_rdclass is not None: rdclass = override_rdclass want_shuffle = False else: rdclass = self.rdclass file.seek(0, 2) if len(self) == 0: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) file.write(stuff) return 1 else: if want_shuffle: l = list(self) random.shuffle(l) else: l = self for rd in l: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, self.ttl, 0) file.write(stuff) start = file.tell() rd.to_wire(file, compress, origin) end = file.tell() assert end - start < 65536 file.seek(start - 2) stuff = struct.pack("!H", end - start) file.write(stuff) file.seek(0, 2) return len(self) def match(self, rdclass, rdtype, covers): """Returns ``True`` if this rdataset matches the specified class, type, and covers. """ if self.rdclass == rdclass and \ self.rdtype == rdtype and \ self.covers == covers: return True return False def from_text_list(rdclass, rdtype, ttl, text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified list of rdatas in text format. Returns a ``dns.rdataset.Rdataset`` object. """ if isinstance(rdclass, string_types): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, string_types): rdtype = dns.rdatatype.from_text(rdtype) r = Rdataset(rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(rdclass, rdtype, ttl, *text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified rdatas in text format. Returns a ``dns.rdataset.Rdataset`` object. """ return from_text_list(rdclass, rdtype, ttl, text_rdatas) def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. Returns a ``dns.rdataset.Rdataset`` object. """ if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) r.add(rd) return r def from_rdata(ttl, *rdatas): """Create an rdataset with the specified TTL, and with the specified rdata objects. Returns a ``dns.rdataset.Rdataset`` object. """ return from_rdata_list(ttl, rdatas)
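# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): builds a small A-record rdataset
# with the module-level helpers defined above. The addresses and the 300s TTL
# are illustrative values only.
#
#   import dns.rdataset
#   rds = dns.rdataset.from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')
#   rds.ttl               # -> 300
#   print(rds.to_text())  # one line per rdata, e.g. "300 IN A 10.0.0.1"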
pbaesse/Sissens
lib/python2.7/site-packages/eventlet/support/dns/rdataset.py
Python
gpl-3.0
11,374
0.000176
from __future__ import print_function

import six
import datetime

from halonctl.modapi import Module
from halonctl.roles import HTTPStatus


class StatusModule(Module):
    '''Checks node statuses'''

    def run(self, nodes, args):
        yield (u"Cluster", u"Name", u"Address", u"Uptime", u"Status")

        for node, (code, result) in six.iteritems(nodes.service.getUptime()):
            if code != 200:
                self.partial = True
            uptime = datetime.timedelta(seconds=result) if code == 200 else None
            yield (node.cluster, node, node.host, uptime, HTTPStatus(code))


module = StatusModule()
halonsecurity/halonctl
halonctl/modules/status.py
Python
bsd-3-clause
576
0.027778
#!/usr/bin/env python # -*- coding: utf-8 -*- # # OMERO Grid Processor # Copyright 2008 Glencoe Software, Inc. All Rights Reserved. # Use is subject to license terms supplied in LICENSE.txt # import os import time import signal import logging import traceback import killableprocess as subprocess from path import path import Ice import omero import omero.clients import omero.scripts import omero.util import omero.util.concurrency import omero_ext.uuid as uuid # see ticket:3774 from omero.util import load_dotted_class from omero.util.temp_files import create_path, remove_path from omero.util.decorators import remoted, perf, locked from omero.rtypes import * from omero.util.decorators import remoted, perf, wraps sys = __import__("sys") def with_context(func, context): """ Decorator for invoking Ice methods with a context """ def handler(*args, **kwargs): args = list(args) args.append(context) return func(*args, **kwargs) handler = wraps(func)(handler) return handler class WithGroup(object): """ Wraps a ServiceInterfacePrx instance and applies a "omero.group" to the passed context on every invocation. For example, using a job handle as root requires logging manually into the group. (ticket:2044) """ def __init__(self, service, group_id): self._service = service self._group_id = str(group_id) def _get_ctx(self, group=None): ctx = self._service.ice_getCommunicator()\ .getImplicitContext().getContext() ctx = dict(ctx) ctx["omero.group"] = group return ctx def __getattr__(self, name): if name.startswith("_"): return self.__dict__[name] elif hasattr(self._service, name): method = getattr(self._service, name) ctx = self._get_ctx(self._group_id) return with_context(method, ctx) raise AttributeError( "'%s' object has no attribute '%s'" % (self.service, name)) class ProcessI(omero.grid.Process, omero.util.SimpleServant): """ Wrapper around a subprocess.Popen instance. Returned by ProcessorI when a job is submitted. This implementation uses the given interpreter to call a file that must be named "script" in the generated temporary directory. Call is equivalent to: cd TMP_DIR ICE_CONFIG=./config interpreter ./script >out 2>err & The properties argument is used to generate the ./config file. The params argument may be null in which case this process is being used solely to calculate the parameters for the script ("omero.scripts.parse=true") If iskill is True, then on cleanup, this process will reap the attached session completely. """ def __init__(self, ctx, interpreter, properties, params, iskill=False, Popen=subprocess.Popen, callback_cast=omero.grid.ProcessCallbackPrx.uncheckedCast, omero_home=path.getcwd()): """ Popen and callback_Cast are primarily for testing. """ omero.util.SimpleServant.__init__(self, ctx) self.omero_home = omero_home #: Location for OMERO_HOME/lib/python self.interpreter = interpreter #: Executable which will be used on the script self.properties = properties #: Properties used to create an Ice.Config self.params = params #: JobParams for this script. Possibly None if a ParseJob self.iskill = iskill #: Whether or not, cleanup should kill the session self.Popen = Popen #: Function which should be used for creating processes self.callback_cast = callback_cast #: Function used to cast all ProcessCallback proxies # Non arguments (mutable state) self.rcode = None #: return code from popen self.callbacks = {} #: dictionary from id strings to callback proxies self.popen = None #: process. if None, then this instance isn't alive. self.pid = None #: pid of the process. 
Once set, isn't nulled. self.started = None #: time the process started self.stopped = None #: time of deactivation self.final_status = None #: status which will be sent on set_job_status # Non arguments (immutable state) self.uuid = properties["omero.user"] #: session this instance is tied to # More fields set by these methods self.make_files() self.make_env() self.make_config() self.logger.info("Created %s in %s" % (self.uuid, self.dir)) # # Initialization methods # def make_env(self): self.env = omero.util.Environment( "CLASSPATH", "DISPLAY", "DYLD_LIBRARY_PATH", "HOME", "JYTHON_HOME", "LD_LIBRARY_PATH", "MLABRAW_CMD_STR", "PATH", "PYTHONPATH", ) # Since we know the location of our OMERO, we're going to # force the value for OMERO_HOME. This is useful in scripts # which want to be able to find their location. self.env.set("OMERO_HOME", self.omero_home) # WORKAROUND # Currently duplicating the logic here as in the PYTHONPATH # setting of the grid application descriptor (see etc/grid/*.xml) # This should actually be taken care of in the descriptor itself # by having setting PYTHONPATH to an absolute value. This is # not currently possible with IceGrid (without using icepatch -- # see 39.17.2 "node.datadir). self.env.append("PYTHONPATH", str(self.omero_home / "lib" / "python")) self.env.set("ICE_CONFIG", str(self.config_path)) # Also actively adding all jars under lib/server to the CLASSPATH lib_server = self.omero_home / "lib" / "server" for jar_file in lib_server.walk("*.jar"): self.env.append("CLASSPATH", str(jar_file)) def make_files(self): self.dir = create_path("process", ".dir", folder=True) self.script_path = self.dir / "script" self.config_path = self.dir / "config" self.stdout_path = self.dir / "out" self.stderr_path = self.dir / "err" def make_config(self): """ Creates the ICE_CONFIG file used by the client. """ config_file = open(str(self.config_path), "w") try: for key in self.properties.iterkeys(): config_file.write("%s=%s\n" % (key, self.properties[key])) finally: config_file.close() def tmp_client(self): """ Create a client for performing cleanup operations. This client should be closed as soon as possible by the process """ try: client = omero.client(["--Ice.Config=%s" % str(self.config_path)]) client.setAgent("OMERO.process") client.createSession().detachOnDestroy() self.logger.debug("client: %s" % client.sf) return client except: self.logger.error("Failed to create client for %s" % self.uuid) return None # # Activation / Deactivation # @locked def activate(self): """ Process creation has to wait until all external downloads, etc are finished. """ if self.isActive(): raise omero.ApiUsageException(None, None, "Already activated") self.stdout = open(str(self.stdout_path), "w") self.stderr = open(str(self.stderr_path), "w") self.popen = self.Popen( self.command(), cwd=str(self.dir), env=self.env(), stdout=self.stdout, stderr=self.stderr) self.pid = self.popen.pid self.started = time.time() self.stopped = None self.status("Activated") def command(self): """ Method to allow subclasses to override the launch behavior by changing the command passed to self.Popen """ return [self.interpreter, "./script"] @locked def deactivate(self): """ Cleans up the temporary directory used by the process, and terminates the Popen process if running. 
""" if not self.isActive(): raise omero.ApiUsageException(None, None, "Not active") if self.stopped: # Prevent recursion since we are reusing kill & cancel return self.stopped = time.time() d_start = time.time() self.status("Deactivating") # None of these should throw, but just in case try: self.shutdown() # Calls cancel & kill which recall this method! self.popen = None # Now we are finished client = self.tmp_client() try: self.set_job_status(client) self.cleanup_output() self.upload_output(client) # Important! self.cleanup_tmpdir() finally: if client: client.__del__() # Safe closeSession except Exception: self.logger.error( "FAILED TO CLEANUP pid=%s (%s)", self.pid, self.uuid, exc_info=True) d_stop = time.time() elapsed = int(self.stopped - self.started) d_elapsed = int(d_stop - d_start) self.status("Lived %ss. Deactivation took %ss." % (elapsed, d_elapsed)) @locked def isActive(self): """ Tests only if this instance has a non-None popen attribute. After activation this method will return True until the popen itself returns a non-None value (self.rcode) at which time it will be nulled and this method will again return False """ return self.popen is not None @locked def wasActivated(self): """ Returns true only if this instance has either a non-null popen or a non-null rcode field. """ return self.popen is not None or self.rcode is not None @locked def isRunning(self): return self.popen is not None and self.rcode is None @locked def isFinished(self): return self.rcode is not None @locked def alreadyDone(self): """ Allows short-cutting various checks if we already have a rcode for this popen. A non-None return value implies that a process was started and returned the given non-None value itself. """ if not self.wasActivated: raise omero.InternalException( None, None, "Process never activated") return self.isFinished() # # Cleanup methods # def __del__(self): self.cleanup() @perf @locked def check(self): """ Called periodically to keep the session alive. Returns False if this resource can be cleaned up. (Resources API) """ if not self.wasActivated(): return True # This should only happen on startup, so ignore try: self.poll() self.ctx.getSession().getSessionService().getSession(self.uuid) return True except: self.status("Keep alive failed") return False @perf @locked def cleanup(self): """ Deactivates the process (if active) and cleanups the server connection. (Resources API) """ if self.isRunning(): self.deactivate() if not self.iskill: return try: sf = self.ctx.getSession(recreate=False) except: self.logger.debug("Can't get session for cleanup") return self.status("Killing session") svc = sf.getSessionService() obj = omero.model.SessionI() obj.uuid = omero.rtypes.rstring(self.uuid) try: while svc.closeSession(obj) > 0: pass # No action to be taken when iskill == False if # we don't have an actual client to worry with. except: self.logger.error( "Error on session cleanup, kill=%s" % self.iskill, exc_info=True) def cleanup_output(self): """ Flush and close the stderr and stdout streams. 
""" try: if hasattr(self, "stderr"): self.stderr.flush() self.stderr.close() except: self.logger.error("cleanup of sterr failed", exc_info=True) try: if hasattr(self, "stdout"): self.stdout.flush() self.stdout.close() except: self.logger.error("cleanup of sterr failed", exc_info=True) def set_job_status(self, client): """ Sets the job status """ if not client: self.logger.error( "No client: Cannot set job status for pid=%s (%s)", self.pid, self.uuid) return gid = client.sf.getAdminService().getEventContext().groupId handle = WithGroup(client.sf.createJobHandle(), gid) try: status = self.final_status if status is None: status = (self.rcode == 0 and "Finished" or "Error") handle.attach(long(self.properties["omero.job"])) oldStatus = handle.setStatus(status) self.status( "Changed job status from %s to %s" % (oldStatus, status)) finally: handle.close() def upload_output(self, client): """ If this is not a params calculation (i.e. parms != null) and the stdout or stderr are non-null, they they will be uploaded and attached to the job. """ if not client: self.logger.error( "No client: Cannot upload output for pid=%s (%s)", self.pid, self.uuid) return if self.params: out_format = self.params.stdoutFormat err_format = self.params.stderrFormat else: out_format = "text/plain" err_format = out_format self._upload(client, self.stdout_path, "stdout", out_format) self._upload(client, self.stderr_path, "stderr", err_format) def _upload(self, client, filename, name, format): if not format: return filename = str(filename) # Might be path.path sz = os.path.getsize(filename) if not sz: self.status("No %s" % name) return try: ofile = client.upload(filename, name=name, type=format) jobid = long(client.getProperty("omero.job")) link = omero.model.JobOriginalFileLinkI() if self.params is None: link.parent = omero.model.ParseJobI(rlong(jobid), False) else: link.parent = omero.model.ScriptJobI(rlong(jobid), False) link.child = ofile.proxy() client.getSession().getUpdateService().saveObject(link) self.status( "Uploaded %s bytes of %s to %s" % (sz, filename, ofile.id.val)) except: self.logger.error( "Error on upload of %s for pid=%s (%s)", filename, self.pid, self.uuid, exc_info=True) def cleanup_tmpdir(self): """ Remove all known files and finally the temporary directory. If other files exist, an exception will be raised. """ try: remove_path(self.dir) except: self.logger.error("Failed to remove dir %s" % self.dir, exc_info=True) # # popen methods # def status(self, msg = ""): if self.isRunning(): self.rcode = self.popen.poll() self.logger.info("%s : %s", self, msg) @perf @remoted def poll(self, current = None): """ Checks popen.poll() (if active) and notifies all callbacks if necessary. If this method returns a non-None value, then the process will be marked inactive. """ if self.alreadyDone(): return rint(self.rcode) self.status("Polling") if self.rcode is None: # Haven't finished yet, so do nothing. return None else: self.deactivate() rv = rint(self.rcode) self.allcallbacks("processFinished", self.rcode) return rv @perf @remoted def wait(self, current = None): """ Waits on popen.wait() to return (if active) and notifies all callbacks. Marks this process as inactive. 
""" if self.alreadyDone(): return self.rcode self.status("Waiting") self.rcode = self.popen.wait() self.deactivate() self.allcallbacks("processFinished", self.rcode) return self.rcode def _term(self): """ Attempts to cancel the process by sending SIGTERM (or similar) """ try: self.status("os.kill(TERM)") os.kill(self.popen.pid, signal.SIGTERM) except AttributeError: self.logger.debug("No os.kill(TERM). Skipping cancel") def _send(self, iskill): """ Helper method for sending signals. This method only makes a call is the process is active. """ if self.isRunning(): try: if self.popen.poll() is None: if iskill: self.status("popen.kill(True)") self.popen.kill(True) else: self._term() else: self.status("Skipped signal") except OSError, oserr: self.logger.debug("err on pid=%s iskill=%s : %s", self.popen.pid, iskill, oserr) @perf @remoted def cancel(self, current = None): """ Tries to cancel popen (if active) and notifies callbacks. """ if self.alreadyDone(): return True self.final_status = "Cancelled" self._send(iskill=False) finished = self.isFinished() if finished: self.deactivate() self.allcallbacks("processCancelled", finished) return finished @perf @remoted def kill(self, current = None): if self.alreadyDone(): return True self.final_status = "Cancelled" self._send(iskill=True) finished = self.isFinished() if finished: self.deactivate() self.allcallbacks("processKilled", finished) return finished @perf @remoted def shutdown(self, current = None): """ If self.popen is active, then first call cancel, wait a period of time, and finally call kill. """ if self.alreadyDone(): return self.status("Shutdown") try: for i in range(5, 0, -1): if self.cancel(): break else: self.logger.warning("Shutdown: %s (%s). Killing in %s seconds.", self.pid, self.uuid, 6*(i-1)+1) self.stop_event.wait(6) self.kill() except: self.logger.error("Shutdown failed: %s (%s)", self.pid, self.uuid, exc_info = True) # # Callbacks # @remoted @locked def registerCallback(self, callback, current = None): try: id = callback.ice_getIdentity() key = "%s/%s" % (id.category, id.name) callback = callback.ice_oneway() callback = self.callback_cast(callback) if not callback: e = "Callback is invalid" else: self.callbacks[key] = callback self.logger.debug("Added callback: %s", key) return except Exception, ex: e = ex # Only reached on failure msg = "Failed to add callback: %s. Reason: %s" % (callback, e) self.logger.debug(msg) raise omero.ApiUsageException(None, None, msg) @remoted @locked def unregisterCallback(self, callback, current = None): try: id = callback.ice_getIdentity() key = "%s/%s" % (id.category, id.name) if not key in self.callback: raise omero.ApiUsageException(None, None, "No callback registered with id: %s" % key) del self.callbacks[key] self.logger.debug("Removed callback: %s", key) except Exception, e: msg = "Failed to remove callback: %s. 
Reason: %s" % (callback, e) self.logger.debug(msg) raise omero.ApiUsageException(None, None, msg) @locked def allcallbacks(self, method, arg): self.status("Callback %s" % method) for key, cb in self.callbacks.items(): try: m = getattr(cb, method) m(arg) except Ice.LocalException, e: self.logger.debug("LocalException calling callback %s on pid=%s (%s)" % (key, self.pid, self.uuid), exc_info = False) except: self.logger.error("Error calling callback %s on pid=%s (%s)" % (key, self.pid, self.uuid), exc_info = True) def __str__(self): return "<proc:%s,rc=%s,uuid=%s>" % (self.pid, (self.rcode is None and "-" or self.rcode), self.uuid) class MATLABProcessI(ProcessI): def make_files(self): """ Modify the script_path field from ProcessI.make_files in ordert to append a ".m" """ ProcessI.make_files(self) self.script_path = self.dir / "script.m" def command(self): """ Overrides ProcessI to call MATLAB idiosyncratically. """ matlab_cmd = [ self.interpreter, "-nosplash", "-nodisplay", "-nodesktop", "-r", "try, cd('%s'); script; catch, exit(1); end, exit(0)" % self.dir ] return matlab_cmd class UseSessionHolder(object): def __init__(self, sf): self.sf = sf def check(self): try: self.sf.keepAlive(None) return True except: return False def cleanup(self): pass class ProcessorI(omero.grid.Processor, omero.util.Servant): def __init__(self, ctx, needs_session = True, use_session = None, accepts_list = None, cfg = None, omero_home = path.getcwd(), category = None): if accepts_list is None: accepts_list = [] self.category = category #: Category to be used w/ ProcessI self.omero_home = omero_home # Extensions for user-mode processors (ticket:1672) self.use_session = use_session """ If set, this session will be returned from internal_session and the "needs_session" setting ignored. """ if self.use_session: needs_session = False self.accepts_list = accepts_list """ A list of contexts which will be accepted by this user-mode processor. """ omero.util.Servant.__init__(self, ctx, needs_session = needs_session) if cfg is None: self.cfg = os.path.join(omero_home, "etc", "ice.config") self.cfg = os.path.abspath(self.cfg) else: self.cfg = cfg # Keep this session alive until the processor is finished self.resources.add( UseSessionHolder(use_session) ) def setProxy(self, prx): """ Overrides the default action in order to register this proxy with the session's sharedResources to register for callbacks. The on_newsession handler will also keep new sessions informed. See ticket:2304 """ omero.util.Servant.setProxy(self, prx) session = self.internal_session() self.register_session(session) # Keep other session informed self.ctx.on_newsession = self.register_session def user_client(self, agent): """ Creates an omero.client instance for use by users. """ args = ["--Ice.Config=%s" % (self.cfg)] rtr = self.internal_session().ice_getRouter() if rtr: args.insert(0, "--Ice.Default.Router=%s" % rtr) # FIXME : How do we find an internal router? client = omero.client(args) client.setAgent(agent) return client def internal_session(self): """ Returns the session which should be used for lookups by this instance. Some methods will create a session based on the session parameter. In these cases, the session will belong to the user who is running a script. 
""" if self.use_session: return self.use_session else: return self.ctx.getSession() def register_session(self, session): self.logger.info("Registering processor %s", self.prx) prx = omero.grid.ProcessorPrx.uncheckedCast(self.prx) session.sharedResources().addProcessor(prx) def lookup(self, job): sf = self.internal_session() gid = job.details.group.id.val handle = WithGroup(sf.createJobHandle(), gid) try: handle.attach(job.id.val) if handle.jobFinished(): handle.close() raise omero.ApiUsageException("Job already finished.") prx = WithGroup(sf.getScriptService(), gid) file = prx.validateScript(job, self.accepts_list) except omero.SecurityViolation, sv: self.logger.debug("SecurityViolation on validate job %s from group %s", job.id.val, gid) file = None return file, handle @remoted def willAccept(self, userContext, groupContext, scriptContext, cb, current = None): userID = None if userContext != None: userID = userContext.id.val groupID = None if groupContext != None: groupID = groupContext.id.val scriptID = None if scriptContext != None: scriptID = scriptContext.id.val if scriptID: try: file, handle = self.lookup(scriptContext) handle.close() valid = (file is not None) except: self.logger.error("File lookup failed: user=%s, group=%s, script=%s",\ userID, groupID, scriptID, exc_info=1) return # EARlY EXIT ! else: valid = False for x in self.accepts_list: if isinstance(x, omero.model.Experimenter) and x.id.val == userID: valid = True elif isinstance(x, omero.model.ExperimenterGroup) and x.id.val == groupID: valid = True self.logger.debug("Accepts called on: user:%s group:%s scriptjob:%s - Valid: %s", userID, groupID, scriptID, valid) try: id = self.internal_session().ice_getIdentity().name cb = cb.ice_oneway() cb = omero.grid.ProcessorCallbackPrx.uncheckedCast(cb) cb.isAccepted(valid, id, str(self.prx)) except Exception, e: self.logger.warn("callback failed on willAccept: %s Exception:%s", cb, e) return valid @remoted def requestRunning(self, cb, current = None): try: cb = cb.ice_oneway() cb = omero.grid.ProcessorCallbackPrx.uncheckedCast(cb) servants = list(self.ctx.servant_map.values()) rv = [] for x in servants: try: rv.append(long(x.properties["omero.job"])) except: pass cb.responseRunning(rv) except Exception, e: self.logger.warn("callback failed on requestRunning: %s Exception:%s", cb, e) @remoted def parseJob(self, session, job, current = None): self.logger.info("parseJob: Session = %s, JobId = %s" % (session, job.id.val)) client = self.user_client("OMERO.parseJob") try: iskill = False client.joinSession(session).detachOnDestroy() properties = {} properties["omero.scripts.parse"] = "true" prx, process = self.process(client, session, job, current, None, properties, iskill) process.wait() rv = client.getOutput("omero.scripts.parse") if rv != None: return rv.val else: self.logger.warning("No output found for omero.scripts.parse. 
Keys: %s" % client.getOutputKeys()) return None finally: client.closeSession() del client @remoted def processJob(self, session, params, job, current = None): """ """ self.logger.info("processJob: Session = %s, JobId = %s" % (session, job.id.val)) client = self.user_client("OMERO.processJob") try: client.joinSession(session).detachOnDestroy() prx, process = self.process(client, session, job, current, params, iskill = True) return prx finally: client.closeSession() del client @perf def process(self, client, session, job, current, params, properties = None, iskill = True): """ session: session uuid, used primarily if client is None client: an omero.client object which should be attached to a session """ if properties is None: properties = {} if not session or not job or not job.id: raise omero.ApiUsageException("No null arguments") file, handle = self.lookup(job) try: if not file: raise omero.ApiUsageException(\ None, None, "Job should have one executable file attached.") sf = self.internal_session() if params: self.logger.debug("Checking params for job %s" % job.id.val) svc = sf.getSessionService() inputs = svc.getInputs(session) errors = omero.scripts.validate_inputs(params, inputs, svc, session) if errors: errors = "Invalid parameters:\n%s" % errors raise omero.ValidationException(None, None, errors) properties["omero.job"] = str(job.id.val) properties["omero.user"] = session properties["omero.pass"] = session properties["Ice.Default.Router"] = client.getProperty("Ice.Default.Router") launcher, ProcessClass = self.find_launcher(current) process = ProcessClass(self.ctx, launcher, properties, params, iskill, omero_home = self.omero_home) self.resources.add(process) # client.download(file, str(process.script_path)) scriptText = sf.getScriptService().getScriptText(file.id.val) process.script_path.write_bytes(scriptText) self.logger.info("Downloaded file: %s" % file.id.val) s = client.sha1(str(process.script_path)) if not s == file.hash.val: msg = "Sha1s don't match! expected %s, found %s" % (file.hash.val, s) self.logger.error(msg) process.cleanup() raise omero.InternalException(None, None, msg) else: process.activate() handle.setStatus("Running") id = None if self.category: id = Ice.Identity() id.name = "Process-%s" % uuid.uuid4() id.category = self.category prx = self.ctx.add_servant(current, process, ice_identity=id) return omero.grid.ProcessPrx.uncheckedCast(prx), process finally: handle.close() def find_launcher(self, current): launcher = "" process_class = "" if current.ctx: launcher = current.ctx.get("omero.launcher", "") process_class = current.ctx.get("omero.process", "omero.process.ProcessI") if not launcher: launcher = sys.executable self.logger.info("Using launcher: %s", launcher) self.logger.info("Using process: %s", process_class) # Imports in omero.util don't work well for this class # Handling classes from this module specially. internal = False parts = process_class.split(".") if len(parts) == 3: if parts[0:2] == ("omero", "processor"): internal = True if not process_class: ProcessClass = ProcessI elif internal: ProcessClass = globals()[parts[-1]] else: ProcessClass = load_dotted_class(process_class) return launcher, ProcessClass def usermode_processor(client, serverid = "UsermodeProcessor",\ cfg = None, accepts_list = None, stop_event = None,\ omero_home = path.getcwd()): """ Creates and activates a usermode processor for the given client. It is the responsibility of the client to call "cleanup()" on the ProcessorI implementation which is returned. 
cfg is the path to an --Ice.Config-valid file or files. If none is given, the value of ICE_CONFIG will be taken from the environment if available. Otherwise, all properties will be taken from the client instance. accepts_list is the list of IObject instances which will be passed to omero.api.IScripts.validateScript. If none is given, only the current Experimenter's own object will be passed. stop_event is an threading.Event. One will be acquired from omero.util.concurrency.get_event if none is provided. """ if cfg is None: cfg = os.environ.get("ICE_CONFIG") if accepts_list is None: uid = client.sf.getAdminService().getEventContext().userId accepts_list = [omero.model.ExperimenterI(uid, False)] if stop_event is None: stop_event = omero.util.concurrency.get_event(name="UsermodeProcessor") id = Ice.Identity() id.name = "%s-%s" % (serverid, uuid.uuid4()) id.category = client.getCategory() ctx = omero.util.ServerContext(serverid, client.ic, stop_event) impl = omero.processor.ProcessorI(ctx, use_session=client.sf, accepts_list=accepts_list, cfg=cfg, omero_home = omero_home, category=id.category) ctx.add_servant(client.adapter, impl, ice_identity=id) return impl
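# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of OMERO): illustrates the
# cleanup contract documented in usermode_processor() above. The host and
# credentials are placeholders.
#
#   client = omero.client("localhost")
#   client.createSession("user", "password")
#   impl = usermode_processor(client)
#   try:
#       pass  # keep this process alive while the processor serves script jobs
#   finally:
#       impl.cleanup()
#       client.closeSession()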
mtbc/openmicroscopy
components/tools/OmeroPy/src/omero/processor.py
Python
gpl-2.0
35,039
0.004053
from .settings import DEFAULT_MEDIA_FILTERS from django.utils.encoding import smart_str from hashlib import sha1 from mediagenerator.settings import MEDIA_DEV_MODE from mediagenerator.utils import load_backend, find_file, read_text_file import os import time class Filter(object): takes_input = True def __init__(self, **kwargs): self.file_filter = FileFilter self.config(kwargs, filetype=None, filter=None, bundle=None, _from_default=None) # We assume that if this is e.g. a 'js' backend then all input must # also be 'js'. Subclasses must override this if they expect a special # input file type. Also, subclasses have to check if their file type # is supported. self.input_filetype = self.filetype if self.takes_input: self.config(kwargs, input=()) if not isinstance(self.input, (tuple, list)): self.input = (self.input,) self._input_filters = None assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys()) @classmethod def from_default(cls, name): return {'input': name} def should_use_default_filter(self, ext): return ext != self._from_default def get_variations(self): """ Returns all possible variations that get generated by this filter. The result must be a dict whose values are tuples. """ return {} def get_output(self, variation): """ Yields content for each output item for the given variation. """ raise NotImplementedError() def get_dev_output(self, name, variation): """ Returns content for the given file name and variation in development mode. """ index, child = name.split('/', 1) index = int(index) filter = self.get_input_filters()[index] return filter.get_dev_output(child, variation) def get_dev_output_names(self, variation): """ Yields file names for the given variation in development mode. """ # By default we simply return our input filters' file names for index, filter in enumerate(self.get_input_filters()): for name, hash in filter.get_dev_output_names(variation): yield '%d/%s' % (index, name), hash def get_input(self, variation): """Yields contents for each input item.""" for filter in self.get_input_filters(): for input in filter.get_output(variation): yield input def get_input_filters(self): """Returns a Filter instance for each input item.""" if not self.takes_input: raise ValueError("The %s media filter doesn't take any input" % self.__class__.__name__) if self._input_filters is not None: return self._input_filters self._input_filters = [] for input in self.input: if isinstance(input, dict): filter = self.get_filter(input) else: filter = self.get_item(input) self._input_filters.append(filter) return self._input_filters def get_filter(self, config): backend_class = load_backend(config.get('filter')) return backend_class(filetype=self.input_filetype, bundle=self.bundle, **config) def get_item(self, name): ext = os.path.splitext(name)[1].lstrip('.') backend_classes = [] if ext in DEFAULT_MEDIA_FILTERS and self.should_use_default_filter(ext): ext_class = DEFAULT_MEDIA_FILTERS[ext] if isinstance(ext_class, basestring): backend_classes.append(load_backend(DEFAULT_MEDIA_FILTERS[ext])) elif isinstance(ext_class, tuple): backend_classes.append(FilterPipe) for pipe_entry in ext_class: backend_classes.append(load_backend(pipe_entry)) else: backend_classes.append(self.file_filter) backends = [] for backend_class in backend_classes: config = backend_class.from_default(name) config.setdefault('filter', '%s.%s' % (backend_class.__module__, backend_class.__name__)) config.setdefault('filetype', self.input_filetype) config['bundle'] = self.bundle # This is added to make really sure we 
don't instantiate the same # filter in an endless loop. Normally, the child class should # take care of this in should_use_default_filter(). config.setdefault('_from_default', ext) backends.append(backend_class(**config)) backend = backends.pop(0) for pipe_entry in backends: backend.grow_pipe(pipe_entry) return backend def _get_variations_with_input(self): """Utility function to get variations including input variations""" variations = self.get_variations() if not self.takes_input: return variations for filter in self.get_input_filters(): subvariations = filter._get_variations_with_input() for k, v in subvariations.items(): if k in variations and v != variations[k]: raise ValueError('Conflicting variations for "%s": %r != %r' % ( k, v, variations[k])) variations.update(subvariations) return variations def config(self, init, **defaults): for key in defaults: setattr(self, key, init.pop(key, defaults[key])) class FileFilter(Filter): """A filter that just returns the given file.""" takes_input = False def __init__(self, **kwargs): self.config(kwargs, name=None) self.mtime = self.hash = None super(FileFilter, self).__init__(**kwargs) @classmethod def from_default(cls, name): return {'name': name} def get_output(self, variation): yield self.get_dev_output(self.name, variation) def get_dev_output(self, name, variation): self.name = name return read_text_file(self._get_path()) def get_last_modified(self): path = find_file(self.name) return path and os.path.getmtime(path) def get_dev_output_names(self, variation): mtime = self.get_last_modified() # In dev mode, where a lot of requests # we can reduce proc time of filters # making hash = mtime of source file # instead of sha1(filtered_content) if MEDIA_DEV_MODE: hash = str(mtime) elif mtime != self.mtime: output = self.get_dev_output(self.name, variation) hash = sha1(smart_str(output)).hexdigest() else: hash = self.hash yield self.name, hash def _get_path(self): path = find_file(self.name) assert path, """File name "%s" doesn't exist.""" % self.name return path class RawFileFilter(FileFilter): takes_input = False def __init__(self, **kwargs): self.config(kwargs, path=None) super(RawFileFilter, self).__init__(**kwargs) def get_dev_output(self, name, variation): assert name == self.name, ( '''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")''' % (name, self.name)) return read_text_file(self.path) def get_dev_output_names(self, variation): mtime = os.path.getmtime(self.path) if mtime != self.mtime: output = self.get_dev_output(self.name, variation) hash = sha1(smart_str(output)).hexdigest() else: hash = self.hash yield self.name, hash class FilterPipe(FileFilter): def __init__(self, **kwargs): super(FilterPipe, self).__init__(**kwargs) self.pipe = [] def grow_pipe(self, pipe_entry): self.pipe.append(pipe_entry) def get_dev_output(self, name, variation): output = super(FilterPipe, self).get_dev_output(name, variation) for filter in self.pipe: output = filter.get_dev_output(name, variation, content=output) return output def get_last_modified(self): lmod = 0 for entry in self.pipe: entry_lm = entry.get_last_modified() if not entry_lm: return time.time() if entry_lm > lmod: lmod = entry_lm return lmod
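# ---------------------------------------------------------------------------
# Hedged example (editor's addition): a minimal Filter subclass sketching the
# get_output()/get_dev_output() contract described above. "UpperCaseFilter"
# is hypothetical and not part of django-mediagenerator; it simply
# upper-cases whatever its input filters produce.
class UpperCaseFilter(Filter):
    def get_output(self, variation):
        # self.get_input() yields the content of every configured input item.
        for content in self.get_input(variation):
            yield content.upper()

    def get_dev_output(self, name, variation):
        content = super(UpperCaseFilter, self).get_dev_output(name, variation)
        return content.upper()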
Crop-R/django-mediagenerator
mediagenerator/generators/bundles/base.py
Python
bsd-3-clause
8,535
0.001875
from __future__ import print_function from contextlib import contextmanager from distutils.util import convert_path import os from shutil import copyfile import sys import textwrap from setuptools import setup, Command from setuptools.command.develop import develop as develop_cmd from setuptools.command.build_py import build_py # Returns the package and all its sub-packages def find_package_tree(root_path, root_package): root_path = root_path.replace('/', os.path.sep) packages = [root_package] root_count = len(root_path.split(os.path.sep)) for (dir_path, dir_names, file_names) in os.walk(convert_path(root_path)): # Prune dir_names *in-place* to prevent unwanted directory recursion for dir_name in list(dir_names): contains_init_file = os.path.isfile(os.path.join(dir_path, dir_name, '__init__.py')) if not contains_init_file: dir_names.remove(dir_name) # Exclude compiled PyKE rules, but keep associated unit tests. if dir_name == 'compiled_krb' and 'tests' not in dir_path: dir_names.remove(dir_name) if dir_names: prefix = dir_path.split(os.path.sep)[root_count:] packages.extend(['.'.join([root_package] + prefix + [dir_name]) for dir_name in dir_names]) return packages def file_walk_relative(top, remove=''): """ Returns a generator of files from the top of the tree, removing the given prefix from the root/file result. """ top = top.replace('/', os.path.sep) remove = remove.replace('/', os.path.sep) for root, dirs, files in os.walk(top): for file in files: yield os.path.join(root, file).replace(remove, '') @contextmanager def temporary_path(directory): """ Context manager that adds and subsequently removes the given directory to sys.path """ sys.path.insert(0, directory) try: yield finally: del sys.path[0] # Add full path so Python doesn't load any __init__.py in the intervening # directories, thereby saving setup.py from additional dependencies. with temporary_path('lib/iris/tests/runner'): from _runner import TestRunner # noqa: SETUP_DIR = os.path.dirname(__file__) def pip_requirements(name): fname = os.path.join(SETUP_DIR, 'requirements', '{}.txt'.format(name)) if not os.path.exists(fname): raise RuntimeError('Unable to find the {} requirements file at {}' ''.format(name, fname)) reqs = [] with open(fname, 'r') as fh: for line in fh: line = line.strip() if not line or line.startswith('#'): continue reqs.append(line) return reqs class SetupTestRunner(TestRunner, Command): pass class BaseCommand(Command): """A valid no-op command for setuptools & distutils.""" description = 'A no-op command.' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): pass class CleanSource(BaseCommand): description = 'clean orphaned pyc/pyo files from the source directory' def run(self): for root_path, dir_names, file_names in os.walk('lib'): for file_name in file_names: if file_name.endswith('pyc') or file_name.endswith('pyo'): compiled_path = os.path.join(root_path, file_name) source_path = compiled_path[:-1] if not os.path.exists(source_path): print('Cleaning', compiled_path) os.remove(compiled_path) def compile_pyke_rules(cmd, directory): # Call out to the python executable to pre-compile the Pyke rules. # Significant effort was put in to trying to get these to compile # within this build process but there was no obvious way of finding # a workaround to the issue presented in # https://github.com/SciTools/iris/issues/2481. 
shelled_code = textwrap.dedent("""\ import os # Monkey patch the load method to avoid "ModuleNotFoundError: No module # named 'iris.fileformats._pyke_rules.compiled_krb'". In this instance # we simply don't want the knowledge engine, so we turn the load method # into a no-op. from pyke.target_pkg import target_pkg target_pkg.load = lambda *args, **kwargs: None # Compile the rules by hand, without importing iris. That way we can # avoid the need for all of iris' dependencies being installed. os.chdir(os.path.join('{bld_dir}', 'iris', 'fileformats', '_pyke_rules')) # Import pyke *after* changing directory. Without this we get the compiled # rules in the wrong place. Identified in # https://github.com/SciTools/iris/pull/2891#issuecomment-341404187 from pyke import knowledge_engine knowledge_engine.engine('') """.format(bld_dir=directory)).split('\n') shelled_code = '; '.join( [line for line in shelled_code if not line.strip().startswith('#') and line.strip()]) args = [sys.executable, '-c', shelled_code] cmd.spawn(args) def copy_copyright(cmd, directory): # Copy the COPYRIGHT information into the package root iris_build_dir = os.path.join(directory, 'iris') for fname in ['COPYING', 'COPYING.LESSER']: copyfile(fname, os.path.join(iris_build_dir, fname)) def build_std_names(cmd, directory): # Call out to tools/generate_std_names.py to build std_names module. script_path = os.path.join('tools', 'generate_std_names.py') xml_path = os.path.join('etc', 'cf-standard-name-table.xml') module_path = os.path.join(directory, 'iris', 'std_names.py') args = (sys.executable, script_path, xml_path, module_path) cmd.spawn(args) def custom_cmd(command_to_override, functions, help_doc=""): """ Allows command specialisation to include calls to the given functions. """ class ExtendedCommand(command_to_override): description = help_doc or command_to_override.description def run(self): # Run the original command first to make sure all the target # directories are in place. command_to_override.run(self) # build_lib is defined if we are building the package. Otherwise # we want to to the work in-place. 
dest = getattr(self, 'build_lib', None) if dest is None: print(' [Running in-place]') # Pick the source dir instead (currently in the sub-dir "lib") dest = 'lib' for func in functions: func(self, dest) return ExtendedCommand def extract_version(): version = None fnme = os.path.join(SETUP_DIR, 'lib', 'iris', '__init__.py') with open(fnme) as fd: for line in fd: if (line.startswith('__version__')): _, version = line.split('=') version = version.strip()[1:-1] # Remove quotation characters break return version custom_commands = { 'test': SetupTestRunner, 'develop': custom_cmd( develop_cmd, [build_std_names, compile_pyke_rules]), 'build_py': custom_cmd( build_py, [build_std_names, compile_pyke_rules, copy_copyright]), 'std_names': custom_cmd(BaseCommand, [build_std_names], help_doc="generate CF standard name module"), 'pyke_rules': custom_cmd(BaseCommand, [compile_pyke_rules], help_doc="compile CF-NetCDF loader rules"), 'clean_source': CleanSource, } pypi_name = 'scitools-iris' with open(os.path.join(SETUP_DIR, 'README.md'), 'r') as fh: description = ''.join(fh.readlines()) setup( name=pypi_name, version=extract_version(), url='http://scitools.org.uk/iris/', author='UK Met Office', author_email='scitools-iris-dev@googlegroups.com', description="A powerful, format-agnostic, community-driven Python " "library for analysing and visualising Earth science data", long_description=description, long_description_content_type='text/markdown', packages=find_package_tree('lib/iris', 'iris'), package_dir={'': 'lib'}, include_package_data=True, cmdclass=custom_commands, zip_safe=False, setup_requires=pip_requirements('setup'), install_requires=pip_requirements('setup') + pip_requirements('core'), tests_require=['{}[test]'.format(pypi_name)], extras_require = { 'test': pip_requirements('test'), 'all': pip_requirements('all'), 'extensions': pip_requirements('extensions'), }, )
dkillick/iris
setup.py
Python
lgpl-3.0
8,908
0.000449
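The setup.py text above wires extra build steps into existing setuptools commands. As a reading aid, here is a minimal, self-contained sketch of that same command-extension pattern; the function generate_extra_file and the generated.py filename are hypothetical illustrations, not part of the Iris build.

import os
from setuptools.command.build_py import build_py


def generate_extra_file(cmd, directory):
    # Hypothetical extra build step: write a generated module into the target tree.
    with open(os.path.join(directory, 'generated.py'), 'w') as fh:
        fh.write('GENERATED = True\n')


def custom_cmd(command_to_override, functions, help_doc=""):
    """Return a subclass of command_to_override that also runs the given functions."""
    class ExtendedCommand(command_to_override):
        description = help_doc or command_to_override.description

        def run(self):
            # Run the wrapped command first so the target directories exist.
            command_to_override.run(self)
            # build_lib is set when building the package; otherwise work in-place.
            dest = getattr(self, 'build_lib', None) or 'lib'
            for func in functions:
                func(self, dest)
    return ExtendedCommand


ExtendedBuildPy = custom_cmd(build_py, [generate_extra_file],
                             help_doc="build, then generate an extra module")
# Wired up via: setup(..., cmdclass={'build_py': ExtendedBuildPy})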
import numpy as np from pygmin.potentials.potential import BasePotential class WhamPotential(BasePotential): """ ############################################################################# # the idea behind this minimization procedure is as follows: ############################################################################# from a simulation at temperature T you find the probability of finding energy E is P(E,T). We know this can be compared to the density of states n(E) as P(E,T) = n(E) exp(-E/T) the density of states is independent of temperature, so we can use it to find P(E) at any other temperature, or Z(T), etc. But our estimate of n(E) from one temperature is not very good. So we combine P(E,T) multiple simulations at different temperatures to get a better estimate of n(E). The true density of states, n_T(E) is the weighted average of n_i(E) at all temperatures T_i n_F(E) = sum_i w_i*n_i(E) = sum_i w_i*P(E,T_i)*exp(E/T_i) where w_i are unknown. The best estimate for n_F(E) will be when the equality is satisfied as much as possible term by term. Define exp(R) the deviation from the term-by-term agreement R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i)*exp(E/T_i) ) we want to make each R(E,T_i) as small as possible. Define an "energy" function CHI2 = sum_E sum_i P(E,T_i)*|R(E,T_i)|^2 Where each R(E,T_i) contributes weight proportional to P(E,T_i) to the sum to make sure those with better statistics are more heavily weighted. To solve the problem we find the set of {n_F(E), w_i} which minimize CHI2 """ def __init__(self, logP, reduced_energy): """ To make it fit withing existing minimization schemes, we need to view it as a linear problem nrep: the number of replica variables, i.e. len(w_i) nbins: the number of bins in the histogram, e.g. len(n_F(E)) logP: = log(P(E,T_i)) a.k.a. log(visits) a 2d array of shape( nreps, nbins). reduced_energy: E/T_i a 2d array of shape( nreps, nbins) giving the reduced energy of each bin note: this works perfectly well for 2d histograms as well. In this case the 2d histograms should be linearized warning: many bins will be completely unoccupied for all replicas. This means there will be a lot of extra work done trying to minimize irrelevant variables. This is fine, just make sure logP == 0 for all bins with zero visits, not logP = -inf """ self.nreps, self.nbins = logP.shape self.logP = logP self.weight = self.logP + reduced_energy #self.reduced_energy = reduced_energy if ( np.isinf(self.logP).any() or np.isnan(self.logP).any() ): print "logP is NaN or infinite" exit(1) if False: #see how much effort is wasted nallzero = len( np.where(np.abs(self.logP.sum(1)) < 1e-10 )[0]) print "WHAM: number of degrees of freedom", self.nreps + self.nbins print "WHAM: number of irrelevant d.o.f. ", nallzero def getEnergy(self, X): """ X: is the array of unknowns of length nrep + nbins X[0:nrep] = {w_i} : the replica unknowns X[nrep:] = {log(n_F(E))} : the bin unknowns R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i)*exp(E/T_i) ) energy = sum_E sum_i P(E,T_i)*|R(E,T_i)|^2 """ wi = X[:self.nreps] lognF = X[self.nreps:] """ energy = 0. 
for irep in range(self.nreps): for ibin in range(self.nbins): R = lognF[ibin] - wi[irep] -( logP[irep, ibin] + reduced_energy[irep, ibin]) energy += logP[irep, ibin] * R**2 """ energy = np.sum( self.logP * (lognF[np.newaxis,:] - wi[:,np.newaxis] - self.weight)**2 ) return energy def getEnergyGradient(self, X): """ X: is the array of unknowns of length nrep + nbins X[0:nrep] = {w_i} : the replica unknowns X[nrep:] = {log(n_F(E))} : the bin unknowns R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i)*exp(E/T_i) ) energy = sum_E sum_i P(E,T_i)*|R(E,T_i)|^2 """ wi = X[:self.nreps] lognF = X[self.nreps:] R = lognF[np.newaxis,:] - wi[:,np.newaxis] - self.weight energy = np.sum( self.logP * (R)**2 ) gradient = np.zeros(len(X)) gradient[:self.nreps] = -2. * np.sum( self.logP * R, axis=1 ) gradient[self.nreps:] = 2. * np.sum( self.logP * R, axis=0 ) #print np.shape(gradient) #print gradient return energy, gradient
js850/PyGMIN
pygmin/wham/wham_potential.py
Python
gpl-3.0
4,820
0.011618
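To make the CHI2 objective described in the WhamPotential docstring concrete, here is a small standalone numpy check, independent of pygmin, confirming that the vectorised expression used in getEnergy equals the explicit double sum over replicas and bins; the logP and reduced_energy arrays are synthetic toy data.

import numpy as np

nreps, nbins = 3, 5
rng = np.random.RandomState(0)
logP = rng.rand(nreps, nbins)            # log(visits), synthetic
reduced_energy = rng.rand(nreps, nbins)  # E/T_i, synthetic
weight = logP + reduced_energy

wi = rng.rand(nreps)      # replica unknowns w_i
lognF = rng.rand(nbins)   # bin unknowns log(n_F(E))

# Vectorised energy, as in WhamPotential.getEnergy
R = lognF[np.newaxis, :] - wi[:, np.newaxis] - weight
energy = np.sum(logP * R ** 2)

# Same quantity via the explicit double loop from the docstring
check = sum(logP[i, b] * (lognF[b] - wi[i] - weight[i, b]) ** 2
            for i in range(nreps) for b in range(nbins))
assert np.isclose(energy, check)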
from __future__ import unicode_literals from django.utils.six.moves.urllib.parse import parse_qs, urlsplit from reviewboard.hostingsvcs.utils.paginator import (APIPaginator, InvalidPageError, ProxyPaginator) from reviewboard.testing import TestCase class DummyAPIPaginator(APIPaginator): start_query_param = 'start' per_page_query_param = 'per-page' def fetch_url(self, url): return { 'data': [1, 2, 3], 'headers': {}, } class APIPaginatorTests(TestCase): """Tests for APIPaginator.""" def test_construct_initial_load(self): """Testing APIPaginator construction performs initial load""" paginator = DummyAPIPaginator(None, 'http://example.com', start=10) self.assertEqual(paginator.page_data, [1, 2, 3]) def test_construct_with_start(self): """Testing APIPaginator construction with start=<value>""" url = 'http://example.com/api/list/?foo=1' paginator = DummyAPIPaginator(None, url, start=10) parts = urlsplit(paginator.url) query_params = parse_qs(parts[3]) self.assertEqual(query_params['foo'], ['1']) self.assertEqual(query_params['start'], ['10']) def test_construct_with_per_page(self): """Testing APIPaginator construction with per_page=<value>""" url = 'http://example.com/api/list/?foo=1' paginator = DummyAPIPaginator(None, url, per_page=10) parts = urlsplit(paginator.url) query_params = parse_qs(parts[3]) self.assertEqual(query_params['foo'], ['1']) self.assertEqual(query_params['per-page'], ['10']) def test_extract_page_info(self): """Testing APIPaginator page information extraction""" class PageInfoAPIPaginator(APIPaginator): def fetch_url(self, url): return { 'data': ['a', 'b', 'c'], 'headers': { 'Foo': 'Bar', }, 'per_page': 10, 'total_count': 100, 'prev_url': 'http://example.com/?page=1', 'next_url': 'http://example.com/?page=3', } paginator = PageInfoAPIPaginator(None, 'http://example.com/') self.assertEqual(paginator.page_data, ['a', 'b', 'c']) self.assertEqual(paginator.page_headers['Foo'], 'Bar') self.assertEqual(paginator.per_page, 10) self.assertEqual(paginator.total_count, 100) self.assertEqual(paginator.prev_url, 'http://example.com/?page=1') self.assertEqual(paginator.next_url, 'http://example.com/?page=3') def test_prev(self): """Testing APIPaginator.prev""" prev_url = 'http://example.com/?page=1' paginator = DummyAPIPaginator(None, 'http://example.com') paginator.prev_url = prev_url self.assertTrue(paginator.has_prev) self.assertFalse(paginator.has_next) data = paginator.prev() self.assertEqual(data, [1, 2, 3]) self.assertEqual(paginator.url, prev_url) def test_prev_without_prev_page(self): """Testing APIPaginator.prev without a previous page""" paginator = DummyAPIPaginator(None, 'http://example.com') url = paginator.url self.assertFalse(paginator.has_prev) self.assertRaises(InvalidPageError, paginator.prev) self.assertEqual(paginator.url, url) def test_next(self): """Testing APIPaginator.next""" next_url = 'http://example.com/?page=3' paginator = DummyAPIPaginator(None, 'http://example.com') paginator.next_url = next_url self.assertFalse(paginator.has_prev) self.assertTrue(paginator.has_next) data = paginator.next() self.assertEqual(data, [1, 2, 3]) self.assertEqual(paginator.url, next_url) def test_next_without_next_page(self): """Testing APIPaginator.next without a next page""" paginator = DummyAPIPaginator(None, 'http://example.com') url = paginator.url self.assertFalse(paginator.has_next) self.assertRaises(InvalidPageError, paginator.next) self.assertEqual(paginator.url, url) class ProxyPaginatorTests(TestCase): """Tests for ProxyPaginator.""" def setUp(self): self.paginator = 
DummyAPIPaginator(None, 'http://example.com') self.proxy = ProxyPaginator(self.paginator) def test_has_prev(self): """Testing ProxyPaginator.has_prev""" self.assertFalse(self.proxy.has_prev) self.paginator.prev_url = 'http://example.com/?start=1' self.assertTrue(self.proxy.has_prev) def test_has_next(self): """Testing ProxyPaginator.has_next""" self.assertFalse(self.proxy.has_next) self.paginator.next_url = 'http://example.com/?start=2' self.assertTrue(self.proxy.has_next) def test_per_page(self): """Testing ProxyPaginator.per_page""" self.paginator.per_page = 10 self.assertEqual(self.proxy.per_page, 10) def test_total_count(self): """Testing ProxyPaginator.total_count""" self.paginator.total_count = 100 self.assertEqual(self.proxy.total_count, 100) def test_prev(self): """Testing ProxyPaginator.prev""" prev_url = 'http://example.com/?page=1' self.paginator.prev_url = prev_url self.assertTrue(self.proxy.has_prev) self.assertFalse(self.proxy.has_next) data = self.proxy.prev() self.assertEqual(data, [1, 2, 3]) self.assertEqual(self.paginator.url, prev_url) def test_next(self): """Testing ProxyPaginator.next""" next_url = 'http://example.com/?page=3' self.paginator.next_url = next_url self.assertFalse(self.proxy.has_prev) self.assertTrue(self.proxy.has_next) data = self.proxy.next() self.assertEqual(data, [1, 2, 3]) self.assertEqual(self.paginator.url, next_url) def test_normalize_page_data(self): """Testing ProxyPaginator.normalize_page_data""" proxy = ProxyPaginator( self.paginator, normalize_page_data_func=lambda data: list(reversed(data))) self.assertEqual(proxy.page_data, [3, 2, 1]) def test_normalize_page_data_on_prev(self): """Testing ProxyPaginator.normalize_page_data on prev""" proxy = ProxyPaginator( self.paginator, normalize_page_data_func=lambda data: list(reversed(data))) self.paginator.prev_url = 'http://example.com/?page=1' data = proxy.prev() self.assertEqual(data, [3, 2, 1]) def test_normalize_page_data_on_next(self): """Testing ProxyPaginator.normalize_page_data on next""" proxy = ProxyPaginator( self.paginator, normalize_page_data_func=lambda data: list(reversed(data))) self.paginator.next_url = 'http://example.com/?page=3' data = proxy.next() self.assertEqual(data, [3, 2, 1])
KnowNo/reviewboard
reviewboard/hostingsvcs/utils/tests.py
Python
mit
7,168
0
""" Tests for Game scripts. """ import sys from twisted.trial.unittest import TestCase from twisted.python.failure import Failure from twisted.internet.defer import Deferred from twisted.python import log from twisted.internet import reactor from game.ui import UI from game.scripts.network_client import NetworkClient class DoubleLogModule(object): """ A thing which looks like some of the L{twisted.python.log} module. @ivar messages: A list of arguments passed to L{msg}. @ivar errors: A list of arguments passed to L{err}. @ivar logFiles: A list of arguments passed to L{startLogging}. """ def __init__(self): self.messages = [] self.errors = [] self.logFiles = [] self.msg = self.messages.append self.startLogging = self.logFiles.append def err(self, reason, message=None): self.errors.append((reason, message)) class DoubleReactor(object): """ A thing which has a L{run} method. @ivar runs: The number of times L{run} has been called. @ivar stops: The number of times L{stop} has been called. """ def __init__(self): self.runs = 0 self.stops = 0 def run(self): """ Record an attempt to run the reactor. """ self.runs += 1 def stop(self): """ Record an attempt to stop the reactor. """ self.stops += 1 class DoubleUI(object): """ A thing which looks like L{game.ui.UI}. @ivar starts: Mapping of tuples of host and port to L{Deferred}s. """ def __init__(self): self.starts = {} def start(self, (hostname, port)): """ Record an attempt to start the game @return: A L{Deferred}. """ d = Deferred() self.starts[hostname, port] = d return d class NetworkClientTest(TestCase): """ Tests for the simple network-client script. """ def setUp(self): """ Create a double logger, a double reactor, and a double UI, and a L{NetworkClient}. """ self.logger = DoubleLogModule() self.reactor = DoubleReactor() self.ui = DoubleUI() self.networkClient = NetworkClient( log=self.logger, reactor=self.reactor, uiFactory=lambda: self.ui) def test_main(self): """ There should be a method which takes command line arguments and runs the network client in an appropriate environment. """ host = 'example.com' port = 1337 self.networkClient.main(['network-client.py', host, str(port)]) self.assertEqual(self.logger.logFiles, [sys.stdout]) self.assertEqual(self.reactor.runs, 1) self.assertEqual(self.ui.starts.keys(), [(host, port)]) def test_logErrors(self): """ If the deferred returned from L{UI.start} should error back, the error should be logged. """ self.networkClient.run('host', 123) e = Exception("OH NOES!") self.ui.starts[('host', 123)].errback(e) self.assertEqual(len(self.logger.errors), 1) error, message = self.logger.errors[0] self.assertIdentical(error.value, e) self.assertEquals(message, "Problem running UI") def test_defaults(self): """ L{NetworkClient} should not require arguments to instantiate, and should have reasonable defaults. """ client = NetworkClient() self.assertIdentical(client.log, log) self.assertIdentical(client.reactor, reactor) self.assertIdentical(client.uiFactory, UI) def test_stop(self): """ When the L{Deferred} returned by the UI's C{start} method fires, L{NetworkClient} stops the reactor. """ self.networkClient.run('host', 123) self.assertEquals(self.reactor.stops, 0) self.ui.starts[('host', 123)].callback(None) self.assertEquals(self.reactor.stops, 1) def test_stopOnError(self): """ If the L{Deferred} returned by the UI's C{start} method fires with a L{Failure}, the failure is logged and L{NetworkClient} stops the reactor. 
""" self.networkClient.run('host', 123) self.ui.starts[('host', 123)].errback(Failure(RuntimeError("oops"))) self.assertEquals(self.reactor.stops, 1)
eriknelson/gam3
game/test/test_script.py
Python
mit
4,405
0.002724
from tests.package.test_python import TestPythonPackageBase class TestPythonPy2Can(TestPythonPackageBase): __test__ = True config = TestPythonPackageBase.config + \ """ BR2_PACKAGE_PYTHON=y BR2_PACKAGE_PYTHON_CAN=y """ sample_scripts = ["tests/package/sample_python_can.py"] timeout = 40 class TestPythonPy3Can(TestPythonPackageBase): __test__ = True config = TestPythonPackageBase.config + \ """ BR2_PACKAGE_PYTHON3=y BR2_PACKAGE_PYTHON_CAN=y """ sample_scripts = ["tests/package/sample_python_can.py"] timeout = 40
masahir0y/buildroot-yamada
support/testing/tests/package/test_python_can.py
Python
gpl-2.0
617
0
# -*- coding: utf-8 -*- import htmls from django import test from cradmin_legacy import datetimeutils from model_bakery import baker from devilry.devilry_cradmin import devilry_listbuilder class TestItemValue(test.TestCase): def test_title(self): testassignment = baker.make('core.Assignment', long_name='Test Assignment') selector = htmls.S(devilry_listbuilder.assignment.ItemValue(value=testassignment).render()) self.assertEqual( 'Test Assignment', selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized) def test_description(self): testassignment = baker.make('core.Assignment', publishing_time=datetimeutils.default_timezone_datetime(2016, 12, 11), first_deadline=datetimeutils.default_timezone_datetime(2016, 12, 24, 18, 0)) selector = htmls.S(devilry_listbuilder.assignment.ItemValue(value=testassignment).render()) self.assertEqual( 'First deadline: Saturday December 24, 2016, 18:00, ' 'Publishing time: Sunday December 11, 2016, 00:00', selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
devilry/devilry-django
devilry/devilry_cradmin/tests/test_devilry_listbuilder/test_assignment.py
Python
bsd-3-clause
1,305
0.005364
import os import sys from recommonmark.parser import CommonMarkParser sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'askomics')) extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon'] master_doc = 'index' # The suffix of source filenames. source_suffix = ['.rst', '.md'] source_parsers = { '.md': CommonMarkParser, } def run_apidoc(_): from sphinx.apidoc import main parent_folder = os.path.join(os.path.dirname(__file__), '..') cur_dir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(parent_folder) module = os.path.join(parent_folder, 'askomics') output_path = os.path.join(cur_dir, 'api') main(['-e', '-f', '-o', output_path, module]) def setup(app): app.connect('builder-inited', run_apidoc)
askomics/askomics
docs/conf.py
Python
agpl-3.0
778
0
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import datetime from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('sistema', '0015_aporte'), ] operations = [ migrations.RenameField( model_name='aporte', old_name='total', new_name='interes', ), migrations.RenameField( model_name='servicio', old_name='cextraordinaria', new_name='importe', ), migrations.RemoveField( model_name='servicio', name='csocial', ), migrations.RemoveField( model_name='servicio', name='cterreno', ), migrations.RemoveField( model_name='servicio', name='curbanizacion', ), migrations.RemoveField( model_name='servicio', name='gadministrativos', ), migrations.RemoveField( model_name='servicio', name='interes', ), migrations.RemoveField( model_name='servicio', name='otros', ), migrations.AddField( model_name='servicio', name='nombre', field=models.CharField(default=datetime.datetime(2016, 10, 24, 14, 47, 46, 843455, tzinfo=utc), max_length=100), preserve_default=False, ), migrations.AlterField( model_name='aporte', name='num_factura', field=models.PositiveIntegerField(unique=True), ), migrations.AlterField( model_name='servicio', name='descripcion', field=models.CharField(help_text=b'Descripci\xc3\xb3n del servicio.', max_length=100, verbose_name=b'Descripci\xc3\xb3n'), ), ]
gabrielf10/webAmpunc
sistema/migrations/0016_auto_20161024_1447.py
Python
bsd-3-clause
1,903
0.001051
# # ovirt-engine-setup -- ovirt engine setup # Copyright (C) 2013-2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Misc plugin.""" import gettext import logging import os from otopi import constants as otopicons from otopi import plugin, util from ovirt_engine_setup import constants as osetupcons from ovirt_setup_lib import dialog def _(m): return gettext.dgettext(message=m, domain='ovirt-engine-setup') @util.export class Plugin(plugin.PluginBase): """Misc plugin.""" def __init__(self, context): super(Plugin, self).__init__(context=context) @plugin.event( stage=plugin.Stages.STAGE_BOOT, before=( otopicons.Stages.CORE_CONFIG_INIT, ), priority=plugin.Stages.PRIORITY_HIGH - 20, ) def _preinit(self): self.environment.setdefault( otopicons.CoreEnv.CONFIG_FILE_NAME, self.resolveFile( os.environ.get( otopicons.SystemEnvironment.CONFIG, self.resolveFile( osetupcons.FileLocations.OVIRT_OVIRT_SETUP_CONFIG_FILE ) ) ) ) logging.getLogger('ovirt').setLevel(logging.DEBUG) @plugin.event( stage=plugin.Stages.STAGE_INIT, priority=plugin.Stages.PRIORITY_HIGH, ) def _init(self): self.environment.setdefault( osetupcons.CoreEnv.GENERATED_BY_VERSION, None ) self.environment[ osetupcons.CoreEnv.ORIGINAL_GENERATED_BY_VERSION ] = self.environment[ osetupcons.CoreEnv.GENERATED_BY_VERSION ] self.environment[ osetupcons.CoreEnv.GENERATED_BY_VERSION ] = osetupcons.Const.PACKAGE_VERSION self.environment.setdefault( osetupcons.CoreEnv.DEVELOPER_MODE, None ) self.environment.setdefault( osetupcons.CoreEnv.UPGRADE_SUPPORTED_VERSIONS, '3.0,3.1,3.2,3.3,3.4,3.5,3.6,4.0' ) self.logger.debug( 'Package: %s-%s (%s)', osetupcons.Const.PACKAGE_NAME, osetupcons.Const.PACKAGE_VERSION, osetupcons.Const.DISPLAY_VERSION, ) self.environment[ osetupcons.CoreEnv.SETUP_ATTRS_MODULES ] = [osetupcons] if self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] is None: self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] = False if os.geteuid() != 0: if not dialog.queryBoolean( dialog=self.dialog, name='OVESETUP_SYSTEM_UNPRIVILEGED', note=_( 'Setup was run under unprivileged user ' 'this will produce development installation ' 'do you wish to proceed? 
(@VALUES@) [@DEFAULT@]: ' ), prompt=True, default=False, ): raise RuntimeError(_('Aborted by user')) self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] = True if ( not self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] and os.geteuid() != 0 ): raise RuntimeError( _('Running as non root and not in development mode') ) @plugin.event( stage=plugin.Stages.STAGE_CLEANUP, ) def _cleanup(self): self.dialog.note( text=_('Log file is located at {path}').format( path=self.environment[ otopicons.CoreEnv.LOG_FILE_NAME ], ), ) @plugin.event( stage=plugin.Stages.STAGE_TERMINATE, ) def _terminate(self): if self.environment[otopicons.BaseEnv.ERROR]: self.logger.error( _('Execution of {action} failed').format( action=self.environment[ osetupcons.CoreEnv.ACTION ], ), ) else: self.logger.info( _('Execution of {action} completed successfully').format( action=self.environment[ osetupcons.CoreEnv.ACTION ], ), ) # vim: expandtab tabstop=4 shiftwidth=4
OpenUniversity/ovirt-engine
packaging/setup/plugins/ovirt-engine-common/base/core/misc.py
Python
apache-2.0
4,950
0
# Copyright ClusterHQ Inc. See LICENSE file for details. """ Client for the Flocker REST API. This may eventually be a standalone package. """ from ._client import ( IFlockerAPIV1Client, FakeFlockerClient, Dataset, DatasetState, DatasetAlreadyExists, ) __all__ = ["IFlockerAPIV1Client", "FakeFlockerClient", "Dataset", "DatasetState", "DatasetAlreadyExists"]
adamtheturtle/flocker
flocker/apiclient/__init__.py
Python
apache-2.0
383
0
# structureModifyPowerRechargeRate # # Used by: # Structure Modules from group: Structure Capacitor Power Relay (2 of 2) type = "passive" def handler(fit, module, context): fit.ship.multiplyItemAttr("rechargeRate", module.getModifiedItemAttr("capacitorRechargeRateMultiplier"))
bsmr-eve/Pyfa
eos/effects/structuremodifypowerrechargerate.py
Python
gpl-3.0
284
0.003521
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CTC (Connectionist Temporal Classification) Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_ctc_ops from tensorflow.python.ops.nn_grad import _BroadcastMul from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access, invalid-name @tf_export("nn.ctc_loss") def ctc_loss(labels, inputs, sequence_length, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, time_major=True): """Computes the CTC (Connectionist Temporal Classification) Loss. This op implements the CTC loss as presented in the article: [A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber. Connectionist Temporal Classification: Labeling Unsegmented Sequence Data with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA, pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf) Input requirements: ``` sequence_length(b) <= time for all b max(labels.indices(labels.indices[:, 1] == b, 2)) <= sequence_length(b) for all b. ``` Notes: This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM. The `inputs` Tensor's innermost dimension size, `num_classes`, represents `num_labels + 1` classes, where num_labels is the number of true labels, and the largest value `(num_classes - 1)` is reserved for the blank label. For example, for a vocabulary containing 3 labels `[a, b, c]`, `num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`. Regarding the arguments `preprocess_collapse_repeated` and `ctc_merge_repeated`: If `preprocess_collapse_repeated` is True, then a preprocessing step runs before loss calculation, wherein repeated labels passed to the loss are merged into single labels. This is useful if the training labels come from, e.g., forced alignments and therefore have unnecessary repetitions. If `ctc_merge_repeated` is set False, then deep within the CTC calculation, repeated non-blank labels will not be merged and are interpreted as individual labels. This is a simplified (non-standard) version of CTC. Here is a table of the (roughly) expected first order behavior: * `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True` Classical CTC behavior: Outputs true repeated classes with blanks in between, and can also output repeated classes with no blanks in between that need to be collapsed by the decoder. * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False` Never learns to output repeated classes, as they are collapsed in the input labels before training. 
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False` Outputs repeated classes with blanks in between, but generally does not require the decoder to collapse/merge repeated classes. * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True` Untested. Very likely will not learn to output repeated classes. The `ignore_longer_outputs_than_inputs` option allows to specify the behavior of the CTCLoss when dealing with sequences that have longer outputs than inputs. If true, the CTCLoss will simply return zero gradient for those items, otherwise an InvalidArgument error is returned, stopping training. Args: labels: An `int32` `SparseTensor`. `labels.indices[i, :] == [b, t]` means `labels.values[i]` stores the id for (batch b, time t). `labels.values[i]` must take on values in `[0, num_labels)`. See `core/ops/ctc_ops.cc` for more details. inputs: 3-D `float` `Tensor`. If time_major == False, this will be a `Tensor` shaped: `[batch_size, max_time, num_classes]`. If time_major == True (default), this will be a `Tensor` shaped: `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector, size `[batch_size]`. The sequence lengths. preprocess_collapse_repeated: Boolean. Default: False. If True, repeated labels are collapsed prior to the CTC calculation. ctc_merge_repeated: Boolean. Default: True. ignore_longer_outputs_than_inputs: Boolean. Default: False. If True, sequences with longer outputs than inputs will be ignored. time_major: The shape format of the `inputs` Tensors. If True, these `Tensors` must be shaped `[max_time, batch_size, num_classes]`. If False, these `Tensors` must be shaped `[batch_size, max_time, num_classes]`. Using `time_major = True` (default) is a bit more efficient because it avoids transposes at the beginning of the ctc_loss calculation. However, most TensorFlow data is batch-major, so by this function also accepts inputs in batch-major form. Returns: A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities. Raises: TypeError: if labels is not a `SparseTensor`. """ # The second, third, etc output tensors contain the gradients. We use it in # _CTCLossGrad() below. if not isinstance(labels, sparse_tensor.SparseTensor): raise TypeError("Expected labels (first argument) to be a SparseTensor") # For internal calculations, we transpose to [time, batch, num_classes] if not time_major: inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N) loss, _ = gen_ctc_ops.ctc_loss( inputs, labels.indices, labels.values, sequence_length, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) return loss # pylint: disable=unused-argument @ops.RegisterGradient("CTCLoss") def _CTCLossGrad(op, grad_loss, _): """The derivative provided by CTC Loss. Args: op: the CTCLoss op. grad_loss: The backprop for cost. Returns: The CTC Loss gradient. """ # Outputs are: loss, grad # # Currently there is no way to take the second derivative of this op # due to the fused implementation's interaction with tf.gradients(), # so we make sure we prevent silently incorrect results by raising # an error if the second derivative is requested via prevent_gradient. 
grad_without_gradient = array_ops.prevent_gradient( op.outputs[1], message="Currently there is no way to take the second " " derivative of ctc_loss due to the fused implementation's interaction " " with tf.gradients()") # Return gradient for inputs and None for # labels_indices, labels_values and sequence_length return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None] @tf_export("nn.ctc_greedy_decoder") def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True): """Performs greedy decoding on the logits given in input (best path). Note: Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank index `(num_classes - 1)`, no new element is emitted. If `merge_repeated` is `True`, merge repeated classes in output. This means that if consecutive logits' maximum indices are the same, only the first of these is emitted. The sequence `A B B * B * B` (where '*' is the blank label) becomes * `A B B B` if `merge_repeated=True`. * `A B B B B` if `merge_repeated=False`. Args: inputs: 3-D `float` `Tensor` sized `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, neg_sum_logits)` where decoded: A single-element list. `decoded[0]` is an `SparseTensor` containing the decoded outputs s.t.: `decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`. The rows store: `[batch, time]`. `decoded.values`: Values vector, size `(total_decoded_outputs)`. The vector stores the decoded classes. `decoded.dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length]` neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the sequence found, the negative of the sum of the greatest logit at each timeframe. """ outputs = gen_ctc_ops.ctc_greedy_decoder( inputs, sequence_length, merge_repeated=merge_repeated) (decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)], log_probabilities) @tf_export("nn.ctc_beam_search_decoder") def ctc_beam_search_decoder(inputs, sequence_length, beam_width=100, top_paths=1, merge_repeated=True): """Performs beam search decoding on the logits given in input. **Note** The `ctc_greedy_decoder` is a special case of the `ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but that decoder is faster for this special case). If `merge_repeated` is `True`, merge repeated classes in the output beams. This means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the sequence is `A B B * B * B` (where '*' is the blank label), the return value is: * `A B` if `merge_repeated = True`. * `A B B B` if `merge_repeated = False`. Args: inputs: 3-D `float` `Tensor`, size `[max_time x batch_size x num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, log_probabilities)` where decoded: A list of length top_paths, where `decoded[j]` is a `SparseTensor` containing the decoded outputs: `decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)` The rows store: [batch, time]. 
`decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`. The vector stores the decoded classes for beam j. `decoded[j].dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length[j]]`. log_probability: A `float` matrix `(batch_size x top_paths)` containing sequence log-probabilities. """ decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = ( gen_ctc_ops.ctc_beam_search_decoder( inputs, sequence_length, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated)) return ( [sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(decoded_ixs, decoded_vals, decoded_shapes)], log_probabilities) ops.NotDifferentiable("CTCGreedyDecoder") ops.NotDifferentiable("CTCBeamSearchDecoder")
kobejean/tensorflow
tensorflow/python/ops/ctc_ops.py
Python
apache-2.0
11,926
0.00218
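As a usage illustration for the ctc_ops record above, here is a minimal sketch of calling ctc_loss and ctc_greedy_decoder; it assumes a TensorFlow 1.x graph-mode environment, and the logits, label values and sequence lengths are made-up toy data.

import numpy as np
import tensorflow as tf

max_time, batch_size, num_labels = 6, 2, 3   # blank index == num_labels
num_classes = num_labels + 1

# Time-major logits: [max_time, batch_size, num_classes]
logits = tf.constant(
    np.random.randn(max_time, batch_size, num_classes).astype(np.float32))
seq_len = tf.constant([max_time, max_time], dtype=tf.int32)

# Sparse labels: batch 0 -> [0, 1], batch 1 -> [2]
labels = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                         values=tf.constant([0, 1, 2], dtype=tf.int32),
                         dense_shape=[batch_size, 2])

loss = tf.nn.ctc_loss(labels, logits, seq_len, time_major=True)
decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len)

with tf.Session() as sess:
    print(sess.run([loss, log_prob]))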
"""tests/test_output_format.py. Tests the output format handlers included with Hug Copyright (C) 2015 Timothy Edmund Crosley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import pytest import hug from collections import namedtuple from datetime import datetime def test_text(): '''Ensure that it's possible to output a Hug API method as text''' hug.output_format.text("Hello World!") == "Hello World!" hug.output_format.text(str(1)) == "1" def test_json(): '''Ensure that it's possible to output a Hug API method as JSON''' now = datetime.now() test_data = {'text': 'text', 'datetime': now, 'bytes': b'bytes'} output = hug.output_format.json(test_data).decode('utf8') assert 'text' in output assert 'bytes' in output assert now.isoformat() in output class NewObject(object): pass test_data['non_serializable'] = NewObject() with pytest.raises(TypeError): hug.output_format.json(test_data).decode('utf8') class NamedTupleObject(namedtuple('BaseTuple', ('name', 'value'))): pass data = NamedTupleObject('name', 'value') converted = hug.input_format.json(hug.output_format.json(data).decode('utf8')) assert converted == {'name': 'name', 'value': 'value'} def test_pretty_json(): '''Ensure that it's possible to output a Hug API method as prettified and indented JSON''' test_data = {'text': 'text'} assert hug.output_format.pretty_json(test_data).decode('utf8') == ('{\n' ' "text": "text"\n' '}') def test_json_camelcase(): '''Ensure that it's possible to output a Hug API method as camelCased JSON''' test_data = {'under_score': {'values_can': 'Be Converted'}} output = hug.output_format.json_camelcase(test_data).decode('utf8') assert 'underScore' in output assert 'valuesCan' in output assert 'Be Converted' in output
janusnic/hug
tests/test_output_format.py
Python
mit
2,987
0.004687
# -*- coding: utf-8 -*- from __future__ import print_function from .compat import user_input def correct(call_a, call_b): """ Informs the user that the order is correct. :param call_a: first call number - not used at this time but could be important in later versions. :param call_b: second call number - not used at this time but could be important in later versions. """ print("_________________________________") print("| |") print("| Correct |") print("| |") print("|________________________________|") def incorrect(call_a, call_b): """ Informs the user that the order is incorrect and provides the call numbers so the user can do a manual check. :param call_a: first call number :param call_b: second call number """ print("_________________________________") print("| |") print("| Incorrect |") print("| |") print("|________________________________|") print(call_a) print(call_b) print() def get_next_callnumber(barcode_dict): """ Prompts the user for a barcode and returns the appropriate call number. If the user inputs a barcode that is not in the dictionary, the user is prompted again. :param barcode_dict: dictionary of barcodes and call numbers :returns: call number that matches user input barcode """ barcode = user_input("Barcode >>> ") while barcode.lower() != 'exit': call_number = barcode_dict.get(barcode) if call_number is not None: return call_number else: print("Barcode does not have an associated call number") barcode = user_input("Barcode >>> ") exit()
asmacdo/shelf-reader
shelf_reader/ui.py
Python
gpl-2.0
1,924
0.00052
from salesking.tests.base import SalesKingBaseTestCase class MockCollectionContactResponse(object): def __init__(self): self.status_code = 200 self.content = u''' {"contacts": [{"contact":{"id":"a55-akQyir5ld2abxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1286","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-01-31T20:49:11+01:00","updated_at":"2015-01-31T20:49:11+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg", "addresses": [{"address":{"id":"a56cekQyir5ld2abxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-01-31T20:49:11+01:00","updated_at":"2015-01-31T20:49:11+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}} ],"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-01-31","sales_potential":null,"probability":null,"expected_revenue":null }, "links":[{"rel":"self","href":"contacts/a55-akQyir5ld2abxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/a55-akQyir5ld2abxfpGMl"}, {"rel":"update","href":"contacts/a55-akQyir5ld2abxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/a55-akQyir5ld2abxfpGMl/documents"}, {"rel":"attachments","href":"contacts/a55-akQyir5ld2abxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/a55-akQyir5ld2abxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/a55-akQyir5ld2abxfpGMl/estimates"}, {"rel":"orders","href":"contacts/a55-akQyir5ld2abxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/a55-akQyir5ld2abxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/a55-akQyir5ld2abxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/a55-akQyir5ld2abxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/a55-akQyir5ld2abxfpGMl/comments"}, {"rel":"emails","href":"contacts/a55-akQyir5ld2abxfpGMl/emails"}, {"rel":"emails create","href":"contacts/a55-akQyir5ld2abxfpGMl/emails"}] }, {"contact":{"id":"a6N570lb8r4yBvabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-01012-911","organisation":"Funky Music Times","last_name":"Darwin","first_name":"George","gender":"male","notes":null,"position":null,"title":null,"tax_number":null,"vat_number":null,"email":"","url":null,"birthday":null,"tag_list":"!example","created_at":"2011-12-21T23:00:43+01:00","updated_at":"2012-02-02T20:06:01+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":1,"cash_discount":null,"due_days":null,"address_field":"Funky Music Times\nHerr George Darwin\n71 Brushfield St\nE1 6 Greater London", "addresses": [{"address":{"id":"a6N4dclb8r4yBvabxfpGMl","city":"Greater London","address1":"71 Brushfield St","address2":null,"pobox":"","zip":"E1 
6","state":null,"country":null,"created_at":"2011-12-21T23:00:43+01:00","updated_at":"2011-12-21T23:00:43+01:00","address_type":null,"order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":null,"sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/a6N570lb8r4yBvabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/a6N570lb8r4yBvabxfpGMl"}, {"rel":"update","href":"contacts/a6N570lb8r4yBvabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/a6N570lb8r4yBvabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/a6N570lb8r4yBvabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/a6N570lb8r4yBvabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/a6N570lb8r4yBvabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/a6N570lb8r4yBvabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/a6N570lb8r4yBvabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/a6N570lb8r4yBvabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/a6N570lb8r4yBvabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/a6N570lb8r4yBvabxfpGMl/comments"}, {"rel":"emails","href":"contacts/a6N570lb8r4yBvabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/a6N570lb8r4yBvabxfpGMl/emails"}]}, {"contact":{"id":"aBJabEQFyr5lXDabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1292","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-02-01T10:38:41+01:00","updated_at":"2015-02-01T10:38:41+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aBJecGQFyr5lXDabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-02-01T10:38:41+01:00","updated_at":"2015-02-01T10:38:41+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-02-01","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aBJabEQFyr5lXDabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aBJabEQFyr5lXDabxfpGMl"}, {"rel":"update","href":"contacts/aBJabEQFyr5lXDabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aBJabEQFyr5lXDabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aBJabEQFyr5lXDabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aBJabEQFyr5lXDabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aBJabEQFyr5lXDabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aBJabEQFyr5lXDabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aBJabEQFyr5lXDabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aBJabEQFyr5lXDabxfpGMl/recurrings"}, 
{"rel":"payment_reminders","href":"contacts/aBJabEQFyr5lXDabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aBJabEQFyr5lXDabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aBJabEQFyr5lXDabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aBJabEQFyr5lXDabxfpGMl/emails"}]}, {"contact":{"id":"aG92TWQJOr5kgsabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1324","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-02-01T18:45:36+01:00","updated_at":"2015-02-01T18:45:36+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aG96MmQJOr5kgsabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-02-01T18:45:36+01:00","updated_at":"2015-02-01T18:45:36+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-02-01","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aG92TWQJOr5kgsabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aG92TWQJOr5kgsabxfpGMl"}, {"rel":"update","href":"contacts/aG92TWQJOr5kgsabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aG92TWQJOr5kgsabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aG92TWQJOr5kgsabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aG92TWQJOr5kgsabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aG92TWQJOr5kgsabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aG92TWQJOr5kgsabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aG92TWQJOr5kgsabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aG92TWQJOr5kgsabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aG92TWQJOr5kgsabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aG92TWQJOr5kgsabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aG92TWQJOr5kgsabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aG92TWQJOr5kgsabxfpGMl/emails"}]}, {"contact":{"id":"aG_NccQxWr5i6tabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1284","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-01-31T20:05:32+01:00","updated_at":"2015-01-31T20:05:32+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": 
[{"address":{"id":"aG_Rg-QxWr5i6tabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-01-31T20:05:32+01:00","updated_at":"2015-01-31T20:05:32+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-01-31","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aG_NccQxWr5i6tabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aG_NccQxWr5i6tabxfpGMl"}, {"rel":"update","href":"contacts/aG_NccQxWr5i6tabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aG_NccQxWr5i6tabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aG_NccQxWr5i6tabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aG_NccQxWr5i6tabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aG_NccQxWr5i6tabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aG_NccQxWr5i6tabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aG_NccQxWr5i6tabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aG_NccQxWr5i6tabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aG_NccQxWr5i6tabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aG_NccQxWr5i6tabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aG_NccQxWr5i6tabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aG_NccQxWr5i6tabxfpGMl/emails"}]}, {"contact":{"id":"aH8DLsP_Sr5kddabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1257","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-01-29T22:09:37+01:00","updated_at":"2015-01-29T22:09:37+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aH8I34P_Sr5kddabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-01-29T22:09:37+01:00","updated_at":"2015-01-29T22:09:37+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-01-29","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aH8DLsP_Sr5kddabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aH8DLsP_Sr5kddabxfpGMl"}, {"rel":"update","href":"contacts/aH8DLsP_Sr5kddabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/orders"}, 
{"rel":"credit_notes","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aH8DLsP_Sr5kddabxfpGMl/emails"}]}, {"contact":{"id":"aIwYA6QIur5i6tabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1312","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-02-01T16:15:19+01:00","updated_at":"2015-02-01T16:15:19+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aIw2FIQIur5i6tabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-02-01T16:15:19+01:00","updated_at":"2015-02-01T16:15:19+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-02-01","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aIwYA6QIur5i6tabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aIwYA6QIur5i6tabxfpGMl"}, {"rel":"update","href":"contacts/aIwYA6QIur5i6tabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aIwYA6QIur5i6tabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aIwYA6QIur5i6tabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aIwYA6QIur5i6tabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aIwYA6QIur5i6tabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aIwYA6QIur5i6tabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aIwYA6QIur5i6tabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aIwYA6QIur5i6tabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aIwYA6QIur5i6tabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aIwYA6QIur5i6tabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aIwYA6QIur5i6tabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aIwYA6QIur5i6tabxfpGMl/emails"}]}, 
{"contact":{"id":"aK-e82QIir5khAabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1304","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-02-01T15:53:55+01:00","updated_at":"2015-02-01T15:53:55+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aK-i-cQIir5khAabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-02-01T15:53:55+01:00","updated_at":"2015-02-01T15:53:55+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-02-01","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aK-e82QIir5khAabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aK-e82QIir5khAabxfpGMl"}, {"rel":"update","href":"contacts/aK-e82QIir5khAabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aK-e82QIir5khAabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aK-e82QIir5khAabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aK-e82QIir5khAabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aK-e82QIir5khAabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aK-e82QIir5khAabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aK-e82QIir5khAabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aK-e82QIir5khAabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aK-e82QIir5khAabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aK-e82QIir5khAabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aK-e82QIir5khAabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aK-e82QIir5khAabxfpGMl/emails"}]}, {"contact":{"id":"aWCH9mQxSr5k6sabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1279","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-01-31T19:58:48+01:00","updated_at":"2015-01-31T19:58:48+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"aWCL4WQxSr5k6sabxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-01-31T19:58:48+01:00","updated_at":"2015-01-31T19:58:48+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], 
"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-01-31","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/aWCH9mQxSr5k6sabxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/aWCH9mQxSr5k6sabxfpGMl"}, {"rel":"update","href":"contacts/aWCH9mQxSr5k6sabxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/documents"}, {"rel":"attachments","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/estimates"}, {"rel":"orders","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/comments"}, {"rel":"emails","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/emails"}, {"rel":"emails create","href":"contacts/aWCH9mQxSr5k6sabxfpGMl/emails"}]}, {"contact":{"id":"adJ9O6QxGr5km4abxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-2015-1271","organisation":"salesking","last_name":"Jane","first_name":"Dow","gender":null,"notes":"APITEST","position":null,"title":null,"tax_number":null,"vat_number":null,"email":"sales.py-api@mailinator.com","url":null,"birthday":null,"tag_list":"","created_at":"2015-01-31T19:36:04+01:00","updated_at":"2015-01-31T19:36:04+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":0,"cash_discount":null,"due_days":null,"address_field":"salesking\nDow Jane\nFoo Street\nAppartment Bar\nDuisburg","addresses": [{"address":{"id":"adKbTSQxGr5km4abxfpGMl","city":"Duisburg","address1":"Foo Street","address2":"Appartment Bar","pobox":null,"zip":null,"state":null,"country":null,"created_at":"2015-01-31T19:36:04+01:00","updated_at":"2015-01-31T19:36:04+01:00","address_type":"work","order":null,"lat":null,"long":null,"_destroy":false}}], "team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":"2015-01-31","sales_potential":null,"probability":null,"expected_revenue":null}, "links": [{"rel":"self","href":"contacts/adJ9O6QxGr5km4abxfpGMl"}, {"rel":"instances","href":"contacts"}, {"rel":"destroy","href":"contacts/adJ9O6QxGr5km4abxfpGMl"}, {"rel":"update","href":"contacts/adJ9O6QxGr5km4abxfpGMl"}, {"rel":"create","href":"contacts"}, {"rel":"documents","href":"contacts/adJ9O6QxGr5km4abxfpGMl/documents"}, {"rel":"attachments","href":"contacts/adJ9O6QxGr5km4abxfpGMl/attachments"}, {"rel":"invoices","href":"contacts/adJ9O6QxGr5km4abxfpGMl/invoices"}, {"rel":"estimates","href":"contacts/adJ9O6QxGr5km4abxfpGMl/estimates"}, {"rel":"orders","href":"contacts/adJ9O6QxGr5km4abxfpGMl/orders"}, {"rel":"credit_notes","href":"contacts/adJ9O6QxGr5km4abxfpGMl/credit_notes"}, {"rel":"recurrings","href":"contacts/adJ9O6QxGr5km4abxfpGMl/recurrings"}, {"rel":"payment_reminders","href":"contacts/adJ9O6QxGr5km4abxfpGMl/payment_reminders"}, {"rel":"comments","href":"contacts/adJ9O6QxGr5km4abxfpGMl/comments"}, {"rel":"emails","href":"contacts/adJ9O6QxGr5km4abxfpGMl/emails"}, 
{"rel":"emails create","href":"contacts/adJ9O6QxGr5km4abxfpGMl/emails"}]}],"links":{"self":"https://frank.dev.salesking.eu/api/contacts?organisation=fb-&sort=ASC&per_page=10&page=1","next":"https://frank.dev.salesking.eu/api/contacts?organisation=fb-&sort=ASC&per_page=10&page=2"},"collection":{"current_page":1,"per_page":10,"total_entries":72,"total_pages":8}} '''.replace(u"\n", u"").replace(u"\t", u"").replace(u" ", u"") class ContactsCollectionMockTestCase(SalesKingBaseTestCase): pass
salesking/salesking_python_sdk
salesking/tests/test_contacts_collection_mock.py
Python
apache-2.0
24,848
0.000926
"""Manipulate release notes from Github commits to make them more compact. Gets its input from stdin, assumed in the following format: 1 line per commit, starting with "[Some subject]" and ending with "(#123456)" Lines with the same subject will be grouped together, sorted by increasing PR number. Commits not starting with a subject will be grouped under an empty subject. """ import itertools import re import sys from typing import Iterator # Matches '[Subject]' at start of line. _SUBJECT_REGEX = re.compile(r'^\[[^\]]+\]') # Matches '(#123456)' at end of line, and captures the number. _PR_REGEX = re.compile(r'\(#(\d+)\)$') _DROPPED_SUBJECTS = [f'[{subject}]' for subject in ['Clean Code', 'Small Fix', 'Easy Dev']] def _subject_key(line: str) -> str: """Get the subject for the line or an empty string.""" if subject_match := _SUBJECT_REGEX.search(line): return subject_match.group() return '' def _pr_key(line: str) -> int: """Get the PR number for the line or 0.""" if pr_match := _PR_REGEX.search(line): return int(pr_match.group(1)) return 0 def compress_notes(notes: Iterator[str]) -> str: """Group all commit messages by subject.""" separator = '\n\t' return '\n'.join( subject + separator + separator.join( line.rstrip()[len(subject):] for line in sorted_lines ) if len(sorted_lines := sorted(lines, key=_pr_key)) > 1 else sorted_lines[0].rstrip() for subject, lines in itertools.groupby(sorted(notes, key=_subject_key), key=_subject_key) if subject not in _DROPPED_SUBJECTS) if __name__ == '__main__': print(compress_notes(sys.stdin))
bayesimpact/bob-emploi
frontend/release/compress_notes.py
Python
gpl-3.0
1,668
0.002398
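A minimal usage sketch for the compress_notes() helper in the record above. The sample commit lines and the import path (module name compress_notes) are assumptions made for illustration; the function itself only needs an iterator of commit-message strings.

# Hypothetical usage of compress_notes(); the sample commit lines are invented.
from compress_notes import compress_notes  # assumed module name, matching the file above

sample = [
    '[UI] Fix button alignment (#1234)\n',
    '[UI] Add dark mode toggle (#1201)\n',
    'Bump dependencies (#1300)\n',
]
print(compress_notes(iter(sample)))
# The two "[UI]" commits are folded under a single "[UI]" heading (sorted by PR number);
# the commit without a subject is kept as a stand-alone line.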
import re import threading from datetime import datetime, timedelta from typing import Any, List, Mapping, Optional, Tuple, Union from reactivex import Notification, Observable, abc, notification, typing from reactivex.disposable import CompositeDisposable, Disposable from reactivex.scheduler import NewThreadScheduler new_thread_scheduler = NewThreadScheduler() # tokens will be searched in the order below using pipe # group of elements: match any characters surrounded by () pattern_group = r"(\(.*?\))" # timespan: match one or multiple hyphens pattern_ticks = r"(-+)" # comma err: match any comma which is not in a group pattern_comma_error = r"(,)" # element: match | or # or one or more characters which are not - | # ( ) , pattern_element = r"(#|\||[^-,()#\|]+)" pattern = r"|".join( [ pattern_group, pattern_ticks, pattern_comma_error, pattern_element, ] ) tokens = re.compile(pattern) def hot( string: str, timespan: typing.RelativeTime = 0.1, duetime: typing.AbsoluteOrRelativeTime = 0.0, lookup: Optional[Mapping[Union[str, float], Any]] = None, error: Optional[Exception] = None, scheduler: Optional[abc.SchedulerBase] = None, ) -> Observable[Any]: _scheduler = scheduler or new_thread_scheduler if isinstance(duetime, datetime): duetime = duetime - _scheduler.now messages = parse( string, timespan=timespan, time_shift=duetime, lookup=lookup, error=error, raise_stopped=True, ) lock = threading.RLock() is_stopped = False observers: List[abc.ObserverBase[Any]] = [] def subscribe( observer: abc.ObserverBase[Any], scheduler: Optional[abc.SchedulerBase] = None ) -> abc.DisposableBase: # should a hot observable already completed or on error # re-push on_completed/on_error at subscription time? if not is_stopped: with lock: observers.append(observer) def dispose(): with lock: try: observers.remove(observer) except ValueError: pass return Disposable(dispose) def create_action(notification: Notification[Any]) -> typing.ScheduledAction[Any]: def action(scheduler: abc.SchedulerBase, state: Any = None): nonlocal is_stopped with lock: for observer in observers: notification.accept(observer) if notification.kind in ("C", "E"): is_stopped = True return action for message in messages: timespan, notification = message action = create_action(notification) # Don't make closures within a loop _scheduler.schedule_relative(timespan, action) return Observable(subscribe) def from_marbles( string: str, timespan: typing.RelativeTime = 0.1, lookup: Optional[Mapping[Union[str, float], Any]] = None, error: Optional[Exception] = None, scheduler: Optional[abc.SchedulerBase] = None, ) -> Observable[Any]: messages = parse( string, timespan=timespan, lookup=lookup, error=error, raise_stopped=True ) def subscribe( observer: abc.ObserverBase[Any], scheduler_: Optional[abc.SchedulerBase] = None ) -> abc.DisposableBase: _scheduler = scheduler or scheduler_ or new_thread_scheduler disp = CompositeDisposable() def schedule_msg( message: Tuple[typing.RelativeTime, Notification[Any]] ) -> None: duetime, notification = message def action(scheduler: abc.SchedulerBase, state: Any = None): notification.accept(observer) disp.add(_scheduler.schedule_relative(duetime, action)) for message in messages: # Don't make closures within a loop schedule_msg(message) return disp return Observable(subscribe) def parse( string: str, timespan: typing.RelativeTime = 1.0, time_shift: typing.RelativeTime = 0.0, lookup: Optional[Mapping[Union[str, float], Any]] = None, error: Optional[Exception] = None, raise_stopped: bool = False, ) -> 
List[Tuple[typing.RelativeTime, notification.Notification[Any]]]: """Convert a marble diagram string to a list of messages. Each character in the string will advance time by timespan (exept for space). Characters that are not special (see the table below) will be interpreted as a value to be emitted. numbers will be cast to int or float. Special characters: +--------+--------------------------------------------------------+ | `-` | advance time by timespan | +--------+--------------------------------------------------------+ | `#` | on_error() | +--------+--------------------------------------------------------+ | `|` | on_completed() | +--------+--------------------------------------------------------+ | `(` | open a group of elements sharing the same timestamp | +--------+--------------------------------------------------------+ | `)` | close a group of elements | +--------+--------------------------------------------------------+ | `,` | separate elements in a group | +--------+--------------------------------------------------------+ | space | used to align multiple diagrams, does not advance time | +--------+--------------------------------------------------------+ In a group of elements, the position of the initial `(` determines the timestamp at which grouped elements will be emitted. E.g. `--(12,3,4)--` will emit 12, 3, 4 at 2 * timespan and then advance virtual time by 8 * timespan. Examples: >>> parse("--1--(2,3)-4--|") >>> parse("a--b--c-", lookup={'a': 1, 'b': 2, 'c': 3}) >>> parse("a--b---#", error=ValueError("foo")) Args: string: String with marble diagram timespan: [Optional] duration of each character in second. If not specified, defaults to 0.1s. lookup: [Optional] dict used to convert an element into a specified value. If not specified, defaults to {}. time_shift: [Optional] time used to delay every elements. If not specified, defaults to 0.0s. error: [Optional] exception that will be use in place of the # symbol. If not specified, defaults to Exception('error'). raise_finished: [optional] raise ValueError if elements are declared after on_completed or on_error symbol. Returns: A list of messages defined as a tuple of (timespan, notification). 
""" error_ = error or Exception("error") lookup_ = lookup or {} if isinstance(timespan, timedelta): timespan = timespan.total_seconds() if isinstance(time_shift, timedelta): time_shift = time_shift.total_seconds() string = string.replace(" ", "") # try to cast a string to an int, then to a float def try_number(element: str) -> Union[float, str]: try: return int(element) except ValueError: try: return float(element) except ValueError: return element def map_element( time: typing.RelativeTime, element: str ) -> Tuple[typing.RelativeTime, Notification[Any]]: if element == "|": return (time, notification.OnCompleted()) elif element == "#": return (time, notification.OnError(error_)) else: value = try_number(element) value = lookup_.get(value, value) return (time, notification.OnNext(value)) is_stopped = False def check_stopped(element: str) -> None: nonlocal is_stopped if raise_stopped: if is_stopped: raise ValueError("Elements cannot be declared after a # or | symbol.") if element in ("#", "|"): is_stopped = True iframe = 0 messages: List[Tuple[typing.RelativeTime, Notification[Any]]] = [] for results in tokens.findall(string): timestamp = iframe * timespan + time_shift group, ticks, comma_error, element = results if group: elements = group[1:-1].split(",") for elm in elements: check_stopped(elm) grp_messages = [ map_element(timestamp, elm) for elm in elements if elm != "" ] messages.extend(grp_messages) iframe += len(group) if ticks: iframe += len(ticks) if comma_error: raise ValueError("Comma is only allowed in group of elements.") if element: check_stopped(element) message = map_element(timestamp, element) messages.append(message) iframe += len(element) return messages
ReactiveX/RxPY
reactivex/observable/marbles.py
Python
mit
9,232
0.000542
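A short, hedged sketch of the parse() helper from the marbles module above, using the module path given in the record; the marble string and timings are illustrative only.

# Illustrative only: parse a small marble diagram into (relative_time, Notification) pairs.
from reactivex.observable.marbles import parse  # module path taken from the record above

for delay, note in parse("-1-(2,3)|", timespan=0.1):
    print(round(delay, 1), note)
# Roughly: OnNext(1) at 0.1s, OnNext(2) and OnNext(3) together at 0.3s,
# then OnCompleted at 0.8s, since the group "(2,3)" advances time by its full width.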
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2015-2018 CERN. # # Invenio is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. """Configuration options for Invenio-Search. The documentation for the configuration is in docs/configuration.rst. """ # # ELASTIC configuration # SEARCH_CLIENT_CONFIG = None """Dictionary of options for the Elasticsearch client. The value of this variable is passed to :py:class:`elasticsearch.Elasticsearch` as keyword arguments and is used to configure the client. See the available keyword arguments in the two following classes: - :py:class:`elasticsearch.Elasticsearch` - :py:class:`elasticsearch.Transport` If you specify the key ``hosts`` in this dictionary, the configuration variable :py:class:`~invenio_search.config.SEARCH_ELASTIC_HOSTS` will have no effect. """ SEARCH_ELASTIC_HOSTS = None # default localhost """Elasticsearch hosts. By default, Invenio connects to ``localhost:9200``. The value of this variable is a list of dictionaries, where each dictionary represents a host. The available keys in each dictionary is determined by the connection class: - :py:class:`elasticsearch.connection.Urllib3HttpConnection` (default) - :py:class:`elasticsearch.connection.RequestsHttpConnection` You can change the connection class via the :py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG`. If you specified the ``hosts`` key in :py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG` then this configuration variable will have no effect. """ SEARCH_MAPPINGS = None # loads all mappings and creates aliases for them """List of aliases for which, their search mappings should be created. - If `None` all aliases (and their search mappings) defined through the ``invenio_search.mappings`` entry point in setup.py will be created. - Provide an empty list ``[]`` if no aliases (or their search mappings) should be created. For example if you don't want to create aliases and their mappings for `authors`: .. code-block:: python # in your `setup.py` you would specify: entry_points={ 'invenio_search.mappings': [ 'records = invenio_foo_bar.mappings', 'authors = invenio_foo_bar.mappings', ], } # and in your config.py SEARCH_MAPPINGS = ['records'] """ SEARCH_RESULTS_MIN_SCORE = None """If set, the `min_score` parameter is added to each search request body. The `min_score` parameter excludes results which have a `_score` less than the minimum specified in `min_score`. Note that the `max_score` varies depending on the number of results for a given search query and it is not absolute value. Therefore, setting `min_score` too high can lead to 0 results because it can be higher than any result's `_score`. Please refer to `Elasticsearch min_score documentation <https://www.elastic.co/guide/en/elasticsearch/reference/current/ search-request-min-score.html>`_ for more information. """ SEARCH_INDEX_PREFIX = '' """Any index, alias and templates will be prefixed with this string. Useful to host multiple instances of the app on the same Elasticsearch cluster, for example on one app you can set it to `dev-` and on the other to `prod-`, and each will create non-colliding indices prefixed with the corresponding string. Usage example: .. code-block:: python # in your config.py SEARCH_INDEX_PREFIX = 'prod-' For templates, ensure that the prefix `__SEARCH_INDEX_PREFIX__` is added to your index names. This pattern will be replaced by the prefix config value. Usage example in your template.json: .. 
code-block:: json { "index_patterns": ["__SEARCH_INDEX_PREFIX__myindex-name-*"] } """
inveniosoftware/invenio-search
invenio_search/config.py
Python
mit
3,755
0
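A hedged sketch of how an instance might override the options documented in the record above in its own config.py; the host name and prefix are invented values.

# Hypothetical instance configuration; values are examples only.
SEARCH_CLIENT_CONFIG = {
    # forwarded as keyword arguments to elasticsearch.Elasticsearch(...)
    "hosts": [{"host": "es.example.org", "port": 9200}],
}
SEARCH_MAPPINGS = ["records"]     # only create the 'records' alias and its mappings
SEARCH_INDEX_PREFIX = "prod-"     # prefix every index, alias and template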
from __future__ import print_function, division
#
import sys, os
quspin_path = os.path.join(os.getcwd(), "../../")
sys.path.insert(0, quspin_path)
#
from quspin.operators import hamiltonian  # Hamiltonians and operators
from quspin.basis import boson_basis_1d  # Hilbert space spin basis
from quspin.tools.evolution import evolve  # ODE evolve tool
from quspin.tools.Floquet import Floquet_t_vec  # stroboscopic time vector
import numpy as np  # generic math functions
from six import iteritems  # loop over elements of dictionary
#
L = 50  # number of lattice sites
i_CM = L//2 - 0.5  # centre of chain
#
### static model parameters
J = 1.0  # hopping
kappa_trap = 0.002  # harmonic trap strength
U = 1.0  # mean-field (GPE) interaction
### periodic driving
A = 1.0  # drive amplitude
Omega = 10.0  # drive frequency


def drive(t, Omega):
    return np.exp(-1j*A*np.sin(Omega*t))


def drive_conj(t, Omega):
    return np.exp(+1j*A*np.sin(Omega*t))


drive_args = [Omega]  # drive arguments
t = Floquet_t_vec(Omega, 30, len_T=1)  # time vector, 30 stroboscopic periods
#
### site-coupling lists
hopping = [[-J, i, (i+1) % L] for i in range(L)]
trap = [[kappa_trap*(i-i_CM)**2, i] for i in range(L)]
#
### operator strings for single-particle Hamiltonian
static = [['n', trap]]
dynamic = [["+-", hopping, drive, drive_args], ["-+", hopping, drive_conj, drive_args]]
# define single-particle basis
basis = boson_basis_1d(L, Nb=1, sps=2)  # Nb=1 boson and sps=2 states per site [empty and filled]
#
### build Hamiltonian
H = hamiltonian(static, dynamic, basis=basis, dtype=np.complex128)
# calculate eigenvalues and eigenvectors of free particle in a harmonic trap
E, V = H.eigh(time=0)
# initial state normalised to one particle per site
phi0 = V[:, 0]*np.sqrt(L)


#######
def GPE(time, phi):
    '''Solves the complex-valued time-dependent Gross-Pitaevskii equation.'''
    # integrate static part of GPE
    phi_dot = -1j*(H.static.dot(phi) + U*np.abs(phi)**2*phi)
    # integrate dynamic part of GPE
    for fun, Hdyn in iteritems(H.dynamic):
        phi_dot += -1j*fun(time)*Hdyn.dot(phi)
    return phi_dot


# solving the complex-valued GPE takes one line
phi_t = evolve(phi0, t.i, t.vals, GPE)


########
def GPE_real(time, phi, H, U):
    '''Solves the Gross-Pitaevskii equation, cast into real-valued form so it
    can be solved with a real-valued ODE solver.
    '''
    # preallocate memory for phi_dot
    phi_dot = np.zeros_like(phi)
    # read off number of lattice sites (array dimension of phi)
    Ns = H.Ns
    # static single-particle part
    phi_dot[:Ns] = H.static.dot(phi[Ns:]).real
    phi_dot[Ns:] = -H.static.dot(phi[:Ns]).real
    # static GPE interaction
    phi_dot_2 = np.abs(phi[:Ns])**2 + np.abs(phi[Ns:])**2
    phi_dot[:Ns] += U*phi_dot_2*phi[Ns:]
    phi_dot[Ns:] -= U*phi_dot_2*phi[:Ns]
    # dynamic single-particle term
    for func, Hdyn in iteritems(H.dynamic):
        fun = func(time)  # evaluate drive
        phi_dot[:Ns] += (+(fun.real)*Hdyn.dot(phi[Ns:]) + (fun.imag)*Hdyn.dot(phi[:Ns])).real
        phi_dot[Ns:] += (-(fun.real)*Hdyn.dot(phi[:Ns]) + (fun.imag)*Hdyn.dot(phi[Ns:])).real
    return phi_dot


# define ODE solver parameters
GPE_params = (H, U)
# solving the real-valued GPE takes one line
phi_t = evolve(phi0, t.i, t.vals, GPE_real, stack_state=True, f_params=GPE_params)
weinbe58/QuSpin
sphinx/doc_examples/evolve-example.py
Python
bsd-3-clause
3,153
0.047257
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "particle_0 geometry" not in marker_sets: s=new_marker_set('particle_0 geometry') marker_sets["particle_0 geometry"]=s s= marker_sets["particle_0 geometry"] mark=s.place_marker((10367.5, 666.475, 5813.89), (0.7, 0.7, 0.7), 890.203) if "particle_1 geometry" not in marker_sets: s=new_marker_set('particle_1 geometry') marker_sets["particle_1 geometry"]=s s= marker_sets["particle_1 geometry"] mark=s.place_marker((9363.41, 2050.22, 6358.04), (0.7, 0.7, 0.7), 792.956) if "particle_2 geometry" not in marker_sets: s=new_marker_set('particle_2 geometry') marker_sets["particle_2 geometry"]=s s= marker_sets["particle_2 geometry"] mark=s.place_marker((7464.66, 1774.38, 6218.72), (0.7, 0.7, 0.7), 856.786) if "particle_3 geometry" not in marker_sets: s=new_marker_set('particle_3 geometry') marker_sets["particle_3 geometry"]=s s= marker_sets["particle_3 geometry"] mark=s.place_marker((8753.01, -132.87, 6559.57), (0.7, 0.7, 0.7), 963.679) if "particle_4 geometry" not in marker_sets: s=new_marker_set('particle_4 geometry') marker_sets["particle_4 geometry"]=s s= marker_sets["particle_4 geometry"] mark=s.place_marker((7565.2, -1121.24, 6228.45), (0.7, 0.7, 0.7), 761.442) if "particle_5 geometry" not in marker_sets: s=new_marker_set('particle_5 geometry') marker_sets["particle_5 geometry"]=s s= marker_sets["particle_5 geometry"] mark=s.place_marker((5465.12, -62.7116, 5542.75), (0.7, 0.7, 0.7), 961.183) if "particle_6 geometry" not in marker_sets: s=new_marker_set('particle_6 geometry') marker_sets["particle_6 geometry"]=s s= marker_sets["particle_6 geometry"] mark=s.place_marker((4315.36, 616.757, 4460.58), (0.7, 0.7, 0.7), 753.151) if "particle_7 geometry" not in marker_sets: s=new_marker_set('particle_7 geometry') marker_sets["particle_7 geometry"]=s s= marker_sets["particle_7 geometry"] mark=s.place_marker((4723.6, -194.458, 4527.34), (1, 0.7, 0), 1098.07) if "particle_8 geometry" not in marker_sets: s=new_marker_set('particle_8 geometry') marker_sets["particle_8 geometry"]=s s= marker_sets["particle_8 geometry"] mark=s.place_marker((3642.56, 2077.44, 3565.13), (0.7, 0.7, 0.7), 1010.42) if "particle_9 geometry" not in marker_sets: s=new_marker_set('particle_9 geometry') marker_sets["particle_9 geometry"]=s s= marker_sets["particle_9 geometry"] mark=s.place_marker((3578.41, 2223.59, 1887.18), (1, 0.7, 0), 821.043) if "particle_10 geometry" not in marker_sets: s=new_marker_set('particle_10 geometry') marker_sets["particle_10 geometry"]=s s= marker_sets["particle_10 geometry"] mark=s.place_marker((2945.04, 3988.93, 2076.4), (0.7, 0.7, 0.7), 873.876) if "particle_11 geometry" not in marker_sets: s=new_marker_set('particle_11 geometry') marker_sets["particle_11 geometry"]=s s= marker_sets["particle_11 geometry"] mark=s.place_marker((4206.8, 4503.38, 2500.92), (0.7, 0.7, 0.7), 625.532) if "particle_12 geometry" not in marker_sets: s=new_marker_set('particle_12 geometry') marker_sets["particle_12 geometry"]=s s= marker_sets["particle_12 geometry"] mark=s.place_marker((5267.59, 5604.72, 2872.45), (0.7, 0.7, 0.7), 880.474) if "particle_13 geometry" not in marker_sets: s=new_marker_set('particle_13 geometry') marker_sets["particle_13 geometry"]=s s= marker_sets["particle_13 geometry"] 
mark=s.place_marker((6382.11, 4553.22, 2591.12), (0.7, 0.7, 0.7), 659.161) if "particle_14 geometry" not in marker_sets: s=new_marker_set('particle_14 geometry') marker_sets["particle_14 geometry"]=s s= marker_sets["particle_14 geometry"] mark=s.place_marker((8228.48, 5713.35, 1946.9), (0.7, 0.7, 0.7), 831.745) if "particle_15 geometry" not in marker_sets: s=new_marker_set('particle_15 geometry') marker_sets["particle_15 geometry"]=s s= marker_sets["particle_15 geometry"] mark=s.place_marker((10152.1, 7484.7, 3589.85), (0.7, 0.7, 0.7), 803.065) if "particle_16 geometry" not in marker_sets: s=new_marker_set('particle_16 geometry') marker_sets["particle_16 geometry"]=s s= marker_sets["particle_16 geometry"] mark=s.place_marker((9272.8, 7416.43, 5304.75), (0.7, 0.7, 0.7), 610.262) if "particle_17 geometry" not in marker_sets: s=new_marker_set('particle_17 geometry') marker_sets["particle_17 geometry"]=s s= marker_sets["particle_17 geometry"] mark=s.place_marker((8320.45, 8201.08, 4614.49), (0.7, 0.7, 0.7), 741.265) if "particle_18 geometry" not in marker_sets: s=new_marker_set('particle_18 geometry') marker_sets["particle_18 geometry"]=s s= marker_sets["particle_18 geometry"] mark=s.place_marker((7152.99, 7343.35, 3907.93), (0.7, 0.7, 0.7), 748.625) if "particle_19 geometry" not in marker_sets: s=new_marker_set('particle_19 geometry') marker_sets["particle_19 geometry"]=s s= marker_sets["particle_19 geometry"] mark=s.place_marker((6486.83, 7632.59, 2637.98), (0.7, 0.7, 0.7), 677.181) if "particle_20 geometry" not in marker_sets: s=new_marker_set('particle_20 geometry') marker_sets["particle_20 geometry"]=s s= marker_sets["particle_20 geometry"] mark=s.place_marker((4929.54, 5993.99, 3557.59), (0.7, 0.7, 0.7), 616.015) if "particle_21 geometry" not in marker_sets: s=new_marker_set('particle_21 geometry') marker_sets["particle_21 geometry"]=s s= marker_sets["particle_21 geometry"] mark=s.place_marker((6245, 7563.37, 3485.17), (0.7, 0.7, 0.7), 653.154) if "particle_22 geometry" not in marker_sets: s=new_marker_set('particle_22 geometry') marker_sets["particle_22 geometry"]=s s= marker_sets["particle_22 geometry"] mark=s.place_marker((5719.78, 7921.88, 4338.92), (0.7, 0.7, 0.7), 595.33) if "particle_23 geometry" not in marker_sets: s=new_marker_set('particle_23 geometry') marker_sets["particle_23 geometry"]=s s= marker_sets["particle_23 geometry"] mark=s.place_marker((6805.84, 8371.51, 4949.59), (0.7, 0.7, 0.7), 627.901) if "particle_24 geometry" not in marker_sets: s=new_marker_set('particle_24 geometry') marker_sets["particle_24 geometry"]=s s= marker_sets["particle_24 geometry"] mark=s.place_marker((8052.33, 7916.35, 5329.17), (0.7, 0.7, 0.7), 663.941) if "particle_25 geometry" not in marker_sets: s=new_marker_set('particle_25 geometry') marker_sets["particle_25 geometry"]=s s= marker_sets["particle_25 geometry"] mark=s.place_marker((9514.83, 8297.87, 5221.41), (0.7, 0.7, 0.7), 663.899) if "particle_26 geometry" not in marker_sets: s=new_marker_set('particle_26 geometry') marker_sets["particle_26 geometry"]=s s= marker_sets["particle_26 geometry"] mark=s.place_marker((8146.11, 7819.16, 4640.6), (0.7, 0.7, 0.7), 644.694) if "particle_27 geometry" not in marker_sets: s=new_marker_set('particle_27 geometry') marker_sets["particle_27 geometry"]=s s= marker_sets["particle_27 geometry"] mark=s.place_marker((6993.28, 6007.91, 4183.08), (0.7, 0.7, 0.7), 896.802) if "particle_28 geometry" not in marker_sets: s=new_marker_set('particle_28 geometry') marker_sets["particle_28 geometry"]=s s= 
marker_sets["particle_28 geometry"] mark=s.place_marker((5865.34, 6951.96, 4285.29), (0.7, 0.7, 0.7), 576.38) if "particle_29 geometry" not in marker_sets: s=new_marker_set('particle_29 geometry') marker_sets["particle_29 geometry"]=s s= marker_sets["particle_29 geometry"] mark=s.place_marker((4723.93, 6558.74, 3779.52), (0.7, 0.7, 0.7), 635.092) if "particle_30 geometry" not in marker_sets: s=new_marker_set('particle_30 geometry') marker_sets["particle_30 geometry"]=s s= marker_sets["particle_30 geometry"] mark=s.place_marker((4527.29, 6803.62, 4682.89), (0.7, 0.7, 0.7), 651.505) if "particle_31 geometry" not in marker_sets: s=new_marker_set('particle_31 geometry') marker_sets["particle_31 geometry"]=s s= marker_sets["particle_31 geometry"] mark=s.place_marker((4157.77, 5702.47, 3364.08), (0.7, 0.7, 0.7), 718.042) if "particle_32 geometry" not in marker_sets: s=new_marker_set('particle_32 geometry') marker_sets["particle_32 geometry"]=s s= marker_sets["particle_32 geometry"] mark=s.place_marker((3829.76, 7398.98, 3358.29), (0.7, 0.7, 0.7), 726.714) if "particle_33 geometry" not in marker_sets: s=new_marker_set('particle_33 geometry') marker_sets["particle_33 geometry"]=s s= marker_sets["particle_33 geometry"] mark=s.place_marker((4558.78, 7799.8, 4598.94), (0.7, 0.7, 0.7), 673.585) if "particle_34 geometry" not in marker_sets: s=new_marker_set('particle_34 geometry') marker_sets["particle_34 geometry"]=s s= marker_sets["particle_34 geometry"] mark=s.place_marker((5778.56, 8214.17, 4273.76), (0.7, 0.7, 0.7), 598.418) if "particle_35 geometry" not in marker_sets: s=new_marker_set('particle_35 geometry') marker_sets["particle_35 geometry"]=s s= marker_sets["particle_35 geometry"] mark=s.place_marker((6816.41, 9101.18, 3792.38), (0.7, 0.7, 0.7), 693.382) if "particle_36 geometry" not in marker_sets: s=new_marker_set('particle_36 geometry') marker_sets["particle_36 geometry"]=s s= marker_sets["particle_36 geometry"] mark=s.place_marker((5379.41, 7115.46, 3950.12), (0.7, 0.7, 0.7), 804.038) if "particle_37 geometry" not in marker_sets: s=new_marker_set('particle_37 geometry') marker_sets["particle_37 geometry"]=s s= marker_sets["particle_37 geometry"] mark=s.place_marker((6192.87, 8712.16, 4481.68), (0.7, 0.7, 0.7), 816.178) if "particle_38 geometry" not in marker_sets: s=new_marker_set('particle_38 geometry') marker_sets["particle_38 geometry"]=s s= marker_sets["particle_38 geometry"] mark=s.place_marker((6755.53, 7666.6, 4856.48), (0.7, 0.7, 0.7), 776.628) if "particle_39 geometry" not in marker_sets: s=new_marker_set('particle_39 geometry') marker_sets["particle_39 geometry"]=s s= marker_sets["particle_39 geometry"] mark=s.place_marker((5822.1, 8776.18, 4221.64), (0.7, 0.7, 0.7), 750.656) if "particle_40 geometry" not in marker_sets: s=new_marker_set('particle_40 geometry') marker_sets["particle_40 geometry"]=s s= marker_sets["particle_40 geometry"] mark=s.place_marker((4387.8, 7855.18, 4440.84), (0.7, 0.7, 0.7), 709.625) if "particle_41 geometry" not in marker_sets: s=new_marker_set('particle_41 geometry') marker_sets["particle_41 geometry"]=s s= marker_sets["particle_41 geometry"] mark=s.place_marker((2763.53, 7655.5, 3448.5), (0.7, 0.7, 0.7), 927.681) if "particle_42 geometry" not in marker_sets: s=new_marker_set('particle_42 geometry') marker_sets["particle_42 geometry"]=s s= marker_sets["particle_42 geometry"] mark=s.place_marker((2100.65, 10136, 4030.51), (0.7, 0.7, 0.7), 1088.21) if "particle_43 geometry" not in marker_sets: s=new_marker_set('particle_43 geometry') 
marker_sets["particle_43 geometry"]=s s= marker_sets["particle_43 geometry"] mark=s.place_marker((1351.77, 8359.15, 3620.24), (0.7, 0.7, 0.7), 736.147) if "particle_44 geometry" not in marker_sets: s=new_marker_set('particle_44 geometry') marker_sets["particle_44 geometry"]=s s= marker_sets["particle_44 geometry"] mark=s.place_marker((2856.27, 8500.18, 4269.38), (0.7, 0.7, 0.7), 861.101) if "particle_45 geometry" not in marker_sets: s=new_marker_set('particle_45 geometry') marker_sets["particle_45 geometry"]=s s= marker_sets["particle_45 geometry"] mark=s.place_marker((2921.27, 6604.75, 4581.96), (0.7, 0.7, 0.7), 924.213) if "particle_46 geometry" not in marker_sets: s=new_marker_set('particle_46 geometry') marker_sets["particle_46 geometry"]=s s= marker_sets["particle_46 geometry"] mark=s.place_marker((2519.39, 7241.23, 6392.08), (0.7, 0.7, 0.7), 881.828) if "particle_47 geometry" not in marker_sets: s=new_marker_set('particle_47 geometry') marker_sets["particle_47 geometry"]=s s= marker_sets["particle_47 geometry"] mark=s.place_marker((1650.63, 8992.28, 5746.32), (0.7, 0.7, 0.7), 927.681) if "particle_48 geometry" not in marker_sets: s=new_marker_set('particle_48 geometry') marker_sets["particle_48 geometry"]=s s= marker_sets["particle_48 geometry"] mark=s.place_marker((1652.74, 7272.92, 6488.99), (0.7, 0.7, 0.7), 831.576) if "particle_49 geometry" not in marker_sets: s=new_marker_set('particle_49 geometry') marker_sets["particle_49 geometry"]=s s= marker_sets["particle_49 geometry"] mark=s.place_marker((2068.23, 5453.09, 6278.57), (0.7, 0.7, 0.7), 859.494) if "particle_50 geometry" not in marker_sets: s=new_marker_set('particle_50 geometry') marker_sets["particle_50 geometry"]=s s= marker_sets["particle_50 geometry"] mark=s.place_marker((1030.35, 5960.32, 6075.86), (0.7, 0.7, 0.7), 704.845) if "particle_51 geometry" not in marker_sets: s=new_marker_set('particle_51 geometry') marker_sets["particle_51 geometry"]=s s= marker_sets["particle_51 geometry"] mark=s.place_marker((2098.43, 4922.63, 5261.67), (0.7, 0.7, 0.7), 804.461) if "particle_52 geometry" not in marker_sets: s=new_marker_set('particle_52 geometry') marker_sets["particle_52 geometry"]=s s= marker_sets["particle_52 geometry"] mark=s.place_marker((3471.12, 3920.87, 4786.2), (0.7, 0.7, 0.7), 934.111) if "particle_53 geometry" not in marker_sets: s=new_marker_set('particle_53 geometry') marker_sets["particle_53 geometry"]=s s= marker_sets["particle_53 geometry"] mark=s.place_marker((2576.85, 2765.33, 4243.87), (0.7, 0.7, 0.7), 988.339) if "particle_54 geometry" not in marker_sets: s=new_marker_set('particle_54 geometry') marker_sets["particle_54 geometry"]=s s= marker_sets["particle_54 geometry"] mark=s.place_marker((1913.52, 2958.74, 4544.5), (1, 0.7, 0), 803.7) if "particle_55 geometry" not in marker_sets: s=new_marker_set('particle_55 geometry') marker_sets["particle_55 geometry"]=s s= marker_sets["particle_55 geometry"] mark=s.place_marker((2382.25, 4439.18, 5933.49), (0.7, 0.7, 0.7), 812.118) if "particle_56 geometry" not in marker_sets: s=new_marker_set('particle_56 geometry') marker_sets["particle_56 geometry"]=s s= marker_sets["particle_56 geometry"] mark=s.place_marker((3282.88, 3328.65, 7592), (0.7, 0.7, 0.7), 1177.93) if "particle_57 geometry" not in marker_sets: s=new_marker_set('particle_57 geometry') marker_sets["particle_57 geometry"]=s s= marker_sets["particle_57 geometry"] mark=s.place_marker((3874.78, 3784.79, 9992.1), (0.7, 0.7, 0.7), 1038.21) if "particle_58 geometry" not in marker_sets: 
s=new_marker_set('particle_58 geometry') marker_sets["particle_58 geometry"]=s s= marker_sets["particle_58 geometry"] mark=s.place_marker((4016.01, 3976.21, 10536.1), (1, 0.7, 0), 758.016) if "particle_59 geometry" not in marker_sets: s=new_marker_set('particle_59 geometry') marker_sets["particle_59 geometry"]=s s= marker_sets["particle_59 geometry"] mark=s.place_marker((4562.31, 3361.95, 10481.6), (0.7, 0.7, 0.7), 824.046) if "particle_60 geometry" not in marker_sets: s=new_marker_set('particle_60 geometry') marker_sets["particle_60 geometry"]=s s= marker_sets["particle_60 geometry"] mark=s.place_marker((3473.83, 3955.94, 9910.43), (0.7, 0.7, 0.7), 793.379) if "particle_61 geometry" not in marker_sets: s=new_marker_set('particle_61 geometry') marker_sets["particle_61 geometry"]=s s= marker_sets["particle_61 geometry"] mark=s.place_marker((2931.28, 3285.96, 10484.3), (0.7, 0.7, 0.7), 1011.56) if "particle_62 geometry" not in marker_sets: s=new_marker_set('particle_62 geometry') marker_sets["particle_62 geometry"]=s s= marker_sets["particle_62 geometry"] mark=s.place_marker((3535.84, 3569.76, 8707.48), (0.7, 0.7, 0.7), 1097.01) if "particle_63 geometry" not in marker_sets: s=new_marker_set('particle_63 geometry') marker_sets["particle_63 geometry"]=s s= marker_sets["particle_63 geometry"] mark=s.place_marker((5228.67, 4159.04, 9514.75), (0.7, 0.7, 0.7), 851.626) if "particle_64 geometry" not in marker_sets: s=new_marker_set('particle_64 geometry') marker_sets["particle_64 geometry"]=s s= marker_sets["particle_64 geometry"] mark=s.place_marker((6577.56, 5125.87, 10517.6), (0.7, 0.7, 0.7), 869.434) if "particle_65 geometry" not in marker_sets: s=new_marker_set('particle_65 geometry') marker_sets["particle_65 geometry"]=s s= marker_sets["particle_65 geometry"] mark=s.place_marker((5543.48, 6207.33, 9481.74), (0.7, 0.7, 0.7), 818.463) if "particle_66 geometry" not in marker_sets: s=new_marker_set('particle_66 geometry') marker_sets["particle_66 geometry"]=s s= marker_sets["particle_66 geometry"] mark=s.place_marker((5344.96, 6711.92, 11064.8), (0.7, 0.7, 0.7), 759.539) if "particle_67 geometry" not in marker_sets: s=new_marker_set('particle_67 geometry') marker_sets["particle_67 geometry"]=s s= marker_sets["particle_67 geometry"] mark=s.place_marker((4657.88, 5139.48, 9485.88), (0.7, 0.7, 0.7), 1088.59) if "particle_68 geometry" not in marker_sets: s=new_marker_set('particle_68 geometry') marker_sets["particle_68 geometry"]=s s= marker_sets["particle_68 geometry"] mark=s.place_marker((5558.97, 4686.06, 11558.7), (0.7, 0.7, 0.7), 822.312) if "particle_69 geometry" not in marker_sets: s=new_marker_set('particle_69 geometry') marker_sets["particle_69 geometry"]=s s= marker_sets["particle_69 geometry"] mark=s.place_marker((5567.08, 6270.05, 11842.4), (0.7, 0.7, 0.7), 749.81) if "particle_70 geometry" not in marker_sets: s=new_marker_set('particle_70 geometry') marker_sets["particle_70 geometry"]=s s= marker_sets["particle_70 geometry"] mark=s.place_marker((5228.38, 5056.13, 12200.2), (0.7, 0.7, 0.7), 764.488) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
batxes/4Cin
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/mtx1_models/SHH_WT_models8077.py
Python
gpl-3.0
17,576
0.025091
#!/usr/bin/env python

import system as sys
import traces as tra
import info as nf
import pylab as pl

pos_info = '+0+600'
pos_tra = '+300+600'
pos_sys = '+0+0'

i = nf.info(position=pos_info)
s = sys.system(info=i, position=pos_sys)
t = tra.traces(s, info=i, position=pos_tra)

if pl.get_backend() == 'TkAgg':
    s.fig.tight_layout()
    t.fig.tight_layout()

pl.show()
jusjusjus/Motiftoolbox
Fitzhugh_n-cell/run.py
Python
gpl-2.0
368
0.005435
""" WSGI config for p27_d17 project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os import sys BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # # BASE_DIR is useless as it is one directory above the one with manage.py PROJECT_DIR = os.path.join(os.path.dirname(__file__)) APPS_DIR = os.path.join(BASE_DIR, 'apps') # put apps on first part of path so we can leave off apps. when # importing an app sys.path.insert(0, APPS_DIR) print sys.path # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "p27_d17.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "p27_d17_prj.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
fogcitymarathoner/gae_p27_dj17
p27_d17_prj/wsgi.py
Python
mit
1,789
0.000559
import os
import re
from typing import Iterable

from .env import use_bintray, skip_build_py, skip_build_py2, skip_build_py_module, skip_build_py2_module, py_packages, py_packages2
from .package import enumerate_packages, import_package

built_packags: set = set()
need_rebuild: set = set()


def parse_packages(pkg_specs: str) -> Iterable[str]:
    for spec in pkg_specs.split(','):
        if spec == ':COMMIT_MARKER':
            if os.getenv('TRAVIS_EVENT_TYPE') == 'cron':
                continue
            mobj = re.search(
                r'pybuild-rebuild=(.+)',
                os.getenv('TRAVIS_COMMIT_MESSAGE', ''))
            if mobj:
                pkgs = mobj.group(1)
                if pkgs == 'ALL':
                    yield from enumerate_packages()
                else:
                    yield from pkgs.split(',')
        else:
            yield spec


def build_package(pkgname: str) -> None:
    if pkgname in built_packags:
        return

    pkg = import_package(pkgname)

    if use_bintray and pkgname not in need_rebuild and pkg.fetch_tarball():
        built_packags.add(pkgname)
        return

    for dep in pkg.dependencies:
        build_package(dep)

    if pkg.fresh():
        for src in pkg.sources:
            src.download()
        for patch in getattr(pkg, 'patches', []):
            patch.apply(pkg.source)
        try:
            pkg.prepare()
        except NotImplementedError:
            print('Skipping prepare step')
        pkg.build()
        pkg.create_tarball()
        pkg.upload_tarball()

    built_packags.add(pkgname)


def main():
    # TODO: Make this configurable
    need_rebuild.update(parse_packages(':COMMIT_MARKER'))
    print(f'Packages to rebuild: {need_rebuild}')

    if not skip_build_py:
        build_package('python')
    if not skip_build_py2:
        build_package('python2')
    if not skip_build_py_module:
        for item in py_packages:
            build_package(item)
    if not skip_build_py2_module:
        for item in py_packages2:
            build_package(item)
qpython-android/QPython3-core
pybuild/main.py
Python
apache-2.0
2,041
0.00196
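A small stand-alone illustration of the ':COMMIT_MARKER' convention handled by parse_packages() in the record above: a commit message can request rebuilds through a 'pybuild-rebuild=' line. The commit message below is invented for illustration.

# Illustrates the 'pybuild-rebuild=' marker that parse_packages() searches for.
import re

commit_message = "Fix NDK flags\n\npybuild-rebuild=python,openssl"  # invented example
mobj = re.search(r'pybuild-rebuild=(.+)', commit_message)
if mobj:
    print(mobj.group(1).split(','))  # -> ['python', 'openssl']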
from toast.scene_graph import Component


class Animation(Component):
    def __init__(self, key=None, frames=None):
        super(Animation, self).__init__()

        self.__animation_list = {}
        self.key = ''
        self.__current_image = None
        self.__index = 0
        self.__time = 0

        if key != None:
            self.add_animation(key, frames)

    def update(self, time=0):
        if self.key:
            self.__time += time
            self.__current_image, duration, callback = self.frame

            if hasattr(self.game_object, 'image'):
                self.game_object.image = self.image

            if self.__time > duration:
                self.__time = 0
                self.__index += 1

                if callback:
                    callback()

    def add_animation(self, key, frames):
        if not self.key:
            self.key = key
            self.__current_image = frames[0][0]

        self.__animation_list[key] = frames

    @property
    def image(self):
        """ Returns the current image of the playing animation """
        return self.__current_image

    @property
    def key(self):
        """ Returns the key of the playing animation """
        return self.__current_animation

    @key.setter
    def key(self, value):
        self.__current_animation = value

    @property
    def index(self):
        """ Returns the current index of the playing animation """
        if self.__index > len(self.__animation_list[self.key]) - 1:
            self.__index = 0

        return self.__index

    @property
    def frame(self):
        """ Returns the current frame as a triple """
        animation = self.__animation_list[self.key][self.index]
        image = animation[0]
        duration = animation[1]
        callback = None

        if len(animation) == 3:
            callback = self.__animation_list[self.key][self.index][2]

        return image, duration, callback

    def play(self, key, start_index=None):
        self.key = key

        if start_index != None:
            self.__index = start_index

    def stop(self):
        self.key = ''
        self.__index = 0
JoshuaSkelly/Toast
toast/animation.py
Python
mit
2,328
0.010309
import ipaddress

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

from .connection import NetworkAddress


def encode_address(addr: NetworkAddress) -> bytes:
    return ipaddress.ip_address(addr[0]).packed + bytes([addr[1] >> 8, addr[1] & 0xFF])


class QuicRetryTokenHandler:
    def __init__(self) -> None:
        self._key = rsa.generate_private_key(
            public_exponent=65537, key_size=1024, backend=default_backend()
        )

    def create_token(self, addr: NetworkAddress, destination_cid: bytes) -> bytes:
        retry_message = encode_address(addr) + b"|" + destination_cid
        return self._key.public_key().encrypt(
            retry_message,
            padding.OAEP(
                mgf=padding.MGF1(hashes.SHA256()), algorithm=hashes.SHA256(), label=None
            ),
        )

    def validate_token(self, addr: NetworkAddress, token: bytes) -> bytes:
        retry_message = self._key.decrypt(
            token,
            padding.OAEP(
                mgf=padding.MGF1(hashes.SHA256()), algorithm=hashes.SHA256(), label=None
            ),
        )
        encoded_addr, original_connection_id = retry_message.split(b"|", maxsplit=1)
        if encoded_addr != encode_address(addr):
            raise ValueError("Remote address does not match.")
        return original_connection_id
asajeffrey/servo
tests/wpt/web-platform-tests/tools/third_party/aioquic/src/aioquic/quic/retry.py
Python
mpl-2.0
1,449
0.003451
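A hedged round-trip sketch for QuicRetryTokenHandler from the record above; the address and connection ID are arbitrary test values, and the import path is assumed from the record's file path.

# Illustrative round trip: mint a retry token and validate it from the same address.
from aioquic.quic.retry import QuicRetryTokenHandler  # path assumed from the record above

handler = QuicRetryTokenHandler()
addr = ("192.0.2.1", 4433)            # (host, port) NetworkAddress
original_cid = b"\x01\x02\x03\x04"    # arbitrary destination connection ID

token = handler.create_token(addr, original_cid)
assert handler.validate_token(addr, token) == original_cid
# validate_token() raises ValueError when the token comes back from a different address.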
import StringIO import numpy as np from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter,\ has_nested_fields from numpy.testing import * class TestLineSplitter(TestCase): "Tests the LineSplitter class." # def test_no_delimiter(self): "Test LineSplitter w/o delimiter" strg = " 1 2 3 4 5 # test" test = LineSplitter()(strg) assert_equal(test, ['1', '2', '3', '4', '5']) test = LineSplitter('')(strg) assert_equal(test, ['1', '2', '3', '4', '5']) def test_space_delimiter(self): "Test space delimiter" strg = " 1 2 3 4 5 # test" test = LineSplitter(' ')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) test = LineSplitter(' ')(strg) assert_equal(test, ['1 2 3 4', '5']) def test_tab_delimiter(self): "Test tab delimiter" strg= " 1\t 2\t 3\t 4\t 5 6" test = LineSplitter('\t')(strg) assert_equal(test, ['1', '2', '3', '4', '5 6']) strg= " 1 2\t 3 4\t 5 6" test = LineSplitter('\t')(strg) assert_equal(test, ['1 2', '3 4', '5 6']) def test_other_delimiter(self): "Test LineSplitter on delimiter" strg = "1,2,3,4,,5" test = LineSplitter(',')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) # strg = " 1,2,3,4,,5 # test" test = LineSplitter(',')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) def test_constant_fixed_width(self): "Test LineSplitter w/ fixed-width fields" strg = " 1 2 3 4 5 # test" test = LineSplitter(3)(strg) assert_equal(test, ['1', '2', '3', '4', '', '5', '']) # strg = " 1 3 4 5 6# test" test = LineSplitter(20)(strg) assert_equal(test, ['1 3 4 5 6']) # strg = " 1 3 4 5 6# test" test = LineSplitter(30)(strg) assert_equal(test, ['1 3 4 5 6']) def test_variable_fixed_width(self): strg = " 1 3 4 5 6# test" test = LineSplitter((3,6,6,3))(strg) assert_equal(test, ['1', '3', '4 5', '6']) # strg = " 1 3 4 5 6# test" test = LineSplitter((6,6,9))(strg) assert_equal(test, ['1', '3 4', '5 6']) #------------------------------------------------------------------------------- class TestNameValidator(TestCase): # def test_case_sensitivity(self): "Test case sensitivity" names = ['A', 'a', 'b', 'c'] test = NameValidator().validate(names) assert_equal(test, ['A', 'a', 'b', 'c']) test = NameValidator(case_sensitive=False).validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='upper').validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='lower').validate(names) assert_equal(test, ['a', 'a_1', 'b', 'c']) # def test_excludelist(self): "Test excludelist" names = ['dates', 'data', 'Other Data', 'mask'] validator = NameValidator(excludelist = ['dates', 'data', 'mask']) test = validator.validate(names) assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) #------------------------------------------------------------------------------- class TestStringConverter(TestCase): "Test StringConverter" # def test_creation(self): "Test creation of a StringConverter" converter = StringConverter(int, -99999) assert_equal(converter._status, 1) assert_equal(converter.default, -99999) # def test_upgrade(self): "Tests the upgrade method." converter = StringConverter() assert_equal(converter._status, 0) converter.upgrade('0') assert_equal(converter._status, 1) converter.upgrade('0.') assert_equal(converter._status, 2) converter.upgrade('0j') assert_equal(converter._status, 3) converter.upgrade('a') assert_equal(converter._status, len(converter._mapper)-1) # def test_missing(self): "Tests the use of missing values." 
converter = StringConverter(missing_values=('missing','missed')) converter.upgrade('0') assert_equal(converter('0'), 0) assert_equal(converter(''), converter.default) assert_equal(converter('missing'), converter.default) assert_equal(converter('missed'), converter.default) try: converter('miss') except ValueError: pass # def test_upgrademapper(self): "Tests updatemapper" from datetime import date import time dateparser = lambda s : date(*time.strptime(s, "%Y-%m-%d")[:3]) StringConverter.upgrade_mapper(dateparser, date(2000,1,1)) convert = StringConverter(dateparser, date(2000, 1, 1)) test = convert('2001-01-01') assert_equal(test, date(2001, 01, 01)) test = convert('2009-01-01') assert_equal(test, date(2009, 01, 01)) test = convert('') assert_equal(test, date(2000, 01, 01)) # def test_string_to_object(self): "Make sure that string-to-object functions are properly recognized" from datetime import date import time conv = StringConverter(lambda s: date(*(time.strptime(s)[:3]))) assert_equal(conv._mapper[-2][0](0), 0j) assert(hasattr(conv, 'default')) #------------------------------------------------------------------------------- class TestMiscFunctions(TestCase): # def test_has_nested_dtype(self): "Test has_nested_dtype" ndtype = np.dtype(np.float) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', '|S3'), ('B', float)]) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) assert_equal(has_nested_fields(ndtype), True)
illume/numpy3k
numpy/lib/tests/test__iotools.py
Python
bsd-3-clause
6,114
0.004416
import select, logging, threading, sys, ssl, errno, socket, platform, time from threadly import Scheduler, Clock from .stats import Stats from .stats import noExcept from .client import Client from .server import Server from .tcp import TCPClient, TCPServer from .udp import UDPServer try: xrange(1) except: xrange = range if not "EPOLLRDHUP" in dir(select): select.EPOLLRDHUP = 0x2000 EMPTY_STRING = b'' class SelectSelector(): def __init__(self, readCallback, writeCallback, acceptCallback, errorCallback): self.__log = logging.getLogger("root.litesockets.SelectSelector") self.__log.info("Creating basic select selector for: {}".format(platform.system())) self.__readCallback = readCallback self.__writeCallback = writeCallback self.__acceptCallback = acceptCallback self.__errorCallback = errorCallback self.__readClients = set() self.__writeClients = set() self.__acceptServers = set() self.__nb_readClients = set() self.__nb_writeClients = set() self.__nb_acceptServers = set() self.__writeLock = threading.Condition() self.__readLock = threading.Condition() self.__acceptLock = threading.Condition() self.__nbLock = threading.Condition() self.__localExecuter = Scheduler(5) #need 5 thread, all can be blocked at once self.__running = True self.__localExecuter.execute(self.__doReads) self.__localExecuter.execute(self.__doWrites) self.__localExecuter.execute(self.__doAcceptor) def stop(self): self.__running = False self.__localExecuter.shutdown_now() def addServer(self, fileno): self.__acceptLock.acquire() self.__acceptServers.add(FileNoWrapper(fileno)) self.__acceptLock.release() def removeServer(self, fileno): now = FileNoWrapper(fileno) if now in self.__acceptServers: self.__acceptServers.remove(now) def addReader(self, fileno): now = FileNoWrapper(fileno) if now in self.__readClients or now in self.__nb_readClients: return if self.__readLock.acquire(blocking=False): self.__readClients.add(now) self.__readLock.release() else: self.__nb_readClients.add(now) self.__localExecuter.schedule(self.__tmpClientSelect, delay=0, recurring=False, key="SimpleKey") self.__localExecuter.schedule(self.__update_from_nb_selector, key="UpdateTask") def removeReader(self, fileno): now = FileNoWrapper(fileno) if now in self.__readClients: self.__readClients.remove(now) if now in self.__nb_readClients: self.__nb_readClients.remove(now) def addWriter(self, fileno): now = FileNoWrapper(fileno) if now in self.__writeClients or now in self.__nb_writeClients: return if self.__writeLock.acquire(blocking=False): self.__writeClients.add(now) self.__writeLock.release() else: self.__nb_writeClients.add(now) self.__localExecuter.schedule(self.__tmpClientSelect, key="SimpleKey") self.__localExecuter.schedule(self.__update_from_nb_selector, key="UpdateTask") def removeWriter(self, fileno): now = FileNoWrapper(fileno) if now in self.__writeClients: self.__writeClients.remove(now) if now in self.__nb_writeClients: self.__nb_writeClients.remove(now) def __doThread(self, t): while self.__running: try: t() except Exception as e: self.__log.error("GP Socket Exception: %s: %s"%(t, sys.exc_info()[0])) self.__log.error(e) def __update_from_nb_selector(self): if len(self.__nb_readClients) + len(self.__nb_writeClients) == 0: return else: self.__readLock.acquire() self.__nbLock.acquire() for r in self.__nb_readClients: self.__readClients.add(r) self.__nb_readClients.clear() self.__nbLock.release() self.__readLock.release() self.__writeLock.acquire() self.__nbLock.acquire() for r in self.__nb_writeClients: self.__writeClients.add(r) 
self.__nb_writeClients.clear() self.__nbLock.release() self.__writeLock.release() def __tmpClientSelect(self): if len(self.__nb_readClients) + len(self.__nb_writeClients) == 0: return rlist = [] wlist = [] xlist = [] self.__nbLock.acquire() try: rlist, wlist, xlist = select.select(self.__nb_readClients, self.__nb_writeClients, self.__readClients, 0.001) except: #We sometimes throw here when a client is removed from the set during the loop pass for rdy in rlist: try: self.__readCallback(rdy.fileno()) except Exception as e: self.__log.debug("nbRead Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) for rdy in wlist: try: self.__writeCallback(rdy.fileno()) except Exception as e: self.__log.debug("nbWrite Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) for bad in xlist: try: self.__errorCallback(bad.fileno()) except: self.__log.debug("nberrorCB Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) self.__nbLock.release() if len(self.__nb_readClients) + len(self.__nb_writeClients) > 0: self.__localExecuter.schedule(self.__tmpClientSelect, key="SimpleKey") def __doReads(self): rlist = [] wlist = [] xlist = [] self.__readLock.acquire() if len(self.__readClients) > 0: try: rlist, wlist, xlist = select.select(self.__readClients, [], self.__readClients, .1) except Exception as e: #We sometimes throw here when a client is removed from the set during the loop pass else: time.sleep(.1) self.__readLock.release() for rdy in rlist: try: if rdy in self.__readClients: self.__readCallback(rdy.fileno()) except IOError as e: if e.errno != errno.EBADF: self.__log.error("Unknown error in Selector Read") self.__log.error(e, exc_info=True) except Exception as e: self.__log.debug("Read Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) for bad in xlist: try: self.__errorCallback(bad.fileno()) except: self.__log.debug("errorCB Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) if self.__running: self.__localExecuter.execute(self.__doReads) def __doWrites(self): rlist = [] wlist = [] xlist = [] self.__writeLock.acquire() if len(self.__writeClients) > 0: try: rlist, wlist, xlist = select.select([], self.__writeClients,[] , .1) except Exception as e: #We sometimes throw here when a client is removed from the set during the loop pass else: time.sleep(.1) self.__writeLock.release() for rdy in wlist: try: if rdy in self.__writeClients: self.__writeCallback(rdy.fileno()) except Exception as e: self.__log.debug("Write Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) self.__writeClients.remove(rdy) if self.__running: self.__localExecuter.execute(self.__doWrites) def __doAcceptor(self): rlist = [] wlist = [] xlist = [] self.__acceptLock.acquire() if len(self.__acceptServers) > 0: try: rlist, wlist, xlist = select.select(self.__acceptServers, [], self.__acceptServers, .1) except: #We sometimes throw here when a server is removed from the set during the loop pass else: time.sleep(.1) self.__acceptLock.release() for bad in xlist: try: self.__errorCallback(bad.fileno()) self.__writeClients.remove(bad) except Exception as e: self.__log.debug("errorCB Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) for rdy in rlist: try: if rdy in self.__acceptServers: self.__acceptCallback(rdy.fileno()) except Exception as e: self.__log.debug("Accept Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) logging.exception("") self.__writeClients.remove(rdy) if self.__running: self.__localExecuter.execute(self.__doAcceptor) class FileNoWrapper(): def __init__(self, fileno): self.__fileno = fileno def __hash__(self): return self.__fileno; 
def __eq__(self, obj): if isinstance(obj, FileNoWrapper): return self.fileno() == obj.fileno(); else: return False def fileno(self): return self.__fileno class EpollSelector(): def __init__(self, readCallback, writeCallback, acceptCallback, errorCallback): self.__log = logging.getLogger("root.litesockets.EpollSelector") self.__log.info("Creating epoll selector for: {}".format(platform.system())) self.__DEFAULT_READ_POLLS = select.EPOLLIN|select.EPOLLRDHUP|select.EPOLLHUP|select.EPOLLERR self.__DEFAULT_ACCEPT_POLLS = select.EPOLLIN|select.EPOLLRDHUP|select.EPOLLHUP|select.EPOLLERR self.__readCallback = readCallback self.__writeCallback = writeCallback self.__acceptCallback = acceptCallback self.__errorCallback = errorCallback self.__ReadSelector = select.epoll() self.__WriteSelector = select.epoll() self.__AcceptorSelector = select.epoll() self.__running = True self.__localExecuter = Scheduler(3) self.__localExecuter.execute(self.__doReads) self.__localExecuter.execute(self.__doWrites) self.__localExecuter.execute(self.__doAcceptor) def stop(self): self.__running = False self.__ReadSelector.close() self.__WriteSelector.close() self.__AcceptorSelector.close() self.__localExecuter.shutdown_now() def addServer(self, fileno): try: self.__AcceptorSelector.register(fileno, self.__DEFAULT_ACCEPT_POLLS) except: noExcept(self.__AcceptorSelector.modify, fileno, self.__DEFAULT_ACCEPT_POLLS) def removeServer(self, fileno): noExcept(self.__AcceptorSelector.unregister, fileno) def addReader(self, fileno): try: self.__ReadSelector.register(fileno, self.__DEFAULT_READ_POLLS) except: noExcept(self.__ReadSelector.modify, fileno, self.__DEFAULT_READ_POLLS) def removeReader(self, fileno): noExcept(self.__ReadSelector.unregister, fileno) def addWriter(self, fileno): try: self.__WriteSelector.register(fileno, select.EPOLLOUT) except: noExcept(self.__WriteSelector.modify, fileno, select.EPOLLOUT) def removeWriter(self, fileno): noExcept(self.__WriteSelector.unregister, fileno) def __doThread(self, t): while self.__running: try: t() except Exception as e: self.__log.error("GP Socket Exception: %s: %s"%(t, sys.exc_info()[0])) self.__log.error(e) def __doReads(self): events = self.__ReadSelector.poll(100) for fileno, event in events: try: if event & select.EPOLLIN: self.__readCallback(fileno) if (event & select.EPOLLRDHUP or event & select.EPOLLHUP or event & select.EPOLLERR): self.__errorCallback(fileno) noExcept(self.__ReadSelector.unregister, fileno) except Exception as e: self.__log.debug("Read Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) if self.__running: self.__localExecuter.execute(self.__doReads) def __doWrites(self): events = self.__WriteSelector.poll(100) for fileno, event in events: try: if event & select.EPOLLOUT: self.__writeCallback(fileno) else: self.__errorCallback(fileno) noExcept(self.__WriteSelector.unregister, fileno) except Exception as e: self.__log.debug("Write Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) if self.__running: self.__localExecuter.execute(self.__doWrites) def __doAcceptor(self): events = self.__AcceptorSelector.poll(100) for fileno, event in events: try: if event & select.EPOLLIN: self.__acceptCallback(fileno) else: self.__errorCallback(fileno) noExcept(self.__WriteSelector.unregister, fileno) except Exception as e: self.__log.debug("Accept Error: %s"%(sys.exc_info()[0])) self.__log.debug(e) logging.exception("") if self.__running: self.__localExecuter.execute(self.__doAcceptor) class SocketExecuter(): """ The main SocketExecuter for litesockets. 
  The SocketExecuter is what processes all socket operations: doing the writes and reads,
  and accepting new connections.  It also does all the callbacks when a read happens or a new socket connects.

  Having a SocketExecuter is required for all litesockets Connections, and in general only 1 should be needed per process.
  """
  def __init__(self, threads=5, scheduler=None, forcePlatform=None):
    """
    Constructs a new SocketExecuter

    `threads` sets the number of threads used when creating a Scheduler, if no Scheduler is provided.

    `scheduler` this scheduler will be used for the SocketExecuter's client/server callbacks.

    `forcePlatform` overrides the detected platform; this can be used to switch which selector object is created.
    """
    self.__log = logging.getLogger("root.litesockets.SocketExecuter:{}".format(id(self)))
    self.__clients = dict()
    self.__servers = dict()
    self.__internalExec = None
    if scheduler == None:
      self.__executor = Scheduler(threads)
      self.__internalExec = self.__executor
    else:
      self.__executor = scheduler
    self.__stats = Stats()
    if forcePlatform == None:
      forcePlatform = platform.system()
    if forcePlatform.lower().find("linux") > -1:
      self.__selector = EpollSelector(self.__clientRead, self.__clientWrite, self.__serverAccept, self.__socketerrors)
    else:
      self.__selector = SelectSelector(self.__clientRead, self.__clientWrite, self.__serverAccept, self.__socketerrors)
    self.__running = True

  def getScheduler(self):
    """
    Returns the scheduler that is set for this SocketExecuter.
    """
    return self.__executor

  def stop(self):
    """
    Stops the SocketExecuter; this will close all clients/servers created from it.
    """
    self.__log.info("Shutting Down!")
    self.__selector.stop()
    self.__running = False
    if self.__internalExec != None:
      self.__internalExec.shutdown_now()
    for i in list(self.__clients.values()):
      self.__log.debug("Closing Client:{}".format(i))
      noExcept(i.close)
    for i in list(self.__servers.values()):
      self.__log.debug("Closing Server:{}".format(i))
      noExcept(i.close)

  def isRunning(self):
    """
    Returns True if the SocketExecuter is running or False if it was shutdown.
    """
    return self.__running

  def updateClientOperations(self, client, disable=False):
    """
    This is called to detect which operations to check the client for.  It decides whether the client
    needs to be checked for writes and/or reads and then takes the appropriate actions.

    `client` the client to check for operations on.

    `disable` if this is set to True it will force the client to be removed from both reads and writes.
    """
    if not isinstance(client, Client):
      return
    FN = client.getFileDesc()
    if FN in self.__clients:
      if client.isClosed():
        self.__selector.removeReader(FN)
        self.__selector.removeWriter(FN)
        del self.__clients[FN]
        return
      if client.getReadBufferSize() >= client.MAXBUFFER or disable:
        self.__selector.removeReader(FN)
      else:
        self.__selector.addReader(FN)
      if client.getWriteBufferSize() == 0 or disable:
        self.__selector.removeWriter(FN)
      else:
        self.__selector.addWriter(FN)

  def createUDPServer(self, host, port):
    """
    Returns a UDPServer

    `host` the host or IP address to open the listen port on.

    `port` the port to open up.
    """
    us = UDPServer(host, port, self)
    FN = us.getFileDesc()
    self.__clients[FN] = us
    us.addCloseListener(self.__closeClient)
    return us

  def createTCPClient(self, host, port, use_socket = None):
    """
    Returns a TCPClient

    `host` the host or IP to connect the client to.

    `port` the port on that host to connect to.

    `use_socket` optionally, an already created socket to use for this client instead of opening a new one.
""" c = TCPClient(host, port, self, use_socket=use_socket) self.__clients[c.getFileDesc()] = c c.addCloseListener(self.__closeClient) return c def createTCPServer(self, host, port): """ Returns a TCPServer `host` the host or IP address open the listen port on. `port` the port to open up. """ s = TCPServer(self, host, port) self.__servers[s.getFileDesc()] = s s.addCloseListener(self.__closeServer) return s def getClients(self): """ Returns a list of all the Clients still open and associated with this SocketExecuter. """ return list(self.__clients.values()) def getServers(self): """ Returns a list of all Servers still open and associated with this SocketExecuter. """ return list(self.__servers.values()) def getStats(self): return self.__stats def startServer(self, server): """ Generally this is not called except through Server.start() you can do that manually if wanted. `server` the server to start listening on. """ if isinstance(server, Server) and server.getFileDesc() in self.__servers: self.__selector.addServer(server.getFileDesc()) self.__log.info("Started New Server:{}".format(server)) def stopServer(self, server): """ Generally this is not called except through Server.stop() you can do that manually if wanted. `server` the server to start listening on. """ if isinstance(server, Server) and server.getFileDesc() in self.__servers: self.__selector.removeServer(server.getFileDesc()) def __socketerrors(self, fileno): if fileno in self.__clients: self.__clientErrors(self.__clients[fileno], fileno) elif fileno in self.__servers: self.__serverErrors(self.__servers[fileno], fileno) def __serverAccept(self, fileno): if fileno not in self.__servers: self.__selector.removeServer(fileno) return SERVER = self.__servers[fileno] try: conn, addr = SERVER.getSocket().accept() SERVER.addClient(conn) except: pass def __clientRead(self, fileno): if fileno not in self.__clients: self.__selector.removeReader(fileno) return read_client = self.__clients[fileno] data_read = 0 data = "" try: if read_client._getType() == "CUSTOM": data = read_client.READER() if data != EMPTY_STRING: read_client._addRead(data) data_read += len(data) elif read_client._getType() == "TCP":#.getSocket().type == socket.SOCK_STREAM: data = read_client.getSocket().recv(655360) if data != EMPTY_STRING: read_client._addRead(data) data_read += len(data) else: self.__clientErrors(read_client, fileno) elif read_client._getType() == "UDP": for i in range(100): data = EMPTY_STRING try: data, addr = read_client.getSocket().recvfrom(65536) except socket.error as e: if e.args[0] != errno.EWOULDBLOCK: raise e if data != EMPTY_STRING: read_client.runOnClientThread(read_client._addRead, args=([addr, data],)) data_read+=len(data) else: break self.__stats._addRead(data_read) return len(data) except ssl.SSLError as err: pass except KeyError as e: self.__log.debug("client removed on read") except IOError as e: if e.errno != errno.EAGAIN and e.errno != errno.EBADF: self.__log.error("Read Error 2: %s"%(sys.exc_info()[0])) self.__log.error(e) except Exception as e: self.__log.error("Read Error: %s"%(sys.exc_info()[0])) self.__log.error(e) self.__log.error(errno.EAGAIN) return 0 def __clientWrite(self, fileno): if fileno not in self.__clients: self.__selector.removeWriter(fileno) return CLIENT = self.__clients[fileno] l = 0 try: if CLIENT._getType() == "UDP": d = CLIENT._getWrite() l = CLIENT.getSocket().sendto(d[1], d[0]) CLIENT._reduceWrite(l) elif CLIENT._getType() == "TCP": w = CLIENT._getWrite() try: l = CLIENT.getSocket().send(w) except ssl.SSLEOFError 
as e: self.__log.error("SSLError closing client!") CLIENT.close() CLIENT._reduceWrite(l) elif self.__clients[fileno].TYPE == "CUSTOM": l = self.__clients[fileno].WRITER() self.__stats._addWrite(l) except Exception as e: self.__log.debug("clientWrite Error: %s"%(sys.exc_info()[0])) logging.exception("") self.__log.debug(e) def __serverErrors(self, server, fileno): self.__log.debug("Removing Server %d "%(fileno)) self.__selector.removeServer(fileno) server.close() def __clientErrors(self, client, fileno): self.__log.debug("Removing client %d "%(fileno)) self.__selector.removeReader(fileno) self.__selector.removeWriter(fileno) client.close() def __closeClient(self, client): client.close() def __closeServer(self, server): server.close() del self.__servers[server.getFileDesc()]
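
# --- Hypothetical usage sketch (editor's addition, not part of the original module) ---
# The docstrings above describe the SocketExecuter as the single object that owns all
# clients and servers, so this shows roughly how it would be wired together. Only calls
# that appear in this file are used (createTCPServer, createTCPClient, getClients,
# getServers, stop); the host/port values are made up, Server.start() is assumed to
# begin listening as the startServer() docstring suggests, and actually reading or
# writing data is left to the TCPClient/TCPServer API defined elsewhere in litesockets.
def _example_usage():  # pragma: no cover
  se = SocketExecuter(threads=5)
  server = se.createTCPServer("127.0.0.1", 8642)
  server.start()
  client = se.createTCPClient("127.0.0.1", 8642)
  print("servers: {} clients: {}".format(se.getServers(), se.getClients()))
  se.stop()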
lwahlmeier/python-litesockets
litesockets/socketexecuter.py
Python
unlicense
21,816
0.018702
import unittest from mock import Mock from biicode.common.model.content import Content from biicode.common.model.content import ContentDeserializer from biicode.common.model.content import content_diff from biicode.common.exception import BiiSerializationException from biicode.common.model.id import ID class ContentTest(unittest.TestCase): def test_deserialize_exception(self): self.assertRaises(BiiSerializationException, ContentDeserializer(ID((0, 0, 0))).deserialize, "wrong object") self.assertIsNone(ContentDeserializer(ID).deserialize(None)) def test_content_diff(self): content_load1 = Mock() content_load2 = Mock() content_load1.is_binary = Mock(return_value=True) self.assertEquals(content_diff(content_load1, content_load2), "Unable to diff binary contents of base") content_load1.is_binary = Mock(return_value=False) content_load2.is_binary = Mock(return_value=True) self.assertEquals(content_diff(content_load1, content_load2), "Unable to diff binary contents of base") def test_content_similarity(self): content = Content(ID((0, 0, 0)), load=None) self.assertEquals(content.similarity(content), 1)
zhangf911/common
test/model/content_test.py
Python
mit
1,322
0
# coding=utf-8 # Licensed Materials - Property of IBM # Copyright IBM Corp. 2016 import os import unittest import sys import itertools from streamsx.topology.topology import * from streamsx.topology.tester import Tester from streamsx.topology import schema import streamsx.topology.context import streamsx.spl.op as op import streamsx.spl.toolkit import streamsx.scripts.extract import spl_tests_utils as stu class TestPrimitives(unittest.TestCase): """ Test @spl.primitive_operator decorated operators """ _multiprocess_can_split_ = True @classmethod def setUpClass(cls): """Extract Python operators in toolkit""" stu._extract_tk('testtkpy') def setUp(self): Tester.setup_distributed(self) def _get_metric(self, name): job = self.tester.submission_result.job ops = job.get_operators() self.assertEqual(1, len(ops)) ms = ops[0].get_metrics(name=name) self.assertEqual(1, len(ms)) m = ms[0] self.assertEqual(name, m.name) return m def _noports_check(self): job = self.tester.submission_result.job ops = job.get_operators() self.assertEqual(1, len(ops)) ms = ops[0].get_metrics(name='NP_mymetric') self.assertEqual(1, len(ms)) m = ms[0] self.assertEqual('NP_mymetric', m.name) self.assertEqual(89, m.value) def test_noports(self): """Operator with no inputs or outputs""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) bop = op.Invoke(topo, "com.ibm.streamsx.topology.pytest.pyprimitives::NoPorts", params = {'mn': 'mymetric', 'iv':89}) self.tester = Tester(topo) self.tester.local_check = self._noports_check self.tester.test(self.test_ctxtype, self.test_config) def _single_input_port_check(self): job = self.tester.submission_result.job top = None for op in job.get_operators(): if op.name == 'SIP_OP': top = op break self.assertIsNot(None, top) ms = top.get_metrics(name='SIP_METRIC') self.assertEqual(1, len(ms)) m = ms[0] self.assertEqual('SIP_METRIC', m.name) import time for i in range(10): if m.value == 1060: break time.sleep(1.0) m = top.get_metrics(name='SIP_METRIC')[0] self.assertEqual(1060, m.value) def test_single_input_port(self): """Operator with one input port""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s = topo.source([1043]) s = s.map(lambda x : (x,), schema='tuple<uint64 v>') bop = op.Sink("com.ibm.streamsx.topology.pytest.pyprimitives::SingleInputPort", s, name="SIP_OP") self.tester = Tester(topo) self.tester.local_check = self._single_input_port_check self.tester.test(self.test_ctxtype, self.test_config) def _multi_input_port_check(self): job = self.tester.submission_result.job top = None for op in job.get_operators(): if op.name == 'MIP_OP': top = op break self.assertIsNot(None, top) ms = top.get_metrics(name='MIP_METRIC_0') self.assertEqual(1, len(ms)) m0 = ms[0] self.assertEqual('MIP_METRIC_0', m0.name) ms = top.get_metrics(name='MIP_METRIC_1') self.assertEqual(1, len(ms)) m1 = ms[0] self.assertEqual('MIP_METRIC_1', m1.name) ms = top.get_metrics(name='MIP_METRIC_2') self.assertEqual(1, len(ms)) m2 = ms[0] self.assertEqual('MIP_METRIC_2', m2.name) import time for i in range(10): if m0.value == 9081 and m1.value == 379 and m2.value == -899: break time.sleep(1.0) m0 = top.get_metrics(name='MIP_METRIC_0')[0] m1 = top.get_metrics(name='MIP_METRIC_1')[0] m2 = top.get_metrics(name='MIP_METRIC_2')[0] self.assertEqual(9054 + 17, m0.value) self.assertEqual(345 + 34, m1.value) self.assertEqual(-953 + 51, m2.value) def test_multi_input_ports(self): """Operator with three input ports""" topo = Topology() 
streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s0 = topo.source([9054]).map(lambda x : (x,), schema='tuple<uint64 v>') s1 = topo.source([345]).map(lambda x : (x,), schema='tuple<int64 v>') s2 = topo.source([-953]).map(lambda x : (x,), schema='tuple<int32 v>') bop = op.Invoke(topo, "com.ibm.streamsx.topology.pytest.pyprimitives::MultiInputPort", [s0,s1,s2], name="MIP_OP") self.tester = Tester(topo) self.tester.local_check = self._multi_input_port_check self.tester.test(self.test_ctxtype, self.test_config) # With output ports it's easier to test thus can use standalone. # class TestPrimitivesOutputs(unittest.TestCase): _multiprocess_can_split_ = True @classmethod def setUpClass(cls): """Extract Python operators in toolkit""" stu._extract_tk('testtkpy') def setUp(self): Tester.setup_standalone(self) def test_single_output_port(self): """Operator with single output port.""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s = topo.source([9237, -24]) s = s.map(lambda x : (x,), schema='tuple<int64 v>') bop = op.Map("com.ibm.streamsx.topology.pytest.pyprimitives::SingleOutputPort", s) r = bop.stream self.tester = Tester(topo) self.tester.tuple_count(s, 2) self.tester.contents(s, [{'v':9237}, {'v':-24}]) self.tester.test(self.test_ctxtype, self.test_config) def test_multi_output_ports(self): """Operator with multiple output port.""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s = topo.source([9237, -24]) s = s.map(lambda x : (x,), schema='tuple<int64 v>') bop = op.Invoke(topo, "com.ibm.streamsx.topology.pytest.pyprimitives::MultiOutputPorts", s, schemas=['tuple<int64 v1>', 'tuple<int32 v2>', 'tuple<int16 v3>']) r = bop.outputs self.tester = Tester(topo) self.tester.tuple_count(s, 2) self.tester.contents(r[0], [{'v1':9237}, {'v1':-24}]) self.tester.contents(r[1], [{'v2':9237+921}, {'v2':-24+921}]) self.tester.contents(r[2], [{'v3':9237-407}, {'v3':-24-407}]) self.tester.test(self.test_ctxtype, self.test_config) def test_dict_output_ports(self): """Operator with multiple output port submitting dict objects.""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s = topo.source([9237, -24]) s = s.map(lambda x : (x,x*2,x+4), schema='tuple<int64 d, int64 e, int64 f>') bop = op.Invoke(topo, "com.ibm.streamsx.topology.pytest.pyprimitives::DictOutputPorts", s, schemas=['tuple<int64 d, int64 e, int64 f>']*2) r = bop.outputs self.tester = Tester(topo) self.tester.tuple_count(r[0], 2) self.tester.tuple_count(r[1], 4) self.tester.contents(r[0], [{'d':9237, 'e':(9237*2), 'f':9237+4}, {'d':-24, 'e':(-24*2), 'f':-24+4}]) self.tester.contents(r[1], [{'d':9237+7, 'f':(9237*2)+777, 'e':9237+4+77}, {'d':9237, 'e':(9237*2), 'f':9237+4}, {'d':-24+7, 'f':(-24*2)+777, 'e':-24+4+77}, {'d':-24, 'e':(-24*2), 'f':-24+4}]) self.tester.test(self.test_ctxtype, self.test_config) def test_input_by_position(self): """Operator with input by position""" topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) s = topo.source([3642, -393]) s = s.map(lambda x : (x,x*2,x+4), schema='tuple<int64 d, int64 e, int64 f>') bop = op.Map("com.ibm.streamsx.topology.pytest.pyprimitives::InputByPosition", s) r = bop.stream self.tester = Tester(topo) self.tester.tuple_count(r, 2) self.tester.contents(r, [{'d':3642, 'e':(3642*2)+89, 'f':-92}, {'d':-393, 'e':(-393*2)+89, 'f':-92}]) self.tester.test(self.test_ctxtype, self.test_config) def test_only_output_port(self): """Operator with single output port and 
no inputs.""" count = 106 topo = Topology() streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy')) bop = op.Source(topo, "com.ibm.streamsx.topology.pytest.pyprimitives::OutputOnly", schema='tuple<int64 c>', params={'count':count}) r = bop.stream self.tester = Tester(topo) self.tester.tuple_count(r, count) self.tester.contents(r, list({'c':i+501} for i in range(count))) self.tester.test(self.test_ctxtype, self.test_config)
ddebrunner/streamsx.topology
test/python/spl/tests/test_splpy_primitives.py
Python
apache-2.0
8,994
0.008117
from json_serializable import JSONSerializable
from irc_connection import IRCConnection
from models import Conversation, Message
from observable import ObservableList, SimpleObservable


class Session(SimpleObservable, JSONSerializable):
    _json_attrs = ["conversations", "users"]

    def __init__(self):
        self.connections = {} #ObservableDict()
        self.users = ObservableList()
        self.conversations = ObservableList()
        self.conversation_lookup = {}

    def conversation_key(self, connection, name):
        return "%s@%s" % (name, connection)

    def get_conversation(self, connection, name):
        return self.conversation_lookup.get(self.conversation_key(connection, name))

    def new_conversation(self, connection, name):
        conv = Conversation(name, connection, {})
        conv.index = len(self.conversations)
        self.conversations.append(conv)
        self.conversation_lookup[self.conversation_key(connection, name)] = conv
        self.last_key = self.conversation_key(connection, name)
        return conv

    def join_conversation(self, connection, name):
        connection.join(name)

    def remove_conversation(self, connection, name):
        for i in range(len(self.conversations)):
            c = self.conversations[i]
            print c
            if c.name == name and c.connection == connection:
                print "DELETED"
                del self.conversation_lookup[self.conversation_key(connection, name)]
                del self.conversations[i]
                break

    def leave_conversation(self, connection, name):
        print "PART %s" % name
        return connection.part([name])

    def user_joined_conversation(self, connection, username, chatroom):
        self.get_conversation(connection, chatroom).users.append(username)

    def user_left_conversation(self, connection, username, chatroom):
        try:
            self.get_conversation(connection, chatroom).users.remove(username)
        except:
            print "Failed to remove %s from %s" % (username, self.get_conversation(connection, chatroom))

    def recv_message(self, connection, username, message, chatroom=None):
        if chatroom:
            conv_name = chatroom
        else:
            conv_name = username
        conv = self.get_conversation(connection, conv_name)
        if not conv:
            # create the conversation if we have not seen it yet
            conv = self.new_conversation(connection, conv_name)
        conv.recv_message(Message(None, username, message, conv))

    def conversation_by_id(self, conv_id):
        matches = [c for c in self.conversations if c.id == conv_id]
        if matches:
            return matches[0]
        else:
            return None

    def send_message(self, conversation_id, message):
        conv = self.conversation_by_id(conversation_id)
        conv.send_message(Message(None, "me", message, conv))
        # TODO: Support IRC ['ACTION', 'looks around']

    def start(self):
        irc = IRCConnection(self, [("irc.freenode.org", 6667)], "python_phone", "python_phone")
        self.connections["irc"] = irc
        print "Connecting..."
        irc.start()
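
# --- Hypothetical usage sketch (editor's addition, not part of the original module) ---
# Demonstrates the conversation bookkeeping on its own, without opening an IRC
# connection. A plain string stands in for the connection object purely for
# illustration (conversation_key() only formats it into a lookup key), and it assumes
# the repository's models/observable modules are importable so Conversation can be built.
def _example_usage():
    session = Session()
    conv = session.new_conversation("irc.freenode.org", "#python")
    print(session.conversation_key("irc.freenode.org", "#python"))  # "#python@irc.freenode.org"
    print(session.get_conversation("irc.freenode.org", "#python") is conv)  # True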
uniite/pyirc
models/session.py
Python
mit
3,127
0.003518
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # PowerDNS Recursor documentation build configuration file, created by # sphinx-quickstart on Wed Jun 28 14:56:44 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os import glob # import sys # sys.path.insert(0, os.path.abspath('.')) import guzzle_sphinx_theme import datetime # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. #extensions = [] #extensions = ['redjack.sphinx.lua', 'sphinxcontrib.httpdomain', 'sphinxjsondomain'] extensions = ['sphinxcontrib.openapi', 'sphinxcontrib.fulltoc', 'changelog'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'indexTOC' # General information about the project. project = 'PowerDNS Authoritative Server' copyright = '2001-' + str(datetime.date.today().year) + ', PowerDNS.COM BV' author = 'PowerDNS.COM BV' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = '4.2' # The full version, including alpha/beta/rc tags. #release = '4.1.1-pre' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.venv', 'security-advisories/security-policy.rst', 'common/secpoll.rst', 'common/api/*'] # The name of the Pygments (syntax highlighting) style to use. highlight_language = 'none' pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Changelog Options ---------------------------------------------------- changelog_render_ticket = "https://github.com/PowerDNS/pdns/issues/%s" changelog_render_pullreq = "https://github.com/PowerDNS/pdns/pull/%s" changelog_render_changeset = "https://github.com/PowerDNS/pdns/commit/%s" changelog_sections = ['New Features', 'Removed Features', 'Improvements', 'Bug Fixes'] changelog_inner_tag_sort = ['Internals', 'API', 'Tools', 'ALIAS', 'DNSUpdate', 'BIND', 'MySQL', 'Postgresql', 'LDAP', 'GeoIP', 'Remote'] changelog_hide_tags_in_entry = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = 'guzzle_sphinx_theme' extensions.append("guzzle_sphinx_theme") html_theme_options = { # Set the name of the project to appear in the sidebar "project_nav_name": "PowerDNS Authoritative Server", } html_favicon = 'common/favicon.ico' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_style = 'pdns.css' # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'PowerDNSAuthoritativedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { 'maxlistdepth' : '8', # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'a4paper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'PowerDNS-Authoritative.tex', 'PowerDNS Authoritative Server Documentation', 'PowerDNS.COM BV', 'manual'), ] latex_logo = 'common/powerdns-logo-500px.png' # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
descriptions = { 'calidns': 'A DNS recursor testing tool', 'dnsbulktest': 'A debugging tool for intermittent resolver failures', 'dnsgram': 'A debugging tool for intermittent resolver failures', 'dnspcap2calidns': 'A tool to convert PCAPs of DNS traffic to calidns input', 'dnspcap2protobuf': 'A tool to convert PCAPs of DNS traffic to PowerDNS Protobuf', 'dnsreplay': 'A PowerDNS nameserver debugging tool', 'dnsscan': 'List the amount of queries per qtype in a pcap', 'dnsscope': 'A PowerDNS nameserver debugging tool', 'dnstcpbench': 'tool to perform TCP benchmarking of nameservers', 'dnswasher': 'A PowerDNS nameserver debugging tool', 'dumresp': 'A dumb DNS responder', 'ixfrdist': 'An IXFR/AXFR-only server that re-distributes zones', 'ixplore': 'A tool that provides insights into IXFRs', 'nproxy': 'DNS notification proxy', 'nsec3dig': 'Show and validate NSEC3 proofs', 'pdns_control': 'Control the PowerDNS nameserver', 'pdns_notify': 'A simple DNS NOTIFY sender', 'pdns_server': 'The PowerDNS Authoritative Nameserver', 'pdnsutil': 'PowerDNS record and DNSSEC command and control', 'saxfr': 'Perform AXFRs and show information about it', 'sdig': 'Perform a DNS query and show the results', 'zone2json': 'convert BIND zones to JSON', 'zone2ldap': 'convert zonefiles to ldif', 'zone2sql': 'convert BIND zones to SQL', } man_pages = [] for f in glob.glob('manpages/*.1.rst'): srcname = '.'.join(f.split('.')[:-1]) destname = srcname.split('/')[-1][:-2] man_pages.append((srcname, destname, descriptions.get(destname, ''), [author], 1)) man_pages.append(('manpages/ixfrdist.yml.5', 'ixfrdist.yml', 'The ixfrdist configuration file', [author], 5)) # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) #texinfo_documents = [ # (master_doc, 'PowerDNSRecursor', 'PowerDNS Recursor Documentation', # author, 'PowerDNSRecursor', 'One line description of project.', # 'Miscellaneous'), #] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html']
Habbie/pdns
docs/conf.py
Python
gpl-2.0
8,446
0.001658
import time from roboclaw import Roboclaw #Windows comport name rc = Roboclaw("COM11",115200) #Linux comport name #rc = Roboclaw("/dev/ttyACM0",115200) rc.Open() address = 0x80 while(1): rc.ForwardM1(address,32) #1/4 power forward rc.BackwardM2(address,32) #1/4 power backward time.sleep(2) rc.BackwardM1(address,32) #1/4 power backward rc.ForwardM2(address,32) #1/4 power forward time.sleep(2) rc.BackwardM1(address,0) #Stopped rc.ForwardM2(address,0) #Stopped time.sleep(2) m1duty = 16 m2duty = -16 rc.ForwardBackwardM1(address,64+m1duty) #1/4 power forward rc.ForwardBackwardM2(address,64+m2duty) #1/4 power backward time.sleep(2) m1duty = -16 m2duty = 16 rc.ForwardBackwardM1(address,64+m1duty) #1/4 power backward rc.ForwardBackwardM2(address,64+m2duty) #1/4 power forward time.sleep(2) rc.ForwardBackwardM1(address,64) #Stopped rc.ForwardBackwardM2(address,64) #Stopped time.sleep(2)
hopkira/k9-chess-angular
python/roboclaw_simplepwm.py
Python
unlicense
963
0.070613
try: from setuptools import setup except ImportError: from distutils.core import setup setup( name='morphounit', version='1.0.4', author='Shailesh Appukuttan, Pedro Garcia-Rodriguez', author_email='shailesh.appukuttan@cnrs.fr, pedro.garcia@cnrs.fr', packages=['morphounit', 'morphounit.capabilities', 'morphounit.tests', 'morphounit.tests.morph_cells', 'morphounit.tests.morph_circuits', 'morphounit.scores', 'morphounit.plots'], url='https://github.com/appukuttan-shailesh/morphounit', keywords = ['morphology', 'structure', 'circuit', 'testing', 'validation framework'], license='BSD 3-Clause', description='A SciUnit library for data-driven testing of neuronal morphologies.', long_description="", install_requires=['neo', 'elephant','sciunit>=0.1.5.2', 'neurom==1.4.10', 'tabulate', 'seaborn==0.9.0'], dependency_links = ['git+http://github.com/neuralensemble/python-neo.git#egg=neo-0.4.0dev', 'https://github.com/scidash/sciunit/tarball/dev'] )
pedroernesto/morphounit
setup.py
Python
bsd-3-clause
1,115
0.008072
# ENVISIoN # # Copyright (c) 2019-2021 Jesper Ericsson, Gabriel Anderberg, Didrik Axén, # Adam Engman, Kristoffer Gubberud Maras, Joakim Stenborg # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################################## # Alterations to this file by Gabriel Anderberg, Didrik Axén, # Adam Engman, Kristoffer Gubberud Maras, Joakim Stenborg # # To the extent possible under law, the person who associated CC0 with # the alterations to this file has waived all copyright and related # or neighboring rights to the alterations made to this file. # # You should have received a copy of the CC0 legalcode along with # this work. If not, see # <http://creativecommons.org/publicdomain/zero/1.0/>. import sys,os,inspect # path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # sys.path.insert(0, os.path.expanduser(path_to_current_folder)) import inviwopy import numpy as np import h5py from .NetworkHandler import NetworkHandler from envisionpy.utils.exceptions import * class LinePlotNetworkHandler(NetworkHandler): """ Handler class for charge visualization network. 
Sets up and manages the charge visualization """ def __init__(self, inviwoApp): NetworkHandler.__init__(self, inviwoApp) self.setup_plot_network() def get_ui_data(self): return [ "lineplot", self.get_x_range(), self.get_y_range(), self.get_line_enabled(), self.get_line_x(), self.get_grid_enabled(), self.get_grid_width(), self.get_x_labels_enabled(), self.get_y_labels_enabled(), self.get_label_n(), self.get_y_selection_info(), self.get_available_datasets() ] # ------------------------------------------ # ------- Property functions ------- def toggle_graph_canvas(self, enable): # Will add or remove the slice canvas try: graphCanvas = self.get_processor('graphCanvas') except ProcessorNotFoundError: graphCanvas = None # If already in correct mode dont do anything if (graphCanvas and enable) or (not graphCanvas and not enable): return if enable: graphCanvas = self.add_processor('org.inviwo.CanvasGL', 'graphCanvas', 25*7, 525) graphCanvas.inputSize.dimensions.value = inviwopy.glm.ivec2(500, 500) self.network.addConnection(self.get_processor("Title text").getOutport('outport'), graphCanvas.getInport('inport')) else: self.remove_processor('graphCanvas') def show(self): try: self.get_processor('graphCanvas').widget.show() except: pass def hide(self): try: self.get_processor('graphCanvas').widget.hide() except: pass def set_y_selection_type(self, option): # Set the type for date selection for Y datasets # 0: single dataset. 1: multiple datasets. 2: all datasets plotter = self.get_processor("Line plot") plotter.boolYSelection.value = (option == 1) plotter.allYSelection.value = (option == 2) def set_y_single_selection_index(self, index): plotter = self.get_processor("Line plot") plotter.ySelectionProperty.selectedIndex = index def set_y_single_selection_string(self, name): plotter = self.get_processor("Line plot") plotter.ySelectionProperty.value = name def set_y_multi_selection(self, selection): plotter = self.get_processor("Line plot") plotter.groupYSelection_.value = selection def set_title(self, title): title_text = self.get_processor("Title text") title_text.text.value = title def set_title_font(self): pass def set_title_font_size(self): pass def set_title_color(self): pass def set_x_range(self, xMax, xMin): plotter = self.get_processor("Line plot") plotter.x_range.value = inviwopy.glm.vec2(xMin, xMax) def set_y_range(self, xMax, xMin): plotter = self.get_processor("Line plot") plotter.y_range.value = inviwopy.glm.vec2(xMin, xMax) def toggle_vertical_line(self, enable): plotter = self.get_processor("Line plot") plotter.enable_line.value = enable def set_vertical_line_x(self, xPos): plotter = self.get_processor("Line plot") plotter.line_x_coordinate.value = xPos def toggle_grid(self, enable): plotter = self.get_processor("Line plot") plotter.enable_grid.value = enable def set_grid_size(self, width): plotter = self.get_processor("Line plot") plotter.grid_width.value = width def toggle_x_label(self, enable): plotter = self.get_processor("Line plot") plotter.show_x_labels.value = enable def toggle_y_label(self, enable): plotter = self.get_processor("Line plot") plotter.show_y_labels.value = enable def set_n_labels(self, n): plotter = self.get_processor("Line plot") plotter.label_number.value = n # ------------------------------------------------ # -------- Value getting functions for UI -------- # ------------------------------------------------ def get_dataset_list(self): Plotter = self.get_processor("Line plot") return Plotter.ySelectionProperty.identifiers def get_x_range(self): value = 
self.get_processor("Line plot").x_range.value return [value[0], value[1]] def get_y_range(self): value = self.get_processor("Line plot").y_range.value return [value[0], value[1]] def get_line_enabled(self): return self.get_processor("Line plot").enable_line.value def get_line_x(self): return self.get_processor("Line plot").line_x_coordinate.value def get_grid_enabled(self): return self.get_processor("Line plot").enable_grid.value def get_grid_width(self): return self.get_processor("Line plot").grid_width.value def get_x_labels_enabled(self): return self.get_processor("Line plot").show_x_labels.value def get_y_labels_enabled(self): return self.get_processor("Line plot").show_y_labels.value def get_label_n(self): return self.get_processor("Line plot").label_number.value def get_y_selection_info(self): plotter = self.get_processor("Line plot") if plotter.allYSelection.value: return [2] if plotter.boolYSelection.value: return [1, plotter.groupYSelection_.value] return [0, plotter.ySelectionProperty.selectedIndex] def get_available_datasets(self): plotter = self.get_processor("Line plot") return plotter.xSelectionProperty.identifiers # def get_grid_width(self): # pass def get_label_count(self): return self.get_processor("Line plot").label_number.value # ------------------------------------------ # ------- Network building functions ------- def setup_plot_network(self, xpos=0, ypos=300): function_to_dataframe = self.add_processor("org.inviwo.FunctionToDataFrame", "Function to dataframe", xpos, ypos) # self.network.addConnection(HDF5_to_function_processor.getOutport("functionVectorOutport"), # function_to_dataframe_processor.getInport("functionFlatMultiInport")) ypos += 75 line_plot = self.add_processor("org.inviwo.LinePlotProcessor", "Line plot", xpos, ypos) self.network.addConnection(function_to_dataframe.getOutport("dataframeOutport"), line_plot.getInport("dataFrameInport")) # if has_fermi_energy: # self.network.addConnection(fermi_point_processor.getOutport("pointVectorOutport"), # line_plot_processor.getInport("pointInport")) ypos += 75 mesh_renderer = self.add_processor("org.inviwo.Mesh2DRenderProcessorGL", "Mesh renderer", xpos, ypos) self.network.addConnection(line_plot.getOutport("outport"), mesh_renderer.getInport("inputMesh")) self.network.addConnection(line_plot.getOutport("labels"), mesh_renderer.getInport("imageInport")) ypos += 75 background = self.add_processor("org.inviwo.Background", "Background", xpos, ypos) self.network.addConnection(mesh_renderer.getOutport("outputImage"), background.getInport("inport")) ypos += 75 title_text = self.add_processor("org.inviwo.TextOverlayGL", "Title text", xpos, ypos) self.network.addConnection(background.getOutport('outport'), title_text.getInport('inport')) # if has_fermi_energy: # energy_text_processor.text.value = 'Energy - Fermi energy [eV]' # else: # energy_text_processor.text.value = 'Energy [eV]' title_text.font.fontSize.value = 20 # plotter_processor.font.anchor.value = inviwopy.glm.vec2(-1, -0.9234) title_text.position.value = inviwopy.glm.vec2(0.31, 0.93) title_text.color.value = inviwopy.glm.vec4(0,0,0,1) ypos += 75 canvas = self.add_processor("org.inviwo.CanvasGL", "graphCanvas", xpos, ypos) self.network.addConnection(title_text.getOutport('outport'), canvas.getInport('inport')) # Start modifying properties. 
# path_selection_processor.selection.value = '/Bandstructure/Bands' # HDF5_to_function_processor.yPathSelectionProperty.value = '/Energy' # line_plot_processor.allYSelection.value = True background.bgColor1.value = inviwopy.glm.vec4(1) background.bgColor2.value = inviwopy.glm.vec4(1) canvas.inputSize.dimensions.value = inviwopy.glm.size2_t(900, 700) canvas.widget.show() # if has_fermi_energy: # fermi_point_processor.pathSelectionProperty.value = '/FermiEnergy'
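
# --- Hypothetical usage sketch (editor's addition, not part of the original module) ---
# Shows how the property setters and UI getters defined above fit together. It assumes
# the sketch is run inside an Inviwo Python environment where `inviwopy.app` is the
# running application and the ENVISIoN processors are installed; outside of that
# environment it simply returns without doing anything.
def _example_usage():
    app = getattr(inviwopy, "app", None)
    if app is None:
        return
    handler = LinePlotNetworkHandler(app)
    handler.set_title("Example line plot")  # text overlay above the plot
    handler.toggle_grid(True)                # enable the background grid
    handler.set_n_labels(5)                  # number of axis labels
    print(handler.get_ui_data())             # current state as reported to the UI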
rartino/ENVISIoN
envisionpy/processor_network/LinePlotNetworkHandler.py
Python
bsd-2-clause
11,390
0.003337
import re from xcsoar.mapgen.waypoints.waypoint import Waypoint from xcsoar.mapgen.waypoints.list import WaypointList def __parse_line(line, bounds = None): if line.startswith('$'): return None lat = line[45:52] lat_neg = lat.startswith('S') lat = float(lat[1:3]) + float(lat[3:5]) / 60. + float(lat[5:7]) / 3600. if lat_neg: lat = -lat if bounds and (lat > bounds.top or lat < bounds.bottom): return None lon = line[52:60] lon_neg = lon.startswith('W') lon = float(lon[1:4]) + float(lon[4:6]) / 60. + float(lon[6:8]) / 3600. if lon_neg: lon = -lon if bounds and (lon > bounds.right or lon < bounds.left): return None wp = Waypoint() wp.lat = lat wp.lon = lon elev = line[41:45].strip() if elev != '': wp.altitude = float(elev) else: wp.altitude = 0.0 wp.short_name = line[:6] if wp.short_name.endswith('1'): wp.type = 'airport' elif wp.short_name.endswith('2'): wp.type = 'outlanding' wp.short_name = wp.short_name.strip() wp.name = line[7:41].strip() if 'GLD' in wp.name: wp.type = 'glider_site' if 'ULM' in wp.name: wp.type = 'ulm' pos = -1 if '#' in wp.name: pos = wp.name.find('#') if '*' in wp.name: pos = wp.name.find('*') if pos > -1: data = wp.name[pos + 1:] wp.name = wp.name[:pos].strip() icao = data[:4] if not icao.startswith('GLD') and not icao.startswith('ULM'): wp.icao = icao if data[4:5] == 'A': wp.surface = 'asphalt' elif data[4:5] == 'C': wp.surface = 'concrete' elif data[4:5] == 'L': wp.surface = 'loam' elif data[4:5] == 'S': wp.surface = 'sand' elif data[4:5] == 'Y': wp.surface = 'clay' elif data[4:5] == 'G': wp.surface = 'gras' elif data[4:5] == 'V': wp.surface = 'gravel' elif data[4:5] == 'D': wp.surface = 'dirt' runway_len = data[5:8].strip() if runway_len != '': wp.runway_len = int(runway_len) * 10 runway_dir = data[8:10].strip() if runway_dir != '': wp.runway_dir = int(runway_dir) * 10 freq = data[12:17].strip() if len(freq) == 5: if freq.endswith('2') or freq.endswith('7'): freq += '5' else: freq += '0' wp.freq = float(freq) / 1000. 
if wp.name.endswith('GLD'): wp.name = wp.name[:-3].strip() else: wp.name = wp.name.rstrip('!?1 ') if re.search('(^|\s)BERG($|\s)', wp.name): wp.type = 'mountain top' if re.search('(^|\s)COL($|\s)', wp.name): wp.type = 'mountain pass' if re.search('(^|\s)PASS($|\s)', wp.name): wp.type = 'mountain pass' if re.search('(^|\s)TOP($|\s)', wp.name): wp.type = 'mountain top' if re.search('(\s)A(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit' if re.search('(\s)AB(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit' if re.search('(\s)BAB(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit' if re.search('(\s)(\w){0,3}XA(\d){0,3}($|\s)', wp.name): wp.type = 'highway cross' if re.search('(\s)(\w){0,3}YA(\d){0,3}($|\s)', wp.name): wp.type = 'highway junction' if re.search('(\s)STR($|\s)', wp.name): wp.type = 'road' if re.search('(\s)SX($|\s)', wp.name): wp.type = 'road cross' if re.search('(\s)SY($|\s)', wp.name): wp.type = 'road junction' if re.search('(\s)EX($|\s)', wp.name): wp.type = 'railway cross' if re.search('(\s)EY($|\s)', wp.name): wp.type = 'railway junction' if re.search('(\s)TR($|\s)', wp.name): wp.type = 'gas station' if re.search('(\s)BF($|\s)', wp.name): wp.type = 'railway station' if re.search('(\s)RS($|\s)', wp.name): wp.type = 'railway station' if re.search('(\s)BR($|\s)', wp.name): wp.type = 'bridge' if re.search('(\s)TV($|\s)', wp.name): wp.type = 'tower' if re.search('(\s)KW($|\s)', wp.name): wp.type = 'powerplant' wp.name = wp.name.title() while ' ' in wp.name: wp.name = wp.name.replace(' ', ' ') wp.country_code = line[60:62].strip(); return wp def parse_welt2000_waypoints(lines, bounds = None): waypoint_list = WaypointList() for line in lines: wp = __parse_line(line, bounds) if wp: waypoint_list.append(wp) return waypoint_list
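
# --- Hypothetical usage sketch (editor's addition, not part of the original module) ---
# parse_welt2000_waypoints() takes an iterable of raw WELT2000 records, so the usual
# pattern is to hand it an open file object. The file name below is made up, bounds is
# left at its default of None so nothing is clipped, and the loop assumes WaypointList
# iterates like a normal list of Waypoint objects.
def _example_usage():
    with open('WELT2000.TXT') as f:
        waypoints = parse_welt2000_waypoints(f)
    for wp in waypoints:
        print(wp.name, wp.type, wp.lat, wp.lon)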
TobiasLohner/mapgen
lib/xcsoar/mapgen/waypoints/welt2000_reader.py
Python
gpl-2.0
4,373
0.028813
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUUnderlay(NURESTObject): """ Represents a Underlay in the VSD Notes: Underlays identify the transport networks that provide connectivity between NSGs, e.g. the Internet or a carrier MPLS VPN. The modelling of an underlay is required when using multiple disjoint underlays that are interconnected via one or more NSG-UBRs. The underlay object is used at the NSG Control uplink and at the NSG-UBR Underlay uplink. If no underlays are defined a default underlay is used. """ __rest_name__ = "underlay" __resource_name__ = "underlays" ## Constants CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): """ Initializes a Underlay instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> underlay = NUUnderlay(id=u'xxxx-xxx-xxx-xxx', name=u'Underlay') >>> underlay = NUUnderlay(data=my_dict) """ super(NUUnderlay, self).__init__() # Read/Write Attributes self._name = None self._last_updated_by = None self._last_updated_date = None self._description = None self._embedded_metadata = None self._underlay_id = None self._entity_scope = None self._creation_date = None self._owner = None self._external_id = None self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="underlay_id", remote_name="underlayId", attribute_type=int, is_required=False, is_unique=True) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) # Fetchers self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) # Properties @property def name(self): """ Get name value. Notes: Name of the underlay """ return self._name @name.setter def name(self, value): """ Set name value. Notes: Name of the underlay """ self._name = value @property def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ self._last_updated_by = value @property def last_updated_date(self): """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ self._last_updated_date = value @property def description(self): """ Get description value. Notes: Description of the underlay """ return self._description @description.setter def description(self, value): """ Set description value. 
Notes: Description of the underlay """ self._description = value @property def embedded_metadata(self): """ Get embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): """ Set embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ self._embedded_metadata = value @property def underlay_id(self): """ Get underlay_id value. Notes: Used to identify and make a distinction between different Underlays with an autogenerated integer. This attribute is named `underlayId` in VSD API. """ return self._underlay_id @underlay_id.setter def underlay_id(self, value): """ Set underlay_id value. Notes: Used to identify and make a distinction between different Underlays with an autogenerated integer. This attribute is named `underlayId` in VSD API. """ self._underlay_id = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ self._entity_scope = value @property def creation_date(self): """ Get creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ return self._creation_date @creation_date.setter def creation_date(self, value): """ Set creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ self._creation_date = value @property def owner(self): """ Get owner value. Notes: Identifies the user that has created this object. """ return self._owner @owner.setter def owner(self, value): """ Set owner value. Notes: Identifies the user that has created this object. """ self._owner = value @property def external_id(self): """ Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ return self._external_id @external_id.setter def external_id(self, value): """ Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ self._external_id = value
nuagenetworks/vspk-python
vspk/v6/nuunderlay.py
Python
bsd-3-clause
11,766
0.009009