text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
from pts.core.tools import filesystem as fs
from pts.core.basics.plot import mpl, plotting_libraries, pdf, plotting_formats
# -----------------------------------------------------------------
# Create configuration definition
definition = ConfigurationDefinition()
# Different plotting features
definition.add_flag("instruments", "plot a comparison between the SEDs of the different instruments", True)
definition.add_flag("contributions", "plot the various contributions to the SEDs", True)
# The output directory
definition.add_optional("output", "directory_path", "output directory", fs.cwd())
# The unit in which to plot
definition.add_optional("wavelength_unit", "length_unit", "unit of wavelength", "micron", convert_default=True)
definition.add_optional("unit", "photometric_unit", "photometric unit", "Jy", convert_default=True)
# -----------------------------------------------------------------
# The plotting format
definition.add_optional("format", "string", "plotting format", pdf, plotting_formats)
# The plotting library to use
definition.add_optional("library", "string", "plotting library", mpl, plotting_libraries)
# -----------------------------------------------------------------
# Reference SEDs
definition.add_optional("reference_seds", "filepath_list", "paths of reference SEDs")
# Ignore these filters
definition.add_optional("ignore_filters", "filter_list", "ignore these filters from the observed SEDs")
# -----------------------------------------------------------------
| SKIRT/PTS | core/config/plot_simulation_seds.py | Python | agpl-3.0 | 1,942 | 0.004637 |
# -*- coding:utf-8 -*-
def decode(data):
    try:
        value, _ = __decode(data, 0)
        retval = (True, value)
    except Exception as e:
        retval = (False, str(e))
    return retval
def encode(data):
    try:
        value = __encode(data)
        retval = (True, value)
    except Exception as e:
        retval = (False, str(e))
    return retval
# Internal functions
# Parse bencoded data
def __decode(data, start_idx):
if data[start_idx] == 'i':
value, start_idx = __decode_int(data, start_idx + 1)
elif data[start_idx].isdigit():
value, start_idx = __decode_str(data, start_idx)
elif data[start_idx] == 'l':
value, start_idx = __decode_list(data, start_idx + 1)
elif data[start_idx] == 'd':
value, start_idx = __decode_dict(data, start_idx + 1)
else:
        raise ValueError('__decode: unexpected token %r at index %d' % (data[start_idx], start_idx))
return value, start_idx
# Parse an integer
def __decode_int(data, start_idx):
end_idx = data.index('e', start_idx)
try:
value = int(data[start_idx: end_idx])
except Exception:
raise Exception('__decode_int: error')
return value, end_idx + 1
# Parse a string
def __decode_str(data, start_idx):
try:
end_idx = data.index(':', start_idx)
str_len = int(data[start_idx: end_idx])
start_idx = end_idx + 1
end_idx = start_idx + str_len
value = data[start_idx: end_idx]
except Exception:
raise Exception('__decode_str: error')
return value, end_idx
# Parse a list
def __decode_list(data, start_idx):
values = []
while data[start_idx] != 'e':
value, start_idx = __decode(data, start_idx)
values.append(value)
return values, start_idx + 1
# Parse a dictionary
def __decode_dict(data, start_idx):
dict_value = dict()
while data[start_idx] != 'e':
key, start_idx = __decode(data, start_idx)
value, start_idx = __decode(data, start_idx)
dict_value[key] = value
return dict_value, start_idx + 1
# Encode data
def __encode(data):
if isinstance(data, int):
value = __encode_int(data)
elif isinstance(data, str):
value = __encode_str(data)
elif isinstance(data, dict):
value = __encode_dict(data)
elif isinstance(data, list):
value = __encode_list(data)
else:
raise Exception('__encode: Error')
return value
# Encode an integer
def __encode_int(data):
return 'i' + str(data) + 'e'
# Encode a string
def __encode_str(data):
str_len = len(data)
return str(str_len) + ':' + data
# Encode a list
def __encode_list(data):
ret = 'l'
for datai in data:
ret += __encode(datai)
return ret + 'e'
# Encode a dictionary
def __encode_dict(data):
ret = 'd'
for key, value in data.items():
ret += __encode(key)
ret += __encode(value)
return ret + 'e'
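# Round-trip sketch (an editor's addition, not part of the original module).
# Note: this encoder emits dict keys in plain iteration order, while the
# bencode spec requires lexicographically sorted keys.
if __name__ == '__main__':
    ok, encoded = encode({'name': 'PyDHT', 'port': 6881})
    print(encoded)  # d4:name5:PyDHT4:porti6881ee (given insertion-ordered dicts)
    ok, decoded = decode(encoded)
    assert ok and decoded == {'name': 'PyDHT', 'port': 6881}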
| fupenglin/PyDHT | dht_bencode.py | Python | gpl-2.0 | 2,915 | 0 |
import platform
class OSCollector(object):
def __init__(self, docker_client=None):
self.docker_client = docker_client
def key_name(self):
return "osInfo"
def _zip_fields_values(self, keys, values):
data = {}
for key, value in zip(keys, values):
if len(value) > 0:
data[key] = value
else:
data[key] = None
return data
def _get_docker_version(self):
data = {}
if platform.system() == 'Linux':
version = "Unknown"
if self.docker_client:
ver_resp = self.docker_client.version()
version = "Docker version {0}, build {1}".format(
ver_resp.get("Version", "Unknown"),
ver_resp.get("GitCommit", "Unknown"))
data['dockerVersion'] = version
return data
def _get_os(self):
data = {}
if platform.system() == 'Linux':
info = platform.linux_distribution()
keys = ["distribution", "version", "versionDescription"]
data = self._zip_fields_values(keys, info)
data['kernelVersion'] = \
platform.release() if len(platform.release()) > 0 else None
return data
def get_data(self):
data = self._get_os()
data.update(self._get_docker_version())
return data
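# Usage sketch (an editor's addition). Note that platform.linux_distribution(),
# used above, was removed in Python 3.8, so this collector targets Python 2
# and early Python 3 interpreters.
if __name__ == '__main__':
    collector = OSCollector()        # without a Docker client, dockerVersion is "Unknown"
    print(collector.key_name())      # "osInfo"
    print(collector.get_data())      # OS/kernel fields on Linux, {} elsewhere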
| dx9/python-agent | cattle/plugins/host_info/os_c.py | Python | apache-2.0 | 1,411 | 0 |
# This file is part of LibreOsteo.
#
# LibreOsteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreOsteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreOsteo. If not, see <http://www.gnu.org/licenses/>.
"""
Django settings for LibreOsteo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os, sys, logging
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
if getattr(sys, 'frozen', False):
logger = logging.getLogger(__name__)
logger.info("Frozen with attribute value %s" %
(getattr(sys, 'frozen', False)))
logger.info("Real path of the start : %s " % (os.path.realpath(__file__)))
SITE_ROOT = os.path.split(
os.path.split(
os.path.split(os.path.dirname(
os.path.realpath(__file__)))[0])[0])[0]
logger.info("SITE_ROOT = %s" % SITE_ROOT)
if (getattr(sys, 'frozen', False)):
SITE_ROOT = os.path.split(SITE_ROOT)[0]
DATA_FOLDER = SITE_ROOT
if (getattr(sys, 'frozen', False) == 'macosx_app'):
DATA_FOLDER = os.path.join(
os.path.join(os.path.join(os.environ['HOME'], 'Library'),
'Application Support'), 'Libreosteo')
SITE_ROOT = os.path.join(os.path.split(SITE_ROOT)[0], 'Resources')
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
else:
SITE_ROOT = BASE_DIR
DATA_FOLDER = os.path.join(SITE_ROOT, "data")
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
from django.utils.translation import ugettext_lazy as _
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8xmh#fjyiamw^-_ro9m29^6^81^kc!aiczp)gvb#7with$dzb6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
LOCALE_PATHS = ('locale', os.path.join(SITE_ROOT, 'django', 'conf', 'locale'),
os.path.join(SITE_ROOT, 'locale'))
APPEND_SLASH = False
DEMONSTRATION = False
COMPRESS_ENABLED = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles', 'libreosteoweb',
'django_filters', 'rest_framework', 'compressor', 'zipcode_lookup',
'protected_media', 'haystack', 'statici18n'
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'libreosteoweb.middleware.OneSessionPerUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'libreosteoweb.middleware.LoginRequiredMiddleware',
'libreosteoweb.middleware.OfficeSettingsMiddleware',
]
ROOT_URLCONF = 'Libreosteo.urls'
WSGI_APPLICATION = 'Libreosteo.wsgi.application'
STATIC_ROOT = os.path.join(SITE_ROOT, "static/")
MEDIA_ROOT = os.path.join(DATA_FOLDER, "media/")
TEMPLATES = [
{
'BACKEND':
'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(SITE_ROOT, 'templates'),
os.path.join(SITE_ROOT, 'static'),
],
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'Libreosteo.zip_loader.Loader',
]
},
},
]
TEMPLATE_ZIP_FILES = ('library.zip', )
# Additional locations of static files
#STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# os.path.join(SITE_ROOT, 'static'),
# )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_FOLDER, 'db.sqlite3'),
#'ATOMIC_REQUESTS' : True,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'fr'
LANGUAGES = (
('fr', _('French')),
('en', _('English')),
)
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/files/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
#'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication', ),
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.ModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES':
['rest_framework.permissions.IsAuthenticated'],
'DEFAULT_FILTER_BACKENDS':
('django_filters.rest_framework.DjangoFilterBackend', ),
'TEST_REQUEST_DEFAULT_FORMAT':
'json',
}
LOGIN_URL = 'accounts/login'
LOGIN_URL_NAME = 'login'
LOGOUT_URL_NAME = 'logout'
LOGIN_REDIRECT_URL = '/'
INITIALIZE_ADMIN_URL_NAME = 'install'
NO_REROUTE_PATTERN_URL = [
r'^accounts/create-admin/$', r'^internal/restore', r'^jsi18n',
r'^web-view/partials/restore', r'^web-view/partials/register'
]
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format':
'%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': False,
},
'django.server': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'libreosteoweb': {
'handlers': ['console'],
'level': 'INFO',
},
'libreosteoweb.api': {
'handlers': ['console'],
'level': 'INFO',
}
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(DATA_FOLDER, 'whoosh_index'),
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.rCSSMinFilter'
]
DISPLAY_SERVICE_NET_HELPER = True
PROTECTED_MEDIA_ROOT = os.path.join(DATA_FOLDER, "media")
PROTECTED_MEDIA_URL = "/files"
PROTECTED_MEDIA_LOCATION_PREFIX = "/internal" # Prefix used in nginx config
PROTECTED_MEDIA_AS_DOWNLOADS = False # Controls inclusion of a Content-Disposition header
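# Editor's note: since this module is Libreosteo/settings/base.py, the usual
# Django pattern is to layer environment-specific modules on top of it. A
# hypothetical Libreosteo/settings/production.py (not part of the project as
# shown) might look like:
#
#     import os
#     from .base import *  # noqa: F401,F403
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['libreosteo.example.org']    # placeholder host
#     SECRET_KEY = os.environ['DJANGO_SECRET_KEY']  # never ship the key above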
| libreosteo/Libreosteo | Libreosteo/settings/base.py | Python | gpl-3.0 | 9,670 | 0.000827 |
import google
import re
from bs4 import BeautifulSoup
def findContactPage(url):
html = google.get_page(url)
    soup = BeautifulSoup(html, "html.parser")
contactStr = soup.find_all('a', href=re.compile(".*?contact", re.IGNORECASE))
return contactStr
if __name__ == "__main__":
url = "http://www.wrangler.com/"
contactStr = findContactPage(url)
if(len(contactStr) > 0):
contactPage = google.get_page(contactStr[0].get("href"))
print contactStr[0].get("href")#.find_parents("a")
        soup = BeautifulSoup(contactPage, "html.parser")
emailStr = soup.find_all(text=re.compile("[\w\.-]+@[\w\.-]+"))
        if(len(emailStr) > 0):
            print emailStr
else:
print "could not find email"
else:
print "could not find contacts page"
| LeoYReyes/GoogleSearchAutomator | Crawler.py | Python | bsd-3-clause | 789 | 0.011407 |
"""projectash URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from ash import views
admin.site.site_header = 'System Administrator'
admin.site.site_title = 'site admin'
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', views.user_login, name='login'),
url(r'^home', views.home, name='home'),
url(r'^register', views.createAccount, name='registration'),
url(r'^logout', views.user_logout, name='logout'),
url(r'^expense', views.expense, name='expense'),
url(r'^income/', views.income, name='income'),
url(r'^incomes/', views.totalIncome, name='incomes'),
url(r'^contact', views.contact, name='contact'),
url(r'^creditors', views.creditors, name='creditors'),
url(r'^debtors', views.debtors, name='debtors'),
url(r'^calendar', views.calendar, name='calendar'),
url(r'^tasks', views.addTask, name='task'),
url(r'^debit/(?P<uid>\d+)/$', views.debit, name='debit'),
    url(r'^clear/(?P<uid>\d+)/$', views.clear_debit, name='clear_debit'),
    url(r'^viewDebt/(?P<uid>\d+)/$', views.view_debit, name='view_debit'),
url(r'^payDebt/(?P<pid>\d+)/$', views.pay_debt, name='payment'),
]
| Ashaba/jash | projectash/urls.py | Python | bsd-2-clause | 1,803 | 0.000555 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, overload, Union, List
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._operations._operations import build_get_answers_from_text_request, build_get_answers_request
from ..._patch import (
_validate_text_records,
_get_positional_body,
_verify_qna_id_and_question,
_handle_metadata_filter_conversion,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QuestionAnsweringClientOperationsMixin:
@overload
async def get_answers(
self, options: "_models.AnswersOptions", *, project_name: str, deployment_name: str, **kwargs: Any
) -> "_models.AnswersResult":
...
@overload
async def get_answers(
self,
*,
project_name: str,
deployment_name: str,
qna_id: Optional[int] = None,
question: Optional[str] = None,
top: Optional[int] = None,
user_id: Optional[str] = None,
confidence_threshold: Optional[float] = None,
answer_context: Optional["_models.KnowledgeBaseAnswerContext"] = None,
ranker_kind: Optional[str] = None,
filters: Optional["_models.QueryFilters"] = None,
short_answer_options: Optional["_models.ShortAnswerOptions"] = None,
include_unstructured_sources: Optional[bool] = None,
**kwargs: Any
) -> "_models.AnswersResult":
...
@distributed_trace_async
async def get_answers(self, *args, **kwargs) -> "_models.AnswersResult":
"""Answers the specified question using your knowledge base.
:param options: Positional only. POST body of the request. Either provide this
value or individual keyword arguments.
:type options: ~azure.ai.language.questionanswering.models.AnswersOptions
:keyword project_name: The name of the knowledge base project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
question.
:paramtype qna_id: int
:keyword question: User question to query against the knowledge base.
:paramtype question: str
:keyword top: Max number of answers to be returned for the question.
:paramtype top: int
:keyword user_id: Unique identifier for the user.
:paramtype user_id: str
:keyword confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1.
:paramtype confidence_threshold: float
:keyword answer_context: Context object with previous QnA's information.
:paramtype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext
:keyword ranker_kind: Type of ranker to be used. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_kind: str
:keyword filters: Filter QnAs based on given metadata list and knowledge base sources.
:paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword short_answer_options: To configure Answer span prediction feature.
:paramtype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
Sources.
:paramtype include_unstructured_sources: bool
:return: AnswersResult
:rtype: ~azure.ai.language.questionanswering.models.AnswersResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
options = _get_positional_body(*args, **kwargs) or _models.AnswersOptions(
qna_id=kwargs.pop("qna_id", None),
question=kwargs.pop("question", None),
top=kwargs.pop("top", None),
user_id=kwargs.pop("user_id", None),
confidence_threshold=kwargs.pop("confidence_threshold", None),
answer_context=kwargs.pop("answer_context", None),
ranker_kind=kwargs.pop("ranker_kind", None),
filters=kwargs.pop("filters", None),
short_answer_options=kwargs.pop("short_answer_options", None),
include_unstructured_sources=kwargs.pop("include_unstructured_sources", None),
)
_verify_qna_id_and_question(options)
options = _handle_metadata_filter_conversion(options)
cls = kwargs.pop("cls", None) # type: ClsType["_models.AnswersResult"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
project_name = kwargs.pop("project_name") # type: str
deployment_name = kwargs.pop("deployment_name") # type: str
json = self._serialize.body(options, "AnswersOptions")
request = build_get_answers_request(
content_type=content_type,
project_name=project_name,
deployment_name=deployment_name,
json=json,
template_url=self.get_answers.metadata["url"],
)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("AnswersResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_answers.metadata = {"url": "/:query-knowledgebases"} # type: ignore
@overload
async def get_answers_from_text(
self, options: "_models.AnswersFromTextOptions", **kwargs: Any
) -> "_models.AnswersFromTextResult":
...
@overload
async def get_answers_from_text(
self,
*,
question: str,
text_documents: List[Union[str, "_models.TextDocument"]],
language: Optional[str] = None,
**kwargs: Any
) -> "_models.AnswersFromTextResult":
...
@distributed_trace_async
async def get_answers_from_text(self, *args, **kwargs) -> "_models.AnswersFromTextResult":
"""Answers the specified question using the provided text in the body.
:param options: Positional only. POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:type options: ~azure.ai.language.questionanswering.models.AnswersFromTextOptions
:keyword question: User question to query against the given text records.
:paramtype question: str
:keyword text_documents: Text records to be searched for given question.
:paramtype text_documents: list[str or ~azure.ai.language.questionanswering.models.TextDocument]
:keyword language: Language of the text records. This is BCP-47 representation of a language.
For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
:paramtype language: str
:return: AnswersFromTextResult
:rtype: ~azure.ai.language.questionanswering.models.AnswersFromTextResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
options = _get_positional_body(*args, **kwargs) or _models.AnswersFromTextOptions(
question=kwargs.pop("question"),
text_documents=kwargs.pop("text_documents"),
language=kwargs.pop("language", self._default_language),
)
try:
options["records"] = _validate_text_records(options["records"])
except TypeError:
options.text_documents = _validate_text_records(options.text_documents)
cls = kwargs.pop("cls", None) # type: ClsType["_models.AnswersFromTextResult"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
json = self._serialize.body(options, "AnswersFromTextOptions")
request = build_get_answers_from_text_request(
content_type=content_type,
json=json,
template_url=self.get_answers_from_text.metadata["url"],
)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("AnswersFromTextResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_answers_from_text.metadata = {"url": "/:query-text"} # type: ignore
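# Usage sketch (an editor's addition; endpoint, key, and project names are
# placeholders, and the attribute names follow the AnswersResult model):
#
#     import asyncio
#     from azure.core.credentials import AzureKeyCredential
#     from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
#
#     async def main():
#         client = QuestionAnsweringClient(
#             "https://<my-resource>.cognitiveservices.azure.com",
#             AzureKeyCredential("<api-key>"),
#         )
#         async with client:
#             result = await client.get_answers(
#                 question="How long should my battery last?",
#                 project_name="<project-name>",
#                 deployment_name="production",
#             )
#             for answer in result.answers:
#                 print(answer.confidence, answer.answer)
#
#     asyncio.run(main())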
| Azure/azure-sdk-for-python | sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_operations/_operations.py | Python | mit | 10,950 | 0.004384 |
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import abc
import uuid
import os.path
import logging
import itertools
from collections import OrderedDict
from diffoscope.config import Config
from diffoscope.difference import Difference
from diffoscope.excludes import filter_excludes
from diffoscope.progress import Progress
from ..missing_file import MissingFile
from .file import path_apparent_size
from .fuzzy import perform_fuzzy_matching
NO_COMMENT = None
logger = logging.getLogger(__name__)
class Container(object, metaclass=abc.ABCMeta):
auto_diff_metadata = True
def __new__(cls, source):
if isinstance(source, MissingFile):
new = super(Container, MissingContainer).__new__(MissingContainer)
new.__init__(source)
return new
return super(Container, cls).__new__(cls)
def __init__(self, source):
self._source = source
# Keep a count of how "nested" we are
self.depth = 0
if hasattr(source, 'container') and source.container is not None:
self.depth = source.container.depth + 1
@property
def source(self):
return self._source
@abc.abstractmethod
def get_member_names(self):
raise NotImplementedError()
@abc.abstractmethod
def get_member(self, member_name):
raise NotImplementedError()
def get_path_name(self, dest_dir):
return os.path.join(dest_dir, str(uuid.uuid4()))
def get_filtered_members(self):
# If your get_member implementation is O(n) then this will be O(n^2)
# cost. In such cases it is HIGHLY RECOMMENDED to override this as well
for name in filter_excludes(self.get_member_names()):
yield name, self.get_member(name)
def perform_fuzzy_matching(self, my_members, other_members):
return perform_fuzzy_matching(my_members, other_members)
def get_adjusted_members(self):
"""
Returns an iterable of pairs. The key is what is used to match when
comparing containers. This may be used to e.g. strip off version
numbers, hashes, etc, efficiently for known file formats, so that we
don't need to use the expensive tlsh "fuzzy-hashing" logic.
Note that containers with 1 element are already force-compared against
other containers with 1 element, so you don't need to override this
method for those cases.
"""
return self.get_filtered_members()
def lookup_file(self, *names):
"""
Try to fetch a specific file by digging in containers.
"""
from .specialize import specialize
name, remainings = names[0], names[1:]
try:
file = self.get_member(name)
except KeyError:
return None
logger.debug("lookup_file(%s) -> %s", names, file)
specialize(file)
if not remainings:
return file
container = file.as_container
if not container:
return None
return container.lookup_file(*remainings)
def get_adjusted_members_sizes(self):
for name, member in self.get_adjusted_members():
if member.is_directory():
size = 4096 # default "size" of a directory
else:
size = path_apparent_size(member.path)
yield name, (member, size)
def comparisons(self, other):
my_members = OrderedDict(self.get_adjusted_members_sizes())
other_members = OrderedDict(other.get_adjusted_members_sizes())
total_size = sum(x[1] for x in itertools.chain(my_members.values(), other_members.values()))
# TODO: progress could be a bit more accurate here, give more weight to fuzzy-hashed files
# TODO: merge DirectoryContainer.comparisons() into this
with Progress(total_size) as p:
def prep_yield(my_name, other_name, comment=NO_COMMENT):
my_member, my_size = my_members.pop(my_name)
other_member, other_size = other_members.pop(other_name)
p.begin_step(my_size + other_size, msg=my_member.progress_name)
return my_member, other_member, comment
# if both containers contain 1 element, compare these
if len(my_members) == 1 and len(other_members) == 1:
yield prep_yield(next(iter(my_members.keys())),
next(iter(other_members.keys())))
return
other_names = set(other_members.keys())
# keep it sorted like my_members
both_names = [name for name in my_members.keys() if name in other_names]
for name in both_names:
yield prep_yield(name, name)
for my_name, other_name, score in self.perform_fuzzy_matching(my_members, other_members):
comment = "Files similar despite different names" \
" (difference score: {})".format(score)
yield prep_yield(my_name, other_name, comment)
if Config().new_file:
for my_member, my_size in my_members.values():
p.begin_step(my_size, msg=my_member.progress_name)
yield my_member, MissingFile('/dev/null', my_member), NO_COMMENT
for other_member, other_size in other_members.values():
p.begin_step(other_size, msg=other_member.progress_name)
yield MissingFile('/dev/null', other_member), other_member, NO_COMMENT
def compare(self, other, source=None, no_recurse=False):
from .compare import compare_files
def compare_pair(file1, file2, comment):
difference = compare_files(file1, file2, source=None, diff_content_only=no_recurse)
if comment:
if difference is None:
difference = Difference(None, file1.name, file2.name)
difference.add_comment(comment)
return difference
return filter(None, itertools.starmap(compare_pair, self.comparisons(other)))
class MissingContainer(Container):
def get_member_names(self):
return self.source.other_file.as_container.get_member_names()
def get_member(self, member_name):
return MissingFile('/dev/null')
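# Editor's illustration (hypothetical, not part of diffoscope): the abstract
# interface above only requires member enumeration and lookup. A minimal
# sketch, backed by an in-memory mapping of already-specialized files:
class DictContainer(Container):
    def __init__(self, source):
        super().__init__(source)
        self._members = {}  # member name -> file object
    def add_member(self, name, file_obj):
        self._members[name] = file_obj
    def get_member_names(self):
        return list(self._members)
    def get_member(self, member_name):
        # A plain KeyError here is exactly what lookup_file() above catches.
        return self._members[member_name]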
| ReproducibleBuilds/diffoscope | diffoscope/comparators/utils/container.py | Python | gpl-3.0 | 7,045 | 0.001136 |
# encoding:utf8
import numpy as np
import cv2
import base64
import beesion
import time
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
frame_processing_ratio = 4
frame_count = 0
cap = cv2.VideoCapture(1)
#cap2 = cv2.VideoCapture(1)
while(True):
_, frame = cap.read()
#_, frame2 = cap2.read()
if frame_count%frame_processing_ratio:
frame = cv2.resize(frame, (0,0), fx=0.33, fy=0.33)
if cv2.waitKey(1) & 0xFF == ord('c'):
_, img_png = cv2.imencode('.png', frame)
beesion.detect_text_front(img_png.tobytes())
_, img_png = cv2.imencode('.png', frame)
#faces = beesion.detect_faces(img_jpg.tobytes()) #google's
faces = beesion.detect_faces_offline(frame)# offline
if len(faces) == 1:
known_faces = beesion.load_known_faces()
cv2.imshow('frame',frame)
croped_faces = list()
face = faces[0]
y,x,h,w = face
frame = cv2.rectangle(frame, (x,y),(w,h),(0,255,0),2)
croped_faces.append(frame[y:h, w:x])
verification_result = beesion.verify_known_faces(known_faces, croped_faces[0])
logging.info(verification_result)
if verification_result and True in verification_result:
frame = cv2.putText(frame, 'Acceso permitido', (20,200),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 4)
while(cv2.waitKey(1) & 0xFF != ord('q')):
cv2.imshow('frame',frame)
elif verification_result and False in verification_result:
frame = cv2.putText(frame, "Acceso denegado", (20,200),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
else:
frame = cv2.putText(frame, "Por favor, no se mueva", (20,200),cv2.FONT_HERSHEY_SIMPLEX, 2, (255,0,0), 2)
cv2.imshow('frame',frame)
# cv2.imshow('frame2',frame2)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
    frame_count += 1
    if frame_count > 4:
        frame_count = 0  # keep the frame counter from growing without bound
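# Editor's note: the beesion module is project-specific. A minimal stand-in
# for the offline detection step using OpenCV's bundled Haar cascade could
# look like this (a sketch, not the project's implementation; the demo above
# unpacks faces as (y, x, h, w), so results would need reordering):
#
#     cascade = cv2.CascadeClassifier(
#         cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
#     def detect_faces_offline(frame):
#         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#         return cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)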
| beeva-albertorincon/beeva-poc-google-ocr-faces | code/demo.py | Python | apache-2.0 | 2,248 | 0.018238 |
if __name__ == '__main__':
n = int(input())
for i in range(n):
print(i**2)
| manishbisht/Competitive-Programming | Hackerrank/Practice/Python/1.introduction/05.Loops.py | Python | mit | 107 | 0.009346 |
__author__ = 'roscoe'
import os
from datetime import datetime
import qgis.utils
from src.geogigpy import Repository
from src.geogigpy import geogig
from src.geogigpy.geogigexception import GeoGigException
from qgis.core import QgsVectorLayer, QgsMapLayerRegistry
class GeoRepo(object):
def __init__(self, remote, path, repo_type):
"""constructor"""
self.repo_type = repo_type
self.remote = remote
self.path = path
self.sql_database = os.path.join(self.path, 'database.sqlite')
self.local_repo = self.connect2repo()
self.root_path = os.path.normpath(__file__)
def connect2repo(self):
if os.path.isdir(os.path.join(self.path, '.geogig')):
print "Set to existing repo"
local_repo = Repository(self.path)
return local_repo
else:
if self.repo_type=="remote":
local_repo = Repository.newrepofromclone(self.remote, self.path)
print "New repo from clone"
else:
local_repo = Repository(self.path, init=True)
print "New repo initialized at : %s" % self.path
return local_repo
def export_to_shapefiles(self):
for t in self.local_repo.trees:
if t.path not in ("layer_statistics", "views_layer_statistics", "virts_layer_statistics"):
                self.local_repo.exportshp('HEAD', t.path,
                                          os.path.join(self.path, t.path) + '.shp')
# layer = qgis.utils.iface.addVectorLayer(os.path.join(self.path, t.path) + '.shp', t.path, "ogr")
                vl = QgsVectorLayer("Point", "temporary_points", "memory")
                pr = vl.dataProvider()
                layer = qgis.utils.iface.addVectorLayer(os.path.join(self.path, t.path) + '.shp', t.path, "ogr")
                print layer.geometryType()
# layers = QgsMapLayerRegistry.instance().mapLayers()
# for name, layer in layers.iteritems():
# print 'name: ' + str(name), 'layer type: ' + str(layer.geometryType())
my_dir = self.path
print 'deleting %s' % my_dir
for fname in os.listdir(my_dir):
if fname.startswith(t.path):
os.remove(os.path.join(my_dir, fname))
def import_all_shapefiles(self):
for f in os.listdir(self.path):
if f.endswith(".shp"):
shp_path = os.path.join(self.path, f)
self.local_repo.importshp(shp_path)
def add_commit_push(self, name, email, message):
message += " " + str(datetime.now())
self.local_repo.config(geogig.USER_NAME, name)
self.local_repo.config(geogig.USER_EMAIL, email)
try:
self.import_all_shapefiles()
        except GeoGigException as e:
            print 'Error with import_all_shapefiles(): %s' % e
try:
self.local_repo.addandcommit(message)
print 'Repo added and committed.'
        except GeoGigException as e:
print e
def push_to_remote(self):
try:
self.local_repo.push("origin","master",True)
print 'Repo pushed.'
        except GeoGigException as e:
print e
def pull_from_remote(self):
try:
self.local_repo.pull("origin")
        except GeoGigException as e:
print e
# Notes:
# ------------------------------------------------------------------------
# changed self.connector.importpg to self.connector.importsl in repo.py
# changed commands.extend(["--table", table]) to commands.extend(["--all"])
# def importsl(self, database, table, add = False, dest = None):
# self.connector.importsl(database, table, add, dest)
# GeoGig can only import spatialite tables that have been created by
# export db from Geogig.
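# Usage sketch (an editor's addition; URL, paths, and identity are
# placeholders, and a QGIS/Python 2 environment is assumed):
#
#     repo = GeoRepo(remote='http://example.org/repos/survey',
#                    path='/data/geogig/survey',
#                    repo_type='remote')
#     repo.pull_from_remote()
#     repo.export_to_shapefiles()
#     # ... edit layers in QGIS ...
#     repo.add_commit_push('Jane Doe', 'jane@example.org', 'Field updates')
#     repo.push_to_remote()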
| roscoeZA/GeoGigSync | geo_repo.py | Python | cc0-1.0 | 4,025 | 0.003727 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.tensorboard_service import (
TensorboardServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.tensorboard_service import (
TensorboardServiceClient,
)
from google.cloud.aiplatform_v1.services.tensorboard_service import pagers
from google.cloud.aiplatform_v1.services.tensorboard_service import transports
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import tensorboard
from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard
from google.cloud.aiplatform_v1.types import tensorboard_data
from google.cloud.aiplatform_v1.types import tensorboard_experiment
from google.cloud.aiplatform_v1.types import (
tensorboard_experiment as gca_tensorboard_experiment,
)
from google.cloud.aiplatform_v1.types import tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_service
from google.cloud.aiplatform_v1.types import tensorboard_time_series
from google.cloud.aiplatform_v1.types import (
tensorboard_time_series as gca_tensorboard_time_series,
)
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None
assert (
TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TensorboardServiceClient, TensorboardServiceAsyncClient,]
)
def test_tensorboard_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TensorboardServiceGrpcTransport, "grpc"),
(transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_tensorboard_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [TensorboardServiceClient, TensorboardServiceAsyncClient,]
)
def test_tensorboard_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_tensorboard_service_client_get_transport_class():
transport = TensorboardServiceClient.get_transport_class()
available_transports = [
transports.TensorboardServiceGrpcTransport,
]
assert transport in available_transports
transport = TensorboardServiceClient.get_transport_class("grpc")
assert transport == transports.TensorboardServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TensorboardServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceClient),
)
@mock.patch.object(
TensorboardServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceAsyncClient),
)
def test_tensorboard_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TensorboardServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TensorboardServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
TensorboardServiceClient,
transports.TensorboardServiceGrpcTransport,
"grpc",
"true",
),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
TensorboardServiceClient,
transports.TensorboardServiceGrpcTransport,
"grpc",
"false",
),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TensorboardServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceClient),
)
@mock.patch.object(
TensorboardServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tensorboard_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [TensorboardServiceClient, TensorboardServiceAsyncClient]
)
@mock.patch.object(
TensorboardServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceClient),
)
@mock.patch.object(
TensorboardServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TensorboardServiceAsyncClient),
)
def test_tensorboard_service_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_tensorboard_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TensorboardServiceClient,
transports.TensorboardServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tensorboard_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_tensorboard_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TensorboardServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
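# NOTE: ``client_options`` may be passed as a plain dict, as above; the client
# coerces it to a ``client_options.ClientOptions`` instance before wiring the
# transport, so the dict and object forms tested here are equivalent.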
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TensorboardServiceClient,
transports.TensorboardServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tensorboard_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
scopes=None,
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
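# NOTE: In the assertion above ``scopes=None`` while ``default_scopes`` carries
# the service defaults; user-supplied scopes (see the scopes test earlier)
# would be forwarded in ``scopes`` instead of falling back to the defaults.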
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardRequest, dict,]
)
def test_create_tensorboard(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
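# Illustrative only (not collected by pytest): a minimal sketch of calling
# ``create_tensorboard`` outside these mocks. The parent below is a
# hypothetical resource name, and real credentials/project are assumed; the
# method returns a long-running operation whose ``result()`` blocks until the
# Tensorboard is created.
def _example_create_tensorboard_usage():  # pragma: no cover
    client = TensorboardServiceClient()
    lro = client.create_tensorboard(
        parent="projects/my-project/locations/us-central1",
        tensorboard=gca_tensorboard.Tensorboard(display_name="my-tensorboard"),
    )
    return lro.result()  # waits for the create operation to complete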
def test_create_tensorboard_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
client.create_tensorboard()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRequest()
@pytest.mark.asyncio
async def test_create_tensorboard_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.CreateTensorboardRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_tensorboard_async_from_dict():
await test_create_tensorboard_async(request_type=dict)
def test_create_tensorboard_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tensorboard_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tensorboard(
parent="parent_value",
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard
mock_val = gca_tensorboard.Tensorboard(name="name_value")
assert arg == mock_val
def test_create_tensorboard_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tensorboard(
tensorboard_service.CreateTensorboardRequest(),
parent="parent_value",
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_tensorboard_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tensorboard(
parent="parent_value",
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard
mock_val = gca_tensorboard.Tensorboard(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tensorboard(
tensorboard_service.CreateTensorboardRequest(),
parent="parent_value",
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.GetTensorboardRequest, dict,]
)
def test_get_tensorboard(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard.Tensorboard(
name="name_value",
display_name="display_name_value",
description="description_value",
blob_storage_path_prefix="blob_storage_path_prefix_value",
run_count=989,
etag="etag_value",
)
response = client.get_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard.Tensorboard)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value"
assert response.run_count == 989
assert response.etag == "etag_value"
def test_get_tensorboard_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
client.get_tensorboard()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRequest()
@pytest.mark.asyncio
async def test_get_tensorboard_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.GetTensorboardRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard.Tensorboard(
name="name_value",
display_name="display_name_value",
description="description_value",
blob_storage_path_prefix="blob_storage_path_prefix_value",
run_count=989,
etag="etag_value",
)
)
response = await client.get_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard.Tensorboard)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value"
assert response.run_count == 989
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_tensorboard_async_from_dict():
await test_get_tensorboard_async(request_type=dict)
def test_get_tensorboard_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
call.return_value = tensorboard.Tensorboard()
client.get_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tensorboard_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard.Tensorboard()
)
await client.get_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tensorboard_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard.Tensorboard()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tensorboard(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_tensorboard_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tensorboard(
tensorboard_service.GetTensorboardRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_tensorboard_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard.Tensorboard()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tensorboard(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tensorboard_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tensorboard(
tensorboard_service.GetTensorboardRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.UpdateTensorboardRequest, dict,]
)
def test_update_tensorboard(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_tensorboard_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
client.update_tensorboard()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRequest()
@pytest.mark.asyncio
async def test_update_tensorboard_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.UpdateTensorboardRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_tensorboard_async_from_dict():
await test_update_tensorboard_async(request_type=dict)
def test_update_tensorboard_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardRequest()
request.tensorboard.name = "tensorboard.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_tensorboard_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardRequest()
request.tensorboard.name = "tensorboard.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[
"metadata"
]
def test_update_tensorboard_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_tensorboard(
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard
mock_val = gca_tensorboard.Tensorboard(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
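# Illustrative only (not collected by pytest): a minimal sketch of a partial
# update using the flattened signature exercised above. The resource name and
# field path are hypothetical; only fields listed in ``update_mask`` would be
# written, and real credentials are assumed.
def _example_update_tensorboard_usage():  # pragma: no cover
    client = TensorboardServiceClient()
    lro = client.update_tensorboard(
        tensorboard=gca_tensorboard.Tensorboard(
            name="projects/my-project/locations/us-central1/tensorboards/123",
            display_name="renamed",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    return lro.result()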
def test_update_tensorboard_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tensorboard(
tensorboard_service.UpdateTensorboardRequest(),
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tensorboard_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tensorboard(
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard
mock_val = gca_tensorboard.Tensorboard(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tensorboard_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tensorboard(
tensorboard_service.UpdateTensorboardRequest(),
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ListTensorboardsRequest, dict,]
)
def test_list_tensorboards(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardsResponse(
next_page_token="next_page_token_value",
)
response = client.list_tensorboards(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardsPager)
assert response.next_page_token == "next_page_token_value"
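# Illustrative only (not collected by pytest): the pager returned by
# ``list_tensorboards`` is directly iterable and fetches subsequent pages
# transparently. The parent is a hypothetical resource name and real
# credentials are assumed.
def _example_list_tensorboards_usage():  # pragma: no cover
    client = TensorboardServiceClient()
    for tb in client.list_tensorboards(
        parent="projects/my-project/locations/us-central1"
    ):
        print(tb.name)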
def test_list_tensorboards_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
client.list_tensorboards()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardsRequest()
@pytest.mark.asyncio
async def test_list_tensorboards_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ListTensorboardsRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_tensorboards(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tensorboards_async_from_dict():
await test_list_tensorboards_async(request_type=dict)
def test_list_tensorboards_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
call.return_value = tensorboard_service.ListTensorboardsResponse()
client.list_tensorboards(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tensorboards_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardsResponse()
)
await client.list_tensorboards(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tensorboards_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tensorboards(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tensorboards_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tensorboards(
tensorboard_service.ListTensorboardsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tensorboards_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tensorboards(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tensorboards_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tensorboards(
tensorboard_service.ListTensorboardsRequest(), parent="parent_value",
)
def test_list_tensorboards_pager(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardsResponse(
tensorboards=[
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[], next_page_token="def",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_tensorboards(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tensorboard.Tensorboard) for i in results)
def test_list_tensorboards_pages(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardsResponse(
tensorboards=[
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[], next_page_token="def",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),],
),
RuntimeError,
)
pages = list(client.list_tensorboards(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tensorboards_async_pager():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardsResponse(
tensorboards=[
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[], next_page_token="def",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),],
),
RuntimeError,
)
async_pager = await client.list_tensorboards(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tensorboard.Tensorboard) for i in responses)
@pytest.mark.asyncio
async def test_list_tensorboards_async_pages():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardsResponse(
tensorboards=[
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
tensorboard.Tensorboard(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[], next_page_token="def",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi",
),
tensorboard_service.ListTensorboardsResponse(
tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tensorboards(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
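# Illustrative only (not collected by pytest): the async pager supports both
# per-item iteration (``async for``) and page-wise iteration via ``.pages``,
# as exercised above. A hedged sketch, assuming real credentials:
async def _example_list_tensorboards_async_usage():  # pragma: no cover
    client = TensorboardServiceAsyncClient()
    pager = await client.list_tensorboards(
        parent="projects/my-project/locations/us-central1"
    )
    async for tb in pager:
        print(tb.name)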
@pytest.mark.parametrize(
"request_type", [tensorboard_service.DeleteTensorboardRequest, dict,]
)
def test_delete_tensorboard(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_tensorboard_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
client.delete_tensorboard()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRequest()
@pytest.mark.asyncio
async def test_delete_tensorboard_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.DeleteTensorboardRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_tensorboard_async_from_dict():
await test_delete_tensorboard_async(request_type=dict)
def test_delete_tensorboard_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tensorboard_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tensorboard_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tensorboard(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_tensorboard_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tensorboard(
tensorboard_service.DeleteTensorboardRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tensorboard_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tensorboard(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tensorboard_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tensorboard(
tensorboard_service.DeleteTensorboardRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardExperimentRequest, dict,]
)
def test_create_tensorboard_experiment(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
response = client.create_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
def test_create_tensorboard_experiment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
client.create_tensorboard_experiment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
@pytest.mark.asyncio
async def test_create_tensorboard_experiment_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.CreateTensorboardExperimentRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
)
response = await client.create_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
@pytest.mark.asyncio
async def test_create_tensorboard_experiment_async_from_dict():
await test_create_tensorboard_experiment_async(request_type=dict)
def test_create_tensorboard_experiment_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardExperimentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
client.create_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tensorboard_experiment_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardExperimentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment()
)
await client.create_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_experiment_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tensorboard_experiment(
parent="parent_value",
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_experiment
mock_val = gca_tensorboard_experiment.TensorboardExperiment(name="name_value")
assert arg == mock_val
arg = args[0].tensorboard_experiment_id
mock_val = "tensorboard_experiment_id_value"
assert arg == mock_val
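# NOTE: ``tensorboard_experiment_id`` above is the caller-chosen identifier;
# per standard Create semantics it typically becomes the final component of
# the created experiment's resource name (not asserted by this test).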
def test_create_tensorboard_experiment_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tensorboard_experiment(
tensorboard_service.CreateTensorboardExperimentRequest(),
parent="parent_value",
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
@pytest.mark.asyncio
async def test_create_tensorboard_experiment_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tensorboard_experiment(
parent="parent_value",
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_experiment
mock_val = gca_tensorboard_experiment.TensorboardExperiment(name="name_value")
assert arg == mock_val
arg = args[0].tensorboard_experiment_id
mock_val = "tensorboard_experiment_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_experiment_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tensorboard_experiment(
tensorboard_service.CreateTensorboardExperimentRequest(),
parent="parent_value",
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.GetTensorboardExperimentRequest, dict,]
)
def test_get_tensorboard_experiment(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
response = client.get_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
def test_get_tensorboard_experiment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
client.get_tensorboard_experiment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
@pytest.mark.asyncio
async def test_get_tensorboard_experiment_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.GetTensorboardExperimentRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
)
response = await client.get_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
@pytest.mark.asyncio
async def test_get_tensorboard_experiment_async_from_dict():
await test_get_tensorboard_experiment_async(request_type=dict)
def test_get_tensorboard_experiment_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardExperimentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
call.return_value = tensorboard_experiment.TensorboardExperiment()
client.get_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
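    # GAPIC clients attach an ``x-goog-request-params`` metadata entry built
    # from the resource name so the backend can route the request.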
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tensorboard_experiment_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardExperimentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_experiment.TensorboardExperiment()
)
await client.get_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tensorboard_experiment_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_experiment.TensorboardExperiment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tensorboard_experiment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_tensorboard_experiment_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tensorboard_experiment(
tensorboard_service.GetTensorboardExperimentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_tensorboard_experiment_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
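        # The async transport expects an awaitable, so wrap the response in a
        # FakeUnaryUnaryCall.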
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_experiment.TensorboardExperiment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tensorboard_experiment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tensorboard_experiment_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tensorboard_experiment(
tensorboard_service.GetTensorboardExperimentRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.UpdateTensorboardExperimentRequest, dict,]
)
def test_update_tensorboard_experiment(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
response = client.update_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
def test_update_tensorboard_experiment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
client.update_tensorboard_experiment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest()
@pytest.mark.asyncio
async def test_update_tensorboard_experiment_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.UpdateTensorboardExperimentRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
source="source_value",
)
)
response = await client.update_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.source == "source_value"
@pytest.mark.asyncio
async def test_update_tensorboard_experiment_async_from_dict():
await test_update_tensorboard_experiment_async(request_type=dict)
def test_update_tensorboard_experiment_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardExperimentRequest()
request.tensorboard_experiment.name = "tensorboard_experiment.name/value"
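    # For nested fields, the routing header key is the full field path
    # ("tensorboard_experiment.name"), not just the leaf field name.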
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
client.update_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_experiment.name=tensorboard_experiment.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tensorboard_experiment_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardExperimentRequest()
request.tensorboard_experiment.name = "tensorboard_experiment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment()
)
await client.update_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_experiment.name=tensorboard_experiment.name/value",
) in kw["metadata"]
def test_update_tensorboard_experiment_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
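        # ``update_mask`` is a google.protobuf.FieldMask naming which fields
        # of the resource the server should overwrite.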
client.update_tensorboard_experiment(
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_experiment
mock_val = gca_tensorboard_experiment.TensorboardExperiment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_tensorboard_experiment_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tensorboard_experiment(
tensorboard_service.UpdateTensorboardExperimentRequest(),
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tensorboard_experiment_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_experiment.TensorboardExperiment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tensorboard_experiment(
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_experiment
mock_val = gca_tensorboard_experiment.TensorboardExperiment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tensorboard_experiment_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tensorboard_experiment(
tensorboard_service.UpdateTensorboardExperimentRequest(),
tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ListTensorboardExperimentsRequest, dict,]
)
def test_list_tensorboard_experiments(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardExperimentsResponse(
next_page_token="next_page_token_value",
)
response = client.list_tensorboard_experiments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
# Establish that the response is the type that we expect.
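    # List methods return a Pager rather than the raw response; the pager
    # lazily issues follow-up requests using ``next_page_token``.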
assert isinstance(response, pagers.ListTensorboardExperimentsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tensorboard_experiments_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
client.list_tensorboard_experiments()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ListTensorboardExperimentsRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardExperimentsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_tensorboard_experiments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_async_from_dict():
await test_list_tensorboard_experiments_async(request_type=dict)
def test_list_tensorboard_experiments_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardExperimentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
call.return_value = tensorboard_service.ListTensorboardExperimentsResponse()
client.list_tensorboard_experiments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardExperimentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardExperimentsResponse()
)
await client.list_tensorboard_experiments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tensorboard_experiments_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardExperimentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tensorboard_experiments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tensorboard_experiments_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tensorboard_experiments(
tensorboard_service.ListTensorboardExperimentsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardExperimentsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tensorboard_experiments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tensorboard_experiments(
tensorboard_service.ListTensorboardExperimentsRequest(),
parent="parent_value",
)
def test_list_tensorboard_experiments_pager(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[], next_page_token="def",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
),
RuntimeError,
)
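        # The pager stores the metadata from the first call (including the
        # routing header) and reuses it when fetching subsequent pages.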
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_tensorboard_experiments(request={})
assert pager._metadata == metadata
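        # Iterating the pager walks every page transparently; the four mocked
        # responses above contain six experiments in total.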
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, tensorboard_experiment.TensorboardExperiment) for i in results
)
def test_list_tensorboard_experiments_pages(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[], next_page_token="def",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
),
RuntimeError,
)
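        # ``.pages`` yields whole responses rather than individual items; the
        # final page reports an empty ``next_page_token``.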
pages = list(client.list_tensorboard_experiments(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_async_pager():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[], next_page_token="def",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
),
RuntimeError,
)
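        # ``new_callable=mock.AsyncMock`` (set on the patch above) makes the
        # stub awaitable, so the pager can be consumed with ``async for``.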
async_pager = await client.list_tensorboard_experiments(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, tensorboard_experiment.TensorboardExperiment)
for i in responses
)
@pytest.mark.asyncio
async def test_list_tensorboard_experiments_async_pages():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_experiments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[], next_page_token="def",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardExperimentsResponse(
tensorboard_experiments=[
tensorboard_experiment.TensorboardExperiment(),
tensorboard_experiment.TensorboardExperiment(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_tensorboard_experiments(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [tensorboard_service.DeleteTensorboardExperimentRequest, dict,]
)
def test_delete_tensorboard_experiment(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
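        # Delete is a long-running operation: the stub returns a raw
        # ``operations_pb2.Operation`` that the client wraps in a future.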
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_tensorboard_experiment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
client.delete_tensorboard_experiment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest()
@pytest.mark.asyncio
async def test_delete_tensorboard_experiment_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.DeleteTensorboardExperimentRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_tensorboard_experiment_async_from_dict():
await test_delete_tensorboard_experiment_async(request_type=dict)
def test_delete_tensorboard_experiment_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardExperimentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tensorboard_experiment_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardExperimentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_tensorboard_experiment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tensorboard_experiment_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tensorboard_experiment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_tensorboard_experiment_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tensorboard_experiment(
tensorboard_service.DeleteTensorboardExperimentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tensorboard_experiment_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_experiment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tensorboard_experiment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tensorboard_experiment_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tensorboard_experiment(
tensorboard_service.DeleteTensorboardExperimentRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardRunRequest, dict,]
)
def test_create_tensorboard_run(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
response = client.create_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_create_tensorboard_run_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
client.create_tensorboard_run()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
@pytest.mark.asyncio
async def test_create_tensorboard_run_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.CreateTensorboardRunRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.create_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_create_tensorboard_run_async_from_dict():
await test_create_tensorboard_run_async(request_type=dict)
def test_create_tensorboard_run_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardRunRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
call.return_value = gca_tensorboard_run.TensorboardRun()
client.create_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tensorboard_run_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardRunRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun()
)
await client.create_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_run_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_run.TensorboardRun()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tensorboard_run(
parent="parent_value",
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
tensorboard_run_id="tensorboard_run_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_run
mock_val = gca_tensorboard_run.TensorboardRun(name="name_value")
assert arg == mock_val
arg = args[0].tensorboard_run_id
mock_val = "tensorboard_run_id_value"
assert arg == mock_val
def test_create_tensorboard_run_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tensorboard_run(
tensorboard_service.CreateTensorboardRunRequest(),
parent="parent_value",
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
tensorboard_run_id="tensorboard_run_id_value",
)
@pytest.mark.asyncio
async def test_create_tensorboard_run_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tensorboard_run(
parent="parent_value",
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
tensorboard_run_id="tensorboard_run_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_run
mock_val = gca_tensorboard_run.TensorboardRun(name="name_value")
assert arg == mock_val
arg = args[0].tensorboard_run_id
mock_val = "tensorboard_run_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_run_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tensorboard_run(
tensorboard_service.CreateTensorboardRunRequest(),
parent="parent_value",
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
tensorboard_run_id="tensorboard_run_id_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.BatchCreateTensorboardRunsRequest, dict,]
)
def test_batch_create_tensorboard_runs(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse()
response = client.batch_create_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse)
def test_batch_create_tensorboard_runs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
client.batch_create_tensorboard_runs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest()
@pytest.mark.asyncio
async def test_batch_create_tensorboard_runs_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.BatchCreateTensorboardRunsRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardRunsResponse()
)
response = await client.batch_create_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse)
@pytest.mark.asyncio
async def test_batch_create_tensorboard_runs_async_from_dict():
await test_batch_create_tensorboard_runs_async(request_type=dict)
def test_batch_create_tensorboard_runs_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchCreateTensorboardRunsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse()
client.batch_create_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_tensorboard_runs_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchCreateTensorboardRunsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardRunsResponse()
)
await client.batch_create_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_create_tensorboard_runs_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
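        # ``requests`` is the repeated field of per-run create requests that
        # the service executes in a single batched RPC.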
client.batch_create_tensorboard_runs(
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].requests
mock_val = [
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
]
assert arg == mock_val
def test_batch_create_tensorboard_runs_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_create_tensorboard_runs(
tensorboard_service.BatchCreateTensorboardRunsRequest(),
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
],
)
@pytest.mark.asyncio
async def test_batch_create_tensorboard_runs_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardRunsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_create_tensorboard_runs(
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].requests
mock_val = [
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_create_tensorboard_runs_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_create_tensorboard_runs(
tensorboard_service.BatchCreateTensorboardRunsRequest(),
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardRunRequest(parent="parent_value")
],
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.GetTensorboardRunRequest, dict,]
)
def test_get_tensorboard_run(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
response = client.get_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_get_tensorboard_run_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
client.get_tensorboard_run()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRunRequest()
@pytest.mark.asyncio
async def test_get_tensorboard_run_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.GetTensorboardRunRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.get_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_tensorboard_run_async_from_dict():
await test_get_tensorboard_run_async(request_type=dict)
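# A minimal, hedged sketch (not a collected test) of the async mocking pattern
# used throughout this module: grpc_helpers_async.FakeUnaryUnaryCall wraps a
# response in an already-completed awaitable call object, so awaiting the
# patched stub resolves immediately with that response.
async def _sketch_fake_unary_unary_call():
    fake_call = grpc_helpers_async.FakeUnaryUnaryCall(
        tensorboard_run.TensorboardRun(name="name_value")
    )
    # Awaiting the fake call yields the wrapped response directly.
    response = await fake_call
    assert response.name == "name_value"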
def test_get_tensorboard_run_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardRunRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
call.return_value = tensorboard_run.TensorboardRun()
client.get_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tensorboard_run_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardRunRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_run.TensorboardRun()
)
await client.get_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tensorboard_run_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_run.TensorboardRun()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tensorboard_run(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_tensorboard_run_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tensorboard_run(
tensorboard_service.GetTensorboardRunRequest(), name="name_value",
)
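# The ValueError asserted above (and in the analogous *_flattened_error tests
# below) comes from a guard in the generated client that treats a `request`
# object and flattened keyword arguments as mutually exclusive. A hedged
# sketch of that guard, paraphrased from the generated surface:
#
#     has_flattened_params = any([name])
#     if request is not None and has_flattened_params:
#         raise ValueError(
#             "If the `request` argument is set, then none of "
#             "the individual field arguments should be set."
#         )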
@pytest.mark.asyncio
async def test_get_tensorboard_run_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_run.TensorboardRun()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tensorboard_run(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tensorboard_run_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tensorboard_run(
tensorboard_service.GetTensorboardRunRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.UpdateTensorboardRunRequest, dict,]
)
def test_update_tensorboard_run(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
response = client.update_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_update_tensorboard_run_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
client.update_tensorboard_run()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
@pytest.mark.asyncio
async def test_update_tensorboard_run_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.UpdateTensorboardRunRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.update_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_run.TensorboardRun)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_update_tensorboard_run_async_from_dict():
await test_update_tensorboard_run_async(request_type=dict)
def test_update_tensorboard_run_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardRunRequest()
request.tensorboard_run.name = "tensorboard_run.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
call.return_value = gca_tensorboard_run.TensorboardRun()
client.update_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_run.name=tensorboard_run.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tensorboard_run_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardRunRequest()
request.tensorboard_run.name = "tensorboard_run.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun()
)
await client.update_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_run.name=tensorboard_run.name/value",
) in kw["metadata"]
def test_update_tensorboard_run_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_run.TensorboardRun()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_tensorboard_run(
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_run
mock_val = gca_tensorboard_run.TensorboardRun(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_tensorboard_run_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tensorboard_run(
tensorboard_service.UpdateTensorboardRunRequest(),
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tensorboard_run_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_run.TensorboardRun()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tensorboard_run(
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_run
mock_val = gca_tensorboard_run.TensorboardRun(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tensorboard_run_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tensorboard_run(
tensorboard_service.UpdateTensorboardRunRequest(),
tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ListTensorboardRunsRequest, dict,]
)
def test_list_tensorboard_runs(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardRunsResponse(
next_page_token="next_page_token_value",
)
response = client.list_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardRunsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tensorboard_runs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
client.list_tensorboard_runs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
@pytest.mark.asyncio
async def test_list_tensorboard_runs_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ListTensorboardRunsRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardRunsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardRunsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tensorboard_runs_async_from_dict():
await test_list_tensorboard_runs_async(request_type=dict)
def test_list_tensorboard_runs_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardRunsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
call.return_value = tensorboard_service.ListTensorboardRunsResponse()
client.list_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tensorboard_runs_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardRunsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardRunsResponse()
)
await client.list_tensorboard_runs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tensorboard_runs_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardRunsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tensorboard_runs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tensorboard_runs_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tensorboard_runs(
tensorboard_service.ListTensorboardRunsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tensorboard_runs_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardRunsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tensorboard_runs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tensorboard_runs_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tensorboard_runs(
tensorboard_service.ListTensorboardRunsRequest(), parent="parent_value",
)
def test_list_tensorboard_runs_pager(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[], next_page_token="def",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[tensorboard_run.TensorboardRun(),],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_tensorboard_runs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tensorboard_run.TensorboardRun) for i in results)
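# A hedged, uncollected sketch of what iterating the pager above does
# implicitly: each page is a raw ListTensorboardRunsResponse, and the pager
# stops requesting pages once a response arrives with an empty
# next_page_token, so the RuntimeError sentinel queued last is never raised.
def _sketch_manual_pagination(client):
    pager = client.list_tensorboard_runs(request={})
    for page in pager.pages:
        for run in page.tensorboard_runs:
            assert isinstance(run, tensorboard_run.TensorboardRun)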
def test_list_tensorboard_runs_pages(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[], next_page_token="def",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[tensorboard_run.TensorboardRun(),],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
),
RuntimeError,
)
pages = list(client.list_tensorboard_runs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tensorboard_runs_async_pager():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[], next_page_token="def",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[tensorboard_run.TensorboardRun(),],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
),
RuntimeError,
)
async_pager = await client.list_tensorboard_runs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tensorboard_run.TensorboardRun) for i in responses)
@pytest.mark.asyncio
async def test_list_tensorboard_runs_async_pages():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_runs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[], next_page_token="def",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[tensorboard_run.TensorboardRun(),],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardRunsResponse(
tensorboard_runs=[
tensorboard_run.TensorboardRun(),
tensorboard_run.TensorboardRun(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tensorboard_runs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [tensorboard_service.DeleteTensorboardRunRequest, dict,]
)
def test_delete_tensorboard_run(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_tensorboard_run_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
client.delete_tensorboard_run()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRunRequest()
@pytest.mark.asyncio
async def test_delete_tensorboard_run_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.DeleteTensorboardRunRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardRunRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
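# In the delete tests above, the raw operations_pb2.Operation returned by the
# mocked stub is wrapped by the client into an api_core long-running operation
# future, which is why the assertions check isinstance(response, future.Future)
# rather than comparing Operation protos directly.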
@pytest.mark.asyncio
async def test_delete_tensorboard_run_async_from_dict():
await test_delete_tensorboard_run_async(request_type=dict)
def test_delete_tensorboard_run_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardRunRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tensorboard_run_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardRunRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_tensorboard_run(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tensorboard_run_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tensorboard_run(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_tensorboard_run_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tensorboard_run(
tensorboard_service.DeleteTensorboardRunRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tensorboard_run_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_run), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tensorboard_run(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tensorboard_run_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tensorboard_run(
tensorboard_service.DeleteTensorboardRunRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict,]
)
def test_batch_create_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
response = client.batch_create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse
)
def test_batch_create_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
client.batch_create_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
response = await client.batch_create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse
)
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_async_from_dict():
await test_batch_create_tensorboard_time_series_async(request_type=dict)
def test_batch_create_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
call.return_value = (
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
client.batch_create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
await client.batch_create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_create_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_create_tensorboard_time_series(
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].requests
mock_val = [
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
]
assert arg == mock_val
def test_batch_create_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_create_tensorboard_time_series(
tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(),
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
],
)
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_create_tensorboard_time_series(
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].requests
mock_val = [
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_create_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_create_tensorboard_time_series(
tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(),
parent="parent_value",
requests=[
tensorboard_service.CreateTensorboardTimeSeriesRequest(
parent="parent_value"
)
],
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardTimeSeriesRequest, dict,]
)
def test_create_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
response = client.create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
def test_create_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
client.create_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
)
response = await client.create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_async_from_dict():
await test_create_tensorboard_time_series_async(request_type=dict)
def test_create_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
client.create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries()
)
await client.create_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tensorboard_time_series(
parent="parent_value",
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_time_series
mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
assert arg == mock_val
def test_create_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tensorboard_time_series(
tensorboard_service.CreateTensorboardTimeSeriesRequest(),
parent="parent_value",
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tensorboard_time_series(
parent="parent_value",
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tensorboard_time_series
mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tensorboard_time_series(
tensorboard_service.CreateTensorboardTimeSeriesRequest(),
parent="parent_value",
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.GetTensorboardTimeSeriesRequest, dict,]
)
def test_get_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
response = client.get_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
def test_get_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
client.get_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_get_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.GetTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
)
response = await client.get_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
@pytest.mark.asyncio
async def test_get_tensorboard_time_series_async_from_dict():
await test_get_tensorboard_time_series_async(request_type=dict)
def test_get_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardTimeSeriesRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
call.return_value = tensorboard_time_series.TensorboardTimeSeries()
client.get_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.GetTensorboardTimeSeriesRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_time_series.TensorboardTimeSeries()
)
await client.get_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_time_series.TensorboardTimeSeries()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tensorboard_time_series(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tensorboard_time_series(
tensorboard_service.GetTensorboardTimeSeriesRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_time_series.TensorboardTimeSeries()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tensorboard_time_series(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tensorboard_time_series(
tensorboard_service.GetTensorboardTimeSeriesRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict,]
)
def test_update_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
response = client.update_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
def test_update_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
client.update_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_update_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value",
display_name="display_name_value",
description="description_value",
value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
etag="etag_value",
plugin_name="plugin_name_value",
plugin_data=b"plugin_data_blob",
)
)
response = await client.update_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert (
response.value_type
== gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
)
assert response.etag == "etag_value"
assert response.plugin_name == "plugin_name_value"
assert response.plugin_data == b"plugin_data_blob"
@pytest.mark.asyncio
async def test_update_tensorboard_time_series_async_from_dict():
await test_update_tensorboard_time_series_async(request_type=dict)
def test_update_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardTimeSeriesRequest()
request.tensorboard_time_series.name = "tensorboard_time_series.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
client.update_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series.name=tensorboard_time_series.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.UpdateTensorboardTimeSeriesRequest()
request.tensorboard_time_series.name = "tensorboard_time_series.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries()
)
await client.update_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series.name=tensorboard_time_series.name/value",
) in kw["metadata"]
def test_update_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
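        # The update_mask limits the update to the listed proto field paths.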
client.update_tensorboard_time_series(
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tensorboard_time_series(
tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_tensorboard_time_series.TensorboardTimeSeries()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tensorboard_time_series(
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tensorboard_time_series(
tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ListTensorboardTimeSeriesRequest, dict,]
)
def test_list_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse(
next_page_token="next_page_token_value",
)
response = client.list_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardTimeSeriesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
client.list_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ListTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardTimeSeriesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async_from_dict():
await test_list_tensorboard_time_series_async(request_type=dict)
def test_list_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse()
client.list_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardTimeSeriesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardTimeSeriesResponse()
)
await client.list_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tensorboard_time_series(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tensorboard_time_series(
tensorboard_service.ListTensorboardTimeSeriesRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ListTensorboardTimeSeriesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tensorboard_time_series(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tensorboard_time_series(
tensorboard_service.ListTensorboardTimeSeriesRequest(),
parent="parent_value",
)
def test_list_tensorboard_time_series_pager(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[], next_page_token="def",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
),
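            # A trailing RuntimeError guards against the pager requesting
            # more pages than the mock provides.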
RuntimeError,
)
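        # The pager is expected to carry routing metadata derived from the
        # request's parent field (empty here, since the request is {}).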
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_tensorboard_time_series(request={})
assert pager._metadata == metadata
        results = list(pager)
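        # 3 + 0 + 1 + 2 items across the four mocked pages.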
assert len(results) == 6
assert all(
isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
for i in results
)
def test_list_tensorboard_time_series_pages(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[], next_page_token="def",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
),
RuntimeError,
)
pages = list(client.list_tensorboard_time_series(request={}).pages)
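        # The final page has an empty next_page_token, hence the trailing "".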
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async_pager():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[], next_page_token="def",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
),
RuntimeError,
)
async_pager = await client.list_tensorboard_time_series(request={},)
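        # Awaiting the method issues the first page request, so the first
        # page's token is available before iterating.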
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
for i in responses
)
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async_pages():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboard_time_series),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="abc",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[], next_page_token="def",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
],
next_page_token="ghi",
),
tensorboard_service.ListTensorboardTimeSeriesResponse(
tensorboard_time_series=[
tensorboard_time_series.TensorboardTimeSeries(),
tensorboard_time_series.TensorboardTimeSeries(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_tensorboard_time_series(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict,]
)
def test_delete_tensorboard_time_series(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
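    # Delete is a long-running operation, so the client surfaces the raw
    # Operation proto to the caller as a future.Future.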
assert isinstance(response, future.Future)
def test_delete_tensorboard_time_series_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
client.delete_tensorboard_time_series()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_async_from_dict():
await test_delete_tensorboard_time_series_async(request_type=dict)
def test_delete_tensorboard_time_series_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_tensorboard_time_series(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tensorboard_time_series_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tensorboard_time_series(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_tensorboard_time_series_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tensorboard_time_series(
tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_tensorboard_time_series), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tensorboard_time_series(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tensorboard_time_series(
tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict,],
)
def test_batch_read_tensorboard_time_series_data(
    request_type, transport: str = "grpc"
):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
response = client.batch_read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse
)
def test_batch_read_tensorboard_time_series_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
client.batch_read_tensorboard_time_series_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
)
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
response = await client.batch_read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse
)
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_async_from_dict():
await test_batch_read_tensorboard_time_series_data_async(request_type=dict)
def test_batch_read_tensorboard_time_series_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
request.tensorboard = "tensorboard/value"
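    # BatchRead routes on the tensorboard resource rather than a name field.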
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = (
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
client.batch_read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard=tensorboard/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
request.tensorboard = "tensorboard/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
await client.batch_read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard=tensorboard/value",) in kw["metadata"]
def test_batch_read_tensorboard_time_series_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_read_tensorboard_time_series_data(tensorboard="tensorboard_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard
mock_val = "tensorboard_value"
assert arg == mock_val
def test_batch_read_tensorboard_time_series_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_read_tensorboard_time_series_data(
tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(),
tensorboard="tensorboard_value",
)
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_read_tensorboard_time_series_data(
tensorboard="tensorboard_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard
mock_val = "tensorboard_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_read_tensorboard_time_series_data(
tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(),
tensorboard="tensorboard_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict,]
)
def test_read_tensorboard_time_series_data(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
response = client.read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse
)
def test_read_tensorboard_time_series_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
client.read_tensorboard_time_series_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
@pytest.mark.asyncio
async def test_read_tensorboard_time_series_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
)
response = await client.read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse
)
@pytest.mark.asyncio
async def test_read_tensorboard_time_series_data_async_from_dict():
await test_read_tensorboard_time_series_data_async(request_type=dict)
def test_read_tensorboard_time_series_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
request.tensorboard_time_series = "tensorboard_time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
client.read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series=tensorboard_time_series/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_tensorboard_time_series_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
request.tensorboard_time_series = "tensorboard_time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
)
await client.read_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series=tensorboard_time_series/value",
) in kw["metadata"]
def test_read_tensorboard_time_series_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.read_tensorboard_time_series_data(
tensorboard_time_series="tensorboard_time_series_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = "tensorboard_time_series_value"
assert arg == mock_val
def test_read_tensorboard_time_series_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.read_tensorboard_time_series_data(
tensorboard_service.ReadTensorboardTimeSeriesDataRequest(),
tensorboard_time_series="tensorboard_time_series_value",
)
@pytest.mark.asyncio
async def test_read_tensorboard_time_series_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.read_tensorboard_time_series_data(
tensorboard_time_series="tensorboard_time_series_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = "tensorboard_time_series_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_read_tensorboard_time_series_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.read_tensorboard_time_series_data(
tensorboard_service.ReadTensorboardTimeSeriesDataRequest(),
tensorboard_time_series="tensorboard_time_series_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ReadTensorboardBlobDataRequest, dict,]
)
def test_read_tensorboard_blob_data(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
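        # ReadTensorboardBlobData is a server-streaming RPC, so the stub
        # returns an iterator of response messages.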
call.return_value = iter(
[tensorboard_service.ReadTensorboardBlobDataResponse()]
)
response = client.read_tensorboard_blob_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse)
def test_read_tensorboard_blob_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
client.read_tensorboard_blob_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ReadTensorboardBlobDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
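        # On the async surface the stream is a UnaryStreamCall whose read()
        # coroutine yields the mocked response messages in turn.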
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]
)
response = await client.read_tensorboard_blob_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse)
@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_async_from_dict():
await test_read_tensorboard_blob_data_async(request_type=dict)
def test_read_tensorboard_blob_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ReadTensorboardBlobDataRequest()
request.time_series = "time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
call.return_value = iter(
[tensorboard_service.ReadTensorboardBlobDataResponse()]
)
client.read_tensorboard_blob_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "time_series=time_series/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ReadTensorboardBlobDataRequest()
request.time_series = "time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]
)
await client.read_tensorboard_blob_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "time_series=time_series/value",) in kw["metadata"]
def test_read_tensorboard_blob_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[tensorboard_service.ReadTensorboardBlobDataResponse()]
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.read_tensorboard_blob_data(time_series="time_series_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].time_series
mock_val = "time_series_value"
assert arg == mock_val
def test_read_tensorboard_blob_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.read_tensorboard_blob_data(
tensorboard_service.ReadTensorboardBlobDataRequest(),
time_series="time_series_value",
)
@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_tensorboard_blob_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.read_tensorboard_blob_data(
time_series="time_series_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].time_series
mock_val = "time_series_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.read_tensorboard_blob_data(
tensorboard_service.ReadTensorboardBlobDataRequest(),
time_series="time_series_value",
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.WriteTensorboardExperimentDataRequest, dict,]
)
def test_write_tensorboard_experiment_data(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse()
response = client.write_tensorboard_experiment_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.WriteTensorboardExperimentDataResponse
)
def test_write_tensorboard_experiment_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
client.write_tensorboard_experiment_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()
@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.WriteTensorboardExperimentDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
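        # FakeUnaryUnaryCall wraps the response so that awaiting the mocked
        # RPC resolves to it, as a real grpc.aio unary-unary call would.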
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardExperimentDataResponse()
)
response = await client.write_tensorboard_experiment_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, tensorboard_service.WriteTensorboardExperimentDataResponse
)
@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_async_from_dict():
await test_write_tensorboard_experiment_data_async(request_type=dict)
def test_write_tensorboard_experiment_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.WriteTensorboardExperimentDataRequest()
request.tensorboard_experiment = "tensorboard_experiment/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse()
client.write_tensorboard_experiment_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_experiment=tensorboard_experiment/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.WriteTensorboardExperimentDataRequest()
request.tensorboard_experiment = "tensorboard_experiment/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardExperimentDataResponse()
)
await client.write_tensorboard_experiment_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_experiment=tensorboard_experiment/value",
) in kw["metadata"]
def test_write_tensorboard_experiment_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.write_tensorboard_experiment_data(
tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=[
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_experiment
mock_val = "tensorboard_experiment_value"
assert arg == mock_val
arg = args[0].write_run_data_requests
mock_val = [
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
]
assert arg == mock_val
def test_write_tensorboard_experiment_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.write_tensorboard_experiment_data(
tensorboard_service.WriteTensorboardExperimentDataRequest(),
tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=[
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
],
)
@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_experiment_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardExperimentDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.write_tensorboard_experiment_data(
tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=[
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_experiment
mock_val = "tensorboard_experiment_value"
assert arg == mock_val
arg = args[0].write_run_data_requests
mock_val = [
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.write_tensorboard_experiment_data(
tensorboard_service.WriteTensorboardExperimentDataRequest(),
tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=[
tensorboard_service.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value"
)
],
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.WriteTensorboardRunDataRequest, dict,]
)
def test_write_tensorboard_run_data(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
response = client.write_tensorboard_run_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)
def test_write_tensorboard_run_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
client.write_tensorboard_run_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
@pytest.mark.asyncio
async def test_write_tensorboard_run_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.WriteTensorboardRunDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardRunDataResponse()
)
response = await client.write_tensorboard_run_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)
@pytest.mark.asyncio
async def test_write_tensorboard_run_data_async_from_dict():
await test_write_tensorboard_run_data_async(request_type=dict)
def test_write_tensorboard_run_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.WriteTensorboardRunDataRequest()
request.tensorboard_run = "tensorboard_run/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
client.write_tensorboard_run_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard_run=tensorboard_run/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_write_tensorboard_run_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.WriteTensorboardRunDataRequest()
request.tensorboard_run = "tensorboard_run/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardRunDataResponse()
)
await client.write_tensorboard_run_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tensorboard_run=tensorboard_run/value",) in kw[
"metadata"
]
def test_write_tensorboard_run_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.write_tensorboard_run_data(
tensorboard_run="tensorboard_run_value",
time_series_data=[
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_run
mock_val = "tensorboard_run_value"
assert arg == mock_val
arg = args[0].time_series_data
mock_val = [
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
]
assert arg == mock_val
def test_write_tensorboard_run_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.write_tensorboard_run_data(
tensorboard_service.WriteTensorboardRunDataRequest(),
tensorboard_run="tensorboard_run_value",
time_series_data=[
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
],
)
@pytest.mark.asyncio
async def test_write_tensorboard_run_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.write_tensorboard_run_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.WriteTensorboardRunDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.write_tensorboard_run_data(
tensorboard_run="tensorboard_run_value",
time_series_data=[
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_run
mock_val = "tensorboard_run_value"
assert arg == mock_val
arg = args[0].time_series_data
mock_val = [
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_write_tensorboard_run_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.write_tensorboard_run_data(
tensorboard_service.WriteTensorboardRunDataRequest(),
tensorboard_run="tensorboard_run_value",
time_series_data=[
tensorboard_data.TimeSeriesData(
tensorboard_time_series_id="tensorboard_time_series_id_value"
)
],
)
@pytest.mark.parametrize(
"request_type", [tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict,]
)
def test_export_tensorboard_time_series_data(request_type, transport: str = "grpc"):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
next_page_token="next_page_token_value",
)
response = client.export_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager)
assert response.next_page_token == "next_page_token_value"
def test_export_tensorboard_time_series_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
client.export_tensorboard_time_series_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
next_page_token="next_page_token_value",
)
)
response = await client.export_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_from_dict():
await test_export_tensorboard_time_series_data_async(request_type=dict)
def test_export_tensorboard_time_series_data_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
request.tensorboard_time_series = "tensorboard_time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
)
client.export_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series=tensorboard_time_series/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_field_headers_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
request.tensorboard_time_series = "tensorboard_time_series/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
)
await client.export_tensorboard_time_series_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"tensorboard_time_series=tensorboard_time_series/value",
) in kw["metadata"]
def test_export_tensorboard_time_series_data_flattened():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_tensorboard_time_series_data(
tensorboard_time_series="tensorboard_time_series_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = "tensorboard_time_series_value"
assert arg == mock_val
def test_export_tensorboard_time_series_data_flattened_error():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.export_tensorboard_time_series_data(
tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
tensorboard_time_series="tensorboard_time_series_value",
)
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.export_tensorboard_time_series_data(
tensorboard_time_series="tensorboard_time_series_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tensorboard_time_series
mock_val = "tensorboard_time_series_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.export_tensorboard_time_series_data(
tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
tensorboard_time_series="tensorboard_time_series_value",
)
def test_export_tensorboard_time_series_data_pager(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
next_page_token="abc",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[], next_page_token="def",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),],
next_page_token="ghi",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
),
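            # A trailing RuntimeError makes the test fail loudly if the
            # pager requests more pages than the mocked responses provide.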
RuntimeError,
)
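        # Build the routing metadata that the pager is expected to attach
        # to each page request.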
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_time_series", ""),)
),
)
pager = client.export_tensorboard_time_series_data(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) for i in results)
def test_export_tensorboard_time_series_data_pages(transport_name: str = "grpc"):
client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
next_page_token="abc",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[], next_page_token="def",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),],
next_page_token="ghi",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
),
RuntimeError,
)
pages = list(client.export_tensorboard_time_series_data(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_pager():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
next_page_token="abc",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[], next_page_token="def",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),],
next_page_token="ghi",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
),
RuntimeError,
)
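        # Awaiting the call returns an async pager primed with the first
        # page of results.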
async_pager = await client.export_tensorboard_time_series_data(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, tensorboard_data.TimeSeriesDataPoint) for i in responses
)
@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_pages():
client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.export_tensorboard_time_series_data),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
next_page_token="abc",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[], next_page_token="def",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),],
next_page_token="ghi",
),
tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
time_series_data_points=[
tensorboard_data.TimeSeriesDataPoint(),
tensorboard_data.TimeSeriesDataPoint(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.export_tensorboard_time_series_data(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TensorboardServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TensorboardServiceClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TensorboardServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TensorboardServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TensorboardServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TensorboardServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TensorboardServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TensorboardServiceGrpcTransport,
transports.TensorboardServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(client.transport, transports.TensorboardServiceGrpcTransport,)
def test_tensorboard_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TensorboardServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_tensorboard_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TensorboardServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_tensorboard",
"get_tensorboard",
"update_tensorboard",
"list_tensorboards",
"delete_tensorboard",
"create_tensorboard_experiment",
"get_tensorboard_experiment",
"update_tensorboard_experiment",
"list_tensorboard_experiments",
"delete_tensorboard_experiment",
"create_tensorboard_run",
"batch_create_tensorboard_runs",
"get_tensorboard_run",
"update_tensorboard_run",
"list_tensorboard_runs",
"delete_tensorboard_run",
"batch_create_tensorboard_time_series",
"create_tensorboard_time_series",
"get_tensorboard_time_series",
"update_tensorboard_time_series",
"list_tensorboard_time_series",
"delete_tensorboard_time_series",
"batch_read_tensorboard_time_series_data",
"read_tensorboard_time_series_data",
"read_tensorboard_blob_data",
"write_tensorboard_experiment_data",
"write_tensorboard_run_data",
"export_tensorboard_time_series_data",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_tensorboard_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TensorboardServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
def test_tensorboard_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TensorboardServiceTransport()
adc.assert_called_once()
def test_tensorboard_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TensorboardServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TensorboardServiceGrpcTransport,
transports.TensorboardServiceGrpcAsyncIOTransport,
],
)
def test_tensorboard_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TensorboardServiceGrpcTransport, grpc_helpers),
(transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TensorboardServiceGrpcTransport,
transports.TensorboardServiceGrpcAsyncIOTransport,
],
)
def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that if ssl_channel_credentials is not provided, then
    # client_cert_source_for_mtls is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_tensorboard_service_host_no_port():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_tensorboard_service_host_with_port():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_tensorboard_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TensorboardServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_tensorboard_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TensorboardServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TensorboardServiceGrpcTransport,
transports.TensorboardServiceGrpcAsyncIOTransport,
],
)
def test_tensorboard_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TensorboardServiceGrpcTransport,
transports.TensorboardServiceGrpcAsyncIOTransport,
],
)
def test_tensorboard_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_tensorboard_service_grpc_lro_client():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_tensorboard_service_grpc_lro_async_client():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_tensorboard_path():
project = "squid"
location = "clam"
tensorboard = "whelk"
expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
project=project, location=location, tensorboard=tensorboard,
)
actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard)
assert expected == actual
def test_parse_tensorboard_path():
expected = {
"project": "octopus",
"location": "oyster",
"tensorboard": "nudibranch",
}
path = TensorboardServiceClient.tensorboard_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_tensorboard_path(path)
assert expected == actual
def test_tensorboard_experiment_path():
project = "cuttlefish"
location = "mussel"
tensorboard = "winkle"
experiment = "nautilus"
expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
)
actual = TensorboardServiceClient.tensorboard_experiment_path(
project, location, tensorboard, experiment
)
assert expected == actual
def test_parse_tensorboard_experiment_path():
expected = {
"project": "scallop",
"location": "abalone",
"tensorboard": "squid",
"experiment": "clam",
}
path = TensorboardServiceClient.tensorboard_experiment_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path)
assert expected == actual
def test_tensorboard_run_path():
project = "whelk"
location = "octopus"
tensorboard = "oyster"
experiment = "nudibranch"
run = "cuttlefish"
expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
run=run,
)
actual = TensorboardServiceClient.tensorboard_run_path(
project, location, tensorboard, experiment, run
)
assert expected == actual
def test_parse_tensorboard_run_path():
expected = {
"project": "mussel",
"location": "winkle",
"tensorboard": "nautilus",
"experiment": "scallop",
"run": "abalone",
}
path = TensorboardServiceClient.tensorboard_run_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_tensorboard_run_path(path)
assert expected == actual
def test_tensorboard_time_series_path():
project = "squid"
location = "clam"
tensorboard = "whelk"
experiment = "octopus"
run = "oyster"
time_series = "nudibranch"
expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
run=run,
time_series=time_series,
)
actual = TensorboardServiceClient.tensorboard_time_series_path(
project, location, tensorboard, experiment, run, time_series
)
assert expected == actual
def test_parse_tensorboard_time_series_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
"tensorboard": "winkle",
"experiment": "nautilus",
"run": "scallop",
"time_series": "abalone",
}
path = TensorboardServiceClient.tensorboard_time_series_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TensorboardServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = TensorboardServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = TensorboardServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = TensorboardServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = TensorboardServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = TensorboardServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = TensorboardServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = TensorboardServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TensorboardServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = TensorboardServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TensorboardServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TensorboardServiceTransport, "_prep_wrapped_messages"
) as prep:
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TensorboardServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TensorboardServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
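    # Entering the client as an async context manager must not close the
    # channel; exiting it must close it exactly once.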
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(TensorboardServiceClient, transports.TensorboardServiceGrpcTransport),
(
TensorboardServiceAsyncClient,
transports.TensorboardServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-aiplatform
|
tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
|
Python
|
apache-2.0
| 360,461 | 0.000977 |
# http://deeplearning.net/tutorial/code/mlp.py
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where,
instead of feeding the input to the logistic regression directly, you insert an
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid). One can use many such
hidden layers, making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructuredtext en'
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from neuromancy.theano_tutorials.tutorial_logreg import LogisticRegression, load_data
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
        # `W` is initialized with `W_values`, which is uniformly sampled
        # from [-sqrt(6./(n_in+n_out)), sqrt(6./(n_in+n_out))]
        # for the tanh activation function.
        # The output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU.
        # Note : optimal initialization of weights is dependent on the
        #        activation function used (among other things).
        #        For example, results presented in [Xavier10] suggest that you
        #        should use 4 times larger initial weights for sigmoid
        #        compared to tanh.
        #        We have no info for other functions, so we use the same as
        #        tanh.
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
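# --- Illustrative sketch (hedged; plain NumPy, outside the Theano graph) ---
# The layer above computes tanh(x.W + b) with Xavier-style uniform init;
# the same forward pass written directly would be:
#
#     rng = numpy.random.RandomState(0)
#     bound = numpy.sqrt(6. / (n_in + n_out))
#     W = rng.uniform(-bound, bound, size=(n_in, n_out))
#     b = numpy.zeros(n_out)
#     hidden = numpy.tanh(numpy.dot(x, W) + b)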
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
    that has one or more layers of hidden units with nonlinear activations.
    Intermediate layers usually have as activation function tanh or the
    sigmoid function (defined here by a ``HiddenLayer`` class) while the
    top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hidden,
activation=T.tanh)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = abs(self.hiddenLayer.W).sum() \
+ abs(self.logRegressionLayer.W).sum()
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
+ (self.logRegressionLayer.W ** 2).sum()
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
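# --- Illustrative note (hedged): the two penalties above, in plain NumPy ---
#
#     L1     = abs(W1).sum() + abs(W2).sum()      # encourages sparse weights
#     L2_sqr = (W1 ** 2).sum() + (W2 ** 2).sum()  # encourages small weights
#
# Both terms are added to the training cost below, scaled by L1_reg / L2_reg.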
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(rng=rng, input=x, n_in=28 * 28,
n_hidden=n_hidden, n_out=10)
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]})
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
# the cost we minimize during training is the negative log likelihood
# of the model plus the regularization terms (L1 and L2);
# cost is expressed here symbolically
cost = classifier.negative_log_likelihood(y) \
+ L1_reg * classifier.L1 \
+ L2_reg * classifier.L2_sqr
    # compute the gradient of cost with respect to theta (stored in params)
# the resulting gradients will be stored in a list gparams
gparams = [] # try this: gparams = T.grad(cost, params)
for param in classifier.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
# train_model is a function that updates the model parameters by SGD.
# Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],gparams[i]) pairs.
updates = []
for param, gparam in zip(classifier.params, gparams):
updates.append((param, param - learning_rate * gparam))
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]})
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
    # go through this many minibatches before checking the network
    # on the validation set; in this case we check every epoch
best_params = None
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
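# --- Illustrative note (hedged): the early-stopping rule used above ---
# `patience` is an iteration horizon; a sufficiently large improvement on
# the validation set pushes the horizon further out:
#
#     if this_loss < best_loss * improvement_threshold:
#         patience = max(patience, iter * patience_increase)
#     if patience <= iter:
#         done_looping = True   # stop training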
if __name__ == '__main__':
test_mlp()
|
cliffclive/neuromancy
|
neuromancy/theano_tutorials/tutorial_mlp.py
|
Python
|
mit
| 13,906 | 0.001582 |
import os
import sys
import logging
import base64
from subprocess import check_call
from transparencyscript.constants import TRANSPARENCY_SUFFIX
from transparencyscript.utils import make_transparency_name, get_config_vars, get_password_vars, get_task_vars, \
get_transparency_vars, get_tree_head, get_lego_env, get_lego_command, get_save_command, get_chain, post_chain, \
write_to_file
from transparencyscript.signed_certificate_timestamp import SignedCertificateTimestamp
def main(name=None):
if name not in (None, '__main__'):
return
# Initialize logging for script
log = logging.getLogger()
log.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Store default parameters and keys in config_vars
if len(sys.argv) > 1:
config_path = sys.argv[1]
config_vars = get_config_vars(config_path)
else:
print("ERROR: script_config.json path is required as an argument.")
sys.exit(1)
# Store AWS credentials in password_vars
password_path = os.path.join(os.path.dirname(config_path), 'passwords.json')
password_vars = get_password_vars(password_path)
# Concatenate local config_vars with task_vars created from task.json
if "task_json" in config_vars:
task_path = config_vars["task_json"]
task_vars = get_task_vars(task_path)
config_vars = get_transparency_vars(config_vars, task_vars)
# Parse tree head from summary file
tree_head = get_tree_head(config_vars)
if tree_head is None:
raise Exception("No tree head found in summary file")
base_name = "{}.{}".format("invalid", TRANSPARENCY_SUFFIX)
trans_name = make_transparency_name(tree_head, config_vars["payload"]["version"],
config_vars["payload"]["stage-product"])
# Issue and save the certificate, then delete the extra files lego created
lego_env = get_lego_env(password_vars)
lego_command = get_lego_command(config_vars, base_name, trans_name)
save_command = get_save_command(config_vars, base_name)
cleanup_command = "rm -rf {}/lego".format(config_vars["work_dir"])
check_call(lego_command, env=lego_env, shell=True)
check_call(save_command, shell=True)
check_call(cleanup_command, shell=True)
# Submit chain to certificate transparency log if log_list exists
if 'log_list' in config_vars:
req = get_chain(config_vars)
resp_list = post_chain(config_vars["log_list"], req)
# Remove sct_list file if it already exists
sct_file_path = os.path.join(config_vars["public_artifact_dir"], config_vars["sct_filename"])
try:
os.remove(sct_file_path)
except OSError:
pass
# Append to sct_list file for each chain
for resp in resp_list:
sct = SignedCertificateTimestamp(resp)
sct = base64.b64encode(sct.to_rfc6962()).decode('utf-8')
write_to_file(sct_file_path, sct, open_mode='a')
main(name=__name__)
|
BrandonTang/binary-transparency
|
transparencyscript/script.py
|
Python
|
mpl-2.0
| 3,070 | 0.002606 |
from misc.standalone_helper import decode_string, double_decode_string
from .__general_data_v1 import GeneralEndpointDataV1
class PartitionsPartitionEndpoint(GeneralEndpointDataV1):
def _get(self) -> bool:
partition_id = self._request_holder.get_params()["partition"]
partition_id = decode_string(partition_id)
persisted_info = self._outbound_gate.get_last_measurement("partition", partition_id, "info")
if persisted_info is not None:
self._response_holder.update_body_data({
"timestamp": persisted_info["timestamp"],
"general-info": persisted_info["value"]
})
return True
@classmethod
def get_paths(cls):
return [
"/partitions/<string:partition>"
]
@classmethod
def get_name(cls):
return "partition entity"
@classmethod
def _get_parent(cls):
from .partitions import PartitionsEndpoint
return PartitionsEndpoint
@classmethod
def _get_children(cls):
from .partitions_partition_free import PartitionsPartitionFreeEndpoint
from .partitions_partition_total import PartitionsPartitionTotalEndpoint
from .partitions_partition_used import PartitionsPartitionUsedEndpoint
return [
("/free", PartitionsPartitionFreeEndpoint),
("/total", PartitionsPartitionTotalEndpoint),
("/used", PartitionsPartitionUsedEndpoint)
]
@classmethod
def _get_mandatory_parameters(cls):
return [
cls.get_partition_id_validator()
]
@classmethod
def get_partition_id_validator(cls):
return "partition", lambda x: cls._outbound_gate.is_argument_valid(
"partition", double_decode_string(x))
|
OpServ-Monitoring/opserv-backend
|
app/server/restful_api/data/v1/endpoints/partitions_partition.py
|
Python
|
gpl-3.0
| 1,794 | 0.001115 |
#!/usr/bin/env python
# This example script was ported from Perl Spreadsheet::WriteExcel module.
# The author of the Spreadsheet::WriteExcel module is John McNamara
# <jmcnamara@cpan.org>
__revision__ = """$Id: hyperlink2.py,v 1.3 2004/01/31 18:56:07 fufff Exp $"""
###############################################################################
#
# Example of how to use the WriteExcel module to write internal and external
# hyperlinks.
#
# If you wish to run this program and follow the hyperlinks you should create
# the following directory structure:
#
# C:\ -- Temp --+-- Europe
# |
# \-- Asia
#
#
# See also hyperlink1.py for web URL examples.
#
# reverse('(c)'), February 2002, John McNamara, jmcnamara@cpan.org
#
import pyXLWriter as xl
# Create three workbooks:
# C:\Temp\Europe\Ireland.xls
# C:\Temp\Europe\Italy.xls
# C:\Temp\Asia\China.xls
#
ireland = xl.Writer(r'C:\Temp\Europe\Ireland.xls')
ire_links = ireland.add_worksheet('Links')
ire_sales = ireland.add_worksheet('Sales')
ire_data = ireland.add_worksheet('Product Data')
italy = xl.Writer(r'C:\Temp\Europe\Italy.xls')
ita_links = italy.add_worksheet('Links')
ita_sales = italy.add_worksheet('Sales')
ita_data = italy.add_worksheet('Product Data')
china = xl.Writer(r'C:\Temp\Asia\China.xls')
cha_links = china.add_worksheet('Links')
cha_sales = china.add_worksheet('Sales')
cha_data = china.add_worksheet('Product Data')
# Add a format
format = ireland.add_format(color='green', bold=1)
ire_links.set_column('A:B', 25)
###############################################################################
#
# Examples of internal links
#
ire_links.write('A1', 'Internal links', format)
# Internal link
ire_links.write('A2', 'internal:Sales!A2')
# Internal link to a range
ire_links.write('A3', 'internal:Sales!A3:D3')
# Internal link with an alternative string
ire_links.write_url('A4', 'internal:Sales!A4', 'Link')
# Internal link with a format
ire_links.write('A5', 'internal:Sales!A5', format)
# Internal link with an alternative string and format
ire_links.write_url('A6', 'internal:Sales!A6', 'Link', format)
# Internal link (spaces in worksheet name)
ire_links.write('A7', "internal:'Product Data'!A7")
###############################################################################
#
# Examples of external links
#
ire_links.write('B1', 'External links', format)
# External link to a local file
ire_links.write('B2', 'external:Italy.xls')
# External link to a local file with worksheet
ire_links.write('B3', 'external:Italy.xls#Sales!B3')
# External link to a local file with worksheet and alternative string
ire_links.write_url('B4', 'external:Italy.xls#Sales!B4', 'Link')
# External link to a local file with worksheet and format
ire_links.write('B5', 'external:Italy.xls#Sales!B5', format)
# External link to a remote file, absolute path
ire_links.write('B6', 'external:c:/Temp/Asia/China.xls')
# External link to a remote file, relative path
ire_links.write('B7', 'external:../Asia/China.xls')
# External link to a remote file with worksheet
ire_links.write('B8', 'external:c:/Temp/Asia/China.xls#Sales!B8')
# External link to a remote file with worksheet (with spaces in the name)
ire_links.write('B9', "external:c:/Temp/Asia/China.xls#'Product Data'!B9")
###############################################################################
#
# Some utility links to return to the main sheet
#
ire_sales.write_url('A2', 'internal:Links!A2', 'Back')
ire_sales.write_url('A3', 'internal:Links!A3', 'Back')
ire_sales.write_url('A4', 'internal:Links!A4', 'Back')
ire_sales.write_url('A5', 'internal:Links!A5', 'Back')
ire_sales.write_url('A6', 'internal:Links!A6', 'Back')
ire_data.write_url('A7', 'internal:Links!A7', 'Back')
ita_links.write_url('A1', 'external:Ireland.xls#Links!B2', 'Back')
ita_sales.write_url('B3', 'external:Ireland.xls#Links!B3', 'Back')
ita_sales.write_url('B4', 'external:Ireland.xls#Links!B4', 'Back')
ita_sales.write_url('B5', 'external:Ireland.xls#Links!B5', 'Back')
cha_links.write_url('A1', 'external:../Europe/Ireland.xls#Links!B6', 'Back')
cha_sales.write_url('B8', 'external:../Europe/Ireland.xls#Links!B8', 'Back')
cha_data.write_url('B9', 'external:../Europe/Ireland.xls#Links!B9', 'Back')
ireland.close()
italy.close()
china.close()
|
evgenybf/pyXLWriter
|
examples/hyperlink2.py
|
Python
|
lgpl-2.1
| 4,310 | 0.001624 |
import datetime
import six
try:
from django.contrib.sites.requests import RequestSite
except ImportError: # Django < 1.9
from django.contrib.sites.models import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from django.core.paginator import Paginator, EmptyPage
from django.views.decorators.cache import cache_page
from graphite.util import json, epoch, epoch_to_dt, jsonResponse, HttpError, HttpResponse
from graphite.events.models import Event
from graphite.render.attime import parseATTime
from graphite.settings import EVENTS_PER_PAGE, _PAGE_LINKS
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return epoch(obj)
return json.JSONEncoder.default(self, obj)
def get_page_range(paginator, page):
"""
    Generate the range of page links to display around the current page.
    """
    page_range = []
    if page < 4:
        if len(paginator.page_range) > _PAGE_LINKS:
            page_range = [p for p in range(1, _PAGE_LINKS + 1)]
        else:
            page_range = paginator.page_range
    else:
        for p in paginator.page_range:
            if p < page and page - p < _PAGE_LINKS // 2:
                page_range.append(p)
            if p >= page and p - page < _PAGE_LINKS // 2:
                page_range.append(p)
        if len(page_range) > _PAGE_LINKS and page > 5:
            page_range = page_range[:-1]
return page_range
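# Illustrative behaviour (hedged; assuming _PAGE_LINKS = 10 and 20 pages):
#     get_page_range(paginator, 1)   -> [1, 2, ..., 10]
#     get_page_range(paginator, 10)  -> [6, 7, ..., 14]   (window around page)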
@cache_page(60 * 15)
def view_events(request, page_id=1):
if request.method == "GET":
try:
page_id = int(page_id)
except ValueError:
page_id = 1
events = fetch(request)
paginator = Paginator(events, EVENTS_PER_PAGE)
try:
events = paginator.page(page_id)
except EmptyPage:
events = paginator.page(paginator.num_pages)
pages = get_page_range(paginator, page_id)
context = {'events': events,
'site': RequestSite(request),
'pages': pages,
'protocol': 'https' if request.is_secure() else 'http'}
return render(request, 'events.html', context)
else:
return post_event(request)
@jsonResponse(encoder=DjangoJSONEncoder)
def jsonDetail(request, queryParams, event_id):
try:
e = Event.objects.get(id=event_id)
e.tags = e.tags.split()
return model_to_dict(e)
except ObjectDoesNotExist:
raise HttpError('Event matching query does not exist', status=404)
def detail(request, event_id):
if request.META.get('HTTP_ACCEPT') == 'application/json':
return jsonDetail(request, event_id)
e = get_object_or_404(Event, pk=event_id)
context = {'event': e}
return render(request, 'event.html', context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.body)
assert isinstance(event, dict)
tags = event.get('tags')
if tags is not None:
if isinstance(tags, list):
tags = ' '.join(tags)
elif not isinstance(tags, six.string_types):
return HttpResponse(
json.dumps({'error': '"tags" must be an array or space-separated string'}),
status=400)
else:
tags = None
if 'when' in event:
when = epoch_to_dt(event['when'])
else:
when = now()
Event.objects.create(
what=event.get('what'),
tags=tags,
when=when,
data=event.get('data', ''),
)
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
def get_data(request):
query_params = request.GET.copy()
query_params.update(request.POST)
if 'jsonp' in query_params:
response = HttpResponse(
"%s(%s)" % (query_params.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
content_type='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
content_type='application/json')
return response
def fetch(request):
if request.GET.get('from') is not None:
time_from = parseATTime(request.GET['from'])
else:
time_from = epoch_to_dt(0)
if request.GET.get('until') is not None:
time_until = parseATTime(request.GET['until'])
else:
time_until = now()
set_operation = request.GET.get('set')
tags = request.GET.get('tags')
if tags is not None:
tags = request.GET.get('tags').split(' ')
result = []
for x in Event.find_events(time_from, time_until, tags=tags, set_operation=set_operation):
# django-tagging's with_intersection() returns matches with unknown tags
# this is a workaround to ensure we only return positive matches
if set_operation == 'intersection':
if len(set(tags) & set(x.as_dict()['tags'])) == len(tags):
result.append(x.as_dict())
else:
result.append(x.as_dict())
return result
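# --- Illustrative note (hedged): the intersection check above, with sets ---
#     set(['a', 'b']) & set(['a', 'b', 'c'])  ->  set(['a', 'b'])
# A match is kept only when every requested tag is present, i.e. when the
# intersection is as long as the requested tag list.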
|
drax68/graphite-web
|
webapp/graphite/events/views.py
|
Python
|
apache-2.0
| 5,333 | 0.002813 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/routed-vlan/ipv6/addresses/address/vrrp/vrrp-group/interface-tracking/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for VRRP interface tracking
"""
__slots__ = (
"_path_helper", "_extmethods", "__track_interface", "__priority_decrement"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__track_interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="track-interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
self.__priority_decrement = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
0
),
is_leaf=True,
yang_name="priority-decrement",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"routed-vlan",
"ipv6",
"addresses",
"address",
"vrrp",
"vrrp-group",
"interface-tracking",
"state",
]
def _get_track_interface(self):
"""
Getter method for track_interface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/track_interface (leafref)
YANG Description: Sets an interface that should be
tracked for up/down events to dynamically change the
priority state of the VRRP group, and potentially
change the mastership if the tracked interface going
down lowers the priority sufficiently
"""
return self.__track_interface
def _set_track_interface(self, v, load=False):
"""
Setter method for track_interface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/track_interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_track_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_track_interface() directly.
YANG Description: Sets an interface that should be
tracked for up/down events to dynamically change the
priority state of the VRRP group, and potentially
change the mastership if the tracked interface going
down lowers the priority sufficiently
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="track-interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """track_interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="track-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=False)""",
}
)
self.__track_interface = t
if hasattr(self, "_set"):
self._set()
def _unset_track_interface(self):
self.__track_interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="track-interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
def _get_priority_decrement(self):
"""
Getter method for priority_decrement, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/priority_decrement (uint8)
YANG Description: Set the value to subtract from priority when
the tracked interface goes down
"""
return self.__priority_decrement
def _set_priority_decrement(self, v, load=False):
"""
Setter method for priority_decrement, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/priority_decrement (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority_decrement is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority_decrement() directly.
YANG Description: Set the value to subtract from priority when
the tracked interface goes down
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
0
),
is_leaf=True,
yang_name="priority-decrement",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority_decrement must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(0), is_leaf=True, yang_name="priority-decrement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""",
}
)
self.__priority_decrement = t
if hasattr(self, "_set"):
self._set()
def _unset_priority_decrement(self):
self.__priority_decrement = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
0
),
is_leaf=True,
yang_name="priority-decrement",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=False,
)
track_interface = __builtin__.property(_get_track_interface)
priority_decrement = __builtin__.property(_get_priority_decrement)
_pyangbind_elements = OrderedDict(
[
("track_interface", track_interface),
("priority_decrement", priority_decrement),
]
)
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/__init__.py
|
Python
|
apache-2.0
| 11,529 | 0.001475 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests magnetostatic interaction setters/getters
import unittest as ut
import espressomd
import numpy as np
from espressomd.magnetostatics import *
from tests_common import *
class MagnetostaticsInteractionsTests(ut.TestCase):
# Handle to espresso system
system = espressomd.System()
def setUp(self):
self.system.box_l = 10, 10, 10
if not self.system.part.exists(0):
self.system.part.add(id=0, pos=(0.1, 0.1, 0.1), dip=(1.3, 2.1, -6))
if not self.system.part.exists(1):
self.system.part.add(id=1, pos=(0, 0, 0), dip=(7.3, 6.1, -4))
if "DP3M" in espressomd.features():
test_DP3M = generate_test_for_class(DipolarP3M, dict(prefactor=1.0,
epsilon=0.0,
inter=1000,
mesh_off=[
0.5, 0.5, 0.5],
r_cut=2.4,
mesh=[
8, 8, 8],
cao=1,
alpha=12,
accuracy=0.01))
if "DIPOLAR_DIRECT_SUM" in espressomd.features():
test_DdsCpu = generate_test_for_class(
DipolarDirectSumCpu, dict(prefactor=3.4))
test_DdsRCpu = generate_test_for_class(
DipolarDirectSumWithReplicaCpu, dict(prefactor=3.4, n_replica=2))
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
tbereau/espresso
|
testsuite/python/magnetostaticInteractions.py
|
Python
|
gpl-3.0
| 2,692 | 0.007058 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments
# pylint: disable=global-statement, unused-import
"""NDArray configuration API."""
from __future__ import absolute_import as _abs
import ctypes
from ..base import _LIB
from ..base import c_str_array, c_handle_array
from ..base import NDArrayHandle, CachedOpHandle
from ..base import check_call
class NDArrayBase(object):
"""Base data structure for ndarray"""
__slots__ = ["handle", "writable"]
# pylint: disable= no-member
def __init__(self, handle, writable=True):
"""initialize a new NDArray
Parameters
----------
handle : NDArrayHandle
NDArray handle of C API
"""
if handle is not None:
assert isinstance(handle, NDArrayHandle)
self.handle = handle
self.writable = writable
def __del__(self):
check_call(_LIB.MXNDArrayFree(self.handle))
def __reduce__(self):
return (_ndarray_cls, (None,), self.__getstate__())
_ndarray_cls = None
def _set_ndarray_class(cls):
"""Set the symbolic class to be cls"""
global _ndarray_cls
_ndarray_cls = cls
def _imperative_invoke(handle, ndargs, keys, vals, out):
"""ctypes implementation of imperative invoke wrapper"""
if out is not None:
original_output = out
if isinstance(out, NDArrayBase):
out = (out,)
num_output = ctypes.c_int(len(out))
output_vars = c_handle_array(out)
output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
else:
original_output = None
output_vars = ctypes.POINTER(NDArrayHandle)()
num_output = ctypes.c_int(0)
# return output stypes to avoid the c_api call for checking
# a handle's stype in _ndarray_cls
out_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXImperativeInvokeEx(
ctypes.c_void_p(handle),
ctypes.c_int(len(ndargs)),
c_handle_array(ndargs),
ctypes.byref(num_output),
ctypes.byref(output_vars),
ctypes.c_int(len(keys)),
c_str_array(keys),
c_str_array([str(s) for s in vals]),
ctypes.byref(out_stypes)))
if original_output is not None:
return original_output
if num_output.value == 1:
return _ndarray_cls(ctypes.cast(output_vars[0], NDArrayHandle),
stype=out_stypes[0])
else:
return [_ndarray_cls(ctypes.cast(output_vars[i], NDArrayHandle),
stype=out_stypes[i])
for i in range(num_output.value)]
class CachedOp(object):
"""Cached operator handle."""
__slots__ = ["handle"]
def __init__(self, sym):
self.handle = CachedOpHandle()
check_call(_LIB.MXCreateCachedOp(
sym.handle,
ctypes.byref(self.handle)))
def __del__(self):
check_call(_LIB.MXFreeCachedOp(self.handle))
def __call__(self, *args, **kwargs):
"""ctypes implementation of imperative invoke wrapper"""
out = kwargs.pop('out', None)
if out is not None:
original_output = out
if isinstance(out, NDArrayBase):
out = (out,)
num_output = ctypes.c_int(len(out))
output_vars = c_handle_array(out)
output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
else:
original_output = None
output_vars = ctypes.POINTER(NDArrayHandle)()
num_output = ctypes.c_int(0)
if kwargs:
raise TypeError(
"CachedOp.__call__ got unexpected keyword argument(s): " + \
', '.join(kwargs.keys()))
# return output stypes to avoid the c_api call for checking
# a handle's stype in _ndarray_cls
out_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXInvokeCachedOpEx(
self.handle,
ctypes.c_int(len(args)),
c_handle_array(args),
ctypes.byref(num_output),
ctypes.byref(output_vars),
ctypes.byref(out_stypes)))
if original_output is not None:
return original_output
if num_output.value == 1:
return _ndarray_cls(ctypes.cast(output_vars[0], NDArrayHandle),
stype=out_stypes[0])
else:
return [_ndarray_cls(ctypes.cast(output_vars[i], NDArrayHandle),
stype=out_stypes[i])
for i in range(num_output.value)]
|
madjam/mxnet
|
python/mxnet/_ctypes/ndarray.py
|
Python
|
apache-2.0
| 5,374 | 0.000744 |
from django import http
from django.conf.urls import patterns
from django.contrib import admin
from django.db import models
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.template import loader, Context
from django.views.generic import View
def get_printable_field_value(instance, fieldname):
""" Get the display value of a model field, showing a comma-delimited
list for M2M fields.
"""
field = instance._meta.get_field(fieldname)
field_value = getattr(instance, fieldname)
if isinstance(field, models.ManyToManyField):
field_value = ', '.join([unicode(f) for f in
field_value.all()])
return field_value
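# Illustrative example (hedged): for a hypothetical M2M field `tags`,
#     get_printable_field_value(article, 'tags')  # -> u'django, ajax, admin'
# while a plain field value is returned unchanged.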
class AjaxModelFormView(View):
""" Handles AJAX updates of a single field on an object
(You likely don't need to use this directly as the admin
registers a URL for it itself.)
"""
model = None
valid_fields = None
def __init__(self, model, valid_fields, **kwargs):
self.model = model
self.valid_fields = valid_fields
def post(self, request, object_id, *args, **kwargs):
if not request.user or not request.user.is_staff:
return http.HttpResponseForbidden()
request = request.POST.copy()
fieldname = request.pop('field', None)[0]
form_prefix = request.pop('prefix', None)[0]
# prevent setting fields that weren't made AJAX-editable
if fieldname not in self.valid_fields:
return http.HttpResponseBadRequest()
ItemForm = modelform_factory(self.model, fields=(fieldname,))
instance = get_object_or_404(self.model, pk=object_id)
form = ItemForm(request, instance=instance, prefix=form_prefix)
if not form or not form.is_valid():
return http.HttpResponseBadRequest()
form.save()
new_value = get_printable_field_value(instance, fieldname)
return http.HttpResponse(new_value)
class AjaxModelAdmin(admin.ModelAdmin):
""" Admin class providing support for inline forms in
listview that are submitted through AJAX.
"""
def __init__(self, *args, **kwargs):
HANDLER_NAME_TPL = "_%s_ajax_handler"
if not hasattr(self, 'ajax_list_display'):
self.ajax_list_display = []
self.list_display = list(self.list_display)
        self.list_display = self.list_display + [HANDLER_NAME_TPL % name
                                                 for name in self.ajax_list_display]
super(AjaxModelAdmin, self).__init__(*args, **kwargs)
for name in self.ajax_list_display:
setattr(self, HANDLER_NAME_TPL % name,
self._get_field_handler(name))
self.ajax_item_template = loader.get_template('ajax_changelist/'
'field_form.html')
def get_urls(self):
""" Add endpoint for saving a new field value. """
urls = super(AjaxModelAdmin, self).get_urls()
list_urls = patterns('',
(r'^(?P<object_id>\d+)$',
AjaxModelFormView.as_view(model=self.model,
valid_fields=self.ajax_list_display)))
return list_urls + urls
def _get_field_handler(self, fieldname):
""" Handle rendering of AJAX-editable fields for the changelist, by
dynamically building a callable for each field.
"""
def handler_function(obj, *args, **kwargs):
ItemForm = modelform_factory(self.model, fields=(fieldname,))
form = ItemForm(instance=obj, prefix="c" + unicode(obj.id))
field_value = get_printable_field_value(obj, fieldname)
# Render the field value and edit form
return self.ajax_item_template.render(Context({
'object_id': obj.id,
'field_name': fieldname,
'form': form.as_p(),
'field_value': field_value
}))
handler_function.allow_tags = True
handler_function.short_description = fieldname
return handler_function
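    # --- Illustrative note (hedged): why a factory function is used above ---
    # Defining the handler inside the __init__ loop directly would late-bind
    # `name`, so every column would render the last field. The factory
    # freezes `fieldname` per column:
    #
    #     handlers = [self._get_field_handler(n) for n in names]   # correct
    #     handlers = [lambda o: render(o, n) for n in names]       # all see last n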
class Media:
#FIXME: dripping jQueries is straight-up wack.
js = ('//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'ajax_changelist/js/lib/jquery.django_csrf.js',
'ajax_changelist/js/admin.js',)
css = {
'all': ('ajax_changelist/css/admin.css',)
}
|
SohoTechLabs/django-ajax-changelist
|
ajax_changelist/admin.py
|
Python
|
mit
| 4,495 | 0.001557 |
# -*- coding: utf-8 -*-
#!/usr/bin/python
import numpy as np
import scipy
from sklearn import preprocessing
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from collections import Counter
from scipy.stats.stats import pearsonr
import data_readers
import feature_extractors as fe
import label_transformers as lt
import training_functions as training
import utils
def build_dataset(reader, phi_list, class_func, vectorizer=None, verbose=False):
"""Core general function for building experimental
hand-generated feature datasets.
Parameters
----------
reader : iterator
Should follow the format of data_readers. This is the dataset
we'll be featurizing.
phi_list : array of feature functions (default: [`manual_content_flags`])
Any function that takes a string as input and returns a
bool/int/float-valued dict as output.
class_func : function on the labels
A function that modifies the labels based on the experimental
design. If `class_func` returns None for a label, then that
item is ignored.
vectorizer : sklearn.feature_extraction.DictVectorizer
If this is None, then a new `DictVectorizer` is created and
used to turn the list of dicts created by `phi` into a
feature matrix. This happens when we are training.
If this is not None, then it's assumed to be a `DictVectorizer`
and used to transform the list of dicts. This happens in
assessment, when we take in new instances and need to
featurize them as we did in training.
Returns
-------
dict
A dict with keys 'X' (the feature matrix), 'y' (the list of
labels), 'vectorizer' (the `DictVectorizer`), and
'raw_examples' (the example strings, for error analysis).
"""
labels = []
feat_dicts = []
raw_examples = []
rows = []
for i, (paragraph, parse, label) in enumerate(reader()):
if i % 100 == 0:
print " Starting feature extraction for unit #%d " % (i+1)
cls = class_func(label)
#print label, cls
if cls != None:
labels.append(cls)
raw_examples.append(paragraph)
if verbose:
print cls, ":", paragraph
features = Counter()
for phi in phi_list:
cur_feats = phi(paragraph, parse)
if cur_feats is None:
continue
                # Merge the feature dicts, warning first when names overlap
                # (Counter union `|=` keeps the max value for shared keys).
overlap_feature_names = features.viewkeys() & cur_feats.viewkeys()
if verbose and len(overlap_feature_names) > 0:
print "Note: Overlap features are ", overlap_feature_names
features |= cur_feats
rows.append(cur_feats['row'])
feat_dicts.append(features)
if verbose:
print features
print
print "Completed all feature extraction: %d units" % (i+1)
# In training, we want a new vectorizer, but in
# assessment, we featurize using the existing vectorizer:
feat_matrix = None
if vectorizer == None:
vectorizer = DictVectorizer(sparse=True)
feat_matrix = vectorizer.fit_transform(feat_dicts)
else:
feat_matrix = vectorizer.transform(feat_dicts)
return {'X': feat_matrix,
'y': labels,
'vectorizer': vectorizer,
'raw_examples': raw_examples}
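# --- Illustrative sketch (hedged, standard scikit-learn usage) ---
# The vectorizer handling above is the usual fit/transform split:
#
#     vec = DictVectorizer(sparse=True)
#     X_train = vec.fit_transform(train_dicts)  # learns the feature space
#     X_test = vec.transform(test_dicts)        # reuses it; unseen keys dropped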
def experiment_features(
train_reader=data_readers.toy,
assess_reader=None,
train_size=0.7,
phi_list=[fe.manual_content_flags],
class_func=lt.identity_class_func,
train_func=training.fit_logistic_at_with_crossvalidation,
score_func=scipy.stats.stats.pearsonr,
verbose=True):
"""Generic experimental framework for hand-crafted features.
Either assesses with a random train/test split of `train_reader`
or with `assess_reader` if it is given.
Parameters
----------
train_reader : data iterator (default: `train_reader`)
Iterator for training data.
assess_reader : iterator or None (default: None)
If None, then the data from `train_reader` are split into
        a random train/test split, with the train percentage
determined by `train_size`. If not None, then this should
be an iterator for assessment data (e.g., `dev_reader`).
train_size : float (default: 0.7)
If `assess_reader` is None, then this is the percentage of
`train_reader` devoted to training. If `assess_reader` is
not None, then this value is ignored.
phi_list : array of feature functions (default: [`manual_content_flags`])
Any function that takes a string as input and returns a
bool/int/float-valued dict as output.
class_func : function on the labels
A function that modifies the labels based on the experimental
design. If `class_func` returns None for a label, then that
item is ignored.
train_func : model wrapper (default: `fit_logistic_at_with_crossvalidation`)
Any function that takes a feature matrix and a label list
as its values and returns a fitted model with a `predict`
function that operates on feature matrices.
    score_func : function (default: `scipy.stats.stats.pearsonr`)
        Any scoring function over (truth, predictions); the default
        is the Pearson correlation.
verbose : bool (default: True)
Whether to print out the model assessment to standard output.
Prints
-------
To standard output, if `verbose=True`
Model confusion matrix and a model precision/recall/F1 report.
Returns
-------
float
        The overall scoring metric for the assess set as determined by `score_func`.
float
The overall Cronbach's alpha for assess set
np.array
The confusion matrix (rows are truth, columns are predictions)
list of dictionaries
A list of {truth:_ , prediction:_, example:_} dicts on the assessment data
"""
# Train dataset:
train = build_dataset(train_reader, phi_list, class_func, vectorizer=None, verbose=verbose)
# Manage the assessment set-up:
indices = np.arange(0, len(train['y']))
X_train = train['X']
y_train = np.array(train['y'])
train_examples = np.array(train['raw_examples'])
X_assess = None
y_assess = None
assess_examples = None
if assess_reader == None:
print " Raw y training distribution:"
print " ", np.bincount(y_train)[1:]
indices_train, indices_assess, y_train, y_assess = train_test_split(
indices, y_train, train_size=train_size, stratify=y_train)
X_assess = X_train[indices_assess]
assess_examples = train_examples[indices_assess]
X_train = X_train[indices_train]
train_examples = train_examples[indices_train]
print " Train y distribution:"
print " ", np.bincount(y_train)[1:]
print " Test y distribution:"
print " ", np.bincount(y_assess)[1:]
else:
assess = build_dataset(
assess_reader,
phi_list,
class_func,
vectorizer=train['vectorizer'])
X_assess, y_assess, assess_examples = assess['X'], assess['y'], np.array(assess['raw_examples'])
# Normalize:
nonzero_cells = len(X_train.nonzero()[0])
total_cells = 1.*X_train.shape[0] * X_train.shape[1]
proportion_nonzero = nonzero_cells/total_cells
print "sparsity: %g/1 are nonzero" % proportion_nonzero
if proportion_nonzero > 0.5: # if dense matrix
X_train = X_train.toarray()
X_assess = X_assess.toarray()
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_assess = scaler.transform(X_assess)
else:
scaler = preprocessing.MaxAbsScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_assess = scaler.transform(X_assess)
# Train:
mod = train_func(X_train, y_train)
# Predictions:
predictions_on_assess = mod.predict(X_assess)
assess_performance = get_score_example_pairs(y_assess, predictions_on_assess, assess_examples)
predictions_on_train = mod.predict(X_train)
train_performance = get_score_example_pairs(y_train, predictions_on_train, train_examples)
# Report:
if verbose:
print "\n-- TRAINING RESULTS --"
print_verbose_overview(y_train, predictions_on_train)
print "\n-- ASSESSMENT RESULTS --"
print_verbose_overview(y_assess, predictions_on_assess)
try:
the_score = score_func(y_assess, predictions_on_assess)
except:
the_score = (0,0)
# Return the overall results on the assessment data:
return the_score, \
utils.cronbach_alpha(y_assess, predictions_on_assess), \
confusion_matrix(y_assess, predictions_on_assess), \
assess_performance
def get_score_example_pairs(y, y_hat, examples):
""" Return a list of dicts: {truth score, predicted score, example} """
    # Sort jointly with the examples so each row keeps its own example;
    # sorting (y, y_hat) alone and then indexing examples[i] by sorted
    # position would misalign truth/prediction pairs with their text.
    paired_results = sorted(zip(y, y_hat, examples), key=lambda x: x[0]-x[1])
    performance = []
    for truth, prediction, example in paired_results:
        performance.append({"truth": truth, "prediction": prediction, "example": example})
return performance
def print_verbose_overview(y, yhat):
""" Print a performance overview """
print "Correlation: ", pearsonr(y, yhat)[0]
print "Alpha: ", utils.cronbach_alpha(y, yhat)
print "Classification report:"
print classification_report(y, yhat, digits=3)
print "Confusion matrix:"
print confusion_matrix(y, yhat)
print " (Rows are truth; columns are predictions)"
def experiment_features_iterated(
train_reader=data_readers.toy,
assess_reader=None,
train_size=0.7,
phi_list=[fe.manual_content_flags],
class_func=lt.identity_class_func,
train_func=training.fit_logistic_at_with_crossvalidation,
score_func=utils.safe_weighted_f1,
verbose=True,
iterations=1):
"""
Generic iterated experimental framework for hand-crafted features.
"""
correlation_overall = []
cronbach_overall = []
conf_matrix_overall = None
assess_performance = []
while len(correlation_overall) < iterations:
print "\nStarting iteration: %d/%d" % (len(correlation_overall)+1, iterations)
try:
correlation_local, cronbach_local, conf_matrix_local, perf_local = experiment_features(
train_reader=train_reader,
assess_reader=assess_reader,
train_size=train_size,
phi_list=phi_list,
class_func=class_func,
train_func=train_func,
score_func=score_func,
verbose=verbose)
correlation_overall.append(correlation_local[0])
cronbach_overall.append(cronbach_local)
assess_performance.extend(perf_local)
if conf_matrix_overall is None:
conf_matrix_overall = conf_matrix_local
else:
conf_matrix_overall += conf_matrix_local
except (ValueError,UserWarning) as e:
print e
if verbose:
print "\n-- OVERALL --"
print correlation_overall
print cronbach_overall
print conf_matrix_overall
return correlation_overall, cronbach_overall, conf_matrix_overall, assess_performance
|
ptoman/icgauge
|
icgauge/experiment_frameworks.py
|
Python
|
mit
| 11,989 | 0.009425 |
'''
Copyleft Mar 10, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
# import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
import simuPOP as sim
from simuPOP.demography import *
model = MultiStageModel([
InstantChangeModel(T=200,
# start with an ancestral population of size 1000
N0=(1000, 'Ancestral'),
# change population size at 50 and 60
G=[50, 60],
# change to population size 200 and back to 1000
NG=[(200, 'bottleneck'), (1000, 'Post-Bottleneck')]),
ExponentialGrowthModel(
T=50,
# split the population into two subpopulations
N0=[(400, 'P1'), (600, 'P2')],
# expand to size 4000 and 5000 respectively
NT=[4000, 5000])]
)
def exp(T=10):
    return ExponentialGrowthModel(T=T, N0=1000, NT=200)
def lin(T=10):
    return LinearGrowthModel(T=T, N0=200, NT=1000)
model = MultiStageModel([exp(), lin(), exp(), lin(), exp(), lin(), exp(), lin(), exp(), lin()])
# model.init_size returns the initial population size
# migrate_to is required for migration
model=exp(50)
#model=lin(50)
#model=MultiStageModel([exp(50),lin(50)])
pop = sim.Population(size=model.init_size, loci=1,
infoFields=model.info_fields)
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[0.5, 0.5])
],
matingScheme=sim.RandomMating(subPopSize=model),
finalOps=
sim.Stat(alleleFreq=0, vars=['alleleFreq_sp']),
gen=model.num_gens
)
model
# print out population size and frequency
#for idx, name in enumerate(pop.subPopNames()):
#print('%s (%d): %.4f' % (name, pop.subPopSize(name), pop.dvars(idx).alleleFreq[0][0]))
# get a visual presentation of the demographic model
import matplotlib
model.plot('/home/arya/bottleneck.png',title='bottleneck')
|
airanmehr/bio
|
Scripts/Miscellaneous/Tutorials/demography.py
|
Python
|
mit
| 2,014 | 0.021351 |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift.common.splice`'''
import os
import errno
import ctypes
import ctypes.util
import logging
import tempfile
import unittest
import contextlib
import re
import mock
import six
from swift.common.splice import splice, tee
LOGGER = logging.getLogger(__name__)
def NamedTemporaryFile():
    '''Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base.
'''
if six.PY3:
return tempfile.NamedTemporaryFile(buffering=0)
else:
return tempfile.NamedTemporaryFile(bufsize=0)
def safe_close(fd):
'''Close a file descriptor, ignoring any exceptions'''
try:
os.close(fd)
except Exception:
LOGGER.exception('Error while closing FD')
@contextlib.contextmanager
def pipe():
'''Context-manager providing 2 ends of a pipe, closing them at exit'''
fds = os.pipe()
try:
yield fds
finally:
safe_close(fds[0])
safe_close(fds[1])
class TestSplice(unittest.TestCase):
'''Tests for `splice`'''
def setUp(self):
if not splice.available:
raise unittest.SkipTest('splice not available')
def test_flags(self):
'''Test flag attribute availability'''
self.assertTrue(hasattr(splice, 'SPLICE_F_MOVE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_NONBLOCK'))
self.assertTrue(hasattr(splice, 'SPLICE_F_MORE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_GIFT'))
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(splice.available)
def test_splice_pipe_to_pipe(self):
'''Test `splice` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = splice(p1a, None, p2b, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 3), b'def')
def test_splice_file_to_pipe(self):
'''Test `splice` from a file to a pipe'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
fd.write(b'abcdef')
fd.seek(0, os.SEEK_SET)
res = splice(fd, None, pb, None, 3, 0)
self.assertEqual(res, (3, None, None))
# `fd.tell()` isn't updated...
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 3)
fd.seek(0, os.SEEK_SET)
res = splice(fd, 3, pb, None, 3, 0)
self.assertEqual(res, (3, 6, None))
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 0)
self.assertEqual(os.read(pa, 6), b'abcdef')
def test_splice_pipe_to_file(self):
'''Test `splice` from a pipe to a file'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
os.write(pb, b'abcdef')
res = splice(pa, None, fd, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(fd.tell(), 3)
fd.seek(0, os.SEEK_SET)
res = splice(pa, None, fd, 3, 3, 0)
self.assertEqual(res, (3, None, 6))
self.assertEqual(fd.tell(), 0)
self.assertEqual(fd.read(6), b'abcdef')
@mock.patch.object(splice, '_c_splice')
def test_fileno(self, mock_splice):
'''Test handling of file-descriptors'''
splice(1, None, 2, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
mock_splice.reset_mock()
with open('/dev/zero', 'r') as fd:
splice(fd, None, fd, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((fd.fileno(), None, fd.fileno(), None, 3, 0),
{}))
@mock.patch.object(splice, '_c_splice')
def test_flags_list(self, mock_splice):
'''Test handling of flag lists'''
splice(1, None, 2, None, 3,
[splice.SPLICE_F_MOVE, splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, flags), {}))
mock_splice.reset_mock()
splice(1, None, 2, None, 3, [])
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] splice: %s' % (err, os.strerror(err))
try:
splice(fd, None, fd, None, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, splice, 1, None, 2, None, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `splice` support'''
class LibC(object):
'''A fake `libc` object tracking `splice` attribute access'''
def __init__(self):
self.splice_retrieved = False
@property
def splice(self):
self.splice_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Splice` instance
# Something you're not supposed to do in actual code
new_splice = type(splice)()
self.assertFalse(new_splice.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.splice_retrieved)
class TestTee(unittest.TestCase):
'''Tests for `tee`'''
def setUp(self):
if not tee.available:
raise unittest.SkipTest('tee not available')
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(tee.available)
def test_tee_pipe_to_pipe(self):
'''Test `tee` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = tee(p1a, p2b, 3, 0)
self.assertEqual(res, 3)
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 6), b'abcdef')
@mock.patch.object(tee, '_c_tee')
def test_fileno(self, mock_tee):
'''Test handling of file-descriptors'''
with pipe() as (pa, pb):
tee(pa, pb, 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
mock_tee.reset_mock()
tee(os.fdopen(pa, 'r'), os.fdopen(pb, 'w'), 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
@mock.patch.object(tee, '_c_tee')
def test_flags_list(self, mock_tee):
'''Test handling of flag lists'''
tee(1, 2, 3, [splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_tee.call_args, ((1, 2, 3, flags), {}))
mock_tee.reset_mock()
tee(1, 2, 3, [])
self.assertEqual(mock_tee.call_args, ((1, 2, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] tee: %s' % (err, os.strerror(err))
try:
tee(fd, fd, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, tee, 1, 2, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `tee` support'''
class LibC(object):
'''A fake `libc` object tracking `tee` attribute access'''
def __init__(self):
self.tee_retrieved = False
@property
def tee(self):
self.tee_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Tee` instance
# Something you're not supposed to do in actual code
new_tee = type(tee)()
self.assertFalse(new_tee.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.tee_retrieved)
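# Hedged usage sketch (not part of the test module): assuming a kernel with
# splice(2) support, moving bytes between two pipes mirrors the patterns
# exercised above.
#
#   if splice.available:
#       with pipe() as (r1, w1):
#           with pipe() as (r2, w2):
#               os.write(w1, b'hello')
#               splice(r1, None, w2, None, 5, 0)
#               assert os.read(r2, 5) == b'hello'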
|
nadeemsyed/swift
|
test/unit/common/test_splice.py
|
Python
|
apache-2.0
| 10,235 | 0 |
# -*- coding: utf-8 -*-
import sys
import pygeoip
import os.path
import socket
import sqlite3
import time
import re
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")
# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)
def init(botconfig):
open_DB(True)
def open_DB(createTable=False, db="module_geokick.db"):
conn = sqlite3.connect(db)
c = conn.cursor()
if createTable:
c.execute('CREATE TABLE IF NOT EXISTS exceptions (hostmask);')
conn.commit()
return conn, c
def command_geo_exempt(bot, user, channel, args):
""".geo_exempt nick!ident@hostname | Supports wildcards, for example *!*@*site.com (! and @ are required)"""
if get_op_status(user):
        if not get_exempt_status(args):
            # a valid mask must parse as nick!ident@host (e.g. *!*@*site.com)
            if get_data(args):
                conn, c = open_DB()
                # parameterized query avoids SQL injection via the hostmask
                c.execute("INSERT INTO exceptions VALUES (?);", (args,))
                conn.commit()
                conn.close()
                bot.say(channel, "Success: " + args.encode('utf-8') + " added to exempt list.")
                return True
            else:
                return bot.say(channel, "Error: invalid exempt. See .help geo_exempt")
else:
return bot.say(channel, "Error: exempt exists already!")
def command_geo_list(bot, user, channel, args):
if get_op_status(user):
conn, c = open_DB()
c.execute('SELECT hostmask FROM exceptions;')
rows = c.fetchall()
conn.close()
if rows:
excepts = str("")
for i in rows:
excepts += "[" + i[0] + "] "
return bot.say(channel, "Exceptions: " + excepts)
else:
return bot.say(channel, "Error: no exceptions added. See .help geo_exempt")
def command_geo_remove(bot, user, channel, args):
""".geo_remove hostname"""
if get_op_status(user):
        conn, c = open_DB()
        c.execute("SELECT hostmask FROM exceptions WHERE hostmask = ?", (args,))
        if c.fetchone():
            # reuse the open connection; parameterized to avoid SQL injection
            c.execute("DELETE FROM exceptions WHERE hostmask = ?", (args,))
            conn.commit()
            conn.close()
            bot.say(channel, "Success: exception removed.")
else:
            bot.say(channel, "Error: hostmask not found. Check .geo_list for broader exempts that may cover what you are trying to remove.")
def get_op_status(user):
if isAdmin(user):
return True
else:
        # authenticate against the qban module's op list
        conn, c = open_DB(db="module_qban_ops.db")
        c.execute("SELECT hostmask FROM ops WHERE hostmask = ?", (user,))
if c.fetchone():
retval = True
else:
retval = False
conn.close()
return retval
# try to split user string as dictionary with nick, ident and hostname
def get_data(user):
try:
temp = user.split('@')[0]
data = {'nick':getNick(user), 'ident':temp.split('!')[1], 'host':user.split('@')[1] }
return data
except:
return False
#@todo blacklist = ['elisa-mobile.fi', 'nat-elisa-mobile.fi']
def get_exempt_status(user):
if isAdmin(user):
return True
else:
data = get_data(user)
if data:
conn, c = open_DB()
c.execute('SELECT hostmask FROM exceptions;')
rows = c.fetchall()
conn.close()
# iterate all hostmasks
for i in rows:
row = get_data(i[0])
j = 0
# check current row data against that of the user data
for row_value in row.values():
for data_value in data.values():
# if a wildcard or exact match
if row_value == "*" or ( row_value in data_value and "*" not in row_value ):
j += 1
break
# if contains a wildcard, we have to regex
elif "*" in row_value:
                        # escape regex metacharacters, then map the escaped
                        # wildcard back to a match-anything pattern
                        regex = re.escape(row_value).replace("\\*", ".*")
if re.search(regex, data_value):
j += 1
break
# if counter reaches three, user matches exception list
if j == 3:
return True
return False
def handle_userJoined(bot, user, channel):
# if tested user is in exception list
if not get_exempt_status(user):
host = user.split('@')[1]
# attempt to get location data from the geoip database
try:
country = gi4.country_name_by_name(host)
except socket.gaierror:
country = None
        # if country information was found and it wasn't Finland
        if country and country != "Finland":
# grab nickname and hostname of the user
nick = getNick(user)
banmask = "*!*@" + host
banmask = banmask.encode('utf-8')
# ban & kick
bot.mode(channel, True, 'b', mask=banmask)
bot.kick(channel, nick, "Hosted from a banned country (" + country + ") or host (" + host + "). If you think you should have access, /msg lolfi .request_exempt")
# unban after 300s to avoid filling the banlist
time.sleep(300)
bot.mode(channel, False, 'b', mask=banmask)
def command_request_exempt(bot, user, channel, args):
if channel != "#projekti_lol":
nick = getNick(user)
bot.say("#projekti_lol".encode('utf-8'), "Notification: " + nick + " (" + user + ") requested and exempt.")
|
rnyberg/pyfibot
|
pyfibot/modules/module_geokick.py
|
Python
|
bsd-3-clause
| 5,174 | 0.015471 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetClicksForLink
# Returns the number of clicks on a single Bitly link.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetClicksForLink(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetClicksForLink Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetClicksForLink, self).__init__(temboo_session, '/Library/Bitly/LinkMetrics/GetClicksForLink')
def new_input_set(self):
return GetClicksForLinkInputSet()
def _make_result_set(self, result, path):
return GetClicksForLinkResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetClicksForLinkChoreographyExecution(session, exec_id, path)
class GetClicksForLinkInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetClicksForLink
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The OAuth access token provided by Bitly.)
"""
super(GetClicksForLinkInputSet, self)._set_input('AccessToken', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) The result limit. Defaults to 100. Range is 1 to 1000.)
"""
super(GetClicksForLinkInputSet, self)._set_input('Limit', value)
def set_Link(self, value):
"""
Set the value of the Link input for this Choreo. ((required, string) A Bitly link.)
"""
super(GetClicksForLinkInputSet, self)._set_input('Link', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in. Accepted values are "json" or "xml". Defaults to "json".)
"""
super(GetClicksForLinkInputSet, self)._set_input('ResponseFormat', value)
def set_Rollup(self, value):
"""
Set the value of the Rollup input for this Choreo. ((optional, boolean) Accepted values are true or false. When set to true, this returns data for multiple units rolled up to a single result instead of a separate value for each period of time.)
"""
super(GetClicksForLinkInputSet, self)._set_input('Rollup', value)
def set_Timestamp(self, value):
"""
Set the value of the Timestamp input for this Choreo. ((optional, date) An epoch timestamp, indicating the most recent time for which to pull metrics.)
"""
super(GetClicksForLinkInputSet, self)._set_input('Timestamp', value)
def set_Timezone(self, value):
"""
Set the value of the Timezone input for this Choreo. ((optional, string) An integer hour offset from UTC (-12..12), or a timezone string. Defaults to "America/New_York".)
"""
super(GetClicksForLinkInputSet, self)._set_input('Timezone', value)
def set_UnitName(self, value):
"""
        Set the value of the UnitName input for this Choreo. ((optional, string) The unit of time that corresponds to the query you want to run. Accepted values are: minute, hour, day, week, and month. Defaults to "day".)
"""
super(GetClicksForLinkInputSet, self)._set_input('UnitName', value)
def set_UnitValue(self, value):
"""
Set the value of the UnitValue input for this Choreo. ((optional, integer) An integer representing the amount of time to query for. Corresponds to the UnitName input. Defaults to -1 indicating to return all units of time.)
"""
super(GetClicksForLinkInputSet, self)._set_input('UnitValue', value)
class GetClicksForLinkResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetClicksForLink Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Bitly.)
"""
return self._output.get('Response', None)
class GetClicksForLinkChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetClicksForLinkResultSet(response, path)
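# Hedged usage sketch (not part of the generated module): Temboo Choreos
# follow a common execution pattern; the credentials below are placeholders.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = GetClicksForLink(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('BITLY_OAUTH_TOKEN')
#   inputs.set_Link('http://bit.ly/example')
#   results = choreo.execute_with_results(inputs)
#   print results.get_Response()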
|
jordanemedlock/psychtruths
|
temboo/core/Library/Bitly/LinkMetrics/GetClicksForLink.py
|
Python
|
apache-2.0
| 5,517 | 0.005256 |
# encoding: utf-8
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libstego. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009 2010 by Marko Krause <zeratul2099@googlemail.com>
from django import forms
class AESEncryptForm(forms.Form):
message = forms.CharField(label='Klartext', required=True)
key = forms.CharField(label='Schlüssel', required=True)
block_values = ((1, 'ECB'), (2, 'CBC'), (3, 'CFB'))
block_mode = forms.ChoiceField(choices=block_values, label='Blockmodus')
class AESDecryptForm(forms.Form):
cypher_text = forms.IntegerField(label='Geheimtext', required=True)
key = forms.CharField(label='Schlüssel', required=True)
block_values = ((1, 'ECB'), (2, 'CBC'), (3, 'CFB'))
block_mode = forms.ChoiceField(choices=block_values, label='Blockmodus')
class SimpleEncryptForm(forms.Form):
message = forms.CharField(label='Klartext', required=True)
key = forms.CharField(label='Schlüssel', required=True)
class SimpleDecryptForm(forms.Form):
cypher_text = forms.CharField(label='Geheimtext', required=True)
key = forms.CharField(label='Schlüssel', required=True)
class RSAEncryptForm(forms.Form):
message = forms.CharField(label='Klartext', required=True)
key = forms.FileField(label='Öffentlicher Schlüssel', required=True)
class RSADecryptForm(forms.Form):
cypher_text = forms.CharField(label='Geheimtext', required=True)
    key = forms.FileField(label='Privater Schlüssel', required=True)
class SimplestForm(forms.Form):
message = forms.CharField(label='Klar-/Geheimtext', required=True)
class CaesarEncryptForm(forms.Form):
message = forms.CharField(label='Klartext', required=True)
key_values = ((1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'),
(7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'),
(13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'),
(19, '19'), (20, '20'), (21, '21'), (22, '22'), (23, '23'), (24, '24'),
(25, '25'))
key = forms.ChoiceField(choices=key_values, label='Schlüssel')
class CaesarDecryptForm(forms.Form):
cypher_text = forms.CharField(label='Geheimtext', required=True)
key_values = ((1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'),
(7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'),
(13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'),
(19, '19'), (20, '20'), (21, '21'), (22, '22'), (23, '23'), (24, '24'),
(25, '25'))
key = forms.ChoiceField(choices=key_values, label='Schlüssel')
class AffineEncryptForm(forms.Form):
message = forms.CharField(label='Klartext', required=True)
a_values = ((1, '1'), (3, '3'), (5, '5'), (7, '7'), (9, '9'), (11, '11'), (15, '15'),
(17, '17'), (19, '19'), (21, '21'), (23, '23'), (25, '25'))
b_values = ((1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'),
(7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'),
(13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'),
(19, '19'), (20, '20'), (21, '21'), (22, '22'), (23, '23'), (24, '24'), (25, '25'))
keyA = forms.ChoiceField(choices=a_values, label='Schlüssel A')
keyB = forms.ChoiceField(choices=b_values, label='Schlüssel B')
class AffineDecryptForm(forms.Form):
cypher_text = forms.CharField(label='Geheimtext', required=True)
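    # stored values are the mod-26 inverses of the displayed keys
    # (e.g. 9*3 = 27 ≡ 1), presumably so decryption can reuse the
    # encryption routine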
a_values = ((1, '1'), (9, '3'), (21, '5'), (15, '7'), (3, '9'), (19, '11'), (7, '15'),
(23, '17'), (11, '19'), (5, '21'), (17, '23'), (25, '25'))
b_values = ((1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'),
(7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'),
(13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'),
(19, '19'), (20, '20'), (21, '21'), (22, '22'), (23, '23'), (24, '24'), (25, '25'))
keyA = forms.ChoiceField(choices=a_values, label='Schlüssel A')
keyB = forms.ChoiceField(choices=b_values, label='Schlüssel B')
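# Hedged usage sketch (not part of the original module): in a Django view,
# one of these forms would be bound and validated in the usual way (the
# view name and handling are placeholders).
#
#   def caesar_encrypt_view(request):
#       form = CaesarEncryptForm(request.POST or None)
#       if form.is_valid():
#           message = form.cleaned_data['message']
#           key = int(form.cleaned_data['key'])
#           ...  # shift `message` by `key` and render the result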
|
zeratul2099/crypt_app
|
crypto/models.py
|
Python
|
gpl-3.0
| 4,784 | 0.005029 |
#!/usr/bin/env python
import common
import sys
for name,ip,port in common.get_vm_config():
format_args = {'public_port': port,
'name': name,
'local_address': ip
}
print "%(local_address)s\t%(name)s %(name)s.acme.intern" % format_args
|
bcoding/docker-host-scripts
|
py/generate_etc_hosts.py
|
Python
|
unlicense
| 301 | 0.006645 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20150703_0836'),
]
operations = [
migrations.AlterField(
model_name='user',
name='followers',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='followers_rel_+'),
),
]
|
28harishkumar/Social-website-django
|
user/migrations/0003_auto_20150703_0843.py
|
Python
|
mit
| 485 | 0.002062 |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def listToDict(a):
x = {}
for b in a:
x[b] = True
return x
"""plainchars = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X", "Y", "Z", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "0", ".", "-", "_"]
plaindict = listToDict(plainchars)
print plaindict"""
def isLocalAddress(address, localdict):
splitaddress = address.split("@")
if len(splitaddress) == 2 and localdict.get(splitaddress[-1], False):
return True
else:
return False
def isPlain(text):
plaindict = {'-': True, '.': True, '1': True, '0': True, '3': True, '2': True, '5': True, '4': True, '7': True, '6': True, '9': True, '8': True, 'A': True, 'C': True, 'B': True, 'E': True, 'D': True, 'G': True, 'F': True, 'I': True, 'H': True, 'K': True, 'J': True, 'M': True, 'L': True, 'O': True, 'N': True, 'Q': True, 'P': True, 'S': True, 'R': True, 'U': True, 'T': True, 'W': True, 'V': True, 'Y': True, 'X': True, 'Z': True, '_': True, 'a': True, 'c': True, 'b': True, 'e': True, 'd': True, 'g': True, 'f': True, 'i': True, 'h': True, 'k': True, 'j': True, 'm': True, 'l': True, 'o': True, 'n': True, 'q': True, 'p': True, 's': True, 'r': True, 'u': True, 't': True, 'w': True, 'v': True, 'y': True, 'x': True, 'z': True}
for c in text:
if plaindict.get(c, False) == False:
return False
return True
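# Hedged usage sketch (not part of the original module):
#
#   local = listToDict(["example.com", "mail.example.com"])
#   isLocalAddress("user@example.com", local)    # -> True
#   isLocalAddress("user@elsewhere.org", local)  # -> False
#   isPlain("user_01")                           # -> True
#   isPlain("user 01")                           # -> False (space not allowed)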
|
sparkslabs/kamaelia_
|
Sketches/RJL/SMTP/MailShared.py
|
Python
|
apache-2.0
| 2,422 | 0.004129 |
import numpy as np
import os
from tensorutils.antisym import get_antisymmetrizer_product as asym
test_dir_path = os.path.dirname(os.path.realpath(__file__))
array_path_template = os.path.join(test_dir_path, "random_arrays", "{:s}.npy")
def test__composition_1():
array1 = np.load(array_path_template.format("15x15"))
array2 = asym("0") * array1
assert (np.allclose(array1, array2))
def test__composition_1_1():
array1 = np.load(array_path_template.format("15x15"))
array2 = asym("0/1") * array1
array3 = array1 - array1.transpose()
assert (np.allclose(array2, array3))
def test__composition_1_2():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("1/2") * array1
array3 = asym("0/1,2") * array2
array4 = array2 - array2.transpose((1, 0, 2)) - array2.transpose((2, 1, 0))
assert (np.allclose(array3, array4))
def test__composition_2_1():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("0/1") * array1
array3 = asym("0,1/2") * array2
array4 = array2 - array2.transpose((2, 1, 0)) - array2.transpose((0, 2, 1))
assert (np.allclose(array3, array4))
def test__composition_1_1_1():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("0/1/2") * array1
array3 = (array1
- array1.transpose((0, 2, 1))
- array1.transpose((1, 0, 2))
+ array1.transpose((1, 2, 0))
+ array1.transpose((2, 0, 1))
- array1.transpose((2, 1, 0)))
assert (np.allclose(array2, array3))
def test__composition_1_3():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("1/2/3") * array1
array3 = asym("0/1,2,3") * array2
array4 = (array2
- array2.transpose((1, 0, 2, 3))
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0)))
assert (np.allclose(array3, array4))
def test__composition_2_2():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1|2/3") * array1
array3 = asym("0,1/2,3") * array2
array4 = (array2
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 2, 1, 3))
- array2.transpose((0, 3, 2, 1))
+ array2.transpose((2, 3, 0, 1)))
assert (np.allclose(array3, array4))
def test__composition_3_1():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1/2") * array1
array3 = asym("0,1,2/3") * array2
array4 = (array2
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 3, 2, 1))
- array2.transpose((0, 1, 3, 2)))
assert (np.allclose(array3, array4))
def test__composition_1_2_1():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("1/2") * array1
array3 = asym("0/1,2/3") * array2
array4 = (array2
- array2.transpose((1, 0, 2, 3))
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 3, 2, 1))
- array2.transpose((0, 1, 3, 2))
+ array2.transpose((1, 0, 3, 2))
+ array2.transpose((2, 3, 0, 1))
+ array2.transpose((1, 3, 2, 0))
+ array2.transpose((2, 1, 3, 0))
+ array2.transpose((3, 0, 2, 1))
+ array2.transpose((3, 1, 0, 2)))
assert (np.allclose(array3, array4))
def test__expression_01():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = 0.25 * asym("0/1|2/3") * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
def test__expression_02():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = (0.25 * asym("0/1")) * asym("2/3") * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
def test__expression_03():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1") * (asym("2/3") * 0.25) * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
if __name__ == "__main__":
test__composition_1()
test__composition_1_1()
test__composition_1_2()
test__composition_2_1()
test__composition_1_1_1()
test__composition_1_3()
test__composition_2_2()
test__composition_3_1()
test__composition_1_2_1()
test__expression_01()
test__expression_02()
test__expression_03()
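# Hedged usage sketch (not part of the test module): the antisymmetrizer is
# applied with `*`; for a square matrix `a` this reproduces
# test__composition_1_1 in miniature.
#
#   a = np.arange(4.).reshape(2, 2)
#   np.allclose(asym("0/1") * a, a - a.T)  # -> True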
|
avcopan/meinsum
|
test/test_antisym.py
|
Python
|
gpl-3.0
| 5,061 | 0 |
# -*- coding: utf-8 -*-
"""
sleekxmpp.util
~~~~~~~~~~~~~~
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz, Lance J.T. Stout
:license: MIT, see LICENSE for more details
"""
from sleekxmpp.util.misc_ops import bytes, unicode, hashes, hash, \
num_to_bytes, bytes_to_num, quote, \
XOR, safedict
# =====================================================================
# Standardize import of Queue class:
import sys
def _gevent_threads_enabled():
    if 'gevent' not in sys.modules:
return False
try:
from gevent import thread as green_thread
thread = __import__('thread')
return thread.LockType is green_thread.LockType
except ImportError:
return False
if _gevent_threads_enabled():
import gevent.queue as queue
Queue = queue.JoinableQueue
else:
try:
import queue
except ImportError:
import Queue as queue
Queue = queue.Queue
QueueEmpty = queue.Empty
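# Hedged usage note (not part of the original module): downstream code
# imports the normalized names regardless of interpreter or gevent:
#
#   from sleekxmpp.util import Queue, QueueEmpty
#   q = Queue()
#   q.put('stanza')
#   q.get(block=False)  # raises QueueEmpty once the queue is drained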
|
danielvdao/facebookMacBot
|
venv/lib/python2.7/site-packages/sleekxmpp/util/__init__.py
|
Python
|
mit
| 1,067 | 0.002812 |
# -*- coding: utf-8 -*-
# quiz/quiz.py
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'Cześć, tu Python!'
if __name__ == '__main__':
app.run(debug=True)
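# Hedged usage note (not part of the original script): run with
#   python quiz.py
# and open http://127.0.0.1:5000/ to see the index response.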
|
koduj-z-klasa/python101
|
docs/webflask/quiz/quiz2.py
|
Python
|
mit
| 204 | 0 |
import logging
import page_objects
def option_should_be_selected(text):
'''
Verifies specified dropdown option is selected.
Parameters
----------
text : str
Returns
-------
None
Raises
------
AssertionError
If the specified option is not selected.
'''
page = page_objects.dropdown.DropdownPage()
selected_option = page.selected_option()
if selected_option != text:
log_str = 'FAIL:\n Actual option selected: {}\n Expected option selected: {}'.format(selected_option, text)
logging.error(log_str)
raise AssertionError(log_str)
logging.info('PASS: Option "{}" selected.'.format(selected_option))
return
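# Hedged usage sketch (not part of the original module): assuming the
# DropdownPage page object wraps the currently open page, a test step reads:
#
#   option_should_be_selected('Option 1')  # logs PASS or raises AssertionError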
|
MooMan272/selenium_the_internet
|
verify/dropdown.py
|
Python
|
mit
| 710 | 0.002817 |
import httplib, time, inspect
import selenium.webdriver.remote.webdriver
from pytanium_element import PytaniumElement
OldRemoteWebDriver = selenium.webdriver.remote.webdriver.WebDriver
# Redefine the RemoteWebDriver
class RemoteWebDriver(OldRemoteWebDriver):
    # NOTE: Both desired_capabilities and capabilities are accepted
    # because the Firefox WebDriver names this argument inconsistently
def __init__(self, desired_capabilities = None, capabilities = None, *args, **kwargs):
# Modify the existing WebElement identification functions
old_find_element = OldRemoteWebDriver.find_element
def find_element(*args, **kwargs):
webelement = old_find_element(*args, **kwargs)
return PytaniumElement(selenium_element = webelement)
OldRemoteWebDriver.find_element = find_element
# Override the ability to identify multiple elements
old_find_elements = OldRemoteWebDriver.find_elements
def find_elements(*args, **kwargs):
webelements = old_find_elements(*args, **kwargs)
webelements = [PytaniumElement(selenium_element = webelement) for webelement in webelements]
return webelements
OldRemoteWebDriver.find_elements = find_elements
# Allows you to inject a custom script on every page
self.browser_js = ""
# Determines what XHR states should pause execution by default
self.xhr_wait_states = [1, 2, 3]
# Create the default pytanium_capabilities
pytanium_capabilities = {'unexpectedAlertBehaviour' : 'ignore',
'suppressAlerts' : False,
'suppressConfirms' : False,
'suppressPrompts' : False,
'suppressPrints' : False,
'enableRecorder' : False,
'waitForAjax' : True,
'waitForImages' : True,
'recorderHost' : 'localhost',
'recorderPort' : 9999
}
# If desired_capabilities were passed, update the defaults
if desired_capabilities and capabilities:
            raise Exception("Both desired_capabilities and capabilities were passed to the WebDriver")
elif desired_capabilities:
if type(desired_capabilities) is dict:
pytanium_capabilities.update(desired_capabilities)
else:
raise Exception("desired_capabilities must be a dictionary")
elif capabilities:
if type(capabilities) is dict:
pytanium_capabilities.update(capabilities)
else:
raise Exception("capabilities must be a dictionary")
# Set the custom pytanium_capabilities of pytanium
self.suppress_alerts = pytanium_capabilities['suppressAlerts']
self.suppress_confirms = pytanium_capabilities['suppressConfirms']
self.suppress_prompts = pytanium_capabilities['suppressPrompts']
self.suppress_prints = pytanium_capabilities['suppressPrints']
self.wait_for_ajax = pytanium_capabilities['waitForAjax']
self.wait_for_images = pytanium_capabilities['waitForImages']
self.enable_recorder = pytanium_capabilities['enableRecorder']
self.recorder_host = pytanium_capabilities['recorderHost']
self.recorder_port = pytanium_capabilities['recorderPort']
# If we're using the recorder, check the proxy
if self.enable_recorder:
self.check_recorder_proxy()
extra_ie_capabilities = {"proxy": {
"httpProxy":"{0}:{1}".format(self.recorder_host, str(self.recorder_port)),
"ftpProxy":None,
"sslProxy":None,
"noProxy":None,
"proxyType":"MANUAL",
"class":"org.openqa.selenium.Proxy",
"autodetect":False
}}
pytanium_capabilities.update(extra_ie_capabilities)
# Build accessors to help identify objects using Sahi's style
self.accessors = []
self.accessors_name_set = set()
self.load_accessors()
# Build the old remote webdriver
if desired_capabilities:
OldRemoteWebDriver.__init__(self, desired_capabilities = pytanium_capabilities, *args, **kwargs)
elif capabilities:
# Firefox only
OldRemoteWebDriver.__init__(self, capabilities = pytanium_capabilities, *args, **kwargs)
else:
OldRemoteWebDriver.__init__(self, *args, **kwargs)
# Set the default window as the first open window
self.default_window = self.current_window_handle
def check_recorder_proxy(self):
try:
testconn = httplib.HTTPConnection(self.recorder_host, self.recorder_port)
testconn.connect()
testconn.request("GET", "/_s_/spr/blank.htm")
testconn.getresponse();
testconn.close()
except Exception:
raise Exception("The recorder proxy is not available. Please start Sahi on {0}:{1}.".format(self.recorder_host, self.recorder_port))
def get_alert(self):
a = self.switch_to_alert()
try:
a.text
except Exception:
print "There was no alert, confirm, or prompt found"
a = None
return a
alert = property(get_alert)
confirm = property(get_alert)
prompt = property(get_alert)
def addAD(self, accessor):
self.accessors.append(accessor)
self.accessors_name_set.add(accessor['name'])
# Taken *almost* directly from concat.js in Sahi
def load_accessors(self):
# self.addAD({'tag': "INPUT", 'type': "text", 'event':"change", 'name': "textbox", 'attributes': ["name", "id", "index", "className"], 'action': "_setValue", 'value': "value"})
self.addAD({'tag': "A", 'type': None, 'event':"click", 'name': "link", 'attributes': ["sahiText", "title|alt", "id", "index", "href", "className"], 'action': "click", 'value': "sahiText"})
# self.addAD({'tag': "IMG", 'type': None, 'event':"click", 'name': "image", 'attributes': ["title|alt", "id", this.getFileFromURL, "index", "className"], 'action': "click"})
self.addAD({'tag': "IMG", 'type': None, 'event':"click", 'name': "image", 'attributes': ["title|alt", "id", "fileFromURL", "index", "className"], 'action': "click"})
self.addAD({'tag': "LABEL", 'type': None, 'event':"click", 'name': "label", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "LI", 'type': None, 'event':"click", 'name': "listItem", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "UL", 'type': None, 'event':"click", 'name': "list", 'attributes': ["id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "OL", 'type': None, 'event':"click", 'name': "list", 'attributes': ["id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "DIV", 'type': None, 'event':"click", 'name': "div", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "SPAN", 'type': None, 'event':"click", 'name': "span", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "TABLE", 'type': None, 'event':"click", 'name': "table", 'attributes': ["id", "className", "index"], 'action': None, 'value': "sahiText"})
self.addAD({'tag': "TR", 'type': None, 'event':"click", 'name': "row", 'attributes': ["id", "className", "sahiText", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "TD", 'type': None, 'event':"click", 'name': "cell", 'attributes': ["sahiText", "id", "className", "index", "encaps_TR", "encaps_TABLE"], 'action': "click", 'idOnly': False, 'value': "sahiText"})
self.addAD({'tag': "TH", 'type': None, 'event':"click", 'name': "tableHeader", 'attributes': ["sahiText", "id", "className", "encaps_TABLE"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "INPUT", 'type': "button", 'event':"click", 'name': "button", 'attributes': ["value", "name", "id", "index", "className"], 'action': "click", 'value': "value"})
self.addAD({'tag': "BUTTON", 'type': "button", 'event':"click", 'name': "button", 'attributes': ["sahiText", "name", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
# self.addAD({'tag': "INPUT", 'type': "checkbox", 'event':"click", 'name': "checkbox", 'attributes': ["name", "id", "value", "className", "index"], 'action': "click", 'value': "checked", 'assertions': function(value){return [("true" == ("" + value)) ? _sahi.language.ASSERT_CHECKED : _sahi.language.ASSERT_NOT_CHECKED];}})
self.addAD({'tag': "INPUT", 'type': "checkbox", 'event':"click", 'name': "checkbox", 'attributes': ["name", "id", "value", "className", "index"], 'action': "click", 'value': "checked"})
self.addAD({'tag': "INPUT", 'type': "password", 'event':"change", 'name': "password", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
# self.addAD({'tag': "INPUT", 'type': "radio", 'event':"click", 'name': "radio", 'attributes': ["id", "name", "value", "className", "index"], 'action': "click", 'value': "checked", assertions: function(value){return [("true" == ("" + value)) ? _sahi.language.ASSERT_CHECKED : _sahi.language.ASSERT_NOT_CHECKED];}})
self.addAD({'tag': "INPUT", 'type': "radio", 'event':"click", 'name': "radio", 'attributes': ["id", "name", "value", "className", "index"], 'action': "click", 'value': "checked"})
self.addAD({'tag': "INPUT", 'type': "submit", 'event':"click", 'name': "submit", 'attributes': ["value", "name", "id", "className", "index"], 'action': "click", 'value': "value"})
self.addAD({'tag': "BUTTON", 'type': "submit", 'event':"click", 'name': "submit", 'attributes': ["sahiText", "name", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "INPUT", 'type': "text", 'event':"change", 'name': "textbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "reset", 'event':"click", 'name': "reset", 'attributes': ["value", "name", "id", "className", "index"], 'action': "click", 'value': "value"})
self.addAD({'tag': "BUTTON", 'type': "reset", 'event':"click", 'name': "reset", 'attributes': ["sahiText", "name", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "INPUT", 'type': "hidden", 'event':"", 'name': "hidden", 'attributes': ["name", "id", "className", "index"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "file", 'event':"click", 'name': "file", 'attributes': ["name", "id", "index", "className"], 'action': "setFile", 'value': "value"})
# self.addAD({'tag': "INPUT", 'type': "image", 'event':"click", 'name': "imageSubmitButton", 'attributes': ["title|alt", "name", "id", this.getFileFromURL, "index", "className"], 'action': "click"})
self.addAD({'tag': "INPUT", 'type': "image", 'event':"click", 'name': "imageSubmitButton", 'attributes': ["title|alt", "name", "id", "fileFromURL", "index", "className"], 'action': "click"})
self.addAD({'tag': "INPUT", 'type': "date", 'event':"change", 'name': "datebox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "datetime", 'event':"change", 'name': "datetimebox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "datetime-local", 'event':"change", 'name': "datetimelocalbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "email", 'event':"change", 'name': "emailbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "month", 'event':"change", 'name': "monthbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "number", 'event':"change", 'name': "numberbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "range", 'event':"change", 'name': "rangebox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "search", 'event':"change", 'name': "searchbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "tel", 'event':"change", 'name': "telephonebox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "time", 'event':"change", 'name': "timebox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "url", 'event':"change", 'name': "urlbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "INPUT", 'type': "week", 'event':"change", 'name': "weekbox", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
# self.addAD({'tag': "SELECT", 'type': None, 'event':"change", 'name': "select", 'attributes': ["name", "id", "index", "className"], 'action': "setSelected", 'value': function(el){return _sahi._getSelectedText(el) || _sahi.getOptionId(el, el.value) || el.value;},assertions: function(value){return [_sahi.language.ASSERT_SELECTION];}})
self.addAD({'tag': "SELECT", 'type': None, 'event':"change", 'name': "select", 'attributes': ["name", "id", "index", "className"], 'action': "setSelected"})
self.addAD({'tag': "OPTION", 'type': None, 'event':"none", 'name': "option", 'attributes': ["encaps_SELECT", "sahiText", "value", "id", "index"], 'action': "", 'value': "sahiText"})
self.addAD({'tag': "TEXTAREA", 'type': None, 'event':"change", 'name': "textarea", 'attributes': ["name", "id", "index", "className"], 'action': "setValue", 'value': "value"})
self.addAD({'tag': "H1", 'type': None, 'event':"click", 'name': "heading1", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "H2", 'type': None, 'event':"click", 'name': "heading2", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "H3", 'type': None, 'event':"click", 'name': "heading3", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "H4", 'type': None, 'event':"click", 'name': "heading4", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "H5", 'type': None, 'event':"click", 'name': "heading5", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "H6", 'type': None, 'event':"click", 'name': "heading6", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "AREA", 'type': None, 'event':"click", 'name': "area", 'attributes': ["id", "title|alt", "href", "shape", "className", "index"], 'action': "click"})
self.addAD({'tag': "MAP", 'type': None, 'event':"click", 'name': "map", 'attributes': ["name", "id", "title", "className", "index"], 'action': "click"})
self.addAD({'tag': "P", 'type': None, 'event':"click", 'name': "paragraph", 'attributes': ["encaps_A", "id", "className", "sahiText", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "I", 'type': None, 'event':"click", 'name': "italic", 'attributes': ["encaps_A", "sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "EM", 'type': None, 'event':"click", 'name': "emphasis", 'attributes': ["encaps_A", "sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "B", 'type': None, 'event':"click", 'name': "bold", 'attributes': ["encaps_A", "sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "STRONG", 'type': None, 'event':"click", 'name': "strong", 'attributes': ["encaps_A", "sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "PRE", 'type': None, 'event':"click", 'name': "preformatted", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "CODE", 'type': None, 'event':"click", 'name': "code", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "BLOCKQUOTE", 'type': None, 'event':"click", 'name': "blockquote", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "CANVAS", 'type': None, 'event':"click", 'name': "canvas", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "ABBR", 'type': None, 'event':"click", 'name': "abbr", 'attributes': ["encaps_A", "sahiText", "title", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "HR", 'type': None, 'event':"click", 'name': "hr", 'attributes': ["id", "className", "index"], 'action': "click", 'value': ""})
# var o_fn1 = function(o){try{return o._sahi_getFlexId()}catch(e){}};
# var o_fn2 = function(o){try{return o._sahi_getUID()}catch(e){}};
# self.addAD({'tag': "OBJECT", 'type': None, 'event':"click", 'name': "object", 'attributes': ["id", "name", "data", o_fn1, o_fn2], 'action': "click", 'value': ""})
# self.addAD({'tag': "EMBED", 'type': None, 'event':"click", 'name': "embed", 'attributes': ["name", "id", o_fn1, o_fn2], 'action': "click", 'value': ""})
self.addAD({'tag': "OBJECT", 'type': None, 'event':"click", 'name': "object", 'attributes': ["id", "name", "data"], 'action': "click", 'value': ""})
self.addAD({'tag': "EMBED", 'type': None, 'event':"click", 'name': "embed", 'attributes': ["name", "id"], 'action': "click", 'value': ""})
self.addAD({'tag': "DL", 'type': None, 'event':"click", 'name': "dList", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "DT", 'type': None, 'event':"click", 'name': "dTerm", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "DD", 'type': None, 'event':"click", 'name': "dDesc", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "RECT", 'type': None, 'event':"click", 'name': "svg_rect", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "TSPAN", 'type': None, 'event':"click", 'name': "svg_tspan", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "CIRCLE", 'type': None, 'event':"click", 'name': "svg_circle", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "ELLIPSE", 'type': None, 'event':"click", 'name': "svg_ellipse", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "LINE", 'type': None, 'event':"click", 'name': "svg_line", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "POLYGONE", 'type': None, 'event':"click", 'name': "svg_polygon", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "POLYLINE", 'type': None, 'event':"click", 'name': "svg_polyline", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "PATH", 'type': None, 'event':"click", 'name': "svg_path", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
self.addAD({'tag': "TEXT", 'type': None, 'event':"click", 'name': "svg_text", 'attributes': ["sahiText", "id", "className", "index"], 'action': "click", 'value': "sahiText"})
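    # Hedged usage sketch (not part of the original module): with these
    # Sahi-style accessors loaded, element lookups read like
    #   driver.link('Sign in').click()
    #   driver.textbox('username')  # returns a PytaniumElement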
def link(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "link", identifier = identifier, *args, **kwargs)
def image(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "image", identifier = identifier, *args, **kwargs)
def label(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "label", identifier = identifier, *args, **kwargs)
def listItem(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "listItem", identifier = identifier, *args, **kwargs)
def list(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "list", identifier = identifier, *args, **kwargs)
def div(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "div", identifier = identifier, *args, **kwargs)
def span(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "span", identifier = identifier, *args, **kwargs)
def table(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "table", identifier = identifier, *args, **kwargs)
def row(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "row", identifier = identifier, *args, **kwargs)
def cell(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "cell", identifier = identifier, *args, **kwargs)
def tableHeader(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "tableHeader", identifier = identifier, *args, **kwargs)
def button(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "button", identifier = identifier, *args, **kwargs)
def checkbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "checkbox", identifier = identifier, *args, **kwargs)
def password(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "password", identifier = identifier, *args, **kwargs)
def radio(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "radio", identifier = identifier, *args, **kwargs)
def submit(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "submit", identifier = identifier, *args, **kwargs)
def textbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "textbox", identifier = identifier, *args, **kwargs)
def reset(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "reset", identifier = identifier, *args, **kwargs)
def hidden(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "hidden", identifier = identifier, *args, **kwargs)
def file(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "file", identifier = identifier, *args, **kwargs)
def imageSubmitButton(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "imageSubmitButton", identifier = identifier, *args, **kwargs)
def datebox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "datebox", identifier = identifier, *args, **kwargs)
def datetimebox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "datetimebox", identifier = identifier, *args, **kwargs)
def datetimelocalbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "datetimelocalbox", identifier = identifier, *args, **kwargs)
def emailbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "emailbox", identifier = identifier, *args, **kwargs)
def monthbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "monthbox", identifier = identifier, *args, **kwargs)
def numberbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "numberbox", identifier = identifier, *args, **kwargs)
def rangebox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "rangebox", identifier = identifier, *args, **kwargs)
def searchbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "searchbox", identifier = identifier, *args, **kwargs)
def telephonebox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "telephonebox", identifier = identifier, *args, **kwargs)
def timebox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "timebox", identifier = identifier, *args, **kwargs)
def urlbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "urlbox", identifier = identifier, *args, **kwargs)
def weekbox(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "weekbox", identifier = identifier, *args, **kwargs)
def select(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "select", identifier = identifier, *args, **kwargs)
def option(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "option", identifier = identifier, *args, **kwargs)
def textarea(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "textarea", identifier = identifier, *args, **kwargs)
def heading1(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading1", identifier = identifier, *args, **kwargs)
def heading2(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading2", identifier = identifier, *args, **kwargs)
def heading3(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading3", identifier = identifier, *args, **kwargs)
def heading4(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading4", identifier = identifier, *args, **kwargs)
def heading5(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading5", identifier = identifier, *args, **kwargs)
def heading6(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "heading6", identifier = identifier, *args, **kwargs)
def area(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "area", identifier = identifier, *args, **kwargs)
def map(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "map", identifier = identifier, *args, **kwargs)
def paragraph(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "paragraph", identifier = identifier, *args, **kwargs)
def italic(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "italic", identifier = identifier, *args, **kwargs)
def emphasis(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "emphasis", identifier = identifier, *args, **kwargs)
def bold(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "bold", identifier = identifier, *args, **kwargs)
def strong(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "strong", identifier = identifier, *args, **kwargs)
def preformatted(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "preformatted", identifier = identifier, *args, **kwargs)
def code(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "code", identifier = identifier, *args, **kwargs)
def blockquote(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "blockquote", identifier = identifier, *args, **kwargs)
def canvas(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "canvas", identifier = identifier, *args, **kwargs)
def abbr(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "abbr", identifier = identifier, *args, **kwargs)
def hr(self, identifier, *args, **kwargs):
return PytaniumElement(pytanium_parent = self, accessor_name = "hr", identifier = identifier, *args, **kwargs)
confirm_action = True
prompt_text = ""
def inject_extensions(self):
# Inject javascript to supplement Selenium functionality
if self.suppress_alerts:
alert = """
// Backup the old alert
window.oldalert = window.oldalert || window.alert;
// Override the window.alert
window.alert = function() {
window.lastAlertText = arguments[0];
return true;
};
"""
else:
alert = """
// Reset alert if it's been changed
window.alert = window.oldalert || window.alert;
"""
if self.suppress_confirms:
confirm = """
// Backup the old confirm
window.oldconfirm = window.oldconfirm || window.confirm;
// Override the window.confirm
window.confirm = function() {
window.lastConfirmText = arguments[0];
return """ + str(self.confirm_action).lower() + """;
};
"""
else:
confirm = """
// Reset confirm if it's been changed
window.confirm = window.oldconfirm || window.confirm;
"""
if self.suppress_prompts:
prompt = """
// Backup the old prompt
window.oldprompt = window.oldprompt || window.prompt;
// Override the window.prompt
window.prompt = function() {
window.lastPromptText = arguments[0];
return '""" + str(self.prompt_text) + """';
};
"""
else:
prompt = """
// Reset prompt if it's been changed
window.prompt = window.oldprompt || window.prompt;
"""
if self.suppress_prints:
print_override = """
// Backup the old print
            window.oldprint = window.oldprint || window.print;
// Override the window.print
window.print = function() {
window.printCalled = true;
return true;
};
"""
else:
print_override = """
// Reset print if it's been changed
window.print = window.oldprint || window.print;
"""
#TODO: If an array of wait states is empty, then don't inject
if self.wait_for_ajax:
# Build the wait logic with booleans instead of indexOf
# because IE8 doesn't have indexOf by default
wait_logic = ' == readyState || '.join(str(i) for i in self.xhr_wait_states) + ' == readyState'
ajax = """
// Create a list of XMLHttpRequests
window.XHRs = window.XHRs || [];
// Use the proxy pattern on open
XMLHttpRequest.prototype.oldopen = XMLHttpRequest.prototype.oldopen || XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url, async, username, password){
// Push the XHR to our global list
window.XHRs.push(this);
return this.oldopen.apply(this, arguments);
};
// Define a way to check if the requests are done
window.pytaniumAjaxReady = function(){
for(var XHR = 0; XHR < window.XHRs.length; XHR++){
readyState = window.XHRs[XHR].readyState;
if(""" + wait_logic + """){
return false;
}
}
return true;
}
"""
else:
ajax = """
// Reset open if it's been changed
XMLHttpRequest.prototype.open = XMLHttpRequest.prototype.oldopen || XMLHttpRequest.prototype.open;
"""
script = alert + confirm + prompt + print_override + ajax + self.browser_js
self.execute_script(script)
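    # Note (added commentary): the XHR shim above uses the proxy pattern --
    # the original open() is preserved as oldopen and every request is
    # recorded in window.XHRs, so pytaniumAjaxReady() can later poll each
    # request's readyState against the configured xhr_wait_states.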
def last_alert(self):
return self.execute_script("return window.lastAlertText;")
def clear_last_alert(self):
return self.execute_script("window.lastAlertText = null;")
def print_called(self):
return self.execute_script("return window.printCalled || false;")
def clear_print_called(self):
return self.execute_script("window.printCalled = false;")
def last_confirm(self):
return self.execute_script("return window.lastConfirmText;")
def clear_last_confirm(self):
return self.execute_script("window.lastConfirmText = null;")
def last_prompt(self):
return self.execute_script("return window.lastPromptText;")
def clear_last_prompt(self):
return self.execute_script("window.lastPromptText = null;")
def is_ajax_complete(self):
if self.wait_for_ajax:
# Check if all the ajax requests are complete
javascript_check = self.execute_script("""
if(window.pytaniumAjaxReady){
return window.pytaniumAjaxReady();
}
else{
return true;
}
""")
return javascript_check
else:
return True
def are_images_complete(self):
if self.wait_for_images:
# Check if all the images are loaded
images = self.find_elements_by_tag_name("img")
for image in images:
is_complete = image.get_attribute("complete")
                if not is_complete or is_complete == "false":
return False
return True
def wait_until_load_complete(self):
timeout_limit = 30
timeout = time.time() + timeout_limit
interval = .5
while time.time() < timeout:
if self.is_ajax_complete() and self.are_images_complete():
return
time.sleep(interval)
raise Exception("Ajax requests and picture loads on the page took longer than " + str(timeout_limit) + " seconds to execute")
# Modify the base webdriver
selenium.webdriver.remote.webdriver.WebDriver = RemoteWebDriver
# Reload all the drivers that use the base webdriver
reload(selenium.webdriver.firefox.webdriver)
Firefox = selenium.webdriver.firefox.webdriver.WebDriver
reload(selenium.webdriver.chrome.webdriver)
Chrome = selenium.webdriver.chrome.webdriver.WebDriver
reload(selenium.webdriver.ie.webdriver)
Ie = selenium.webdriver.ie.webdriver.WebDriver
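# --- Usage sketch (added; illustrative only) ---
# A minimal, hedged example of driving the patched driver with the alert
# suppression and load-waiting hooks defined above. The URL is a placeholder.
#
#     from pytanium.webdriver import Firefox
#
#     browser = Firefox()
#     browser.suppress_alerts = True       # window.alert gets overridden
#     browser.get("http://example.com")
#     browser.inject_extensions()          # install the JS shims
#     browser.wait_until_load_complete()   # block until XHRs/images settle
#     print browser.last_alert()           # text of any suppressed alert
#     browser.quit()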
|
kevlened/pytanium
|
pytanium/webdriver.py
|
Python
|
lgpl-3.0
| 39,315 | 0.019407 |
# -*- coding: utf-8 -*-
"""
{{ project_name }}.libs.common.models
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains all models that can be used across apps
:copyright: (c) 2015
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .utils.text import slugify
class SlugModel(models.Model):
"""
    A base class for any model that wants to implement an auto-generated slug
field.
"""
# how many times we'll retry creating a slug before giving up
MAX_RETRIES = 100
slug = models.SlugField(_('slug'), max_length=255, unique=True)
class Meta:
abstract = True
@classmethod
def is_valid_slug(cls, slug):
"""Convenience method to check if the given slug already exists."""
return not cls.objects.filter(slug=slug).exists()
@classmethod
def get_by_slug(cls, slug):
"""
Return the :class:`{{ project_name }}.libs.common.models.SlugModel` for the given
        slug. If the slug doesn't exist, return None.
:param slug: the slug value to search for
"""
try:
return cls.objects.get(slug=slug)
except cls.DoesNotExist:
return None
def base_slug_value(self):
"""
As a subclass of :class:`{{ project_name }}.libs.common.models.SlugModel` one must
        implement the :meth:`{{ project_name }}.libs.common.models.SlugModel.base_slug_value`
which returns a unicode value that is used as the basis of the slug value.
"""
raise NotImplementedError
def generate_slug(self, value=None):
"""
Create a slug based on the value of
        :meth:`{{ project_name }}.libs.common.models.SlugModel.base_slug_value`, ensure
that the slug is unique by comparing it to existing slugs.
"""
if value is None:
value = self.base_slug_value()
field = self._meta.get_field('slug')
return slugify(value, max_length=field.max_length,
usable=self.is_valid_slug, max_retries=self.MAX_RETRIES)
def save(self, *args, **kwargs):
"""
Right before a model is saved, check to see if the slug field has yet
to be defined. If so, generate and set the
:attr:`{{ project_name }}.libs.common.models.SlugModel.slug`.
"""
if not self.slug:
# a slug has not yet been defined, generate one
self.slug = self.generate_slug()
return super(SlugModel, self).save(*args, **kwargs)
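# --- Example subclass (added; the Article model below is hypothetical and
# only illustrates the one hook a concrete model must implement) ---
#
#     class Article(SlugModel):
#         title = models.CharField(_('title'), max_length=255)
#
#         def base_slug_value(self):
#             # the slug is derived from the title, e.g. "My Post" -> "my-post"
#             return self.title
#
# Saving Article(title='My Post') triggers generate_slug() and persists a
# unique slug such as "my-post" (how collisions are suffixed depends on the
# slugify() helper).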
|
ericbuckley/django-project-template
|
project_name/libs/common/models.py
|
Python
|
bsd-3-clause
| 2,547 | 0.002356 |
# IMAP folder support
# Copyright (C) 2002-2012 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import email
import random
import binascii
import re
import time
from sys import exc_info
from .Base import BaseFolder
from offlineimap import imaputil, imaplibutil, OfflineImapError
from offlineimap.imaplib2 import MonthNames
class IMAPFolder(BaseFolder):
def __init__(self, imapserver, name, repository):
name = imaputil.dequote(name)
self.sep = imapserver.delim
super(IMAPFolder, self).__init__(name, repository)
self.expunge = repository.getexpunge()
self.root = None # imapserver.root
self.imapserver = imapserver
self.messagelist = None
self.randomgenerator = random.Random()
#self.ui is set in BaseFolder
def selectro(self, imapobj, force = False):
"""Select this folder when we do not need write access.
Prefer SELECT to EXAMINE if we can, since some servers
(Courier) do not stabilize UID validity until the folder is
selected.
.. todo: Still valid? Needs verification
:param: Enforce new SELECT even if we are on that folder already.
:returns: raises :exc:`OfflineImapError` severity FOLDER on error"""
try:
imapobj.select(self.getfullname(), force = force)
except imapobj.readonly:
imapobj.select(self.getfullname(), readonly = True, force = force)
def suggeststhreads(self):
return 1
def waitforthread(self):
self.imapserver.connectionwait()
def getcopyinstancelimit(self):
return 'MSGCOPY_' + self.repository.getname()
def get_uidvalidity(self):
"""Retrieve the current connections UIDVALIDITY value
UIDVALIDITY value will be cached on the first call.
:returns: The UIDVALIDITY as (long) number."""
if hasattr(self, '_uidvalidity'):
# use cached value if existing
return self._uidvalidity
imapobj = self.imapserver.acquireconnection()
try:
# SELECT (if not already done) and get current UIDVALIDITY
self.selectro(imapobj)
typ, uidval = imapobj.response('UIDVALIDITY')
assert uidval != [None] and uidval != None, \
"response('UIDVALIDITY') returned [None]!"
self._uidvalidity = long(uidval[-1])
return self._uidvalidity
finally:
self.imapserver.releaseconnection(imapobj)
def quickchanged(self, statusfolder):
# An IMAP folder has definitely changed if the number of
# messages or the UID of the last message have changed. Otherwise
# only flag changes could have occurred.
retry = True # Should we attempt another round or exit?
while retry:
retry = False
imapobj = self.imapserver.acquireconnection()
try:
# Select folder and get number of messages
restype, imapdata = imapobj.select(self.getfullname(), True,
True)
self.imapserver.releaseconnection(imapobj)
except OfflineImapError as e:
# retry on dropped connections, raise otherwise
self.imapserver.releaseconnection(imapobj, True)
if e.severity == OfflineImapError.ERROR.FOLDER_RETRY:
retry = True
else: raise
except:
# cleanup and raise on all other errors
self.imapserver.releaseconnection(imapobj, True)
raise
# 1. Some mail servers do not return an EXISTS response
# if the folder is empty. 2. ZIMBRA servers can return
# multiple EXISTS replies in the form 500, 1000, 1500,
# 1623 so check for potentially multiple replies.
if imapdata == [None]:
return True
maxmsgid = 0
for msgid in imapdata:
maxmsgid = max(long(msgid), maxmsgid)
# Different number of messages than last time?
if maxmsgid != statusfolder.getmessagecount():
return True
return False
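    # Note (added commentary): for a ZIMBRA-style server the select() data
    # above might arrive as ['500', '1000', '1500', '1623']; the loop then
    # settles on maxmsgid == 1623 before the message-count comparison.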
def cachemessagelist(self):
maxage = self.config.getdefaultint("Account %s" % self.accountname,
"maxage", -1)
maxsize = self.config.getdefaultint("Account %s" % self.accountname,
"maxsize", -1)
self.messagelist = {}
imapobj = self.imapserver.acquireconnection()
try:
res_type, imapdata = imapobj.select(self.getfullname(), True, True)
if imapdata == [None] or imapdata[0] == '0':
# Empty folder, no need to populate message list
return
# By default examine all UIDs in this folder
msgsToFetch = '1:*'
            if maxage != -1 or maxsize != -1:
                search_cond = "("
                if maxage != -1:
#find out what the oldest message is that we should look at
oldest_struct = time.gmtime(time.time() - (60*60*24*maxage))
if oldest_struct[0] < 1900:
raise OfflineImapError("maxage setting led to year %d. "
"Abort syncing." % oldest_struct[0],
OfflineImapError.ERROR.REPO)
search_cond += "SINCE %02d-%s-%d" % (
oldest_struct[2],
MonthNames[oldest_struct[1]],
oldest_struct[0])
                if maxsize != -1:
                    if maxage != -1:  # there are two conditions, add a space
search_cond += " "
search_cond += "SMALLER %d" % maxsize
search_cond += ")"
res_type, res_data = imapobj.search(None, search_cond)
if res_type != 'OK':
raise OfflineImapError("SEARCH in folder [%s]%s failed. "
"Search string was '%s'. Server responded '[%s] %s'" % (
self.getrepository(), self,
search_cond, res_type, res_data),
OfflineImapError.ERROR.FOLDER)
                # Result UIDs are separated by spaces; coalesce into ranges
msgsToFetch = imaputil.uid_sequence(res_data[0].split())
if not msgsToFetch:
return # No messages to sync
# Get the flags and UIDs for these. single-quotes prevent
# imaplib2 from quoting the sequence.
res_type, response = imapobj.fetch("'%s'" % msgsToFetch,
'(FLAGS UID)')
if res_type != 'OK':
raise OfflineImapError("FETCHING UIDs in folder [%s]%s failed. "
"Server responded '[%s] %s'" % (
self.getrepository(), self,
res_type, response),
OfflineImapError.ERROR.FOLDER)
finally:
self.imapserver.releaseconnection(imapobj)
for messagestr in response:
# looks like: '1 (FLAGS (\\Seen Old) UID 4807)' or None if no msg
# Discard initial message number.
if messagestr == None:
continue
messagestr = messagestr.split(' ', 1)[1]
options = imaputil.flags2hash(messagestr)
if not 'UID' in options:
self.ui.warn('No UID in message with options %s' %\
str(options),
minor = 1)
else:
uid = long(options['UID'])
flags = imaputil.flagsimap2maildir(options['FLAGS'])
rtime = imaplibutil.Internaldate2epoch(messagestr)
self.messagelist[uid] = {'uid': uid, 'flags': flags, 'time': rtime}
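    # Illustrative sketch (added): with maxage=7 and maxsize=2000000 the
    # condition built above would look roughly like
    #     (SINCE 25-Dec-2012 SMALLER 2000000)
    # where the exact SINCE date depends on the time of the sync.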
def getmessagelist(self):
return self.messagelist
def getmessage(self, uid):
"""Retrieve message with UID from the IMAP server (incl body)
:returns: the message body or throws and OfflineImapError
(probably severity MESSAGE) if e.g. no message with
this UID could be found.
"""
imapobj = self.imapserver.acquireconnection()
try:
fails_left = 2 # retry on dropped connection
while fails_left:
try:
imapobj.select(self.getfullname(), readonly = True)
res_type, data = imapobj.uid('fetch', str(uid),
'(BODY.PEEK[])')
fails_left = 0
except imapobj.abort as e:
# Release dropped connection, and get a new one
self.imapserver.releaseconnection(imapobj, True)
imapobj = self.imapserver.acquireconnection()
self.ui.error(e, exc_info()[2])
fails_left -= 1
if not fails_left:
raise e
if data == [None] or res_type != 'OK':
#IMAP server says bad request or UID does not exist
severity = OfflineImapError.ERROR.MESSAGE
reason = "IMAP server '%s' failed to fetch message UID '%d'."\
"Server responded: %s %s" % (self.getrepository(), uid,
res_type, data)
if data == [None]:
#IMAP server did not find a message with this UID
reason = "IMAP server '%s' does not have a message "\
"with UID '%s'" % (self.getrepository(), uid)
raise OfflineImapError(reason, severity)
# data looks now e.g. [('320 (UID 17061 BODY[]
# {2565}','msgbody....')] we only asked for one message,
            # and that msg is in data[0]. msgbody is in [0][1]
data = data[0][1].replace("\r\n", "\n")
if len(data)>200:
dbg_output = "%s...%s" % (str(data)[:150],
str(data)[-50:])
else:
dbg_output = data
self.ui.debug('imap', "Returned object from fetching %d: '%s'" %
(uid, dbg_output))
finally:
self.imapserver.releaseconnection(imapobj)
return data
def getmessagetime(self, uid):
return self.messagelist[uid]['time']
def getmessageflags(self, uid):
return self.messagelist[uid]['flags']
def generate_randomheader(self, content):
"""Returns a unique X-OfflineIMAP header
Generate an 'X-OfflineIMAP' mail header which contains a random
unique value (which is based on the mail content, and a random
number). This header allows us to fetch a mail after APPENDing
it to an IMAP server and thus find out the UID that the server
assigned it.
:returns: (headername, headervalue) tuple, consisting of strings
headername == 'X-OfflineIMAP' and headervalue will be a
random string
"""
headername = 'X-OfflineIMAP'
# We need a random component too. If we ever upload the same
# mail twice (e.g. in different folders), we would still need to
# get the UID for the correct one. As we won't have too many
# mails with identical content, the randomness requirements are
        # not extremely critical though.
# compute unsigned crc32 of 'content' as unique hash
# NB: crc32 returns unsigned only starting with python 3.0
headervalue = str( binascii.crc32(content) & 0xffffffff ) + '-'
headervalue += str(self.randomgenerator.randint(0,9999999999))
return (headername, headervalue)
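    # Illustrative sketch (added): the tuple returned above looks like
    #     ('X-OfflineIMAP', '3002947086-4728391057')
    # i.e. the unsigned CRC32 of the content, a dash, and a random integer.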
def savemessage_addheader(self, content, headername, headervalue):
self.ui.debug('imap',
'savemessage_addheader: called to add %s: %s' % (headername,
headervalue))
insertionpoint = content.find("\r\n\r\n")
self.ui.debug('imap', 'savemessage_addheader: insertionpoint = %d' % insertionpoint)
leader = content[0:insertionpoint]
self.ui.debug('imap', 'savemessage_addheader: leader = %s' % repr(leader))
if insertionpoint == 0 or insertionpoint == -1:
newline = ''
insertionpoint = 0
else:
newline = "\r\n"
newline += "%s: %s" % (headername, headervalue)
self.ui.debug('imap', 'savemessage_addheader: newline = ' + repr(newline))
trailer = content[insertionpoint:]
self.ui.debug('imap', 'savemessage_addheader: trailer = ' + repr(trailer))
return leader + newline + trailer
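    # Illustrative sketch (added): given content 'Subject: hi\r\n\r\nbody',
    # savemessage_addheader(content, 'X-OfflineIMAP', '1-2') returns
    #     'Subject: hi\r\nX-OfflineIMAP: 1-2\r\n\r\nbody'
    # i.e. the header is spliced in just before the blank separator line.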
def savemessage_searchforheader(self, imapobj, headername, headervalue):
self.ui.debug('imap', 'savemessage_searchforheader called for %s: %s' % \
(headername, headervalue))
# Now find the UID it got.
headervalue = imapobj._quote(headervalue)
try:
matchinguids = imapobj.uid('search', 'HEADER', headername, headervalue)[1][0]
except imapobj.error as err:
# IMAP server doesn't implement search or had a problem.
self.ui.debug('imap', "savemessage_searchforheader: got IMAP error '%s' while attempting to UID SEARCH for message with header %s" % (err, headername))
return 0
self.ui.debug('imap', 'savemessage_searchforheader got initial matchinguids: ' + repr(matchinguids))
if matchinguids == '':
self.ui.debug('imap', "savemessage_searchforheader: UID SEARCH for message with header %s yielded no results" % headername)
return 0
matchinguids = matchinguids.split(' ')
self.ui.debug('imap', 'savemessage_searchforheader: matchinguids now ' + \
repr(matchinguids))
if len(matchinguids) != 1 or matchinguids[0] == None:
raise ValueError("While attempting to find UID for message with "
"header %s, got wrong-sized matchinguids of %s" %\
(headername, str(matchinguids)))
return long(matchinguids[0])
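    # Note (added commentary): the command issued above has the form
    #     UID SEARCH HEADER X-OfflineIMAP "3002947086-4728391057"
    # and on success the untagged SEARCH response carries exactly one UID.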
def savemessage_fetchheaders(self, imapobj, headername, headervalue):
""" We fetch all new mail headers and search for the right
X-OfflineImap line by hand. The response from the server has form:
(
'OK',
[
(
'185 (RFC822.HEADER {1789}',
'... mail headers ...'
),
' UID 2444)',
(
'186 (RFC822.HEADER {1789}',
'... 2nd mail headers ...'
),
' UID 2445)'
]
)
We need to locate the UID just after mail headers containing our
X-OfflineIMAP line.
Returns UID when found, 0 when not found.
"""
self.ui.debug('imap', 'savemessage_fetchheaders called for %s: %s' % \
(headername, headervalue))
# run "fetch X:* rfc822.header"
# since we stored the mail we are looking for just recently, it would
# not be optimal to fetch all messages. So we'll find highest message
# UID in our local messagelist and search from there (exactly from
# UID+1). That works because UIDs are guaranteed to be unique and
# ascending.
if self.getmessagelist():
start = 1+max(self.getmessagelist().keys())
else:
# Folder was empty - start from 1
start = 1
# Imaplib quotes all parameters of a string type. That must not happen
# with the range X:*. So we use bytearray to stop imaplib from getting
# in our way
result = imapobj.uid('FETCH', bytearray('%d:*' % start), 'rfc822.header')
if result[0] != 'OK':
raise OfflineImapError('Error fetching mail headers: ' + '. '.join(result[1]),
OfflineImapError.ERROR.MESSAGE)
result = result[1]
found = 0
for item in result:
if found == 0 and type(item) == type( () ):
# Walk just tuples
if re.search("(?:^|\\r|\\n)%s:\s*%s(?:\\r|\\n)" % (headername, headervalue),
item[1], flags=re.IGNORECASE):
found = 1
elif found == 1:
if type(item) == type (""):
uid = re.search("UID\s+(\d+)", item, flags=re.IGNORECASE)
if uid:
return int(uid.group(1))
else:
self.ui.warn("Can't parse FETCH response, can't find UID: %s", result.__repr__())
else:
self.ui.warn("Can't parse FETCH response, we awaited string: %s", result.__repr__())
return 0
def getmessageinternaldate(self, content, rtime=None):
"""Parses mail and returns an INTERNALDATE string
It will use information in the following order, falling back as an attempt fails:
- rtime parameter
- Date header of email
        We return None if we couldn't find a valid date. In this case
        the IMAP server will use the server's local time when appending
        (per RFC).
        Note that imaplib's Time2Internaldate is inherently broken as
        it returns localized date strings which are invalid for IMAP
        servers. However, that function is called for *every* append()
internally. So we need to either pass in `None` or the correct
string (in which case Time2Internaldate() will do nothing) to
append(). The output of this function is designed to work as
input to the imapobj.append() function.
TODO: We should probably be returning a bytearray rather than a
string here, because the IMAP server will expect plain
        ASCII. However, imaplib.Time2Internaldate currently returns a
string so we go with the same for now.
:param rtime: epoch timestamp to be used rather than analyzing
the email.
:returns: string in the form of "DD-Mmm-YYYY HH:MM:SS +HHMM"
(including double quotes) or `None` in case of failure
(which is fine as value for append)."""
if rtime is None:
message = email.message_from_string(content)
# parsedate returns a 9-tuple that can be passed directly to
# time.mktime(); Will be None if missing or not in a valid
# format. Note that indexes 6, 7, and 8 of the result tuple are
# not usable.
datetuple = email.utils.parsedate(message.get('Date'))
if datetuple is None:
#could not determine the date, use the local time.
return None
#make it a real struct_time, so we have named attributes
datetuple = time.struct_time(datetuple)
else:
#rtime is set, use that instead
datetuple = time.localtime(rtime)
try:
# Check for invalid dates
if datetuple[0] < 1981:
raise ValueError
# Check for invalid dates
datetuple_check = time.localtime(time.mktime(datetuple))
if datetuple[:2] != datetuple_check[:2]:
raise ValueError
except (ValueError, OverflowError):
# Argh, sometimes it's a valid format but year is 0102
# or something. Argh. It seems that Time2Internaldate
            # will raise a ValueError if the year is 0102 but not 1902,
# but some IMAP servers nonetheless choke on 1902.
self.ui.debug('imap', "Message with invalid date %s. Server will use local time." \
% datetuple)
return None
#produce a string representation of datetuple that works as
#INTERNALDATE
num2mon = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun',
7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
        # tm_isdst from email.parsedate is not really usable, but we still
        # consult it here (it is an int, so compare against 1, not '1')
        if datetuple.tm_isdst == 1:
zone = -time.altzone
else:
zone = -time.timezone
offset_h, offset_m = divmod(zone//60, 60)
internaldate = '"%02d-%s-%04d %02d:%02d:%02d %+03d%02d"' \
% (datetuple.tm_mday, num2mon[datetuple.tm_mon], datetuple.tm_year, \
datetuple.tm_hour, datetuple.tm_min, datetuple.tm_sec, offset_h, offset_m)
return internaldate
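    # Illustrative sketch (added): for rtime == 1234567890 in a UTC zone the
    # string built above would be
    #     '"13-Feb-2009 23:31:30 +0000"'
    # (double quotes included, as expected by imapobj.append()).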
def savemessage(self, uid, content, flags, rtime):
"""Save the message on the Server
This backend always assigns a new uid, so the uid arg is ignored.
This function will update the self.messagelist dict to contain
        the new message after successfully saving it.
See folder/Base for details. Note that savemessage() does not
check against dryrun settings, so you need to ensure that
savemessage is never called in a dryrun mode.
:param rtime: A timestamp to be used as the mail date
:returns: the UID of the new message as assigned by the server. If the
                  message is saved, but its UID cannot be found, it will
return 0. If the message can't be written (folder is
read-only for example) it will return -1."""
self.ui.savemessage('imap', uid, flags, self)
# already have it, just save modified flags
if uid > 0 and self.uidexists(uid):
self.savemessageflags(uid, flags)
return uid
retry_left = 2 # succeeded in APPENDING?
imapobj = self.imapserver.acquireconnection()
try:
while retry_left:
# UIDPLUS extension provides us with an APPENDUID response.
use_uidplus = 'UIDPLUS' in imapobj.capabilities
# get the date of the message, so we can pass it to the server.
date = self.getmessageinternaldate(content, rtime)
content = re.sub("(?<!\r)\n", "\r\n", content)
if not use_uidplus:
# insert a random unique header that we can fetch later
(headername, headervalue) = self.generate_randomheader(
content)
self.ui.debug('imap', 'savemessage: header is: %s: %s' %\
(headername, headervalue))
content = self.savemessage_addheader(content, headername,
headervalue)
if len(content)>200:
dbg_output = "%s...%s" % (content[:150], content[-50:])
else:
dbg_output = content
self.ui.debug('imap', "savemessage: date: %s, content: '%s'" %
(date, dbg_output))
try:
# Select folder for append and make the box READ-WRITE
imapobj.select(self.getfullname())
except imapobj.readonly:
# readonly exception. Return original uid to notify that
# we did not save the message. (see savemessage in Base.py)
self.ui.msgtoreadonly(self, uid, content, flags)
return uid
#Do the APPEND
try:
(typ, dat) = imapobj.append(self.getfullname(),
imaputil.flagsmaildir2imap(flags),
date, content)
retry_left = 0 # Mark as success
except imapobj.abort as e:
# connection has been reset, release connection and retry.
retry_left -= 1
self.imapserver.releaseconnection(imapobj, True)
imapobj = self.imapserver.acquireconnection()
if not retry_left:
raise OfflineImapError("Saving msg in folder '%s', "
"repository '%s' failed (abort). Server reponded: %s\n"
"Message content was: %s" %
(self, self.getrepository(), str(e), dbg_output),
OfflineImapError.ERROR.MESSAGE)
self.ui.error(e, exc_info()[2])
except imapobj.error as e: # APPEND failed
# If the server responds with 'BAD', append()
# raise()s directly. So we catch that too.
# drop conn, it might be bad.
self.imapserver.releaseconnection(imapobj, True)
imapobj = None
raise OfflineImapError("Saving msg folder '%s', repo '%s'"
"failed (error). Server reponded: %s\nMessage content was: "
"%s" % (self, self.getrepository(), str(e), dbg_output),
OfflineImapError.ERROR.MESSAGE)
# Checkpoint. Let it write out stuff, etc. Eg searches for
# just uploaded messages won't work if we don't do this.
(typ,dat) = imapobj.check()
assert(typ == 'OK')
# get the new UID. Test for APPENDUID response even if the
# server claims to not support it, as e.g. Gmail does :-(
if use_uidplus or imapobj._get_untagged_response('APPENDUID', True):
# get new UID from the APPENDUID response, it could look
# like OK [APPENDUID 38505 3955] APPEND completed with
                    # 38505 being the folder's UIDVALIDITY and 3955 the new UID.
# note: we would want to use .response() here but that
# often seems to return [None], even though we have
# data. TODO
resp = imapobj._get_untagged_response('APPENDUID')
if resp == [None]:
self.ui.warn("Server supports UIDPLUS but got no APPENDUID "
"appending a message.")
return 0
uid = long(resp[-1].split(' ')[1])
if uid == 0:
self.ui.warn("savemessage: Server supports UIDPLUS, but"
" we got no usable uid back. APPENDUID reponse was "
"'%s'" % str(resp))
else:
# we don't support UIDPLUS
uid = self.savemessage_searchforheader(imapobj, headername,
headervalue)
# See docs for savemessage in Base.py for explanation
# of this and other return values
if uid == 0:
                        self.ui.debug('imap', 'savemessage: attempt to get new UID '
                            'failed. Searching headers manually.')
                        uid = self.savemessage_fetchheaders(imapobj, headername,
                            headervalue)
                        if uid == 0:
                            self.ui.warn("savemessage: Searching mails for new "
                                "Message-ID failed. Could not determine new UID.")
finally:
self.imapserver.releaseconnection(imapobj)
if uid: # avoid UID FETCH 0 crash happening later on
self.messagelist[uid] = {'uid': uid, 'flags': flags}
self.ui.debug('imap', 'savemessage: returning new UID %d' % uid)
return uid
def savemessageflags(self, uid, flags):
"""Change a message's flags to `flags`.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
imapobj = self.imapserver.acquireconnection()
try:
try:
imapobj.select(self.getfullname())
except imapobj.readonly:
self.ui.flagstoreadonly(self, [uid], flags)
return
result = imapobj.uid('store', '%d' % uid, 'FLAGS',
imaputil.flagsmaildir2imap(flags))
assert result[0] == 'OK', 'Error with store: ' + '. '.join(result[1])
finally:
self.imapserver.releaseconnection(imapobj)
result = result[1][0]
if not result:
self.messagelist[uid]['flags'] = flags
else:
flags = imaputil.flags2hash(imaputil.imapsplit(result)[1])['FLAGS']
self.messagelist[uid]['flags'] = imaputil.flagsimap2maildir(flags)
def addmessageflags(self, uid, flags):
self.addmessagesflags([uid], flags)
def addmessagesflags_noconvert(self, uidlist, flags):
self.processmessagesflags('+', uidlist, flags)
def addmessagesflags(self, uidlist, flags):
"""This is here for the sake of UIDMaps.py -- deletemessages must
add flags and get a converted UID, and if we don't have noconvert,
then UIDMaps will try to convert it twice."""
self.addmessagesflags_noconvert(uidlist, flags)
def deletemessageflags(self, uid, flags):
self.deletemessagesflags([uid], flags)
def deletemessagesflags(self, uidlist, flags):
self.processmessagesflags('-', uidlist, flags)
def processmessagesflags(self, operation, uidlist, flags):
if len(uidlist) > 101:
            # Hack for those IMAP servers with a limited line length
self.processmessagesflags(operation, uidlist[:100], flags)
self.processmessagesflags(operation, uidlist[100:], flags)
return
imapobj = self.imapserver.acquireconnection()
try:
try:
imapobj.select(self.getfullname())
except imapobj.readonly:
self.ui.flagstoreadonly(self, uidlist, flags)
return
r = imapobj.uid('store',
imaputil.uid_sequence(uidlist),
operation + 'FLAGS',
imaputil.flagsmaildir2imap(flags))
assert r[0] == 'OK', 'Error with store: ' + '. '.join(r[1])
r = r[1]
finally:
self.imapserver.releaseconnection(imapobj)
# Some IMAP servers do not always return a result. Therefore,
# only update the ones that it talks about, and manually fix
# the others.
needupdate = list(uidlist)
for result in r:
if result == None:
# Compensate for servers that don't return anything from
# STORE.
continue
attributehash = imaputil.flags2hash(imaputil.imapsplit(result)[1])
if not ('UID' in attributehash and 'FLAGS' in attributehash):
# Compensate for servers that don't return a UID attribute.
continue
flagstr = attributehash['FLAGS']
uid = long(attributehash['UID'])
self.messagelist[uid]['flags'] = imaputil.flagsimap2maildir(flagstr)
try:
needupdate.remove(uid)
except ValueError: # Let it slide if it's not in the list
pass
for uid in needupdate:
if operation == '+':
self.messagelist[uid]['flags'] |= flags
elif operation == '-':
self.messagelist[uid]['flags'] -= flags
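    # Note (added commentary): the store issued above has the form
    #     UID STORE 2,4:7 +FLAGS (\Seen \Flagged)
    # for operation '+'; uid_sequence() collapses the uidlist into ranges.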
def change_message_uid(self, uid, new_uid):
"""Change the message from existing uid to new_uid
If the backend supports it. IMAP does not and will throw errors."""
raise OfflineImapError('IMAP backend cannot change a messages UID from '
'%d to %d' % (uid, new_uid),
OfflineImapError.ERROR.MESSAGE)
def deletemessage(self, uid):
self.deletemessages_noconvert([uid])
def deletemessages(self, uidlist):
self.deletemessages_noconvert(uidlist)
def deletemessages_noconvert(self, uidlist):
# Weed out ones not in self.messagelist
uidlist = [uid for uid in uidlist if self.uidexists(uid)]
if not len(uidlist):
return
self.addmessagesflags_noconvert(uidlist, set('T'))
imapobj = self.imapserver.acquireconnection()
try:
try:
imapobj.select(self.getfullname())
except imapobj.readonly:
self.ui.deletereadonly(self, uidlist)
return
if self.expunge:
assert(imapobj.expunge()[0] == 'OK')
finally:
self.imapserver.releaseconnection(imapobj)
for uid in uidlist:
del self.messagelist[uid]
|
spaetz/offlineimap
|
offlineimap/folder/IMAP.py
|
Python
|
gpl-2.0
| 33,949 | 0.004242 |
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Editor.
"""
from nose.plugins.attrib import attr
from .test_studio_video_module import CMSVideoBaseTest
@attr('shard_2')
class VideoEditorTest(CMSVideoBaseTest):
"""
CMS Video Editor Test Class
"""
def setUp(self):
super(VideoEditorTest, self).setUp()
def _create_video_component(self, subtitles=False):
"""
Create a video component and navigate to unit page
Arguments:
subtitles (bool): Upload subtitles or not
"""
if subtitles:
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
self.navigate_to_course_unit()
def test_default_settings(self):
"""
Scenario: User can view Video metadata
Given I have created a Video component
And I edit the component
Then I see the correct video settings and default values
"""
self._create_video_component()
self.edit_component()
self.assertTrue(self.video.verify_settings())
def test_modify_video_display_name(self):
"""
Scenario: User can modify Video display name
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
Then I can modify video display name
And my video display name change is persisted on save
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Component Display Name', 'Transformers')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertTrue(self.video.verify_field_value('Component Display Name', 'Transformers'))
def test_hidden_captions(self):
"""
Scenario: Captions are hidden when "transcript display" is false
Given I have created a Video component with subtitles
And I have set "transcript display" to False
Then when I view the video it does not show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'False', 'select')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_shown_captions(self):
"""
Scenario: Captions are shown when "transcript display" is true
Given I have created a Video component with subtitles
And I have set "transcript display" to True
Then when I view the video it does show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'True', 'select')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
def test_translations_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "uk, zh" translations
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
def test_upload_large_transcript(self):
"""
Scenario: User can upload transcript file with > 1mb size
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "1mb_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('1mb_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
def test_translations_download_works_w_saving(self):
"""
Scenario: Translations downloading works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And video language menu has "uk, zh" translations
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_download_works_wo_saving(self):
"""
Scenario: Translations downloading works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_remove_works_w_saving(self):
"""
Scenario: Translations removing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
Then I remove translation for "zh" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_remove_works_wo_saving(self):
"""
Scenario: Translations removing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I see translations for "uk"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.assertEqual(self.video.translations(), ['uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_w_saving(self):
"""
Scenario: Translations clearing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_wo_saving(self):
"""
Scenario: Translations clearing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_cannot_upload_sjson_translation(self):
"""
Scenario: User cannot upload translations in sjson format
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "uk" language code
And I try to upload transcript file "subs_3_yD_cEKoCk.srt.sjson"
Then I see validation error "Only SRT files can be uploaded. Please select a file ending in .srt to upload."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('uk')
self.video.upload_asset('subs_3_yD_cEKoCk.srt.sjson', asset_type='transcript')
error_msg = 'Only SRT files can be uploaded. Please select a file ending in .srt to upload.'
self.assertEqual(self.video.upload_status_message, error_msg)
def test_replace_translation_w_save(self):
"""
Scenario: User can easy replace the translation by another one w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_replace_translation_wo_save(self):
"""
Scenario: User can easy replace the translation by another one w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_translation_upload_remove_upload(self):
"""
Scenario: Upload "zh" file "A" -> Remove "zh" -> Upload "zh" file "B"
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
Then I remove translation for "zh" language code
And I upload transcript file "uk_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.video.upload_translation('uk_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_select_language_twice(self):
"""
Scenario: User cannot select the same language twice
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "zh" language code
And I click button "Add"
Then I cannot choose "zh" language code
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.assertEqual(self.video.translations(), [u'zh', u''])
def test_table_of_contents(self):
"""
Scenario: User can see table of content at the first position
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|table |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "table, uk" translations
And I see video language with code "table" at position "0"
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'table')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), [u'table', u'uk'])
self.assertEqual(self.video.caption_languages.keys()[0], 'table')
def test_upload_transcript_with_BOM(self):
"""
        Scenario: User can upload a transcript file with a BOM (Byte Order Mark) in it.
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts_with_BOM.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "莎拉·佩林 (Sarah Palin)" text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts_with_BOM.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "莎拉·佩林 (Sarah Palin)".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
|
eestay/edx-platform
|
common/test/acceptance/tests/video/test_studio_video_editor.py
|
Python
|
agpl-3.0
| 22,804 | 0.001114 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
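# Note (added commentary): with CONF.quota_metadata_items == 3 the stub above
# would return {"metadata": {"key0": "blah", "key1": "blah", "key2": "blah"}},
# i.e. exactly the quota-limit number of items.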
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {}}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound('bogus test message')
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
# If keys differ only in case, the response should contain the
# entries the server actually stored.
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
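# A minimal sketch (assumed usage, not part of this suite) of the stub
# pattern used throughout: stubs.Set() swaps a real module attribute for
# a fake so the controller under test never touches a database.
#
#   def fake_get(context, snapshot_id):
#       return {'key1': 'value1'}
#   self.stubs.Set(cinder.db, 'snapshot_metadata_get', fake_get)
#   # cinder.db.snapshot_metadata_get(ctx, sid) now returns the fake dict.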
|
Thingee/cinder
|
cinder/tests/api/v2/test_snapshot_metadata.py
|
Python
|
apache-2.0
| 21,148 | 0.000142 |
#! /usr/bin/env python
import os
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB(VmKey):
'''Private.
'''
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except:
return 0.0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
'''Return memory usage in bytes.
'''
return _VmB('VmSize:') - since
def resident(since=0.0):
'''Return resident memory usage in bytes.
'''
return _VmB('VmRSS:') - since
def stacksize(since=0.0):
'''Return stack size in bytes.
'''
return _VmB('VmStk:') - since
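# Usage sketch: take a baseline, do some work, then pass the baseline
# back in as `since` to get the delta (the workload below is arbitrary).
base = memory()
junk = [0] * 1000000
print 'VmSize grew by %.1f MB' % (memory(base) / (1024.0 * 1024.0))
print 'resident: %.1f MB' % (resident() / (1024.0 * 1024.0))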
|
mattduan/proof
|
util/memory.py
|
Python
|
bsd-3-clause
| 982 | 0.003055 |
#!usr/bin/python
'''
Get satellite data according to input file.
'''
from random import shuffle,random
import os,json
from utils.mapbox_static import MapboxStatic
from utils.coordinate_converter import CoordConvert
from modules.getFeatures import latLon,getBBox
from libs.foldernames import satDataFolder,testDataFolder
def get_satellite(inputFile,mapboxtoken=None,count=1000,zoomLevel=17,
outputFolder='data',xpixel=480,ypixel=360,epsg=None,elements=None,
randomImages=False):
'''
Download satellite imagery for the features in the GIS input file.
Parameters:
'inputFile': Input file (GeoJSON format or parsed into GeoJSON)
'mapboxtoken': Access token for Mapbox (go to mapbox.com to create one)
'count': Number of satellite images to be downloaded
'zoomLevel': Zoom level (see libs/zoomLevel.csv for resolutions)
'outputFolder': Folder to store output data in
'xpixel': Number of pixels of satellite images (width)
'ypixel': Number of pixels of satellite images (height)
'epsg': EPSG code for coordinate system in GIS data (will try to find automatically
if not provided)
'elements': GIS data can also be input directly
'randomImages': Get centers of (shuffled) polygons (False) or random points within the bounding box of the data (True)
'''
if (not inputFile) and (not elements):
print "Error: Provide input file."
exit()
if not mapboxtoken:
print "Error: Provide mapbox token (more informations on www.mapbox.com)."
exit()
#parser.add_argument('--sport',
# type=str, default='baseball',
# help='Sport tag, for example: baseball, tennis, or soccer.')
# We need the elements
if not elements:
print 'Loading %s...' % inputFile
with open(inputFile, 'r') as f:
elements = json.load(f)
#get coordinate system
myCoordConvert = CoordConvert()
code=myCoordConvert.getCoordSystem(elements,epsg)
#create folders
subpath=outputFolder+"/"+os.path.split(inputFile)[-1][:-5]
if not os.path.isdir(subpath):
os.mkdir(subpath)
print 'Directory',subpath,'created'
if not os.path.isdir(subpath+satDataFolder):
os.mkdir(subpath+satDataFolder)
print 'Directory',subpath+satDataFolder,'created'
if not os.path.isdir(subpath+testDataFolder):
os.mkdir(subpath+testDataFolder)
print 'Directory',subpath+testDataFolder,'created'
#Write metadata
with open(subpath+satDataFolder+"meta.csv","a+") as f:
f.write("ZoomLevel,,"+str(zoomLevel)+"\n")
#get bbox if set to random
if randomImages:
xlist=[]
ylist=[]
for element in elements['features']:
minxe,maxxe,minye,maxye=getBBox(element)
xlist.append(minxe)
xlist.append(maxxe)
ylist.append(minye)
ylist.append(maxye)
minx=min(xlist)
maxx=max(xlist)
miny=min(ylist)
maxy=max(ylist)
element_list = []
index_list = range(len(elements['features'])) #feature map
# Randomize the element list to make sure we don't download all pics from the same area
shuffle(index_list)
for i in index_list:
element_list.append(elements['features'][i]) #feature map
# Now we're gonna download the satellite images for these locations
namespace= os.path.split(inputFile)[-1][:-5] #get input file name as namespace
mapbox_static = MapboxStatic(
namespace=namespace,
root_folder=subpath+satDataFolder[0:-1])
total_downloaded = 0
c = 0
print "------------------- Getting Satellite data -------------------"
for element in element_list:
if randomImages:
randomValue=random()
av_lon=minx+((maxx-minx)*randomValue)
av_lat=miny+((maxy-miny)*randomValue)
element_id_str=1000000+c #1000000 indicates random value
with open(subpath+satDataFolder+"meta.csv","a+") as f:
f.write(str(element_id_str)+","+str(av_lon)+","+str(av_lat)+"\n")
else:
element_id_str = index_list[c]
#figure out center of polygon
av_lon,av_lat=latLon(element)
#Convert to standard format
if code != 4326: # if not already in WGS84 (EPSG:4326) standard format
lotlan= myCoordConvert.convert(av_lon,av_lat)
longitude=lotlan[0]
latitude=lotlan[1]
else: #if already in wgs84 format
latitude= av_lat
longitude= av_lon
#get url
print "Coordinates WSG64: "+str(longitude)+','+str(latitude)
if (av_lon != longitude) and (av_lat != latitude):
print "Coordinates Native: "+str(av_lon)+','+str(av_lat)
url = mapbox_static.get_url(
latitude=latitude,
longitude=longitude,
mapbox_zoom=zoomLevel,
access_token=mapboxtoken,
width=xpixel,
height=ypixel)
#download data
success = mapbox_static.download_tile(
element_id=element_id_str,
url=url,verbose=True)
if success:
total_downloaded += 1
print total_downloaded,'/',count
c += 1
if total_downloaded >= count:
break
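# Usage sketch (token and file names are placeholders, not real values):
#   get_satellite('fields.geojson', mapboxtoken='pk.XXXX', count=50,
#                 zoomLevel=17, outputFolder='data', randomImages=False)
# Images and a meta.csv land under data/fields/<satDataFolder>/.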
|
worldbank/cv4ag
|
modules/get_satellite.py
|
Python
|
mit
| 4,605 | 0.044517 |
__author__ = 'yinjun'
class Solution:
"""
@param nums: The integer array
@return: The length of LIS (longest increasing subsequence)
"""
def longestIncreasingSubsequence(self, nums):
# write your code here
if nums == None or nums == []:
return 0
l = len(nums)
length = [0 for i in range(l)]
maxLength = 0
for i in range(l):
length[i] = 1
for j in range(0, i):
if nums[j] <= nums[i]:
length[i] = max(length[i], length[j] + 1)
maxLength = max(maxLength, length[i])
return maxLength
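# Quick check (inputs assumed): the longest non-decreasing subsequences
# are [1, 2, 3] and [2, 4, 5, 7] respectively.
s = Solution()
print s.longestIncreasingSubsequence([5, 4, 1, 2, 3])     # 3
print s.longestIncreasingSubsequence([4, 2, 4, 5, 3, 7])  # 4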
|
shootsoft/practice
|
lintcode/NineChapters/04/longest-increasing-subsequence.py
|
Python
|
apache-2.0
| 646 | 0.006192 |
import pandas as pd
import numpy as np
import cython as cy
#coding=UTF8
class Strategy(object):
_capital = cy.declare(cy.double)
_net_flows = cy.declare(cy.double)
_last_value = cy.declare(cy.double)
_last_price = cy.declare(cy.double)
_last_fee = cy.declare(cy.double)
def run(self):
pass
def setup(self,data):
self.data=data
@property
def values(self):
if self.root.stale:
self.root.update(self.now, None)
return self._values.ix[:self.now]
class SMAStrategy(Strategy):
def __init__(self,short,long):
self.short=short
self.long=long
def run(self):
short_avg=pd.rolling_mean(self.data,self.short)
long_avg=pd.rolling_mean(self.data,self.long)
print(short_avg)
for day in self.data.index:
pass
print(self.data[day])
'''if short_avg[day] > long_avg[day]:
pass  # e.g. signal long
else:
pass  # e.g. signal short'''
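# Hypothetical usage sketch (data assumed; the class above is clearly
# unfinished, so this only exercises what exists today):
#   prices = pd.Series(np.random.randn(100).cumsum() + 100.0)
#   strat = SMAStrategy(short=5, long=20)
#   strat.setup(prices)
#   strat.run()   # prints the 5-period rolling mean, then each price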
|
dingmingliu/quanttrade
|
quanttrade/core/strategy.py
|
Python
|
apache-2.0
| 1,007 | 0.017875 |
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import pytest
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import XgmiiFrame, XgmiiSource, XgmiiSink
from cocotbext.axi import AxiStreamBus, AxiStreamSource, AxiStreamSink
class TB:
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
if len(dut.xgmii_txd) == 64:
cocotb.start_soon(Clock(dut.rx_clk, 6.4, units="ns").start())
cocotb.start_soon(Clock(dut.tx_clk, 6.4, units="ns").start())
else:
cocotb.start_soon(Clock(dut.rx_clk, 3.2, units="ns").start())
cocotb.start_soon(Clock(dut.tx_clk, 3.2, units="ns").start())
self.xgmii_source = XgmiiSource(dut.xgmii_rxd, dut.xgmii_rxc, dut.rx_clk, dut.rx_rst)
self.xgmii_sink = XgmiiSink(dut.xgmii_txd, dut.xgmii_txc, dut.tx_clk, dut.tx_rst)
self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.tx_clk, dut.tx_rst)
self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.rx_clk, dut.rx_rst)
dut.rx_ptp_ts.setimmediatevalue(0)
dut.tx_ptp_ts.setimmediatevalue(0)
async def reset(self):
self.dut.rx_rst.setimmediatevalue(0)
self.dut.tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
self.dut.rx_rst <= 1
self.dut.tx_rst <= 1
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
self.dut.rx_rst <= 0
self.dut.tx_rst <= 0
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12):
tb = TB(dut)
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = XgmiiFrame.from_payload(test_data)
await tb.xgmii_source.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.axis_sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser == 0
assert tb.axis_sink.empty()
await RisingEdge(dut.rx_clk)
await RisingEdge(dut.rx_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12):
tb = TB(dut)
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.xgmii_sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert tb.xgmii_sink.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
async def run_test_tx_alignment(dut, payload_data=None, ifg=12):
enable_dic = int(os.getenv("PARAM_ENABLE_DIC"))
tb = TB(dut)
byte_width = tb.axis_source.width // 8
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
for length in range(60, 92):
await tb.reset()
test_frames = [payload_data(length) for k in range(10)]
start_lane = []
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.xgmii_sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.ctrl is None
start_lane.append(rx_frame.start_lane)
tb.log.info("length: %d", length)
tb.log.info("start_lane: %s", start_lane)
start_lane_ref = []
# compute expected starting lanes
lane = 0
deficit_idle_count = 0
for test_data in test_frames:
if ifg == 0:
lane = 0
start_lane_ref.append(lane)
lane = (lane + len(test_data)+4+ifg) % byte_width
if enable_dic:
offset = lane % 4
if deficit_idle_count+offset >= 4:
offset += 4
lane = (lane - offset) % byte_width
deficit_idle_count = (deficit_idle_count + offset) % 4
else:
offset = lane % 4
if offset > 0:
offset += 4
lane = (lane - offset) % byte_width
tb.log.info("start_lane_ref: %s", start_lane_ref)
assert start_lane_ref == start_lane
await RisingEdge(dut.tx_clk)
assert tb.xgmii_sink.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
def size_list():
return list(range(60, 128)) + [512, 1514, 9214] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
for test in [run_test_rx, run_test_tx]:
factory = TestFactory(test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12, 0])
factory.generate_tests()
factory = TestFactory(run_test_tx_alignment)
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
@pytest.mark.parametrize("enable_dic", [1, 0])
@pytest.mark.parametrize("data_width", [32, 64])
def test_eth_mac_10g(request, data_width, enable_dic):
dut = "eth_mac_10g"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "axis_xgmii_rx_32.v"),
os.path.join(rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(rtl_dir, "axis_xgmii_tx_32.v"),
os.path.join(rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(rtl_dir, "lfsr.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['CTRL_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['ENABLE_PADDING'] = 1
parameters['ENABLE_DIC'] = enable_dic
parameters['MIN_FRAME_LENGTH'] = 64
parameters['PTP_PERIOD_NS'] = 0x6 if parameters['DATA_WIDTH'] == 64 else 0x3
parameters['PTP_PERIOD_FNS'] = 0x6666 if parameters['DATA_WIDTH'] == 64 else 0x3333
parameters['TX_PTP_TS_ENABLE'] = 0
parameters['TX_PTP_TS_WIDTH'] = 96
parameters['TX_PTP_TAG_ENABLE'] = parameters['TX_PTP_TS_ENABLE']
parameters['TX_PTP_TAG_WIDTH'] = 16
parameters['RX_PTP_TS_ENABLE'] = 0
parameters['RX_PTP_TS_WIDTH'] = 96
parameters['TX_USER_WIDTH'] = (parameters['TX_PTP_TAG_WIDTH'] if parameters['TX_PTP_TS_ENABLE'] and parameters['TX_PTP_TAG_ENABLE'] else 0) + 1
parameters['RX_USER_WIDTH'] = (parameters['RX_PTP_TS_WIDTH'] if parameters['RX_PTP_TS_ENABLE'] else 0) + 1
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
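# Assumed workflow for cocotb-test: each pytest case below builds and
# runs the simulator itself, so no Makefile is needed.
#   pytest test_eth_mac_10g.py             # all DATA_WIDTH / DIC combos
#   pytest test_eth_mac_10g.py -k "64-1"   # a single parameter combination
# Build artifacts land in tests_dir/sim_build/<test-name>/ as set above.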
|
alexforencich/verilog-ethernet
|
tb/eth_mac_10g/test_eth_mac_10g.py
|
Python
|
mit
| 9,014 | 0.000998 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-22 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wishes', '0001_squashed_0002_auto_20160522_1408'),
]
operations = [
migrations.AlterField(
model_name='wish',
name='brief',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Brief'),
),
]
|
dvl/imagefy-web
|
imagefy/wishes/migrations/0002_auto_20160522_1447.py
|
Python
|
mit
| 506 | 0.001976 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Luis Alejandro Martínez Faneyth
#
# This file is part of Condiment.
#
# Condiment is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Condiment is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
condiment.common.setup.report
=========================
This module contains common functions to process the information needed
by Setuptools/Distutils setup script.
"""
from distutils.cmd import Command
class report_setup_data(Command):
description = 'Compress CSS files.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from pprint import pprint
from condiment import BASEDIR
from condiment.common.setup.utils import (get_packages, get_data_files,
get_package_data,
get_setup_data)
from condiment.config.pkg import (exclude_sources, exclude_patterns,
include_data_patterns, exclude_packages)
setup_data = get_setup_data(BASEDIR)
packages = get_packages(path=BASEDIR,
exclude_packages=exclude_packages)
data_files = get_data_files(path=BASEDIR, patterns=include_data_patterns,
exclude_files=exclude_sources + \
exclude_patterns)
package_data = get_package_data(path=BASEDIR, packages=packages,
data_files=data_files,
exclude_files=exclude_sources + \
exclude_patterns,
exclude_packages=exclude_packages)
setup_data['data_files'] = data_files
setup_data['package_data'] = package_data
pprint(setup_data)
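# Usage sketch: like any distutils command, this runs through setup.py
# once it is registered in cmdclass (the wiring below is assumed, not
# shown elsewhere in this module):
#   setup(..., cmdclass={'report_setup_data': report_setup_data})
#   $ python setup.py report_setup_data   # pprints the aggregated data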
|
LuisAlejandro/condiment
|
condiment/common/setup/report.py
|
Python
|
gpl-3.0
| 2,481 | 0.003226 |
"""
Defines forms for providing validation of embargo admin details.
"""
import ipaddress
from django import forms
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from .models import IPFilter, RestrictedCourse
class RestrictedCourseForm(forms.ModelForm):
"""Validate course keys for the RestrictedCourse model.
The default behavior in Django admin is to:
* Save course keys for courses that do not exist.
* Return a 500 response if the course key format is invalid.
Using this form ensures that we display a user-friendly
error message instead.
"""
class Meta:
model = RestrictedCourse
fields = '__all__'
def clean_course_key(self):
"""Validate the course key.
Checks that the key format is valid and that
the course exists. If not, displays an error message.
Returns:
CourseKey
"""
cleaned_id = self.cleaned_data['course_key']
error_msg = _('COURSE NOT FOUND. Please check that the course ID is valid.')
try:
course_key = CourseKey.from_string(cleaned_id)
except InvalidKeyError:
raise forms.ValidationError(error_msg) # lint-amnesty, pylint: disable=raise-missing-from
if not modulestore().has_course(course_key):
raise forms.ValidationError(error_msg)
return course_key
class IPFilterForm(forms.ModelForm):
"""Form validating entry of IP addresses"""
class Meta:
model = IPFilter
fields = '__all__'
def _is_valid_ip(self, address):
"""Whether or not address is a valid ipv4 address or ipv6 address"""
try:
# Is this a valid ip address?
ipaddress.ip_network(address)
except ValueError:
return False
return True
def _valid_ip_addresses(self, addresses):
"""
Checks if a csv string of IP addresses contains valid values.
If not, raises a ValidationError.
"""
if addresses == '':
return ''
error_addresses = []
for addr in addresses.split(','):
address = addr.strip()
if not self._is_valid_ip(address):
error_addresses.append(address)
if error_addresses:
msg = f'Invalid IP Address(es): {error_addresses}'
msg += ' Please fix the error(s) and try again.'
raise forms.ValidationError(msg)
return addresses
def clean_whitelist(self):
"""Validates the whitelist"""
whitelist = self.cleaned_data["whitelist"]
return self._valid_ip_addresses(whitelist)
def clean_blacklist(self):
"""Validates the blacklist"""
blacklist = self.cleaned_data["blacklist"]
return self._valid_ip_addresses(blacklist)
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/embargo/forms.py
|
Python
|
agpl-3.0
| 3,045 | 0.000657 |
# Author: legend
# Mail: kygx.legend@gmail.com
# File: mesos.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import re
MASTER_IP = '172.16.104.62'
MESOS_PATH = '/data/opt/mesos-1.4.0/build'
WORK_DIR = '/tmp/mesos/work_dir'
MASTER_SH = 'bin/mesos-master.sh'
WORKER_SH = 'bin/mesos-agent.sh'
def print_cmd(cmd, tag=None):
if not tag:
print cmd
return
print '[{}] {}'.format(tag, cmd)
def run(cmd):
cmd = '{} --work_dir={}'.format(cmd, WORK_DIR)
print_cmd(cmd, tag='Run')
print os.system(cmd)
def run_master():
run('{} --ip={}'.format(os.path.join(MESOS_PATH, MASTER_SH), MASTER_IP))
def run_agent():
run('{} --master={}:5050'.format(os.path.join(MESOS_PATH, WORKER_SH), MASTER_IP))
def kill_master():
cmd = 'pkill lt-mesos-master'
print_cmd(cmd, tag='Run')
print os.system(cmd)
def kill_agent():
cmd = 'pkill lt-mesos-agent'
print_cmd(cmd, tag='Run')
print os.system(cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Mesos Cluster Tools')
parser.add_argument('-rm', '--runmaster', help='Run master', action='store_true')
parser.add_argument('-ra', '--runagent', help='Run agent', action='store_true')
parser.add_argument('-km', '--killmaster', help='Kill master', action='store_true')
parser.add_argument('-ka', '--killagent', help='Kill agent', action='store_true')
args = parser.parse_args()
if args.runmaster:
run_master()
if args.runagent:
run_agent()
if args.killmaster:
kill_master()
if args.killagent:
kill_agent()
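# Example invocations (paths and MASTER_IP above are deployment-specific):
#   ./mesos.py --runmaster   # start mesos-master bound to MASTER_IP
#   ./mesos.py -ra           # start an agent pointed at MASTER_IP:5050
#   ./mesos.py -km -ka       # kill master and agent processes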
|
legendlee1314/ooni
|
mesos.py
|
Python
|
mit
| 1,625 | 0.008 |
#OBJ2VXP: Converts simple OBJ files to VXP expansions
#Copyright (C) 2004-2015 Foone Turing
#
#This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
sys.path.append('code')
import pygame
from pygame.constants import *
import sockgui
sockgui.setDataPath('code')
from converterbase import ConverterBase
import os
import time
import obj2vxp
import obj2vxptex
from error import SaveError,LoadError
import ConfigParser
import vxpinstaller
class obj2vxpGUI(ConverterBase):
def __init__(self,screen):
ConverterBase.__init__(self,screen)
ui=self.ui
ys=self.makeTab(10,94,'CFG settings')
ui.add(sockgui.Label(ui,[20,ys+10],'Expansion name:'))
ui.add(sockgui.Label(ui,[20,ys+26],'Author name:'))
ui.add(sockgui.Label(ui,[20,ys+42],'Orig. Author name:'))
ui.add(sockgui.Label(ui,[20,ys+58],'Shortname:'))
ui.add(sockgui.Label(ui,[20,ys+74],'Filename:'))
self.filenamelabel=sockgui.Label(ui,[120,ys+74],'')
ui.add(self.filenamelabel)
self.namebox= sockgui.TextBox(ui,[120,ys+10-3],40)
self.authorbox= sockgui.TextBox(ui,[120,ys+26-3],40)
self.origauthorbox= sockgui.TextBox(ui,[120,ys+42-3],40)
self.shortnamebox= sockgui.TextBox(ui,[120,ys+58-3],40,callback=self.onShortNameChanged)
self.shortnamebox.setAllowedKeys(sockgui.UPPERCASE+sockgui.LOWERCASE+sockgui.DIGITS+'._-')
self.authorbox.setText(self.getAuthor())
ui.add(self.namebox)
ui.add(self.authorbox)
ui.add(self.origauthorbox)
ui.add(sockgui.Button(ui,[330,ys+42-3],'Same',callback=self.copyAuthorToOrigAuthor))
ui.add(self.shortnamebox)
self.namebox.activate()
ys=self.makeTab(ys+94+5,120,'OBJ to convert')
self.files=sockgui.ListBox(ui,[20,ys+10],[62,10],items=self.getOBJList())
if self.files.getNumItems()>0:
self.files.select(0)
ui.add(self.files)
self.enhance_color=sockgui.CheckBox(ui,[100,ys+103],'Enhance Color',self.getEnhanceColor())
self.textured=sockgui.CheckBox(ui,[200,ys+103],'Textured',self.getTextured())
ui.add(sockgui.Button(ui,[20,ys+99],'Refresh list',callback=self.refreshList))
ui.add(self.enhance_color)
ui.add(self.textured)
#ui.add(sockgui.BorderBox(ui,[10,224],[screen.get_width()-20,110]))
ys=self.makeTab(ys+120+5,30,'3dmm IDs')
ui.add(sockgui.Label(ui,[20,ys+10],'ID:'))
self.idbox=sockgui.TextBox(ui,[40,ys+7],10)
self.idbox.setAllowedKeys('0123456789')
ui.add(self.idbox)
ui.add(sockgui.Button(ui,[110,ys+7],'Generate ID',callback=self.generateNewID))
ys=self.makeTab(ys+30+5,66,'Control')
self.install_check=sockgui.CheckBox(ui,[240,ys+13],'Install VXP',self.getInstallCheck())
ui.add(self.install_check)
self.progress=sockgui.ProgressBox(ui,[20,ys+10],[200,16],maxvalue=6)
ui.add(self.progress)
self.errortext=sockgui.Label(ui,[20,ys+32],'')
ui.add(self.errortext)
self.startbutton=sockgui.Button(ui,[20,ys+46],'Create VXP',callback=self.createVXP)
ui.add(self.startbutton)
ui.registerHotKey(K_F5,self.updateListBox)
def refreshList(self,junk):
self.files.setItems(self.getOBJList())
def updateListBox(self,event):
if event.type==KEYUP:
self.refreshList(0)
def statusCallback(self,text):
self.errortext.setText(text)
self.ui.draw()
def createVXP(self,junk):
self.saveSettings()
self.progress.setValue(0)
try:
outfile=str(self.shortnamebox.getText())+'.vxp'
objfile=self.files.getSelectedText()
if objfile is None:
raise SaveError('no OBJ selected')
try:
uniqueid=int(self.idbox.getText())
except ValueError:
raise SaveError('Failed: Bad ID!')
name=str(self.namebox.getText())
author=str(self.authorbox.getText())
origauthor=str(self.origauthorbox.getText())
shortname=str(self.shortnamebox.getText())
enhance=self.enhance_color.isChecked()
self.errortext.setText('Converting...')
if self.textured.isChecked():
ret=obj2vxptex.CreateVXPExpansionFromOBJTextured(name,author,origauthor,outfile,shortname,objfile,
uniqueid,self.progressCallback,self.statusCallback)
else:
ret=obj2vxp.CreateVXPExpansionFromOBJ(name,author,origauthor,outfile,shortname,objfile,
uniqueid,self.progressCallback,enhance,self.statusCallback)
if ret:
self.errortext.setText('VXP saved as %s' % (outfile))
self.idbox.setText('') #So we don't reuse them by mistake.
if self.install_check.isChecked():
vxpinstaller.installVXP(outfile)
self.errortext.setText('VXP saved as %s, and installed.' % (outfile))
else:
self.errortext.setText('Failed: unknown error (!ret)')
except SaveError,e:
self.errortext.setText('Failed: ' + str(e).strip('"'))
except LoadError,e:
self.errortext.setText('Failed: ' + str(e).strip('"'))
except ValueError:
self.errortext.setText('Failed: Bad ID!')
except pygame.error,e:
self.errortext.setText('Failed: ' + str(e).strip('"'))
def copyAuthorToOrigAuthor(self,junk):
self.origauthorbox.setText(self.authorbox.getText())
def saveExtraSettings(self):
try:
self.config.add_section('obj2vxp')
except:
pass
self.config.set('obj2vxp','enhance',`self.enhance_color.isChecked()`)
self.config.set('obj2vxp','textured',`self.textured.isChecked()`)
def getEnhanceColor(self):
try:
val=self.config.get('obj2vxp','enhance')
return sockgui.BoolConv(val)
except:
return False
def getTextured(self):
try:
val=self.config.get('obj2vxp','textured')
return sockgui.BoolConv(val)
except:
return False
def getOBJList(self):
out=[]
for file in os.listdir('.'):
flower=file.lower()
if flower.endswith('.obj'):
out.append(file)
return out
def onShortNameChanged(self,data,newtext):
if newtext=='':
out=''
else:
out=self.shortnamebox.getText() + '.vxp'
self.filenamelabel.setRed(os.path.exists(out))
self.filenamelabel.setText(out)
def RunConverter(title):
pygame.display.set_caption(title+'obj2vxpGUI '+obj2vxp.version)
screen=pygame.display.set_mode((375,397))
gui=obj2vxpGUI(screen)
return gui.run()
if __name__=='__main__':
pygame.init()
RunConverter('')
def GetInfo():
return ('obj2vxp','Convert OBJs to props',None,obj2vxp.version) # None is the ICONOS.
|
foone/7gen
|
bin/obj2vxpGUI.py
|
Python
|
gpl-2.0
| 6,717 | 0.056573 |
"""
Flask-EasyWebDAV
-------------
This is the description for that library
"""
from setuptools import setup
setup(
name='Flask-EasyWebDAV',
version='0.1',
url='http://github.com/ghachey/flask-easywebdav',
license='MIT',
author='Ghislain Hachey',
author_email='ghachey@outlook.com',
description='Very simple extension to add support for easywebdav',
long_description=__doc__,
py_modules=['flask_easywebdav'],
# if you would be using a package instead use packages instead
# of py_modules:
# packages=['flask_easywebdav'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'easywebdav'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
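# Install sketch (standard setuptools workflow, nothing project-specific):
#   $ pip install .   # or: python setup.py install
#   $ python -c "import flask_easywebdav"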
|
ghachey/flask-easywebdav
|
setup.py
|
Python
|
mit
| 1,092 | 0 |
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains the implementation of the data model for the mesos manager.
"""
import json
from cfgm_common.vnc_db import DBBase
from bitstring import BitArray
from vnc_api.vnc_api import (KeyValuePair)
from mesos_manager.vnc.vnc_mesos_config import VncMesosConfig as vnc_mesos_config
from mesos_manager.sandesh.mesos_introspect import ttypes as introspect
class DBBaseMM(DBBase):
obj_type = __name__
# Infra annotations that will be added on objects with custom annotations.
ann_fq_name_infra_key = ["project", "cluster", "owner"]
def __init__(self, uuid, obj_dict=None):
# By default there are no annotations added on an object.
self.ann_fq_name = None
@staticmethod
def get_infra_annotations():
"""Get infra annotations."""
annotations = {}
annotations['owner'] = vnc_mesos_config.cluster_owner()
annotations['cluster'] = vnc_mesos_config.cluster_name()
return annotations
@classmethod
def _get_annotations(cls, vnc_caller, name, mesos_type,
**custom_ann_kwargs):
"""Get all annotations.
Annotations are aggregated from multiple sources like infra info,
input params and custom annotations. This method is meant to be an
aggregator of all possible annotations.
"""
# Get annotations declared on the caller.
annotations = dict(vnc_caller.get_annotations())
# Update annotations with infra specific annotations.
infra_anns = cls.get_infra_annotations()
infra_anns['project'] = vnc_mesos_config.cluster_project_name()
annotations.update(infra_anns)
# Update annotations based on explicity input params.
input_anns = {}
input_anns['name'] = name
if mesos_type:
input_anns['kind'] = mesos_type
annotations.update(input_anns)
# Append other custom annotations.
annotations.update(custom_ann_kwargs)
return annotations
@classmethod
def add_annotations(cls, vnc_caller, obj, name, mesos_type=None,
**custom_ann_kwargs):
"""Add annotations on the input object.
Given an object, this method will add all required and specified
annotations on that object.
"""
# Construct annotations to be added on the object.
annotations = cls._get_annotations(vnc_caller, name,
mesos_type, **custom_ann_kwargs)
# Validate that annotations have all the info to construct
# the annotations-based-fq-name as required by the object's db.
if hasattr(cls, 'ann_fq_name_key'):
if not set(cls.ann_fq_name_key).issubset(annotations):
err_msg = "Annotations required to contruct mesos_fq_name for"+\
" object (%s:%s) was not found in input keyword args." %\
(name)
raise Exception(err_msg)
# Annotate the object.
for ann_key, ann_value in annotations.iteritems():
obj.add_annotations(KeyValuePair(key=ann_key, value=ann_value))
@classmethod
def _update_fq_name_to_uuid(cls, uuid, obj_dict):
cls._fq_name_to_uuid[tuple(obj_dict['fq_name'])] = uuid
@classmethod
def get_fq_name_to_uuid(cls, fq_name):
return cls._fq_name_to_uuid.get(tuple(fq_name))
@classmethod
def _get_ann_fq_name_from_obj(cls, obj_dict):
"""Get the annotated fully qualified name from the object.
Annotated-fq-names are constructed from annotations found on the
object. The format of the fq-name is specified in the object's db
class. This method will construct the annotated-fq-name of the input
object.
"""
fq_name = None
if hasattr(cls, 'ann_fq_name_key'):
fq_name = []
fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
if obj_dict.get('annotations') and\
obj_dict['annotations'].get('key_value_pair'):
kvps = obj_dict['annotations']['key_value_pair']
for elem in fq_name_key:
for kvp in kvps:
if kvp.get("key") != elem:
continue
fq_name.append(kvp.get("value"))
break
return fq_name
@classmethod
def _get_ann_fq_name_from_params(cls, **kwargs):
"""Construct annotated fully qualified name using input params."""
fq_name = []
fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
for elem in fq_name_key:
for key, value in kwargs.iteritems():
if key != elem:
continue
fq_name.append(value)
break
return fq_name
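# Illustrative (values assumed): for VirtualMachineMM, whose
# ann_fq_name_key is ["kind", "name"], the composed key order is
# infra + key = ["project", "cluster", "owner", "kind", "name"], so
# kwargs project='p1', cluster='c1', owner='mesos', kind='task',
# name='web-0' yield ['p1', 'c1', 'mesos', 'task', 'web-0'].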
@classmethod
def get_ann_fq_name_to_uuid(cls, vnc_caller, name,
mesos_type=None, **kwargs):
"""Get vnc object uuid corresponding to an annotated-fq-name.
The annotated-fq-name is constructed from the input params given
by the caller.
"""
# Construct annotations based on input params.
annotations = cls._get_annotations(vnc_caller, name,
mesos_type, **kwargs)
# Validate that annotations have all info required for construction
# of annotated-fq-name.
if hasattr(cls, 'ann_fq_name_key'):
if not set(cls.ann_fq_name_key).issubset(annotations):
err_msg = "Annotations required to construct mesos_fq_name for"+\
" object (%s) were not found in input keyword args." %\
(name)
raise Exception(err_msg)
# Look up annotated-fq-name in the annotated-fq-name to uuid table.
return cls._ann_fq_name_to_uuid.get(
tuple(cls._get_ann_fq_name_from_params(**annotations)))
@classmethod
def _update_ann_fq_name_to_uuid(cls, uuid, ann_fq_name):
cls._ann_fq_name_to_uuid[tuple(ann_fq_name)] = uuid
def build_fq_name_to_uuid(self, uuid, obj_dict):
"""Populate uuid in all tables tracking uuid."""
if not obj_dict:
return
# Update annotated-fq-name to uuid table.
self.ann_fq_name = self._get_ann_fq_name_from_obj(obj_dict)
if self.ann_fq_name:
self._update_ann_fq_name_to_uuid(uuid, self.ann_fq_name)
# Update vnc fq-name to uuid table.
self._update_fq_name_to_uuid(uuid, obj_dict)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.ann_fq_name:
if tuple(obj.ann_fq_name) in cls._ann_fq_name_to_uuid:
del cls._ann_fq_name_to_uuid[tuple(obj.ann_fq_name)]
if tuple(obj.fq_name) in cls._fq_name_to_uuid:
del cls._fq_name_to_uuid[tuple(obj.fq_name)]
def evaluate(self):
# Implement in the derived class
pass
@classmethod
def objects(cls):
# Get all vnc objects of this class.
return cls._dict.values()
@staticmethod
def _build_annotation_dict(annotation_dict):
return {str(annot['key']): str(annot['value'])
for annot
in annotation_dict['key_value_pair']} \
if annotation_dict and annotation_dict.get('key_value_pair') \
else {}
@staticmethod
def _build_string_dict(src_dict):
dst_dict = {}
if src_dict:
for key, value in src_dict.iteritems():
dst_dict[str(key)] = str(value)
return dst_dict
@staticmethod
def _build_cls_uuid_list(cls, collection):
return [cls(str(list(collection)[i]))
for i in xrange(len(collection))] \
if collection else []
class VirtualMachineMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_machine'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.owner = None
self.cluster = None
self.virtual_router = None
self.virtual_machine_interfaces = set()
self.pod_labels = None
self.pod_node = None
self.node_ip = None
super(VirtualMachineMM, self).__init__(uuid, obj_dict)
obj_dict = self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if not obj:
return
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
if self.annotations:
for kvp in self.annotations['key_value_pair'] or []:
if kvp['key'] == 'owner':
self.owner = kvp['value']
elif kvp['key'] == 'cluster':
self.cluster = kvp['value']
elif kvp['key'] == 'labels':
self.pod_labels = json.loads(kvp['value'])
self.update_single_ref('virtual_router', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('virtual_router', {})
obj.update_multiple_refs('virtual_machine_interface', {})
super(VirtualMachineMM, cls).delete(uuid)
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Machine DB lookup/introspect request. """
vm_resp = introspect.VirtualMachineDatabaseListResp(vms=[])
# Iterate through all elements of Virtual Machine DB.
for vm in VirtualMachineMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vm_uuid and req.vm_uuid != vm.uuid:
continue
vm_annotations = cls._build_annotation_dict(vm.annotations)
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vm.virtual_machine_interfaces)
vr = introspect.VRUuid(vr_uuid=str(vm.virtual_router)) \
if vm.virtual_router else None
# Construct response for an element.
vm_instance = introspect.VirtualMachineInstance(
uuid=vm.uuid,
name=vm.name,
cluster=vm.cluster,
annotations=vm_annotations,
owner=vm.owner,
node_ip=str(vm.node_ip),
pod_node=vm.pod_node,
pod_labels=vm.pod_labels,
vm_interfaces=vmis,
vrouter_uuid=vr)
# Append the constructed element info to the response.
vm_resp.vms.append(vm_instance)
# Send the reply out.
vm_resp.response(req.context())
class VirtualRouterMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_router'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
_ip_addr_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(VirtualRouterMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.virtual_machines = set()
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
self.update_multiple_refs('virtual_machine', obj)
self.virtual_router_ip_address = obj.get('virtual_router_ip_address')
if self.virtual_router_ip_address:
self.build_ip_addr_to_uuid(
self.uuid, self.virtual_router_ip_address)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine', {})
del cls._dict[uuid]
@classmethod
def build_ip_addr_to_uuid(cls, uuid, ip_addr):
cls._ip_addr_to_uuid[tuple(ip_addr)] = uuid
@classmethod
def get_ip_addr_to_uuid(cls, ip_addr):
return cls._ip_addr_to_uuid.get(tuple(ip_addr))
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Router DB lookup/introspect request. """
vr_resp = introspect.VirtualRouterDatabaseListResp(vrs=[])
# Iterate through all elements of Virtual Router DB.
for vr in VirtualRouterMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vr_uuid and req.vr_uuid != vr.uuid:
continue
vr_annotations = cls._build_annotation_dict(vr.annotations)
vms = cls._build_cls_uuid_list(
introspect.VMUuid, vr.virtual_machines)
# Construct response for an element.
vr_instance = introspect.VirtualRouterInstance(
uuid=vr.uuid,
name=vr.fq_name[-1],
fq_name=vr.fq_name,
annotations=vr_annotations,
virtual_machines=vms)
# Append the constructed element info to the response.
vr_resp.vrs.append(vr_instance)
# Send the reply out.
vr_resp.response(req.context())
class VirtualMachineInterfaceMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_machine_interface'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(VirtualMachineInterfaceMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.host_id = None
self.virtual_network = None
self.virtual_machine = None
self.instance_ips = set()
self.floating_ips = set()
self.virtual_machine_interfaces = set()
self.security_groups = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# Cache bindings on this VMI.
if obj.get('virtual_machine_interface_bindings', None):
bindings = obj['virtual_machine_interface_bindings']
kvps = bindings.get('key_value_pair', None)
for kvp in kvps or []:
if kvp['key'] == 'host_id':
self.host_id = kvp['value']
self.update_multiple_refs('instance_ip', obj)
self.update_multiple_refs('floating_ip', obj)
self.update_single_ref('virtual_network', obj)
self.update_single_ref('virtual_machine', obj)
self.update_multiple_refs('security_group', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('instance_ip', {})
obj.update_multiple_refs('floating_ip', {})
obj.update_single_ref('virtual_network', {})
obj.update_single_ref('virtual_machine', {})
obj.update_multiple_refs('security_group', {})
obj.update_multiple_refs('virtual_machine_interface', {})
obj.remove_from_parent()
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Machine Interface DB lookup/introspect request. """
vmi_resp = introspect.VirtualMachineInterfaceDatabaseListResp(vmis=[])
# Iterate through all elements of Virtual Router DB.
for vmi in VirtualMachineInterfaceMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vmi_uuid and req.vmi_uuid != vmi.uuid:
continue
vmi_annotations = cls._build_annotation_dict(vmi.annotations)
fips = cls._build_cls_uuid_list(
introspect.FIPUuid, vmi.floating_ips)
sgs = cls._build_cls_uuid_list(
introspect.SGUuid, vmi.security_groups)
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vmi.virtual_machine_interfaces)
# Construct response for an element.
vmi_instance = introspect.VirtualMachineInterfaceInstance(
uuid=vmi.uuid,
name=vmi.fq_name[-1],
fq_name=vmi.fq_name,
annotations=vmi_annotations,
floating_ips=fips,
host_id=vmi.host_id,
security_groups=sgs,
virtual_machine=str(vmi.virtual_machine),
virtual_machine_interfaces=vmis,
virtual_network=str(vmi.virtual_network))
# Append the constructed element info to the response.
vmi_resp.vmis.append(vmi_instance)
# Send the reply out.
vmi_resp.response(req.context())
class VirtualNetworkMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_network'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
def __init__(self, uuid, obj_dict=None):
super(VirtualNetworkMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.virtual_machine_interfaces = set()
self.instance_ips = set()
self.network_ipams = set()
self.network_ipam_subnets = {}
self.annotations = None
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.build_fq_name_to_uuid(self.uuid, obj)
# Cache ipam-subnet-uuid to ipam-fq-name mapping.
# This is useful when we would like to locate an ipam in a VN,
# from which we would like to request ip allocation.
self.network_ipam_subnets = {}
# Iterate through ipam's on this VN.
for ipam in obj.get('network_ipam_refs', []):
# Get the ipam's attributes.
ipam_attr = ipam.get('attr', None)
# Get the ipam fq-name.
ipam_fq_name = ipam['to']
if ipam_attr:
# Iterate through ipam subnets to cache uuid - fqname mapping.
for subnet in ipam_attr.get('ipam_subnets', []):
subnet_uuid = subnet.get('subnet_uuid', None)
if subnet_uuid:
self.network_ipam_subnets[subnet_uuid] = ipam_fq_name
# Get annotations on this virtual network.
self.annotations = obj.get('annotations', {})
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('instance_ip', obj)
self.update_multiple_refs('network_ipam', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
obj.update_multiple_refs('instance_ip', {})
obj.update_multiple_refs('network_ipam', {})
obj.remove_from_parent()
del cls._dict[uuid]
# Given an ipam-fq-name, return its subnet uuid on this VN.
def get_ipam_subnet_uuid(self, ipam_fq_name):
for subnet_uuid, fq_name in self.network_ipam_subnets.iteritems():
if fq_name == ipam_fq_name:
return subnet_uuid
return None
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Network DB lookup/introspect request. """
vn_resp = introspect.VirtualNetworkDatabaseListResp(vns=[])
# Iterate through all elements of Virtual Network DB.
for vn in VirtualNetworkMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vn_uuid and req.vn_uuid != vn.uuid:
continue
vn_annotations = cls._build_annotation_dict(vn.annotations)
ipam_subnets = [introspect.NetworkIpamSubnetInstance(
uuid=sub[0], fq_name=sub[1])
for sub
in vn.network_ipam_subnets.iteritems()]
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vn.virtual_machine_interfaces)
iips = cls._build_cls_uuid_list(
introspect.IIPUuid, vn.instance_ips)
nipams = cls._build_cls_uuid_list(
introspect.NIPAMUuid, vn.network_ipams)
# Construct response for an element.
vn_instance = introspect.VirtualNetworkInstance(
uuid=vn.uuid,
name=vn.fq_name[-1],
fq_name=vn.fq_name,
annotations=vn_annotations,
virtual_machine_interfaces=vmis,
instance_ips=iips,
network_ipams=nipams,
network_ipam_subnets=ipam_subnets)
# Append the constructed element info to the response.
vn_resp.vns.append(vn_instance)
# Send the reply out.
vn_resp.response(req.context())
class InstanceIpMM(DBBaseMM):
_dict = {}
obj_type = 'instance_ip'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(InstanceIpMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.address = None
self.family = None
self.virtual_machine_interfaces = set()
self.virtual_networks = set()
self.floating_ips = set()
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.family = obj.get('instance_ip_family', 'v4')
self.address = obj.get('instance_ip_address', None)
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('virtual_network', obj)
self.floating_ips = set([fip['uuid']
for fip in obj.get('floating_ips', [])])
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
obj.update_multiple_refs('virtual_network', {})
del cls._dict[uuid]
@classmethod
def get_object(cls, ip, vn_fq_name):
items = cls._dict.items()
for uuid, iip_obj in items:
if ip == iip_obj.address:
vn_uuid = VirtualNetworkMM.get_fq_name_to_uuid(vn_fq_name)
if vn_uuid and vn_uuid in iip_obj.virtual_networks:
return iip_obj
return None
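    # Example lookup (values hypothetical): find the InstanceIp that carries
    # 10.0.0.5 on a virtual network identified by its fq_name list:
    #
    #     iip = InstanceIpMM.get_object('10.0.0.5',
    #                                   ['default-domain', 'proj', 'net'])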
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to InstanceIp DB lookup/introspect request. """
iip_resp = introspect.InstanceIpDatabaseListResp(iips=[])
# Iterate through all elements of InstanceIp DB.
for iip in InstanceIpMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.iip_uuid and req.iip_uuid != iip.uuid:
continue
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, iip.virtual_machine_interfaces)
vns = cls._build_cls_uuid_list(
introspect.VNUuid, iip.virtual_networks)
fips = cls._build_cls_uuid_list(
introspect.FIPUuid, iip.floating_ips)
# Construct response for an element.
iip_instance = introspect.InstanceIpInstance(
uuid=iip.uuid,
name=iip.fq_name[-1],
fq_name=iip.fq_name,
address=str(iip.address),
family=iip.family,
vm_interfaces=vmis,
virtual_networks=vns,
floating_ips=fips)
# Append the constructed element info to the response.
iip_resp.iips.append(iip_instance)
# Send the reply out.
iip_resp.response(req.context())
# end class InstanceIpMM
class ProjectMM(DBBaseMM):
_dict = {}
obj_type = 'project'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(ProjectMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.ns_labels = {}
self.virtual_networks = set()
self.annotations = None
self.security_groups = set()
obj_dict = self.update(obj_dict)
self.set_children('virtual_network', obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.build_fq_name_to_uuid(self.uuid, obj)
# Update SecurityGroup info.
sg_list = obj.get('security_groups', [])
for sg in sg_list:
self.security_groups.add(sg['uuid'])
self.annotations = obj.get('annotations', {})
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
def get_security_groups(self):
return set(self.security_groups)
def add_security_group(self, sg_uuid):
self.security_groups.add(sg_uuid)
def remove_security_group(self, sg_uuid):
self.security_groups.discard(sg_uuid)
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Project DB lookup/introspect request. """
project_resp = introspect.ProjectDatabaseListResp(projects=[])
# Iterate through all elements of Project DB.
for project in ProjectMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.project_uuid and req.project_uuid != project.uuid:
continue
project_annotations = cls._build_annotation_dict(
project.annotations)
ns_labels = cls._build_string_dict(project.ns_labels)
sgs = cls._build_cls_uuid_list(
introspect.SGUuid, project.security_groups)
vns = cls._build_cls_uuid_list(
introspect.VNUuid, project.virtual_networks)
# Construct response for an element.
project_instance = introspect.ProjectInstance(
uuid=project.uuid,
name=project.fq_name[-1],
fq_name=project.fq_name,
annotations=project_annotations,
ns_labels=ns_labels,
security_groups=sgs,
virtual_networks=vns)
# Append the constructed element info to the response.
project_resp.projects.append(project_instance)
# Send the reply out.
project_resp.response(req.context())
class DomainMM(DBBaseMM):
_dict = {}
obj_type = 'domain'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(DomainMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Domain DB lookup/introspect request. """
domain_resp = introspect.DomainDatabaseListResp(domains=[])
# Iterate through all elements of Domain DB.
for domain in DomainMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.domain_uuid and req.domain_uuid != domain.uuid:
continue
domain_annotations = cls._build_annotation_dict(
domain.annotations)
# Construct response for an element.
domain_instance = introspect.DomainInstance(
uuid=domain.uuid,
name=domain.fq_name[-1],
fq_name=domain.fq_name,
annotations=domain_annotations)
# Append the constructed element info to the response.
domain_resp.domains.append(domain_instance)
# Send the reply out.
domain_resp.response(req.context())
class NetworkIpamMM(DBBaseMM):
_dict = {}
obj_type = 'network_ipam'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(NetworkIpamMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to NetworkIpam DB lookup/introspect request. """
network_ipam_resp = introspect.NetworkIpamDatabaseListResp(
network_ipams=[])
# Iterate through all elements of NetworkIpam DB.
for network_ipam in NetworkIpamMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.network_ipam_uuid \
and req.network_ipam_uuid != network_ipam.uuid:
continue
network_ipam_annotations = cls._build_annotation_dict(
network_ipam.annotations)
# Construct response for an element.
network_ipam_instance = introspect.NetworkIpamInstance(
uuid=network_ipam.uuid,
name=network_ipam.fq_name[-1],
fq_name=network_ipam.fq_name,
annotations=network_ipam_annotations)
# Append the constructed element info to the response.
network_ipam_resp.network_ipams.append(network_ipam_instance)
# Send the reply out.
network_ipam_resp.response(req.context())
# end class NetworkIpamMM
class NetworkPolicyMM(DBBaseMM):
_dict = {}
obj_type = 'network_policy'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(NetworkPolicyMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
# end class NetworkPolicyMM
|
rombie/contrail-controller
|
src/container/mesos-manager/mesos_manager/vnc/config_db.py
|
Python
|
apache-2.0
| 32,110 | 0.000685 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpdateClusterUpgradeDescription(Model):
"""Parameters for updating a cluster upgrade.
:param upgrade_kind: Possible values include: 'Invalid', 'Rolling',
     'Rolling_ForceRestart'. Default value: "Rolling".
:type upgrade_kind: str or :class:`enum <azure.servicefabric.models.enum>`
:param update_description:
:type update_description: :class:`RollingUpgradeUpdateDescription
<azure.servicefabric.models.RollingUpgradeUpdateDescription>`
:param cluster_health_policy:
:type cluster_health_policy: :class:`ClusterHealthPolicy
<azure.servicefabric.models.ClusterHealthPolicy>`
:param enable_delta_health_evaluation:
:type enable_delta_health_evaluation: bool
:param cluster_upgrade_health_policy:
:type cluster_upgrade_health_policy:
:class:`ClusterUpgradeHealthPolicyObject
<azure.servicefabric.models.ClusterUpgradeHealthPolicyObject>`
:param application_health_policy_map:
:type application_health_policy_map: :class:`ApplicationHealthPolicies
<azure.servicefabric.models.ApplicationHealthPolicies>`
"""
_attribute_map = {
'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'},
'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'},
'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'},
'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'},
'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'},
'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'},
}
def __init__(self, upgrade_kind="Rolling", update_description=None, cluster_health_policy=None, enable_delta_health_evaluation=None, cluster_upgrade_health_policy=None, application_health_policy_map=None):
self.upgrade_kind = upgrade_kind
self.update_description = update_description
self.cluster_health_policy = cluster_health_policy
self.enable_delta_health_evaluation = enable_delta_health_evaluation
self.cluster_upgrade_health_policy = cluster_upgrade_health_policy
self.application_health_policy_map = application_health_policy_map
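# Minimal usage sketch (illustrative; not part of the generated file). The
# field values below are hypothetical; the model simply stores the keyword
# arguments for later msrest serialization against _attribute_map.
if __name__ == '__main__':
    desc = UpdateClusterUpgradeDescription(
        upgrade_kind="Rolling",
        enable_delta_health_evaluation=True,
    )
    print(desc.upgrade_kind)  # -> Rolling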
|
AutorestCI/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description.py
|
Python
|
mit
| 2,832 | 0.002119 |
import unittest
import xen.xend.sxp
class test_sxp(unittest.TestCase):
def testAllFromString(self):
def t(inp, expected):
self.assertEqual(xen.xend.sxp.all_from_string(inp), expected)
t('String', ['String'])
t('(String Thing)', [['String', 'Thing']])
t('(String) (Thing)', [['String'], ['Thing']])
def testParseFixed(self):
fin = file('../xen/xend/tests/xend-config.sxp', 'rb')
try:
config = xen.xend.sxp.parse(fin)
self.assertEqual(
xen.xend.sxp.child_value(
config,
'xend-relocation-hosts-allow'),
'^localhost$ ^localhost\\.localdomain$')
finally:
fin.close()
def testParseConfigExample(self):
fin = file('../../examples/xend-config.sxp', 'rb')
try:
config = xen.xend.sxp.parse(fin)
finally:
fin.close()
def test_suite():
return unittest.makeSuite(test_sxp)
|
YongMan/Xen-4.3.1
|
tools/python/xen/xend/tests/test_sxp.py
|
Python
|
gpl-2.0
| 1,015 | 0.003941 |
from .theuerkaufPeak import PeakModelTheuerkauf
from .eePeak import PeakModelEE
# dictionary of available peak models
PeakModels = dict()
PeakModels["theuerkauf"] = PeakModelTheuerkauf
PeakModels["ee"] = PeakModelEE
|
op3/hdtv
|
hdtv/peakmodels/__init__.py
|
Python
|
gpl-2.0
| 217 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("django_facebook", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'Party'
db.create_table(u'votes_party', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('official_site', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('facebook_page', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('wikpedia_article', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('wikpedia_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('open_k_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('logo_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
))
db.send_create_signal(u'votes', ['Party'])
# Adding model 'Candidate'
db.create_table(u'votes_candidate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('party', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['votes.Party'], null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('number_of_votes', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('is_knesset_member', self.gf('django.db.models.fields.BooleanField')(default=False)),
('pesonal_site', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('facebook_page', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('wikpedia_article', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('wikpedia_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('open_k_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('image_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
))
db.send_create_signal(u'votes', ['Candidate'])
# Adding M2M table for field voters on 'Candidate'
m2m_table_name = db.shorten_name(u'votes_candidate_voters')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('candidate', models.ForeignKey(orm[u'votes.candidate'], null=False)),
('facebookcustomuser', models.ForeignKey(orm[u'django_facebook.facebookcustomuser'], null=False))
))
db.create_unique(m2m_table_name, ['candidate_id', 'facebookcustomuser_id'])
def backwards(self, orm):
# Deleting model 'Party'
db.delete_table(u'votes_party')
# Deleting model 'Candidate'
db.delete_table(u'votes_candidate')
# Removing M2M table for field voters on 'Candidate'
db.delete_table(db.shorten_name(u'votes_candidate_voters'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'votes.candidate': {
'Meta': {'ordering': "['-number_of_votes']", 'object_name': 'Candidate'},
'facebook_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_knesset_member': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'number_of_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'open_k_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['votes.Party']", 'null': 'True', 'blank': 'True'}),
'pesonal_site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'voters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'}),
'wikpedia_article': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'wikpedia_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'votes.party': {
'Meta': {'object_name': 'Party'},
'facebook_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'official_site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'open_k_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'wikpedia_article': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'wikpedia_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['votes']
|
AriMeidan/Fantasy-Knesset
|
votes/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 10,925 | 0.008055 |
from vt_manager_kvm.communication.sfa.util.xrn import urn_to_hrn
from vt_manager_kvm.communication.sfa.trust.credential import Credential
from vt_manager_kvm.communication.sfa.trust.auth import Auth
class Start:
def __init__(self, xrn, creds, **kwargs):
hrn, type = urn_to_hrn(xrn)
valid_creds = Auth().checkCredentials(creds, 'startslice', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
return
|
ict-felix/stack
|
vt_manager_kvm/src/python/vt_manager_kvm/communication/sfa/methods/Start.py
|
Python
|
apache-2.0
| 471 | 0.008493 |
# -*- coding: utf-8 -*-
#
# Project of Information-Theoretic Modeling documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 12 14:45:52 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Project of Information-Theoretic Modeling'
copyright = u'2014, Simo Linkola, Teemu Pitkänen and Kalle Timperi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProjectofInformation-TheoreticModelingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ProjectofInformation-TheoreticModeling.tex', u'Project of Information-Theoretic Modeling Documentation',
u'Simo Linkola, Teemu Pitkänen and Kalle Timperi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'projectofinformation-theoreticmodeling', u'Project of Information-Theoretic Modeling Documentation',
[u'Simo Linkola, Teemu Pitkänen and Kalle Timperi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ProjectofInformation-TheoreticModeling', u'Project of Information-Theoretic Modeling Documentation',
u'Simo Linkola, Teemu Pitkänen and Kalle Timperi', 'ProjectofInformation-TheoreticModeling', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
assamite/itm_project
|
docs/source/conf.py
|
Python
|
gpl-2.0
| 8,919 | 0.006282 |
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
import string
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import same_origin
from django.utils.six.moves import zip
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure."
CSRF_SECRET_LENGTH = 32
CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_string():
return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS)
def _salt_cipher_secret(secret):
"""
Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
token by adding a salt and using it to encrypt the secret.
"""
salt = _get_new_csrf_string()
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in salt))
cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs)
return salt + cipher
def _unsalt_cipher_token(token):
"""
Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt
the second half to produce the original secret.
"""
salt = token[:CSRF_SECRET_LENGTH]
token = token[CSRF_SECRET_LENGTH:]
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt))
secret = ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok
return secret
def _get_new_csrf_token():
return _salt_cipher_secret(_get_new_csrf_string())
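# Illustrative round-trip of the salting scheme above (a sketch; the values
# are generated, not fixed). Masking the same secret twice yields different
# tokens with overwhelming probability, yet unsalting either recovers it:
#
#     secret = _get_new_csrf_string()
#     token_a = _salt_cipher_secret(secret)
#     token_b = _salt_cipher_secret(secret)
#     assert _unsalt_cipher_token(token_a) == secret
#     assert _unsalt_cipher_token(token_b) == secret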
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" not in request.META:
csrf_secret = _get_new_csrf_string()
request.META["CSRF_COOKIE"] = _salt_cipher_secret(csrf_secret)
else:
csrf_secret = _unsalt_cipher_token(request.META["CSRF_COOKIE"])
request.META["CSRF_COOKIE_USED"] = True
return _salt_cipher_secret(csrf_secret)
def rotate_token(request):
"""
Changes the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META.update({
"CSRF_COOKIE_USED": True,
"CSRF_COOKIE": _get_new_csrf_token(),
})
request.csrf_cookie_needs_reset = True
def _sanitize_token(token):
# Allow only ASCII alphanumerics
if re.search('[^a-zA-Z0-9]', force_text(token)):
return _get_new_csrf_token()
elif len(token) == CSRF_TOKEN_LENGTH:
return token
elif len(token) == CSRF_SECRET_LENGTH:
# Older Django versions set cookies to values of CSRF_SECRET_LENGTH
# alphanumeric characters. For backwards compatibility, accept
# such values as unsalted secrets.
# It's easier to salt here and be consistent later, rather than add
# different code paths in the checks, although that might be a tad more
# efficient.
return _salt_cipher_secret(token)
return _get_new_csrf_token()
def _compare_salted_tokens(request_csrf_token, csrf_token):
# Assume both arguments are sanitized -- that is, strings of
# length CSRF_TOKEN_LENGTH, all CSRF_ALLOWED_CHARS.
return constant_time_compare(
_unsalt_cipher_token(request_csrf_token),
_unsalt_cipher_token(csrf_token),
)
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
logger.warning(
'Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
csrf_token = None
else:
csrf_token = _sanitize_token(cookie_token)
if csrf_token != cookie_token:
# Cookie token needed to be replaced;
# the cookie needs to be reset.
request.csrf_cookie_needs_reset = True
# Use same token next time.
request.META['CSRF_COOKIE'] = csrf_token
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC7231 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# secret we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = base_referer = force_text(
request.META.get('HTTP_REFERER'),
strings_only=True,
errors='replace'
)
if referer is None:
return self._reject(request, REASON_NO_REFERER)
referer = urlparse(referer)
# Make sure we have a valid URL for Referer.
if '' in (referer.scheme, referer.netloc):
return self._reject(request, REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != 'https':
return self._reject(request, REASON_INSECURE_REFERER)
# If there isn't a CSRF_COOKIE_DOMAIN, assume we need an exact
# match on host:port. If not, obey the cookie rules.
if settings.CSRF_COOKIE_DOMAIN is None:
# request.get_host() includes the port.
good_referer = request.get_host()
else:
good_referer = settings.CSRF_COOKIE_DOMAIN
server_port = request.get_port()
if server_port not in ('443', '80'):
good_referer = '%s:%s' % (good_referer, server_port)
# if not any(is_same_domain(referer.netloc, host) for host in good_hosts):
# reason = REASON_BAD_REFERER % referer.geturl()
good_referer = 'https://%s/' % request.get_host()
if not same_origin(base_referer, good_referer):
                    reason = REASON_BAD_REFERER % base_referer
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
request_csrf_token = _sanitize_token(request_csrf_token)
if not _compare_salted_tokens(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if not getattr(request, 'csrf_cookie_needs_reset', False):
if getattr(response, 'csrf_cookie_set', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_cookie_set = True
return response
|
jyotsna1820/django
|
django/middleware/csrf.py
|
Python
|
bsd-3-clause
| 12,394 | 0.001291 |
import math
from log_parser import parse_loglines
from event_timer import add_realtimes_to_datapoint_list
class HeartBeatTimings(object):
def __init__(self, hrm_datapoint_iter):
self.samples = list(_generate_beat_samples(hrm_datapoint_iter))
def first_beat_time(self):
return self.samples[0][0]
def last_beat_time(self):
return self.samples[-1][0]
def __str__(self):
duration = self.samples[-1][0] - self.samples[0][0]
beats = self.samples[-1][1] - self.samples[0][1]
samples = len(self.samples)
return 'HeartBeatTimings: %d beats in %d samples over %f seconds from %f' % (beats, samples, duration, self.first_beat_time())
def idx_last_datapoint_before(self, timestamp):
if len(self.samples) == 0 or self.samples[0][0] >= timestamp:
return None
lower = 0
upper = len(self.samples) - 1
while upper > lower+1:
mid = int((upper + lower) / 2)
if self.samples[mid][0] >= timestamp:
upper = mid
else:
lower = mid
return lower
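    # For example, with sample times [1.0, 2.0, 3.0, 4.0],
    # idx_last_datapoint_before(2.5) returns 1 (the sample at t=2.0);
    # samples at or after the given timestamp are never selected.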
def timeslice(self, from_time=None, to_time=None):
"Return a subset of the data by time interval"
return HeartBeatTimingsTimeslice(self, from_time, to_time)
def realtimes_of_beats(self):
""" Generate the real clock time of each heart beat
Yield the time of each heartbeat the occured within the data set.
Where there is incomplete data, use None as a placeholder for the
beats whos exact time is unknown.
"""
prev_count = self.samples[0][1] - 1
for timestamp, count in self.samples:
beats_between_samples = count - prev_count
if beats_between_samples > 1:
for _ in xrange(beats_between_samples-1):
yield None
yield timestamp
prev_count = count
def compute_mean_hr_bpm(self):
start_timestamp, start_count = self.samples[0]
end_timestamp, end_count = self.samples[-1]
exact_interval = end_timestamp - start_timestamp
beats_over_interval = end_count - start_count
hr_bps = beats_over_interval / exact_interval
hr_bpm = 60 * hr_bps
return hr_bpm
def compute_hrv_ms(self):
""" Compute a metric of heart rate variability
http://en.wikipedia.org/wiki/Heart_rate_variability#Time-domain_methods
This is an attempt at the "root mean square of successive differences"
metric of HRV. The result in is milliseconds.
"""
def hrv_sample(times):
if None in times:
return None
int1 = times[1] - times[0]
int2 = times[2] - times[1]
int_diff = int2 - int1
return int_diff * int_diff
beattimes = list(self.realtimes_of_beats())
if len(beattimes) < 3:
return None
total_var = 0
sample_count = 0
for i in xrange(len(beattimes)-2):
var = hrv_sample(beattimes[i:i+3])
if var is not None:
total_var += var
sample_count += 1
if sample_count == 0:
return None
else:
return 1000 * math.sqrt(total_var / sample_count)
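# Worked mini-example of the RMSSD metric above (hypothetical beat times, in
# seconds): beats at t = 0.0, 0.8, 1.7, 2.5 give successive intervals of
# 0.8, 0.9 and 0.8 s; the squared successive differences are 0.1**2 and
# (-0.1)**2, so RMSSD = 1000 * sqrt((0.01 + 0.01) / 2) = 100 ms.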
class HeartBeatTimingsTimeslice(HeartBeatTimings):
def __init__(self, parent, from_time, to_time):
from_idx = None
if from_time is not None:
from_idx = parent.idx_last_datapoint_before(from_time)
if from_idx is None:
from_idx = 0
to_idx = None
if to_time is not None:
to_idx = parent.idx_last_datapoint_before(to_time)
if to_idx is None:
to_idx = len(parent.samples) - 1
self.samples = parent.samples[from_idx:to_idx+1]
def _generate_beat_samples(hrm_datapoint_iter):
""" Generate the times of individual heart beats
Yield a sequence of (last_beat_time, beat_count) tuples giving
the clock time at which individual heart beats occurred and the
cumulative beat count. Normally beat_count will increase by 1
with each returned value, but it may jump by more if a gap
in the data means that we have incomplete information.
"""
prev = None
for dp in hrm_datapoint_iter:
count = dp.beat_ets.count
if prev is None:
prev_beat_realtime = dp.prev_beat_realtime()
if prev_beat_realtime is not None:
yield prev_beat_realtime, count-1
yield dp.beat_ets.last_at_realtime, count
else:
beats_in_interval = count - prev.beat_ets.count
if beats_in_interval > 1:
prev_beat_realtime = dp.prev_beat_realtime()
if prev_beat_realtime is not None:
yield prev_beat_realtime, count-1
if beats_in_interval > 0:
yield dp.beat_ets.last_at_realtime, count
prev = dp
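# Usage sketch (hedged: the exact signatures of parse_loglines and
# add_realtimes_to_datapoint_list, imported above, are assumptions):
#
#     datapoints = list(parse_loglines(open('hrm.log')))
#     add_realtimes_to_datapoint_list(datapoints)
#     timings = HeartBeatTimings(datapoints)
#     print timings.compute_mean_hr_bpm(), timings.compute_hrv_ms()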
|
ncleaton/njcant
|
njcant/hr_analysis.py
|
Python
|
mit
| 4,240 | 0.027123 |
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from .generic import GenericClient
from requests.auth import HTTPDigestAuth
class qbittorrentAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(qbittorrentAPI, self).__init__('qbittorrent', host, username, password)
self.url = self.host
        self.session.auth = HTTPDigestAuth(self.username, self.password)
def _get_auth(self):
try:
self.response = self.session.get(self.host, verify=False)
self.auth = self.response.content
        except Exception:
return None
        return self.auth if self.response.status_code != 404 else None
def _add_torrent_uri(self, result):
self.url = self.host+'command/download'
data = {'urls': result.url}
return self._request(method='post', data=data)
def _add_torrent_file(self, result):
self.url = self.host+'command/upload'
files = {'torrents': (result.name + '.torrent', result.content)}
return self._request(method='post', files=files)
def _set_torrent_priority(self, result):
        self.url = self.host+'command/decreasePrio'
if result.priority == 1:
self.url = self.host+'command/increasePrio'
data = {'hashes': result.hash}
return self._request(method='post', data=data)
def _set_torrent_pause(self, result):
self.url = self.host+'command/resume'
if sickbeard.TORRENT_PAUSED:
self.url = self.host+'command/pause'
data = {'hash': result.hash}
return self._request(method='post', data=data)
api = qbittorrentAPI()
|
guijomatos/SickRage
|
sickbeard/clients/qbittorrent_client.py
|
Python
|
gpl-3.0
| 2,391 | 0.002509 |
import json
import urllib2
# open the url and the screen name
# (The screen name is the screen name of the user for whom to return results for)
def get_data():
url = "http://api.twitter.com/1/statuses/user_timeline.json?screen_name=python"
    # json.load reads the file-like HTTP response returned by urlopen and
    # parses the JSON body into a Python object (here, a list of statuses)
data = json.load(urllib2.urlopen(url))
# print the result
print data
get_data()
|
anthonyndunguwanja/Anthony-Ndungu-bootcamp-17
|
Day 3/http_client.py
|
Python
|
mit
| 439 | 0.013667 |
"""
Base logic for pywow structures
"""
from structures import Structure, Skeleton
from .fields import *
from .main import *
from .generated import GeneratedStructure
class StructureNotFound(Exception):
pass
class StructureLoader():
wowfiles = None
@classmethod
def setup(cls):
if cls.wowfiles is None:
cls.wowfiles = {}
for name in globals():
try:
if not issubclass(globals()[name], Structure):
continue
except TypeError:
continue
cls.wowfiles[name.lower()] = globals()[name]
@classmethod
def getstructure(cls, name, build=0, parent=None):
name = name.replace("-", "_")
if name in cls.wowfiles:
return cls.wowfiles[name](build, parent)
raise StructureNotFound("Structure not found for file %r" % (name))
StructureLoader.setup()
getstructure = StructureLoader.getstructure
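# Usage sketch (the file name and build number are hypothetical). Lookups go
# against the lower-cased class names registered in StructureLoader.setup():
#
#     struct = getstructure("item", build=12340)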
class LocalizedStringField(Structure):
"""
Structure for the LocalizedField class
"""
fields = Skeleton(
StringField("enus"),
StringField("kokr"),
StringField("frfr"),
StringField("dede"),
StringField("zhcn"),
StringField("zhtw"),
StringField("eses"),
StringField("esmx"),
BitMaskField("locflags")
)
def changed_5595(self, fields):
fields.insert_fields((
StringField("ruru"),
StringField("unk1"),
StringField("unk2"),
StringField("unk3"),
StringField("unk4"),
StringField("unk5"),
StringField("unk6"),
StringField("unk7"),
), before="locflags")
def changed_11927(self, fields):
self.changed_5595(fields)
fields.delete_fields(
"kokr", "frfr", "dede",
"zhcn", "zhtw", "eses",
"esmx", "ruru", "unk1",
"unk2", "unk3", "unk4",
"unk5", "unk6", "unk7",
"locflags",
)
def changed_11993(self, fields):
self.changed_5595(fields)
def changed_12025(self, fields):
self.changed_11927(fields)
|
jleclanche/pywow
|
wdbc/structures/__init__.py
|
Python
|
cc0-1.0
| 1,799 | 0.033352 |
"""Conditional module is the xmodule, which you can use for disabling
some xmodules by conditions.
"""
import json
import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.modulestore import Location
from xmodule.seq_module import SequenceDescriptor
from xblock.core import Scope, List
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger('mitx.' + __name__)
class ConditionalFields(object):
    show_tag_list = List(help="Locations of modules listed in <show> tags", scope=Scope.content)
class ConditionalModule(ConditionalFields, XModule):
"""
Blocks child module from showing unless certain conditions are met.
Example:
<conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True">
<show sources="i4x://.../test_6; i4x://.../Avi_resources"/>
<video url_name="secret_video" />
</conditional>
<conditional> tag attributes:
sources - location id of required modules, separated by ';'
submitted - map to `is_submitted` module method.
        (pressing the RESET button makes this function return False.)
attempted - map to `is_attempted` module method
correct - map to `is_correct` module method
poll_answer - map to `poll_answer` module attribute
voted - map to `voted` module attribute
<show> tag attributes:
sources - location id of required modules, separated by ';'
    You can add your own rules for the <conditional> tag, like
    "completed", "attempted" etc. To do that you must extend the
    `ConditionalModule.conditions_map` variable and add the pair:
my_attr: my_property/my_method
After that you can use it:
<conditional my_attr="some value" ...>
...
</conditional>
And my_property/my_method will be called for required modules.
"""
js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/conditional/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
]}
js_module_name = "Conditional"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
# Map
# key: <tag attribute in xml>
# value: <name of module attribute>
conditions_map = {
'poll_answer': 'poll_answer', # poll_question attr
# problem was submitted (it can be wrong)
# if student will press reset button after that,
# state will be reverted
'submitted': 'is_submitted', # capa_problem attr
# if student attempted problem
'attempted': 'is_attempted', # capa_problem attr
        # if the problem scored full points
'correct': 'is_correct',
'voted': 'voted' # poll_question attr
}
def _get_condition(self):
# Get first valid condition.
for xml_attr, attr_name in self.conditions_map.iteritems():
xml_value = self.descriptor.xml_attributes.get(xml_attr)
if xml_value:
return xml_value, attr_name
        raise Exception('Error in conditional module: no supported condition attribute found')
def is_condition_satisfied(self):
self.required_modules = [self.system.get_module(descriptor) for
descriptor in self.descriptor.get_required_module_descriptors()]
xml_value, attr_name = self._get_condition()
if xml_value and self.required_modules:
for module in self.required_modules:
if not hasattr(module, attr_name):
# We don't throw an exception here because it is possible for
# the descriptor of a required module to have a property but
# for the resulting module to be a (flavor of) ErrorModule.
# So just log and return false.
log.warn('Error in conditional module: \
required module {module} has no {module_attr}'.format(module=module, module_attr=attr_name))
return False
attr = getattr(module, attr_name)
if callable(attr):
attr = attr()
if xml_value != str(attr):
break
else:
return True
return False
def get_html(self):
# Calculate html ids of dependencies
self.required_html_ids = [descriptor.location.html_id() for
descriptor in self.descriptor.get_required_module_descriptors()]
return self.system.render_template('conditional_ajax.html', {
'element_id': self.location.html_id(),
'id': self.id,
'ajax_url': self.system.ajax_url,
'depends': ';'.join(self.required_html_ids)
})
def handle_ajax(self, _dispatch, _data):
"""This is called by courseware.moduleodule_render, to handle
an AJAX call.
"""
if not self.is_condition_satisfied():
defmsg = "{link} must be attempted before this will become visible."
message = self.descriptor.xml_attributes.get('message', defmsg)
context = {'module': self,
'message': message}
html = self.system.render_template('conditional_module.html',
context)
return json.dumps({'html': [html], 'message': bool(message)})
html = [child.get_html() for child in self.get_display_items()]
return json.dumps({'html': html})
def get_icon_class(self):
new_class = 'other'
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
child_classes = [self.system.get_module(child_descriptor).get_icon_class()
for child_descriptor in self.descriptor.get_children()]
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class ConditionalDescriptor(ConditionalFields, SequenceDescriptor):
"""Descriptor for conditional xmodule."""
_tag_name = 'conditional'
module_class = ConditionalModule
filename_extension = "xml"
has_score = False
@staticmethod
def parse_sources(xml_element, system, return_descriptor=False):
"""Parse xml_element 'sources' attr and:
if return_descriptor=True - return list of descriptors
if return_descriptor=False - return list of locations
"""
result = []
sources = xml_element.get('sources')
if sources:
locations = [location.strip() for location in sources.split(';')]
for location in locations:
if Location.is_valid(location): # Check valid location url.
try:
if return_descriptor:
descriptor = system.load_item(location)
result.append(descriptor)
else:
result.append(location)
except ItemNotFoundError:
msg = "Invalid module by location."
log.exception(msg)
system.error_tracker(msg)
return result
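    # Example (hypothetical locations): sources="i4x://org/course/problem/p1;
    # i4x://org/course/poll_question/q1" yields the two locations, or their
    # loaded descriptors when return_descriptor=True.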
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescritpor instances upon
which this module depends.
"""
return ConditionalDescriptor.parse_sources(
self.xml_attributes, self.system, True)
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
show_tag_list = []
for child in xml_object:
if child.tag == 'show':
location = ConditionalDescriptor.parse_sources(
child, system)
children.extend(location)
show_tag_list.extend(location)
else:
try:
descriptor = system.process_xml(etree.tostring(child))
module_url = descriptor.location.url()
children.append(module_url)
                except Exception:
msg = "Unable to load child when parsing Conditional."
log.exception(msg)
system.error_tracker(msg)
return {'show_tag_list': show_tag_list}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element(self._tag_name)
for child in self.get_children():
location = str(child.location)
if location in self.show_tag_list:
show_str = '<{tag_name} sources="{sources}" />'.format(
tag_name='show', sources=location)
xml_object.append(etree.fromstring(show_str))
else:
xml_object.append(
etree.fromstring(child.export_to_xml(resource_fs)))
return xml_object
|
IITBinterns13/edx-platform-dev
|
common/lib/xmodule/xmodule/conditional_module.py
|
Python
|
agpl-3.0
| 9,200 | 0.001413 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from timeline.models import AlumniInfo
admin.site.register(AlumniInfo)
|
csriharsha/fosswebsite
|
timeline/admin.py
|
Python
|
mit
| 200 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateMuteConfig
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1_generated_SecurityCenter_UpdateMuteConfig_async]
from google.cloud import securitycenter_v1
async def sample_update_mute_config():
# Create a client
client = securitycenter_v1.SecurityCenterAsyncClient()
# Initialize request argument(s)
mute_config = securitycenter_v1.MuteConfig()
mute_config.filter = "filter_value"
request = securitycenter_v1.UpdateMuteConfigRequest(
mute_config=mute_config,
)
# Make the request
response = await client.update_mute_config(request=request)
# Handle the response
print(response)
# [END securitycenter_v1_generated_SecurityCenter_UpdateMuteConfig_async]
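# Note: the coroutine above still needs an event loop to run; a minimal
# sketch (assuming application-default credentials are configured):
#
#     import asyncio
#     asyncio.run(sample_update_mute_config())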
|
googleapis/python-securitycenter
|
samples/generated_samples/securitycenter_v1_generated_security_center_update_mute_config_async.py
|
Python
|
apache-2.0
| 1,620 | 0.000617 |
__author__ = 'roy'
import logging
logger = logging.getLogger()
class TestA():
_multiprocess_can_split_ = True
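    # _multiprocess_can_split_ signals nose's multiprocess plugin that the
    # tests in this class may be dispatched to separate worker processes,
    # e.g. (sketch): nosetests --processes=4 tests/tests_logpertest.py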
def setup(self):
logger.info("I'm in setup")
def teardown(self):
logger.info("I'm in teardown")
def test1(self):
logger.info("I'm in test 1")
assert 1 == 1
def test2(self):
logger.info("I'm in test 2")
assert 2 == 2
def test3(self):
logger.info("I'm in test 3")
assert 3 == 3
def test4(self):
logger.info("I'm in test 4")
assert 4 == 4
class TestB():
_multiprocess_can_split_ = True
def setup(self):
logger.info("I'm in setup")
def teardown(self):
logger.info("I'm in teardown")
def test1(self):
logger.info("I'm in test 1")
assert 1 == 1
def test2(self):
logger.info("I'm in test 2")
assert 2 == 2
def test3(self):
logger.info("I'm in test 3")
assert 3 == 3
def test4(self):
logger.info("I'm in test 4")
assert 4 == 4
|
taykey/nose-logpertest
|
tests/tests_logpertest.py
|
Python
|
apache-2.0
| 1,068 | 0.000936 |
from django.conf.urls import url
from templatesadmin import views as ta_views
urlpatterns = [
url(r'^$', ta_views.listing, name='templatesadmin-overview'),
url(r'^edit/(?P<path>.*)/$', ta_views.modify, name='templatesadmin-edit'),
]
|
GrandComicsDatabase/django-templatesadmin
|
templatesadmin/urls.py
|
Python
|
bsd-3-clause
| 328 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
import math
class MrpBom(models.Model):
_inherit = 'mrp.bom'
@api.model
def _bom_explode(self, bom, product, factor, properties=None, level=0,
routing_id=False, previous_products=None,
master_bom=None):
routing_id = bom.routing_id.id or routing_id
result, result2 = super(MrpBom, self)._bom_explode(
bom, product, factor, properties=properties, level=level,
routing_id=routing_id, previous_products=previous_products,
master_bom=master_bom)
result2 = self._get_workorder_operations(
result2, factor=factor, level=level, routing_id=routing_id)
return result, result2
def _get_routing_line_from_workorder(self, routing_id, seq, workcenter_id,
wo_name):
""" Returns first routing line from a given data if found
@param routing_id: Routing id
@param seq: workorder sequence
@param workcenter_id: Workcenter id
@return: wo_name = Workorder name
"""
routing_line_obj = self.env['mrp.routing.workcenter']
domain = [('routing_id', '=', routing_id), ('sequence', '=', seq),
('workcenter_id', '=', workcenter_id)]
routing_lines = routing_line_obj.search(domain)
for rl in routing_lines:
if rl.name in wo_name:
return rl
return routing_line_obj
def _get_workorder_operations(self, result2, factor, level=0,
routing_id=False):
for work_order in result2:
if (work_order['sequence'] < level or
work_order.get('routing_wc_line')):
continue
seq = work_order['sequence'] - level
rl = self._get_routing_line_from_workorder(
routing_id, seq, work_order['workcenter_id'],
work_order['name'])
cycle = rl.cycle_nbr and int(math.ceil(factor / rl.cycle_nbr)) or 0
hour = rl.hour_nbr * cycle
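            # e.g. a float factor of 10.0 with cycle_nbr=4 gives
            # cycle = ceil(10.0 / 4) = 3, so hour = hour_nbr * 3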
default_wc_line = rl.op_wc_lines.filtered(lambda r: r.default)
work_order['cycle'] = cycle
work_order['hour'] = hour
work_order['time_start'] = default_wc_line.time_start or 0.0
work_order['time_stop'] = default_wc_line.time_stop or 0.0
work_order['routing_wc_line'] = rl.id
work_order['do_production'] = rl.do_production
return result2
@api.multi
@api.onchange('routing_id')
def onchange_routing_id(self):
for line in self.bom_line_ids:
line.operation = (self.routing_id.workcenter_lines and
self.routing_id.workcenter_lines[0])
if self.routing_id:
return {'warning': {
'title': _('Changing Routing'),
'message': _("Changing routing will cause to change the"
" operation in which each component will be"
" consumed, by default it is set the first"
" one of the routing")
}}
return {}
class MrpBomLine(models.Model):
_inherit = 'mrp.bom.line'
operation = fields.Many2one(
comodel_name='mrp.routing.workcenter', string='Consumed in')
|
jorsea/odoomrp-wip
|
mrp_operations_extension/models/mrp_bom.py
|
Python
|
agpl-3.0
| 4,254 | 0 |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
IR stores typedefs and they are resolved by the code generator.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call, via the resolve_typedefs() method.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlStringifier
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: mixin for typedef resolution
IdlArgument is 'picklable', as it is stored in interfaces_info.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlType, IdlUnionType, IdlArrayType, IdlSequenceType, IdlNullableType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
STANDARD_TYPEDEFS = {
# http://www.w3.org/TR/WebIDL/#common-DOMTimeStamp
'DOMTimeStamp': 'unsigned long long',
}
################################################################################
# TypedObject (mixin for typedef resolution)
################################################################################
class TypedObject(object):
"""Object with a type, such as an Attribute or Operation (return value).
The type can be an actual type, or can be a typedef, which must be resolved
before passing data to the code generator.
"""
__metaclass__ = abc.ABCMeta
idl_type = None
def resolve_typedefs(self, typedefs):
"""Resolve typedefs to actual types in the object."""
# Constructors don't have their own return type, because it's the
# interface itself.
if not self.idl_type:
return
# Need to re-assign self.idl_type, not just mutate idl_type,
# since type(idl_type) may change.
self.idl_type = self.idl_type.resolve_typedefs(typedefs)
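        # Sketch: with typedefs = {'DOMTimeStamp': IdlType('unsigned long long')},
        # an attribute declared as DOMTimeStamp resolves to that underlying type.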
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
def __init__(self, idl_name, node):
"""Args: node: AST root node, class == 'File'"""
self.callback_functions = {}
self.dictionaries = {}
self.enumerations = {}
self.implements = []
self.interfaces = {}
self.idl_name = idl_name
self.typedefs = {}
node_class = node.GetClass()
if node_class != 'File':
raise ValueError('Unrecognized node class: %s' % node_class)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Interface':
interface = IdlInterface(idl_name, child)
self.interfaces[interface.name] = interface
elif child_class == 'Exception':
exception = IdlException(idl_name, child)
# For simplicity, treat exceptions as interfaces
self.interfaces[exception.name] = exception
elif child_class == 'Typedef':
type_name = child.GetName()
self.typedefs[type_name] = typedef_node_to_type(child)
elif child_class == 'Enum':
enumeration = IdlEnum(idl_name, child)
self.enumerations[enumeration.name] = enumeration
elif child_class == 'Callback':
callback_function = IdlCallbackFunction(idl_name, child)
self.callback_functions[callback_function.name] = callback_function
elif child_class == 'Implements':
self.implements.append(IdlImplement(child))
elif child_class == 'Dictionary':
dictionary = IdlDictionary(idl_name, child)
self.dictionaries[dictionary.name] = dictionary
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def resolve_typedefs(self, typedefs):
# Resolve typedefs with the actual types.
# http://www.w3.org/TR/WebIDL/#idl-typedefs
typedefs.update(dict((typedef_name, IdlType(type_name))
for typedef_name, type_name in
STANDARD_TYPEDEFS.iteritems()))
for callback_function in self.callback_functions.itervalues():
callback_function.resolve_typedefs(typedefs)
for interface in self.interfaces.itervalues():
interface.resolve_typedefs(typedefs)
def update(self, other):
"""Update with additional IdlDefinitions."""
for interface_name, new_interface in other.interfaces.iteritems():
if not new_interface.is_partial:
# Add as new interface
self.interfaces[interface_name] = new_interface
continue
# Merge partial to existing interface
try:
self.interfaces[interface_name].merge(new_interface)
except KeyError:
raise Exception('Tried to merge partial interface for {0}, '
'but no existing interface by that name'
.format(interface_name))
# Merge callbacks and enumerations
self.enumerations.update(other.enumerations)
self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children != 2:
raise ValueError('Expected 2 children, got %s' % num_children)
type_node, arguments_node = children
arguments_node_class = arguments_node.GetClass()
if arguments_node_class != 'Arguments':
raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = type_node_to_type(type_node)
self.arguments = arguments_node_to_arguments(idl_name, arguments_node)
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.is_partial = bool(node.GetProperty('Partial'))
self.idl_name = idl_name
self.name = node.GetName()
self.members = []
self.parent = None
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Key':
self.members.append(IdlDictionaryMember(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
class IdlDictionaryMember(object):
def __init__(self, idl_name, node):
self.default_value = None
self.extended_attributes = {}
self.idl_type = None
self.idl_name = idl_name
self.name = node.GetName()
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
# FIXME: remove, just treat enums as a dictionary
def __init__(self, idl_name, node):
self.idl_name = idl_name
self.name = node.GetName()
self.values = []
for child in node.GetChildren():
self.values.append(child.GetName())
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
def __init__(self, idl_name, node=None):
self.attributes = []
self.constants = []
self.constructors = []
self.custom_constructors = []
self.extended_attributes = {}
self.operations = []
self.parent = None
self.stringifier = None
self.iterable = None
self.maplike = None
self.setlike = None
self.original_interface = None
self.partial_interfaces = []
if not node: # Early exit for IdlException.__init__
return
self.is_callback = bool(node.GetProperty('CALLBACK'))
self.is_exception = False
# FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
self.is_partial = bool(node.GetProperty('Partial'))
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
self.attributes.append(IdlAttribute(idl_name, child))
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
self.constructors, self.custom_constructors = (
extended_attributes_to_constructors(idl_name, extended_attributes))
clear_constructor_attributes(extended_attributes)
self.extended_attributes = extended_attributes
elif child_class == 'Operation':
self.operations.append(IdlOperation(idl_name, child))
elif child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Stringifier':
self.stringifier = IdlStringifier(idl_name, child)
self.process_stringifier()
elif child_class == 'Iterable':
self.iterable = IdlIterable(idl_name, child)
elif child_class == 'Maplike':
self.maplike = IdlMaplike(idl_name, child)
elif child_class == 'Setlike':
self.setlike = IdlSetlike(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
if len(filter(None, [self.iterable, self.maplike, self.setlike])) > 1:
raise ValueError('Interface can only have one of iterable<>, maplike<> and setlike<>.')
def resolve_typedefs(self, typedefs):
for attribute in self.attributes:
attribute.resolve_typedefs(typedefs)
for constant in self.constants:
constant.resolve_typedefs(typedefs)
for constructor in self.constructors:
constructor.resolve_typedefs(typedefs)
for custom_constructor in self.custom_constructors:
custom_constructor.resolve_typedefs(typedefs)
for operation in self.operations:
operation.resolve_typedefs(typedefs)
def process_stringifier(self):
"""Add the stringifier's attribute or named operation child, if it has
one, as a regular attribute/operation of this interface."""
if self.stringifier.attribute:
self.attributes.append(self.stringifier.attribute)
elif self.stringifier.operation:
self.operations.append(self.stringifier.operation)
def merge(self, other):
"""Merge in another interface's members (e.g., partial interface)"""
self.attributes.extend(other.attributes)
self.constants.extend(other.constants)
self.operations.extend(other.operations)
class IdlException(IdlInterface):
    # Properly, exceptions and interfaces are distinct, and thus should inherit a
# common base class (say, "IdlExceptionOrInterface").
# However, there is only one exception (DOMException), and new exceptions
# are not expected. Thus it is easier to implement exceptions as a
# restricted subclass of interfaces.
# http://www.w3.org/TR/WebIDL/#idl-exceptions
def __init__(self, idl_name, node):
# Exceptions are similar to Interfaces, but simpler
IdlInterface.__init__(self, idl_name)
self.is_callback = False
self.is_exception = True
self.is_partial = False
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attribute = IdlAttribute(idl_name, child)
self.attributes.append(attribute)
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'ExceptionOperation':
self.operations.append(IdlOperation.from_exception_operation_node(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
def __init__(self, idl_name, node):
self.is_read_only = bool(node.GetProperty('READONLY'))
self.is_static = bool(node.GetProperty('STATIC'))
self.idl_name = idl_name
self.name = node.GetName()
# Defaults, overridden below
self.idl_type = None
self.extended_attributes = {}
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children < 2 or num_children > 3:
raise ValueError('Expected 2 or 3 children, got %s' % num_children)
type_node = children[0]
value_node = children[1]
value_node_class = value_node.GetClass()
if value_node_class != 'Value':
raise ValueError('Expected Value node, got %s' % value_node_class)
self.idl_name = idl_name
self.name = node.GetName()
# ConstType is more limited than Type, so subtree is smaller and
# we don't use the full type_node_to_type function.
self.idl_type = type_node_inner_to_type(type_node)
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
if value_node.GetProperty('TYPE') == 'float':
self.value = value_node.GetProperty('VALUE')
else:
self.value = value_node.GetName()
if num_children == 3:
ext_attributes_node = children[2]
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
else:
self.extended_attributes = {}
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
def __init__(self, idl_type, value):
self.idl_type = idl_type
self.value = value
self.is_null = False
def __str__(self):
if self.idl_type == 'DOMString':
return 'String("%s")' % self.value
if self.idl_type == 'integer':
return '%d' % self.value
if self.idl_type == 'float':
return '%g' % self.value
if self.idl_type == 'boolean':
return 'true' if self.value else 'false'
raise ValueError('Unsupported literal type: %s' % self.idl_type)
class IdlLiteralNull(IdlLiteral):
def __init__(self):
self.idl_type = 'NULL'
self.value = None
self.is_null = True
def __str__(self):
return 'nullptr'
def default_node_to_idl_literal(node):
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
idl_type = node.GetProperty('TYPE')
if idl_type == 'DOMString':
value = node.GetProperty('NAME')
if '"' in value or '\\' in value:
raise ValueError('Unsupported string value: %r' % value)
return IdlLiteral(idl_type, value)
if idl_type == 'integer':
return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
if idl_type == 'float':
return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
if idl_type == 'boolean':
return IdlLiteral(idl_type, node.GetProperty('VALUE'))
if idl_type == 'NULL':
return IdlLiteralNull()
raise ValueError('Unrecognized default value type: %s' % idl_type)
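# Sketch: a default of `= 0` yields IdlLiteral('integer', 0) (str() -> '0'),
# while `= null` yields IdlLiteralNull() (str() -> 'nullptr').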
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
def __init__(self, idl_name, node=None):
self.arguments = []
self.extended_attributes = {}
self.specials = []
self.is_constructor = False
if not node:
self.is_static = False
return
self.idl_name = idl_name
self.name = node.GetName() # FIXME: should just be: or ''
# FIXME: AST should use None internally
if self.name == '_unnamed_':
self.name = ''
self.is_static = bool(node.GetProperty('STATIC'))
property_dictionary = node.GetProperties()
for special_keyword in SPECIAL_KEYWORD_LIST:
if special_keyword in property_dictionary:
self.specials.append(special_keyword.lower())
self.idl_type = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Arguments':
self.arguments = arguments_node_to_arguments(idl_name, child)
elif child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
@classmethod
def from_exception_operation_node(cls, idl_name, node):
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# FIXME: can we remove this? replace with a stringifier?
operation = cls(idl_name)
operation.name = node.GetName()
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
type_node = children[0]
operation.idl_type = type_node_to_type(type_node)
if len(children) > 1:
ext_attributes_node = children[1]
operation.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
return operation
@classmethod
def constructor_from_arguments_node(cls, name, idl_name, arguments_node):
constructor = cls(idl_name)
constructor.name = name
constructor.arguments = arguments_node_to_arguments(idl_name, arguments_node)
constructor.is_constructor = True
return constructor
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.idl_type = None
self.is_optional = node.GetProperty('OPTIONAL') # syntax: (optional T)
self.is_variadic = False # syntax: (T...)
self.idl_name = idl_name
self.name = node.GetName()
self.default_value = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'Argument':
child_name = child.GetName()
if child_name != '...':
raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
self.is_variadic = bool(child.GetProperty('ELLIPSIS'))
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def __getstate__(self):
# FIXME: Return a picklable object which has enough information to
# unpickle.
return {}
def __setstate__(self, state):
pass
def arguments_node_to_arguments(idl_name, node):
# [Constructor] and [CustomConstructor] without arguments (the bare form)
# have None instead of an arguments node, but have the same meaning as using
# an empty argument list, [Constructor()], so special-case this.
# http://www.w3.org/TR/WebIDL/#Constructor
if node is None:
return []
return [IdlArgument(idl_name, argument_node)
for argument_node in node.GetChildren()]
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
def __init__(self, idl_name, node):
self.attribute = None
self.operation = None
self.extended_attributes = {}
self.idl_name = idl_name
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Attribute':
self.attribute = IdlAttribute(idl_name, child)
elif child_class == 'Operation':
operation = IdlOperation(idl_name, child)
if operation.name:
self.operation = operation
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
        # Copy the stringifier's extended attributes (such as [Unforgeable]) onto
# the underlying attribute or operation, if there is one.
if self.attribute or self.operation:
(self.attribute or self.operation).extended_attributes.update(
self.extended_attributes)
################################################################################
# Iterable, Maplike, Setlike
################################################################################
class IdlIterable(object):
def __init__(self, idl_name, node):
children = node.GetChildren()
# FIXME: Support extended attributes.
if len(children) == 1:
self.key_type = None
self.value_type = type_node_to_type(children[0])
elif len(children) == 2:
self.key_type = type_node_to_type(children[0])
self.value_type = type_node_to_type(children[1])
else:
raise ValueError('Unexpected number of children: %d' % len(children))
class IdlMaplike(object):
def __init__(self, idl_name, node):
self.is_read_only = bool(node.GetProperty('READONLY'))
children = node.GetChildren()
# FIXME: Support extended attributes.
if len(children) == 2:
self.key_type = type_node_to_type(children[0])
self.value_type = type_node_to_type(children[1])
else:
raise ValueError('Unexpected number of children: %d' % len(children))
class IdlSetlike(object):
def __init__(self, idl_name, node):
self.is_read_only = bool(node.GetProperty('READONLY'))
children = node.GetChildren()
# FIXME: Support extended attributes.
if len(children) == 1:
self.value_type = type_node_to_type(children[0])
else:
raise ValueError('Unexpected number of children: %d' % len(children))
################################################################################
# Implement statements
################################################################################
class IdlImplement(object):
def __init__(self, node):
self.left_interface = node.GetName()
self.right_interface = node.GetProperty('REFERENCE')
################################################################################
# Extended attributes
################################################################################
class Exposure:
"""An Exposure holds one Exposed or RuntimeEnabled condition.
Each exposure has two properties: exposed and runtime_enabled.
Exposure(e, r) corresponds to [Exposed(e r)]. Exposure(e) corresponds to
[Exposed=e].
"""
def __init__(self, exposed, runtime_enabled=None):
self.exposed = exposed
self.runtime_enabled = runtime_enabled
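# e.g. [Exposed=Window] maps to Exposure(exposed='Window'), while
# [Exposed(Window FeatureName)] maps to
# Exposure(exposed='Window', runtime_enabled='FeatureName').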
def ext_attributes_node_to_extended_attributes(idl_name, node):
"""
Returns:
Dictionary of {ExtAttributeName: ExtAttributeValue}.
Value is usually a string, with these exceptions:
Constructors: value is a list of Arguments nodes, corresponding to
possible signatures of the constructor.
CustomConstructors: value is a list of Arguments nodes, corresponding to
possible signatures of the custom constructor.
NamedConstructor: value is a Call node, corresponding to the single
signature of the named constructor.
SetWrapperReferenceTo: value is an Arguments node.
"""
# Primarily just make a dictionary from the children.
# The only complexity is handling various types of constructors:
# Constructors and Custom Constructors can have duplicate entries due to
# overloading, and thus are stored in temporary lists.
# However, Named Constructors cannot be overloaded, and thus do not have
# a list.
# FIXME: move Constructor logic into separate function, instead of modifying
# extended attributes in-place.
constructors = []
custom_constructors = []
extended_attributes = {}
def child_node(extended_attribute_node):
children = extended_attribute_node.GetChildren()
if not children:
return None
if len(children) > 1:
raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
return children[0]
extended_attribute_node_list = node.GetChildren()
for extended_attribute_node in extended_attribute_node_list:
name = extended_attribute_node.GetName()
child = child_node(extended_attribute_node)
child_class = child and child.GetClass()
if name == 'Constructor':
if child_class and child_class != 'Arguments':
raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
constructors.append(child)
elif name == 'CustomConstructor':
if child_class and child_class != 'Arguments':
raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
custom_constructors.append(child)
elif name == 'NamedConstructor':
if child_class and child_class != 'Call':
raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
extended_attributes[name] = child
elif name == 'SetWrapperReferenceTo':
if not child:
raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
if child_class != 'Arguments':
raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
extended_attributes[name] = arguments_node_to_arguments(idl_name, child)
elif name == 'Exposed':
if child_class and child_class != 'Arguments':
raise ValueError('[Exposed] only supports Arguments as child, but has child of class: %s' % child_class)
exposures = []
if child_class == 'Arguments':
exposures = [Exposure(exposed=str(arg.idl_type),
runtime_enabled=arg.name)
for arg in arguments_node_to_arguments('*', child)]
else:
value = extended_attribute_node.GetProperty('VALUE')
if type(value) is str:
exposures = [Exposure(exposed=value)]
else:
exposures = [Exposure(exposed=v) for v in value]
extended_attributes[name] = exposures
elif child:
raise ValueError('ExtAttributes node with unexpected children: %s' % name)
else:
value = extended_attribute_node.GetProperty('VALUE')
extended_attributes[name] = value
# Store constructors and custom constructors in special list attributes,
# which are deleted later. Note plural in key.
if constructors:
extended_attributes['Constructors'] = constructors
if custom_constructors:
extended_attributes['CustomConstructors'] = custom_constructors
return extended_attributes
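# Sketch: an interface annotated [Constructor, Constructor(long arg)] ends up
# with extended_attributes['Constructors'] holding two entries (None for the
# bare form, an Arguments node for the overload); these are turned into
# IdlOperations by extended_attributes_to_constructors() below.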
def extended_attributes_to_constructors(idl_name, extended_attributes):
"""Returns constructors and custom_constructors (lists of IdlOperations).
Auxiliary function for IdlInterface.__init__.
"""
constructor_list = extended_attributes.get('Constructors', [])
constructors = [
IdlOperation.constructor_from_arguments_node('Constructor', idl_name, arguments_node)
for arguments_node in constructor_list]
custom_constructor_list = extended_attributes.get('CustomConstructors', [])
custom_constructors = [
IdlOperation.constructor_from_arguments_node('CustomConstructor', idl_name, arguments_node)
for arguments_node in custom_constructor_list]
if 'NamedConstructor' in extended_attributes:
# FIXME: support overloaded named constructors, and make homogeneous
name = 'NamedConstructor'
call_node = extended_attributes['NamedConstructor']
extended_attributes['NamedConstructor'] = call_node.GetName()
children = call_node.GetChildren()
if len(children) != 1:
raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
arguments_node = children[0]
named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', idl_name, arguments_node)
# FIXME: should return named_constructor separately; appended for Perl
constructors.append(named_constructor)
return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
# Deletes Constructor*s* (plural), sets Constructor (singular)
if 'Constructors' in extended_attributes:
del extended_attributes['Constructors']
extended_attributes['Constructor'] = None
if 'CustomConstructors' in extended_attributes:
del extended_attributes['CustomConstructors']
extended_attributes['CustomConstructor'] = None
################################################################################
# Types
################################################################################
def type_node_to_type(node):
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
base_type = type_node_inner_to_type(children[0])
if node.GetProperty('NULLABLE'):
base_type = IdlNullableType(base_type)
if len(children) == 2:
array_node = children[1]
array_node_class = array_node.GetClass()
if array_node_class != 'Array':
raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
array_type = IdlArrayType(base_type)
if array_node.GetProperty('NULLABLE'):
return IdlNullableType(array_type)
return array_type
return base_type
def type_node_inner_to_type(node):
node_class = node.GetClass()
# Note Type*r*ef, not Typedef, meaning the type is an identifier, thus
# either a typedef shorthand (but not a Typedef declaration itself) or an
# interface type. We do not distinguish these, and just use the type name.
if node_class in ['PrimitiveType', 'Typeref']:
# unrestricted syntax: unrestricted double | unrestricted float
is_unrestricted = bool(node.GetProperty('UNRESTRICTED'))
return IdlType(node.GetName(), is_unrestricted=is_unrestricted)
elif node_class == 'Any':
return IdlType('any')
elif node_class == 'Sequence':
return sequence_node_to_type(node)
elif node_class == 'UnionType':
return union_type_node_to_idl_union_type(node)
elif node_class == 'Promise':
return IdlType('Promise')
raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Sequence node expects exactly 1 child, got %s' % len(children))
sequence_child = children[0]
sequence_child_class = sequence_child.GetClass()
if sequence_child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % sequence_child_class)
element_type = type_node_to_type(sequence_child)
sequence_type = IdlSequenceType(element_type)
if node.GetProperty('NULLABLE'):
return IdlNullableType(sequence_type)
return sequence_type
def typedef_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Typedef node with %s children, expected 1' % len(children))
child = children[0]
child_class = child.GetClass()
if child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % child_class)
return type_node_to_type(child)
def union_type_node_to_idl_union_type(node):
member_types = [type_node_to_type(member_type_node)
for member_type_node in node.GetChildren()]
return IdlUnionType(member_types)
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/WebKit/Source/bindings/scripts/idl_definitions.py
|
Python
|
bsd-3-clause
| 38,904 | 0.001954 |
'''Package for Banded Min Hash based Similarity Calculations'''
from min_hash import *
|
ClickSecurity/data_hacking
|
data_hacking/min_hash/__init__.py
|
Python
|
mit
| 87 | 0 |
import os
import sys
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
if sys.version_info[0] == 2:
bytes = str
pathjoin = os.path.join
pathexists = os.path.exists
expanduser = os.path.expanduser
abspath = os.path.abspath
dirname = os.path.dirname
def pickle(value):
return _pickle.dumps(value, protocol=_pickle.HIGHEST_PROTOCOL)
def unpickle(encoded_value):
return _pickle.loads(bytes(encoded_value))
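# Round-trip sketch:
#     data = unpickle(pickle({'a': 1}))
#     assert data == {'a': 1}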
def import_module(path):
__import__(path)
return sys.modules[path]
def import_object(name):
"""Imports an object by name.
import_object('x.y.z') is equivalent to 'from x.y import z'.
"""
parts = name.split('.')
m = '.'.join(parts[:-1])
attr = parts[-1]
obj = __import__(m, None, None, [attr], 0)
try:
return getattr(obj, attr)
    except AttributeError:
        raise ImportError("'%s' does not exist in module '%s'" % (attr, m))
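# Usage sketch (standard-library example):
#     loads = import_object('json.loads')
#     loads('{"a": 1}')  # -> {'a': 1}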
|
gmflanagan/waterboy
|
waterboy/utils.py
|
Python
|
bsd-3-clause
| 941 | 0.005313 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from typing import NamedTuple
from copy import deepcopy
from superdesk.resource import Resource, not_analyzed, not_indexed, not_enabled, text_with_keyword, not_dynamic
from .packages import LINKED_IN_PACKAGES, PACKAGE
from eve.utils import config
from superdesk.utils import SuperdeskBaseEnum
GUID_TAG = "tag"
GUID_FIELD = "guid"
GUID_NEWSML = "newsml"
INGEST_ID = "ingest_id"
INGEST_VERSION = "ingest_version"
FAMILY_ID = "family_id"
ASSOCIATIONS = "associations"
#: item public states
class PubStatuses(NamedTuple):
USABLE: str
HOLD: str
CANCELED: str
PUB_STATUS: PubStatuses = PubStatuses("usable", "withheld", "canceled")
class ContentTypes(NamedTuple):
TEXT: str
PREFORMATTED: str
AUDIO: str
VIDEO: str
PICTURE: str
GRAPHIC: str
COMPOSITE: str
EVENT: str
CONTENT_TYPE: ContentTypes = ContentTypes(
"text", "preformatted", "audio", "video", "picture", "graphic", "composite", "event"
)
MEDIA_TYPES = ("audio", "video", "picture", "graphic")
ITEM_TYPE = "type"
ITEM_STATE = "state"
ITEM_PRIORITY = "priority"
ITEM_URGENCY = "urgency"
#: item internal states
class ContentStates(NamedTuple):
DRAFT: str
INGESTED: str
ROUTED: str
FETCHED: str
SUBMITTED: str
PROGRESS: str
SPIKED: str
PUBLISHED: str
KILLED: str
CORRECTED: str
SCHEDULED: str
RECALLED: str
UNPUBLISHED: str
CORRECTION: str
BEING_CORRECTED: str
CONTENT_STATE: ContentStates = ContentStates(
"draft",
"ingested",
"routed",
"fetched",
"submitted",
"in_progress",
"spiked",
"published",
"killed",
"corrected",
"scheduled",
"recalled",
"unpublished",
"correction",
"being_corrected",
)
PUBLISH_STATES = {
CONTENT_STATE.PUBLISHED,
CONTENT_STATE.SCHEDULED,
CONTENT_STATE.CORRECTED,
CONTENT_STATE.KILLED,
CONTENT_STATE.RECALLED,
CONTENT_STATE.UNPUBLISHED,
CONTENT_STATE.BEING_CORRECTED,
}
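# e.g. item.get(ITEM_STATE) in PUBLISH_STATES checks whether an item has been
# through any publish action (including kill, recall and unpublish).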
class Formats(NamedTuple):
HTML: str
PRESERVED: str
FORMAT = "format"
FORMATS: Formats = Formats("HTML", "preserved")
BYLINE = "byline"
SIGN_OFF = "sign_off"
EMBARGO = "embargo"
PUBLISH_SCHEDULE = "publish_schedule"
SCHEDULE_SETTINGS = "schedule_settings"
PROCESSED_FROM = "processed_from"
# part the task dict
LAST_DESK = "last_desk"
LAST_AUTHORING_DESK = "last_authoring_desk"
LAST_PRODUCTION_DESK = "last_production_desk"
DESK_HISTORY = "desk_history"
ITEM_EVENT_ID = "event_id"
geopoint = {
"type": "dict",
"mapping": {"type": "geo_point"},
"nullable": True,
"schema": {
"lat": {"type": "float"},
"lon": {"type": "float"},
},
}
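# A stored value matching the geopoint schema looks like
# {'lat': 51.5074, 'lon': -0.1278}.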
entity_metadata = {
"type": "list",
"nullable": True,
"mapping": {
"type": "object",
"dynamic": False,
"properties": {
"name": text_with_keyword,
"qcode": not_analyzed,
"scheme": not_analyzed,
"source": not_analyzed,
},
},
}
metadata_schema = {
config.ID_FIELD: {"type": "string", "unique": True},
#: Identifiers
"guid": {"type": "string", "unique": True, "mapping": not_analyzed},
"uri": {
"type": "string",
"mapping": not_analyzed,
},
"unique_id": {
"type": "integer",
"unique": True,
},
"unique_name": {"type": "string", "unique": True, "mapping": not_analyzed},
"version": {"type": "integer"},
"ingest_id": {"type": "string", "mapping": not_analyzed},
"ingest_version": {"type": "string", "mapping": not_analyzed},
"family_id": {"type": "string", "mapping": not_analyzed},
"related_to": { # this field keeps a reference to the related item from which metadata has been copied
"type": "string",
"mapping": not_analyzed,
},
# Audit Information
"original_creator": Resource.rel("users"),
"version_creator": Resource.rel("users"),
"firstcreated": {"type": "datetime"},
"versioncreated": {"type": "datetime"},
"firstpublished": {
"type": "datetime",
"required": False,
"nullable": True,
},
# Ingest Details
"ingest_provider": Resource.rel("ingest_providers"),
"source": {"type": "string", "mapping": not_analyzed}, # The value is copied from the ingest_providers vocabulary
"original_source": {"type": "string", "mapping": not_analyzed}, # This value is extracted from the ingest
"ingest_provider_sequence": {"type": "string", "mapping": not_analyzed},
# Copyright Information
"usageterms": {
"type": "string",
"nullable": True,
},
"copyrightnotice": {"type": "string", "nullable": True, "mapping": not_indexed},
"copyrightholder": {"type": "string", "nullable": True},
# Category Details
"anpa_category": {
"type": "list",
"nullable": True,
"mapping": {
"type": "object",
"properties": {
"qcode": not_analyzed,
"name": not_analyzed,
"scheme": not_analyzed,
},
},
},
"subject": {
"type": "list",
"mapping": {"type": "object", "dynamic": False, "properties": {"qcode": not_analyzed, "name": not_analyzed}},
},
"genre": {
"type": "list",
"nullable": True,
"mapping": {"type": "object", "properties": {"name": not_analyzed, "qcode": not_analyzed}},
},
"company_codes": {
"type": "list",
"mapping": {
"type": "object",
"properties": {"qcode": not_analyzed, "name": not_analyzed, "security_exchange": not_analyzed},
},
},
# Item Metadata
ITEM_TYPE: {
"type": "string",
"allowed": tuple(CONTENT_TYPE),
"default": "text",
"mapping": not_analyzed,
},
"package_type": {"type": "string", "allowed": ["takes"]}, # deprecated
"language": {
"type": "string",
"mapping": not_analyzed,
"nullable": True,
},
"abstract": {
"type": "string",
"nullable": True,
},
"headline": {
"type": "string",
"mapping": {
"type": "string",
"analyzer": "html_field_analyzer",
"search_analyzer": "html_field_analyzer",
},
},
"slugline": {
"type": "string",
"mapping": {
"type": "string",
"fielddata": True,
"fields": {
"phrase": {
"type": "string",
"analyzer": "phrase_prefix_analyzer",
"search_analyzer": "phrase_prefix_analyzer",
"fielddata": True,
},
"keyword": {
"type": "keyword",
},
},
},
},
"anpa_take_key": {
"type": "string",
"nullable": True,
},
"correction_sequence": {"type": "integer", "nullable": True, "mapping": not_analyzed},
"rewrite_sequence": {"type": "integer", "nullable": True, "mapping": not_analyzed},
"rewrite_of": {
"type": "string",
"nullable": True,
"mapping": not_analyzed,
},
"rewritten_by": {
"type": "string",
"nullable": True,
"mapping": not_analyzed,
},
"sequence": {
"type": "integer",
"nullable": True,
},
"keywords": {"type": "list", "mapping": {"type": "string"}},
"word_count": {"type": "integer"},
"priority": {"type": "integer", "nullable": True},
"urgency": {"type": "integer", "nullable": True},
"profile": {
"type": "string",
"nullable": True,
"mapping": not_analyzed,
},
# Related to state of an article
ITEM_STATE: {
"type": "string",
"allowed": tuple(CONTENT_STATE),
"mapping": not_analyzed,
},
# The previous state the item was in before for example being spiked, when un-spiked it will revert to this state
"revert_state": {
"type": "string",
"allowed": tuple(CONTENT_STATE),
"mapping": not_analyzed,
},
"pubstatus": {
"type": "string",
"allowed": tuple(PUB_STATUS),
"default": PUB_STATUS.USABLE,
"mapping": not_analyzed,
"nullable": True,
},
"signal": {
"type": "list",
"mapping": {
"type": "object",
"properties": {"qcode": not_analyzed, "name": not_analyzed, "scheme": not_analyzed},
},
},
BYLINE: {
"type": "string",
"nullable": True,
},
"ednote": {
"type": "string",
"nullable": True,
},
"authors": {
"type": "list",
"nullable": True,
"mapping": {
"type": "object",
"dynamic": False,
"properties": {
"uri": not_analyzed,
"parent": not_analyzed,
"name": not_analyzed,
"role": not_analyzed,
"jobtitle": not_enabled,
},
},
},
"description_text": {"type": "string", "nullable": True},
    # This is a description of the item as received from its source.
"archive_description": {"type": "string", "nullable": True},
"groups": {
"type": "list",
"minlength": 1,
"nullable": True,
"mapping": {
"dynamic": False,
"properties": {
"id": not_analyzed,
"refs": {
"dynamic": False,
"properties": {
"idRef": not_analyzed,
"_id": not_analyzed,
"uri": not_analyzed,
"guid": not_analyzed,
"type": not_analyzed,
"location": not_analyzed,
"headline": {"type": "string"},
"slugline": {"type": "string"},
},
},
},
},
},
"deleted_groups": {
"type": "list",
"minlength": 1,
"nullable": True,
},
"body_html": {
"type": "string",
"nullable": True,
"mapping": {"type": "string", "analyzer": "html_field_analyzer", "search_analyzer": "html_field_analyzer"},
},
"body_text": {
"type": "string",
"nullable": True,
},
"dateline": {
"type": "dict",
"nullable": True,
"schema": {
"located": {
"type": "dict",
"nullable": True,
"schema": {
"state_code": {"type": "string"},
"city": {"type": "string"},
"tz": {"type": "string"},
"country_code": {"type": "string"},
"dateline": {"type": "string"},
"alt_name": {"type": "string"},
"state": {"type": "string"},
"city_code": {"type": "string"},
"country": {"type": "string"},
"code": {"type": "string"},
"scheme": {"type": "string"},
"location": geopoint,
"place": {
"type": "dict",
"nullable": True,
"mapping": not_enabled,
"schema": {
"code": {"type": "string"},
"name": {"type": "string"},
"qcode": {"type": "string"},
"scheme": {"type": "string"},
"feature_class": {"type": "string"},
"location": geopoint,
"continent_code": {"type": "string", "nullable": True},
"region": {"type": "string", "nullable": True},
"region_code": {"type": "string", "nullable": True},
"locality": {"type": "string", "nullable": True},
"state": {"type": "string", "nullable": True},
"country": {"type": "string", "nullable": True},
"world_region": {"type": "string", "nullable": True},
"locality_code": {"type": "string", "nullable": True},
"state_code": {"type": "string", "nullable": True},
"country_code": {"type": "string", "nullable": True},
"world_region_code": {"type": "string", "nullable": True},
"rel": {"type": "string", "nullable": True},
"tz": {"type": "string", "nullable": True},
},
},
},
},
"date": {"type": "datetime", "nullable": True},
"source": {"type": "string"},
"text": {"type": "string", "nullable": True},
},
},
"expiry": {"type": "datetime"},
# Media Related
"media": {"type": "file"},
"mimetype": {"type": "string", "mapping": not_analyzed},
"poi": {
"type": "dict",
"schema": {"x": {"type": "float", "nullable": False}, "y": {"type": "float", "nullable": False}},
},
"renditions": {
"type": "dict",
"schema": {},
"allow_unknown": True,
"mapping": not_enabled,
},
"filemeta": {
"type": "dict",
"schema": {},
"allow_unknown": True,
"mapping": not_enabled,
},
"filemeta_json": {"type": "string"},
"media_file": {"type": "string"},
"contents": {"type": "list"},
ASSOCIATIONS: {
"type": "dict",
"allow_unknown": True,
"schema": {},
"mapping": {
"type": "object",
"dynamic": False,
"properties": {
"featuremedia": { # keep indexing featuremedia - we do some filtering using it
"type": "object",
"dynamic": False,
"properties": {
"_id": not_analyzed,
"guid": not_analyzed,
"unique_id": {"type": "integer"},
},
}
},
},
},
# track references to other objects,
# based on associations but allows queries
"refs": {
"type": "list",
"readonly": True,
"schema": {
"_id": {"type": "string"},
"key": {"type": "string"},
"uri": {"type": "string"},
"guid": {"type": "string"},
"type": {"type": "string"},
"source": {"type": "string", "nullable": True},
},
"mapping": {
"type": "object",
"properties": {
"_id": not_analyzed,
"key": not_analyzed,
"uri": not_analyzed,
"guid": not_analyzed,
"type": not_analyzed,
"source": not_analyzed,
},
},
},
"alt_text": {"type": "string", "nullable": True},
# aka Locator as per NewML Specification
"place": {
"type": "list",
"nullable": True,
"mapping": {
"type": "object",
"dynamic": False,
"properties": {
"scheme": not_analyzed,
"qcode": not_analyzed,
"code": not_analyzed, # content api
"name": not_analyzed,
"locality": not_analyzed, # can be used for city/town/village etc.
"state": not_analyzed,
"country": not_analyzed,
"world_region": not_analyzed,
"locality_code": not_analyzed,
"state_code": not_analyzed,
"country_code": not_analyzed,
"world_region_code": not_analyzed,
"feature_class": not_analyzed,
"location": {"type": "geo_point"},
"rel": not_analyzed,
},
},
},
"event": deepcopy(entity_metadata),
"person": deepcopy(entity_metadata),
"object": deepcopy(entity_metadata),
"organisation": deepcopy(entity_metadata),
# Not Categorized
"creditline": {"type": "string"},
LINKED_IN_PACKAGES: {
"type": "list",
"readonly": True,
"schema": {
"type": "dict",
"schema": {PACKAGE: Resource.rel("archive"), "package_type": {"type": "string"}}, # deprecated
},
},
"highlight": Resource.rel("highlights"),
"highlights": {"type": "list", "schema": Resource.rel("highlights", True)},
"marked_desks": {
"type": "list",
"nullable": True,
"schema": {
"type": "dict",
"schema": {
"desk_id": Resource.rel("desks", True),
"date_marked": {"type": "datetime", "nullable": True},
"user_marked": Resource.rel("users", required=False, nullable=True),
"date_acknowledged": {"type": "datetime", "nullable": True},
"user_acknowledged": Resource.rel("users", required=False, nullable=True),
},
},
},
"more_coming": {"type": "boolean"}, # deprecated
# Field which contains all the sign-offs done on this article, eg. twd/jwt/ets
SIGN_OFF: {
"type": "string",
"nullable": True,
},
# Desk and Stage Details
"task": {
"type": "dict",
"schema": {
"user": {"type": "string", "mapping": not_analyzed, "nullable": True},
"desk": {"type": "string", "mapping": not_analyzed, "nullable": True},
"desk_history": {"type": "list", "mapping": not_analyzed},
"last_desk": {"type": "string", "mapping": not_analyzed},
"stage": {"type": "string", "mapping": not_analyzed, "nullable": True},
"status": {"type": "string", "mapping": not_analyzed},
},
},
# Task and Lock Details
"task_id": {"type": "string", "mapping": not_analyzed, "versioned": False},
"lock_user": Resource.rel("users"),
"lock_time": {"type": "datetime", "versioned": False},
"lock_session": Resource.rel("auth"),
# Action when the story is locked: edit, correct, kill
"lock_action": {"type": "string", "mapping": not_analyzed, "nullable": True},
# template used to create an item
"template": Resource.rel("content_templates"),
"body_footer": { # Public Service Announcements
"type": "string",
"nullable": True,
"mapping": not_indexed,
},
"flags": {
"type": "dict",
"schema": {
"marked_for_not_publication": {"type": "boolean", "default": False},
"marked_for_legal": {"type": "boolean", "default": False},
"marked_archived_only": {"type": "boolean", "default": False},
"marked_for_sms": {"type": "boolean", "default": False},
},
"default": {
"marked_for_not_publication": False,
"marked_for_legal": False,
"marked_archived_only": False,
"marked_for_sms": False,
},
},
"sms_message": {"type": "string", "mapping": not_analyzed, "nullable": True},
FORMAT: {"type": "string", "mapping": not_analyzed, "default": FORMATS.HTML},
# True indicates that the item has been or is to be published as a result of a routing rule
"auto_publish": {"type": "boolean"},
# draft-js internal data
"fields_meta": {
"type": "dict",
"schema": {},
"allow_unknown": True,
"nullable": True,
"mapping": not_enabled,
},
"annotations": {
"type": "list",
"mapping": not_enabled,
"schema": {
"type": "dict",
"schema": {
"id": {"type": "integer"},
"type": {"type": "string"},
"body": {"type": "string"},
},
},
},
"extra": {
"type": "dict",
"schema": {},
"mapping": not_dynamic,
"allow_unknown": True,
},
"attachments": {
"type": "list",
"nullable": True,
"schema": {
"type": "dict",
"schema": {
"attachment": Resource.rel("attachments", nullable=False),
},
},
},
# references assignment related to the coverage
"assignment_id": {"type": "string", "mapping": not_analyzed},
"translated_from": {
"type": "string",
"mapping": not_analyzed,
},
"translation_id": {
"type": "string",
"mapping": not_analyzed,
},
"translations": {
"type": "list",
"mapping": not_analyzed,
},
# references item id for items auto published using internal destinations
PROCESSED_FROM: {"type": "string", "mapping": not_analyzed},
# ingested embargoed info, not using embargo to avoid validation
"embargoed": {"type": "datetime"},
"embargoed_text": {"type": "string", "mapping": not_indexed},
"marked_for_user": Resource.rel("users", required=False, nullable=True),
"marked_for_sign_off": {"type": "string", "nullable": True},
"broadcast": {
"type": "dict",
"schema": {
"status": {"type": "string", "mapping": not_analyzed},
"master_id": {"type": "string", "mapping": not_analyzed},
"rewrite_id": {"type": "string", "mapping": not_analyzed},
},
},
ITEM_EVENT_ID: {"type": "string", "mapping": not_analyzed},
# schedules
EMBARGO: {"type": "datetime", "nullable": True},
PUBLISH_SCHEDULE: {"type": "datetime", "nullable": True},
SCHEDULE_SETTINGS: {
"type": "dict",
"schema": {
"time_zone": {"type": "string", "nullable": True, "mapping": not_analyzed},
"utc_embargo": {"type": "datetime", "nullable": True},
"utc_publish_schedule": {"type": "datetime", "nullable": True},
},
},
# usage tracking
"used": {"type": "boolean"},
"used_count": {"type": "integer"},
"used_updated": {"type": "datetime"},
"metrics": {
"type": "dict",
"readonly": True,
"allow_unknown": True,
},
# system fields
"_type": {"type": "string", "mapping": None},
"operation": {"type": "string"},
"es_highlight": {"type": "dict", "allow_unknown": True, "readonly": True},
# targeting fields
"target_regions": {
"type": "list",
"nullable": True,
"schema": {
"type": "dict",
"schema": {"qcode": {"type": "string"}, "name": {"type": "string"}, "allow": {"type": "boolean"}},
},
},
"target_types": {
"type": "list",
"nullable": True,
"schema": {
"type": "dict",
"schema": {"qcode": {"type": "string"}, "name": {"type": "string"}, "allow": {"type": "boolean"}},
},
},
"target_subscribers": {"type": "list", "nullable": True},
}
metadata_schema["lock_user"]["versioned"] = False
metadata_schema["lock_session"]["versioned"] = False
crop_schema = {
"CropLeft": {"type": "integer"},
"CropRight": {"type": "integer"},
"CropTop": {"type": "integer"},
"CropBottom": {"type": "integer"},
}
def remove_metadata_for_publish(item):
"""Remove metadata from item that should not be public.
:param item: Item containing the metadata
:return: item
"""
from superdesk.attachments import is_attachment_public
if len(item.get("attachments", [])) > 0:
item["attachments"] = [attachment for attachment in item["attachments"] if is_attachment_public(attachment)]
return item
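# A minimal usage sketch (hypothetical data; assumes is_attachment_public()
# decides based on an internal/public flag on the attachment):
#
#   item = {"attachments": [public_attachment, internal_attachment]}
#   item = remove_metadata_for_publish(item)
#   # item["attachments"] now holds only the public attachment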
class Priority(SuperdeskBaseEnum):
"""Priority values."""
Flash = 1
Urgent = 2
Three_Paragraph = 3
Screen_Finance = 4
Continuous_News = 5
Ordinary = 6
def get_schema(versioning=False):
schema = metadata_schema.copy()
if versioning:
schema.update(
{
"_id_document": {"type": "string"},
"_current_version": {"type": "integer"},
}
)
return schema
|
petrjasek/superdesk-core
|
superdesk/metadata/item.py
|
Python
|
agpl-3.0
| 24,541 | 0.001589 |
#!/usr/bin/env python
# coding: utf-8
import unittest
import sys
import os
PROJECT_PATH = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
ROOT_PATH = os.path.dirname(__file__)
if __name__ == '__main__':
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
sys.path.append(os.path.join(PROJECT_PATH, 'src'))
tests = unittest.TestLoader().discover(ROOT_PATH, "*.py")
result = unittest.TextTestRunner().run(tests)
if not result.wasSuccessful():
sys.exit(1)
|
renzon/blob_app
|
test/testloader.py
|
Python
|
mit
| 630 | 0.001587 |
from .plotter import *
|
brain-research/mirage-rl-qprop
|
rllab/plotter/__init__.py
|
Python
|
mit
| 23 | 0 |
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture as fixture_config
import oslo_messaging
from ceilometer.api import hooks
from ceilometer.tests import base
class TestTestNotifierHook(base.BaseTestCase):
def setUp(self):
super(TestTestNotifierHook, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_init_notifier_with_drivers(self):
self.CONF.set_override('telemetry_driver', 'messagingv2',
group='publisher_notifier')
hook = hooks.NotifierHook(self.CONF)
notifier = hook.notifier
self.assertIsInstance(notifier, oslo_messaging.Notifier)
self.assertEqual(['messagingv2'], notifier._driver_names)
|
ityaptin/ceilometer
|
ceilometer/tests/unit/api/test_hooks.py
|
Python
|
apache-2.0
| 1,354 | 0 |
""" Views for a student's profile information. """
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404
from django.views.decorators.http import require_http_methods
from django_countries import countries
from badges.utils import badges_enabled
from edxmako.shortcuts import marketing_link, render_to_response
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.accounts.api import get_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotAuthorized, UserNotFound
from openedx.core.djangoapps.user_api.preferences.api import get_user_preferences
from student.models import User
@login_required
@require_http_methods(['GET'])
def learner_profile(request, username):
"""Render the profile page for the specified username.
Args:
request (HttpRequest)
username (str): username of user whose profile is requested.
Returns:
HttpResponse: 200 if the page was sent successfully
HttpResponse: 302 if not logged in (redirect to login page)
HttpResponse: 405 if using an unsupported HTTP method
Raises:
Http404: 404 if the specified user is not authorized or does not exist
Example usage:
GET /account/profile
"""
try:
return render_to_response(
'student_profile/learner_profile.html',
learner_profile_context(request, username, request.user.is_staff)
)
except (UserNotAuthorized, UserNotFound, ObjectDoesNotExist):
raise Http404
def learner_profile_context(request, profile_username, user_is_staff):
"""Context for the learner profile page.
Args:
logged_in_user (object): Logged In user.
profile_username (str): username of user whose profile is requested.
user_is_staff (bool): Logged In user has staff access.
build_absolute_uri_func ():
Returns:
dict
Raises:
ObjectDoesNotExist: the specified profile_username does not exist.
"""
profile_user = User.objects.get(username=profile_username)
logged_in_user = request.user
own_profile = (logged_in_user.username == profile_username)
account_settings_data = get_account_settings(request, [profile_username])[0]
preferences_data = get_user_preferences(profile_user, profile_username)
context = {
'data': {
'profile_user_id': profile_user.id,
'default_public_account_fields': settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'],
'default_visibility': settings.ACCOUNT_VISIBILITY_CONFIGURATION['default_visibility'],
'accounts_api_url': reverse("accounts_api", kwargs={'username': profile_username}),
'preferences_api_url': reverse('preferences_api', kwargs={'username': profile_username}),
'preferences_data': preferences_data,
'account_settings_data': account_settings_data,
'profile_image_upload_url': reverse('profile_image_upload', kwargs={'username': profile_username}),
'profile_image_remove_url': reverse('profile_image_remove', kwargs={'username': profile_username}),
'profile_image_max_bytes': settings.PROFILE_IMAGE_MAX_BYTES,
'profile_image_min_bytes': settings.PROFILE_IMAGE_MIN_BYTES,
'account_settings_page_url': reverse('account_settings'),
'has_preferences_access': (logged_in_user.username == profile_username or user_is_staff),
'own_profile': own_profile,
'country_options': list(countries),
'find_courses_url': marketing_link('COURSES'),
'language_options': settings.ALL_LANGUAGES,
'badges_logo': staticfiles_storage.url('certificates/images/backpack-logo.png'),
'badges_icon': staticfiles_storage.url('certificates/images/ico-mozillaopenbadges.png'),
'backpack_ui_img': staticfiles_storage.url('certificates/images/backpack-ui.png'),
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
},
'disable_courseware_js': True,
}
if badges_enabled():
context['data']['badges_api_url'] = reverse("badges_api:user_assertions", kwargs={'username': profile_username})
return context
|
miptliot/edx-platform
|
lms/djangoapps/student_profile/views.py
|
Python
|
agpl-3.0
| 4,548 | 0.003518 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt, cstr
from webnotes import msgprint
from webnotes.model.controller import DocListController
status_map = {
"Contact": [
["Replied", "communication_sent"],
["Open", "communication_received"]
],
"Job Applicant": [
["Replied", "communication_sent"],
["Open", "communication_received"]
],
"Lead": [
["Replied", "communication_sent"],
["Converted", "has_customer"],
["Opportunity", "has_opportunity"],
["Open", "communication_received"],
],
"Opportunity": [
["Draft", None],
["Submitted", "eval:self.doc.docstatus==1"],
["Lost", "eval:self.doc.status=='Lost'"],
["Quotation", "has_quotation"],
["Replied", "communication_sent"],
["Cancelled", "eval:self.doc.docstatus==2"],
["Open", "communication_received"],
],
"Quotation": [
["Draft", None],
["Submitted", "eval:self.doc.docstatus==1"],
["Lost", "eval:self.doc.status=='Lost'"],
["Ordered", "has_sales_order"],
["Replied", "communication_sent"],
["Cancelled", "eval:self.doc.docstatus==2"],
["Open", "communication_received"],
],
"Sales Order": [
["Draft", None],
["Submitted", "eval:self.doc.docstatus==1"],
["Stopped", "eval:self.doc.status=='Stopped'"],
["Cancelled", "eval:self.doc.docstatus==2"],
],
"Support Ticket": [
["Replied", "communication_sent"],
["Open", "communication_received"]
],
}
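# Each status_map entry is an ordered list of [status, condition] pairs.
# set_status() below walks the list in reverse and applies the first rule
# that matches: None is an unconditional default, an "eval:" prefix is
# evaluated against self.doc, and any other string names a method on the
# controller (e.g. communication_sent).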
class StatusUpdater(DocListController):
"""
Updates the status of the calling records
Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
"""
def update_prevdoc_status(self):
self.update_qty()
self.validate_qty()
def set_status(self, update=False):
if self.doc.get("__islocal"):
return
if self.doc.doctype in status_map:
sl = status_map[self.doc.doctype][:]
sl.reverse()
for s in sl:
if not s[1]:
self.doc.status = s[0]
break
elif s[1].startswith("eval:"):
if eval(s[1][5:]):
self.doc.status = s[0]
break
elif getattr(self, s[1])():
self.doc.status = s[0]
break
if update:
webnotes.conn.set_value(self.doc.doctype, self.doc.name, "status", self.doc.status)
def on_communication(self):
self.communication_set = True
self.set_status(update=True)
del self.communication_set
def communication_received(self):
if getattr(self, "communication_set", False):
last_comm = self.doclist.get({"doctype":"Communication"})
if last_comm:
return last_comm[-1].sent_or_received == "Received"
def communication_sent(self):
if getattr(self, "communication_set", False):
last_comm = self.doclist.get({"doctype":"Communication"})
if last_comm:
return last_comm[-1].sent_or_received == "Sent"
def validate_qty(self):
"""
Validates qty at row level
"""
self.tolerance = {}
self.global_tolerance = None
for args in self.status_updater:
# get unique transactions to update
for d in self.doclist:
if d.doctype == args['source_dt'] and d.fields.get(args["join_field"]):
args['name'] = d.fields[args['join_field']]
                # get all rows where the transacted qty exceeds the reference qty
item = webnotes.conn.sql("""select item_code, `%(target_ref_field)s`,
`%(target_field)s`, parenttype, parent from `tab%(target_dt)s`
where `%(target_ref_field)s` < `%(target_field)s`
and name="%(name)s" and docstatus=1""" % args, as_dict=1)
if item:
item = item[0]
item['idx'] = d.idx
item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')
if not item[args['target_ref_field']]:
msgprint("""As %(target_ref_field)s for item: %(item_code)s in \
%(parenttype)s: %(parent)s is zero, system will not check \
over-delivery or over-billed""" % item)
elif args.get('no_tolerance'):
item['reduce_by'] = item[args['target_field']] - \
item[args['target_ref_field']]
if item['reduce_by'] > .01:
msgprint("""
Row #%(idx)s: Max %(target_ref_field)s allowed for <b>Item \
%(item_code)s</b> against <b>%(parenttype)s %(parent)s</b> \
is <b>""" % item + cstr(item[args['target_ref_field']]) +
"""</b>.<br>You must reduce the %(target_ref_field)s by \
%(reduce_by)s""" % item, raise_exception=1)
else:
self.check_overflow_with_tolerance(item, args)
def check_overflow_with_tolerance(self, item, args):
"""
        Checks whether there is overflow, considering a relaxation tolerance
"""
# check if overflow is within tolerance
tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
self.tolerance, self.global_tolerance)
overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
item[args['target_ref_field']]) * 100
if overflow_percent - tolerance > 0.01:
item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
item['reduce_by'] = item[args['target_field']] - item['max_allowed']
msgprint("""
Row #%(idx)s: Max %(target_ref_field)s allowed for <b>Item %(item_code)s</b> \
against <b>%(parenttype)s %(parent)s</b> is <b>%(max_allowed)s</b>.
If you want to increase your overflow tolerance, please increase tolerance %% in \
Global Defaults or Item master.
Or, you must reduce the %(target_ref_field)s by %(reduce_by)s
Also, please check if the order item has already been billed in the Sales Order""" %
item, raise_exception=1)
def update_qty(self, change_modified=True):
"""
Updates qty at row level
"""
for args in self.status_updater:
# condition to include current record (if submit or no if cancel)
if self.doc.docstatus == 1:
args['cond'] = ' or parent="%s"' % self.doc.name
else:
args['cond'] = ' and parent!="%s"' % self.doc.name
args['modified_cond'] = ''
if change_modified:
args['modified_cond'] = ', modified = now()'
# update quantities in child table
for d in self.doclist:
if d.doctype == args['source_dt']:
# updates qty in the child table
args['detail_id'] = d.fields.get(args['join_field'])
args['second_source_condition'] = ""
if args.get('second_source_dt') and args.get('second_source_field') \
and args.get('second_join_field'):
args['second_source_condition'] = """ + (select sum(%(second_source_field)s)
from `tab%(second_source_dt)s`
where `%(second_join_field)s`="%(detail_id)s"
and (docstatus=1))""" % args
if args['detail_id']:
webnotes.conn.sql("""update `tab%(target_dt)s`
set %(target_field)s = (select sum(%(source_field)s)
from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
and (docstatus=1 %(cond)s)) %(second_source_condition)s
where name='%(detail_id)s'""" % args)
# get unique transactions to update
for name in set([d.fields.get(args['percent_join_field']) for d in self.doclist
if d.doctype == args['source_dt']]):
if name:
args['name'] = name
# update percent complete in the parent table
webnotes.conn.sql("""update `tab%(target_parent_dt)s`
set %(target_parent_field)s = (select sum(if(%(target_ref_field)s >
ifnull(%(target_field)s, 0), %(target_field)s,
%(target_ref_field)s))/sum(%(target_ref_field)s)*100
from `tab%(target_dt)s` where parent="%(name)s") %(modified_cond)s
where name='%(name)s'""" % args)
# update field
if args.get('status_field'):
webnotes.conn.sql("""update `tab%(target_parent_dt)s`
set %(status_field)s = if(ifnull(%(target_parent_field)s,0)<0.001,
'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
'Fully %(keyword)s', 'Partly %(keyword)s'))
where name='%(name)s'""" % args)
def get_tolerance_for(item_code, item_tolerance={}, global_tolerance=None):
"""
    Returns the tolerance for the item; if not set, returns the global tolerance
"""
if item_tolerance.get(item_code):
return item_tolerance[item_code], item_tolerance, global_tolerance
tolerance = flt(webnotes.conn.get_value('Item',item_code,'tolerance') or 0)
if not tolerance:
if global_tolerance == None:
global_tolerance = flt(webnotes.conn.get_value('Global Defaults', None,
'tolerance'))
tolerance = global_tolerance
item_tolerance[item_code] = tolerance
return tolerance, item_tolerance, global_tolerance
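# Illustrative call (requires a live webnotes connection; the item code is
# hypothetical):
#
#   tolerance, cache, global_tol = get_tolerance_for("ITEM-001")
#   # returns the item's own tolerance %, falling back to Global Defaults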
|
saurabh6790/med_app_rels
|
controllers/status_updater.py
|
Python
|
agpl-3.0
| 8,752 | 0.035078 |
"""renaming Jan COPE to Feb
Revision ID: 942d61446bfa
Revises: 99fb6b79b5f7
Create Date: 2021-01-18 13:37:50.121134
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '942d61446bfa'
down_revision = '99fb6b79b5f7'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_feb', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_feb_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_feb_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.drop_column('participant_summary', 'questionnaire_on_cope_jan_time')
op.drop_column('participant_summary', 'questionnaire_on_cope_jan')
op.drop_column('participant_summary', 'questionnaire_on_cope_jan_authored')
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_jan_authored', mysql.DATETIME(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_jan', mysql.SMALLINT(display_width=6), autoincrement=False, nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_jan_time', mysql.DATETIME(), nullable=True))
op.drop_column('participant_summary', 'questionnaire_on_cope_feb_time')
op.drop_column('participant_summary', 'questionnaire_on_cope_feb_authored')
op.drop_column('participant_summary', 'questionnaire_on_cope_feb')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
all-of-us/raw-data-repository
|
rdr_service/alembic/versions/942d61446bfa_renaming_jan_cope_to_feb.py
|
Python
|
bsd-3-clause
| 3,082 | 0.004867 |
"""Auto-generated file, do not edit by hand. FO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_FO = PhoneMetadata(id='FO', country_code=298, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{5}', possible_number_pattern='\\d{6}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:20|[3-4]\\d|8[19])\\d{4}', possible_number_pattern='\\d{6}', example_number='201234'),
mobile=PhoneNumberDesc(national_number_pattern='(?:[27][1-9]|5\\d)\\d{4}', possible_number_pattern='\\d{6}', example_number='211234'),
toll_free=PhoneNumberDesc(national_number_pattern='80[257-9]\\d{3}', possible_number_pattern='\\d{6}', example_number='802123'),
premium_rate=PhoneNumberDesc(national_number_pattern='90(?:[1345][15-7]|2[125-7]|99)\\d{2}', possible_number_pattern='\\d{6}', example_number='901123'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='(?:6[0-36]|88)\\d{4}', possible_number_pattern='\\d{6}', example_number='601234'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix_for_parsing='(10(?:01|[12]0|88))',
number_format=[NumberFormat(pattern='(\\d{6})', format='\\1', domestic_carrier_code_formatting_rule='$CC \\1')])
|
titansgroup/python-phonenumbers
|
python/phonenumbers/data/region_FO.py
|
Python
|
apache-2.0
| 1,769 | 0.008479 |
import os
import sys
from setuptools import setup
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# The argparse module was introduced in python 2.7 or python 3.2
REQUIRES = ["argparse"] if sys.version[:3] in ('2.6', '3.0', '3.1') else []
setup(
version='0.2.1.dev0',
zip_safe = True,
name = "seqfile",
author = "Utkarsh Upadhyay",
author_email = "musically.ut@gmail.com",
description = ("Find the next file in a sequence of files in a thread-safe way."),
license = "MIT",
keywords = "file threadsafe sequence",
install_requires = REQUIRES + [ "natsort>=3.5.6" ],
url = "https://github.com/musically-ut/seqfile",
packages = ["seqfile"],
setup_requires = REQUIRES + ["nose>=1.0", "natsort>=3.5.6", "pep8>=1.6.2"],
test_suite = "nose.collector",
long_description = read("README.rst"),
entry_points = {"console_scripts": [ "seqfile = seqfile.seqfile:_run" ]
},
classifiers = [
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Natural Language :: English"
],
)
|
musically-ut/seqfile
|
setup.py
|
Python
|
mit
| 1,646 | 0.031592 |
# -*- coding: utf-8 -*-
# Python stdlib
import unittest
# Unit tests
from unit_tests.test_tfstate import test_base, test_provider
def suite():
suite = unittest.TestSuite()
suite.addTests(test_base.suite())
suite.addTests(test_provider.suite())
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
rodynnz/python-tfstate
|
unit_tests/test_tfstate/__init__.py
|
Python
|
lgpl-3.0
| 359 | 0.002786 |
# pib.py - functions for handling Serbian VAT numbers
# coding: utf-8
#
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PIB (Poreski Identifikacioni Broj, Serbian tax identification number).
The Serbian tax identification number consists of 9 digits where the last
digit is a check digit.
>>> validate('101134702')
'101134702'
>>> validate('101134703')
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
from stdnum.exceptions import *
from stdnum.iso7064 import mod_11_10
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -.').strip()
def validate(number):
"""Check if the number is a valid VAT number. This checks the length,
formatting and check digit."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if len(number) != 9:
raise InvalidLength()
mod_11_10.validate(number)
return number
def is_valid(number):
"""Check if the number is a valid VAT number."""
try:
return bool(validate(number))
except ValidationError:
return False
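# Example usage, mirroring the doctest above (the check digit follows
# ISO 7064 Mod 11, 10):
#
#   >>> is_valid('101134702')
#   True
#   >>> compact('101 134 702')
#   '101134702'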
|
arthurdejong/python-stdnum
|
stdnum/rs/pib.py
|
Python
|
lgpl-2.1
| 1,963 | 0 |
"""
Urls for idea app
"""
from django.conf.urls import url
from openedx.features.idea.api_views import FavoriteAPIView
from openedx.features.idea.views import ChallengeLandingView, IdeaCreateView, IdeaDetailView, IdeaListingView
urlpatterns = [
url(
r'^overview/$',
ChallengeLandingView.as_view(),
name='challenge-landing'
),
url(
r'^$',
IdeaListingView.as_view(),
name='idea-listing'
),
url(
r'^create/$',
IdeaCreateView.as_view(),
name='idea-create'
),
url(
r'^(?P<pk>[0-9]+)/$',
IdeaDetailView.as_view(),
name='idea-details'
),
url(
r'^api/favorite/(?P<idea_id>[0-9]+)/$',
FavoriteAPIView.as_view(),
name='mark-favorite-api-view'
)
]
|
philanthropy-u/edx-platform
|
openedx/features/idea/urls.py
|
Python
|
agpl-3.0
| 799 | 0.001252 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.gaeutil import SaveCommand, ModelSearchCommand
from gaeforms.ndb.form import ModelForm
from gaegraph.business_base import UpdateNode
from course_app.model import Course
class CourseForm(ModelForm):
"""
    Form used to save and update operations
"""
_model_class = Course
_include = [Course.price,
Course.start_date,
Course.name]
class CourseFormDetail(ModelForm):
"""
Form used to show entity details
"""
_model_class = Course
_include = [Course.price,
Course.creation,
Course.start_date,
Course.name]
def populate_form(self, model):
dct = super(CourseFormDetail, self).populate_form(model)
dct['id'] = unicode(model.key.id())
return dct
class CourseFormShort(CourseFormDetail):
"""
    Form used to show the entity's short version, mainly for tables
"""
_model_class = Course
_include = [Course.price,
Course.creation,
Course.start_date,
Course.name]
class SaveCourseCommand(SaveCommand):
_model_form_class = CourseForm
class UpdateCourseCommand(UpdateNode):
_model_form_class = CourseForm
class ListCourseCommand(ModelSearchCommand):
def __init__(self, page_size=100, start_cursor=None, offset=0, use_cache=True, cache_begin=True, **kwargs):
super(ListCourseCommand, self).__init__(Course.query_by_creation(), page_size, start_cursor, offset, use_cache,
cache_begin, **kwargs)
|
gamunax/pyhtongamunax
|
backend/apps/course_app/commands.py
|
Python
|
mit
| 1,665 | 0.007207 |
from cStringIO import StringIO
from json.tests import PyTest, CTest
class TestDump(object):
def test_dump(self):
sio = StringIO()
self.json.dump({}, sio)
self.assertEqual(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEqual(self.dumps({}), '{}')
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEqual(self.dumps(
{2: 3.0, 4.0: 5L, False: 1, 6L: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
class TestPyDump(TestDump, PyTest): pass
class TestCDump(TestDump, CTest): pass
|
ArneBab/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/json/tests/test_dump.py
|
Python
|
mit
| 738 | 0.004065 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FloatingIPDetailsNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_clients(cls):
super(FloatingIPDetailsNegativeTestJSON, cls).setup_clients()
cls.client = cls.floating_ips_client
@test.attr(type=['negative'])
@test.idempotent_id('7ab18834-4a4b-4f28-a2c5-440579866695')
@test.services('network')
def test_get_nonexistent_floating_ip_details(self):
        # Negative test: Should not be able to GET the details
# of non-existent floating IP
# Creating a non-existent floatingIP id
if CONF.service_available.neutron:
non_exist_id = str(uuid.uuid4())
else:
non_exist_id = data_utils.rand_int_id(start=999)
self.assertRaises(lib_exc.NotFound,
self.client.get_floating_ip_details, non_exist_id)
|
danielmellado/tempest
|
tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
|
Python
|
apache-2.0
| 1,698 | 0 |
__all__ = [ "command_statistics", "json_reader", "mongo", "sata", "xgig" ]
|
LoneKirov/PyTrace
|
pytrace/__init__.py
|
Python
|
bsd-3-clause
| 78 | 0.025641 |
#@TODO: Support for html parsing!!
import sys
try:
from colorama import init, Fore, Back, Style
colorama = True
except ImportError:
colorama = False
__all__ = ["console"]
class Console:
def __init__(self):
self.reset()
self.color = False
def set_color(self, color):
self.color = color
if colorama and color:
init(autoreset=True)
def reset(self):
self.indent = 0
self.flags = ''
self.fill_up = ''
self.fill_down = ''
self.center_width = False
self.center_char = ' '
def eprint(self, msg, indent=0, flags='', fill_up=None, fill_down=None,
center_width=False, center_char=' ', end="\n"):
"""Prints a message to stdout with many coloring and style options.
msg: message to print
indent: insert spaces before the beginning
        flags: bold, light, red, blue, green
fill_up: char to use to create a bar above the string
fill_down: char to use to create a bar below the string
center_width, center_char: see center method of strings - help("".center)
end: string to append before printing (see python3 print function)
"""
if indent:
msg = (' ' * 4 * indent) + msg
elif self.indent:
msg = (' ' * 4 * self.indent) + msg
size = len(msg)
if center_width:
msg = msg.center(center_width, center_char)
elif self.center_width:
if center_char:
msg = msg.center(self.center_width, center_char)
else:
msg = msg.center(self.center_width, self.center_char)
if fill_up:
title = (fill_up * size) + "\n"
msg = title + msg
elif self.fill_up:
title = (self.fill_up * size) + "\n"
msg = title + msg
if fill_down:
title = "\n" + (fill_down * size)
msg += title
elif self.fill_down:
title = "\n" + (self.fill_down * size)
msg += title
if colorama and self.color:
allflags = flags + self.flags
if 'bold' in allflags:
msg = Style.BRIGHT + msg
if 'light' in allflags:
msg = Style.DIM + msg
if 'red' in allflags:
msg = Fore.RED + msg
elif 'blue' in allflags:
msg = Fore.BLUE + msg
elif 'green' in allflags:
msg = Fore.GREEN + msg
#@TODO: add other flags here
msg += end
sys.stdout.write(msg)
sys.stdout.flush()
def print_success(self, success):
if success:
self.eprint('[OK]', flags='green,bold')
else:
self.eprint('[FAIL]', flags='red,bold')
def title(self, msg):
self.eprint(msg, flags='bold', fill_up='=', fill_down='=')
console = Console()
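# Example usage (illustrative; colored output needs the optional colorama
# package, otherwise the style flags are silently ignored):
#
#   console.set_color(True)
#   console.title("Build report")
#   console.eprint("compiling...", indent=1, flags='bold')
#   console.print_success(True)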
|
BackupGGCode/pkgcreator
|
pkgcreator/PkgCreator/console.py
|
Python
|
gpl-3.0
| 2,947 | 0.003054 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(root):
if not root:
return 0
left = dfs(root.left)
right = dfs(root.right)
diff[0] += abs(left - right)
return left + root.val + right
diff = [0]
dfs(root)
return diff[0]
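# Worked example (illustrative): for the tree with root 1 and children 2 and 3,
# both leaf tilts are 0 and the root tilt is |2 - 3| = 1, so findTilt returns 1.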
|
zqfan/leetcode
|
algorithms/563. Binary Tree Tilt/solution.py
|
Python
|
gpl-3.0
| 585 | 0 |
###########################################################################
#
# Support code for the 'psyco.compact' type.
from __future__ import generators
try:
from UserDict import DictMixin
except ImportError:
# backported from Python 2.3 to Python 2.2
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other):
# Make progressively weaker assumptions about "other"
if hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, '__iter__'): # iter saves memory
for k in other:
self[k] = other[k]
else:
for k in other.keys():
self[k] = other[k]
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
###########################################################################
from _psyco import compact
class compactdictproxy(DictMixin):
def __init__(self, ko):
self._ko = ko # compact object of which 'self' is the dict
def __getitem__(self, key):
return compact.__getslot__(self._ko, key)
def __setitem__(self, key, value):
compact.__setslot__(self._ko, key, value)
def __delitem__(self, key):
compact.__delslot__(self._ko, key)
def keys(self):
return compact.__members__.__get__(self._ko)
def clear(self):
keys = self.keys()
keys.reverse()
for key in keys:
del self[key]
def __repr__(self):
keys = ', '.join(self.keys())
return '<compactdictproxy object {%s}>' % (keys,)
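# Illustrative sketch (assumes `obj` is a psyco.compact instance):
#
#   proxy = compactdictproxy(obj)
#   proxy['x'] = 1           # forwards to compact.__setslot__
#   list(proxy.keys())       # -> ['x'], read via compact.__members__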
|
Southpaw-TACTIC/Team
|
src/python/Lib/site-packages/psyco/kdictproxy.py
|
Python
|
epl-1.0
| 4,502 | 0.00422 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.jobs import BackfillJob
from airflow.models import DagRun, TaskInstance
from airflow.operators.subdag_operator import SubDagOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.state import State
from sqlalchemy import or_
def _create_dagruns(dag, execution_dates, state, run_id_template):
"""
Infers from the dates which dag runs need to be created and does so.
:param dag: the dag to create dag runs for
:param execution_dates: list of execution dates to evaluate
:param state: the state to set the dag run to
    :param run_id_template: the run id template, formatted with the execution date
:return: newly created and existing dag runs for the execution dates supplied
"""
# find out if we need to create any dag runs
drs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
dates_to_create = list(set(execution_dates) - set([dr.execution_date for dr in drs]))
for date in dates_to_create:
dr = dag.create_dagrun(
run_id=run_id_template.format(date.isoformat()),
execution_date=date,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
)
drs.append(dr)
return drs
def set_state(task, execution_date, upstream=False, downstream=False,
future=False, past=False, state=State.SUCCESS, commit=False):
"""
Set the state of a task instance and if needed its relatives. Can set state
for future tasks (calculated from execution_date) and retroactively
for past tasks. Will verify integrity of past dag runs in order to create
tasks that did not exist. It will not create dag runs that are missing
    on the schedule (but it will for subdag dag runs if needed).
:param task: the task from which to work. task.task.dag needs to be set
:param execution_date: the execution date from which to start looking
:param upstream: Mark all parents (upstream tasks)
:param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags
:param future: Mark all future tasks on the interval of the dag up until
last execution date.
:param past: Retroactively mark all tasks starting from start_date of the DAG
:param state: State to which the tasks need to be set
:param commit: Commit tasks to be altered to the database
:return: list of tasks that have been created and updated
"""
assert timezone.is_localized(execution_date)
# microseconds are supported by the database, but is not handled
# correctly by airflow on e.g. the filesystem and in other places
execution_date = execution_date.replace(microsecond=0)
assert task.dag is not None
dag = task.dag
latest_execution_date = dag.latest_execution_date
assert latest_execution_date is not None
# determine date range of dag runs and tasks to consider
end_date = latest_execution_date if future else execution_date
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date=start_date, end_date=end_date)
# find relatives (siblings = downstream, parents = upstream) if needed
task_ids = [task.task_id]
if downstream:
relatives = task.get_flat_relatives(upstream=False)
task_ids += [t.task_id for t in relatives]
if upstream:
relatives = task.get_flat_relatives(upstream=True)
task_ids += [t.task_id for t in relatives]
# verify the integrity of the dag runs in case a task was added or removed
# set the confirmed execution dates as they might be different
# from what was provided
confirmed_dates = []
drs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
for dr in drs:
dr.dag = dag
dr.verify_integrity()
confirmed_dates.append(dr.execution_date)
# go through subdagoperators and create dag runs. We will only work
    # within the scope of the subdag. We won't propagate to the parent dag,
# but we will propagate from parent to subdag.
session = Session()
dags = [dag]
sub_dag_ids = []
while len(dags) > 0:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator):
# this works as a kind of integrity check
# it creates missing dag runs for subdagoperators,
# maybe this should be moved to dagrun.verify_integrity
drs = _create_dagruns(current_task.subdag,
execution_dates=confirmed_dates,
state=State.RUNNING,
run_id_template=BackfillJob.ID_FORMAT_PREFIX)
for dr in drs:
dr.dag = current_task.subdag
dr.verify_integrity()
if commit:
dr.state = state
session.merge(dr)
dags.append(current_task.subdag)
sub_dag_ids.append(current_task.subdag.dag_id)
# now look for the task instances that are affected
TI = TaskInstance
# get all tasks of the main dag that will be affected by a state change
qry_dag = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(confirmed_dates),
TI.task_id.in_(task_ids)).filter(
or_(TI.state.is_(None),
TI.state != state)
)
# get *all* tasks of the sub dags
if len(sub_dag_ids) > 0:
qry_sub_dag = session.query(TI).filter(
TI.dag_id.in_(sub_dag_ids),
TI.execution_date.in_(confirmed_dates)).filter(
or_(TI.state.is_(None),
TI.state != state)
)
if commit:
tis_altered = qry_dag.with_for_update().all()
if len(sub_dag_ids) > 0:
tis_altered += qry_sub_dag.with_for_update().all()
for ti in tis_altered:
ti.state = state
session.commit()
else:
tis_altered = qry_dag.all()
if len(sub_dag_ids) > 0:
tis_altered += qry_sub_dag.all()
session.expunge_all()
session.close()
return tis_altered
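# Illustrative call (assumes a loaded DAG with a task named "load" and a
# timezone-aware execution_date; commit=False only previews the affected
# task instances without persisting the new state):
#
#   tis = set_state(dag.get_task("load"), execution_date, downstream=True,
#                   state=State.SUCCESS, commit=False)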
def set_dag_run_state(dag, execution_date, state=State.SUCCESS, commit=False):
"""
Set the state of a dag run and all task instances associated with the dag
run for a specific execution date.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param state: the state to which the DAG need to be set
:param commit: commit DAG and tasks to be altered to the database
:return: list of tasks that have been created and updated
:raises: AssertionError if dag or execution_date is invalid
"""
res = []
if not dag or not execution_date:
return res
# Mark all task instances in the dag run
for task in dag.tasks:
task.dag = dag
new_state = set_state(task=task, execution_date=execution_date,
state=state, commit=commit)
res.extend(new_state)
# Mark the dag run
if commit:
drs = DagRun.find(dag.dag_id, execution_date=execution_date)
for dr in drs:
dr.dag = dag
dr.update_state()
return res
|
yk5/incubator-airflow
|
airflow/api/common/experimental/mark_tasks.py
|
Python
|
apache-2.0
| 8,610 | 0.000697 |
def save_bacteria_dna():
"""
0 = A,
1 = T,
2 = C,
3 = G
footnote:
>>>ord('c')
99
    >>>chr(99)
    'c'
    unichr() (Python 2) behaves the same way
"""
char_list = []
binary_list = []
request_word = raw_input("Please enter the word,"
"you want to save in bacteria dna.")
for i in request_word:
char_list.append(ord(i))
result = radix_changer(char_list, binary_list)
print result
def radix_changer(char_list, binary_list):
"""
    Convert base-10 character codes to base 4.
for ex.:
we've got only one character.
And our character is c.
'c' in ascii table;
c=99
function doing this;
99 % 4 = 3 **
99 / 4 = 24
in this step:
        3 -> digit that is kept
        24 -> value carried into the next step
        and every step repeats this,
        like;
24 % 4 = 0 **
24 / 4 = 6
----------
6 % 4 = 2 **
6 / 4 = 1 **
...
        so the base-4 digits are the starred values, read bottom-up
real character: c
in_ascii_format: 99
binary: 1203 :)
"""
counter = 0
while counter < len(char_list):
number = char_list[counter]
binary = ""
while number >= 4:
binary += str(number % 4)
number /= 4
binary += str(number)
binary_list.append(binary[::-1])
counter += 1
# turn to genetic format
# like 1203 -> TCAG
result = recombinant_dna(binary_list)
return result
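# Worked example (illustrative): ord('c') == 99; repeated division by 4 yields
# remainders 3, 0, 2 and a final quotient of 1, so binary[::-1] == "1203".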
def recombinant_dna(binary_list):
"""
each binary_list value is 4base number
its mean max_value for each character is 3
for ex.:
entering string: can
by one by for chars;
ascii: 1203 - 1201 - 1232
and I will format this blocks
for first char
1 -> T
2 -> C
0 -> A
3 -> G
and finally c character saved 'TCAG' :)
"""
counter = 0
tmp_str, result_str = "", ""
while counter < len(binary_list):
tmp_str = binary_list[counter]
for j in range(0, 4):
if tmp_str[j] == '0':
result_str += 'A'
if tmp_str[j] == '1':
result_str += 'T'
if tmp_str[j] == '2':
result_str += 'C'
if tmp_str[j] == '3':
result_str += 'G'
result_str += chr(10)
counter += 1
return result_str
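# Worked example (illustrative): the base-4 digits "1203" for 'c' map
# digit-by-digit through 1->T, 2->C, 0->A, 3->G, giving the strand "TCAG".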
if __name__ == '__main__':
save_bacteria_dna()
|
dogancankilment/UnixTools
|
utils/biology/storage_in_bacterias.py
|
Python
|
gpl-2.0
| 2,800 | 0.000357 |
import unittest
import asyncio
import aiozmq
import aiozmq.rpc
import logging
from unittest import mock
from asyncio.test_utils import run_briefly
from aiozmq._test_util import log_hook, RpcMixin
class MyHandler(aiozmq.rpc.AttrHandler):
def __init__(self, queue, loop):
self.queue = queue
self.loop = loop
@asyncio.coroutine
@aiozmq.rpc.method
def coro(self, arg):
yield from self.queue.put(arg)
@aiozmq.rpc.method
def func(self, arg):
self.queue.put_nowait(arg)
@asyncio.coroutine
@aiozmq.rpc.method
def add(self, arg: int=1):
yield from self.queue.put(arg + 1)
@aiozmq.rpc.method
def func_error(self):
raise ValueError
@aiozmq.rpc.method
def suspicious(self, arg: int):
self.queue.put_nowait(arg)
return 3
@aiozmq.rpc.method
@asyncio.coroutine
def fut(self):
f = asyncio.Future(loop=self.loop)
yield from self.queue.put(f)
yield from f
class PipelineTestsMixin(RpcMixin):
@classmethod
def setUpClass(self):
logger = logging.getLogger()
self.log_level = logger.getEffectiveLevel()
logger.setLevel(logging.DEBUG)
@classmethod
def tearDownClass(self):
logger = logging.getLogger()
logger.setLevel(self.log_level)
def exception_handler(self, loop, context):
self.err_queue.put_nowait(context)
def make_pipeline_pair(self, log_exceptions=False,
exclude_log_exceptions=(), use_loop=True):
@asyncio.coroutine
def create():
server = yield from aiozmq.rpc.serve_pipeline(
MyHandler(self.queue, self.loop),
bind='tcp://127.0.0.1:*',
loop=self.loop if use_loop else None,
log_exceptions=log_exceptions,
exclude_log_exceptions=exclude_log_exceptions)
connect = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.connect_pipeline(
connect=connect,
loop=self.loop if use_loop else None)
return client, server
self.client, self.server = self.loop.run_until_complete(create())
return self.client, self.server
def test_coro(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
yield from client.notify.coro(1)
ret = yield from self.queue.get()
self.assertEqual(1, ret)
yield from client.notify.coro(2)
ret = yield from self.queue.get()
self.assertEqual(2, ret)
self.loop.run_until_complete(communicate())
def test_add(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
yield from client.notify.add()
ret = yield from self.queue.get()
self.assertEqual(ret, 2)
yield from client.notify.add(2)
ret = yield from self.queue.get()
self.assertEqual(ret, 3)
self.loop.run_until_complete(communicate())
def test_bad_handler(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
with log_hook('aiozmq.rpc', self.err_queue):
yield from client.notify.bad_handler()
ret = yield from self.err_queue.get()
self.assertEqual(logging.ERROR, ret.levelno)
self.assertEqual("Call to %r caused error: %r", ret.msg)
self.assertEqual(('bad_handler', mock.ANY),
ret.args)
self.assertIsNotNone(ret.exc_info)
self.loop.run_until_complete(communicate())
def test_func(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
yield from client.notify.func(123)
ret = yield from self.queue.get()
self.assertEqual(ret, 123)
self.loop.run_until_complete(communicate())
def test_func_error(self):
client, server = self.make_pipeline_pair(log_exceptions=True)
@asyncio.coroutine
def communicate():
with log_hook('aiozmq.rpc', self.err_queue):
yield from client.notify.func_error()
ret = yield from self.err_queue.get()
self.assertEqual(logging.ERROR, ret.levelno)
self.assertEqual("An exception %r from method %r "
"call occurred.\n"
"args = %s\nkwargs = %s\n", ret.msg)
self.assertEqual((mock.ANY, 'func_error', '()', '{}'),
ret.args)
self.assertIsNotNone(ret.exc_info)
self.loop.run_until_complete(communicate())
def test_default_event_loop(self):
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
self.addCleanup(asyncio.set_event_loop_policy, None)
self.addCleanup(self.loop.close)
self.loop = asyncio.get_event_loop()
self.client, self.server = self.make_pipeline_pair(use_loop=False)
self.assertIs(self.client._loop, self.loop)
self.assertIs(self.server._loop, self.loop)
def test_warning_if_remote_return_not_None(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
with log_hook('aiozmq.rpc', self.err_queue):
yield from client.notify.suspicious(1)
ret = yield from self.queue.get()
self.assertEqual(1, ret)
ret = yield from self.err_queue.get()
self.assertEqual(logging.WARNING, ret.levelno)
self.assertEqual('Pipeline handler %r returned not None',
ret.msg)
self.assertEqual(('suspicious',), ret.args)
self.assertIsNone(ret.exc_info)
self.loop.run_until_complete(communicate())
run_briefly(self.loop)
def test_call_closed_pipeline(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
client.close()
yield from client.wait_closed()
with self.assertRaises(aiozmq.rpc.ServiceClosedError):
yield from client.notify.func()
self.loop.run_until_complete(communicate())
def test_server_close(self):
client, server = self.make_pipeline_pair()
@asyncio.coroutine
def communicate():
client.notify.fut()
fut = yield from self.queue.get()
self.assertEqual(1, len(server._proto.pending_waiters))
task = next(iter(server._proto.pending_waiters))
self.assertIsInstance(task, asyncio.Task)
server.close()
yield from server.wait_closed()
yield from asyncio.sleep(0, loop=self.loop)
self.assertEqual(0, len(server._proto.pending_waiters))
fut.cancel()
self.loop.run_until_complete(communicate())
class LoopPipelineTests(unittest.TestCase, PipelineTestsMixin):
def setUp(self):
self.loop = aiozmq.ZmqEventLoop()
asyncio.set_event_loop(None)
self.client = self.server = None
self.queue = asyncio.Queue(loop=self.loop)
self.err_queue = asyncio.Queue(loop=self.loop)
self.loop.set_exception_handler(self.exception_handler)
def tearDown(self):
self.close_service(self.client)
self.close_service(self.server)
self.loop.close()
asyncio.set_event_loop(None)
# zmq.Context.instance().term()
class LooplessPipelineTests(unittest.TestCase, PipelineTestsMixin):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.client = self.server = None
self.queue = asyncio.Queue(loop=self.loop)
self.err_queue = asyncio.Queue(loop=self.loop)
self.loop.set_exception_handler(self.exception_handler)
def tearDown(self):
self.close_service(self.client)
self.close_service(self.server)
self.loop.close()
asyncio.set_event_loop(None)
# zmq.Context.instance().term()
|
MetaMemoryT/aiozmq
|
tests/rpc_pipeline_test.py
|
Python
|
bsd-2-clause
| 8,374 | 0.000239 |
"""Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out we are lucky
for once: It's simply a matter of extending two regular expressions.
Credit for the approach goes to:
http://stackoverflow.com/questions/2090717/getting-translation-strings-for-jinja2-templates-integrated-with-django-1-x
"""
import re
from django.core.management.commands import makemessages
from django.utils.translation import trans_real
class Command(makemessages.Command):
def handle(self, *args, **options):
old_endblock_re = trans_real.endblock_re
old_block_re = trans_real.block_re
# Extend the regular expressions that are used to detect
# translation blocks with an "OR jinja-syntax" clause.
trans_real.endblock_re = re.compile(
trans_real.endblock_re.pattern + '|' + r"""^\s*endtrans$""")
trans_real.block_re = re.compile(
trans_real.block_re.pattern + '|' + r"""^\s*trans(?:\s+(?!'|")(?=.*?=.*?)|$)""")
trans_real.plural_re = re.compile(
trans_real.plural_re.pattern + '|' + r"""^\s*pluralize(?:\s+.+|$)""")
try:
super(Command, self).handle(*args, **options)
finally:
trans_real.endblock_re = old_endblock_re
trans_real.block_re = old_block_re
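# A minimal standalone check (illustrative) that the extended pattern accepts
# Jinja2-style end tags in addition to Django's:
#
#   import re
#   assert re.match(r"^\s*endtrans$", "  endtrans")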
|
akx/coffin
|
coffin/management/commands/makemessages.py
|
Python
|
bsd-3-clause
| 2,126 | 0.000941 |
# -*- coding: utf-8 -*-
import logging
import re
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from PIL import Image
from urllib.request import urlopen
from urllib.parse import urlencode
import json
from bs4 import BeautifulSoup, element
from datetime import datetime, timedelta
from waybackscraper.exceptions import ScrapeError
from waybackscraper import wayback
logger = logging.getLogger('mrot.imdb')
OMDB_API_TEMPLATE = 'http://www.omdbapi.com/?{query}'
IMDB_MOVIE_TEMPLATE = "http://www.imdb.com/title/{movie_id}/"
IMDB_NO_POSTER = 'http://www.imdb.com/images/nopicture/medium/video.png'
class IMDbMovie(object):
def __init__(self, title, year, imdb_id, poster):
self.title = title
self.year = year
self.imdb_id = imdb_id
self.poster = poster
def download_ratings(self, concurrency=5, delta=30):
"""
Download the ratings of the movie over time
:param concurrency: Maximum of concurrent requests to the wayback machine
:param delta: Minimum number of days between two ratings
:return: The ratings of the movie indexed by their date
"""
logger.info('Downloading ratings for the movie {movie_name}.'.format(movie_name=self.title))
# The URL for this movie on IMDb
imdb_url = IMDB_MOVIE_TEMPLATE.format(movie_id=self.imdb_id)
# Use the wayback machine to scrape the ratings of the movie over time
ratings = wayback.scrape_archives(url=imdb_url, scrape_function=read_ratings,
min_date=datetime(self.year, 1, 1, 0, 0), max_date=datetime.now(),
user_agent='mrot', min_timedelta=timedelta(days=delta),
concurrency=concurrency)
return ratings
def plot_ratings(self, concurrency=5, delta=30):
"""
Show a time series representing the ratings of the movie over time
:param concurrency: Maximum of concurrent requests to the wayback machine
:param delta: Minimum number of days between two ratings
"""
# Download the movie ratings
ratings = self.download_ratings(concurrency, delta)
if ratings:
# Show the ratings and the movie poster on one figure
fig = plt.figure()
# 1 row, 2 columns position 1
img_fig = fig.add_subplot(121)
# Hide axis around the poster
img_fig.axes.get_xaxis().set_visible(False)
img_fig.axes.get_yaxis().set_visible(False)
# Show the poster on the first column
poster = self.poster if self.poster != 'N/A' else IMDB_NO_POSTER
f = urlopen(poster)
img = Image.open(f)
img_fig.imshow(img)
# 1 row, 2 columns position 2
ratings_fig = fig.add_subplot(122)
# Show ratings on the second column
sorted_keys = sorted(ratings.keys())
axis_values = mdates.date2num(sorted_keys)
ratings_fig.plot_date(x=axis_values, y=[ratings[key] for key in sorted_keys], fmt="r-")
ratings_fig.set_title('Ratings of the movie "{title}" over time'.format(title=self.title))
ratings_fig.set_ylabel("Ratings")
# Set the range of the y value to (min_rating - 1), (max_rating + 1)
ratings_fig.set_ylim([max(min(ratings.values()) - 1, 0), min(max(ratings.values()) + 1, 10)])
# Show the figure
plt.setp(ratings_fig.get_xticklabels(), rotation=30, horizontalalignment='right')
plt.show()
else:
logger.info('No ratings found for the movie {movie_name}.'.format(movie_name=self.title))
def find_movies(movie_name):
"""
Find the movies corresponding to the given movie name
:param movie_name:
:return: A list of movies
"""
logger.info('Searching for movies named {movie_name}.'.format(movie_name=movie_name))
movies = []
# Query OMDb API with the given movie name
api_response = query_search_api(s=movie_name, type_filter='movie')
if api_response['Response'] == 'True':
movies = [IMDbMovie(movie['Title'], int(movie['Year']), movie['imdbID'], movie['Poster']) for
movie in api_response['Search']]
return movies
def query_search_api(s='', type_filter='movie'):
"""
Query OMDb API to obtain movie information
:param s: Movie title to search for.
:param type_filter: Type of result to return.
:return:
"""
query = urlencode({'s': s, 'type': type_filter})
omdb_api_url = OMDB_API_TEMPLATE.format(query=query)
with urlopen(omdb_api_url) as response:
# Read and decode the API response
json_response = response.read().decode("utf-8")
result = json.loads(json_response)
return result
async def read_ratings(session, archive_url, archive_timestamp, archive_content):
"""
Extract a movie rating from its imdb page
:raise: A ScrapeError if the rating could not be extracted
:return:
"""
try:
soup = BeautifulSoup(archive_content, 'html.parser')
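        # Try the selectors for the different IMDb page layouts that the
        # Wayback Machine may have archived over the years.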
ratings_element = soup.find('span', itemprop="ratingValue")
if ratings_element is not None and ratings_element.string != '-':
return float(ratings_element.string.replace(',', '.'))
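        # Older layout: the rating lives in a "star-box-giga-star" div.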
ratings_element = soup.find('div', class_="star-box-giga-star")
if ratings_element is not None:
return float(ratings_element.string)
ratings_element = soup.find('span', class_="rating-rating")
if ratings_element is not None:
if type(ratings_element.contents[0]) is element.NavigableString:
return float(ratings_element.contents[0].string)
else:
return float(ratings_element.span.string)
# Fallback, find a string matching "float/10"
        ratings_ovr_ten = soup.find(string=re.compile(r"^[\d\.]+/10$"))
if ratings_ovr_ten is not None:
return float(ratings_ovr_ten.string.split('/')[0])
raise ScrapeError('Ratings not found')
except ValueError:
raise ScrapeError('Not a valid number')
|
abrenaut/mrot
|
mrot/imdb.py
|
Python
|
mit
| 6,223 | 0.002732 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2011 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from PyQt4 import QtCore, QtGui
from picard.util import webbrowser2, find_executable
from picard.const import FPCALC_NAMES
from picard.config import BoolOption, TextOption
from picard.ui.options import OptionsPage, register_options_page
from picard.ui.ui_options_fingerprinting import Ui_FingerprintingOptionsPage
class FingerprintingOptionsPage(OptionsPage):
NAME = "fingerprinting"
TITLE = N_("Fingerprinting")
PARENT = None
SORT_ORDER = 45
ACTIVE = True
options = [
TextOption("setting", "fingerprinting_system", "acoustid"),
TextOption("setting", "acoustid_fpcalc", ""),
TextOption("setting", "acoustid_apikey", ""),
]
def __init__(self, parent=None):
super(FingerprintingOptionsPage, self).__init__(parent)
self.ui = Ui_FingerprintingOptionsPage()
self.ui.setupUi(self)
self.ui.disable_fingerprinting.clicked.connect(self.update_groupboxes)
self.ui.use_acoustid.clicked.connect(self.update_groupboxes)
self.ui.acoustid_fpcalc_browse.clicked.connect(self.acoustid_fpcalc_browse)
self.ui.acoustid_fpcalc_download.clicked.connect(self.acoustid_fpcalc_download)
self.ui.acoustid_apikey_get.clicked.connect(self.acoustid_apikey_get)
def load(self):
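        # Populate the widgets from the saved configuration.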
if self.config.setting["fingerprinting_system"] == "acoustid":
self.ui.use_acoustid.setChecked(True)
else:
self.ui.disable_fingerprinting.setChecked(True)
self.ui.acoustid_fpcalc.setText(self.config.setting["acoustid_fpcalc"])
self.ui.acoustid_apikey.setText(self.config.setting["acoustid_apikey"])
self.update_groupboxes()
def save(self):
if self.ui.use_acoustid.isChecked():
self.config.setting["fingerprinting_system"] = "acoustid"
else:
self.config.setting["fingerprinting_system"] = ""
self.config.setting["acoustid_fpcalc"] = unicode(self.ui.acoustid_fpcalc.text())
self.config.setting["acoustid_apikey"] = unicode(self.ui.acoustid_apikey.text())
def update_groupboxes(self):
if self.ui.use_acoustid.isChecked():
self.ui.acoustid_settings.setEnabled(True)
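            # If no fpcalc path is configured yet, try to auto-detect the
            # executable on the user's PATH.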
if self.ui.acoustid_fpcalc.text().isEmpty():
fpcalc_path = find_executable(*FPCALC_NAMES)
if fpcalc_path:
self.ui.acoustid_fpcalc.setText(fpcalc_path)
else:
self.ui.acoustid_settings.setEnabled(False)
def acoustid_fpcalc_browse(self):
path = QtGui.QFileDialog.getOpenFileName(self, "", self.ui.acoustid_fpcalc.text())
if path:
path = os.path.normpath(unicode(path))
self.ui.acoustid_fpcalc.setText(path)
def acoustid_fpcalc_download(self):
webbrowser2.open("http://acoustid.org/chromaprint#download")
def acoustid_apikey_get(self):
webbrowser2.open("http://acoustid.org/api-key")
register_options_page(FingerprintingOptionsPage)
|
mwiencek/picard
|
picard/ui/options/fingerprinting.py
|
Python
|
gpl-2.0
| 3,809 | 0.001576 |
from pymongo import MongoClient
from vaderSentiment.vaderSentiment import sentiment as vs
import os
# File for writing sentiment for storage and analysis
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
outputFile = "sentiment.json"
f = open(os.path.join(__location__, outputFile), 'w+')
# Open mongodb client, access the database collection, and find documents
# based on criteria
client = MongoClient()
db = client["Virus"]
coll = db["Zika"]
criteria = {"lang": "en"}
cursor = coll.find(criteria)
# Dictionary of months for date conversion and empty array for documents
months = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5,
"Jun": 6, "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10,
"Nov": 11, "Dec": 12}
docs = []
for document in cursor:
# Convert time from Tue Mar 29 04:04:22 +0000 2016 to 2016-3-29
time = document["created_at"].split()
month = months[time[1]]
day = time[2]
year = time[5]
date = str(year) + "-" + str(month) + "-" + str(day)
docs.append({"text": document["text"],
"date": '"' + date + '"'})
aggregate = {}
count = {}
for doc in docs:
text = doc["text"].encode('utf-8')
sentiment = vs(text)
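    # Collapse VADER's pos/neg components into one signed score
    # (positive minus negative, so the result lies in [-1, 1]).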
value = (sentiment['neg'] * -1) + (sentiment['pos'])
if doc["date"] not in aggregate:
aggregate[doc["date"]] = value
count[doc["date"]] = 1
else:
aggregate[doc["date"]] += value
count[doc["date"]] += 1
# Normalize each day's total by its tweet count, then write the entries
# as a valid JSON array (no trailing comma, closing bracket included).
f.write("[ \n")
entries = []
for date in aggregate:
    aggregate[date] = aggregate[date] / count[date]
    entries.append('{ \t "date": ' + str(date) +
                   ',\n \t "value": ' + str(aggregate[date]) + "\n }")
f.write(", \n".join(entries))
f.write("\n]\n")
f.close()
|
kearnsw/Twitt.IR
|
src/VaderSentiment.py
|
Python
|
gpl-3.0
| 1,714 | 0.001167 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class Unity3DPipeline(object):
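    # Placeholder pipeline: items pass through unchanged.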
def process_item(self, item, spider):
return item
|
TheWaWaR/scrapy-snippets
|
projects/unity3d/unity3d/pipelines.py
|
Python
|
mit
| 261 | 0 |
import rcblog
if __name__ == '__main__':
rcblog.main()
|
sanchopanca/rcblog
|
run.py
|
Python
|
mit
| 60 | 0 |
import os
import sys
from django.conf import settings
if not settings.configured:
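    # Build a minimal in-memory configuration so the test suite can run
    # outside a full Django project.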
settings_dict = dict(
INSTALLED_APPS=(
#'django.contrib.contenttypes',
'inspector_panel',
'inspector_panel.tests',
),
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3"
}
},
)
settings.configure(**settings_dict)
def runtests(*test_args):
if not test_args:
test_args = ['tests']
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
|
NESCent/feedingdb
|
debug-inspector-panel/runtests.py
|
Python
|
gpl-3.0
| 808 | 0.001238 |