repo_name (string, 4-116 chars) | path (string, 4-379 chars) | size (string, 1-7 chars) | content (string, 3-1.05M chars) | license (string, 15 classes)
---|---|---|---|---|
quantrocket-llc/quantrocket-client
|
quantrocket/cli/subcommands/zipline.py
|
18276
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from quantrocket.cli.utils.parse import dict_str
def add_subparser(subparsers):
_parser = subparsers.add_parser("zipline", description="QuantRocket CLI for Zipline", help="Backtest and trade Zipline strategies")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Create a Zipline bundle for US stocks.
This command defines the bundle parameters but does not ingest the actual
data. To ingest the data, see `quantrocket zipline ingest`.
Examples:
Create a minute data bundle for all US stocks:
quantrocket zipline create-usstock-bundle usstock-1min
Create a bundle for daily data only:
quantrocket zipline create-usstock-bundle usstock-1d --data-frequency daily
Create a minute data bundle based on a universe:
quantrocket zipline create-usstock-bundle usstock-tech-1min --universes us-tech
Create a minute data bundle of free sample data:
quantrocket zipline create-usstock-bundle usstock-free-1min --free
"""
parser = _subparsers.add_parser(
"create-usstock-bundle",
help="create a Zipline bundle for US stocks",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the bundle (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids (only supported for minute data bundles)")
parser.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes (only supported for minute data bundles)")
parser.add_argument(
"--free",
action="store_true",
help="limit to free sample data")
parser.add_argument(
"-d", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="whether to collect minute data (which also includes daily data) or "
"only daily data. Default is minute data. Possible choices: %(choices)s")
parser.set_defaults(func="quantrocket.zipline._cli_create_usstock_bundle")
examples = """
Create a Zipline bundle from a history database or real-time aggregate
database.
You can ingest 1-minute or 1-day databases.
This command defines the bundle parameters but does not ingest the actual
data. To ingest the data, see `quantrocket zipline ingest`.
Examples:
Create a bundle from a history database called "es-fut-1min" and name
it like the history database:
quantrocket zipline create-bundle-from-db es-fut-1min --from-db es-fut-1min --calendar us_futures --start-date 2015-01-01
Create a bundle named "usa-stk-1min-2017" for ingesting a single year of US
1-minute stock data from a history database called "usa-stk-1min":
quantrocket zipline create-bundle-from-db usa-stk-1min-2017 --from-db usa-stk-1min -s 2017-01-01 -e 2017-12-31 --calendar XNYS
Create a bundle from a real-time aggregate database and specify how to map
Zipline fields to the database fields:
quantrocket zipline create-bundle-from-db free-stk-1min --from-db free-stk-tick-1min --calendar XNYS --start-date 2020-06-01 --fields close:LastPriceClose open:LastPriceOpen high:LastPriceHigh low:LastPriceLow volume:VolumeClose
"""
parser = _subparsers.add_parser(
"create-bundle-from-db",
help="create a Zipline bundle from a history database or real-time aggregate database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the bundle (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-d", "--from-db",
metavar="CODE",
help="the code of a history database or real-time aggregate database to ingest")
parser.add_argument(
"-c", "--calendar",
metavar="NAME",
help="the name of the calendar to use with this bundle "
"(provide '?' or any invalid calendar name to see available choices)")
parser.add_argument(
"-f", "--fields",
nargs="*",
type=dict_str,
metavar="ZIPLINE_FIELD:DB_FIELD",
help="mapping of Zipline fields (open, high, low, close, volume) to "
"db fields. Pass as 'zipline_field:db_field'. Defaults to mapping Zipline "
"'open' to db 'Open', etc.")
filters = parser.add_argument_group("filtering options for db ingestion")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
required=True,
help="limit to historical data on or after this date. This parameter is required "
"and also determines the default start date for backtests and queries.")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to historical data on or before this date")
filters.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"--exclude-universes",
nargs="*",
metavar="UNIVERSE",
help="exclude these universes")
filters.add_argument(
"--exclude-sids",
nargs="*",
metavar="SID",
help="exclude these sids")
parser.set_defaults(func="quantrocket.zipline._cli_create_bundle_from_db")
examples = """
Ingest data into a previously defined bundle.
Examples:
Ingest data into a bundle called usstock-1min:
quantrocket zipline ingest usstock-1min
"""
parser = _subparsers.add_parser(
"ingest",
help="ingest data into a previously defined bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids, overriding stored config")
parser.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes, overriding stored config")
parser.set_defaults(func="quantrocket.zipline._cli_ingest_bundle")
examples = """
List available data bundles and whether data has been ingested into them.
Examples:
quantrocket zipline list-bundles
"""
parser = _subparsers.add_parser(
"list-bundles",
help="list available data bundles and whether data has been ingested into them",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func="quantrocket.zipline._cli_list_bundles")
examples = """
Return the configuration of a bundle.
Examples:
Return the configuration of a bundle called 'usstock-1min':
quantrocket zipline config usstock-1min
"""
parser = _subparsers.add_parser(
"config",
help="return the configuration of a bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.set_defaults(func="quantrocket.zipline._cli_get_bundle_config")
examples = """
Delete a bundle.
Examples:
Delete a bundle called 'es-fut-1min':
quantrocket zipline drop-bundle es-fut-1min --confirm-by-typing-bundle-code-again es-fut-1min
"""
parser = _subparsers.add_parser(
"drop-bundle",
help="delete a bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.add_argument(
"--confirm-by-typing-bundle-code-again",
metavar="CODE",
required=True,
help="enter the bundle code again to confirm you want to drop the bundle, its config, "
"and all its data")
parser.set_defaults(func="quantrocket.zipline._cli_drop_bundle")
examples = """
Set or show the default bundle to use for backtesting and trading.
Setting a default bundle is a convenience and is optional. It can be
overridden by manually specifying a bundle when backtesting or
trading.
Examples:
Set a bundle named usstock-1min as the default:
quantrocket zipline default-bundle usstock-1min
Show current default bundle:
quantrocket zipline default-bundle
"""
parser = _subparsers.add_parser(
"default-bundle",
help="set or show the default bundle to use for backtesting and trading",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"bundle",
nargs="?",
help="the bundle code")
parser.set_defaults(func="quantrocket.zipline._cli_get_or_set_default_bundle")
examples = """
Query minute or daily data from a Zipline bundle and download to a CSV file.
Examples:
Download a CSV of minute prices since 2015 for a single security from a bundle called
"usstock-1min":
quantrocket zipline get usstock-1min --start-date 2015-01-01 -i FIBBG12345 -o minute_prices.csv
"""
parser = _subparsers.add_parser(
"get",
help="query minute or daily data from a Zipline bundle and download to a CSV file",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to history on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to history on or before this date")
filters.add_argument(
"-d", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="whether to query minute or daily data. If omitted, defaults to "
"minute data for minute bundles and to daily data for daily bundles. "
"This parameter only needs to be set to request daily data from a minute "
"bundle. Possible choices: %(choices)s")
filters.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"--exclude-universes",
nargs="*",
metavar="UNIVERSE",
help="exclude these universes")
filters.add_argument(
"--exclude-sids",
nargs="*",
metavar="SID",
help="exclude these sids")
filters.add_argument(
"-t", "--times",
nargs="*",
metavar="HH:MM:SS",
help="limit to these times")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
outputs.add_argument(
"-f", "--fields",
metavar="FIELD",
nargs="*",
help="only return these fields (pass '?' or any invalid fieldname to see "
"available fields)")
parser.set_defaults(func="quantrocket.zipline._cli_download_bundle_file")
examples = """
Backtest a Zipline strategy and write the test results to a CSV file.
The CSV result file contains several DataFrames stacked into one: the Zipline performance
results, plus the extracted returns, transactions, positions, and benchmark returns from those
results.
Examples:
Run a backtest from a strategy file called etf-arb.py and save a CSV file of results,
logging backtest progress at annual intervals:
quantrocket zipline backtest etf-arb --bundle arca-etf-eod -s 2010-04-01 -e 2016-02-01 -o results.csv --progress A
"""
parser = _subparsers.add_parser(
"backtest",
help="backtest a Zipline strategy and write the test results to a CSV file",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"strategy",
metavar="CODE",
help="the strategy to run (strategy filename without extension)")
parser.add_argument(
"-f", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="the data frequency to use. Possible choices: %(choices)s "
"(default is minute)")
parser.add_argument(
"--capital-base",
type=float,
metavar="FLOAT",
help="the starting capital for the simulation (default is 1e6 (1 million))")
parser.add_argument(
"-b", "--bundle",
metavar="CODE",
help="the data bundle to use for the simulation. If omitted, the default "
"bundle (if set) is used.")
parser.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="the start date of the simulation (defaults to the bundle start date)")
parser.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="the end date of the simulation (defaults to today)")
parser.add_argument(
"-p", "--progress",
metavar="FREQ",
help="log backtest progress at this interval (use a pandas offset alias, "
"for example 'D' for daily, 'W' for weeky, 'M' for monthly, 'A' for annually)")
parser.add_argument(
"-o", "--output",
metavar="FILENAME",
dest="filepath_or_buffer",
help="the location to write the output file (omit to write to stdout)")
parser.set_defaults(func="quantrocket.zipline._cli_backtest")
examples = """
Create a pyfolio PDF tear sheet from a Zipline backtest result.
Examples:
Create a pyfolio tear sheet from a Zipline CSV results file:
quantrocket zipline tearsheet results.csv -o results.pdf
Run a Zipline backtest and create a pyfolio tear sheet without saving
the CSV file:
quantrocket zipline backtest dma -s 2010-04-01 -e 2016-02-01 | quantrocket zipline tearsheet -o dma.pdf
"""
parser = _subparsers.add_parser(
"tearsheet",
help="create a pyfolio tear sheet from a Zipline backtest result",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"infilepath_or_buffer",
metavar="FILENAME",
nargs="?",
default="-",
help="the CSV file from a Zipline backtest (omit to read file from stdin)")
parser.add_argument(
"-o", "--output",
metavar="FILENAME",
required=True,
dest="outfilepath_or_buffer",
help="the location to write the pyfolio tear sheet")
parser.set_defaults(func="quantrocket.zipline._cli_create_tearsheet")
examples = """
Trade a Zipline strategy.
Examples:
Trade a strategy defined in momentum-pipeline.py:
quantrocket zipline trade momentum-pipeline --bundle my-bundle
"""
parser = _subparsers.add_parser(
"trade",
help="trade a Zipline strategy",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"strategy",
metavar="CODE",
help="the strategy to run (strategy filename without extension)")
parser.add_argument(
"-b", "--bundle",
metavar="CODE",
help="the data bundle to use. If omitted, the default bundle "
"(if set) is used.")
parser.add_argument(
"-a", "--account",
help="the account to run the strategy in. Only required "
"if the strategy is allocated to more than one "
"account in quantrocket.zipline.allocations.yml")
parser.add_argument(
"-f", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="the data frequency to use. Possible choices: %(choices)s "
"(default is minute)")
parser.set_defaults(func="quantrocket.zipline._cli_trade")
examples = """
List actively trading Zipline strategies.
Examples:
List strategies:
quantrocket zipline active
"""
parser = _subparsers.add_parser(
"active",
help="list actively trading Zipline strategies",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func="quantrocket.zipline._cli_list_active_strategies")
examples = """
Cancel actively trading strategies.
Examples:
Cancel a single strategy:
quantrocket zipline cancel --strategies momentum-pipeline
Cancel all strategies:
quantrocket zipline cancel --all
"""
parser = _subparsers.add_parser(
"cancel",
help="cancel actively trading strategies",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-s", "--strategies",
nargs="*",
metavar="CODE",
help="limit to these strategies")
parser.add_argument(
"-a", "--accounts",
metavar="ACCOUNT",
nargs="*",
help="limit to these accounts")
parser.add_argument(
"--all",
action="store_true",
dest="cancel_all",
help="cancel all actively trading strategies")
parser.set_defaults(func="quantrocket.zipline._cli_cancel_strategies")
|
apache-2.0
|
JPPorcel/CCSA
|
P4/tareas/max/MaxMapper.java
|
863
|
package oldapi;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class MaxMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, DoubleWritable>
{
private static final int MISSING = -9999;
public static int col=5;
public void map(LongWritable key, Text value, OutputCollector<Text, DoubleWritable> output, Reporter reporter) throws IOException
{
String line = value.toString();
String[] parts = line.split(",");
double reading = Double.parseDouble(parts[col]);
// Skip the sentinel used for missing readings so it is never reported as the maximum.
if (reading == MISSING) {
return;
}
output.collect(new Text("max"), new DoubleWritable(reading));
}
}
|
apache-2.0
|
mkeijzer/push-forth
|
push-forth/test/org/arg/test/TestIntegers.java
|
1238
|
package org.arg.test;
import org.arg.pushforth.dictionary.SymbolTable;
import org.arg.pushforth.instructions.Instruction;
import org.arg.pushforth.instructions.Numbers;
import org.arg.pushforth.program.Program;
import org.arg.pushforth.program.Programs;
import org.junit.Test;
public class TestIntegers {
static {
Numbers.load();
}
@Test
public void testDivByZero() {
Program prog = Programs.parse("[[0 2 /]]");
Exception exc = null;
try {
int x = 1/0;
} catch (ArithmeticException e) {
exc = e;
}
Program expected = Programs.list(Programs.list(), exc);
TestInstructions.test(prog, expected);
}
@Test
public void testRemByZero() {
Program prog = Programs.parse("[[0 2 %]]");
Exception exc = null;
try {
int x = 1/0;
} catch (ArithmeticException e) {
exc = e;
}
Program expected = Programs.list(Programs.list(), exc);
TestInstructions.test(prog, expected);
}
@Test
public void testDivNotByZero() {
Program prog = Programs.parse("[[2 0 /]]");
Program expected = Programs.list(Programs.list(), 0);
TestInstructions.test(prog, expected);
}
}
|
apache-2.0
|
gchq/stroom
|
stroom-ui/src/components/password/ResetPasswordRequest/index.ts
|
81
|
export { ResetPasswordRequestContainer } from "./ResetPasswordRequestContainer";
|
apache-2.0
|
TcmExtensions/TcmHttpUpload
|
TcmHttpUpload/Misc/TcmUri.cs
|
5995
|
#region Header
////////////////////////////////////////////////////////////////////////////////////
//
// File Description: TcmUri
// ---------------------------------------------------------------------------------
// Date Created : February 28, 2013
// Author : Rob van Oostenrijk
// ---------------------------------------------------------------------------------
//
// Based on https://code.google.com/p/tridion-2011-power-tools/source/browse/trunk/PowerTools.Model/Utils/TcmUri.cs?r=633
// Thanks to pkjaer.sdl
//
////////////////////////////////////////////////////////////////////////////////////
#endregion
using System;
using System.Text.RegularExpressions;
namespace TcmHttpUpload.Misc
{
/// <summary>
/// Represents a Tridion TCM URI (for example "tcm:2-255-32") and provides parsing and formatting.
/// </summary>
public class TcmUri
{
private static readonly Regex mTcmRegEx = new Regex(@"tcm:(\d+)-(\d+)-?(\d*)-?v?(\d*)");
private int mItemId;
private int mItemType;
private int mPublicationId;
private int mVersion;
/// <summary>
/// <see cref="TcmUri" /> publication identifier
/// </summary>
/// <value>
/// Publication identifier
/// </value>
public int PublicationId
{
get
{
return mPublicationId;
}
set
{
mPublicationId = value;
}
}
/// <summary>
/// <see cref="TcmUri" /> item identifier
/// </summary>
/// <value>
/// Item identifier
/// </value>
public int ItemId
{
get
{
return mItemId;
}
set
{
mItemId = value;
}
}
/// <summary>
/// <see cref="TcmUri" /> item type
/// </summary>
/// <value>
/// Item type
/// </value>
public int ItemType
{
get
{
return mItemType;
}
set
{
mItemType = value;
}
}
/// <summary>
/// <see cref="TcmUri" /> item version
/// </summary>
/// <value>
/// Item version
/// </value>
public int Version
{
get
{
return mVersion;
}
set
{
mVersion = value;
}
}
/// <summary>
/// Initializes a new instance of the TcmUri class.
/// </summary>
/// <param name="uri">The string representation of the TCM URI.</param>
public TcmUri(String uri)
{
if (!Parse(uri, out mPublicationId, out mItemId, out mItemType, out mVersion))
throw new Exception(String.Format("Invalid TcmUri {0}", uri));
}
/// <summary>
/// Initializes a new instance of the TcmUri class.
/// </summary>
/// <param name="publicationId">The ID of the Publication the item belongs to.</param>
/// <param name="itemId">The ID of the item itself.</param>
/// <param name="itemType">The type of item (e.g. 16 for a Component)</param>
public TcmUri(int publicationId, int itemId, int itemType): this(publicationId, itemId, itemType, 0)
{
}
/// <summary>
/// Initializes a new instance of the TcmUri class.
/// </summary>
/// <param name="publicationId">The ID of the Publication the item belongs to.</param>
/// <param name="itemId">The ID of the item itself.</param>
/// <param name="itemType">The type of item (e.g. 16 for a Component)</param>
/// <param name="version">The specific version of the item to retrieve.</param>
public TcmUri(int publicationId, int itemId, int itemType, int version)
: this(String.Format("tcm:{0}-{1}-{2}-v{3}", publicationId, itemId, itemType, version))
{
}
/// <summary>
/// Returns the string representation of the TCM URI.
/// </summary>
public override String ToString()
{
if (Version > 0)
return String.Format("tcm:{0}-{1}-{2}-v{3}", mPublicationId, mItemId, mItemType, mVersion);
if (ItemType == 16)
return String.Format("tcm:{0}-{1}", mPublicationId, mItemId);
return String.Format("tcm:{0}-{1}-{2}", mPublicationId, mItemId, mItemType);
}
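// Illustrative usage (values hypothetical):
//   new TcmUri("tcm:2-255-32").ItemType  == 32
//   new TcmUri(2, 255, 16).ToString()    == "tcm:2-255" (Component URIs omit the item type)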
/// <summary>
/// Check if a given string is a valid TCM URI.
/// </summary>
/// <param name="uri">The string representation of a TCM URI (e.g. "tcm:2-255-32").</param>
/// <returns><code>true</code> if the string is valid as a TCM URI; <code>false</code> otherwise.</returns>
public static bool IsValid(String uri)
{
int publicationId;
int itemId;
int itemType;
int version;
return Parse(uri, out publicationId, out itemId, out itemType, out version);
}
/// <summary>
/// Returns the equivalent of a <code>null</code> value for a TCM URI.
/// </summary>
public static TcmUri UriNull
{
get
{
return new TcmUri("tcm:0-0-0");
}
}
/// <summary>
/// Converts the string representation of a TCM URI to integers representing the different parts of the URI.
/// </summary>
/// <param name="input">The string to parse.</param>
/// <param name="publicationId">The ID of the Publication the item belongs to.</param>
/// <param name="itemId">The ID of the item itself.</param>
/// <param name="itemType">The type of the item (e.g. 16 for a Component)</param>
/// <param name="version">The version of the item. Defaults to 0 which is the current version.</param>
/// <returns><code>true</code> if the parsing succeeded; <code>false</code> otherwise.</returns>
protected static bool Parse(String input, out int publicationId, out int itemId, out int itemType, out int version)
{
publicationId = 0;
itemId = 0;
itemType = 0;
version = 0;
if (String.IsNullOrEmpty(input))
return false;
Match m = mTcmRegEx.Match(input);
if (!m.Success)
return false;
try
{
publicationId = Convert.ToInt32(m.Groups[1].Value);
itemId = Convert.ToInt32(m.Groups[2].Value);
version = 0;
itemType = 16;
// Groups 3 and 4 are optional captures; Convert.ToInt32 would throw on an empty value.
if (m.Groups.Count > 3 && !String.IsNullOrEmpty(m.Groups[3].Value))
{
itemType = Convert.ToInt32(m.Groups[3].Value);
if (m.Groups.Count > 4 && !String.IsNullOrEmpty(m.Groups[4].Value))
version = Convert.ToInt32(m.Groups[4].Value);
}
if (publicationId == 0 && itemId == 0 && itemType == 0 && version == 0)
return true;
return publicationId > -1 && itemId > 0 && itemType > 0 && version > -1;
}
catch (FormatException)
{
}
catch (OverflowException)
{
}
return false;
}
}
}
|
apache-2.0
|
adamralph/Workshop.Microservices
|
exercises/02-publish-subscribe/after/Divergent.Finance.API/Controllers/PricesController.cs
|
1059
|
using Divergent.Finance.Data.Context;
using System.Collections.Generic;
using System.Linq;
using System.Web.Http;
namespace Divergent.Finance.API.Controllers
{
[RoutePrefix("api/prices")]
public class PricingController : ApiController
{
[HttpGet, Route("orders/total")]
public IEnumerable<dynamic> GetOrdersTotal(string orderIds)
{
var orderIdList = orderIds.Split(',')
.Select(id => int.Parse(id))
.ToList();
using (var db = new FinanceContext())
{
return db.OrderItemPrices
.Where(orderItemPrice => orderIdList.Contains(orderItemPrice.OrderId))
.GroupBy(orderItemPrice => orderItemPrice.OrderId)
.Select(orderGroup => new
{
OrderId = orderGroup.Key,
Amount = orderGroup.Sum(orderItemPrice => orderItemPrice.ItemPrice),
})
.ToList();
}
}
}
}
|
apache-2.0
|
roberthafner/flowable-engine
|
modules/flowable-idm-api/src/main/java/org/activiti/idm/api/GroupQuery.java
|
2099
|
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.activiti.idm.api;
import java.util.List;
/**
 * Allows programmatic querying of {@link Group}s.
*
* @author Joram Barrez
*/
public interface GroupQuery extends Query<GroupQuery, Group> {
/** Only select {@link Group}s with the given id. */
GroupQuery groupId(String groupId);
/** Only select {@link Group}s with the given ids. */
GroupQuery groupIds(List<String> groupIds);
/** Only select {@link Group}s with the given name. */
GroupQuery groupName(String groupName);
/**
 * Only select {@link Group}s where the name matches the given parameter. The syntax to use is that of SQL, e.g. %activiti%.
*/
GroupQuery groupNameLike(String groupNameLike);
/** Only select {@link Group}s which have the given type. */
GroupQuery groupType(String groupType);
/** Only select {@link Group}s of which the given user is a member. */
GroupQuery groupMember(String groupMemberUserId);
/** Only select {@link Group}s of which the given users are members. */
GroupQuery groupMembers(List<String> groupMemberUserIds);
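// Illustrative usage (identifiers hypothetical):
//   List<Group> groups = groupQuery.groupMember("kermit").orderByGroupName().asc().list();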
// sorting ////////////////////////////////////////////////////////
/**
* Order by group id (needs to be followed by {@link #asc()} or {@link #desc()}).
*/
GroupQuery orderByGroupId();
/**
* Order by group name (needs to be followed by {@link #asc()} or {@link #desc()}).
*/
GroupQuery orderByGroupName();
/**
* Order by group type (needs to be followed by {@link #asc()} or {@link #desc()}).
*/
GroupQuery orderByGroupType();
}
|
apache-2.0
|
dfalcone/wp-store
|
wp-store/wp-store-wsa/domain/VirtualItem.cs
|
2061
|
/// Copyright (C) 2012-2014 Soomla Inc.
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SoomlaWpCore.data;
using SoomlaWpCore.util;
using SoomlaWpCore;
namespace SoomlaWpStore.domain
{
public abstract class VirtualItem : SoomlaEntity<VirtualItem>
{
public VirtualItem(String Name, String Description, String ItemId) : base(Name, Description, ItemId)
{
}
public VirtualItem(JSONObject jsonObject) : base(jsonObject)
{
}
/*public new JSONObject toJSONObject()
{
return base.toJSONObject();
}*/
public int give(int amount)
{
return give(amount, true);
}
public abstract int give(int amount, bool notify);
public int take(int amount)
{
return take(amount, true);
}
public abstract int take(int amount, bool notify);
public int resetBalance(int balance)
{
return resetBalance(balance, true);
}
public abstract int resetBalance(int balance, bool notify);
public String getItemId()
{
return mID;
}
public String getName()
{
return mName;
}
private const String TAG = "SOOMLA VirtualItem"; //used for Log messages
}
}
|
apache-2.0
|
kantega/respiro
|
plugins/core/mongodb-driver/src/main/java/org/kantega/respiro/mongodb/driver/DefaultMongoDBBuilder.java
|
3230
|
/*
* Copyright 2019 Kantega AS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kantega.respiro.mongodb.driver;
import com.mongodb.MongoClient;
import com.mongodb.MongoCredential;
import com.mongodb.ServerAddress;
import org.kantega.respiro.mongodb.MongoDBBuilder;
import org.kantega.respiro.mongodb.MongoDatabaseProvider;
import org.kantega.respiro.mongodb.MongoDatabaseProviderModifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
public class DefaultMongoDBBuilder implements MongoDBBuilder {
final Collection<MongoDatabaseProviderModifier> modifiers;
public DefaultMongoDBBuilder(Collection<MongoDatabaseProviderModifier> modifiers) {
this.modifiers = modifiers;
}
@Override
public Build mongodatabase(List<ServerAddress> serverAddresses) {
return new DefaultBuild(serverAddresses);
}
@Override
public Build mongodatabase(String addressList) {
String[] addresses = addressList.split(",");
List<ServerAddress> srvAddresses = new ArrayList<>();
for (String a : addresses) {
String[] address = a.split(":");
if (address.length < 2)
throw new RuntimeException(String.format("Server address cannot be split into host and port '%s'", a));
srvAddresses.add(new ServerAddress(address[0], Integer.valueOf(address[1])));
}
return mongodatabase(srvAddresses);
}
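// Illustrative usage (addresses and credentials hypothetical):
//   MongoDatabaseProvider provider = builder.mongodatabase("db1.example.com:27017,db2.example.com:27017")
//       .auth("user", "secret", "admin")
//       .build();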
private class DefaultBuild implements MongoDBBuilder.Build {
private List<ServerAddress> serverAddresses = new ArrayList<>();
final List<MongoCredential> credentials = new ArrayList<>();
public DefaultBuild(List<ServerAddress> serverAddresses) {
this.serverAddresses = serverAddresses;
}
@Override
public Build auth(String username, String password, String database) {
credentials.add(MongoCredential.createScramSha1Credential(username, database, password.toCharArray()));
return this;
}
@Override
public MongoDatabaseProvider build() {
final MongoClient client =
credentials.isEmpty()
? new MongoClient(serverAddresses)
: new MongoClient(serverAddresses, credentials);
MongoDatabaseProvider mdp = client::getDatabase;
MongoDatabaseProvider modified = mdp;
for (MongoDatabaseProviderModifier modifier : modifiers) {
modified = modifier.modify(modified);
}
return modified;
}
}
}
|
apache-2.0
|
Seddryck/Lookum
|
Lookum.Framework/Lookup/BaseLookup.cs
|
3226
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace Lookum.Framework.Lookup
{
public abstract class BaseLookup<K, V> : ILookup
{
public bool IsLoaded { get; protected set; }
/// <summary>
/// Internal container to store in-memory the different keys/values to use in the lookup
/// </summary>
protected IDictionary<K, V> Map { get; private set; }
/// <summary>
/// Define the function to be executed when a key is requested but not found in the map.
/// </summary>
protected Func<K, V> NonMatchBehavior { get; set; }
protected BaseLookup()
{
IsLoaded = false;
Map = new Dictionary<K, V>();
NonMatchBehavior = delegate { return default(V); };
}
protected BaseLookup(bool throwException)
: this()
{
if (throwException)
NonMatchBehavior = ThrowException;
}
protected BaseLookup(V defaultValue)
: this()
{
NonMatchBehavior = delegate {return defaultValue;};
}
protected BaseLookup(Func<K, V> nonMatchBehavior)
: this()
{
NonMatchBehavior = nonMatchBehavior;
}
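// Illustrative usage (a hypothetical subclass): a lookup that falls back to "unknown"
// for unmapped keys via the default-value constructor.
//
//   class CountryLookup : BaseLookup<int, string>
//   {
//       public CountryLookup() : base("unknown") { }
//       protected override void OnLoad() { Map.Add(32, "Belgium"); }
//   }
//   var lookup = new CountryLookup();
//   lookup.Load();
//   lookup.Match(32); // "Belgium"
//   lookup.Match(99); // "unknown"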
private V ThrowException(K id)
{
var msg = String.Format("The key '{0}' has not been found during the execution of the lookup filled with {1} values.", id, Map.Keys.Count);
throw new KeyNotFoundException(msg);
}
/// <summary>
/// Load the lookup by executing OnLoad(). The lookup must not already be loaded;
/// call Clear() first to reload it.
/// </summary>
public virtual void Load()
{
if (IsLoaded)
throw new InvalidOperationException("The lookup has already been loaded. If you want to reload it, you should clear it first.");
OnLoad();
IsLoaded = true;
}
protected abstract void OnLoad();
/// <summary>
/// Return the value associated to the key. If the key is not found apply the behavior specified in NonMatchBehavior
/// </summary>
/// <param name="id">The key (guid) that you're looking to translate to a value (string)</param>
/// <returns>The translated value. IF the key has not been found and the behaviour is set to return a default value then the default value will returned.</returns>
public virtual V Match(K id)
{
if (!IsLoaded)
throw new NotLoadedLookupException();
if (Map.ContainsKey(id))
return Map[id];
return NonMatchBehavior(id);
}
public int Count()
{
return Map.Count;
}
public void Clear()
{
IsLoaded = false;
Map.Clear();
}
}
}
|
apache-2.0
|
googleads/googleads-php-lib
|
examples/AdManager/v202111/CreativeService/GetImageCreatives.php
|
3532
|
<?php
/**
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Google\AdsApi\Examples\AdManager\v202111\CreativeService;
require __DIR__ . '/../../../../vendor/autoload.php';
use Google\AdsApi\AdManager\AdManagerSession;
use Google\AdsApi\AdManager\AdManagerSessionBuilder;
use Google\AdsApi\AdManager\Util\v202111\StatementBuilder;
use Google\AdsApi\AdManager\v202111\ServiceFactory;
use Google\AdsApi\Common\OAuth2TokenBuilder;
/**
* This example gets all image creatives.
*
* <p>It is meant to be run from a command line (not as a webpage) and requires
 * that you've set up an `adsapi_php.ini` file in your home directory with your
* API credentials and settings. See README.md for more info.
*/
class GetImageCreatives
{
public static function runExample(
ServiceFactory $serviceFactory,
AdManagerSession $session
) {
$creativeService = $serviceFactory->createCreativeService($session);
// Create a statement to select creatives.
$pageSize = StatementBuilder::SUGGESTED_PAGE_LIMIT;
$statementBuilder = (new StatementBuilder())
->where('creativeType = :creativeType')
->orderBy('id ASC')
->limit($pageSize)
->withBindVariableValue('creativeType', 'ImageCreative');
// Retrieve a small amount of creatives at a time, paging
// through until all creatives have been retrieved.
$totalResultSetSize = 0;
do {
$page = $creativeService->getCreativesByStatement(
$statementBuilder->toStatement()
);
// Print out some information for each creative.
if ($page->getResults() !== null) {
$totalResultSetSize = $page->getTotalResultSetSize();
$i = $page->getStartIndex();
foreach ($page->getResults() as $creative) {
printf(
"%d) Creative with ID %d and name '%s' was found.%s",
$i++,
$creative->getId(),
$creative->getName(),
PHP_EOL
);
}
}
$statementBuilder->increaseOffsetBy($pageSize);
} while ($statementBuilder->getOffset() < $totalResultSetSize);
printf("Number of results found: %d%s", $totalResultSetSize, PHP_EOL);
}
public static function main()
{
// Generate a refreshable OAuth2 credential for authentication.
$oAuth2Credential = (new OAuth2TokenBuilder())->fromFile()
->build();
// Construct an API session configured from an `adsapi_php.ini` file
// and the OAuth2 credentials above.
$session = (new AdManagerSessionBuilder())->fromFile()
->withOAuth2Credential($oAuth2Credential)
->build();
self::runExample(new ServiceFactory(), $session);
}
}
GetImageCreatives::main();
|
apache-2.0
|
revelup/salt-preseed
|
src/main.go
|
1213
|
package main
import (
"fmt"
"log"
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/revelup/httpserver"
"github.com/streamrail/concurrent-map"
)
const (
listenAddress string = "127.0.0.1:8080"
rateLimit int = 3
)
var ipPerMinute cmap.ConcurrentMap
func init() {
ipPerMinute = cmap.New()
}
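// validateIP allows at most rateLimit requests per client IP per wall-clock
// minute: each request is counted under a "YYYY-MM-DDTHH:MM:<ip>" key, so the
// counter effectively resets every minute. Note that stale keys are never
// evicted, so the map grows until the process restarts.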
func validateIP(clientIP string) bool {
keyPart := time.Now().Local().Format("2006-01-02T15:04")
key := fmt.Sprintf("%v:%v", keyPart, clientIP)
if val, ok := ipPerMinute.Get(key); ok {
count := val.(int) + 1
ipPerMinute.Set(key, count)
return count <= rateLimit
}
ipPerMinute.Set(key, 1)
return true
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/robots.txt", httpserver.NoRobotsHandler).Methods("GET")
r.HandleFunc("/", httpserver.EmptyReponseHandler).Methods("GET")
http.Handle("/", httpserver.RateLimit(httpserver.PanicRecoveryMiddleware(httpserver.LogMiddleware(r)), validateIP))
s := &http.Server{
Addr: listenAddress,
Handler: nil,
ReadTimeout: 1000 * time.Second,
WriteTimeout: 1000 * time.Second,
MaxHeaderBytes: 1 << 20,
}
log.Printf("listen: %s", listenAddress)
err := s.ListenAndServe()
if err != nil {
log.Fatal(err)
}
}
|
apache-2.0
|
GoogleCloudPlatform/python-docs-samples
|
cloud-sql/mysql/client-side-encryption/snippets/cloud_kms_env_aead.py
|
1521
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START cloud_sql_mysql_cse_key]
import logging
import tink
from tink import aead
from tink.integration import gcpkms
logger = logging.getLogger(__name__)
def init_tink_env_aead(
key_uri: str,
credentials: str) -> tink.aead.KmsEnvelopeAead:
aead.register()
try:
gcp_client = gcpkms.GcpKmsClient(key_uri, credentials)
gcp_aead = gcp_client.get_aead(key_uri)
except tink.TinkError as e:
logger.error("Error initializing GCP client: %s", e)
raise e
# Create envelope AEAD primitive using AES256 GCM for encrypting the data
# This key should only be used for client-side encryption to ensure authenticity and integrity
# of data.
key_template = aead.aead_key_templates.AES256_GCM
env_aead = aead.KmsEnvelopeAead(key_template, gcp_aead)
print(f"Created envelope AEAD Primitive using KMS URI: {key_uri}")
return env_aead
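# Minimal usage sketch (key URI and credentials path are hypothetical; the
# envelope AEAD encrypts and decrypts bytes with optional associated data):
#
#   env_aead = init_tink_env_aead(
#       "gcp-kms://projects/my-project/locations/us/keyRings/my-kr/cryptoKeys/my-key",
#       "service-account-credentials.json")
#   ciphertext = env_aead.encrypt(b"secret", b"associated-data")
#   plaintext = env_aead.decrypt(ciphertext, b"associated-data")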
# [END cloud_sql_mysql_cse_key]
|
apache-2.0
|
apache/jsecurity
|
src/org/jsecurity/session/SessionException.java
|
3552
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jsecurity.session;
import org.jsecurity.JSecurityException;
import java.io.Serializable;
/**
* General security exception attributed to problems during interaction with the system during
* a session.
*
* @author Les Hazlewood
* @since 0.1
*/
public class SessionException extends JSecurityException {
private Serializable sessionId;
/**
* Creates a new SessionException.
*/
public SessionException() {
super();
}
/**
* Constructs a new SessionException.
*
* @param message the reason for the exception
*/
public SessionException(String message) {
super(message);
}
/**
* Constructs a new SessionException.
*
* @param cause the underlying Throwable that caused this exception to be thrown.
*/
public SessionException(Throwable cause) {
super(cause);
}
/**
* Constructs a new SessionException.
*
* @param message the reason for the exception
* @param cause the underlying Throwable that caused this exception to be thrown.
*/
public SessionException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new SessionException.
*
* @param sessionId the session id of associated {@link Session Session}.
*/
public SessionException(Serializable sessionId) {
setSessionId(sessionId);
}
/**
* Constructs a new SessionException.
*
* @param message the reason for the exception
* @param sessionId the session id of associated {@link Session Session}.
*/
public SessionException(String message, Serializable sessionId) {
this(message);
setSessionId(sessionId);
}
/**
 * Constructs a new SessionException.
*
* @param message the reason for the exception
* @param cause the underlying Throwable that caused this exception to be thrown.
* @param sessionId the session id of associated {@link Session Session}.
*/
public SessionException(String message, Throwable cause, Serializable sessionId) {
this(message, cause);
setSessionId(sessionId);
}
/**
* Returns the session id of the associated <tt>Session</tt>.
*
* @return the session id of the associated <tt>Session</tt>.
*/
public Serializable getSessionId() {
return sessionId;
}
/**
* Sets the session id of the <tt>Session</tt> associated with this exception.
*
* @param sessionId the session id of the <tt>Session</tt> associated with this exception.
*/
public void setSessionId(Serializable sessionId) {
this.sessionId = sessionId;
}
}
|
apache-2.0
|
sqrrrl/google-api-nodejs-client
|
src/apis/bigquerydatatransfer/index.ts
|
1309
|
// Copyright 2018, Google, LLC.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! THIS FILE IS AUTO-GENERATED */
import {getAPI, GoogleConfigurable} from 'googleapis-common';
import {bigquerydatatransfer_v1} from './v1';
export const VERSIONS = {
'v1': bigquerydatatransfer_v1.Bigquerydatatransfer,
};
export function bigquerydatatransfer(version: 'v1'):
bigquerydatatransfer_v1.Bigquerydatatransfer;
export function bigquerydatatransfer(options: bigquerydatatransfer_v1.Options):
bigquerydatatransfer_v1.Bigquerydatatransfer;
export function
bigquerydatatransfer<T = bigquerydatatransfer_v1.Bigquerydatatransfer>(
this: GoogleConfigurable,
versionOrOptions: 'v1'|bigquerydatatransfer_v1.Options) {
return getAPI<T>('bigquerydatatransfer', versionOrOptions, VERSIONS, this);
}
|
apache-2.0
|
liuyf8688/demos
|
actor-akka-demos/src/main/java/com/liuyf/demo/actor/akka/tutorials/actor/lifecycle/StartStopActor2.java
|
513
|
package com.liuyf.demo.actor.akka.tutorials.actor.lifecycle;
import akka.actor.AbstractActor;
public class StartStopActor2 extends AbstractActor {
@Override
public void preStart() throws Exception {
System.out.println("second started");
}
@Override
public void postStop() throws Exception {
System.out.println("second stopped");
}
@Override
public Receive createReceive() {
return receiveBuilder()
.build();
}
}
|
apache-2.0
|
depositolegale/bookdeposit-desktop
|
main.js
|
1118
|
const electron = require("electron");
const app = electron.app;
const BrowserWindow = electron.BrowserWindow;
const ipcMain = electron.ipcMain;
const path = require("path");
const url = require("url");
const fs = require("fs");
let mainWindow;
var mkdirOutputDirectory = function() {
// Declare with const to avoid creating an implicit global.
const dir = path.join(app.getPath("desktop"), "BNCF-Bookdeposit");
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir);
}
};
var reload = function() {
ipcMain.on("reload", (event, arg) => {
mainWindow.webContents.reload();
});
};
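// The renderer triggers this handler with (hypothetical): ipcRenderer.send("reload");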
function createWindow() {
mkdirOutputDirectory();
mainWindow = new BrowserWindow({ width: 600, height: 800 });
mainWindow.loadURL(
url.format({
pathname: path.join(__dirname, "index.html"),
protocol: "file:",
slashes: true
})
);
mainWindow.on("closed", function() {
mainWindow = null;
});
}
app.on("ready", function() {
createWindow();
reload();
});
app.on("window-all-closed", function() {
if (process.platform !== "darwin") {
app.quit();
}
});
app.on("activate", function() {
if (mainWindow === null) {
createWindow();
}
});
|
apache-2.0
|
jt120/my-cabin
|
concurrency/src/main/java/syn/TestSync.java
|
224
|
package syn;
/**
* @author ze.liu
* @since 2014/5/20
*/
public class TestSync {
public static void main(String[] args) {
System.out.println(Fruit.State.START.compareTo(Fruit.State.END));
}
}
|
apache-2.0
|
agentmedia/phine-core
|
src/Core/Modules/Backend/PageForm.php
|
24387
|
<?php
namespace Phine\Bundles\Core\Modules\Backend;
use Phine\Bundles\Core\Logic\Module\BackendForm;
use Phine\Framework\System\Http\Request;
use Phine\Framework\FormElements\Fields\Input;
use App\Phine\Database\Core\Site;
use App\Phine\Database\Core\Page;
use Phine\Framework\Validation\DatabaseCount;
use Phine\Framework\System\Http\Response;
use Phine\Bundles\Core\Logic\Routing\BackendRouter;
use Phine\Bundles\Core\Logic\Tree\TreeBuilder;
use Phine\Bundles\Core\Logic\Tree\PageTreeProvider;
use App\Phine\Database\Access;
use App\Phine\Database\Core\Layout;
use Phine\Framework\FormElements\Fields\Select;
use Phine\Framework\System\IO\Path;
use Phine\Framework\System\IO\File;
use Phine\Bundles\Core\Logic\Routing\Rewriter;
use Phine\Framework\Webserver\Apache\Htaccess\Writer;
use Phine\Bundles\Core\Logic\DBEnums\MenuAccess;
use Phine\Bundles\Core\Logic\Access\Backend\GroupFinder;
use Phine\Bundles\Core\Logic\Access\Backend\RightsFinder;
use App\Phine\Database\Core\Usergroup;
use Phine\Bundles\Core\Snippets\BackendRights\PageRights;
use Phine\Bundles\Core\Logic\Util\DBSelectUtil;
use Phine\Bundles\Core\Logic\Access\Backend\Enums\BackendAction;
use App\Phine\Database\Core\PageMembergroup;
use Phine\Bundles\Core\Logic\Util\MembergroupUtil;
use App\Phine\Database\Core\Membergroup;
use Phine\Framework\FormElements\Fields\Checkbox;
use Phine\Framework\System\Date;
use Phine\Framework\Sitemap\Enums\ChangeFrequency;
use Phine\Bundles\Core\Logic\Logging\Logger;
use Phine\Bundles\Core\Logic\Logging\Enums\Action;
use Phine\Bundles\Core\Snippets\FormParts\PageUrlSelector;
use Phine\Bundles\Core\Logic\DBEnums\PageType;
use Phine\Bundles\Core\Logic\Routing\FrontendRouter;
use App\Phine\Database\Core\Area;
use App\Phine\Database\Core\PageContent;
/**
* The page form
*/
class PageForm extends BackendForm
{
/**
* The site currently edited
* @var Site
*/
private $site;
/**
* The edited page
* @var Page
*/
private $page;
/**
* The parent page
* @var Page
*/
private $parent;
/**
* The previous page
* @var Page
*/
private $previous;
/**
* The page rights snippet
* @var PageRights
*/
protected $pageRights;
/**
* The localized date format used for the date pickers
* @var string
*/
private $dateFormat;
/**
* True if member groups exist
* @var boolean
*/
protected $hasMemberGroups;
/**
* A page url selector for redirect pages
* @var PageUrlSelector
*/
protected $selector;
/**
* Initializes the form
* @return boolean
*/
protected function Init()
{
$this->page = new Page(Request::GetData('page'));
$this->parent = Page::Schema()->ByID(Request::GetData('parent'));
$this->previous = Page::Schema()->ByID(Request::GetData('previous'));
$this->site = $this->page->Exists() ? $this->page->GetSite() :
Site::Schema()->ByID(Request::GetData('site'));
$this->dateFormat = Trans('Core.DateFormat');
$this->InitPageRights();
if (!$this->page->Exists() && !$this->site->Exists())
{
Response::Redirect(BackendRouter::ModuleUrl(new SiteList()));
return true;
}
$this->AddLayoutField();
$this->AddNameField();
$this->AddUrlField();
$this->AddTitleField();
$this->AddDescriptionField();
$this->AddKeywordsField();
$this->AddMenuAccessField();
$this->AddUserGroupField();
$this->AddGuestsOnlyField();
$this->AddMemberGroupField();
$this->AddPublishField();
$this->AddSubmit();
$this->AddPublishFromDateField();
$this->AddPublishFromHourField();
$this->AddPublishFromMinuteField();
$this->AddPublishToDateField();
$this->AddPublishToHourField();
$this->AddPublishToMinuteField();
$this->AddSitemapRelevanceField();
$this->AddSitemapChangeFrequencyField();
$this->AddTypeField();
$this->AddRedirectTargetSelector();
return parent::Init();
}
private function AddGuestsOnlyField()
{
$name = 'GuestsOnly';
$field = new Checkbox($name, '1', (bool)$this->page->GetGuestsOnly());
$this->AddField($field);
}
private function AddMemberGroupField()
{
$name = 'MemberGroup';
$field = MembergroupUtil::PageCheckList($name, $this->page);
$this->hasMemberGroups = count($field->GetOptions()) > 0;
if ($this->hasMemberGroups)
{
$field->SetHtmlAttribute('id', $name);
$this->AddField($field);
}
}
/**
* Finds the parent group
* @return Usergroup
*/
private function FindParentGroup()
{
$parentGroup = null;
if ($this->parent)
{
$parentGroup = GroupFinder::FindPageGroup($this->parent);
}
if (!$parentGroup)
{
$parentGroup = $this->site->GetUserGroup();
}
return $parentGroup;
}
/**
* Adds the user group field
*/
private function AddUserGroupField()
{
$name = 'UserGroup';
$field = new Select($name, '');
$parentGroup = $this->FindParentGroup();
$inheritText = $parentGroup ? Trans('Core.PageForm.UserGroup.Inherit_{0}', $parentGroup->GetName()) :
Trans('Core.PageForm.UserGroup.Inherit');
$field->AddOption('', $inheritText);
if ($this->page->Exists() && $this->page->GetUserGroup())
{
$field->SetValue($this->page->GetUserGroup()->GetID());
}
DBSelectUtil::AddUserGroupOptions($field);
$this->AddField($field);
}
/**
* Initializes the page rights snippet from the page's own rights and those of its parent page or site
*/
protected function InitPageRights()
{
$rights = $this->page->Exists() ? $this->page->GetUserGroupRights() : null;
$parentRights = null;
if ($this->parent)
{
$parentRights = RightsFinder::FindPageRights($this->parent);
}
if (!$parentRights)
{
$siteRights = $this->site->GetUserGroupRights();
if ($siteRights)
{
$parentRights = $this->site->GetUserGroupRights()->GetPageRights();
}
}
$this->pageRights = new PageRights($parentRights, $rights);
}
/**
* Adds name field to the form
*/
private function AddNameField()
{
$name = 'Name';
$this->AddField(Input::Text($name, $this->page->GetName()));
$this->SetRequired($name);
$this->AddValidator($name, DatabaseCount::UniqueFieldAnd($this->page, $name, $this->SiteCondition()));
}
private function SiteCondition()
{
$sql = Access::SqlBuilder();
$tbl = Page::Schema()->Table();
return $sql->Equals($tbl->Field('Site'), $sql->Value($this->site->GetID()));
}
/**
* Adds the url field to the form
*/
private function AddUrlField()
{
$name = 'Url';
$this->AddField(Input::Text($name, $this->page->GetUrl()));
$this->SetRequired($name);
$this->AddValidator($name, DatabaseCount::UniqueFieldAnd($this->page, $name, $this->SiteCondition()));
}
/**
* Adds the title field to the form
*/
private function AddTitleField()
{
$name = 'Title';
$this->AddField(Input::Text($name, $this->page->GetTitle()));
}
/**
* Adds the description field to the form
*/
private function AddDescriptionField()
{
$name = 'Description';
$this->AddField(Input::Text($name, $this->page->GetDescription()));
}
/**
* Adds the keywords field to the form
*/
private function AddKeywordsField()
{
$name = 'Keywords';
$this->AddField(Input::Text($name, $this->page->GetKeywords()));
}
/**
* Adds the layout field to the form
*/
private function AddLayoutField()
{
$name = 'Layout';
$select = new Select($name);
if ($this->page->Exists())
{
$select->SetValue($this->page->GetLayout()->GetID());
}
$select->AddOption('', Trans('Core.PleaseSelect'));
$sql = Access::SqlBuilder();
$tbl = Layout::Schema()->Table();
$order = $sql->OrderList($sql->OrderAsc($tbl->Field('Name')));
$layouts = Layout::Schema()->Fetch(false, null, $order);
foreach ($layouts as $layout)
{
$select->AddOption($layout->GetID(), $layout->GetName());
}
$this->AddField($select);
$this->SetRequired($name);
}
/**
* Adds the menu access select
*/
private function AddMenuAccessField()
{
$name = 'MenuAccess';
$value = $this->page->Exists() ? $this->page->GetMenuAccess() : (string)MenuAccess::Authorized();
$select = new Select($name, $value);
foreach (MenuAccess::AllowedValues() as $access)
{
$select->AddOption($access, Trans("Core.PageForm.$name.$access"));
}
$this->AddField($select);
$this->SetRequired($name);
}
/**
* Adds the publish check box
*/
private function AddPublishField()
{
$name = 'Publish';
$field = new Checkbox($name, '1', (bool)$this->page->GetPublish());
$this->AddField($field);
}
/**
* Adds the publish from date field
*/
private function AddPublishFromDateField()
{
$name = 'PublishFromDate';
$from = $this->page->GetPublishFrom();
$field = Input::Text($name, $from ? $from->ToString($this->dateFormat) : '');
$field->SetHtmlAttribute('data-type', 'date');
$this->AddField($field);
}
/**
* Adds the publish from hour field
*/
private function AddPublishFromHourField()
{
$name = 'PublishFromHour';
$from = $this->page->GetPublishFrom();
$field = Input::Text($name, $from ? $from->ToString('H') : '');
$field->SetHtmlAttribute('data-type', 'hour');
$this->AddField($field);
}
/**
* Adds the publish from minute field
*/
private function AddPublishFromMinuteField()
{
$name = 'PublishFromMinute';
$from = $this->page->GetPublishFrom();
$field = Input::Text($name, $from ? $from->ToString('i') : '');
$field->SetHtmlAttribute('data-type', 'minute');
$this->AddField($field);
}
/**
* Adds the publish to date field
*/
private function AddPublishToDateField()
{
$name = 'PublishToDate';
$to = $this->page->GetPublishTo();
$field = Input::Text($name, $to ? $to->ToString($this->dateFormat) : '');
$field->SetHtmlAttribute('data-type', 'date');
$this->AddField($field);
}
/**
* Adds the publish to hour field
*/
private function AddPublishToHourField()
{
$name = 'PublishToHour';
$to = $this->page->GetPublishTo();
$field = Input::Text($name, $to ? $to->ToString('H') : '');
$field->SetHtmlAttribute('data-type', 'hour');
$this->AddField($field);
}
private function AddPublishToMinuteField()
{
$name = 'PublishToMinute';
$to = $this->page->GetPublishTo();
$field = Input::Text($name, $to ? $to->ToString('i') : '');
$field->SetHtmlAttribute('data-type', 'minute');
$this->AddField($field);
}
/**
* Adds the sitemap relevance field
*/
private function AddSitemapRelevanceField()
{
$name = 'SitemapRelevance';
$value = $this->page->Exists() ? 10* $this->page->GetSitemapRelevance() : 7;
$field = new Select($name, $value);
for ($val = 0; $val <= 10; ++$val)
{
$decSep = Trans('Core.DecimalSeparator');
$thousSep = Trans('Core.ThousandsSeparator');
$text = number_format($val / 10, 1, $decSep, $thousSep);
$field->AddOption($val, $text);
}
$this->AddField($field);
}
/**
* Adds the sitemap change frequency field
*/
private function AddSitemapChangeFrequencyField()
{
$name = 'SitemapChangeFrequency';
$value = $this->page->Exists() ? $this->page->GetSitemapChangeFrequency() :
(string)ChangeFrequency::Weekly();
$field = new Select($name, $value);
$values = ChangeFrequency::AllowedValues();
foreach ($values as $val)
{
$field->AddOption($val, Trans('Core.Sitemap.ChangeFrequency.' . ucfirst($val)));
}
$this->AddField($field);
}
private function AddTypeField()
{
$name = 'Type';
$value = $this->page->Exists() ? $this->page->GetType() : (string)PageType::Normal();
$field = new Select($name, $value);
$types = PageType::AllowedValues();
$ex404 = FrontendRouter::Page404($this->site);
foreach ($types as $type)
{
if ($type == (string)PageType::NotFound() &&
$ex404 && !$ex404->Equals($this->page))
{
continue;
}
$field->AddOption($type, Trans('Core.PageForm.Type.' . ucfirst($type)));
}
$this->AddField($field);
}
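/**
* Adds the redirect target selector
*/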
private function AddRedirectTargetSelector()
{
$name = 'RedirectTarget';
$this->selector = new PageUrlSelector($name, Trans($this->Label($name)), $this->page->GetRedirectTarget());
if ($this->page->Exists())
{
$this->selector->DisablePage($this->page);
}
if ($this->Value('Type') == (string)PageType::RedirectPermanent() ||
$this->Value('Type') == (string)PageType::RedirectTemporary())
{
$this->selector->SetRequired($this->ErrorPrefix($name));
}
$this->Elements()->AddElement($name, $this->selector);
}
/**
* Saves the page
*/
protected function OnSuccess()
{
$prevLayout = $this->page->GetLayout();
$this->page->SetName($this->Value('Name'));
$this->page->SetUrl($this->Value('Url'));
$this->page->SetSite($this->site);
$this->page->SetTitle($this->Value('Title'));
$this->page->SetDescription($this->Value('Description'));
$this->page->SetKeywords($this->Value('Keywords'));
$newLayout = new Layout($this->Value('Layout'));
$this->page->SetLayout($newLayout);
$this->page->SetMenuAccess($this->Value('MenuAccess'));
$this->page->SetGuestsOnly((bool)$this->Value('GuestsOnly'));
$this->page->SetPublish((bool)$this->Value('Publish'));
$this->page->SetPublishFrom($this->PublishDate('PublishFrom'));
$this->page->SetPublishTo($this->PublishDate('PublishTo'));
$relevance = (float)$this->Value('SitemapRelevance') / 10;
$this->page->SetSitemapRelevance(min(max(0.0, $relevance), 1.0));
$this->page->SetSitemapChangeFrequency($this->Value('SitemapChangeFrequency'));
$this->SaveType();
$action = Action::Update();
if (!$this->page->Exists())
{
$action = Action::Create();
$this->SaveNew();
}
else
{
$this->ReassignContents($prevLayout, $newLayout);
$this->page->Save();
}
$logger = new Logger(self::Guard()->GetUser());
$logger->ReportPageAction($this->page, $action);
if ($this->CanAssignGroup())
{
$this->SaveRights();
}
$this->SaveMemberGroups();
$this->AdjustHtaccess();
Response::Redirect($this->BackLink());
}
/**
* Reassigns contents by area names if the layout was changed
* @param Layout $prevLayout The old layout
* @param Layout $newLayout The new layout
*/
private function ReassignContents(Layout $prevLayout, Layout $newLayout)
{
if ($prevLayout->Equals($newLayout))
{
return;
}
$oldAreas = Area::Schema()->FetchByLayout(false, $prevLayout);
foreach ($oldAreas as $oldArea)
{
$newArea = $this->FindNewArea($oldArea, $newLayout);
if ($newArea)
{
$this->TransferArea($oldArea, $newArea);
}
else
{
$this->ClearArea($oldArea);
}
}
}
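/**
* Moves the page's contents from the old area to the matching area in the new layout
* @param Area $oldArea The area in the old layout
* @param Area $newArea The matching area in the new layout
*/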
private function TransferArea(Area $oldArea, Area $newArea)
{
$sql = Access::SqlBuilder();
$tblPageContent = PageContent::Schema()->Table();
$setList = $sql->SetList('Area', $sql->Value($newArea->GetID()));
$condition = $sql->Equals($tblPageContent->Field('Area'), $sql->Value($oldArea->GetID()))
->And_($sql->Equals($tblPageContent->Field('Page'), $sql->Value($this->page->GetID())));
$update = $sql->Update($tblPageContent, $setList, $condition);
Access::Connection()->ExecuteQuery($update);
}
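/**
* Deletes the page's contents in the old area
* @param Area $oldArea The area in the old layout
*/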
private function ClearArea(Area $oldArea)
{
$sql = Access::SqlBuilder();
$tblPageContent = PageContent::Schema()->Table();
$condition = $sql->Equals($tblPageContent->Field('Area'), $sql->Value($oldArea->GetID()))
->And_($sql->Equals($tblPageContent->Field('Page'), $sql->Value($this->page->GetID())));
PageContent::Schema()->Delete($condition);
}
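/**
* Finds the area in the new layout matching the old area's name
* @param Area $oldArea The area in the old layout
* @param Layout $newLayout The new layout
* @return Area Returns the matching area, or null if none exists
*/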
private function FindNewArea(Area $oldArea, Layout $newLayout)
{
$sql = Access::SqlBuilder();
$tblArea = Area::Schema()->Table();
$where = $sql->Equals($tblArea->Field('Layout'), $sql->Value($newLayout->GetID()))
->And_($sql->Equals($tblArea->Field('Name'), $sql->Value($oldArea->GetName())));
return Area::Schema()->First($where);
}
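/**
* Saves the page type and updates the redirect target accordingly
*/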
private function SaveType()
{
$type = PageType::ByValue($this->Value('Type'));
$this->page->SetType((string)$type);
$target = $this->page->GetRedirectTarget();
switch ($type)
{
case PageType::Normal():
case PageType::NotFound():
if ($target)
{
$this->page->SetRedirectTarget(null);
$target->Delete();
}
break;
case PageType::RedirectPermanent():
case PageType::RedirectTemporary():
$this->page->SetRedirectTarget($this->selector->Save($target));
}
}
/**
* Gets a publishing date
* @param string $baseName The base name; 'PublishFrom' or 'PublishTo'
* @return Date|null Returns the date, or null if publishing is disabled or no date was entered
*/
private function PublishDate($baseName)
{
if (!$this->page->GetPublish())
{
return null;
}
$strDate = $this->Value($baseName . 'Date');
if (!$strDate)
{
return null;
}
$date = \DateTime::createFromFormat($this->dateFormat, $strDate);
// createFromFormat() returns false for unparsable input; bail out instead of fataling on setTime()
if ($date === false)
{
return null;
}
$date->setTime((int)$this->Value($baseName . 'Hour'), (int)$this->Value($baseName . 'Minute'), 0);
return Date::FromDateTime($date);
}
/**
* True if user group can be assigned
* @return bool
*/
protected function CanAssignGroup()
{
return self::Guard()->Allow(BackendAction::AssignGroups(), $this->page);
}
/**
* Saves the group and right settings
*/
private function SaveRights()
{
$groupID = $this->Value('UserGroup');
$userGroup = Usergroup::Schema()->ByID($groupID);
$this->page->SetUserGroup($userGroup);
if (!$userGroup)
{
$oldRights = $this->page->GetUserGroupRights();
if ($oldRights)
{
$oldRights->GetContentRights()->Delete();
}
$this->page->SetUserGroupRights(null);
}
else
{
$this->pageRights->Save();
$this->page->SetUserGroupRights($this->pageRights->Rights());
}
$this->page->Save();
}
/**
* Saves the member groups
*/
private function SaveMemberGroups()
{
$selectedIDs = Request::PostArray('MemberGroup');
if ($this->page->GetGuestsOnly())
{
$selectedIDs = array();
}
$exIDs = Membergroup::GetKeyList(MembergroupUtil::PageMembergroups($this->page));
$this->DeleteOldMemberGroups($selectedIDs);
$this->SaveNewMemberGroups($selectedIDs, $exIDs);
}
/**
* Deletes the old member groups
* @param array $selectedIDs The selected member ids
*/
private function DeleteOldMemberGroups(array $selectedIDs)
{
$sql = Access::SqlBuilder();
$tblPgGrp = PageMembergroup::Schema()->Table();
$where = $sql->Equals($tblPgGrp->Field('Page'), $sql->Value($this->page->GetID()));
if (count($selectedIDs))
{
$inSelected = $sql->InListFromValues($selectedIDs);
$where = $where->And_($sql->NotIn($tblPgGrp->Field('MemberGroup'), $inSelected));
}
PageMembergroup::Schema()->Delete($where);
}
/**
* Saves page member groups not already assigned
* @param array $selectedIDs The selected member group ids
* @param array $exIDs The already assigned membergroup ids
*/
private function SaveNewMemberGroups(array $selectedIDs, array $exIDs)
{
foreach ($selectedIDs as $selID)
{
if (!in_array($selID, $exIDs))
{
$pgGrp = new PageMembergroup();
$pgGrp->SetPage($this->page);
$pgGrp->SetMemberGroup(new Membergroup($selID));
$pgGrp->Save();
}
}
}
/**
* Inserts the freshly created page into the page tree
*/
private function SaveNew()
{
$treeBuilder = new TreeBuilder(new PageTreeProvider($this->site));
$treeBuilder->Insert($this->page, $this->parent, $this->previous);
}
/**
* Adds necessary rewrite commands
*/
private function AdjustHtaccess()
{
$file = Path::Combine(PHINE_PATH, 'Public/.htaccess');
if (!File::Exists($file))
{
throw new \Exception("HTACCESS FILE $file NOT FOUND");
}
$writer = new Writer();
$rewriter = new Rewriter($writer);
$text = File::GetContents($file);
$startPos = strpos($text, (string)$rewriter->PageStartComment($this->page));
$endPos = false;
$pageFound = false;
if ($startPos === false)
{
$startPos = strpos($text, (string)$rewriter->EndComment());
$endPos = $startPos;
}
else
{
$endPos = strpos($text, (string)$rewriter->PageEndComment($this->page));
if ($endPos !== false)
{
$pageFound = true;
$endPos += strlen((string)$rewriter->PageEndComment($this->page));
}
}
if ($startPos === false || $endPos === false)
{
throw new \Exception("HTACCESS COMMANDS NOT FOUND");
}
$rewriter->AddPageCommands($this->page);
$newText = substr($text, 0, $startPos) . $writer->ToString() . substr($text, $endPos);
File::CreateWithText($file, $newText);
}
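/*
* Illustrative .htaccess layout assumed by the splice above; the real marker
* and rule text come from Rewriter, so the lines below are placeholders only:
*
* # <PageStartComment for this page>
* RewriteRule ... (generated page commands)
* # <PageEndComment for this page>
* # <EndComment>
*/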
/**
* The link for the back button
* @return string Returns the url to the page tree
*/
protected function BackLink()
{
$params = array('site'=>$this->site->GetID());
if ($this->page->Exists())
{
$params['selected'] = $this->page->GetID();
}
else if ($this->previous)
{
$params['selected'] = $this->previous->GetID();
}
else if ($this->parent)
{
$params['selected'] = $this->parent->GetID();
}
return BackendRouter::ModuleUrl(new PageTree(), $params);
}
}
|
apache-2.0
|
IntelliTect/Coalesce
|
src/Coalesce.Web/Controllers/Generated/StandaloneReadonlyController.g.cs
|
694
|
using IntelliTect.Coalesce.Knockout.Controllers;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Hosting;
namespace Coalesce.Web.Controllers
{
[Authorize]
public partial class StandaloneReadonlyController : BaseViewController<Coalesce.Domain.StandaloneReadonly>
{
[Authorize]
public ActionResult Cards()
{
return IndexImplementation(false, @"~/Views/Generated/StandaloneReadonly/Cards.cshtml");
}
[Authorize]
public ActionResult Table()
{
return IndexImplementation(false, @"~/Views/Generated/StandaloneReadonly/Table.cshtml");
}
}
}
|
apache-2.0
|
lqbweb/logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/db/jdbc/FactoryMethodConnectionSource.java
|
6322
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.logging.log4j.core.appender.db.jdbc;
import java.io.PrintWriter;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.core.util.Loader;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.Strings;
/**
* A {@link JdbcAppender} connection source that uses a public static factory method to obtain a {@link Connection} or
* {@link DataSource}.
*/
@Plugin(name = "ConnectionFactory", category = "Core", elementType = "connectionSource", printObject = true)
public final class FactoryMethodConnectionSource implements ConnectionSource {
private static final Logger LOGGER = StatusLogger.getLogger();
private final DataSource dataSource;
private final String description;
private FactoryMethodConnectionSource(final DataSource dataSource, final String className, final String methodName,
final String returnType) {
this.dataSource = dataSource;
this.description = "factory{ public static " + returnType + ' ' + className + '.' + methodName + "() }";
}
@Override
public Connection getConnection() throws SQLException {
return this.dataSource.getConnection();
}
@Override
public String toString() {
return this.description;
}
/**
* Factory method for creating a connection source within the plugin manager.
*
* @param className The name of a public class that contains a static method capable of returning either a
* {@link DataSource} or a {@link Connection}.
* @param methodName The name of the public static method on the aforementioned class that returns the data source
* or connection. If this method returns a {@link Connection}, it should return a new connection
* every call.
* @return the created connection source.
*/
@PluginFactory
public static FactoryMethodConnectionSource createConnectionSource(
@PluginAttribute("class") final String className,
@PluginAttribute("method") final String methodName) {
if (Strings.isEmpty(className) || Strings.isEmpty(methodName)) {
LOGGER.error("No class name or method name specified for the connection factory method.");
return null;
}
final Method method;
try {
final Class<?> factoryClass = Loader.loadClass(className);
method = factoryClass.getMethod(methodName);
} catch (final Exception e) {
LOGGER.error(e.toString(), e);
return null;
}
final Class<?> returnType = method.getReturnType();
String returnTypeString = returnType.getName();
DataSource dataSource;
if (returnType == DataSource.class) {
try {
dataSource = (DataSource) method.invoke(null);
returnTypeString += "[" + dataSource + ']';
} catch (final Exception e) {
LOGGER.error(e.toString(), e);
return null;
}
} else if (returnType == Connection.class) {
dataSource = new DataSource() {
@Override
public Connection getConnection() throws SQLException {
try {
return (Connection) method.invoke(null);
} catch (final Exception e) {
throw new SQLException("Failed to obtain connection from factory method.", e);
}
}
@Override
public Connection getConnection(final String username, final String password) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public int getLoginTimeout() throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public PrintWriter getLogWriter() throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public boolean isWrapperFor(final Class<?> iface) throws SQLException {
return false;
}
@Override
public void setLoginTimeout(final int seconds) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public void setLogWriter(final PrintWriter out) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public <T> T unwrap(final Class<T> iface) throws SQLException {
return null;
}
};
} else {
LOGGER.error("Method [{}.{}()] returns unsupported type [{}].", className, methodName,
returnType.getName());
return null;
}
return new FactoryMethodConnectionSource(dataSource, className, methodName, returnTypeString);
}
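// Illustrative only (not part of the original source): a factory class this
// connection source could reference, e.g. configured as
// <ConnectionFactory class="example.Connections" method="getConnection"/>.
// The class name and JDBC URL below are hypothetical.
//
// public final class Connections {
// public static Connection getConnection() throws SQLException {
// return DriverManager.getConnection("jdbc:h2:mem:logging");
// }
// }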
}
|
apache-2.0
|
qingshan/sieve
|
parse/lex.go
|
8144
|
package parse
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*Lexer) stateFn
// Lexer holds the state of the scanner.
type Lexer struct {
name string // the name of the input; used only for error reports
input string // the string being scanned
state stateFn // the next lexing function to enter
pos Pos // current position in the input
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
items chan Token // channel of scanned items
parenDepth int // nesting depth of ( ) exprs
}
// next returns the next rune in the input.
func (l *Lexer) next() rune {
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
l.width = Pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input.
func (l *Lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (l *Lexer) backup() {
l.pos -= l.width
}
// emit passes an item back to the client.
func (l *Lexer) emit(t TokenType) {
l.items <- Token{t, l.start, l.input[l.start:l.pos]}
l.start = l.pos
}
// ignore skips over the pending input before this point.
func (l *Lexer) ignore() {
l.start = l.pos
}
// accept consumes the next rune if it's from the valid set.
func (l *Lexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
return true
}
l.backup()
return false
}
// acceptRun consumes a run of runes from the valid set.
func (l *Lexer) acceptRun(valid string) {
for strings.IndexRune(valid, l.next()) >= 0 {
}
l.backup()
}
// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *Lexer) lineNumber() int {
return 1 + strings.Count(l.input[:l.lastPos], "\n")
}
// colNumber reports which column on the current line we're on,
// based on the position of the current rune
func (l *Lexer) colNumber() int {
ln := l.lineNumber()
lines := strings.SplitN(l.input, "\n", ln)
var total int
for i := range lines[:ln - 1] {
total += len(lines[i])
}
return int(l.pos) - total - (ln - 1)
}
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
l.items <- Token{ERROR, l.start, fmt.Sprintf(format, args...)}
return nil
}
// NextItem returns the next item from the input.
func (l *Lexer) NextItem() Token {
token := <-l.items
l.lastPos = token.Pos
return token
}
// Lex creates a new scanner for the input string.
func Lex(name, input string) *Lexer {
l := &Lexer{
name: name,
input: input,
items: make(chan Token),
}
go l.run()
return l
}
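// Example usage (a sketch; it assumes Token exposes its type via a Type
// field and that EOF and ERROR are TokenType values, as used by emit and
// errorf above):
//
// l := Lex("example", input)
// for {
// tok := l.NextItem()
// if tok.Type == EOF || tok.Type == ERROR {
// break
// }
// // process tok
// }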
// run runs the state machine for the lexer.
func (l *Lexer) run() {
for l.state = lexStart; l.state != nil; {
l.state = l.state(l)
}
}
// state functions
func lexStart(l *Lexer) stateFn {
switch r := l.next(); {
case r == eof:
l.emit(EOF)
return nil
case r == ';':
return lexSemiColon
case r == ':':
return lexTag
case r == ',':
l.emit(COMMA)
return lexStart
case r == '"':
return lexString
case r == '(':
l.emit(LEFTPAREN)
l.parenDepth++
return lexStart
case r == ')':
l.emit(RIGHTPAREN)
l.parenDepth--
if l.parenDepth < 0 {
return l.errorf("unexpected right paren at line %d:%d with %#U", l.lineNumber(), l.colNumber(), r)
}
return lexStart
case r == '[':
l.emit(LEFTBRACKET)
return lexStart
case r == ']':
l.emit(RIGHTBRACKET)
return lexStart
case r == '{':
l.emit(LEFTCURLY)
return lexStart
case r == '}':
l.emit(RIGHTCURLY)
return lexStart
case isSpace(r):
return lexSpace
case isEndOfLine(r):
return lexEndOfLine
case ('0' <= r && r <= '9'):
l.backup()
return lexNumber
case isAlphaNumeric(r):
l.backup()
return lexIdentifier
case r == '#':
return lexLineComment
case r == '/' && l.peek() == '*':
return lexBlockComment
default:
return l.errorf("unknown syntax: %q", l.input[l.start:l.pos])
}
}
// lexSemiColon scans a semicolon
func lexSemiColon(l *Lexer) stateFn {
l.emit(SEMICOLON)
return lexStart
}
// lexSpace scans a run of space characters.
// One space has already been seen.
func lexSpace(l *Lexer) stateFn {
for isSpace(l.peek()) {
l.next()
}
l.ignore()
return lexStart
}
// lexEndOfLine scans a run of end-of-line characters.
func lexEndOfLine(l *Lexer) stateFn {
for isEndOfLine(l.peek()) {
l.next()
}
l.ignore()
return lexStart
}
// lexNumber scans a number: a decimal with an optional K/M/G size suffix.
func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() {
return l.errorf("bad number syntax at %d:%d with %q", l.lineNumber(), l.colNumber(), l.input[l.start:l.pos])
}
l.emit(NUMBER)
return lexStart
}
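// scanNumber consumes a run of decimal digits plus an optional k/K/m/M/g/G
// size suffix and reports whether the result is a well-formed number.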
func (l *Lexer) scanNumber() bool {
digits := "0123456789"
l.acceptRun(digits)
l.accept("kKmMgG")
if isAlphaNumeric(l.peek()) {
l.next()
return false
}
return true
}
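// For illustration: "512" and "64K" scan cleanly, while "64KB" fails because
// an alphanumeric rune follows the size suffix.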
func lexString(l *Lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case r != '"':
// absorb.
case r == eof:
return l.errorf("Non-terminating string literal at %#U", r)
default:
l.emit(STRING)
break Loop
}
}
return lexStart
}
// lexTag scans a tag.
func lexTag(l *Lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case isAlphaNumeric(r):
// absorb.
default:
l.backup()
l.emit(TAG)
break Loop
}
}
return lexStart
}
// lexIdentifier scans an alphanumeric identifier.
func lexIdentifier(l *Lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case isAlphaNumeric(r):
// absorb.
default:
l.backup()
word := l.input[l.start:l.pos]
// if !l.atTerminator() {
// return l.errorf("bad character %#U", r)
// }
switch {
case key[word] > COMMAND:
l.emit(key[word])
default:
l.emit(IDENTIFIER)
}
break Loop
}
}
return lexStart
}
// atTerminator reports whether the input is at valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *Lexer) atTerminator() bool {
r := l.peek()
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', ':', ')', '(':
return true
}
return false
}
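// lexLineComment scans a '#' comment running to the end of the line.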
func lexLineComment(l *Lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case !isEndOfLine(r):
// absorb.
default:
l.backup()
if !l.atTerminator() {
return l.errorf("bad character %#U at %d:%d", r, l.lineNumber(), l.colNumber())
}
l.emit(LINECOMMENT)
break Loop
}
}
return lexStart
}
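// lexBlockComment scans a /* ... */ block comment.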
func lexBlockComment(l *Lexer) stateFn {
Loop:
for {
// if we find '*' and the next is '/'
switch r := l.next(); {
case r == eof:
return l.errorf("Non-terminating block comment at %#U", r)
case !l.atEndBlockComment():
// absorb.
default:
// l.backup()
// l.next()
word := l.input[l.start:l.pos]
switch {
case strings.Index(word, "*/") == len(word) - len("*/"):
l.emit(BLOCKCOMMENT)
default:
return l.errorf("error in block comment at %#U", r)
}
break Loop
}
}
return lexStart
}
func (l *Lexer) atEndBlockComment() bool {
word := l.input[l.pos - 2 : l.pos]
if strings.Index(word, "*/") == len(word) - len("*/") {
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
|
apache-2.0
|
alexeremeev/aeremeev
|
chapter_010/src/test/java/ru/job4j/todo/servlets/MainViewTest.java
|
3541
|
package ru.job4j.todo.servlets;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import org.junit.Before;
import org.junit.Test;
import ru.job4j.todo.database.Database;
import ru.job4j.todo.models.Item;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.sql.Timestamp;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Main view test.
* @author aeremeev.
* @version 1
* @since 31.01.2018
*/
public class MainViewTest {
private final Database database = new Database();
/**
* Clear table.
*/
@Before
public void clearTable() {
database.clearTable();
}
/**
* Test of adding item.
* @throws ServletException ServletException.
* @throws IOException IOException.
*/
@Test
public void whenAddNewItemThenGetItem() throws ServletException, IOException {
Item original = new Item();
original.setId(1);
original.setDescription("test");
original.setDone(false);
MainView servlet = new MainView();
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
StringWriter stringWriter = new StringWriter();
PrintWriter writer = new PrintWriter(stringWriter);
when(request.getParameter("description")).thenReturn("test");
when(request.getParameter("done")).thenReturn("false");
when(response.getWriter()).thenReturn(writer);
servlet.doPost(request, response);
writer.flush();
Item result = database.findById(1);
original.setCreated(result.getCreated());
assertTrue(stringWriter.toString().contains("Success"));
assertThat(result, is(original));
}
/**
* Test of getting JSON string of item list.
* @throws ServletException ServletException.
* @throws IOException IOException.
*/
@Test
public void whenGetItemThenReturnJSONString() throws ServletException, IOException {
Item item = new Item();
item.setDescription("test");
item.setDone(true);
item.setCreated(new Timestamp(System.currentTimeMillis()));
database.createOrUpdate(item);
MainView servlet = new MainView();
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
StringWriter stringWriter = new StringWriter();
PrintWriter writer = new PrintWriter(stringWriter);
when(request.getParameter("done")).thenReturn("true");
when(response.getWriter()).thenReturn(writer);
servlet.doGet(request, response);
writer.flush();
JsonObject object = new JsonObject();
object.addProperty("id", item.getId());
object.addProperty("description", item.getDescription());
object.addProperty("created", String.format("%1$TD %1$TT", item.getCreated()));
object.addProperty("done", item.getDone());
JsonArray array = new JsonArray();
array.add(object);
JsonObject expected = new JsonObject();
expected.addProperty("items", array.toString());
assertEquals(expected.toString(), stringWriter.toString());
}
}
|
apache-2.0
|
wuyr/LuZhiShen
|
app/src/main/java/org/lvu/adapters/SubAdapters/video/Video9Adapter.java
|
449
|
package org.lvu.adapters.SubAdapters.video;
import android.content.Context;
import org.lvu.models.Data;
import java.util.List;
/**
* Created by wuyr on 1/10/17 9:26 PM.
*/
public class Video9Adapter extends Video1Adapter {
public Video9Adapter(Context context, int layoutId, List<Data> data) {
super(context, layoutId, data);
}
@Override
protected String getUrl() {
return "/html/vodlist/xl/%s.json";
}
}
|
apache-2.0
|
unicef/un-partner-portal
|
backend/unpp_api/apps/review/migrations/0026_auto_20180911_1043.py
|
787
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-11 10:43
from __future__ import unicode_literals
import common.database_fields
import django.contrib.postgres.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('review', '0025_auto_20180907_1318'),
]
operations = [
migrations.AlterField(
model_name='partnerflag',
name='type_history',
field=django.contrib.postgres.fields.ArrayField(base_field=common.database_fields.FixedTextField(choices=[('FL1_Obs', 'Observation'), ('FL2_Yel', 'Yellow Flag'), ('FL3_Esc', 'Escalated Flag'), ('FL4_Red', 'Red Flag')], default='FL2_Yel'), blank=True, default=list, editable=False, null=True, size=None),
),
]
|
apache-2.0
|
adamrduffy/trinidad-1.0.x
|
trinidad-impl/src/main/java/org/apache/myfaces/trinidadinternal/style/xml/parse/IncludePropertyNode.java
|
4590
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.myfaces.trinidadinternal.style.xml.parse;
import org.apache.myfaces.trinidad.logging.TrinidadLogger;
/**
* IncludePropertyNode is used to represent a single &lt;includeProperty&gt; element
* in a parsed XML Style Sheet Language document.
* The includeProperty element is used to include a single property of one style
* within another style. Thus, the includeProperty element is very similar to the
* includeStyle element. The only difference is that includeStyle includes all properties
* of the referenced style, whereas includeProperty includes only a single property.
* Currently, the includeProperty element is not yet ported to the skin's CSS syntax.
* It exists only in the XSS syntax.
*
* @version $Name: $ ($Revision: adfrt/faces/adf-faces-impl/src/main/java/oracle/adfinternal/view/faces/style/xml/parse/IncludePropertyNode.java#0 $) $Date: 10-nov-2005.18:58:07 $
*/
public class IncludePropertyNode
{
/**
* Creates an IncludePropertyNode. In general, either the name or
* selector of the included style is specified.
*/
public IncludePropertyNode(
String name,
String selector,
String propertyName,
String localPropertyName)
{
assert (name != null) || (selector != null);
if (propertyName == null)
{
throw new NullPointerException(_LOG.getMessage(
"NULL_PROPERTYNAME"));
}
_name = name;
_selector = selector;
_propertyName = propertyName;
_localPropertyName = localPropertyName;
}
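// Illustrative XSS markup that such a node represents (the style and
// property names here are hypothetical):
//
// <includeProperty name="BaseText" propertyName="color"
// localPropertyName="background-color"/>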
/**
* Returns the name of the style to include.
*/
public String getName()
{
return _name;
}
/**
* Returns the selector of the style to include.
*/
public String getSelector()
{
return _selector;
}
/**
* Returns the name of the property to include
*/
public String getPropertyName()
{
return _propertyName;
}
/**
* Returns the name of the property as it should appear in the
* including style.
*/
public String getLocalPropertyName()
{
if (_localPropertyName == null)
return _propertyName;
return _localPropertyName;
}
@Override
public boolean equals(Object obj)
{
if (this == obj)
return true;
if (!(obj instanceof IncludePropertyNode))
return false;
// obj at this point must be an IncludePropertyNode
IncludePropertyNode test = (IncludePropertyNode)obj;
return
(_selector == test._selector || (_selector != null && _selector.equals(test._selector))) &&
(_name == test._name || (_name != null && _name.equals(test._name))) &&
(_propertyName == test._propertyName ||
(_propertyName != null && _propertyName.equals(test._propertyName))) &&
(_localPropertyName == test._localPropertyName ||
(_localPropertyName != null && _localPropertyName.equals(test._localPropertyName)));
}
@Override
public int hashCode()
{
int hash = 17;
hash = 37*hash + ((null == _name) ? 0 : _name.hashCode());
hash = 37*hash + ((null == _selector) ? 0 : _selector.hashCode());
hash = 37*hash + ((null == _propertyName) ? 0 : _propertyName.hashCode());
hash = 37*hash + ((null == _localPropertyName) ? 0 : _localPropertyName.hashCode());
return hash;
}
@Override
public String toString()
{
return
"[name=" + _name + ", " +
"selector=" + _selector + ", " +
"propertyName=" + _propertyName + ", " +
"localPropertyName=" + _localPropertyName + "]";
}
private final String _name;
private final String _selector;
private final String _propertyName;
private final String _localPropertyName;
private static final TrinidadLogger _LOG = TrinidadLogger.createTrinidadLogger(
IncludePropertyNode.class);
}
|
apache-2.0
|
Bartmax/Foundatio
|
src/Core/Extensions/CollectionExtensions.cs
|
1014
|
using System;
using System.Collections.Generic;
using System.Linq;
namespace Foundatio.Extensions {
public static class CollectionExtensions {
public static ICollection<T> ReduceTimeSeries<T>(this ICollection<T> items, Func<T, DateTime> dateSelector, Func<ICollection<T>, DateTime, T> reducer, int dataPoints) {
if (items.Count <= dataPoints)
return items;
var minTicks = items.Min(dateSelector).Ticks;
var maxTicks = items.Max(dateSelector).Ticks;
var bucketSize = (maxTicks - minTicks) / dataPoints;
// A span shorter than dataPoints ticks yields bucketSize == 0, which would
// make the while loop below spin forever; fall back to the original items.
if (bucketSize <= 0)
return items;
var buckets = new List<long>();
long currentTick = minTicks;
while (currentTick < maxTicks) {
buckets.Add(currentTick);
currentTick += bucketSize;
}
buckets.Reverse();
return items.GroupBy(i => buckets.First(b => dateSelector(i).Ticks >= b)).Select(g => reducer(g.ToList(), new DateTime(g.Key))).ToList();
}
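// Illustrative usage (Sample, Timestamp and Value are hypothetical): shrink
// a large series to at most 24 points by averaging each bucket:
//
// var reduced = samples.ReduceTimeSeries(
// s => s.Timestamp,
// (bucket, start) => new Sample { Timestamp = start, Value = bucket.Average(x => x.Value) },
// 24);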
}
}
|
apache-2.0
|
h6ah4i/cxxdasp
|
include/cxxdasp/resampler/halfband/f32_mono_sse_halfband_x2_resampler_core_operator.hpp
|
3558
|
//
// Copyright (C) 2014 Haruki Hasegawa
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef CXXDASP_RESAMPLER_F32_MONO_SSE_HALFBAND_X2_RESAMPLER_CORE_OPERATOR_HPP_
#define CXXDASP_RESAMPLER_F32_MONO_SSE_HALFBAND_X2_RESAMPLER_CORE_OPERATOR_HPP_
#include <cxxporthelper/compiler.hpp>
#if CXXPH_COMPILER_SUPPORTS_X86_SSE
#include <cxxporthelper/cstdint>
#include <cxxporthelper/x86_intrinsics.hpp>
#include <cxxdasp/datatype/audio_frame.hpp>
#include <cxxdasp/utils/utils.hpp>
namespace cxxdasp {
namespace resampler {
class f32_mono_sse_halfband_x2_resampler_core_operator {
public:
/**
* Source audio frame type.
*/
typedef datatype::f32_mono_frame_t src_frame_t;
/**
* Destination audio frame type.
*/
typedef datatype::f32_mono_frame_t dest_frame_t;
/**
* FIR coefficients type.
*/
typedef float coeffs_t;
/** Number of channels */
#if CXXPH_COMPILER_SUPPORTS_CONSTEXPR
static constexpr int num_channels = 1;
#else
enum { num_channels = 1 };
#endif
/**
* Check this operator class is available.
* @return whether the class is available
*/
static bool is_supported() CXXPH_NOEXCEPT { return cxxporthelper::platform_info::support_sse(); }
void dual_convolve(dest_frame_t *CXXPH_RESTRICT dest, const src_frame_t *CXXPH_RESTRICT src1,
const src_frame_t *CXXPH_RESTRICT src2, const coeffs_t *CXXPH_RESTRICT coeffs1,
const coeffs_t *CXXPH_RESTRICT coeffs2, int n) const CXXPH_NOEXCEPT
{
const float *CXXPH_RESTRICT f32_src1 = reinterpret_cast<const float *>(src1);
const float *CXXPH_RESTRICT f32_src2 = reinterpret_cast<const float *>(src2);
const float *CXXPH_RESTRICT f32_coeffs1 =
reinterpret_cast<const float *>(CXXDASP_UTIL_ASSUME_ALIGNED_FUNC(coeffs1, CXXPH_PLATFORM_SIMD_ALIGNMENT));
const float *CXXPH_RESTRICT f32_coeffs2 =
reinterpret_cast<const float *>(CXXDASP_UTIL_ASSUME_ALIGNED_FUNC(coeffs2, CXXPH_PLATFORM_SIMD_ALIGNMENT));
__m128 t1, t2;
t1 = _mm_setzero_ps();
t2 = _mm_setzero_ps();
assert((n & 0x3) == 0);
for (int i = 0; i < n; i += 4) {
const __m128 c1 = _mm_load_ps(&f32_coeffs1[i]);
const __m128 s1 = _mm_loadu_ps(&f32_src1[i]);
t1 = _mm_add_ps(t1, _mm_mul_ps(c1, s1));
const __m128 c2 = _mm_load_ps(&f32_coeffs2[i]);
const __m128 s2 = _mm_loadu_ps(&f32_src2[i]);
t2 = _mm_add_ps(t2, _mm_mul_ps(c2, s2));
}
(*dest) = mm_hadd_all_ps(_mm_add_ps(t1, t2));
}
private:
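/**
* Horizontally sums the four packed single-precision lanes;
* e.g. {a, b, c, d} -> (a + b + c + d).
*/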
static float mm_hadd_all_ps(const __m128 &m) CXXPH_NOEXCEPT
{
CXXPH_ALIGNAS(16) float tmp[4];
_mm_store_ps(&tmp[0], m);
return (tmp[0] + tmp[1] + tmp[2] + tmp[3]);
}
};
} // namespace resampler
} // namespace cxxdasp
#endif // CXXPH_COMPILER_SUPPORTS_X86_SSE
#endif // CXXDASP_RESAMPLER_F32_MONO_SSE_HALFBAND_X2_RESAMPLER_CORE_OPERATOR_HPP_
|
apache-2.0
|
mgj/MvvmCross-Dreams
|
Dreams.Core/ViewModels/First/FirstViewModel.cs
|
1275
|
using Dreams.Core.ViewModels.Common;
using Dreams.Core.ViewModels.Second;
using MvvmCross.Core.ViewModels;
namespace Dreams.Core.ViewModels.First
{
/// <summary>
/// Purpose: To show how to do simple data binding, as well
/// as navigation between viewmodels (which can include
/// passing data)
/// </summary>
public class FirstViewModel
: DreamsViewModelBase
{
private string _hello = string.Empty;
public FirstViewModel() : base()
{
}
public string Hello
{
get
{
return _hello;
}
set
{
SetProperty(ref _hello, value, "Hello");
Log.Log("Setting Hello to: " + value);
}
}
private MvxCommand _showSecondCommand;
public MvxCommand ShowSecondCommand
{
get
{
_showSecondCommand = _showSecondCommand ?? new MvxCommand(DoShowSecondCommand);
return _showSecondCommand;
}
}
private void DoShowSecondCommand()
{
ShowViewModel<SecondViewModel>(new SecondViewModelBundle() { Data = "Hello from FirstView - " + Hello });
}
}
}
|
apache-2.0
|
openslack/openslack-crawler
|
examples/tutorial/tutorial/misc/proxy.py
|
11654
|
'''
PROXIES = [
{"ip_port":"218.94.149.114:8080"},
{"ip_port":"202.51.120.58:8080"},
{"ip_port":"180.244.214.180:8080"},
{"ip_port":"186.216.160.147:8080"},
{"ip_port":"61.153.149.205:8080"},
{"ip_port":"221.176.214.246:8080"},
{"ip_port":"59.172.208.186:8080"},
{"ip_port":"61.167.49.188:8080"},
{"ip_port":"118.98.35.251:8080"},
{"ip_port":"221.195.42.195:8080"},
{"ip_port":"222.89.55.123:8080"},
{"ip_port":"222.124.35.116:8080"},
{"ip_port":"219.159.105.180:8080"},
{"ip_port":"200.27.114.228:8080"},
{"ip_port":"120.35.31.101:8080"},
{"ip_port":"222.124.147.105:8080"},
{"ip_port":"222.169.15.234:8080"},
{"ip_port":"200.111.115.173:8080"},
{"ip_port":"195.178.34.206:8080"},
{"ip_port":"78.134.255.41:8080"},
{"ip_port":"78.134.255.42:8080"},
{"ip_port":"190.255.58.244:8080"},
{"ip_port":"186.215.231.211:8080"},
{"ip_port":"122.225.22.22:8080"},
{"ip_port":"190.151.111.202:8080"},
{"ip_port":"58.83.224.217:8080"},
{"ip_port":"186.194.7.185:8080"},
{"ip_port":"118.97.209.218:8080"},
{"ip_port":"180.211.180.194:8080"},
{"ip_port":"88.85.125.78:8080"},
{"ip_port":"88.85.108.16:8080"},
{"ip_port":"110.139.60.228:8080"},
{"ip_port":"190.85.37.90:8080"},
{"ip_port":"116.66.204.50:8080"},
{"ip_port":"118.97.103.82:8080"},
{"ip_port":"122.0.66.102:8080"},
{"ip_port":"190.111.17.161:8080"},
{"ip_port":"180.139.91.27:8080"},
{"ip_port":"218.16.145.109:8080"},
{"ip_port":"189.114.111.190:8080"},
{"ip_port":"222.83.160.45:8080"},
{"ip_port":"119.235.26.129:8080"},
{"ip_port":"190.108.83.21:8080"},
{"ip_port":"217.146.208.162:8080"},
{"ip_port":"190.111.121.57:8080"},
{"ip_port":"190.0.50.38:8080"},
{"ip_port":"109.123.126.253:8080"},
{"ip_port":"190.96.64.234:8080"},
{"ip_port":"84.41.108.74:8080"},
{"ip_port":"189.90.127.24:8080"},
{"ip_port":"202.51.113.81:8080"},
{"ip_port":"2.185.108.83:8080"},
{"ip_port":"41.78.239.194:8080"},
{"ip_port":"41.78.103.42:8080"},
{"ip_port":"41.203.89.186:8080"},
{"ip_port":"218.25.15.19:8080"},
{"ip_port":"222.169.11.234:8080"},
{"ip_port":"41.203.92.50:8080"},
{"ip_port":"221.7.159.224:8080"},
{"ip_port":"59.172.208.189:8080"},
{"ip_port":"72.64.146.135:8080"},
{"ip_port":"77.38.171.87:8080"},
{"ip_port":"80.78.65.186:8080"},
{"ip_port":"80.90.27.60:8080"},
{"ip_port":"222.134.74.30:8080"},
{"ip_port":"85.185.157.15:8080"},
{"ip_port":"89.251.103.130:8080"},
{"ip_port":"91.144.44.65:8080"},
{"ip_port":"94.56.129.24:8080"},
{"ip_port":"95.215.48.146:8080"},
{"ip_port":"115.124.74.14:8080"},
{"ip_port":"117.41.182.188:8080"},
{"ip_port":"118.97.16.106:8080"},
{"ip_port":"119.254.90.18:8080"},
{"ip_port":"124.81.113.183:8080"},
{"ip_port":"124.195.124.202:8080"},
{"ip_port":"125.39.238.240:8080"},
{"ip_port":"125.69.132.100:8080"},
{"ip_port":"173.213.108.111:8080"},
{"ip_port":"182.23.41.90:8080"},
{"ip_port":"186.4.110.36:8080"},
{"ip_port":"186.38.35.74:8080"},
{"ip_port":"186.215.202.163:8080"},
{"ip_port":"187.72.138.99:8080"},
{"ip_port":"188.93.20.179:8080"},
{"ip_port":"188.254.250.115:8080"},
{"ip_port":"190.0.52.214:8080"},
{"ip_port":"41.190.16.17:8080"},
{"ip_port":"190.0.57.98:8080"},
{"ip_port":"190.0.58.58:8080"},
{"ip_port":"190.85.55.187:8080"},
{"ip_port":"190.116.87.4:8080"},
{"ip_port":"190.144.162.238:8080"},
{"ip_port":"190.248.129.62:8080"},
{"ip_port":"193.107.168.26:8080"},
{"ip_port":"195.3.254.159:8080"},
{"ip_port":"200.5.113.202:8080"},
{"ip_port":"200.195.131.76:8080"},
{"ip_port":"200.195.136.150:8080"},
{"ip_port":"200.195.176.77:8080"},
{"ip_port":"200.196.51.130:8080"},
{"ip_port":"202.69.102.243:8080"},
{"ip_port":"202.77.111.74:8080"},
{"ip_port":"202.97.159.227:8080"},
{"ip_port":"202.148.26.114:8080"},
{"ip_port":"202.162.219.122:8080"},
{"ip_port":"203.185.128.75:8080"},
{"ip_port":"213.192.60.99:8080"},
{"ip_port":"213.131.41.6:8080"},
{"ip_port":"213.222.148.141:8080"},
{"ip_port":"93.114.61.245:8080"},
{"ip_port":"218.15.164.131:8080"},
{"ip_port":"219.83.71.250:8080"},
{"ip_port":"219.83.100.195:8080"},
{"ip_port":"219.159.198.57:8080"},
{"ip_port":"180.96.19.25:8080"},
{"ip_port":"186.153.120.42:8080"},
{"ip_port":"220.113.5.198:8080"},
{"ip_port":"146.219.18.10:80"},
{"ip_port":"79.170.50.25:80"},
{"ip_port":"112.25.12.37:80"},
{"ip_port":"201.2.240.54:80"},
{"ip_port":"211.141.73.219:80"},
{"ip_port":"186.201.231.178:80"},
{"ip_port":"218.6.13.35:80"},
{"ip_port":"221.194.179.39:80"},
{"ip_port":"221.194.179.40:80"},
{"ip_port":"200.66.85.215:80"},
{"ip_port":"72.247.26.145:80"},
{"ip_port":"222.89.226.18:80"},
{"ip_port":"198.106.123.35:80"},
{"ip_port":"200.27.114.228:80"},
{"ip_port":"222.240.224.131:80"},
{"ip_port":"123.50.56.206:80"},
{"ip_port":"187.109.56.102:80"},
{"ip_port":"59.57.15.71:80"},
{"ip_port":"103.10.56.11:80"},
{"ip_port":"61.135.208.184:80"},
{"ip_port":"61.157.217.31:80"},
{"ip_port":"88.41.153.22:80"},
{"ip_port":"212.33.200.174:80"},
{"ip_port":"202.115.207.25:80"},
{"ip_port":"95.172.68.150:80"},
{"ip_port":"116.236.205.100:80"},
{"ip_port":"119.97.146.152:80"},
{"ip_port":"221.12.89.189:80"},
{"ip_port":"198.106.147.36:80"},
{"ip_port":"112.25.12.39:80"},
{"ip_port":"112.25.12.36:80"},
{"ip_port":"122.72.0.227:80"},
{"ip_port":"122.56.15.70:80"},
{"ip_port":"210.19.83.197:80"},
{"ip_port":"122.72.2.200:80"},
{"ip_port":"202.3.247.118:80"},
{"ip_port":"202.103.215.199:80"},
{"ip_port":"202.105.233.40:80"},
{"ip_port":"203.20.238.21:80"},
{"ip_port":"210.19.83.202:80"},
{"ip_port":"220.248.162.130:80"},
{"ip_port":"221.7.228.138:80"},
{"ip_port":"222.165.175.118:80"},
{"ip_port":"130.94.148.99:80"},
{"ip_port":"219.239.66.253:80"},
{"ip_port":"119.46.68.228:80"},
{"ip_port":"124.158.18.230:80"},
{"ip_port":"59.46.173.75:80"},
{"ip_port":"210.13.71.76:80"},
{"ip_port":"61.234.169.138:80"},
{"ip_port":"219.153.71.171:80"},
{"ip_port":"210.13.71.77:80"},
{"ip_port":"80.58.29.174:80"},
{"ip_port":"80.58.250.68:80"},
{"ip_port":"211.154.83.35:80"},
{"ip_port":"222.92.116.10:80"},
{"ip_port":"202.99.213.83:80"},
{"ip_port":"211.7.242.67:80"},
{"ip_port":"64.90.0.185:80"},
{"ip_port":"64.209.134.133:80"},
{"ip_port":"66.83.34.50:80"},
{"ip_port":"101.44.1.22:80"},
{"ip_port":"101.44.1.26:80"},
{"ip_port":"206.180.107.103:80"},
{"ip_port":"101.44.1.23:80"},
{"ip_port":"117.211.123.62:80"},
{"ip_port":"203.77.192.92:80"},
{"ip_port":"119.6.73.235:80"},
{"ip_port":"119.233.255.51:80"},
{"ip_port":"119.233.255.60:80"},
{"ip_port":"122.72.20.124:80"},
{"ip_port":"122.72.20.125:80"},
{"ip_port":"122.72.20.126:80"},
{"ip_port":"122.72.20.127:80"},
{"ip_port":"122.72.112.148:80"},
{"ip_port":"122.72.112.166:80"},
{"ip_port":"123.139.155.104:80"},
{"ip_port":"123.139.155.106:80"},
{"ip_port":"124.205.178.62:80"},
{"ip_port":"196.4.89.15:80"},
{"ip_port":"183.95.132.76:80"},
{"ip_port":"200.43.29.2:80"},
{"ip_port":"195.122.135.117:80"},
{"ip_port":"122.252.60.10:80"},
{"ip_port":"194.159.14.158:80"},
{"ip_port":"202.186.33.164:80"},
{"ip_port":"212.142.138.130:80"},
{"ip_port":"221.176.168.178:80"},
{"ip_port":"206.224.254.17:80"},
{"ip_port":"207.144.99.112:80"},
{"ip_port":"195.243.192.198:80"},
{"ip_port":"211.5.227.122:80"},
{"ip_port":"61.187.64.20:80"},
{"ip_port":"167.192.8.9:80"},
{"ip_port":"72.167.162.209:80"},
{"ip_port":"174.36.27.137:80"},
{"ip_port":"202.108.50.72:80"},
{"ip_port":"203.171.227.115:80"},
{"ip_port":"211.154.83.37:80"},
{"ip_port":"211.161.152.98:80"},
{"ip_port":"211.161.152.100:80"},
{"ip_port":"211.161.152.105:80"},
{"ip_port":"211.161.152.108:80"},
{"ip_port":"211.161.152.107:80"},
{"ip_port":"211.161.152.109:80"},
{"ip_port":"97.87.24.113:80"},
{"ip_port":"211.162.121.182:80"},
{"ip_port":"173.201.41.198:80"},
{"ip_port":"200.54.92.187:80"},
{"ip_port":"211.167.112.15:80"},
{"ip_port":"213.222.148.141:80"},
{"ip_port":"115.236.98.109:80"},
{"ip_port":"218.21.64.5:80"},
{"ip_port":"186.136.72.41:80"},
{"ip_port":"218.21.91.214:80"},
{"ip_port":"217.218.98.13:80"},
{"ip_port":"217.218.98.16:80"},
{"ip_port":"218.23.49.155:80"},
{"ip_port":"112.25.12.38:80"},
{"ip_port":"218.29.54.105:80"},
{"ip_port":"218.247.138.40:80"},
{"ip_port":"219.234.130.38:80"},
{"ip_port":"219.234.130.39:80"},
{"ip_port":"220.195.192.172:80"},
{"ip_port":"221.130.7.232:80"},
{"ip_port":"222.141.199.150:80"},
{"ip_port":"92.39.54.161:80"},
{"ip_port":"221.130.162.48:81"},
{"ip_port":"221.194.177.162:81"},
{"ip_port":"210.14.70.21:81"},
{"ip_port":"187.63.15.61:3128"},
{"ip_port":"125.88.75.151:3128"},
{"ip_port":"200.153.191.213:3128"},
{"ip_port":"118.96.31.91:3128"},
{"ip_port":"118.97.208.194:3128"},
{"ip_port":"203.114.112.101:3128"},
{"ip_port":"219.130.39.9:3128"},
{"ip_port":"212.93.195.229:3128"},
{"ip_port":"113.53.240.90:3128"},
{"ip_port":"200.153.191.224:3128"},
{"ip_port":"201.238.150.239:3128"},
{"ip_port":"60.191.220.241:3128"},
{"ip_port":"119.2.3.222:3128"},
{"ip_port":"201.238.227.202:3128"},
{"ip_port":"122.194.119.156:3128"},
{"ip_port":"202.183.155.171:3128"},
{"ip_port":"78.39.68.145:3128"},
{"ip_port":"187.0.222.167:3128"},
{"ip_port":"190.85.37.90:3128"},
{"ip_port":"201.64.254.228:3128"},
{"ip_port":"222.165.175.118:3128"},
{"ip_port":"41.75.201.146:3128"},
{"ip_port":"177.19.208.138:3128"},
{"ip_port":"189.80.149.98:3128"},
{"ip_port":"202.182.172.2:3128"},
{"ip_port":"72.64.146.135:3128"},
{"ip_port":"72.64.146.136:3128"},
{"ip_port":"195.158.108.84:3128"},
{"ip_port":"41.89.130.2:3128"},
{"ip_port":"49.212.161.19:3128"},
{"ip_port":"49.248.103.28:3128"},
{"ip_port":"60.190.129.52:3128"},
{"ip_port":"61.7.252.67:3128"},
{"ip_port":"118.96.153.64:3128"},
{"ip_port":"122.3.237.161:3128"},
{"ip_port":"122.113.28.52:3128"},
{"ip_port":"173.213.108.111:3128"},
{"ip_port":"182.253.17.130:3128"},
{"ip_port":"186.42.198.234:3128"},
{"ip_port":"186.113.26.34:3128"},
{"ip_port":"186.113.26.35:3128"},
{"ip_port":"186.113.26.36:3128"},
{"ip_port":"186.113.26.37:3128"},
{"ip_port":"218.14.227.197:3128"},
{"ip_port":"189.91.223.42:3128"},
{"ip_port":"190.128.138.114:3128"},
{"ip_port":"190.158.248.250:3128"},
{"ip_port":"190.196.19.107:3128"},
{"ip_port":"200.27.183.100:3128"},
{"ip_port":"200.153.150.142:3128"},
{"ip_port":"200.166.194.135:3128"},
{"ip_port":"202.47.88.65:3128"},
{"ip_port":"202.47.88.46:3128"},
{"ip_port":"202.95.129.210:3128"},
{"ip_port":"202.95.155.55:3128"},
{"ip_port":"202.138.249.50:3128"},
{"ip_port":"81.177.144.176:3128"},
{"ip_port":"211.100.49.9:3128"},
{"ip_port":"211.100.61.202:3128"},
{"ip_port":"211.100.61.204:3128"},
{"ip_port":"200.54.92.187:3128"},
{"ip_port":"213.197.81.50:3128"},
{"ip_port":"186.201.27.66:3128"},
{"ip_port":"218.29.131.11:3128"},
{"ip_port":"218.84.126.82:3128"},
{"ip_port":"59.37.163.156:3128"},
{"ip_port":"221.2.228.202:8000"},
{"ip_port":"81.201.60.208:1080"},
{"ip_port":"59.124.175.83:444"},
]
'''
PROXIES = [
{"ip_port": "127.0.0.1:8087"},
]
|
apache-2.0
|
leopardoooo/cambodia
|
ycsoft-lib/src/main/java/com/ycsoft/beans/device/RDeviceProcure.java
|
3057
|
/**
* RDeviceProcure.java 2010/09/06
*/
package com.ycsoft.beans.device;
import java.io.Serializable;
import java.util.Date;
import com.ycsoft.commons.store.MemoryDict;
import com.ycsoft.daos.config.POJO;
/**
* RDeviceProcure -> R_DEVICE_PROCURE mapping
*/
@POJO(
tn="R_DEVICE_PROCURE",
sn="",
pk="")
public class RDeviceProcure implements Serializable {
// RDeviceProcure all properties
/**
* Serial version UID
*/
private static final long serialVersionUID = 8161376910470688227L;
private Integer device_done_code ;
private String procure_no;
private String depot_id ;
private String procure_dept ;
private String procurer ;
private String procure_type ;
private String doc_type ;
private String doc_no ;
private String optr_id ;
private Date create_time ;
private String remark ;
private String procure_type_text;
/**
* default empty constructor
*/
public RDeviceProcure() {}
// device_done_code getter and setter
public Integer getDevice_done_code(){
return device_done_code ;
}
public void setDevice_done_code(Integer device_done_code){
this.device_done_code = device_done_code ;
}
// depot_id getter and setter
public String getDepot_id(){
return depot_id ;
}
public void setDepot_id(String depot_id){
this.depot_id = depot_id ;
}
// procure_dept getter and setter
public String getProcure_dept(){
return procure_dept ;
}
public void setProcure_dept(String procure_dept){
this.procure_dept = procure_dept ;
}
// procurer getter and setter
public String getProcurer(){
return procurer ;
}
public void setProcurer(String procurer){
this.procurer = procurer ;
}
// procure_type getter and setter
public String getProcure_type(){
return procure_type ;
}
public void setProcure_type(String procure_type){
procure_type_text = MemoryDict.getDictName("DEPOT_BUY_MODE", procure_type);
this.procure_type = procure_type ;
}
// doc_type getter and setter
public String getDoc_type(){
return doc_type ;
}
public void setDoc_type(String doc_type){
this.doc_type = doc_type ;
}
// doc_no getter and setter
public String getDoc_no(){
return doc_no ;
}
public void setDoc_no(String doc_no){
this.doc_no = doc_no ;
}
// optr_id getter and setter
public String getOptr_id(){
return optr_id ;
}
public void setOptr_id(String optr_id){
this.optr_id = optr_id ;
}
// create_time getter and setter
public Date getCreate_time(){
return create_time ;
}
public void setCreate_time(Date create_time){
this.create_time = create_time ;
}
// remark getter and setter
public String getRemark(){
return remark ;
}
public void setRemark(String remark){
this.remark = remark ;
}
public String getProcure_type_text() {
return procure_type_text;
}
public String getProcure_no() {
return procure_no;
}
public void setProcure_no(String procure_no) {
this.procure_no = procure_no;
}
}
|
apache-2.0
|
theflofly/tensorflow
|
tensorflow/core/graph/graph.cc
|
26621
|
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/graph/graph.h"
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/while_context.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
const int Graph::kControlSlot = -1;
struct NodeProperties {
public:
NodeProperties(const OpDef* op_def, const NodeDef& node_def,
const DataTypeSlice inputs, const DataTypeSlice outputs)
: op_def(op_def),
node_def(node_def),
input_types(inputs.begin(), inputs.end()),
output_types(outputs.begin(), outputs.end()) {}
const OpDef* op_def; // not owned
NodeDef node_def;
const DataTypeVector input_types;
const DataTypeVector output_types;
};
// Node
#define REF_CLASS(key, value) \
{key, value}, { "Ref" key, value }
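// e.g. REF_CLASS("Switch", NC_SWITCH) expands to entries for both "Switch"
// and "RefSwitch".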
const std::unordered_map<string, Node::NodeClass>& Node::kNodeClassTable =
*new std::unordered_map<string, Node::NodeClass>({
// Keep in same order as NodeClass values
REF_CLASS("Switch", NC_SWITCH),
REF_CLASS("Merge", NC_MERGE),
REF_CLASS("Enter", NC_ENTER),
REF_CLASS("Exit", NC_EXIT),
REF_CLASS("NextIteration", NC_NEXT_ITERATION),
{"LoopCond", NC_LOOP_COND},
{"ControlTrigger", NC_CONTROL_TRIGGER},
{"_Send", NC_SEND},
{"_HostSend", NC_HOST_SEND},
{"_Recv", NC_RECV},
{"_HostRecv", NC_HOST_RECV},
{"Const", NC_CONSTANT},
{"HostConst", NC_CONSTANT},
{"Variable", NC_VARIABLE},
{"VariableV2", NC_VARIABLE},
REF_CLASS("Identity", NC_IDENTITY),
{"GetSessionHandle", NC_GET_SESSION_HANDLE},
{"GetSessionHandleV2", NC_GET_SESSION_HANDLE},
{"GetSessionTensor", NC_GET_SESSION_TENSOR},
{"DeleteSessionTensor", NC_DELETE_SESSION_TENSOR},
{"Size", NC_METADATA},
{"Shape", NC_METADATA},
{"Rank", NC_METADATA},
{"_ScopedAllocator", NC_SCOPED_ALLOCATOR},
{"CollectiveReduce", NC_COLLECTIVE},
{"CollectiveBcastSend", NC_COLLECTIVE},
{"CollectiveBcastRecv", NC_COLLECTIVE},
{"FakeParam", NC_FAKE_PARAM},
{"PartitionedCall", NC_PARTITIONED_CALL},
{"StatefulPartitionedCall", NC_PARTITIONED_CALL},
});
#undef REF_CLASS
Node::NodeClass Node::GetNodeClassForOp(const string& ts) {
auto it = kNodeClassTable.find(ts);
if (it != kNodeClassTable.end()) {
return it->second;
} else {
return NC_OTHER;
}
}
string Node::DebugString() const {
string ret = strings::StrCat("{name:'", name(), "' id:", id_);
if (IsSource()) {
strings::StrAppend(&ret, " source}");
} else if (IsSink()) {
strings::StrAppend(&ret, " sink}");
} else {
strings::StrAppend(&ret, " op device:");
strings::StrAppend(&ret, "{", assigned_device_name(), "}");
strings::StrAppend(&ret, " def:{", SummarizeNode(*this), "}}");
}
return ret;
}
Node::Node()
: id_(-1),
cost_id_(-1),
class_(NC_UNINITIALIZED),
props_(nullptr),
assigned_device_name_index_(0),
while_ctx_(nullptr) {}
void Node::Initialize(int id, int cost_id,
std::shared_ptr<NodeProperties> props) {
DCHECK_EQ(id_, -1);
DCHECK(in_edges_.empty());
DCHECK(out_edges_.empty());
id_ = id;
cost_id_ = cost_id;
props_ = std::move(props);
// Initialize the class_ based on the type string
class_ = GetNodeClassForOp(props_->node_def.op());
}
void Node::Clear() {
in_edges_.clear();
out_edges_.clear();
id_ = -1;
cost_id_ = -1;
class_ = NC_UNINITIALIZED;
props_.reset();
assigned_device_name_index_ = 0;
}
void Node::UpdateProperties() {
DataTypeVector inputs;
DataTypeVector outputs;
Status status =
InOutTypesForNode(props_->node_def, *(props_->op_def), &inputs, &outputs);
if (!status.ok()) {
LOG(ERROR) << "Failed at updating node: " << status;
return;
}
props_ = std::make_shared<NodeProperties>(props_->op_def, props_->node_def,
inputs, outputs);
}
const string& Node::name() const { return props_->node_def.name(); }
const string& Node::type_string() const { return props_->node_def.op(); }
const NodeDef& Node::def() const { return props_->node_def; }
const OpDef& Node::op_def() const { return *props_->op_def; }
int32 Node::num_inputs() const { return props_->input_types.size(); }
DataType Node::input_type(int32 i) const { return props_->input_types[i]; }
const DataTypeVector& Node::input_types() const { return props_->input_types; }
int32 Node::num_outputs() const { return props_->output_types.size(); }
DataType Node::output_type(int32 o) const { return props_->output_types[o]; }
const DataTypeVector& Node::output_types() const {
return props_->output_types;
}
AttrSlice Node::attrs() const { return AttrSlice(def()); }
const protobuf::RepeatedPtrField<string>& Node::requested_inputs() const {
return def().input();
}
const string& Node::requested_device() const { return def().device(); }
gtl::iterator_range<NeighborIter> Node::out_nodes() const {
return gtl::make_range(NeighborIter(out_edges_.begin(), false),
NeighborIter(out_edges_.end(), false));
}
gtl::iterator_range<NeighborIter> Node::in_nodes() const {
return gtl::make_range(NeighborIter(in_edges_.begin(), true),
NeighborIter(in_edges_.end(), true));
}
void Node::MaybeCopyOnWrite() {
// NodeProperties may be shared between Nodes. Make a copy if so.
if (!props_.unique()) {
props_ = std::make_shared<NodeProperties>(*props_);
}
}
AttrValue* Node::AddAttrHelper(const string& name) {
MaybeCopyOnWrite();
return &((*props_->node_def.mutable_attr())[name]);
}
void Node::ClearAttr(const string& name) {
MaybeCopyOnWrite();
(*props_->node_def.mutable_attr()).erase(name);
}
void Node::set_name(string name) {
MaybeCopyOnWrite();
props_->node_def.set_name(std::move(name));
}
void Node::set_requested_device(const string& device) {
MaybeCopyOnWrite();
props_->node_def.set_device(device);
}
void Node::set_original_node_names(const std::vector<string>& names) {
MaybeCopyOnWrite();
props_->node_def.mutable_experimental_debug_info()
->clear_original_node_names();
if (!names.empty()) {
*props_->node_def.mutable_experimental_debug_info()
->mutable_original_node_names() = {names.begin(), names.end()};
}
}
Status Node::input_edge(int idx, const Edge** e) const {
if (idx < 0 || idx >= num_inputs()) {
return errors::InvalidArgument("Invalid input_edge index: ", idx, ", Node ",
name(), " only has ", num_inputs(),
" inputs.");
}
// This does a linear search over the edges. In the common case,
// the number of elements is small enough that this search isn't
// expensive. Should it become a bottleneck, one can make an
// optimization where, if the number of edges is small, we use
// linear iteration, and if the number of edges is large, we perform
// an indexing step during construction that keeps an array of Edges
// indexed by pointer. This would keep the size of each Node small
// in the common case but make this function faster when the number
// of edges is large.
for (const Edge* edge : in_edges()) {
if (edge->dst_input() == idx) {
*e = edge;
return Status::OK();
}
}
return errors::NotFound("Could not find input edge ", idx, " for ", name());
}
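// Illustrative sketch (not part of the original source) of the indexing
// optimization described in the comment above: build a dst_input -> Edge*
// table once, then answer input_edge() lookups in constant time.
//
//   std::vector<const Edge*> by_slot(node->num_inputs(), nullptr);
//   for (const Edge* e : node->in_edges()) {
//     if (!e->IsControlEdge()) by_slot[e->dst_input()] = e;
//   }
//   const Edge* e = by_slot[idx];  // O(1) instead of a linear scan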
// Returns a vector of the non-control input edges to a node, indexed by the
// destination input slot (dst_input).
Status Node::input_edges(std::vector<const Edge*>* input_edges) const {
input_edges->clear();
input_edges->resize(num_inputs(), nullptr);
for (const Edge* edge : in_edges()) {
if (edge->IsControlEdge()) continue;
if (edge->dst_input() < 0 || edge->dst_input() >= num_inputs()) {
return errors::Internal("Invalid edge input number ", edge->dst_input());
}
if ((*input_edges)[edge->dst_input()] != nullptr) {
return errors::Internal("Duplicate edge input number: ",
edge->dst_input());
}
(*input_edges)[edge->dst_input()] = edge;
}
for (int i = 0; i < num_inputs(); ++i) {
if ((*input_edges)[i] == nullptr) {
return errors::InvalidArgument("Missing edge input number: ", i);
}
}
return Status::OK();
}
Status Node::input_node(int idx, Node** n) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
if (e == nullptr) {
*n = nullptr;
} else {
*n = e->src();
}
return Status::OK();
}
Status Node::input_node(int idx, const Node** const_n) const {
Node* n;
TF_RETURN_IF_ERROR(input_node(idx, &n));
*const_n = n;
return Status::OK();
}
Status Node::input_tensor(int idx, OutputTensor* t) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
DCHECK(e != nullptr);
*t = OutputTensor(e->src(), e->src_output());
return Status::OK();
}
// NodeDebugInfo
NodeDebugInfo::NodeDebugInfo(const Node& n) : NodeDebugInfo(n.def()) {}
NodeDebugInfo::NodeDebugInfo(const NodeDef& ndef) : name(ndef.name()) {
if (ndef.has_experimental_debug_info()) {
const auto& names = ndef.experimental_debug_info().original_node_names();
original_node_names.assign(names.begin(), names.end());
}
}
// InputTensor
bool InputTensor::operator==(const InputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 InputTensor::Hash::operator()(InputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
// OutputTensor
bool OutputTensor::operator==(const OutputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 OutputTensor::Hash::operator()(OutputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
// Graph
Graph::Graph(const OpRegistryInterface* ops)
: ops_(ops, FunctionDefLibrary()),
versions_(new VersionDef),
arena_(8 << 10 /* 8kB */) {
versions_->set_producer(TF_GRAPH_DEF_VERSION);
versions_->set_min_consumer(TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
// Initialize the name interning table for assigned_device_name.
device_names_.push_back("");
DCHECK_EQ(0, InternDeviceName(""));
// Source and sink have no endpoints, just control edges.
NodeDef def;
def.set_name("_SOURCE");
def.set_op("NoOp");
Status status;
Node* source = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(source->id(), kSourceId);
def.set_name("_SINK");
Node* sink = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(sink->id(), kSinkId);
AddControlEdge(source, sink);
}
Graph::Graph(const FunctionLibraryDefinition& flib_def)
: Graph(flib_def.default_registry()) {
// Need a new-enough consumer to support the functions we add to the graph.
if (flib_def.ToProto().function_size() > 0 &&
versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
Status s = ops_.AddLibrary(flib_def);
CHECK(s.ok()) << s.error_message();
}
Graph::~Graph() {
// Manually call the destructors for all the Nodes we constructed using
// placement new.
for (Node* node : nodes_) {
if (node != nullptr) {
node->~Node();
}
}
for (Node* node : free_nodes_) {
node->~Node();
}
// Edges have no destructor, and we arena-allocated them, so no need to
// destroy them.
}
const VersionDef& Graph::versions() const { return *versions_; }
void Graph::set_versions(const VersionDef& versions) { *versions_ = versions; }
Node* Graph::AddNode(const NodeDef& node_def, Status* status) {
const OpDef* op_def;
status->Update(ops_.LookUpOpDef(node_def.op(), &op_def));
if (!status->ok()) return nullptr;
DataTypeVector inputs;
DataTypeVector outputs;
status->Update(InOutTypesForNode(node_def, *op_def, &inputs, &outputs));
if (!status->ok()) {
*status = AttachDef(*status, node_def);
return nullptr;
}
Node* node = AllocateNode(
std::make_shared<NodeProperties>(op_def, node_def, inputs, outputs),
nullptr);
return node;
}
Node* Graph::CopyNode(const Node* node) {
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
Node* copy = AllocateNode(node->props_, node);
copy->set_assigned_device_name(node->assigned_device_name());
// Since the OpDef of a function may be owned by the Graph that owns 'node',
// relookup the OpDef in the target graph. If it differs, then clone the
// node properties with the updated OpDef.
const OpDef* op_def;
TF_CHECK_OK(ops_.LookUpOpDef(node->type_string(), &op_def));
if (op_def != node->props_->op_def) {
copy->MaybeCopyOnWrite();
copy->props_->op_def = op_def;
}
return copy;
}
void Graph::RemoveNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
// Remove any edges involving this node.
while (!node->in_edges_.empty()) {
RemoveEdge(*node->in_edges_.begin());
}
while (!node->out_edges_.empty()) {
RemoveEdge(*node->out_edges_.begin());
}
ReleaseNode(node);
}
const Edge* Graph::AddEdge(Node* source, int x, Node* dest, int y) {
TF_DCHECK_OK(IsValidNode(source)) << source->DebugString();
TF_DCHECK_OK(IsValidNode(dest)) << dest->DebugString();
// source/sink must only be linked via control slots, and
// control slots must only be linked to control slots.
if (source == source_node() || dest == sink_node() || x == kControlSlot ||
y == kControlSlot) {
DCHECK_EQ(x, kControlSlot) << source->DebugString();
DCHECK_EQ(y, kControlSlot) << dest->DebugString();
}
Edge* e = nullptr;
if (free_edges_.empty()) {
e = new (arena_.Alloc(sizeof(Edge))) Edge; // placement new
} else {
e = free_edges_.back();
free_edges_.pop_back();
}
e->id_ = edges_.size();
e->src_ = source;
e->dst_ = dest;
e->src_output_ = x;
e->dst_input_ = y;
CHECK(source->out_edges_.insert(e).second);
CHECK(dest->in_edges_.insert(e).second);
edges_.push_back(e);
++num_edges_;
return e;
}
void Graph::RemoveEdge(const Edge* e) {
TF_DCHECK_OK(IsValidNode(e->src_)) << e->src_->DebugString();
TF_DCHECK_OK(IsValidNode(e->dst_)) << e->dst_->DebugString();
CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
CHECK_EQ(e, edges_[e->id_]);
CHECK_GT(num_edges_, 0);
edges_[e->id_] = nullptr;
Edge* del = const_cast<Edge*>(e);
del->src_ = nullptr;
del->dst_ = nullptr;
del->id_ = -1;
del->src_output_ = kControlSlot - 1;
del->dst_input_ = kControlSlot - 1;
free_edges_.push_back(del);
--num_edges_;
}
const Edge* Graph::AddControlEdge(Node* source, Node* dest,
bool allow_duplicates) {
if (!allow_duplicates) {
for (const Edge* edge : dest->in_edges()) {
if (edge->IsControlEdge() && edge->src() == source) {
// The requested edge already exists.
return nullptr;
}
}
}
// Modify dest's NodeDef if necessary.
if (!source->IsSource() && !dest->IsSink() && !allow_duplicates) {
// Check if this input is already in dest's NodeDef.
const string new_input = strings::StrCat("^", source->name());
bool input_exists = false;
for (const string& input : dest->props_->node_def.input()) {
if (input == new_input) {
input_exists = true;
break;
}
}
if (!input_exists) {
dest->MaybeCopyOnWrite();
dest->props_->node_def.add_input(new_input);
}
}
return AddEdge(source, kControlSlot, dest, kControlSlot);
}
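// Example of the resulting NodeDef encoding (hypothetical node names): for
// ordinary op nodes,
//   graph->AddControlEdge(a, b);
//   // b's NodeDef inputs now end with "^a" -- control dependencies are
//   // serialized with a '^' prefix, matching AddInput() below.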
void Graph::RemoveControlEdge(const Edge* e) {
if (!e->src_->IsSource() && !e->dst_->IsSink()) {
e->dst_->MaybeCopyOnWrite();
string e_src_name = strings::StrCat("^", e->src_->name());
auto* inputs = e->dst_->props_->node_def.mutable_input();
for (auto it = inputs->begin(); it != inputs->end(); ++it) {
if (*it == e_src_name) {
inputs->erase(it);
break;
}
}
}
RemoveEdge(e);
}
namespace {
const Edge* FindEdge(const Node* dst, int index) {
for (const Edge* e : dst->in_edges()) {
if (e->dst_input() == index) return e;
}
return nullptr;
}
} // namespace
Status Graph::UpdateEdge(Node* new_src, int new_src_index, Node* dst,
int dst_index) {
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
const Edge* e = FindEdge(dst, dst_index);
if (e == nullptr) {
return errors::InvalidArgument("Couldn't find edge to ",
FormatNodeForError(*dst));
}
RemoveEdge(e);
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
(*dst->props_->node_def.mutable_input())[dst_index] =
strings::StrCat(new_src->name(), ":", new_src_index);
return Status::OK();
}
Status Graph::AddWhileInputHack(Node* new_src, int new_src_index, Node* dst) {
if (dst->type_string() != "While") {
return errors::Internal(
"dst argument to AddWhileEdgeHack should be a While op, got: ",
dst->DebugString());
}
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
// Find the current number of data inputs. We'll add the new edge to the next
// missing data input.
int dst_index = 0;
for (const Edge* edge : dst->in_edges()) {
if (edge->IsControlEdge()) continue;
++dst_index;
}
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
dst->props_->node_def.add_input(
strings::StrCat(new_src->name(), ":", new_src_index));
return Status::OK();
}
Status Graph::AddFunctionLibrary(const FunctionDefLibrary& fdef_lib) {
// Need a new-enough consumer to support the functions we add to the graph.
if (fdef_lib.function_size() > 0 && versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddLibrary(fdef_lib);
}
namespace {
void AddInput(NodeDef* dst, StringPiece src_name, int src_slot) {
if (src_slot == Graph::kControlSlot) {
dst->add_input(strings::StrCat("^", src_name));
} else if (src_slot == 0) {
dst->add_input(src_name.data(), src_name.size());
} else {
dst->add_input(strings::StrCat(src_name, ":", src_slot));
}
}
} // namespace
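// Input-string encodings produced by AddInput() above (illustrative):
//   src_slot == Graph::kControlSlot  ->  "^src"   (control input)
//   src_slot == 0                    ->  "src"    (slot 0 is elided)
//   src_slot == 3                    ->  "src:3"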
void Graph::ToGraphDef(GraphDef* graph_def) const {
ToGraphDefSubRange(graph_def, 0);
}
GraphDef Graph::ToGraphDefDebug() const {
GraphDef ret;
ToGraphDef(&ret);
return ret;
}
void Graph::ToGraphDefSubRange(GraphDef* graph_def, int from_node_id) const {
graph_def->Clear();
*graph_def->mutable_versions() = versions();
*graph_def->mutable_library() = ops_.ToProto();
graph_def->mutable_node()->Reserve(std::max(1, num_nodes() - from_node_id));
std::vector<const Edge*>
inputs; // Construct this outside the loop for speed.
for (auto id = from_node_id; id < num_node_ids(); ++id) {
const Node* node = FindNodeId(id);
if (node == nullptr || !node->IsOp()) continue;
NodeDef* node_def = graph_def->add_node();
*node_def = node->def();
// Use the node's assigned device, if any, instead of the device requested
// in the NodeDef.
if (!node->assigned_device_name().empty()) {
node_def->set_device(node->assigned_device_name());
}
// Get the inputs for this Node. We make sure control inputs are
// after data inputs, as required by GraphDef.
inputs.clear();
inputs.resize(node->num_inputs(), nullptr);
for (const Edge* edge : node->in_edges()) {
if (edge->IsControlEdge()) {
inputs.push_back(edge);
} else {
CHECK(inputs[edge->dst_input()] == nullptr)
<< "Edge " << edge->src()->DebugString() << ":"
<< edge->dst()->DebugString() << " with dst_input "
<< edge->dst_input() << " and had pre-existing input edge "
<< inputs[edge->dst_input()]->src()->DebugString() << ":"
<< inputs[edge->dst_input()]->dst()->DebugString();
inputs[edge->dst_input()] = edge;
}
}
// Sort the control inputs for more predictable serialization.
std::sort(inputs.begin() + node->num_inputs(), inputs.end(),
[](const Edge* a, const Edge* b) -> bool {
return a->src()->name() < b->src()->name();
});
node_def->clear_input();
node_def->mutable_input()->Reserve(inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
const Edge* edge = inputs[i];
if (edge == nullptr) {
if (i < node->requested_inputs().size()) {
node_def->add_input(node->requested_inputs()[i]);
} else {
node_def->add_input("");
}
} else {
const Node* src = edge->src();
if (!src->IsOp()) continue;
AddInput(node_def, src->name(), edge->src_output());
}
}
}
}
string Graph::NewName(StringPiece prefix) {
return strings::StrCat(prefix, "/_", name_counter_++);
}
Status Graph::IsValidNode(const Node* node) const {
if (node == nullptr) {
return errors::InvalidArgument("Node is null");
}
const int id = node->id();
if (id < 0) {
return errors::InvalidArgument("node id ", id, " is less than zero");
}
if (static_cast<size_t>(id) >= nodes_.size()) {
return errors::InvalidArgument(
"node id ", id, " is >= than number of nodes in graph ", nodes_.size());
}
if (nodes_[id] != node) {
return errors::InvalidArgument("Node with id ", id,
" is different from the passed in node. "
"Does it belong to a different graph?");
}
return Status::OK();
}
Status Graph::IsValidOutputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_outputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of outputs: ", node->num_outputs(),
") does not have ", "output ", idx);
}
return Status::OK();
}
Status Graph::IsValidInputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_inputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of inputs: ", node->num_inputs(),
") does not have ", "input ", idx);
}
return Status::OK();
}
Node* Graph::AllocateNode(std::shared_ptr<NodeProperties> props,
const Node* cost_node) {
Node* node = nullptr;
if (free_nodes_.empty()) {
node = new (arena_.Alloc(sizeof(Node))) Node; // placement new
} else {
node = free_nodes_.back();
free_nodes_.pop_back();
}
node->graph_ = this;
const int id = nodes_.size();
int cost_id = cost_node ? cost_node->cost_id() : id;
node->Initialize(id, cost_id, std::move(props));
nodes_.push_back(node);
++num_nodes_;
return node;
}
void Graph::ReleaseNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
nodes_[node->id()] = nullptr;
free_nodes_.push_back(node);
--num_nodes_;
node->Clear();
}
// Ensures that 'device_name' is present in the device name table, and returns
// the index of that device name. The index is stable, and can be used in
// calls to Node::set_assigned_device_name_index().
int Graph::InternDeviceName(const string& device_name) {
// Special case, very common. Also, this allows us to use a single map
// lookup below, instead of two. The 'if (index_cell > 0)' test below
// relies on this check.
if (device_name.empty()) {
return 0;
}
int& index_cell = device_names_map_[device_name];
if (index_cell > 0) {
return index_cell;
}
const int index = device_names_map_.size();
index_cell = index;
device_names_.push_back(device_name);
return index;
}
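// Usage sketch (hypothetical device string), relying on the stable-index
// contract documented above:
//   int idx = graph->InternDeviceName("/job:worker/replica:0/task:0/cpu:0");
//   node->set_assigned_device_name_index(idx);  // each node stores a small
//                                               // int, not a string copy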
Status Graph::AddWhileContext(StringPiece frame_name,
std::vector<Node*> enter_nodes,
std::vector<Node*> exit_nodes,
OutputTensor cond_output,
std::vector<OutputTensor> body_inputs,
std::vector<OutputTensor> body_outputs,
WhileContext** result) {
auto pair = while_ctxs_.insert(std::pair<string, WhileContext>(
string(frame_name),
WhileContext(frame_name, std::move(enter_nodes), std::move(exit_nodes),
cond_output, std::move(body_inputs),
std::move(body_outputs))));
if (!pair.second) {
*result = nullptr;
return errors::InvalidArgument("WhileContext with frame name '", frame_name,
"' already exists");
}
*result = &pair.first->second;
return Status::OK();
}
std::unordered_map<string, Node*> Graph::BuildNodeNameIndex() const {
std::unordered_map<string, Node*> result;
for (Node* n : nodes()) {
result[n->name()] = n;
}
return result;
}
string Edge::DebugString() const {
return strings::Printf("[id=%d %s:%d -> %s:%d]", id_, src_->name().c_str(),
src_output_, dst_->name().c_str(), dst_input_);
}
} // namespace tensorflow
|
apache-2.0
|
gonmarques/cdi-properties
|
cdi-properties-test/cdi-properties-test-glassfish3122/src/test/java/com/byteslounge/cdi/test/it/WarProvidedLocaleMethodThreadLocalPTIT.java
|
2741
|
/*
* Copyright 2015 byteslounge.com (Gonçalo Marques).
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.byteslounge.cdi.test.it;
import java.io.IOException;
import java.net.URL;
import java.util.Locale;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.container.test.api.RunAsClient;
import org.jboss.arquillian.drone.api.annotation.Drone;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.arquillian.test.api.ArquillianResource;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import com.byteslounge.cdi.test.it.common.AbstractWarProvidedLocaleMethodThreadLocal;
import com.byteslounge.cdi.test.it.common.IntegrationTestDeploymentUtils;
import com.byteslounge.cdi.test.utils.MessageBundleUtils;
/**
* Integration Test
*
* @author Gonçalo Marques
* @since 1.1.0
*/
@RunWith(Arquillian.class)
public class WarProvidedLocaleMethodThreadLocalPTIT extends AbstractWarProvidedLocaleMethodThreadLocal {
@Drone
private WebDriver browser;
@FindBy(id = "result")
private WebElement result;
@FindBy(id = "integer")
private WebElement integer;
@Deployment
public static WebArchive createArchive() throws IOException {
WebArchive webArchive = CommonWarProvidedLocaleMethodThreadLocalPT.createArchive();
IntegrationTestDeploymentUtils.addMavenDependencies(webArchive,
"org.slf4j:slf4j-api:" + System.getProperty("slf4j-api.version"),
"org.slf4j:slf4j-jdk14:" + System.getProperty("slf4j-api.version"));
return webArchive;
}
@Test
@RunAsClient
public void test(@ArquillianResource URL contextPath) {
browser.get(contextPath + "testservlet");
Assert.assertEquals(result.getText(),
MessageBundleUtils.resolveProperty("hello.world", "bl.messages", new Locale("pt", "PT")));
Assert.assertEquals(integer.getText(),
MessageBundleUtils.resolveProperty("some.integer", "bl.messages", new Locale("pt", "PT")));
}
}
|
apache-2.0
|
idunnololz/Swapp
|
app/src/main/java/com/ocr/LuminanceSource.java
|
3990
|
/*
* Copyright 2009 ZXing authors
* Copyright 2011 Robert Theis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ocr;
/**
* The purpose of this class hierarchy is to abstract different bitmap implementations across
* platforms into a standard interface for requesting greyscale luminance values. The interface
* only provides immutable methods; therefore crop and rotation create copies. This is to ensure
* that one Reader does not modify the original luminance source and leave it in an unknown state
* for other Readers in the chain.
*
* The code for this class was adapted from the ZXing project: http://code.google.com/p/zxing
*/
public abstract class LuminanceSource {
private final int width;
private final int height;
protected LuminanceSource(int width, int height) {
this.width = width;
this.height = height;
}
/**
* Fetches one row of luminance data from the underlying platform's bitmap. Values range from
* 0 (black) to 255 (white). Because Java does not have an unsigned byte type, callers will have
* to bitwise and with 0xff for each value. It is preferable for implementations of this method
* to only fetch this row rather than the whole image, since no 2D Readers may be installed and
* getMatrix() may never be called.
*
* @param y The row to fetch, 0 <= y < getHeight().
* @param row An optional preallocated array. If null or too small, it will be ignored.
* Always use the returned object, and ignore the .length of the array.
* @return An array containing the luminance data.
*/
public abstract byte[] getRow(int y, byte[] row);
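// Consumption sketch (not part of the original): mask each byte with 0xff as
// the javadoc above requires, because Java bytes are signed:
//   byte[] row = source.getRow(y, null);
//   int luminance = row[x] & 0xff;   // 0 (black) .. 255 (white)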
/**
* Fetches luminance data for the underlying bitmap. Values should be fetched using:
* int luminance = array[y * width + x] & 0xff;
*
* @return A row-major 2D array of luminance values. Do not use result.length as it may be
* larger than width * height bytes on some platforms. Do not modify the contents
* of the result.
*/
public abstract byte[] getMatrix();
/**
* @return The width of the bitmap.
*/
public final int getWidth() {
return width;
}
/**
* @return The height of the bitmap.
*/
public final int getHeight() {
return height;
}
/**
* @return Whether this subclass supports cropping.
*/
public boolean isCropSupported() {
return true;
}
/**
* Returns a new object with cropped image data. Implementations may keep a reference to the
* original data rather than a copy. Only callable if isCropSupported() is true.
*
* @param left The left coordinate, 0 <= left < getWidth().
* @param top The top coordinate, 0 <= top < getHeight().
* @param width The width of the rectangle to crop.
* @param height The height of the rectangle to crop.
* @return A cropped version of this object.
*/
public LuminanceSource crop(int left, int top, int width, int height) {
throw new RuntimeException("This luminance source does not support cropping.");
}
/**
* @return Whether this subclass supports counter-clockwise rotation.
*/
public boolean isRotateSupported() {
return false;
}
/**
* Returns a new object with rotated image data. Only callable if isRotateSupported() is true.
*
* @return A rotated version of this object.
*/
public LuminanceSource rotateCounterClockwise() {
throw new RuntimeException("This luminance source does not support rotation.");
}
}
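// A minimal concrete subclass, shown only as an illustrative sketch (the class
// name and backing array are hypothetical, not part of the original source):
//
//   class ArrayLuminanceSource extends LuminanceSource {
//       private final byte[] luminances;  // row-major, width * height bytes
//       ArrayLuminanceSource(byte[] luminances, int width, int height) {
//           super(width, height);
//           this.luminances = luminances;
//       }
//       @Override public byte[] getRow(int y, byte[] row) {
//           if (row == null || row.length < getWidth()) row = new byte[getWidth()];
//           System.arraycopy(luminances, y * getWidth(), row, 0, getWidth());
//           return row;
//       }
//       @Override public byte[] getMatrix() { return luminances; }
//       @Override public boolean isCropSupported() { return false; }  // crop() not implemented here
//   }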
|
apache-2.0
|
hwangsyin/crm
|
py/service/customer.py
|
2782
|
import settings
import environment
from datetime import datetime
import pymongo
from service import db
# CustomerType is referenced below; assuming it lives in domains alongside Customer.
from domains import Customer, CustomerType
# Customer management
class CustomerService:
def add(self, customer):
if not customer:
return None
customer_id_cursor = db["customer"].find(None, {"_id": False, "id":True}) \
.sort("id", pymongo.DESCENDING).limit(1)
# Use 0 when the collection is empty so the first customer gets id 1.
max_customer_id = customer_id_cursor[0]["id"] if customer_id_cursor.count() > 0 else 0
customer_id = max_customer_id + 1
customer.id = customer_id
customer.start_time = datetime.now()
customer.end_time = None
db["customer"].insert(customer.doc())
return customer_id
""" 分页查询客户 """
def find_page(self, page_num = 1, page_size = settings.settings_app["page_size"], customer_spec = None):
if page_size < 1:
return None
if page_num < 1:
return None
count = db["customer"].find().count()
page_count = (count + page_size - 1) // page_size or 1  # ceiling division, at least one page
if page_count < page_num:
page_num = page_count
skip = (page_num - 1) * page_size
if count < page_size:
skip = 0
limit = page_size
spec = None if not customer_spec else customer_spec.spec()
cc = db["customer"].find(spec, {"_id": False}) \
.sort("start_time", pymongo.DESCENDING).skip(skip).limit(limit)
customer_type_map = self.__customer_type__(doc = True)
customers = []
for c_doc in cc:
c_doc["type"] = customer_type_map[c_doc["type"]]
customers.append(Customer(c_doc))
return customers
""" 根据 ID 查找客户 """
def find(self, id):
if not id:
return None
customer_doc_cursor = db["customer"].find({"id": id}, {"_id": False})
if not customer_doc_cursor or customer_doc_cursor.count() != 1:
return None
customer_doc = customer_doc_cursor[0]
customer_type_doc_cursor = db["customer_type"].find({"key": customer_doc["type"]}, {"_id": False})
if not customer_type_doc_cursor or customer_type_doc_cursor.count() != 1:
customer_doc["type"] = None
else:
customer_doc["type"] = customer_type_doc_cursor[0]
return Customer(customer_doc)
def __customer_type__(self, doc):
result = {}
customer_type_cursor = db["customer_type"].find(None, {"_id": False})
if customer_type_cursor.count() > 0:
for customer_type_doc in customer_type_cursor:
result[customer_type_doc["key"]] = customer_type_doc if doc else CustomerType(customer_type_doc)
return result
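# Usage sketch (hypothetical; assumes a populated "customer" collection):
#   service = CustomerService()
#   new_id = service.add(Customer({...}))            # auto-increments "id"
#   page = service.find_page(page_num=1, page_size=20)
#   one = service.find(new_id)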
|
apache-2.0
|
bshp/midPoint
|
model/workflow-impl/src/main/java/com/evolveum/midpoint/wf/impl/util/MiscHelper.java
|
7087
|
/*
* Copyright (c) 2010-2019 Evolveum and contributors
*
* This work is dual-licensed under the Apache License 2.0
* and European Union Public License. See LICENSE file for details.
*/
package com.evolveum.midpoint.wf.impl.util;
import com.evolveum.midpoint.model.api.ModelInteractionService;
import com.evolveum.midpoint.model.api.context.ModelContext;
import com.evolveum.midpoint.model.impl.lens.LensContext;
import com.evolveum.midpoint.prism.PrismContext;
import com.evolveum.midpoint.prism.PrismObject;
import com.evolveum.midpoint.prism.PrismObjectDefinition;
import com.evolveum.midpoint.prism.polystring.PolyString;
import com.evolveum.midpoint.provisioning.api.ProvisioningService;
import com.evolveum.midpoint.repo.api.RepositoryService;
import com.evolveum.midpoint.schema.expression.TypedValue;
import com.evolveum.midpoint.schema.result.OperationResult;
import com.evolveum.midpoint.schema.util.ApprovalContextUtil;
import com.evolveum.midpoint.schema.util.CaseTypeUtil;
import com.evolveum.midpoint.task.api.Task;
import com.evolveum.midpoint.util.exception.*;
import com.evolveum.midpoint.util.logging.LoggingUtils;
import com.evolveum.midpoint.util.logging.Trace;
import com.evolveum.midpoint.util.logging.TraceManager;
import com.evolveum.midpoint.wf.util.ApprovalUtils;
import com.evolveum.midpoint.xml.ns._public.common.common_3.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* @author mederly
*/
@Component
public class MiscHelper {
private static final Trace LOGGER = TraceManager.getTrace(MiscHelper.class);
@Autowired private ProvisioningService provisioningService;
@Autowired private PrismContext prismContext;
@Autowired private ModelInteractionService modelInteractionService;
@Autowired
@Qualifier("cacheRepositoryService")
private RepositoryService repositoryService;
public PrismObject<UserType> getRequesterIfExists(CaseType aCase, OperationResult result) {
if (aCase == null || aCase.getRequestorRef() == null) {
return null;
}
ObjectReferenceType requesterRef = aCase.getRequestorRef();
//noinspection unchecked
return (PrismObject<UserType>) resolveAndStoreObjectReference(requesterRef, result);
}
public TypedValue<PrismObject> resolveTypedObjectReference(ObjectReferenceType ref, OperationResult result) {
PrismObject resolvedObject = resolveObjectReference(ref, false, result);
if (resolvedObject == null) {
PrismObjectDefinition<ObjectType> def = prismContext.getSchemaRegistry().findObjectDefinitionByCompileTimeClass(ObjectType.class);
return new TypedValue<>(null, def);
} else {
return new TypedValue<>(resolvedObject);
}
}
public String getCompleteStageInfo(CaseType aCase) {
return ApprovalContextUtil.getCompleteStageInfo(aCase);
}
public String getAnswerNice(CaseType aCase) {
if (CaseTypeUtil.isApprovalCase(aCase)) {
return ApprovalUtils.makeNiceFromUri(getOutcome(aCase));
} else {
return getOutcome(aCase);
}
}
private String getOutcome(CaseType aCase) {
return aCase.getApprovalContext() != null ? aCase.getOutcome() : null;
}
public List<ObjectReferenceType> getAssigneesAndDeputies(CaseWorkItemType workItem, Task task, OperationResult result)
throws SchemaException {
List<ObjectReferenceType> rv = new ArrayList<>();
rv.addAll(workItem.getAssigneeRef());
rv.addAll(modelInteractionService.getDeputyAssignees(workItem, task, result));
return rv;
}
public List<CaseType> getSubcases(CaseType rootCase, OperationResult result) throws SchemaException {
return getSubcases(rootCase.getOid(), result);
}
public List<CaseType> getSubcases(String oid, OperationResult result) throws SchemaException {
return repositoryService.searchObjects(CaseType.class,
prismContext.queryFor(CaseType.class)
.item(CaseType.F_PARENT_REF).ref(oid)
.build(),
null,
result)
.stream()
.map(o -> o.asObjectable())
.collect(Collectors.toList());
}
public ModelContext getModelContext(CaseType aCase, Task task, OperationResult result) throws SchemaException,
ConfigurationException, ObjectNotFoundException, CommunicationException, ExpressionEvaluationException {
LensContextType modelContextType = aCase.getModelContext();
if (modelContextType == null) {
return null;
}
return LensContext.fromLensContextType(modelContextType, prismContext, provisioningService, task, result);
}
public PrismObject resolveObjectReference(ObjectReferenceType ref, OperationResult result) {
return resolveObjectReference(ref, false, result);
}
public PrismObject resolveAndStoreObjectReference(ObjectReferenceType ref, OperationResult result) {
return resolveObjectReference(ref, true, result);
}
private PrismObject resolveObjectReference(ObjectReferenceType ref, boolean storeBack, OperationResult result) {
if (ref == null) {
return null;
}
if (ref.asReferenceValue().getObject() != null) {
return ref.asReferenceValue().getObject();
}
try {
PrismObject object = repositoryService.getObject((Class) prismContext.getSchemaRegistry().getCompileTimeClass(ref.getType()), ref.getOid(), null, result);
if (storeBack) {
ref.asReferenceValue().setObject(object);
}
return object;
} catch (ObjectNotFoundException e) {
// there should be a note in result by now
LoggingUtils.logException(LOGGER, "Couldn't get reference {} details because it couldn't be found", e, ref);
return null;
} catch (SchemaException e) {
// there should be a note in result by now
LoggingUtils.logUnexpectedException(LOGGER, "Couldn't get reference {} details due to schema exception", e, ref);
return null;
}
}
public ObjectReferenceType resolveObjectReferenceName(ObjectReferenceType ref, OperationResult result) {
if (ref == null || ref.getTargetName() != null) {
return ref;
}
PrismObject<?> object;
if (ref.asReferenceValue().getObject() != null) {
object = ref.asReferenceValue().getObject();
} else {
object = resolveObjectReference(ref, result);
if (object == null) {
return ref;
}
}
ref = ref.clone();
ref.setTargetName(PolyString.toPolyStringType(object.getName()));
return ref;
}
}
|
apache-2.0
|
avenwu/leetcode
|
src/com/avenwu/leetcode/LC002ValidPalindrome.java
|
2581
|
package com.avenwu.leetcode;
/**
* Created by aven on 14-10-19.
* <p/>
* Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
* For example,
* "A man, a plan, a canal: Panama" is a palindrome.
* "race a car" is not a palindrome.
* <p/>
* Note:
* Have you consider that the string might be empty? This is a good question to ask during an interview.
* <p/>
* For the purpose of this problem, we define empty string as valid palindrome.
*/
public class LC002ValidPalindrome {
public static void main(String[] args) {
Solution solution = new Solution();
// System.out.println("a=" + (int) 'a' + ", z=" + (int) 'z' + ", A=" + (int) 'A' + ", Z=" + (int) 'Z');
System.out.println("result=" + solution.isPalindrome("A man, a plan, a canal: Panama"));
System.out.println("result=" + solution.isPalindrome("A MAN, a plan, a canal: Panama"));
System.out.println("result=" + solution.isPalindrome("race a car"));
System.out.println("result=" + solution.isPalindrome(" "));
System.out.println("result=" + solution.isPalindrome(".,."));
}
public static final class Solution {
public boolean isPalindrome(String s) {
// take care of invalid input
if (s == null || s.equals("") || s.length() == 1) return true;
int max = s.length() - 1;
for (int i = 0, j = max; i <= j; i++, j--) {
char left = s.charAt(i);
char right = s.charAt(j);
//skip characters
while (!isAlphanumeric(left) && i <= j) {
i++;
if (inRange(i, max)) {
left = s.charAt(i);
}
}
while (!isAlphanumeric(right) && i <= j) {
j--;
if (inRange(j, max)) {
right = s.charAt(j);
}
}
// check equals
if (toLowercase(left) != toLowercase(right) && i <= j) {
return false;
}
}
return true;
}
public boolean inRange(int i, int max) {
return i >= 0 && i <= max;
}
public boolean isAlphanumeric(char c) {
return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
public char toLowercase(char c) {
return c >= 'A' && c <= 'Z' ? (char) (c + 32) : c; // fold upper case to lower case (ASCII offset 32)
}
}
}
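// For comparison only (a sketch, not the original solution): the same check
// can lean on the standard library at the cost of extra allocations:
//   boolean isPalindrome(String s) {
//       String t = s.replaceAll("[^A-Za-z0-9]", "").toLowerCase();
//       return new StringBuilder(t).reverse().toString().equals(t);
//   }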
|
apache-2.0
|
aws/aws-sdk-java
|
aws-java-sdk-servicecatalog/src/main/java/com/amazonaws/services/servicecatalog/model/transform/DescribePortfolioShareStatusRequestProtocolMarshaller.java
|
2916
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.servicecatalog.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.servicecatalog.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
* DescribePortfolioShareStatusRequest Marshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DescribePortfolioShareStatusRequestProtocolMarshaller implements
Marshaller<Request<DescribePortfolioShareStatusRequest>, DescribePortfolioShareStatusRequest> {
private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
.httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
.operationIdentifier("AWS242ServiceCatalogService.DescribePortfolioShareStatus").serviceName("AWSServiceCatalog").build();
private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;
public DescribePortfolioShareStatusRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
this.protocolFactory = protocolFactory;
}
public Request<DescribePortfolioShareStatusRequest> marshall(DescribePortfolioShareStatusRequest describePortfolioShareStatusRequest) {
if (describePortfolioShareStatusRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
final ProtocolRequestMarshaller<DescribePortfolioShareStatusRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(
SDK_OPERATION_BINDING, describePortfolioShareStatusRequest);
protocolMarshaller.startMarshalling();
DescribePortfolioShareStatusRequestMarshaller.getInstance().marshall(describePortfolioShareStatusRequest, protocolMarshaller);
return protocolMarshaller.finishMarshalling();
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
|
apache-2.0
|
joschi/jackson-datatype-threetenbp
|
src/test/java/com/fasterxml/jackson/datatype/threetenbp/ser/TestYearMonthSerializationWithCustomFormatter.java
|
2349
|
package com.fasterxml.jackson.datatype.threetenbp.ser;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.hamcrest.core.StringContains.containsString;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.datatype.threetenbp.deser.YearMonthDeserializer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.threeten.bp.YearMonth;
import org.threeten.bp.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
@RunWith(Parameterized.class)
public class TestYearMonthSerializationWithCustomFormatter {
private final DateTimeFormatter formatter;
public TestYearMonthSerializationWithCustomFormatter(DateTimeFormatter formatter) {
this.formatter = formatter;
}
@Test
public void testSerialization() throws Exception {
YearMonth dateTime = YearMonth.now();
assertThat(serializeWith(dateTime, formatter), containsString(dateTime.format(formatter)));
}
private String serializeWith(YearMonth dateTime, DateTimeFormatter f) throws Exception {
ObjectMapper mapper = new ObjectMapper().registerModule(new SimpleModule()
.addSerializer(new YearMonthSerializer(f)));
return mapper.writeValueAsString(dateTime);
}
@Test
public void testDeserialization() throws Exception {
YearMonth dateTime = YearMonth.now();
assertThat(deserializeWith(dateTime.format(formatter), formatter), equalTo(dateTime));
}
private YearMonth deserializeWith(String json, DateTimeFormatter f) throws Exception {
ObjectMapper mapper = new ObjectMapper().registerModule(new SimpleModule()
.addDeserializer(YearMonth.class, new YearMonthDeserializer(f)));
return mapper.readValue("\"" + json + "\"", YearMonth.class);
}
@Parameters
public static Collection<Object[]> customFormatters() {
Collection<Object[]> formatters = new ArrayList<>();
formatters.add(new Object[]{DateTimeFormatter.ofPattern("uuuu-MM")});
formatters.add(new Object[]{DateTimeFormatter.ofPattern("uu-M")});
return formatters;
}
}
|
apache-2.0
|
McLeodMoores/starling
|
projects/analytics/src/main/java/com/opengamma/analytics/financial/equity/EquityTrsDataBundle.java
|
3298
|
/**
* Copyright (C) 2014 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.equity;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.analytics.financial.provider.description.interestrate.MulticurveProviderInterface;
import com.opengamma.analytics.financial.provider.description.interestrate.ParameterProviderInterface;
import com.opengamma.analytics.financial.provider.sensitivity.multicurve.ForwardSensitivity;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.tuple.DoublesPair;
/**
* Data bundle with one equity price and a multi-curve provider.
*/
public class EquityTrsDataBundle implements ParameterProviderInterface {
/** The equity price **/
private final double _spotEquity;
// TODO: Should this be replaced by a map of LegalEntity/price (to be able to handle several equities in the same object)?
/** The multi-curve provider */
private final MulticurveProviderInterface _curves;
/**
* @param spotEquity
* the spot equity price
* @param curves
* discounting curves, not null
*/
public EquityTrsDataBundle(final double spotEquity, final MulticurveProviderInterface curves) {
ArgumentChecker.notNull(curves, "curves");
_spotEquity = spotEquity;
_curves = curves;
}
/**
* Gets the spot equity price.
*
* @return the spot equity price
*/
public double getSpotEquity() {
return _spotEquity;
}
/**
* Gets the curves.
*
* @return the curves
*/
public MulticurveProviderInterface getCurves() {
return _curves;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + _curves.hashCode();
long temp;
temp = Double.doubleToLongBits(_spotEquity);
result = prime * result + (int) (temp ^ temp >>> 32);
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof EquityTrsDataBundle)) {
return false;
}
final EquityTrsDataBundle other = (EquityTrsDataBundle) obj;
if (Double.compare(_spotEquity, other._spotEquity) != 0) {
return false;
}
if (!ObjectUtils.equals(_curves, other._curves)) {
return false;
}
return true;
}
@Override
public ParameterProviderInterface copy() {
final MulticurveProviderInterface multicurveProvider = _curves.copy();
return new EquityTrsDataBundle(_spotEquity, multicurveProvider);
}
@Override
public MulticurveProviderInterface getMulticurveProvider() {
return _curves.getMulticurveProvider();
}
@Override
public double[] parameterSensitivity(final String name, final List<DoublesPair> pointSensitivity) {
return _curves.parameterSensitivity(name, pointSensitivity);
}
@Override
public double[] parameterForwardSensitivity(final String name, final List<ForwardSensitivity> pointSensitivity) {
return _curves.parameterForwardSensitivity(name, pointSensitivity);
}
@Override
public Set<String> getAllCurveNames() {
return _curves.getAllCurveNames();
}
}
|
apache-2.0
|
D3-LucaPiombino/MassTransit
|
src/MassTransit.QuartzIntegration/ScheduledMessageJob.cs
|
6079
|
// Copyright 2007-2015 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.QuartzIntegration
{
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Net.Mime;
using System.Text;
using Logging;
using Newtonsoft.Json;
using Pipeline;
using Quartz;
using Util;
public class ScheduledMessageJob :
IJob
{
static readonly ILog _log = Logger.Get<ScheduledMessageJob>();
readonly IBus _bus;
public ScheduledMessageJob(IBus bus)
{
_bus = bus;
}
public string Destination { get; set; }
public string ExpirationTime { get; set; }
public string ResponseAddress { get; set; }
public string FaultAddress { get; set; }
public string Body { get; set; }
public string MessageId { get; set; }
public string MessageType { get; set; }
public string ContentType { get; set; }
public string RequestId { get; set; }
public string CorrelationId { get; set; }
public string ConversationId { get; set; }
public string InitiatorId { get; set; }
public string TokenId { get; set; }
public string HeadersAsJson { get; set; }
public void Execute(IJobExecutionContext context)
{
try
{
var destinationAddress = new Uri(Destination);
Uri sourceAddress = _bus.Address;
IPipe<SendContext> sendPipe = CreateMessageContext(sourceAddress, destinationAddress, context.Trigger.Key.Name);
ISendEndpoint endpoint = TaskUtil.Await(() => _bus.GetSendEndpoint(destinationAddress));
var scheduled = new Scheduled();
TaskUtil.Await(() => endpoint.Send(scheduled, sendPipe));
}
catch (Exception ex)
{
string message = string.Format(CultureInfo.InvariantCulture,
"An exception occurred sending message {0} to {1}", MessageType, Destination);
_log.Error(message, ex);
throw new JobExecutionException(message, ex);
}
}
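// Sketch of how this job is typically fed (hypothetical values, not from this
// file): the properties above are populated from the Quartz JobDataMap, e.g. a
// trigger built with
//   .UsingJobData("Destination", "rabbitmq://localhost/input_queue")
//   .UsingJobData("Body", serializedMessageEnvelope)
// so that Execute() can replay the captured envelope byte-for-byte through
// ScheduledBodySerializer below.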
IPipe<SendContext> CreateMessageContext(Uri sourceAddress, Uri destinationAddress, string triggerKey)
{
IPipe<SendContext> sendPipe = Pipe.New<SendContext>(x =>
{
x.UseExecute(context =>
{
context.DestinationAddress = (destinationAddress);
context.SourceAddress = (sourceAddress);
context.ResponseAddress = (ToUri(ResponseAddress));
context.FaultAddress = (ToUri(FaultAddress));
SetHeaders(context);
context.MessageId = ConvertIdToGuid(MessageId);
context.RequestId = ConvertIdToGuid(RequestId);
context.CorrelationId = ConvertIdToGuid(CorrelationId);
context.ConversationId = ConvertIdToGuid(ConversationId);
context.InitiatorId = ConvertIdToGuid(InitiatorId);
Guid? tokenId = ConvertIdToGuid(TokenId);
if (tokenId.HasValue)
{
context.Headers.Set(MessageHeaders.SchedulingTokenId, tokenId.Value.ToString("N"));
}
context.Headers.Set(MessageHeaders.QuartzTriggerKey, triggerKey);
if (!string.IsNullOrEmpty(ExpirationTime))
context.TimeToLive = DateTime.Parse(ExpirationTime) - DateTime.UtcNow; // time remaining until expiration
context.Serializer = new ScheduledBodySerializer(new ContentType(ContentType), Encoding.UTF8.GetBytes(Body));
});
});
return sendPipe;
}
static Guid? ConvertIdToGuid(string id)
{
if (string.IsNullOrWhiteSpace(id))
return default(Guid?);
Guid messageId;
if (Guid.TryParse(id, out messageId))
return messageId;
throw new FormatException("The Id was not a Guid: " + id);
}
void SetHeaders(SendContext context)
{
if (string.IsNullOrEmpty(HeadersAsJson))
return;
var headers = JsonConvert.DeserializeObject<IDictionary<string, object>>(HeadersAsJson);
foreach (var header in headers)
context.Headers.Set(header.Key, header.Value);
}
static Uri ToUri(string s)
{
if (string.IsNullOrEmpty(s))
return null;
return new Uri(s);
}
class Scheduled
{
}
class ScheduledBodySerializer :
IMessageSerializer
{
readonly byte[] _body;
public ScheduledBodySerializer(ContentType contentType, byte[] body)
{
ContentType = contentType;
_body = body;
}
public ContentType ContentType { get; }
public void Serialize<T>(Stream stream, SendContext<T> context)
where T : class
{
stream.Write(_body, 0, _body.Length);
}
}
}
}
|
apache-2.0
|
Shivam101/SachinApp-Android
|
src/com/shivamb7/sachinapp/Facts2.java
|
5112
|
package com.shivamb7.sachinapp;
import java.util.Locale;
import com.google.ads.AdRequest;
import com.google.ads.AdView;
import android.app.ActionBar;
import android.content.res.Resources;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentPagerAdapter;
import android.support.v4.app.NavUtils;
import android.support.v4.view.ViewPager;
import android.view.Gravity;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
public class Facts2 extends FragmentActivity {
/**
* The {@link android.support.v4.view.PagerAdapter} that will provide
* fragments for each of the sections. We use a
* {@link android.support.v4.app.FragmentPagerAdapter} derivative, which
* will keep every loaded fragment in memory. If this becomes too memory
* intensive, it may be best to switch to a
* {@link android.support.v4.app.FragmentStatePagerAdapter}.
*/
SectionsPagerAdapter mSectionsPagerAdapter;
/**
* The {@link ViewPager} that will host the section contents.
*/
ViewPager mViewPager;
AdView adv;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_facts2);
adv=(AdView)findViewById(R.id.displayads);
adv.loadAd(new AdRequest());
ActionBar ab=getActionBar();
Resources r=getResources();
Drawable d=r.getDrawable(R.color.playred);
ab.setBackgroundDrawable(d);
// Create the adapter that will return a fragment for each of the
// primary sections of the app.
mSectionsPagerAdapter = new SectionsPagerAdapter(
getSupportFragmentManager());
// Set up the ViewPager with the sections adapter.
mViewPager = (ViewPager) findViewById(R.id.pager);
mViewPager.setAdapter(mSectionsPagerAdapter);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.facts2, menu);
return true;
}
/**
* A {@link FragmentPagerAdapter} that returns a fragment corresponding to
* one of the sections/tabs/pages.
*/
public class SectionsPagerAdapter extends FragmentPagerAdapter {
public SectionsPagerAdapter(FragmentManager fm) {
super(fm);
}
@Override
public Fragment getItem(int position) {
// getItem is called to instantiate the fragment for the given page.
// Return a DummySectionFragment (defined as a static inner class
// below) with the page number as its lone argument.
switch (position) {
case 0:
return new FactFrag1();
case 1:
return new FactFrag2();
case 2:
return new FactFrag3();
case 3:
return new FactFrag4();
case 4:
return new FactFrag5();
case 5:
return new FactFrag6();
case 6:
return new FactFrag7();
case 7:
return new FactFrag8();
case 8:
return new FactFrag9();
case 9:
return new FactFrag10();
default:
Fragment fragment = new DummySectionFragment();
Bundle args = new Bundle();
args.putInt(DummySectionFragment.ARG_SECTION_NUMBER, position + 1);
fragment.setArguments(args);
return fragment;
}
}
@Override
public int getCount() {
// Show 10 total pages.
return 10;
}
@Override
public CharSequence getPageTitle(int position) {
Locale l = Locale.getDefault();
switch (position) {
case 0:
return getString(R.string.title_section1).toUpperCase(l);
case 1:
return getString(R.string.title_section2).toUpperCase(l);
case 2:
return getString(R.string.title_section3).toUpperCase(l);
}
return null;
}
}
/**
* A dummy fragment representing a section of the app, but that simply
* displays dummy text.
*/
public static class DummySectionFragment extends Fragment {
/**
* The fragment argument representing the section number for this
* fragment.
*/
public static final String ARG_SECTION_NUMBER = "section_number";
public DummySectionFragment() {
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
View rootView = inflater.inflate(R.layout.fragment_facts2_dummy,
container, false);
TextView dummyTextView = (TextView) rootView
.findViewById(R.id.section_label);
dummyTextView.setText(Integer.toString(getArguments().getInt(
ARG_SECTION_NUMBER)));
return rootView;
}
}
}
|
apache-2.0
|
orignMaster/osikani
|
app/src/commonTest/java/app/vfmesthack/kiss/test/common/rules/TestComponentRule.java
|
1952
|
package app.vfmesthack.kiss.test.common.rules;
import android.content.Context;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
import app.vfmesthack.kiss.BoilerplateApplication;
import app.vfmesthack.kiss.data.DataManager;
import app.vfmesthack.kiss.test.common.injection.component.DaggerTestComponent;
import app.vfmesthack.kiss.test.common.injection.component.TestComponent;
import app.vfmesthack.kiss.test.common.injection.module.ApplicationTestModule;
/**
* Test rule that creates and sets a Dagger TestComponent into the application overriding the
* existing application component.
* Use this rule in your test case in order for the app to use mock dependencies.
* It also exposes some of the dependencies so they can be easily accessed from the tests, e.g. to
* stub mocks etc.
*/
public class TestComponentRule implements TestRule {
private final TestComponent mTestComponent;
private final Context mContext;
public TestComponentRule(Context context) {
mContext = context;
BoilerplateApplication application = BoilerplateApplication.get(context);
mTestComponent = DaggerTestComponent.builder()
.applicationTestModule(new ApplicationTestModule(application))
.build();
}
public Context getContext() {
return mContext;
}
public DataManager getMockDataManager() {
return mTestComponent.dataManager();
}
@Override
public Statement apply(final Statement base, Description description) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
BoilerplateApplication application = BoilerplateApplication.get(mContext);
application.setComponent(mTestComponent);
base.evaluate();
application.setComponent(null);
}
};
}
}
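// Usage sketch in a test class (names are illustrative, not from this repo):
//   @Rule
//   public final TestComponentRule component =
//           new TestComponentRule(InstrumentationRegistry.getTargetContext());
//
//   @Test
//   public void showsUsers() {
//       when(component.getMockDataManager().getUsers()).thenReturn(Observable.just(users));
//       // ... exercise the app against the mocked DataManager ...
//   }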
|
apache-2.0
|
jaymzh/chef
|
spec/unit/train_transport_spec.rb
|
3885
|
#
# Author:: Bryan McLellan (<btm@loftninjas.org>)
# Copyright:: Copyright (c) Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "spec_helper"
describe Chef::TrainTransport do
describe "load_credentials" do
let(:transport) { Chef::TrainTransport.new }
let(:good_credentials) { { "switch.cisco.com" => { "user" => "cisco", "password" => "cisco", "enable_password" => "secret" } } }
before do
allow(Chef::TrainTransport).to receive(:parse_credentials_file).and_return(good_credentials)
end
it "matches credentials when they exist" do
expect(Chef::TrainTransport.load_credentials("switch.cisco.com")[:user]).to eq("cisco")
expect(Chef::TrainTransport.load_credentials("switch.cisco.com")[:password]).to eq("cisco")
expect(Chef::TrainTransport.load_credentials("switch.cisco.com")[:enable_password]).to eq("secret")
end
it "returns nil if there is no match" do
expect(Chef::TrainTransport.load_credentials("router.unicorns.com")).to be_nil
end
# [foo.example.org] => {"foo"=>{"example"=>{"org"=>{}}}}
# ['foo.example.org'] => {"foo.example.org"=>{}}
it "warns if the host has been split by toml" do
allow(Chef::TrainTransport).to receive(:credentials_file_path).and_return("/Users/scotthourglass/.chef/credentials")
allow(Chef::TrainTransport).to receive(:parse_credentials_file).and_return({ "foo" => { "example" => { "org" => {} } } })
expect(Chef::Log).to receive(:warn).with(/as a Hash/)
expect(Chef::Log).to receive(:warn).with(/Hostnames must be surrounded by single quotes/)
expect(Chef::TrainTransport.load_credentials("foo.example.org")).to be_nil
end
end
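# For reference, a credentials entry that parses as intended must quote the
# dotted hostname (sketch matching good_credentials above):
#   ['switch.cisco.com']
#   user = 'cisco'
#   password = 'cisco'
#   enable_password = 'secret'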
describe "credentials_file_path" do
let(:config_cred_file_path) { "/somewhere/credentials" }
let(:host_cred_file_path) { Chef::Platform.windows? ? "C:\\chef\\foo.example.org\\credentials" : "/etc/chef/foo.example.org/credentials" }
context "when a file path is specified by a config" do
before do
tm_config = double("Config Context", host: "foo.example.org", credentials_file: config_cred_file_path)
allow(Chef::Config).to receive(:target_mode).and_return(tm_config)
end
it "returns the path if it exists" do
allow(File).to receive(:exist?).with(config_cred_file_path).and_return(true)
expect(Chef::TrainTransport.credentials_file_path).to eq(config_cred_file_path)
end
it "raises an error if it does not exist" do
allow(File).to receive(:exist?).and_return(false)
expect { Chef::TrainTransport.credentials_file_path }.to raise_error(ArgumentError, /does not exist/)
end
end
it "raises an error if the default creds files do not exist" do
allow(File).to receive(:exist?).and_return(false)
expect { Chef::TrainTransport.credentials_file_path }.to raise_error(ArgumentError, /does not exist/)
end
it "returns the path to the default config file if it exists" do
tm_config = double("Config Context", host: "foo.example.org", credentials_file: nil)
allow(Chef::Config).to receive(:target_mode).and_return(tm_config)
allow(File).to receive(:exist?).with(host_cred_file_path).and_return(true)
expect(Chef::TrainTransport.credentials_file_path).to eq(host_cred_file_path)
end
end
end
|
apache-2.0
|
mcheo/ansible_f5
|
post_slack.py
|
156
|
from slacker import Slacker
import os
import sys
# The original passed a bare <slack_token> placeholder, which is not valid
# Python. Reading the token from an environment variable (an assumed name,
# not from the original) keeps the script runnable without a hard-coded secret.
slack = Slacker(os.environ['SLACK_TOKEN'])
# Send a message to #general channel
slack.chat.post_message('#demo', sys.argv[1])
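# Illustrative usage (not part of the original file; token and message are
# examples only):
#   SLACK_TOKEN=xoxb-example python post_slack.py "deployment finished"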
|
apache-2.0
|
AnanyaKumar/kubernetes
|
pkg/api/testing/fuzzer.go
|
10880
|
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"math/rand"
"reflect"
"strconv"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
docker "github.com/fsouza/go-dockerclient"
"github.com/google/gofuzz"
"speter.net/go/exp/math/dec/inf"
)
// FuzzerFor can randomly populate api objects that are destined for the given version.
func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
f := fuzz.New().NilChance(.5).NumElements(1, 1)
if src != nil {
f.RandSource(src)
}
f.Funcs(
func(j *runtime.PluginBase, c fuzz.Continue) {
// Do nothing; this struct has only a Kind field and it must stay blank in memory.
},
func(j *runtime.TypeMeta, c fuzz.Continue) {
// We have to customize the randomization of TypeMetas because their
// APIVersion and Kind must remain blank in memory.
j.APIVersion = ""
j.Kind = ""
},
func(j *api.TypeMeta, c fuzz.Continue) {
// We have to customize the randomization of TypeMetas because their
// APIVersion and Kind must remain blank in memory.
j.APIVersion = ""
j.Kind = ""
},
func(j *api.ObjectMeta, c fuzz.Continue) {
j.Name = c.RandString()
j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
j.SelfLink = c.RandString()
j.UID = types.UID(c.RandString())
j.GenerateName = c.RandString()
var sec, nsec int64
c.Fuzz(&sec)
c.Fuzz(&nsec)
j.CreationTimestamp = util.Unix(sec, nsec).Rfc3339Copy()
},
func(j *api.ObjectReference, c fuzz.Continue) {
// We have to customize the randomization of TypeMetas because their
// APIVersion and Kind must remain blank in memory.
j.APIVersion = c.RandString()
j.Kind = c.RandString()
j.Namespace = c.RandString()
j.Name = c.RandString()
j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
j.FieldPath = c.RandString()
},
func(j *api.ListMeta, c fuzz.Continue) {
j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
j.SelfLink = c.RandString()
},
func(j *api.ListOptions, c fuzz.Continue) {
// TODO: add some parsing
j.LabelSelector, _ = labels.Parse("a=b")
j.FieldSelector, _ = fields.ParseSelector("a=b")
},
func(j *api.PodPhase, c fuzz.Continue) {
statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
*j = statuses[c.Rand.Intn(len(statuses))]
},
func(j *api.PodTemplateSpec, c fuzz.Continue) {
// TODO: v1beta1/2 can't round trip a nil template correctly, fix by having v1beta1/2
// conversion compare converted object to nil via DeepEqual
j.ObjectMeta = api.ObjectMeta{}
c.Fuzz(&j.ObjectMeta)
j.ObjectMeta = api.ObjectMeta{Labels: j.ObjectMeta.Labels}
j.Spec = api.PodSpec{}
c.Fuzz(&j.Spec)
},
func(j *api.Binding, c fuzz.Continue) {
c.Fuzz(&j.ObjectMeta)
j.Target.Name = c.RandString()
},
func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
//j.TemplateRef = nil // this is required for round trip
},
func(j *api.ReplicationControllerStatus, c fuzz.Continue) {
// only replicas round trips
j.Replicas = int(c.RandUint64())
},
func(j *api.DaemonControllerSpec, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
},
func(j *api.DaemonControllerStatus, c fuzz.Continue) {
j.NodesRunningDaemon = int(c.RandUint64())
j.NodesShouldRunDaemon = int(c.RandUint64())
},
func(j *api.List, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
// TODO: uncomment when round trip starts from a versioned object
if false { //j.Items == nil {
j.Items = []runtime.Object{}
}
},
func(j *runtime.Object, c fuzz.Continue) {
// TODO: uncomment when round trip starts from a versioned object
if true { //c.RandBool() {
*j = &runtime.Unknown{
TypeMeta: runtime.TypeMeta{Kind: "Something", APIVersion: "unknown"},
RawJSON: []byte(`{"apiVersion":"unknown","kind":"Something","someKey":"someValue"}`),
}
} else {
types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
t := types[c.Rand.Intn(len(types))]
c.Fuzz(t)
*j = t
}
},
func(pb map[docker.Port][]docker.PortBinding, c fuzz.Continue) {
// This is necessary because keys with nil values get omitted.
// TODO: Is this a bug?
pb[docker.Port(c.RandString())] = []docker.PortBinding{
{c.RandString(), c.RandString()},
{c.RandString(), c.RandString()},
}
},
func(pm map[string]docker.PortMapping, c fuzz.Continue) {
// This is necessary because keys with nil values get omitted.
// TODO: Is this a bug?
pm[c.RandString()] = docker.PortMapping{
c.RandString(): c.RandString(),
}
},
func(q *resource.Quantity, c fuzz.Continue) {
// Real Quantity fuzz testing is done elsewhere;
// this limited subset of functionality survives
// round-tripping to v1beta1/2.
q.Amount = &inf.Dec{}
q.Format = resource.DecimalExponent
//q.Amount.SetScale(inf.Scale(-c.Intn(12)))
q.Amount.SetUnscaled(c.Int63n(1000))
},
func(p *api.PullPolicy, c fuzz.Continue) {
policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
*p = policies[c.Rand.Intn(len(policies))]
},
func(rp *api.RestartPolicy, c fuzz.Continue) {
policies := []api.RestartPolicy{api.RestartPolicyAlways, api.RestartPolicyNever, api.RestartPolicyOnFailure}
*rp = policies[c.Rand.Intn(len(policies))]
},
func(vs *api.VolumeSource, c fuzz.Continue) {
// Exactly one of the fields must be set.
v := reflect.ValueOf(vs).Elem()
i := int(c.RandUint64() % uint64(v.NumField()))
v = v.Field(i).Addr()
// Use a new fuzzer which cannot populate nil to ensure one field will be set.
fuzz.New().NilChance(0).NumElements(1, 1).Fuzz(v.Interface())
},
func(d *api.DNSPolicy, c fuzz.Continue) {
policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}
*d = policies[c.Rand.Intn(len(policies))]
},
func(p *api.Protocol, c fuzz.Continue) {
protocols := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP}
*p = protocols[c.Rand.Intn(len(protocols))]
},
func(p *api.ServiceAffinity, c fuzz.Continue) {
types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
*p = types[c.Rand.Intn(len(types))]
},
func(p *api.ServiceType, c fuzz.Continue) {
types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
*p = types[c.Rand.Intn(len(types))]
},
func(ct *api.Container, c fuzz.Continue) {
c.FuzzNoCustom(ct) // fuzz self without calling this function again
ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
},
func(ev *api.EnvVar, c fuzz.Continue) {
ev.Name = c.RandString()
if c.RandBool() {
ev.Value = c.RandString()
} else {
ev.ValueFrom = &api.EnvVarSource{}
ev.ValueFrom.FieldRef = &api.ObjectFieldSelector{}
versions := []string{"v1beta1", "v1beta2", "v1beta3"}
ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
ev.ValueFrom.FieldRef.FieldPath = c.RandString()
}
},
func(sc *api.SecurityContext, c fuzz.Continue) {
c.FuzzNoCustom(sc) // fuzz self without calling this function again
priv := c.RandBool()
sc.Privileged = &priv
sc.Capabilities = &api.Capabilities{
Add: make([]api.Capability, 0),
Drop: make([]api.Capability, 0),
}
c.Fuzz(&sc.Capabilities.Add)
c.Fuzz(&sc.Capabilities.Drop)
},
func(e *api.Event, c fuzz.Continue) {
c.FuzzNoCustom(e) // fuzz self without calling this function again
// Fix event count to 1, otherwise, if a v1beta1 or v1beta2 event has a count set arbitrarily, its count is ignored
if e.FirstTimestamp.IsZero() {
e.Count = 1
} else {
c.Fuzz(&e.Count)
}
},
func(s *api.Secret, c fuzz.Continue) {
c.FuzzNoCustom(s) // fuzz self without calling this function again
s.Type = api.SecretTypeOpaque
},
func(pv *api.PersistentVolume, c fuzz.Continue) {
c.FuzzNoCustom(pv) // fuzz self without calling this function again
types := []api.PersistentVolumePhase{api.VolumeAvailable, api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeFailed}
pv.Status.Phase = types[c.Rand.Intn(len(types))]
pv.Status.Message = c.RandString()
reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimRecycle, api.PersistentVolumeReclaimRetain}
pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
},
func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
c.FuzzNoCustom(pvc) // fuzz self without calling this function again
types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending}
pvc.Status.Phase = types[c.Rand.Intn(len(types))]
},
func(s *api.NamespaceSpec, c fuzz.Continue) {
s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
},
func(s *api.NamespaceStatus, c fuzz.Continue) {
s.Phase = api.NamespaceActive
},
func(http *api.HTTPGetAction, c fuzz.Continue) {
c.FuzzNoCustom(http) // fuzz self without calling this function again
http.Path = "/" + http.Path // can't be blank
},
func(ss *api.ServiceSpec, c fuzz.Continue) {
c.FuzzNoCustom(ss) // fuzz self without calling this function again
if len(ss.Ports) == 0 {
// There must be at least 1 port.
ss.Ports = append(ss.Ports, api.ServicePort{})
c.Fuzz(&ss.Ports[0])
}
for i := range ss.Ports {
switch ss.Ports[i].TargetPort.Kind {
case util.IntstrInt:
ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
case util.IntstrString:
ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
}
}
},
func(n *api.Node, c fuzz.Continue) {
c.FuzzNoCustom(n)
n.Spec.ExternalID = "external"
},
)
return f
}
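// Illustrative sketch (not part of the original file): a typical use is a
// round-trip test that populates an object with the fuzzer and compares it
// after an encode/decode cycle; the codec handling is elided here.
//
//	f := FuzzerFor(t, "v1beta1", rand.NewSource(42))
//	pod := &api.Pod{}
//	f.Fuzz(pod)
//	// Encode pod with the versioned codec, decode it back, and require
//	// reflect.DeepEqual between the original and the decoded object.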
|
apache-2.0
|
ryfow/kubernetes
|
pkg/registry/endpoints.go
|
2771
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"net"
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)
func MakeEndpointController(serviceRegistry ServiceRegistry, podRegistry PodRegistry) *EndpointController {
return &EndpointController{
serviceRegistry: serviceRegistry,
podRegistry: podRegistry,
}
}
type EndpointController struct {
serviceRegistry ServiceRegistry
podRegistry PodRegistry
}
func findPort(manifest *api.ContainerManifest, portName util.IntOrString) (int, error) {
if ((portName.Kind == util.IntstrString && len(portName.StrVal) == 0) ||
(portName.Kind == util.IntstrInt && portName.IntVal == 0)) &&
len(manifest.Containers[0].Ports) > 0 {
return manifest.Containers[0].Ports[0].ContainerPort, nil
}
if portName.Kind == util.IntstrInt {
return portName.IntVal, nil
}
name := portName.StrVal
for _, container := range manifest.Containers {
for _, port := range container.Ports {
if port.Name == name {
return port.ContainerPort, nil
}
}
}
return -1, fmt.Errorf("no suitable port for manifest: %s", manifest.ID)
}
func (e *EndpointController) SyncServiceEndpoints() error {
services, err := e.serviceRegistry.ListServices()
if err != nil {
return err
}
var resultErr error
for _, service := range services.Items {
pods, err := e.podRegistry.ListPods(labels.Set(service.Selector).AsSelector())
if err != nil {
glog.Errorf("Error syncing service: %#v, skipping.", service)
resultErr = err
continue
}
endpoints := make([]string, len(pods))
for ix, pod := range pods {
port, err := findPort(&pod.DesiredState.Manifest, service.ContainerPort)
if err != nil {
glog.Errorf("Failed to find port for service: %v, %v", service, err)
continue
}
endpoints[ix] = net.JoinHostPort(pod.CurrentState.PodIP, strconv.Itoa(port))
}
err = e.serviceRegistry.UpdateEndpoints(api.Endpoints{
Name: service.ID,
Endpoints: endpoints,
})
if err != nil {
glog.Errorf("Error updating endpoints: %#v", err)
continue
}
}
return resultErr
}
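// Illustrative sketch (not part of the original file): callers typically
// construct the controller and resync on a timer; the registry wiring and
// the use of util.Forever here are assumptions.
//
//	e := MakeEndpointController(serviceRegistry, podRegistry)
//	go util.Forever(func() { _ = e.SyncServiceEndpoints() }, 10*time.Second)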
|
apache-2.0
|
speedcom/cqrs-es-dddd
|
src/main/scala/com/speedcom/Module.scala
|
722
|
package com.speedcom
import com.speedcom.core.bank_account.boundary.BankAccountFinder
import com.speedcom.core.bank_account.usecases.{UcGetBalance, UcContributeMoney}
import com.speedcom.core.transaction_history.boundary.TransactionHistoryFinder
import com.speedcom.inmem.{InMemBankAccountFinder, InMemTransactionHistoryFinder, BankRoot}
trait Module {
// storage
val bankRoot = new BankRoot
val transactionHistoryFinder: TransactionHistoryFinder = new InMemTransactionHistoryFinder(bankRoot)
val bankAccountFinder: BankAccountFinder = new InMemBankAccountFinder(bankRoot)
// use cases
val ucContributeCash = UcContributeMoney(bankAccountFinder)
val ucGetBalance = UcGetBalance(transactionHistoryFinder)
}
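// Illustrative sketch (not part of the original file): the trait is meant to
// be mixed into an application entry point, e.g.
//
//   object Main extends App with Module {
//     // ucContributeCash and ucGetBalance are wired and ready to use here
//   }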
|
apache-2.0
|
google-research/google-research
|
gift/pipelines/multi_env_trainer.py
|
12972
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi environment trainer."""
import functools
import time
from flax.deprecated import nn
from flax.training import common_utils
import jax
from jax.experimental.optimizers import clip_grads
import jax.numpy as jnp
import numpy as np
from gift.pipelines import pipeline_utils
from gift.pipelines import trainer
class MultiEnvTrainer(trainer.Trainer):
"""Base class for multi environment trainers."""
def get_total_eval_steps(self):
total_eval_steps = {}
def get_num_steps(split, env):
return np.ceil(self.task.dataset.splits[split][env].num_examples /
self.hparams.eval_batch_size)
for split in self.task.dataset.splits:
total_eval_steps[split] = {
env: int(get_num_steps(split, env))
for env in self.task.dataset.splits[split]
}
return total_eval_steps
def metrics_fn(self, env_logits, env_batch, env_ids, model_params):
return self.task.metrics_fn(
env_logits=env_logits,
env_batches=env_batch,
env_ids=env_ids,
params=model_params)
def training_loss_fn(self, flax_model, train_state, batch, dropout_rng,
env_ids):
"""Runs forward pass and computes loss.
Args:
flax_model: A flax module.
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer.
batch: Batches from different environments.
dropout_rng: FLAX PRNG key.
env_ids: list(int); List of environment ids.
Returns:
loss, new_module_state and computed logits for each batch.
"""
raise NotImplementedError
def setup_pmapped_tain_and_eval_steps(self):
eval_env_ids = list(
map(int, self.task.dataset.data_iters.validation.keys()))
train_env_ids, _ = list(
zip(*dict(self.task.dataset.data_iters['train']).items()))
train_env_ids = list(map(int, train_env_ids))
self.p_train_step = functools.partial(
self.train_step, env_ids=train_env_ids)
self.p_eval_step = functools.partial(
self.eval_step, all_env_ids=eval_env_ids)
self.pmapped_train_step = jax.pmap(
self.p_train_step,
axis_name='batch',
in_axes=(0, 0, 0),
donate_argnums=(1, 2))
self.pmapped_eval_step = jax.pmap(
self.p_eval_step,
axis_name='batch',
in_axes=(0, 0),
static_broadcasted_argnums=(2))
self.pmapped_forward_pass = jax.pmap(
self.forward_pass,
axis_name='batch',
in_axes=(0, 0, 0, 0),
static_broadcasted_argnums=(4, 5))
def forward_pass(self,
flax_model,
train_state,
batch,
rng,
input_layer_key='input',
train=True):
# bind the rng to the host/device we are on.
rng = pipeline_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to=['host', 'device'])
inputs = pipeline_utils.get_multi_env_inputs(batch, 'inputs')
with nn.stochastic(rng):
(env_logits, all_env_reps, selected_env_reps,
new_model_state) = pipeline_utils.vmapped_flax_module_with_reps(
inputs, flax_model, train_state.model_state, input_layer_key, train)
selected_env_reps = selected_env_reps.reshape(
(selected_env_reps.shape[0], selected_env_reps.shape[1], -1))
return env_logits, all_env_reps, selected_env_reps, new_model_state
def get_next_batch(self, data_iter):
"""Return the next batch for multi environment datasets.
Args:
data_iter: list(map) List of iterators on the different domains of the
dataset split (train/test/valid).
Returns:
List of batches.
"""
return jax.tree_map(next, data_iter)
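# Illustrative note (not part of the original file): jax.tree_map treats each
# iterator as a leaf, so a list of per-environment iterators yields a list of
# batches in one call, e.g.
#   batches = jax.tree_map(next, [iter(env0_ds), iter(env1_ds)])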
def train_step(self, train_state, batch, env_ids):
"""Runs a single step of training.
Given the state of the training and a batch of data, computes
the loss and updates the parameters of the model.
Args:
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer.
batch: A single batch of data.
env_ids: list(int); List of training environment codes.
Returns:
Updated state of training and calculated metrics.
"""
max_grad_norm = self.hparams.get('max_grad_norm', None)
new_rng, rng = jax.random.split(train_state.rng)
# bind the rng to the host/device we are on.
dropout_rng = pipeline_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to=['host', 'device'])
train_loss_fn = functools.partial(
self.training_loss_fn,
train_state=train_state,
batch=batch,
dropout_rng=dropout_rng,
env_ids=env_ids)
new_train_state, metrics = self.compute_grads_and_update(
batch, env_ids, max_grad_norm, new_rng, train_loss_fn, train_state)
return new_train_state, metrics
# TODO(samiraabnar): Try to avoid code duplication when overriding this fn.
def compute_grads_and_update(self, batch, env_ids, max_grad_norm, new_rng,
train_loss_fn, train_state):
# Compute learning rate:
lr = self.get_learning_rate(train_state.global_step)
# Compute gradients:
compute_gradient_fn = jax.value_and_grad(train_loss_fn, has_aux=True)
(_, (new_model_state, logits,
logs)), grad = compute_gradient_fn(train_state.optimizer.target)
# Update parameters:
grad = jax.lax.pmean(grad, axis_name='batch')
# Clip gradients:
if max_grad_norm is not None:
grad = clip_grads(grad, max_grad_norm)
new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
# Get the new (updated) train_state:
new_train_state = pipeline_utils.TrainState(
global_step=train_state.global_step + 1,
optimizer=new_optimizer,
model_state=new_model_state,
rng=new_rng)
metrics = self.collect_metrics(batch, env_ids, logits, logs, lr,
train_state.optimizer.target)
return new_train_state, metrics
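# Illustrative note (not part of the original file): jax.lax.pmean averages
# the gradient across all devices on the 'batch' pmap axis so every replica
# applies the same update, and clip_grads rescales the whole gradient tree so
# that its global norm does not exceed max_grad_norm.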
def collect_metrics(self, batch, env_ids, logits, logs, lr, model_params):
"""Collect metrics."""
metrics_dict = self.metrics_fn(logits, batch, env_ids, model_params)
metrics_dict['learning_rate'] = lr
if isinstance(logs, dict):
for key in logs:
if jnp.isscalar(logs[key]):
metrics_dict[key] = logs[key]
else:
metrics_dict[f'mean_{key}'] = jnp.mean(logs[key])
return metrics_dict
def eval_step(self, train_state, batch, env_id, all_env_ids):
"""Runs a single step of evaluation.
Args:
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer.
batch: A single batch of data.
env_id: int; Code of the environment being evaluated, or a negative value
to evaluate against all environments in all_env_ids.
all_env_ids: list(int); All eval environment ids.
Returns:
Calculated metrics.
"""
flax_model = train_state.optimizer.target
inputs = pipeline_utils.get_multi_env_inputs(batch, 'inputs')
with nn.stateful(train_state.model_state, mutable=False):
env_logits = pipeline_utils.vmapped_flax_module_eval(flax_model, inputs)
if env_id >= 0:
metrics = self.metrics_fn(env_logits, batch, [env_id], flax_model)
else:
metrics = self.metrics_fn(env_logits, batch, all_env_ids, flax_model)
return metrics
def train(self):
"""Training loop."""
master = jax.host_id() == 0
train_metrics = []
train_summary, eval_summary = None, None
tick = time.time()
eval_env_ids = list(
map(int, self.task.dataset.data_iters.validation.keys()))
train_env_ids, train_iters = list(
zip(*dict(self.task.dataset.data_iters['train']).items()))
train_env_ids = list(map(int, train_env_ids))
for step in range(self.start_step + 1, self.total_steps + 1):
train_batches = self.get_next_batch(train_iters)
self.train_state, t_metrics = self.pmapped_train_step(
self.train_state, train_batches)
t_metrics = jax.tree_map(lambda x: x[0], t_metrics)
train_metrics.append(t_metrics)
eval_summary, train_metrics, train_summary, tick = self.maybe_eval_and_log(
eval_env_ids, eval_summary, master, step, tick, train_metrics,
train_summary)
# Sync and save
self.train_state = self.checkpoint(self.train_state, step)
# wait until computations are done before exiting (for timing!)
jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
# Return the train and eval summary after the last step, for regression testing.
return train_summary, eval_summary
def maybe_eval_and_log(self, eval_env_ids, eval_summary, master, step, tick,
train_metrics, train_summary):
if (step % self.eval_frequency == 0) or (step == self.total_steps):
train_metrics = jax.device_get(train_metrics)
train_metrics = common_utils.stack_forest(train_metrics)
train_summary = pipeline_utils.compute_global_mean_metrics(train_metrics)
tock = time.time()
steps_per_sec = self.eval_frequency / (tock - tick)
tick = tock
# Log train summary:
if master:
self.write_train_summary(
step=step,
metric_dict=train_metrics,
summary=train_summary,
steps_per_sec=steps_per_sec)
# Reset metric accumulation for next evaluation cycle:
train_metrics = []
# Sync model state across replicas:
self.train_state = pipeline_utils.sync_model_state_across_replicas(
self.train_state)
# Evaluate and log the results:
eval_summary, self.train_state = self.eval(step, self.train_state,
eval_env_ids)
return eval_summary, train_metrics, train_summary, tick
def eval(self, step, train_state, eval_env_ids=None):
"""Evaluation loop.
Args:
step: int; Training step.
train_state: TrainState; Object containing training state.
eval_env_ids: list(int); Eval environments ids.
Returns:
eval_summary, train_state
"""
eval_summary, eval_metrics = self.eval_split(
train_state=train_state,
eval_env_ids=eval_env_ids,
split_name='validation')
# log eval summary
master = jax.host_id() == 0
if master:
self.write_eval_summary(
step=step, metric_dict=eval_metrics, summary=eval_summary)
return eval_summary, train_state
def eval_split(self, train_state, split_name, eval_env_ids=None):
"""Evaluation loop on the specified split.
Args:
train_state: TrainState; Object containing training state.
split_name: str; Name of the data split we want to evaluate the model on.
eval_env_ids: list(int); Eval environments ids.
Returns:
eval_summary, train_state
"""
data_iters = self.task.dataset.data_iters[split_name]
if eval_env_ids is None:
eval_env_ids = list(map(int, data_iters.keys()))
eval_metrics = {}
if isinstance(self.steps_per_eval, dict):
for env_id in eval_env_ids:
env_id_str = str(env_id)
env_eval_metrics = []
for _ in range(self.steps_per_eval[split_name][env_id_str]):
env_eval_batches = self.get_next_batch([data_iters[env_id_str]])
e_metrics = self.pmapped_eval_step(train_state, env_eval_batches,
env_id)
env_eval_metrics.append(e_metrics)
env_eval_metrics = common_utils.get_metrics(env_eval_metrics)
eval_metrics.update(env_eval_metrics)
eval_summary = pipeline_utils.compute_global_mean_metrics(eval_metrics)
else:
_, data_iters = list(zip(*dict(data_iters).items()))
eval_metrics = []
for _ in range(self.steps_per_eval):
env_eval_batches = self.get_next_batch(data_iters)
e_metrics = self.pmapped_eval_step(train_state, env_eval_batches, -1)
eval_metrics.append(e_metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_summary = pipeline_utils.compute_global_mean_metrics(eval_metrics)
return eval_summary, eval_metrics
|
apache-2.0
|
OpenNTF/org.openntf.domino
|
domino/org.openntf.domino.graph/src/main/java/org/openntf/domino/graph2/builtin/Eventable.java
|
2629
|
/**
* Copyright © 2013-2021 The OpenNTF Domino API Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openntf.domino.graph2.builtin;
import org.openntf.domino.graph2.impl.DVertex;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.frames.VertexFrame;
import com.tinkerpop.frames.modules.javahandler.JavaHandler;
import com.tinkerpop.frames.modules.javahandler.JavaHandlerContext;
public interface Eventable extends VertexFrame {
@JavaHandler
public boolean create();
@JavaHandler
public boolean read();
@JavaHandler
public boolean update();
@JavaHandler
public boolean delete();
@JavaHandler
public boolean isNew();
@JavaHandler
public boolean onCreate() throws NoSuchMethodException;
@JavaHandler
public boolean onRead() throws NoSuchMethodException;
@JavaHandler
public boolean onUpdate() throws NoSuchMethodException;
@JavaHandler
public boolean onDelete() throws NoSuchMethodException;
public static abstract class Impl implements JavaHandlerContext<Vertex>, Eventable {
@Override
public boolean create() {
try {
return onCreate();
} catch (NoSuchMethodException nsme) {
return true; //ignore
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
@Override
public boolean read() {
try {
return onRead();
} catch (NoSuchMethodException nsme) {
return true; //ignore
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
@Override
public boolean update() {
try {
return onUpdate();
} catch (NoSuchMethodException nsme) {
return true; //ignore
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
@Override
public boolean delete() {
try {
return onDelete();
} catch (NoSuchMethodException nsme) {
return true; //ignore
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
@Override
public boolean isNew() {
Vertex v = this.asVertex();
return ((DVertex) v).asDocument().isNewNote();
}
}
}
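// Illustrative note (not part of the original file): create/read/update/delete
// treat a missing onCreate/onRead/onUpdate/onDelete handler as success, since
// the NoSuchMethodException is swallowed; a concrete frame that wants custom
// behaviour declares the matching on* method in its own Impl class, and any
// other exception surfaces as a RuntimeException.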
|
apache-2.0
|
kzubrinic/oop
|
oop/src/hr/unidu/oop/p02/Pdv0.java
|
965
|
package hr.unidu.oop.p02;
/**
 * A class that serves as an example of using instance variables.
*/
public class Pdv0 {
/**
 * pdv is an instance (object) variable.
 * Each object has its own copy of pdv; changing its value
 * affects only that single object.
*/
private double pdv = 0;
public void racPdv(double osn) {
double iznosPdva;
iznosPdva = osn * pdv / 100;
System.out.println("Stopa je " + pdv + " PDV na osnovicu "+ osn + " je " + iznosPdva);
}
public boolean postaviStopu(double ns){
if (ns < 0) {
System.out.println("Stopa ne smije biti negativna!");
return false;
}
pdv = ns;
return true;
}
public static void main(String[] args) {
Pdv0 p1 = new Pdv0();
if (!p1.postaviStopu(25))
return;
Pdv0 p2 = new Pdv0();
if (!p2.postaviStopu(10))
return;
p1.racPdv(100);
p2.racPdv(100);
}
}
|
apache-2.0
|
KDanila/KDanila
|
chapter_001/src/test/java/ru/job4j/condition/package-info.java
|
145
|
/**
* Package for PointTest class.
*
* @author Danila Kuzmin (mailto:bus1d0@mail.ru)
* @version $Id$
* @since 0.1
*/
package ru.job4j.condition;
|
apache-2.0
|
BushrootPDX/app
|
src/gardenactionselector/actions.js
|
1308
|
import * as actions from '../garden/constants';
import gardensApi from '../services/gardensApi';
export const makeSaveGarden = gardensApi => garden => dispatch => {
dispatch({ type: actions.SAVING_GARDEN });
return gardensApi.updateGarden(garden)
.then(saved => {
dispatch({
type: actions.SAVED_GARDEN,
payload: saved
});
},
error => {
dispatch({
type: actions.SAVE_GARDEN_ERROR,
payload: error
});
});
};
export const saveGarden = makeSaveGarden(gardensApi);
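// Illustrative note (not part of the original file): the make* factories take
// the API as a parameter so tests can inject a stub, e.g.
//   const fakeApi = { updateGarden: g => Promise.resolve(g) };
//   makeSaveGarden(fakeApi)(garden)(dispatch);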
export const makeDeleteGarden = gardensApi => garden => dispatch => {
dispatch({ type: actions.DELETING_GARDEN });
return gardensApi.delete(garden)
.then(({response, revisedUser}) => {
dispatch({
type: actions.DELETED_GARDEN
});
dispatch({
type: 'FETCHED_USER',
payload: revisedUser
});
},
error => {
dispatch({
type: actions.DELETE_GARDEN_ERROR,
payload: error
});
});
};
export const deleteGarden = makeDeleteGarden(gardensApi);
|
apache-2.0
|
software-engineering-amsterdam/poly-ql
|
SantiagoCarrillo/q-language/src/edu/uva/softwarecons/model/expression/UnaryExpression.java
|
407
|
package edu.uva.softwarecons.model.expression;
/**
* Falconlabs
*
* @author Santiago Carrillo
* Date: 2/20/14
*/
public abstract class UnaryExpression
implements Expression
{
private final Expression argument;
public UnaryExpression( Expression argument )
{
this.argument = argument;
}
public Expression getArgument()
{
return argument;
}
}
|
apache-2.0
|
CrimsonInn/dab-tree
|
src/main.cc
|
863
|
#include <iostream>
#include <glog/logging.h>
#include <gflags/gflags.h>
#include "data.h"
#include "tree.h"
#include "matrix.h"
#include "proto_func.h"
#include "trainer.h"
DEFINE_double(lr, 1.0, "learning rate");
DEFINE_uint64(batch, 10000, "batch size");
DEFINE_string(train, "BATCH_DATA_FILE", "training data file");
DEFINE_uint64(threads, 1, "thread num");
DEFINE_uint64(stage, 50, "boosting stage num");
DEFINE_uint64(splits, 100, "splits num for continuous feature");
DEFINE_uint64(nodes, 64, "tree node num");
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
Trainer trainer(FLAGS_train, FLAGS_batch, FLAGS_lr, FLAGS_threads, FLAGS_splits, FLAGS_nodes);
for (size_t i = 0; i < FLAGS_stage; ++i) {
trainer.TrainOneBatch();
}
// trainer.tree.Print();
gflags::ShutDownCommandLineFlags();
return 0;
}
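// Illustrative invocation (not part of the original file; the binary name and
// flag values are examples only):
//   ./dabtree --train=train.pb --batch=5000 --lr=0.1 --stage=100 --threads=4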
|
apache-2.0
|
Hexworks/zircon
|
zircon.jvm.examples/src/main/java/org/hexworks/zircon/examples/components/AllComponentsExampleJava.java
|
4981
|
package org.hexworks.zircon.examples.components;
import org.hexworks.zircon.api.component.*;
import org.hexworks.zircon.api.data.Tile;
import org.hexworks.zircon.examples.base.Defaults;
import org.hexworks.zircon.examples.base.OneColumnComponentExampleJava;
import org.hexworks.zircon.internal.component.renderer.NoOpComponentRenderer;
import static java.lang.Thread.sleep;
import static org.hexworks.zircon.api.ComponentDecorations.box;
import static org.hexworks.zircon.api.ComponentDecorations.shadow;
import static org.hexworks.zircon.api.Components.*;
import static org.hexworks.zircon.api.GraphicalTilesetResources.nethack16x16;
import static org.hexworks.zircon.api.graphics.BoxType.TOP_BOTTOM_DOUBLE;
public class AllComponentsExampleJava extends OneColumnComponentExampleJava {
public static void main(String[] args) {
new AllComponentsExampleJava().show("All Components Example");
}
@Override
public void build(VBox box) {
HBox columns = hbox()
.withComponentRenderer(new NoOpComponentRenderer<>())
.withPreferredSize(box.getContentSize().withRelativeHeight(-1))
.build();
box.addComponent(columns);
int half = columns.getContentSize().getWidth() / 2;
VBox leftColumn = vbox()
.withPreferredSize(columns.getContentSize().withWidth(half))
.withSpacing(1)
.withDecorations(box(TOP_BOTTOM_DOUBLE, "Content"))
.build();
VBox rightColumn = vbox()
.withPreferredSize(columns.getContentSize().withWidth(half))
.withSpacing(1)
.withDecorations(box(TOP_BOTTOM_DOUBLE, "Interactions"))
.build();
int columnWidth = rightColumn.getContentSize().getWidth();
leftColumn.addComponent(header().withText("This is a header"));
leftColumn.addComponent(label().withText("This is a label"));
leftColumn.addComponent(listItem().withText("A list item to read"));
leftColumn.addComponent(paragraph()
.withPreferredSize(leftColumn.getContentSize().getWidth(), 3)
.withText("And a multi-line paragraph which is very long."));
if (Defaults.TILESET.getSize().getWidth() == 16) {
leftColumn.addComponent(icon()
.withIcon(Tile.newBuilder()
.withTileset(nethack16x16())
.withName("Plate mail")
.buildGraphicalTile())
.withTileset(nethack16x16()));
}
leftColumn.addComponent(textBox(leftColumn.getContentSize().getWidth() - 3)
.addHeader("Text Box!")
.withDecorations(box(), shadow())
.addParagraph("This is a paragraph which won't fit on one line."));
VBox radioBox = vbox()
.withPreferredSize(columnWidth, 6)
.withDecorations(box(), shadow())
.build();
RadioButton a = radioButton()
.withText("Option A")
.withKey("a")
.build();
RadioButton b = radioButton()
.withText("Option B")
.withKey("b")
.build();
RadioButton c = radioButton()
.withText("Option C")
.withKey("c")
.build();
radioBox.addComponent(a);
radioBox.addComponent(b);
radioBox.addComponent(c);
RadioButtonGroup group = radioButtonGroup().build();
group.addComponents(a, b, c);
rightColumn.addComponent(radioBox);
rightColumn.addComponent(horizontalNumberInput()
.withPreferredSize(columnWidth, 1)
.withInitialValue(5)
.withMinValue(1)
.withMaxValue(100)
.withPreferredSize(columnWidth, 3)
.withDecorations(box()));
ProgressBar progressBar = progressBar()
.withNumberOfSteps(100)
.withRange(100)
.withDisplayPercentValueOfProgress(true)
.withPreferredSize(columnWidth, 3)
.withDecorations(box())
.build();
progressBar.setProgress(1);
new Thread(() -> {
try {
while (progressBar.getProgress() < 100) {
sleep(1500);
progressBar.setProgress(progressBar.getProgress() + 1);
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}).start();
rightColumn.addComponent(progressBar);
rightColumn.addComponent(horizontalSlider()
.withMinValue(1)
.withMaxValue(100)
.withNumberOfSteps(100)
.withPreferredSize(columnWidth, 3));
columns.addComponents(leftColumn, rightColumn);
}
}
|
apache-2.0
|
mark-friedman/blockly
|
core/generator.js
|
18854
|
/**
* @license
* Copyright 2012 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Utility functions for generating executable code from
* Blockly code.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.Generator');
goog.require('Blockly.Block');
/** @suppress {extraRequire} */
goog.require('Blockly.constants');
goog.require('Blockly.utils.deprecation');
goog.requireType('Blockly.Names');
goog.requireType('Blockly.Workspace');
/**
* Class for a code generator that translates the blocks into a language.
* @param {string} name Language name of this generator.
* @constructor
*/
Blockly.Generator = function(name) {
this.name_ = name;
this.FUNCTION_NAME_PLACEHOLDER_REGEXP_ =
new RegExp(this.FUNCTION_NAME_PLACEHOLDER_, 'g');
};
/**
* Arbitrary code to inject into locations that risk causing infinite loops.
* Any instances of '%1' will be replaced by the block ID that failed.
* E.g. ' checkTimeout(%1);\n'
* @type {?string}
*/
Blockly.Generator.prototype.INFINITE_LOOP_TRAP = null;
/**
* Arbitrary code to inject before every statement.
* Any instances of '%1' will be replaced by the block ID of the statement.
* E.g. 'highlight(%1);\n'
* @type {?string}
*/
Blockly.Generator.prototype.STATEMENT_PREFIX = null;
/**
* Arbitrary code to inject after every statement.
* Any instances of '%1' will be replaced by the block ID of the statement.
* E.g. 'highlight(%1);\n'
* @type {?string}
*/
Blockly.Generator.prototype.STATEMENT_SUFFIX = null;
/**
* The method of indenting. Defaults to two spaces, but language generators
* may override this to increase indent or change to tabs.
* @type {string}
*/
Blockly.Generator.prototype.INDENT = ' ';
/**
* Maximum length for a comment before wrapping. Does not account for
* indenting level.
* @type {number}
*/
Blockly.Generator.prototype.COMMENT_WRAP = 60;
/**
* List of outer-inner pairings that do NOT require parentheses.
* @type {!Array<!Array<number>>}
*/
Blockly.Generator.prototype.ORDER_OVERRIDES = [];
/**
* Whether the init method has been called.
* Generators that set this flag to false after creation and true in init
* will cause blockToCode to emit a warning if the generator has not been
* initialized. If this flag is untouched, it will have no effect.
* @type {?boolean}
*/
Blockly.Generator.prototype.isInitialized = null;
/**
* Generate code for all blocks in the workspace to the specified language.
* @param {!Blockly.Workspace=} workspace Workspace to generate code from.
* @return {string} Generated code.
*/
Blockly.Generator.prototype.workspaceToCode = function(workspace) {
if (!workspace) {
// Backwards compatibility from before there could be multiple workspaces.
console.warn('No workspace specified in workspaceToCode call. Guessing.');
workspace = Blockly.getMainWorkspace();
}
var code = [];
this.init(workspace);
var blocks = workspace.getTopBlocks(true);
for (var i = 0, block; (block = blocks[i]); i++) {
var line = this.blockToCode(block);
if (Array.isArray(line)) {
// Value blocks return tuples of code and operator order.
// Top-level blocks don't care about operator order.
line = line[0];
}
if (line) {
if (block.outputConnection) {
// This block is a naked value. Ask the language's code generator if
// it wants to append a semicolon, or something.
line = this.scrubNakedValue(line);
if (this.STATEMENT_PREFIX && !block.suppressPrefixSuffix) {
line = this.injectId(this.STATEMENT_PREFIX, block) + line;
}
if (this.STATEMENT_SUFFIX && !block.suppressPrefixSuffix) {
line = line + this.injectId(this.STATEMENT_SUFFIX, block);
}
}
code.push(line);
}
}
code = code.join('\n'); // Blank line between each section.
code = this.finish(code);
// Final scrubbing of whitespace.
code = code.replace(/^\s+\n/, '');
code = code.replace(/\n\s+$/, '\n');
code = code.replace(/[ \t]+\n/g, '\n');
return code;
};
// The following are some helpful functions which can be used by multiple
// languages.
/**
* Prepend a common prefix onto each line of code.
* Intended for indenting code or adding comment markers.
* @param {string} text The lines of code.
* @param {string} prefix The common prefix.
* @return {string} The prefixed lines of code.
*/
Blockly.Generator.prototype.prefixLines = function(text, prefix) {
return prefix + text.replace(/(?!\n$)\n/g, '\n' + prefix);
};
/**
* Recursively spider a tree of blocks, returning all their comments.
* @param {!Blockly.Block} block The block from which to start spidering.
* @return {string} Concatenated list of comments.
*/
Blockly.Generator.prototype.allNestedComments = function(block) {
var comments = [];
var blocks = block.getDescendants(true);
for (var i = 0; i < blocks.length; i++) {
var comment = blocks[i].getCommentText();
if (comment) {
comments.push(comment);
}
}
// Append an empty string to create a trailing line break when joined.
if (comments.length) {
comments.push('');
}
return comments.join('\n');
};
/**
* Generate code for the specified block (and attached blocks).
* The generator must be initialized before calling this function.
* @param {Blockly.Block} block The block to generate code for.
* @param {boolean=} opt_thisOnly True to generate code for only this statement.
* @return {string|!Array} For statement blocks, the generated code.
* For value blocks, an array containing the generated code and an
* operator order value. Returns '' if block is null.
*/
Blockly.Generator.prototype.blockToCode = function(block, opt_thisOnly) {
if (this.isInitialized === false) {
console.warn(
'Generator init was not called before blockToCode was called.');
}
if (!block) {
return '';
}
if (!block.isEnabled()) {
// Skip past this block if it is disabled.
return opt_thisOnly ? '' : this.blockToCode(block.getNextBlock());
}
if (block.isInsertionMarker()) {
// Skip past insertion markers.
return opt_thisOnly ? '' : this.blockToCode(block.getChildren(false)[0]);
}
var func = this[block.type];
if (typeof func != 'function') {
throw Error('Language "' + this.name_ + '" does not know how to generate ' +
'code for block type "' + block.type + '".');
}
// First argument to func.call is the value of 'this' in the generator.
// Prior to 24 September 2013 'this' was the only way to access the block.
// The current preferred method of accessing the block is through the second
// argument to func.call, which becomes the first parameter to the generator.
var code = func.call(block, block);
if (Array.isArray(code)) {
// Value blocks return tuples of code and operator order.
if (!block.outputConnection) {
throw TypeError('Expecting string from statement block: ' + block.type);
}
return [this.scrub_(block, code[0], opt_thisOnly), code[1]];
} else if (typeof code == 'string') {
if (this.STATEMENT_PREFIX && !block.suppressPrefixSuffix) {
code = this.injectId(this.STATEMENT_PREFIX, block) + code;
}
if (this.STATEMENT_SUFFIX && !block.suppressPrefixSuffix) {
code = code + this.injectId(this.STATEMENT_SUFFIX, block);
}
return this.scrub_(block, code, opt_thisOnly);
} else if (code === null) {
// Block has handled code generation itself.
return '';
}
throw SyntaxError('Invalid code generated: ' + code);
};
/**
* Generate code representing the specified value input.
* @param {!Blockly.Block} block The block containing the input.
* @param {string} name The name of the input.
* @param {number} outerOrder The maximum binding strength (minimum order value)
* of any operators adjacent to "block".
* @return {string} Generated code or '' if no blocks are connected or the
* specified input does not exist.
*/
Blockly.Generator.prototype.valueToCode = function(block, name, outerOrder) {
if (isNaN(outerOrder)) {
throw TypeError('Expecting valid order from block: ' + block.type);
}
var targetBlock = block.getInputTargetBlock(name);
if (!targetBlock) {
return '';
}
var tuple = this.blockToCode(targetBlock);
if (tuple === '') {
// Disabled block.
return '';
}
// Value blocks must return code and order of operations info.
// Statement blocks must only return code.
if (!Array.isArray(tuple)) {
throw TypeError('Expecting tuple from value block: ' + targetBlock.type);
}
var code = tuple[0];
var innerOrder = tuple[1];
if (isNaN(innerOrder)) {
throw TypeError('Expecting valid order from value block: ' +
targetBlock.type);
}
if (!code) {
return '';
}
// Add parentheses if needed.
var parensNeeded = false;
var outerOrderClass = Math.floor(outerOrder);
var innerOrderClass = Math.floor(innerOrder);
if (outerOrderClass <= innerOrderClass) {
if (outerOrderClass == innerOrderClass &&
(outerOrderClass == 0 || outerOrderClass == 99)) {
// Don't generate parens around NONE-NONE and ATOMIC-ATOMIC pairs.
// 0 is the atomic order, 99 is the none order. No parentheses needed.
// In all known languages multiple such code blocks are not order
// sensitive. In fact in Python ('a' 'b') 'c' would fail.
} else {
// The operators outside this code are stronger than the operators
// inside this code. To prevent the code from being pulled apart,
// wrap the code in parentheses.
parensNeeded = true;
// Check for special exceptions.
for (var i = 0; i < this.ORDER_OVERRIDES.length; i++) {
if (this.ORDER_OVERRIDES[i][0] == outerOrder &&
this.ORDER_OVERRIDES[i][1] == innerOrder) {
parensNeeded = false;
break;
}
}
}
}
if (parensNeeded) {
// Technically, this should be handled on a language-by-language basis.
// However all known (sane) languages use parentheses for grouping.
code = '(' + code + ')';
}
return code;
};
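// Illustrative example (not part of the original file; the numeric orders are
// made up): if a block requests a value with outerOrder 5 (say, a
// multiplication) and the child returns ['a + b', 6] (a looser-binding
// addition), then 5 <= 6 and the result is wrapped as '(a + b)' so the
// surrounding expression cannot pull it apart.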
/**
* Generate a code string representing the blocks attached to the named
* statement input. Indent the code.
 * This is mainly used in generators. When trying to generate code to
 * evaluate, consider using workspaceToCode or blockToCode instead.
* @param {!Blockly.Block} block The block containing the input.
* @param {string} name The name of the input.
* @return {string} Generated code or '' if no blocks are connected.
*/
Blockly.Generator.prototype.statementToCode = function(block, name) {
var targetBlock = block.getInputTargetBlock(name);
var code = this.blockToCode(targetBlock);
// Value blocks must return code and order of operations info.
// Statement blocks must only return code.
if (typeof code != 'string') {
throw TypeError('Expecting code from statement block: ' +
(targetBlock && targetBlock.type));
}
if (code) {
code = this.prefixLines(/** @type {string} */ (code), this.INDENT);
}
return code;
};
/**
* Add an infinite loop trap to the contents of a loop.
* Add statement suffix at the start of the loop block (right after the loop
* statement executes), and a statement prefix to the end of the loop block
* (right before the loop statement executes).
* @param {string} branch Code for loop contents.
* @param {!Blockly.Block} block Enclosing block.
* @return {string} Loop contents, with infinite loop trap added.
*/
Blockly.Generator.prototype.addLoopTrap = function(branch, block) {
if (this.INFINITE_LOOP_TRAP) {
branch = this.prefixLines(this.injectId(this.INFINITE_LOOP_TRAP, block),
this.INDENT) + branch;
}
if (this.STATEMENT_SUFFIX && !block.suppressPrefixSuffix) {
branch = this.prefixLines(this.injectId(this.STATEMENT_SUFFIX, block),
this.INDENT) + branch;
}
if (this.STATEMENT_PREFIX && !block.suppressPrefixSuffix) {
branch = branch + this.prefixLines(this.injectId(this.STATEMENT_PREFIX,
block), this.INDENT);
}
return branch;
};
/**
* Inject a block ID into a message to replace '%1'.
* Used for STATEMENT_PREFIX, STATEMENT_SUFFIX, and INFINITE_LOOP_TRAP.
* @param {string} msg Code snippet with '%1'.
* @param {!Blockly.Block} block Block which has an ID.
* @return {string} Code snippet with ID.
*/
Blockly.Generator.prototype.injectId = function(msg, block) {
var id = block.id.replace(/\$/g, '$$$$'); // Issue 251.
return msg.replace(/%1/g, '\'' + id + '\'');
};
/**
* Comma-separated list of reserved words.
* @type {string}
* @protected
*/
Blockly.Generator.prototype.RESERVED_WORDS_ = '';
/**
* Add one or more words to the list of reserved words for this language.
* @param {string} words Comma-separated list of words to add to the list.
* No spaces. Duplicates are ok.
*/
Blockly.Generator.prototype.addReservedWords = function(words) {
this.RESERVED_WORDS_ += words + ',';
};
/**
* This is used as a placeholder in functions defined using
* Blockly.Generator.provideFunction_. It must not be legal code that could
* legitimately appear in a function definition (or comment), and it must
* not confuse the regular expression parser.
* @type {string}
* @protected
*/
Blockly.Generator.prototype.FUNCTION_NAME_PLACEHOLDER_ = '{leCUI8hutHZI4480Dc}';
/**
* A dictionary of definitions to be printed before the code.
* @type {!Object|undefined}
* @protected
*/
Blockly.Generator.prototype.definitions_;
/**
* A dictionary mapping desired function names in definitions_ to actual
* function names (to avoid collisions with user functions).
* @type {!Object|undefined}
* @protected
*/
Blockly.Generator.prototype.functionNames_;
/**
* A database of variable and procedure names.
* @type {!Blockly.Names|undefined}
* @protected
*/
Blockly.Generator.prototype.nameDB_;
Object.defineProperty(Blockly.Generator.prototype, 'variableDB_', {
/**
* Getter.
* @deprecated 'variableDB_' was renamed to 'nameDB_' (May 2021).
* @this {Blockly.Generator}
* @return {!Blockly.Names|undefined} Name database.
*/
get: function() {
Blockly.utils.deprecation.warn(
'variableDB_', 'May 2021', 'May 2026', 'nameDB_');
return this.nameDB_;
},
/**
* Setter.
* @deprecated 'variableDB_' was renamed to 'nameDB_' (May 2021).
* @this {Blockly.Generator}
* @param {!Blockly.Names|undefined} nameDb New name database.
*/
set: function(nameDb) {
Blockly.utils.deprecation.warn(
'variableDB_', 'May 2021', 'May 2026', 'nameDB_');
this.nameDB_ = nameDb;
}
});
/**
* Define a developer-defined function (not a user-defined procedure) to be
* included in the generated code. Used for creating private helper functions.
* The first time this is called with a given desiredName, the code is
* saved and an actual name is generated. Subsequent calls with the
* same desiredName have no effect but have the same return value.
*
* It is up to the caller to make sure the same desiredName is not
* used for different helper functions (e.g. use "colourRandom" and
* "listRandom", not "random"). There is no danger of colliding with reserved
* words, or user-defined variable or procedure names.
*
* The code gets output when Blockly.Generator.finish() is called.
*
* @param {string} desiredName The desired name of the function
* (e.g. mathIsPrime).
* @param {!Array<string>} code A list of statements. Use ' ' for indents.
* @return {string} The actual name of the new function. This may differ
* from desiredName if the former has already been taken by the user.
* @protected
*/
Blockly.Generator.prototype.provideFunction_ = function(desiredName, code) {
if (!this.definitions_[desiredName]) {
var functionName = this.nameDB_.getDistinctName(desiredName,
Blockly.PROCEDURE_CATEGORY_NAME);
this.functionNames_[desiredName] = functionName;
var codeText = code.join('\n').replace(
this.FUNCTION_NAME_PLACEHOLDER_REGEXP_, functionName);
// Change all ' ' indents into the desired indent.
// To avoid an infinite loop of replacements, change all indents to '\0'
// character first, then replace them all with the indent.
// We are assuming that no provided functions contain a literal null char.
var oldCodeText;
while (oldCodeText != codeText) {
oldCodeText = codeText;
codeText = codeText.replace(/^(( {2})*) {2}/gm, '$1\0');
}
codeText = codeText.replace(/\0/g, this.INDENT);
this.definitions_[desiredName] = codeText;
}
return this.functionNames_[desiredName];
};
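// Illustrative example (not part of the original file; the generator instance
// and helper name are hypothetical):
//   var fn = generator.provideFunction_('mathIsPrime', [
//       'function ' + generator.FUNCTION_NAME_PLACEHOLDER_ + '(n) {',
//       '  // ... primality test ...',
//       '}']);
//   // fn now holds the collision-free name to call in generated code.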
/**
* Hook for code to run before code generation starts.
* Subclasses may override this, e.g. to initialise the database of variable
* names.
* @param {!Blockly.Workspace} _workspace Workspace to generate code from.
*/
Blockly.Generator.prototype.init = function(_workspace) {
// Optionally override
// Create a dictionary of definitions to be printed before the code.
this.definitions_ = Object.create(null);
// Create a dictionary mapping desired developer-defined function names in
// definitions_ to actual function names (to avoid collisions with
// user-defined procedures).
this.functionNames_ = Object.create(null);
};
/**
* Common tasks for generating code from blocks. This is called from
* blockToCode and is called on every block, not just top level blocks.
* Subclasses may override this, e.g. to generate code for statements following
* the block, or to handle comments for the specified block and any connected
* value blocks.
* @param {!Blockly.Block} _block The current block.
* @param {string} code The code created for this block.
* @param {boolean=} _opt_thisOnly True to generate code for only this
* statement.
* @return {string} Code with comments and subsequent blocks added.
* @protected
*/
Blockly.Generator.prototype.scrub_ = function(_block, code, _opt_thisOnly) {
// Optionally override
return code;
};
/**
* Hook for code to run at end of code generation.
* Subclasses may override this, e.g. to prepend the generated code with import
* statements or variable definitions.
* @param {string} code Generated code.
* @return {string} Completed code.
*/
Blockly.Generator.prototype.finish = function(code) {
// Optionally override
// Clean up temporary data.
delete this.definitions_;
delete this.functionNames_;
return code;
};
/**
* Naked values are top-level blocks with outputs that aren't plugged into
* anything.
* Subclasses may override this, e.g. if their language does not allow
* naked values.
* @param {string} line Line of generated code.
* @return {string} Legal line of code.
*/
Blockly.Generator.prototype.scrubNakedValue = function(line) {
// Optionally override
return line;
};
|
apache-2.0
|
redhat-openstack/trove
|
trove/tests/scenario/helpers/mariadb_helper.py
|
1101
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.tests.scenario.helpers.mysql_helper import MysqlHelper
class MariadbHelper(MysqlHelper):
def __init__(self, expected_override_name):
super(MariadbHelper, self).__init__(expected_override_name)
# Mariadb currently does not support configuration groups.
# see: bug/1532256
def get_dynamic_group(self):
return dict()
def get_non_dynamic_group(self):
return dict()
def get_invalid_groups(self):
return []
|
apache-2.0
|
Oncilla/scion
|
go/integration/end2end_integration/main.go
|
8947
|
// Copyright 2018 ETH Zurich, Anapaya Systems
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/scionproto/scion/go/lib/addr"
"github.com/scionproto/scion/go/lib/integration"
"github.com/scionproto/scion/go/lib/log"
"github.com/scionproto/scion/go/lib/serrors"
"github.com/scionproto/scion/go/lib/snet"
"github.com/scionproto/scion/go/lib/util"
"github.com/scionproto/scion/go/pkg/app/feature"
)
var (
subset string
attempts int
timeout = &util.DurWrap{Duration: 10 * time.Second}
parallelism int
name string
cmd string
features string
)
func getCmd() (string, bool) {
return cmd, strings.Contains(cmd, "end2end")
}
func main() {
os.Exit(realMain())
}
func realMain() int {
addFlags()
if err := integration.Init(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to init: %s\n", err)
return 1
}
defer log.HandlePanic()
defer log.Flush()
if len(features) != 0 {
if _, err := feature.ParseDefault(strings.Split(features, ",")); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing features: %s\n", err)
return 1
}
}
clientArgs := []string{
"-log.console", "debug",
"-attempts", strconv.Itoa(attempts),
"-timeout", timeout.String(),
"-sciond", integration.Daemon,
"-local", integration.SrcAddrPattern + ":0",
"-remote", integration.DstAddrPattern + ":" + integration.ServerPortReplace,
}
serverArgs := []string{
"-mode", "server",
"-sciond", integration.Daemon,
"-local", integration.DstAddrPattern + ":0",
}
if len(features) != 0 {
clientArgs = append(clientArgs, "--features", features)
serverArgs = append(serverArgs, "--features", features)
}
in := integration.NewBinaryIntegration(name, cmd, clientArgs, serverArgs)
pairs, err := getPairs()
if err != nil {
log.Error("Error selecting tests", "err", err)
return 1
}
if err := runTests(in, pairs); err != nil {
log.Error("Error during tests", "err", err)
return 1
}
return 0
}
// addFlags adds the necessary flags.
func addFlags() {
flag.IntVar(&attempts, "attempts", 1, "Number of attempts per client before giving up.")
flag.StringVar(&cmd, "cmd", "./bin/end2end",
"The end2end binary to run (default: ./bin/end2end)")
flag.StringVar(&name, "name", "end2end_integration",
"The name of the test that is running (default: end2end_integration)")
flag.Var(timeout, "timeout", "The timeout for each attempt")
flag.StringVar(&subset, "subset", "all", "Subset of pairs to run (all|core#core|"+
"noncore#localcore|noncore#core|noncore#noncore)")
flag.IntVar(¶llelism, "parallelism", 1, "How many end2end tests run in parallel.")
flag.StringVar(&features, "features", "",
fmt.Sprintf("enable development features (%v)", feature.String(&feature.Default{}, "|")))
}
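// Example invocation using the flags defined above (binary name and values are
// illustrative, not taken from the source):
//
//   ./bin/end2end_integration -cmd ./bin/end2end -subset noncore#core -parallelism 4 -attempts 2 -timeout 30s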
// runTests runs the end2end tests for all pairs. If an error occurs, the
// function terminates immediately.
func runTests(in integration.Integration, pairs []integration.IAPair) error {
return integration.ExecuteTimed(in.Name(), func() error {
// Make sure that all executed commands can write to the RPC server
// after shutdown.
defer time.Sleep(time.Second)
// Estimating the right timeout is hard. CI aborts after 10 minutes anyway,
// hence this value.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
// First run all servers
type srvResult struct {
cleaner func()
err error
}
// Start servers in parallel.
srvResults := make(chan srvResult)
for _, dst := range integration.ExtractUniqueDsts(pairs) {
go func(dst *snet.UDPAddr) {
defer log.HandlePanic()
srvCtx, cancel := context.WithCancel(ctx)
waiter, err := in.StartServer(srvCtx, dst)
if err != nil {
log.Error(fmt.Sprintf("Error in server: %s", dst.String()), "err", err)
}
cleaner := func() {
cancel()
if waiter != nil {
waiter.Wait()
}
}
srvResults <- srvResult{cleaner: cleaner, err: err}
}(dst)
}
// Wait for all servers to be started.
var errs serrors.List
for range integration.ExtractUniqueDsts(pairs) {
res := <-srvResults
// We need to register a cleanup for all servers.
// Do not short-cut exit here.
if res.err != nil {
errs = append(errs, res.err)
}
defer res.cleaner()
}
if err := errs.ToError(); err != nil {
return err
}
// Start a done signal listener. This is how the end2end binary
// communicates with this integration test. This is solely used to print
// the progress of the test.
var ctrMtx sync.Mutex
var ctr int
socket, clean, err := integration.ListenDone(func(src, dst addr.IA) {
ctrMtx.Lock()
defer ctrMtx.Unlock()
ctr++
testInfo := fmt.Sprintf("%v -> %v (%v/%v)", src, dst, ctr, len(pairs))
log.Info(fmt.Sprintf("Test %v: %s", in.Name(), testInfo))
})
if err != nil {
return err
}
defer clean()
// CI collapses if parallelism is too high.
semaphore := make(chan struct{}, parallelism)
// Docker exec comes with a 1-second overhead, so we group the pairs by client
// and run all pairs for a given client in one execution, reducing the overhead
// dramatically.
groups := integration.GroupBySource(pairs)
clientResults := make(chan error, len(groups))
for src, dsts := range groups {
go func(src *snet.UDPAddr, dsts []*snet.UDPAddr) {
defer log.HandlePanic()
semaphore <- struct{}{}
defer func() { <-semaphore }()
// Aggregate all the commands that need to be run.
cmds := make([]integration.Cmd, 0, len(dsts))
for _, dst := range dsts {
cmd, err := clientTemplate(socket).Template(src, dst)
if err != nil {
clientResults <- err
return
}
cmds = append(cmds, cmd)
}
var tester string
if *integration.Docker {
tester = integration.TesterID(src)
}
logFile := fmt.Sprintf("%s/client_%s.log", logDir(), src.IA.FileFmt(false))
err := integration.Run(ctx, integration.RunConfig{
Commands: cmds,
LogFile: logFile,
Tester: tester,
})
if err != nil {
err = serrors.WithCtx(err, "file", logFile)
}
clientResults <- err
}(src, dsts)
}
errs = nil
for range groups {
err := <-clientResults
if err != nil {
errs = append(errs, err)
}
}
return errs.ToError()
})
}
func clientTemplate(progressSock string) integration.Cmd {
bin, progress := getCmd()
cmd := integration.Cmd{
Binary: bin,
Args: []string{
"-log.console", "debug",
"-attempts", strconv.Itoa(attempts),
"-timeout", timeout.String(),
"-sciond", integration.Daemon,
"-local", integration.SrcAddrPattern + ":0",
"-remote", integration.DstAddrPattern + ":" + integration.ServerPortReplace,
},
}
if len(features) != 0 {
cmd.Args = append(cmd.Args, "--features", features)
}
if progress {
cmd.Args = append(cmd.Args, "-progress", progressSock)
}
return cmd
}
// getPairs returns the pairs to test according to the specified subset.
func getPairs() ([]integration.IAPair, error) {
pairs := integration.IAPairs(integration.DispAddr)
if subset == "all" {
return pairs, nil
}
parts := strings.Split(subset, "#")
if len(parts) != 2 {
return nil, serrors.New("Invalid subset", "subset", subset)
}
return filter(parts[0], parts[1], pairs, integration.ASList), nil
}
// filter returns the pairs whose ASes are part of the desired subset.
func filter(src, dst string, pairs []integration.IAPair, ases *util.ASList) []integration.IAPair {
var res []integration.IAPair
s, err1 := addr.IAFromString(src)
d, err2 := addr.IAFromString(dst)
if err1 == nil && err2 == nil {
for _, pair := range pairs {
if pair.Src.IA.Equal(s) && pair.Dst.IA.Equal(d) {
res = append(res, pair)
return res
}
}
}
for _, pair := range pairs {
filter := !contains(ases, src != "noncore", pair.Src.IA)
filter = filter || !contains(ases, dst != "noncore", pair.Dst.IA)
if dst == "localcore" {
filter = filter || pair.Src.IA.I != pair.Dst.IA.I
}
if !filter {
res = append(res, pair)
}
}
return res
}
func contains(ases *util.ASList, core bool, ia addr.IA) bool {
l := ases.Core
if !core {
l = ases.NonCore
}
for _, as := range l {
if ia.Equal(as) {
return true
}
}
return false
}
func logDir() string {
return filepath.Join(integration.LogDir(), name)
}
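
The subset-selection rules implemented by getPairs, filter and contains above can be illustrated with a small, self-contained Python sketch. The AS identifiers below are invented; only the core/noncore matching and the same-ISD requirement for "localcore" are taken from the source, and filter's exact src#dst IA short-circuit is omitted.

CORE = {"1-ff00:0:110", "2-ff00:0:210"}
NONCORE = {"1-ff00:0:111", "2-ff00:0:211"}

def isd(ia):
    # the ISD number is the part before the dash, mirroring pair.Src.IA.I
    return ia.split("-")[0]

def select_pairs(src_kind, dst_kind, pairs):
    """Keep (src, dst) pairs whose endpoints match the requested kinds.

    dst_kind == "localcore" matches core destinations in the same ISD as src.
    """
    want_src = NONCORE if src_kind == "noncore" else CORE
    want_dst = NONCORE if dst_kind == "noncore" else CORE
    result = []
    for src, dst in pairs:
        if src not in want_src or dst not in want_dst:
            continue
        if dst_kind == "localcore" and isd(src) != isd(dst):
            continue
        result.append((src, dst))
    return result

if __name__ == "__main__":
    pairs = [(s, d) for s in CORE | NONCORE for d in CORE | NONCORE if s != d]
    # noncore#localcore: noncore sources to core destinations within their own ISD
    print(select_pairs("noncore", "localcore", pairs))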
|
apache-2.0
|
edde-framework/edde-framework
|
src/Edde/Api/Protocol/Request/IRequestService.php
|
743
|
<?php
declare(strict_types=1);
namespace Edde\Api\Protocol\Request;
use Edde\Api\Protocol\IElement;
use Edde\Api\Protocol\IProtocolHandler;
interface IRequestService extends IProtocolHandler {
/**
* @param IRequestHandler $requestHandler
*
* @return IRequestService
*/
public function registerRequestHandler(IRequestHandler $requestHandler): IRequestService;
/**
* return list of current responses
*
* @return IElement[]
*/
public function getResponseList(): array;
/**
* get the response for the given request; if the request was already executed, the existing response is returned
*
* @param IElement $element
*
* @return IElement
*/
public function request(IElement $element): IElement;
}
|
apache-2.0
|
onybo/TypeScript.Presentation
|
SampleProject/Scripts/services/HotkeysService.js
|
1169
|
var Services;
(function (Services) {
'use strict';
function listData() {
return {
getItems: ""
};
}
var HotkeysService = (function () {
function HotkeysService(hotkeys, $translate, $rootScope, $log) {
this.hotkeys = hotkeys;
this.hotkeys.add({
combo: 'ctrl+alt+e',
description: 'English',
callback: function () {
$log.debug('Switched to english');
$translate.use('en');
}
});
this.hotkeys.add({
combo: 'ctrl+alt+n',
description: 'Norwegian',
callback: function () {
$log.debug('Switched to norwegian');
$translate.use('no');
}
});
}
HotkeysService.$inject = [
'hotkeys',
'$translate',
'$rootScope',
'$log'
];
return HotkeysService;
})();
Services.HotkeysService = HotkeysService;
})(Services || (Services = {}));
//# sourceMappingURL=HotkeysService.js.map
|
apache-2.0
|
baldimir/drools
|
kie-dmn/kie-dmn-core/src/main/java/org/kie/dmn/core/impl/DMNRuntimeImpl.java
|
41982
|
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.dmn.core.impl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import javax.xml.namespace.QName;
import org.drools.core.impl.InternalKnowledgeBase;
import org.kie.api.runtime.KieRuntimeFactory;
import org.kie.dmn.api.core.DMNContext;
import org.kie.dmn.api.core.DMNDecisionResult;
import org.kie.dmn.api.core.DMNMessage;
import org.kie.dmn.api.core.DMNModel;
import org.kie.dmn.api.core.DMNResult;
import org.kie.dmn.api.core.DMNRuntime;
import org.kie.dmn.api.core.DMNType;
import org.kie.dmn.api.core.ast.BusinessKnowledgeModelNode;
import org.kie.dmn.api.core.ast.DMNNode;
import org.kie.dmn.api.core.ast.DecisionNode;
import org.kie.dmn.api.core.ast.DecisionServiceNode;
import org.kie.dmn.api.core.ast.InputDataNode;
import org.kie.dmn.api.core.event.BeforeEvaluateDecisionEvent;
import org.kie.dmn.api.core.event.DMNRuntimeEventListener;
import org.kie.dmn.core.api.DMNFactory;
import org.kie.dmn.core.api.EvaluatorResult;
import org.kie.dmn.core.ast.BusinessKnowledgeModelNodeImpl;
import org.kie.dmn.core.ast.DMNBaseNode;
import org.kie.dmn.core.ast.DMNDecisionServiceEvaluator;
import org.kie.dmn.core.ast.DMNFunctionWithReturnType;
import org.kie.dmn.core.ast.DecisionNodeImpl;
import org.kie.dmn.core.ast.DecisionServiceNodeImpl;
import org.kie.dmn.core.ast.InputDataNodeImpl;
import org.kie.dmn.core.compiler.DMNOption;
import org.kie.dmn.core.compiler.DMNProfile;
import org.kie.dmn.core.compiler.RuntimeTypeCheckOption;
import org.kie.dmn.core.util.Msg;
import org.kie.dmn.core.util.MsgUtil;
import org.kie.dmn.feel.runtime.FEELFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.kie.dmn.api.core.DMNDecisionResult.DecisionEvaluationStatus.EVALUATING;
import static org.kie.dmn.api.core.DMNDecisionResult.DecisionEvaluationStatus.FAILED;
import static org.kie.dmn.api.core.DMNDecisionResult.DecisionEvaluationStatus.SKIPPED;
public class DMNRuntimeImpl
implements DMNRuntime {
private static final Logger logger = LoggerFactory.getLogger( DMNRuntimeImpl.class );
private DMNRuntimeEventManagerImpl eventManager;
private final DMNRuntimeKB runtimeKB;
private boolean overrideRuntimeTypeCheck = false;
public DMNRuntimeImpl(DMNRuntimeKB runtimeKB) {
this.runtimeKB = runtimeKB != null ? runtimeKB : new VoidDMNRuntimeKB();
this.eventManager = new DMNRuntimeEventManagerImpl();
for (DMNRuntimeEventListener listener : this.runtimeKB.getListeners()) {
this.addListener(listener);
}
}
@Override
public List<DMNModel> getModels() {
return runtimeKB.getModels();
}
@Override
public DMNModel getModel(String namespace, String modelName) {
Objects.requireNonNull(namespace, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "namespace"));
Objects.requireNonNull(modelName, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "modelName"));
return runtimeKB.getModel(namespace, modelName);
}
@Override
public DMNModel getModelById(String namespace, String modelId) {
Objects.requireNonNull(namespace, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "namespace"));
Objects.requireNonNull(modelId, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "modelId"));
return runtimeKB.getModelById(namespace, modelId);
}
@Override
public DMNResult evaluateAll(DMNModel model, DMNContext context) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
boolean performRuntimeTypeCheck = performRuntimeTypeCheck(model);
DMNResultImpl result = createResult( model, context );
DMNRuntimeEventManagerUtils.fireBeforeEvaluateAll( eventManager, model, result );
// the engine should evaluate all Decisions belonging to the "local" model namespace, not imported decisions explicitly.
Set<DecisionNode> decisions = model.getDecisions().stream().filter(d -> d.getModelNamespace().equals(model.getNamespace())).collect(Collectors.toSet());
for( DecisionNode decision : decisions ) {
evaluateDecision(context, result, decision, performRuntimeTypeCheck);
}
DMNRuntimeEventManagerUtils.fireAfterEvaluateAll( eventManager, model, result );
return result;
}
@Override
@Deprecated
public DMNResult evaluateDecisionByName(DMNModel model, String decisionName, DMNContext context) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(decisionName, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "decisionName"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
return evaluateByName(model, context, decisionName);
}
@Override
@Deprecated
public DMNResult evaluateDecisionById(DMNModel model, String decisionId, DMNContext context) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(decisionId, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "decisionId"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
return evaluateById(model, context, decisionId);
}
@Override
public DMNResult evaluateByName( DMNModel model, DMNContext context, String... decisionNames ) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
Objects.requireNonNull(decisionNames, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "decisionNames"));
if (decisionNames.length == 0) {
throw new IllegalArgumentException(MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_EMPTY, "decisionNames"));
}
final DMNResultImpl result = createResult( model, context );
for (String name : decisionNames) {
evaluateByNameInternal( model, context, result, name );
}
return result;
}
/**
 * @param kieBaseName the name of the KieBase
 * @return the {@link KieRuntimeFactory} associated with the given KieBase name
 */
public KieRuntimeFactory getKieRuntimeFactory(String kieBaseName) {
return runtimeKB.getKieRuntimeFactory(kieBaseName);
}
private void evaluateByNameInternal( DMNModel model, DMNContext context, DMNResultImpl result, String name ) {
boolean performRuntimeTypeCheck = performRuntimeTypeCheck(model);
Optional<DecisionNode> decision = Optional.ofNullable(model.getDecisionByName(name));
if (decision.isPresent()) {
final boolean walkingIntoScope = walkIntoImportScopeInternalDecisionInvocation(result, model, decision.get());
evaluateDecision(context, result, decision.get(), performRuntimeTypeCheck);
if (walkingIntoScope) {
result.getContext().popScope();
}
} else {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
null,
result,
null,
null,
Msg.DECISION_NOT_FOUND_FOR_NAME,
name );
}
}
@Override
public DMNResult evaluateById( DMNModel model, DMNContext context, String... decisionIds ) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
Objects.requireNonNull(decisionIds, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "decisionIds"));
if (decisionIds.length == 0) {
throw new IllegalArgumentException(MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_EMPTY, "decisionIds"));
}
final DMNResultImpl result = createResult( model, context );
for ( String id : decisionIds ) {
evaluateByIdInternal( model, context, result, id );
}
return result;
}
private void evaluateByIdInternal( DMNModel model, DMNContext context, DMNResultImpl result, String id ) {
boolean performRuntimeTypeCheck = performRuntimeTypeCheck(model);
Optional<DecisionNode> decision = Optional.ofNullable(model.getDecisionById(id));
if (decision.isPresent()) {
final boolean walkingIntoScope = walkIntoImportScopeInternalDecisionInvocation(result, model, decision.get());
evaluateDecision(context, result, decision.get(), performRuntimeTypeCheck);
if (walkingIntoScope) {
result.getContext().popScope();
}
} else {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
null,
result,
null,
null,
Msg.DECISION_NOT_FOUND_FOR_ID,
id );
}
}
@Override
public void addListener(DMNRuntimeEventListener listener) {
this.eventManager.addListener( listener );
}
@Override
public void removeListener(DMNRuntimeEventListener listener) {
this.eventManager.removeListener( listener );
}
@Override
public boolean hasListeners() {
return this.eventManager.hasListeners();
}
@Override
public Set<DMNRuntimeEventListener> getListeners() {
return this.eventManager.getListeners();
}
private DMNResultImpl createResult(DMNModel model, DMNContext context) {
DMNResultImpl result = createResultImpl(model, context);
for (DecisionNode decision : model.getDecisions().stream().filter(d -> d.getModelNamespace().equals(model.getNamespace())).collect(Collectors.toSet())) {
result.addDecisionResult(new DMNDecisionResultImpl(decision.getId(), decision.getName()));
}
return result;
}
private DMNResultImpl createResultImpl(DMNModel model, DMNContext context) {
DMNResultImpl result = new DMNResultImpl(model);
result.setContext(context.clone()); // DMNContextFPAImpl.clone() creates DMNContextImpl
return result;
}
@Override
public DMNResult evaluateDecisionService(DMNModel model, DMNContext context, String decisionServiceName) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
Objects.requireNonNull(context, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "context"));
Objects.requireNonNull(decisionServiceName, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "decisionServiceName"));
boolean typeCheck = performRuntimeTypeCheck(model);
DMNResultImpl result = createResultImpl(model, context);
// the engine should evaluate only decision services belonging to the "local" model namespace, not imported nodes explicitly.
Optional<DecisionServiceNode> lookupDS = ((DMNModelImpl) model).getDecisionServices().stream()
.filter(d -> d.getModelNamespace().equals(model.getNamespace()))
.filter(ds -> ds.getName().equals(decisionServiceName))
.findFirst();
if (lookupDS.isPresent()) {
DecisionServiceNodeImpl decisionService = (DecisionServiceNodeImpl) lookupDS.get();
for (DMNNode dep : decisionService.getInputParameters().values()) {
if (!isNodeValueDefined(result, decisionService, dep)) {
DMNMessage message = MsgUtil.reportMessage(logger,
DMNMessage.Severity.WARN,
decisionService.getSource(),
result,
null,
null,
Msg.REQ_INPUT_NOT_FOUND_FOR_DS,
getDependencyIdentifier(decisionService, dep),
getIdentifier(decisionService));
final boolean walkingIntoScope = walkIntoImportScope(result, decisionService, dep);
result.getContext().set(dep.getName(), null);
if (walkingIntoScope) {
result.getContext().popScope();
}
} else {
final boolean walkingIntoScope = walkIntoImportScope(result, decisionService, dep);
final Object originalValue = result.getContext().get(dep.getName());
DMNType depType = ((DMNModelImpl) model).getTypeRegistry().unknown();
if (dep instanceof InputDataNode) {
depType = ((InputDataNode) dep).getType();
} else if (dep instanceof DecisionNode) {
depType = ((DecisionNode) dep).getResultType();
}
Object c = coerceUsingType(originalValue,
depType,
typeCheck,
(r, t) -> MsgUtil.reportMessage(logger,
DMNMessage.Severity.WARN,
decisionService.getDecisionService(),
result,
null,
null,
Msg.PARAMETER_TYPE_MISMATCH_DS,
dep.getName(),
t,
MsgUtil.clipString(r.toString(), 50)));
if (c != originalValue) { //intentional by-reference
result.getContext().set(dep.getName(), c);
}
if (walkingIntoScope) {
result.getContext().popScope();
}
}
}
EvaluatorResult evaluate = new DMNDecisionServiceEvaluator(decisionService, true, false).evaluate(this, result); // note: singleton output coercion has no effect when a decision service is invoked directly on a model.
} else {
MsgUtil.reportMessage(logger,
DMNMessage.Severity.ERROR,
null,
result,
null,
null,
Msg.DECISION_SERVICE_NOT_FOUND_FOR_NAME,
decisionServiceName);
}
return result;
}
private void evaluateDecisionService(DMNContext context, DMNResultImpl result, DecisionServiceNode d, boolean typeCheck) {
DecisionServiceNodeImpl ds = (DecisionServiceNodeImpl) d;
if (isNodeValueDefined(result, ds, ds)) {
// already resolved
return;
}
// Note: a Decision Service is always expected to have an evaluator; it does not depend on an XML expression and is always created by the compiler.
try {
// a Decision Service evaluated as a function does not require any dependency check, as the dependencies will be passed as params.
EvaluatorResult er = ds.getEvaluator().evaluate(this, result);
if (er.getResultType() == EvaluatorResult.ResultType.SUCCESS) {
FEELFunction resultFn = (FEELFunction) er.getResult();
result.getContext().set(ds.getName(), resultFn);
}
} catch (Throwable t) {
MsgUtil.reportMessage(logger,
DMNMessage.Severity.ERROR,
ds.getSource(),
result,
t,
null,
Msg.ERROR_EVAL_DS_NODE,
getIdentifier(ds),
t.getMessage());
}
}
private void evaluateBKM(DMNContext context, DMNResultImpl result, BusinessKnowledgeModelNode b, boolean typeCheck) {
BusinessKnowledgeModelNodeImpl bkm = (BusinessKnowledgeModelNodeImpl) b;
if (isNodeValueDefined(result, bkm, bkm)) {
// already resolved
// TODO: do we need to check that the defined variable is a function, as it should be?
return;
}
// TODO: do we need to check/resolve dependencies?
if( bkm.getEvaluator() == null ) {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.WARN,
bkm.getSource(),
result,
null,
null,
Msg.MISSING_EXPRESSION_FOR_BKM,
getIdentifier( bkm ) );
return;
}
try {
DMNRuntimeEventManagerUtils.fireBeforeEvaluateBKM( eventManager, bkm, result );
for( DMNNode dep : bkm.getDependencies().values() ) {
if (typeCheck && !checkDependencyValueIsValid(dep, result)) {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
((DMNBaseNode) dep).getSource(),
result,
null,
null,
Msg.ERROR_EVAL_NODE_DEP_WRONG_TYPE,
getIdentifier( bkm ),
getDependencyIdentifier(bkm, dep),
MsgUtil.clipString(Objects.toString(result.getContext().get(dep.getName())), 50),
((DMNBaseNode) dep).getType()
);
return;
}
if (!isNodeValueDefined(result, bkm, dep)) {
boolean walkingIntoScope = walkIntoImportScope(result, bkm, dep);
if( dep instanceof BusinessKnowledgeModelNode ) {
evaluateBKM(context, result, (BusinessKnowledgeModelNode) dep, typeCheck);
} else if (dep instanceof DecisionServiceNode) {
evaluateDecisionService(context, result, (DecisionServiceNode) dep, typeCheck);
} else {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
bkm.getSource(),
result,
null,
null,
Msg.REQ_DEP_NOT_FOUND_FOR_NODE,
getDependencyIdentifier(bkm, dep),
getIdentifier( bkm )
);
return;
}
if (walkingIntoScope) {
result.getContext().popScope();
}
}
}
EvaluatorResult er = bkm.getEvaluator().evaluate( this, result );
if( er.getResultType() == EvaluatorResult.ResultType.SUCCESS ) {
final FEELFunction original_fn = (FEELFunction) er.getResult();
FEELFunction resultFn = original_fn;
if (typeCheck) {
DMNType resultType = b.getResultType();
resultFn = new DMNFunctionWithReturnType(original_fn, resultType, result, b);
}
result.getContext().set(bkm.getBusinessKnowledModel().getVariable().getName(), resultFn);
}
} catch( Throwable t ) {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
bkm.getSource(),
result,
t,
null,
Msg.ERROR_EVAL_BKM_NODE,
getIdentifier( bkm ),
t.getMessage() );
} finally {
DMNRuntimeEventManagerUtils.fireAfterEvaluateBKM( eventManager, bkm, result );
}
}
public static Object coerceUsingType(Object value, DMNType type, boolean typeCheck, BiConsumer<Object, DMNType> nullCallback) {
Object result = value;
if (!type.isCollection() && value instanceof Collection && ((Collection<?>) value).size() == 1) {
// unwrap a singleton collection to its single element, as per Decision evaluation result handling.
result = ((Collection<?>) value).toArray()[0];
}
if (!typeCheck) {
return result;
}
if (type.isAssignableValue(result)) {
return result;
} else {
nullCallback.accept(value, type);
return null;
}
}
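// For illustration: a singleton collection such as Arrays.asList(47) is unwrapped to
// its single element (the DMN "a = [a]" equivalence) regardless of typeCheck; with
// typeCheck enabled, a value that then fails type.isAssignableValue(result) triggers
// nullCallback and the method returns null instead.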
private boolean isNodeValueDefined(DMNResultImpl result, DMNNode callerNode, DMNNode node) {
if (node.getModelNamespace().equals(result.getContext().scopeNamespace().orElse(result.getModel().getNamespace()))) {
return result.getContext().isDefined(node.getName());
} else {
Optional<String> importAlias = callerNode.getModelImportAliasFor(node.getModelNamespace(), node.getModelName());
if (importAlias.isPresent()) {
Object aliasContext = result.getContext().get(importAlias.get());
if (aliasContext != null && (aliasContext instanceof Map<?, ?>)) {
Map<?, ?> map = (Map<?, ?>) aliasContext;
return map.containsKey(node.getName());
}
}
return false;
}
}
private boolean walkIntoImportScopeInternalDecisionInvocation(DMNResultImpl result, DMNModel dmnModel, DMNNode destinationNode) {
if (destinationNode.getModelNamespace().equals(dmnModel.getNamespace())) {
return false;
} else {
DMNModelImpl model = (DMNModelImpl) dmnModel;
Optional<String> importAlias = model.getImportAliasFor(destinationNode.getModelNamespace(), destinationNode.getModelName());
if (importAlias.isPresent()) {
result.getContext().pushScope(importAlias.get(), destinationNode.getModelNamespace());
return true;
} else {
MsgUtil.reportMessage(logger,
DMNMessage.Severity.ERROR,
dmnModel.getDefinitions(),
result,
null,
null,
Msg.IMPORT_NOT_FOUND_FOR_NODE_MISSING_ALIAS,
new QName(destinationNode.getModelNamespace(), destinationNode.getModelName()),
dmnModel.getName());
return false;
}
}
}
private boolean walkIntoImportScope(DMNResultImpl result, DMNNode callerNode, DMNNode destinationNode) {
if (!result.getContext().scopeNamespace().isPresent()) {
if (destinationNode.getModelNamespace().equals(result.getModel().getNamespace())) {
return false;
} else {
Optional<String> importAlias = callerNode.getModelImportAliasFor(destinationNode.getModelNamespace(), destinationNode.getModelName());
if (importAlias.isPresent()) {
result.getContext().pushScope(importAlias.get(), destinationNode.getModelNamespace());
return true;
} else {
MsgUtil.reportMessage(logger,
DMNMessage.Severity.ERROR,
((DMNBaseNode) callerNode).getSource(),
result,
null,
null,
Msg.IMPORT_NOT_FOUND_FOR_NODE_MISSING_ALIAS,
new QName(destinationNode.getModelNamespace(), destinationNode.getModelName()),
callerNode.getName()
);
return false;
}
}
} else {
if (destinationNode.getModelNamespace().equals(result.getContext().scopeNamespace().get())) {
return false;
} else {
Optional<String> importAlias = callerNode.getModelImportAliasFor(destinationNode.getModelNamespace(), destinationNode.getModelName());
if (importAlias.isPresent()) {
result.getContext().pushScope(importAlias.get(), destinationNode.getModelNamespace());
return true;
} else {
MsgUtil.reportMessage(logger,
DMNMessage.Severity.ERROR,
((DMNBaseNode) callerNode).getSource(),
result,
null,
null,
Msg.IMPORT_NOT_FOUND_FOR_NODE_MISSING_ALIAS,
new QName(destinationNode.getModelNamespace(), destinationNode.getModelName()),
callerNode.getName());
return false;
}
}
}
}
private boolean evaluateDecision(DMNContext context, DMNResultImpl result, DecisionNode d, boolean typeCheck) {
DecisionNodeImpl decision = (DecisionNodeImpl) d;
String decisionId = d.getModelNamespace().equals(result.getModel().getNamespace()) ? decision.getId() : decision.getModelNamespace() + "#" + decision.getId();
if (isNodeValueDefined(result, decision, decision)) {
// already resolved
return true;
} else {
// check if the decision was already evaluated before and returned an error
DMNDecisionResult.DecisionEvaluationStatus status = Optional.ofNullable(result.getDecisionResultById(decisionId))
.map(DMNDecisionResult::getEvaluationStatus)
.orElse(DMNDecisionResult.DecisionEvaluationStatus.NOT_EVALUATED); // it might be an imported Decision.
if ( FAILED == status || SKIPPED == status || EVALUATING == status ) {
return false;
}
}
BeforeEvaluateDecisionEvent beforeEvaluateDecisionEvent = null;
try {
beforeEvaluateDecisionEvent = DMNRuntimeEventManagerUtils.fireBeforeEvaluateDecision(eventManager, decision, result);
boolean missingInput = false;
DMNDecisionResultImpl dr = (DMNDecisionResultImpl) result.getDecisionResultById(decisionId);
if (dr == null) { // an imported Decision being evaluated now requires creating its decision result:
String decisionResultName = d.getName();
Optional<String> importAliasFor = ((DMNModelImpl) result.getModel()).getImportAliasFor(d.getModelNamespace(), d.getModelName());
if (importAliasFor.isPresent()) {
decisionResultName = importAliasFor.get() + "." + d.getName();
}
dr = new DMNDecisionResultImpl(decisionId, decisionResultName);
if (importAliasFor.isPresent()) { // otherwise it is transitive and skipped, not added to the results:
result.addDecisionResult(dr);
}
}
dr.setEvaluationStatus(DMNDecisionResult.DecisionEvaluationStatus.EVALUATING);
for( DMNNode dep : decision.getDependencies().values() ) {
try {
if (typeCheck && !checkDependencyValueIsValid(dep, result)) {
missingInput = true;
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
((DMNBaseNode) dep).getSource(),
result,
null,
null,
Msg.ERROR_EVAL_NODE_DEP_WRONG_TYPE,
getIdentifier( decision ),
getDependencyIdentifier(decision, dep),
MsgUtil.clipString(Objects.toString(result.getContext().get(dep.getName())), 50),
((DMNBaseNode) dep).getType()
);
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.SKIPPED );
}
} catch ( Exception e ) {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
((DMNBaseNode)dep).getSource(),
result,
e,
null,
Msg.ERROR_CHECKING_ALLOWED_VALUES,
getDependencyIdentifier(decision, dep),
e.getMessage() );
}
if (!isNodeValueDefined(result, decision, dep)) {
boolean walkingIntoScope = walkIntoImportScope(result, decision, dep);
if( dep instanceof DecisionNode ) {
if (!evaluateDecision(context, result, (DecisionNode) dep, typeCheck)) {
missingInput = true;
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
decision.getSource(),
result,
null,
null,
Msg.UNABLE_TO_EVALUATE_DECISION_REQ_DEP,
getIdentifier( decision ),
getDependencyIdentifier(decision, dep) );
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.SKIPPED );
}
} else if( dep instanceof BusinessKnowledgeModelNode ) {
evaluateBKM(context, result, (BusinessKnowledgeModelNode) dep, typeCheck);
} else if (dep instanceof DecisionServiceNode) {
evaluateDecisionService(context, result, (DecisionServiceNode) dep, typeCheck);
} else {
missingInput = true;
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
decision.getSource(),
result,
null,
null,
Msg.REQ_DEP_NOT_FOUND_FOR_NODE,
getDependencyIdentifier(decision, dep),
getIdentifier( decision )
);
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.SKIPPED );
}
if (walkingIntoScope) {
result.getContext().popScope();
}
}
}
if( missingInput ) {
return false;
}
if( decision.getEvaluator() == null ) {
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.WARN,
decision.getSource(),
result,
null,
null,
Msg.MISSING_EXPRESSION_FOR_DECISION,
getIdentifier( decision ) );
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.SKIPPED );
return false;
}
try {
EvaluatorResult er = decision.getEvaluator().evaluate( this, result );
if( er.getResultType() == EvaluatorResult.ResultType.SUCCESS ) {
Object value = er.getResult();
if( ! decision.getResultType().isCollection() && value instanceof Collection &&
((Collection)value).size()==1 ) {
// spec defines that "a=[a]", i.e., singleton collections should be treated as the single element
// and vice-versa
value = ((Collection)value).toArray()[0];
}
try {
if (typeCheck && !d.getResultType().isAssignableValue(value)) {
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
decision.getSource(),
result,
null,
null,
Msg.ERROR_EVAL_NODE_RESULT_WRONG_TYPE,
getIdentifier( decision ),
decision.getResultType(),
value);
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.FAILED );
return false;
}
} catch ( Exception e ) {
MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
decision.getSource(),
result,
e,
null,
Msg.ERROR_CHECKING_ALLOWED_VALUES,
getIdentifier( decision ),
e.getMessage() );
return false;
}
result.getContext().set(decision.getDecision().getVariable().getName(), value);
dr.setResult( value );
dr.setEvaluationStatus( DMNDecisionResult.DecisionEvaluationStatus.SUCCEEDED );
} else {
dr.setEvaluationStatus( DMNDecisionResult.DecisionEvaluationStatus.FAILED );
return false;
}
} catch( Throwable t ) {
DMNMessage message = MsgUtil.reportMessage( logger,
DMNMessage.Severity.ERROR,
decision.getSource(),
result,
t,
null,
Msg.ERROR_EVAL_DECISION_NODE,
getIdentifier( decision ),
t.getMessage() );
reportFailure( dr, message, DMNDecisionResult.DecisionEvaluationStatus.FAILED );
}
return true;
} finally {
DMNRuntimeEventManagerUtils.fireAfterEvaluateDecision( eventManager, decision, result, beforeEvaluateDecisionEvent);
}
}
private boolean checkDependencyValueIsValid(DMNNode dep, DMNResultImpl result) {
if (dep instanceof InputDataNode) {
InputDataNodeImpl inputDataNode = (InputDataNodeImpl) dep;
BaseDMNTypeImpl dmnType = (BaseDMNTypeImpl) inputDataNode.getType();
return dmnType.isAssignableValue( result.getContext().get( dep.getName() ) );
}
// if the dependency is NOT an InputData, the type coherence was checked at evaluation result assignment.
return true;
}
private static String getIdentifier(DMNNode node) {
return node.getName() != null ? node.getName() : node.getId();
}
private static String getDependencyIdentifier(DMNNode callerNode, DMNNode node) {
if (node.getModelNamespace().equals(callerNode.getModelNamespace())) {
return getIdentifier(node);
} else {
Optional<String> importAlias = callerNode.getModelImportAliasFor(node.getModelNamespace(), node.getModelName());
String prefix = "{" + node.getModelNamespace() + "}";
if (importAlias.isPresent()) {
prefix = importAlias.get();
}
return prefix + "." + getIdentifier(node);
}
}
public boolean performRuntimeTypeCheck(DMNModel model) {
Objects.requireNonNull(model, () -> MsgUtil.createMessage(Msg.PARAM_CANNOT_BE_NULL, "model"));
return overrideRuntimeTypeCheck || ((DMNModelImpl) model).isRuntimeTypeCheck();
}
public final <T extends DMNOption> void setOption(T option) {
if (option instanceof RuntimeTypeCheckOption) {
this.overrideRuntimeTypeCheck = ((RuntimeTypeCheckOption) option).isRuntimeTypeCheck();
}
}
private void reportFailure(DMNDecisionResultImpl dr, DMNMessage message, DMNDecisionResult.DecisionEvaluationStatus status) {
dr.getMessages().add( message );
dr.setEvaluationStatus( status );
}
@Override
public DMNContext newContext() {
return DMNFactory.newContext();
}
@Override
public DMNRuntime getRuntime() {
return this;
}
public List<DMNProfile> getProfiles() {
return runtimeKB.getProfiles();
}
@Override
public ClassLoader getRootClassLoader() {
return runtimeKB.getRootClassLoader();
}
public InternalKnowledgeBase getInternalKnowledgeBase() {
return runtimeKB.getInternalKnowledgeBase();
}
}
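
The recursive, memoized evaluation strategy used by evaluateDecision and evaluateBKM above can be summarized in a few lines. The following is a minimal Python sketch under invented names: it models only the "resolve dependencies first, reuse values already defined in the context" behavior, and leaves out type checking, import scopes, error reporting and event firing.

def evaluate(name, model, context):
    if name in context:  # isNodeValueDefined: already resolved, reuse it
        return context[name]
    deps, fn = model[name]
    args = [evaluate(dep, model, context) for dep in deps]  # dependencies first
    context[name] = fn(*args)
    return context[name]

# A toy model: two inputs and one decision depending on both.
model = {
    "monthly income": ([], lambda: 5000),
    "monthly expenses": ([], lambda: 3200),
    "disposable income": (["monthly income", "monthly expenses"],
                          lambda inc, exp: inc - exp),
}
print(evaluate("disposable income", model, {}))  # 1800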
|
apache-2.0
|
Justice-love/tiger
|
tiger/src/test/java/tiger/test/deep/package-info.java
|
108
|
/**
 * @creatTime 11:44:46 AM
 * @author Eddy
 */
package tiger.test.deep;
|
apache-2.0
|
TransitHelper/TransitAngel
|
app/src/main/java/com/transitangel/transitangel/search/SearchActivity.java
|
5509
|
package com.transitangel.transitangel.search;
import android.content.Intent;
import android.graphics.Color;
import android.os.Bundle;
import android.support.design.widget.CoordinatorLayout;
import android.support.v4.view.MenuItemCompat;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.SearchView;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.TextView;
import com.transitangel.transitangel.Manager.BartTransitManager;
import com.transitangel.transitangel.Manager.CaltrainTransitManager;
import com.transitangel.transitangel.R;
import com.transitangel.transitangel.model.Transit.Stop;
import com.transitangel.transitangel.model.Transit.Train;
import java.util.ArrayList;
import butterknife.BindView;
import butterknife.ButterKnife;
public class SearchActivity extends AppCompatActivity implements SearchAdapter.OnItemClickListener, SearchView.OnQueryTextListener {
private static final String TAG = SearchActivity.class.getSimpleName();
public static final String EXTRA_SELECTED_STATION = TAG + ".EXTRA_SELECTED_STATION";
public static final String EXTRA_MODE = TAG + ".EXTRA_MODE";
private static final String EXTRA_TRAIN_INFO = TAG + ".EXTRA_TRAIN_INFO";
public static final String EXTRA_SERVICE = TAG + ".EXTRA_SERVICE";
public static final String EXTRA_SERVICE_BART = TAG + ".EXTRA_SERVICE_BART";
public static final String EXTRA_SERVICE_CALTRAIN = TAG + ".EXTRA_SERVICE_CALTRAIN";
public static final String EXTRA_FROM = TAG + ".EXTRA_FROM";
public static final int MODE_TYPE_SEARCH = 1;
public static final int MODE_TYPE_DETAILS = 2;
@BindView(R.id.tvTitle)
TextView tvTitle;
@BindView(R.id.toolbar)
Toolbar toolbar;
@BindView(R.id.rvStationList)
RecyclerView rvStationList;
@BindView(R.id.clMainContent)
CoordinatorLayout clMainContent;
private ArrayList<Stop> mStops;
private Train train;
private SearchAdapter adapter;
private String serviceType;
private int mode;
private boolean isFrom;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_search2);
ButterKnife.bind(this);
init();
}
private void init() {
mode = getIntent().getIntExtra(EXTRA_MODE, MODE_TYPE_SEARCH);
serviceType = getIntent().getStringExtra(EXTRA_SERVICE);
isFrom = getIntent().getBooleanExtra(EXTRA_FROM, false);
setSupportActionBar(toolbar);
if (EXTRA_SERVICE_CALTRAIN.equalsIgnoreCase(serviceType)) {
mStops = CaltrainTransitManager.getSharedInstance().getStops();
if(isFrom) {
getSupportActionBar().setTitle(getString(R.string.search_from_title, "Caltrain"));
} else {
getSupportActionBar().setTitle(getString(R.string.search_to_title, "Caltrain"));
}
} else {
mStops = BartTransitManager.getSharedInstance().getStops();
if(isFrom) {
getSupportActionBar().setTitle(getString(R.string.search_from_title, "Bart"));
} else {
getSupportActionBar().setTitle(getString(R.string.search_to_title, "Bart"));
}
}
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
// Create the recents adapter.
adapter = new SearchAdapter(this, mStops);
rvStationList.setAdapter(adapter);
rvStationList.setLayoutManager(new LinearLayoutManager(this));
adapter.setOnItemClickListener(this);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.menu_search, menu);
final MenuItem searchItem = menu.findItem(R.id.action_search);
final SearchView searchView = (SearchView) MenuItemCompat.getActionView(searchItem);
searchView.setOnQueryTextListener(this);
// Use a custom search icon for the SearchView in AppBar
int searchImgId = android.support.v7.appcompat.R.id.search_button;
ImageView v = (ImageView) searchView.findViewById(searchImgId);
v.setImageResource(R.drawable.search);
// Customize searchview text and hint colors
int searchEditId = android.support.v7.appcompat.R.id.search_src_text;
EditText et = (EditText) searchView.findViewById(searchEditId);
et.setTextColor(Color.WHITE);
et.setHintTextColor(Color.WHITE);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
finish();
return true;
}
return super.onOptionsItemSelected(item);
}
@Override
public void onItemClick(int position) {
Intent resultIntent = new Intent();
resultIntent.putExtra(EXTRA_SELECTED_STATION, adapter.getItem(position));
setResult(RESULT_OK, resultIntent);
finish();
}
@Override
public boolean onQueryTextSubmit(String query) {
adapter.setFilter(query);
return false;
}
@Override
public boolean onQueryTextChange(String newText) {
adapter.setFilter(newText);
return false;
}
}
|
apache-2.0
|
aws/aws-sdk-java
|
aws-java-sdk-nimblestudio/src/main/java/com/amazonaws/services/nimblestudio/waiters/StudioComponentDeleted.java
|
4452
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.nimblestudio.waiters;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.waiters.WaiterAcceptor;
import com.amazonaws.waiters.WaiterState;
import com.amazonaws.waiters.AcceptorPathMatcher;
import com.amazonaws.services.nimblestudio.model.*;
import com.fasterxml.jackson.databind.JsonNode;
import com.amazonaws.jmespath.*;
import java.io.IOException;
import javax.annotation.Generated;
@SdkInternalApi
@Generated("com.amazonaws:aws-java-sdk-code-generator")
class StudioComponentDeleted {
static class IsDELETEDMatcher extends WaiterAcceptor<GetStudioComponentResult> {
private static final JsonNode expectedResult;
static {
try {
expectedResult = ObjectMapperSingleton.getObjectMapper().readTree("\"DELETED\"");
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static final JmesPathExpression ast = new JmesPathSubExpression(new JmesPathField("studioComponent"), new JmesPathField("state"));
/**
* Takes the result and determines whether the state of the resource matches the expected state. To determine
* the current state of the resource, JmesPath expression is evaluated and compared against the expected result.
*
* @param result
* Corresponding result of the operation
* @return True if current state of the resource matches the expected state, False otherwise
*/
@Override
public boolean matches(GetStudioComponentResult result) {
JsonNode queryNode = ObjectMapperSingleton.getObjectMapper().valueToTree(result);
JsonNode finalResult = ast.accept(new JmesPathEvaluationVisitor(), queryNode);
return AcceptorPathMatcher.path(expectedResult, finalResult);
}
/**
* Represents the current waiter state in the case where resource state matches the expected state
*
* @return Corresponding state of the waiter
*/
@Override
public WaiterState getState() {
return WaiterState.SUCCESS;
}
}
static class IsDELETE_FAILEDMatcher extends WaiterAcceptor<GetStudioComponentResult> {
private static final JsonNode expectedResult;
static {
try {
expectedResult = ObjectMapperSingleton.getObjectMapper().readTree("\"DELETE_FAILED\"");
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static final JmesPathExpression ast = new JmesPathSubExpression(new JmesPathField("studioComponent"), new JmesPathField("state"));
/**
* Takes the result and determines whether the state of the resource matches the expected state. To determine
* the current state of the resource, JmesPath expression is evaluated and compared against the expected result.
*
* @param result
* Corresponding result of the operation
* @return True if current state of the resource matches the expected state, False otherwise
*/
@Override
public boolean matches(GetStudioComponentResult result) {
JsonNode queryNode = ObjectMapperSingleton.getObjectMapper().valueToTree(result);
JsonNode finalResult = ast.accept(new JmesPathEvaluationVisitor(), queryNode);
return AcceptorPathMatcher.path(expectedResult, finalResult);
}
/**
* Represents the current waiter state in the case where resource state matches the expected state
*
* @return Corresponding state of the waiter
*/
@Override
public WaiterState getState() {
return WaiterState.FAILURE;
}
}
}
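
The two generated matchers above plug into the SDK's generic waiter loop: poll the operation, run each acceptor against the result, and stop on the first SUCCESS or FAILURE match. As a rough illustration of that pattern (not the SDK's actual implementation; all names and the polling policy here are invented), in Python:

import time

SUCCESS, FAILURE = "SUCCESS", "FAILURE"

def wait(poll, acceptors, delay=5.0, max_attempts=60):
    """Poll until an acceptor matches, mirroring the SUCCESS/FAILURE waiter states."""
    for _ in range(max_attempts):
        result = poll()
        for matches, state in acceptors:
            if matches(result):
                if state == FAILURE:
                    raise RuntimeError("waiter reached failure state")
                return result
        time.sleep(delay)
    raise TimeoutError("waiter timed out")

# Acceptors analogous to IsDELETEDMatcher / IsDELETE_FAILEDMatcher above:
acceptors = [
    (lambda r: r["studioComponent"]["state"] == "DELETED", SUCCESS),
    (lambda r: r["studioComponent"]["state"] == "DELETE_FAILED", FAILURE),
]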
|
apache-2.0
|
labsai/EDDI
|
apiserver/src/main/resources/js/jquery.urldecoder.edited.js
|
12649
|
/** --------------------------------------------------------------------------
* jQuery URL Decoder
* Version 1.0
* Parses URL and return its components. Can also build URL from components
*
* ---------------------------------------------------------------------------
* HOW TO USE:
*
* $.url.decode('http://username:password@hostname/path?arg1=value%40+1&arg2=touch%C3%A9#anchor')
* // returns
* // http://username:password@hostname/path?arg1=value@ 1&arg2=touché#anchor
* // Note: "%40" is replaced with "@", "+" is replaced with " " and "%C3%A9" is replaced with "é"
*
* $.url.encode('file.htm?arg1=value1 @#456&arg2=value2 touché')
* // returns
* // file.htm%3Farg1%3Dvalue1%20%40%23456%26arg2%3Dvalue2%20touch%C3%A9
* // Note: "@" is replaced with "%40" and "é" is replaced with "%C3%A9"
*
* $.url.parse('http://username:password@hostname/path?arg1=value%40+1&arg2=touch%C3%A9#anchor')
* // returns
* {
* source: 'http://username:password@hostname/path?arg1=value%40+1&arg2=touch%C3%A9#anchor',
* protocol: 'http',
* authority: 'username:password@hostname',
* userInfo: 'username:password',
* user: 'username',
* password: 'password',
* host: 'hostname',
* port: '',
* path: '/path',
* directory: '/path',
* file: '',
* relative: '/path?arg1=value%40+1&arg2=touch%C3%A9#anchor',
* query: 'arg1=value%40+1&arg2=touch%C3%A9',
* anchor: 'anchor',
* params: {
* 'arg1': 'value@ 1',
* 'arg2': 'touché'
* }
* }
*
* $.url.build({
* protocol: 'http',
* username: 'username',
* password: 'password',
* host: 'hostname',
* path: '/path',
* query: 'arg1=value%40+1&arg2=touch%C3%A9',
* // or
* //params: {
* // 'arg1': 'value@ 1',
* // 'arg2': 'touché'
* //}
* anchor: 'anchor',
* })
* // returns
* // http://username:password@hostname/path?arg1=value%40+1&arg2=touch%C3%A9#anchor
*
* ---------------------------------------------------------------------------
* OTHER PARTIES' CODE:
*
* Parser based on the Regex-based URI parser by Steven Levithan.
* For more information visit http://blog.stevenlevithan.com/archives/parseuri
*
* Deparam taken from jQuery BBQ by Ben Alman. Dual licensed under the MIT and GPL licenses (http://benalman.com/about/license/)
* http://benalman.com/projects/jquery-bbq-plugin/
*
* ---------------------------------------------------------------------------
*/
jQuery.url = function () {
/**
* private function to encode URL
*
* @param {String} string //required
* @return {String}
*/
function utf8_encode(string) {
string = string.replace(/\r\n/g, "\n");
var utftext = "";
for (var n = 0; n < string.length; n++) {
var c = string.charCodeAt(n);
if (c < 128) {
utftext += String.fromCharCode(c);
}
else if ((c > 127) && (c < 2048)) {
utftext += String.fromCharCode((c >> 6) | 192);
utftext += String.fromCharCode((c & 63) | 128);
}
else {
utftext += String.fromCharCode((c >> 12) | 224);
utftext += String.fromCharCode(((c >> 6) & 63) | 128);
utftext += String.fromCharCode((c & 63) | 128);
}
}
return utftext;
}
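// For illustration: "é" (code point 233) takes the two-byte branch above:
// (233 >> 6) | 192 = 195 (0xC3) and (233 & 63) | 128 = 169 (0xA9), i.e. the
// %C3%A9 escape shown in the usage examples at the top of this file.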
/**
* private function to decode URL
*
* @param {String} utftext //required
* @return {String}
*/
function utf8_decode(utftext) {
var string = "";
var i = 0;
var c = 0;
var c2 = 0;
var c3 = 0;
while (i < utftext.length) {
c = utftext.charCodeAt(i);
if (c < 128) {
string += String.fromCharCode(c);
i++;
}
else if ((c > 191) && (c < 224)) {
c2 = utftext.charCodeAt(i + 1);
string += String.fromCharCode(((c & 31) << 6) | (c2 & 63));
i += 2;
}
else {
c2 = utftext.charCodeAt(i + 1);
c3 = utftext.charCodeAt(i + 2);
string += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63));
i += 3;
}
}
return string;
}
/**
* private function to convert urlencoded query string to javascript object
*
* @param {String} params //required
* @param {Boolean} coerce //optional
* @return {Object}
*
* @author Ben Alman
*/
function deparam(params, coerce) {
var obj = {},
coerce_types = {
'true': !0,
'false': !1,
'null': null
};
// Iterate over all name=value pairs.
$.each(params.replace(/\+/g, ' ').split('&'), function (j, v) {
var param = v.split('=');
/* edited: hd: to avoid false query params added to the params array. */
if (param.length !== 2 || !param[0] || !param[1]) {
return true;
}
var key = decode(param[0]),
val, cur = obj,
i = 0,
// If key is more complex than 'foo', like 'a[]' or 'a[b][c]', split it
// into its component parts.
keys = key.split(']['),
keys_last = keys.length - 1;
// If the first keys part contains [ and the last ends with ], then []
// are correctly balanced.
if (/\[/.test(keys[0]) && /\]$/.test(keys[keys_last])) {
// Remove the trailing ] from the last keys part.
keys[keys_last] = keys[keys_last].replace(/\]$/, '');
// Split first keys part into two parts on the [ and add them back onto
// the beginning of the keys array.
keys = keys.shift().split('[').concat(keys);
keys_last = keys.length - 1;
} else {
// Basic 'foo' style key.
keys_last = 0;
}
// Are we dealing with a name=value pair, or just a name?
if (param.length === 2) {
val = decode(param[1]);
// Coerce values.
if (coerce) {
val = val && !isNaN(val) ? +val // number
: val === 'undefined' ? undefined // undefined
: coerce_types[val] !== undefined ? coerce_types[val] // true, false, null
: val; // string
}
if (keys_last) {
// Complex key, build deep object structure based on a few rules:
// * The 'cur' pointer starts at the object top-level.
// * [] = array push (n is set to array length), [n] = array if n is
// numeric, otherwise object.
// * If at the last keys part, set the value.
// * For each keys part, if the current level is undefined create an
// object or array based on the type of the next keys part.
// * Move the 'cur' pointer to the next level.
// * Rinse & repeat.
for (; i <= keys_last; i++) {
key = keys[i] === '' ? cur.length : keys[i];
cur = cur[key] = i < keys_last ? cur[key] || (keys[i + 1] && isNaN(keys[i + 1]) ? {} : []) : val;
}
} else {
// Simple key, even simpler rules, since only scalars and shallow
// arrays are allowed.
if ($.isArray(obj[key])) {
// val is already an array, so push on the next value.
obj[key].push(val);
} else if (obj[key] !== undefined) {
// val isn't an array, but since a second value has been specified,
// convert val into an array.
obj[key] = [obj[key], val];
} else {
// val is a scalar.
obj[key] = val;
}
}
} else if (key) {
// No value was defined, so set something meaningful.
obj[key] = coerce ? undefined : '';
}
});
return obj;
}
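/* For illustration, given the rules implemented above (coerce=true turns '1' into
 * the number 1 and 'true' into a boolean):
 * deparam('a=1&b[]=x&b[]=y&c[d]=true', true)
 * // => { a: 1, b: ['x', 'y'], c: { d: true } }
 */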
/**
* private function to parse URL to components
*
* @param {String} url_str //optional, if omitted the current location is used
* @return {Object}
*/
function parse(url_str) {
url_str = url_str || window.location;
/**
* @author of RegExp Steven Levithan
*/
var re = /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/;
var keys = ["source", "protocol", "authority", "userInfo", "user", "password", "host", "port", "relative", "path", "directory", "file", "query", "anchor"];
var m = re.exec(url_str);
var uri = {};
var i = keys.length;
while (i--) {
uri[keys[i]] = m[i] || "";
}
/*
uri.params = {};
uri.query.replace( /(?:^|&)([^&=]*)=?([^&]*)/g, function ( $0, $1, $2 ) {
if ($1) {
uri.params[decode($1)] = decode($2);
}
});
*/
if (uri.query) {
uri.params = deparam(uri.query, true);
}
return uri;
}
/**
* private function to build URL string from components
*
* @param {Object} url_obj //required
* @return {String}
*/
function build(url_obj) {
if (url_obj.source) {
return encodeURI(url_obj.source);
}
var resultArr = [];
if (url_obj.protocol) {
if (url_obj.protocol == 'file') {
resultArr.push('file:///');
} else if (url_obj.protocol == 'mailto') {
resultArr.push('mailto:');
} else {
resultArr.push(url_obj.protocol + '://');
}
}
if (url_obj.authority) {
resultArr.push(url_obj.authority);
} else {
if (url_obj.userInfo) {
resultArr.push(url_obj.userInfo + '@');
} else if (url_obj.user) {
resultArr.push(url_obj.user);
if (url_obj.password) {
resultArr.push(':' + url_obj.password);
}
resultArr.push('@');
}
if (url_obj.host) {
resultArr.push(url_obj.host);
if (url_obj.port) {
resultArr.push(':' + url_obj.port);
}
}
}
if (url_obj.path) {
resultArr.push(url_obj.path);
} else {
if (url_obj.directory) {
resultArr.push(url_obj.directory);
}
if (url_obj.file) {
resultArr.push(url_obj.file);
}
}
if (url_obj.query) {
resultArr.push('?' + url_obj.query);
} else if (url_obj.params) {
resultArr.push('?' + $.param(url_obj.params));
}
if (url_obj.anchor) {
resultArr.push('#' + url_obj.anchor);
}
return resultArr.join('');
}
/**
* wrapper around encoder
*
* @param {String} string //required
* @return {String}
*/
function encode(string) {
//return build(parse(string));
//return escape(utf8_encode(string));
return encodeURIComponent(string);
}
/**
* wrapper around decoder
*
* @param {String} string //optional, if omitted the current location is used
* @return {String}
*/
function decode(string) {
string = string || window.location.toString();
return utf8_decode(unescape(string.replace(/\+/g, ' ')));
}
/**
* public functions
*
* @see #encode
* @see #decode
* @see #parse
* @see #build
*
* @return {Object}
*/
return {
encode: encode,
decode: decode,
parse: parse,
build: build
};
}();
|
apache-2.0
|
noddi/druid
|
indexing-service/src/test/java/io/druid/indexing/common/task/RealtimeIndexTaskTest.java
|
36911
|
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.indexing.common.task;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.metamx.common.logger.Logger;
import com.metamx.emitter.EmittingLogger;
import com.metamx.emitter.core.LoggingEmitter;
import com.metamx.emitter.service.ServiceEmitter;
import com.metamx.metrics.MonitorScheduler;
import io.druid.client.cache.CacheConfig;
import io.druid.client.cache.MapCache;
import io.druid.concurrent.Execs;
import io.druid.data.input.Firehose;
import io.druid.data.input.FirehoseFactory;
import io.druid.data.input.InputRow;
import io.druid.data.input.MapBasedInputRow;
import io.druid.data.input.impl.InputRowParser;
import io.druid.indexing.common.SegmentLoaderFactory;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.TaskToolboxFactory;
import io.druid.indexing.common.TestUtils;
import io.druid.indexing.common.actions.LocalTaskActionClientFactory;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.actions.TaskActionToolbox;
import io.druid.indexing.common.config.TaskConfig;
import io.druid.indexing.common.config.TaskStorageConfig;
import io.druid.indexing.overlord.HeapMemoryTaskStorage;
import io.druid.indexing.overlord.IndexerMetadataStorageCoordinator;
import io.druid.indexing.overlord.TaskLockbox;
import io.druid.indexing.overlord.TaskStorage;
import io.druid.indexing.overlord.supervisor.SupervisorManager;
import io.druid.indexing.test.TestDataSegmentAnnouncer;
import io.druid.indexing.test.TestDataSegmentKiller;
import io.druid.indexing.test.TestDataSegmentPusher;
import io.druid.indexing.test.TestIndexerMetadataStorageCoordinator;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.java.util.common.ISE;
import io.druid.java.util.common.Pair;
import io.druid.java.util.common.StringUtils;
import io.druid.java.util.common.granularity.Granularities;
import io.druid.java.util.common.guava.Sequences;
import io.druid.java.util.common.parsers.ParseException;
import io.druid.metadata.EntryExistsException;
import io.druid.query.DefaultQueryRunnerFactoryConglomerate;
import io.druid.query.Druids;
import io.druid.query.IntervalChunkingQueryRunnerDecorator;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import io.druid.query.QueryRunnerFactory;
import io.druid.query.QueryRunnerFactoryConglomerate;
import io.druid.query.QueryToolChest;
import io.druid.query.QueryWatcher;
import io.druid.query.Result;
import io.druid.query.SegmentDescriptor;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;
import io.druid.query.timeseries.TimeseriesQuery;
import io.druid.query.timeseries.TimeseriesQueryEngine;
import io.druid.query.timeseries.TimeseriesQueryQueryToolChest;
import io.druid.query.timeseries.TimeseriesQueryRunnerFactory;
import io.druid.query.timeseries.TimeseriesResultValue;
import io.druid.segment.indexing.DataSchema;
import io.druid.segment.indexing.RealtimeIOConfig;
import io.druid.segment.indexing.RealtimeTuningConfig;
import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import io.druid.segment.loading.SegmentLoaderConfig;
import io.druid.segment.loading.SegmentLoaderLocalCacheManager;
import io.druid.segment.loading.StorageLocationConfig;
import io.druid.segment.realtime.FireDepartment;
import io.druid.segment.realtime.plumber.SegmentHandoffNotifier;
import io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory;
import io.druid.segment.realtime.plumber.ServerTimeRejectionPolicyFactory;
import io.druid.server.coordination.DataSegmentServerAnnouncer;
import io.druid.timeline.DataSegment;
import org.easymock.EasyMock;
import org.hamcrest.CoreMatchers;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.internal.matchers.ThrowableCauseMatcher;
import org.junit.internal.matchers.ThrowableMessageMatcher;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
@RunWith(Parameterized.class)
public class RealtimeIndexTaskTest
{
private static final Logger log = new Logger(RealtimeIndexTaskTest.class);
private static final ObjectMapper jsonMapper = new DefaultObjectMapper();
private static final ServiceEmitter emitter = new ServiceEmitter(
"service",
"host",
new LoggingEmitter(
log,
LoggingEmitter.Level.ERROR,
jsonMapper
)
);
private static final String FAIL_DIM = "__fail__";
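  /**
   * Minimal blocking in-memory Firehose for the tests below: rows queued via addRows() are
   * consumed by hasMore()/nextRow(), close() wakes any blocked reader so the task can drain
   * and finish, and a row carrying the FAIL_DIM dimension triggers a ParseException.
   */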
private static class TestFirehose implements Firehose
{
private final List<InputRow> queue = Lists.newLinkedList();
private boolean closed = false;
public void addRows(List<InputRow> rows)
{
synchronized (this) {
queue.addAll(rows);
notifyAll();
}
}
@Override
public boolean hasMore()
{
try {
synchronized (this) {
while (queue.isEmpty() && !closed) {
wait();
}
return !queue.isEmpty();
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
}
@Override
public InputRow nextRow()
{
synchronized (this) {
final InputRow row = queue.remove(0);
if (row != null && row.getDimensions().contains(FAIL_DIM)) {
throw new ParseException(FAIL_DIM);
}
return row;
}
}
@Override
public Runnable commit()
{
return new Runnable()
{
@Override
public void run()
{
// do nothing
}
};
}
@Override
public void close() throws IOException
{
synchronized (this) {
closed = true;
notifyAll();
}
}
}
private static class TestFirehoseFactory implements FirehoseFactory
{
public TestFirehoseFactory()
{
}
@Override
public Firehose connect(InputRowParser parser, File temporaryDirectory) throws IOException, ParseException
{
return new TestFirehose();
}
}
@Rule
public final ExpectedException expectedException = ExpectedException.none();
@Rule
public final TemporaryFolder tempFolder = new TemporaryFolder();
private final boolean buildV9Directly;
private DateTime now;
private ListeningExecutorService taskExec;
private Map<SegmentDescriptor, Pair<Executor, Runnable>> handOffCallbacks;
@Parameterized.Parameters(name = "buildV9Directly = {0}")
public static Collection<?> constructorFeeder() throws IOException
{
return ImmutableList.of(
new Object[]{true},
new Object[]{false}
);
}
public RealtimeIndexTaskTest(boolean buildV9Directly)
{
this.buildV9Directly = buildV9Directly;
}
@Before
public void setUp()
{
EmittingLogger.registerEmitter(emitter);
emitter.start();
taskExec = MoreExecutors.listeningDecorator(Execs.singleThreaded("realtime-index-task-test-%d"));
now = new DateTime();
}
@After
public void tearDown()
{
taskExec.shutdownNow();
}
@Test
public void testMakeTaskId() throws Exception
{
Assert.assertEquals(
"index_realtime_test_0_2015-01-02T00:00:00.000Z_abcdefgh",
RealtimeIndexTask.makeTaskId("test", 0, new DateTime("2015-01-02"), 0x76543210)
);
}
@Test(timeout = 60_000L)
public void testDefaultResource() throws Exception
{
final RealtimeIndexTask task = makeRealtimeTask(null);
Assert.assertEquals(task.getId(), task.getTaskResource().getAvailabilityGroup());
}
@Test(timeout = 60_000L, expected = ExecutionException.class)
public void testHandoffTimeout() throws Exception
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task = makeRealtimeTask(null, true, 100L);
final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")
)
)
);
// Stop the firehose, this will drain out existing events.
firehose.close();
// Wait for publish.
while (mdc.getPublished().isEmpty()) {
Thread.sleep(50);
}
Assert.assertEquals(1, task.getMetrics().processed());
Assert.assertNotNull(Iterables.getOnlyElement(mdc.getPublished()));
    // The handoff should time out, resulting in an exception.
statusFuture.get();
}
@Test(timeout = 60_000L)
public void testBasics() throws Exception
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task = makeRealtimeTask(null);
final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
final DataSegment publishedSegment;
// Wait for firehose to show up, it starts off null.
while (task.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")
),
new MapBasedInputRow(
now.minus(new Period("P1D")),
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)
),
new MapBasedInputRow(
now,
ImmutableList.of("dim2"),
ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0)
)
)
);
// Stop the firehose, this will drain out existing events.
firehose.close();
// Wait for publish.
while (mdc.getPublished().isEmpty()) {
Thread.sleep(50);
}
publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
// Check metrics.
Assert.assertEquals(2, task.getMetrics().processed());
Assert.assertEquals(1, task.getMetrics().thrownAway());
Assert.assertEquals(0, task.getMetrics().unparseable());
// Do some queries.
Assert.assertEquals(2, sumMetric(task, "rows"));
Assert.assertEquals(3, sumMetric(task, "met1"));
// Simulate handoff.
for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
Assert.assertEquals(
new SegmentDescriptor(
publishedSegment.getInterval(),
publishedSegment.getVersion(),
publishedSegment.getShardSpec().getPartitionNum()
),
entry.getKey()
);
executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
}
handOffCallbacks.clear();
// Wait for the task to finish.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
@Test(timeout = 60_000L)
public void testReportParseExceptionsOnBadMetric() throws Exception
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task = makeRealtimeTask(null, true);
final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")
),
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")
),
new MapBasedInputRow(
now.minus(new Period("P1D")),
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")
),
new MapBasedInputRow(
now,
ImmutableList.of("dim2"),
ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0)
)
)
);
// Stop the firehose, this will drain out existing events.
firehose.close();
// Wait for the task to finish.
expectedException.expect(ExecutionException.class);
expectedException.expectCause(CoreMatchers.<Throwable>instanceOf(ParseException.class));
expectedException.expectCause(
ThrowableMessageMatcher.hasMessage(
CoreMatchers.containsString("Encountered parse error for aggregator[met1]")
)
);
expectedException.expect(
ThrowableCauseMatcher.hasCause(
ThrowableCauseMatcher.hasCause(
CoreMatchers.allOf(
CoreMatchers.<Throwable>instanceOf(ParseException.class),
ThrowableMessageMatcher.hasMessage(
CoreMatchers.containsString("Unable to parse metrics[met1], value[foo]")
)
)
)
)
);
statusFuture.get();
}
@Test(timeout = 60_000L)
public void testNoReportParseExceptions() throws Exception
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task = makeRealtimeTask(null, false);
final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
final DataSegment publishedSegment;
// Wait for firehose to show up, it starts off null.
while (task.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task.getFirehose();
firehose.addRows(
Arrays.<InputRow>asList(
// Good row- will be processed.
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")
),
// Null row- will be unparseable.
null,
// Bad metric- will count as processed, but that particular metric won't update.
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")
),
// Bad row- will be unparseable.
new MapBasedInputRow(
now,
ImmutableList.of("dim1", FAIL_DIM),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)
),
// Old row- will be thrownAway.
new MapBasedInputRow(
now.minus(new Period("P1D")),
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)
),
// Good row- will be processed.
new MapBasedInputRow(
now,
ImmutableList.of("dim2"),
ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0)
)
)
);
// Stop the firehose, this will drain out existing events.
firehose.close();
// Wait for publish.
while (mdc.getPublished().isEmpty()) {
Thread.sleep(50);
}
publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
// Check metrics.
Assert.assertEquals(3, task.getMetrics().processed());
Assert.assertEquals(1, task.getMetrics().thrownAway());
Assert.assertEquals(2, task.getMetrics().unparseable());
// Do some queries.
Assert.assertEquals(3, sumMetric(task, "rows"));
Assert.assertEquals(3, sumMetric(task, "met1"));
// Simulate handoff.
for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
Assert.assertEquals(
new SegmentDescriptor(
publishedSegment.getInterval(),
publishedSegment.getVersion(),
publishedSegment.getShardSpec().getPartitionNum()
),
entry.getKey()
);
executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
}
handOffCallbacks.clear();
// Wait for the task to finish.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
@Test(timeout = 60_000L)
public void testRestore() throws Exception
{
final File directory = tempFolder.newFolder();
final RealtimeIndexTask task1 = makeRealtimeTask(null);
final DataSegment publishedSegment;
// First run:
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task1.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task1.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo")
)
)
);
// Trigger graceful shutdown.
task1.stopGracefully();
// Wait for the task to finish. The status doesn't really matter, but we'll check it anyway.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
// Nothing should be published.
Assert.assertEquals(Sets.newHashSet(), mdc.getPublished());
}
// Second run:
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
final TaskToolbox taskToolbox = makeToolbox(task2, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task2.getFirehose() == null) {
Thread.sleep(50);
}
// Do a query, at this point the previous data should be loaded.
Assert.assertEquals(1, sumMetric(task2, "rows"));
final TestFirehose firehose = (TestFirehose) task2.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim2"),
ImmutableMap.<String, Object>of("dim2", "bar")
)
)
);
// Stop the firehose, this will drain out existing events.
firehose.close();
// Wait for publish.
while (mdc.getPublished().isEmpty()) {
Thread.sleep(50);
}
publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
// Do a query.
Assert.assertEquals(2, sumMetric(task2, "rows"));
// Simulate handoff.
for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
Assert.assertEquals(
new SegmentDescriptor(
publishedSegment.getInterval(),
publishedSegment.getVersion(),
publishedSegment.getShardSpec().getPartitionNum()
),
entry.getKey()
);
executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
}
handOffCallbacks.clear();
// Wait for the task to finish.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
}
@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception
{
final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final File directory = tempFolder.newFolder();
final RealtimeIndexTask task1 = makeRealtimeTask(null);
final DataSegment publishedSegment;
// First run:
{
final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task1.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task1.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo")
)
)
);
// Stop the firehose, this will trigger a finishJob.
firehose.close();
// Wait for publish.
while (mdc.getPublished().isEmpty()) {
Thread.sleep(50);
}
publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
// Do a query.
Assert.assertEquals(1, sumMetric(task1, "rows"));
// Trigger graceful shutdown.
task1.stopGracefully();
// Wait for the task to finish. The status doesn't really matter.
while (!statusFuture.isDone()) {
Thread.sleep(50);
}
}
// Second run:
{
final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task2.getFirehose() == null) {
Thread.sleep(50);
}
// Stop the firehose again, this will start another handoff.
final TestFirehose firehose = (TestFirehose) task2.getFirehose();
// Stop the firehose, this will trigger a finishJob.
firehose.close();
// publishedSegment is still published. No reason it shouldn't be.
Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());
// Wait for a handoffCallback to show up.
while (handOffCallbacks.isEmpty()) {
Thread.sleep(50);
}
// Simulate handoff.
for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
Assert.assertEquals(
new SegmentDescriptor(
publishedSegment.getInterval(),
publishedSegment.getVersion(),
publishedSegment.getShardSpec().getPartitionNum()
),
entry.getKey()
);
executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
}
handOffCallbacks.clear();
// Wait for the task to finish.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
}
@Test(timeout = 60_000L)
public void testRestoreCorruptData() throws Exception
{
final File directory = tempFolder.newFolder();
final RealtimeIndexTask task1 = makeRealtimeTask(null);
// First run:
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
// Wait for firehose to show up, it starts off null.
while (task1.getFirehose() == null) {
Thread.sleep(50);
}
final TestFirehose firehose = (TestFirehose) task1.getFirehose();
firehose.addRows(
ImmutableList.<InputRow>of(
new MapBasedInputRow(
now,
ImmutableList.of("dim1"),
ImmutableMap.<String, Object>of("dim1", "foo")
)
)
);
// Trigger graceful shutdown.
task1.stopGracefully();
// Wait for the task to finish. The status doesn't really matter, but we'll check it anyway.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
// Nothing should be published.
Assert.assertEquals(Sets.newHashSet(), mdc.getPublished());
}
// Corrupt the data:
final File smooshFile = new File(
String.format(
"%s/persistent/task/%s/work/persist/%s/%s_%s/0/00000.smoosh",
directory,
task1.getId(),
task1.getDataSource(),
Granularities.DAY.bucketStart(now),
Granularities.DAY.bucketEnd(now)
)
);
Files.write(smooshFile.toPath(), StringUtils.toUtf8("oops!"));
// Second run:
{
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
final TaskToolbox taskToolbox = makeToolbox(task2, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
// Wait for the task to finish.
boolean caught = false;
try {
statusFuture.get();
}
catch (Exception e) {
caught = true;
}
Assert.assertTrue("expected exception", caught);
}
}
@Test(timeout = 60_000L)
public void testStopBeforeStarting() throws Exception
{
final File directory = tempFolder.newFolder();
final RealtimeIndexTask task1 = makeRealtimeTask(null);
task1.stopGracefully();
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
// Wait for the task to finish.
final TaskStatus taskStatus = statusFuture.get();
Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
private ListenableFuture<TaskStatus> runTask(final Task task, final TaskToolbox toolbox)
{
return taskExec.submit(
new Callable<TaskStatus>()
{
@Override
public TaskStatus call() throws Exception
{
try {
if (task.isReady(toolbox.getTaskActionClient())) {
return task.run(toolbox);
} else {
throw new ISE("Task is not ready");
}
}
catch (Exception e) {
log.warn(e, "Task failed");
throw e;
}
}
}
);
}
private RealtimeIndexTask makeRealtimeTask(final String taskId)
{
return makeRealtimeTask(taskId, true, 0);
}
private RealtimeIndexTask makeRealtimeTask(final String taskId, boolean reportParseExceptions)
{
return makeRealtimeTask(taskId, reportParseExceptions, 0);
}
private RealtimeIndexTask makeRealtimeTask(final String taskId, boolean reportParseExceptions, long handoffTimeout)
{
ObjectMapper objectMapper = new DefaultObjectMapper();
DataSchema dataSchema = new DataSchema(
"test_ds",
null,
new AggregatorFactory[]{new CountAggregatorFactory("rows"), new LongSumAggregatorFactory("met1", "met1")},
new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, null),
objectMapper
);
RealtimeIOConfig realtimeIOConfig = new RealtimeIOConfig(
new TestFirehoseFactory(),
null,
null
);
RealtimeTuningConfig realtimeTuningConfig = new RealtimeTuningConfig(
1000,
new Period("P1Y"),
new Period("PT10M"),
null,
null,
new ServerTimeRejectionPolicyFactory(),
null,
null,
null,
buildV9Directly,
0,
0,
reportParseExceptions,
handoffTimeout,
null
);
return new RealtimeIndexTask(
taskId,
null,
new FireDepartment(dataSchema, realtimeIOConfig, realtimeTuningConfig),
null
)
{
@Override
protected boolean isFirehoseDrainableByClosing(FirehoseFactory firehoseFactory)
{
return true;
}
};
}
private TaskToolbox makeToolbox(
final Task task,
final IndexerMetadataStorageCoordinator mdc,
final File directory
)
{
return makeToolbox(
task,
new HeapMemoryTaskStorage(new TaskStorageConfig(null)),
mdc,
directory
);
}
private TaskToolbox makeToolbox(
final Task task,
final TaskStorage taskStorage,
final IndexerMetadataStorageCoordinator mdc,
final File directory
)
{
final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null, null);
final TaskLockbox taskLockbox = new TaskLockbox(taskStorage);
try {
taskStorage.insert(task, TaskStatus.running(task.getId()));
}
catch (EntryExistsException e) {
// suppress
}
taskLockbox.syncFromStorage();
final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
taskLockbox,
mdc,
emitter,
EasyMock.createMock(SupervisorManager.class)
);
final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
taskStorage,
taskActionToolbox
);
final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
TimeseriesQuery.class,
new TimeseriesQueryRunnerFactory(
new TimeseriesQueryQueryToolChest(
new IntervalChunkingQueryRunnerDecorator(null, null, null)
{
@Override
public <T> QueryRunner<T> decorate(
QueryRunner<T> delegate, QueryToolChest<T, ? extends Query<T>> toolChest
)
{
return delegate;
}
}
),
new TimeseriesQueryEngine(),
new QueryWatcher()
{
@Override
public void registerQuery(Query query, ListenableFuture future)
{
// do nothing
}
}
)
)
);
handOffCallbacks = Maps.newConcurrentMap();
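    // Handoff notifier that records callbacks instead of performing a real handoff; the tests
    // execute the recorded Runnables themselves to simulate segment handoff.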
final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory()
{
@Override
public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource)
{
return new SegmentHandoffNotifier()
{
@Override
public boolean registerSegmentHandoffCallback(
SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable
)
{
handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
return true;
}
@Override
public void start()
{
//Noop
}
@Override
public void close()
{
//Noop
}
Map<SegmentDescriptor, Pair<Executor, Runnable>> getHandOffCallbacks()
{
return handOffCallbacks;
}
};
}
};
final TestUtils testUtils = new TestUtils();
final TaskToolboxFactory toolboxFactory = new TaskToolboxFactory(
taskConfig,
taskActionClientFactory,
emitter,
new TestDataSegmentPusher(),
new TestDataSegmentKiller(),
null, // DataSegmentMover
null, // DataSegmentArchiver
new TestDataSegmentAnnouncer(),
EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
handoffNotifierFactory,
conglomerate,
MoreExecutors.sameThreadExecutor(), // queryExecutorService
EasyMock.createMock(MonitorScheduler.class),
new SegmentLoaderFactory(
new SegmentLoaderLocalCacheManager(
null,
new SegmentLoaderConfig()
{
@Override
public List<StorageLocationConfig> getLocations()
{
return Lists.newArrayList();
}
}, testUtils.getTestObjectMapper()
)
),
testUtils.getTestObjectMapper(),
testUtils.getTestIndexMerger(),
testUtils.getTestIndexIO(),
MapCache.create(1024),
new CacheConfig(),
testUtils.getTestIndexMergerV9()
);
return toolboxFactory.build(task);
}
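  /**
   * Runs an all-granularity timeseries query over 2000/3000 against the task's query runner
   * and returns the long sum of the given metric, or 0 if the query produced no results.
   */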
public long sumMetric(final Task task, final String metric) throws Exception
{
// Do a query.
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
.dataSource("test_ds")
.aggregators(
ImmutableList.<AggregatorFactory>of(
new LongSumAggregatorFactory(metric, metric)
)
).granularity(Granularities.ALL)
.intervals("2000/3000")
.build();
ArrayList<Result<TimeseriesResultValue>> results = Sequences.toList(
task.getQueryRunner(query).run(query, ImmutableMap.<String, Object>of()),
Lists.<Result<TimeseriesResultValue>>newArrayList()
);
return results.isEmpty() ? 0 : results.get(0).getValue().getLongMetric(metric);
}
}
|
apache-2.0
|
leapframework/framework
|
oauth2/server/src/main/java/leap/oauth2/server/endpoint/token/GrantTypeHandleFailHandler.java
|
1167
|
/*
*
* * Copyright 2013 the original author or authors.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package leap.oauth2.server.endpoint.token;
import leap.oauth2.server.OAuth2Error;
import leap.oauth2.server.OAuth2Params;
import leap.web.Request;
import leap.web.Response;
/**
* Created by kael on 2017/2/28.
*/
public interface GrantTypeHandleFailHandler {
/**
     * Handles a failure raised in a grant type handler.
     * @return true if the failure was handled, false otherwise
*/
boolean handle(Request request, Response response, OAuth2Params params, OAuth2Error error, GrantTypeHandler handler);
}
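/*
 * A minimal sketch of an implementation, assuming nothing beyond the interface contract
 * above; the class name is hypothetical. It defers to the framework's default error
 * handling by reporting the failure as not handled.
 */
class NoopGrantTypeHandleFailHandler implements GrantTypeHandleFailHandler {
    @Override
    public boolean handle(Request request, Response response, OAuth2Params params,
                          OAuth2Error error, GrantTypeHandler handler) {
        // Returning false signals that this handler did not handle the failure.
        return false;
    }
}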
|
apache-2.0
|
GoogleChrome/lighthouse
|
lighthouse-core/scripts/benchmark-plus-extras.js
|
3567
|
/**
* @license Copyright 2020 The Lighthouse Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';
/**
* @fileoverview This script computes the BenchmarkIndex and a few other related browser benchmarks.
* node lighthouse-core/scripts/benchmark-plus-extras.js
*/
import puppeteer from 'puppeteer';
import pageFunctions from '../lib/page-functions.js';
/** @param {import('puppeteer').Page} page */
async function runOctane(page) {
/** @param {import('puppeteer').ConsoleMessage} message */
const pageLogger = message => process.stdout.write(` ${message.text()}\n`);
process.stdout.write(`Running Octane...\n`);
await page.goto('https://chromium.github.io/octane/', {waitUntil: 'networkidle0'});
await page.waitFor('#run-octane');
await page.waitFor(5000);
page.on('console', pageLogger);
await page.click('#run-octane');
await page.waitForFunction(() => {
const banner = document.querySelector('#main-banner');
return /Octane Score: \d+/.test(banner?.textContent || '');
}, {timeout: 300e3});
const score = await page.evaluate(() => {
const banner = document.querySelector('#main-banner');
if (!banner || !banner.textContent) return 0;
const [_, score] = banner.textContent.match(/Octane Score: (\d+)/) || [];
return Number(score);
});
process.stdout.write(` Octane: ${score}\n`);
page.off('console', pageLogger);
}
/** @param {import('puppeteer').Page} page */
async function runSpeedometer(page) {
process.stdout.write(`Running Speedometer...\n`);
await page.goto('https://browserbench.org/Speedometer2.0/', {waitUntil: 'networkidle0'});
await page.waitFor('#home button');
await page.waitFor(5000);
await page.click('#home button');
const loggerInterval = setInterval(async () => {
const progress = await page.evaluate(() => {
const infoEl = document.querySelector('#running #info');
return infoEl?.textContent || 'Unknown';
});
process.stdout.write(` Progress: ${progress}\n`);
}, 10000);
await page.waitForSelector('#summarized-results.selected', {timeout: 600e3});
clearInterval(loggerInterval);
const score = await page.evaluate(() => {
const result = document.querySelector('#result-number');
if (!result || !result.textContent) return 0;
return Number(result.textContent);
});
process.stdout.write(` Speedometer: ${score}\n`);
}
async function main() {
process.stdout.write(`Launching Chrome...\n`);
const browser = await puppeteer.launch({
headless: true,
executablePath: process.env.CHROME_PATH,
});
const page = await browser.newPage();
await page.goto('about:blank');
process.stdout.write(`Running BenchmarkIndex...\n`);
for (let i = 0; i < 10; i++) {
const BenchmarkIndex = await page.evaluate(pageFunctions.computeBenchmarkIndex);
process.stdout.write(` ${i + 1}: BenchmarkIndex=${BenchmarkIndex}\n`);
}
await runOctane(page);
await runSpeedometer(page);
await browser.close();
}
main().catch(err => {
process.stderr.write(err.stack);
process.exit(1);
});
|
apache-2.0
|
qframe/types
|
messages/base_test.go
|
5205
|
package qtypes_messages
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/zpatrick/go-config"
"reflect"
"github.com/qframe/types/plugin"
"github.com/qframe/types/qchannel"
)
func NewConfig(kv map[string]string) *config.Config {
return config.NewConfig([]config.Provider{config.NewStatic(kv)})
}
func TestNewBase(t *testing.T) {
before := time.Now()
b := NewBase("src1")
after := time.Now()
assert.Equal(t, version, b.BaseVersion)
assert.Equal(t, "src1", b.SourcePath[0])
assert.True(t, before.UnixNano() < b.Time.UnixNano())
assert.True(t, after.UnixNano() > b.Time.UnixNano())
}
func TestNewTimedBase(t *testing.T) {
now := time.Now()
b := NewTimedBase("src1", now)
assert.Equal(t, now, b.Time)
}
func TestBase_GetTimeUnix(t *testing.T) {
now := time.Now()
b := NewTimedBase("src1", now)
assert.Equal(t, now.Unix(), b.GetTimeUnix())
}
func TestBase_GetTimeUnixNano(t *testing.T) {
now := time.Now()
b := NewTimedBase( "src1", now)
assert.Equal(t, now.UnixNano(), b.GetTimeUnixNano())
}
func TestBase_AppendSrc(t *testing.T) {
b := NewBase("src1")
b.AppendSource("src2")
assert.Equal(t, "src1", b.SourcePath[0])
assert.Equal(t, "src2", b.SourcePath[1])
}
func TestBase_IsLastSource(t *testing.T) {
b := NewBase("src1")
assert.True(t, b.IsLastSource("src1"), "Last source should be 'src1'")
b.AppendSource("src2")
assert.True(t, b.IsLastSource("src2"), "Last source should be 'src2'")
}
func TestBase_InputsMatch(t *testing.T) {
b := NewBase("src1")
assert.True(t, b.InputsMatch([]string{"src2", "src1"}), "Should match input list 'src2', 'src1'")
assert.False(t, b.InputsMatch([]string{"src2"}), "Should not match input list 'src2'")
}
func TestSha1HashString(t *testing.T) {
s := "sha1 this string"
assert.Equal(t, "cf23df2207d99a74fbe169e3eba035e633b65d94", Sha1HashString(s))
}
func TestBase_GenDefaultID(t *testing.T) {
ts := time.Unix(1499156134, 0)
b := NewTimedBase("src1", ts)
exp := "27188913c2c2ce1a6cfc5c3a56d072b0297a202f"
got := b.GenDefaultID()
assert.Equal(t, exp, got)
}
func TestBase_GetMessageDigest(t *testing.T) {
b := NewBase("src")
b.ID = "27188913c2c2ce1a6cfc5c3a56d072b0297a202f"
exp := "27188913c2c2c"
got := b.GetMessageDigest()
assert.Equal(t, exp, got)
}
func TestBase_GetTimeRFC(t *testing.T) {
ts := time.Unix(1499156134, 123124)
b := NewTimedBase("src1", ts)
exp := "2017-07-04T10:15:34.000123+02:00"
got := b.GetTimeRFC()
assert.Equal(t, exp, got)
}
func TestBase_ToJSON(t *testing.T) {
ts := time.Unix(1499156134, 123124)
b := NewTimedBase("src1", ts)
b.ID = "0.1"
exp := map[string]interface{}{
"base_version": b.BaseVersion,
"id": "0.1",
"time": ts.String(),
"time_unix_nano": ts.UnixNano(),
"source_id": 0,
"source_path": []string{"src1"},
"source_success": true,
"tags": map[string]string{},
}
got := b.ToJSON()
assert.Equal(t, exp["time"], got["time"])
assert.True(t, reflect.DeepEqual(exp, got), "Not deeply equal")
assert.True(t, true)
}
func TestNewBaseFromBase(t *testing.T) {
ts := time.Unix(1499156134, 123124)
b1 := NewTimedBase("src1", ts)
b1.Tags["key1"] = "val1"
b2 := NewBaseFromBase("src2", b1)
assert.Equal(t, b1.BaseVersion, b2.BaseVersion)
assert.Equal(t, b1.ID, b2.ID)
assert.Equal(t, b1.Time, b2.Time)
assert.Equal(t, b1.SourceID, b2.SourceID)
	assert.Equal(t, append(b1.SourcePath, "src2"), b2.SourcePath)
assert.Equal(t, b1.SourceSuccess, b2.SourceSuccess)
assert.Equal(t, b1.Tags, b2.Tags)
}
func TestBase_StopProcessing(t *testing.T) {
qChan := qtypes_qchannel.NewQChan()
ts := time.Unix(1499156134, 123124)
b := NewTimedBase("src1", ts)
b.SourceID = 1
cfg := NewConfig(map[string]string{})
	p := qtypes_plugin.NewNamedPlugin(qChan, cfg, "typ", "pkg", "name", "0.0.0")
p.MyID = 1
assert.True(t, b.StopProcessing(p, false), "Same GID (p.MyID == b.SourceID), so we should stop here")
p.MyID = 2
assert.True(t, b.StopProcessing(p, false), "No empty input allowed, should stop here")
cfg = NewConfig(map[string]string{"typ.name.inputs": "src2"})
	p = qtypes_plugin.NewNamedPlugin(qChan, cfg, "typ", "pkg", "name", "0.0.0")
assert.True(t, b.StopProcessing(p, false), "Input should not match, therefore expect to be stopped.")
cfg = NewConfig(map[string]string{
"typ.name.inputs": "src1",
"typ.name.source-success": "false",
})
	p = qtypes_plugin.NewNamedPlugin(qChan, cfg, "typ", "pkg", "name", "0.0.0")
assert.True(t, b.StopProcessing(p, false), "Source-success is false, therefore expect to be stopped.")
cfg = NewConfig(map[string]string{"typ.name.inputs": "src1"})
	p = qtypes_plugin.NewNamedPlugin(qChan, cfg, "typ", "pkg", "name", "0.0.0")
assert.False(t, b.StopProcessing(p, false), "Input should match, therefore expect to not be stopped.")
}
func TestBase_ToFlatJSON(t *testing.T) {
ts := time.Unix(1499156134, 123124)
b := NewTimedBase("src1", ts)
b.Tags = map[string]string{"key1":"val1","key2":"val2"}
got := b.ToFlatJSON()
assert.Equal(t, "src1", got["msg_source_path"])
assert.Equal(t, "val1", got["msg_tag_key1"])
assert.Equal(t, "val2", got["msg_tag_key2"])
assert.Equal(t, version, got["msg_base_version"])
assert.Equal(t, "1499156134000123124", got["msg_time_unix_nano"])
}
|
apache-2.0
|
hetianpeng/shareApk
|
shareapk/src/main/java/com/codehe/shareapk/ui/BaseActivity.java
|
605
|
package com.codehe.shareapk.ui;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AppCompatActivity;
import org.greenrobot.eventbus.EventBus;
/**
* Created by hetianpeng on 2017/2/12.
*/
public class BaseActivity extends AppCompatActivity {
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
EventBus.getDefault().register(this);
}
@Override
protected void onDestroy() {
super.onDestroy();
EventBus.getDefault().unregister(this);
}
}
|
apache-2.0
|
woolparty/DontEatMyFish
|
DonTouchMyFish/Assets/Assets/Effects/Cartoon FX/Demo/Assets/CFX_Demo.cs
|
4824
|
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System.Text.RegularExpressions;
// Script handling the Demo scene of CartoonFX particles.
public class CFX_Demo : MonoBehaviour
{
public bool orderedSpawns = true;
public float step = 1.0f;
public float range = 5.0f;
private float order = -5.0f;
public Material groundMat, waterMat;
public GameObject[] ParticleExamples;
private Dictionary<string,float> ParticlesYOffsetD = new Dictionary<string, float>
{
{"CFX_ElectricGround", 0.15f},
{"CFX_ElectricityBall", 1.0f},
{"CFX_ElectricityBolt", 1.0f},
{"CFX_Explosion", 2.0f},
{"CFX_SmallExplosion", 1.5f},
{"CFX_SmokeExplosion", 2.5f},
{"CFX_Flame", 1.0f},
{"CFX_DoubleFlame", 1.0f},
{"CFX_Hit", 1.0f},
{"CFX_CircularLightWall", 0.05f},
{"CFX_LightWall", 0.05f},
{"CFX_Flash", 2.0f},
{"CFX_Poof", 1.5f},
{"CFX_Virus", 1.0f},
{"CFX_SmokePuffs", 2.0f},
{"CFX_Slash", 1.0f},
{"CFX_Splash", 0.05f},
{"CFX_Fountain", 0.05f},
{"CFX_Ripple", 0.05f},
{"CFX_Magic", 2.0f},
};
private int exampleIndex;
private string randomSpawnsDelay = "0.5";
private bool randomSpawns;
private bool slowMo;
void OnMouseDown()
{
RaycastHit hit = new RaycastHit();
if(this.collider.Raycast(Camera.main.ScreenPointToRay(Input.mousePosition), out hit, 9999f))
{
GameObject particle = spawnParticle();
particle.transform.position = hit.point + particle.transform.position;
}
}
private GameObject spawnParticle()
{
GameObject particles = (GameObject)Instantiate(ParticleExamples[exampleIndex]);
float Y = 0.0f;
foreach(KeyValuePair<string,float> kvp in ParticlesYOffsetD)
{
if(particles.name.StartsWith(kvp.Key))
{
Y = kvp.Value;
break;
}
}
particles.transform.position = new Vector3(0,Y,0);
return particles;
}
void OnGUI()
{
GUILayout.BeginArea(new Rect(5,20,Screen.width-10,30));
GUILayout.BeginHorizontal();
GUILayout.Label("Effect", GUILayout.Width(50));
if(GUILayout.Button("<",GUILayout.Width(20)))
{
prevParticle();
}
GUILayout.Label(ParticleExamples[exampleIndex].name, GUILayout.Width(190));
if(GUILayout.Button(">",GUILayout.Width(20)))
{
nextParticle();
}
GUILayout.Label("Click on the ground to spawn selected particles");
if(GUILayout.Button(CFX_Demo_RotateCamera.rotating ? "Pause Camera" : "Rotate Camera", GUILayout.Width(140)))
{
CFX_Demo_RotateCamera.rotating = !CFX_Demo_RotateCamera.rotating;
}
if(GUILayout.Button(randomSpawns ? "Stop Random Spawns" : "Start Random Spawns", GUILayout.Width(140)))
{
randomSpawns = !randomSpawns;
if(randomSpawns) StartCoroutine("RandomSpawnsCoroutine");
else StopCoroutine("RandomSpawnsCoroutine");
}
randomSpawnsDelay = GUILayout.TextField(randomSpawnsDelay, 10, GUILayout.Width(42));
randomSpawnsDelay = Regex.Replace(randomSpawnsDelay, @"[^0-9.]", "");
if(GUILayout.Button(this.renderer.enabled ? "Hide Ground" : "Show Ground", GUILayout.Width(90)))
{
this.renderer.enabled = !this.renderer.enabled;
}
if(GUILayout.Button(slowMo ? "Normal Speed" : "Slow Motion", GUILayout.Width(100)))
{
slowMo = !slowMo;
if(slowMo) Time.timeScale = 0.33f;
else Time.timeScale = 1.0f;
}
GUILayout.EndHorizontal();
GUILayout.EndArea();
}
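	// Coroutine that endlessly spawns effects: each iteration spawns one particle system,
	// positions it (ordered sweep or at random within range), waits for the user-entered
	// delay, then jumps back via goto (equivalent to a while(true) loop).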
IEnumerator RandomSpawnsCoroutine()
{
LOOP:
GameObject particles = spawnParticle();
if(orderedSpawns)
{
particles.transform.position = this.transform.position + new Vector3(order,particles.transform.position.y,0);
order -= step;
if(order < -range) order = range;
}
else particles.transform.position = this.transform.position + new Vector3(Random.Range(-range,range),0,Random.Range(-range,range)) + new Vector3(0,particles.transform.position.y,0);
yield return new WaitForSeconds(float.Parse(randomSpawnsDelay));
goto LOOP;
}
void Update()
{
if(Input.GetKeyDown(KeyCode.LeftArrow))
{
prevParticle();
}
else if(Input.GetKeyDown(KeyCode.RightArrow))
{
nextParticle();
}
}
private void prevParticle()
{
exampleIndex--;
if(exampleIndex < 0) exampleIndex = ParticleExamples.Length - 1;
if(ParticleExamples[exampleIndex].name.Contains("Splash") || ParticleExamples[exampleIndex].name == "CFX_Ripple" || ParticleExamples[exampleIndex].name == "CFX_Fountain")
this.renderer.material = waterMat;
else
this.renderer.material = groundMat;
}
private void nextParticle()
{
exampleIndex++;
if(exampleIndex >= ParticleExamples.Length) exampleIndex = 0;
if(ParticleExamples[exampleIndex].name.Contains("Splash") || ParticleExamples[exampleIndex].name == "CFX_Ripple" || ParticleExamples[exampleIndex].name == "CFX_Fountain")
this.renderer.material = waterMat;
else
this.renderer.material = groundMat;
}
}
|
apache-2.0
|
shinfan/artman
|
artman/cli/main2.py
|
16431
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The new artman CLI with the following syntax.
artman [Options] generate|publish <artifact_name>
.. note::
    Only local execution is supported at this moment. The CLI syntax is
    in beta and might change in the future.
"""
from __future__ import absolute_import
from logging import DEBUG, INFO
import argparse
import io
import os
import pprint
import subprocess
import sys
from ruamel import yaml
from taskflow import engines
from artman.config import converter, loader
from artman.config.proto.config_pb2 import Artifact, Config
from artman.cli import support
from artman.pipelines import pipeline_factory
from artman.utils import config_util
from artman.utils.logger import logger, setup_logging
ARTMAN_DOCKER_IMAGE = 'googleapis/artman:0.4.12'
def main(*args):
"""Main method of artman."""
# If no arguments are sent, we are using the entry point; derive
# them from sys.argv.
if not args:
args = sys.argv[1:]
# Get to a normalized set of arguments.
flags = parse_args(*args)
user_config = read_user_config(flags)
pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)
if flags.local:
pipeline = pipeline_factory.make_pipeline(pipeline_name, False,
**pipeline_kwargs)
# Hardcoded to run pipeline in serial engine, though not necessarily.
engine = engines.load(
pipeline.flow, engine='serial', store=pipeline.kwargs)
engine.run()
_chown_for_artman_output(os.path.abspath(flags.output_dir))
else:
support.check_docker_requirements(flags.image)
# Note: artman currently won't work if input directory doesn't contain
# shared configuration files (e.g. gapic/packaging/dependencies.yaml).
# This will make artman less useful for non-Google APIs.
# TODO(ethanbao): Fix that by checking the input directory and
# pulling the shared configuration files if necessary.
logger.info('Running artman command in a Docker instance.')
_run_artman_in_docker(flags)
def parse_args(*args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--config',
type=str,
default='artman.yaml',
        help='[Optional] Specify the path to the artman config yaml, which '
        'can be either an absolute path or a path relative to the input '
        'directory (specified by the `--input-dir` flag). Defaults to '
        '`artman.yaml`.', )
parser.add_argument(
'--output-dir',
type=str,
default='./artman-genfiles',
        help='[Optional] Directory to store output generated by artman. '
        'Defaults to `./artman-genfiles`.', )
parser.add_argument(
'--input-dir',
type=str,
default='.',
        help='[Optional] Directory with all input that is needed by artman, '
        'which includes, but is not limited to, API protos, service config '
        'yaml and GAPIC config yaml. Defaults to `.`.', )
parser.add_argument(
'-v',
'--verbose',
action='store_const',
const=10,
default=None,
dest='verbosity',
help='Show verbose / debug output.', )
parser.add_argument(
'--user-config',
default='~/.artman/config.yaml',
        help='[Optional] User configuration file that stores credentials such '
        'as GitHub credentials. Defaults to `~/.artman/config.yaml`.', )
parser.add_argument(
'--local',
dest='local',
action='store_true',
        help='[Optional] If specified, run artman on the local host machine '
        'instead of in the artman Docker instance that has all binaries '
        'installed. Note: with this flag, one has to make sure all binaries '
        'are installed on the local machine; a full list can be found at '
        'https://github.com/googleapis/artman/blob/master/Dockerfile', )
parser.set_defaults(local=False)
parser.add_argument(
'--image',
default=ARTMAN_DOCKER_IMAGE,
        help=('[Optional] Specify the Docker image used by artman when running '
              'in a Docker instance. Defaults to `%s`.' % ARTMAN_DOCKER_IMAGE))
# Add sub-commands.
subparsers = parser.add_subparsers(
dest='subcommand', help='Support [generate|publish] sub-commands')
# `generate` sub-command.
parser_generate = subparsers.add_parser(
'generate', help='Generate artifact')
parser_generate.add_argument(
'artifact_name',
type=str,
help='[Required] Name of the artifact for artman to generate. Must '
'match an artifact in the artman config yaml.')
# `publish` sub-command.
parser_publish = subparsers.add_parser('publish', help='Publish artifact')
parser_publish.add_argument(
'artifact_name',
type=str,
        help='[Required] Name of the artifact for artman to publish. Must '
        'match an artifact in the artman config yaml.')
parser_publish.add_argument(
'--target',
type=str,
default=None,
required=True,
help='[Required] Specify where the generated artifact should be '
'published to. It is defined as publishing targets in artman '
'config at artifact level.', )
parser_publish.add_argument(
'--github-username',
default=None,
help='[Optional] The GitHub username. Must be set if publishing the '
'artifact to github, but can come from the user config file.', )
parser_publish.add_argument(
'--github-token',
default=None,
help='[Optional] The GitHub personal access token. Must be set if '
'publishing the artifact to github, but can come from the user '
'config file.', )
parser_publish.add_argument(
'--dry-run',
dest='dry_run',
action='store_true',
help='[Optional] When specified, artman will skip the remote '
'publishing step.', )
parser_publish.set_defaults(dry_run=False)
return parser.parse_args(args=args)
def read_user_config(flags):
"""Read the user config from disk and return it.
Args:
flags (argparse.Namespace): The flags from sys.argv.
Returns:
dict: The user config.
"""
# Load the user configuration if it exists and save a dictionary.
user_config = {}
user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
if os.path.isfile(user_config_file):
with io.open(user_config_file) as ucf:
user_config = yaml.load(ucf.read(), Loader=yaml.Loader) or {}
# Sanity check: Is there a configuration? If not, abort.
if not user_config:
setup_logging(INFO)
logger.critical('No user configuration found.')
logger.warn('This is probably your first time running Artman.')
logger.warn('Run `configure-artman` to get yourself set up.')
sys.exit(64)
# Done; return the user config.
return user_config
def normalize_flags(flags, user_config):
"""Combine the argparse flags and user configuration together.
Args:
flags (argparse.Namespace): The flags parsed from sys.argv
user_config (dict): The user configuration taken from
~/.artman/config.yaml.
Returns:
tuple (str, dict): 2-tuple containing:
- pipeline name
- pipeline arguments
"""
flags.input_dir = os.path.abspath(flags.input_dir)
flags.output_dir = os.path.abspath(flags.output_dir)
flags.config = os.path.abspath(flags.config)
pipeline_args = {}
# Determine logging verbosity and then set up logging.
verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
setup_logging(verbosity)
# Save local paths, if applicable.
# This allows the user to override the path to api-client-staging or
# toolkit on his or her machine.
pipeline_args['local_paths'] = support.parse_local_paths(
user_config, flags.input_dir)
artman_config_path = flags.config
if not os.path.isfile(artman_config_path):
logger.error(
'Artman config file `%s` doesn\'t exist.' % artman_config_path)
sys.exit(96)
try:
artifact_config = loader.load_artifact_config(
artman_config_path, flags.artifact_name, flags.input_dir)
except ValueError as ve:
logger.error('Artifact config loading failed with `%s`' % ve)
sys.exit(96)
# If we were given just an API or BATCH, then expand it into the --config
# syntax.
shared_config_name = 'common.yaml'
if artifact_config.language == Artifact.RUBY:
shared_config_name = 'doc.yaml'
legacy_config_dict = converter.convert_to_legacy_config_dict(
artifact_config, flags.input_dir, flags.output_dir)
logger.debug('Below is the legacy config after conversion:\n%s' %
pprint.pformat(legacy_config_dict))
tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
with io.open(tmp_legacy_config_yaml, 'w') as outfile:
yaml.dump(legacy_config_dict, outfile, default_flow_style=False)
googleapis = os.path.realpath(
os.path.expanduser(
pipeline_args['local_paths']['googleapis'], ))
config = ','.join([
'{artman_config_path}',
'{googleapis}/gapic/lang/{shared_config_name}',
]).format(
artman_config_path=tmp_legacy_config_yaml,
googleapis=googleapis,
shared_config_name=shared_config_name, )
# Set the pipeline as well as package_type and packaging
artifact_type = artifact_config.type
if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
pipeline_name = 'GapicClientPipeline'
elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
pipeline_name = 'GrpcClientPipeline'
elif artifact_type == Artifact.GAPIC_CONFIG:
pipeline_name = 'GapicConfigPipeline'
else:
raise ValueError('Unrecognized artifact.')
language = Artifact.Language.Name(
artifact_config.language).lower()
pipeline_args['language'] = language
# Parse out the full configuration.
# Note: the var replacement is still needed because they are still being
# used in some shared/common config yamls.
config_sections = ['common']
for config_spec in config.split(','):
config_args = config_util.load_config_spec(
config_spec=config_spec,
config_sections=config_sections,
repl_vars={
k.upper(): v
for k, v in pipeline_args['local_paths'].items()
},
language=language, )
pipeline_args.update(config_args)
# Setup publishing related config if needed.
if flags.subcommand == 'generate':
pipeline_args['publish'] = 'noop'
elif flags.subcommand == 'publish':
publishing_config = _get_publishing_config(artifact_config,
flags.target)
if publishing_config.type == Artifact.PublishTarget.GITHUB:
pipeline_args['publish'] = 'local' if flags.dry_run else 'github'
pipeline_args['github'] = support.parse_github_credentials(
argv_flags=flags,
config=user_config.get('github', {}), )
repos = pipeline_args.pop('git_repos')
pipeline_args['git_repo'] = support.select_git_repo(
repos, publishing_config.name)
else:
logger.error(
'Publishing type `%s` is not supported yet.' %
Artifact.PublishTarget.Type.Name(publishing_config.type))
sys.exit(96)
# Print out the final arguments to stdout, to help the user with
# possible debugging.
pipeline_args_repr = yaml.dump(
pipeline_args,
block_seq_indent=2,
default_flow_style=False,
indent=2, )
logger.info('Final args:')
for line in pipeline_args_repr.split('\n'):
if 'token' in line:
index = line.index(':')
line = line[:index + 2] + '<< REDACTED >>'
logger.info(' {0}'.format(line))
# Clean up the tmp legacy artman config.
os.remove(tmp_legacy_config_yaml)
# Return the final arguments.
return pipeline_name, pipeline_args
def _get_publishing_config(artifact_config_pb, publish_target):
valid_options = []
for target in artifact_config_pb.publish_targets:
valid_options.append(target.name)
if target.name == publish_target:
return target
logger.error('No publish target with `%s` configured in artifact `%s`. '
'Valid options are %s' %
(publish_target, artifact_config_pb.name, valid_options))
sys.exit(96)
def _run_artman_in_docker(flags):
"""Executes artman command.
Args:
input_dir: The input directory that will be mounted to artman docker
container as local googleapis directory.
Returns:
The output directory with artman-generated files.
"""
ARTMAN_CONTAINER_NAME = 'artman-docker'
input_dir = flags.input_dir
output_dir = flags.output_dir
artman_config_dirname = os.path.dirname(flags.config)
user_config = os.path.join(os.path.expanduser('~'), '.artman')
docker_image = flags.image
inner_artman_cmd_str = ' '.join(sys.argv[1:])
# TODO(ethanbao): Such folder to folder mounting won't work on windows.
base_cmd = [
'docker', 'run', '--name', ARTMAN_CONTAINER_NAME, '--rm', '-i', '-t',
'-e', 'HOST_USER_ID=%s' % os.getuid(), '-e',
'HOST_GROUP_ID=%s' % os.getgid(), '-v', '%s:%s' % (input_dir,
input_dir), '-v',
'%s:%s' % (output_dir, output_dir), '-v', '%s:%s' %
(artman_config_dirname,
artman_config_dirname), '-v', '%s:/home/.artman' % user_config, '-w',
os.getcwd(), docker_image, '/bin/bash', '-c'
]
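    # For reference, the assembled command has this shape (paths abbreviated):
    #   docker run --name artman-docker --rm -i -t \
    #     -e HOST_USER_ID=<uid> -e HOST_GROUP_ID=<gid> \
    #     -v <input>:<input> -v <output>:<output> -v <config dir>:<config dir> \
    #     -v ~/.artman:/home/.artman -w <cwd> <image> /bin/bash -c '<artman2 cmd>'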
debug_cmd = list(base_cmd)
debug_cmd.append('"artman2 %s; bash"' % inner_artman_cmd_str)
cmd = base_cmd
cmd.append('artman2 --local %s' % (inner_artman_cmd_str))
try:
output = subprocess.check_output(cmd)
logger.info(output.decode('utf8'))
return output_dir
except subprocess.CalledProcessError as e:
logger.error(e.output.decode('utf8'))
logger.error(
'Artman execution failed. For additional logging, re-run the '
'command with the "--verbose" flag')
raise
finally:
logger.debug('For further inspection inside docker container, run `%s`'
% ' '.join(debug_cmd))
def _chown_for_artman_output(output_dir):
"""Change ownership of artman output if necessary.
    When artman runs in a Docker instance, all output files are by default
    owned by `root`, making them non-editable by the Docker host user. When
    the user passes the host user id and group id through environment
    variables via the `-e` flag, artman changes the owner based on the
    specified user id and group id.
"""
if os.getenv('HOST_USER_ID') and os.getenv('HOST_GROUP_ID'):
for root, dirs, files in os.walk(output_dir):
os.chown(root,
int(os.getenv('HOST_USER_ID')),
int(os.getenv('HOST_GROUP_ID')))
for d in dirs:
os.chown(
os.path.join(root, d),
int(os.getenv('HOST_USER_ID')),
int(os.getenv('HOST_GROUP_ID')))
for f in files:
os.chown(
os.path.join(root, f),
int(os.getenv('HOST_USER_ID')),
int(os.getenv('HOST_GROUP_ID')))
if __name__ == "__main__":
main()
|
apache-2.0
|
anpingli/origin
|
test/integration/buildpod_admission_test.go
|
12550
|
package integration
import (
"reflect"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
watchapi "k8s.io/apimachinery/pkg/watch"
kclientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kapi "k8s.io/kubernetes/pkg/apis/core"
kapiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
buildv1 "github.com/openshift/api/build/v1"
buildclient "github.com/openshift/client-go/build/clientset/versioned"
buildapi "github.com/openshift/openshift-apiserver/pkg/build/apis/build"
buildutil "github.com/openshift/openshift-controller-manager/pkg/build/buildutil"
buildtestutil "github.com/openshift/openshift-controller-manager/pkg/build/controller/common/testutil"
testutil "github.com/openshift/origin/test/util"
testserver "github.com/openshift/origin/test/util/server"
configapi "github.com/openshift/origin/test/util/server/deprecated_openshift/apis/config"
)
var buildPodAdmissionTestTimeout = 30 * time.Second
func TestBuildDefaultGitHTTPProxy(t *testing.T) {
httpProxy := "http://my.test.proxy:12345"
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
GitHTTPProxy: httpProxy,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Source.Git.HTTPProxy; actual == nil || *actual != httpProxy {
t.Errorf("Resulting build did not get expected HTTP proxy: %v", actual)
}
}
func TestBuildDefaultGitHTTPSProxy(t *testing.T) {
httpsProxy := "https://my.test.proxy:12345"
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
GitHTTPSProxy: httpsProxy,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Source.Git.HTTPSProxy; actual == nil || *actual != httpsProxy {
t.Errorf("Resulting build did not get expected HTTPS proxy: %v", actual)
}
}
func TestBuildDefaultEnvironment(t *testing.T) {
env := []kapi.EnvVar{
{
Name: "VAR1",
Value: "VALUE1",
},
{
Name: "VAR2",
Value: "VALUE2",
},
}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
Env: env,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
internalDockerStrategy := &buildapi.DockerBuildStrategy{}
if err := legacyscheme.Scheme.Convert(build.Spec.Strategy.DockerStrategy, internalDockerStrategy, nil); err != nil {
t.Errorf("Failed to convert build strategy: %v", err)
}
if actual := internalDockerStrategy.Env; !reflect.DeepEqual(env, actual) {
t.Errorf("Resulting build did not get expected environment: %+#v", actual)
}
}
func TestBuildDefaultLabels(t *testing.T) {
labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
ImageLabels: labels,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
internalOutput := &buildapi.BuildOutput{}
if err := legacyscheme.Scheme.Convert(&build.Spec.Output, internalOutput, nil); err != nil {
t.Errorf("Failed to convert build output: %v", err)
}
if actual := internalOutput.ImageLabels; !reflect.DeepEqual(labels, actual) {
t.Errorf("Resulting build did not get expected labels: %v", actual)
}
}
func TestBuildDefaultNodeSelectors(t *testing.T) {
selectors := map[string]string{"KEY": "VALUE", v1.LabelOSStable: "linux"}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
NodeSelector: selectors,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
t.Errorf("Resulting pod did not get expected nodeselectors: %v", actual)
}
}
func TestBuildDefaultAnnotations(t *testing.T) {
annotations := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &configapi.BuildDefaultsConfig{
Annotations: annotations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Annotations; actual["KEY"] != annotations["KEY"] {
t.Errorf("Resulting pod did not get expected annotations: actual: %v, expected: %v", actual["KEY"], annotations["KEY"])
}
}
func TestBuildOverrideTolerations(t *testing.T) {
tolerations := map[string]v1.Toleration{
"myKey1": {
Key: "mykey1",
Value: "myvalue1",
Effect: "NoSchedule",
Operator: "Equal",
},
"mykey2": {
Key: "mykey2",
Value: "myvalue2",
Effect: "NoSchedule",
Operator: "Equal",
},
}
overrideTolerations := []kapi.Toleration{}
for _, v := range tolerations {
coreToleration := kapi.Toleration{}
err := kapiv1.Convert_v1_Toleration_To_core_Toleration(&v, &coreToleration, nil)
if err != nil {
t.Errorf("Unable to convert v1.Toleration to core.Toleration: %v", err)
} else {
overrideTolerations = append(overrideTolerations, coreToleration)
}
}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
Tolerations: overrideTolerations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
for _, podToleration := range pod.Spec.Tolerations {
expectedTol, ok := tolerations[podToleration.Key]
if !ok {
t.Logf("Toleration %s found on pod, but is not in required list of tolerations", podToleration.Key)
} else if !reflect.DeepEqual(expectedTol, podToleration) {
t.Errorf("Resulting pod did not get expected tolerations, expected: %#v, actual: %#v", expectedTol, podToleration)
}
}
}
func TestBuildOverrideForcePull(t *testing.T) {
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
ForcePull: true,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if !build.Spec.Strategy.DockerStrategy.ForcePull {
t.Errorf("ForcePull was not set on resulting build")
}
}
func TestBuildOverrideForcePullCustomStrategy(t *testing.T) {
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
ForcePull: true,
})
defer fn()
build, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestCustomBuild())
if pod.Spec.Containers[0].ImagePullPolicy != v1.PullAlways {
t.Errorf("Pod ImagePullPolicy is not PullAlways")
}
if !build.Spec.Strategy.CustomStrategy.ForcePull {
t.Errorf("ForcePull was not set on resulting build")
}
}
func TestBuildOverrideLabels(t *testing.T) {
labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
ImageLabels: labels,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
internalOutput := &buildapi.BuildOutput{}
if err := legacyscheme.Scheme.Convert(&build.Spec.Output, internalOutput, nil); err != nil {
t.Errorf("Failed to convert build output: %v", err)
}
if actual := internalOutput.ImageLabels; !reflect.DeepEqual(labels, actual) {
t.Errorf("Resulting build did not get expected labels: %v", actual)
}
}
func TestBuildOverrideNodeSelectors(t *testing.T) {
selectors := map[string]string{"KEY": "VALUE", v1.LabelOSStable: "linux"}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
NodeSelector: selectors,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
t.Errorf("Resulting build did not get expected nodeselectors: %v", actual)
}
}
func TestBuildOverrideAnnotations(t *testing.T) {
annotations := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &configapi.BuildOverridesConfig{
Annotations: annotations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Annotations; actual["KEY"] != annotations["KEY"] {
t.Errorf("Resulting pod did not get expected annotations: %v", actual)
}
}
func buildPodAdmissionTestCustomBuild() *buildv1.Build {
build := &buildv1.Build{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
buildv1.BuildConfigLabel: "mock-build-config",
buildv1.BuildRunPolicyLabel: string(buildv1.BuildRunPolicyParallel),
},
}}
build.Name = "test-custom-build"
build.Spec.Source.Git = &buildv1.GitBuildSource{URI: "http://test/src"}
build.Spec.Strategy.CustomStrategy = &buildv1.CustomBuildStrategy{}
build.Spec.Strategy.CustomStrategy.From.Kind = "DockerImage"
build.Spec.Strategy.CustomStrategy.From.Name = "test/image"
return build
}
func buildPodAdmissionTestDockerBuild() *buildv1.Build {
build := &buildv1.Build{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
buildv1.BuildConfigLabel: "mock-build-config",
buildv1.BuildRunPolicyLabel: string(buildv1.BuildRunPolicyParallel),
},
}}
build.Name = "test-build"
build.Spec.Source.Git = &buildv1.GitBuildSource{URI: "http://test/src"}
build.Spec.Strategy.DockerStrategy = &buildv1.DockerBuildStrategy{}
return build
}
func runBuildPodAdmissionTest(t *testing.T, client buildclient.Interface, kclientset kclientset.Interface, build *buildv1.Build) (*buildv1.Build,
*v1.Pod) {
ns := testutil.Namespace()
_, err := client.BuildV1().Builds(ns).Create(build)
if err != nil {
t.Fatalf("%v", err)
}
watchOpt := metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector(
"metadata.name",
buildutil.GetBuildPodName(build),
).String(),
}
podWatch, err := kclientset.CoreV1().Pods(ns).Watch(watchOpt)
if err != nil {
t.Fatalf("%v", err)
}
type resultObjs struct {
build *buildv1.Build
pod *v1.Pod
}
result := make(chan resultObjs)
defer podWatch.Stop()
go func() {
for e := range podWatch.ResultChan() {
if e.Type == watchapi.Added {
pod, ok := e.Object.(*v1.Pod)
if !ok {
t.Fatalf("unexpected object: %v", e.Object)
}
build := (*buildtestutil.TestPod)(pod).GetBuild(t)
result <- resultObjs{build: build, pod: pod}
}
}
}()
select {
case <-time.After(buildPodAdmissionTestTimeout):
t.Fatalf("timed out after %v", buildPodAdmissionTestTimeout)
case objs := <-result:
return objs.build, objs.pod
}
return nil, nil
}
func setupBuildDefaultsAdmissionTest(t *testing.T, defaultsConfig *configapi.BuildDefaultsConfig) (buildclient.Interface, kclientset.Interface, func()) {
return setupBuildPodAdmissionTest(t, map[string]*configapi.AdmissionPluginConfig{
"BuildDefaults": {
Configuration: defaultsConfig,
},
})
}
func setupBuildOverridesAdmissionTest(t *testing.T, overridesConfig *configapi.BuildOverridesConfig) (buildclient.Interface, kclientset.Interface, func()) {
return setupBuildPodAdmissionTest(t, map[string]*configapi.AdmissionPluginConfig{
"BuildOverrides": {
Configuration: overridesConfig,
},
})
}
func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]*configapi.AdmissionPluginConfig) (buildclient.Interface, kclientset.Interface, func()) {
master, err := testserver.DefaultMasterOptions()
if err != nil {
t.Fatal(err)
}
master.AdmissionConfig.PluginConfig = pluginConfig
clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(master)
if err != nil {
t.Fatal(err)
}
clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
if err != nil {
t.Fatal(err)
}
clusterAdminKubeClientset, err := kclientset.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
_, err = clusterAdminKubeClientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: testutil.Namespace()},
})
if err != nil {
t.Fatalf("%v", err)
}
err = testserver.WaitForServiceAccounts(
clusterAdminKubeClientset,
testutil.Namespace(),
[]string{
"builder",
"default",
})
if err != nil {
t.Fatalf("%v", err)
}
return buildclient.NewForConfigOrDie(clientConfig), clusterAdminKubeClientset, func() {
testserver.CleanupMasterEtcd(t, master)
}
}
|
apache-2.0
|
hirofumi/commons-feedparser
|
src/java/org/apache/commons/feedparser/post/MetaWeblogPostAgent.java
|
1701
|
/*
* Copyright 1999,2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.feedparser.post;
import java.util.Hashtable;
import java.util.Vector;
import org.apache.xmlrpc.XmlRpcClient;
/**
* A PostAgent allows a developer to post to a given weblog.
*
* @author <a href="mailto:burton@apache.org">Kevin A. Burton (burtonator)</a>
* @version $Id$
*/
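// Illustrative usage only; the endpoint, weblog id and credentials below are
// placeholders, and PostEntry is assumed to expose public title/description
// fields (as accessed in newPost):
//
//   PostEntry entry = new PostEntry();
//   entry.title = "Hello world";
//   entry.description = "Posted via MetaWeblogPostAgent";
//   new MetaWeblogPostAgent().newPost( "http://example.com/xmlrpc",
//                                      "myweblog", "user", "secret", entry );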
public class MetaWeblogPostAgent {
public void newPost( String router,
String weblog,
String username,
String password,
PostEntry entry ) throws Exception {
XmlRpcClient xmlrpc = new XmlRpcClient( router );
Vector params = new Vector();
params.add( weblog);
params.add( username );
params.add( password );
Hashtable struct = new Hashtable();
struct.put( "title", entry.title );
struct.put( "description", entry.description );
params.add( struct );
params.add( Boolean.TRUE );
// metaWeblog.newPost returns the id of the newly created post; it is currently discarded.
xmlrpc.execute( "metaWeblog.newPost", params );
}
}
|
apache-2.0
|
itgeeker/jdk
|
src/org/omg/DynamicAny/DynArray.java
|
718
|
package org.omg.DynamicAny;
/**
* org/omg/DynamicAny/DynArray.java .
* Generated by the IDL-to-Java compiler (portable), version "3.2"
* from c:/re/workspace/8-2-build-windows-amd64-cygwin/jdk8u72/5732/corba/src/share/classes/org/omg/DynamicAny/DynamicAny.idl
* Tuesday, December 22, 2015 7:17:37 PM PST
*/
/**
* DynArray objects support the manipulation of IDL arrays.
* Note that the dimension of the array is contained in the TypeCode which is accessible
* through the type attribute. It can also be obtained by calling the component_count operation.
*/
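// Illustrative note: for an IDL array of, say, 10 longs, component_count()
// returns 10, the same length that is recorded in the TypeCode returned by
// the type attribute.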
public interface DynArray extends DynArrayOperations, org.omg.DynamicAny.DynAny, org.omg.CORBA.portable.IDLEntity
{
} // interface DynArray
|
apache-2.0
|
cegesoma/rspub-core
|
rspub/core/executors.py
|
16471
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
:samp:`Events and base classes for execution`
"""
import logging
import os
import re
from abc import ABCMeta, abstractmethod
from enum import Enum
from glob import glob
from resync import CapabilityList
from resync import Resource
from resync import SourceDescription
from resync.list_base_with_index import ListBaseWithIndex
from resync.sitemap import Sitemap
from rspub.core.rs_enum import Capability
from rspub.core.rs_paras import RsParameters
from rspub.pluggable.gate import ResourceGateBuilder
from rspub.util import defaults
from rspub.util.gates import PluggedInGateBuilder, gate
from rspub.util.observe import Observable, ObserverInterruptException
LOG = logging.getLogger(__name__)
WELL_KNOWN_PATH = os.path.join(".well-known", "resourcesync")
CLASS_NAME_RESOURCE_GATE_BUILDER = "ResourceGateBuilder"
class ExecutorEvent(Enum):
"""
:samp:`Events fired by {Executors}`
There are information events (``inform``) and confirmation events (``confirm``). If an
:class:`~rspub.util.observe.Observer` overrides the method :func:`~rspub.util.observe.Observer.confirm`
and returns ``False`` on a ``confirm`` event,
an :class:`~rspub.util.observe.ObserverInterruptException` is raised.
All events are broadcast in the format::
[inform][confirm](source, event, **kwargs)
where ``source`` is the calling instance, ``event`` is the relevant event and ``**kwargs`` hold relevant
information about the event.
.. note:: All events are numbered, although numbers may not show in generated documentation.
"""
# # information events
# common low-level events
rejected_file = 1
"""
``1`` ``inform`` :samp:`File rejected by resource gate`
"""
start_file_search = 2
"""
``2`` ``inform`` :samp:`File search was started`
"""
created_resource = 3
"""
``3`` ``inform`` :samp:`The metadata for a resource was created`
"""
# common mid-level events
completed_document = 10
"""
``10`` ``inform`` :samp:`A sitemap document was completed`
"""
# common high-level events
found_changes = 20
"""
``20`` ``inform`` :samp:`Resources that changed were found`
"""
execution_start = 30
"""
``30`` ``inform`` :samp:`Execution of resource synchronization started`
"""
execution_end = 31
"""
``31`` ``inform`` :samp:`Execution of resource synchronization ended`
"""
# # confirmation events
clear_metadata_directory = 100
"""
``100`` ``confirm`` :samp:`Files in metadata directory will be erased`
"""
class SitemapData(object):
"""
:samp:`Holds metadata about sitemaps`
"""
def __init__(self, resource_count=0, ordinal=0, uri=None, path=None, capability_name=None,
document_saved=False):
"""
:samp:`Initialization`
:param int resource_count: the number of records in the sitemap
:param int ordinal: the ordinal number as reflected in the sitemap filename and url
:param str uri: the url of the sitemap
:param str path: the local path of the sitemap
:param str capability_name: the capability of the sitemap
:param bool document_saved: True if the sitemap was saved to disk, False otherwise
"""
self.resource_count = resource_count
self.ordinal = ordinal
self.uri = uri
self.path = path
self.capability_name = capability_name
self.document_saved = document_saved
self.doc_start = None
self.doc_end = defaults.w3c_now()
def __str__(self):
return "%s, resource_count: %d, ordinal: %d, saved: %s\n\t uri: %s\n\t path: %s" \
% (self.capability_name, self.resource_count, self.ordinal, str(self.document_saved),
self.uri, self.path)
class Executor(Observable, metaclass=ABCMeta):
"""
:samp:`Abstract base class for ResourceSync execution`
There are 6 ``build steps`` that concrete subclasses may override (or 7 if they want to completely take over
the execution). Two steps are mandatory for subclasses to implement: :func:`generate_rs_documents`
and :func:`create_index`. Steps :func:`create_capabilitylist` and :func:`update_resource_sync` are not abstract -
they can safely be done by this :class:`Executor`.
"""
def __init__(self, rs_parameters: RsParameters=None):
"""
:samp:`Initialization`
If no :class:`~rspub.core.rs_paras.RsParameters` are given, will construct
new :class:`~rspub.core.rs_paras.RsParameters` from the
configuration found under :func:`~rspub.core.config.Configurations.current_configuration_name`.
:param rs_parameters: :class:`~rspub.core.rs_paras.RsParameters` for execution
"""
Observable.__init__(self)
self.para = rs_parameters if rs_parameters else RsParameters()
self.passes_resource_gate = None
self.date_start_processing = None
self.date_end_processing = None
def resource_gate(self):
"""
:samp:`Construct or return the resource gate`
:return: resource gate
"""
if self.passes_resource_gate is None:
default_builder = ResourceGateBuilder(resource_dir=self.para.resource_dir,
metadata_dir=self.para.abs_metadata_dir(),
plugin_dir=self.para.plugin_dir)
gate_builder = PluggedInGateBuilder(CLASS_NAME_RESOURCE_GATE_BUILDER, default_builder, self.para.plugin_dir)
self.passes_resource_gate = gate_builder.build_gate()
return self.passes_resource_gate
def execute(self, filenames: iter):
"""
``build step 0`` :samp:`Publish ResourceSync documents`
Publish ResourceSync documents under conditions of
current :class:`~rspub.core.rs_paras.RsParameters`.
:param filenames: iter of filenames and/or directories to scan
"""
self.date_start_processing = defaults.w3c_now()
self.observers_inform(self, ExecutorEvent.execution_start, date_start_processing=self.date_start_processing)
if not os.path.exists(self.para.abs_metadata_dir()):
os.makedirs(self.para.abs_metadata_dir())
self.prepare_metadata_dir()
sitemap_data_iter = self.generate_rs_documents(filenames)
self.post_process_documents(sitemap_data_iter)
self.date_end_processing = defaults.w3c_now()
self.create_index(sitemap_data_iter)
capabilitylist_data = self.create_capabilitylist()
self.update_resource_sync(capabilitylist_data)
self.observers_inform(self, ExecutorEvent.execution_end, date_end_processing = self.date_end_processing,
new_sitemaps=sitemap_data_iter)
# # Execution steps - start
def prepare_metadata_dir(self):
"""
``build step 1`` :samp:`Does nothing`
Subclasses that want to prepare the metadata directory before generating new documents may override.
"""
pass
@abstractmethod
def generate_rs_documents(self, filenames: iter) -> [SitemapData]:
"""
``build step 2`` :samp:`Raises {NotImplementedError}`
Subclasses must walk resources found in ``filenames`` and, if appropriate, generate sitemaps
and produce sitemap data.
:param filenames: list of filenames and/or directories to scan
:return: list of :class:`SitemapData` of generated sitemaps
"""
raise NotImplementedError
def post_process_documents(self, sitemap_data_iter: iter):
"""
``build step 3`` :samp:`Does nothing`
Subclasses that want to post-process the documents in the metadata directory may override.
:param sitemap_data_iter: iter over :class:`SitemapData` of sitemaps generated in build step 2
"""
pass
@abstractmethod
def create_index(self, sitemap_data_iter: iter):
"""
``build step 4`` :samp:`Raises {NotImplementedError}`
Subclasses must create sitemap indexes if appropriate.
:param sitemap_data_iter: iter over :class:`SitemapData` of sitemaps generated in build step 2
"""
raise NotImplementedError
def create_capabilitylist(self) -> SitemapData:
"""
``build step 5`` :samp:`Create a new capabilitylist over sitemaps found in metadata directory`
:return: :class:`SitemapData` over the newly created capabilitylist
"""
capabilitylist_path = self.para.abs_metadata_path("capabilitylist.xml")
if os.path.exists(capabilitylist_path) and self.para.is_saving_sitemaps:
os.remove(capabilitylist_path)
doc_types = ["resourcelist", "changelist", "resourcedump", "changedump"]
capabilitylist = CapabilityList()
for doc_type in doc_types:
index_path = self.para.abs_metadata_path(doc_type + "-index.xml")
if os.path.exists(index_path):
capabilitylist.add(Resource(uri=self.para.uri_from_path(index_path), capability=doc_type))
else:
doc_list_files = sorted(glob(self.para.abs_metadata_path(doc_type + "_*.xml")))
for doc_list in doc_list_files:
capabilitylist.add(Resource(uri=self.para.uri_from_path(doc_list), capability=doc_type))
return self.finish_sitemap(-1, capabilitylist)
def update_resource_sync(self, capabilitylist_data):
"""
``build step 6`` :samp:`Update description with newly created capabilitylist`
:param capabilitylist_data: :class:`SitemapData` over the newly created capabilitylist
:return: :class:`SitemapData` over updated description
"""
src_desc_path = self.para.abs_description_path()
well_known_dir = os.path.dirname(src_desc_path)
os.makedirs(well_known_dir, exist_ok=True)
src_description = SourceDescription()
if os.path.exists(src_desc_path):
src_description = self.read_sitemap(src_desc_path, src_description)
src_description.add(Resource(uri=capabilitylist_data.uri, capability=Capability.capabilitylist.name),
replace=True)
sitemap_data = SitemapData(len(src_description), -1, self.para.description_url(), src_desc_path,
Capability.description.name)
if self.para.is_saving_sitemaps:
self.save_sitemap(src_description, src_desc_path)
sitemap_data.document_saved = True
self.observers_inform(self, ExecutorEvent.completed_document, document=src_description, sitemap_data=sitemap_data)
return sitemap_data
# # Execution steps - end
def clear_metadata_dir(self):
ok = self.observers_confirm(self, ExecutorEvent.clear_metadata_directory, metadata_dir=self.para.abs_metadata_dir())
if not ok:
raise ObserverInterruptException("Process interrupted by observer: event: %s, metadata directory: %s"
% (ExecutorEvent.clear_metadata_directory, self.para.abs_metadata_dir()))
xml_files = glob(self.para.abs_metadata_path("*.xml"))
for xml_file in xml_files:
os.remove(xml_file)
wellknown = os.path.join(self.para.abs_metadata_dir(), WELL_KNOWN_PATH)
if os.path.exists(wellknown):
os.remove(wellknown)
def resource_generator(self) -> iter:
def generator(filenames: iter, count=0) -> [int, Resource]:
passes_gate = self.resource_gate()
for filename in filenames:
if not isinstance(filename, str):
LOG.warning("Not a string: %s" % filename)
filename = str(filename)
file = os.path.abspath(filename)
if not os.path.exists(file):
LOG.warning("File does not exist: %s" % file)
elif os.path.isdir(file):
for cr, rsc in generator(self.walk_directories(file), count=count):
yield cr, rsc
count = cr
elif os.path.isfile(file):
if passes_gate(file):
count += 1
path = os.path.relpath(file, self.para.resource_dir)
uri = self.para.url_prefix + defaults.sanitize_url_path(path)
stat = os.stat(file)
resource = Resource(uri=uri, length=stat.st_size,
lastmod=defaults.w3c_datetime(stat.st_ctime),
md5=defaults.md5_for_file(file),
mime_type=defaults.mime_type(file))
yield count, resource
self.observers_inform(self, ExecutorEvent.created_resource, resource=resource,
count=count, file=file)
else:
self.observers_inform(self, ExecutorEvent.rejected_file, file=file)
else:
LOG.warning("Not a regular file: %s" % file)
return generator
def walk_directories(self, *directories) -> [str]:
for directory in directories:
abs_dir = os.path.abspath(directory)
self.observers_inform(self, ExecutorEvent.start_file_search, directory=abs_dir)
for root, _directories, _filenames in os.walk(abs_dir):
for filename in _filenames:
file = os.path.join(root, filename)
yield file
def find_ordinal(self, capability):
rs_files = sorted(glob(self.para.abs_metadata_path(capability + "_*.xml")))
if len(rs_files) == 0:
return -1
else:
filename = os.path.basename(rs_files[len(rs_files) - 1])
digits = re.findall(r"\d+", filename)
return int(digits[0]) if len(digits) > 0 else 0
def format_ordinal(self, ordinal):
# prepends '_' before zfill to distinguish between indexes (*list-index.xml) and regular lists (*list_001.xml)
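# e.g. with zero_fill_filename == 4, format_ordinal(2) yields "_0002"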
return "_" + str(ordinal).zfill(self.para.zero_fill_filename)
def finish_sitemap(self, ordinal, sitemap, doc_start=None, doc_end=None) -> SitemapData:
capability_name = sitemap.capability_name
file_name = capability_name
if sitemap.sitemapindex:
file_name += "-index"
elif ordinal >= 0:
file_name += self.format_ordinal(ordinal)
file_name += ".xml"
path = self.para.abs_metadata_path(file_name)
url = self.para.uri_from_path(path)
sitemap.link_set(rel="up", href=self.current_rel_up_for(sitemap))
sitemap_data = SitemapData(len(sitemap), ordinal, url, path, capability_name)
sitemap_data.doc_start = doc_start
sitemap_data.doc_end = doc_end if doc_end else defaults.w3c_now()
if self.para.is_saving_sitemaps:
sitemap.pretty_xml = self.para.is_saving_pretty_xml
self.save_sitemap(sitemap, path)
sitemap_data.document_saved = True
self.observers_inform(self, ExecutorEvent.completed_document, document=sitemap, sitemap_data=sitemap_data)
return sitemap_data
def current_rel_up_for(self, sitemap):
if sitemap.capability_name == Capability.capabilitylist.name:
return self.para.description_url()
else:
return self.para.capabilitylist_url()
def update_rel_index(self, index_url, path):
sitemap = self.read_sitemap(path)
sitemap.link_set(rel="index", href=index_url)
self.save_sitemap(sitemap, path)
def save_sitemap(self, sitemap, path):
sitemap.pretty_xml = self.para.is_saving_pretty_xml
# Writing the string sitemap.as_xml() to disk results in encoding=ASCII on some systems;
# see https://docs.python.org/3.4/library/xml.etree.elementtree.html#write
sitemap.write(path)
def read_sitemap(self, path, sitemap=None):
if sitemap is None:
sitemap = ListBaseWithIndex()
with open(path, "r", encoding="utf-8") as file:
sm = Sitemap()
sm.parse_xml(file, resources=sitemap)
return sitemap
|
apache-2.0
|
sarvex/tensorflow
|
tensorflow/python/eager/run_eager_op_as_function_test.py
|
4936
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for wrapping an eager op in a call op at runtime."""
import time
from tensorflow.python.eager import benchmarks_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import tf_inspect
def run_benchmark(func, num_iters, unused_execution_mode):
# warm up
func()
start = time.time()
for _ in range(num_iters):
func()
end = time.time()
return end - start
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
# TODO(srbs): Why can't we use absl parameterized here?
class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
def __init__(self):
super().__init__()
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_100 = random_ops.random_uniform((100, 100))
self._m_1000_by_1000 = random_ops.random_uniform((1000, 1000))
def _get_benchmark_name(self):
"""Copied from benchmarks_test.py."""
stack = tf_inspect.stack()
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, test.Benchmark):
name = frame[3] # Get the method name
# This is a hack to get around the fact that some methods might have a
# disable_tfrt decorator around them. In that case a function called
# 'decorated' wraps the real called function underneath and so we
# peek one deeper into the stack to get the real name.
if name == "decorated":
continue
else:
break
if name is None:
raise ValueError("Unable to determine calling Benchmark function.")
if context.is_tfrt_enabled():
name = name + "_tfrt"
return name
def _run(self, func, num_iters):
self.run_report(run_benchmark, func, num_iters)
def _benchmark_matmul(self, mat, device):
if device == GPU and not context.num_gpus():
return
with context.device(device):
if device == GPU:
mat = mat.gpu()
func = lambda: math_ops.matmul(mat, mat)
self._run(func, num_iters=1000)
def benchmark_tf_matmul_2_by_2_CPU(self):
self._benchmark_matmul(self._m_2_by_2, CPU)
def benchmark_tf_matmul_2_by_2_GPU(self):
self._benchmark_matmul(self._m_2_by_2, GPU)
def benchmark_tf_matmul_100_by_100_CPU(self):
self._benchmark_matmul(self._m_100_by_100, CPU)
def benchmark_tf_matmul_100_by_100_GPU(self):
self._benchmark_matmul(self._m_100_by_100, GPU)
def benchmark_tf_matmul_1000_by_1000_CPU(self):
self._benchmark_matmul(self._m_1000_by_1000, CPU)
def benchmark_tf_matmul_1000_by_1000_GPU(self):
self._benchmark_matmul(self._m_1000_by_1000, GPU)
class RunEagerOpAsFunctionTest(test.TestCase):
def setUp(self):
super().setUp()
self._m_2_by_2 = random_ops.random_uniform((2, 2))
def testMatmul(self):
math_ops.matmul(self._m_2_by_2, self._m_2_by_2)
def testMixedTypeListInputFastPath(self):
array_ops.identity_n([self._m_2_by_2, self._m_2_by_2])
def testMixedTypeListInputEagerFallback(self):
array_ops.identity_n([1, 1])
def testMixedTypeListInputFastPathDifferentArity(self):
# This tests that the FunctionDef cache key contains the number of args.
array_ops.identity_n([self._m_2_by_2, self._m_2_by_2])
array_ops.identity_n([self._m_2_by_2, self._m_2_by_2, self._m_2_by_2])
def testMixedTypeListInputEagerFallbackDifferentArity(self):
array_ops.identity_n([1, 1])
array_ops.identity_n([1, 1, 1])
def testSingleTypeListFastPath(self):
array_ops.concat([self._m_2_by_2, self._m_2_by_2], axis=-1)
def testSingleTypeListEagerFallback(self):
array_ops.concat([[1], [2]], axis=-1)
def testSingleTypeListFastPathDifferentArity(self):
array_ops.concat([self._m_2_by_2, self._m_2_by_2], axis=-1)
array_ops.concat([self._m_2_by_2, self._m_2_by_2, self._m_2_by_2], axis=-1)
def testSingleTypeListEagerFallbackDifferentArity(self):
array_ops.concat([[1], [2]], axis=-1)
array_ops.concat([[1], [2], [3]], axis=-1)
if __name__ == "__main__":
context.enable_run_eager_op_as_function()
test.main()
|
apache-2.0
|
zhubinqiang/myTMS
|
src/main/java/com/intel/media/mts/action/execution/SendCommandFromResultAction.java
|
1543
|
package com.intel.media.mts.action.execution;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import org.apache.struts2.ServletActionContext;
import com.intel.media.mts.util.SocketUtil;
import com.opensymphony.xwork2.ActionSupport;
public class SendCommandFromResultAction extends ActionSupport {
private List<Integer> ids;
private Integer executionId;
private String host;
private Integer port;
public List<Integer> getIds() {
return ids;
}
public void setIds(List<Integer> ids) {
this.ids = ids;
}
public Integer getExecutionId() {
return executionId;
}
public void setExecutionId(Integer executionId) {
this.executionId = executionId;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
@Override
public String execute() throws Exception {
HttpServletRequest request = ServletActionContext.getRequest();
StringBuilder buff = new StringBuilder();
for(Integer id : ids){
buff.append(id);
buff.append(",");
}
if(buff.length() > 0){
buff.deleteCharAt(buff.length()-1); // drop the trailing comma
}
String addr = "http://" + request.getServerName() + ":" + request.getServerPort();
String cmd = "addtask " + addr + request.getContextPath()
+ "/executionFromResultToXmlAction?execution.id=" + executionId
+ "&ids=" + buff.toString();
SocketUtil.sendCommand(host, port, cmd);
return SUCCESS;
}
}
|
apache-2.0
|
s3git/s3git
|
cmd/snapshot.go
|
4419
|
/*
* Copyright 2016 Frank Wessels <fwessels@xs4all.nl>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/s3git/s3git-go"
"github.com/spf13/cobra"
)
var pushAfterCreate bool
var dedupe bool
var hash bool
var presignedUrls bool
var jsonOutput bool
// snapshotCmd represents the snapshot command
var snapshotCmd = &cobra.Command{
Use: "snapshot",
Short: "Manage snapshots",
Long: "Create, checkout and list snapshots",
}
var snapshotCreateCmd = &cobra.Command{
Use: "create [directory]",
Short: "Create a snapshot",
Long: "Create a snapshot",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
er("Directory for snapshot must be specified")
} else if message == "" {
er("Commit message for snapshot must be specified")
}
repo, err := s3git.OpenRepository(".")
if err != nil {
er(err)
}
key, nothing, err := repo.SnapshotCreate(args[0], message)
if err != nil {
er(err)
}
if nothing {
fmt.Println("No changes to snapshot")
return
}
fmt.Printf("[commit %s]\n", key)
if pushAfterCreate {
pushCmd.Run(pushCmd, []string{})
}
},
}
var snapshotCheckoutCmd = &cobra.Command{
Use: "checkout [directory] ([commit])",
Short: "Checkout a snapshot",
Long: "Checkout a snapshot",
Run: func(cmd *cobra.Command, args []string) {
// TODO: Partial checkout would be nice (eg specify path as filter)
if len(args) == 0 {
er("Directory for snapshot must be specified")
}
repo, err := s3git.OpenRepository(".")
if err != nil {
er(err)
}
var commit string
if len(args) == 2 {
commit = args[1]
}
err = repo.SnapshotCheckout(args[0], commit, dedupe)
if err != nil {
er(err)
}
},
}
var snapshotListCmd = &cobra.Command{
Use: "ls ([commit])",
Short: "List a snapshot",
Long: "List a snapshot",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
er("Commit for snapshot must be specified")
}
repo, err := s3git.OpenRepository(".")
if err != nil {
er(err)
}
var commit string
if len(args) == 1 {
commit = args[0]
}
options := []s3git.SnapshotListOptions{}
options = append(options, s3git.SnapshotListOptionSetShowHash(hash))
options = append(options, s3git.SnapshotListOptionSetPresignedUrls(presignedUrls))
options = append(options, s3git.SnapshotListOptionSetJsonOutput(jsonOutput))
err = repo.SnapshotList(commit, options...)
if err != nil {
er(err)
}
},
}
var snapshotStatusCmd = &cobra.Command{
Use: "status [directory] ([commit])",
Short: "Show changes for snapshot",
Long: "Show changes for snapshot",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
er("Directory for snapshot must be specified")
}
repo, err := s3git.OpenRepository(".")
if err != nil {
er(err)
}
var commit string
if len(args) == 2 {
commit = args[1]
}
err = repo.SnapshotStatus(args[0], commit)
if err != nil {
er(err)
}
},
}
func init() {
RootCmd.AddCommand(snapshotCmd)
snapshotCmd.AddCommand(snapshotCreateCmd)
snapshotCmd.AddCommand(snapshotCheckoutCmd)
snapshotCmd.AddCommand(snapshotListCmd)
snapshotCmd.AddCommand(snapshotStatusCmd)
// Local flags for create
snapshotCreateCmd.Flags().StringVarP(&message, "message", "m", "", "Message for the commit of create snapshot")
snapshotCreateCmd.Flags().BoolVarP(&pushAfterCreate, "push", "p", false, "Perform immediate push after create")
// Local flags for checkout
snapshotCheckoutCmd.Flags().BoolVar(&dedupe, "dedupe", false, "Checkout in deduped (pointers) format")
// Local flags for list
snapshotListCmd.Flags().BoolVar(&hash, "hash", false, "Show hash of object")
snapshotListCmd.Flags().BoolVar(&presignedUrls, "presigned", false, "Generate presigned urls for direct access from S3")
snapshotListCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output result in JSON")
}
|
apache-2.0
|
givemeahigh5/ImageQuiz
|
src/main/java/scripteditor/HelpPopup.java
|
10318
|
/**
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* HelpPopup.java
*
* Created on April 13, 2007, 8:45 PM
*/
package scripteditor;
import java.io.IOException;
import java.io.*;
import javax.swing.JOptionPane;
import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
/**
*
* @author Ben
*/
public class HelpPopup extends javax.swing.JDialog {
// String userPath = System.getenv("USERPROFILE");
private String mName;
private String mFirstLine;
private ActionListener myListener;
/** Creates new form HelpPopup */
public HelpPopup(String name, String firstLine, ActionListener al) {
mName = name;
mFirstLine = firstLine;
myListener = al;
initComponents();
addEscapeListener(this);//Added by preethy
//
}
/** This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
chkShowOnStartUp = new javax.swing.JCheckBox();
jPanel1 = new javax.swing.JPanel();
jLabel3 = new javax.swing.JLabel();
jScrollPane1 = new javax.swing.JScrollPane();
jTextArea1 = new javax.swing.JTextArea();
setTitle("Visual Learning:");
setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
setResizable(false);
addWindowListener(new java.awt.event.WindowAdapter() {
public void windowClosing(java.awt.event.WindowEvent evt) {
formWindowClosing(evt);
}
public void windowOpened(java.awt.event.WindowEvent evt) {
formWindowOpened(evt);
}
});
chkShowOnStartUp.setSelected(true);
chkShowOnStartUp.setText("Show this menu on startup.");
chkShowOnStartUp.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0));
chkShowOnStartUp.setMargin(new java.awt.Insets(0, 0, 0, 0));
chkShowOnStartUp.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
chkShowOnStartUpActionPerformed(evt);
}
});
jPanel1.setBackground(new java.awt.Color(255, 255, 255));
jLabel3.setBackground(new java.awt.Color(255, 255, 255));
jLabel3.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N
jLabel3.setForeground(new java.awt.Color(0, 51, 204));
jLabel3.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
jLabel3.setText("Take a Tutorial");
jLabel3.setDebugGraphicsOptions(javax.swing.DebugGraphics.NONE_OPTION);
jLabel3.setOpaque(true);
jLabel3.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseReleased(java.awt.event.MouseEvent evt) {
jLabel3MouseReleased(evt);
}
public void mouseExited(java.awt.event.MouseEvent evt) {
jLabel3MouseExited(evt);
}
public void mouseEntered(java.awt.event.MouseEvent evt) {
jLabel3MouseEntered(evt);
}
});
jScrollPane1.setBorder(null);
jTextArea1.setColumns(20);
jTextArea1.setFont(new java.awt.Font("Tahoma", 0, 14)); // NOI18N
jTextArea1.setRows(5);
jScrollPane1.setViewportView(jTextArea1);
org.jdesktop.layout.GroupLayout jPanel1Layout = new org.jdesktop.layout.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(org.jdesktop.layout.GroupLayout.TRAILING, jPanel1Layout.createSequentialGroup()
.addContainerGap()
.add(jLabel3, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addContainerGap())
.add(org.jdesktop.layout.GroupLayout.TRAILING, jPanel1Layout.createSequentialGroup()
.addContainerGap(18, Short.MAX_VALUE)
.add(jScrollPane1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 510, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)
.add(19, 19, 19))
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(jPanel1Layout.createSequentialGroup()
.addContainerGap(17, Short.MAX_VALUE)
.add(jScrollPane1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 170, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(org.jdesktop.layout.LayoutStyle.UNRELATED)
.add(jLabel3, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 36, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)
.addContainerGap())
);
org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(layout.createSequentialGroup()
.addContainerGap()
.add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(jPanel1, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.add(layout.createSequentialGroup()
.add(chkShowOnStartUp)
.add(0, 0, Short.MAX_VALUE)))
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING)
.add(layout.createSequentialGroup()
.addContainerGap()
.add(jPanel1, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED)
.add(chkShowOnStartUp)
.addContainerGap())
);
pack();
}// </editor-fold>//GEN-END:initComponents
private void jLabel3MouseReleased(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jLabel3MouseReleased
this.setVisible(false);
myListener.actionPerformed(new ActionEvent(this, 1, "Tutorial"));
//this.dispose();
}//GEN-LAST:event_jLabel3MouseReleased
private void jLabel3MouseExited(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jLabel3MouseExited
// TODO add your handling code here:
jLabel3.setBorder(null);
// jLabel3.setBorder(new javax.swing.border.LineBorder(new java.awt.Color(0, 0, 0), 1, true));
}//GEN-LAST:event_jLabel3MouseExited
private void formWindowClosing(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosing
mFirstLine = mFirstLine.substring(0, mFirstLine.length()-1);
if(!chkShowOnStartUp.isSelected()){
mFirstLine = mFirstLine + "0";
}
else
mFirstLine = mFirstLine + "1";
// String path = Configuration.UserPath();
String path = Configuration.ApplicationPath();
String filename = path + "/UserFiles/" + mName + ".csv";
try{
FileWriter myFileWriter = new FileWriter(filename);
PrintWriter diskfile = new PrintWriter(myFileWriter);
diskfile.println(mFirstLine); // the trailing digit is the show-on-startup flag
diskfile.close();
} catch(IOException ioe){
// If the file cannot be written, the show-on-startup flag is simply not persisted.
}
}//GEN-LAST:event_formWindowClosing
private void jLabel3MouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jLabel3MouseEntered
// TODO add your handling code here:
// jLabel3.setBackground(this.getBackground());
jLabel3.setBorder(new javax.swing.border.LineBorder(new java.awt.Color(0, 0, 0), 1, true));
}//GEN-LAST:event_jLabel3MouseEntered
private void chkShowOnStartUpActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_chkShowOnStartUpActionPerformed
// TODO add your handling code here:
}//GEN-LAST:event_chkShowOnStartUpActionPerformed
private void formWindowOpened(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowOpened
this.setTitle(ConfigFileReader.getProjectName());
jTextArea1.setText(ConfigFileReader.getWelcomeText());
}//GEN-LAST:event_formWindowOpened
public static void addEscapeListener(final JDialog dialog) {
ActionListener escListener = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
// esc=true;
dialog.setVisible(false); } };
dialog.getRootPane().registerKeyboardAction(escListener,KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0),JComponent.WHEN_IN_FOCUSED_WINDOW);
}
/**
* @param args the command line arguments
*/
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JCheckBox chkShowOnStartUp;
private javax.swing.JLabel jLabel3;
private javax.swing.JPanel jPanel1;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JTextArea jTextArea1;
// End of variables declaration//GEN-END:variables
}
|
apache-2.0
|
papoose/papoose-core
|
cnd-perm-admin/src/main/java/org/papoose/core/ConditionalPermissionAdminImpl.java
|
7935
|
/**
*
* Copyright 2008-2009 (C) The original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.papoose.core;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.security.AccessControlContext;
import java.security.CodeSource;
import java.security.PermissionCollection;
import java.security.Permissions;
import java.security.ProtectionDomain;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleEvent;
import org.osgi.framework.ServiceRegistration;
import org.osgi.framework.SynchronousBundleListener;
import org.osgi.service.condpermadmin.ConditionInfo;
import org.osgi.service.condpermadmin.ConditionalPermissionAdmin;
import org.osgi.service.condpermadmin.ConditionalPermissionInfo;
import org.osgi.service.condpermadmin.ConditionalPermissionUpdate;
import org.osgi.service.permissionadmin.PermissionInfo;
import org.papoose.core.spi.ProtectionDomainFactory;
/**
*
*/
public class ConditionalPermissionAdminImpl implements ConditionalPermissionAdmin, SynchronousBundleListener, ProtectionDomainFactory
{
private final Map<String, ConditionalPermissionInfo> permissionTable = new HashMap<String, ConditionalPermissionInfo>();
private Papoose framework;
private ServiceRegistration registration;
private ProtectionDomainFactory savedFactory;
public void start(Papoose framework)
{
this.framework = framework;
this.savedFactory = framework.getBundleManager().getProtectionDomainFactory();
framework.getBundleManager().setProtectionDomainFactory(this);
BundleContext context = framework.getSystemBundleContext();
context.addBundleListener(this);
this.registration = context.registerService(ConditionalPermissionAdmin.class.getName(), this, null);
}
public void stop()
{
BundleContext context = framework.getSystemBundleContext();
registration.unregister();
context.removeBundleListener(this);
framework.getBundleManager().setProtectionDomainFactory(savedFactory);
savedFactory = null;
registration = null;
framework = null;
}
public ConditionalPermissionInfo addConditionalPermissionInfo(ConditionInfo[] conditionInfos, PermissionInfo[] permissionInfos)
{
return null; //todo: consider this autogenerated code
}
public ConditionalPermissionInfo setConditionalPermissionInfo(String name, ConditionInfo[] conditionInfos, PermissionInfo[] permissionInfos)
{
return null; //todo: consider this autogenerated code
}
public Enumeration getConditionalPermissionInfos()
{
return null; //todo: consider this autogenerated code
}
public ConditionalPermissionInfo getConditionalPermissionInfo(String name)
{
return null; //todo: consider this autogenerated code
}
public AccessControlContext getAccessControlContext(String[] signers)
{
return null; //todo: consider this autogenerated code
}
public ConditionalPermissionUpdate newConditionalPermissionUpdate()
{
return null; //Todo change body of implemented methods use File | Settings | File Templates.
}
public ConditionalPermissionInfo newConditionalPermissionInfo(String name, ConditionInfo[] conditions, PermissionInfo[] permissions, String access)
{
return null; //Todo change body of implemented methods use File | Settings | File Templates.
}
public ConditionalPermissionInfo newConditionalPermissionInfo(String encodedConditionalPermissionInfo)
{
return null; //Todo change body of implemented methods use File | Settings | File Templates.
}
public void bundleChanged(BundleEvent event)
{
Bundle bundle = event.getBundle();
if (event.getType() == BundleEvent.INSTALLED)
{
URL url = bundle.getEntry("OSGI-INF/permissions.perm");
if (url != null)
{
}
}
if (event.getType() == BundleEvent.UPDATED)
{
URL url = bundle.getEntry("OSGI-INF/permissions.perm");
if (url != null)
{
}
}
if (event.getType() == BundleEvent.UNINSTALLED)
{
}
//Todo: change body of implemented methods use File | Settings | File Templates.
}
public ProtectionDomain assignProtectionDomain(BundleGeneration bundle, CodeSource codesource, PermissionCollection permissions)
{
return null; //Todo: change body of implemented methods use File | Settings | File Templates.
}
private class ConditionalPermissionInfoImpl implements ConditionalPermissionInfo
{
private final String name;
private final ConditionInfo[] conditionInfos;
private final PermissionInfo[] permissionInfos;
private ConditionalPermissionInfoImpl(String name, ConditionInfo[] conditionInfos, PermissionInfo[] permissionInfos)
{
assert name != null;
this.name = name;
this.conditionInfos = new ConditionInfo[conditionInfos.length];
for (int i = 0; i < conditionInfos.length; i++)
{
ConditionInfo from = conditionInfos[i];
String[] args = new String[from.getArgs().length];
System.arraycopy(from.getArgs(), 0, args, 0, args.length);
this.conditionInfos[i] = new ConditionInfo(from.getType(), args);
}
this.permissionInfos = new PermissionInfo[permissionInfos.length];
for (int i = 0; i < permissionInfos.length; i++)
{
PermissionInfo from = permissionInfos[i];
this.permissionInfos[i] = new PermissionInfo(from.getType(), from.getName(), from.getActions());
}
}
public ConditionInfo[] getConditionInfos()
{
ConditionInfo[] result = new ConditionInfo[conditionInfos.length];
System.arraycopy(conditionInfos, 0, result, 0, result.length);
return result;
}
public PermissionInfo[] getPermissionInfos()
{
PermissionInfo[] result = new PermissionInfo[permissionInfos.length];
System.arraycopy(permissionInfos, 0, result, 0, result.length);
return result;
}
public void delete()
{
permissionTable.remove(name);
}
public String getName()
{
return name;
}
public String getAccessDecision()
{
return null; //Todo change body of implemented methods use File | Settings | File Templates.
}
public String getEncoded()
{
return null; //Todo change body of implemented methods use File | Settings | File Templates.
}
}
static private PermissionCollection parsePermissionCollection(URL url) throws IOException
{
PermissionCollection collection = new Permissions();
BufferedReader reader = new BufferedReader(new InputStreamReader(url.openStream()));
String line;
while ((line = reader.readLine()) != null)
{
// TODO: parse each encoded permission line and add it to the collection
}
reader.close();
return collection;
}
}
|
apache-2.0
|
ssaarela/javersion
|
javersion-json-webapp/src/main/java/org/javersion/json/web/VersionMetadata.java
|
1549
|
/*
* Copyright 2014 Samppa Saarela
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.javersion.json.web;
import static org.javersion.core.Persistent.GENERIC_TYPE;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.javersion.core.Revision;
import org.javersion.core.VersionProperty;
import org.javersion.object.Versionable;
import org.javersion.path.PropertyPath;
import com.google.common.collect.Multimap;
@Versionable(alias = GENERIC_TYPE)
public class VersionMetadata {
public String _id;
public List<Revision> _revs;
public Map<PropertyPath, Collection<VersionProperty<Object>>> _conflicts;
public VersionMetadata() {}
public VersionMetadata(String _id, Set<Revision> _revs, Multimap<PropertyPath, VersionProperty<Object>> conflicts) {
this._id = _id;
this._revs = _revs.isEmpty() ? null : new ArrayList<>(_revs);
this._conflicts = conflicts.isEmpty() ? null : conflicts.asMap();
}
}
|
apache-2.0
|
googleprojectzero/sandbox-attacksurface-analysis-tools
|
NtApiDotNet/Win32/RunningService.cs
|
846
|
// Copyright 2021 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
namespace NtApiDotNet.Win32
{
/// <summary>
/// Dummy class to mark the old name as obsolete.
/// </summary>
[Obsolete("Use Win32Service.")]
public abstract class RunningService
{
}
}
|
apache-2.0
|
cloudfoundry/cf-java-client
|
cloudfoundry-client/src/main/java/org/cloudfoundry/uaa/users/_ExpirePasswordResponse.java
|
1287
|
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.uaa.users;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.cloudfoundry.Nullable;
import org.immutables.value.Value;
/**
* The response from the expire password request
*/
@JsonDeserialize
@Value.Immutable
abstract class _ExpirePasswordResponse {
/**
* Whether the account is locked
*/
@JsonProperty("locked")
@Nullable
abstract Boolean getLocked();
/**
* Whether the user's password will be expired
*/
@JsonProperty("passwordChangeRequired")
@Nullable
abstract Boolean getPasswordChangeRequired();
}
|
apache-2.0
|
Talend/data-prep
|
dataprep-upgrade/src/test/java/org/talend/dataprep/upgrade/to_2_4_0_PE/Base_2_4_0_PE_Test.java
|
913
|
/*
* Copyright (C) 2006-2018 Talend Inc. - www.talend.com
*
* This source code is available under agreement available at
* https://github.com/Talend/data-prep/blob/master/LICENSE
*
* You should have received a copy of the agreement
* along with this program; if not, write to Talend SA
* 9 rue Pages 92150 Suresnes, France
*/
package org.talend.dataprep.upgrade.to_2_4_0_PE;
import org.junit.BeforeClass;
import org.springframework.test.context.TestPropertySource;
import org.talend.dataprep.upgrade.BasePEUpgradeTest;
/**
* Base class for all 2.4.0 PE upgrade tests (upgrading from a 2.3.0 PE store).
*/
@TestPropertySource(locations = { "to_2_4_0_PE.properties" })
public abstract class Base_2_4_0_PE_Test extends BasePEUpgradeTest {
@BeforeClass
public static void baseSetUp() throws Exception {
setupStore("2.3.0-PE");
}
@Override
protected String getExpectedVersion() {
return "2.4.0-PE";
}
}
|
apache-2.0
|
Deus0/Zeltexium
|
Assets/Plugins/Wasabimole/ProceduralTree/ProceduralTree.cs
|
16823
|
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System.IO;
#if UNITY_EDITOR
using UnityEditor;
#endif
// ---------------------------------------------------------------------------------------------------------------------------
// Procedural Tree - Simple tree mesh generation - © 2015 Wasabimole http://wasabimole.com
// ---------------------------------------------------------------------------------------------------------------------------
// BASIC USER GUIDE
//
// - Choose GameObject > Create Procedural > Procedural Tree from the Unity menu
// - Select the object to adjust the tree's properties
// - Click on Rand Seed to get a new tree of the same type
// - Click on Rand Tree to change the tree type
//
// ADVANCED USER GUIDE
//
// - Drag the object to a project folder to create a Prefab (to keep a static snapshot of the tree)
// - To add a collision mesh to the object, choose Add Component > Physics > Mesh Collider
// - To add or remove detail, change the number of sides
// - You can change the default diffuse bark materials for more complex ones (with bump-map, specular, etc.)
// - Add or replace default materials by adding them to the SampleMaterials\ folder
// - You can also change the tree generation parameters in REAL-TIME from your scripts (*)
// - Use Unity's undo to roll back any unwanted changes
//
// ADDITIONAL NOTES
//
// The generated mesh will remain on your scene, and will only be re-computed if/when you change any tree parameters.
//
// Branch(...) is the main tree generation function (called recursively), you can inspect/change the code to add new
// tree features. If you add any new generation parameters, remember to add them to the checksum in the Update() function
// (so the mesh gets re-computed when they change). If you add any cool new features, please share!!! ;-)
//
// To generate a new tree at runtime, just follow the example in Editor\ProceduralTreeEditor.cs:CreateProceduralTree()
// Additional scripts under ProceduralTree\Editor are optional, used to better integrate the trees into Unity.
//
// (*) To change the tree parameters in real-time, just get/keep a reference to the ProceduralTree component of the
// tree GameObject, and change any of the public properties of the class.
//
// >>> Please visit http://wasabimole.com/procedural-tree for more information
// ---------------------------------------------------------------------------------------------------------------------------
// VERSION HISTORY
//
// 1.02 Error fixes update
// - Fixed bug when generating the mesh on a rotated GameObject
// - Fixed error when building the project
//
// 1.00 First public release
// ---------------------------------------------------------------------------------------------------------------------------
// Thank you for choosing Procedural Tree, we sincerely hope you like it!
//
// Please send your feedback and suggestions to mailto://contact@wasabimole.com
// ---------------------------------------------------------------------------------------------------------------------------
namespace Wasabimole.ProceduralTree
{
[ExecuteInEditMode]
public class ProceduralTree : MonoBehaviour
{
public const int CurrentVersion = 102;
// ---------------------------------------------------------------------------------------------------------------------------
// Tree parameters (can be changed real-time in editor or game)
// ---------------------------------------------------------------------------------------------------------------------------
public int Seed; // Random seed on which the generation is based
[Range(1024, 65000)]
public int MaxNumVertices = 65000; // Maximum number of vertices for the tree mesh
[Range(3, 32)]
public int NumberOfSides = 16; // Number of sides for tree
[Range(0.25f, 4f)]
public float BaseRadius = 2f; // Base radius in meters
[Range(0.75f, 0.95f)]
public float RadiusStep = 0.9f; // Controls how quickly radius decreases
[Range(0.01f, 0.2f)]
public float MinimumRadius = 0.02f; // Minimum radius for the tree's smallest branches
[Range(0f, 1f)]
public float BranchRoundness = 0.8f; // Controls how round branches are
[Range(0.1f, 2f)]
public float SegmentLength = 0.5f; // Length of branch segments
[Range(0f, 40f)]
public float Twisting = 20f; // How much branches twist
[Range(0f, 0.25f)]
public float BranchProbability = 0.1f; // Branch probability
// ---------------------------------------------------------------------------------------------------------------------------
float checksum; // Serialized & non-serialized checksums: the tree is rebuilt only on undo operations or when parameters change (otherwise the mesh is kept on the scene)
[SerializeField, HideInInspector]
float checksumSerialized;
List<Vector3> vertexList; // Vertex list
List<Vector2> uvList; // UV list
List<int> triangleList; // Triangle list
float[] ringShape; // Tree ring shape array
[HideInInspector, System.NonSerialized]
public MeshRenderer Renderer; // MeshRenderer component
MeshFilter filter; // MeshFilter component
#if UNITY_EDITOR
[HideInInspector]
public string MeshInfo; // Used in ProceduralTreeEditor to show info about the tree mesh
#endif
// ---------------------------------------------------------------------------------------------------------------------------
// Initialise object, make sure it has MeshFilter and MeshRenderer components
// ---------------------------------------------------------------------------------------------------------------------------
void OnEnable()
{
if (filter != null && Renderer != null) return;
gameObject.isStatic = true;
filter = gameObject.GetComponent<MeshFilter>();
if (filter == null) filter = gameObject.AddComponent<MeshFilter>();
if (filter.sharedMesh != null) checksum = checksumSerialized;
Renderer = gameObject.GetComponent<MeshRenderer>();
if (Renderer == null) Renderer = gameObject.AddComponent<MeshRenderer>();
}
// ---------------------------------------------------------------------------------------------------------------------------
// Generate tree (only called when parameters change, or there's an undo operation)
// ---------------------------------------------------------------------------------------------------------------------------
public void GenerateTree()
{
gameObject.isStatic = false;
var originalRotation = transform.localRotation;
var originalSeed = Random.state;
if (vertexList == null) // Create lists for holding generated vertices
{
vertexList = new List<Vector3>();
uvList = new List<Vector2>();
triangleList = new List<int>();
}
else // Clear lists for holding generated vertices
{
vertexList.Clear();
uvList.Clear();
triangleList.Clear();
}
SetTreeRingShape(); // Init shape array for current number of sides
Random.InitState(Seed);
// Main recursive call, starts creating the ring of vertices in the trunk's base
Branch(new Quaternion(), Vector3.zero, -1, BaseRadius, 0f);
Random.state = originalSeed;
transform.localRotation = originalRotation; // Restore original object rotation
SetTreeMesh(); // Create/Update MeshFilter's mesh
}
// ---------------------------------------------------------------------------------------------------------------------------
// Set the tree mesh from the generated vertex lists (vertexList, uvList, triangleLists)
// ---------------------------------------------------------------------------------------------------------------------------
private void SetTreeMesh()
{
// Get mesh or create one
var mesh = filter.sharedMesh;
if (mesh == null)
mesh = filter.sharedMesh = new Mesh();
else
mesh.Clear();
// Assign vertex data
mesh.vertices = vertexList.ToArray();
mesh.uv = uvList.ToArray();
mesh.triangles = triangleList.ToArray();
// Update mesh
mesh.RecalculateNormals();
mesh.RecalculateBounds();
// Do not optimize the mesh here if we are going to change it dynamically!
#if UNITY_EDITOR
MeshInfo = "Mesh has " + vertexList.Count + " vertices and " + triangleList.Count / 3 + " triangles";
#endif
}
// ---------------------------------------------------------------------------------------------------------------------------
// Main branch recursive function to generate tree
// ---------------------------------------------------------------------------------------------------------------------------
void Branch(Quaternion quaternion, Vector3 position, int lastRingVertexIndex, float radius, float texCoordV)
{
var offset = Vector3.zero;
var texCoord = new Vector2(0f, texCoordV);
var textureStepU = 1f / NumberOfSides;
var angInc = 2f * Mathf.PI * textureStepU;
var ang = 0f;
// Add ring vertices
for (var n = 0; n <= NumberOfSides; n++, ang += angInc)
{
var r = ringShape[n] * radius;
offset.x = r * Mathf.Cos(ang); // Get X, Z vertex offsets
offset.z = r * Mathf.Sin(ang);
vertexList.Add(position + quaternion * offset); // Add Vertex position
uvList.Add(texCoord); // Add UV coord
texCoord.x += textureStepU;
}
if (lastRingVertexIndex >= 0) // After first base ring is added ...
{
// Create new branch segment quads, between last two vertex rings
for (var currentRingVertexIndex = vertexList.Count - NumberOfSides - 1; currentRingVertexIndex < vertexList.Count - 1; currentRingVertexIndex++, lastRingVertexIndex++)
{
triangleList.Add(lastRingVertexIndex + 1); // Triangle A
triangleList.Add(lastRingVertexIndex);
triangleList.Add(currentRingVertexIndex);
triangleList.Add(currentRingVertexIndex); // Triangle B
triangleList.Add(currentRingVertexIndex + 1);
triangleList.Add(lastRingVertexIndex + 1);
}
}
// Do we end current branch?
radius *= RadiusStep;
if (radius < MinimumRadius || vertexList.Count + NumberOfSides >= MaxNumVertices) // End branch if reached minimum radius, or ran out of vertices
{
// Create a cap for ending the branch
vertexList.Add(position); // Add central vertex
uvList.Add(texCoord + Vector2.one); // Twist UVs to get rings effect
for (var n = vertexList.Count - NumberOfSides - 2; n < vertexList.Count - 2; n++) // Add cap
{
triangleList.Add(n);
triangleList.Add(vertexList.Count - 1);
triangleList.Add(n + 1);
}
return;
}
// Continue current branch (randomizing the angle)
texCoordV += 0.0625f * (SegmentLength + SegmentLength / radius);
position += quaternion * new Vector3(0f, SegmentLength, 0f);
transform.rotation = quaternion;
var x = (Random.value - 0.5f) * Twisting;
var z = (Random.value - 0.5f) * Twisting;
transform.Rotate(x, 0f, z);
lastRingVertexIndex = vertexList.Count - NumberOfSides - 1;
Branch(transform.rotation, position, lastRingVertexIndex, radius, texCoordV); // Next segment
// Do we branch?
if (vertexList.Count + NumberOfSides >= MaxNumVertices || Random.value > BranchProbability) return;
// Yes, add a new branch
transform.rotation = quaternion;
x = Random.value * 70f - 35f;
x += x > 0 ? 10f : -10f;
z = Random.value * 70f - 35f;
z += z > 0 ? 10f : -10f;
transform.Rotate(x, 0f, z);
Branch(transform.rotation, position, lastRingVertexIndex, radius, texCoordV);
}
// ---------------------------------------------------------------------------------------------------------------------------
// Try to get shared mesh for new prefab instances
// ---------------------------------------------------------------------------------------------------------------------------
#if UNITY_EDITOR
bool CanGetPrefabMesh()
{
// Return false if we are not instancing a new procedural tree prefab
if (PrefabUtility.GetPrefabType(this) != PrefabType.PrefabInstance) return false;
if (filter.sharedMesh != null) return true;
// Try to get mesh from an existing instance
var parentPrefab = PrefabUtility.GetPrefabParent(this);
var list = (ProceduralTree[])FindObjectsOfType(typeof(ProceduralTree));
foreach (var go in list)
if (go != this && PrefabUtility.GetPrefabParent(go) == parentPrefab)
{
filter.sharedMesh = go.filter.sharedMesh;
return true;
}
return false;
}
#endif
// ---------------------------------------------------------------------------------------------------------------------------
// Set tree shape, by computing a random offset for every ring vertex
// ---------------------------------------------------------------------------------------------------------------------------
private void SetTreeRingShape()
{
ringShape = new float[NumberOfSides + 1];
var k = (1f - BranchRoundness) * 0.5f;
// Randomize the vertex offsets, according to BranchRoundness
Random.InitState(Seed);
for (var n = 0; n < NumberOfSides; n++) ringShape[n] = 1f - (Random.value - 0.5f) * k;
ringShape[NumberOfSides] = ringShape[0];
}
// ---------------------------------------------------------------------------------------------------------------------------
// Update function will return, unless the tree parameters have changed
// ---------------------------------------------------------------------------------------------------------------------------
public void Update()
{
// Tree parameter checksum (add any new parameters here!)
var newChecksum = (Seed & 0xFFFF) + NumberOfSides + SegmentLength + BaseRadius + MaxNumVertices +
RadiusStep + MinimumRadius + Twisting + BranchProbability + BranchRoundness;
// Return (do nothing) unless tree parameters change
if (newChecksum != checksum || filter.sharedMesh == null)
{
checksumSerialized = checksum = newChecksum;
#if UNITY_EDITOR
if (!CanGetPrefabMesh())
#endif
GenerateTree(); // Update tree mesh
}
}
// ---------------------------------------------------------------------------------------------------------------------------
// Destroy procedural mesh when object is deleted
// ---------------------------------------------------------------------------------------------------------------------------
#if UNITY_EDITOR
void OnDisable()
{
if (filter.sharedMesh == null) return; // If tree has a mesh
if (PrefabUtility.GetPrefabType(this) == PrefabType.PrefabInstance) // If it's a prefab instance, look for siblings
{
var parentPrefab = PrefabUtility.GetPrefabParent(this);
var list = (ProceduralTree[])FindObjectsOfType(typeof(ProceduralTree));
foreach (var go in list)
if (go != this && PrefabUtility.GetPrefabParent(go) == parentPrefab)
return; // Return if there's another prefab instance still using the mesh
}
DestroyImmediate(filter.sharedMesh, true); // Delete procedural mesh
}
#endif
}
}
|
apache-2.0
|
ShootGame/Arcade2
|
src/main/java/pl/themolka/arcade/score/ScoreIncrementEvent.java
|
1839
|
/*
* Copyright 2018 Aleksander Jagiełło
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pl.themolka.arcade.score;
import pl.themolka.arcade.ArcadePlugin;
import pl.themolka.arcade.event.Cancelable;
import pl.themolka.arcade.game.Participator;
public class ScoreIncrementEvent extends ScoreEvent implements Cancelable {
private boolean cancel;
private Participator completer;
private double points;
public ScoreIncrementEvent(ArcadePlugin plugin, Score score, Participator completer, double points) {
super(plugin, score);
this.completer = completer;
this.points = points;
}
@Override
public boolean isCanceled() {
return this.cancel;
}
@Override
public void setCanceled(boolean cancel) {
this.cancel = cancel;
}
public Participator getCompleter() {
return this.completer;
}
public double getNewScore() {
return this.getScore().getScore() + this.getPoints();
}
public double getPoints() {
return this.points;
}
public boolean hasCompleter() {
return this.completer != null;
}
public void setCompleter(Participator completer) {
this.completer = completer;
}
public void setPoints(double points) {
this.points = points;
}
}
|
apache-2.0
|
gaplyk/go-swagger
|
examples/generated/cmd/petstore-server/main.go
|
1485
|
package main
import (
"log"
"os"
loads "github.com/go-openapi/loads"
flags "github.com/jessevdk/go-flags"
"github.com/go-swagger/go-swagger/examples/generated/restapi"
"github.com/go-swagger/go-swagger/examples/generated/restapi/operations"
)
// This file was generated by the swagger tool.
// Make sure not to overwrite this file after you generated it because all your edits would be lost!
func main() {
swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "")
if err != nil {
log.Fatalln(err)
}
api := operations.NewPetstoreAPI(swaggerSpec)
server := restapi.NewServer(api)
defer server.Shutdown()
parser := flags.NewParser(server, flags.Default)
parser.ShortDescription = "Swagger Petstore"
parser.LongDescription = "This is a sample server Petstore server.\n\n[Learn about Swagger](http://swagger.wordnik.com) or join the IRC channel '#swagger' on irc.freenode.net.\n\nFor this sample, you can use the api key 'special-key' to test the authorization filters\n"
server.ConfigureFlags()
for _, optsGroup := range api.CommandLineOptionsGroups {
_, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options)
if err != nil {
log.Fatalln(err)
}
}
if _, err := parser.Parse(); err != nil {
code := 1
if fe, ok := err.(*flags.Error); ok {
if fe.Type == flags.ErrHelp {
code = 0
}
}
os.Exit(code)
}
server.ConfigureAPI()
if err := server.Serve(); err != nil {
log.Fatalln(err)
}
}
|
apache-2.0
|
sportingsolutions/SS.Integration.Adapter
|
SS.Integration.Adapter.WindowsService/AdapterService.cs
|
9190
|
//Copyright 2014 Spin Services Limited
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
using System;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.ComponentModel.Composition.Hosting;
using System.Configuration;
using System.Reflection;
using System.ServiceProcess;
using System.Threading.Tasks;
using Ninject.Modules;
using SS.Integration.Adapter.Interface;
using log4net;
using Ninject;
using SS.Integration.Adapter.Diagnostics;
using SS.Integration.Adapter.Diagnostics.Model;
using SS.Integration.Adapter.Model.Interfaces;
namespace SS.Integration.Adapter.WindowsService
{
public partial class AdapterService : ServiceBase
{
#region Fields
private readonly ILog _logger = LogManager.GetLogger(typeof(AdapterService).ToString());
private static Task _adapterWorkerThread;
private Adapter _adapter;
private StandardKernel _iocContainer;
private int _fatalExceptionsCounter = 0;
private bool _skipRestartOnFatalException;
#endregion
#region Properties
[Import]
public IAdapterPlugin PlatformConnector { get; set; }
[Import(AllowDefault = true)]
public IPluginBootstrapper<NinjectModule> PluginBootstrapper { get; set; }
#endregion
#region Constructors
public AdapterService()
{
InitializeComponent();
TaskScheduler.UnobservedTaskException += TaskSchedulerOnUnobservedTaskException;
AppDomain.CurrentDomain.UnhandledException += CurrentDomainOnUnhandledException;
Compose();
}
#endregion
#region Protected methods
protected override void OnStart(string[] args)
{
_adapterWorkerThread = Task.Factory.StartNew(InitialiseAdapter, TaskCreationOptions.LongRunning);
_adapterWorkerThread.ContinueWith(t =>
{
if (t.IsFaulted || t.Status == TaskStatus.Faulted)
{
_logger.FatalFormat("Problem starting adapter {0}", t.Exception);
}
});
}
protected override void OnStop()
{
_logger.Info("Requesting Adapter Stop");
SupervisorStartUp.Dispose();
_adapter.Stop();
_adapterWorkerThread.Wait();
_adapterWorkerThread.ContinueWith(task => { _logger.InfoFormat("Adapter successfully stopped"); Environment.Exit(0); });
}
#endregion
#region Private methods
private void InitialiseAdapter()
{
_logger.Info("Requesting Adapter Start");
if (PlatformConnector == null)
{
_logger.Fatal("Plugin could not be found. Ensure that plugin is copied in folder and restart the service");
return;
}
List<INinjectModule> modules = new List<INinjectModule>
{
new BootStrapper(PlatformConnector)
};
if (PluginBootstrapper != null)
{
_logger.InfoFormat("Plugin Bootstrapper found of type={0}", PluginBootstrapper.GetType().Name);
modules.AddRange(PluginBootstrapper.BootstrapModules);
}
_iocContainer = new StandardKernel(modules.ToArray());
var settings = _iocContainer.Get<ISettings>();
var service = _iocContainer.Get<IServiceFacade>();
var streamHealthCheckValidation = _iocContainer.Get<IStreamHealthCheckValidation>();
var fixtureValidation = _iocContainer.Get<IFixtureValidation>();
var stateManager = _iocContainer.Get<IStateManager>();
var stateProvider = _iocContainer.Get<IStateProvider>();
var suspensionManager = _iocContainer.Get<ISuspensionManager>();
_iocContainer.Settings.InjectNonPublic = true;
//needed for Plugin properties since plugin is not instantiated by Ninject
_iocContainer.Inject(PlatformConnector);
_adapter =
new Adapter(
settings,
service,
PlatformConnector,
stateManager,
stateProvider,
suspensionManager,
streamHealthCheckValidation,
fixtureValidation);
_adapter.Start();
InitializeSupervisor();
_logger.Info("Adapter has started");
}
private void InitializeSupervisor()
{
var settings = _iocContainer.Get<ISettings>();
var objectProvider = _iocContainer.Get<IObjectProvider<Dictionary<string, FixtureOverview>>>();
if (settings.UseSupervisor)
{
SupervisorStartUp.Initialize(objectProvider);
}
}
private void DisposeSupervisor()
{
SupervisorStartUp.Dispose();
}
private void RestartAdapter()
{
if (_skipRestartOnFatalException)
return;
_fatalExceptionsCounter++;
DisposeSupervisor();
_adapterWorkerThread.Wait();
_adapter.Stop();
int maxFailures = GetMaxFailures();
//0 means no limit
maxFailures = maxFailures > 0 ? maxFailures : int.MaxValue;
if (maxFailures > _fatalExceptionsCounter)
{
_adapter.Start();
InitializeSupervisor();
}
else
{
_logger.WarnFormat("Adapter registered {0} FATAL/Unhandled exceptions and will stop the service now", GetMaxFailures());
}
}
private int GetMaxFailures()
{
int maxFailures = 0;
int.TryParse(ConfigurationManager.AppSettings["maxUnhandledExceptions"], out maxFailures);
_skipRestartOnFatalException = !bool.Parse(ConfigurationManager.AppSettings["skipRestartOnFatalException"]);
return maxFailures;
}
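// A sketch of the App.config keys read above (the values shown are assumed examples):
//   <appSettings>
//     <add key="maxUnhandledExceptions" value="5" />           <!-- 0 means no limit -->
//     <add key="skipRestartOnFatalException" value="false" />
//   </appSettings>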
private void Compose()
{
_logger.Info("Adapter Service is looking for a plugin");
CompositionContainer container = null;
try
{
string codebase = AppDomain.CurrentDomain.BaseDirectory;
var pluginAssembly = ConfigurationManager.AppSettings["pluginAssembly"];
var catalog = new SafeDirectoryCatalog(codebase, pluginAssembly);
container = new CompositionContainer(catalog);
container.ComposeParts(this);
}
catch (CompositionException ex)
{
foreach (var error in ex.Errors)
{
_logger.Fatal("Error when loading plugin", error.Exception);
}
}
catch (ReflectionTypeLoadException ex)
{
foreach (var error in ex.LoaderExceptions)
{
_logger.Fatal("Error when searching for plugin", error);
}
}
catch (Exception ex)
{
_logger.Fatal("Error when loading plugin", ex);
}
finally
{
if (container != null)
{
container.Dispose();
}
}
}
private void CurrentDomainOnUnhandledException(object sender, UnhandledExceptionEventArgs e)
{
_logger.FatalFormat("Adapter termination in progress={1} caused by UNHANDLED Exception {0}", (Exception)e.ExceptionObject, e.IsTerminating);
if(e.IsTerminating)
OnStop();
else
RestartAdapter();
}
private void TaskSchedulerOnUnobservedTaskException(object sender, UnobservedTaskExceptionEventArgs unobservedTaskExceptionEventArgs)
{
unobservedTaskExceptionEventArgs.SetObserved();
if (unobservedTaskExceptionEventArgs.Exception is AggregateException)
{
foreach (var exception in unobservedTaskExceptionEventArgs.Exception.Flatten().InnerExceptions)
{
_logger.Fatal("Adapter received unobserved exception from TaskScheduler: ", exception);
}
}
else
{
_logger.Fatal("Adapter received unobserved exception from TaskScheduler: ", unobservedTaskExceptionEventArgs.Exception);
}
RestartAdapter();
}
#endregion
}
}
|
apache-2.0
|
mohanaraosv/commons-vfs
|
core/src/test/java/org/apache/commons/vfs2/test/ProviderRandomReadTests.java
|
3177
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.vfs2.test;
import org.apache.commons.vfs2.Capability;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.RandomAccessContent;
import org.apache.commons.vfs2.util.RandomAccessMode;
/**
* Random read-only test case for file providers.
*
* @version $Id$
*/
public class ProviderRandomReadTests
extends AbstractProviderTestCase
{
private static final String TEST_DATA = "This is a test file.";
/**
* Returns the capabilities required by the tests of this test case.
*/
@Override
protected Capability[] getRequiredCaps()
{
return new Capability[]
{
Capability.GET_TYPE,
Capability.RANDOM_ACCESS_READ
};
}
/**
* Read a file
*/
public void testRandomRead() throws Exception
{
FileObject file = null;
try
{
file = getReadFolder().resolveFile("file1.txt");
final RandomAccessContent ra = file.getContent().getRandomAccessContent(RandomAccessMode.READ);
// read first byte
byte c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(0));
assertEquals("fp", ra.getFilePointer(), 1);
// start at pos 4
ra.seek(3);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(3));
assertEquals("fp", ra.getFilePointer(), 4);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(4));
assertEquals("fp", ra.getFilePointer(), 5);
// restart at pos 4
ra.seek(3);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(3));
assertEquals("fp", ra.getFilePointer(), 4);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(4));
assertEquals("fp", ra.getFilePointer(), 5);
// advance to pos 11
ra.seek(10);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(10));
assertEquals("fp", ra.getFilePointer(), 11);
c = ra.readByte();
assertEquals(c, TEST_DATA.charAt(11));
assertEquals("fp", ra.getFilePointer(), 12);
}
finally
{
if (file != null)
{
file.close();
}
}
}
}
|
apache-2.0
|
googleapis/google-api-java-client
|
google-api-client/src/main/java/com/google/api/client/googleapis/testing/compute/MockMetadataServerTransport.java
|
4143
|
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.api.client.googleapis.testing.compute;
import com.google.api.client.googleapis.auth.oauth2.OAuth2Utils;
import com.google.api.client.http.LowLevelHttpRequest;
import com.google.api.client.http.LowLevelHttpResponse;
import com.google.api.client.json.GenericJson;
import com.google.api.client.json.Json;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.gson.GsonFactory;
import com.google.api.client.testing.http.MockHttpTransport;
import com.google.api.client.testing.http.MockLowLevelHttpRequest;
import com.google.api.client.testing.http.MockLowLevelHttpResponse;
import com.google.api.client.util.Beta;
import java.io.IOException;
/**
* {@link Beta} <br>
* Transport that simulates the GCE metadata server for access tokens.
*
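 * <p>A minimal usage sketch (the surrounding credential wiring is assumed, not part of this class):
 *
 * <pre>{@code
 * MockHttpTransport transport = new MockMetadataServerTransport("mock-access-token");
 * // hand the transport to the HTTP/credential component under test instead of a real one
 * }</pre>
 *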
* @since 1.19
*/
@Beta
public class MockMetadataServerTransport extends MockHttpTransport {
private static final String METADATA_SERVER_URL = OAuth2Utils.getMetadataServerUrl();
private static final String METADATA_TOKEN_SERVER_URL =
METADATA_SERVER_URL + "/computeMetadata/v1/instance/service-accounts/default/token";
static final JsonFactory JSON_FACTORY = new GsonFactory();
String accessToken;
Integer tokenRequestStatusCode;
public MockMetadataServerTransport(String accessToken) {
this.accessToken = accessToken;
}
public void setTokenRequestStatusCode(Integer tokenRequestStatusCode) {
this.tokenRequestStatusCode = tokenRequestStatusCode;
}
@Override
public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
if (url.equals(METADATA_TOKEN_SERVER_URL)) {
MockLowLevelHttpRequest request =
new MockLowLevelHttpRequest(url) {
@Override
public LowLevelHttpResponse execute() throws IOException {
if (tokenRequestStatusCode != null) {
MockLowLevelHttpResponse response =
new MockLowLevelHttpResponse()
.setStatusCode(tokenRequestStatusCode)
.setContent("Token Fetch Error");
return response;
}
String metadataRequestHeader = getFirstHeaderValue("Metadata-Flavor");
if (!"Google".equals(metadataRequestHeader)) {
throw new IOException("Metadata request header not found.");
}
// Create the JSON response
GenericJson refreshContents = new GenericJson();
refreshContents.setFactory(JSON_FACTORY);
refreshContents.put("access_token", accessToken);
refreshContents.put("expires_in", 3600000);
refreshContents.put("token_type", "Bearer");
String refreshText = refreshContents.toPrettyString();
MockLowLevelHttpResponse response =
new MockLowLevelHttpResponse()
.setContentType(Json.MEDIA_TYPE)
.setContent(refreshText);
return response;
}
};
return request;
} else if (url.equals(METADATA_SERVER_URL)) {
MockLowLevelHttpRequest request =
new MockLowLevelHttpRequest(url) {
@Override
public LowLevelHttpResponse execute() {
MockLowLevelHttpResponse response = new MockLowLevelHttpResponse();
response.addHeader("Metadata-Flavor", "Google");
return response;
}
};
return request;
}
return super.buildRequest(method, url);
}
}
|
apache-2.0
|
windchopper/common
|
common-preferences/src/main/java/com/github/windchopper/common/preferences/PreferencesEntryFlatContainerType.java
|
1231
|
package com.github.windchopper.common.preferences;
import java.util.HashMap;
import java.util.Map;
import static java.util.function.Predicate.not;
public abstract class PreferencesEntryFlatContainerType<T, C> extends PreferencesEntryType<C, Map<String, T>> {
private final PreferencesEntryFlatType<T> valueType;
public PreferencesEntryFlatContainerType(PreferencesEntryFlatType<T> valueType) {
this.valueType = valueType;
}
@Override protected Map<String, T> loadValue(PreferencesStorage storage, String name) throws Throwable {
var storageValue = new HashMap<String, T>();
Iterable<String> elementNames = storage.valueNames().stream()
.filter(not("timestamp"::equals))::iterator;
for (var elementName : elementNames) {
storageValue.put(elementName, valueType.decode(valueType.loadValue(storage, elementName)));
}
return storageValue;
}
@Override protected void saveValue(PreferencesStorage storage, String name, Map<String, T> storageValue) throws Throwable {
for (var entry : storageValue.entrySet()) {
valueType.saveValue(storage, entry.getKey(), valueType.encode(entry.getValue()));
}
}
}
|
apache-2.0
|
hynguyen2610/OlympicGym
|
GymFitnessOlympic/Models/entity/PhieuThu.cs
|
1858
|
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using System.Linq;
using System.Text;
namespace GymFitnessOlympic.Models
{
public class PhieuThu
{
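// Vietnamese domain terms used below (English glosses, assumed): PhieuThu = receipt,
// HoiVien = member, GoiTap = training package, NhanVien = staff, PhongTap = gym room;
// "Không có" = "none", "Khách lẻ" = "walk-in customer".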
public int MaPhieuThu { get; set; }
public DateTime NgayLap { get; set; }
public int SoTien { get; set; }
public NhanVien NhanVien { get; set; }
public HoiVien HoiVien { get; set; }
public GoiTap GoiTap { get; set; }
public string LyDo { get; set; }
public string TenGiamGia { get; set; }
// public int TienGoi;
public int PhanTramGiam{get;set;}
public int TienSauGiam {
get {
return SoTien - SoTien * PhanTramGiam / 100;
}
}
public string TenLoai {
get {
return GoiTap.LoaiVe;
}
}
public string MaTheHoiVien {
get {
return HoiVien != null ? HoiVien.MaThe : "Không có";
}
}
public string TenHoiVien
{
get
{
return HoiVien != null ? HoiVien.TenHoiVien : "Khách lẻ";
}
}
public string TenPhong
{
get
{
return NhanVien.PhongTap.TenPhongTap;
}
}
//public int TienPhaiTraDangKy {
// get {
// return GiamGia != null ? GoiTap.Gia - GoiTap.Gia * GiamGia.PhanTramGiam / 100 : GoiTap.Gia;
// }
//}
public string TenGoi
{
get {
return GoiTap.TenGoiTap;
}
}
//public int SoTienGoi
//{
// get
// {
// return GoiTap.Gia.ToString();
// }
//}
}
}
|
apache-2.0
|
etechi/ServiceFramework
|
Projects/Server/Common/SF.Common.Implements/Common/TextMessages/Management/MsgRecordManager.cs
|
3593
|
#region Apache License Version 2.0
/*----------------------------------------------------------------
Copyright 2017 Yang Chen (cy2000@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
Detail: https://github.com/etechi/ServiceFramework/blob/master/license.md
----------------------------------------------------------------*/
#endregion Apache License Version 2.0
using SF.Core;
using SF.Core.ServiceManagement;
using SF.Core.Times;
using SF.Data;
using SF.Entities;
using System;
using System.Linq;
using System.Threading.Tasks;
namespace SF.Common.TextMessages.Management
{
public class EntityMsgRecordManager :
QuerableEntitySource<ObjectKey<long>,MsgRecord,MsgRecordQueryArgument,DataModels.TextMessageRecord>,
IMsgRecordManager,
ITextMessageLogger
{
public EntityMsgRecordManager(IEntityServiceContext ServiceContext) : base(ServiceContext)
{
}
protected override PagingQueryBuilder<DataModels.TextMessageRecord> PagingQueryBuilder =>
PagingQueryBuilder<DataModels.TextMessageRecord>.Simple("time", b => b.Time, true);
protected override IContextQueryable<MsgRecord> OnMapModelToDetail(IContextQueryable<DataModels.TextMessageRecord> Query)
{
return Query.SelectEventEntity(m => new MsgRecord
{
Id = m.Id,
Args=m.Args,
Body=m.Body,
CompletedTime=m.CompletedTime,
Error=m.Error,
Headers=m.Headers,
Sender=m.Sender,
ServiceId=m.ServiceId,
Status=m.Status,
Target=m.Target,
Title=m.Title,
Result=m.Result,
TrackEntityId=m.TrackEntityId
});
}
protected override IContextQueryable<DataModels.TextMessageRecord> OnBuildQuery(IContextQueryable<DataModels.TextMessageRecord> Query, MsgRecordQueryArgument Arg, Paging paging)
{
var scopeid = ServiceInstanceDescriptor.ParentInstanceId;
var q = Query.Where(m=>m.ScopeId==scopeid)
.Filter(Arg.Id, r => r.Id)
.Filter(Arg.ServiceId,r=>r.ServiceId)
.Filter(Arg.Target,r=>r.Target)
.Filter(Arg.TargeUserId, r => r.UserId)
.Filter(Arg.Time, r => r.Time)
;
return q;
}
public async Task<long> BeginSend(long ServiceId, string Target, long? TargetUserId,Message message)
{
var re=DataSet.Add(new DataModels.TextMessageRecord
{
Id = await IdentGenerator.GenerateAsync<DataModels.TextMessageRecord>(),
Args = Json.Stringify(message.Arguments),
Body = message.Body,
Headers = Json.Stringify(message.Headers),
ScopeId = ServiceInstanceDescriptor.ParentInstanceId,
Sender = message.Sender,
ServiceId = ServiceId,
Status = SendStatus.Sending,
Target = Target,
UserId = TargetUserId,
Time = Now,
TrackEntityId = message.TrackEntityId
});
await DataSet.Context.SaveChangesAsync();
return re.Id;
}
public async Task EndSend(long MessageId, string ExtIdent, string Error)
{
var r = await DataSet.FindAsync(MessageId);
if (r == null)
return;
r.Error = Error;
r.Result = ExtIdent;
r.CompletedTime = Now;
r.Status = Error == null ? SendStatus.Completed : SendStatus.Failed;
DataSet.Update(r);
await DataSet.Context.SaveChangesAsync();
}
}
}
|
apache-2.0
|
TANGKUO/beautifulDay
|
src/main/java/com/tk/cn/thread/myThread.java
|
289
|
package com.tk.cn.thread;
public class myThread extends Thread{
public void run(){
for(int i=0;i<10;i++){
System.out.println("Thread"+i);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
throw new RuntimeException();
}
}
}
}
|
apache-2.0
|
karlmdavis/jessentials
|
jessentials-misc/src/test/java/com/justdavis/karl/misc/datasources/provisioners/postgresql/PostgreSqlProvisionerIT.java
|
4829
|
package com.justdavis.karl.misc.datasources.provisioners.postgresql;
import java.net.URL;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.junit.Assert;
import org.junit.Test;
import com.justdavis.karl.misc.datasources.postgresql.PostgreSqlConnector;
import com.justdavis.karl.misc.datasources.postgresql.PostgreSqlCoordinates;
import com.justdavis.karl.misc.datasources.provisioners.DataSourceProvisionersManager;
import com.justdavis.karl.misc.datasources.provisioners.IProvisioningTargetsProvider;
import com.justdavis.karl.misc.datasources.provisioners.XmlProvisioningTargetsProvider;
/**
* Integration tests for {@link PostgreSqlProvisioner}.
*/
public final class PostgreSqlProvisionerIT {
/**
* Verifies that
* {@link PostgreSqlProvisioner#provision(PostgreSqlProvisioningTarget, PostgreSqlProvisioningRequest)}
* works as expected.
*
* @throws SQLException
* (would indicate a problem with the code under test)
*/
@Test
public void provision() throws SQLException {
// Find the available provisioning target.
@SuppressWarnings("unchecked")
DataSourceProvisionersManager provisionersManager = new DataSourceProvisionersManager(
new PostgreSqlProvisioner());
URL availableTargetsUrl = Thread.currentThread().getContextClassLoader()
.getResource("datasource-provisioning-targets.xml");
IProvisioningTargetsProvider targetsProvider = new XmlProvisioningTargetsProvider(provisionersManager,
availableTargetsUrl);
PostgreSqlProvisioningTarget target = targetsProvider.findTarget(PostgreSqlProvisioningTarget.class);
// Create and run a provisioning request.
PostgreSqlProvisioner provisioner = new PostgreSqlProvisioner();
PostgreSqlProvisioningRequest request = new PostgreSqlProvisioningRequest("integrationtest");
try {
PostgreSqlCoordinates provisionedCoords = provisioner.provision(target, request);
Assert.assertNotNull(provisionedCoords);
// Create a DataSource
PostgreSqlConnector connector = new PostgreSqlConnector();
DataSource postgreSqlDataSource = connector.createDataSource(provisionedCoords);
Assert.assertNotNull(postgreSqlDataSource);
/*
* Create and test a Connection. The query here is taken from
* http://stackoverflow.com/a/3670000/1851299.
*/
			Connection postgreSqlConnection = null;
			try {
				postgreSqlConnection = postgreSqlDataSource.getConnection();
				Assert.assertNotNull(postgreSqlConnection);
				PreparedStatement statement = postgreSqlConnection.prepareStatement("SELECT 1");
				ResultSet resultSet = statement.executeQuery();
				Assert.assertTrue(resultSet.next());
				Assert.assertEquals(1, resultSet.getInt(1));
			} finally {
				if (postgreSqlConnection != null)
					postgreSqlConnection.close();
			}
} finally {
provisioner.delete(target, request);
}
}
/**
* Verifies that
* {@link PostgreSqlProvisioner#delete(PostgreSqlProvisioningTarget, PostgreSqlProvisioningRequest)}
* works as expected.
*
* @throws SQLException
* (would indicate a problem with the code under test)
*/
@Test
public void delete() throws SQLException {
// Find the available provisioning target.
@SuppressWarnings("unchecked")
DataSourceProvisionersManager provisionersManager = new DataSourceProvisionersManager(
new PostgreSqlProvisioner());
URL availableTargetsUrl = Thread.currentThread().getContextClassLoader()
.getResource("datasource-provisioning-targets.xml");
IProvisioningTargetsProvider targetsProvider = new XmlProvisioningTargetsProvider(provisionersManager,
availableTargetsUrl);
PostgreSqlProvisioningTarget target = targetsProvider.findTarget(PostgreSqlProvisioningTarget.class);
// Create and run a provisioning request.
PostgreSqlProvisioner provisioner = new PostgreSqlProvisioner();
PostgreSqlProvisioningRequest request = new PostgreSqlProvisioningRequest("IntegrationTest");
provisioner.provision(target, request);
// Delete the provisioned database.
provisioner.delete(target, request);
/*
* Run a query on the server to see if the database still exists.
*/
PostgreSqlConnector connector = new PostgreSqlConnector();
DataSource postgreSqlDataSource = connector.createDataSource(target.getServerCoords());
Connection postgreSqlConnection = null;
try {
postgreSqlConnection = postgreSqlDataSource.getConnection();
Assert.assertNotNull(postgreSqlConnection);
PreparedStatement statement = postgreSqlConnection
.prepareStatement("SELECT datname" + " FROM pg_database" + " WHERE datname = 'IntegrationTest';");
ResultSet dbsResult = statement.executeQuery();
Assert.assertFalse(dbsResult.next());
} finally {
if (postgreSqlConnection != null)
postgreSqlConnection.close();
}
}
}
|
apache-2.0
|
TNG/ArchUnit
|
archunit-example/example-junit4/src/test/java/com/tngtech/archunit/exampletest/junit4/ControllerRulesTest.java
|
2921
|
package com.tngtech.archunit.exampletest.junit4;
import com.tngtech.archunit.base.DescribedPredicate;
import com.tngtech.archunit.base.PackageMatchers;
import com.tngtech.archunit.core.domain.JavaClass;
import com.tngtech.archunit.core.domain.JavaMember;
import com.tngtech.archunit.example.layers.security.Secured;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchUnitRunner;
import com.tngtech.archunit.lang.ArchRule;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import static com.tngtech.archunit.core.domain.JavaClass.Functions.GET_PACKAGE_NAME;
import static com.tngtech.archunit.core.domain.JavaMember.Predicates.declaredIn;
import static com.tngtech.archunit.core.domain.properties.CanBeAnnotated.Predicates.annotatedWith;
import static com.tngtech.archunit.lang.conditions.ArchPredicates.are;
import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.classes;
@Category(Example.class)
@RunWith(ArchUnitRunner.class)
@AnalyzeClasses(packages = "com.tngtech.archunit.example.layers")
public class ControllerRulesTest {
@ArchTest
static final ArchRule controllers_should_only_call_secured_methods =
classes().that().resideInAPackage("..controller..")
.should().onlyCallMethodsThat(areDeclaredInController().or(are(annotatedWith(Secured.class))));
@ArchTest
static final ArchRule controllers_should_only_call_secured_constructors =
classes()
.that().resideInAPackage("..controller..")
.should().onlyCallConstructorsThat(areDeclaredInController().or(are(annotatedWith(Secured.class))));
@ArchTest
static final ArchRule controllers_should_only_call_secured_code_units =
classes()
.that().resideInAPackage("..controller..")
.should().onlyCallCodeUnitsThat(areDeclaredInController().or(are(annotatedWith(Secured.class))));
@ArchTest
static final ArchRule controllers_should_only_access_secured_fields =
classes()
.that().resideInAPackage("..controller..")
.should().onlyAccessFieldsThat(areDeclaredInController().or(are(annotatedWith(Secured.class))));
@ArchTest
static final ArchRule controllers_should_only_access_secured_members =
classes()
.that().resideInAPackage("..controller..")
.should().onlyAccessMembersThat(areDeclaredInController().or(are(annotatedWith(Secured.class))));
private static DescribedPredicate<JavaMember> areDeclaredInController() {
DescribedPredicate<JavaClass> aPackageController = GET_PACKAGE_NAME.is(PackageMatchers.of("..controller..", "java.."))
.as("a package '..controller..'");
return are(declaredIn(aPackageController));
}
}
|
apache-2.0
|
DG-i/openshift-ansible
|
roles/lib_openshift/library/oc_configmap.py
|
55697
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/configmap -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_configmap
short_description: Modify and idempotently manage openshift configmaps
description:
- Modify openshift configmaps programmatically.
options:
state:
description:
- Supported states: present, absent, list
- present - will ensure object is created or updated to the value specified
- list - will return a configmap
- absent - will remove the configmap
required: False
default: present
choices: ["present", 'absent', 'list']
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: True
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
from_file:
description:
- A dict of key/value pairs where the key is the configmap key and the value is the path of the file providing the content.
required: false
default: None
aliases: []
from_literal:
description:
- A dict of key/value pairs where the key is the configmap key and the value is the literal string content
required: false
default: None
aliases: []
author:
- "kenny woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create group
oc_configmap:
state: present
name: testmap
from_file:
secret: /path/to/secret
from_literal:
title: systemadmin
register: configout
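
# A hedged sketch of the other documented states (assumed usage, not from the original examples):
- name: fetch the configmap
  oc_configmap:
    state: list
    name: testmap
  register: configmap_out

- name: remove the configmap
  oc_configmap:
    state: absent
    name: testmap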
'''
# -*- -*- -*- End included fragment: doc/configmap -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
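    # A minimal usage sketch assuming in-memory dict content (illustrative, not from the module docs):
    #   yed = Yedit(content={'a': {'b': {'c': 'd'}}})
    #   yed.get('a.b.c')         # -> 'd'
    #   yed.put('a.b.e', 'f')    # -> (True, {'a': {'b': {'c': 'd', 'e': 'f'}}})
    #   yed.delete('a.b.c')      # -> (True, dict with 'c' removed)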
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b
            return c
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# copy.deepcopy does not preserve ruamel formatting metadata,
# so round-trip through dump/load; fall back to deepcopy under pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# copy.deepcopy does not preserve ruamel formatting metadata,
# so round-trip through dump/load; fall back to deepcopy under pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# The value came in as a string without value_type str; validate that it
# looks boolean here and let the yaml load below perform the conversion
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# Special case: yaml loading turns '' into None, so skip empty strings
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
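# Illustrative sketch of parse_value coercion (not upstream code; assumes
# PyYAML/ruamel YAML 1.1 bool semantics via yaml.safe_load):
#
#   Yedit.parse_value('yes', vtype='bool')    # -> True (validated, then yaml-coerced)
#   Yedit.parse_value('3')                    # -> 3
#   Yedit.parse_value('3', vtype='str')       # -> '3' (yaml load skipped)
#   Yedit.parse_value('maybe', vtype='bool')  # -> raises YeditException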
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key and value, we encapsulate them in a
# single-item edits list and process that
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src was provided, write them out
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
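# A minimal usage sketch of the Yedit class above (hypothetical file name;
# assumes the file exists and is writable):
#
#   yed = Yedit(filename='deployment.yaml')
#   yed.load()                                    # parse the file into yaml_dict
#   changed, doc = yed.put('metadata.labels.app', 'demo')
#   if changed:
#       yed.write()                               # writes via a .yedit tmp file + rename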
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned as an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
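# Rough illustration (not upstream code): with namespace='default',
# self.openshift_cmd(['get', 'pods'], output=True) runs approximately
#   oc get pods -n default
# with KUBECONFIG pointing at the tmpfile copy, and returns a dict like
# {'returncode': 0, 'results': <parsed JSON>, 'cmd': 'oc get pods -n default'}.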
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
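# Example of the transformation above, given the shape filter_versions produces:
#   Utils.add_custom_versions({'oc': 'v3.3.0.33-1'})
#   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}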
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
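# Sketch of how an options hash becomes CLI flags (hypothetical values):
#   options = {'replicas': {'value': 3, 'include': True},
#              'service_account': {'value': 'builder', 'include': True}}
#   OpenShiftCLIConfig('name', 'default', '/tmp/kubeconfig', options).stringify()
#   # -> ['--replicas=3', '--service-account=builder']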
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_configmap.py -*- -*- -*-
# pylint: disable=too-many-arguments
class OCConfigMap(OpenShiftCLI):
''' Openshift ConfigMap Class
ConfigMaps store configuration data as key/value pairs consumable by pods
'''
def __init__(self,
name,
from_file,
from_literal,
state,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = name
self.state = state
self._configmap = None
self._inc_configmap = None
self.from_file = from_file if from_file is not None else {}
self.from_literal = from_literal if from_literal is not None else {}
@property
def configmap(self):
if self._configmap is None:
self._configmap = self.get()
return self._configmap
@configmap.setter
def configmap(self, inc_map):
self._configmap = inc_map
@property
def inc_configmap(self):
if self._inc_configmap is None:
results = self.create(dryrun=True, output=True)
self._inc_configmap = results['results']
return self._inc_configmap
@inc_configmap.setter
def inc_configmap(self, inc_map):
self._inc_configmap = inc_map
def from_file_to_params(self):
'''return from_files in a string ready for cli'''
return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]
def from_literal_to_params(self):
'''return from_literal in a string ready for cli'''
return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]
def get(self):
'''return a configmap by name '''
results = self._get('configmap', self.name)
if results['returncode'] == 0 and results['results'][0]:
self.configmap = results['results'][0]
if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a configmap by name'''
return self._delete('configmap', self.name)
def create(self, dryrun=False, output=False):
'''Create a configmap
:dryrun: Report what would have been done. default: False
:output: Whether to parse output. default: False
'''
cmd = ['create', 'configmap', self.name]
if self.from_literal is not None:
cmd.extend(self.from_literal_to_params())
if self.from_file is not None:
cmd.extend(self.from_file_to_params())
if dryrun:
cmd.extend(['--dry-run', '-ojson'])
results = self.openshift_cmd(cmd, output=output)
return results
def update(self):
'''run update configmap '''
return self._replace_content('configmap', self.name, self.inc_configmap)
def needs_update(self):
'''compare the current configmap with the proposed and return if they are equal'''
return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
oc_cm = OCConfigMap(params['name'],
params['from_file'],
params['from_literal'],
params['state'],
params['namespace'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = oc_cm.get()
if 'failed' in api_rval:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
if not params['name']:
return {'failed': True,
'msg': 'Please specify a name when state is absent|present.'}
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], params['name']):
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
api_rval = oc_cm.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Create
########
if state == 'present':
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True, 'msg': 'Would have performed a create.'}
api_rval = oc_cm.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
api_rval = oc_cm.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if oc_cm.needs_update():
api_rval = oc_cm.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
api_rval = oc_cm.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
# -*- -*- -*- End included fragment: class/oc_configmap.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_configmap.py -*- -*- -*-
def main():
'''
ansible oc module for managing OpenShift configmap objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
from_file=dict(default=None, type='dict'),
from_literal=dict(default=None, type='dict'),
),
supports_check_mode=True,
)
rval = OCConfigMap.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_configmap.py -*- -*- -*-
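# Hedged playbook example for this module (illustrative values only):
#
#   - name: ensure app config exists
#     oc_configmap:
#       name: app-config
#       namespace: default
#       state: present
#       from_literal:
#         loglevel: info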
|
apache-2.0
|
ValeriyaSyomina/DigitSignature
|
src/DigitSignProject/Comparator.cs
|
913
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace DigitSignProject
{
public class Comparator
{
/// <summary>
/// Compares two byte arrays element by element
/// </summary>
/// <param name="firstArray">The first array</param>
/// <param name="secondArray">The second array</param>
/// <returns>true if the arrays are identical, false otherwise</returns>
public static bool CompareByteArrays(byte[] firstArray, byte[] secondArray)
{
if (firstArray.Length != secondArray.Length)
return false;
for (int i = 0; i < firstArray.Length; i++)
if (firstArray[i] != secondArray[i])
return false;
return true;
}
}
}
|
apache-2.0
|