Dataset schema (column name: type, with observed value-length ranges):
repo_name: string (length 4 to 116)
path: string (length 4 to 379)
size: string (length 1 to 7)
content: string (length 3 to 1.05M)
license: string (one of 15 classes)
deeplearning4j/deeplearning4j
nd4j/nd4j-parameter-server-parent/nd4j-parameter-server-node/src/main/java/org/nd4j/parameterserver/distributed/enums/TransportType.java
1273
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

package org.nd4j.parameterserver.distributed.enums;

/**
 * TransportType enum describes the different Transports usable for ParameterServer implementations
 *
 * @author raver119@gmail.com
 */
public enum TransportType {
    /**
     * This is the default Transport implementation, suitable for network environments without UDP Broadcast support
     */
    ROUTED_UDP,

    /**
     * This option means you'll provide your own Transport interface implementation via the VoidParameterServer.init() method
     */
    CUSTOM,
}
apache-2.0
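A minimal sketch of how the enum above might be consumed. Only TransportType itself comes from the file; the VoidConfiguration builder and its transportType field are assumed names inferred from the javadoc, not confirmed nd4j API.

// Hypothetical wiring sketch -- VoidConfiguration and its builder are assumed names.
VoidConfiguration config = VoidConfiguration.builder()
        .transportType(TransportType.ROUTED_UDP) // default; works without UDP broadcast
        .build();
// For TransportType.CUSTOM, the javadoc says a Transport implementation
// is supplied to VoidParameterServer.init() instead.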
Azure/azure-mobile-apps-js-client
sdk/test/tests/target/cordova/purge.tests.js
9178
// ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------

/**
 * @file unit tests for the 'purge' module
 */

var Platform = require('../../../../src/Platform'),
    Query = require('azure-query-js').Query,
    createPurgeManager = require('../../../../src/sync/purge').createPurgeManager,
    tableConstants = require('../../../../src/constants').table,
    MobileServiceClient = require('../../../../src/MobileServiceClient'),
    storeTestHelper = require('./storeTestHelper'),
    testHelper = require('../../shared/testHelper'),
    runner = require('../../../../src/Utilities/taskRunner'),
    createOperationTableManager = require('../../../../src/sync/operations').createOperationTableManager,
    MobileServiceSqliteStore = require('../../../../src/Platform/cordova/MobileServiceSqliteStore'),
    store,
    client,
    syncContext,
    tableName = storeTestHelper.testTableName,
    purgeManager;

$testGroup('purge tests')

    // Clear the store before running each test.
    .beforeEachAsync(function() {
        return storeTestHelper.createEmptyStore().then(function(emptyStore) {
            store = emptyStore;
            purgeManager = createPurgeManager(store, runner());
            client = new MobileServiceClient('http://someurl');
            syncContext = client.getSyncContext();
            return store.defineTable({
                name: tableName,
                columnDefinitions: {
                    id: 'string',
                    text: 'text'
                }
            }).then(function() {
                return client.getSyncContext().initialize(store);
            });
        });
    }).tests(

    $test('Vanilla purge - purge query matching entire table')
    .checkAsync(function () {
        var record1 = {id: '1', text: 'a'},
            record2 = {id: '2', text: 'b'},
            records = [record1, record2],
            tableQuery = new Query(tableName),
            purgeQuery = tableQuery;

        var actions = [
            // Add record and incremental state
            [store, store.upsert, tableName, records],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery],

            // Verify purge
            verifyIncrementalSyncStateIsRemoved,
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, []);
            }
        ];

        return testHelper.runActions(actions);
    }),

    $test('Vanilla purge - purge query matching entire table where table has too many records')
    .checkAsync(function () {
        var records = [],
            tableQuery = new Query(tableName),
            purgeQuery = tableQuery;

        for (var i = 0; i < 3000; i++) {
            records.push({id: 'id'+i, text: 'sometext'});
        }

        var actions = [
            // Add record and incremental state
            [store, store.upsert, tableName, records],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery],

            // Verify purge
            verifyIncrementalSyncStateIsRemoved,
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, []);
            }
        ];

        return testHelper.runActions(actions);
    }),

    $test('Vanilla purge - purge query not matching all records')
    .checkAsync(function () {
        var record1 = {id: '1', text: 'a'},
            record2 = {id: '2', text: 'b'},
            records = [record1, record2],
            tableQuery = new Query(tableName),
            purgeQuery = new Query(tableName).where(function() {
                return this.id === '1';
            });

        var actions = [
            // Add record and incremental state
            [store, store.upsert, tableName, records],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery],

            // Verify purge
            verifyIncrementalSyncStateIsRemoved,
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, [record2]);
            }
        ];

        return testHelper.runActions(actions);
    }),

    $test('Vanilla purge - purge query matching no record')
    .checkAsync(function () {
        var record1 = {id: '1', text: 'a'},
            record2 = {id: '2', text: 'b'},
            records = [record1, record2],
            tableQuery = new Query(tableName),
            purgeQuery = new Query(tableName).where(function() {
                return this.id === 'non existent id';
            });

        var actions = [
            // Add record and incremental state
            [store, store.upsert, tableName, records],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery],

            // Verify purge
            verifyIncrementalSyncStateIsRemoved,
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, records);
            }
        ];

        return testHelper.runActions(actions);
    }),

    $test('Vanilla purge - pending operations in the operation table')
    .checkAsync(function () {
        var record = {id: '1', text: 'a'},
            tableQuery = new Query(tableName),
            purgeQuery = tableQuery;

        var actions = [
            // Add record, pending operation and incremental state
            [syncContext, syncContext.insert, tableName, record],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery],

            // Verify purge
            {
                fail: function(error) {
                    // failure expected - pending operations in the queue
                }
            },

            // purge shouldn't have removed the table data
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, [record]);
            },

            // purge shouldn't have removed the pending operation
            [store, store.read, new Query(tableConstants.operationTableName)],
            function(result) {
                $assert.areEqual(result.length, 1);
            },

            // purge shouldn't have removed the incremental sync state
            [store, store.read, new Query(tableConstants.pulltimeTableName)],
            function(result) {
                var incrementalSyncReset = true;
                result.forEach(function(record) {
                    if (record.tableName === tableName) {
                        incrementalSyncReset = false;
                    }
                });
                $assert.isFalse(incrementalSyncReset);
            }
        ];

        return testHelper.runActions(actions);
    }),

    $test('Force purge - pending operations in the operation table')
    .checkAsync(function () {
        var record = {id: '1', text: 'a'},
            tableQuery = new Query(tableName),
            purgeQuery = tableQuery;

        var actions = [
            // Add record, pending operation and incremental state
            [syncContext, syncContext.insert, tableName, record],
            addIncrementalSyncState,

            // Perform purge
            [purgeManager, purgeManager.purge, purgeQuery, true /* force purge */],

            // Verify purge
            verifyIncrementalSyncStateIsRemoved,
            verifyPendingOperationsAreRemoved,
            [store, store.read, tableQuery],
            function(result) {
                $assert.areEqual(result, []);
            }
        ];

        return testHelper.runActions(actions);
    })
);

function addIncrementalSyncState() {
    return store.upsert(tableConstants.pulltimeTableName, [
        { id: '1', tableName: tableName, value: new Date() },
        { id: '2', tableName: 'someothertablename', value: new Date() }
    ]).then(function() {
        // no action needed
    }, function(error) {
        $assert.fail(error);
    });
}

function verifyIncrementalSyncStateIsRemoved() {
    return store.read(new Query(tableConstants.pulltimeTableName)).then(function(result) {
        result.forEach(function(record) {
            if (record.tableName === tableName) {
                $assert.fail('incremental sync state not reset');
            }
        });
    }, function(error) {
        $assert.fail(error);
    });
}

function verifyPendingOperationsAreRemoved() {
    var query = new Query(tableConstants.operationTableName).where(function(tableName) {
        return this.tableName === tableName;
    }, tableName);
    return store.read(query).then(function(result) {
        $assert.areEqual(result, []);
    }, function(error) {
        $assert.fail(error);
    });
}
apache-2.0
tenggyut/HIndex
hbase-secondaryindex/src/main/java/org/apache/hadoop/hbase/index/client/RangeExpression.java
2836
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.index.client;

import java.io.Serializable;

import org.apache.hadoop.hbase.index.Column;

/**
 * Can be used to specify a range condition on a column associated with an index. When the range is
 * open at one end (e.g. a specific lower bound but no upper bound), pass the corresponding bound
 * value as null.
 */
public class RangeExpression implements Serializable {

  private static final long serialVersionUID = 8772267632040419734L;

  private Column column;
  private byte[] lowerBoundValue;
  private byte[] upperBoundValue;
  private boolean lowerBoundInclusive;
  private boolean upperBoundInclusive;

  public Column getColumn() {
    return column;
  }

  public byte[] getLowerBoundValue() {
    return lowerBoundValue;
  }

  public byte[] getUpperBoundValue() {
    return upperBoundValue;
  }

  public boolean isLowerBoundInclusive() {
    return lowerBoundInclusive;
  }

  public boolean isUpperBoundInclusive() {
    return upperBoundInclusive;
  }

  /**
   * When the range is open at one end (e.g. a specific lower bound but no upper bound), pass the
   * corresponding bound value as null.
   * @param column
   * @param lowerBoundValue
   * @param upperBoundValue
   * @param lowerBoundInclusive
   * @param upperBoundInclusive
   */
  public RangeExpression(Column column, byte[] lowerBoundValue, byte[] upperBoundValue,
      boolean lowerBoundInclusive, boolean upperBoundInclusive) {
    if (column == null || (lowerBoundValue == null && upperBoundValue == null)) {
      throw new IllegalArgumentException();
    }
    this.column = column;
    this.lowerBoundValue = lowerBoundValue;
    this.upperBoundValue = upperBoundValue;
    this.lowerBoundInclusive = lowerBoundInclusive;
    this.upperBoundInclusive = upperBoundInclusive;
  }

  @Override
  public String toString() {
    return "RangeExpression : Column[" + this.column + "], lowerBoundInclusive : "
        + this.lowerBoundInclusive + ", upperBoundInclusive : " + this.upperBoundInclusive;
  }
}
apache-2.0
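A minimal construction sketch for the class above. Only the RangeExpression constructor is taken from the file; the Column(byte[] family, byte[] qualifier) constructor is a hypothetical signature, and Bytes is HBase's standard org.apache.hadoop.hbase.util.Bytes utility.

import org.apache.hadoop.hbase.util.Bytes;

// Express the half-open range [18, +inf) on column cf:age; the null upper
// bound marks the open end, as the class javadoc describes.
Column ageColumn = new Column(Bytes.toBytes("cf"), Bytes.toBytes("age")); // assumed ctor
RangeExpression range = new RangeExpression(
    ageColumn,
    Bytes.toBytes(18L), // lower bound value
    null,               // no upper bound
    true,               // lower bound inclusive
    false);             // upper-bound flag; moot here since the upper bound is null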
roaet/python-neutronclient
neutronclient/neutron/v2_0/lb/v2/member.py
4933
# Copyright 2013 Mirantis Inc.
# Copyright 2014 Blue Box Group, Inc.
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved
#
# Author: Ilya Shakhat, Mirantis Inc.
# Author: Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20


def _get_pool_id(client, pool_id_or_name):
    return neutronV20.find_resourceid_by_name_or_id(
        client, 'pool', pool_id_or_name, cmd_resource='lbaas_pool')


class LbaasMemberMixin(object):

    def set_extra_attrs(self, parsed_args):
        self.parent_id = _get_pool_id(self.get_client(), parsed_args.pool)

    def add_known_arguments(self, parser):
        parser.add_argument(
            'pool', metavar='POOL',
            help=_('ID or name of the pool that this member belongs to.'))


class ListMember(LbaasMemberMixin, neutronV20.ListCommand):
    """LBaaS v2 List members that belong to a given tenant."""

    resource = 'member'
    shadow_resource = 'lbaas_member'
    list_columns = [
        'id', 'address', 'protocol_port', 'weight',
        'subnet_id', 'admin_state_up', 'status'
    ]
    pagination_support = True
    sorting_support = True


class ShowMember(LbaasMemberMixin, neutronV20.ShowCommand):
    """LBaaS v2 Show information of a given member."""

    resource = 'member'
    shadow_resource = 'lbaas_member'


class CreateMember(neutronV20.CreateCommand):
    """LBaaS v2 Create a member."""

    resource = 'member'
    shadow_resource = 'lbaas_member'

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--admin-state-down',
            dest='admin_state', action='store_false',
            help=_('Set admin state up to false'))
        parser.add_argument(
            '--weight',
            help=_('Weight of member in the pool (default:1, [0..256]).'))
        parser.add_argument(
            '--subnet', required=True,
            help=_('Subnet ID or name for the member.'))
        parser.add_argument(
            '--address', required=True,
            help=_('IP address of the pool member in the pool.'))
        parser.add_argument(
            '--protocol-port', required=True,
            help=_('Port on which the pool member listens for requests or '
                   'connections.'))
        parser.add_argument(
            'pool', metavar='POOL',
            help=_('ID or name of the pool that this member belongs to.'))

    def args2body(self, parsed_args):
        self.parent_id = _get_pool_id(self.get_client(), parsed_args.pool)
        _subnet_id = neutronV20.find_resourceid_by_name_or_id(
            self.get_client(), 'subnet', parsed_args.subnet)
        body = {
            self.resource: {
                'subnet_id': _subnet_id,
                'admin_state_up': parsed_args.admin_state,
                'protocol_port': parsed_args.protocol_port,
                'address': parsed_args.address,
            },
        }
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['weight', 'subnet_id'])
        return body


class UpdateMember(neutronV20.UpdateCommand):
    """LBaaS v2 Update a given member."""

    resource = 'member'
    shadow_resource = 'lbaas_member'

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--admin-state-down',
            dest='admin_state', action='store_false',
            help=_('Set admin state up to false'))
        parser.add_argument(
            '--weight',
            help=_('Weight of member in the pool (default:1, [0..256])'))
        parser.add_argument(
            'pool', metavar='POOL',
            help=_('ID or name of the pool that this member belongs to'))

    def args2body(self, parsed_args):
        self.parent_id = _get_pool_id(self.get_client(), parsed_args.pool)
        body = {self.resource: {}}
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['admin_state_up', 'weight'])
        return body


class DeleteMember(LbaasMemberMixin, neutronV20.DeleteCommand):
    """LBaaS v2 Delete a given member."""

    resource = 'member'
    shadow_resource = 'lbaas_member'
apache-2.0
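For orientation, the CLI surface these command classes back looks roughly like the following. The lbaas-member-* command names are assumptions inferred from the resource and shadow_resource naming; the flags come straight from add_known_arguments above, and the subnet/pool names are placeholders.

neutron lbaas-member-create --subnet private-subnet --address 10.0.0.4 \
    --protocol-port 80 mypool
neutron lbaas-member-update --weight 5 MEMBER_ID mypool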
openkp/openkp
src/test/java/pl/openkp/business/uzytkownicy/boundary/UzytkownikResourceTest.java
2862
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package pl.openkp.business.uzytkownicy.boundary;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import javax.inject.Inject;
import javax.ws.rs.core.Response;

import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.Test;
import org.junit.runner.RunWith;

import pl.openkp.business.uzytkownicy.entity.Uzytkownik;

@RunWith(Arquillian.class)
public class UzytkownikResourceTest {

    @Deployment
    public static Archive<?> createTestArchive() {
        return ShrinkWrap.create(WebArchive.class, "test.war")
                .addPackages(true, "pl.openkp")
                .addAsResource("META-INF/test-persistence.xml", "META-INF/persistence.xml")
                .addAsWebInfResource(EmptyAsset.INSTANCE, "beans.xml")
                // Deploy our test datasource
                .addAsWebInfResource("test-ds.xml");
    }

    @Inject
    UzytkownikResource uzytkownikResource;

    @Test
    public void testNowy() throws Exception {
        Uzytkownik encja = utworz();
        uzytkownikResource.zapisz(encja);
        assertNotNull(encja.getId());
    }

    @Test
    public void testUsun() throws Exception {
        Uzytkownik encja = utworz();
        uzytkownikResource.zapisz(encja);
        assertNotNull(encja.getId());
        Response response = uzytkownikResource.usun(encja.getId());
        assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus());
        response = uzytkownikResource.uzytkownik(encja.getId());
        assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus());
    }

    private Uzytkownik utworz() throws IOException {
        Uzytkownik encja = new Uzytkownik();
        encja.setEmail("test@wp.pl");
        encja.setHaslo("xyz".getBytes());
        encja.setImie("Jan");
        encja.setLogin("jkowalski");
        encja.setNazwisko("Kowalski");
        encja.setZdjecie(Files.readAllBytes(Paths.get("src/test/resources/avatar2.png")));
        return encja;
    }
}
apache-2.0
ReviakinAleksey/newman
src/test/scala/com/stackmob/newman/test/client/ReadCachingDummyHttpClientSpecs.scala
6680
/**
 * Copyright 2012-2013 StackMob
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.stackmob.newman.test
package client

import com.stackmob.newman.test.caching.DummyHttpResponseCacher
import com.stackmob.newman.test.scalacheck._
import org.specs2.{ScalaCheck, Specification}
import com.stackmob.newman.caching._
import com.stackmob.newman.response.HttpResponse
import org.scalacheck._
import Prop._
import java.net.URL
import com.stackmob.newman._
import com.stackmob.newman.request.HttpRequestWithoutBody

class ReadCachingDummyHttpClientSpecs extends Specification with ScalaCheck { def is =
  "ReadCachingDummyHttpClientSpecs".title                                              ^ end ^
  "CachingDummyHttpClient is an HttpClient that caches responses for some defined TTL" ^ end ^
  "GET cache hit should not call Client"         ! getCallsApplyCacheHit               ^ end ^
  "HEAD cache hit should not call Client"        ! headCallsApplyCacheHit              ^ end ^
  "GET cache miss should call Client"            ! getCallsApplyCacheMiss              ^ end ^
  "HEAD cache miss should call Client"           ! headCallsApplyCacheMiss             ^ end ^
  "POST, PUT, DELETE should not touch the cache" ! postPutDeleteIgnoreCache            ^ end ^
  end

  private case class CacheInteraction(numApplies: Int, numFolds: Int)

  private def verifyCacheInteraction(cache: DummyHttpResponseCacher, interaction: CacheInteraction) = {
    val applyCalls = cache.applyCalls.size must beEqualTo(interaction.numApplies)
    val foldCalls = cache.foldCalls.size must beEqualTo(interaction.numFolds)
    applyCalls and foldCalls
  }

  private case class ClientInteraction(numGets: Int, numPosts: Int, numPuts: Int, numDeletes: Int, numHeads: Int)

  private def verifyClientInteraction(client: DummyHttpClient, interaction: ClientInteraction) = {
    val getReqs = client.getRequests.size must beEqualTo(interaction.numGets)
    val postReqs = client.postRequests.size must beEqualTo(interaction.numPosts)
    val putReqs = client.putRequests.size must beEqualTo(interaction.numPuts)
    val deleteReqs = client.deleteRequests.size must beEqualTo(interaction.numDeletes)
    val headReqs = client.headRequests.size must beEqualTo(interaction.numHeads)
    getReqs and postReqs and putReqs and deleteReqs and headReqs
  }

  private def verifyCallsApplyCacheHit[T <: HttpRequestWithoutBody](fn: (ReadCachingHttpClient, URL, Headers) => T) = {
    val genOnApply = genEitherSuccessFuture(genHttpResponse)
    forAll(genURL, genHeaders, genDummyHttpClient, genDummyHttpResponseCache(genOnApply, Gen.value(Right(())))) { (url, headers, dummyClient, dummyCache) =>
      val client = new ReadCachingHttpClient(dummyClient, dummyCache)

      // ensure repeated calls return the same response
      val resp1 = fn(client, url, headers).apply.block()
      val resp2 = fn(client, url, headers).apply.block()
      val resp3 = fn(client, url, headers).apply.block()
      val respMatch1 = resp1 must beEqualTo(resp2)
      val respMatch2 = resp2 must beEqualTo(resp3)

      // ensure that cache is hit for every call and client is never accessed
      respMatch1 and
        respMatch2 and
        verifyCacheInteraction(dummyCache, CacheInteraction(3, 0)) and
        verifyClientInteraction(dummyClient, ClientInteraction(0, 0, 0, 0, 0))
    }
  }

  private def verifyCallsApplyCacheMiss[T <: HttpRequestWithoutBody](expectedClientInteraction: ClientInteraction)(fn: (ReadCachingHttpClient, URL, Headers) => T) = {
    forAll(genURL, genHeaders, genDummyHttpClient, genDummyHttpResponseCache(Gen.value(Right(())), Gen.value(Right(())))) { (url, headers, dummyClient, dummyCache) =>
      val client = new ReadCachingHttpClient(dummyClient, dummyCache)

      // ensure repeated calls return the same response
      val resp1 = fn(client, url, headers).apply.block()
      val resp2 = fn(client, url, headers).apply.block()
      val resp3 = fn(client, url, headers).apply.block()
      val respMatch1 = resp1 must beEqualTo(resp2)
      val respMatch2 = resp2 must beEqualTo(resp3)

      // ensure that cache is missed for every call and client is always accessed
      respMatch1 and
        respMatch2 and
        verifyCacheInteraction(dummyCache, CacheInteraction(3, 0)) and
        verifyClientInteraction(dummyClient, expectedClientInteraction)
    }
  }

  private def getCallsApplyCacheHit = verifyCallsApplyCacheHit { (client, url, headers) =>
    client.get(url, headers)
  }

  private def headCallsApplyCacheHit = verifyCallsApplyCacheHit { (client, url, headers) =>
    client.head(url, headers)
  }

  private def getCallsApplyCacheMiss = verifyCallsApplyCacheMiss(ClientInteraction(3, 0, 0, 0, 0)) { (client, url, headers) =>
    client.get(url, headers)
  }

  private def headCallsApplyCacheMiss = verifyCallsApplyCacheMiss(ClientInteraction(0, 0, 0, 0, 3)) { (client, url, headers) =>
    client.head(url, headers)
  }

  private def postPutDeleteIgnoreCache = {
    forAll(genURL, genHeaders, genRawBody, genDummyHttpClient, genDummyHttpResponseCache(Gen.value(Right(())), Gen.value(Right(())))) { (url, headers, body, dummyClient, dummyCache) =>
      val client = new ReadCachingHttpClient(dummyClient, dummyCache)
      val postRes = client.post(url, headers, body).block() must beEqualTo(dummyClient.responseToReturn.block())
      val putRes = client.put(url, headers, body).block() must beEqualTo(dummyClient.responseToReturn.block())
      val deleteRes = client.delete(url, headers).block() must beEqualTo(dummyClient.responseToReturn.block())

      postRes and
        putRes and
        deleteRes and
        verifyCacheInteraction(dummyCache, CacheInteraction(0, 0)) and
        verifyClientInteraction(dummyClient, ClientInteraction(0, 1, 1, 1, 0))
    }
  }
}
apache-2.0
lukecwik/incubator-beam
sdks/python/apache_beam/io/external/generate_sequence_test.py
1961
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Unit tests for cross-language generate sequence."""

# pytype: skip-file

import logging
import os
import re
import unittest

import pytest

from apache_beam.io.external.generate_sequence import GenerateSequence
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to


@pytest.mark.xlang_transforms
@unittest.skipUnless(
    os.environ.get('EXPANSION_PORT'),
    "EXPANSION_PORT environment var is not provided.")
class XlangGenerateSequenceTest(unittest.TestCase):
  def test_generate_sequence(self):
    port = os.environ.get('EXPANSION_PORT')
    address = 'localhost:%s' % port
    try:
      with TestPipeline() as p:
        res = (
            p | GenerateSequence(start=1, stop=10, expansion_service=address))

        assert_that(res, equal_to([i for i in range(1, 10)]))
    except RuntimeError as e:
      if re.search(GenerateSequence.URN, str(e)):
        print("looks like URN not implemented in expansion service, skipping.")
      else:
        raise e


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
apache-2.0
KittyPass/kitty_pass
db/migrate/20150610101902_create_proteus.rb
499
class CreateProteus < ActiveRecord::Migration
  def change
    create_table :proteus do |t|
      t.string :hostname
      t.string :identifier
      t.string :serial_number
      t.string :inventory_number
      t.string :root_password
      t.string :ipv4_gateway
      t.string :ipv6_gateway
      t.references :model
      t.references :status

      t.timestamps null: false
    end
    add_index :proteus, :hostname, unique: true
    add_index :proteus, :identifier, unique: true
  end
end
apache-2.0
googleapis/google-cloud-ruby
google-cloud-debugger-v2/lib/google-cloud-debugger-v2.rb
839
# frozen_string_literal: true

# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Auto-generated by gapic-generator-ruby. DO NOT EDIT!

# This gem does not autoload during Bundler.require. To load this gem,
# issue explicit require statements for the packages desired, e.g.:
# require "google/cloud/debugger/v2"
apache-2.0
huitseeker/deeplearning4j
deeplearning4j-scaleout/spark/dl4j-spark-nlp-java8/src/main/java/org/deeplearning4j/spark/models/sequencevectors/learning/sequence/SparkDM.java
4347
package org.deeplearning4j.spark.models.sequencevectors.learning.sequence;

import org.deeplearning4j.exception.DL4JInvalidInputException;
import org.deeplearning4j.models.embeddings.learning.impl.elements.RandomUtils;
import org.deeplearning4j.models.sequencevectors.sequence.Sequence;
import org.deeplearning4j.models.sequencevectors.sequence.ShallowSequenceElement;
import org.deeplearning4j.spark.models.sequencevectors.learning.elements.BaseSparkLearningAlgorithm;
import org.deeplearning4j.spark.models.sequencevectors.learning.elements.SparkCBOW;
import org.nd4j.parameterserver.distributed.logic.sequence.BasicSequenceProvider;
import org.nd4j.parameterserver.distributed.messages.Frame;
import org.nd4j.parameterserver.distributed.messages.TrainingMessage;
import org.nd4j.parameterserver.distributed.messages.requests.CbowRequestMessage;
import org.nd4j.parameterserver.distributed.training.TrainingDriver;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Spark implementation for PV-DM training algorithm
 *
 * @author raver119@gmail.com
 */
public class SparkDM extends SparkCBOW {
    @Override
    public String getCodeName() {
        return "Spark-DM";
    }

    @Override
    public Frame<? extends TrainingMessage> frameSequence(Sequence<ShallowSequenceElement> sequence,
                    AtomicLong nextRandom, double learningRate) {
        if (vectorsConfiguration.getSampling() > 0)
            sequence = BaseSparkLearningAlgorithm.applySubsampling(sequence, nextRandom, 10L,
                            vectorsConfiguration.getSampling());

        int currentWindow = vectorsConfiguration.getWindow();

        if (vectorsConfiguration.getVariableWindows() != null
                        && vectorsConfiguration.getVariableWindows().length != 0) {
            currentWindow = vectorsConfiguration.getVariableWindows()[RandomUtils
                            .nextInt(vectorsConfiguration.getVariableWindows().length)];
        }

        if (frame == null)
            synchronized (this) {
                if (frame == null)
                    frame = new ThreadLocal<>();
            }

        if (frame.get() == null)
            frame.set(new Frame<CbowRequestMessage>(BasicSequenceProvider.getInstance().getNextValue()));

        for (int i = 0; i < sequence.getElements().size(); i++) {
            nextRandom.set(Math.abs(nextRandom.get() * 25214903917L + 11));
            int b = (int) nextRandom.get() % currentWindow;
            int end = currentWindow * 2 + 1 - b;

            ShallowSequenceElement currentWord = sequence.getElementByIndex(i);

            List<Integer> intsList = new ArrayList<>();
            for (int a = b; a < end; a++) {
                if (a != currentWindow) {
                    int c = i - currentWindow + a;
                    if (c >= 0 && c < sequence.size()) {
                        ShallowSequenceElement lastWord = sequence.getElementByIndex(c);
                        intsList.add(lastWord.getIndex());
                    }
                }
            }

            // basically it's the same as CBOW, we just add labels here
            if (sequence.getSequenceLabels() != null) {
                for (ShallowSequenceElement label : sequence.getSequenceLabels()) {
                    intsList.add(label.getIndex());
                }
            } else // FIXME: we probably should throw this exception earlier?
                throw new DL4JInvalidInputException(
                                "Sequence passed via RDD has no labels within, nothing to learn here");

            // just converting values to int
            int[] windowWords = new int[intsList.size()];
            for (int x = 0; x < windowWords.length; x++) {
                windowWords[x] = intsList.get(x);
            }

            if (windowWords.length < 1)
                continue;

            iterateSample(currentWord, windowWords, nextRandom, learningRate, false, 0, true, null);
        }

        Frame<CbowRequestMessage> currentFrame = frame.get();
        frame.set(new Frame<CbowRequestMessage>(BasicSequenceProvider.getInstance().getNextValue()));

        return currentFrame;
    }

    @Override
    public TrainingDriver<? extends TrainingMessage> getTrainingDriver() {
        return null;
    }
}
apache-2.0
mccxj/tidb
distsql/distsql_test.go
7493
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package distsql

import (
	"sync"
	"testing"
	"time"

	. "github.com/pingcap/check"
	"github.com/pingcap/errors"
	"github.com/pingcap/parser/charset"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/statistics"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/codec"
	"github.com/pingcap/tidb/util/execdetails"
	"github.com/pingcap/tipb/go-tipb"
	"golang.org/x/net/context"
)

func (s *testSuite) TestSelectNormal(c *C) {
	request, err := (&RequestBuilder{}).SetKeyRanges(nil).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars()).
		Build()
	c.Assert(err, IsNil)

	/// 4 int64 types.
	colTypes := []*types.FieldType{
		{
			Tp:      mysql.TypeLonglong,
			Flen:    mysql.MaxIntWidth,
			Decimal: 0,
			Flag:    mysql.BinaryFlag,
			Charset: charset.CharsetBin,
			Collate: charset.CollationBin,
		},
	}
	colTypes = append(colTypes, colTypes[0])
	colTypes = append(colTypes, colTypes[0])
	colTypes = append(colTypes, colTypes[0])

	// Test Next.
	response, err := Select(context.TODO(), s.sctx, request, colTypes, statistics.NewQueryFeedback(0, nil, 0, false))
	c.Assert(err, IsNil)
	result, ok := response.(*selectResult)
	c.Assert(ok, IsTrue)
	c.Assert(result.label, Equals, "dag")
	c.Assert(result.sqlType, Equals, "general")
	c.Assert(result.rowLen, Equals, len(colTypes))

	response.Fetch(context.TODO())

	// Test Next.
	chk := chunk.New(colTypes, 32, 32)
	numAllRows := 0
	for {
		err = response.Next(context.TODO(), chk)
		c.Assert(err, IsNil)
		numAllRows += chk.NumRows()
		if chk.NumRows() == 0 {
			break
		}
	}
	c.Assert(numAllRows, Equals, 2)
	err = response.Close()
	c.Assert(err, IsNil)
}

func (s *testSuite) TestSelectStreaming(c *C) {
	request, err := (&RequestBuilder{}).SetKeyRanges(nil).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars()).
		SetStreaming(true).
		Build()
	c.Assert(err, IsNil)

	/// 4 int64 types.
	colTypes := []*types.FieldType{
		{
			Tp:      mysql.TypeLonglong,
			Flen:    mysql.MaxIntWidth,
			Decimal: 0,
			Flag:    mysql.BinaryFlag,
			Charset: charset.CharsetBin,
			Collate: charset.CollationBin,
		},
	}
	colTypes = append(colTypes, colTypes[0])
	colTypes = append(colTypes, colTypes[0])
	colTypes = append(colTypes, colTypes[0])

	s.sctx.GetSessionVars().EnableStreaming = true

	// Test Next.
	response, err := Select(context.TODO(), s.sctx, request, colTypes, statistics.NewQueryFeedback(0, nil, 0, false))
	c.Assert(err, IsNil)
	result, ok := response.(*streamResult)
	c.Assert(ok, IsTrue)
	c.Assert(result.rowLen, Equals, len(colTypes))

	response.Fetch(context.TODO())

	// Test Next.
	chk := chunk.New(colTypes, 32, 32)
	numAllRows := 0
	for {
		err = response.Next(context.TODO(), chk)
		c.Assert(err, IsNil)
		numAllRows += chk.NumRows()
		if chk.NumRows() == 0 {
			break
		}
	}
	c.Assert(numAllRows, Equals, 2)
	err = response.Close()
	c.Assert(err, IsNil)
}

func (s *testSuite) TestAnalyze(c *C) {
	request, err := (&RequestBuilder{}).SetKeyRanges(nil).
		SetAnalyzeRequest(&tipb.AnalyzeReq{}).
		SetKeepOrder(true).
		Build()
	c.Assert(err, IsNil)

	response, err := Analyze(context.TODO(), s.sctx.GetClient(), request, kv.DefaultVars, true)
	c.Assert(err, IsNil)

	result, ok := response.(*selectResult)
	c.Assert(ok, IsTrue)
	c.Assert(result.label, Equals, "analyze")
	c.Assert(result.sqlType, Equals, "internal")

	response.Fetch(context.TODO())

	bytes, err := response.NextRaw(context.TODO())
	c.Assert(err, IsNil)
	c.Assert(len(bytes), Equals, 16)

	err = response.Close()
	c.Assert(err, IsNil)
}

// mockResponse implements kv.Response interface.
// Used only for test.
type mockResponse struct {
	count int
	sync.Mutex
}

// Close implements kv.Response interface.
func (resp *mockResponse) Close() error {
	resp.Lock()
	defer resp.Unlock()

	resp.count = 0
	return nil
}

// Next implements kv.Response interface.
func (resp *mockResponse) Next(ctx context.Context) (kv.ResultSubset, error) {
	resp.Lock()
	defer resp.Unlock()

	if resp.count == 2 {
		return nil, nil
	}
	defer func() { resp.count++ }()

	datum := types.NewIntDatum(1)
	bytes := make([]byte, 0, 100)
	bytes, _ = codec.EncodeValue(nil, bytes, datum, datum, datum, datum)

	respPB := &tipb.SelectResponse{
		Chunks:       []tipb.Chunk{{RowsData: bytes}},
		OutputCounts: []int64{1},
	}
	respBytes, err := respPB.Marshal()
	if err != nil {
		panic(err)
	}
	return &mockResultSubset{respBytes}, nil
}

// mockResultSubset implements kv.ResultSubset interface.
// Used only for test.
type mockResultSubset struct{ data []byte }

// GetData implements kv.ResultSubset interface.
func (r *mockResultSubset) GetData() []byte { return r.data }

// GetStartKey implements kv.ResultSubset interface.
func (r *mockResultSubset) GetStartKey() kv.Key { return nil }

// GetExecDetails implements kv.ResultSubset interface.
func (r *mockResultSubset) GetExecDetails() *execdetails.ExecDetails {
	return &execdetails.ExecDetails{}
}

func populateBuffer() []byte {
	numCols := 4
	numRows := 1024
	buffer := make([]byte, 0, 1024)
	sc := &stmtctx.StatementContext{TimeZone: time.Local}

	for rowOrdinal := 0; rowOrdinal < numRows; rowOrdinal++ {
		for colOrdinal := 0; colOrdinal < numCols; colOrdinal++ {
			buffer, _ = codec.EncodeValue(sc, buffer, types.NewIntDatum(123))
		}
	}

	return buffer
}

func mockReadRowsData(buffer []byte, colTypes []*types.FieldType, chk *chunk.Chunk) (err error) {
	chk.Reset()
	numCols := 4
	numRows := 1024

	decoder := codec.NewDecoder(chk, time.Local)
	for rowOrdinal := 0; rowOrdinal < numRows; rowOrdinal++ {
		for colOrdinal := 0; colOrdinal < numCols; colOrdinal++ {
			buffer, err = decoder.DecodeOne(buffer, colOrdinal, colTypes[colOrdinal])
			if err != nil {
				return errors.Trace(err)
			}
		}
	}

	return nil
}

func BenchmarkReadRowsData(b *testing.B) {
	numCols := 4
	numRows := 1024

	colTypes := make([]*types.FieldType, numCols)
	for i := 0; i < numCols; i++ {
		colTypes[i] = &types.FieldType{Tp: mysql.TypeLonglong}
	}
	chk := chunk.New(colTypes, numRows, numRows)

	buffer := populateBuffer()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mockReadRowsData(buffer, colTypes, chk)
	}
}

func BenchmarkDecodeToChunk(b *testing.B) {
	numCols := 4
	numRows := 1024

	colTypes := make([]*types.FieldType, numCols)
	for i := 0; i < numCols; i++ {
		colTypes[i] = &types.FieldType{Tp: mysql.TypeLonglong}
	}
	chk := chunk.New(colTypes, numRows, numRows)

	for rowOrdinal := 0; rowOrdinal < numRows; rowOrdinal++ {
		for colOrdinal := 0; colOrdinal < numCols; colOrdinal++ {
			chk.AppendInt64(colOrdinal, 123)
		}
	}

	codec := chunk.NewCodec(colTypes)
	buffer := codec.Encode(chk)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		codec.DecodeToChunk(buffer, chk)
	}
}
apache-2.0
roambotics/swift
stdlib/public/stubs/Random.cpp
3691
//===--- Random.cpp -------------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// swift_stdlib_random
//
// Should the implementation of this function add a new platform/change for a
// platform, make sure to also update the documentation regarding platform
// implementation of this function.
// This can be found at: /docs/Random.md

#if defined(_WIN32) && !defined(__CYGWIN__)
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
#include <Bcrypt.h>
#pragma comment(lib, "bcrypt.lib")
#else
#include <errno.h>
#include <fcntl.h>
#endif

#if __has_include(<sys/random.h>)
#include <sys/random.h>
#endif
#include <sys/stat.h>
#if __has_include(<sys/syscall.h>)
#include <sys/syscall.h>
#endif
#include <stdlib.h>

#include "swift/Runtime/Debug.h"
#include "swift/Runtime/Mutex.h"
#include "../SwiftShims/Random.h"

#include <algorithm> // required for std::min

using namespace swift;

#if defined(__APPLE__)

SWIFT_RUNTIME_STDLIB_API
void swift_stdlib_random(void *buf, __swift_size_t nbytes) {
  arc4random_buf(buf, nbytes);
}

#elif defined(_WIN32) && !defined(__CYGWIN__)
#warning TODO: Test swift_stdlib_random on Windows

SWIFT_RUNTIME_STDLIB_API
void swift_stdlib_random(void *buf, __swift_size_t nbytes) {
  if (nbytes > ULONG_MAX) {
    fatalError(0, "Fatal error: %zd exceeds ULONG_MAX\n", nbytes);
  }
  NTSTATUS status = BCryptGenRandom(nullptr,
                                    static_cast<PUCHAR>(buf),
                                    static_cast<ULONG>(nbytes),
                                    BCRYPT_USE_SYSTEM_PREFERRED_RNG);
  if (!BCRYPT_SUCCESS(status)) {
    fatalError(0, "Fatal error: 0x%.8X in '%s'\n", status, __func__);
  }
}

#else

#undef  WHILE_EINTR
#define WHILE_EINTR(expression) ([&] () -> decltype(expression) { \
    decltype(expression) result = -1; \
    do { result = (expression); } while (result == -1 && errno == EINTR); \
    return result; \
  }())

SWIFT_RUNTIME_STDLIB_API
void swift_stdlib_random(void *buf, __swift_size_t nbytes) {
  while (nbytes > 0) {
    __swift_ssize_t actual_nbytes = -1;

#if defined(__NR_getrandom)
    static const bool getrandom_available =
      !(syscall(__NR_getrandom, nullptr, 0, 0) == -1 && errno == ENOSYS);

    if (getrandom_available) {
      actual_nbytes = WHILE_EINTR(syscall(__NR_getrandom, buf, nbytes, 0));
    }
#elif __has_include(<sys/random.h>) && (defined(__CYGWIN__) || defined(__Fuchsia__) || defined(__wasi__))
    __swift_size_t getentropy_nbytes = std::min(nbytes, __swift_size_t{256});

    if (0 == getentropy(buf, getentropy_nbytes)) {
      actual_nbytes = getentropy_nbytes;
    }
#endif

    if (actual_nbytes == -1) {
      static const int fd =
        WHILE_EINTR(open("/dev/urandom", O_RDONLY | O_CLOEXEC, 0));

      if (fd != -1) {
        static StaticMutex mutex;
        mutex.withLock([&] {
          actual_nbytes = WHILE_EINTR(read(fd, buf, nbytes));
        });
      }
    }

    if (actual_nbytes == -1) {
      fatalError(0, "Fatal error: %d in '%s'\n", errno, __func__);
    }

    buf = static_cast<uint8_t *>(buf) + actual_nbytes;
    nbytes -= actual_nbytes;
  }
}

#endif
apache-2.0
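The WHILE_EINTR macro above wraps a syscall in a retry loop so that signal interruptions don't surface as failures. A standalone sketch of the same idiom, written here as plain POSIX C++ with no Swift runtime dependencies (the function name is illustrative):

#include <cerrno>
#include <unistd.h>

// Retry a read that may be interrupted by a signal; only EINTR is retried,
// every other error is returned to the caller as-is.
ssize_t read_retrying(int fd, void *buf, size_t nbytes) {
  ssize_t result;
  do {
    result = read(fd, buf, nbytes);
  } while (result == -1 && errno == EINTR);
  return result;
}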
maxml/sample-apps
smarthousedemo/source/src/org/kaaproject/kaa/demo/smarthousedemo/ThermostatActivity.java
965
/**
 * Copyright 2014-2016 CyberVision, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kaaproject.kaa.demo.smarthousedemo;

import org.kaaproject.kaa.demo.smarthousedemo.data.DeviceType;

import android.os.Bundle;

public class ThermostatActivity extends BaseDeviceActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        init(DeviceType.THERMOSTAT);
    }
}
apache-2.0
shanti/olio
webapp/rails/trunk/vendor/plugins/rspec/spec/spec/matchers/match_spec.rb
1927
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

require File.dirname(__FILE__) + '/../../spec_helper.rb'

describe "should match(expected)" do
  it "should pass when target (String) matches expected (Regexp)" do
    "string".should match(/tri/)
  end

  it "should fail when target (String) does not match expected (Regexp)" do
    lambda {
      "string".should match(/rings/)
    }.should fail
  end

  it "should provide message, expected and actual on failure" do
    matcher = match(/rings/)
    matcher.matches?("string")
    matcher.failure_message.should == ["expected \"string\" to match /rings/", /rings/, "string"]
  end
end

describe "should_not match(expected)" do
  it "should pass when target (String) does not match expected (Regexp)" do
    "string".should_not match(/rings/)
  end

  it "should fail when target (String) matches expected (Regexp)" do
    lambda {
      "string".should_not match(/tri/)
    }.should fail
  end

  it "should provide message, expected and actual on failure" do
    matcher = match(/tri/)
    matcher.matches?("string")
    matcher.negative_failure_message.should == ["expected \"string\" not to match /tri/", /tri/, "string"]
  end
end
apache-2.0
census-instrumentation/opencensus-go
stats/record.go
4429
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package stats

import (
	"context"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/stats/internal"
	"go.opencensus.io/tag"
)

func init() {
	internal.SubscriptionReporter = func(measure string) {
		mu.Lock()
		measures[measure].subscribe()
		mu.Unlock()
	}
}

// Recorder provides an interface for exporting measurement information from
// the static Record method by using the WithRecorder option.
type Recorder interface {
	// Record records a set of measurements associated with the given tags and attachments.
	// The second argument is a `[]Measurement`.
	Record(*tag.Map, interface{}, map[string]interface{})
}

type recordOptions struct {
	attachments  metricdata.Attachments
	mutators     []tag.Mutator
	measurements []Measurement
	recorder     Recorder
}

// WithAttachments applies provided exemplar attachments.
func WithAttachments(attachments metricdata.Attachments) Options {
	return func(ro *recordOptions) {
		ro.attachments = attachments
	}
}

// WithTags applies provided tag mutators.
func WithTags(mutators ...tag.Mutator) Options {
	return func(ro *recordOptions) {
		ro.mutators = mutators
	}
}

// WithMeasurements applies provided measurements.
func WithMeasurements(measurements ...Measurement) Options {
	return func(ro *recordOptions) {
		ro.measurements = measurements
	}
}

// WithRecorder records the measurements to the specified `Recorder`, rather
// than to the global metrics recorder.
func WithRecorder(meter Recorder) Options {
	return func(ro *recordOptions) {
		ro.recorder = meter
	}
}

// Options apply changes to recordOptions.
type Options func(*recordOptions)

func createRecordOption(ros ...Options) *recordOptions {
	o := &recordOptions{}
	for _, ro := range ros {
		ro(o)
	}
	return o
}

type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})

// Record records one or multiple measurements with the same context at once.
// If there are any tags in the context, measurements will be tagged with them.
func Record(ctx context.Context, ms ...Measurement) {
	// Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
	// (RecordOptions) we can reduce some allocations to speed up this hot path
	if len(ms) == 0 {
		return
	}
	recorder := internal.MeasurementRecorder.(measurementRecorder)
	record := false
	for _, m := range ms {
		if m.desc.subscribed() {
			record = true
			break
		}
	}
	if !record {
		return
	}
	recorder(tag.FromContext(ctx), ms, nil)
	return
}

// RecordWithTags records one or multiple measurements at once.
//
// Measurements will be tagged with the tags in the context mutated by the mutators.
// RecordWithTags is useful if you want to record with tag mutations but don't want
// to propagate the mutations in the context.
func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error {
	return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...))
}

// RecordWithOptions records measurements from the given options (if any) against context
// and tags and attachments in the options (if any).
// If there are any tags in the context, measurements will be tagged with them.
func RecordWithOptions(ctx context.Context, ros ...Options) error {
	o := createRecordOption(ros...)
	if len(o.measurements) == 0 {
		return nil
	}
	recorder := internal.DefaultRecorder
	if o.recorder != nil {
		recorder = o.recorder.Record
	}
	if recorder == nil {
		return nil
	}
	record := false
	for _, m := range o.measurements {
		if m.desc.subscribed() {
			record = true
			break
		}
	}
	if !record {
		return nil
	}
	if len(o.mutators) > 0 {
		var err error
		if ctx, err = tag.New(ctx, o.mutators...); err != nil {
			return err
		}
	}
	recorder(tag.FromContext(ctx), o.measurements, o.attachments)
	return nil
}
apache-2.0
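A minimal caller-side sketch for the Record hot path above, using the public go.opencensus.io/stats API; the measure name and unit are illustrative. Note the subscribed() check in Record means the measurement is dropped unless a view is registered for the measure.

package main

import (
	"context"

	"go.opencensus.io/stats"
)

// A float64 measure; measurements against it are no-ops until a view subscribes.
var mLatency = stats.Float64("request_latency", "end-to-end request latency", "ms")

func handle(ctx context.Context) {
	// Records one measurement, tagged with whatever tags are in ctx.
	stats.Record(ctx, mLatency.M(12.5))
}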
srmooney/PiFrame
home/pi/Documents/NodeFrame bak/node_modules/onoff/node_modules/epoll/test/performance-check.js
1041
/*
 * Determine approximately how many EPOLLIN events can be handled per second.
 *
 * This test expects a newline as input on stdin. It polls for events on stdin
 * but doesn't read stdin until the test has completed. This results in a
 * continuous stream of events while the test is running.
 *
 * Note that the rate determined is misleading as epoll is notifying us about
 * the same newline all the time.
 *
 * The newline should be piped in for reasonable results:
 * echo | node performance-check
 */
var Epoll = require('../build/Release/epoll').Epoll,
  util = require('./util'),
  time,
  count = 0,
  stdin = 0; // fd for stdin

var epoll = new Epoll(function (err, fd, events) {
  count++;
});

setTimeout(function () {
  var rate;

  time = process.hrtime(time);
  rate = Math.floor(count / (time[0] + time[1] / 1E9));
  console.log('  ' + rate + ' events per second');

  epoll.remove(stdin).close();

  util.read(stdin); // read stdin (the newline)
}, 100);

epoll.add(stdin, Epoll.EPOLLIN);
time = process.hrtime();
apache-2.0
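A minimal sketch of watching a descriptor with this module outside the test harness. Every call used here (the Epoll constructor, add, remove, close, and the EPOLLIN flag) appears in the benchmark above; only the top-level 'epoll' require path is an assumption for code living outside the package tree.

var Epoll = require('epoll').Epoll; // assumed install path; the test uses a relative build path

var poller = new Epoll(function (err, fd, events) {
  if (err) throw err;
  // fd is readable here; consume it, or epoll will keep firing.
});

poller.add(0 /* stdin */, Epoll.EPOLLIN);
// ...later, stop watching and release the epoll instance:
poller.remove(0).close();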
luotao1/Paddle
paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
8432
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using framework::DataLayout;

template <typename T>
class TransposeMKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         dnnl::engine engine)
      : dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0),
        engine_(engine) {}

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
                                                 void* ptr) {
    // Make memory descriptor using input format, unless it
    // cannot be trusted (nchw) then make up memory fmt manually
    for (size_t i = 0; i < this->logical_axis_.size(); ++i) {
      this->logical_axis_[i] = i;
    }

    auto src_md = fmt != MKLDNNMemoryFormat::nchw
                      ? platform::MKLDNNMemDesc(
                            dims_, platform::MKLDNNGetDataType<T>(), fmt)
                      : Axis2MemoryDesc(dims_, logical_axis_);
    return std::make_shared<dnnl::memory>(src_md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
                                                 platform::Place place) {
    auto dst_md = Axis2MemoryDesc(dims_, axis_);
    auto dst_data = output->mutable_data<T>(place, dst_md.get_size());
    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
  }

  std::shared_ptr<dnnl::reorder> AcquireTranspose(
      std::shared_ptr<dnnl::memory> dst_memory_p,
      std::shared_ptr<dnnl::memory> src_memory_p) {
    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
  }

 protected:
  dnnl::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                     std::vector<int>& axis          // NOLINT
                                     ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    dnnl::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                             strides);

    return mem_d;
  }

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
  dnnl::engine engine_;
};

template <typename T>
class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL Transpose must use CPUPlace"));
    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();
    std::vector<int> axis = ctx.Attr<std::vector<int>>("axis");
    int ndims = axis.size();
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    const T* input_data = input->data<T>();

    if (ndims == 1) {
      framework::TensorCopy(*input, input->place(), output);
      output->set_format(input->format());
      return;
    }

    auto nchw_tz = paddle::framework::vectorize<int64_t>(input->dims());

    TransposeMKLDNNHandler<T> handler(nchw_tz, axis, mkldnn_engine);

    auto transpose_src_memory_p = handler.AcquireSrcMemory(
        input->format(), platform::to_void_cast<T>(input_data));
    auto transpose_dst_memory_p =
        handler.AcquireDstMemory(output, ctx.GetPlace());
    auto transpose_p = handler.AcquireTranspose(transpose_dst_memory_p,
                                                transpose_src_memory_p);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    transpose_p->execute(astream, *transpose_src_memory_p,
                         *transpose_dst_memory_p);
    astream.wait();

    output->set_layout(DataLayout::kNCHW);
    output->set_format(MKLDNNMemoryFormat::undef);
  }
};

template <typename T>
class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL TransposeGrad must use CPUPlace"));
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    if (!x_grad) return;

    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();
    std::vector<int> axis = ctx.Attr<std::vector<int>>("axis");
    std::vector<int> reversed_axis(axis);
    int ndims = axis.size();
    if (ndims == 1) {
      framework::TensorCopy(*out_grad, out_grad->place(), x_grad);
      x_grad->set_format(out_grad->format());
      return;
    }

    for (size_t i = 0; i < axis.size(); i++) {
      reversed_axis[axis[i]] = i;
    }

    const T* out_grad_data = out_grad->data<T>();
    x_grad->mutable_data<T>(ctx.GetPlace());

    auto nchw_tz = paddle::framework::vectorize<int64_t>(out_grad->dims());

    TransposeMKLDNNHandler<T> handler(nchw_tz, reversed_axis, mkldnn_engine);

    auto transpose_src_memory_p = handler.AcquireSrcMemory(
        out_grad->format(), platform::to_void_cast<T>(out_grad_data));
    auto transpose_dst_memory_p =
        handler.AcquireDstMemory(x_grad, ctx.GetPlace());
    auto transpose_p = handler.AcquireTranspose(transpose_dst_memory_p,
                                                transpose_src_memory_p);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    transpose_p->execute(astream, *transpose_src_memory_p,
                         *transpose_dst_memory_p);
    astream.wait();
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(transpose2, MKLDNN,
                                    ::paddle::platform::CPUPlace, FP32,
                                    ops::kTransposeMKLDNNFP32,
                                    ops::TransposeMKLDNNOpKernel<float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(transpose2, MKLDNN,
                                    ::paddle::platform::CPUPlace, U8,
                                    ops::kTransposeMKLDNNINT8,
                                    ops::TransposeMKLDNNOpKernel<uint8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(transpose2, MKLDNN,
                                    ::paddle::platform::CPUPlace, S8,
                                    ops::kTransposeMKLDNNINT8,
                                    ops::TransposeMKLDNNOpKernel<int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
    transpose2, MKLDNN, ::paddle::platform::CPUPlace, BF16,
    ops::kTransposeMKLDNNFP32,
    ops::TransposeMKLDNNOpKernel<paddle::platform::bfloat16>);

REGISTER_OP_KERNEL(transpose, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::TransposeMKLDNNOpKernel<float>);

REGISTER_OP_KERNEL(transpose_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::TransposeMKLDNNGradOpKernel<float>);

REGISTER_OP_KERNEL(transpose2_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::TransposeMKLDNNGradOpKernel<float>);
apache-2.0
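A standalone sketch of the stride rule implemented by Axis2MemoryDesc in the transpose handler above: the output axis order decides which logical dimension receives the unit stride. The shape and axis values below are made up for illustration and are not taken from the source:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<std::int64_t> dims = {2, 3, 4, 5};  // hypothetical NCHW shape
  std::vector<int> axis = {0, 2, 3, 1};           // NCHW -> NHWC transpose
  std::vector<std::int64_t> strides(axis.size());
  std::int64_t total_stride = 1;
  // Same loop as Axis2MemoryDesc: walk the output axes from innermost to
  // outermost, assigning each referenced input dimension its stride.
  for (int i = static_cast<int>(axis.size()) - 1; i >= 0; --i) {
    strides[axis[i]] = total_stride;
    total_stride *= dims[axis[i]];
  }
  // For these values the resulting strides are {60, 1, 15, 3}.
  for (std::size_t i = 0; i < strides.size(); ++i) {
    std::printf("stride[%zu] = %lld\n", i, static_cast<long long>(strides[i]));
  }
  return 0;
}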
occ-data/tukey-middleware
config_gen/config_gen.py
15536
# Copyright 2013 Open Cloud Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' A temporary solution until I split the tukey_cli/etc/enabled files clearly into drivers and configuration ''' import os import settings import sys from check_config import check_config def write_file(file_name, text): ''' Write text to the file file_name ''' config_file = open(file_name, 'w') config_file.write(text) config_file.close() # print "writing to file %s" % file_name # print text class ConfigBuilder(object): ''' Base configuration builder to handle shared concerns, such as deciding whether to create a login server configuration file. That's pretty much it ''' def build(self, cloud, middleware_dir, config_dir, proxy_host='127.0.0.1', nova_proxy_port=8774): ''' Generic build sets vars and controls flow ''' self.cloud = cloud self.middleware_dir = middleware_dir self.config_dir = config_dir self.proxy_host = proxy_host self.nova_proxy_port = nova_proxy_port self._write_configuration() if self.cloud["handle_login_keys"]: self._write_login_config() def _write_configuration(self): ''' Must be called after build(). Calls configuration(), which subclasses must implement''' file_name = os.path.join(self.middleware_dir, self.config_dir, self.cloud["cloud_id"]) self._write_file(file_name, self.configuration()) def _usage(self): ''' Return the usage string tailored to this cloud. 
Should be what would appear after 'os-simple-tenant-usage: ' ''' if self.cloud["usage_cloud"]: return "".join([self.middleware_dir, "/tools/with_venv.sh python ", self.middleware_dir, "/tukey_cli/tools/get_usage.py ${start} ${end} ${", self.cloud["cloud_id"], self.username(), "}"]) else: return "echo [{}]" def _write_login_config(self): ''' Write the config file for the login node ''' file_name = os.path.join(self.middleware_dir, self.config_dir, "".join(["login", self.cloud["cloud_id"]])) login_config = "".join(['''[tag] cloud: %(cloud_name)s Login Node cloud_name: %(cloud_name)s Login Node cloud_id: login%(cloud_id)s [enabled] command: if [ '${%(cloud_id)s''', self.username(), "}' = '$'{%(cloud_id)s", self.username(), '''} ]; then false else true fi [commands] basedir=%(middleware_dir)s gpg_home=%(middleware_dir)s/../.gnupg fingerprint='%(gpg_fingerprint)s' passphrase='%(gpg_passphrase)s' host=%(login_host)s:%(login_port)s resource=%(cloud_id)s username=${%(cloud_id)s''', self.username(), '''} keyname=%(cloud_id)s.pub''']) % {"cloud_id": self.cloud["cloud_id"], "cloud_name": self.cloud["cloud_name"], "middleware_dir": self.middleware_dir, "gpg_fingerprint": self.cloud["gpg_fingerprint"], "gpg_passphrase": self.cloud["gpg_passphrase"], "login_host": self.cloud["login_host"], "login_port": self.cloud["login_port"]} login_config = "\n".join([login_config, '''command_base=%(basedir)s/tukey_cli/ venv=%(basedir)s/tools/with_venv.sh keyfile=%(command_base)s/etc/keys/%(keyname)s script_file=%(basedir)s/auth_proxy/ssh_gen.py script=%(venv)s python %(script_file)s %(fingerprint)s %(gpg_home)s %(passphrase)s %(host)s %(keyfile)s os-keypairs: if [ '${method}' = 'POST' ]; then if [ '${public_key}' = '$'{public_key} ]; then %(script)s create %(username)s %(resource)s '${name}' ${password} else %(script)s import %(username)s %(resource)s '${name}' '${public_key}' fi elif [ '${id}' = '$'{id} ];then %(script)s list %(username)s %(resource)s elif [ '${method}' = 'DELETE' ]; then %(script)s delete %(username)s %(resource)s ${id} else %(script)s get %(username)s %(resource)s '${name}' fi ''']) self._write_file(file_name, login_config) def _write_file(self, file_name, text): ''' Wrapper for writing to a file so we can test with print''' write_file(file_name, text) def get_all_statement(self, is_first=False, is_last=False): ''' Create the portion of the "all" config file where we see if this cloud is enabled due to the user tokens appearing. 
sorry''' return "".join(['''if [ '${%(cloud_id)s%(username_pattern)s}' = '$'{%(cloud_id)s%(username_pattern)s} ]; then %(echo_command)s ''', "'[" if is_first else "'", ']' if is_last else '', '''' else %(echo_command)s ''', "'[" if is_first else "',", ' "%(cloud_id)s"', ',"login%(cloud_id)s"' if self.cloud["handle_login_keys"] else '', ']' if is_last else '', '''' fi ''']) % {"cloud_id": self.cloud["cloud_id"], "echo_command": "echo -n", "username_pattern": self.username()} class OpenStackConfigBuilder(ConfigBuilder): ''' Configuration builder for OpenStack clouds ''' def username(self): ''' This cloud's username pattern, using a JPath-like syntax''' return '/access/user/username' def configuration(self): ''' Build the configuration specific to this cloud ''' return "".join(['''[proxy] host: %(nova_host)s port: %(nova_port)s [auth] driver: OpenStackAuth host: %(keystone_host)s port: %(keystone_port)s [tag] cloud: %(cloud_name)s cloud_name: %(cloud_name)s cloud_id: %(cloud_id)s [enabled] command: if [ '${%(cloud_id)s/access/user/username}' = '$'{%(cloud_id)s/access/user/username} ]; then false else true fi [commands] os-simple-tenant-usage: if [ '${detailed}' = '1' ]; then echo "#proxy" else ''', self._usage(), ''' fi''']) % self.cloud class EucalyptusConfigBuilder(ConfigBuilder): ''' Configuration builder for Eucalyptus clouds.''' def username(self): ''' JPath-like username pattern''' return '/username' def configuration(self): ''' Build the configuration specific to this cloud ''' config = '''[enabled] command: if [ '${%(cloud_id)s/username}' = '$'{%(cloud_id)s/username} ]; then false else true fi [tag] cloud: %(cloud_name)s cloud_name: %(cloud_name)s cloud_id: %(cloud_id)s [commands] basedir=%(middleware_dir)s''' % {"middleware_dir": self.middleware_dir, "cloud_id": self.cloud["cloud_id"], "cloud_name": self.cloud["cloud_name"]} config = "".join([config, ''' command_base=%(basedir)s/tukey_cli/ venv=%(basedir)s/tools/with_venv.sh compute=%(venv)s python %(command_base)stools/eucalyptus/compute.py # This should contain the Eucarc files for the # Users. 
creddir=/var/lib/cloudgui/users/ cred_file=%(creddir)s${username}/.euca/eucarc creds=--credentials %(cred_file)s # The commands servers/detail: %(compute)s %(creds)s --list instances servers: if [ '${method}' = 'DELETE' ]; then %(compute)s %(creds)s --action kill --id ${id} elif [ '${method}' = 'POST' ]; then if [ '${user_data}' = '$'{user_data} ]; then %(compute)s %(creds)s --action launch --id ${imageRef} --size ${flavorRef} --number ${min_count} --keyname ${key_name} else %(compute)s %(creds)s --action launch --id ${imageRef} --size ${flavorRef} --number ${min_count} --keyname ${key_name} --userdata ${user_data} fi else %(compute)s %(creds)s --list instances --id ${id} fi flavors/detail: %(venv)s python %(command_base)stools/eucalyptus/flavors.py flavors: %(venv)s python %(command_base)stools/eucalyptus/flavors.py ${id} images/detail: if [ "${property-image_type}" = 'snapshot' ];then #echo '[{"id":""}]' echo '' else if [ '${marker}' = '$'{marker} ]; then %(compute)s %(creds)s --list images --limit ${limit} else %(compute)s %(creds)s --list images --limit ${limit} --marker ${marker} fi fi images: %(compute)s %(creds)s --list images --id ${id} os-keypairs: if [ '${method}' = 'POST' ]; then if [ '${public_key}' = '$'{public_key} ]; then %(compute)s %(creds)s --action create_keypair --keyname ${name} else KEY=$(tempfile);echo "${public_key}" > $KEY %(compute)s %(creds)s --action import_keypair --keyname ${name} --keyfile $KEY rm $KEY fi elif [ '${id}' = '$'{id} ];then %(compute)s %(creds)s --list keys elif [ '${method}' = 'DELETE' ]; then euca-delete-keypair --config %(cred_file)s ${id} else %(compute)s %(creds)s --list keys --id ${id} fi os-quota-sets: %(venv)s python %(command_base)stools/eucalyptus/get_quota.py 10.103.112.3 9402 ${username} os-simple-tenant-usage: if [ '${detailed}' = '1' ]; then echo [{}] else %(venv)s python %(command_base)stools/get_usage.py ${start} ${end} ${username} ${access/user/username} fi [transformations:listSizes] id: name [transformations:servers/detail] OS-EXT-STS power_state=1 tenant_id: ${username} username_id: ${username} name: $(id) status: $(extra/status) key_name: $(extra/keyname) updated: $(launchdatetime) created: $(launchdatetime) hostId: '' progress: 100 accessIPv4: accessIPv6: image: { "id" "$(extra/imageId)", "links" [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/images/$(extra/imageId)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/images/$(extra/imageId)" } ] } flavor: { "id" "$(extra/instancetype)", "links" [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/flavors/$(extra/instancetype)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/flavors/$(extra/instancetype)" } ] } addresses: { "private" [ { "version" 4, "addr" "$(extra/private_dns)" } ] } metadata: {} links: [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/servers/$(id)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/servers/$(id)" } ] [transformations:servers] OS-EXT-STS power_state=1 tenant_id: ${username} username_id: ${username} name: $(id) status: $(extra/status) key_name: $(extra/keyname) updated: $(launchdatetime) created: $(launchdatetime) hostId: '' progress: 100 accessIPv4: accessIPv6: image: { "id" 
"$(extra/imageId)", "links" [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/images/$(extra/imageId)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/images/$(extra/imageId)" } ] } flavor: { "id" "$(extra/instancetype)", "links" [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/flavors/$(extra/instancetype)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/flavors/$(extra/instancetype)" } ] } addresses: { "private" [ { "version" 4, "addr" "$(extra/private_dns)" } ] } metadata: {} links: [ { "rel" "self", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/v1.1/${username}/servers/$(id)" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, ''':''', str(self.nova_proxy_port), '''/${username}/servers/$(id)" } ] [transformations:images/detail] properties: { "image_type" "$(extra/imagetype)"} created: updated: container_format: $(extra/container_format) is_public: extra/ispublic owner: extra/ownerid image_type: $(extra/imagetype) tenant_id: ${username} user_id: ${username} status: $(extra/state) metadata: {} links: [ { "rel" "self", "href" "http://''', self.proxy_host, '''/v1.1/${username}/images/${id}" }, { "rel" "bookmark", "href" "http://''', self.proxy_host, '''/${username}/images/${id}"} ] [transformations:os-keypairs] keypair: {"private_key" "$(keyMaterial)", "public_key" "", "fingerprint" "$(keyFingerprint)", "name" "$(keyName)"} [errors] createKeypair: result launchVm: result [auth] driver: EucalyptusAuth ''']) return config def main(): #for cloud in settings.clouds if not check_config(settings): print "Invalid configuration file" exit(1) print "Config file correct!" if len(sys.argv) != 2: print "Requires middleware_dir argument" exit(1) middleware_dir = sys.argv[1] config_dir = "tukey_cli/etc/enabled" os.mkdir(os.path.join(middleware_dir, config_dir)) host_and_ports = getattr(settings, "host_and_ports", {"host": "127.0.0.1", "nova_port": 8874}) # lets build some config files ! count = 1 all_statement = '' for cloud in settings.clouds: # factory if cloud["cloud_type"].lower() == "eucalyptus": config_builder = EucalyptusConfigBuilder() if cloud["cloud_type"].lower() == "openstack": config_builder = OpenStackConfigBuilder() config_builder.build(cloud, middleware_dir, config_dir, proxy_host=host_and_ports["host"], nova_proxy_port=host_and_ports["nova_port"]) # build the all file all_statement += config_builder.get_all_statement(count == 1, count == len(settings.clouds)) + '; \\' count += 1 all_statement = "".join(['''[commands] middledir=''', middleware_dir, ''' venv=%(middledir)s/tools/with_venv.sh script_file=%(middledir)s/auth_proxy/multiple_keys.py script=%(venv)s python %(script_file)s ${auth-project-id} ${auth-token} '${name}' $( ''', all_statement[:-3], ''') os-keypairs: if [ '${public_key}' = '$'{public_key} ]; then %(script)s else %(script)s '${public_key}' fi [tag] cloud: All Resources cloud_name: All Resources cloud_id: all [enabled] command: true''']) write_file(os.path.join(middleware_dir, config_dir, 'all'), all_statement) if __name__ == "__main__": main()
apache-2.0
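A sketch of the settings module that config_gen.py above imports, inferred from the keys the builders read (cloud_id, cloud_name, cloud_type, handle_login_keys, usage_cloud, the nova_*/keystone_* fields, and host_and_ports); every value here is a placeholder, not taken from any real deployment:

# settings.py (hypothetical values)
host_and_ports = {"host": "127.0.0.1", "nova_port": 8774}

clouds = [
    {
        "cloud_id": "mycloud",
        "cloud_name": "My Cloud",
        "cloud_type": "openstack",   # selects OpenStackConfigBuilder
        "handle_login_keys": False,  # skip writing the login* config file
        "usage_cloud": True,         # enable the get_usage.py command
        "nova_host": "10.0.0.2",
        "nova_port": 8774,
        "keystone_host": "10.0.0.2",
        "keystone_port": 5000,
    },
]

# The generator is then run from a checkout as:
#   python config_gen.py /opt/tukey-middleware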
jingtangliao/ff
Demos/DemoMaskedLaplacianSynthetic.cpp
3204
/*========================================================================= * * Copyright David Doria 2011 daviddoria@gmail.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *=========================================================================*/ #include "ImageProcessing/MaskOperations.h" #include "ImageProcessing/Mask.h" #include "Helpers/OutputHelpers.h" static void CreateMask(Mask* const mask); static void CreateImage(UnsignedCharScalarImageType* const image); int main(int argc, char*argv[]) { Mask::Pointer mask = Mask::New(); CreateMask(mask); OutputHelpers::WriteImage(mask.GetPointer(), "mask.png"); UnsignedCharScalarImageType::Pointer image = UnsignedCharScalarImageType::New(); CreateImage(image); OutputHelpers::WriteImage(image.GetPointer(), "image.png"); FloatScalarImageType::Pointer output = FloatScalarImageType::New(); MaskOperations::MaskedLaplacian(image.GetPointer(), mask.GetPointer(), output.GetPointer()); OutputHelpers::WriteImage(output.GetPointer(), "laplacian.mha"); return EXIT_SUCCESS; } void CreateMask(Mask* const mask) { itk::Index<2> maskCorner = {{0,0}}; itk::Size<2> maskSize = {{100, 100}}; itk::ImageRegion<2> maskRegion(maskCorner, maskSize); mask->SetRegions(maskRegion); mask->Allocate(); // Make a valid mask with a hole in the middle for(unsigned int column = 0; column < maskSize[0]; ++column) { for(unsigned int row = 0; row < maskSize[1]; ++row) { itk::Index<2> currentIndex; currentIndex[0] = column; currentIndex[1] = row; if(column > maskSize[0] / 3 && column < 2 * maskSize[0] / 3 && row > maskSize[1] / 3 && row < 2 * maskSize[1] / 3) { mask->SetPixel(currentIndex, mask->GetHoleValue()); } else { mask->SetPixel(currentIndex, mask->GetValidValue()); } } } } void CreateImage(UnsignedCharScalarImageType* const image) { itk::Index<2> corner = {{0,0}}; itk::Size<2> size = {{100, 100}}; itk::ImageRegion<2> region(corner, size); image->SetRegions(region); image->Allocate(); image->FillBuffer(0); // Make a black square (0) in the middle of a white (255) background for(unsigned int column = 0; column < size[0]; ++column) { for(unsigned int row = 0; row < size[1]; ++row) { itk::Index<2> currentIndex; currentIndex[0] = column; currentIndex[1] = row; if(column > size[0] / 3 && column < 2 * size[0] / 3 && row > size[1] / 3 && row < 2 * size[1] / 3) { image->SetPixel(currentIndex, 0); } else { image->SetPixel(currentIndex, 255); } } } }
apache-2.0
galpha/gradoop
gradoop-flink/src/main/java/org/gradoop/flink/model/impl/functions/epgm/Clone.java
1268
/* * Copyright © 2014 - 2021 Leipzig University (Database Research Group) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gradoop.flink.model.impl.functions.epgm; import org.apache.flink.api.common.functions.MapFunction; import org.apache.flink.api.java.functions.FunctionAnnotation; import org.gradoop.common.model.api.entities.Element; import org.gradoop.common.model.impl.id.GradoopId; /** * Clones an element by replacing its id but keeping label and properties. * * @param <EL> element type */ @FunctionAnnotation.ForwardedFields("label;properties") public class Clone<EL extends Element> implements MapFunction<EL, EL> { @Override public EL map(EL el) throws Exception { el.setId(GradoopId.get()); return el; } }
apache-2.0
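A minimal usage sketch for the Clone map function above, written against Flink's Java DataSet API; the EPGMVertex element type, the import paths, and the existence of a vertices dataset are assumptions for illustration, not taken from the original file:

import org.apache.flink.api.java.DataSet;
import org.gradoop.common.model.impl.pojo.EPGMVertex;
import org.gradoop.flink.model.impl.functions.epgm.Clone;

public class CloneExample {

  // Returns copies of the input vertices that keep their labels and
  // properties but carry freshly generated GradoopIds.
  static DataSet<EPGMVertex> cloneAll(DataSet<EPGMVertex> vertices) {
    return vertices.map(new Clone<>());
  }
}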
QiJune/Paddle
paddle/contrib/float16/float16_transpiler.py
11067
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import paddle.fluid.core as core from paddle.fluid.framework import Program from paddle.fluid.executor import global_scope class Float16Transpiler: def transpile(self, program, place, scope=None): ''' Transpile the program desc and cast the weights to float16 data type to enable float16 inference. Since the operator in a program desc will automatically choose the right compute kernel to run based on the data type of the input tensor. We actually don't need to change the program desc to run in float16 mode. However, in this way, users who are used to feeding and fetching tensors of float32 data type when running typical inference may find it confusing and difficult to run inference in float16 mode as they need to convert input data to float16 dtype and then convert the results back to float32 dtype to match the rest of code. So this function appends cast ops to the program desc where necessary so that users are able to run inference in float16 mode while providing input tensor (feed_holder) of float data type and obtaining output tensor (fetch_holder) of float data type. Moreover, it is desired that when we have the scope and program desc to run inference in float32 mode, we can use a single API to do the necessary modification and then user can run float16 inference on the fly. To make this happen, this function also create new parameters in the scope to have the converted float16 weights and change the operators in program desc to use these new parameters. :param program: program to transpile :type program: Program :param place: inference place :type place: Place :param scope: inference scope :type scope: Scope ''' if not isinstance(program, Program): raise TypeError("program should be as Program type") if not isinstance(place, core.CPUPlace) and not isinstance( place, core.CUDAPlace): raise TypeError("place should be as CPUPlace/CUDAPlace type") if scope is None: scope = global_scope() if not isinstance(scope, core.Scope): raise TypeError("scope should be as Scope type or None") self.scope = scope self.place = place self.block = program.block(0) self.input_map = {} # store the input names should be adjusted self._modify_feed_fetch() self._convert_param_to_float16() self._adjust_input(skip=True) self._remove_unused_var() # TODO(luotao): use clone() method to flush the program.desc in force, # since some large program.desc will not be flushed immediately. # And a better solution will be considered later. program = program.clone() # ====================== private transpiler functions ===================== def _adjust_input(self, skip=False): ''' Change the input variable name in operators. When we are in the process of modifying a program desc, we usually replace some variables with some other variables, where we create a dictionary input_map to record the one-to-one correspondence between each old variable and the new one. 
After that, this function will search all the operators that use the old variables and change the info in op to use the new variables. There may be some exceptions to this rule when we are using the float16 transpiler and insert cast ops to cast a float32 variable to a float16 one. After we insert the cast op to cast var_1 to var_1_fp16, we don't want to change the input of the cast op to var_1_fp16 after using this function. ''' skip_ops = {"cast"} for i in range(len(self.block.ops)): current_op = self.block.ops[i] if skip and current_op.type in skip_ops: continue for input_arg in current_op.input_arg_names: if input_arg in self.input_map: current_op.rename_input(input_arg, self.input_map[input_arg]) def _remove_unused_var(self): ''' Remove unused variables in the program ''' args = [] for i in range(len(self.block.ops)): current_op = self.block.ops[i] args += current_op.input_arg_names args += current_op.output_arg_names args = list(set(args)) # unique the input and output arguments for var in self.block.vars.keys(): if var not in args: self.block._remove_var(var) def _modify_feed_fetch(self): ''' Modify feed fetch op/vars for float16 inference. For each feed op: feed_op->feed_target_var Change it to: feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var For each fetch op: fetch_target_var->fetch_op Change it to: tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op :return: None ''' def find_op(var): # It is possible that var.op is not up to date after some # modifications to program desc. Here we force it up to date. var.op = None for op in self.block.ops: if var.name in op.output_arg_names: var.op = op break if var.op is None: raise ValueError("The target variable must have an " "associated operator that generates it.") i = 0 while i < len(self.block.ops): cur_op = self.block.ops[i] if cur_op.type == "feed": var_name = cur_op.output("Out")[0] tmp_var_name = var_name + ".fp16" var = self.block.vars[var_name] tmp_var = self.block.create_var( name=tmp_var_name.encode('ascii'), type=var.type, dtype=core.VarDesc.VarType.FP16, shape=var.shape, persistable=var.persistable) self.block._insert_op( i + 1, type="cast", inputs={"X": var}, outputs={"Out": tmp_var}, attrs={ 'in_dtype': int(var.dtype), 'out_dtype': int(tmp_var.dtype) }) self.input_map[var_name] = tmp_var_name i = i + 1 elif cur_op.type == "fetch": var_name = cur_op.input("X")[0] tmp_var_name = var_name + ".fp16" var = self.block.vars[var_name] tmp_var = self.block.create_var( name=tmp_var_name.encode('ascii'), type=var.type, dtype=core.VarDesc.VarType.FP16, shape=var.shape, persistable=var.persistable) find_op(var) var.op.rename_output(var_name, tmp_var_name) self.block._insert_op( i, type="cast", inputs={"X": tmp_var}, outputs={"Out": var}, attrs={ 'in_dtype': int(tmp_var.dtype), 'out_dtype': int(var.dtype) }) i = i + 1 i = i + 1 def _convert_param_to_float16(self): def _get_no_fp16_conversion_var_names(): ''' Get the set of input variable names that shouldn't be converted to float16. When we want to run inference in float16 mode, most parameters first need to be converted to float16. However, there are some parameters that shouldn't be converted to float16 because the corresponding operator requires float32 parameters even in float16 mode (when the input data is of float16 data type). Currently, the only operator that has this exclusion is the batch norm op. 
:return: set of input variable names :type var_names: set ''' op_names = {'batch_norm'} var_names = [] for op in self.block.ops: if op.type in op_names: var_names += op.input_arg_names return set(var_names) def _should_be_converted(var): return var.persistable and \ var.name not in self.no_conversion_vars and \ var.type != core.VarDesc.VarType.FEED_MINIBATCH and \ var.type != core.VarDesc.VarType.FETCH_LIST self.no_conversion_vars = _get_no_fp16_conversion_var_names() conversion_var_list = filter(_should_be_converted, self.block.vars.values()) for var in conversion_var_list: fp16_var_name = var.name + ".fp16" fp16_var = self.block.create_parameter( name=fp16_var_name.encode('ascii'), type=var.type, dtype=core.VarDesc.VarType.FP16, shape=var.shape) # cast the data in the tensor of the original var to float16 # data type and store it in the tensor of the new float16 var self.scope.var(fp16_var_name) fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor() tensor = np.array(self.scope.find_var(var.name).get_tensor()) # After the old tensor data is converted to np.float16, view(np.uint16) # is used so that the internal memory of the numpy array will be # reinterpreted to be of np.uint16 data type, which is binded to fluid # float16 data type via the help of pybind in tensor_py.h. fp16_tensor.set( tensor.astype(np.float16).view(np.uint16), self.place) # old var will be replaced by the fp16 var in program desc self.input_map[var.name] = fp16_var_name self.block._remove_var(var.name)
apache-2.0
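A sketch of driving the transpiler above the way its docstring describes, on an already-loaded float32 inference program; the load step and the inference_program name are placeholders, and the class is assumed to be importable from this module:

import paddle.fluid as fluid

# Hypothetical setup: inference_program is a fluid.Program loaded
# elsewhere, e.g. via fluid.io.load_inference_model(...).
place = fluid.CUDAPlace(0)  # float16 inference targets GPU

t = Float16Transpiler()
t.transpile(inference_program, place, scope=fluid.global_scope())
# inference_program now accepts float32 feeds and returns float32
# fetches while running its internals in float16.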
youdonghai/intellij-community
python/src/com/jetbrains/python/codeInsight/intentions/PyConvertMethodToPropertyIntention.java
4481
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jetbrains.python.codeInsight.intentions; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.project.Project; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.usageView.UsageInfo; import com.intellij.util.IncorrectOperationException; import com.jetbrains.python.PyBundle; import com.jetbrains.python.psi.*; import com.jetbrains.python.refactoring.PyRefactoringUtil; import org.jetbrains.annotations.NotNull; import java.util.ArrayList; import java.util.List; /** * User: ktisha */ public class PyConvertMethodToPropertyIntention extends PyBaseIntentionAction { @NotNull public String getFamilyName() { return PyBundle.message("INTN.convert.method.to.property"); } @NotNull public String getText() { return PyBundle.message("INTN.convert.method.to.property"); } public boolean isAvailable(@NotNull Project project, Editor editor, PsiFile file) { if (!(file instanceof PyFile)) { return false; } if (!LanguageLevel.forElement(file).isAtLeast(LanguageLevel.PYTHON26)) return false; final PsiElement element = PyUtil.findNonWhitespaceAtOffset(file, editor.getCaretModel().getOffset()); final PyFunction function = PsiTreeUtil.getParentOfType(element, PyFunction.class); if (function == null) return false; final PyClass containingClass = function.getContainingClass(); if (containingClass == null) return false; if (function.getParameterList().getParameters().length > 1) return false; final PyDecoratorList decoratorList = function.getDecoratorList(); if (decoratorList != null) return false; final boolean[] available = {false}; function.accept(new PyRecursiveElementVisitor() { @Override public void visitPyReturnStatement(PyReturnStatement node) { if (node.getExpression() != null) available[0] = true; } @Override public void visitPyYieldExpression(PyYieldExpression node) { available[0] = true; } }); return available[0]; } public void doInvoke(@NotNull Project project, Editor editor, PsiFile file) throws IncorrectOperationException { final PsiElement element = PyUtil.findNonWhitespaceAtOffset(file, editor.getCaretModel().getOffset()); PyFunction problemFunction = PsiTreeUtil.getParentOfType(element, PyFunction.class); if (problemFunction == null) return; final PyClass containingClass = problemFunction.getContainingClass(); if (containingClass == null) return; final List<UsageInfo> usages = PyRefactoringUtil.findUsages(problemFunction, false); final PyDecoratorList problemDecoratorList = problemFunction.getDecoratorList(); List<String> decoTexts = new ArrayList<>(); decoTexts.add("@property"); if (problemDecoratorList != null) { final PyDecorator[] decorators = problemDecoratorList.getDecorators(); for (PyDecorator deco : decorators) { decoTexts.add(deco.getText()); } } PyElementGenerator generator = PyElementGenerator.getInstance(project); final PyDecoratorList decoratorList = generator.createDecoratorList(decoTexts.toArray(new 
String[decoTexts.size()])); if (problemDecoratorList != null) { problemDecoratorList.replace(decoratorList); } else { problemFunction.addBefore(decoratorList, problemFunction.getFirstChild()); } for (UsageInfo usage : usages) { final PsiElement usageElement = usage.getElement(); if (usageElement instanceof PyReferenceExpression) { final PsiElement parent = usageElement.getParent(); if (parent instanceof PyCallExpression) { final PyArgumentList argumentList = ((PyCallExpression)parent).getArgumentList(); if (argumentList != null) argumentList.delete(); } } } } }
apache-2.0
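For reference, the edit this intention performs on user code, shown on a hypothetical class (names are illustrative only):

# Before invoking "Convert method to property":
class Rect(object):
    def area(self):
        return self.w * self.h

# After: the method gains @property, and usages found by the intention,
# such as r.area(), are rewritten by dropping the argument list to r.area.
class Rect(object):
    @property
    def area(self):
        return self.w * self.h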
sbespalov/strongbox
strongbox-commons/src/main/java/org/carlspring/strongbox/yaml/YAMLMapperFactory.java
297
package org.carlspring.strongbox.yaml; import javax.annotation.Nonnull; import java.util.Set; import com.fasterxml.jackson.dataformat.yaml.YAMLMapper; /** * @author Przemyslaw Fusik */ public interface YAMLMapperFactory { YAMLMapper create(@Nonnull final Set<Class<?>> contextClasses); }
apache-2.0
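Because the factory above is a single abstract method, the smallest possible implementation is a class (or lambda) that ignores its input; a sketch, not the real strongbox implementation:

import java.util.Set;

import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;

public class PlainYAMLMapperFactory implements YAMLMapperFactory {

    @Override
    public YAMLMapper create(final Set<Class<?>> contextClasses) {
        // Disregards the context classes and hands back a default mapper.
        return new YAMLMapper();
    }
}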
EnMasseProject/enmasse
vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go
8650
// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tsdb import ( "context" "sync" "time" "github.com/alecthomas/units" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" ) // ErrNotReady is returned if the underlying storage is not ready yet. var ErrNotReady = errors.New("TSDB not ready") // ReadyStorage implements the Storage interface while allowing the actual // storage to be set at a later point in time. type ReadyStorage struct { mtx sync.RWMutex a *adapter } // Set the storage. func (s *ReadyStorage) Set(db *tsdb.DB, startTimeMargin int64) { s.mtx.Lock() defer s.mtx.Unlock() s.a = &adapter{db: db, startTimeMargin: startTimeMargin} } // Get the storage. func (s *ReadyStorage) Get() *tsdb.DB { if x := s.get(); x != nil { return x.db } return nil } func (s *ReadyStorage) get() *adapter { s.mtx.RLock() x := s.a s.mtx.RUnlock() return x } // StartTime implements the Storage interface. func (s *ReadyStorage) StartTime() (int64, error) { if x := s.get(); x != nil { return x.StartTime() } return int64(model.Latest), ErrNotReady } // Querier implements the Storage interface. func (s *ReadyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { if x := s.get(); x != nil { return x.Querier(ctx, mint, maxt) } return nil, ErrNotReady } // Appender implements the Storage interface. func (s *ReadyStorage) Appender() (storage.Appender, error) { if x := s.get(); x != nil { return x.Appender() } return nil, ErrNotReady } // Close implements the Storage interface. func (s *ReadyStorage) Close() error { if x := s.Get(); x != nil { return x.Close() } return nil } // Adapter returns an adapter as storage.Storage. func Adapter(db *tsdb.DB, startTimeMargin int64) storage.Storage { return &adapter{db: db, startTimeMargin: startTimeMargin} } // adapter implements a storage.Storage around TSDB. type adapter struct { db *tsdb.DB startTimeMargin int64 } // Options of the DB storage. type Options struct { // The timestamp range of head blocks after which they get persisted. // It's the minimum duration of any persisted block. MinBlockDuration model.Duration // The maximum timestamp range of compacted blocks. MaxBlockDuration model.Duration // The maximum size of each WAL segment file. WALSegmentSize units.Base2Bytes // Duration for how long to retain data. RetentionDuration model.Duration // Maximum number of bytes to be retained. MaxBytes units.Base2Bytes // Disable creation and consideration of a lockfile. NoLockfile bool // When true, it disables the overlapping blocks check. // This in turn enables vertical compaction and vertical query merge. AllowOverlappingBlocks bool // When true, records in the WAL will be compressed. 
WALCompression bool } var ( startTime prometheus.GaugeFunc headMaxTime prometheus.GaugeFunc headMinTime prometheus.GaugeFunc ) func registerMetrics(db *tsdb.DB, r prometheus.Registerer) { startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_lowest_timestamp_seconds", Help: "Lowest timestamp value stored in the database.", }, func() float64 { bb := db.Blocks() if len(bb) == 0 { return float64(db.Head().MinTime()) / 1000 } return float64(db.Blocks()[0].Meta().MinTime) / 1000 }) headMinTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_min_time_seconds", Help: "Minimum time bound of the head block.", }, func() float64 { return float64(db.Head().MinTime()) / 1000 }) headMaxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_max_time_seconds", Help: "Maximum timestamp of the head block.", }, func() float64 { return float64(db.Head().MaxTime()) / 1000 }) if r != nil { r.MustRegister( startTime, headMaxTime, headMinTime, ) } } // Open returns a new storage backed by a TSDB database that is configured for Prometheus. func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) { if opts.MinBlockDuration > opts.MaxBlockDuration { opts.MaxBlockDuration = opts.MinBlockDuration } // Start with the smallest block duration and create exponential buckets until they exceed the // configured maximum block duration. rngs := tsdb.ExponentialBlockRanges(int64(time.Duration(opts.MinBlockDuration).Seconds()*1000), 10, 3) for i, v := range rngs { if v > int64(time.Duration(opts.MaxBlockDuration).Seconds()*1000) { rngs = rngs[:i] break } } db, err := tsdb.Open(path, l, r, &tsdb.Options{ WALSegmentSize: int(opts.WALSegmentSize), RetentionDuration: uint64(time.Duration(opts.RetentionDuration).Seconds() * 1000), MaxBytes: int64(opts.MaxBytes), BlockRanges: rngs, NoLockfile: opts.NoLockfile, AllowOverlappingBlocks: opts.AllowOverlappingBlocks, WALCompression: opts.WALCompression, }) if err != nil { return nil, err } registerMetrics(db, r) return db, nil } // StartTime implements the Storage interface. func (a adapter) StartTime() (int64, error) { var startTime int64 if len(a.db.Blocks()) > 0 { startTime = a.db.Blocks()[0].Meta().MinTime } else { startTime = time.Now().Unix() * 1000 } // Add a safety margin as it may take a few minutes for everything to spin up. return startTime + a.startTimeMargin, nil } func (a adapter) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) { q, err := a.db.Querier(mint, maxt) if err != nil { return nil, err } return querier{q: q}, nil } // Appender returns a new appender against the storage. func (a adapter) Appender() (storage.Appender, error) { return appender{a: a.db.Appender()}, nil } // Close closes the storage and all its underlying resources. func (a adapter) Close() error { return a.db.Close() } type querier struct { q tsdb.Querier } func (q querier) Select(_ *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { set, err := q.q.Select(ms...) 
if err != nil { return nil, nil, err } return seriesSet{set: set}, nil, nil } func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) { v, err := q.q.LabelValues(name) return v, nil, err } func (q querier) LabelNames() ([]string, storage.Warnings, error) { v, err := q.q.LabelNames() return v, nil, err } func (q querier) Close() error { return q.q.Close() } type seriesSet struct { set tsdb.SeriesSet } func (s seriesSet) Next() bool { return s.set.Next() } func (s seriesSet) Err() error { return s.set.Err() } func (s seriesSet) At() storage.Series { return series{s: s.set.At()} } type series struct { s tsdb.Series } func (s series) Labels() labels.Labels { return s.s.Labels() } func (s series) Iterator() storage.SeriesIterator { return s.s.Iterator() } type appender struct { a tsdb.Appender } func (a appender) Add(lset labels.Labels, t int64, v float64) (uint64, error) { ref, err := a.a.Add(lset, t, v) switch errors.Cause(err) { case tsdb.ErrNotFound: return 0, storage.ErrNotFound case tsdb.ErrOutOfOrderSample: return 0, storage.ErrOutOfOrderSample case tsdb.ErrAmendSample: return 0, storage.ErrDuplicateSampleForTimestamp case tsdb.ErrOutOfBounds: return 0, storage.ErrOutOfBounds } return ref, err } func (a appender) AddFast(_ labels.Labels, ref uint64, t int64, v float64) error { err := a.a.AddFast(ref, t, v) switch errors.Cause(err) { case tsdb.ErrNotFound: return storage.ErrNotFound case tsdb.ErrOutOfOrderSample: return storage.ErrOutOfOrderSample case tsdb.ErrAmendSample: return storage.ErrDuplicateSampleForTimestamp case tsdb.ErrOutOfBounds: return storage.ErrOutOfBounds } return err } func (a appender) Commit() error { return a.a.Commit() } func (a appender) Rollback() error { return a.a.Rollback() }
apache-2.0
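A sketch of composing Open and ReadyStorage above the way the Prometheus server does: open the database, then publish it through a ReadyStorage so queriers fail with ErrNotReady until Set is called. Option values are arbitrary, and the import paths assume the same vendored layout as this repository:

package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/storage/tsdb"
)

func main() {
	opts := &tsdb.Options{
		MinBlockDuration:  model.Duration(2 * time.Hour),
		MaxBlockDuration:  model.Duration(36 * time.Hour),
		RetentionDuration: model.Duration(15 * 24 * time.Hour),
	}
	db, err := tsdb.Open("data/", log.NewNopLogger(), nil, opts)
	if err != nil {
		panic(err)
	}
	ready := &tsdb.ReadyStorage{}
	// Queriers obtained before this call fail with ErrNotReady.
	ready.Set(db, int64(2*time.Hour/time.Millisecond))
	defer ready.Close()
}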
osswangxining/mvel-jsr223
mvel-jsr223/src/org/mvel2/ast/ForEachNode.java
7886
/** * MVEL 2.0 * Copyright (C) 2007 The Codehaus * Mike Brock, Dhanji Prasanna, John Graham, Mark Proctor * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.mvel2.ast; import org.mvel2.CompileException; import org.mvel2.DataConversion; import org.mvel2.MVEL; import org.mvel2.ParserContext; import org.mvel2.compiler.ExecutableStatement; import org.mvel2.integration.VariableResolverFactory; import org.mvel2.integration.impl.DefaultLocalVariableResolverFactory; import org.mvel2.integration.impl.ItemResolverFactory; import org.mvel2.util.ParseTools; import java.lang.reflect.Array; import static org.mvel2.util.ParseTools.*; /** * @author Christopher Brock */ public class ForEachNode extends BlockNode { protected String item; protected Class itemType; protected ExecutableStatement condition; private static final int ITERABLE = 0; private static final int ARRAY = 1; private static final int CHARSEQUENCE = 2; private static final int INTEGER = 3; private int type = -1; public ForEachNode(char[] expr, int start, int offset, int blockStart, int blockOffset, int fields, ParserContext pCtx) { super(pCtx); handleCond(this.expr = expr, this.start = start, this.offset = offset, this.fields = fields, pCtx); this.blockStart = blockStart; this.blockOffset = blockOffset; if ((fields & COMPILE_IMMEDIATE) != 0) { if (pCtx.isStrictTypeEnforcement() && itemType != null) { pCtx = pCtx.createSubcontext(); pCtx.addInput(item, itemType); } pCtx.pushVariableScope(); pCtx.makeVisible(item); this.compiledBlock = (ExecutableStatement) subCompileExpression(expr, blockStart, blockOffset, pCtx); pCtx.popVariableScope(); } } public Object getReducedValueAccelerated(Object ctx, Object thisValue, VariableResolverFactory factory) { ItemResolverFactory.ItemResolver itemR = new ItemResolverFactory.ItemResolver(item); ItemResolverFactory itemFactory = new ItemResolverFactory(itemR, new DefaultLocalVariableResolverFactory(factory)); Object iterCond = condition.getValue(ctx, thisValue, factory); if (type == -1) { determineIterType(iterCond.getClass()); } Object v; switch (type) { case ARRAY: int len = Array.getLength(iterCond); for (int i = 0; i < len; i++) { itemR.setValue(Array.get(iterCond, i)); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } break; case CHARSEQUENCE: for (Object o : iterCond.toString().toCharArray()) { itemR.setValue(o); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } break; case INTEGER: int max = (Integer) iterCond + 1; for (int i = 1; i != max; i++) { itemR.setValue(i); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } break; case ITERABLE: for (Object o : (Iterable) iterCond) { itemR.setValue(o); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } break; } return null; } public Object getReducedValue(Object ctx, Object thisValue, VariableResolverFactory factory) { ItemResolverFactory.ItemResolver itemR = new 
ItemResolverFactory.ItemResolver(item); ItemResolverFactory itemFactory = new ItemResolverFactory(itemR, new DefaultLocalVariableResolverFactory(factory)); Object iterCond = MVEL.eval(expr, start, offset, thisValue, factory); if (itemType != null && itemType.isArray()) enforceTypeSafety(itemType, getBaseComponentType(iterCond.getClass())); this.compiledBlock = (ExecutableStatement) subCompileExpression(expr, blockStart, blockOffset); Object v; if (iterCond instanceof Iterable) { for (Object o : (Iterable) iterCond) { itemR.setValue(o); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } } else if (iterCond != null && iterCond.getClass().isArray()) { int len = Array.getLength(iterCond); for (int i = 0; i < len; i++) { itemR.setValue(Array.get(iterCond, i)); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } } else if (iterCond instanceof CharSequence) { for (Object o : iterCond.toString().toCharArray()) { itemR.setValue(o); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } } else if (iterCond instanceof Integer) { int max = (Integer) iterCond + 1; for (int i = 1; i != max; i++) { itemR.setValue(i); v = compiledBlock.getValue(ctx, thisValue, itemFactory); if (itemFactory.tiltFlag()) return v; } } else { throw new CompileException("non-iterable type: " + (iterCond != null ? iterCond.getClass().getName() : "null"), expr, start); } return null; } private void handleCond(char[] condition, int start, int offset, int fields, ParserContext pCtx) { int cursor = start; int end = start + offset; while (cursor < end && condition[cursor] != ':') cursor++; if (cursor == end || condition[cursor] != ':') throw new CompileException("expected : in foreach", condition, cursor); int x; if ((x = (item = createStringTrimmed(condition, start, cursor - start)).indexOf(' ')) != -1) { String tk = new String(condition, start, x).trim(); try { itemType = ParseTools.findClass(null, tk, pCtx); item = new String(condition, start + x, (cursor - start) - x).trim(); } catch (ClassNotFoundException e) { throw new CompileException("cannot resolve identifier: " + tk, condition, start); } } // this.start = ++cursor; this.start = cursor + 1; this.offset = offset - (cursor - start) - 1; if ((fields & COMPILE_IMMEDIATE) != 0) { Class egress = (this.condition = (ExecutableStatement) subCompileExpression(expr, this.start, this.offset, pCtx)).getKnownEgressType(); if (itemType != null && egress.isArray()) { enforceTypeSafety(itemType, getBaseComponentType(this.condition.getKnownEgressType())); } else if (pCtx.isStrongTyping()) { determineIterType(egress); } } } private void determineIterType(Class t) { if (Iterable.class.isAssignableFrom(t)) { type = ITERABLE; } else if (t.isArray()) { type = ARRAY; } else if (CharSequence.class.isAssignableFrom(t)) { type = CHARSEQUENCE; } else if (Integer.class.isAssignableFrom(t)) { type = INTEGER; } else { throw new CompileException("non-iterable type: " + t.getName(), expr, start); } } private void enforceTypeSafety(Class required, Class actual) { if (!required.isAssignableFrom(actual) && !DataConversion.canConvert(actual, required)) { throw new CompileException("type mismatch in foreach: expected: " + required.getName() + "; but found: " + getBaseComponentType(actual), expr, start); } } }
apache-2.0
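The node above backs MVEL's foreach statement; a sketch of exercising it through the public MVEL API, hitting the Integer iteration case handled in getReducedValue (the expression and variable names are illustrative, and the foreach syntax follows MVEL's documented form):

import java.util.HashMap;
import java.util.Map;

import org.mvel2.MVEL;

public class ForEachExample {
  public static void main(String[] args) {
    Map<String, Object> vars = new HashMap<>();
    vars.put("total", 0);
    // An Integer loop source iterates 1..5 inclusive (see the INTEGER
    // case above, where max = iterCond + 1).
    Object result = MVEL.eval(
        "foreach (i : 5) { total = total + i }; total", vars);
    System.out.println(result); // expected: 15
  }
}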
knoguchi/druid
server/src/main/java/org/apache/druid/client/indexing/ClientCompactQueryTuningConfig.java
4864
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.client.indexing; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.druid.segment.IndexSpec; import org.apache.druid.server.coordinator.DataSourceCompactionConfig.UserCompactTuningConfig; import javax.annotation.Nullable; import java.util.Objects; public class ClientCompactQueryTuningConfig { @Nullable private final Integer maxRowsPerSegment; @Nullable private final Integer maxRowsInMemory; @Nullable private final Integer maxTotalRows; @Nullable private final IndexSpec indexSpec; @Nullable private final Integer maxPendingPersists; @Nullable private final Long pushTimeout; public static ClientCompactQueryTuningConfig from( @Nullable UserCompactTuningConfig userCompactTuningConfig, @Nullable Integer maxRowsPerSegment ) { return new ClientCompactQueryTuningConfig( maxRowsPerSegment, userCompactTuningConfig == null ? null : userCompactTuningConfig.getMaxRowsInMemory(), userCompactTuningConfig == null ? null : userCompactTuningConfig.getMaxTotalRows(), userCompactTuningConfig == null ? null : userCompactTuningConfig.getIndexSpec(), userCompactTuningConfig == null ? null : userCompactTuningConfig.getMaxPendingPersists(), userCompactTuningConfig == null ? 
null : userCompactTuningConfig.getPushTimeout() ); } @JsonCreator public ClientCompactQueryTuningConfig( @JsonProperty("maxRowsPerSegment") @Nullable Integer maxRowsPerSegment, @JsonProperty("maxRowsInMemory") @Nullable Integer maxRowsInMemory, @JsonProperty("maxTotalRows") @Nullable Integer maxTotalRows, @JsonProperty("indexSpec") @Nullable IndexSpec indexSpec, @JsonProperty("maxPendingPersists") @Nullable Integer maxPendingPersists, @JsonProperty("pushTimeout") @Nullable Long pushTimeout ) { this.maxRowsPerSegment = maxRowsPerSegment; this.maxRowsInMemory = maxRowsInMemory; this.maxTotalRows = maxTotalRows; this.indexSpec = indexSpec; this.maxPendingPersists = maxPendingPersists; this.pushTimeout = pushTimeout; } @JsonProperty public String getType() { return "index"; } @JsonProperty @Nullable public Integer getMaxRowsPerSegment() { return maxRowsPerSegment; } @JsonProperty @Nullable public Integer getMaxRowsInMemory() { return maxRowsInMemory; } @JsonProperty @Nullable public Integer getMaxTotalRows() { return maxTotalRows; } @JsonProperty @Nullable public IndexSpec getIndexSpec() { return indexSpec; } @JsonProperty @Nullable public Integer getMaxPendingPersists() { return maxPendingPersists; } @JsonProperty @Nullable public Long getPushTimeout() { return pushTimeout; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ClientCompactQueryTuningConfig that = (ClientCompactQueryTuningConfig) o; return Objects.equals(maxRowsPerSegment, that.maxRowsPerSegment) && Objects.equals(maxRowsInMemory, that.maxRowsInMemory) && Objects.equals(maxTotalRows, that.maxTotalRows) && Objects.equals(indexSpec, that.indexSpec) && Objects.equals(maxPendingPersists, that.maxPendingPersists) && Objects.equals(pushTimeout, that.pushTimeout); } @Override public int hashCode() { return Objects.hash(maxRowsPerSegment, maxRowsInMemory, maxTotalRows, indexSpec, maxPendingPersists, pushTimeout); } @Override public String toString() { return "ClientCompactQueryTuningConfig{" + "maxRowsPerSegment=" + maxRowsPerSegment + ", maxRowsInMemory=" + maxRowsInMemory + ", maxTotalRows=" + maxTotalRows + ", indexSpec=" + indexSpec + ", maxPendingPersists=" + maxPendingPersists + ", pushTimeout=" + pushTimeout + '}'; } }
apache-2.0
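A sketch of constructing the config above the way the coordinator code path suggests, via the static from() with a null user config; the value is a placeholder:

import org.apache.druid.client.indexing.ClientCompactQueryTuningConfig;

public class TuningConfigExample {
  public static void main(String[] args) {
    // With a null UserCompactTuningConfig, every field except
    // maxRowsPerSegment stays null and falls back to task defaults.
    ClientCompactQueryTuningConfig cfg =
        ClientCompactQueryTuningConfig.from(null, 5_000_000);
    System.out.println(cfg);
  }
}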
tdiesler/camel
core/camel-management/src/test/java/org/apache/camel/management/ManagedNamePatternJvmSystemPropertyTest.java
2485
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.management; import javax.management.MBeanServer; import javax.management.ObjectName; import org.apache.camel.api.management.JmxSystemPropertyKeys; import org.apache.camel.builder.RouteBuilder; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.parallel.ResourceLock; import org.junit.jupiter.api.parallel.Resources; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @ResourceLock(Resources.SYSTEM_PROPERTIES) public class ManagedNamePatternJvmSystemPropertyTest extends ManagementTestSupport { @Override @BeforeEach public void setUp() throws Exception { System.setProperty(JmxSystemPropertyKeys.MANAGEMENT_NAME_PATTERN, "cool-#name#"); super.setUp(); } @Override @AfterEach public void tearDown() throws Exception { System.clearProperty(JmxSystemPropertyKeys.MANAGEMENT_NAME_PATTERN); super.tearDown(); } @Test public void testManagedNamePattern() throws Exception { MBeanServer mbeanServer = getMBeanServer(); assertEquals("cool-" + context.getName(), context.getManagementName()); ObjectName on = getContextObjectName(); assertTrue(mbeanServer.isRegistered(on), "Should be registered"); } @Override protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { @Override public void configure() throws Exception { from("direct:start").to("mock:result"); } }; } }
apache-2.0
snnn/bazel
src/test/java/com/google/devtools/build/lib/rules/objc/LegacyObjcFrameworkTest.java
1238
// Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.rules.objc; import com.google.devtools.build.lib.rules.objc.ObjcCommandLineOptions.ObjcCrosstoolMode; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** * Legacy test: These tests test --experimental_objc_crosstool=off. See README. The class is empty * because each test is also run in the superclass, which tests --experimental_objc_crosstool=all. */ @RunWith(JUnit4.class) @LegacyTest public class LegacyObjcFrameworkTest extends ObjcFrameworkTest { @Override protected ObjcCrosstoolMode getObjcCrosstoolMode() { return ObjcCrosstoolMode.OFF; } }
apache-2.0
pedroSG94/rtmp-rtsp-stream-client-java
rtplibrary/src/main/java/com/pedro/rtplibrary/network/AdapterBitrateParser.java
1817
/* * Copyright (C) 2021 pedroSG94. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.pedro.rtplibrary.network; @Deprecated public class AdapterBitrateParser { public static long DELAY = 1000; public static long DIFFERENCE = 500; private static long cont = 0; public static int maxVideoBitrate = 0; public interface Callback { void onNewBitrate(int bitrate); } public static void parseBitrate(int oldBitrate, int bandwidth, Callback callback) { if (cont == 0) cont = System.currentTimeMillis(); if (System.currentTimeMillis() - cont > DELAY) { cont = 0; if (oldBitrate / 1000 - bandwidth >= DIFFERENCE || maxVideoBitrate != 0 && oldBitrate / 1000 >= maxVideoBitrate) { callback.onNewBitrate((int) (oldBitrate - (DIFFERENCE * 1000))); } else if (oldBitrate / 1000 - bandwidth <= DIFFERENCE) { callback.onNewBitrate((int) (oldBitrate + (DIFFERENCE * 1000))); } } } public static void calculateMaxVideoBitrate(int resolutionValue) { maxVideoBitrate = (int) (1.65287774651705E-10 * Math.pow(resolutionValue, 2) + 0.002653652033201 * resolutionValue + 640.220156152395); } public static void reset() { DELAY = 1000; DIFFERENCE = 500; cont = 0; maxVideoBitrate = 0; } }
apache-2.0
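A sketch of wiring the (deprecated) parser above into a periodic bitrate check. Note the unit asymmetry visible in parseBitrate: oldBitrate is in bit/s while bandwidth is in kbit/s. How the new bitrate is pushed to the encoder is left as a hypothetical comment:

public class BitrateCheckExample {

  // currentBitrate in bit/s; bandwidthKbps in kbit/s, matching the
  // oldBitrate / 1000 comparison in parseBitrate above.
  static void checkBitrate(int currentBitrate, int bandwidthKbps) {
    AdapterBitrateParser.parseBitrate(currentBitrate, bandwidthKbps,
        new AdapterBitrateParser.Callback() {
          @Override
          public void onNewBitrate(int bitrate) {
            // Push the adjusted bitrate to the encoder here, e.g. a
            // hypothetical rtmpCamera.setVideoBitrateOnFly(bitrate).
          }
        });
  }
}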
hustcat/docker
vendor/src/github.com/docker/libnetwork/network.go
30348
package libnetwork import ( "encoding/json" "fmt" "net" "strings" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/stringid" "github.com/docker/libnetwork/config" "github.com/docker/libnetwork/datastore" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/etchosts" "github.com/docker/libnetwork/ipamapi" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/options" "github.com/docker/libnetwork/types" ) // A Network represents a logical connectivity zone that containers may // join using the Link method. A Network is managed by a specific driver. type Network interface { // A user chosen name for this network. Name() string // A system generated id for this network. ID() string // The type of network, which corresponds to its managing driver. Type() string // Create a new endpoint to this network symbolically identified by the // specified unique name. The options parameter carry driver specific options. CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) // Delete the network. Delete() error // Endpoints returns the list of Endpoint(s) in this network. Endpoints() []Endpoint // WalkEndpoints uses the provided function to walk the Endpoints WalkEndpoints(walker EndpointWalker) // EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned. EndpointByName(name string) (Endpoint, error) // EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned. EndpointByID(id string) (Endpoint, error) // Return certain operational data belonging to this network Info() NetworkInfo } // NetworkInfo returns some configuration and operational information about the network type NetworkInfo interface { IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) IpamInfo() ([]*IpamInfo, []*IpamInfo) DriverOptions() map[string]string Scope() string IPv6Enabled() bool Internal() bool } // EndpointWalker is a client provided function which will be used to walk the Endpoints. // When the function returns true, the walk will stop. type EndpointWalker func(ep Endpoint) bool type svcInfo struct { svcMap map[string][]net.IP ipMap map[string]string } // IpamConf contains all the ipam related configurations for a network type IpamConf struct { // The master address pool for containers and network interfaces PreferredPool string // A subset of the master pool. If specified, // this becomes the container pool SubPool string // Preferred Network Gateway address (optional) Gateway string // Auxiliary addresses for network driver. Must be within the master pool. 
// libnetwork will reserve them if they fall into the container pool AuxAddresses map[string]string } // Validate checks whether the configuration is valid func (c *IpamConf) Validate() error { if c.Gateway != "" && nil == net.ParseIP(c.Gateway) { return types.BadRequestErrorf("invalid gateway address %s in Ipam configuration", c.Gateway) } return nil } // IpamInfo contains all the ipam related operational info for a network type IpamInfo struct { PoolID string Meta map[string]string driverapi.IPAMData } // MarshalJSON encodes IpamInfo into json message func (i *IpamInfo) MarshalJSON() ([]byte, error) { m := map[string]interface{}{ "PoolID": i.PoolID, } v, err := json.Marshal(&i.IPAMData) if err != nil { return nil, err } m["IPAMData"] = string(v) if i.Meta != nil { m["Meta"] = i.Meta } return json.Marshal(m) } // UnmarshalJSON decodes json message into PoolData func (i *IpamInfo) UnmarshalJSON(data []byte) error { var ( m map[string]interface{} err error ) if err = json.Unmarshal(data, &m); err != nil { return err } i.PoolID = m["PoolID"].(string) if v, ok := m["Meta"]; ok { b, _ := json.Marshal(v) if err = json.Unmarshal(b, &i.Meta); err != nil { return err } } if v, ok := m["IPAMData"]; ok { if err = json.Unmarshal([]byte(v.(string)), &i.IPAMData); err != nil { return err } } return nil } type network struct { ctrlr *controller name string networkType string id string scope string ipamType string ipamOptions map[string]string addrSpace string ipamV4Config []*IpamConf ipamV6Config []*IpamConf ipamV4Info []*IpamInfo ipamV6Info []*IpamInfo enableIPv6 bool postIPv6 bool epCnt *endpointCnt generic options.Generic dbIndex uint64 dbExists bool persist bool stopWatchCh chan struct{} drvOnce *sync.Once internal bool sync.Mutex } func (n *network) Name() string { n.Lock() defer n.Unlock() return n.name } func (n *network) ID() string { n.Lock() defer n.Unlock() return n.id } func (n *network) Type() string { n.Lock() defer n.Unlock() return n.networkType } func (n *network) Key() []string { n.Lock() defer n.Unlock() return []string{datastore.NetworkKeyPrefix, n.id} } func (n *network) KeyPrefix() []string { return []string{datastore.NetworkKeyPrefix} } func (n *network) Value() []byte { n.Lock() defer n.Unlock() b, err := json.Marshal(n) if err != nil { return nil } return b } func (n *network) SetValue(value []byte) error { return json.Unmarshal(value, n) } func (n *network) Index() uint64 { n.Lock() defer n.Unlock() return n.dbIndex } func (n *network) SetIndex(index uint64) { n.Lock() n.dbIndex = index n.dbExists = true n.Unlock() } func (n *network) Exists() bool { n.Lock() defer n.Unlock() return n.dbExists } func (n *network) Skip() bool { n.Lock() defer n.Unlock() return !n.persist } func (n *network) New() datastore.KVObject { n.Lock() defer n.Unlock() return &network{ ctrlr: n.ctrlr, drvOnce: &sync.Once{}, scope: n.scope, } } // CopyTo deep copies to the destination IpamConfig func (c *IpamConf) CopyTo(dstC *IpamConf) error { dstC.PreferredPool = c.PreferredPool dstC.SubPool = c.SubPool dstC.Gateway = c.Gateway if c.AuxAddresses != nil { dstC.AuxAddresses = make(map[string]string, len(c.AuxAddresses)) for k, v := range c.AuxAddresses { dstC.AuxAddresses[k] = v } } return nil } // CopyTo deep copies to the destination IpamInfo func (i *IpamInfo) CopyTo(dstI *IpamInfo) error { dstI.PoolID = i.PoolID if i.Meta != nil { dstI.Meta = make(map[string]string) for k, v := range i.Meta { dstI.Meta[k] = v } } dstI.AddressSpace = i.AddressSpace dstI.Pool = types.GetIPNetCopy(i.Pool) dstI.Gateway = 
types.GetIPNetCopy(i.Gateway) if i.AuxAddresses != nil { dstI.AuxAddresses = make(map[string]*net.IPNet) for k, v := range i.AuxAddresses { dstI.AuxAddresses[k] = types.GetIPNetCopy(v) } } return nil } func (n *network) CopyTo(o datastore.KVObject) error { n.Lock() defer n.Unlock() dstN := o.(*network) dstN.name = n.name dstN.id = n.id dstN.networkType = n.networkType dstN.scope = n.scope dstN.ipamType = n.ipamType dstN.enableIPv6 = n.enableIPv6 dstN.persist = n.persist dstN.postIPv6 = n.postIPv6 dstN.dbIndex = n.dbIndex dstN.dbExists = n.dbExists dstN.drvOnce = n.drvOnce dstN.internal = n.internal for _, v4conf := range n.ipamV4Config { dstV4Conf := &IpamConf{} v4conf.CopyTo(dstV4Conf) dstN.ipamV4Config = append(dstN.ipamV4Config, dstV4Conf) } for _, v4info := range n.ipamV4Info { dstV4Info := &IpamInfo{} v4info.CopyTo(dstV4Info) dstN.ipamV4Info = append(dstN.ipamV4Info, dstV4Info) } for _, v6conf := range n.ipamV6Config { dstV6Conf := &IpamConf{} v6conf.CopyTo(dstV6Conf) dstN.ipamV6Config = append(dstN.ipamV6Config, dstV6Conf) } for _, v6info := range n.ipamV6Info { dstV6Info := &IpamInfo{} v6info.CopyTo(dstV6Info) dstN.ipamV6Info = append(dstN.ipamV6Info, dstV6Info) } dstN.generic = options.Generic{} for k, v := range n.generic { dstN.generic[k] = v } return nil } func (n *network) DataScope() string { return n.Scope() } func (n *network) getEpCnt() *endpointCnt { n.Lock() defer n.Unlock() return n.epCnt } // TODO : Can be made much more generic with the help of reflection (but has some golang limitations) func (n *network) MarshalJSON() ([]byte, error) { netMap := make(map[string]interface{}) netMap["name"] = n.name netMap["id"] = n.id netMap["networkType"] = n.networkType netMap["scope"] = n.scope netMap["ipamType"] = n.ipamType netMap["addrSpace"] = n.addrSpace netMap["enableIPv6"] = n.enableIPv6 if n.generic != nil { netMap["generic"] = n.generic } netMap["persist"] = n.persist netMap["postIPv6"] = n.postIPv6 if len(n.ipamV4Config) > 0 { ics, err := json.Marshal(n.ipamV4Config) if err != nil { return nil, err } netMap["ipamV4Config"] = string(ics) } if len(n.ipamV4Info) > 0 { iis, err := json.Marshal(n.ipamV4Info) if err != nil { return nil, err } netMap["ipamV4Info"] = string(iis) } if len(n.ipamV6Config) > 0 { ics, err := json.Marshal(n.ipamV6Config) if err != nil { return nil, err } netMap["ipamV6Config"] = string(ics) } if len(n.ipamV6Info) > 0 { iis, err := json.Marshal(n.ipamV6Info) if err != nil { return nil, err } netMap["ipamV6Info"] = string(iis) } netMap["internal"] = n.internal return json.Marshal(netMap) } // TODO : Can be made much more generic with the help of reflection (but has some golang limitations) func (n *network) UnmarshalJSON(b []byte) (err error) { var netMap map[string]interface{} if err := json.Unmarshal(b, &netMap); err != nil { return err } n.name = netMap["name"].(string) n.id = netMap["id"].(string) n.networkType = netMap["networkType"].(string) n.enableIPv6 = netMap["enableIPv6"].(bool) if v, ok := netMap["generic"]; ok { n.generic = v.(map[string]interface{}) // Restore opts in their map[string]string form if v, ok := n.generic[netlabel.GenericData]; ok { var lmap map[string]string ba, err := json.Marshal(v) if err != nil { return err } if err := json.Unmarshal(ba, &lmap); err != nil { return err } n.generic[netlabel.GenericData] = lmap } } if v, ok := netMap["persist"]; ok { n.persist = v.(bool) } if v, ok := netMap["postIPv6"]; ok { n.postIPv6 = v.(bool) } if v, ok := netMap["ipamType"]; ok { n.ipamType = v.(string) } else { n.ipamType = 
ipamapi.DefaultIPAM } if v, ok := netMap["addrSpace"]; ok { n.addrSpace = v.(string) } if v, ok := netMap["ipamV4Config"]; ok { if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Config); err != nil { return err } } if v, ok := netMap["ipamV4Info"]; ok { if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Info); err != nil { return err } } if v, ok := netMap["ipamV6Config"]; ok { if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Config); err != nil { return err } } if v, ok := netMap["ipamV6Info"]; ok { if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Info); err != nil { return err } } if v, ok := netMap["internal"]; ok { n.internal = v.(bool) } if s, ok := netMap["scope"]; ok { n.scope = s.(string) } return nil } // NetworkOption is an option setter function type used to pass various options to // NewNetwork method. The various setter functions of type NetworkOption are // provided by libnetwork, they look like NetworkOptionXXXX(...) type NetworkOption func(n *network) // NetworkOptionGeneric function returns an option setter for a Generic option defined // in a Dictionary of Key-Value pair func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption { return func(n *network) { if n.generic == nil { n.generic = make(map[string]interface{}) } if val, ok := generic[netlabel.EnableIPv6]; ok { n.enableIPv6 = val.(bool) } if val, ok := generic[netlabel.Internal]; ok { n.internal = val.(bool) } for k, v := range generic { n.generic[k] = v } } } // NetworkOptionPersist returns an option setter to set persistence policy for a network func NetworkOptionPersist(persist bool) NetworkOption { return func(n *network) { n.persist = persist } } // NetworkOptionEnableIPv6 returns an option setter to explicitly configure IPv6 func NetworkOptionEnableIPv6(enableIPv6 bool) NetworkOption { return func(n *network) { if n.generic == nil { n.generic = make(map[string]interface{}) } n.enableIPv6 = enableIPv6 n.generic[netlabel.EnableIPv6] = enableIPv6 } } // NetworkOptionInternalNetwork returns an option setter to config the network // to be internal which disables default gateway service func NetworkOptionInternalNetwork() NetworkOption { return func(n *network) { if n.generic == nil { n.generic = make(map[string]interface{}) } n.internal = true n.generic[netlabel.Internal] = true } } // NetworkOptionIpam function returns an option setter for the ipam configuration for this network func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ipV6 []*IpamConf, opts map[string]string) NetworkOption { return func(n *network) { if ipamDriver != "" { n.ipamType = ipamDriver } n.ipamOptions = opts n.addrSpace = addrSpace n.ipamV4Config = ipV4 n.ipamV6Config = ipV6 } } // NetworkOptionDriverOpts function returns an option setter for any parameter described by a map func NetworkOptionDriverOpts(opts map[string]string) NetworkOption { return func(n *network) { if n.generic == nil { n.generic = make(map[string]interface{}) } if opts == nil { opts = make(map[string]string) } // Store the options n.generic[netlabel.GenericData] = opts } } // NetworkOptionDeferIPv6Alloc instructs the network to defer the IPV6 address allocation until after the endpoint has been created // It is being provided to support the specific docker daemon flags where user can deterministically assign an IPv6 address // to a container as combination of fixed-cidr-v6 + mac-address // TODO: Remove this option setter once we support endpoint ipam options func NetworkOptionDeferIPv6Alloc(enable bool) 
NetworkOption { return func(n *network) { n.postIPv6 = enable } } func (n *network) processOptions(options ...NetworkOption) { for _, opt := range options { if opt != nil { opt(n) } } } func (n *network) driverScope() string { c := n.getController() c.Lock() // Check if a driver for the specified network type is available dd, ok := c.drivers[n.networkType] c.Unlock() if !ok { var err error dd, err = c.loadDriver(n.networkType) if err != nil { // If driver could not be resolved simply return an empty string return "" } } return dd.capability.DataScope } func (n *network) driver(load bool) (driverapi.Driver, error) { c := n.getController() c.Lock() // Check if a driver for the specified network type is available dd, ok := c.drivers[n.networkType] c.Unlock() if !ok && load { var err error dd, err = c.loadDriver(n.networkType) if err != nil { return nil, err } } else if !ok { // don't fail if driver loading is not required return nil, nil } n.Lock() n.scope = dd.capability.DataScope n.Unlock() return dd.driver, nil } func (n *network) Delete() error { n.Lock() c := n.ctrlr name := n.name id := n.id n.Unlock() n, err := c.getNetworkFromStore(id) if err != nil { return &UnknownNetworkError{name: name, id: id} } numEps := n.getEpCnt().EndpointCnt() if numEps != 0 { return &ActiveEndpointsError{name: n.name, id: n.id} } if err = n.deleteNetwork(); err != nil { return err } defer func() { if err != nil { if e := c.addNetwork(n); e != nil { log.Warnf("failed to rollback deleteNetwork for network %s: %v", n.Name(), err) } } }() // deleteFromStore performs an atomic delete operation and the // network.epCnt will help prevent any possible // race between endpoint join and network delete if err = n.getController().deleteFromStore(n.getEpCnt()); err != nil { return fmt.Errorf("error deleting network endpoint count from store: %v", err) } n.ipamRelease() if err = n.getController().deleteFromStore(n); err != nil { return fmt.Errorf("error deleting network from store: %v", err) } return nil } func (n *network) deleteNetwork() error { d, err := n.driver(true) if err != nil { return fmt.Errorf("failed deleting network: %v", err) } if err := d.DeleteNetwork(n.ID()); err != nil { // Forbidden Errors should be honored if _, ok := err.(types.ForbiddenError); ok { return err } if _, ok := err.(types.MaskableError); !ok { log.Warnf("driver error deleting network %s : %v", n.name, err) } } return nil } func (n *network) addEndpoint(ep *endpoint) error { d, err := n.driver(true) if err != nil { return fmt.Errorf("failed to add endpoint: %v", err) } err = d.CreateEndpoint(n.id, ep.id, ep.Interface(), ep.generic) if err != nil { return types.InternalErrorf("failed to create endpoint %s on network %s: %v", ep.Name(), n.Name(), err) } return nil } func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) { var err error if !config.IsValidName(name) { return nil, ErrInvalidName(name) } if _, err = n.EndpointByName(name); err == nil { return nil, types.ForbiddenErrorf("service endpoint with name %s already exists", name) } ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}} ep.id = stringid.GenerateRandomID() // Initialize ep.network with a possibly stale copy of n. We need this to get network from // store. But once we get it from store we will have the most uptodate copy possibly. 
ep.network = n ep.locator = n.getController().clusterHostID() ep.network, err = ep.getNetworkFromStore() if err != nil { return nil, fmt.Errorf("failed to get network during CreateEndpoint: %v", err) } n = ep.network ep.processOptions(options...) if opt, ok := ep.generic[netlabel.MacAddress]; ok { if mac, ok := opt.(net.HardwareAddr); ok { ep.iface.mac = mac } } ipam, err := n.getController().getIPAM(n.ipamType) if err != nil { return nil, err } if ipam.capability.RequiresMACAddress { if ep.iface.mac == nil { ep.iface.mac = netutils.GenerateRandomMAC() } if ep.ipamOptions == nil { ep.ipamOptions = make(map[string]string) } ep.ipamOptions[netlabel.MacAddress] = ep.iface.mac.String() } if err = ep.assignAddress(ipam.driver, true, !n.postIPv6); err != nil { return nil, err } defer func() { if err != nil { ep.releaseAddress() } }() if err = n.addEndpoint(ep); err != nil { return nil, err } defer func() { if err != nil { if e := ep.deleteEndpoint(false); e != nil { log.Warnf("cleaning up endpoint failed %s : %v", name, e) } } }() if err = ep.assignAddress(ipam.driver, false, n.postIPv6); err != nil { return nil, err } if err = n.getController().updateToStore(ep); err != nil { return nil, err } defer func() { if err != nil { if e := n.getController().deleteFromStore(ep); e != nil { log.Warnf("error rolling back endpoint %s from store: %v", name, e) } } }() // Watch for service records n.getController().watchSvcRecord(ep) defer func() { if err != nil { n.getController().unWatchSvcRecord(ep) } }() // Increment endpoint count to indicate completion of endpoint addition if err = n.getEpCnt().IncEndpointCnt(); err != nil { return nil, err } return ep, nil } func (n *network) Endpoints() []Endpoint { var list []Endpoint endpoints, err := n.getEndpointsFromStore() if err != nil { log.Error(err) } for _, ep := range endpoints { list = append(list, ep) } return list } func (n *network) WalkEndpoints(walker EndpointWalker) { for _, e := range n.Endpoints() { if walker(e) { return } } } func (n *network) EndpointByName(name string) (Endpoint, error) { if name == "" { return nil, ErrInvalidName(name) } var e Endpoint s := func(current Endpoint) bool { if current.Name() == name { e = current return true } return false } n.WalkEndpoints(s) if e == nil { return nil, ErrNoSuchEndpoint(name) } return e, nil } func (n *network) EndpointByID(id string) (Endpoint, error) { if id == "" { return nil, ErrInvalidID(id) } ep, err := n.getEndpointFromStore(id) if err != nil { return nil, ErrNoSuchEndpoint(id) } return ep, nil } func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) { epName := ep.Name() if iface := ep.Iface(); iface.Address() != nil { myAliases := ep.MyAliases() if isAdd { // If anonymous endpoint has an alias use the first alias // for ip->name mapping. 
Not having the reverse mapping // breaks some apps if ep.isAnonymous() { if len(myAliases) > 0 { n.addSvcRecords(myAliases[0], iface.Address().IP, true) } } else { n.addSvcRecords(epName, iface.Address().IP, true) } for _, alias := range myAliases { n.addSvcRecords(alias, iface.Address().IP, false) } } else { if ep.isAnonymous() { if len(myAliases) > 0 { n.deleteSvcRecords(myAliases[0], iface.Address().IP, true) } } else { n.deleteSvcRecords(epName, iface.Address().IP, true) } for _, alias := range myAliases { n.deleteSvcRecords(alias, iface.Address().IP, false) } } } } func (n *network) addSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { c := n.getController() c.Lock() defer c.Unlock() sr, ok := c.svcDb[n.ID()] if !ok { sr = svcInfo{ svcMap: make(map[string][]net.IP), ipMap: make(map[string]string), } c.svcDb[n.ID()] = sr } if ipMapUpdate { reverseIP := netutils.ReverseIP(epIP.String()) if _, ok := sr.ipMap[reverseIP]; !ok { sr.ipMap[reverseIP] = name } } ipList := sr.svcMap[name] for _, ip := range ipList { if ip.Equal(epIP) { return } } sr.svcMap[name] = append(sr.svcMap[name], epIP) } func (n *network) deleteSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { c := n.getController() c.Lock() defer c.Unlock() sr, ok := c.svcDb[n.ID()] if !ok { return } if ipMapUpdate { delete(sr.ipMap, netutils.ReverseIP(epIP.String())) } ipList := sr.svcMap[name] for i, ip := range ipList { if ip.Equal(epIP) { ipList = append(ipList[:i], ipList[i+1:]...) break } } sr.svcMap[name] = ipList if len(ipList) == 0 { delete(sr.svcMap, name) } } func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record { n.Lock() defer n.Unlock() var recs []etchosts.Record sr, _ := n.ctrlr.svcDb[n.id] for h, ip := range sr.svcMap { if ep != nil && strings.Split(h, ".")[0] == ep.Name() { continue } recs = append(recs, etchosts.Record{ Hosts: h, IP: ip[0].String(), }) } return recs } func (n *network) getController() *controller { n.Lock() defer n.Unlock() return n.ctrlr } func (n *network) ipamAllocate() error { // For now also exclude bridge from using new ipam if n.Type() == "host" || n.Type() == "null" { return nil } ipam, err := n.getController().getIpamDriver(n.ipamType) if err != nil { return err } if n.addrSpace == "" { if n.addrSpace, err = n.deriveAddressSpace(); err != nil { return err } } err = n.ipamAllocateVersion(4, ipam) if err != nil { return err } defer func() { if err != nil { n.ipamReleaseVersion(4, ipam) } }() return n.ipamAllocateVersion(6, ipam) } func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { var ( cfgList *[]*IpamConf infoList *[]*IpamInfo err error ) switch ipVer { case 4: cfgList = &n.ipamV4Config infoList = &n.ipamV4Info case 6: cfgList = &n.ipamV6Config infoList = &n.ipamV6Info default: return types.InternalErrorf("incorrect ip version passed to ipam allocate: %d", ipVer) } if len(*cfgList) == 0 { if ipVer == 6 { return nil } *cfgList = []*IpamConf{{}} } *infoList = make([]*IpamInfo, len(*cfgList)) log.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID()) for i, cfg := range *cfgList { if err = cfg.Validate(); err != nil { return err } d := &IpamInfo{} (*infoList)[i] = d d.PoolID, d.Pool, d.Meta, err = ipam.RequestPool(n.addrSpace, cfg.PreferredPool, cfg.SubPool, n.ipamOptions, ipVer == 6) if err != nil { return err } defer func() { if err != nil { if err := ipam.ReleasePool(d.PoolID); err != nil { log.Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), n.ID()) } } }() if gws, ok 
:= d.Meta[netlabel.Gateway]; ok { if d.Gateway, err = types.ParseCIDR(gws); err != nil { return types.BadRequestErrorf("failed to parse gateway address (%v) returned by ipam driver: %v", gws, err) } } // If user requested a specific gateway, libnetwork will allocate it // irrespective of whether ipam driver returned a gateway already. // If none of the above is true, libnetwork will allocate one. if cfg.Gateway != "" || d.Gateway == nil { var gatewayOpts = map[string]string{ ipamapi.RequestAddressType: netlabel.Gateway, } if d.Gateway, _, err = ipam.RequestAddress(d.PoolID, net.ParseIP(cfg.Gateway), gatewayOpts); err != nil { return types.InternalErrorf("failed to allocate gateway (%v): %v", cfg.Gateway, err) } } // Auxiliary addresses must be part of the master address pool // If they fall into the container addressable pool, libnetwork will reserve them if cfg.AuxAddresses != nil { var ip net.IP d.IPAMData.AuxAddresses = make(map[string]*net.IPNet, len(cfg.AuxAddresses)) for k, v := range cfg.AuxAddresses { if ip = net.ParseIP(v); ip == nil { return types.BadRequestErrorf("non parsable secondary ip address (%s:%s) passed for network %s", k, v, n.Name()) } if !d.Pool.Contains(ip) { return types.ForbiddenErrorf("auxilairy address: (%s:%s) must belong to the master pool: %s", k, v, d.Pool) } // Attempt reservation in the container addressable pool, silent the error if address does not belong to that pool if d.IPAMData.AuxAddresses[k], _, err = ipam.RequestAddress(d.PoolID, ip, nil); err != nil && err != ipamapi.ErrIPOutOfRange { return types.InternalErrorf("failed to allocate secondary ip address (%s:%s): %v", k, v, err) } } } } return nil } func (n *network) ipamRelease() { // For now exclude host and null if n.Type() == "host" || n.Type() == "null" { return } ipam, err := n.getController().getIpamDriver(n.ipamType) if err != nil { log.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err) return } n.ipamReleaseVersion(4, ipam) n.ipamReleaseVersion(6, ipam) } func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) { var infoList []*IpamInfo switch ipVer { case 4: infoList = n.ipamV4Info case 6: infoList = n.ipamV6Info default: log.Warnf("incorrect ip version passed to ipam release: %d", ipVer) return } if infoList == nil { return } log.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID()) for _, d := range infoList { if d.Gateway != nil { if err := ipam.ReleaseAddress(d.PoolID, d.Gateway.IP); err != nil { log.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err) } } if d.IPAMData.AuxAddresses != nil { for k, nw := range d.IPAMData.AuxAddresses { if d.Pool.Contains(nw.IP) { if err := ipam.ReleaseAddress(d.PoolID, nw.IP); err != nil && err != ipamapi.ErrIPOutOfRange { log.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err) } } } } if err := ipam.ReleasePool(d.PoolID); err != nil { log.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err) } } } func (n *network) getIPInfo(ipVer int) []*IpamInfo { var info []*IpamInfo switch ipVer { case 4: info = n.ipamV4Info case 6: info = n.ipamV6Info default: return nil } l := make([]*IpamInfo, 0, len(info)) n.Lock() for _, d := range info { l = append(l, d) } n.Unlock() return l } func (n *network) getIPData(ipVer int) []driverapi.IPAMData { var info []*IpamInfo 
switch ipVer { case 4: info = n.ipamV4Info case 6: info = n.ipamV6Info default: return nil } l := make([]driverapi.IPAMData, 0, len(info)) n.Lock() for _, d := range info { l = append(l, d.IPAMData) } n.Unlock() return l } func (n *network) deriveAddressSpace() (string, error) { c := n.getController() c.Lock() ipd, ok := c.ipamDrivers[n.ipamType] c.Unlock() if !ok { return "", types.NotFoundErrorf("could not find ipam driver %s to get default address space", n.ipamType) } if n.DataScope() == datastore.GlobalScope { return ipd.defaultGlobalAddressSpace, nil } return ipd.defaultLocalAddressSpace, nil } func (n *network) Info() NetworkInfo { return n } func (n *network) DriverOptions() map[string]string { n.Lock() defer n.Unlock() if n.generic != nil { if m, ok := n.generic[netlabel.GenericData]; ok { return m.(map[string]string) } } return map[string]string{} } func (n *network) Scope() string { n.Lock() defer n.Unlock() return n.scope } func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) { n.Lock() defer n.Unlock() v4L := make([]*IpamConf, len(n.ipamV4Config)) v6L := make([]*IpamConf, len(n.ipamV6Config)) for i, c := range n.ipamV4Config { cc := &IpamConf{} c.CopyTo(cc) v4L[i] = cc } for i, c := range n.ipamV6Config { cc := &IpamConf{} c.CopyTo(cc) v6L[i] = cc } return n.ipamType, n.ipamOptions, v4L, v6L } func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { n.Lock() defer n.Unlock() v4Info := make([]*IpamInfo, len(n.ipamV4Info)) v6Info := make([]*IpamInfo, len(n.ipamV6Info)) for i, info := range n.ipamV4Info { ic := &IpamInfo{} info.CopyTo(ic) v4Info[i] = ic } for i, info := range n.ipamV6Info { ic := &IpamInfo{} info.CopyTo(ic) v6Info[i] = ic } return v4Info, v6Info } func (n *network) Internal() bool { n.Lock() defer n.Unlock() return n.internal } func (n *network) IPv6Enabled() bool { n.Lock() defer n.Unlock() return n.enableIPv6 }
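// --- Hedged illustration (not part of the original file; a standalone snippet) ---
// NetworkOptionGeneric, NetworkOptionEnableIPv6 and the other NetworkOption
// setters above follow Go's "functional options" pattern: each option is a
// closure that mutates the value under construction, and processOptions applies
// them in order. A minimal self-contained sketch of the same shape, with
// hypothetical names:
package main

import "fmt"

type server struct {
	port    int
	useIPv6 bool
}

// option mirrors the NetworkOption func(n *network) signature.
type option func(*server)

func withPort(p int) option {
	return func(s *server) { s.port = p }
}

func withIPv6(on bool) option {
	return func(s *server) { s.useIPv6 = on }
}

// newServer mirrors how libnetwork's constructor applies options via
// processOptions: defaults first, then each closure in turn.
func newServer(opts ...option) *server {
	s := &server{port: 80} // defaults
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", *newServer(withPort(8080), withIPv6(true)))
}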
apache-2.0
aidancasey/azure-sdk-tools
WindowsAzurePowershell/src/Management.Test/Tests/Services/SubscriptionsManagerTests.cs
4186
using System;
using System.IO;
using System.Security.Cryptography.X509Certificates;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Microsoft.WindowsAzure.Management.Model;
using Microsoft.WindowsAzure.Management.Properties;
using Microsoft.WindowsAzure.Management.Services;
using Microsoft.WindowsAzure.Management.Test.TestData;
using System.Linq;
using Microsoft.WindowsAzure.Management.Utilities;
using Microsoft.WindowsAzure.Management.XmlSchema;

namespace Microsoft.WindowsAzure.Management.Test.Tests.Services
{
    [TestClass]
    public class SubscriptionsManagerTests
    {
        [TestInitialize]
        public void TestInitialize()
        {
            GlobalPathInfo.GlobalSettingsDirectory = Data.AzureAppDir;
            Directory.CreateDirectory(GlobalPathInfo.GlobalSettingsDirectory);
        }

        [TestMethod]
        public void TestImportSubscriptions()
        {
            for (var i = 0; i < Data.ValidPublishSettings.Count; i++)
            {
                var publishSettings = General.DeserializeXmlFile<PublishData>(Data.ValidPublishSettings[i]);
                var subscriptionsManager = SubscriptionsManager.Import(
                    Data.ValidSubscriptionsData[i],
                    publishSettings);

                // All subscriptions from both the publish settings file and the subscriptions file were imported
                Assert.AreEqual(5, subscriptionsManager.Subscriptions.Count);
                Assert.IsTrue(Data.ValidSubscriptionName.SequenceEqual(subscriptionsManager.Subscriptions.Keys));
            }
        }

        [TestMethod]
        [ExpectedException(typeof(InvalidOperationException))]
        public void TestImportSubscriptionsInvalidSubscriptionData()
        {
            for (var i = 0; i < Data.ValidPublishSettings.Count; i++)
            {
                try
                {
                    var publishSettings = General.DeserializeXmlFile<PublishData>(Data.ValidPublishSettings[i]);
                    SubscriptionsManager.Import(
                        Data.InvalidSubscriptionsData[i],
                        publishSettings);
                }
                catch (InvalidOperationException exception)
                {
                    Assert.AreEqual(
                        string.Format(Resources.InvalidSubscriptionsDataSchema, Data.InvalidSubscriptionsData[i]),
                        exception.Message);
                    throw;
                }
            }
        }

        [TestMethod]
        public void TestSaveSubscriptions()
        {
            for (var i = 0; i < Data.ValidPublishSettings.Count; i++)
            {
                var globalComponents = GlobalComponents.CreateFromPublishSettings(GlobalPathInfo.GlobalSettingsDirectory, null, Data.ValidPublishSettings[i]);

                var subscriptionsManager = SubscriptionsManager.Import(
                    Data.ValidSubscriptionsData[i],
                    globalComponents.PublishSettings,
                    globalComponents.Certificate);

                var newSubscription = new SubscriptionData
                {
                    SubscriptionName = "newsubscription",
                    IsDefault = false,
                    SubscriptionId = "id"
                };

                subscriptionsManager.Subscriptions[newSubscription.SubscriptionName] = newSubscription;
                subscriptionsManager.SaveSubscriptions(Path.Combine(GlobalPathInfo.GlobalSettingsDirectory, "test.xml"));

                var newSubscriptionsManager = SubscriptionsManager.Import(
                    Path.Combine(GlobalPathInfo.GlobalSettingsDirectory, "test.xml"),
                    globalComponents.PublishSettings,
                    globalComponents.Certificate);

                var addedSubscription = newSubscriptionsManager.Subscriptions.Values.Single(
                    subscription => subscription.SubscriptionName == newSubscription.SubscriptionName);
                Assert.AreEqual(newSubscription.SubscriptionId, addedSubscription.SubscriptionId);

                globalComponents.DeleteGlobalComponents();
            }
        }
    }
}
apache-2.0
markosbg/debug
rabix-bindings-draft2/src/main/java/org/rabix/bindings/draft2/helper/Draft2BeanHelper.java
1015
package org.rabix.bindings.draft2.helper;

import java.util.List;
import java.util.Map;

public class Draft2BeanHelper {

  public static <T> T getValue(String key, Object raw) {
    return getValue(key, raw, null);
  }

  /**
   * Looks up <code>key</code> in <code>raw</code>: maps are queried directly,
   * lists are searched recursively, and the first non-null result is returned.
   * Two quirks worth knowing: a null <code>raw</code> yields null rather than
   * <code>defaultValue</code>, and a non-null <code>defaultValue</code> ends a
   * list search at its first element, because the recursive lookup then returns
   * the default instead of null.
   */
  @SuppressWarnings("unchecked")
  public static <T> T getValue(String key, Object raw, T defaultValue) {
    if (raw == null) {
      return null;
    }
    if (raw instanceof Map<?, ?>) {
      T value = (T) ((Map<?, ?>) raw).get(key);
      if (value != null) {
        return value;
      }
    }
    if (raw instanceof List<?>) {
      for (Object rawItem : (List<?>) raw) {
        T result = getValue(key, rawItem, defaultValue);
        if (result != null) {
          return result;
        }
      }
    }
    return defaultValue;
  }

  /** Sets <code>key</code> on <code>raw</code> when it is a map; a no-op otherwise. */
  @SuppressWarnings("unchecked")
  public static void setValue(String key, Object value, Object raw) {
    if (raw == null) {
      return;
    }
    if (raw instanceof Map<?, ?>) {
      ((Map<String, Object>) raw).put(key, value);
    }
  }
}
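// --- Hedged usage sketch (not part of the original file; a standalone snippet
// in the same package). The example data below is hypothetical:
class Draft2BeanHelperExample {
  public static void main(String[] args) {
    java.util.Map<String, Object> item = new java.util.HashMap<>();
    item.put("class", "File");
    java.util.List<Object> inputs = java.util.Collections.singletonList(item);

    // Map lookup reached through a list: returns "File".
    String cls = Draft2BeanHelper.getValue("class", inputs);
    // Missing key with a non-null default: returns "-". (See the caveat in the
    // javadoc above: a non-null default ends the list search at its first element.)
    String missing = Draft2BeanHelper.getValue("path", inputs, "-");
    System.out.println(cls + " " + missing);
  }
}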
apache-2.0
jamiepg1/xcode-maven-plugin
modules/xcode-maven-plugin/src/main/java/com/sap/prd/mobile/ios/mios/XCodeVersionInfoMojo.java
13368
/* * #%L * xcode-maven-plugin * %% * Copyright (C) 2012 SAP AG * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.sap.prd.mobile.ios.mios; import static java.lang.String.format; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import javax.xml.bind.JAXBException; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.OutputKeys; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.TransformerFactoryConfigurationError; import javax.xml.transform.stream.StreamResult; import javax.xml.transform.stream.StreamSource; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.maven.artifact.Artifact; import org.apache.maven.execution.MavenSession; import org.apache.maven.plugin.MojoExecutionException; import org.apache.maven.plugin.MojoFailureException; import org.apache.maven.project.MavenProjectHelper; import org.sonatype.aether.RepositorySystem; import org.sonatype.aether.RepositorySystemSession; import org.sonatype.aether.repository.RemoteRepository; import org.xml.sax.SAXException; import com.sap.prd.mobile.ios.mios.CodeSignManager.ExecResult; import com.sap.prd.mobile.ios.mios.CodeSignManager.ExecutionResultVerificationException; import com.sap.prd.mobile.ios.mios.versioninfo.v_1_2_2.Dependency; /** * Generates a &lt;artifact-id&gt;-&lt;version&gt;-version.xml for reproducibility reasons. This * versions.xml contains information about the scm location and revision of the built project and * all its dependencies. Expects a sync.info file in the root folder of the project as input. * * * The sync.info file is a property file. If used with perforce it must contain the following entries: * <code> * <ul> * <li> type=perforce * <li> port=&lt;The url of the perforce server&gt; * <li> depotpath=&lt;The path synced on the perforce server&gt; * <li> changelist=&lt;The changelist of the change that is being built&gt * </ul> * </code> * * * If used with git it must contain the following entries: * * <code> * <ul> * <li> type=git * <li> repo=&lt;The git repository&gt; * <li> commitId=&lt;The commitId of the change that is being built&gt; * </ul> * </code> * * For git based projects the sync.info file can be created with the following code snipped executed before the xcode-maven-plugin is triggered. * * <pre> * echo "type=git" > sync.info * echo "repo=scm:git:$(git remote -v |awk '/fetch/ {print $2;}')" >> sync.info * echo "commitId=$(git rev-parse HEAD)" >> sync.info * </pre> * * @goal attach-version-info * @requiresDependencyResolution */ public class XCodeVersionInfoMojo extends BuildContextAwareMojo { /** * * @parameter default-value="${session}" * @required * @readonly */ protected MavenSession mavenSession; /** * The entry point to Aether, i.e. the component doing all the work. 
* * @component */ protected RepositorySystem repoSystem; /** * The current repository/network configuration of Maven. * * @parameter default-value="${repositorySystemSession}" * @readonly */ protected RepositorySystemSession repoSession; /** * The project's remote repositories to use for the resolution of project dependencies. * * @parameter default-value="${project.remoteProjectRepositories}" * @readonly */ protected List<RemoteRepository> projectRepos; /** * @component */ private MavenProjectHelper projectHelper; /** * @parameter expression="${sync.info.file}" default-value="sync.info" */ private String syncInfo; /** * If <code>true</code> the build fails if it does not find a sync.info file in the root directory * * @parameter expression="${xcode.failOnMissingSyncInfo}" default-value="false" */ private boolean failOnMissingSyncInfo; /** * If <code>true</code> confidential information is removed from artifacts to be released. * * @parameter expression="${xcode.hideConfidentialInformation}" default-value="true" */ private boolean hideConfidentialInformation; @Override public void execute() throws MojoExecutionException, MojoFailureException { final File syncInfoFile = new File(mavenSession.getExecutionRootDirectory(), syncInfo); if (!syncInfoFile.exists()) { if (failOnMissingSyncInfo) { throw new MojoExecutionException("Sync info file '" + syncInfoFile.getAbsolutePath() + "' not found. Please configure your SCM plugin accordingly."); } getLog().info("The optional sync info file '" + syncInfoFile.getAbsolutePath() + "' not found. Cannot attach versions.xml to build results."); return; } getLog().info("Sync info file found: '" + syncInfoFile.getAbsolutePath() + "'. Creating versions.xml file."); final File versionsXmlFile = new File(project.getBuild().getDirectory(), "versions.xml"); FileOutputStream os = null; try { os = new FileOutputStream(versionsXmlFile); new VersionInfoXmlManager().createVersionInfoFile(project.getGroupId(), project.getArtifactId(), project.getVersion(), syncInfoFile, getDependencies(), os); } catch (IOException e) { throw new MojoExecutionException(e.getMessage(), e); } finally { IOUtils.closeQuietly(os); } final File versionsPlistFile = new File(project.getBuild().getDirectory(), "versions.plist"); if (versionsPlistFile.exists()) { if(!versionsPlistFile.delete()) { throw new IllegalStateException(String.format("Cannot delete already existing plist file (%s)", versionsPlistFile)); } } try { new VersionInfoPListManager().createVersionInfoPlistFile(project.getGroupId(), project.getArtifactId(), project.getVersion(), syncInfoFile, getDependencies(), versionsPlistFile, hideConfidentialInformation); } catch (IOException e) { throw new MojoExecutionException(e.getMessage(), e); } try { if (PackagingType.getByMavenType(packaging) == PackagingType.APP) { try { copyVersionsFilesAndSign(); } catch (IOException e) { throw new MojoExecutionException(e.getMessage(), e); } catch (ExecutionResultVerificationException e) { throw new MojoExecutionException(e.getMessage(), e); } catch (XCodeException e) { throw new MojoExecutionException(e.getMessage(), e); } } } catch (PackagingType.UnknownPackagingTypeException ex) { getLog().warn("Unknown packaing type detected.", ex); } projectHelper.attachArtifact(project, "xml", "versions", versionsXmlFile); getLog().info("versions.xml '" + versionsXmlFile + " attached as additional artifact."); } private void copyVersionsFilesAndSign() throws IOException, ExecutionResultVerificationException, XCodeException, MojoExecutionException { for (final 
String configuration : getConfigurations()) { for (final String sdk : getSDKs()) { if (sdk.startsWith("iphoneos")) { File versionsXmlInBuild = new File(project.getBuild().getDirectory(), "versions.xml"); File versionsPListInBuild = new File(project.getBuild().getDirectory(), "versions.plist"); File rootDir = XCodeBuildLayout.getAppFolder(getXCodeCompileDirectory(), configuration, sdk); String productName = getProductName(configuration, sdk); File appFolder = new File(rootDir, productName + ".app"); File versionsXmlInApp = new File(appFolder, "versions.xml"); File versionsPListInApp = new File(appFolder, "versions.plist"); CodeSignManager.verify(appFolder); final ExecResult originalCodesignEntitlementsInfo = CodeSignManager .getCodesignEntitlementsInformation(appFolder); final ExecResult originalSecurityCMSMessageInfo = CodeSignManager.getSecurityCMSInformation(appFolder); try { if (hideConfidentialInformation) { transformVersionsXml(versionsXmlInBuild, versionsXmlInApp); } else { FileUtils.copyFile(versionsXmlInBuild, versionsXmlInApp); } } catch (Exception e) { throw new MojoExecutionException("Could not transform versions.xml: " + e.getMessage(), e); } getLog().info("Versions.xml file copied from: '" + versionsXmlInBuild + " ' to ' " + versionsXmlInApp); FileUtils.copyFile(versionsPListInBuild, versionsPListInApp); getLog().info("Versions.plist file copied from: '" + versionsPListInBuild + " ' to ' " + versionsPListInApp); sign(rootDir, configuration, sdk); final ExecResult resignedCodesignEntitlementsInfo = CodeSignManager .getCodesignEntitlementsInformation(appFolder); final ExecResult resignedSecurityCMSMessageInfo = CodeSignManager.getSecurityCMSInformation(appFolder); CodeSignManager.verify(appFolder); CodeSignManager.verify(originalCodesignEntitlementsInfo, resignedCodesignEntitlementsInfo); CodeSignManager.verify(originalSecurityCMSMessageInfo, resignedSecurityCMSMessageInfo); } } } } void transformVersionsXml(File versionsXmlInBuild, File versionsXmlInApp) throws ParserConfigurationException, SAXException, IOException, TransformerFactoryConfigurationError, TransformerException, XCodeException { final InputStream transformerRule = getClass().getClassLoader().getResourceAsStream( "versionInfoCensorTransformation.xml"); if (transformerRule == null) { throw new XCodeException("Could not read transformer rule."); } try { final Transformer transformer = TransformerFactory.newInstance().newTransformer( new StreamSource(transformerRule)); transformer.setOutputProperty(OutputKeys.STANDALONE, "yes"); transformer.setOutputProperty(OutputKeys.INDENT, "yes"); transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2"); transformer.transform(new StreamSource(versionsXmlInBuild), new StreamResult(versionsXmlInApp)); } finally { IOUtils.closeQuietly(transformerRule); } } private void sign(File rootDir, String configuration, String sdk) throws IOException, XCodeException { String csi = EffectiveBuildSettings.getBuildSetting( getXCodeContext(XCodeContext.SourceCodeLocation.WORKING_COPY, configuration, sdk), EffectiveBuildSettings.CODE_SIGN_IDENTITY); File appFolder = new File(EffectiveBuildSettings.getBuildSetting( getXCodeContext(XCodeContext.SourceCodeLocation.WORKING_COPY, configuration, sdk), EffectiveBuildSettings.CODESIGNING_FOLDER_PATH)); CodeSignManager.sign(csi, appFolder, true); } private List<Dependency> getDependencies() throws IOException { List<Dependency> result = new ArrayList<Dependency>(); for (@SuppressWarnings("rawtypes") final Iterator it = 
project.getDependencyArtifacts().iterator(); it.hasNext();) { final Artifact mainArtifact = (Artifact) it.next(); try { org.sonatype.aether.artifact.Artifact sideArtifact = new XCodeDownloadManager(projectRepos, repoSystem, repoSession).resolveSideArtifact(mainArtifact, "versions", "xml"); getLog().info("Version information retrieved for artifact: " + mainArtifact); addParsedVersionsXmlDependency(result, sideArtifact); } catch (SideArtifactNotFoundException e) { getLog().warn("Could not retrieve version information for artifact:" + mainArtifact); } } return result; } void addParsedVersionsXmlDependency(List<Dependency> result, org.sonatype.aether.artifact.Artifact sideArtifact) throws IOException { try { result.add(VersionInfoXmlManager.parseDependency(sideArtifact.getFile())); } catch (SAXException e) { getLog().warn(format( "Version file '%s' for artifact '%s' contains invalid content (Non parsable XML). Ignoring this file.", (sideArtifact.getFile() != null ? sideArtifact.getFile() : "<n/a>"), sideArtifact)); } catch (JAXBException e) { getLog().warn(format( "Version file '%s' for artifact '%s' contains invalid content (Scheme violation). Ignoring this file.", (sideArtifact.getFile() != null ? sideArtifact.getFile() : "<n/a>"), sideArtifact)); } } }
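// --- Hedged illustration (not part of the original file) ---
// Per the class javadoc above, a git-based sync.info consumed by this mojo is a
// plain property file along these lines (values below are made up):
//
//   type=git
//   repo=scm:git:https://example.org/myproject.git
//   commitId=0123456789abcdef0123456789abcdef01234567
//
// The perforce variant uses the keys type, port, depotpath and changelist instead.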
apache-2.0
victorbrodsky/order-lab
orderflex/src/App/OrderformBundle/Repository/PartRepository.php
16367
<?php /** * Copyright (c) 2017 Cornell University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ namespace App\OrderformBundle\Repository; use App\OrderformBundle\Entity\Part; /** * PartRepository * * This class was generated by the Doctrine ORM. Add your own custom * repository methods below. */ class PartRepository extends ArrayFieldAbstractRepository { // //if this element does not have any slide belonging to this order (with id=null) or children (empty branch for this message), // //so remove this element and all its parents from message // public function attachToMessage( $entity, $message ) { // // $children = $entity->getChildren(); // // $ret = 0; // $countNotEmptyChildren = 0; // // foreach( $children as $child ) { // $childClass = new \ReflectionClass($child); // $childClassName = $childClass->getShortName(); // if( $childClassName == "Block" ) { // //echo "check if this block has slides belongs to this message <br>"; // $slides = $child->getChildren(); // foreach( $slides as $slide ) { // $res = $this->isEntityBelongsToMessage( $slide, $message ); // if( $res ) { // $countNotEmptyChildren++; // } // } // } else // if( $childClassName == "Slide") { // //echo "check if this slide belongs to this message <br>"; // $res = $this->isEntityBelongsToMessage( $child, $message ); // if( $res ) { // $countNotEmptyChildren++; // } // } else { // throw new \Exception('Part has not valid child of the class ' . $childClassName ); // } // } // // if( $countNotEmptyChildren == 0 ) { // $this->removeThisAndAllParentsFromMessage($entity,$message); // $ret = -1; // } else { // //echo "added to message: Part ret=".$ret.", count=".count($entity->getChildren())."<br>"; // //echo $entity."<br>"; // $message->addPart($entity); // $ret = 1; // } // // return $ret; // } // public function attachToParent( $part, $block ) { // // $childClass = new \ReflectionClass($block); // $childClassName = $childClass->getShortName(); // //echo "childClassName=".$childClassName."<br>"; // // if( $childClassName == "Slide" ) { // parent::attachToParent( $part, $block ); //call parent method to simple attach slide to part // return; // } // // if( $block ) { // // //echo "adding block?: ".$block; // //do it, if the block is new. If block has ID then it was found in DB and it was created by someone else. // //if( !$block->getId() || $block->getId() == null || $block->getId() == "" ) { // //echo "block slides=".count($block->getChildren())."<br>"; // //add only if this block has slides // if( count($block->getChildren()) > 0 ) { //TODO: testing // //echo "block has slides<br>"; // $part->addChildren($block); // //// //replace similar child. For example, the form can have two blocks: Block 1 and Block 1 attached to the same Part. //// //So, use only one block instead of creating two same entity in DB. 
//// $sameChild = $this->findSimilarChild($part,$block); //// if( $sameChild ) { //// //attach all sub-children to found similar child //// $children = $block->getChildren(); //// foreach( $children as $child ) { //// $sameChild->addChildren($child); //// } //// } else { //// $part->addChildren($block); //// } // } else { // //remove block if it does not have any slides // //echo "remove block <br>"; // $part->removeBlock($block); // $block->setPart(null); // } // //} // //echo $block; // // } // // } //override parent method to get next key string public function getNextNonProvided( $entity, $extra=null, $message=null, $prefixname=null ) { $accession= $entity->getParent(); //echo $entity; //echo $accession; $key = $accession->obtainValidKeyfield(); $accessionNumber = $key.""; $keytype = $key->getKeytype()->getId(); return $this->findNextPartnameByAccession( $entity->getInstitution()->getId(), $accessionNumber, $keytype, $message ); } public function findNextPartnameByAccession( $institution, $accessionNumber, $keytype, $message=null ) { if( !$institution || $institution == "" || !$accessionNumber || $accessionNumber == "" ) { return null; } //echo "findNextPartnameByAccession: accessionNumber=".$accessionNumber."<br>"; //$name = "NOPARTNAMEPROVIDED"; $reflPart = new Part(); $name = $reflPart->obtainNoprovidedKeyPrefix(); //institution //TODO: change institution hierarchy and add collaboration //$inst = " AND p.institution=".$institution; $permittedInstitution = $this->_em->getRepository('AppUserdirectoryBundle:Institution')->find($institution); $inst = " AND (" . $this->_em->getRepository('AppUserdirectoryBundle:Institution')-> getCriterionStrForCollaborationsByNode($permittedInstitution,"institution",array("Union","Intersection")) . ")"; $query = $this->getEntityManager() ->createQuery(' SELECT MAX(ppartname.field) as max'.'partname'.' FROM AppOrderformBundle:Part p JOIN p.institution institution JOIN p.partname ppartname JOIN p.accession a JOIN a.accession aa WHERE ppartname.field LIKE :name AND aa.field = :accession AND aa.keytype = :keytype' . $inst )->setParameter('name', '%'.$name.'%')->setParameter('accession', $accessionNumber."")->setParameter('keytype', $keytype); $lastField = $query->getSingleResult(); $index = 'max'.'partname'; $lastFieldStr = $lastField[$index]; //echo "lastFieldStr=".$lastFieldStr."<br>"; //return $this->getNextByMax($lastFieldStr, $name); $maxKey = $this->getNextByMax($lastFieldStr, $name); //check if the valid bigger key was already assigned to the element of the same class attached to this order if( $message ) { $className = "Part"; $getSameEntity = "get".$className; foreach( $message->$getSameEntity() as $same ) { if( $same->getStatus() == self::STATUS_VALID ) { $key = $same->obtainValidKeyfield(); $newBiggerKey = $this->getBiggerKey($maxKey,$key,$name); if( $newBiggerKey != -1 ) { $maxKey = $newBiggerKey; } } } } //return $this->getNextByMax($lastFieldStr, $name); return $maxKey; } //create new Part by provided accession number. 
Used only by check controller, when user click generate part number public function createPartByAccession( $institution, $accessionNumber, $keytype, $provider ) { //echo "accessionNumber=".$accessionNumber."<br>"; if( !$accessionNumber || $accessionNumber == "" ) { return null; } $institutions = array($institution); $accessionNumber = $accessionNumber.""; $extra = array(); $extra['keytype'] = $keytype; $extra['accession'] = $accessionNumber; $validity = array(self::STATUS_RESERVED); //false; $withfields = false; $em = $this->_em; //1a) Check accession $accession = $em->getRepository('AppOrderformBundle:Accession')->findOneByIdJoinedToField($institutions, $accessionNumber,"Accession","accession",$validity,true,$extra); //find multi: all accessions with given $accessionNumber if( !$accession ) { //echo "accession is not found in DB, accessionNumber=".$accessionNumber."<br>"; //1) create Accession if not existed. We must create parent (accession), because we will create part object which must be linked to its parent // $status, $provider, $className, $fieldName, $parent, $fieldValue $accession = $em->getRepository('AppOrderformBundle:Accession')->createElement($institution, null,$provider,"Accession","accession",null,$accessionNumber,$extra,$withfields); } //2) find next available part name by accession number $partname = $em->getRepository('AppOrderformBundle:Part')->findNextPartnameByAccession($institution,$accessionNumber,$keytype); //3) before part create: check if part with $partname does not exists in DB $partFound = $this->findOnePartByJoinedToField( $institutions, $accessionNumber, $keytype, $partname, null ); //validity=null - it was called by check button //TODO: If someone generated this name already (very low probability), so regenerate key field name (?) if( $partFound ) { return $partFound; } //echo "create part, accession=".$accession->getAccession()->first().", partid=".$accession->getId()."<br>"; //exit(); //echo "create part <br>"; //4) create part object by partname and link it to the parent $part = $em->getRepository('AppOrderformBundle:Part')->createElement($institution,null,$provider,"Part","partname",$accession,$partname,$extra,$withfields); return $part; } //override parent method to find unique entity in DB public function findUniqueByKey($entity) { $partname = $entity->obtainValidKeyfield().""; $accession = $entity->getAccession(); $key = $accession->obtainValidKeyfield(); $accessionNumber = $key.""; $keytype = $key->getKeytype()->getId(); $validity = array(self::STATUS_VALID,self::STATUS_RESERVED); //false; $institutions = array($entity->getInstitution()->getId()); return $this->findOnePartByJoinedToField( $institutions, $accessionNumber, $keytype, $partname, $validity ); } public function findOneByIdJoinedToField( $institutions, $fieldStr, $className, $fieldName, $validity=null, $single=true, $extra=null ) { $accessionNumber = $extra['accession']; $keytype = $extra['keytype']; //echo "accessionNumber=".$accessionNumber."|, keytype=".$keytype."| "; return $this->findOnePartByJoinedToField( $institutions, $accessionNumber, $keytype, $fieldStr, $validity, $single ); } //$accession - Accession number (string) //$partname - Part name (string) public function findOnePartByJoinedToField( $institutions, $accession, $keytype, $partname, $validities=null, $single=true ) { //echo "PART find: accession=".$accession.", keytype=".$keytype.", partname=".$partname.", validities=".implode(",", $validities)." 
\n <br>"; if( count($institutions) == 0 || !$accession || $accession == "" || !$keytype || $keytype == "" || !$partname || $partname == "" ) { return null; } if( $validities != null && is_array($validities) == false ) { throw new \Exception( 'Validity is provided, but not as array; validities=' . $validities ); } $parameters = array(); $parameters['field'] = $partname.""; $extraStr = ""; if( $accession && $accession != "" ) { $extraStr = ' AND aa.field = :accession AND aa.keytype = :keytype'; $parameters['accession'] = $accession.""; $parameters['keytype'] = $keytype.""; } //add validity conditions $validityStr = ""; if( $validities && is_array($validities) && count($validities)>0 ) { $validityStr = " AND aa.status=:keyfieldstatus AND pfield.status=:keyfieldstatus"; $validityStr .= " AND aa.status='".self::STATUS_VALID."' AND ("; $count = 1; foreach( $validities as $validity ) { $validityStr .= "p.status='".$validity."'"; if( $count < count($validities) ) { $validityStr .= " OR "; } $count++; } $validityStr .= ")"; $parameters['keyfieldstatus'] = self::STATUS_VALID; } //echo "validityStr=".$validityStr." <br> "; //add institution conditions //TODO: change institution hierarchy and add collaboration $instStr = ""; if( $institutions && is_array($institutions) && count($institutions)>0 ) { $instStr = " AND ("; $count = 1; foreach( $institutions as $inst ) { //$instStr .= "p.institution=".$inst.""; $permittedInstitution = $this->_em->getRepository('AppUserdirectoryBundle:Institution')->find($inst); $instStr .= $this->_em->getRepository('AppUserdirectoryBundle:Institution')-> getCriterionStrForCollaborationsByNode($permittedInstitution,"institution",array("Union","Intersection")); if( $count < count($institutions) ) { $instStr .= " OR "; } $count++; } $instStr .= ")"; } //echo "instStr=".$instStr." <br> "; $dql = ' SELECT p FROM AppOrderformBundle:Part p JOIN p.institution institution JOIN p.partname pfield JOIN p.accession a JOIN a.accession aa WHERE pfield.field = :field' . $extraStr . $validityStr . $instStr; //echo "dql=".$dql."<br>"; $query = $this->getEntityManager()->createQuery($dql)->setParameters($parameters); // ->createQuery($dql)->setParameter('field', $partname.""); // if( $accession && $accession != "" ) { // $query->setParameter('accession', $accession."") // ->setParameter('keytype', $keytype.""); // } $parts = $query->getResult(); if( $parts ) { //echo "parts count=".count($parts)."|"; if( $single ) { return $parts[0]; } else { return $parts; } } else { //echo "parts with partname=".$partname.",accession=".$accession." is not found |<br>"; return null; } } public function findOneByInstAccessionPart($institution,$accessionTypeStr,$accessionStr,$partStr) { $accessiontype = $this->_em->getRepository('AppOrderformBundle:AccessionType')->findOneByName($accessionTypeStr); $institutions = array(); $institutions[] = $institution; $validity = array(self::STATUS_VALID,self::STATUS_RESERVED); $single = true; //$institutions, $accession, $keytype, $partname, $validities=null, $single=true $part = $this->_em->getRepository('AppOrderformBundle:Part')->findOnePartByJoinedToField( $institutions, $accessionStr, $accessiontype->getId(), $partStr, $validity, $single ); return $part; } }
apache-2.0
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.eclipse.gmf.esb.diagram/src/org/wso2/developerstudio/eclipse/gmf/esb/diagram/providers/assistants/EsbModelingAssistantProviderOfSmooksMediatorEditPart.java
899
package org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.assistants; import java.util.ArrayList; import java.util.List; import org.eclipse.core.runtime.IAdaptable; import org.eclipse.gmf.runtime.emf.type.core.IElementType; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.EsbElementTypes; import org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.EsbModelingAssistantProvider; /** * @generated */ public class EsbModelingAssistantProviderOfSmooksMediatorEditPart extends EsbModelingAssistantProvider { /** * @generated */ @Override public List<IElementType> getTypesForPopupBar(IAdaptable host) { List<IElementType> types = new ArrayList<IElementType>(2); types.add(EsbElementTypes.SmooksMediatorInputConnector_3082); types.add(EsbElementTypes.SmooksMediatorOutputConnector_3083); return types; } }
apache-2.0
sachindeorah/geofeatures
GeoFeatures/boost/geometry/strategies/geographic/distance_andoyer.hpp
6683
// Boost.Geometry (aka GGL, Generic Geometry Library)

// Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.

// This file was modified by Oracle on 2014.
// Modifications copyright (c) 2014 Oracle and/or its affiliates.

// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle

// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_GEOMETRY_STRATEGIES_GEOGRAPHIC_ANDOYER_HPP
#define BOOST_GEOMETRY_STRATEGIES_GEOGRAPHIC_ANDOYER_HPP

#include <boost/geometry/core/coordinate_type.hpp>
#include <boost/geometry/core/radian_access.hpp>
#include <boost/geometry/core/radius.hpp>
#include <boost/geometry/core/srs.hpp>

#include <boost/geometry/algorithms/detail/flattening.hpp>

#include <boost/geometry/strategies/distance.hpp>

#include <boost/geometry/util/math.hpp>
#include <boost/geometry/util/promote_floating_point.hpp>
#include <boost/geometry/util/select_calculation_type.hpp>

namespace geofeatures_boost {} namespace boost = geofeatures_boost; namespace geofeatures_boost { namespace geometry
{

namespace strategy { namespace distance
{

/*!
\brief Point-point distance approximation taking flattening into account
\ingroup distance
\tparam Spheroid The reference spheroid model
\tparam CalculationType \tparam_calculation
\author After Andoyer, 19xx, republished 1950, republished by Meeus, 1999
\note Although not so well-known, the approximation is very good: in all cases the results
are about the same as Vincenty. In my (Barend's) testcases the results didn't differ more than 6 m
\see http://nacc.upc.es/tierra/node16.html
\see http://sci.tech-archive.net/Archive/sci.geo.satellite-nav/2004-12/2724.html
\see http://home.att.net/~srschmitt/great_circle_route.html (implementation)
\see http://www.codeguru.com/Cpp/Cpp/algorithms/article.php/c5115 (implementation)
\see http://futureboy.homeip.net/frinksamp/navigation.frink (implementation)
\see http://www.voidware.com/earthdist.htm (implementation)
*/
template
<
    typename Spheroid,
    typename CalculationType = void
>
class andoyer
{
public :
    template <typename Point1, typename Point2>
    struct calculation_type
        : promote_floating_point
          <
              typename select_calculation_type
                  <
                      Point1,
                      Point2,
                      CalculationType
                  >::type
          >
    {};

    typedef Spheroid model_type;

    inline andoyer()
        : m_spheroid()
    {}

    explicit inline andoyer(Spheroid const& spheroid)
        : m_spheroid(spheroid)
    {}

    template <typename Point1, typename Point2>
    inline typename calculation_type<Point1, Point2>::type
    apply(Point1 const& point1, Point2 const& point2) const
    {
        return calc<typename calculation_type<Point1, Point2>::type>
            (
                get_as_radian<0>(point1), get_as_radian<1>(point1),
                get_as_radian<0>(point2), get_as_radian<1>(point2)
            );
    }

    inline Spheroid const& model() const
    {
        return m_spheroid;
    }

private :
    template <typename CT, typename T>
    inline CT calc(T const& lon1, T const& lat1,
                   T const& lon2, T const& lat2) const
    {
        CT const G = (lat1 - lat2) / 2.0;
        CT const lambda = (lon1 - lon2) / 2.0;

        if (geometry::math::equals(lambda, 0.0)
            && geometry::math::equals(G, 0.0))
        {
            return 0.0;
        }

        CT const F = (lat1 + lat2) / 2.0;

        CT const sinG2 = math::sqr(sin(G));
        CT const cosG2 = math::sqr(cos(G));
        CT const sinF2 = math::sqr(sin(F));
        CT const cosF2 = math::sqr(cos(F));
        CT const sinL2 = math::sqr(sin(lambda));
        CT const cosL2 = math::sqr(cos(lambda));

        CT const S = sinG2 * cosL2 + cosF2 * sinL2;
        CT const C = cosG2 * cosL2 + sinF2 * sinL2;

        CT const c0 = 0;
        CT const c1 = 1;
        CT const c2 = 2;
        CT const c3 = 3;

        if (geometry::math::equals(S, c0) || geometry::math::equals(C, c0))
        {
            return c0;
        }

        CT const radius_a = CT(get_radius<0>(m_spheroid));
        CT const flattening = geometry::detail::flattening<CT>(m_spheroid);

        CT const omega = atan(math::sqrt(S / C));
        CT const r3 = c3 * math::sqrt(S * C) / omega; // not sure if this is r or greek nu
        CT const D = c2 * omega * radius_a;
        CT const H1 = (r3 - c1) / (c2 * C);
        CT const H2 = (r3 + c1) / (c2 * S);

        return D * (c1 + flattening * (H1 * sinF2 * cosG2 - H2 * cosF2 * sinG2));
    }

    Spheroid m_spheroid;
};


#ifndef DOXYGEN_NO_STRATEGY_SPECIALIZATIONS
namespace services
{

template <typename Spheroid, typename CalculationType>
struct tag<andoyer<Spheroid, CalculationType> >
{
    typedef strategy_tag_distance_point_point type;
};

template <typename Spheroid, typename CalculationType, typename P1, typename P2>
struct return_type<andoyer<Spheroid, CalculationType>, P1, P2>
    : andoyer<Spheroid, CalculationType>::template calculation_type<P1, P2>
{};

template <typename Spheroid, typename CalculationType>
struct comparable_type<andoyer<Spheroid, CalculationType> >
{
    typedef andoyer<Spheroid, CalculationType> type;
};

template <typename Spheroid, typename CalculationType>
struct get_comparable<andoyer<Spheroid, CalculationType> >
{
    static inline andoyer<Spheroid, CalculationType>
        apply(andoyer<Spheroid, CalculationType> const& input)
    {
        return input;
    }
};

template <typename Spheroid, typename CalculationType, typename P1, typename P2>
struct result_from_distance<andoyer<Spheroid, CalculationType>, P1, P2>
{
    template <typename T>
    static inline typename return_type<andoyer<Spheroid, CalculationType>, P1, P2>::type
        apply(andoyer<Spheroid, CalculationType> const& , T const& value)
    {
        return value;
    }
};


template <typename Point1, typename Point2>
struct default_strategy<point_tag, point_tag, Point1, Point2, geographic_tag, geographic_tag>
{
    typedef strategy::distance::andoyer
        <
            srs::spheroid
                <
                    typename select_coordinate_type<Point1, Point2>::type
                >
        > type;
};

} // namespace services
#endif // DOXYGEN_NO_STRATEGY_SPECIALIZATIONS


}} // namespace strategy::distance


}} // namespace geofeatures_boost::geometry


#endif // BOOST_GEOMETRY_STRATEGIES_GEOGRAPHIC_ANDOYER_HPP
apache-2.0
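The calc() routine above evaluates Andoyer's first-order flattening correction: with F = (lat1+lat2)/2, G = (lat1-lat2)/2, lambda = (lon1-lon2)/2 and omega = atan(sqrt(S/C)), the distance is D = 2*omega*a, scaled by 1 + f*(H1*sin^2(F)*cos^2(G) - H2*cos^2(F)*sin^2(G)). A minimal usage sketch follows; it is not part of this file and assumes the upstream Boost.Geometry spelling (this vendored copy aliases boost to geofeatures_boost, so the same calls resolve):

#include <iostream>
#include <boost/geometry.hpp>

namespace bg = boost::geometry;

int main()
{
    // Geographic points in degrees: (longitude, latitude).
    typedef bg::model::point<double, 2, bg::cs::geographic<bg::degree> > point;
    point amsterdam(4.90, 52.37);
    point paris(2.35, 48.86);

    // A default-constructed spheroid uses WGS-84 radii.
    typedef bg::srs::spheroid<double> spheroid;
    bg::strategy::distance::andoyer<spheroid> andoyer;

    // Distance in meters, with the flattening correction applied.
    std::cout << bg::distance(amsterdam, paris, andoyer) << " m" << std::endl;
    return 0;
}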
javimosch/hideberg
src/client/smc/js/controllers/Orders/Modals/FinishCtrl.js
1942
angular.module('shopmycourse.controllers') /** * @name OrdersFinishCtrl * @function Controller * @memberOf shopmycourse.controllers * @description Finalizes the order */ .controller('OrdersFinishCtrl', function($scope, $ionicLoading, $ionicSlideBoxDelegate, $ionicPopup, $ionicHistory, DeliveryAPI, CurrentDelivery) { /** * @name $scope.disableSwipe * @description Disables swiping between views */ $scope.disableSwipe = function() { $ionicSlideBoxDelegate.enableSlide(false); }; /** * @name $scope.nextSlide * @description Advances to the next slide */ $scope.nextSlide = function () { $ionicSlideBoxDelegate.next(); }; /** * @name finalize * @description Finalizes the order */ function finalize(order) { $ionicLoading.show({ template: 'Nous enregistrons votre avis...' }); DeliveryAPI.finalize({ 'idDelivery': order.id, 'rating': $scope.ratingStar }, function() { $ionicLoading.hide(); CurrentDelivery.clear(); $ionicHistory.clearHistory(); $ionicSlideBoxDelegate.next(); }, function(err) { console.error(err); $ionicLoading.hide(); }); } /** * @name $scope.finalizeDelivery * @description Starts order finalization, checking that a rating was given */ $scope.finalizeDelivery = function(order) { if (!$scope.ratingStar) { var myPopup = $ionicPopup.confirm({ template: 'Vous n\'avez pas noté le livreur, êtes-vous sûr ?', title: 'Notation du livreur', okText: 'OK', cancelText: 'retour' }); myPopup.then(function(res) { if (res) { finalize(order); } }); } else { finalize(order); } }; /** * @name $scope.setRatingStar * @description Updates the star count of a rating */ $scope.setRatingStar = function(newRating) { if(!$scope.order.buyer_rating) { $scope.ratingStar = newRating; } }; })
apache-2.0
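A hedged test sketch for the controller above (not part of the repository): it assumes angular-mocks and plain Jasmine, and stubs every injected service so that the confirmation path of finalizeDelivery can be exercised in isolation:

describe('OrdersFinishCtrl', function () {
  var $scope, finalized, popupResult;

  beforeEach(module('shopmycourse.controllers'));
  beforeEach(inject(function ($rootScope, $controller, $q) {
    $scope = $rootScope.$new();
    finalized = null;
    $controller('OrdersFinishCtrl', {
      $scope: $scope,
      $ionicLoading: { show: angular.noop, hide: angular.noop },
      $ionicSlideBoxDelegate: { next: angular.noop, enableSlide: angular.noop },
      // Resolve the confirmation popup with a preset answer.
      $ionicPopup: { confirm: function () { return $q.when(popupResult); } },
      $ionicHistory: { clearHistory: angular.noop },
      DeliveryAPI: { finalize: function (args, ok) { finalized = args.idDelivery; ok(); } },
      CurrentDelivery: { clear: angular.noop }
    });
  }));

  it('finalizes after the user confirms despite a missing rating', function () {
    popupResult = true;
    $scope.finalizeDelivery({ id: 42 });
    $scope.$digest(); // flush the popup promise
    expect(finalized).toBe(42);
  });
});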
codepoke/libgdx
gdx/src/com/badlogic/gdx/maps/tiled/BaseTmxMapLoader.java
15450
package com.badlogic.gdx.maps.tiled; import com.badlogic.gdx.assets.AssetLoaderParameters; import com.badlogic.gdx.assets.loaders.AsynchronousAssetLoader; import com.badlogic.gdx.assets.loaders.FileHandleResolver; import com.badlogic.gdx.files.FileHandle; import com.badlogic.gdx.graphics.Color; import com.badlogic.gdx.graphics.Texture.TextureFilter; import com.badlogic.gdx.graphics.g2d.TextureRegion; import com.badlogic.gdx.maps.ImageResolver; import com.badlogic.gdx.maps.MapLayer; import com.badlogic.gdx.maps.MapObject; import com.badlogic.gdx.maps.MapProperties; import com.badlogic.gdx.maps.objects.EllipseMapObject; import com.badlogic.gdx.maps.objects.PolygonMapObject; import com.badlogic.gdx.maps.objects.PolylineMapObject; import com.badlogic.gdx.maps.objects.RectangleMapObject; import com.badlogic.gdx.maps.tiled.TiledMapTileLayer.Cell; import com.badlogic.gdx.maps.tiled.objects.TiledMapTileMapObject; import com.badlogic.gdx.math.Polygon; import com.badlogic.gdx.math.Polyline; import com.badlogic.gdx.utils.Base64Coder; import com.badlogic.gdx.utils.GdxRuntimeException; import com.badlogic.gdx.utils.StreamUtils; import com.badlogic.gdx.utils.XmlReader; import com.badlogic.gdx.utils.XmlReader.Element; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.util.StringTokenizer; import java.util.zip.GZIPInputStream; import java.util.zip.InflaterInputStream; public abstract class BaseTmxMapLoader<P extends AssetLoaderParameters<TiledMap>> extends AsynchronousAssetLoader<TiledMap, P> { public static class Parameters extends AssetLoaderParameters<TiledMap> { /** generate mipmaps? **/ public boolean generateMipMaps = false; /** The TextureFilter to use for minification **/ public TextureFilter textureMinFilter = TextureFilter.Nearest; /** The TextureFilter to use for magnification **/ public TextureFilter textureMagFilter = TextureFilter.Nearest; /** Whether to convert the objects' pixel position and size to the equivalent in tile space. **/ public boolean convertObjectToTileSpace = false; /** Whether to flip all Y coordinates so that Y positive is down. All LibGDX renderers require flipped Y coordinates, and * thus flipY set to true. This parameter is included for non-rendering related purposes of TMX files, or custom renderers. 
*/ public boolean flipY = true; } protected static final int FLAG_FLIP_HORIZONTALLY = 0x80000000; protected static final int FLAG_FLIP_VERTICALLY = 0x40000000; protected static final int FLAG_FLIP_DIAGONALLY = 0x20000000; protected static final int MASK_CLEAR = 0xE0000000; protected XmlReader xml = new XmlReader(); protected Element root; protected boolean convertObjectToTileSpace; protected boolean flipY = true; protected int mapTileWidth; protected int mapTileHeight; protected int mapWidthInPixels; protected int mapHeightInPixels; protected TiledMap map; public BaseTmxMapLoader (FileHandleResolver resolver) { super(resolver); } protected void loadTileLayer (TiledMap map, Element element) { if (element.getName().equals("layer")) { int width = element.getIntAttribute("width", 0); int height = element.getIntAttribute("height", 0); int tileWidth = element.getParent().getIntAttribute("tilewidth", 0); int tileHeight = element.getParent().getIntAttribute("tileheight", 0); TiledMapTileLayer layer = new TiledMapTileLayer(width, height, tileWidth, tileHeight); loadBasicLayerInfo(layer, element); int[] ids = getTileIds(element, width, height); TiledMapTileSets tilesets = map.getTileSets(); for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { int id = ids[y * width + x]; boolean flipHorizontally = ((id & FLAG_FLIP_HORIZONTALLY) != 0); boolean flipVertically = ((id & FLAG_FLIP_VERTICALLY) != 0); boolean flipDiagonally = ((id & FLAG_FLIP_DIAGONALLY) != 0); TiledMapTile tile = tilesets.getTile(id & ~MASK_CLEAR); if (tile != null) { Cell cell = createTileLayerCell(flipHorizontally, flipVertically, flipDiagonally); cell.setTile(tile); layer.setCell(x, flipY ? height - 1 - y : y, cell); } } } Element properties = element.getChildByName("properties"); if (properties != null) { loadProperties(layer.getProperties(), properties); } map.getLayers().add(layer); } } protected void loadObjectGroup (TiledMap map, Element element) { if (element.getName().equals("objectgroup")) { String name = element.getAttribute("name", null); MapLayer layer = new MapLayer(); layer.setName(name); Element properties = element.getChildByName("properties"); if (properties != null) { loadProperties(layer.getProperties(), properties); } for (Element objectElement : element.getChildrenByName("object")) { loadObject(map, layer, objectElement); } map.getLayers().add(layer); } } protected void loadImageLayer (TiledMap map, Element element, FileHandle tmxFile, ImageResolver imageResolver) { if (element.getName().equals("imagelayer")) { int x = Integer.parseInt(element.getAttribute("x", "0")); int y = Integer.parseInt(element.getAttribute("y", "0")); if (flipY) y = mapHeightInPixels - y; TextureRegion texture = null; Element image = element.getChildByName("image"); if (image != null) { String source = image.getAttribute("source"); FileHandle handle = getRelativeFileHandle(tmxFile, source); texture = imageResolver.getImage(handle.path()); y -= texture.getRegionHeight(); } TiledMapImageLayer layer = new TiledMapImageLayer(texture, x, y); loadBasicLayerInfo(layer, element); Element properties = element.getChildByName("properties"); if (properties != null) { loadProperties(layer.getProperties(), properties); } map.getLayers().add(layer); } } protected void loadBasicLayerInfo (MapLayer layer, Element element) { String name = element.getAttribute("name", null); float opacity = Float.parseFloat(element.getAttribute("opacity", "1.0")); boolean visible = element.getIntAttribute("visible", 1) == 1; float offsetX = 
element.getFloatAttribute("offsetx", 0); float offsetY = element.getFloatAttribute("offsety", 0); layer.setName(name); layer.setOpacity(opacity); layer.setVisible(visible); layer.setOffsetX(offsetX); layer.setOffsetY(offsetY); } protected void loadObject (TiledMap map, MapLayer layer, Element element) { if (element.getName().equals("object")) { MapObject object = null; float scaleX = convertObjectToTileSpace ? 1.0f / mapTileWidth : 1.0f; float scaleY = convertObjectToTileSpace ? 1.0f / mapTileHeight : 1.0f; float x = element.getFloatAttribute("x", 0) * scaleX; float y = (flipY ? (mapHeightInPixels - element.getFloatAttribute("y", 0)) : element.getFloatAttribute("y", 0)) * scaleY; float width = element.getFloatAttribute("width", 0) * scaleX; float height = element.getFloatAttribute("height", 0) * scaleY; if (element.getChildCount() > 0) { Element child = null; if ((child = element.getChildByName("polygon")) != null) { String[] points = child.getAttribute("points").split(" "); float[] vertices = new float[points.length * 2]; for (int i = 0; i < points.length; i++) { String[] point = points[i].split(","); vertices[i * 2] = Float.parseFloat(point[0]) * scaleX; vertices[i * 2 + 1] = Float.parseFloat(point[1]) * scaleY * (flipY ? -1 : 1); } Polygon polygon = new Polygon(vertices); polygon.setPosition(x, y); object = new PolygonMapObject(polygon); } else if ((child = element.getChildByName("polyline")) != null) { String[] points = child.getAttribute("points").split(" "); float[] vertices = new float[points.length * 2]; for (int i = 0; i < points.length; i++) { String[] point = points[i].split(","); vertices[i * 2] = Float.parseFloat(point[0]) * scaleX; vertices[i * 2 + 1] = Float.parseFloat(point[1]) * scaleY * (flipY ? -1 : 1); } Polyline polyline = new Polyline(vertices); polyline.setPosition(x, y); object = new PolylineMapObject(polyline); } else if ((child = element.getChildByName("ellipse")) != null) { object = new EllipseMapObject(x, flipY ? y - height : y, width, height); } } if (object == null) { String gid = null; if ((gid = element.getAttribute("gid", null)) != null) { int id = (int)Long.parseLong(gid); boolean flipHorizontally = ((id & FLAG_FLIP_HORIZONTALLY) != 0); boolean flipVertically = ((id & FLAG_FLIP_VERTICALLY) != 0); TiledMapTile tile = map.getTileSets().getTile(id & ~MASK_CLEAR); TiledMapTileMapObject tiledMapTileMapObject = new TiledMapTileMapObject(tile, flipHorizontally, flipVertically); TextureRegion textureRegion = tiledMapTileMapObject.getTextureRegion(); tiledMapTileMapObject.getProperties().put("gid", id); tiledMapTileMapObject.setX(x); tiledMapTileMapObject.setY(flipY ? y : y - height); float objectWidth = element.getFloatAttribute("width", textureRegion.getRegionWidth()); float objectHeight = element.getFloatAttribute("height", textureRegion.getRegionHeight()); tiledMapTileMapObject.setScaleX(scaleX * (objectWidth / textureRegion.getRegionWidth())); tiledMapTileMapObject.setScaleY(scaleY * (objectHeight / textureRegion.getRegionHeight())); tiledMapTileMapObject.setRotation(element.getFloatAttribute("rotation", 0)); object = tiledMapTileMapObject; } else { object = new RectangleMapObject(x, flipY ? 
y - height : y, width, height); } } object.setName(element.getAttribute("name", null)); String rotation = element.getAttribute("rotation", null); if (rotation != null) { object.getProperties().put("rotation", Float.parseFloat(rotation)); } String type = element.getAttribute("type", null); if (type != null) { object.getProperties().put("type", type); } int id = element.getIntAttribute("id", 0); if (id != 0) { object.getProperties().put("id", id); } object.getProperties().put("x", x); if (object instanceof TiledMapTileMapObject) { object.getProperties().put("y", y); } else { object.getProperties().put("y", (flipY ? y - height : y)); } object.getProperties().put("width", width); object.getProperties().put("height", height); object.setVisible(element.getIntAttribute("visible", 1) == 1); Element properties = element.getChildByName("properties"); if (properties != null) { loadProperties(object.getProperties(), properties); } layer.getObjects().add(object); } } protected void loadProperties (MapProperties properties, Element element) { if (element == null) return; if (element.getName().equals("properties")) { for (Element property : element.getChildrenByName("property")) { String name = property.getAttribute("name", null); String value = property.getAttribute("value", null); String type = property.getAttribute("type", null); if (value == null) { value = property.getText(); } Object castValue = castProperty(name, value, type); properties.put(name, castValue); } } } private Object castProperty (String name, String value, String type) { if (type == null) { return value; } else if (type.equals("int")) { return Integer.valueOf(value); } else if (type.equals("float")) { return Float.valueOf(value); } else if (type.equals("bool")) { return Boolean.valueOf(value); } else if (type.equals("color")) { // Tiled uses the format #AARRGGBB String opaqueColor = value.substring(3); String alpha = value.substring(1, 3); return Color.valueOf(opaqueColor + alpha); } else { throw new GdxRuntimeException("Wrong type given for property " + name + ", given : " + type + ", supported : string, bool, int, float, color"); } } protected Cell createTileLayerCell (boolean flipHorizontally, boolean flipVertically, boolean flipDiagonally) { Cell cell = new Cell(); if (flipDiagonally) { if (flipHorizontally && flipVertically) { cell.setFlipHorizontally(true); cell.setRotation(Cell.ROTATE_270); } else if (flipHorizontally) { cell.setRotation(Cell.ROTATE_270); } else if (flipVertically) { cell.setRotation(Cell.ROTATE_90); } else { cell.setFlipVertically(true); cell.setRotation(Cell.ROTATE_270); } } else { cell.setFlipHorizontally(flipHorizontally); cell.setFlipVertically(flipVertically); } return cell; } static public int[] getTileIds (Element element, int width, int height) { Element data = element.getChildByName("data"); String encoding = data.getAttribute("encoding", null); if (encoding == null) { // no 'encoding' attribute means that the encoding is XML throw new GdxRuntimeException("Unsupported encoding (XML) for TMX Layer Data"); } int[] ids = new int[width * height]; if (encoding.equals("csv")) { String[] array = data.getText().split(","); for (int i = 0; i < array.length; i++) ids[i] = (int)Long.parseLong(array[i].trim()); } else { if (encoding.equals("base64")) { InputStream is = null; try { String compression = data.getAttribute("compression", null); byte[] bytes = Base64Coder.decode(data.getText()); if (compression == null) is = new ByteArrayInputStream(bytes); else if (compression.equals("gzip")) is = new
BufferedInputStream(new GZIPInputStream(new ByteArrayInputStream(bytes), bytes.length)); else if (compression.equals("zlib")) is = new BufferedInputStream(new InflaterInputStream(new ByteArrayInputStream(bytes))); else throw new GdxRuntimeException("Unrecognised compression (" + compression + ") for TMX Layer Data"); byte[] temp = new byte[4]; for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { int read = is.read(temp); while (read < temp.length) { int curr = is.read(temp, read, temp.length - read); if (curr == -1) break; read += curr; } if (read != temp.length) throw new GdxRuntimeException("Error Reading TMX Layer Data: Premature end of tile data"); ids[y * width + x] = unsignedByteToInt(temp[0]) | unsignedByteToInt(temp[1]) << 8 | unsignedByteToInt(temp[2]) << 16 | unsignedByteToInt(temp[3]) << 24; } } } catch (IOException e) { throw new GdxRuntimeException("Error Reading TMX Layer Data - IOException: " + e.getMessage()); } finally { StreamUtils.closeQuietly(is); } } else { // any other value of 'encoding' is one we're not aware of, probably a feature of a future version of Tiled // or another editor throw new GdxRuntimeException("Unrecognised encoding (" + encoding + ") for TMX Layer Data"); } } return ids; } protected static int unsignedByteToInt (byte b) { return b & 0xFF; } protected static FileHandle getRelativeFileHandle (FileHandle file, String path) { StringTokenizer tokenizer = new StringTokenizer(path, "\\/"); FileHandle result = file.parent(); while (tokenizer.hasMoreElements()) { String token = tokenizer.nextToken(); if (token.equals("..")) result = result.parent(); else { result = result.child(token); } } return result; } }
apache-2.0
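The FLAG_* constants above implement the TMX convention of packing three flip flags into the top bits of each 32-bit gid; MASK_CLEAR strips them back off to recover the tile id. A standalone sketch (not from the file) showing the same unpacking:

public class GidFlagsDemo {
    static final int FLAG_FLIP_HORIZONTALLY = 0x80000000;
    static final int FLAG_FLIP_VERTICALLY = 0x40000000;
    static final int FLAG_FLIP_DIAGONALLY = 0x20000000;
    static final int MASK_CLEAR = 0xE0000000;

    public static void main(String[] args) {
        // 2147483689 == 2^31 + 41: tile 41 with the horizontal-flip bit set.
        int rawGid = (int) Long.parseLong("2147483689");
        boolean flipH = (rawGid & FLAG_FLIP_HORIZONTALLY) != 0;
        boolean flipV = (rawGid & FLAG_FLIP_VERTICALLY) != 0;
        boolean flipD = (rawGid & FLAG_FLIP_DIAGONALLY) != 0;
        int tileId = rawGid & ~MASK_CLEAR; // 41
        System.out.println("tile=" + tileId + " h=" + flipH + " v=" + flipV + " d=" + flipD);
    }
}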
nagyist/marketcetera
photon/photon/plugins/org.marketcetera.photon.module.ui/src/main/java/org/marketcetera/photon/internal/module/ui/ModulePropertiesPreferencePage.java
12254
package org.marketcetera.photon.internal.module.ui; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; import org.eclipse.jface.action.Action; import org.eclipse.jface.action.IMenuListener; import org.eclipse.jface.action.IMenuManager; import org.eclipse.jface.action.MenuManager; import org.eclipse.jface.dialogs.IDialogConstants; import org.eclipse.jface.layout.GridDataFactory; import org.eclipse.jface.layout.GridLayoutFactory; import org.eclipse.jface.preference.PreferencePage; import org.eclipse.jface.viewers.CellEditor; import org.eclipse.jface.viewers.StructuredSelection; import org.eclipse.jface.viewers.TextCellEditor; import org.eclipse.swt.SWT; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.layout.GridLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Control; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Menu; import org.eclipse.swt.widgets.Tree; import org.eclipse.swt.widgets.TreeItem; import org.eclipse.swt.widgets.Widget; import org.eclipse.ui.IWorkbench; import org.eclipse.ui.IWorkbenchPreferencePage; import org.eclipse.ui.views.properties.IPropertyDescriptor; import org.eclipse.ui.views.properties.IPropertySource; import org.eclipse.ui.views.properties.IPropertySourceProvider; import org.eclipse.ui.views.properties.PropertyDescriptor; import org.eclipse.ui.views.properties.PropertySheetEntry; import org.eclipse.ui.views.properties.PropertySheetPage; import org.eclipse.ui.views.properties.TextPropertyDescriptor; import org.marketcetera.photon.module.IModuleAttributeDefaults; import org.marketcetera.photon.module.IModuleAttributeSupport; import org.marketcetera.photon.module.ModuleSupport; import org.marketcetera.photon.module.ui.NewPropertyInputDialog; import org.marketcetera.util.misc.ClassVersion; /* $License$ */ /** * The Module Properties preference page. All properties are stored in a single * Eclipse runtime preference. * * @author <a href="mailto:will@marketcetera.com">Will Horn</a> * @version $Id: ModulePropertiesPreferencePage.java 9999 2008-11-04 22:49:55Z * will $ * @since 1.0.0 */ @ClassVersion("$Id: ModulePropertiesPreferencePage.java 16154 2012-07-14 16:34:05Z colin $") public final class ModulePropertiesPreferencePage extends PreferencePage implements IWorkbenchPreferencePage { /** * Properties separator character. */ private static final String SEPARATOR = "."; //$NON-NLS-1$ /** * Pattern to split properties. */ private static final Pattern SEPARATOR_PATTERN = Pattern.compile("\\."); //$NON-NLS-1$ /** * Pattern to identify passwords. */ private static final Pattern PASSWORD_PATTERN = Pattern.compile(Messages.MODULE_PROPERTIES_PREFERENCE_PAGE_PASSWORD_MATCH.getText(), Pattern.CASE_INSENSITIVE); /** * Masks passwords. */ private static final String PASSWORD_MASK = "**********"; //$NON-NLS-1$ /** * Holds the properties being edited */ private final PropertiesTree mProperties; /** * Root of the UI */ private PropertySheetPage mPage; private final PreferencesAdapter mPreferencesAdapter; /** * Default Constructor. * * Initialized by extension point. 
*/ public ModulePropertiesPreferencePage() { mPreferencesAdapter = new PreferencesAdapter(ModuleSupport.getModuleAttributeSupport()); mProperties = mPreferencesAdapter.toTree(); } @Override public void init(IWorkbench workbench) { } @Override protected Control createContents(Composite parent) { Composite composite = new Composite(parent, SWT.NO_FOCUS); GridLayoutFactory.fillDefaults().applyTo(composite); Label warningLabel = new Label(composite, SWT.WRAP); warningLabel .setText(Messages.MODULE_PROPERTIES_PREFERENCE_PAGE_RESTART_WARNING .getText()); GridDataFactory.defaultsFor(warningLabel).applyTo(warningLabel); // Nest the property sheet page used by the Properties view. mPage = new PropertySheetPage(); mPage.setPropertySourceProvider(new IPropertySourceProvider() { @Override public IPropertySource getPropertySource(Object object) { if (object instanceof IPropertySource) return (IPropertySource) object; return null; } }); mPage.createControl(composite); GridDataFactory.fillDefaults().grab(true, true).applyTo( mPage.getControl()); // Simulate selection of a root property "" mPage.selectionChanged(null, new StructuredSelection( new ModulePropertyNode(""))); //$NON-NLS-1$ // By default properties are lazily loaded when the user expands nodes, // but here we want the user to see all the properties right away. expandAll(); // Right click actions initPopupMenu(); return composite; } private void initPopupMenu() { MenuManager menuMgr = new MenuManager("#PopupMenu"); //$NON-NLS-1$ menuMgr.setRemoveAllWhenShown(true); menuMgr.addMenuListener(new IMenuListener() { @Override public void menuAboutToShow(IMenuManager manager) { final TreeItem[] selection = ((Tree) mPage.getControl()) .getSelection(); // Add if (selection.length <= 1) { manager .add(new Action( Messages.MODULE_PROPERTIES_PREFERENCE_PAGE_ADD_ACTION__LABEL .getText()) { @Override public void run() { String key = ""; //$NON-NLS-1$ if (selection.length == 1) key = ((ModulePropertyNode) ((PropertySheetEntry) selection[0] .getData()).getValues()[0]).mKey + SEPARATOR; // Show Instance Defaults if the selected // property // is level 2, i.e.
a module provider final boolean allowInstanceDefault = (SEPARATOR_PATTERN .split(key).length == 2); NewPropertyInputDialog dialog = new NewPropertyInputDialog( getShell(), allowInstanceDefault); if (dialog.open() == IDialogConstants.OK_ID) { if (dialog.isInstanceDefault()) key += IModuleAttributeDefaults.INSTANCE_DEFAULTS_IDENTIFIER + SEPARATOR; key += dialog.getPropertyKey(); if (!mProperties.containsKey(key)) mProperties.put(key, dialog .getPropertyValue()); mPage.refresh(); if (selection.length == 1) { expand(selection[0]); } else { expandAll(); } } } }); } // Delete if (selection.length >= 1) { manager .add(new Action( Messages.MODULE_PROPERTIES_PREFERENCE_PAGE_DELETE_ACTION__LABEL .getText()) { @Override public void run() { for (int i = 0; i < selection.length; i++) { final String root = ((ModulePropertyNode) ((PropertySheetEntry) selection[i] .getData()).getValues()[0]).mKey; mProperties.remove(root); } mPage.refresh(); } }); } } }); Menu menu = menuMgr.createContextMenu(mPage.getControl()); mPage.getControl().setMenu(menu); } @Override protected void contributeButtons(Composite parent) { // A button to add properties Button button = new Button(parent, SWT.PUSH); button .setText(Messages.MODULE_PROPERTIES_PREFERENCE_PAGE_ADD_BUTTON__LABEL .getText()); GridDataFactory.defaultsFor(button).applyTo(button); button.addSelectionListener(new SelectionAdapter() { @Override public void widgetSelected(SelectionEvent e) { NewPropertyInputDialog dialog = new NewPropertyInputDialog( getShell(), false); if (dialog.open() == IDialogConstants.OK_ID) { final String key = dialog.getPropertyKey(); if (!mProperties.containsKey(key)) mProperties.put(key, dialog.getPropertyValue()); mPage.refresh(); expandAll(); } } }); ((GridLayout) parent.getLayout()).numColumns++; } /** * Helper method to expand entire property tree */ private void expandAll() { Tree tree = (Tree) mPage.getControl(); for (TreeItem item : tree.getItems()) { expand(item); } } /** * Helper method to expand an item in the property tree. * * @param item * tree item to expand. */ private void expand(TreeItem item) { // try to expand the tree using reflection try { Field field = mPage.getClass().getDeclaredField("viewer"); //$NON-NLS-1$ field.setAccessible(true); Object viewer = field.get(mPage); Method method = viewer.getClass().getDeclaredMethod( "createChildren", Widget.class); //$NON-NLS-1$ method.setAccessible(true); method.invoke(viewer, item); } catch (Exception e) { // something went wrong - user can still manually expand return; } item.setExpanded(true); // recurse for (TreeItem child : item.getItems()) { expand(child); } } @Override public boolean performOk() { mPreferencesAdapter.fromTree(mProperties); ModuleSupport.getModuleAttributeSupport().flush(); return true; } /** * {@link IPropertySource} for adapting a {@link PropertiesTree} for * the standard property sheet. * * This class also serves as the ID object for property descriptors. * * @author <a href="mailto:will@marketcetera.com">Will Horn</a> * @version $Id: ModulePropertiesPreferencePage.java 9999 2008-11-04 * 22:49:55Z will $ * @since 1.0.0 */ @ClassVersion("$Id: ModulePropertiesPreferencePage.java 16154 2012-07-14 16:34:05Z colin $") private final class ModulePropertyNode implements IPropertySource { String mKey; /** * Constructor. 
* * @param key * key this node is rooted at */ ModulePropertyNode(String key) { super(); mKey = key; } @Override public int hashCode() { return mKey.hashCode(); } @Override public boolean equals(Object obj) { return mKey.equals(((ModulePropertyNode) obj).mKey); } @Override public Object getEditableValue() { final String value = mProperties.get(mKey); if (value != null) { // mask text if the key is a password final String[] split = SEPARATOR_PATTERN.split(mKey); final String display = split[split.length - 1]; return (!value.isEmpty() && PASSWORD_PATTERN.matcher(display).matches()) ? PASSWORD_MASK : value; } return null; } @Override public IPropertyDescriptor[] getPropertyDescriptors() { List<IPropertyDescriptor> descriptors = new ArrayList<IPropertyDescriptor>(); for (String prefix : mProperties.getChildKeys(mKey)) { final String[] split = SEPARATOR_PATTERN.split(prefix); final String display = split[split.length - 1]; if (split.length <= 2) { descriptors.add(new PropertyDescriptor( new ModulePropertyNode(prefix), display)); } else { if (PASSWORD_PATTERN.matcher(display).matches()) { // mask text if the key is a password descriptors.add(new PropertyDescriptor( new ModulePropertyNode(prefix), display) { @Override public CellEditor createPropertyEditor( Composite parent) { return new TextCellEditor(parent, SWT.PASSWORD); } }); } else { descriptors.add(new TextPropertyDescriptor( new ModulePropertyNode(prefix), display .equals(IModuleAttributeSupport.INSTANCE_DEFAULTS_IDENTIFIER) ? "*Instance Defaults*" //$NON-NLS-1$ : display)); } } } return (IPropertyDescriptor[]) descriptors .toArray(new IPropertyDescriptor[descriptors.size()]); } @Override public Object getPropertyValue(Object id) { // The id of a subtree is the subtree itself. By returning it here, // the properties viewer will recurse into it and use // getEditableValue if (id instanceof ModulePropertyNode) return id; else return mProperties.get(mKey); } @Override public boolean isPropertySet(Object id) { // no defaults return false; } @Override public void resetPropertyValue(Object id) { // no defaults } @Override public void setPropertyValue(Object id, Object value) { if (value == null) return; String key = ((ModulePropertyNode) id).mKey; mProperties.put(key, (String) value); } } }
apache-2.0
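The add action above decides whether to offer the "Instance Defaults" option by counting the dotted segments of the selected key: two segments means a module provider is selected. A hedged sketch of that convention (the key names are illustrative, not taken from the Marketcetera module registry):

import java.util.regex.Pattern;

public class ModuleKeyDepthDemo {
    private static final Pattern SEPARATOR_PATTERN = Pattern.compile("\\.");

    public static void main(String[] args) {
        // The trailing separator mirrors the key built in the add action;
        // Java's split() drops the resulting trailing empty segment.
        String providerKey = "metc.remote.";      // provider level -> 2 segments
        String attributeKey = "metc.remote.URL";  // instance attribute -> 3 segments
        System.out.println(SEPARATOR_PATTERN.split(providerKey).length == 2);  // true
        System.out.println(SEPARATOR_PATTERN.split(attributeKey).length == 2); // false
    }
}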
bshp/midPoint
gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/icon/TopRightIconCssStyle.java
744
/* * Copyright (c) 2010-2019 Evolveum and contributors * * This work is dual-licensed under the Apache License 2.0 * and European Union Public License. See LICENSE file for details. */ package com.evolveum.midpoint.gui.impl.component.icon; /** * Created by honchar */ public class TopRightIconCssStyle implements LayeredIconCssStyle { @Override public String getBasicCssClass() { return "icon-basic-transparent"; } @Override public String getBasicLayerCssClass() { return "icon-basic-layer"; } @Override public String getLayerCssClass() { return "top-right-layer"; } @Override public String getStrokeLayerCssClass() { return "icon-stroke-layer"; } }
apache-2.0
dstufft/cryptography
cryptography/hazmat/primitives/constant_time.py
1590
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import hmac import os import sys import cffi from cryptography.hazmat.bindings.utils import _create_modulename with open(os.path.join(os.path.dirname(__file__), "src/constant_time.h")) as f: TYPES = f.read() with open(os.path.join(os.path.dirname(__file__), "src/constant_time.c")) as f: FUNCTIONS = f.read() _ffi = cffi.FFI() _ffi.cdef(TYPES) _lib = _ffi.verify( source=FUNCTIONS, modulename=_create_modulename([TYPES], FUNCTIONS, sys.version), ext_package="cryptography", ) if hasattr(hmac, "compare_digest"): def bytes_eq(a, b): if not isinstance(a, bytes) or not isinstance(b, bytes): raise TypeError("a and b must be bytes.") return hmac.compare_digest(a, b) else: def bytes_eq(a, b): if not isinstance(a, bytes) or not isinstance(b, bytes): raise TypeError("a and b must be bytes.") return _lib.Cryptography_constant_time_bytes_eq( a, len(a), b, len(b) ) == 1
apache-2.0
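A usage sketch for bytes_eq (not part of this module): unlike ==, which may return as soon as a byte differs, the comparison runs in constant time, so an attacker cannot learn from response timing how long a matching prefix was. Typical use is MAC verification:

import hashlib
import hmac

from cryptography.hazmat.primitives.constant_time import bytes_eq

# Compute an expected MAC and compare it to the one a client provided.
expected = hmac.new(b"server-key", b"message", hashlib.sha256).digest()
provided = hmac.new(b"server-key", b"message", hashlib.sha256).digest()

assert bytes_eq(expected, provided)          # constant-time, True
assert not bytes_eq(expected, b"\x00" * 32)  # constant-time, False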
lburgazzoli/spring-boot
spring-boot-project/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java
4598
/* * Copyright 2012-2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot; import java.util.Collections; import java.util.HashSet; import java.util.Set; import org.springframework.core.env.ConfigurableEnvironment; import org.springframework.core.env.Environment; import org.springframework.core.env.MutablePropertySources; import org.springframework.core.env.PropertySource; import org.springframework.core.env.StandardEnvironment; import org.springframework.util.ClassUtils; import org.springframework.web.context.support.StandardServletEnvironment; /** * Utility class for converting one type of {@link Environment} to another. * * @author Ethan Rubinson * @author Andy Wilkinson * @author Madhura Bhave */ final class EnvironmentConverter { private static final String CONFIGURABLE_WEB_ENVIRONMENT_CLASS = "org.springframework.web.context.ConfigurableWebEnvironment"; private static final Set<String> SERVLET_ENVIRONMENT_SOURCE_NAMES; static { Set<String> names = new HashSet<>(); names.add(StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME); names.add(StandardServletEnvironment.SERVLET_CONFIG_PROPERTY_SOURCE_NAME); names.add(StandardServletEnvironment.JNDI_PROPERTY_SOURCE_NAME); SERVLET_ENVIRONMENT_SOURCE_NAMES = Collections.unmodifiableSet(names); } private final ClassLoader classLoader; /** * Creates a new {@link EnvironmentConverter} that will use the given * {@code classLoader} during conversion. * @param classLoader the class loader to use */ EnvironmentConverter(ClassLoader classLoader) { this.classLoader = classLoader; } /** * Converts the given {@code environment} to the given {@link StandardEnvironment} * type. If the environment is already of the same type, no conversion is performed * and it is returned unchanged. * @param environment the Environment to convert * @param type the type to convert the Environment to * @return the converted Environment */ StandardEnvironment convertEnvironmentIfNecessary(ConfigurableEnvironment environment, Class<? extends StandardEnvironment> type) { if (type.equals(environment.getClass())) { return (StandardEnvironment) environment; } return convertEnvironment(environment, type); } private StandardEnvironment convertEnvironment(ConfigurableEnvironment environment, Class<? extends StandardEnvironment> type) { StandardEnvironment result = createEnvironment(type); result.setActiveProfiles(environment.getActiveProfiles()); result.setConversionService(environment.getConversionService()); copyPropertySources(environment, result); return result; } private StandardEnvironment createEnvironment( Class<? 
extends StandardEnvironment> type) { try { return type.newInstance(); } catch (Exception ex) { return new StandardEnvironment(); } } private void copyPropertySources(ConfigurableEnvironment source, StandardEnvironment target) { removePropertySources(target.getPropertySources(), isServletEnvironment(target.getClass(), this.classLoader)); for (PropertySource<?> propertySource : source.getPropertySources()) { if (!SERVLET_ENVIRONMENT_SOURCE_NAMES.contains(propertySource.getName())) { target.getPropertySources().addLast(propertySource); } } } private boolean isServletEnvironment(Class<?> conversionType, ClassLoader classLoader) { try { Class<?> webEnvironmentClass = ClassUtils .forName(CONFIGURABLE_WEB_ENVIRONMENT_CLASS, classLoader); return webEnvironmentClass.isAssignableFrom(conversionType); } catch (Throwable ex) { return false; } } private void removePropertySources(MutablePropertySources propertySources, boolean isServletEnvironment) { Set<String> names = new HashSet<>(); for (PropertySource<?> propertySource : propertySources) { names.add(propertySource.getName()); } for (String name : names) { if (!isServletEnvironment || !SERVLET_ENVIRONMENT_SOURCE_NAMES.contains(name)) { propertySources.remove(name); } } } }
apache-2.0
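A hedged sketch of what the converter accomplishes; EnvironmentConverter is package-private, so the demo class is assumed to live in org.springframework.boot. Active profiles and the conversion service carry over, while the servlet-specific property sources named in SERVLET_ENVIRONMENT_SOURCE_NAMES are left behind:

package org.springframework.boot;

import org.springframework.core.env.StandardEnvironment;
import org.springframework.web.context.support.StandardServletEnvironment;

class EnvironmentConverterDemo {

    public static void main(String[] args) {
        StandardServletEnvironment webEnvironment = new StandardServletEnvironment();
        webEnvironment.setActiveProfiles("demo");

        EnvironmentConverter converter = new EnvironmentConverter(
                EnvironmentConverterDemo.class.getClassLoader());
        StandardEnvironment plain = converter.convertEnvironmentIfNecessary(
                webEnvironment, StandardEnvironment.class);

        // Profiles survive the conversion...
        System.out.println(String.join(",", plain.getActiveProfiles())); // demo
        // ...but the servlet stub property sources are not copied over.
        System.out.println(plain.getPropertySources().contains(
                StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME)); // false
    }
}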
iouri-s/azure-powershell
src/ServiceManagement/Services/Commands.Test/CloudService/Development/DisableAzureRemoteDesktopCommandTest.cs
9616
// ---------------------------------------------------------------------------------- // // Copyright Microsoft Corporation // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ---------------------------------------------------------------------------------- using System.Collections.Generic; using System.Linq; using Microsoft.WindowsAzure.Commands.CloudService.Development; using Microsoft.WindowsAzure.Commands.CloudService.Development.Scaffolding; using Microsoft.WindowsAzure.Commands.Common.Test.Mocks; using Microsoft.WindowsAzure.Commands.Test.Utilities.Common; using Microsoft.WindowsAzure.Commands.Utilities.CloudService; using Microsoft.WindowsAzure.Commands.Utilities.Common; using Microsoft.WindowsAzure.Commands.Utilities.Common.XmlSchema.ServiceConfigurationSchema; using Microsoft.WindowsAzure.Commands.Common; using Xunit; using Microsoft.Azure.Common.Extensions; namespace Microsoft.WindowsAzure.Commands.Test.CloudService.Development { /// <summary> /// Basic unit tests for the Disable-AzureServiceProjectRemoteDesktop command. /// </summary> public class DisableAzureRemoteDesktopCommandTest : TestBase { private MockCommandRuntime mockCommandRuntime; private AddAzureNodeWebRoleCommand addNodeWebCmdlet; private AddAzureNodeWorkerRoleCommand addNodeWorkerCmdlet; private DisableAzureServiceProjectRemoteDesktopCommand disableRDCmdlet; public DisableAzureRemoteDesktopCommandTest() { AzurePowerShell.ProfileDirectory = Test.Utilities.Common.Data.AzureSdkAppDir; mockCommandRuntime = new MockCommandRuntime(); disableRDCmdlet = new DisableAzureServiceProjectRemoteDesktopCommand(); disableRDCmdlet.CommandRuntime = mockCommandRuntime; } private static void VerifyDisableRoleSettings(CloudServiceProject service) { IEnumerable<RoleSettings> settings = Enumerable.Concat( service.Components.CloudConfig.Role, service.Components.LocalConfig.Role); foreach (RoleSettings roleSettings in settings) { Assert.Equal( 1, roleSettings.ConfigurationSettings .Where(c => c.name == "Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" && c.value == "false") .Count()); } } /// <summary> /// Disable remote desktop for an empty service. /// </summary> [Fact] public void DisableRemoteDesktopForEmptyService() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); files.CreateNewService("NEW_SERVICE"); disableRDCmdlet.DisableRemoteDesktop(); } } /// <summary> /// Disable remote desktop for a simple web role.
/// </summary> [Fact] public void DisableRemoteDesktopForWebRole() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); string rootPath = files.CreateNewService("NEW_SERVICE"); addNodeWebCmdlet = new AddAzureNodeWebRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WebRole" }; addNodeWebCmdlet.ExecuteCmdlet(); disableRDCmdlet.PassThru = true; disableRDCmdlet.DisableRemoteDesktop(); Assert.True((bool)mockCommandRuntime.OutputPipeline[1]); } } /// <summary> /// Disable remote desktop for web and worker roles. /// </summary> [Fact] public void DisableRemoteDesktopForWebAndWorkerRoles() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); string rootPath = files.CreateNewService("NEW_SERVICE"); addNodeWebCmdlet = new AddAzureNodeWebRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WebRole" }; addNodeWebCmdlet.ExecuteCmdlet(); addNodeWorkerCmdlet = new AddAzureNodeWorkerRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WorkerRole" }; addNodeWorkerCmdlet.ExecuteCmdlet(); disableRDCmdlet.DisableRemoteDesktop(); } } /// <summary> /// Enable then disable remote desktop for a simple web role. /// </summary> [Fact] public void EnableDisableRemoteDesktopForWebRole() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); string rootPath = files.CreateNewService("NEW_SERVICE"); addNodeWebCmdlet = new AddAzureNodeWebRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WebRole" }; addNodeWebCmdlet.ExecuteCmdlet(); EnableAzureRemoteDesktopCommandTest.EnableRemoteDesktop("user", "GoodPassword!"); disableRDCmdlet.DisableRemoteDesktop(); // Verify the role has been setup with forwarding, access, // and certs CloudServiceProject service = new CloudServiceProject(rootPath, FileUtilities.GetContentFilePath("Services")); EnableAzureRemoteDesktopCommandTest.VerifyWebRole(service.Components.Definition.WebRole[0], true); VerifyDisableRoleSettings(service); } } /// <summary> /// Enable then disable remote desktop for web and worker roles. /// </summary> [Fact] public void EnableDisableRemoteDesktopForWebAndWorkerRoles() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); string rootPath = files.CreateNewService("NEW_SERVICE"); addNodeWebCmdlet = new AddAzureNodeWebRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WebRole" }; addNodeWebCmdlet.ExecuteCmdlet(); addNodeWorkerCmdlet = new AddAzureNodeWorkerRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WorkerRole" }; addNodeWorkerCmdlet.ExecuteCmdlet(); EnableAzureRemoteDesktopCommandTest.EnableRemoteDesktop("user", "GoodPassword!"); disableRDCmdlet.DisableRemoteDesktop(); // Verify the roles have been setup with forwarding, access, // and certs CloudServiceProject service = new CloudServiceProject(rootPath, FileUtilities.GetContentFilePath("Services")); EnableAzureRemoteDesktopCommandTest.VerifyWebRole(service.Components.Definition.WebRole[0], false); EnableAzureRemoteDesktopCommandTest.VerifyWorkerRole(service.Components.Definition.WorkerRole[0], true); VerifyDisableRoleSettings(service); } } /// <summary> /// Enable then disable remote desktop for web and worker roles. 
/// </summary> [Fact] public void EnableDisableEnableRemoteDesktopForWebAndWorkerRoles() { using (FileSystemHelper files = new FileSystemHelper(this)) { files.CreateAzureSdkDirectoryAndImportPublishSettings(); string rootPath = files.CreateNewService("NEW_SERVICE"); addNodeWebCmdlet = new AddAzureNodeWebRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WebRole" }; addNodeWebCmdlet.ExecuteCmdlet(); addNodeWorkerCmdlet = new AddAzureNodeWorkerRoleCommand() { RootPath = rootPath, CommandRuntime = mockCommandRuntime, Name = "WorkerRole" }; addNodeWorkerCmdlet.ExecuteCmdlet(); EnableAzureRemoteDesktopCommandTest.EnableRemoteDesktop("user", "GoodPassword!"); disableRDCmdlet.DisableRemoteDesktop(); EnableAzureRemoteDesktopCommandTest.EnableRemoteDesktop("user", "GoodPassword!"); // Verify the roles have been setup with forwarding, access, // and certs CloudServiceProject service = new CloudServiceProject(rootPath, FileUtilities.GetContentFilePath("Services")); EnableAzureRemoteDesktopCommandTest.VerifyWebRole(service.Components.Definition.WebRole[0], false); EnableAzureRemoteDesktopCommandTest.VerifyWorkerRole(service.Components.Definition.WorkerRole[0], true); EnableAzureRemoteDesktopCommandTest.VerifyRoleSettings(service); } } } }
apache-2.0
zeliard/aws-sdk-cpp
aws-cpp-sdk-sqs/source/model/CreateQueueResult.cpp
1953
/* * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include <aws/sqs/model/CreateQueueResult.h> #include <aws/core/utils/xml/XmlSerializer.h> #include <aws/core/AmazonWebServiceResult.h> #include <aws/core/utils/StringUtils.h> #include <aws/core/utils/logging/LogMacros.h> #include <utility> using namespace Aws::SQS::Model; using namespace Aws::Utils::Xml; using namespace Aws::Utils::Logging; using namespace Aws::Utils; using namespace Aws; CreateQueueResult::CreateQueueResult() { } CreateQueueResult::CreateQueueResult(const AmazonWebServiceResult<XmlDocument>& result) { *this = result; } CreateQueueResult& CreateQueueResult::operator =(const AmazonWebServiceResult<XmlDocument>& result) { const XmlDocument& xmlDocument = result.GetPayload(); XmlNode rootNode = xmlDocument.GetRootElement(); XmlNode resultNode = rootNode; if (rootNode.GetName() != "CreateQueueResult") { resultNode = rootNode.FirstChild("CreateQueueResult"); } if(!resultNode.IsNull()) { XmlNode queueUrlNode = resultNode.FirstChild("QueueUrl"); if(!queueUrlNode.IsNull()) { m_queueUrl = StringUtils::Trim(queueUrlNode.GetText().c_str()); } } XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata"); m_responseMetadata = responseMetadataNode; AWS_LOGSTREAM_DEBUG("Aws::SQS::Model::CreateQueueResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() ); return *this; }
apache-2.0
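The operator= above tolerates both an unwrapped CreateQueueResult root and the usual CreateQueueResponse wrapper. An illustrative payload of the shape it parses (the queue URL and request id below are made up):

<CreateQueueResponse>
  <CreateQueueResult>
    <QueueUrl>https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue</QueueUrl>
  </CreateQueueResult>
  <ResponseMetadata>
    <RequestId>7a62c49f-347e-4fc4-9331-6e8e7a96aa73</RequestId>
  </ResponseMetadata>
</CreateQueueResponse>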
thebsdbox/infrakit
pkg/fsm/types.go
4453
package fsm // import "github.com/docker/infrakit/pkg/fsm" // ID is the id of the instance in a given set. It's unique in that set. type ID uint64 // FSM is the interface that returns ID and state of the fsm instance safely. type FSM interface { // ID returns the ID of the instance ID() ID // State returns the state of the instance. This is an expensive call to be submitted to queue to view State() Index // Data returns the custom data attached to the instance. It's set via the optional arg in Signal Data() interface{} // Signal signals the instance with optional custom data Signal(Signal, ...interface{}) error // CanReceive returns true if the current state of the instance can receive the given signal CanReceive(Signal) bool } // Index is the index of the state in an FSM type Index int // Action is the action to take when a signal is received, prior to transition // to the next state. The error returned by the function is an exception which // will put the state machine in an error state. This error state is not the same // as some application-specific error state which is a state defined to correspond // to some external event indicating a real-world error event (as opposed to a // programming error here). type Action func(FSM) error // Tick is a unit of time. Time is in relative terms and synchronized with an actual // timer that's provided by the client. type Tick int64 // Time is a unit of time not corresponding to wall time type Time int64 // Expiry specifies the rule for TTL. A state can have TTL / deadline that when it // expires a signal can be raised. type Expiry struct { TTL Tick Raise Signal } // Limit is a struct that captures the limit and what signal to raise type Limit struct { Value int Raise Signal } // Signal is a signal that can drive the state machine to transfer from one state to the next. type Signal int // State encapsulates all the possible transitions and actions to perform during the // state transition. A state can have a TTL so that it is allowed to be in that // state for a given TTL. On expiration, a signal is raised. type State struct { // Index is a unique key of the state Index Index // Transitions fully specifies all the possible transitions from this state, by the way of signals. Transitions map[Signal]Index // Actions specify for each signal, what code / action is to be executed as the fsm transits from one state to the next. Actions map[Signal]Action // Errors specifies the handling of errors when executing action. On action error, the mapped state is transitioned. Errors map[Signal]Index // TTL specifies how long this state can last before a signal is raised. TTL Expiry // Visit specifies a limit on the number of times the fsm can visit this state before raising a signal.
Visit Limit } // DefaultOptions returns default values func DefaultOptions(name string) Options { return Options{ Name: name, BufferSize: defaultBufferSize, IgnoreUndefinedTransitions: true, IgnoreUndefinedSignals: true, IgnoreUndefinedStates: true, } } // Options contains options for the set type Options struct { // Name is the name of the set Name string // BufferSize is the size of transaction queue/buffered channel BufferSize int // IgnoreUndefinedStates will not report error from undefined states for transition on Error() chan, if true IgnoreUndefinedStates bool // IgnoreUndefinedTransitions will not report error from undefined transitions for signal on Error() chan, if true IgnoreUndefinedTransitions bool // IgnoreUndefinedSignals will not report error from undefined signal for the state on Error() chan, if true IgnoreUndefinedSignals bool } type addOp struct { initial Index result chan FSM } // Set is a collection of fsm instances that follow a given spec. This is // the primary interface to manipulate the instances... by sending signals to it via channels. type Set struct { options Options spec Spec now Time next ID clock *Clock members map[ID]*instance bystate map[Index]map[ID]*instance reads chan func(Set) // given a view which is a copy of the Set stop chan struct{} add chan addOp delete chan ID // delete an instance with id errors chan error events chan *event transactions chan *txn deadlines *queue running bool }
apache-2.0
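A hedged sketch (not part of the package) of how the types above compose into a single state declaration; the Spec/Set machinery that consumes such states lives elsewhere in the package:

package fsm

// Illustrative indexes and signals for a provisioning workflow.
const (
	provisioning Index = iota
	running
	failed
)

const (
	provisioned Signal = iota
	timeout
)

// provisioningState stays current for at most 30 ticks; the TTL then
// raises timeout, which the Transitions map routes to the failed state.
var provisioningState = State{
	Index: provisioning,
	Transitions: map[Signal]Index{
		provisioned: running,
		timeout:     failed,
	},
	Actions: map[Signal]Action{
		provisioned: func(instance FSM) error { return nil }, // side effects on transition
	},
	TTL: Expiry{TTL: Tick(30), Raise: timeout},
}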
nickmain/blockly
demos/blockfactory_old/blocks.js
25828
/** * Blockly Demos: Block Factory Blocks * * Copyright 2012 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Blocks for Blockly's Block Factory application. * @author fraser@google.com (Neil Fraser) */ 'use strict'; Blockly.Blocks['factory_base'] = { // Base of new block. init: function() { this.setColour(120); this.appendDummyInput() .appendField('name') .appendField(new Blockly.FieldTextInput('block_type'), 'NAME'); this.appendStatementInput('INPUTS') .setCheck('Input') .appendField('inputs'); var dropdown = new Blockly.FieldDropdown([ ['automatic inputs', 'AUTO'], ['external inputs', 'EXT'], ['inline inputs', 'INT']]); this.appendDummyInput() .appendField(dropdown, 'INLINE'); dropdown = new Blockly.FieldDropdown([ ['no connections', 'NONE'], ['← left output', 'LEFT'], ['↕ top+bottom connections', 'BOTH'], ['↑ top connection', 'TOP'], ['↓ bottom connection', 'BOTTOM']], function(option) { this.sourceBlock_.updateShape_(option); // Connect a shadow block to this new input. this.sourceBlock_.spawnOutputShadow_(option); }); this.appendDummyInput() .appendField(dropdown, 'CONNECTIONS'); this.appendValueInput('COLOUR') .setCheck('Colour') .appendField('colour'); this.setTooltip('Build a custom block by plugging\n' + 'fields, inputs and other blocks here.'); this.setHelpUrl( 'https://developers.google.com/blockly/guides/create-custom-blocks/block-factory'); }, mutationToDom: function() { var container = Blockly.utils.xml.createElement('mutation'); container.setAttribute('connections', this.getFieldValue('CONNECTIONS')); return container; }, domToMutation: function(xmlElement) { var connections = xmlElement.getAttribute('connections'); this.updateShape_(connections); }, spawnOutputShadow_: function(option) { // Helper method for deciding which type of outputs this block needs // to attach shadow blocks to. switch (option) { case 'LEFT': this.connectOutputShadow_('OUTPUTTYPE'); break; case 'TOP': this.connectOutputShadow_('TOPTYPE'); break; case 'BOTTOM': this.connectOutputShadow_('BOTTOMTYPE'); break; case 'BOTH': this.connectOutputShadow_('TOPTYPE'); this.connectOutputShadow_('BOTTOMTYPE'); break; } }, connectOutputShadow_: function(outputType) { // Helper method to create & connect shadow block.
var type = this.workspace.newBlock('type_null'); type.setShadow(true); type.outputConnection.connect(this.getInput(outputType).connection); type.initSvg(); type.render(); }, updateShape_: function(option) { var outputExists = this.getInput('OUTPUTTYPE'); var topExists = this.getInput('TOPTYPE'); var bottomExists = this.getInput('BOTTOMTYPE'); if (option == 'LEFT') { if (!outputExists) { this.addTypeInput_('OUTPUTTYPE', 'output type'); } } else if (outputExists) { this.removeInput('OUTPUTTYPE'); } if (option == 'TOP' || option == 'BOTH') { if (!topExists) { this.addTypeInput_('TOPTYPE', 'top type'); } } else if (topExists) { this.removeInput('TOPTYPE'); } if (option == 'BOTTOM' || option == 'BOTH') { if (!bottomExists) { this.addTypeInput_('BOTTOMTYPE', 'bottom type'); } } else if (bottomExists) { this.removeInput('BOTTOMTYPE'); } }, addTypeInput_: function(name, label) { this.appendValueInput(name) .setCheck('Type') .appendField(label); this.moveInputBefore(name, 'COLOUR'); } }; var FIELD_MESSAGE = 'fields %1 %2'; var FIELD_ARGS = [ { "type": "field_dropdown", "name": "ALIGN", "options": [['left', 'LEFT'], ['right', 'RIGHT'], ['centre', 'CENTRE']], }, { "type": "input_statement", "name": "FIELDS", "check": "Field" } ]; var TYPE_MESSAGE = 'type %1'; var TYPE_ARGS = [ { "type": "input_value", "name": "TYPE", "check": "Type", "align": "RIGHT" } ]; Blockly.Blocks['input_value'] = { // Value input. init: function() { this.jsonInit({ "message0": "value input %1 %2", "args0": [ { "type": "field_input", "name": "INPUTNAME", "text": "NAME" }, { "type": "input_dummy" } ], "message1": FIELD_MESSAGE, "args1": FIELD_ARGS, "message2": TYPE_MESSAGE, "args2": TYPE_ARGS, "previousStatement": "Input", "nextStatement": "Input", "colour": 210, "tooltip": "A value socket for horizontal connections.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=71" }); }, onchange: function() { inputNameCheck(this); } }; Blockly.Blocks['input_statement'] = { // Statement input. init: function() { this.jsonInit({ "message0": "statement input %1 %2", "args0": [ { "type": "field_input", "name": "INPUTNAME", "text": "NAME" }, { "type": "input_dummy" }, ], "message1": FIELD_MESSAGE, "args1": FIELD_ARGS, "message2": TYPE_MESSAGE, "args2": TYPE_ARGS, "previousStatement": "Input", "nextStatement": "Input", "colour": 210, "tooltip": "A statement socket for enclosed vertical stacks.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=246" }); }, onchange: function() { inputNameCheck(this); } }; Blockly.Blocks['input_dummy'] = { // Dummy input. init: function() { this.jsonInit({ "message0": "dummy input", "message1": FIELD_MESSAGE, "args1": FIELD_ARGS, "previousStatement": "Input", "nextStatement": "Input", "colour": 210, "tooltip": "For adding fields on a separate row with no " + "connections. Alignment options (left, right, centre) " + "apply only to multi-line fields.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=293" }); } }; Blockly.Blocks['field_static'] = { // Text value. init: function() { this.setColour(160); this.appendDummyInput() .appendField('text') .appendField(new Blockly.FieldTextInput(''), 'TEXT'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Static text that serves as a label.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=88'); } }; Blockly.Blocks['field_input'] = { // Text input. 
init: function() { this.setColour(160); this.appendDummyInput() .appendField('text input') .appendField(new Blockly.FieldTextInput('default'), 'TEXT') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('An input field for the user to enter text.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=319'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_number'] = { // Numeric input. init: function() { this.setColour(160); this.appendDummyInput() .appendField('numeric input') .appendField(new Blockly.FieldNumber(0), 'VALUE') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.appendDummyInput() .appendField('min') .appendField(new Blockly.FieldNumber(-Infinity), 'MIN') .appendField('max') .appendField(new Blockly.FieldNumber(Infinity), 'MAX') .appendField('precision') .appendField(new Blockly.FieldNumber(0, 0), 'PRECISION'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('An input field for the user to enter a number.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=319'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_angle'] = { // Angle input. init: function() { this.setColour(160); this.appendDummyInput() .appendField('angle input') .appendField(new Blockly.FieldAngle('90'), 'ANGLE') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('An input field for the user to enter an angle.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=372'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_dropdown'] = { // Dropdown menu. init: function() { this.appendDummyInput() .appendField('dropdown') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.optionCount_ = 3; this.updateShape_(); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setMutator(new Blockly.Mutator(['field_dropdown_option'])); this.setColour(160); this.setTooltip('Dropdown menu with a list of options.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=386'); }, mutationToDom: function(workspace) { // Create XML to represent menu options. var container = Blockly.utils.xml.createElement('mutation'); container.setAttribute('options', this.optionCount_); return container; }, domToMutation: function(container) { // Parse XML to restore the menu options. this.optionCount_ = parseInt(container.getAttribute('options'), 10); this.updateShape_(); }, decompose: function(workspace) { // Populate the mutator's dialog with this block's components. var containerBlock = workspace.newBlock('field_dropdown_container'); containerBlock.initSvg(); var connection = containerBlock.getInput('STACK').connection; for (var i = 0; i < this.optionCount_; i++) { var optionBlock = workspace.newBlock('field_dropdown_option'); optionBlock.initSvg(); connection.connect(optionBlock.previousConnection); connection = optionBlock.nextConnection; } return containerBlock; }, compose: function(containerBlock) { // Reconfigure this block based on the mutator dialog's components. var optionBlock = containerBlock.getInputTargetBlock('STACK'); // Count number of inputs. 
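    // Walk the chain of option blocks from the mutator dialog, collecting the
    // (user-visible text, language-neutral name) pair that saveConnections()
    // stashed on each block before the dialog was rebuilt.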
var data = []; while (optionBlock) { data.push([optionBlock.userData_, optionBlock.cpuData_]); optionBlock = optionBlock.nextConnection && optionBlock.nextConnection.targetBlock(); } this.optionCount_ = data.length; this.updateShape_(); // Restore any data. for (var i = 0; i < this.optionCount_; i++) { this.setFieldValue(data[i][0] || 'option', 'USER' + i); this.setFieldValue(data[i][1] || 'OPTIONNAME', 'CPU' + i); } }, saveConnections: function(containerBlock) { // Store names and values for each option. var optionBlock = containerBlock.getInputTargetBlock('STACK'); var i = 0; while (optionBlock) { optionBlock.userData_ = this.getFieldValue('USER' + i); optionBlock.cpuData_ = this.getFieldValue('CPU' + i); i++; optionBlock = optionBlock.nextConnection && optionBlock.nextConnection.targetBlock(); } }, updateShape_: function() { // Modify this block to have the correct number of options. // Add new options. for (var i = 0; i < this.optionCount_; i++) { if (!this.getInput('OPTION' + i)) { this.appendDummyInput('OPTION' + i) .appendField(new Blockly.FieldTextInput('option'), 'USER' + i) .appendField(',') .appendField(new Blockly.FieldTextInput('OPTIONNAME'), 'CPU' + i); } } // Remove deleted options. while (this.getInput('OPTION' + i)) { this.removeInput('OPTION' + i); i++; } }, onchange: function() { if (this.workspace && this.optionCount_ < 1) { this.setWarningText('Drop down menu must\nhave at least one option.'); } else { fieldNameCheck(this); } } }; Blockly.Blocks['field_dropdown_container'] = { // Container. init: function() { this.setColour(160); this.appendDummyInput() .appendField('add options'); this.appendStatementInput('STACK'); this.setTooltip('Add, remove, or reorder options\n' + 'to reconfigure this dropdown menu.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=386'); this.contextMenu = false; } }; Blockly.Blocks['field_dropdown_option'] = { // Add option. init: function() { this.setColour(160); this.appendDummyInput() .appendField('option'); this.setPreviousStatement(true); this.setNextStatement(true); this.setTooltip('Add a new option to the dropdown menu.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=386'); this.contextMenu = false; } }; Blockly.Blocks['field_checkbox'] = { // Checkbox. init: function() { this.setColour(160); this.appendDummyInput() .appendField('checkbox') .appendField(new Blockly.FieldCheckbox('TRUE'), 'CHECKED') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Checkbox field.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=485'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_colour'] = { // Colour input. init: function() { this.setColour(160); this.appendDummyInput() .appendField('colour') .appendField(new Blockly.FieldColour('#ff0000'), 'COLOUR') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Colour input field.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=495'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_date'] = { // Date input. 
init: function() { this.setColour(160); this.appendDummyInput() .appendField('date') .appendField(new Blockly.FieldDate(), 'DATE') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Date input field.'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_variable'] = { // Dropdown for variables. init: function() { this.setColour(160); this.appendDummyInput() .appendField('variable') .appendField(new Blockly.FieldTextInput('item'), 'TEXT') .appendField(',') .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Dropdown menu for variable names.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=510'); }, onchange: function() { fieldNameCheck(this); } }; Blockly.Blocks['field_image'] = { // Image. init: function() { this.setColour(160); var src = 'https://www.gstatic.com/codesite/ph/images/star_on.gif'; this.appendDummyInput() .appendField('image') .appendField(new Blockly.FieldTextInput(src), 'SRC'); this.appendDummyInput() .appendField('width') .appendField(new Blockly.FieldNumber('15', 0, NaN, 1), 'WIDTH') .appendField('height') .appendField(new Blockly.FieldNumber('15', 0, NaN, 1), 'HEIGHT') .appendField('alt text') .appendField(new Blockly.FieldTextInput('*'), 'ALT'); this.setPreviousStatement(true, 'Field'); this.setNextStatement(true, 'Field'); this.setTooltip('Static image (JPEG, PNG, GIF, SVG, BMP).\n' + 'Retains aspect ratio regardless of height and width.\n' + 'Alt text is for when collapsed.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=567'); } }; Blockly.Blocks['type_group'] = { // Group of types. init: function() { this.typeCount_ = 2; this.updateShape_(); this.setOutput(true, 'Type'); this.setMutator(new Blockly.Mutator(['type_group_item'])); this.setColour(230); this.setTooltip('Allows more than one type to be accepted.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=677'); }, mutationToDom: function(workspace) { // Create XML to represent a group of types. var container = Blockly.utils.xml.createElement('mutation'); container.setAttribute('types', this.typeCount_); return container; }, domToMutation: function(container) { // Parse XML to restore the group of types. this.typeCount_ = parseInt(container.getAttribute('types'), 10); this.updateShape_(); for (var i = 0; i < this.typeCount_; i++) { this.removeInput('TYPE' + i); } for (var i = 0; i < this.typeCount_; i++) { var input = this.appendValueInput('TYPE' + i) .setCheck('Type'); if (i == 0) { input.appendField('any of'); } } }, decompose: function(workspace) { // Populate the mutator's dialog with this block's components. var containerBlock = workspace.newBlock('type_group_container'); containerBlock.initSvg(); var connection = containerBlock.getInput('STACK').connection; for (var i = 0; i < this.typeCount_; i++) { var typeBlock = workspace.newBlock('type_group_item'); typeBlock.initSvg(); connection.connect(typeBlock.previousConnection); connection = typeBlock.nextConnection; } return containerBlock; }, compose: function(containerBlock) { // Reconfigure this block based on the mutator dialog's components. var typeBlock = containerBlock.getInputTargetBlock('STACK'); // Count number of inputs. 
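    // Walk the chain of item blocks from the mutator dialog, collecting the
    // child connection (if any) that saveConnections() recorded on each item.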
var connections = []; while (typeBlock) { connections.push(typeBlock.valueConnection_); typeBlock = typeBlock.nextConnection && typeBlock.nextConnection.targetBlock(); } // Disconnect any children that don't belong. for (var i = 0; i < this.typeCount_; i++) { var connection = this.getInput('TYPE' + i).connection.targetConnection; if (connection && connections.indexOf(connection) == -1) { connection.disconnect(); } } this.typeCount_ = connections.length; this.updateShape_(); // Reconnect any child blocks. for (var i = 0; i < this.typeCount_; i++) { Blockly.Mutator.reconnect(connections[i], this, 'TYPE' + i); } }, saveConnections: function(containerBlock) { // Store a pointer to any connected child blocks. var typeBlock = containerBlock.getInputTargetBlock('STACK'); var i = 0; while (typeBlock) { var input = this.getInput('TYPE' + i); typeBlock.valueConnection_ = input && input.connection.targetConnection; i++; typeBlock = typeBlock.nextConnection && typeBlock.nextConnection.targetBlock(); } }, updateShape_: function() { // Modify this block to have the correct number of inputs. // Add new inputs. for (var i = 0; i < this.typeCount_; i++) { if (!this.getInput('TYPE' + i)) { var input = this.appendValueInput('TYPE' + i); if (i == 0) { input.appendField('any of'); } } } // Remove deleted inputs. while (this.getInput('TYPE' + i)) { this.removeInput('TYPE' + i); i++; } } }; Blockly.Blocks['type_group_container'] = { // Container. init: function() { this.jsonInit({ "message0": "add types %1 %2", "args0": [ {"type": "input_dummy"}, {"type": "input_statement", "name": "STACK"} ], "colour": 230, "tooltip": "Add or remove allowed types.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=677" }); } }; Blockly.Blocks['type_group_item'] = { // Add type. init: function() { this.jsonInit({ "message0": "type", "previousStatement": null, "nextStatement": null, "colour": 230, "tooltip": "Add a new allowed type.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=677" }); } }; Blockly.Blocks['type_null'] = { // Null type. valueType: null, init: function() { this.jsonInit({ "message0": "any", "output": "Type", "colour": 230, "tooltip": "Any type is allowed.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=602" }); } }; Blockly.Blocks['type_boolean'] = { // Boolean type. valueType: 'Boolean', init: function() { this.jsonInit({ "message0": "Boolean", "output": "Type", "colour": 230, "tooltip": "Booleans (true/false) are allowed.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=602" }); } }; Blockly.Blocks['type_number'] = { // Number type. valueType: 'Number', init: function() { this.jsonInit({ "message0": "Number", "output": "Type", "colour": 230, "tooltip": "Numbers (int/float) are allowed.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=602" }); } }; Blockly.Blocks['type_string'] = { // String type. valueType: 'String', init: function() { this.jsonInit({ "message0": "String", "output": "Type", "colour": 230, "tooltip": "Strings (text) are allowed.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=602" }); } }; Blockly.Blocks['type_list'] = { // List type. valueType: 'Array', init: function() { this.jsonInit({ "message0": "Array", "output": "Type", "colour": 230, "tooltip": "Arrays (lists) are allowed.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=602" }); } }; Blockly.Blocks['type_other'] = { // Other type.
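  // Free-text escape hatch: lets the user name a custom connection check
  // instead of picking one of the built-in types above.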
init: function() { this.jsonInit({ "message0": "other %1", "args0": [{"type": "field_input", "name": "TYPE", "text": ""}], "output": "Type", "colour": 230, "tooltip": "Custom type to allow.", "helpUrl": "https://www.youtube.com/watch?v=s2_xaEvcVI0#t=702" }); } }; Blockly.Blocks['colour_hue'] = { // Set the colour of the block. init: function() { this.appendDummyInput() .appendField('hue:') .appendField(new Blockly.FieldAngle('0', this.validator), 'HUE'); this.setOutput(true, 'Colour'); this.setTooltip('Paint the block with this colour.'); this.setHelpUrl('https://www.youtube.com/watch?v=s2_xaEvcVI0#t=55'); }, validator: function(text) { // Update the current block's colour to match. var hue = parseInt(text, 10); if (!isNaN(hue)) { this.sourceBlock_.setColour(hue); } }, mutationToDom: function(workspace) { var container = Blockly.utils.xml.createElement('mutation'); container.setAttribute('colour', this.getColour()); return container; }, domToMutation: function(container) { this.setColour(container.getAttribute('colour')); } }; /** * Check to see if more than one field has this name. * Highly inefficient (O(n^2)), but n is small. * @param {!Blockly.Block} referenceBlock Block to check. */ function fieldNameCheck(referenceBlock) { if (!referenceBlock.workspace) { // Block has been deleted. return; } var name = referenceBlock.getFieldValue('FIELDNAME').toLowerCase(); var count = 0; var blocks = referenceBlock.workspace.getAllBlocks(false); for (var i = 0, block; block = blocks[i]; i++) { var otherName = block.getFieldValue('FIELDNAME'); if (!block.disabled && !block.getInheritedDisabled() && otherName && otherName.toLowerCase() == name) { count++; } } var msg = (count > 1) ? 'There are ' + count + ' field blocks\n with this name.' : null; referenceBlock.setWarningText(msg); } /** * Check to see if more than one input has this name. * Highly inefficient (O(n^2)), but n is small. * @param {!Blockly.Block} referenceBlock Block to check. */ function inputNameCheck(referenceBlock) { if (!referenceBlock.workspace) { // Block has been deleted. return; } var name = referenceBlock.getFieldValue('INPUTNAME').toLowerCase(); var count = 0; var blocks = referenceBlock.workspace.getAllBlocks(false); for (var i = 0, block; block = blocks[i]; i++) { var otherName = block.getFieldValue('INPUTNAME'); if (!block.disabled && !block.getInheritedDisabled() && otherName && otherName.toLowerCase() == name) { count++; } } var msg = (count > 1) ? 'There are ' + count + ' input blocks\n with this name.' : null; referenceBlock.setWarningText(msg); }
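// A minimal sketch (hypothetical, not part of Block Factory) of how a new
// field block would plug into the duplicate-name warning above: store the
// name under the 'FIELDNAME' key and call fieldNameCheck from onchange.
Blockly.Blocks['field_example'] = {
  init: function() {
    this.setColour(160);
    this.appendDummyInput()
        .appendField('example')
        .appendField(new Blockly.FieldTextInput('NAME'), 'FIELDNAME');
    this.setPreviousStatement(true, 'Field');
    this.setNextStatement(true, 'Field');
    this.setTooltip('Hypothetical example field.');
  },
  onchange: function() {
    // Warns when another field block shares this block's FIELDNAME.
    fieldNameCheck(this);
  }
};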
apache-2.0
liuzcgithub/chef-repo
cookbooks/CLM_E1_APP_COOKBOOK/files/default/ssea-clm-full-distribute-db2-cfg_nonroot/InstallCLMapps.py
7844
################################################### def deployJazzApp(app_name,nname,sname): print "====== installing " + app_name + " Web Application on WAS on node " + nname + " server " + sname + " ======\n" app_war= app_name + '.war' installed_appname = app_name + '_war' app_ctx = '/' + app_name appfile = "/opt/IBM/JazzTeamServer/server/webapps/" + app_war #appfile = properties.getProperty("CLM_HOME") + "/server/webapps/" + app_war #appfile = "/tmp/webapps/" + app_war installcmd = "" installcmd = installcmd + "[" installcmd = installcmd + " -nopreCompileJSPs -distributeApp -nouseMetaDataFromBinary -nodeployejb" installcmd = installcmd + " -appname " + installed_appname installcmd = installcmd + " -createMBeansForResources -noreloadEnabled -nodeployws" installcmd = installcmd + " -validateinstall warn" installcmd = installcmd + " -noprocessEmbeddedConfig" installcmd = installcmd + " -filepermission .*\\.dll=755#.*\\.so=755#.*\\.a=755#.*\\.sl=755" installcmd = installcmd + " -noallowDispatchRemoteInclude -noallowServiceRemoteInclude" installcmd = installcmd + " -asyncRequestDispatchType DISABLED" installcmd = installcmd + " -nouseAutoLink" installcmd = installcmd + " -MapModulesToServers [[ " + app_war + " " + app_war + ",WEB-INF/web.xml" installcmd = installcmd + " WebSphere:cell=" + cellName + ",node=" + nname + ",server=" + sname + "+WebSphere:cell=" + cellName + ",node=" + WebserverNodeName + ",server=" + WebserverName + " ]]" installcmd = installcmd + " -MapWebModToVH [[ " + app_war + " " + app_war + ",WEB-INF/web.xml default_host ]]" installcmd = installcmd + " -CtxRootForWebMod [[ " + app_war + " " + app_war + ",WEB-INF/web.xml " + app_ctx +" ]]" installcmd = installcmd + "]" # the command doesn't like literal single quotes quoting actual parameters in the variables rc = AdminApp.install(appfile, installcmd) AdminConfig.save() print "====== application " + app_name + " is installed on WAS =========\n" if (app_name in comp_JazzApps): if os.path.exists('/tmp/WASCommon'): mapRolesToJazzApp(installed_appname) else: oldmapRolesToJazzApp(installed_appname) ################################################### # Function mapRolesToJazzApp(installed_appname) ################################################### def oldmapRolesToJazzApp(installed_appname): print "====== mapping user roles and group roles to jazz ===============" userrealm = properties.getProperty("jazz.userrealm") userrole = properties.getProperty("jazz.userrole.primaryid") if len(userrole) == 0: userrole = "\'\'" userrealmstr = "" else: userrealmstr = " user:" + userrealm + "/" print "userrealm: ",userrealmstr for propname in properties.keys(): if propname.find("jazz.grouprole.") == 0: editcmd = "" editcmd = editcmd + "[" editcmd = editcmd + " -MapRolesToUsers [" editcmd = editcmd + "[ " + propname[15:] + " AppDeploymentOption.No AppDeploymentOption.No" editcmd = editcmd + " " + userrole editcmd = editcmd + " " + properties.getProperty(propname) editcmd = editcmd + " AppDeploymentOption.No" editcmd = editcmd + userrealmstr + userrole editcmd = editcmd + " group:" + userrealm + "/" + properties.getProperty(propname) editcmd = editcmd + "]" editcmd = editcmd + "]]" print "========== mapping role: " + propname[15:] + "========" rc = AdminApp.edit(installed_appname,editcmd) rc = AdminConfig.save() def mapRolesToJazzApp(installed_appname): print "====== mapping user roles and group roles to jazz ===============" userrealm = properties.getProperty("jazz_userrealm") userrole = properties.getProperty("jazz_ldap_primaryid") if len(userrole) == 
0: userrole = "\'\'" userrealmstr = "" else: userrealmstr = " user:" + userrealm + "/" print "userrealmstr: ",userrealmstr print "userrole: ",userrole for propname in properties.keys(): #JazzGuests="cn\=RQMSVTJazzUsers,cn\=SVT,dc\=RPTSVT,dc\=domain" if propname.find("jazz_grouprole_") == 0: editcmd = "" editcmd = editcmd + "[" editcmd = editcmd + " -MapRolesToUsers [" editcmd = editcmd + "[ " + propname[15:] + " AppDeploymentOption.No AppDeploymentOption.No" editcmd = editcmd + " " + userrole editcmd = editcmd + " " + properties.getProperty(propname) editcmd = editcmd + " AppDeploymentOption.No" editcmd = editcmd + userrealmstr + userrole editcmd = editcmd + " group:" + userrealm + "/" + properties.getProperty(propname) editcmd = editcmd + "]" editcmd = editcmd + "]]" print "editcmd: ",editcmd print "========== mapping role: " + propname[15:] + "========" rc = AdminApp.edit(installed_appname,editcmd) rc = AdminConfig.save() ########################################### # Start application ########################################### def startApp(installed_appname,nname,sname): print "start application " + installed_appname app_manager_cmd="cell=" + cellName + ",node=" + nname + ",type=ApplicationManager,process=" + sname + ",*" appManager = AdminControl.queryNames(app_manager_cmd) AdminControl.invoke(appManager,'startApplication',installed_appname) #################### # main ############ import sys import java.util as util import java.io as javaio import os import shutil import time #load JVM Properties from properties file properties = util.Properties() propfile="/tmp/WASCommon/LDAP/LDAPSecurity.properties" print "====== loading properties from ",propfile ldappropsfis=javaio.FileInputStream(propfile) properties.load(ldappropsfis) cell=AdminConfig.list('Cell') cellName=AdminConfig.showAttribute(cell,'name') print "cell name is " + cellName + "\n" serverList=AdminTask.listServers('[-serverType APPLICATION_SERVER ]') servers=serverList.split("\n") AfterAppNodes=serverList.split("nodes/")[1] AppNodeName=AfterAppNodes.split("/servers/")[0] print "AppNodeName is " + AppNodeName + "\n" #appNames = ['clm', 'jts', 'ccm', 'qm', 'rm', 'admin', 'clmhelp', 'converter', 'dm', 'rdmhelp'] #appNames = ['jts', 'ccm', 'qm', 'rm', 'admin', 'clmhelp', 'converter'] #jts_appNames = ['jts','admin','clmhelp'] #ccm_appNames = ['ccm'] #qm_appNames = ['qm'] #rm_appNames = ['rm','converter'] comp_JazzApps = ['jts', 'ccm', 'qm', 'rm'] if (len(sys.argv) > 0): opt = sys.argv[0] if opt == 'jts': distribute_appNames=['jts','admin','clmhelp'] elif (opt == 'ccm'): distribute_appNames=['ccm'] elif (opt == 'qm'): distribute_appNames=['qm'] elif (opt == 'rm'): distribute_appNames=['rm','converter'] else: print "Invalid option " + opt sys.exit() else: print "Missing option, e.g. jts" sys.exit() WebserverList=AdminTask.listServers('[-serverType WEB_SERVER ]') WebserverName=AdminConfig.showAttribute(WebserverList,'name') AfterNodes=WebserverList.split("nodes/")[1] WebserverNodeName=AfterNodes.split("/servers/")[0] print "WebServer node name is " + WebserverNodeName + "\n" print "WebServerName is " + WebserverName + "\n" print "===== deploying " + opt + " applications ======\n" for app_name in distribute_appNames: print app_name + "\n" deployJazzApp(app_name,AppNodeName,"server1") print "============ sleep 30 seconds to wait for all applications to be installed ===============\n" time.sleep(30) for app_name in distribute_appNames: app_name_war=app_name + "_war" print "starting application " + app_name_war 
startApp(app_name_war,AppNodeName,"server1")
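# Invocation sketch (assumption: the usual wsadmin Jython entry point; the
# deployment-manager profile path below is hypothetical):
#   /opt/IBM/WebSphere/AppServer/profiles/Dmgr01/bin/wsadmin.sh \
#       -lang jython -f InstallCLMapps.py jts
# wsadmin passes 'jts' through as sys.argv[0], which selects the
# distribute_appNames list in the main section above.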
apache-2.0
chenc10/Spark-PAF
network/shuffle/src/main/java/org/apache/spark/network/shuffle/mesos/MesosExternalShuffleClient.java
3077
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.network.shuffle.mesos; import java.io.IOException; import java.nio.ByteBuffer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.spark.network.client.RpcResponseCallback; import org.apache.spark.network.client.TransportClient; import org.apache.spark.network.sasl.SecretKeyHolder; import org.apache.spark.network.shuffle.ExternalShuffleClient; import org.apache.spark.network.shuffle.protocol.mesos.RegisterDriver; import org.apache.spark.network.util.TransportConf; /** * A client for talking to the external shuffle service in Mesos coarse-grained mode. * * This is used by the Spark driver to register with each external shuffle service on the cluster. * The driver has to talk to the service so that shuffle files can be cleaned up reliably after * the application exits. Mesos does not provide a great alternative to do this, so Spark has to * handle this itself. */ public class MesosExternalShuffleClient extends ExternalShuffleClient { private final Logger logger = LoggerFactory.getLogger(MesosExternalShuffleClient.class); /** * Creates a Mesos external shuffle client that wraps the {@link ExternalShuffleClient}. * Please refer to docs on {@link ExternalShuffleClient} for more information. */ public MesosExternalShuffleClient( TransportConf conf, SecretKeyHolder secretKeyHolder, boolean saslEnabled, boolean saslEncryptionEnabled) { super(conf, secretKeyHolder, saslEnabled, saslEncryptionEnabled); } public void registerDriverWithShuffleService(String host, int port) throws IOException { checkInit(); ByteBuffer registerDriver = new RegisterDriver(appId).toByteBuffer(); TransportClient client = clientFactory.createClient(host, port); client.sendRpc(registerDriver, new RpcResponseCallback() { @Override public void onSuccess(ByteBuffer response) { logger.info("Successfully registered app " + appId + " with external shuffle service."); } @Override public void onFailure(Throwable e) { logger.warn("Unable to register app " + appId + " with external shuffle service. " + "Please manually remove shuffle data after driver exit. Error: " + e); } }); } }
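/*
 * A minimal usage sketch (not part of Spark): how driver-side code might
 * register with a single shuffle service endpoint. The TransportConf wiring,
 * host name, and port 7337 are illustrative assumptions only.
 */
class MesosExternalShuffleClientUsageSketch {
  public static void main(String[] args) throws IOException {
    // SystemPropertyConfigProvider reads transport settings from JVM system
    // properties; any ConfigProvider would do here (assumption for the sketch).
    TransportConf conf = new TransportConf(
        "shuffle", new org.apache.spark.network.util.SystemPropertyConfigProvider());
    // No SASL in this sketch, so the SecretKeyHolder may be null.
    MesosExternalShuffleClient client =
        new MesosExternalShuffleClient(conf, null, false, false);
    client.init("app-20160101000000-0000");  // appId must be set before any RPC
    client.registerDriverWithShuffleService("shuffle-host.example", 7337);
  }
}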
apache-2.0
Yaliang/presto
presto-main/src/test/java/com/facebook/presto/block/TestMapBlock.java
13291
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.block; import com.facebook.presto.metadata.FunctionRegistry; import com.facebook.presto.spi.block.Block; import com.facebook.presto.spi.block.BlockBuilder; import com.facebook.presto.spi.block.ByteArrayBlock; import com.facebook.presto.spi.block.MapBlockBuilder; import com.facebook.presto.spi.block.SingleMapBlock; import com.facebook.presto.spi.type.MapType; import com.facebook.presto.spi.type.TypeManager; import com.facebook.presto.sql.analyzer.FeaturesConfig; import com.facebook.presto.type.TypeRegistry; import org.testng.annotations.Test; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import static com.facebook.presto.block.BlockAssertions.createLongsBlock; import static com.facebook.presto.block.BlockAssertions.createStringsBlock; import static com.facebook.presto.spi.type.BigintType.BIGINT; import static com.facebook.presto.spi.type.TinyintType.TINYINT; import static com.facebook.presto.spi.type.VarcharType.VARCHAR; import static com.facebook.presto.util.StructuralTestUtil.mapType; import static io.airlift.slice.Slices.utf8Slice; import static java.util.Objects.requireNonNull; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; public class TestMapBlock extends AbstractTestBlock { private static final TypeManager TYPE_MANAGER = new TypeRegistry(); static { // associate TYPE_MANAGER with a function registry new FunctionRegistry(TYPE_MANAGER, new BlockEncodingManager(TYPE_MANAGER), new FeaturesConfig()); } @Test public void test() { testWith(createTestMap(9, 3, 4, 0, 8, 0, 6, 5)); } @Test public void testCompactBlock() { Block emptyBlock = new ByteArrayBlock(0, Optional.empty(), new byte[0]); Block compactKeyBlock = new ByteArrayBlock(16, Optional.empty(), createExpectedValue(16).getBytes()); Block compactValueBlock = new ByteArrayBlock(16, Optional.empty(), createExpectedValue(16).getBytes()); Block inCompactKeyBlock = new ByteArrayBlock(16, Optional.empty(), createExpectedValue(17).getBytes()); Block inCompactValueBlock = new ByteArrayBlock(16, Optional.empty(), createExpectedValue(17).getBytes()); int[] offsets = {0, 1, 1, 2, 4, 8, 16}; boolean[] mapIsNull = {false, true, false, false, false, false}; testCompactBlock(mapType(TINYINT, TINYINT).createBlockFromKeyValue(Optional.empty(), new int[1], emptyBlock, emptyBlock)); testCompactBlock(mapType(TINYINT, TINYINT).createBlockFromKeyValue(Optional.of(mapIsNull), offsets, compactKeyBlock, compactValueBlock)); // TODO: Add test case for a sliced MapBlock // underlying key/value block is not compact testIncompactBlock(mapType(TINYINT, TINYINT).createBlockFromKeyValue(Optional.of(mapIsNull), offsets, inCompactKeyBlock, inCompactValueBlock)); } private Map<String, Long>[] createTestMap(int... 
entryCounts) { Map<String, Long>[] result = new Map[entryCounts.length]; for (int rowNumber = 0; rowNumber < entryCounts.length; rowNumber++) { int entryCount = entryCounts[rowNumber]; Map<String, Long> map = new HashMap<>(); for (int entryNumber = 0; entryNumber < entryCount; entryNumber++) { map.put("key" + entryNumber, entryNumber == 5 ? null : rowNumber * 100L + entryNumber); } result[rowNumber] = map; } return result; } private void testWith(Map<String, Long>[] expectedValues) { BlockBuilder blockBuilder = createBlockBuilderWithValues(expectedValues); assertBlock(blockBuilder, () -> blockBuilder.newBlockBuilderLike(null), expectedValues); assertBlock(blockBuilder.build(), () -> blockBuilder.newBlockBuilderLike(null), expectedValues); assertBlockFilteredPositions(expectedValues, blockBuilder, () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 3, 4, 7); assertBlockFilteredPositions(expectedValues, blockBuilder.build(), () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 3, 4, 7); assertBlockFilteredPositions(expectedValues, blockBuilder, () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 5, 6); assertBlockFilteredPositions(expectedValues, blockBuilder.build(), () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 5, 6); Block block = createBlockWithValuesFromKeyValueBlock(expectedValues); assertBlock(block, () -> blockBuilder.newBlockBuilderLike(null), expectedValues); assertBlockFilteredPositions(expectedValues, block, () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 3, 4, 7); assertBlockFilteredPositions(expectedValues, block, () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 5, 6); Map<String, Long>[] expectedValuesWithNull = alternatingNullValues(expectedValues); BlockBuilder blockBuilderWithNull = createBlockBuilderWithValues(expectedValuesWithNull); assertBlock(blockBuilderWithNull, () -> blockBuilder.newBlockBuilderLike(null), expectedValuesWithNull); assertBlock(blockBuilderWithNull.build(), () -> blockBuilder.newBlockBuilderLike(null), expectedValuesWithNull); assertBlockFilteredPositions(expectedValuesWithNull, blockBuilderWithNull, () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 5, 6, 7, 10, 11, 12, 15); assertBlockFilteredPositions(expectedValuesWithNull, blockBuilderWithNull.build(), () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 5, 6, 7, 10, 11, 12, 15); assertBlockFilteredPositions(expectedValuesWithNull, blockBuilderWithNull, () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 4, 9, 13, 14); assertBlockFilteredPositions(expectedValuesWithNull, blockBuilderWithNull.build(), () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 4, 9, 13, 14); Block blockWithNull = createBlockWithValuesFromKeyValueBlock(expectedValuesWithNull); assertBlock(blockWithNull, () -> blockBuilder.newBlockBuilderLike(null), expectedValuesWithNull); assertBlockFilteredPositions(expectedValuesWithNull, blockWithNull, () -> blockBuilder.newBlockBuilderLike(null), 0, 1, 5, 6, 7, 10, 11, 12, 15); assertBlockFilteredPositions(expectedValuesWithNull, blockWithNull, () -> blockBuilder.newBlockBuilderLike(null), 2, 3, 4, 9, 13, 14); } private BlockBuilder createBlockBuilderWithValues(Map<String, Long>[] maps) { MapType mapType = mapType(VARCHAR, BIGINT); BlockBuilder mapBlockBuilder = mapType.createBlockBuilder(null, 1); for (Map<String, Long> map : maps) { createBlockBuilderWithValues(map, mapBlockBuilder); } return mapBlockBuilder; } private Block createBlockWithValuesFromKeyValueBlock(Map<String, Long>[] maps) { List<String> keys = new ArrayList<>(); List<Long> values = new ArrayList<>(); 
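        // Standard variable-width block encoding: map i occupies key/value
        // positions [offsets[i], offsets[i + 1]), so a null (or empty) map
        // contributes a zero-length range.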
int[] offsets = new int[maps.length + 1]; boolean[] mapIsNull = new boolean[maps.length]; for (int i = 0; i < maps.length; i++) { Map<String, Long> map = maps[i]; mapIsNull[i] = map == null; if (map == null) { offsets[i + 1] = offsets[i]; } else { for (Map.Entry<String, Long> entry : map.entrySet()) { keys.add(entry.getKey()); values.add(entry.getValue()); } offsets[i + 1] = offsets[i] + map.size(); } } return mapType(VARCHAR, BIGINT).createBlockFromKeyValue(Optional.of(mapIsNull), offsets, createStringsBlock(keys), createLongsBlock(values)); } private void createBlockBuilderWithValues(Map<String, Long> map, BlockBuilder mapBlockBuilder) { if (map == null) { mapBlockBuilder.appendNull(); } else { BlockBuilder elementBlockBuilder = mapBlockBuilder.beginBlockEntry(); for (Map.Entry<String, Long> entry : map.entrySet()) { VARCHAR.writeSlice(elementBlockBuilder, utf8Slice(entry.getKey())); if (entry.getValue() == null) { elementBlockBuilder.appendNull(); } else { BIGINT.writeLong(elementBlockBuilder, entry.getValue()); } } mapBlockBuilder.closeEntry(); } } @Override protected <T> void assertPositionValue(Block block, int position, T expectedValue) { if (expectedValue instanceof Map) { assertValue(block, position, (Map<String, Long>) expectedValue); return; } super.assertPositionValue(block, position, expectedValue); } private void assertValue(Block mapBlock, int position, Map<String, Long> map) { MapType mapType = mapType(VARCHAR, BIGINT); // null maps are handled by assertPositionValue requireNonNull(map, "map is null"); assertFalse(mapBlock.isNull(position)); SingleMapBlock elementBlock = (SingleMapBlock) mapType.getObject(mapBlock, position); assertEquals(elementBlock.getPositionCount(), map.size() * 2); // Test new/hash-index access: assert inserted keys for (Map.Entry<String, Long> entry : map.entrySet()) { int pos = elementBlock.seekKey(utf8Slice(entry.getKey())); assertNotEquals(pos, -1); if (entry.getValue() == null) { assertTrue(elementBlock.isNull(pos)); } else { assertFalse(elementBlock.isNull(pos)); assertEquals(BIGINT.getLong(elementBlock, pos), (long) entry.getValue()); } } // Test new/hash-index access: assert non-existent keys for (int i = 0; i < 10; i++) { assertEquals(elementBlock.seekKey(utf8Slice("not-inserted-" + i)), -1); } // Test legacy/iterative access for (int i = 0; i < elementBlock.getPositionCount(); i += 2) { String actualKey = VARCHAR.getSlice(elementBlock, i).toStringUtf8(); Long actualValue; if (elementBlock.isNull(i + 1)) { actualValue = null; } else { actualValue = BIGINT.getLong(elementBlock, i + 1); } assertTrue(map.containsKey(actualKey)); assertEquals(actualValue, map.get(actualKey)); } } @Test public void testCloseEntryStrict() throws Exception { MapType mapType = mapType(BIGINT, BIGINT); MapBlockBuilder mapBlockBuilder = (MapBlockBuilder) mapType.createBlockBuilder(null, 1); // Add 100 maps with only one entry but the same key for (int i = 0; i < 100; i++) { BlockBuilder entryBuilder = mapBlockBuilder.beginBlockEntry(); BIGINT.writeLong(entryBuilder, 1); BIGINT.writeLong(entryBuilder, -1); mapBlockBuilder.closeEntry(); } BlockBuilder entryBuilder = mapBlockBuilder.beginBlockEntry(); // Add 50 keys so that we have some chance of hitting a hash conflict // The purpose of this test is to make sure offsets are calculated correctly in MapBlockBuilder.closeEntryStrict() for (int i = 0; i < 50; i++) { BIGINT.writeLong(entryBuilder, i); BIGINT.writeLong(entryBuilder, -1); } mapBlockBuilder.closeEntryStrict(); } @Test public void testEstimatedDataSizeForStats() { Map<String, 
Long>[] expectedValues = alternatingNullValues(createTestMap(9, 3, 4, 0, 8, 0, 6, 5)); BlockBuilder blockBuilder = createBlockBuilderWithValues(expectedValues); Block block = blockBuilder.build(); assertEquals(block.getPositionCount(), expectedValues.length); for (int i = 0; i < block.getPositionCount(); i++) { int expectedSize = getExpectedEstimatedDataSize(expectedValues[i]); assertEquals(blockBuilder.getEstimatedDataSizeForStats(i), expectedSize); assertEquals(block.getEstimatedDataSizeForStats(i), expectedSize); } } private static int getExpectedEstimatedDataSize(Map<String, Long> map) { if (map == null) { return 0; } int size = 0; for (Map.Entry<String, Long> entry : map.entrySet()) { size += entry.getKey().length(); size += entry.getValue() == null ? 0 : Long.BYTES; } return size; } }
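/*
 * Note on the expected-size formula above: the test's expectation for
 * getEstimatedDataSizeForStats is raw key bytes plus Long.BYTES per non-null
 * value, with a null map position contributing 0.
 */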
apache-2.0
Xuvasi/asset-interop
Bacnet to AIP Gateway Example Code/src/com/serotonin/bacnet4j/type/constructed/LogData.java
6832
/* * ============================================================================ * GNU General Public License * ============================================================================ * * Copyright (C) 2006-2011 Serotonin Software Technologies Inc. http://serotoninsoftware.com * @author Matthew Lohbihler * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * When signing a commercial license with Serotonin Software Technologies Inc., * the following extension to GPL is made. A special exception to the GPL is * included to allow you to distribute a combined work that includes BAcnet4J * without being obliged to provide the source code for any proprietary components. */ package com.serotonin.bacnet4j.type.constructed; import java.util.ArrayList; import java.util.List; import com.serotonin.bacnet4j.exception.BACnetException; import com.serotonin.bacnet4j.type.Encodable; import com.serotonin.bacnet4j.type.primitive.BitString; import com.serotonin.bacnet4j.type.primitive.Boolean; import com.serotonin.bacnet4j.type.primitive.Enumerated; import com.serotonin.bacnet4j.type.primitive.Null; import com.serotonin.bacnet4j.type.primitive.Real; import com.serotonin.bacnet4j.type.primitive.SignedInteger; import com.serotonin.bacnet4j.type.primitive.UnsignedInteger; import org.free.bacnet4j.util.ByteQueue; public class LogData extends BaseType { private static final long serialVersionUID = -1976023645603339559L; public static Choice booleanElement(Boolean datum) { return new Choice(0, datum); } public static Choice realElement(Real datum) { return new Choice(1, datum); } public static Choice enumElement(Enumerated datum) { return new Choice(2, datum); } public static Choice unsignedElement(UnsignedInteger datum) { return new Choice(3, datum); } public static Choice signedElement(SignedInteger datum) { return new Choice(4, datum); } public static Choice bitstringElement(BitString datum) { return new Choice(5, datum); } public static Choice nullElement(Null datum) { return new Choice(6, datum); } public static Choice failureElement(BACnetError datum) { return new Choice(7, datum); } public static Choice anyElement(BaseType datum) { return new Choice(8, datum); } private static List<Class<? extends Encodable>> classes; static { classes = new ArrayList<Class<? 
extends Encodable>>(); classes.add(Boolean.class); classes.add(Real.class); classes.add(Enumerated.class); classes.add(UnsignedInteger.class); classes.add(SignedInteger.class); classes.add(BitString.class); classes.add(Null.class); classes.add(BACnetError.class); classes.add(Encodable.class); } private final LogStatus logStatus; private final SequenceOf<Choice> logData; private final Real timeChange; public LogData(LogStatus logStatus, SequenceOf<Choice> logData, Real timeChange) { this.logStatus = logStatus; this.logData = logData; this.timeChange = timeChange; } @Override public void write(ByteQueue queue) { write(queue, logStatus, 0); write(queue, logData, 1); write(queue, timeChange, 2); } public LogStatus getLogStatus() { return logStatus; } public SequenceOf<Choice> getLogData() { return logData; } public Real getTimeChange() { return timeChange; } public int getChoiceType(int indexBase1) { return logData.get(indexBase1).getContextId(); } public Boolean getBoolean(int indexBase1) { return (Boolean) logData.get(indexBase1).getDatum(); } public Real getReal(int indexBase1) { return (Real) logData.get(indexBase1).getDatum(); } public Enumerated getEnumerated(int indexBase1) { return (Enumerated) logData.get(indexBase1).getDatum(); } public UnsignedInteger getUnsignedInteger(int indexBase1) { return (UnsignedInteger) logData.get(indexBase1).getDatum(); } public SignedInteger getSignedInteger(int indexBase1) { return (SignedInteger) logData.get(indexBase1).getDatum(); } public BitString getBitString(int indexBase1) { return (BitString) logData.get(indexBase1).getDatum(); } public Null getNull(int indexBase1) { return (Null) logData.get(indexBase1).getDatum(); } public BACnetError getBACnetError(int indexBase1) { return (BACnetError) logData.get(indexBase1).getDatum(); } public BaseType getAny(int indexBase1) { return (BaseType) logData.get(indexBase1).getDatum(); } public LogData(ByteQueue queue) throws BACnetException { logStatus = read(queue, LogStatus.class, 0); logData = readSequenceOfChoice(queue, classes, 1); timeChange = read(queue, Real.class, 2); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((logData == null) ? 0 : logData.hashCode()); result = prime * result + ((logStatus == null) ? 0 : logStatus.hashCode()); result = prime * result + ((timeChange == null) ? 0 : timeChange.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; final LogData other = (LogData) obj; if (logData == null) { if (other.logData != null) return false; } else if (!logData.equals(other.logData)) return false; if (logStatus == null) { if (other.logStatus != null) return false; } else if (!logStatus.equals(other.logStatus)) return false; if (timeChange == null) { if (other.timeChange != null) return false; } else if (!timeChange.equals(other.timeChange)) return false; return true; } }
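/*
 * Note: the order of the static 'classes' list above must stay aligned with
 * the context tags used by the factory methods (0 = Boolean, 1 = Real, ...,
 * 7 = BACnetError, 8 = any), since readSequenceOfChoice resolves each
 * encoded choice's context id to an index into that list when decoding.
 */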
apache-2.0
Xaerxess/guava
guava/src/com/google/common/math/LongMath.java
39444
/* * Copyright (C) 2011 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.google.common.math; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.math.MathPreconditions.checkNoOverflow; import static com.google.common.math.MathPreconditions.checkNonNegative; import static com.google.common.math.MathPreconditions.checkPositive; import static com.google.common.math.MathPreconditions.checkRoundingUnnecessary; import static java.lang.Math.abs; import static java.lang.Math.min; import static java.math.RoundingMode.HALF_EVEN; import static java.math.RoundingMode.HALF_UP; import com.google.common.annotations.Beta; import com.google.common.annotations.GwtCompatible; import com.google.common.annotations.GwtIncompatible; import com.google.common.annotations.VisibleForTesting; import com.google.common.primitives.UnsignedLongs; import java.math.BigInteger; import java.math.RoundingMode; /** * A class for arithmetic on values of type {@code long}. Where possible, methods are defined and * named analogously to their {@code BigInteger} counterparts. * * <p>The implementations of many methods in this class are based on material from Henry S. Warren, * Jr.'s <i>Hacker's Delight</i>, (Addison Wesley, 2002). * * <p>Similar functionality for {@code int} and for {@link BigInteger} can be found in {@link * IntMath} and {@link BigIntegerMath} respectively. For other common operations on {@code long} * values, see {@link com.google.common.primitives.Longs}. * * @author Louis Wasserman * @since 11.0 */ @GwtCompatible(emulated = true) public final class LongMath { // NOTE: Whenever both tests are cheap and functional, it's faster to use &, | instead of &&, || @VisibleForTesting static final long MAX_SIGNED_POWER_OF_TWO = 1L << (Long.SIZE - 2); /** * Returns the smallest power of two greater than or equal to {@code x}. This is equivalent to * {@code checkedPow(2, log2(x, CEILING))}. * * @throws IllegalArgumentException if {@code x <= 0} * @throws ArithmeticException if the next-higher power of two is not representable as a {@code * long}, i.e. when {@code x > 2^62} * @since 20.0 */ @Beta public static long ceilingPowerOfTwo(long x) { checkPositive("x", x); if (x > MAX_SIGNED_POWER_OF_TWO) { throw new ArithmeticException("ceilingPowerOfTwo(" + x + ") is not representable as a long"); } return 1L << -Long.numberOfLeadingZeros(x - 1); } /** * Returns the largest power of two less than or equal to {@code x}. This is equivalent to {@code * checkedPow(2, log2(x, FLOOR))}. * * @throws IllegalArgumentException if {@code x <= 0} * @since 20.0 */ @Beta public static long floorPowerOfTwo(long x) { checkPositive("x", x); // Long.highestOneBit was buggy on GWT. We've fixed it, but I'm not certain when the fix will // be released. return 1L << ((Long.SIZE - 1) - Long.numberOfLeadingZeros(x)); } /** * Returns {@code true} if {@code x} represents a power of two. 
* * <p>This differs from {@code Long.bitCount(x) == 1}, because {@code * Long.bitCount(Long.MIN_VALUE) == 1}, but {@link Long#MIN_VALUE} is not a power of two. */ public static boolean isPowerOfTwo(long x) { return x > 0 & (x & (x - 1)) == 0; } /** * Returns 1 if {@code x < y} as unsigned longs, and 0 otherwise. Assumes that x - y fits into a * signed long. The implementation is branch-free, and benchmarks suggest it is measurably faster * than the straightforward ternary expression. */ @VisibleForTesting static int lessThanBranchFree(long x, long y) { // Returns the sign bit of x - y. return (int) (~~(x - y) >>> (Long.SIZE - 1)); } /** * Returns the base-2 logarithm of {@code x}, rounded according to the specified rounding mode. * * @throws IllegalArgumentException if {@code x <= 0} * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x} * is not a power of two */ @SuppressWarnings("fallthrough") // TODO(kevinb): remove after this warning is disabled globally public static int log2(long x, RoundingMode mode) { checkPositive("x", x); switch (mode) { case UNNECESSARY: checkRoundingUnnecessary(isPowerOfTwo(x)); // fall through case DOWN: case FLOOR: return (Long.SIZE - 1) - Long.numberOfLeadingZeros(x); case UP: case CEILING: return Long.SIZE - Long.numberOfLeadingZeros(x - 1); case HALF_DOWN: case HALF_UP: case HALF_EVEN: // Since sqrt(2) is irrational, log2(x) - logFloor cannot be exactly 0.5 int leadingZeros = Long.numberOfLeadingZeros(x); long cmp = MAX_POWER_OF_SQRT2_UNSIGNED >>> leadingZeros; // floor(2^(logFloor + 0.5)) int logFloor = (Long.SIZE - 1) - leadingZeros; return logFloor + lessThanBranchFree(cmp, x); default: throw new AssertionError("impossible"); } } /** The biggest half power of two that fits into an unsigned long */ @VisibleForTesting static final long MAX_POWER_OF_SQRT2_UNSIGNED = 0xB504F333F9DE6484L; /** * Returns the base-10 logarithm of {@code x}, rounded according to the specified rounding mode. * * @throws IllegalArgumentException if {@code x <= 0} * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x} * is not a power of ten */ @GwtIncompatible // TODO @SuppressWarnings("fallthrough") // TODO(kevinb): remove after this warning is disabled globally public static int log10(long x, RoundingMode mode) { checkPositive("x", x); int logFloor = log10Floor(x); long floorPow = powersOf10[logFloor]; switch (mode) { case UNNECESSARY: checkRoundingUnnecessary(x == floorPow); // fall through case FLOOR: case DOWN: return logFloor; case CEILING: case UP: return logFloor + lessThanBranchFree(floorPow, x); case HALF_DOWN: case HALF_UP: case HALF_EVEN: // sqrt(10) is irrational, so log10(x)-logFloor is never exactly 0.5 return logFloor + lessThanBranchFree(halfPowersOf10[logFloor], x); default: throw new AssertionError(); } } @GwtIncompatible // TODO static int log10Floor(long x) { /* * Based on Hacker's Delight Fig. 11-5, the two-table-lookup, branch-free implementation. * * The key idea is that based on the number of leading zeros (equivalently, floor(log2(x))), we * can narrow the possible floor(log10(x)) values to two. For example, if floor(log2(x)) is 6, * then 64 <= x < 128, so floor(log10(x)) is either 1 or 2. */ int y = maxLog10ForLeadingZeros[Long.numberOfLeadingZeros(x)]; /* * y is the higher of the two possible values of floor(log10(x)). If x < 10^y, then we want the * lower of the two possible values, or y - 1, otherwise, we want y. 
*/ return y - lessThanBranchFree(x, powersOf10[y]); } // maxLog10ForLeadingZeros[i] == floor(log10(2^(Long.SIZE - i))) @VisibleForTesting static final byte[] maxLog10ForLeadingZeros = { 19, 18, 18, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 12, 12, 11, 11, 11, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0 }; @GwtIncompatible // TODO @VisibleForTesting static final long[] powersOf10 = { 1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L, 10000000000L, 100000000000L, 1000000000000L, 10000000000000L, 100000000000000L, 1000000000000000L, 10000000000000000L, 100000000000000000L, 1000000000000000000L }; // halfPowersOf10[i] = largest long less than 10^(i + 0.5) @GwtIncompatible // TODO @VisibleForTesting static final long[] halfPowersOf10 = { 3L, 31L, 316L, 3162L, 31622L, 316227L, 3162277L, 31622776L, 316227766L, 3162277660L, 31622776601L, 316227766016L, 3162277660168L, 31622776601683L, 316227766016837L, 3162277660168379L, 31622776601683793L, 316227766016837933L, 3162277660168379331L }; /** * Returns {@code b} to the {@code k}th power. Even if the result overflows, it will be equal to * {@code BigInteger.valueOf(b).pow(k).longValue()}. This implementation runs in {@code O(log k)} * time. * * @throws IllegalArgumentException if {@code k < 0} */ @GwtIncompatible // TODO public static long pow(long b, int k) { checkNonNegative("exponent", k); if (-2 <= b && b <= 2) { switch ((int) b) { case 0: return (k == 0) ? 1 : 0; case 1: return 1; case (-1): return ((k & 1) == 0) ? 1 : -1; case 2: return (k < Long.SIZE) ? 1L << k : 0; case (-2): if (k < Long.SIZE) { return ((k & 1) == 0) ? 1L << k : -(1L << k); } else { return 0; } default: throw new AssertionError(); } } for (long accum = 1; ; k >>= 1) { switch (k) { case 0: return accum; case 1: return accum * b; default: accum *= ((k & 1) == 0) ? 1 : b; b *= b; } } } /** * Returns the square root of {@code x}, rounded with the specified rounding mode. * * @throws IllegalArgumentException if {@code x < 0} * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code * sqrt(x)} is not an integer */ @GwtIncompatible // TODO @SuppressWarnings("fallthrough") public static long sqrt(long x, RoundingMode mode) { checkNonNegative("x", x); if (fitsInInt(x)) { return IntMath.sqrt((int) x, mode); } /* * Let k be the true value of floor(sqrt(x)), so that * * k * k <= x < (k + 1) * (k + 1) * (double) (k * k) <= (double) x <= (double) ((k + 1) * (k + 1)) * since casting to double is nondecreasing. * Note that the right-hand inequality is no longer strict. * Math.sqrt(k * k) <= Math.sqrt(x) <= Math.sqrt((k + 1) * (k + 1)) * since Math.sqrt is monotonic. * (long) Math.sqrt(k * k) <= (long) Math.sqrt(x) <= (long) Math.sqrt((k + 1) * (k + 1)) * since casting to long is monotonic * k <= (long) Math.sqrt(x) <= k + 1 * since (long) Math.sqrt(k * k) == k, as checked exhaustively in * {@link LongMathTest#testSqrtOfPerfectSquareAsDoubleIsPerfect} */ long guess = (long) Math.sqrt(x); // Note: guess is always <= FLOOR_SQRT_MAX_LONG. long guessSquared = guess * guess; // Note (2013-2-26): benchmarks indicate that, inscrutably enough, using if statements is // faster here than using lessThanBranchFree. 
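    /*
     * guess is within 1 of the true floor(sqrt(x)) (see the analysis above),
     * so each rounding mode only needs to compare guessSquared against x and
     * nudge the result by at most one.
     */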
switch (mode) { case UNNECESSARY: checkRoundingUnnecessary(guessSquared == x); return guess; case FLOOR: case DOWN: if (x < guessSquared) { return guess - 1; } return guess; case CEILING: case UP: if (x > guessSquared) { return guess + 1; } return guess; case HALF_DOWN: case HALF_UP: case HALF_EVEN: long sqrtFloor = guess - ((x < guessSquared) ? 1 : 0); long halfSquare = sqrtFloor * sqrtFloor + sqrtFloor; /* * We wish to test whether or not x <= (sqrtFloor + 0.5)^2 = halfSquare + 0.25. Since both x * and halfSquare are integers, this is equivalent to testing whether or not x <= * halfSquare. (We have to deal with overflow, though.) * * If we treat halfSquare as an unsigned long, we know that * sqrtFloor^2 <= x < (sqrtFloor + 1)^2 * halfSquare - sqrtFloor <= x < halfSquare + sqrtFloor + 1 * so |x - halfSquare| <= sqrtFloor. Therefore, it's safe to treat x - halfSquare as a * signed long, so lessThanBranchFree is safe for use. */ return sqrtFloor + lessThanBranchFree(halfSquare, x); default: throw new AssertionError(); } } /** * Returns the result of dividing {@code p} by {@code q}, rounding using the specified {@code * RoundingMode}. * * @throws ArithmeticException if {@code q == 0}, or if {@code mode == UNNECESSARY} and {@code a} * is not an integer multiple of {@code b} */ @GwtIncompatible // TODO @SuppressWarnings("fallthrough") public static long divide(long p, long q, RoundingMode mode) { checkNotNull(mode); long div = p / q; // throws if q == 0 long rem = p - q * div; // equals p % q if (rem == 0) { return div; } /* * Normal Java division rounds towards 0, consistently with RoundingMode.DOWN. We just have to * deal with the cases where rounding towards 0 is wrong, which typically depends on the sign of * p / q. * * signum is 1 if p and q are both nonnegative or both negative, and -1 otherwise. */ int signum = 1 | (int) ((p ^ q) >> (Long.SIZE - 1)); boolean increment; switch (mode) { case UNNECESSARY: checkRoundingUnnecessary(rem == 0); // fall through case DOWN: increment = false; break; case UP: increment = true; break; case CEILING: increment = signum > 0; break; case FLOOR: increment = signum < 0; break; case HALF_EVEN: case HALF_DOWN: case HALF_UP: long absRem = abs(rem); long cmpRemToHalfDivisor = absRem - (abs(q) - absRem); // subtracting two nonnegative longs can't overflow // cmpRemToHalfDivisor has the same sign as compare(abs(rem), abs(q) / 2). if (cmpRemToHalfDivisor == 0) { // exactly on the half mark increment = (mode == HALF_UP | (mode == HALF_EVEN & (div & 1) != 0)); } else { increment = cmpRemToHalfDivisor > 0; // closer to the UP value } break; default: throw new AssertionError(); } return increment ? div + signum : div; } /** * Returns {@code x mod m}, a non-negative value less than {@code m}. This differs from {@code x % * m}, which might be negative. * * <p>For example: * * <pre>{@code * mod(7, 4) == 3 * mod(-7, 4) == 1 * mod(-1, 4) == 3 * mod(-8, 4) == 0 * mod(8, 4) == 0 * }</pre> * * @throws ArithmeticException if {@code m <= 0} * @see <a href="http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.17.3"> * Remainder Operator</a> */ @GwtIncompatible // TODO public static int mod(long x, int m) { // Cast is safe because the result is guaranteed in the range [0, m) return (int) mod(x, (long) m); } /** * Returns {@code x mod m}, a non-negative value less than {@code m}. This differs from {@code x % * m}, which might be negative. 
* * <p>For example: * * <pre>{@code * mod(7, 4) == 3 * mod(-7, 4) == 1 * mod(-1, 4) == 3 * mod(-8, 4) == 0 * mod(8, 4) == 0 * }</pre> * * @throws ArithmeticException if {@code m <= 0} * @see <a href="http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.17.3"> * Remainder Operator</a> */ @GwtIncompatible // TODO public static long mod(long x, long m) { if (m <= 0) { throw new ArithmeticException("Modulus must be positive"); } long result = x % m; return (result >= 0) ? result : result + m; } /** * Returns the greatest common divisor of {@code a, b}. Returns {@code 0} if {@code a == 0 && b == * 0}. * * @throws IllegalArgumentException if {@code a < 0} or {@code b < 0} */ public static long gcd(long a, long b) { /* * The reason we require both arguments to be >= 0 is because otherwise, what do you return on * gcd(0, Long.MIN_VALUE)? BigInteger.gcd would return positive 2^63, but positive 2^63 isn't an * int. */ checkNonNegative("a", a); checkNonNegative("b", b); if (a == 0) { // 0 % b == 0, so b divides a, but the converse doesn't hold. // BigInteger.gcd is consistent with this decision. return b; } else if (b == 0) { return a; // similar logic } /* * Uses the binary GCD algorithm; see http://en.wikipedia.org/wiki/Binary_GCD_algorithm. This is * >60% faster than the Euclidean algorithm in benchmarks. */ int aTwos = Long.numberOfTrailingZeros(a); a >>= aTwos; // divide out all 2s int bTwos = Long.numberOfTrailingZeros(b); b >>= bTwos; // divide out all 2s while (a != b) { // both a, b are odd // The key to the binary GCD algorithm is as follows: // Both a and b are odd. Assume a > b; then gcd(a - b, b) = gcd(a, b). // But in gcd(a - b, b), a - b is even and b is odd, so we can divide out powers of two. // We bend over backwards to avoid branching, adapting a technique from // http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax long delta = a - b; // can't overflow, since a and b are nonnegative long minDeltaOrZero = delta & (delta >> (Long.SIZE - 1)); // equivalent to Math.min(delta, 0) a = delta - minDeltaOrZero - minDeltaOrZero; // sets a to Math.abs(a - b) // a is now nonnegative and even b += minDeltaOrZero; // sets b to min(old a, b) a >>= Long.numberOfTrailingZeros(a); // divide out all 2s, since 2 doesn't divide b } return a << min(aTwos, bTwos); } /** * Returns the sum of {@code a} and {@code b}, provided it does not overflow. * * @throws ArithmeticException if {@code a + b} overflows in signed {@code long} arithmetic */ @GwtIncompatible // TODO public static long checkedAdd(long a, long b) { long result = a + b; checkNoOverflow((a ^ b) < 0 | (a ^ result) >= 0); return result; } /** * Returns the difference of {@code a} and {@code b}, provided it does not overflow. * * @throws ArithmeticException if {@code a - b} overflows in signed {@code long} arithmetic */ @GwtIncompatible // TODO public static long checkedSubtract(long a, long b) { long result = a - b; checkNoOverflow((a ^ b) >= 0 | (a ^ result) >= 0); return result; } /** * Returns the product of {@code a} and {@code b}, provided it does not overflow. * * @throws ArithmeticException if {@code a * b} overflows in signed {@code long} arithmetic */ public static long checkedMultiply(long a, long b) { // Hacker's Delight, Section 2-12 int leadingZeros = Long.numberOfLeadingZeros(a) + Long.numberOfLeadingZeros(~a) + Long.numberOfLeadingZeros(b) + Long.numberOfLeadingZeros(~b); /* * If leadingZeros > Long.SIZE + 1 it's definitely fine, if it's < Long.SIZE it's definitely * bad. 
We do the leadingZeros check to avoid the division below if at all possible. * * Otherwise, if b == Long.MIN_VALUE, then the only allowed values of a are 0 and 1. We take * care of all a < 0 with their own check, because in particular, the case a == -1 will * incorrectly pass the division check below. * * In all other cases, we check that either a is 0 or the result is consistent with division. */ if (leadingZeros > Long.SIZE + 1) { return a * b; } checkNoOverflow(leadingZeros >= Long.SIZE); checkNoOverflow(a >= 0 | b != Long.MIN_VALUE); long result = a * b; checkNoOverflow(a == 0 || result / a == b); return result; } /** * Returns the {@code b} to the {@code k}th power, provided it does not overflow. * * @throws ArithmeticException if {@code b} to the {@code k}th power overflows in signed {@code * long} arithmetic */ @GwtIncompatible // TODO public static long checkedPow(long b, int k) { checkNonNegative("exponent", k); if (b >= -2 & b <= 2) { switch ((int) b) { case 0: return (k == 0) ? 1 : 0; case 1: return 1; case (-1): return ((k & 1) == 0) ? 1 : -1; case 2: checkNoOverflow(k < Long.SIZE - 1); return 1L << k; case (-2): checkNoOverflow(k < Long.SIZE); return ((k & 1) == 0) ? (1L << k) : (-1L << k); default: throw new AssertionError(); } } long accum = 1; while (true) { switch (k) { case 0: return accum; case 1: return checkedMultiply(accum, b); default: if ((k & 1) != 0) { accum = checkedMultiply(accum, b); } k >>= 1; if (k > 0) { checkNoOverflow(-FLOOR_SQRT_MAX_LONG <= b && b <= FLOOR_SQRT_MAX_LONG); b *= b; } } } } /** * Returns the sum of {@code a} and {@code b} unless it would overflow or underflow in which case * {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively. * * @since 20.0 */ @Beta public static long saturatedAdd(long a, long b) { long naiveSum = a + b; if ((a ^ b) < 0 | (a ^ naiveSum) >= 0) { // If a and b have different signs or a has the same sign as the result then there was no // overflow, return. return naiveSum; } // we did over/under flow, if the sign is negative we should return MAX otherwise MIN return Long.MAX_VALUE + ((naiveSum >>> (Long.SIZE - 1)) ^ 1); } /** * Returns the difference of {@code a} and {@code b} unless it would overflow or underflow in * which case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively. * * @since 20.0 */ @Beta public static long saturatedSubtract(long a, long b) { long naiveDifference = a - b; if ((a ^ b) >= 0 | (a ^ naiveDifference) >= 0) { // If a and b have the same signs or a has the same sign as the result then there was no // overflow, return. return naiveDifference; } // we did over/under flow return Long.MAX_VALUE + ((naiveDifference >>> (Long.SIZE - 1)) ^ 1); } /** * Returns the product of {@code a} and {@code b} unless it would overflow or underflow in which * case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively. 
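   * <p>For example, {@code saturatedMultiply(Long.MAX_VALUE, 2)} returns {@code Long.MAX_VALUE}
   * and {@code saturatedMultiply(Long.MIN_VALUE, 2)} returns {@code Long.MIN_VALUE}.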
* * @since 20.0 */ @Beta public static long saturatedMultiply(long a, long b) { // see checkedMultiply for explanation int leadingZeros = Long.numberOfLeadingZeros(a) + Long.numberOfLeadingZeros(~a) + Long.numberOfLeadingZeros(b) + Long.numberOfLeadingZeros(~b); if (leadingZeros > Long.SIZE + 1) { return a * b; } // the return value if we will overflow (which we calculate by overflowing a long :) ) long limit = Long.MAX_VALUE + ((a ^ b) >>> (Long.SIZE - 1)); if (leadingZeros < Long.SIZE | (a < 0 & b == Long.MIN_VALUE)) { // overflow return limit; } long result = a * b; if (a == 0 || result / a == b) { return result; } return limit; } /** * Returns the {@code b} to the {@code k}th power, unless it would overflow or underflow in which * case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively. * * @since 20.0 */ @Beta public static long saturatedPow(long b, int k) { checkNonNegative("exponent", k); if (b >= -2 & b <= 2) { switch ((int) b) { case 0: return (k == 0) ? 1 : 0; case 1: return 1; case (-1): return ((k & 1) == 0) ? 1 : -1; case 2: if (k >= Long.SIZE - 1) { return Long.MAX_VALUE; } return 1L << k; case (-2): if (k >= Long.SIZE) { return Long.MAX_VALUE + (k & 1); } return ((k & 1) == 0) ? (1L << k) : (-1L << k); default: throw new AssertionError(); } } long accum = 1; // if b is negative and k is odd then the limit is MIN otherwise the limit is MAX long limit = Long.MAX_VALUE + ((b >>> Long.SIZE - 1) & (k & 1)); while (true) { switch (k) { case 0: return accum; case 1: return saturatedMultiply(accum, b); default: if ((k & 1) != 0) { accum = saturatedMultiply(accum, b); } k >>= 1; if (k > 0) { if (-FLOOR_SQRT_MAX_LONG > b | b > FLOOR_SQRT_MAX_LONG) { return limit; } b *= b; } } } } @VisibleForTesting static final long FLOOR_SQRT_MAX_LONG = 3037000499L; /** * Returns {@code n!}, that is, the product of the first {@code n} positive integers, {@code 1} if * {@code n == 0}, or {@link Long#MAX_VALUE} if the result does not fit in a {@code long}. * * @throws IllegalArgumentException if {@code n < 0} */ @GwtIncompatible // TODO public static long factorial(int n) { checkNonNegative("n", n); return (n < factorials.length) ? factorials[n] : Long.MAX_VALUE; } static final long[] factorials = { 1L, 1L, 1L * 2, 1L * 2 * 3, 1L * 2 * 3 * 4, 1L * 2 * 3 * 4 * 5, 1L * 2 * 3 * 4 * 5 * 6, 1L * 2 * 3 * 4 * 5 * 6 * 7, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19, 1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19 * 20 }; /** * Returns {@code n} choose {@code k}, also known as the binomial coefficient of {@code n} and * {@code k}, or {@link Long#MAX_VALUE} if the result does not fit in a {@code long}. 
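   * <p>For example, {@code binomial(4, 2)} returns {@code 6}: there are six ways to choose two of
   * four items.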
* * @throws IllegalArgumentException if {@code n < 0}, {@code k < 0}, or {@code k > n} */ public static long binomial(int n, int k) { checkNonNegative("n", n); checkNonNegative("k", k); checkArgument(k <= n, "k (%s) > n (%s)", k, n); if (k > (n >> 1)) { k = n - k; } switch (k) { case 0: return 1; case 1: return n; default: if (n < factorials.length) { return factorials[n] / (factorials[k] * factorials[n - k]); } else if (k >= biggestBinomials.length || n > biggestBinomials[k]) { return Long.MAX_VALUE; } else if (k < biggestSimpleBinomials.length && n <= biggestSimpleBinomials[k]) { // guaranteed not to overflow long result = n--; for (int i = 2; i <= k; n--, i++) { result *= n; result /= i; } return result; } else { int nBits = LongMath.log2(n, RoundingMode.CEILING); long result = 1; long numerator = n--; long denominator = 1; int numeratorBits = nBits; // This is an upper bound on log2(numerator, ceiling). /* * We want to do this in long math for speed, but want to avoid overflow. We adapt the * technique previously used by BigIntegerMath: maintain separate numerator and * denominator accumulators, multiplying the fraction into result when near overflow. */ for (int i = 2; i <= k; i++, n--) { if (numeratorBits + nBits < Long.SIZE - 1) { // It's definitely safe to multiply into numerator and denominator. numerator *= n; denominator *= i; numeratorBits += nBits; } else { // It might not be safe to multiply into numerator and denominator, // so multiply (numerator / denominator) into result. result = multiplyFraction(result, numerator, denominator); numerator = n; denominator = i; numeratorBits = nBits; } } return multiplyFraction(result, numerator, denominator); } } } /** Returns (x * numerator / denominator), which is assumed to come out to an integral value. */ static long multiplyFraction(long x, long numerator, long denominator) { if (x == 1) { return numerator / denominator; } long commonDivisor = gcd(x, denominator); x /= commonDivisor; denominator /= commonDivisor; // We know gcd(x, denominator) = 1, and x * numerator / denominator is exact, // so denominator must be a divisor of numerator. return x * (numerator / denominator); } /* * binomial(biggestBinomials[k], k) fits in a long, but not binomial(biggestBinomials[k] + 1, k). */ static final int[] biggestBinomials = { Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, 3810779, 121977, 16175, 4337, 1733, 887, 534, 361, 265, 206, 169, 143, 125, 111, 101, 94, 88, 83, 79, 76, 74, 72, 70, 69, 68, 67, 67, 66, 66, 66, 66 }; /* * binomial(biggestSimpleBinomials[k], k) doesn't need to use the slower GCD-based impl, but * binomial(biggestSimpleBinomials[k] + 1, k) does. */ @VisibleForTesting static final int[] biggestSimpleBinomials = { Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, 2642246, 86251, 11724, 3218, 1313, 684, 419, 287, 214, 169, 139, 119, 105, 95, 87, 81, 76, 73, 70, 68, 66, 64, 63, 62, 62, 61, 61, 61 }; // These values were generated by using checkedMultiply to see when the simple multiply/divide // algorithm would lead to an overflow. static boolean fitsInInt(long x) { return (int) x == x; } /** * Returns the arithmetic mean of {@code x} and {@code y}, rounded toward negative infinity. This * method is resilient to overflow. * * @since 14.0 */ public static long mean(long x, long y) { // Efficient method for computing the arithmetic mean. // The alternative (x + y) / 2 fails for large values. // The alternative (x + y) >>> 1 fails for negative values. 
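    // (x & y) is the part the two values share, counted once in full; ((x ^ y) >> 1) adds half of
    // the bits where they differ. The arithmetic shift preserves the sign bit, which is what makes
    // the result round toward negative infinity rather than toward zero.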
    return (x & y) + ((x ^ y) >> 1);
  }

  /*
   * This bitmask is used as an optimization for cheaply testing for divisibility by 2, 3, or 5.
   * Each bit is set to 1 for all remainders that indicate divisibility by 2, 3, or 5, so
   * 1, 7, 11, 13, 17, 19, 23, 29 are set to 0. 30 and up don't matter because they won't be hit.
   */
  private static final int SIEVE_30 =
      ~((1 << 1) | (1 << 7) | (1 << 11) | (1 << 13) | (1 << 17) | (1 << 19) | (1 << 23)
          | (1 << 29));

  /**
   * Returns {@code true} if {@code n} is a <a
   * href="http://mathworld.wolfram.com/PrimeNumber.html">prime number</a>: an integer <i>greater
   * than one</i> that cannot be factored into a product of <i>smaller</i> positive integers.
   * Returns {@code false} if {@code n} is zero, one, or a composite number (one which <i>can</i> be
   * factored into smaller positive integers).
   *
   * <p>To test larger numbers, use {@link BigInteger#isProbablePrime}.
   *
   * @throws IllegalArgumentException if {@code n} is negative
   * @since 20.0
   */
  @GwtIncompatible // TODO
  @Beta
  public static boolean isPrime(long n) {
    if (n < 2) {
      checkNonNegative("n", n);
      return false;
    }
    if (n == 2 || n == 3 || n == 5 || n == 7 || n == 11 || n == 13) {
      return true;
    }

    if ((SIEVE_30 & (1 << (n % 30))) != 0) {
      return false;
    }
    if (n % 7 == 0 || n % 11 == 0 || n % 13 == 0) {
      return false;
    }
    if (n < 17 * 17) {
      return true;
    }

    for (long[] baseSet : millerRabinBaseSets) {
      if (n <= baseSet[0]) {
        for (int i = 1; i < baseSet.length; i++) {
          if (!MillerRabinTester.test(baseSet[i], n)) {
            return false;
          }
        }
        return true;
      }
    }
    throw new AssertionError();
  }

  /*
   * If n <= millerRabinBases[i][0], then testing n against bases millerRabinBases[i][1..] suffices
   * to prove its primality. Values from miller-rabin.appspot.com.
   *
   * NOTE: We could get slightly better bases that would be treated as unsigned, but benchmarks
   * showed negligible performance improvements.
   */
  private static final long[][] millerRabinBaseSets = {
    {291830, 126401071349994536L},
    {885594168, 725270293939359937L, 3569819667048198375L},
    {273919523040L, 15, 7363882082L, 992620450144556L},
    {47636622961200L, 2, 2570940, 211991001, 3749873356L},
    {
      7999252175582850L, 2, 4130806001517L, 149795463772692060L, 186635894390467037L,
      3967304179347715805L
    },
    {
      585226005592931976L, 2, 123635709730000L, 9233062284813009L, 43835965440333360L,
      761179012939631437L, 1263739024124850375L
    },
    {Long.MAX_VALUE, 2, 325, 9375, 28178, 450775, 9780504, 1795265022}
  };

  private enum MillerRabinTester {
    /** Works for inputs ≤ FLOOR_SQRT_MAX_LONG. */
    SMALL {
      @Override
      long mulMod(long a, long b, long m) {
        /*
         * NOTE(lowasser, 2015-Feb-12): Benchmarks suggest that changing this to
         * UnsignedLongs.remainder and increasing the threshold to 2^32 doesn't pay for itself, and
         * adding another enum constant hurts performance further -- I suspect because bimorphic
         * implementation is a sweet spot for the JVM.
         */
        return (a * b) % m;
      }

      @Override
      long squareMod(long a, long m) {
        return (a * a) % m;
      }
    },
    /** Works for all nonnegative signed longs. */
    LARGE {
      /** Returns (a + b) mod m. Precondition: {@code 0 <= a}, {@code b < m < 2^63}. */
      private long plusMod(long a, long b, long m) {
        return (a >= m - b) ? (a + b - m) : (a + b);
      }

      /** Returns (a * 2^32) mod m. a may be any unsigned long. */
      private long times2ToThe32Mod(long a, long m) {
        int remainingPowersOf2 = 32;
        do {
          int shift = Math.min(remainingPowersOf2, Long.numberOfLeadingZeros(a));
          // shift is either the number of powers of 2 left to multiply a by, or the biggest shift
          // possible while keeping a in an unsigned long.
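          // Apply those doublings by shifting (no bits can be lost, by the choice of shift above)
          // and reduce mod m before the next round.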
a = UnsignedLongs.remainder(a << shift, m); remainingPowersOf2 -= shift; } while (remainingPowersOf2 > 0); return a; } @Override long mulMod(long a, long b, long m) { long aHi = a >>> 32; // < 2^31 long bHi = b >>> 32; // < 2^31 long aLo = a & 0xFFFFFFFFL; // < 2^32 long bLo = b & 0xFFFFFFFFL; // < 2^32 /* * a * b == aHi * bHi * 2^64 + (aHi * bLo + aLo * bHi) * 2^32 + aLo * bLo. * == (aHi * bHi * 2^32 + aHi * bLo + aLo * bHi) * 2^32 + aLo * bLo * * We carry out this computation in modular arithmetic. Since times2ToThe32Mod accepts any * unsigned long, we don't have to do a mod on every operation, only when intermediate * results can exceed 2^63. */ long result = times2ToThe32Mod(aHi * bHi /* < 2^62 */, m); // < m < 2^63 result += aHi * bLo; // aHi * bLo < 2^63, result < 2^64 if (result < 0) { result = UnsignedLongs.remainder(result, m); } // result < 2^63 again result += aLo * bHi; // aLo * bHi < 2^63, result < 2^64 result = times2ToThe32Mod(result, m); // result < m < 2^63 return plusMod(result, UnsignedLongs.remainder(aLo * bLo /* < 2^64 */, m), m); } @Override long squareMod(long a, long m) { long aHi = a >>> 32; // < 2^31 long aLo = a & 0xFFFFFFFFL; // < 2^32 /* * a^2 == aHi^2 * 2^64 + aHi * aLo * 2^33 + aLo^2 * == (aHi^2 * 2^32 + aHi * aLo * 2) * 2^32 + aLo^2 * We carry out this computation in modular arithmetic. Since times2ToThe32Mod accepts any * unsigned long, we don't have to do a mod on every operation, only when intermediate * results can exceed 2^63. */ long result = times2ToThe32Mod(aHi * aHi /* < 2^62 */, m); // < m < 2^63 long hiLo = aHi * aLo * 2; if (hiLo < 0) { hiLo = UnsignedLongs.remainder(hiLo, m); } // hiLo < 2^63 result += hiLo; // result < 2^64 result = times2ToThe32Mod(result, m); // result < m < 2^63 return plusMod(result, UnsignedLongs.remainder(aLo * aLo /* < 2^64 */, m), m); } }; static boolean test(long base, long n) { // Since base will be considered % n, it's okay if base > FLOOR_SQRT_MAX_LONG, // so long as n <= FLOOR_SQRT_MAX_LONG. return ((n <= FLOOR_SQRT_MAX_LONG) ? SMALL : LARGE).testWitness(base, n); } /** Returns a * b mod m. */ abstract long mulMod(long a, long b, long m); /** Returns a^2 mod m. */ abstract long squareMod(long a, long m); /** Returns a^p mod m. */ private long powMod(long a, long p, long m) { long res = 1; for (; p != 0; p >>= 1) { if ((p & 1) != 0) { res = mulMod(res, a, m); } a = squareMod(a, m); } return res; } /** Returns true if n is a strong probable prime relative to the specified base. */ private boolean testWitness(long base, long n) { int r = Long.numberOfTrailingZeros(n - 1); long d = (n - 1) >> r; base %= n; if (base == 0) { return true; } // Calculate a := base^d mod n. long a = powMod(base, d, n); // n passes this test if // base^d = 1 (mod n) // or base^(2^j * d) = -1 (mod n) for some 0 <= j < r. if (a == 1) { return true; } int j = 0; while (a != n - 1) { if (++j == r) { return false; } a = squareMod(a, n); } return true; } } private LongMath() {} }
apache-2.0
amitmawkin/Hygieia
api/src/test/java/com/capitalone/dashboard/service/ScoreCriteriaSettingsServiceTest.java
3556
package com.capitalone.dashboard.service; import com.capitalone.dashboard.model.score.ScoreValueType; import com.capitalone.dashboard.model.score.settings.*; import com.capitalone.dashboard.repository.ScoreCriteriaSettingsRepository; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.Resources; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.mockito.Mockito.when; import static org.hamcrest.Matchers.is; @RunWith(MockitoJUnitRunner.class) public class ScoreCriteriaSettingsServiceTest { @Mock private ScoreCriteriaSettingsRepository scoreCriteriaSettingsRepository; @InjectMocks private ScoreCriteriaSettingsServiceImpl scoreCriteriaSettingsService; @Test public void getScoreCriteriaSettingsByType() throws Exception { ObjectMapper mapper = getObjectMapper(); byte[] content = Resources.asByteSource(Resources.getResource("score-criteria-settings.json")).read(); ScoreCriteriaSettings scoreCriteriaSettings = mapper.readValue(content, ScoreCriteriaSettings.class); when(this.scoreCriteriaSettingsRepository.findByType(ScoreValueType.DASHBOARD)).thenReturn(scoreCriteriaSettings); ScoreCriteriaSettings scoreCriteriaSettingsResult = this.scoreCriteriaSettingsService.getScoreCriteriaSettingsByType( ScoreValueType.DASHBOARD ); assertNotNull(scoreCriteriaSettingsResult); assertThat(scoreCriteriaSettingsResult.getMaxScore(), is(5)); assertThat(scoreCriteriaSettingsResult.getType(), is(ScoreValueType.DASHBOARD)); assertThat(scoreCriteriaSettingsResult.getComponentAlert().getValue(), is(0d)); assertThat(scoreCriteriaSettingsResult.getComponentAlert().getComparator(), is(ScoreThresholdSettings.ComparatorType.less_or_equal)); BuildScoreSettings buildScoreSettings = scoreCriteriaSettingsResult.getBuild(); assertNotNull(buildScoreSettings); assertThat(buildScoreSettings.getWeight(), is(25)); assertThat(buildScoreSettings.getNumberOfDays(), is(14)); ScoreTypeValue noWidgetFound = buildScoreSettings.getCriteria().getNoWidgetFound(); assertNotNull(noWidgetFound); assertThat( noWidgetFound.getScoreType(), is(ScoreType.zero_score) ); assertThat( noWidgetFound.getPropagate(), is(PropagateType.no) ); ScoreTypeValue noDataFound = buildScoreSettings.getCriteria().getNoDataFound(); assertNotNull(noDataFound); assertThat( noDataFound.getScoreType(), is(ScoreType.zero_score) ); assertThat( noDataFound.getPropagate(), is(PropagateType.no) ); ScoreComponentSettings buildStatusSettings = buildScoreSettings.getStatus(); assertNotNull(buildStatusSettings); assertThat(buildStatusSettings.getWeight(), is(50)); BuildScoreSettings.BuildDurationScoreSettings buildDurationSettings = buildScoreSettings.getDuration(); assertNotNull(buildDurationSettings); assertThat(buildDurationSettings.getWeight(), is(50)); } private ObjectMapper getObjectMapper() { ObjectMapper mapper = new ObjectMapper(); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); return mapper; } }
apache-2.0
zamattiac/SHARE
tests/share/disambiguation/test_agent.py
5002
import pytest

from share import models
from share.change import ChangeGraph
from share.models import ChangeSet

from tests.share.models.factories import NormalizedDataFactory
from tests.share.normalize.factories import *


initial = [
    Preprint(
        identifiers=[WorkIdentifier(1)],
        agent_relations=[
            Contributor(agent=Organization(1, name='American Heart Association')),
            Creator(agent=Organization(2)),
            Creator(agent=Organization(3)),
        ]
    ),
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        agent_relations=[
            Host(agent=Institution(4)),
            Funder(agent=Institution(name='NIH')),
            Publisher(agent=Institution(6)),
        ]
    ),
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        related_agents=[
            Institution(),
            Consortium(name='COS'),
            Organization(),
        ]
    ),
    Publication(
        identifiers=[WorkIdentifier(3)],
        agent_relations=[
            Creator(agent=Organization(7))
        ],
        related_works=[
            Patent(
                agent_relations=[
                    Contributor(agent=Institution(8))
                ],
                identifiers=[WorkIdentifier(4)]
            )
        ]
    ),
    Report(
        identifiers=[WorkIdentifier(4)],
        agent_relations=[
            Creator(agent=Person(name='Berkeley')),
            Publisher(agent=Institution(name='Berkeley'))
        ]
    )
]


@pytest.mark.django_db
class TestAgentDisambiguation:

    @pytest.mark.parametrize('input, model, delta', [
        # institution with same name already exists
        ([Institution(name='NIH')], models.Institution, 0),
        # same organization already exists
        ([Organization(2)], models.Organization, 0),
        # same institution already exists
        ([Publication(related_agents=[Institution(4)])], models.Institution, 0),
        # consortium with same name already exists
        ([Publication(related_agents=[Consortium(name='COS')])], models.Consortium, 0),
        # institution already exists on a related work
        ([Preprint(related_agents=[Institution(8)])], models.Institution, 0),
        # organization where the name does not exist
        ([CreativeWork(related_agents=[Organization(name='Bill Gates')])], models.Organization, 1),
        # organization and person exist with the same name
        ([Organization(name='Berkeley')], models.Organization, 0),
        # institution and person exist with the same name
        ([Institution(name='Berkeley')], models.Institution, 0),
        # person doesn't disambiguate on name
        ([Person(name='Berkeley')], models.Person, 1),
    ])
    def test_disambiguate(self, input, model, delta, Graph):
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process(disambiguate=False)
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()

        Graph.reseed()
        # Nasty hack to avoid postgres' fuzzy counting
        before = model.objects.exclude(change=None).count()

        cg = ChangeGraph(Graph(*input))
        cg.process()
        cs = ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id)
        if cs is not None:
            cs.accept()

        assert (model.objects.exclude(change=None).count() - before) == delta

    @pytest.mark.parametrize('input', [
        [Institution()],
        [Institution(name='Money Money')],
        [Organization(name='Money Makers'), Consortium()],
        [Institution(identifiers=[AgentIdentifier()])],
        [Publication(identifiers=[WorkIdentifier()], agent_relations=[Funder(agent=Organization()), Publisher(agent=Institution())])],
        [Preprint(identifiers=[WorkIdentifier()], related_agents=[Institution(), Organization()], agent_relations=[Funder(agent=Institution()), Publisher(agent=Organization())])]
    ])
    def test_reaccept(self, input, Graph):
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process()
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()

        Graph.reseed()  # Force new values to be generated

        first_cg = ChangeGraph(Graph(*input))
        first_cg.process()
        first_cs = ChangeSet.objects.from_graph(first_cg,
NormalizedDataFactory().id) assert first_cs is not None first_cs.accept() second_cg = ChangeGraph(Graph(*input)) second_cg.process() second_cs = ChangeSet.objects.from_graph(second_cg, NormalizedDataFactory().id) assert second_cs is None def test_no_changes(self, Graph): initial_cg = ChangeGraph(Graph(*initial)) initial_cg.process() ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept() Graph.discarded_ids.clear() cg = ChangeGraph(Graph(*initial)) cg.process() assert ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id) is None
apache-2.0
c3bd/cassandraxml
src/java/org/apache/cassandra/db/marshal/FileSizeType.java
2389
package org.apache.cassandra.db.marshal;

import imc.disxmldb.dom.typesystem.ValueType;

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

import org.apache.cassandra.utils.ByteBufferUtil;

public class FileSizeType extends AbstractType<String>
{
    public static int KILO_BYTES = 1024;
    public static int MEGA_BYTES = KILO_BYTES * 1024;
    public static int GIGA_BYTES = MEGA_BYTES * 1024;

    private static Map<ValueType, FileSizeType> instances = new HashMap<ValueType, FileSizeType>();

    private int scale = KILO_BYTES;
    private String suffix = "k";

    private FileSizeType(ValueType valueType)
    {
        if (valueType == ValueType.KILOSIZE || valueType == ValueType.FILESIZE)
        {
            // nothing to do: the defaults above already describe kilobytes
        }
        else if (valueType == ValueType.MEGASIZE)
        {
            scale = MEGA_BYTES;
            suffix = "MB";
        }
        else if (valueType == ValueType.GIGASIZE)
        {
            scale = GIGA_BYTES;
            suffix = "G";
        }
    }

    public static FileSizeType getInstance(ValueType valueType)
    {
        if (instances.get(valueType) == null)
        {
            instances.put(valueType, new FileSizeType(valueType));
        }
        return instances.get(valueType);
    }

    @Override
    public int compare(ByteBuffer o1, ByteBuffer o2)
    {
        return DoubleType.instance.compare(o1, o2);
    }

    @Override
    public String compose(ByteBuffer bytes)
    {
        Double size = ByteBufferUtil.toDouble(bytes);
        return size.toString();
    }

    @Override
    public ByteBuffer decompose(String value)
    {
        value = value.trim();
        if (value.length() == 0)
            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
        else
        {
            // Strip the trailing unit suffix; it may be longer than one character (e.g. "MB"),
            // so scan back to the last digit instead of dropping a single character.
            int i = value.length() - 1;
            while (i >= 0 && !Character.isDigit(value.charAt(i)))
                i--;
            value = value.substring(0, i + 1);
            double size = Double.parseDouble(value);
            size = size * scale;
            return ByteBufferUtil.bytes(size);
        }
    }

    @Override
    public String getString(ByteBuffer bytes)
    {
        double size = ByteBufferUtil.toDouble(bytes);
        size = size / scale;
        return (size + suffix);
    }

    @Override
    public ByteBuffer fromString(String text)
    {
        int i = text.length() - 1;
        for (; i >= 0; i--)
        {
            if (Character.isDigit(text.charAt(i)))
                break;
        }
        if (i >= 0)
        {
            text = text.substring(0, i + 1);
            double size = Double.parseDouble(text);
            size *= scale;
            return DoubleType.instance.decompose(size);
        }
        else
        {
            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
        }
    }

    @Override
    public void validate(ByteBuffer bytes) throws MarshalException
    {
        // the stored bytes only need to be a valid double
        DoubleType.instance.validate(bytes);
    }
}
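// Illustrative sketch (not part of the original file): round-tripping a megabyte-scaled value.
// ValueType.MEGASIZE is the constant already referenced in the constructor above.
//
//   FileSizeType megaType = FileSizeType.getInstance(ValueType.MEGASIZE);
//   ByteBuffer raw = megaType.decompose("1.5MB");   // stores 1.5 * 1024 * 1024 as a double
//   String shown = megaType.getString(raw);         // "1.5MB"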
apache-2.0
graetzer/arangodb
3rdParty/boost/1.71.0/libs/graph/example/astar-cities.cpp
6681
// //======================================================================= // Copyright (c) 2004 Kristopher Beevers // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) //======================================================================= // #include <boost/graph/astar_search.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/random.hpp> #include <boost/random.hpp> #include <boost/graph/graphviz.hpp> #include <ctime> #include <vector> #include <list> #include <iostream> #include <fstream> #include <math.h> // for sqrt using namespace boost; using namespace std; // auxiliary types struct location { float y, x; // lat, long }; typedef float cost; template <class Name, class LocMap> class city_writer { public: city_writer(Name n, LocMap l, float _minx, float _maxx, float _miny, float _maxy, unsigned int _ptx, unsigned int _pty) : name(n), loc(l), minx(_minx), maxx(_maxx), miny(_miny), maxy(_maxy), ptx(_ptx), pty(_pty) {} template <class Vertex> void operator()(ostream& out, const Vertex& v) const { float px = 1 - (loc[v].x - minx) / (maxx - minx); float py = (loc[v].y - miny) / (maxy - miny); out << "[label=\"" << name[v] << "\", pos=\"" << static_cast<unsigned int>(ptx * px) << "," << static_cast<unsigned int>(pty * py) << "\", fontsize=\"11\"]"; } private: Name name; LocMap loc; float minx, maxx, miny, maxy; unsigned int ptx, pty; }; template <class WeightMap> class time_writer { public: time_writer(WeightMap w) : wm(w) {} template <class Edge> void operator()(ostream &out, const Edge& e) const { out << "[label=\"" << wm[e] << "\", fontsize=\"11\"]"; } private: WeightMap wm; }; // euclidean distance heuristic template <class Graph, class CostType, class LocMap> class distance_heuristic : public astar_heuristic<Graph, CostType> { public: typedef typename graph_traits<Graph>::vertex_descriptor Vertex; distance_heuristic(LocMap l, Vertex goal) : m_location(l), m_goal(goal) {} CostType operator()(Vertex u) { CostType dx = m_location[m_goal].x - m_location[u].x; CostType dy = m_location[m_goal].y - m_location[u].y; return ::sqrt(dx * dx + dy * dy); } private: LocMap m_location; Vertex m_goal; }; struct found_goal {}; // exception for termination // visitor that terminates when we find the goal template <class Vertex> class astar_goal_visitor : public boost::default_astar_visitor { public: astar_goal_visitor(Vertex goal) : m_goal(goal) {} template <class Graph> void examine_vertex(Vertex u, Graph& g) { if(u == m_goal) throw found_goal(); } private: Vertex m_goal; }; int main(int argc, char **argv) { // specify some types typedef adjacency_list<listS, vecS, undirectedS, no_property, property<edge_weight_t, cost> > mygraph_t; typedef property_map<mygraph_t, edge_weight_t>::type WeightMap; typedef mygraph_t::vertex_descriptor vertex; typedef mygraph_t::edge_descriptor edge_descriptor; typedef std::pair<int, int> edge; // specify data enum nodes { Troy, LakePlacid, Plattsburgh, Massena, Watertown, Utica, Syracuse, Rochester, Buffalo, Ithaca, Binghamton, Woodstock, NewYork, N }; const char *name[] = { "Troy", "Lake Placid", "Plattsburgh", "Massena", "Watertown", "Utica", "Syracuse", "Rochester", "Buffalo", "Ithaca", "Binghamton", "Woodstock", "New York" }; location locations[] = { // lat/long {42.73, 73.68}, {44.28, 73.99}, {44.70, 73.46}, {44.93, 74.89}, {43.97, 75.91}, {43.10, 75.23}, {43.04, 76.14}, {43.17, 77.61}, {42.89, 78.86}, {42.44, 76.50}, {42.10, 75.91}, {42.04, 
  74.11}, {40.67, 73.94}
  };
  edge edge_array[] = {
    edge(Troy,Utica), edge(Troy,LakePlacid),
    edge(Troy,Plattsburgh), edge(LakePlacid,Plattsburgh),
    edge(Plattsburgh,Massena), edge(LakePlacid,Massena),
    edge(Massena,Watertown), edge(Watertown,Utica),
    edge(Watertown,Syracuse), edge(Utica,Syracuse),
    edge(Syracuse,Rochester), edge(Rochester,Buffalo),
    edge(Syracuse,Ithaca), edge(Ithaca,Binghamton),
    edge(Ithaca,Rochester), edge(Binghamton,Troy),
    edge(Binghamton,Woodstock), edge(Binghamton,NewYork),
    edge(Syracuse,Binghamton), edge(Woodstock,Troy),
    edge(Woodstock,NewYork)
  };
  unsigned int num_edges = sizeof(edge_array) / sizeof(edge);
  cost weights[] = { // estimated travel time (mins)
    96, 134, 143, 65, 115, 133, 117, 116, 74, 56,
    84, 73, 69, 70, 116, 147, 173, 183, 74, 71, 124
  };

  // create graph
  mygraph_t g(N);
  WeightMap weightmap = get(edge_weight, g);
  for(std::size_t j = 0; j < num_edges; ++j) {
    edge_descriptor e; bool inserted;
    boost::tie(e, inserted) = add_edge(edge_array[j].first,
                                       edge_array[j].second, g);
    weightmap[e] = weights[j];
  }

  // pick random start/goal
  boost::mt19937 gen(std::time(0));
  vertex start = random_vertex(g, gen);
  vertex goal = random_vertex(g, gen);

  cout << "Start vertex: " << name[start] << endl;
  cout << "Goal vertex: " << name[goal] << endl;

  ofstream dotfile;
  dotfile.open("test-astar-cities.dot");
  write_graphviz(dotfile, g,
                 city_writer<const char **, location*>
                   (name, locations, 73.46, 78.86, 40.67, 44.93, 480, 400),
                 time_writer<WeightMap>(weightmap));

  vector<mygraph_t::vertex_descriptor> p(num_vertices(g));
  vector<cost> d(num_vertices(g));
  try {
    // call astar named parameter interface
    astar_search_tree
      (g, start,
       distance_heuristic<mygraph_t, cost, location*>
         (locations, goal),
       predecessor_map(make_iterator_property_map(p.begin(), get(vertex_index, g))).
       distance_map(make_iterator_property_map(d.begin(), get(vertex_index, g))).
       visitor(astar_goal_visitor<vertex>(goal)));
  } catch(found_goal fg) { // found a path to the goal
    list<vertex> shortest_path;
    for(vertex v = goal;; v = p[v]) {
      shortest_path.push_front(v);
      if(p[v] == v)
        break;
    }
    cout << "Shortest path from " << name[start] << " to "
         << name[goal] << ": ";
    list<vertex>::iterator spi = shortest_path.begin();
    cout << name[start];
    for(++spi; spi != shortest_path.end(); ++spi)
      cout << " -> " << name[*spi];
    cout << endl << "Total travel time: " << d[goal] << endl;
    return 0;
  }

  cout << "Didn't find a path from " << name[start] << " to "
       << name[goal] << "!" << endl;
  return 0;
}
apache-2.0
raja15792/googleads-java-lib
modules/ads_lib/src/main/java/com/google/api/ads/adwords/lib/conf/AdWordsApiConfiguration.java
2573
// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.api.ads.adwords.lib.conf; import com.google.api.ads.adwords.lib.client.AdWordsServiceDescriptor.AdWordsSubProduct; import com.google.api.ads.common.lib.conf.AdsApiConfiguration; import com.google.common.collect.Lists; import com.google.inject.Inject; import com.google.inject.name.Named; import org.apache.commons.configuration.Configuration; /** * Configuration information for AdWords library. */ public class AdWordsApiConfiguration extends AdsApiConfiguration { public static final String NAMESPACE_PREFIX_KEY = "api.adwords.namespace.prefix"; /** * Constructor. * * @param config the backing configuration */ @Inject public AdWordsApiConfiguration(@Named("api") Configuration config) { super(config); } /** * @see AdsApiConfiguration#getNamespacePrefix() */ @Override public String getNamespacePrefix() { return getString(NAMESPACE_PREFIX_KEY); } /** * Gets the service URL group for the service and version. */ public String getServiceUrlGroup(String version, String service) { String[] groups = config.getStringArray("api.adwords.version." + version + ".groups"); for (String group : groups) { String[] services = config.getStringArray("api.adwords.version." + version + "." + group + ".services"); if (Lists.newArrayList(services).contains(service)) { return group; } } throw new NullPointerException("No group found for service: " + version + "." + service); } /** * Gets the sub product for the service and version. */ public AdWordsSubProduct getServiceSubProduct(String version, String service) { String subProductName = config.getString("api.adwords.version." + version + "." + getServiceUrlGroup(version, service) + ".subproduct"); return subProductName != null ? Enum.valueOf(AdWordsSubProduct.class, subProductName.toUpperCase()) : AdWordsSubProduct.DEFAULT; } }
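// Illustrative sketch (not part of the original file): given hypothetical configuration entries
//
//   api.adwords.version.v201607.groups = cm
//   api.adwords.version.v201607.cm.services = CampaignService,AdGroupService
//
// a lookup on a configured instance (here called apiConfig) resolves a service's URL group:
//
//   String group = apiConfig.getServiceUrlGroup("v201607", "CampaignService"); // "cm"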
apache-2.0
davidkarlsen/camel
components/camel-mail/src/test/java/org/apache/camel/component/mail/MailPollEnrichTest.java
3345
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.mail; import javax.mail.Folder; import javax.mail.Message; import javax.mail.Store; import javax.mail.internet.MimeMessage; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; import org.apache.camel.test.junit4.CamelTestSupport; import org.junit.Before; import org.junit.Test; import org.jvnet.mock_javamail.Mailbox; /** * Unit test with poll enrich */ public class MailPollEnrichTest extends CamelTestSupport { @Override @Before public void setUp() throws Exception { prepareMailbox(); super.setUp(); } @Test public void testPollEnrich() throws Exception { Mailbox mailbox = Mailbox.get("bill@localhost"); assertEquals(5, mailbox.size()); MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedBodiesReceived("Message 0"); template.sendBody("direct:start", ""); mock.assertIsSatisfied(); } @Test public void testPollEnrichNullBody() throws Exception { Mailbox mailbox = Mailbox.get("bill@localhost"); assertEquals(5, mailbox.size()); MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedBodiesReceived("Message 0"); template.sendBody("direct:start", null); mock.assertIsSatisfied(); } private void prepareMailbox() throws Exception { // connect to mailbox Mailbox.clearAll(); JavaMailSender sender = new DefaultJavaMailSender(); Store store = sender.getSession().getStore("pop3"); store.connect("localhost", 25, "bill", "secret"); Folder folder = store.getFolder("INBOX"); folder.open(Folder.READ_WRITE); folder.expunge(); // inserts 5 new messages Message[] messages = new Message[5]; for (int i = 0; i < 5; i++) { messages[i] = new MimeMessage(sender.getSession()); messages[i].setHeader("Message-ID", "" + i); messages[i].setText("Message " + i); } folder.appendMessages(messages); folder.close(true); } protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { public void configure() throws Exception { from("direct:start") .pollEnrich("pop3://bill@localhost?password=secret&consumer.initialDelay=100&consumer.delay=100", 5000) .to("log:mail", "mock:result"); } }; } }
apache-2.0
MiriamAG/reto-3
programacion/Proyecto/src/com/clases/Parte.java
8465
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package com.clases;

import java.math.BigDecimal;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import javax.swing.JOptionPane;
import oracle.jdbc.OracleTypes;

/**
 *
 * @author 7fprog03
 */
public class Parte {
    private String fecha;
    private BigDecimal kmInicial;
    private BigDecimal kmFinal;
    private BigDecimal gastoPeaje;
    private BigDecimal gastoDietas;
    private BigDecimal gastoCombustible;
    private BigDecimal gastoVarios;
    private String incidencias;
    private String estado;
    private String validado;
    private BigDecimal horasExtras;
    private BigDecimal idTrabajador;
    private String notasAdministrativas;
    // association with Aviso
    private Aviso aviso;
    // association with Logistica
    private Logistica logistica;
    // association with Administracion
    //private Administracion administracion;
    // association with Viaje
    private List<Viaje> viaje = new ArrayList<>();

    public Parte() {
    }

    public Parte(BigDecimal idTrabajador, BigDecimal kmInicial, BigDecimal kmFinal, BigDecimal gastoPeaje, BigDecimal gastoDietas, BigDecimal gastoCombustible, BigDecimal gastoVarios, String incidencias) {
        this.idTrabajador = idTrabajador;
        this.kmInicial = kmInicial;
        this.kmFinal = kmFinal;
        this.gastoPeaje = gastoPeaje;
        this.gastoDietas = gastoDietas;
        this.gastoCombustible = gastoCombustible;
        this.gastoVarios = gastoVarios;
        this.incidencias = incidencias;
    }

    public Parte(String fecha, BigDecimal kmInicial, BigDecimal kmFinal, BigDecimal gastoPeaje, BigDecimal gastoDietas, BigDecimal gastoCombustible, BigDecimal gastoVarios, String incidencias, String estado, String validado, BigDecimal horasExtras, BigDecimal idTrabajador, String notasAdministrativas) {
        this.fecha = fecha;
        this.kmInicial = kmInicial;
        this.kmFinal = kmFinal;
        this.gastoPeaje = gastoPeaje;
        this.gastoDietas = gastoDietas;
        this.gastoCombustible = gastoCombustible;
        this.gastoVarios = gastoVarios;
        this.incidencias = incidencias;
        this.estado = estado;
        this.validado = validado;
        this.horasExtras = horasExtras;
        this.idTrabajador = idTrabajador;
        this.notasAdministrativas = notasAdministrativas;
    }

    public Parte(String fecha, BigDecimal idTrabajador) {
        this.fecha = fecha;
        this.idTrabajador = idTrabajador;
    }

    public Parte(BigDecimal idTrabajador) {
        this.idTrabajador = idTrabajador;
    }

    // method to retrieve a worker's daily report (parte)
    public static Parte parte(BigDecimal idt) {
        Parte p = new Parte();
        Conexion.conectar();
        try {
            CallableStatement cs = Conexion.getConexion().prepareCall("call recuperarParte(?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
            cs.setBigDecimal(1, idt);
            cs.registerOutParameter(2, OracleTypes.VARCHAR);
            cs.registerOutParameter(3, OracleTypes.INTEGER);
            cs.registerOutParameter(4, OracleTypes.INTEGER);
            cs.registerOutParameter(5, OracleTypes.INTEGER);
            cs.registerOutParameter(6, OracleTypes.INTEGER);
            cs.registerOutParameter(7, OracleTypes.INTEGER);
            cs.registerOutParameter(8, OracleTypes.INTEGER);
            cs.registerOutParameter(9, OracleTypes.VARCHAR);
            cs.registerOutParameter(10, OracleTypes.VARCHAR);
            cs.registerOutParameter(11, OracleTypes.VARCHAR);
            cs.registerOutParameter(12, OracleTypes.INTEGER);
            cs.registerOutParameter(13, OracleTypes.INTEGER);
            cs.registerOutParameter(14, OracleTypes.VARCHAR);
            cs.execute();
            String fecha = cs.getString(2);
            BigDecimal kmi = cs.getBigDecimal(3);
            BigDecimal kmf = cs.getBigDecimal(4);
            BigDecimal gp = cs.getBigDecimal(5);
            BigDecimal gd = cs.getBigDecimal(6);
            BigDecimal gc = cs.getBigDecimal(7);
            BigDecimal og = cs.getBigDecimal(8);
            String in = cs.getString(9);
            String es = cs.getString(10);
            String va = cs.getString(11);
            BigDecimal ho = cs.getBigDecimal(12);
            BigDecimal ift = cs.getBigDecimal(13);
            String no = cs.getString(14);
            p = new Parte(fecha, kmi, kmf, gp, gd, gc, og, in, es, va, ho, ift, no);
            cs.close();
            Conexion.desconectar();
            return p;
        } catch (SQLException ex) {
            // swallowed: fall through and return null when the stored procedure call fails
        }
        return null;
    }

    public boolean iniciarParte() {
        Conexion.conectar();
        String sql = "insert into partes (fecha, trabajadores_id) values (?,?)";
        try {
            PreparedStatement smt = Conexion.getConexion().prepareStatement(sql);
            smt.setString(1, fecha);
            smt.setBigDecimal(2, idTrabajador);
            smt.executeUpdate();
            smt.close();
            Conexion.desconectar();
            return true;
        } catch (SQLException ex) {
            JOptionPane.showMessageDialog(null, "The connection could not be established, please contact the system administrator: " + ex.getMessage());
        }
        return false;
    }

    public String getFecha() {
        return fecha;
    }

    public void setFecha(String fecha) {
        this.fecha = fecha;
    }

    public BigDecimal getKmInicial() {
        return kmInicial;
    }

    public void setKmInicial(BigDecimal kmInicial) {
        this.kmInicial = kmInicial;
    }

    public BigDecimal getKmFinal() {
        return kmFinal;
    }

    public void setKmFinal(BigDecimal kmFinal) {
        this.kmFinal = kmFinal;
    }

    public BigDecimal getGastoPeaje() {
        return gastoPeaje;
    }

    public void setGastoPeaje(BigDecimal gastoPeaje) {
        this.gastoPeaje = gastoPeaje;
    }

    public BigDecimal getGastoDietas() {
        return gastoDietas;
    }

    public void setGastoDietas(BigDecimal gastoDietas) {
        this.gastoDietas = gastoDietas;
    }

    public BigDecimal getGastoCombustible() {
        return gastoCombustible;
    }

    public void setGastoCombustible(BigDecimal gastoCombustible) {
        this.gastoCombustible = gastoCombustible;
    }

    public BigDecimal getGastoVarios() {
        return gastoVarios;
    }

    public void setGastoVarios(BigDecimal gastoVarios) {
        this.gastoVarios = gastoVarios;
    }

    public String getIncidencias() {
        return incidencias;
    }

    public void setIncidencias(String incidencias) {
        this.incidencias = incidencias;
    }

    public String getEstado() {
        return estado;
    }

    public void setEstado(String estado) {
        this.estado = estado;
    }

    public String getValidado() {
        return validado;
    }

    public void setValidado(String validado) {
        this.validado = validado;
    }

    public BigDecimal getHorasExtras() {
        return horasExtras;
    }

    public void setHorasExtras(BigDecimal horasExtras) {
        this.horasExtras = horasExtras;
    }

    public BigDecimal getIdTrabajador() {
        return idTrabajador;
    }

    public void setIdTrabajador(BigDecimal idTrabajador) {
        this.idTrabajador = idTrabajador;
    }

    public String getNotasAdministrativas() {
        return notasAdministrativas;
    }

    public void setNotasAdministrativas(String notasAdministrativas) {
        this.notasAdministrativas = notasAdministrativas;
    }

    public Logistica getLogistica() {
        return logistica;
    }

    public void setLogistica(Logistica logistica) {
        this.logistica = logistica;
    }

    public void añadirViaje(Viaje v) {
        viaje.add(v);
        v.setParte(this);
    }
}
apache-2.0
jasongzcity/MyAlgorithm
leetcode/src/MajorityElement_169/Solution.java
678
package MajorityElement_169; /** * Given an array of size n, find the majority element. * The majority element is the element that appears more than ⌊ n/2 ⌋ times. * * You may assume that the array is non-empty and the majority element * always exist in the array. */ public class Solution { // O(1) space one pass solution public int majorityElement(int[] nums) { int maj = 0,count = 0; for(int i=0;i<nums.length;i++){ if(count==0){ maj=nums[i]; ++count; }else{ if(maj==nums[i]) ++count; else --count; } } return maj; } }
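// Illustrative trace (not part of the original file) of the Boyer-Moore majority vote
// implemented above, on nums = {2, 2, 1, 1, 2}:
//
//   i=0: count==0, so maj=2, count=1
//   i=1: nums[1]==maj, count=2
//   i=2: nums[2]!=maj, count=1
//   i=3: nums[3]!=maj, count=0
//   i=4: count==0, so maj=2, count=1  -> returns 2, the majority element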
apache-2.0
billchen198318/bamboobsc
core-lib/BASE/com/netsteadfast/greenstep/base/service/IBaseService.java
2259
/*
 * Copyright 2012-2016 bambooCORE, greenstep of copyright Chen Xin Nien
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * -----------------------------------------------------------------------
 *
 * author: Chen Xin Nien
 * contact: chen.xin.nien@gmail.com
 *
 */
package com.netsteadfast.greenstep.base.service;

import java.util.List;
import java.util.Map;

import com.netsteadfast.greenstep.base.exception.ServiceException;
import com.netsteadfast.greenstep.base.model.DefaultResult;

/**
 *
 * @author CXN
 *
 * @param <T> T is VO
 * @param <E> E is PO
 * @param <PK> PK is the primary key
 */
public interface IBaseService<T extends java.io.Serializable, E extends java.io.Serializable, PK extends java.io.Serializable> extends ISimpleService<E, PK> {

	public void doMapper(Object sourceObject, Object targetObject, String mapperId) throws org.dozer.MappingException, ServiceException;

	public DefaultResult<T> saveIgnoreUK(T object) throws ServiceException, Exception;

	public DefaultResult<T> mergeIgnoreUK(T object) throws ServiceException, Exception;

	public DefaultResult<T> findObjectByOid(T object) throws ServiceException, Exception;

	public DefaultResult<T> saveObject(T object) throws ServiceException, Exception;

	public DefaultResult<T> updateObject(T object) throws ServiceException, Exception;

	public DefaultResult<T> mergeObject(T object) throws ServiceException, Exception;

	public DefaultResult<Boolean> deleteObject(T object) throws ServiceException, Exception;

	public List<T> findListVOByParams(Map<String, Object> params) throws ServiceException, Exception;

	public DefaultResult<T> findByUK(T object) throws ServiceException, Exception;

	public int countByUK(T object) throws ServiceException, Exception;

}
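// Illustrative sketch (not part of the original file): a concrete parameterization, with
// hypothetical AccountVO/AccountPO types, showing how the three type parameters line up:
//
//   public interface IAccountService extends IBaseService<AccountVO, AccountPO, String> {
//       // T = AccountVO (value object), E = AccountPO (persistent object), PK = String oid
//   }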
apache-2.0
libow/hobbes
lib/hobbes/lang/typepreds.C
3781
#include <hobbes/lang/typepreds.H> #include <hobbes/lang/expr.H> #include <hobbes/lang/typeinf.H> namespace hobbes { void UnqualifierSet::add(const std::string& name, const UnqualifierPtr& uq) { if (this->uqs.find(name) != this->uqs.end()) { throw std::runtime_error("Redefinition of the '" + name + "' type predicate."); } else { this->uqs[name] = uq; } } UnqualifierPtr UnqualifierSet::findUnqualifier(const std::string& name) { Unqualifiers::const_iterator uq = this->uqs.find(name); if (uq == this->uqs.end()) { throw std::runtime_error("Undefined predicate: " + name); } else { return uq->second; } } const UnqualifierSet::Unqualifiers& UnqualifierSet::unqualifiers() const { return this->uqs; } bool UnqualifierSet::refine(const TEnvPtr& tenv, const ConstraintPtr& cst, MonoTypeUnifier* u, Definitions* ds) { bool upd = false; Unqualifiers::const_iterator uq = this->uqs.find(cst->name()); if (uq != this->uqs.end()) { upd |= uq->second->refine(tenv, cst, u, ds); } return upd; } bool UnqualifierSet::satisfied(const TEnvPtr& tenv, const ConstraintPtr& cst, Definitions* ds) const { Unqualifiers::const_iterator uq = this->uqs.find(cst->name()); if (uq == this->uqs.end()) { return false; } else { return uq->second->satisfied(tenv, cst, ds); } } bool UnqualifierSet::satisfiable(const TEnvPtr& tenv, const ConstraintPtr& cst, Definitions* ds) const { Unqualifiers::const_iterator uq = this->uqs.find(cst->name()); if (uq == this->uqs.end()) { return false; } else { return uq->second->satisfiable(tenv, cst, ds); } } void UnqualifierSet::explain(const TEnvPtr& tenv, const ConstraintPtr& cst, const ExprPtr& e, Definitions* ds, annmsgs* msgs) { auto uq = this->uqs.find(cst->name()); if (uq != this->uqs.end()) { uq->second->explain(tenv, cst, e, ds, msgs); } } ExprPtr UnqualifierSet::unqualify(const TEnvPtr& tenv, const ConstraintPtr& cst, const ExprPtr& e, Definitions* ds) const { Unqualifiers::const_iterator uq = this->uqs.find(cst->name()); if (uq == this->uqs.end()) { throw annotated_error(*e, "Unknown predicate '" + cst->name() + "' can't be unqualified."); } else { return uq->second->unqualify(tenv, cst, e, ds); } } PolyTypePtr UnqualifierSet::lookup(const std::string& vn) const { for (Unqualifiers::const_iterator uq = this->uqs.begin(); uq != this->uqs.end(); ++uq) { PolyTypePtr pt = uq->second->lookup(vn); if (pt != PolyTypePtr()) { return pt; } } return PolyTypePtr(); } SymSet UnqualifierSet::bindings() const { SymSet r; for (Unqualifiers::const_iterator uq = this->uqs.begin(); uq != this->uqs.end(); ++uq) { SymSet qr = uq->second->bindings(); r.insert(qr.begin(), qr.end()); } return r; } FunDeps UnqualifierSet::dependencies(const ConstraintPtr& cst) const { Unqualifiers::const_iterator uq = this->uqs.find(cst->name()); if (uq == this->uqs.end()) { return FunDeps(); } else { return uq->second->dependencies(cst); } } bool hasConstraint(const ConstraintPtr& c, const Constraints& cs) { Constraints r; for (Constraints::const_iterator ci = cs.begin(); ci != cs.end(); ++ci) { if (*c == **ci) { return true; } } return false; } bool hasConstraint(const ConstraintPtr& c, const QualTypePtr& qt) { return hasConstraint(c, qt->constraints()); } Constraints removeConstraint(const ConstraintPtr& c, const Constraints& cs) { Constraints r; for (Constraints::const_iterator ci = cs.begin(); ci != cs.end(); ++ci) { if (!(*c == **ci)) { r.push_back(*ci); } } return r; } QualTypePtr removeConstraint(const ConstraintPtr& c, const QualTypePtr& qt) { return qualtype(removeConstraint(c, qt->constraints()), qt->monoType()); } }
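// Illustrative sketch (not part of the original file): how a constraint handler is registered
// and then dispatched by name; "MyPred" and 'myUnqualifier' are hypothetical stand-ins for a
// real predicate name and UnqualifierPtr implementation.
//
//   UnqualifierSet uqs;
//   uqs.add("MyPred", myUnqualifier);        // a second add("MyPred", ...) would throw
//   bool ok = uqs.satisfied(tenv, cst, ds);  // dispatches on cst->name(), false if unknown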
apache-2.0
unlimitedlabs/orchestra
orchestra/tests/workflows/test_certifications.py
6548
from orchestra.models import WorkerCertification from orchestra.tests.helpers import OrchestraTestCase from orchestra.tests.helpers.fixtures import CertificationFactory from orchestra.tests.helpers.fixtures import UserFactory from orchestra.tests.helpers.fixtures import WorkerCertificationFactory from orchestra.tests.helpers.fixtures import WorkerFactory from orchestra.tests.helpers.fixtures import WorkflowFactory from orchestra.workflow.certifications import migrate_certifications class ManageCertificationsTestCase(OrchestraTestCase): def setUp(self): self.workflow_old = WorkflowFactory( slug='workflow_old', name='Old workflow', description='Old workflow to migrate certifications from.', code_directory='workflow_old') self.workflow_new = WorkflowFactory( slug='workflow_new', name='New workflow', description='New workflow to migrate certifications from.', code_directory='workflow_new') for workflow in (self.workflow_old, self.workflow_new): # Certifications must exist in both workflows for certification # to be migrated CertificationFactory( slug='certification1', name='Certification 1', description='First certification to migrate.', workflow=workflow) CertificationFactory( slug='certification2', name='Certification 2', description='Second certification to migrate.', workflow=workflow) user = (UserFactory(username='test', first_name='test', last_name='test', password='test', email='test@test.com')) self.worker = WorkerFactory(user=user) for certification in self.workflow_old.certifications.all(): # Worker certifications exist only for old workflow WorkerCertificationFactory( worker=self.worker, certification=certification, role=WorkerCertification.Role.ENTRY_LEVEL) WorkerCertificationFactory( worker=self.worker, certification=certification, role=WorkerCertification.Role.REVIEWER) super().setUp() def test_migrate_certifications(self): def _check_old_certifications_unchanged(): self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_old).count(), 4) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_old, certification__slug='certification1', role=WorkerCertification.Role.ENTRY_LEVEL).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_old, certification__slug='certification1', role=WorkerCertification.Role.REVIEWER).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_old, certification__slug='certification2', role=WorkerCertification.Role.ENTRY_LEVEL).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_old, certification__slug='certification2', role=WorkerCertification.Role.REVIEWER).count(), 1) _check_old_certifications_unchanged() # New workflow should have no worker certifications self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new).count(), 0) # Migrate `certification1` migrate_certifications( self.workflow_old.slug, self.workflow_new.slug, ['certification1']) _check_old_certifications_unchanged() # New workflow should have only `certification1` worker certifications self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new).count(), 2) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, 
certification__workflow=self.workflow_new, certification__slug='certification1', role=WorkerCertification.Role.ENTRY_LEVEL).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new, certification__slug='certification1', role=WorkerCertification.Role.REVIEWER).count(), 1) # Migrate all source certifications migrate_certifications( self.workflow_old.slug, self.workflow_new.slug, []) _check_old_certifications_unchanged() # `certification2` should now be migrated as well self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new).count(), 4) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new, certification__slug='certification1', role=WorkerCertification.Role.ENTRY_LEVEL).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new, certification__slug='certification1', role=WorkerCertification.Role.REVIEWER).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new, certification__slug='certification2', role=WorkerCertification.Role.ENTRY_LEVEL).count(), 1) self.assertEqual(WorkerCertification.objects.filter( worker=self.worker, certification__workflow=self.workflow_new, certification__slug='certification2', role=WorkerCertification.Role.REVIEWER).count(), 1)
apache-2.0
msebire/intellij-community
platform/core-api/src/com/intellij/lang/ASTNode.java
8963
/* * Copyright 2000-2015 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.lang; import com.intellij.openapi.util.Key; import com.intellij.openapi.util.TextRange; import com.intellij.openapi.util.UserDataHolder; import com.intellij.psi.PsiElement; import com.intellij.psi.tree.IElementType; import com.intellij.psi.tree.TokenSet; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * A node in the AST tree. The AST is an intermediate parsing tree created by {@link PsiBuilder}, * out of which a PSI tree is then created. * * @author max * @see PsiElement */ public interface ASTNode extends UserDataHolder { ASTNode[] EMPTY_ARRAY = new ASTNode[0]; /** * Returns the type of this node. * * @return the element type. */ @NotNull IElementType getElementType(); /** * Returns the text of this node. * * @return the node text. */ @NotNull String getText(); /** * Returns same text getText() returns but might be more effective eliminating toString() transformation from internal CharSequence representation * * @return the node text. */ @NotNull CharSequence getChars(); /** * Checks if the specified character is present in the text of this node. * * @param c the character to search for. * @return true if the character is found, false otherwise. */ boolean textContains(char c); /** * Returns the starting offset of the node text in the document. * * @return the start offset. */ int getStartOffset(); /** * Returns the length of the node text. * * @return the text length. */ int getTextLength(); /** * Returns the text range (a combination of starting offset in the document and length) for this node. * * @return the text range. */ TextRange getTextRange(); /** * Returns the parent of this node in the tree. * * @return the parent node. */ ASTNode getTreeParent(); /** * Returns the first child of this node in the tree. * * @return the first child node. */ ASTNode getFirstChildNode(); /** * Returns the last child of this node in the tree. * * @return the last child node. */ ASTNode getLastChildNode(); /** * Returns the next sibling of this node in the tree. * * @return the next sibling node. */ ASTNode getTreeNext(); /** * Returns the previous sibling of this node in the tree. * * @return the previous sibling node. */ ASTNode getTreePrev(); /** * Returns the list of children of the specified node, optionally filtered by the * specified token type filter. * * @param filter the token set used to filter the returned children, or null if * all children should be returned. * @return the children array. */ @NotNull ASTNode[] getChildren(@Nullable TokenSet filter); /** * Adds the specified child node as the last child of this node. * * @param child the child node to add. */ void addChild(@NotNull ASTNode child); /** * Adds the specified child node at the specified position in the child list. * * @param child the child node to add. * @param anchorBefore the node before which the child node is inserted ({@code null} to add a child as a last node). 
*/ void addChild(@NotNull ASTNode child, @Nullable ASTNode anchorBefore); /** * Add leaf element with specified type and text in the child list. * @param leafType type of leaf element to add. * @param leafText text of added leaf. * @param anchorBefore the node before which the child node is inserted. */ void addLeaf(@NotNull IElementType leafType, @NotNull CharSequence leafText, @Nullable ASTNode anchorBefore); /** * Removes the specified node from the list of children of this node. * * @param child the child node to remove. */ void removeChild(@NotNull ASTNode child); /** * Removes a range of nodes from the list of children, starting with {@code firstNodeToRemove}, * up to and not including {@code firstNodeToKeep}. * * @param firstNodeToRemove the first child node to remove from the tree. * @param firstNodeToKeep the first child node to keep in the tree. */ void removeRange(@NotNull ASTNode firstNodeToRemove, ASTNode firstNodeToKeep); /** * Replaces the specified child node with another node. * * @param oldChild the child node to replace. * @param newChild the node to replace with. */ void replaceChild(@NotNull ASTNode oldChild, @NotNull ASTNode newChild); /** * Replaces all child nodes with the children of the specified node. * * @param anotherParent the parent node whose children are used for replacement. */ void replaceAllChildrenToChildrenOf(@NotNull ASTNode anotherParent); /** * Adds a range of nodes belonging to the same parent to the list of children of this node, * starting with {@code firstChild}, up to and not including {@code firstChildToNotAdd}. * * @param firstChild the first node to add. * @param firstChildToNotAdd the first child node following firstChild which will not be added to the tree. * @param anchorBefore the node before which the child nodes are inserted. */ void addChildren(@NotNull ASTNode firstChild, ASTNode firstChildToNotAdd, ASTNode anchorBefore); /** * Creates and returns a deep copy of the AST tree part starting at this node. * * @return the top node of the copied tree (as an ASTNode object) */ @NotNull Object clone(); /** * Creates a copy of the entire AST tree containing this node and returns a counterpart * of this node in the resulting tree. * * @return the counterpart of this node in the copied tree. */ ASTNode copyElement(); /** * Finds a leaf child node at the specified offset from the start of the text range of this node. * * @param offset the relative offset for which the child node is requested. * @return the child node, or null if none is found. */ @Nullable ASTNode findLeafElementAt(int offset); /** * Returns a copyable user data object attached to this node. * * @param key the key for accessing the user data object. * @return the user data object, or null if no such object is found in the current node. * @see #putCopyableUserData(Key, Object) */ @Nullable <T> T getCopyableUserData(@NotNull Key<T> key); /** * Attaches a copyable user data object to this node. Copyable user data objects are copied * when the AST tree nodes are copied. * * @param key the key for accessing the user data object. * @param value the user data object to attach. * @see #getCopyableUserData(Key) */ <T> void putCopyableUserData(@NotNull Key<T> key, T value); /** * Returns the first child of the specified node which has the specified type. * * @param type the type of the node to return. * @return the found node, or null if none was found. 
*/ @Nullable ASTNode findChildByType(@NotNull IElementType type); /** * Returns the first child after anchor of the specified node which has the specified type. * * @param type the type of the node to return. * @param anchor to start search from * @return the found node, or null if none was found. */ @Nullable ASTNode findChildByType(@NotNull IElementType type, @Nullable ASTNode anchor); /** * Returns the first child of the specified node which has type from specified set. * * @param typesSet the token set used to filter the returned children. * @return the found node, or null if none was found. */ @Nullable ASTNode findChildByType(@NotNull TokenSet typesSet); /** * Returns the first child after anchor of the specified node which has type from specified set. * * @param typesSet the token set used to filter the returned children. * @param anchor to start search from * @return the found node, or null if none was found. */ @Nullable ASTNode findChildByType(@NotNull TokenSet typesSet, @Nullable ASTNode anchor); /** * Returns the PSI element for this node. * * @return the PSI element. */ PsiElement getPsi(); /** * Checks and returns the PSI element for this node. * * @param clazz expected psi class * @return the PSI element. */ <T extends PsiElement> T getPsi(@NotNull Class<T> clazz); }
apache-2.0
mesos/mesos-rxjava
mesos-rxjava-recordio/src/main/java/com/mesosphere/mesos/rx/java/recordio/package-info.java
766
/* * Copyright (C) 2015 Mesosphere, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This package provides an {@link rx.Observable.Operator} to process a RecordIO encoded stream of data. */ package com.mesosphere.mesos.rx.java.recordio;
apache-2.0
PerfectMemory/rabbitmq
recipes/user_management.rb
1298
# -*- coding: utf-8 -*- # # Cookbook Name:: rabbitmq # Recipe:: user_management # # Copyright 2013, Grégoire Seux # Copyright 2013, Chef Software, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # include_recipe 'rabbitmq::default' include_recipe 'rabbitmq::virtualhost_management' node['rabbitmq']['enabled_users'].each do |user| rabbitmq_user user['name'] do password user['password'] action :add end rabbitmq_user user['name'] do tag user['tag'] action :set_tags end user['rights'].each do |r| rabbitmq_user user['name'] do vhost r['vhost'] permissions "#{r['conf']} #{r['write']} #{r['read']}" action :set_permissions end end end node['rabbitmq']['disabled_users'].each do |user| rabbitmq_user user do action :delete end end
apache-2.0
detiber/lib_openshift
lib_openshift/models/v1_secret_spec.py
4171
# coding: utf-8 """ OpenAPI spec version: Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class V1SecretSpec(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ operations = [ ] # The key is attribute name # and the value is attribute type. swagger_types = { 'secret_source': 'V1LocalObjectReference', 'mount_path': 'str' } # The key is attribute name # and the value is json key in definition. attribute_map = { 'secret_source': 'secretSource', 'mount_path': 'mountPath' } def __init__(self, secret_source=None, mount_path=None): """ V1SecretSpec - a model defined in Swagger """ self._secret_source = secret_source self._mount_path = mount_path @property def secret_source(self): """ Gets the secret_source of this V1SecretSpec. SecretSource is a reference to the secret :return: The secret_source of this V1SecretSpec. :rtype: V1LocalObjectReference """ return self._secret_source @secret_source.setter def secret_source(self, secret_source): """ Sets the secret_source of this V1SecretSpec. SecretSource is a reference to the secret :param secret_source: The secret_source of this V1SecretSpec. :type: V1LocalObjectReference """ self._secret_source = secret_source @property def mount_path(self): """ Gets the mount_path of this V1SecretSpec. MountPath is the path at which to mount the secret :return: The mount_path of this V1SecretSpec. :rtype: str """ return self._mount_path @mount_path.setter def mount_path(self, mount_path): """ Sets the mount_path of this V1SecretSpec. MountPath is the path at which to mount the secret :param mount_path: The mount_path of this V1SecretSpec. :type: str """ self._mount_path = mount_path def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(V1SecretSpec.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
apache-2.0
luci/luci-go
machine-db/client/cli/oses.go
2342
// Copyright 2017 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cli import ( "github.com/maruel/subcommands" "go.chromium.org/luci/common/cli" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/flag" "go.chromium.org/luci/machine-db/api/crimson/v1" ) // printOSes prints operating system data to stdout in tab-separated columns. func printOSes(tsv bool, oses ...*crimson.OS) { if len(oses) > 0 { p := newStdoutPrinter(tsv) defer p.Flush() if !tsv { p.Row("Name", "Description") } for _, os := range oses { p.Row(os.Name, os.Description) } } } // GetOSesCmd is the command to get operating systems. type GetOSesCmd struct { commandBase req crimson.ListOSesRequest } // Run runs the command to get operating systems. func (c *GetOSesCmd) Run(app subcommands.Application, args []string, env subcommands.Env) int { ctx := cli.GetContext(app, c, env) client := getClient(ctx) resp, err := client.ListOSes(ctx, &c.req) if err != nil { errors.Log(ctx, err) return 1 } printOSes(c.f.tsv, resp.Oses...) return 0 } // getOSesCmd returns a command to get operating systems. func getOSesCmd(params *Parameters) *subcommands.Command { return &subcommands.Command{ UsageLine: "get-oses [-name <name>]...", ShortDesc: "retrieves operating systems", LongDesc: "Retrieves operating systems matching the given names, or all operating systems if names are omitted.\n\nExample to get all OSes:\ncrimson get-oses\nExample to get Mac 10.13.3:\ncrimson get-oses -name 'Mac 10.13.3 (Darwin 17.4.0)'", CommandRun: func() subcommands.CommandRun { cmd := &GetOSesCmd{} cmd.Initialize(params) cmd.Flags.Var(flag.StringSlice(&cmd.req.Names), "name", "Name of an operating system to filter by. Can be specified multiple times.") return cmd }, } }
apache-2.0
stephane-martin/salt-debian-packaging
salt-2016.3.2/salt/modules/freebsdservice.py
8692
# -*- coding: utf-8 -*- ''' The service module for FreeBSD .. important:: If you feel that Salt should be using this module to manage services on a minion, and it is using a different module (or gives an error similar to *'service.start' is not available*), see :ref:`here <module-provider-override>`. ''' from __future__ import absolute_import # Import python libs import logging import os # Import salt libs import salt.utils import salt.utils.decorators as decorators from salt.exceptions import CommandNotFoundError __func_alias__ = { 'reload_': 'reload' } log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'service' def __virtual__(): ''' Only work on FreeBSD ''' # Disable on these platforms, specific service modules exist: if __grains__['os'] == 'FreeBSD': return __virtualname__ return (False, 'The freebsdservice execution module cannot be loaded: only available on FreeBSD systems.') @decorators.memoize def _cmd(): ''' Return full path to service command ''' service = salt.utils.which('service') if not service: raise CommandNotFoundError('\'service\' command not found') return service def _get_rcscript(name): ''' Return full path to service rc script ''' cmd = '{0} -r'.format(_cmd()) for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines(): if line.endswith('{0}{1}'.format(os.path.sep, name)): return line return None def _get_rcvar(name): ''' Return rcvar ''' if not available(name): log.error('Service {0} not found'.format(name)) return False cmd = '{0} {1} rcvar'.format(_cmd(), name) for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines(): if '_enable="' not in line: continue rcvar, _ = line.split('=', 1) return rcvar return None def get_enabled(): ''' Return what services are set to run on boot CLI Example: .. code-block:: bash salt '*' service.get_enabled ''' ret = [] service = _cmd() for svc in __salt__['cmd.run']('{0} -e'.format(service)).splitlines(): ret.append(os.path.basename(svc)) # This is workaround for bin/173454 bug for svc in get_all(): if svc in ret: continue if not os.path.exists('/etc/rc.conf.d/{0}'.format(svc)): continue if enabled(svc): ret.append(svc) return sorted(ret) def get_disabled(): ''' Return what services are available but not enabled to start at boot CLI Example: .. code-block:: bash salt '*' service.get_disabled ''' en_ = get_enabled() all_ = get_all() return sorted(set(all_) - set(en_)) def _switch(name, # pylint: disable=C0103 on, # pylint: disable=C0103 **kwargs): ''' Switch on/off service start at boot. 
''' if not available(name): return False rcvar = _get_rcvar(name) if not rcvar: log.error('rcvar for service {0} not found'.format(name)) return False config = kwargs.get('config', __salt__['config.option']('service.config', default='/etc/rc.conf' ) ) if not config: rcdir = '/etc/rc.conf.d' if not os.path.exists(rcdir) or not os.path.isdir(rcdir): log.error('{0} not exists'.format(rcdir)) return False config = os.path.join(rcdir, rcvar.replace('_enable', '')) nlines = [] edited = False if on: val = 'YES' else: val = 'NO' if os.path.exists(config): with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if not line.startswith('{0}='.format(rcvar)): nlines.append(line) continue rest = line[len(line.split()[0]):] # keep comments etc nlines.append('{0}="{1}"{2}'.format(rcvar, val, rest)) edited = True if not edited: # Ensure that the file ends in a \n if len(nlines) > 1 and nlines[-1][-1] != '\n': nlines[-1] = '{0}\n'.format(nlines[-1]) nlines.append('{0}="{1}"\n'.format(rcvar, val)) with salt.utils.fopen(config, 'w') as ofile: ofile.writelines(nlines) return True def enable(name, **kwargs): ''' Enable the named service to start at boot name service name config : /etc/rc.conf Config file for managing service. If config value is empty string, then /etc/rc.conf.d/<service> used. See man rc.conf(5) for details. Also service.config variable can be used to change default. CLI Example: .. code-block:: bash salt '*' service.enable <service name> ''' return _switch(name, True, **kwargs) def disable(name, **kwargs): ''' Disable the named service to start at boot Arguments the same as for enable() CLI Example: .. code-block:: bash salt '*' service.disable <service name> ''' return _switch(name, False, **kwargs) def enabled(name, **kwargs): ''' Return True if the named service is enabled, false otherwise name Service name CLI Example: .. code-block:: bash salt '*' service.enabled <service name> ''' if not available(name): log.error('Service {0} not found'.format(name)) return False cmd = '{0} {1} rcvar'.format(_cmd(), name) for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines(): if '_enable="' not in line: continue _, state, _ = line.split('"', 2) return state.lower() in ('yes', 'true', 'on', '1') # probably will never reached return False def disabled(name): ''' Return True if the named service is enabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.disabled <service name> ''' return not enabled(name) def available(name): ''' Check that the given service is available. CLI Example: .. code-block:: bash salt '*' service.available sshd ''' return name in get_all() def missing(name): ''' The inverse of service.available. Returns ``True`` if the specified service is not available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.missing sshd ''' return name not in get_all() def get_all(): ''' Return a list of all available services CLI Example: .. code-block:: bash salt '*' service.get_all ''' ret = [] service = _cmd() for srv in __salt__['cmd.run']('{0} -l'.format(service)).splitlines(): if not srv.isupper(): ret.append(srv) return sorted(ret) def start(name): ''' Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> ''' cmd = '{0} {1} onestart'.format(_cmd(), name) return not __salt__['cmd.retcode'](cmd, python_shell=False) def stop(name): ''' Stop the specified service CLI Example: .. 
code-block:: bash salt '*' service.stop <service name> ''' cmd = '{0} {1} onestop'.format(_cmd(), name) return not __salt__['cmd.retcode'](cmd, python_shell=False) def restart(name): ''' Restart the named service CLI Example: .. code-block:: bash salt '*' service.restart <service name> ''' cmd = '{0} {1} onerestart'.format(_cmd(), name) return not __salt__['cmd.retcode'](cmd, python_shell=False) def reload_(name): ''' Restart the named service CLI Example: .. code-block:: bash salt '*' service.reload <service name> ''' cmd = '{0} {1} onereload'.format(_cmd(), name) return not __salt__['cmd.retcode'](cmd, python_shell=False) def status(name, sig=None): ''' Return the status for a service (True or False). name Name of service CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' if sig: return bool(__salt__['status.pid'](sig)) cmd = '{0} {1} onestatus'.format(_cmd(), name) return not __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=True)
apache-2.0
bollwyvl/CesiumWidget
CesiumWidget/static/CesiumWidget/cesium/Specs/Scene/QuadtreeTileSpec.js
6082
/*global defineSuite*/ defineSuite([ 'Scene/QuadtreeTile', 'Core/GeographicTilingScheme', 'Core/Math', 'Core/Rectangle', 'Core/WebMercatorTilingScheme' ], function( QuadtreeTile, GeographicTilingScheme, CesiumMath, Rectangle, WebMercatorTilingScheme) { "use strict"; /*global jasmine,describe,xdescribe,it,xit,expect,beforeEach,afterEach,beforeAll,afterAll,spyOn*/ it('throws without a options', function() { expect(function() { return new QuadtreeTile(); }).toThrowDeveloperError(); }); it('throws without options.rectangle', function() { expect(function() { return new QuadtreeTile({ x : 0, y : 0 }); }).toThrowDeveloperError(); }); it('throws without options.level', function() { expect(function() { return new QuadtreeTile({ rectangle : new Rectangle( -CesiumMath.PI_OVER_FOUR, 0.0, CesiumMath.PI_OVER_FOUR, CesiumMath.PI_OVER_FOUR ), x : 0, y : 0 }); }).toThrowDeveloperError(); }); it('throws with negative x or y properties', function() { expect(function() { return new QuadtreeTile({ x : -1.0, y : -1.0, level : 1.0 }); }).toThrowDeveloperError(); }); it('creates rectangle on construction', function() { var desc = {tilingScheme : new WebMercatorTilingScheme(), x : 0, y : 0, level : 0}; var tile = new QuadtreeTile(desc); var rectangle = desc.tilingScheme.tileXYToRectangle(desc.x, desc.y, desc.level); expect(tile.rectangle).toEqual(rectangle); }); it('throws if constructed improperly', function() { expect(function() { return new QuadtreeTile(); }).toThrowDeveloperError(); expect(function() { return new QuadtreeTile({ y : 0, level : 0, tilingScheme : { tileXYToRectangle : function() { return undefined; } } }); }).toThrowDeveloperError(); expect(function() { return new QuadtreeTile({ x : 0, level : 0, tilingScheme : { tileXYToRectangle : function() { return undefined; } } }); }).toThrowDeveloperError(); expect(function() { return new QuadtreeTile({ x : 0, y : 0, tilingScheme : { tileXYToRectangle : function() { return undefined; } } }); }).toThrowDeveloperError(); expect(function() { return new QuadtreeTile({ x : 0, y : 0, level : 0 }); }).toThrowDeveloperError(); }); describe('createLevelZeroTiles', function() { var tilingScheme1x1; var tilingScheme2x2; var tilingScheme2x1; var tilingScheme1x2; beforeEach(function() { tilingScheme1x1 = new GeographicTilingScheme({ numberOfLevelZeroTilesX : 1, numberOfLevelZeroTilesY : 1 }); tilingScheme2x2 = new GeographicTilingScheme({ numberOfLevelZeroTilesX : 2, numberOfLevelZeroTilesY : 2 }); tilingScheme2x1 = new GeographicTilingScheme({ numberOfLevelZeroTilesX : 2, numberOfLevelZeroTilesY : 1 }); tilingScheme1x2 = new GeographicTilingScheme({ numberOfLevelZeroTilesX : 1, numberOfLevelZeroTilesY : 2 }); }); it('requires tilingScheme', function() { expect(function() { return QuadtreeTile.createLevelZeroTiles(undefined); }).toThrowDeveloperError(); }); it('creates expected number of tiles', function() { var tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme1x1); expect(tiles.length).toBe(1); tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme2x2); expect(tiles.length).toBe(4); tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme2x1); expect(tiles.length).toBe(2); tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme1x2); expect(tiles.length).toBe(2); }); it('created tiles are associated with specified tiling scheme', function() { var tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme2x2); for (var i = 0; i < tiles.length; ++i) { expect(tiles[i].tilingScheme).toBe(tilingScheme2x2); } }); it('created tiles are ordered from the northwest and proceeding 
east and then south', function() { var tiles = QuadtreeTile.createLevelZeroTiles(tilingScheme2x2); var northwest = tiles[0]; var northeast = tiles[1]; var southwest = tiles[2]; var southeast = tiles[3]; expect(northeast.rectangle.west).toBeGreaterThan(northwest.rectangle.west); expect(southeast.rectangle.west).toBeGreaterThan(southwest.rectangle.west); expect(northeast.rectangle.south).toBeGreaterThan(southeast.rectangle.south); expect(northwest.rectangle.south).toBeGreaterThan(southwest.rectangle.south); }); }); });
apache-2.0
sdnwiselab/onos
protocols/pcep/pcepio/src/main/java/org/onosproject/pcepio/types/LocalNodeDescriptorsTlv.java
8612
/* * Copyright 2016-present Open Networking Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.onosproject.pcepio.types; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Objects; import org.jboss.netty.buffer.ChannelBuffer; import org.onosproject.pcepio.exceptions.PcepParseException; import org.onosproject.pcepio.protocol.PcepVersion; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.MoreObjects; /** * Provides Local TE Node Descriptors TLV which contains Node Descriptor Sub-TLVs. */ public class LocalNodeDescriptorsTlv implements PcepValueType { /* REFERENCE :draft-dhodylee-pce-pcep-ls-01, section 9.2.2 * 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type=[TBD8] | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | // Node Descriptor Sub-TLVs (variable) // | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Note: Length is including header here. Refer Routing Universe TLV. */ protected static final Logger log = LoggerFactory.getLogger(LocalNodeDescriptorsTlv.class); public static final short TYPE = (short) 65282; short hLength; public static final int TLV_HEADER_LENGTH = 4; // Node Descriptor Sub-TLVs (variable) private List<PcepValueType> llNodeDescriptorSubTLVs; /** * Constructor to initialize llNodeDescriptorSubTLVs. * * @param llNodeDescriptorSubTLVs List of PcepValueType */ public LocalNodeDescriptorsTlv(List<PcepValueType> llNodeDescriptorSubTLVs) { this.llNodeDescriptorSubTLVs = llNodeDescriptorSubTLVs; } /** * Returns a new object of LocalNodeDescriptorsTLV. * * @param llNodeDescriptorSubTLVs linked list of Node Descriptor Sub TLVs * @return object of LocalNodeDescriptorsTLV */ public static LocalNodeDescriptorsTlv of(final List<PcepValueType> llNodeDescriptorSubTLVs) { return new LocalNodeDescriptorsTlv(llNodeDescriptorSubTLVs); } /** * Returns Linked List of tlvs. * * @return llNodeDescriptorSubTLVs linked list of Node Descriptor Sub TLV */ public List<PcepValueType> getllNodeDescriptorSubTLVs() { return llNodeDescriptorSubTLVs; } @Override public PcepVersion getVersion() { return PcepVersion.PCEP_1; } @Override public short getType() { return TYPE; } @Override public short getLength() { return hLength; } @Override public int hashCode() { return Objects.hash(llNodeDescriptorSubTLVs.hashCode()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } /* * Here we have a list of Tlv so to compare each sub tlv between the object * we have to take a list iterator so one by one we can get each sub tlv object * and can compare them. * it may be possible that the size of 2 lists is not equal so we have to first check * the size, if both are same then we should check for the subtlv objects otherwise * we should return false. 
*/ if (obj instanceof LocalNodeDescriptorsTlv) { int countObjSubTlv = 0; int countOtherSubTlv = 0; boolean isCommonSubTlv = true; LocalNodeDescriptorsTlv other = (LocalNodeDescriptorsTlv) obj; Iterator<PcepValueType> objListIterator = ((LocalNodeDescriptorsTlv) obj).llNodeDescriptorSubTLVs .iterator(); countObjSubTlv = ((LocalNodeDescriptorsTlv) obj).llNodeDescriptorSubTLVs.size(); countOtherSubTlv = other.llNodeDescriptorSubTLVs.size(); if (countObjSubTlv != countOtherSubTlv) { return false; } else { while (objListIterator.hasNext() && isCommonSubTlv) { PcepValueType subTlv = objListIterator.next(); isCommonSubTlv = Objects.equals(llNodeDescriptorSubTLVs.contains(subTlv), other.llNodeDescriptorSubTLVs.contains(subTlv)); } return isCommonSubTlv; } } return false; } @Override public int write(ChannelBuffer c) { int tlvStartIndex = c.writerIndex(); c.writeShort(TYPE); int tlvLenIndex = c.writerIndex(); hLength = 0; c.writeShort(0); ListIterator<PcepValueType> listIterator = llNodeDescriptorSubTLVs.listIterator(); while (listIterator.hasNext()) { PcepValueType tlv = listIterator.next(); if (tlv == null) { log.debug("TLV is null from subTlv list"); continue; } tlv.write(c); // need to take care of padding int pad = tlv.getLength() % 4; if (0 != pad) { pad = 4 - pad; for (int i = 0; i < pad; ++i) { c.writeByte((byte) 0); } } } hLength = (short) (c.writerIndex() - tlvStartIndex); c.setShort(tlvLenIndex, (hLength - TLV_HEADER_LENGTH)); return c.writerIndex() - tlvStartIndex; } /** * Reads the channel buffer and returns object of AutonomousSystemTlv. * * @param c input channel buffer * @param hLength length of subtlvs. * @return object of AutonomousSystemTlv * @throws PcepParseException if mandatory fields are missing */ public static PcepValueType read(ChannelBuffer c, short hLength) throws PcepParseException { // Node Descriptor Sub-TLVs (variable) List<PcepValueType> llNodeDescriptorSubTLVs = new LinkedList<>(); ChannelBuffer tempCb = c.readBytes(hLength); while (TLV_HEADER_LENGTH <= tempCb.readableBytes()) { PcepValueType tlv; short hType = tempCb.readShort(); int iValue = 0; short length = tempCb.readShort(); switch (hType) { case AutonomousSystemSubTlv.TYPE: iValue = tempCb.readInt(); tlv = new AutonomousSystemSubTlv(iValue); break; case BgpLsIdentifierSubTlv.TYPE: iValue = tempCb.readInt(); tlv = new BgpLsIdentifierSubTlv(iValue); break; case OspfAreaIdSubTlv.TYPE: iValue = tempCb.readInt(); tlv = new OspfAreaIdSubTlv(iValue); break; case IgpRouterIdSubTlv.TYPE: tlv = IgpRouterIdSubTlv.read(tempCb, length); break; default: throw new PcepParseException("Unsupported Sub TLV type :" + hType); } // Check for the padding int pad = length % 4; if (0 < pad) { pad = 4 - pad; if (pad <= tempCb.readableBytes()) { tempCb.skipBytes(pad); } } llNodeDescriptorSubTLVs.add(tlv); } if (0 < tempCb.readableBytes()) { throw new PcepParseException("Sub Tlv parsing error. Extra bytes received."); } return new LocalNodeDescriptorsTlv(llNodeDescriptorSubTLVs); } @Override public String toString() { return MoreObjects.toStringHelper(getClass()) .add("Type", TYPE) .add("Length", hLength) .add("NodeDescriptorSubTLVs", llNodeDescriptorSubTLVs) .toString(); } }
apache-2.0
Zhangsongsong/GraduationPro
毕业设计/code/android/YYFramework/src/com/easemob/livestream/data/TestAvatarRepository.java
1232
package com.easemob.livestream.data; import android.content.Context; import java.util.ArrayList; import java.util.List; import java.util.Random; import app.config.DemoApplication; /** * Created by wei on 2016/7/4. */ public class TestAvatarRepository { static List<Integer> avatarlist = new ArrayList<>(); List<Integer> indexList = new ArrayList<>(); static int SIZE = 9; static { Context context = DemoApplication.getInstance().getApplicationContext(); for(int i = 1; i <= SIZE; i++){ String name = "test_avatar"+i; int resId = context.getResources().getIdentifier(name,"drawable",context.getPackageName()); avatarlist.add(resId); } } public TestAvatarRepository(){ fillIndexList(); } private void fillIndexList(){ for(int i = 0; i < SIZE; i++){ indexList.add(i); } } public int getAvatar(){ if(indexList.size() != 0) { int index = new Random().nextInt(indexList.size()); int gotIndex = indexList.remove(index); return avatarlist.get(gotIndex); }else{ fillIndexList(); return getAvatar(); } } }
apache-2.0
cmanlh/jquery-component
com/lifeonwalden/jqc/contextmenu/contextmenu.js
15620
/* Copyright 2017 cmanlh Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /** * contextmenu * */ (function ($) { $JqcLoader.importComponents('com.lifeonwalden.jqc', ['baseElement', 'uniqueKey', 'toolkit']) .importCss($JqcLoader.getCmpParentURL('com.lifeonwalden.jqc', 'contextmenu').concat('css/contextmenu.css')) .execute(function () { var T = $.jqcToolkit; var $body = $('body'); /** * menus数据解析,为menu数组 * * { * id: optional, * text : requried, * valid : true, function(data) optional, default value: true * } */ const DEFAULT_OPTIONS = { menus: [], // 构建菜单的数据 max: null, onSelect: null, adapter: { id: 'id', text: 'text', child: 'child', valid: 'valid' }, autoSkip: true, width: 160, height: 32, }; $.jqcContextMenu = function (params) { this.options = Object.assign({}, DEFAULT_OPTIONS, params); this.box = null; //view this.level = 1; //菜单层级 this.needShowMenus = []; //显示的菜单 this.pageWidth = window.innerWidth; //页面可视宽度 this.pageHeight = window.innerHeight; //页面可视高度 this.width = this.options.width; //单个菜单的宽度 this.height = this.options.height; //单个菜单的高度 this.scrollTop = $(window).scrollTop(); //页面上卷高度 this.data = null; //显示条件 this.toLeft = false; this.toTop = false; this.maxHeight = this.options.max ? this.options.max * this.height : 'auto'; var firstLevelLen = this.options.menus.length; this.firstLevlHeight = (this.options.max && firstLevelLen > this.options.max) ? 
this.maxHeight : firstLevelLen * this.height; bindEvent.call(this); } /** * 根据传入数据,弹性显示菜单项,并在菜单被选择时,将data作为参数,并上menu数据传回给onSelect */ $.jqcContextMenu.prototype.show = function (data) { $body.find('.jqcContextMenu').remove(); var _this = this; this.level = 1; this.data = Array.prototype.slice.call(arguments); var _menus = [].concat(_this.options.menus); this.needShowMenus = getNeedShowMenus.call(_this, _menus, _this.data); if (this.options.autoSkip) { this.needShowMenus = skip.call(_this, this.needShowMenus); } this.level = getLevel.call(_this); setTimeout(function () { render.call(_this); }, 0); }; $.jqcContextMenu.prototype.hide = function () { $body.find('.jqcContextMenu').remove(); }; // 绑定事件 function bindEvent() { var _this = this; $(window).resize(function () { _this.pageWidth = window.innerWidth; _this.pageHeight = window.innerHeight; }).scroll(function () { _this.scrollTop = $(this).scrollTop(); }); $(document).on('contextmenu click', function (e) { _this.pageX = e.pageX; _this.pageY = e.pageY; }).on('mouseup', function () { $(this).off('mousemove.comtextmenu-slide'); }).click(function (e) { _this.options.onCancel && _this.options.onCancel(); _this.box && _this.box.remove(); }); } function render() { var _this = this; $body.append(createBox.call(_this)); } function createBox() { var _this = this; var _left = 0; this.box = $('<div>') .addClass('jqcContextMenu') .css({ left: _this.pageX, top: _this.pageY }).click(function (e) { e.stopPropagation(); }).on('contextmenu', function (e) { e.stopPropagation(); e.preventDefault(); }); if (this.pageX + (this.level * this.width) > this.pageWidth) { this.box.addClass('to-left'); this.toLeft = true; _left = -this.width; } else { this.box.removeClass('to-left'); this.toLeft = false; _left = 0; } if (this.pageY + this.firstLevlHeight - this.scrollTop > this.pageHeight) { this.box.addClass('to-top'); this.toTop = true; } else { this.box.removeClass('to-top'); this.toTop = false; } this.box.append(createMenu.call(_this, _this.needShowMenus, 0, _left)); return this.box; } function skip(data) { var _this = this; if (Array.isArray(data) && data.length === 1 && data[0]['__temp']) { return skip.call(_this, data[0]['__temp']); } else { return data; } } function createMenu(menu, offsetTop, left) { var _this = this; var _scrollBox = $('<div>') .addClass('jqcContextMenu-scrollBox') .css({ 'max-height': _this.maxHeight, 'width': _this.width + 20 }); menu.forEach(item => { if (_this.options.autoSkip) { item = simplifyData.call(_this, item); } var _item = $('<div>') .addClass('jqcContextMenu-item') .text(item[_this.options.adapter.text]) .on('mouseenter', function (e) { e.stopPropagation(); $(this).siblings() .removeClass('jqcContextMenu-active'); var _parent = $(this).parents('.jqcContextMenu-fakeScrollBox'); var _index = _parent.index(); _this.box.find('.jqcContextMenu-fakeScrollBox:gt(' + _index + ')').remove(); if (Array.isArray(item['__temp']) && item['__temp'].length > 0) { $(this).addClass('jqcContextMenu-active'); var _offsetTop = $(this).offset().top; _index++; var _left = _this.toLeft ? 
-(_this.width * (_index + 1) - _index) : (_this.width * _index - _index); _this.box.append(createMenu.call(_this, item['__temp'], _offsetTop, _left)); } }).click(function (e) { e.stopPropagation(); if (!Array.isArray(item['__temp']) || item['__temp'].length === 0) { _this.options.onSelect && _this.options.onSelect({ menu: item, showData: menu.showData }); _this.box.remove(); } }).css({ 'height': _this.height, 'width': _this.width - 2, 'line-height': _this.height + 'px' }); _scrollBox.append(_item); }); var _fakeScrollBox = $('<div>') .addClass('jqcContextMenu-fakeScrollBox') .append(_scrollBox) .css({ 'max-height': _this.maxHeight, 'width': _this.width - 2, 'left': left }); // 是否限制最大个数 if (this.options.max && menu.length > _this.options.max) { var _height = parseInt(_this.options.max * _this.maxHeight / menu.length); var _slide = $('<span><span>') .addClass('jqcContextMenu-slide') .on('mousedown click', function (e) { e.stopPropagation(); var _sibling = $(this).siblings('.jqcContextMenu-scrollBox'); var _parentOffsetTop = $(this).parent().offset().top + 1; var _y = e.pageY; var _offsetTop = $(this).offset().top - _parentOffsetTop; $(document).on('mousemove.comtextmenu-slide', function (e) { var _top = e.pageY - _y + _offsetTop; _top = _top < 0 ? 0 : _top; _top = _top + _height > _this.maxHeight ? _this.maxHeight - _height : _top; _slide.css('top', _top); var _scrollTop = (_top + _height) * menu.length * _this.height / _this.maxHeight - _this.maxHeight; _sibling.scrollTop(_scrollTop); }); }); _slide.height(_height); _fakeScrollBox.append(_slide); // 控制slide滚动 _scrollBox.scroll(function (e) { e.stopPropagation(); var _scrollTop = $(this).scrollTop(); var _top = (_this.maxHeight + _scrollTop) * _this.maxHeight / _this.height / menu.length - _height; _slide.css('top', _top); }); } if (offsetTop) { var _currentHeight = menu.length * _this.height; if (_this.options.max) { _currentHeight = menu.length > _this.options.max ? 
_this.options.max * _this.height : _currentHeight; } if (offsetTop - _this.scrollTop + _currentHeight + 20 > this.pageHeight) { _fakeScrollBox.css({ 'top': 'auto', 'bottom': _this.box.offset().top - offsetTop - _this.height - 1 + _this.scrollTop }); } else { _fakeScrollBox.css({ 'top': offsetTop - _this.box.offset().top - 1 - _this.scrollTop, 'bottom': 'auto' }); } } return _fakeScrollBox; } function getNeedShowMenus(menus, data) { var _this = this; var _menus = []; var rest = [].concat(data); var _data = null; if (rest.length > 1) { _data = rest.shift(); } else { _data = rest[0]; } _menus = menus.filter(item => { var _valid = item[_this.options.adapter.valid]; var _child = item[_this.options.adapter.child]; if (T.rawType(_valid) === 'Undefined') { if (_child) { // item[_this.options.adapter.child] = getNeedShowMenus.call(_this, _child, rest); item['__temp'] = getNeedShowMenus.call(_this, _child, rest); } return true; } else if (T.rawType(_valid) === 'Function') { if (_data === undefined || _valid.apply(this, data)) { if (_child) { // item[_this.options.adapter.child] = getNeedShowMenus.call(_this, _child, rest); item['__temp'] = getNeedShowMenus.call(_this, _child, rest); } return true; } else { return false; } } else if (T.rawType(_valid) === 'Boolean') { if (_valid) { if (_child) { // item[_this.options.adapter.child] = getNeedShowMenus.call(_this, _child, rest); item['__temp'] = getNeedShowMenus.call(_this, _child, rest); } return true; } else { return false; } } else { throw new Error('jqcContextMenu: valid expects a Boolean or Function'); } }); _menus.showData = _data; return _menus; } function getLevel() { var _this = this; var _child = []; var _level = 1; this.needShowMenus.forEach(function (item) { if (item['__temp']) { _level = 2; _child = _child.concat(item.child); } }); _child.forEach(function (item) { if (item['__temp']) { _level = 3; } }); return _level; }; function simplifyData(data) { var _this = this; var child = []; var _child = '__temp'; if (!data.hasOwnProperty(_child) || !Array.isArray(data[_child]) || data[_child].length === 0) { return data; } child = data[_child]; var len = child.length; if (len >= 2) { return data; } if (len === 1) { return simplifyData.call(_this, child[0]); } } }); }(jQuery));
apache-2.0
missedone/testng
src/main/java/org/testng/util/Strings.java
2202
package org.testng.util; import org.testng.collections.Maps; import java.util.Map; public final class Strings { private Strings() { // Utility class. Defeat instantiation. } //TODO: When TestNG moves to JDK11 as the default JDK this method needs to be deprecated and removed //because now this method is present in JDK11 as part of the JDK itself. //See http://hg.openjdk.java.net/jdk/jdk/file/fc16b5f193c7/src/java.base/share/classes/java/lang/String.java#l2984 public static String repeat(String text, int count) { StringBuilder builder = new StringBuilder(); for (int i = 0; i < count; i++) { builder.append(text); } return builder.toString(); } public static boolean isNullOrEmpty(String string) { return string == null || string.trim().isEmpty(); } public static boolean isNotNullAndNotEmpty(String string) { return !(isNullOrEmpty(string)); } /** * @param string - The input String. * @return - Returns an empty string if the input String is <code>null</code> (or) empty, else it * returns back the input string. */ public static String getValueOrEmpty(String string) { return isNotNullAndNotEmpty(string) ? string : ""; } private static final Map<String, String> ESCAPE_HTML_MAP = Maps.newLinkedHashMap(); static { ESCAPE_HTML_MAP.put("&", "&amp;"); ESCAPE_HTML_MAP.put("<", "&lt;"); ESCAPE_HTML_MAP.put(">", "&gt;"); } public static String escapeHtml(String text) { String result = text; for (Map.Entry<String, String> entry : ESCAPE_HTML_MAP.entrySet()) { result = result.replace(entry.getKey(), entry.getValue()); } return result; } public static String valueOf(Map<?, ?> m) { StringBuilder result = new StringBuilder(); for (Object o : m.values()) { result.append(o).append(" "); } return result.toString(); } public static String join(String delimiter, String[] parts) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < parts.length - 1; i++) { sb.append(parts[i]).append(delimiter); } if (parts.length > 1) { sb.append(parts[parts.length - 1]); } return sb.toString(); } }
apache-2.0
kou/zulip
static/js/stats/stats.js
36820
"use strict"; const Plotly = require("plotly.js/lib/core"); Plotly.register([require("plotly.js/lib/bar"), require("plotly.js/lib/pie")]); const font_14pt = { family: "Source Sans Pro", size: 14, color: "#000000", }; let last_full_update = Infinity; // TODO: should take a dict of arrays and do it for all keys function partial_sums(array) { let accumulator = 0; return array.map((o) => { accumulator += o; return accumulator; }); } // Assumes date is a round number of hours function floor_to_local_day(date) { const date_copy = new Date(date.getTime()); date_copy.setHours(0); return date_copy; } // Assumes date is a round number of hours function floor_to_local_week(date) { const date_copy = floor_to_local_day(date); date_copy.setHours(-24 * date.getDay()); return date_copy; } function format_date(date, include_hour) { const months = [ i18n.t("January"), i18n.t("February"), i18n.t("March"), i18n.t("April"), i18n.t("May"), i18n.t("June"), i18n.t("July"), i18n.t("August"), i18n.t("September"), i18n.t("October"), i18n.t("November"), i18n.t("December"), ]; const month_str = months[date.getMonth()]; const year = date.getFullYear(); const day = date.getDate(); if (include_hour) { const hour = date.getHours(); const str = hour >= 12 ? "PM" : "AM"; return month_str + " " + day + ", " + (hour % 12) + ":00" + str; } return month_str + " " + day + ", " + year; } function update_last_full_update(end_times) { if (end_times.length === 0) { return; } last_full_update = Math.min(last_full_update, end_times[end_times.length - 1]); const update_time = new Date(last_full_update * 1000); const locale_date = update_time.toLocaleDateString("en-US", { year: "numeric", month: "long", day: "numeric", }); const locale_time = update_time.toLocaleTimeString().replace(":00 ", " "); $("#id_last_full_update").text(locale_time + " on " + locale_date); $("#id_last_full_update").closest(".last-update").show(); } $(() => { $('span[data-toggle="tooltip"]').tooltip({ animation: false, placement: "top", trigger: "manual", }); $("#id_last_update_question_sign") .on("mouseenter", () => { $("span.last_update_tooltip").tooltip("show"); }) .on("mouseleave", () => { $("span.last_update_tooltip").tooltip("hide"); }); // Add configuration for any additional tooltips here. 
}); function populate_messages_sent_over_time(data) { if (data.end_times.length === 0) { // TODO: do something nicer here return; } // Helper functions function make_traces(dates, values, type, date_formatter) { const text = dates.map((date) => date_formatter(date)); const common = {x: dates, type, hoverinfo: "none", text}; return { human: { // 5062a0 name: i18n.t("Humans"), y: values.human, marker: {color: "#5f6ea0"}, ...common, }, bot: { // a09b5f bbb56e name: i18n.t("Bots"), y: values.bot, marker: {color: "#b7b867"}, ...common, }, me: { name: i18n.t("Me"), y: values.me, marker: {color: "#be6d68"}, ...common, }, }; } const layout = { barmode: "group", width: 750, height: 400, margin: {l: 40, r: 0, b: 40, t: 0}, xaxis: { fixedrange: true, rangeslider: {bordercolor: "#D8D8D8", borderwidth: 1}, type: "date", }, yaxis: {fixedrange: true, rangemode: "tozero"}, legend: { x: 0.62, y: 1.12, orientation: "h", font: font_14pt, }, font: font_14pt, }; function make_rangeselector(x, y, button1, button2) { return { x, y, buttons: [ {stepmode: "backward", ...button1}, {stepmode: "backward", ...button2}, {step: "all", label: i18n.t("All time")}, ], }; } // This is also the cumulative rangeselector const daily_rangeselector = make_rangeselector( 0.68, -0.62, {count: 10, label: i18n.t("Last 10 days"), step: "day"}, {count: 30, label: i18n.t("Last 30 days"), step: "day"}, ); const weekly_rangeselector = make_rangeselector( 0.656, -0.62, {count: 2, label: i18n.t("Last 2 months"), step: "month"}, {count: 6, label: i18n.t("Last 6 months"), step: "month"}, ); function add_hover_handler() { document.querySelector("#id_messages_sent_over_time").on("plotly_hover", (data) => { $("#hoverinfo").show(); document.querySelector("#hover_date").textContent = data.points[0].data.text[data.points[0].pointNumber]; const values = [null, null, null]; data.points.forEach((trace) => { values[trace.curveNumber] = trace.y; }); const hover_text_ids = ["#hover_me", "#hover_human", "#hover_bot"]; const hover_value_ids = ["#hover_me_value", "#hover_human_value", "#hover_bot_value"]; for (const [i, value] of values.entries()) { if (value !== null) { document.querySelector(hover_text_ids[i]).style.display = "inline"; document.querySelector(hover_value_ids[i]).style.display = "inline"; document.querySelector(hover_value_ids[i]).textContent = value; } else { document.querySelector(hover_text_ids[i]).style.display = "none"; document.querySelector(hover_value_ids[i]).style.display = "none"; } } }); } const start_dates = data.end_times.map( (timestamp) => // data.end_times are the ends of hour long intervals. 
new Date(timestamp * 1000 - 60 * 60 * 1000), ); function aggregate_data(aggregation) { let start; let is_boundary; if (aggregation === "day") { start = floor_to_local_day(start_dates[0]); is_boundary = function (date) { return date.getHours() === 0; }; } else if (aggregation === "week") { start = floor_to_local_week(start_dates[0]); is_boundary = function (date) { return date.getHours() === 0 && date.getDay() === 0; }; } const dates = [start]; const values = {human: [], bot: [], me: []}; let current = {human: 0, bot: 0, me: 0}; let i_init = 0; if (is_boundary(start_dates[0])) { current = { human: data.everyone.human[0], bot: data.everyone.bot[0], me: data.user.human[0], }; i_init = 1; } for (let i = i_init; i < start_dates.length; i += 1) { if (is_boundary(start_dates[i])) { dates.push(start_dates[i]); values.human.push(current.human); values.bot.push(current.bot); values.me.push(current.me); current = {human: 0, bot: 0, me: 0}; } current.human += data.everyone.human[i]; current.bot += data.everyone.bot[i]; current.me += data.user.human[i]; } values.human.push(current.human); values.bot.push(current.bot); values.me.push(current.me); return { dates, values, last_value_is_partial: !is_boundary( new Date(start_dates[start_dates.length - 1].getTime() + 60 * 60 * 1000), ), }; } // Generate traces let date_formatter = function (date) { return format_date(date, true); }; let values = {me: data.user.human, human: data.everyone.human, bot: data.everyone.bot}; let info = aggregate_data("day"); date_formatter = function (date) { return format_date(date, false); }; const last_day_is_partial = info.last_value_is_partial; const daily_traces = make_traces(info.dates, info.values, "bar", date_formatter); info = aggregate_data("week"); date_formatter = function (date) { return i18n.t("Week of __date__", {date: format_date(date, false)}); }; const last_week_is_partial = info.last_value_is_partial; const weekly_traces = make_traces(info.dates, info.values, "bar", date_formatter); const dates = data.end_times.map((timestamp) => new Date(timestamp * 1000)); values = { human: partial_sums(data.everyone.human), bot: partial_sums(data.everyone.bot), me: partial_sums(data.user.human), }; date_formatter = function (date) { return format_date(date, true); }; const cumulative_traces = make_traces(dates, values, "scatter", date_formatter); // Functions to draw and interact with the plot // We need to redraw plot entirely if switching from (the cumulative) line // graph to any bar graph, since otherwise the rangeselector shows both (plotly bug) let clicked_cumulative = false; function draw_or_update_plot(rangeselector, traces, last_value_is_partial, initial_draw) { $("#daily_button, #weekly_button, #cumulative_button").removeClass("selected"); $("#id_messages_sent_over_time > div").removeClass("spinner"); if (initial_draw) { traces.human.visible = true; traces.bot.visible = "legendonly"; traces.me.visible = "legendonly"; } else { const plotDiv = document.querySelector("#id_messages_sent_over_time"); traces.me.visible = plotDiv.data[0].visible; traces.human.visible = plotDiv.data[1].visible; traces.bot.visible = plotDiv.data[2].visible; } layout.xaxis.rangeselector = rangeselector; if (clicked_cumulative || initial_draw) { Plotly.newPlot( "id_messages_sent_over_time", [traces.me, traces.human, traces.bot], layout, {displayModeBar: false}, ); add_hover_handler(); } else { Plotly.deleteTraces("id_messages_sent_over_time", [0, 1, 2]); Plotly.addTraces("id_messages_sent_over_time", [traces.me, traces.human, traces.bot]); 
Plotly.relayout("id_messages_sent_over_time", layout); } $("#id_messages_sent_over_time").attr("last_value_is_partial", last_value_is_partial); } // Click handlers for aggregation buttons $("#daily_button").on("click", function () { draw_or_update_plot(daily_rangeselector, daily_traces, last_day_is_partial, false); $(this).addClass("selected"); clicked_cumulative = false; }); $("#weekly_button").on("click", function () { draw_or_update_plot(weekly_rangeselector, weekly_traces, last_week_is_partial, false); $(this).addClass("selected"); clicked_cumulative = false; }); $("#cumulative_button").on("click", function () { clicked_cumulative = false; draw_or_update_plot(daily_rangeselector, cumulative_traces, false, false); $(this).addClass("selected"); clicked_cumulative = true; }); // Initial drawing of plot if (weekly_traces.human.x.length < 12) { draw_or_update_plot(daily_rangeselector, daily_traces, last_day_is_partial, true); $("#daily_button").addClass("selected"); } else { draw_or_update_plot(weekly_rangeselector, weekly_traces, last_week_is_partial, true); $("#weekly_button").addClass("selected"); } } function round_to_percentages(values, total) { return values.map((x) => { if (x === total) { return "100%"; } if (x === 0) { return "0%"; } const unrounded = (x / total) * 100; const precision = Math.min( 6, // this is the max precision (two #, 4 decimal points; 99.9999%). Math.max( 2, // the minimum amount of precision (40% or 6.0%). Math.floor(-Math.log10(100 - unrounded)) + 3, ), ); return unrounded.toPrecision(precision) + "%"; }); } // Last label will turn into "Other" if time_series data has a label not in labels function compute_summary_chart_data(time_series_data, num_steps, labels_) { const data = new Map(); for (const [key, array] of Object.entries(time_series_data)) { if (array.length < num_steps) { num_steps = array.length; } let sum = 0; for (let i = 1; i <= num_steps; i += 1) { sum += array[array.length - i]; } data.set(key, sum); } const labels = labels_.slice(); const values = []; labels.forEach((label) => { if (data.has(label)) { values.push(data.get(label)); data.delete(label); } else { values.push(0); } }); if (data.size !== 0) { labels[labels.length - 1] = "Other"; for (const sum of data.values()) { values[labels.length - 1] += sum; } } let total = 0; for (const value of values) { total += value; } return { values, labels, percentages: round_to_percentages(values, total), total, }; } function populate_messages_sent_by_client(data) { const layout = { width: 750, height: null, // set in draw_plot() margin: {l: 3, r: 40, b: 40, t: 0}, font: font_14pt, xaxis: {range: null}, // set in draw_plot() yaxis: {showticklabels: false}, showlegend: false, }; // sort labels so that values are descending in the default view const everyone_month = compute_summary_chart_data( data.everyone, 30, data.display_order.slice(0, 12), ); const label_values = []; for (let i = 0; i < everyone_month.values.length; i += 1) { label_values.push({ label: everyone_month.labels[i], value: everyone_month.labels[i] === "Other" ? 
-1 : everyone_month.values[i], }); } label_values.sort((a, b) => b.value - a.value); const labels = []; label_values.forEach((item) => { labels.push(item.label); }); function make_plot_data(time_series_data, num_steps) { const plot_data = compute_summary_chart_data(time_series_data, num_steps, labels); plot_data.values.reverse(); plot_data.labels.reverse(); plot_data.percentages.reverse(); const annotations = {values: [], labels: [], text: []}; for (let i = 0; i < plot_data.values.length; i += 1) { if (plot_data.values[i] > 0) { annotations.values.push(plot_data.values[i]); annotations.labels.push(plot_data.labels[i]); annotations.text.push( " " + plot_data.labels[i] + " (" + plot_data.percentages[i] + ")", ); } } return { trace: { x: plot_data.values, y: plot_data.labels, type: "bar", orientation: "h", sort: false, textinfo: "text", hoverinfo: "none", marker: {color: "#537c5e"}, font: {family: "Source Sans Pro", size: 18, color: "#000000"}, }, trace_annotations: { x: annotations.values, y: annotations.labels, mode: "text", type: "scatter", textposition: "middle right", text: annotations.text, }, }; } const plot_data = { everyone: { cumulative: make_plot_data(data.everyone, data.end_times.length), year: make_plot_data(data.everyone, 365), month: make_plot_data(data.everyone, 30), week: make_plot_data(data.everyone, 7), }, user: { cumulative: make_plot_data(data.user, data.end_times.length), year: make_plot_data(data.user, 365), month: make_plot_data(data.user, 30), week: make_plot_data(data.user, 7), }, }; let user_button = "everyone"; let time_button; if (data.end_times.length >= 30) { time_button = "month"; $("#messages_by_client_last_month_button").addClass("selected"); } else { time_button = "cumulative"; $("#messages_by_client_cumulative_button").addClass("selected"); } if (data.end_times.length < 365) { $("#pie_messages_sent_by_client button[data-time='year']").remove(); if (data.end_times.length < 30) { $("#pie_messages_sent_by_client button[data-time='month']").remove(); if (data.end_times.length < 7) { $("#pie_messages_sent_by_client button[data-time='week']").remove(); } } } function draw_plot() { $("#id_messages_sent_by_client > div").removeClass("spinner"); const data_ = plot_data[user_button][time_button]; layout.height = layout.margin.b + data_.trace.x.length * 30; layout.xaxis.range = [0, Math.max(...data_.trace.x) * 1.3]; Plotly.newPlot( "id_messages_sent_by_client", [data_.trace, data_.trace_annotations], layout, {displayModeBar: false, staticPlot: true}, ); } draw_plot(); // Click handlers function set_user_button(button) { $("#pie_messages_sent_by_client button[data-user]").removeClass("selected"); button.addClass("selected"); } function set_time_button(button) { $("#pie_messages_sent_by_client button[data-time]").removeClass("selected"); button.addClass("selected"); } $("#pie_messages_sent_by_client button").on("click", function () { if ($(this).attr("data-user")) { set_user_button($(this)); user_button = $(this).attr("data-user"); } if ($(this).attr("data-time")) { set_time_button($(this)); time_button = $(this).attr("data-time"); } draw_plot(); }); // handle links with @href started with '#' only $(document).on("click", 'a[href^="#"]', function (e) { // target element id const id = $(this).attr("href"); // target element const $id = $(id); if ($id.length === 0) { return; } // prevent standard hash navigation (avoid blinking in IE) e.preventDefault(); const pos = $id.offset().top + $(".page-content")[0].scrollTop - 50; $(".page-content").animate({scrollTop: pos + 
"px"}, 500); }); } function populate_messages_sent_by_message_type(data) { const layout = { margin: {l: 90, r: 0, b: 0, t: 0}, width: 750, height: 300, font: font_14pt, }; function make_plot_data(time_series_data, num_steps) { const plot_data = compute_summary_chart_data( time_series_data, num_steps, data.display_order, ); const labels = []; for (let i = 0; i < plot_data.labels.length; i += 1) { labels.push(plot_data.labels[i] + " (" + plot_data.percentages[i] + ")"); } const total_string = plot_data.total.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); return { trace: { values: plot_data.values, labels, type: "pie", direction: "clockwise", rotation: -90, sort: false, textinfo: "text", text: plot_data.labels.map(() => ""), hoverinfo: "label+value", pull: 0.05, marker: { colors: ["#68537c", "#be6d68", "#b3b348"], }, }, total_str: i18n.t("<b>Total messages</b>: __total_messages__", { total_messages: total_string, }), }; } const plot_data = { everyone: { cumulative: make_plot_data(data.everyone, data.end_times.length), year: make_plot_data(data.everyone, 365), month: make_plot_data(data.everyone, 30), week: make_plot_data(data.everyone, 7), }, user: { cumulative: make_plot_data(data.user, data.end_times.length), year: make_plot_data(data.user, 365), month: make_plot_data(data.user, 30), week: make_plot_data(data.user, 7), }, }; let user_button = "everyone"; let time_button; if (data.end_times.length >= 30) { time_button = "month"; $("#messages_by_type_last_month_button").addClass("selected"); } else { time_button = "cumulative"; $("#messages_by_type_cumulative_button").addClass("selected"); } const totaldiv = document.querySelector("#pie_messages_sent_by_type_total"); if (data.end_times.length < 365) { $("#pie_messages_sent_by_type button[data-time='year']").remove(); if (data.end_times.length < 30) { $("#pie_messages_sent_by_type button[data-time='month']").remove(); if (data.end_times.length < 7) { $("#pie_messages_sent_by_type button[data-time='week']").remove(); } } } function draw_plot() { $("#id_messages_sent_by_message_type > div").removeClass("spinner"); Plotly.newPlot( "id_messages_sent_by_message_type", [plot_data[user_button][time_button].trace], layout, {displayModeBar: false}, ); totaldiv.innerHTML = plot_data[user_button][time_button].total_str; } draw_plot(); // Click handlers function set_user_button(button) { $("#pie_messages_sent_by_type button[data-user]").removeClass("selected"); button.addClass("selected"); } function set_time_button(button) { $("#pie_messages_sent_by_type button[data-time]").removeClass("selected"); button.addClass("selected"); } $("#pie_messages_sent_by_type button").on("click", function () { if ($(this).attr("data-user")) { set_user_button($(this)); user_button = $(this).attr("data-user"); } if ($(this).attr("data-time")) { set_time_button($(this)); time_button = $(this).attr("data-time"); } draw_plot(); }); } function populate_number_of_users(data) { const layout = { width: 750, height: 370, margin: {l: 40, r: 0, b: 65, t: 20}, xaxis: { fixedrange: true, rangeslider: {bordercolor: "#D8D8D8", borderwidth: 1}, rangeselector: { x: 0.64, y: -0.79, buttons: [ {count: 2, label: i18n.t("Last 2 months"), step: "month", stepmode: "backward"}, {count: 6, label: i18n.t("Last 6 months"), step: "month", stepmode: "backward"}, {step: "all", label: i18n.t("All time")}, ], }, }, yaxis: {fixedrange: true, rangemode: "tozero"}, font: font_14pt, }; const end_dates = data.end_times.map((timestamp) => new Date(timestamp * 1000)); const text = end_dates.map((date) => 
format_date(date, false)); function make_traces(values, type) { return { x: end_dates, y: values, type, name: i18n.t("Active users"), hoverinfo: "none", text, visible: true, }; } function add_hover_handler() { document.querySelector("#id_number_of_users").on("plotly_hover", (data) => { $("#users_hover_info").show(); document.querySelector("#users_hover_date").textContent = data.points[0].data.text[data.points[0].pointNumber]; const values = [null, null, null]; data.points.forEach((trace) => { values[trace.curveNumber] = trace.y; }); const hover_value_ids = [ "#users_hover_1day_value", "#users_hover_15day_value", "#users_hover_all_time_value", ]; for (const [i, value] of values.entries()) { if (value !== null) { document.querySelector(hover_value_ids[i]).style.display = "inline"; document.querySelector(hover_value_ids[i]).textContent = value; } else { document.querySelector(hover_value_ids[i]).style.display = "none"; } } }); } const _1day_trace = make_traces(data.everyone._1day, "bar"); const _15day_trace = make_traces(data.everyone._15day, "scatter"); const all_time_trace = make_traces(data.everyone.all_time, "scatter"); $("#id_number_of_users > div").removeClass("spinner"); // Redraw the plot every time for simplicity. If we have perf problems with this in the // future, we can copy the update behavior from populate_messages_sent_over_time function draw_or_update_plot(trace) { $("#1day_actives_button, #15day_actives_button, #all_time_actives_button").removeClass( "selected", ); Plotly.newPlot("id_number_of_users", [trace], layout, {displayModeBar: false}); add_hover_handler(); } $("#1day_actives_button").on("click", function () { draw_or_update_plot(_1day_trace); $(this).addClass("selected"); }); $("#15day_actives_button").on("click", function () { draw_or_update_plot(_15day_trace); $(this).addClass("selected"); }); $("#all_time_actives_button").on("click", function () { draw_or_update_plot(all_time_trace); $(this).addClass("selected"); }); // Initial drawing of plot draw_or_update_plot(all_time_trace, true); $("#all_time_actives_button").addClass("selected"); } function populate_messages_read_over_time(data) { if (data.end_times.length === 0) { // TODO: do something nicer here return; } // Helper functions function make_traces(dates, values, type, date_formatter) { const text = dates.map((date) => date_formatter(date)); const common = {x: dates, type, hoverinfo: "none", text}; return { everyone: { name: i18n.t("Everyone"), y: values.everyone, marker: {color: "#5f6ea0"}, ...common, }, me: { name: i18n.t("Me"), y: values.me, marker: {color: "#be6d68"}, ...common, }, }; } const layout = { barmode: "group", width: 750, height: 400, margin: {l: 40, r: 0, b: 40, t: 0}, xaxis: { fixedrange: true, rangeslider: {bordercolor: "#D8D8D8", borderwidth: 1}, type: "date", }, yaxis: {fixedrange: true, rangemode: "tozero"}, legend: { x: 0.62, y: 1.12, orientation: "h", font: font_14pt, }, font: font_14pt, }; function make_rangeselector(x, y, button1, button2) { return { x, y, buttons: [ {stepmode: "backward", ...button1}, {stepmode: "backward", ...button2}, {step: "all", label: i18n.t("All time")}, ], }; } // This is also the cumulative rangeselector const daily_rangeselector = make_rangeselector( 0.68, -0.62, {count: 10, label: i18n.t("Last 10 days"), step: "day"}, {count: 30, label: i18n.t("Last 30 days"), step: "day"}, ); const weekly_rangeselector = make_rangeselector( 0.656, -0.62, {count: 2, label: i18n.t("Last 2 months"), step: "month"}, {count: 6, label: i18n.t("Last 6 months"), step: "month"}, 
); function add_hover_handler() { document.querySelector("#id_messages_read_over_time").on("plotly_hover", (data) => { $("#read_hover_info").show(); document.querySelector("#read_hover_date").textContent = data.points[0].data.text[data.points[0].pointNumber]; const values = [null, null]; data.points.forEach((trace) => { values[trace.curveNumber] = trace.y; }); const read_hover_text_ids = ["#read_hover_me", "#read_hover_everyone"]; const read_hover_value_ids = ["#read_hover_me_value", "#read_hover_everyone_value"]; for (const [i, value] of values.entries()) { if (value !== null) { document.querySelector(read_hover_text_ids[i]).style.display = "inline"; document.querySelector(read_hover_value_ids[i]).style.display = "inline"; document.querySelector(read_hover_value_ids[i]).textContent = value; } else { document.querySelector(read_hover_text_ids[i]).style.display = "none"; document.querySelector(read_hover_value_ids[i]).style.display = "none"; } } }); } const start_dates = data.end_times.map( (timestamp) => // data.end_times are the ends of hour long intervals. new Date(timestamp * 1000 - 60 * 60 * 1000), ); function aggregate_data(aggregation) { let start; let is_boundary; if (aggregation === "day") { start = floor_to_local_day(start_dates[0]); is_boundary = function (date) { return date.getHours() === 0; }; } else if (aggregation === "week") { start = floor_to_local_week(start_dates[0]); is_boundary = function (date) { return date.getHours() === 0 && date.getDay() === 0; }; } const dates = [start]; const values = {everyone: [], me: []}; let current = {everyone: 0, me: 0}; let i_init = 0; if (is_boundary(start_dates[0])) { current = {everyone: data.everyone.read[0], me: data.user.read[0]}; i_init = 1; } for (let i = i_init; i < start_dates.length; i += 1) { if (is_boundary(start_dates[i])) { dates.push(start_dates[i]); values.everyone.push(current.everyone); values.me.push(current.me); current = {everyone: 0, me: 0}; } current.everyone += data.everyone.read[i]; current.me += data.user.read[i]; } values.everyone.push(current.everyone); values.me.push(current.me); return { dates, values, last_value_is_partial: !is_boundary( new Date(start_dates[start_dates.length - 1].getTime() + 60 * 60 * 1000), ), }; } // Generate traces let date_formatter = function (date) { return format_date(date, true); }; let values = {me: data.user.read, everyone: data.everyone.read}; let info = aggregate_data("day"); date_formatter = function (date) { return format_date(date, false); }; const last_day_is_partial = info.last_value_is_partial; const daily_traces = make_traces(info.dates, info.values, "bar", date_formatter); info = aggregate_data("week"); date_formatter = function (date) { return i18n.t("Week of __date__", {date: format_date(date, false)}); }; const last_week_is_partial = info.last_value_is_partial; const weekly_traces = make_traces(info.dates, info.values, "bar", date_formatter); const dates = data.end_times.map((timestamp) => new Date(timestamp * 1000)); values = {everyone: partial_sums(data.everyone.read), me: partial_sums(data.user.read)}; date_formatter = function (date) { return format_date(date, true); }; const cumulative_traces = make_traces(dates, values, "scatter", date_formatter); // Functions to draw and interact with the plot // We need to redraw plot entirely if switching from (the cumulative) line // graph to any bar graph, since otherwise the rangeselector shows both (plotly bug) let clicked_cumulative = false; function draw_or_update_plot(rangeselector, traces, last_value_is_partial, 
initial_draw) { $("#read_daily_button, #read_weekly_button, #read_cumulative_button").removeClass( "selected", ); $("#id_messages_read_over_time > div").removeClass("spinner"); if (initial_draw) { traces.everyone.visible = true; traces.me.visible = "legendonly"; } else { const plotDiv = document.querySelector("#id_messages_read_over_time"); traces.me.visible = plotDiv.data[0].visible; traces.everyone.visible = plotDiv.data[1].visible; } layout.xaxis.rangeselector = rangeselector; if (clicked_cumulative || initial_draw) { Plotly.newPlot("id_messages_read_over_time", [traces.me, traces.everyone], layout, { displayModeBar: false, }); add_hover_handler(); } else { Plotly.deleteTraces("id_messages_read_over_time", [0, 1]); Plotly.addTraces("id_messages_read_over_time", [traces.me, traces.everyone]); Plotly.relayout("id_messages_read_over_time", layout); } $("#id_messages_read_over_time").attr("last_value_is_partial", last_value_is_partial); } // Click handlers for aggregation buttons $("#read_daily_button").on("click", function () { draw_or_update_plot(daily_rangeselector, daily_traces, last_day_is_partial, false); $(this).addClass("selected"); clicked_cumulative = false; }); $("#read_weekly_button").on("click", function () { draw_or_update_plot(weekly_rangeselector, weekly_traces, last_week_is_partial, false); $(this).addClass("selected"); clicked_cumulative = false; }); $("#read_cumulative_button").on("click", function () { clicked_cumulative = false; draw_or_update_plot(daily_rangeselector, cumulative_traces, false, false); $(this).addClass("selected"); clicked_cumulative = true; }); // Initial drawing of plot if (weekly_traces.everyone.x.length < 12) { draw_or_update_plot(daily_rangeselector, daily_traces, last_day_is_partial, true); $("#read_daily_button").addClass("selected"); } else { draw_or_update_plot(weekly_rangeselector, weekly_traces, last_week_is_partial, true); $("#read_weekly_button").addClass("selected"); } } function get_chart_data(data, callback) { $.get({ url: "/json/analytics/chart_data" + page_params.data_url_suffix, data, idempotent: true, success(data) { callback(data); update_last_full_update(data.end_times); }, error(xhr) { $("#id_stats_errors").show().text(JSON.parse(xhr.responseText).msg); }, }); } get_chart_data( {chart_name: "messages_sent_over_time", min_length: "10"}, populate_messages_sent_over_time, ); get_chart_data( {chart_name: "messages_sent_by_client", min_length: "10"}, populate_messages_sent_by_client, ); get_chart_data( {chart_name: "messages_sent_by_message_type", min_length: "10"}, populate_messages_sent_by_message_type, ); get_chart_data({chart_name: "number_of_humans", min_length: "10"}, populate_number_of_users); get_chart_data( {chart_name: "messages_read_over_time", min_length: "10"}, populate_messages_read_over_time, );
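// Editor's note: the sketch below is illustrative and not part of the original file.
// It distills the bucketing idea shared by the aggregate_data() helpers above: hourly
// samples are summed into day/week buckets that begin whenever is_boundary() fires,
// and the trailing bucket may be partial. All names here are hypothetical.
function bucket_hourly(hourly_values, start_dates, is_boundary) {
    const bucket_sums = [];
    let current = 0;
    for (let i = 0; i < hourly_values.length; i += 1) {
        // A boundary starts a new bucket (except at the very first sample).
        if (i > 0 && is_boundary(start_dates[i])) {
            bucket_sums.push(current);
            current = 0;
        }
        current += hourly_values[i];
    }
    bucket_sums.push(current); // trailing bucket; partial unless the next hour is a boundary
    return bucket_sums;
}
// Example: daily buckets from hourly data.
// bucket_hourly(values, dates, (d) => d.getHours() === 0);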
apache-2.0
ijuma/kafka
streams/src/main/java/org/apache/kafka/streams/KafkaStreams.java
31930
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.streams; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.errors.InvalidStateStoreException; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KStreamBuilder; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.processor.Processor; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StreamPartitioner; import org.apache.kafka.streams.processor.TopologyBuilder; import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier; import org.apache.kafka.streams.processor.internals.GlobalStreamThread; import org.apache.kafka.streams.processor.internals.ProcessorTopology; import org.apache.kafka.streams.processor.internals.StateDirectory; import org.apache.kafka.streams.processor.internals.StreamThread; import org.apache.kafka.streams.processor.internals.StreamsKafkaClient; import org.apache.kafka.streams.processor.internals.StreamsMetadataState; import org.apache.kafka.streams.state.HostInfo; import org.apache.kafka.streams.state.QueryableStoreType; import org.apache.kafka.streams.state.StreamsMetadata; import org.apache.kafka.streams.state.internals.GlobalStateStoreProvider; import org.apache.kafka.streams.state.internals.QueryableStoreProvider; import org.apache.kafka.streams.state.internals.StateStoreProvider; import org.apache.kafka.streams.state.internals.StreamThreadStateStoreProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; import static org.apache.kafka.common.utils.Utils.getHost; import static org.apache.kafka.common.utils.Utils.getPort; /** * A 
Kafka client that allows for performing continuous computation on input coming from one or more input topics and * sends output to zero, one, or more output topics. * <p> * The computational logic can be specified either by using the {@link TopologyBuilder} to define a DAG topology of * {@link Processor}s or by using the {@link KStreamBuilder} which provides the high-level DSL to define transformations. * <p> * One {@code KafkaStreams} instance can contain one or more threads specified in the configs for the processing work. * <p> * A {@code KafkaStreams} instance can co-ordinate with any other instances with the same * {@link StreamsConfig#APPLICATION_ID_CONFIG application ID} (whether in the same process, on other processes on this * machine, or on remote machines) as a single (possibly distributed) stream processing application. * These instances will divide up the work based on the assignment of the input topic partitions so that all partitions * are being consumed. * If instances are added or fail, all (remaining) instances will rebalance the partition assignment among themselves * to balance processing load and ensure that all input topic partitions are processed. * <p> * Internally a {@code KafkaStreams} instance contains a normal {@link KafkaProducer} and {@link KafkaConsumer} instance * that is used for reading input and writing output. * <p> * A simple example might look like this: * <pre>{@code * Map&lt;String, Object&gt; props = new HashMap&lt;&gt;(); * props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-stream-processing-application"); * props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); * props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); * props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); * StreamsConfig config = new StreamsConfig(props); * * KStreamBuilder builder = new KStreamBuilder(); * builder.stream("my-input-topic").mapValues(value -&gt; value.length().toString()).to("my-output-topic"); * * KafkaStreams streams = new KafkaStreams(builder, config); * streams.start(); * }</pre> * * @see KStreamBuilder * @see TopologyBuilder */ @InterfaceStability.Unstable public class KafkaStreams { private static final Logger log = LoggerFactory.getLogger(KafkaStreams.class); private static final String JMX_PREFIX = "kafka.streams"; private static final int DEFAULT_CLOSE_TIMEOUT = 0; private GlobalStreamThread globalStreamThread; private final StreamThread[] threads; private final Map<Long, StreamThread.State> threadState; private final Metrics metrics; private final QueryableStoreProvider queryableStoreProvider; // processId is expected to be unique across JVMs and to be used // in userData of the subscription request to allow the assignor to be aware // of the co-location of stream thread's consumers. It is for internal // usage only and should not be exposed to users at all. private final UUID processId; private final StreamsMetadataState streamsMetadataState; private final StreamsConfig config; // container states /** * Kafka Streams states are the possible states that a Kafka Streams instance can be in. * An instance must only be in one state at a time.
* Note that this instance will be in the "Rebalancing" state if any of its threads is rebalancing. * The expected state transitions between the defined states are: * * <pre> * +--------------+ * +<----- | Created | * | +-----+--------+ * | | * | v * | +-----+--------+ * +<----- | Rebalancing | <----+ * | +--------------+ | * | | * | | * | +--------------+ | * +-----> | Running | ---->+ * | +-----+--------+ * | | * | v * | +-----+--------+ * +-----> | Pending | * | Shutdown | * +-----+--------+ * | * v * +-----+--------+ * | Not Running | * +--------------+ * </pre> */ public enum State { CREATED(1, 2, 3), RUNNING(2, 3), REBALANCING(1, 2, 3), PENDING_SHUTDOWN(4), NOT_RUNNING; private final Set<Integer> validTransitions = new HashSet<>(); State(final Integer... validTransitions) { this.validTransitions.addAll(Arrays.asList(validTransitions)); } public boolean isRunning() { return equals(RUNNING) || equals(REBALANCING); } public boolean isCreatedOrRunning() { return isRunning() || equals(CREATED); } public boolean isValidTransition(final State newState) { return validTransitions.contains(newState.ordinal()); } } private volatile State state = State.CREATED; private StateListener stateListener = null; /** * Listen to {@link State} change events. */ public interface StateListener { /** * Called when state changes. * * @param newState new state * @param oldState previous state */ void onChange(final State newState, final State oldState); } /** * An app can set a single {@link StateListener} so that the app is notified when state changes. * @param listener a new state listener */ public void setStateListener(final StateListener listener) { stateListener = listener; } private synchronized void setState(final State newState) { final State oldState = state; if (!state.isValidTransition(newState)) { log.warn("Unexpected state transition from {} to {}.", oldState, newState); } state = newState; if (stateListener != null) { stateListener.onChange(state, oldState); } } /** * Return the current {@link State} of this {@code KafkaStreams} instance. * * @return the current state of this Kafka Streams instance */ public synchronized State state() { return state; } /** * Get read-only handle on global metrics registry. * * @return Map of all metrics. */ public Map<MetricName, ? extends Metric> metrics() { return Collections.unmodifiableMap(metrics.metrics()); } private class StreamStateListener implements StreamThread.StateListener { @Override public synchronized void onChange(final StreamThread thread, final StreamThread.State newState, final StreamThread.State oldState) { threadState.put(thread.getId(), newState); if (newState == StreamThread.State.PARTITIONS_REVOKED || newState == StreamThread.State.ASSIGNING_PARTITIONS) { setState(State.REBALANCING); } else if (newState == StreamThread.State.RUNNING) { for (final StreamThread.State state : threadState.values()) { if (state != StreamThread.State.RUNNING) { return; } } setState(State.RUNNING); } } } /** * Create a {@code KafkaStreams} instance. * * @param builder the processor topology builder specifying the computational logic * @param props properties for {@link StreamsConfig} */ public KafkaStreams(final TopologyBuilder builder, final Properties props) { this(builder, new StreamsConfig(props), new DefaultKafkaClientSupplier()); } /** * Create a {@code KafkaStreams} instance.
* * @param builder the processor topology builder specifying the computational logic * @param config the Kafka Streams configuration */ public KafkaStreams(final TopologyBuilder builder, final StreamsConfig config) { this(builder, config, new DefaultKafkaClientSupplier()); } /** * Create a {@code KafkaStreams} instance. * * @param builder the processor topology builder specifying the computational logic * @param config the Kafka Streams configuration * @param clientSupplier the Kafka clients supplier which provides underlying producer and consumer clients * for the new {@code KafkaStreams} instance */ public KafkaStreams(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier) { // create the metrics final Time time = Time.SYSTEM; processId = UUID.randomUUID(); this.config = config; // The application ID is a required config and hence should always have value final String applicationId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG); builder.setApplicationId(applicationId); String clientId = config.getString(StreamsConfig.CLIENT_ID_CONFIG); if (clientId.length() <= 0) clientId = applicationId + "-" + processId; final List<MetricsReporter> reporters = config.getConfiguredInstances(StreamsConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class); reporters.add(new JmxReporter(JMX_PREFIX)); final MetricConfig metricConfig = new MetricConfig().samples(config.getInt(StreamsConfig.METRICS_NUM_SAMPLES_CONFIG)) .recordLevel(Sensor.RecordingLevel.forName(config.getString(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG))) .timeWindow(config.getLong(StreamsConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS); metrics = new Metrics(metricConfig, reporters, time); threads = new StreamThread[config.getInt(StreamsConfig.NUM_STREAM_THREADS_CONFIG)]; threadState = new HashMap<>(threads.length); final ArrayList<StateStoreProvider> storeProviders = new ArrayList<>(); streamsMetadataState = new StreamsMetadataState(builder, parseHostInfo(config.getString(StreamsConfig.APPLICATION_SERVER_CONFIG))); final ProcessorTopology globalTaskTopology = builder.buildGlobalStateTopology(); if (config.getLong(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG) < 0) { log.warn("Negative cache size passed in. Reverting to cache size of 0 bytes."); } final long cacheSizeBytes = Math.max(0, config.getLong(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG) / (config.getInt(StreamsConfig.NUM_STREAM_THREADS_CONFIG) + (globalTaskTopology == null ? 
0 : 1))); if (globalTaskTopology != null) { globalStreamThread = new GlobalStreamThread(globalTaskTopology, config, clientSupplier.getRestoreConsumer(config.getRestoreConsumerConfigs(clientId + "-global")), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time), metrics, time, clientId); } for (int i = 0; i < threads.length; i++) { threads[i] = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, metrics, time, streamsMetadataState, cacheSizeBytes); threads[i].setStateListener(new StreamStateListener()); threadState.put(threads[i].getId(), threads[i].state()); storeProviders.add(new StreamThreadStateStoreProvider(threads[i])); } final GlobalStateStoreProvider globalStateStoreProvider = new GlobalStateStoreProvider(builder.globalStateStores()); queryableStoreProvider = new QueryableStoreProvider(storeProviders, globalStateStoreProvider); } private static HostInfo parseHostInfo(final String endPoint) { if (endPoint == null || endPoint.trim().isEmpty()) { return StreamsMetadataState.UNKNOWN_HOST; } final String host = getHost(endPoint); final Integer port = getPort(endPoint); if (host == null || port == null) { throw new ConfigException(String.format("Error parsing host address %s. Expected format host:port.", endPoint)); } return new HostInfo(host, port); } /** * Check if the brokers in use have version 0.10.1.x or higher. * <p> * Note, for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}. * * @throws StreamsException if brokers have version 0.10.0.x */ private void checkBrokerVersionCompatibility() throws StreamsException { final StreamsKafkaClient client = new StreamsKafkaClient(config); client.checkBrokerCompatibility(); try { client.close(); } catch (final IOException e) { log.warn("Could not close StreamsKafkaClient.", e); } } /** * Start the {@code KafkaStreams} instance by starting all its threads. * <p> * Note, for brokers with version {@code 0.9.x} or lower, the broker version cannot be checked. * There will be no error and the client will hang and retry to verify the broker version until it * {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}. * @throws IllegalStateException if the process was already started * @throws StreamsException if the Kafka brokers have version 0.10.0.x */ public synchronized void start() throws IllegalStateException, StreamsException { log.debug("Starting Kafka Streams process."); if (state == State.CREATED) { checkBrokerVersionCompatibility(); setState(State.RUNNING); if (globalStreamThread != null) { globalStreamThread.start(); } for (final StreamThread thread : threads) { thread.start(); } log.info("Started Kafka Streams process."); } else { throw new IllegalStateException("Cannot start again."); } } /** * Shutdown this {@code KafkaStreams} instance by signaling all the threads to stop, and then wait for them to join. * This will block until all threads have stopped. */ public void close() { close(DEFAULT_CLOSE_TIMEOUT, TimeUnit.SECONDS); } /** * Shutdown this {@code KafkaStreams} by signaling all the threads to stop, and then wait up to the timeout for the * threads to join. * A {@code timeout} of 0 means to wait forever.
* * @param timeout how long to wait for the threads to shut down * @param timeUnit unit of time used for timeout * @return {@code true} if all threads were successfully stopped&mdash;{@code false} if the timeout was reached * before all threads stopped */ public synchronized boolean close(final long timeout, final TimeUnit timeUnit) { log.debug("Stopping Kafka Streams process."); if (state.isCreatedOrRunning()) { setState(State.PENDING_SHUTDOWN); // save the current thread so that if it is a stream thread // we don't attempt to join it and cause a deadlock final Thread shutdown = new Thread(new Runnable() { @Override public void run() { // signal the threads to stop and wait for (final StreamThread thread : threads) { // avoid deadlocks by stopping any further state reports // from the thread since we're shutting down thread.setStateListener(null); thread.close(); } if (globalStreamThread != null) { globalStreamThread.close(); if (!globalStreamThread.stillRunning()) { try { globalStreamThread.join(); } catch (final InterruptedException e) { Thread.interrupted(); } } } for (final StreamThread thread : threads) { try { if (!thread.stillRunning()) { thread.join(); } } catch (final InterruptedException ex) { Thread.interrupted(); } } metrics.close(); log.info("Stopped Kafka Streams process."); } }, "kafka-streams-close-thread"); shutdown.setDaemon(true); shutdown.start(); try { shutdown.join(TimeUnit.MILLISECONDS.convert(timeout, timeUnit)); } catch (final InterruptedException e) { Thread.interrupted(); } setState(State.NOT_RUNNING); return !shutdown.isAlive(); } return true; } /** * Produce a string representation containing useful information about this {@code KafkaStreams} instance such as * thread IDs, task IDs, and a representation of the topology DAG including {@link StateStore}s (cf. * {@link TopologyBuilder} and {@link KStreamBuilder}). * * @return A string representation of the Kafka Streams instance. */ @Override public String toString() { return toString(""); } /** * Produce a string representation containing useful information about this {@code KafkaStreams} instance such as * thread IDs, task IDs, and a representation of the topology DAG including {@link StateStore}s (cf. * {@link TopologyBuilder} and {@link KStreamBuilder}). * * @param indent the top-level indent for each line * @return A string representation of the Kafka Streams instance. */ public String toString(final String indent) { final StringBuilder sb = new StringBuilder() .append(indent) .append("KafkaStreams processID: ") .append(processId) .append("\n"); for (final StreamThread thread : threads) { sb.append(thread.toString(indent + "\t")); } sb.append("\n"); return sb.toString(); } /** * Do a cleanup of the local {@link StateStore} directory ({@link StreamsConfig#STATE_DIR_CONFIG}) by deleting all * data with regard to the {@link StreamsConfig#APPLICATION_ID_CONFIG application ID}. * <p> * May only be called either before this {@code KafkaStreams} instance is {@link #start() started} or after the * instance is {@link #close() closed}. * <p> * Calling this method triggers a restore of local {@link StateStore}s on the next {@link #start() application start}.
* * @throws IllegalStateException if the instance is currently running */ public void cleanUp() { if (state.isRunning()) { throw new IllegalStateException("Cannot clean up while running."); } final String appId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG); final String stateDir = config.getString(StreamsConfig.STATE_DIR_CONFIG); final String localApplicationDir = stateDir + File.separator + appId; log.debug("Removing local Kafka Streams application data in {} for application {}.", localApplicationDir, appId); final StateDirectory stateDirectory = new StateDirectory(appId, stateDir, Time.SYSTEM); stateDirectory.cleanRemovedTasks(0); } /** * Set the handler invoked when a {@link StreamsConfig#NUM_STREAM_THREADS_CONFIG internal thread} abruptly * terminates due to an uncaught exception. * * @param eh the uncaught exception handler for all internal threads; {@code null} deletes the current handler */ public void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh) { for (final StreamThread thread : threads) { thread.setUncaughtExceptionHandler(eh); } if (globalStreamThread != null) { globalStreamThread.setUncaughtExceptionHandler(eh); } } /** * Find all currently running {@code KafkaStreams} instances (potentially remotely) that use the same * {@link StreamsConfig#APPLICATION_ID_CONFIG application ID} as this instance (i.e., all instances that belong to * the same Kafka Streams application) and return {@link StreamsMetadata} for each discovered instance. * <p> * Note: this is a point-in-time view and it may change due to partition reassignment. * * @return {@link StreamsMetadata} for each {@code KafkaStreams} instance of this application */ public Collection<StreamsMetadata> allMetadata() { validateIsRunning(); return streamsMetadataState.getAllMetadata(); } /** * Find all currently running {@code KafkaStreams} instances (potentially remotely) that * <ul> * <li>use the same {@link StreamsConfig#APPLICATION_ID_CONFIG application ID} as this instance (i.e., all * instances that belong to the same Kafka Streams application)</li> * <li>and that contain a {@link StateStore} with the given {@code storeName}</li> * </ul> * and return {@link StreamsMetadata} for each discovered instance. * <p> * Note: this is a point-in-time view and it may change due to partition reassignment. * * @param storeName the {@code storeName} to find metadata for * @return {@link StreamsMetadata} for each {@code KafkaStreams} instance with the provided {@code storeName} of * this application */ public Collection<StreamsMetadata> allMetadataForStore(final String storeName) { validateIsRunning(); return streamsMetadataState.getAllMetadataForStore(storeName); } /** * Find the currently running {@code KafkaStreams} instance (potentially remotely) that * <ul> * <li>uses the same {@link StreamsConfig#APPLICATION_ID_CONFIG application ID} as this instance (i.e., all * instances that belong to the same Kafka Streams application)</li> * <li>and that contains a {@link StateStore} with the given {@code storeName}</li> * <li>and the {@link StateStore} contains the given {@code key}</li> * </ul> * and return {@link StreamsMetadata} for it. * <p> * This will use the default Kafka Streams partitioner to locate the partition. * If a {@link StreamPartitioner custom partitioner} has been * {@link ProducerConfig#PARTITIONER_CLASS_CONFIG configured} via {@link StreamsConfig}, * {@link KStream#through(StreamPartitioner, String)}, or {@link KTable#through(StreamPartitioner, String, String)}, * or if the original {@link KTable}'s input {@link KStreamBuilder#table(String, String) topic} is partitioned * differently, please use {@link #metadataForKey(String, Object, StreamPartitioner)}. * <p> * Note: * <ul> * <li>this is a point-in-time view and it may change due to partition reassignment</li> * <li>the key may not exist in the {@link StateStore}; this method provides a way of finding which host it * <em>would</em> exist on</li> * <li>if this is for a window store the serializer should be the serializer for the record key, * not the window serializer</li> * </ul> * * @param storeName the {@code storeName} to find metadata for * @param key the key to find metadata for * @param keySerializer serializer for the key * @param <K> key type * @return {@link StreamsMetadata} for the {@code KafkaStreams} instance with the provided {@code storeName} and * {@code key} of this application or {@link StreamsMetadata#NOT_AVAILABLE} if Kafka Streams is (re-)initializing */ public <K> StreamsMetadata metadataForKey(final String storeName, final K key, final Serializer<K> keySerializer) { validateIsRunning(); return streamsMetadataState.getMetadataWithKey(storeName, key, keySerializer); } /** * Find the currently running {@code KafkaStreams} instance (potentially remotely) that * <ul> * <li>uses the same {@link StreamsConfig#APPLICATION_ID_CONFIG application ID} as this instance (i.e., all * instances that belong to the same Kafka Streams application)</li> * <li>and that contains a {@link StateStore} with the given {@code storeName}</li> * <li>and the {@link StateStore} contains the given {@code key}</li> * </ul> * and return {@link StreamsMetadata} for it. * <p> * Note: * <ul> * <li>this is a point-in-time view and it may change due to partition reassignment</li> * <li>the key may not exist in the {@link StateStore}; this method provides a way of finding which host it * <em>would</em> exist on</li> * </ul> * * @param storeName the {@code storeName} to find metadata for * @param key the key to find metadata for * @param partitioner the partitioner to be used to locate the host for the key * @param <K> key type * @return {@link StreamsMetadata} for the {@code KafkaStreams} instance with the provided {@code storeName} and * {@code key} of this application or {@link StreamsMetadata#NOT_AVAILABLE} if Kafka Streams is (re-)initializing */ public <K> StreamsMetadata metadataForKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner) { validateIsRunning(); return streamsMetadataState.getMetadataWithKey(storeName, key, partitioner); } /** * Get a facade wrapping the local {@link StateStore} instances with the provided {@code storeName} if the Store's * type is accepted by the provided {@link QueryableStoreType#accepts(StateStore) queryableStoreType}. * The returned object can be used to query the {@link StateStore} instances.
* * @param storeName name of the store to find * @param queryableStoreType accepts only stores that are accepted by {@link QueryableStoreType#accepts(StateStore)} * @param <T> return type * @return A facade wrapping the local {@link StateStore} instances * @throws InvalidStateStoreException if Kafka Streams is (re-)initializing or a store with {@code storeName} and * {@code queryableStoreType} doesn't exist */ public <T> T store(final String storeName, final QueryableStoreType<T> queryableStoreType) { validateIsRunning(); return queryableStoreProvider.getStore(storeName, queryableStoreType); } private void validateIsRunning() { if (!state.isRunning()) { throw new IllegalStateException("KafkaStreams is not running. State is " + state + "."); } } }
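/*
 * Editor's note: an illustrative sketch, not part of the original file. It shows how
 * the lifecycle and interactive-query methods above are typically combined. The store
 * name "word-counts", the key "hello", and the use of QueryableStoreTypes and
 * StringSerializer are assumptions for the example only.
 *
 *     KafkaStreams streams = new KafkaStreams(builder, config);
 *     streams.setStateListener((newState, oldState) ->
 *             System.out.println("state: " + oldState + " -> " + newState));
 *     streams.start();
 *
 *     // Once RUNNING, query a local store through the facade returned by store():
 *     ReadOnlyKeyValueStore<String, Long> store =
 *             streams.store("word-counts", QueryableStoreTypes.keyValueStore());
 *     Long count = store.get("hello");
 *
 *     // Or find which instance hosts a given key (default partitioner assumed):
 *     StreamsMetadata hostInfo = streams.metadataForKey("word-counts", "hello", new StringSerializer());
 *
 *     streams.close(10, TimeUnit.SECONDS);
 */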
apache-2.0
apparentlymart/camlistore-debian
pkg/blobserver/localdisk/localdisk.go
3920
/* Copyright 2011 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package localdisk registers the "filesystem" blobserver storage type, storing blobs in a forest of sharded directories at the specified root. Example low-level config: "/storage/": { "handler": "storage-filesystem", "handlerArgs": { "path": "/var/camlistore/blobs" } }, */ package localdisk import ( "fmt" "io" "os" "path/filepath" "sync" "camlistore.org/pkg/blob" "camlistore.org/pkg/blobserver" "camlistore.org/pkg/blobserver/local" "camlistore.org/pkg/jsonconfig" "camlistore.org/pkg/osutil" "camlistore.org/pkg/types" ) // DiskStorage implements the blobserver.Storage interface using the // local filesystem. type DiskStorage struct { root string // dirLockMu must be held for writing when deleting an empty directory // and for read when receiving blobs. dirLockMu *sync.RWMutex // gen will be nil if partition != "" gen *local.Generationer } // IsDir reports whether root is a localdisk (file-per-blob) storage directory. func IsDir(root string) (bool, error) { if osutil.DirExists(filepath.Join(root, blob.RefFromString("").HashName())) { return true, nil } return false, nil } // New returns a new local disk storage implementation at the provided // root directory, which must already exist. func New(root string) (*DiskStorage, error) { // Local disk. 
fi, err := os.Stat(root) if os.IsNotExist(err) { return nil, fmt.Errorf("Storage root %q doesn't exist", root) } if err != nil { return nil, fmt.Errorf("Failed to stat directory %q: %v", root, err) } if !fi.IsDir() { return nil, fmt.Errorf("Storage root %q exists but is not a directory.", root) } ds := &DiskStorage{ root: root, dirLockMu: new(sync.RWMutex), gen: local.NewGenerationer(root), } if err := ds.migrate3to2(); err != nil { return nil, fmt.Errorf("Error updating localdisk format: %v", err) } if _, _, err := ds.StorageGeneration(); err != nil { return nil, fmt.Errorf("Error initializing generation for %q: %v", root, err) } return ds, nil } func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { path := config.RequiredString("path") if err := config.Validate(); err != nil { return nil, err } return New(path) } func init() { blobserver.RegisterStorageConstructor("filesystem", blobserver.StorageConstructor(newFromConfig)) } func (ds *DiskStorage) tryRemoveDir(dir string) { ds.dirLockMu.Lock() defer ds.dirLockMu.Unlock() os.Remove(dir) // ignore error } func (ds *DiskStorage) FetchStreaming(blob blob.Ref) (io.ReadCloser, uint32, error) { return ds.Fetch(blob) } func (ds *DiskStorage) Fetch(blob blob.Ref) (types.ReadSeekCloser, uint32, error) { fileName := ds.blobPath(blob) stat, err := os.Stat(fileName) if os.IsNotExist(err) { return nil, 0, os.ErrNotExist } size := types.U32(stat.Size()) file, err := os.Open(fileName) if err != nil { if os.IsNotExist(err) { err = os.ErrNotExist } return nil, 0, err } return file, size, nil } func (ds *DiskStorage) RemoveBlobs(blobs []blob.Ref) error { for _, blob := range blobs { fileName := ds.blobPath(blob) err := os.Remove(fileName) switch { case err == nil: continue case os.IsNotExist(err): // deleting already-deleted file; harmless. continue default: return err } } return nil }
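// exampleLocaldiskUse is an editor's sketch, not part of the original file: it shows
// the basic open/fetch flow using only APIs defined or imported above. The root path
// and blob contents are illustrative; New requires the directory to already exist.
func exampleLocaldiskUse() {
	ds, err := New("/var/camlistore/blobs")
	if err != nil {
		fmt.Println("open storage:", err)
		return
	}
	// A blob ref is derived from content; RefFromString is also used by IsDir above.
	ref := blob.RefFromString("hello world")
	rc, size, err := ds.Fetch(ref)
	if err != nil {
		fmt.Println("fetch:", err) // os.ErrNotExist if the blob was never stored
		return
	}
	defer rc.Close()
	fmt.Println("blob size:", size)
}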
apache-2.0
apache/incubator-asterixdb
hyracks-fullstack/hyracks/hyracks-dataflow-std/src/main/java/org/apache/hyracks/dataflow/std/buffermanager/IPartitionedTupleBufferManager.java
4853
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.hyracks.dataflow.std.buffermanager; import org.apache.hyracks.api.comm.IFrameTupleAccessor; import org.apache.hyracks.api.comm.IFrameWriter; import org.apache.hyracks.api.dataflow.value.RecordDescriptor; import org.apache.hyracks.api.exceptions.HyracksDataException; import org.apache.hyracks.dataflow.std.structures.TuplePointer; public interface IPartitionedTupleBufferManager { int getNumPartitions(); int getNumTuples(int partition); int getPhysicalSize(int partition); /** * Insert tuple from (byte[] byteArray, int[] fieldEndOffsets, int start, int size) into the * specified partition. The handle is written into the tuple pointer. * <br> * If {@code byteArray} already contains the {@code fieldEndOffsets}, then please set {@code fieldEndOffsets} to NULL. * * @param partition * the id of the partition to insert the tuple into * @param byteArray * the byteArray which contains the tuple * @param fieldEndOffsets * the fieldEndOffsets which comes from the ArrayTupleBuilder, please set it to NULL if the {@code byteArray} already contains the fieldEndOffsets * @param start * the start offset in the {@code byteArray} * @param size * the size of the tuple * @param pointer * the returned pointer indicating the handle inside this buffer manager * @return a boolean value to indicate if the insertion succeeded or not */ boolean insertTuple(int partition, byte[] byteArray, int[] fieldEndOffsets, int start, int size, TuplePointer pointer) throws HyracksDataException; /** * Insert tuple {@code tupleId} from the {@code tupleAccessor} into the given partition. * The returned handle is written into the tuple pointer. * * @param partition * the id of the partition to insert the tuple into * @param tupleAccessor * the FrameTupleAccessor storage * @param tupleId * the id of the tuple from the tupleAccessor * @param pointer * the returned pointer indicating the handle to later fetch the tuple from the buffer manager * @return true if the insertion succeeded; otherwise false. * @throws HyracksDataException */ boolean insertTuple(int partition, IFrameTupleAccessor tupleAccessor, int tupleId, TuplePointer pointer) throws HyracksDataException; /** * Cancels the effect of the last insertTuple() operation, i.e. undoes the last insertTuple() operation. */ void cancelInsertTuple(int partition) throws HyracksDataException; /** * Sets the memory constrain. * @param constrain * the constrain to be set. */ void setConstrain(IPartitionedMemoryConstrain constrain); /** * Resets to the initial state. The previously allocated resources won't be released, so that they can be reused in the next round. * * @throws HyracksDataException */ void reset() throws HyracksDataException; /** * Closes the manager, which explicitly releases all the allocated resources.
*/ void close(); ITuplePointerAccessor getTuplePointerAccessor(RecordDescriptor recordDescriptor); /** * Flush the particular partition {@code pid} to {@code writer}. * This partition will not be cleared. * Currently it is used by the join operator, where we flush the inner partition to the join (as a frame writer) * but still keep it for the next outer partition. * * @param pid * @param writer * @throws HyracksDataException */ void flushPartition(int pid, IFrameWriter writer) throws HyracksDataException; /** * Clears the memory occupied by the particular partition. * * @param partition * @throws HyracksDataException */ void clearPartition(int partition) throws HyracksDataException; IPartitionedMemoryConstrain getConstrain(); }
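/*
 * Editor's note: an illustrative sketch, not part of the original file, of the call
 * pattern this interface is designed for (e.g. in a hash join build phase). The
 * bufferManager, accessor, spillWriter, and partitionOf() names are assumptions.
 *
 *     TuplePointer pointer = new TuplePointer();
 *     for (int t = 0; t < accessor.getTupleCount(); t++) {
 *         int pid = partitionOf(accessor, t) % bufferManager.getNumPartitions();
 *         if (!bufferManager.insertTuple(pid, accessor, t, pointer)) {
 *             // Out of memory budget: flush this partition to disk, release its
 *             // frames, then retry the insertion.
 *             bufferManager.flushPartition(pid, spillWriter);
 *             bufferManager.clearPartition(pid);
 *             bufferManager.insertTuple(pid, accessor, t, pointer);
 *         }
 *     }
 */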
apache-2.0
goose3/goose3
docs/source/conf.py
5445
# -*- coding: utf-8 -*- # # goose3 documentation build configuration file, created by # sphinx-quickstart on Sat Jan 6 10:10:16 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../../')) sys.path.append(os.path.abspath('_themes')) import goose3 import goose3.version as gver # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.todo' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'goose3' copyright = '2018, Mahmoud Lababidi' author = 'Mahmoud Lababidi' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = gver.__version__ # The full version, including alpha/beta/rc tags. release = gver.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' html_theme = 'custom_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} html_theme_path = ['_themes'] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', 'donate.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'goose3doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'goose3.tex', u'goose3 Documentation', u'maintainers', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'goose3', u'goose3 Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'goose3', u'goose3 Documentation', author, 'goose3', 'One line description of project.', 'Miscellaneous'), ]
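# Editor's note: illustrative usage, not part of the original file. With this conf.py
# in docs/source/, the HTML docs are typically built from the repository root with:
#
#     sphinx-build -b html docs/source docs/build/html
#
# autodoc can import goose3 because of the sys.path.insert(0, os.path.abspath('../../'))
# call near the top of this file; the custom theme is resolved via html_theme_path.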
apache-2.0
ruks/carbon-apimgt
components/apimgt/org.wso2.carbon.apimgt.gateway/src/test/java/org/wso2/carbon/apimgt/gateway/handlers/throttling/ThrottleHandlerTest.java
35510
/*
 * Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.wso2.carbon.apimgt.gateway.handlers.throttling;

import org.apache.synapse.MessageContext;
import org.apache.synapse.api.ApiConstants;
import org.apache.synapse.commons.throttle.core.AccessInformation;
import org.apache.synapse.commons.throttle.core.ThrottleException;
import org.apache.synapse.core.SynapseEnvironment;
import org.apache.synapse.core.axis2.Axis2MessageContext;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.wso2.carbon.apimgt.api.dto.ConditionGroupDTO;
import org.wso2.carbon.apimgt.gateway.APIMgtGatewayConstants;
import org.wso2.carbon.apimgt.common.gateway.extensionlistener.ExtensionListener;
import org.wso2.carbon.apimgt.gateway.TestUtils;
import org.wso2.carbon.apimgt.gateway.handlers.security.AuthenticationContext;
import org.wso2.carbon.apimgt.gateway.internal.ServiceReferenceHolder;
import org.wso2.carbon.apimgt.gateway.throttling.ThrottleDataHolder;
import org.wso2.carbon.apimgt.gateway.throttling.publisher.ThrottleDataPublisher;
import org.wso2.carbon.apimgt.impl.APIConstants;
import org.wso2.carbon.apimgt.impl.APIManagerConfiguration;
import org.wso2.carbon.apimgt.impl.APIManagerConfigurationService;
import org.wso2.carbon.apimgt.impl.dto.VerbInfoDTO;
import org.wso2.carbon.metrics.manager.Timer;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Test cases for ThrottleHandler.
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder.class})
public class ThrottleHandlerTest {

    private Timer timer;
    private Timer.Context context;
    private ThrottleConditionEvaluator throttleEvaluator;
    private AccessInformation accessInformation;
    private ConditionGroupDTO conditionGroupDTO;
    private ConditionGroupDTO[] conditionGroupDTOs;
    private List<VerbInfoDTO> verbInfoDTO;
    private VerbInfoDTO verbInfo;
    private String resourceLevelThrottleKey;
    private String apiLevelThrottleKey;
    private String apiContext = "weatherAPI";
    private String apiVersion = "v1";
    private String httpVerb = "GET";
    private String resourceUri = "/foo";
    private String throttlingTier = "50KPerMin";
    private static final String RESPONSE = "RESPONSE";
    private static final String API_AUTH_CONTEXT = "__API_AUTH_CONTEXT";
    private static final String VERB_INFO_DTO = "VERB_INFO";
    private static final String blockedUserWithTenantDomain = "blockedUser@carbon.super";
    private static final String userWithTenantDomain = "user@carbon.super";
    private static final String blockedUserWithOutTenantDomain = "blockedUser";
    private Map<String, ExtensionListener> extensionListenerMap = new HashMap<>();

    @Before
    public void init() {
        timer = Mockito.mock(Timer.class);
        context = Mockito.mock(Timer.Context.class);
        throttleEvaluator = Mockito.mock(ThrottleConditionEvaluator.class);
        accessInformation = Mockito.mock(AccessInformation.class);
        Mockito.when(timer.start()).thenReturn(context);
        verbInfoDTO = new ArrayList<>();
        verbInfo = new VerbInfoDTO();
        verbInfo.setHttpVerb(httpVerb);
        verbInfo.setRequestKey(apiContext + "/" + apiVersion + resourceUri + ":" + httpVerb);
        verbInfo.setThrottling(throttlingTier);
        verbInfoDTO.add(verbInfo);
        conditionGroupDTO = new ConditionGroupDTO();
        conditionGroupDTO.setConditionGroupId("_default");
        conditionGroupDTOs = new ConditionGroupDTO[1];
        conditionGroupDTOs[0] = conditionGroupDTO;
        apiLevelThrottleKey = apiContext + ":" + apiVersion;
        resourceLevelThrottleKey = apiContext + "/" + apiVersion + resourceUri + ":" + httpVerb;
        org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder serviceReferenceHolder =
                Mockito.mock(org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder.class);
        PowerMockito.mockStatic(org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder.class);
        Mockito.when(org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder
                .getInstance()).thenReturn(serviceReferenceHolder);
        APIManagerConfigurationService apiManagerConfigurationService =
                Mockito.mock(APIManagerConfigurationService.class);
        APIManagerConfiguration apiManagerConfiguration = Mockito.mock(APIManagerConfiguration.class);
        Mockito.when(serviceReferenceHolder.getAPIManagerConfigurationService())
                .thenReturn(apiManagerConfigurationService);
        Mockito.when(apiManagerConfigurationService.getAPIManagerConfiguration()).thenReturn(apiManagerConfiguration);
        Mockito.when(apiManagerConfiguration.getExtensionListenerMap()).thenReturn(extensionListenerMap);
    }

    @Test
    public void testDoNotThrottleWhenMsgIsAResponseAndAuthCtxNotAvailable() {
        ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder();
        ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator);
        MessageContext messageContext = TestUtils.getMessageContext(apiContext, apiVersion);
        messageContext.setProperty(RESPONSE, "true");
        Assert.assertTrue(throttleHandler.handleRequest(messageContext));
    }

    @Test
    public void
testSubscriptionLevelThrottlingInitWhenThrottleCtxIsNull() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(RESPONSE, "true"); //Test subscription level throttle context initialisation when throttle holder is null Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testSubscriptionLevelThrottlingInitialization() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(RESPONSE, "true"); //Test subscription level throttle context initialisation when throttle holder is null Assert.assertTrue(throttleHandler.handleRequest(messageContext)); //Test subscription level throttle context initialisation when throttle holder is already initialized by first //request Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenBlockingConditionsAreSatisfied() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); throttleDataHolder.addIpBlockingCondition("carbon.super", 1, "{\"fixedIp\":\"127.0.0.1\",\"invert\":false}", APIConstants.BLOCKING_CONDITIONS_IP); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); // Mockito.when(throttleDataHolder.isRequestBlocked(apiContext, authenticationContext // .getSubscriber() + ":" + authenticationContext.getApplicationName(), authenticationContext // .getUsername(), "carbon.super" + ":" + "127.0.0.1")).thenReturn(true); Assert.assertFalse(throttleHandler.handleRequest(messageContext)); throttleDataHolder.removeIpBlockingCondition("carbon.super", 1); Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleContinueWhenAPITierIsNotAvailable() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); //Make sure that the tier info is not available in the message context Assert.assertNull((VerbInfoDTO) messageContext.getProperty(VERB_INFO_DTO)); //Should continue the message flow if the message context does not have throttling tier information Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgDoContinueWhenAllThrottlingLevelsAreNotThrolled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ServiceReferenceHolder.getInstance().setThrottleDataPublisher(new ThrottleDataPublisher()); ThrottleHandler throttleHandler = new 
ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); //Should continue the message flow if API level, application level, resource level, subscription level, //subscription spike level and hard throttling limit levels are not throttled Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgDoThrottleWhenUserLevelThrottlingIsTriggerred() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); verbInfo.setApplicableLevel("userLevel"); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); //Should continue the message flow, when user level throttling is triggered and not exceeded Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenAPILevelIsThrottled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); //Set conditional group verbInfo.setConditionGroups(conditionGroupDTOs); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String combinedResourceLevelThrottleKey = apiLevelThrottleKey + conditionGroupDTO.getConditionGroupId(); throttleDataHolder.addThrottledAPIKey(apiLevelThrottleKey, System.currentTimeMillis() + 10000); throttleDataHolder.addThrottleData(combinedResourceLevelThrottleKey, System.currentTimeMillis() + 10000); Mockito.when(throttleEvaluator.getApplicableConditions(messageContext, authenticationContext, conditionGroupDTOs)).thenReturn(matchingConditions); //Should throttle out and discontinue message flow, when api level is throttled out Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenResourceLevelIsThrottled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler 
throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier("Unlimited"); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String combinedResourceLevelThrottleKey = resourceLevelThrottleKey + conditionGroupDTO.getConditionGroupId(); throttleDataHolder.addThrottledAPIKey(resourceLevelThrottleKey, System.currentTimeMillis() + 10000); throttleDataHolder.addThrottleData(combinedResourceLevelThrottleKey, System.currentTimeMillis() + 10000); Mockito.when(throttleEvaluator.getApplicableConditions(messageContext, authenticationContext, conditionGroupDTOs)).thenReturn(matchingConditions); //Should throttle out and discontinue message flow, when resource level is throttled out Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenSubscriptionLevelIsThrottledAndStopOnQuotaReachIsEnabled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setStopOnQuotaReach(true); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String subscriptionLevelThrottleKey = authenticationContext.getApplicationId() + ":" + apiContext + ":" + apiVersion + ":" + authenticationContext.getTier(); throttleDataHolder.addThrottleData(subscriptionLevelThrottleKey, System.currentTimeMillis() + 10000); //Should throttle out and discontinue message flow, when subscription level is throttled out //and stop on quota reach is enabled Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgContinueWhenSubscriptionLevelIsThrottledAndStopOnQuotaReachIsDisabled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty 
(API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); //Set stopOnQuota authenticationContext.setStopOnQuotaReach(false); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String subscriptionLevelThrottleKey = authenticationContext.getApplicationId() + ":" + apiContext + ":" + apiVersion; //Set subscription level throttled out throttleDataHolder.addThrottleData(subscriptionLevelThrottleKey, System.currentTimeMillis() + 10000); //Though subscription level is throttled out, should continue the message flow, if stop on quota reach is //disabled Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenApplicationLevelIsThrottled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String applicationLevelThrottleKey = authenticationContext.getApplicationId() + ":" + authenticationContext .getUsername()+ "@" + throttleHandler.getTenantDomain(); //Set application level throttled out throttleDataHolder.addThrottleData(applicationLevelThrottleKey, System.currentTimeMillis() + 10000); //Should discontinue message flow, when application level is throttled Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenProductionHardThrottlingLimitsThrottled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setProductionMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setStopOnQuotaReach(false); authenticationContext.setKeyType("PRODUCTION"); authenticationContext.setSpikeArrestLimit(0); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); Mockito.when(accessInformation.isAccessAllowed()).thenReturn(false); //Should 
discontinue message flow if PRODUCTION hard throttling limits are exceeded Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenSandBoxHardThrottlingLimitsThrottled() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setSandboxMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setStopOnQuotaReach(false); authenticationContext.setKeyType("SANDBOX"); authenticationContext.setSpikeArrestLimit(0); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); String subscriptionLevelThrottleKey = authenticationContext.getApplicationId() + ":" + apiContext + ":" + apiVersion; String applicationLevelThrottleKey = authenticationContext.getApplicationId() + ":" + authenticationContext .getUsername()+ "@" + throttleHandler.getTenantDomain(); String combinedResourceLevelThrottleKey = resourceLevelThrottleKey + conditionGroupDTO.getConditionGroupId(); // Mockito.when(throttleDataHolder.isThrottled(combinedResourceLevelThrottleKey)).thenReturn(false); // Mockito.when(throttleDataHolder.isThrottled(subscriptionLevelThrottleKey)).thenReturn(false); // Mockito.when(throttleDataHolder.isThrottled(applicationLevelThrottleKey)).thenReturn(false); // Mockito.when(throttleDataHolder.isKeyTemplatesPresent()).thenReturn(false); // Mockito.when(accessInformation.isAccessAllowed()).thenReturn(false); //Should discontinue message flow if SANDBOX hard throttling limits are exceeded Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenHardThrottlingFailedWithThrottleException() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setProductionMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setKeyType("SANDBOX"); authenticationContext.setSpikeArrestLimit(0); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> 
matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); //Throw ThrottleException while retrieving access information Mockito.doThrow(ThrottleException.class).when(accessInformation).isAccessAllowed(); //Should discontinue message flow, when an exception is thrown during hard limit throttling information //process time Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenCustomThrottlingLimitExceeded() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setProductionMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setSpikeArrestLimit(0); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); Mockito.when(accessInformation.isAccessAllowed()).thenReturn(false); matchingConditions.add(conditionGroupDTO); throttleDataHolder.addKeyTemplate("$user", "$user"); throttleDataHolder.addKeyTemplate("testKeyTemplate", "testKeyTemplateValue"); throttleDataHolder.addThrottleData("testKeyTemplate", System.currentTimeMillis() + 10000); Assert.assertFalse(throttleHandler.handleRequest(messageContext)); throttleDataHolder.removeKeyTemplate("testKeyTemplate"); Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWhenHittingSubscriptionLevelSpike() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setSandboxMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setKeyType("SANDBOX"); authenticationContext.setSpikeArrestLimit(100); authenticationContext.setStopOnQuotaReach(true); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); matchingConditions.add(conditionGroupDTO); throttleDataHolder.addKeyTemplate("$user", "$user"); Mockito.when(accessInformation.isAccessAllowed()).thenReturn(false); Assert.assertFalse(throttleHandler.handleRequest(messageContext)); } @Test public void 
testHandleResponse() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); Assert.assertTrue(throttleHandler.handleResponse(messageContext)); } @Test public void testCheckForStaledThrottleData() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ServiceReferenceHolder.getInstance().setThrottleDataPublisher(new ThrottleDataPublisher()); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator, accessInformation); throttleHandler.setProductionMaxCount("100"); SynapseEnvironment synapseEnvironment = Mockito.mock(SynapseEnvironment.class); throttleHandler.init(synapseEnvironment); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); authenticationContext.setApiTier(throttlingTier); authenticationContext.setSpikeArrestLimit(0); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); Mockito.when(accessInformation.isAccessAllowed()).thenReturn(false); matchingConditions.add(conditionGroupDTO); throttleDataHolder.addKeyTemplate("testKeyTemplate", "testKeyTemplateValue"); throttleDataHolder.addThrottleData("testKeyTemplate", System.currentTimeMillis() - 10000); Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } @Test public void testMsgThrottleOutWithUserBlockingConditions() { ThrottleDataHolder throttleDataHolder = new ThrottleDataHolder(); ThrottleHandler throttleHandler = new ThrottlingHandlerWrapper(timer, throttleDataHolder, throttleEvaluator); MessageContext messageContext = TestUtils.getMessageContextWithAuthContext(apiContext, apiVersion); messageContext.setProperty(VERB_INFO_DTO, verbInfoDTO); ((Axis2MessageContext) messageContext).getAxis2MessageContext().getProperty(org.apache.axis2.context .MessageContext.TRANSPORT_HEADERS); AuthenticationContext authenticationContext = (AuthenticationContext) messageContext.getProperty (API_AUTH_CONTEXT); verbInfo.setConditionGroups(conditionGroupDTOs); ArrayList<ConditionGroupDTO> matchingConditions = new ArrayList<>(); // Adding a user blocking condition throttleDataHolder.addUserBlockingCondition(blockedUserWithTenantDomain, blockedUserWithTenantDomain); matchingConditions.add(conditionGroupDTO); authenticationContext.setApiTier("Unlimited"); // When a blocked user is invoking authenticationContext.setUsername(blockedUserWithTenantDomain); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); throttleDataHolder.addThrottledAPIKey(resourceLevelThrottleKey, System.currentTimeMillis() + 10000); Assert.assertFalse(throttleHandler.handleRequest(messageContext)); // When an unblocked user is invoking authenticationContext.setUsername(userWithTenantDomain); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); throttleDataHolder.addThrottledAPIKey(resourceLevelThrottleKey, System.currentTimeMillis() + 10000); 
Assert.assertTrue(throttleHandler.handleRequest(messageContext)); // When a blocked user without tenant domain in the username is invoking authenticationContext.setUsername(blockedUserWithOutTenantDomain); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); throttleDataHolder.addThrottledAPIKey(resourceLevelThrottleKey, System.currentTimeMillis() + 10000); Assert.assertFalse(throttleHandler.handleRequest(messageContext)); // Remove the user block condition and use blocked user to invoke throttleDataHolder.removeUserBlockingCondition(blockedUserWithTenantDomain); authenticationContext.setUsername(blockedUserWithTenantDomain); messageContext.setProperty(API_AUTH_CONTEXT, authenticationContext); throttleDataHolder.addThrottledAPIKey(resourceLevelThrottleKey, System.currentTimeMillis() + 10000); Assert.assertTrue(throttleHandler.handleRequest(messageContext)); } }
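// ---------------------------------------------------------------------------
// Added for illustration (not part of the original test class): a minimal
// sketch distilling how the throttle keys asserted in the tests above are
// composed. The class and method names here are hypothetical; the tests and
// the gateway build these strings inline rather than through helpers.
// ---------------------------------------------------------------------------
final class ThrottleKeySketch {

    // API-level key, e.g. "weatherAPI:v1"
    static String apiLevelKey(String apiContext, String apiVersion) {
        return apiContext + ":" + apiVersion;
    }

    // Resource-level key, e.g. "weatherAPI/v1/foo:GET"
    static String resourceLevelKey(String apiContext, String apiVersion, String resourceUri, String httpVerb) {
        return apiContext + "/" + apiVersion + resourceUri + ":" + httpVerb;
    }

    // Subscription-level key, e.g. "<appId>:weatherAPI:v1:<tier>"
    static String subscriptionLevelKey(String applicationId, String apiContext, String apiVersion, String tier) {
        return applicationId + ":" + apiContext + ":" + apiVersion + ":" + tier;
    }

    // Application-level key, e.g. "<appId>:<user>@carbon.super"
    static String applicationLevelKey(String applicationId, String username, String tenantDomain) {
        return applicationId + ":" + username + "@" + tenantDomain;
    }
}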
apache-2.0
hardfish/justTest
cdmi/src/test/java/org/jclouds/snia/cdmi/v1/features/DataApiExpectTest.java
1067
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.snia.cdmi.v1.features;

import org.jclouds.snia.cdmi.v1.internal.BaseCDMIApiExpectTest;
import org.testng.annotations.Test;

@Test(groups = "unit", testName = "DataAsyncApiTest")
public class DataApiExpectTest extends BaseCDMIApiExpectTest {

}
apache-2.0
iheartradio/fastly-sqs
src/main/scala/com/iheart/sqs/DBUtils.scala
3785
package com.iheart.sqs

import java.text.SimpleDateFormat
import java.time.{LocalDateTime, ZoneId}
import java.util.Date
import scala.concurrent.duration._
import com.iheart.sqs.Utils._
import org.mapdb.{DBMaker, Serializer}
import play.Logger
import scala.collection.JavaConverters._
import java.util.concurrent.Executors
import scala.concurrent._
import java.util.concurrent.atomic._
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.Serialization.write
import play.api.libs.ws.ning.NingWSClient

object DBUtils {

  implicit val formats = DefaultFormats
  val format = new SimpleDateFormat("dd/MM/yyyy HH:mm:ss")
  val dbFile = DBMaker.memoryDB().make()
  val dbHash = dbFile.hashMap("msgMap").keySerializer(Serializer.STRING).valueSerializer(Serializer.LONG).createOrOpen()
  val zoneId = ZoneId.of("America/New_York")
  val ddApiKey = conf.getString("datadog.apiKey")
  val ddAppKey = conf.getString("datadog.appKey")
  val ddHostname = java.net.InetAddress.getLocalHost.getHostName
  val ddHostTag = "hostname:" + ddHostname
  val ddUrl = "https://app.datadoghq.com/api/v1/series?api_key=" + ddApiKey

  // Let's create our own thread pool
  val numWorkers = sys.runtime.availableProcessors
  val pool = Executors.newFixedThreadPool(numWorkers)
  implicit val ec = ExecutionContext.fromExecutorService(pool)
  val wsClient = NingWSClient()

  private val s3Counter = new AtomicInteger()
  private val newRelicCounter = new AtomicInteger()

  case class DDEntry(metric: String, points: Seq[(Long, Int)], metricType: String, tags: Seq[String])

  def incrS3Counter = s3Counter.incrementAndGet()

  def decrS3Counter = s3Counter.decrementAndGet()

  def incrNewRelicCounter(i: Int) = newRelicCounter.addAndGet(i)

  def storeHostname(hostname: String) = {
    val now = LocalDateTime.now().atZone(zoneId).toEpochSecond
    dbHash.put(hostname, now)
  }

  private def submitToDD(metrics: Seq[DDEntry]) = {
    val json = "series" -> metrics.map { m =>
      ("metric" -> m.metric) ~
        ("type" -> m.metricType) ~
        ("tags" -> m.tags) ~
        ("points" -> m.points.map(tuple => Seq(JInt(tuple._1), JDouble(tuple._2))))
    }
    Logger.debug("Sending json : " + write(json))
    wsClient.url(ddUrl)
      .withHeaders(("Content-Type", "application/json"))
      .withRequestTimeout(2000)
      .post(write(json)).map { response =>
        if (response.status >= 400) {
          Logger.error("Invalid Status Code from DataDog: " + response.status.toString + " Error: " + response.body)
        }
      }
  }

  def startDataDogTimer() = {
    Logger.info("Starting datadog timer...")
    Future {
      blocking {
        while (true) {
          val nrCount = newRelicCounter.getAndSet(0)
          val s3Count = s3Counter.getAndSet(0)
          val threads = Thread.activeCount()
          val now = LocalDateTime.now().atZone(zoneId).toEpochSecond
          // send to Datadog ("gauge" is the metric type the Datadog series API expects)
          val m1 = DDEntry("fastlyinsights.s3Count", Seq((now, s3Count)), "gauge", Seq(ddHostTag))
          val m2 = DDEntry("fastlyinsights.newRelicCount", Seq((now, nrCount)), "gauge", Seq(ddHostTag))
          val m3 = DDEntry("fastlyinsights.jvmThreadCount", Seq((now, threads)), "gauge", Seq(ddHostTag))
          submitToDD(Seq(m1, m2, m3))
          Thread.sleep(10000)
        }
      }
    }
  }

  def startTimer() = {
    Logger.info("Starting DB timer")
    Future {
      blocking {
        while (true) {
          Logger.info("****************************")
          dbHash.getKeys.asScala.foreach { key =>
            val date = new Date(dbHash.get(key) * 1000)
            Logger.info(key + " -> " + date.toString)
          }
          Logger.info("****************************")
          Thread.sleep(30000)
        }
      }
    }
  }
}
apache-2.0
SebastianKersten/Mimoto
src/userinterface/MimotoCMS/ComponentController.php
1994
<?php

// classpath
namespace Mimoto\UserInterface\MimotoCMS;

// Mimoto classes
use Mimoto\Data\MimotoDataUtils;
use Mimoto\Mimoto;
use Mimoto\Core\CoreConfig;

// Symfony classes
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\JsonResponse;

// Silex classes
use Silex\Application;

/**
 * ComponentController
 *
 * @author Sebastian Kersten (@supertaboo)
 */
class ComponentController
{

    /**
     * View component overview
     * @return string The rendered html output
     */
    public function viewComponentOverview()
    {
        // 1. init page
        $page = Mimoto::service('output')->createPage($eRoot = Mimoto::service('data')->get(CoreConfig::MIMOTO_ROOT, CoreConfig::MIMOTO_ROOT));

        // 2. create and connect content
        $page->addComponent('content', Mimoto::service('output')->createComponent('MimotoCMS_components_ComponentOverview', $eRoot));

        // 3. setup page
        $page->setVar('pageTitle', array(
                (object) array(
                    "label" => 'Components, layouts and input elements',
                    "url" => '/mimoto.cms/components'
                )
            )
        );

        // 4. output
        return $page->render();
    }

    public function componentView(Application $app, $nComponentId)
    {
        // 1. init popup
        $page = Mimoto::service('output')->createPage(Mimoto::service('data')->get(CoreConfig::MIMOTO_ROOT, CoreConfig::MIMOTO_ROOT));

        // 2. load data
        $eComponent = Mimoto::service('data')->get(CoreConfig::MIMOTO_COMPONENT, $nComponentId);

        // 3. validate data
        if (empty($eComponent)) return $app->redirect("/mimoto.cms/entities");

        // 4. create content
        $component = Mimoto::service('output')->createComponent('MimotoCMS_components_ComponentDetail', $eComponent);

        // 5. connect
        $page->addComponent('content', $component);

        // 6. output
        return $page->render();
    }
}
apache-2.0
achristianson/nifi-minifi-cpp
nanofi/tests/CTailFileDelimitedTests.cpp
8412
/** * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "catch.hpp" #include "CTestsBase.h" /**** * ################################################################## * CTAILFILE DELIMITED TESTS * ################################################################## */ TEST_CASE("Test tailfile delimited. Empty file", "[tailfileDelimitedEmptyFileTest]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; //Create empty file FileManager fm(file); auto pp = invoke_processor(mgr, file); //Test that no flowfiles were created REQUIRE(pp != NULL); REQUIRE(pp->ff_list == NULL); } TEST_CASE("Test tailfile delimited. File has less than 4096 chars", "[tailfileDelimitedLessThan4096Chars]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; FileManager fm(file); fm.WriteNChars(34, 'a'); fm.CloseStream(); auto pp = invoke_processor(mgr, file); //No flow files will be created REQUIRE(pp != NULL); REQUIRE(pp->ff_list != NULL); REQUIRE(flow_files_size(pp->ff_list) == 1); REQUIRE(pp->ff_list->complete == 0); //Test that the current offset in the file is 34 REQUIRE(pp->curr_offset == 34); } TEST_CASE("Test tailfile delimited. Simple test", "[tailfileDelimitedSimpleTest]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; //Write 8192 bytes to the file FileManager fm(file); fm.WriteNChars(34, 'a'); fm.WriteNChars(1, ';'); fm.WriteNChars(6, 'b'); fm.WriteNChars(1, ';'); fm.CloseStream(); auto pp = invoke_processor(mgr, file); //Test that two flow file records were created REQUIRE(pp != NULL); REQUIRE(pp->ff_list != NULL); REQUIRE(pp->ff_list->ff_record != NULL); REQUIRE(flow_files_size(pp->ff_list) == 2); //Test that the current offset in the file is 42 bytes REQUIRE(pp->curr_offset == 42); //Test the flow file sizes const char * flowfile1_path = pp->ff_list->ff_record->contentLocation; const char * flowfile2_path = pp->ff_list->next->ff_record->contentLocation; struct stat fstat; stat(flowfile1_path, &fstat); REQUIRE(fstat.st_size == 34); stat(flowfile2_path, &fstat); REQUIRE(fstat.st_size == 6); REQUIRE(pp->ff_list->complete == 1); REQUIRE(pp->ff_list->next->complete == 1); } TEST_CASE("Test tailfile delimited. 
trailing non delimited string", "[tailfileNonDelimitedTest]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; //Write 8192 bytes to the file FileManager fm(file); fm.WriteNChars(34, 'a'); fm.WriteNChars(1, ';'); fm.WriteNChars(32, 'b'); fm.CloseStream(); auto pp = invoke_processor(mgr, file); //Test that two flow file records were created REQUIRE(pp != NULL); REQUIRE(pp->ff_list != NULL); REQUIRE(pp->ff_list->ff_record != NULL); REQUIRE(flow_files_size(pp->ff_list) == 2); //Test that the current offset in the file is 35 bytes REQUIRE(pp->curr_offset == 67); REQUIRE(pp->ff_list->complete == 1); REQUIRE(pp->ff_list->next->complete == 0); struct stat fstat; stat(pp->ff_list->ff_record->contentLocation, &fstat); REQUIRE(fstat.st_size == 34); //Append a delimiter at the end of the file fm.OpenStream(); fm.WriteNChars(1, ';'); fm.CloseStream(); pp = invoke_processor(mgr, file); REQUIRE(pp != NULL); REQUIRE(flow_files_size(pp->ff_list) == 2); stat(pp->ff_list->next->ff_record->contentLocation, &fstat); REQUIRE(fstat.st_size == 32); REQUIRE(pp->ff_list->next->complete == 1); } TEST_CASE("Test tailfile delimited 4096 chars non delimited", "[tailfileDelimitedSimpleTest]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; //Write 4096 bytes to the file FileManager fm(file); fm.WriteNChars(4096, 'a'); fm.CloseStream(); auto pp = invoke_processor(mgr, file); REQUIRE(pp != NULL); REQUIRE(pp->ff_list != NULL); REQUIRE(flow_files_size(pp->ff_list) == 1); REQUIRE(pp->ff_list->complete == 0); //Test that the current offset in the file is 4096 bytes REQUIRE(pp->curr_offset == 4096); //Write another 2048 characters fm.OpenStream(); fm.WriteNChars(2048, 'b'); fm.CloseStream(); pp = invoke_processor(mgr, file); REQUIRE(pp->ff_list != NULL); REQUIRE(flow_files_size(pp->ff_list) == 1); REQUIRE(pp->ff_list->complete == 0); //Test that the current offset in the file is (4096 + 2048) REQUIRE(pp->curr_offset == 6144); //Write another 2048 characters fm.OpenStream(); fm.WriteNChars(2048, 'c'); fm.CloseStream(); pp = invoke_processor(mgr, file); REQUIRE(pp->ff_list != NULL); REQUIRE(flow_files_size(pp->ff_list) == 1); //Test that the current offset in the file is 8192 bytes only REQUIRE(pp->curr_offset == 8192); //Write a delimiter at the end and expect a flow file size of 8192 bytes fm.OpenStream(); fm.WriteNChars(1, ';'); fm.CloseStream(); pp = invoke_processor(mgr, file); REQUIRE(pp->ff_list != NULL); REQUIRE(pp->ff_list->ff_record != NULL); REQUIRE(flow_files_size(pp->ff_list) == 1); REQUIRE(pp->ff_list->complete == 1); const char * flowfile_path = pp->ff_list->ff_record->contentLocation; struct stat fstat; stat(flowfile_path, &fstat); REQUIRE(fstat.st_size == 8192); } TEST_CASE("Test tailfile delimited. 
string starting with delimiter", "[tailfileDelimiterStartStringTest]") { TestControllerWithTemporaryWorkingDirectory test_controller; TailFileTestResourceManager mgr("TailFileDelimited", on_trigger_tailfiledelimited); const char * file = "./e.txt"; const char * delimiter = ";"; //Write 8192 bytes to the file FileManager fm(file); fm.WriteNChars(5, ';'); fm.WriteNChars(34, 'a'); fm.WriteNChars(4, ';'); fm.WriteNChars(32, 'b'); fm.CloseStream(); auto pp = invoke_processor(mgr, file); //Test that two flow file records were created REQUIRE(pp != NULL); REQUIRE(pp->ff_list != NULL); REQUIRE(pp->ff_list->ff_record != NULL); REQUIRE(flow_files_size(pp->ff_list) == 2); //Test that the current offset in the file is 35 bytes REQUIRE(pp->curr_offset == 75); REQUIRE(pp->ff_list->complete == 1); REQUIRE(pp->ff_list->next->complete == 0); struct stat fstat; stat(pp->ff_list->ff_record->contentLocation, &fstat); REQUIRE(fstat.st_size == 34); //Append a delimiter at the end of the file fm.OpenStream(); fm.WriteNChars(1, ';'); fm.CloseStream(); pp = invoke_processor(mgr, file); REQUIRE(pp != NULL); REQUIRE(flow_files_size(pp->ff_list) == 2); stat(pp->ff_list->next->ff_record->contentLocation, &fstat); REQUIRE(fstat.st_size == 32); REQUIRE(pp->ff_list->next->complete == 1); }
apache-2.0
omegaga/peloton
tests/concurrency/transaction_test.cpp
7562
//===----------------------------------------------------------------------===// // // Peloton // // transaction_test.cpp // // Identification: tests/concurrency/transaction_test.cpp // // Copyright (c) 2015-16, Carnegie Mellon University Database Group // //===----------------------------------------------------------------------===// #include "harness.h" #include "concurrency/transaction_tests_util.h" namespace peloton { namespace test { //===--------------------------------------------------------------------===// // Transaction Tests //===--------------------------------------------------------------------===// class TransactionTests : public PelotonTest {}; static std::vector<ConcurrencyType> TEST_TYPES = { CONCURRENCY_TYPE_OPTIMISTIC, CONCURRENCY_TYPE_PESSIMISTIC, CONCURRENCY_TYPE_SSI, CONCURRENCY_TYPE_SPECULATIVE_READ, CONCURRENCY_TYPE_EAGER_WRITE, CONCURRENCY_TYPE_TO }; void TransactionTest(concurrency::TransactionManager *txn_manager) { uint64_t thread_id = TestingHarness::GetInstance().GetThreadId(); for (oid_t txn_itr = 1; txn_itr <= 50; txn_itr++) { txn_manager->BeginTransaction(); if (thread_id % 2 == 0) { std::chrono::microseconds sleep_time(1); std::this_thread::sleep_for(sleep_time); } if (txn_itr % 25 != 0) { txn_manager->CommitTransaction(); } else { txn_manager->AbortTransaction(); } } } TEST_F(TransactionTests, TransactionTest) { for (auto test_type : TEST_TYPES) { concurrency::TransactionManagerFactory::Configure(test_type); auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance(); LaunchParallelTest(8, TransactionTest, &txn_manager); LOG_INFO("next Commit Id :: %lu", txn_manager.GetNextCommitId()); } } TEST_F(TransactionTests, SingleTransactionTest) { for (auto test_type : TEST_TYPES) { concurrency::TransactionManagerFactory::Configure(test_type); auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance(); std::unique_ptr<storage::DataTable> table( TransactionTestsUtil::CreateTable()); // read, read, read, read, update, read, read not exist // another txn read { TransactionScheduler scheduler(2, table.get(), &txn_manager); scheduler.Txn(0).Read(0); scheduler.Txn(0).Read(0); scheduler.Txn(0).Read(0); scheduler.Txn(0).Read(0); scheduler.Txn(0).Update(0, 1); scheduler.Txn(0).Read(0); scheduler.Txn(0).Read(100); scheduler.Txn(0).Commit(); scheduler.Txn(1).Read(0); scheduler.Txn(1).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[0].txn_result); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[1].txn_result); EXPECT_EQ(0, scheduler.schedules[0].results[0]); EXPECT_EQ(0, scheduler.schedules[0].results[1]); EXPECT_EQ(0, scheduler.schedules[0].results[2]); EXPECT_EQ(0, scheduler.schedules[0].results[3]); EXPECT_EQ(1, scheduler.schedules[0].results[4]); EXPECT_EQ(-1, scheduler.schedules[0].results[5]); EXPECT_EQ(1, scheduler.schedules[1].results[0]); } // update, update, update, update, read { TransactionScheduler scheduler(1, table.get(), &txn_manager); scheduler.Txn(0).Update(0, 1); scheduler.Txn(0).Update(0, 2); scheduler.Txn(0).Update(0, 3); scheduler.Txn(0).Update(0, 4); scheduler.Txn(0).Read(0); scheduler.Txn(0).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[0].txn_result); EXPECT_EQ(4, scheduler.schedules[0].results[0]); } // delete not exist, delete exist, read deleted, update deleted, // read deleted, insert back, update inserted, read newly updated, // delete inserted, read deleted { TransactionScheduler scheduler(1, table.get(), &txn_manager); scheduler.Txn(0).Delete(100); 
scheduler.Txn(0).Delete(0); scheduler.Txn(0).Read(0); scheduler.Txn(0).Update(0, 1); scheduler.Txn(0).Read(0); scheduler.Txn(0).Insert(0, 2); scheduler.Txn(0).Update(0, 3); scheduler.Txn(0).Read(0); scheduler.Txn(0).Delete(0); scheduler.Txn(0).Read(0); scheduler.Txn(0).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[0].txn_result); EXPECT_EQ(-1, scheduler.schedules[0].results[0]); EXPECT_EQ(-1, scheduler.schedules[0].results[1]); EXPECT_EQ(3, scheduler.schedules[0].results[2]); EXPECT_EQ(-1, scheduler.schedules[0].results[3]); LOG_INFO("FINISH THIS"); } // insert, delete inserted, read deleted, insert again, delete again // read deleted, insert again, read inserted, update inserted, read updated { TransactionScheduler scheduler(1, table.get(), &txn_manager); scheduler.Txn(0).Insert(1000, 0); scheduler.Txn(0).Delete(1000); scheduler.Txn(0).Read(1000); scheduler.Txn(0).Insert(1000, 1); scheduler.Txn(0).Delete(1000); scheduler.Txn(0).Read(1000); scheduler.Txn(0).Insert(1000, 2); scheduler.Txn(0).Read(1000); scheduler.Txn(0).Update(1000, 3); scheduler.Txn(0).Read(1000); scheduler.Txn(0).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[0].txn_result); EXPECT_EQ(-1, scheduler.schedules[0].results[0]); EXPECT_EQ(-1, scheduler.schedules[0].results[1]); EXPECT_EQ(2, scheduler.schedules[0].results[2]); EXPECT_EQ(3, scheduler.schedules[0].results[3]); } // Deadlock detection test for eager write // T0: R0 W0 C0 // T1: R1 W1 C1 if (concurrency::TransactionManagerFactory::GetProtocol() == CONCURRENCY_TYPE_EAGER_WRITE) { TransactionScheduler scheduler(2, table.get(), &txn_manager); scheduler.Txn(0).Read(2); scheduler.Txn(1).Read(3); scheduler.Txn(0).Update(3,1); scheduler.Txn(1).Update(2,2); scheduler.Txn(0).Commit(); scheduler.Txn(1).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[1].txn_result); EXPECT_EQ(RESULT_ABORTED, scheduler.schedules[0].txn_result); } } } TEST_F(TransactionTests, AbortTest) { for (auto test_type : TEST_TYPES) { concurrency::TransactionManagerFactory::Configure(test_type); auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance(); std::unique_ptr<storage::DataTable> table( TransactionTestsUtil::CreateTable()); { TransactionScheduler scheduler(2, table.get(), &txn_manager); scheduler.Txn(0).Update(0, 100); scheduler.Txn(0).Abort(); scheduler.Txn(1).Read(0); scheduler.Txn(1).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_ABORTED, scheduler.schedules[0].txn_result); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[1].txn_result); //printf("==========result=%d\n", int(scheduler.schedules[1].results[0])); EXPECT_EQ(0, scheduler.schedules[1].results[0]); } { TransactionScheduler scheduler(2, table.get(), &txn_manager); scheduler.Txn(0).Insert(100, 0); scheduler.Txn(0).Abort(); scheduler.Txn(1).Read(100); scheduler.Txn(1).Commit(); scheduler.Run(); EXPECT_EQ(RESULT_ABORTED, scheduler.schedules[0].txn_result); EXPECT_EQ(RESULT_SUCCESS, scheduler.schedules[1].txn_result); EXPECT_EQ(-1, scheduler.schedules[1].results[0]); } } } } // End test namespace } // End peloton namespace
apache-2.0
MyRobotLab/pyrobotlab
home/brotherbrown831/inmoov/bots/gestures/eyeslooking.py
579
def eyeslooking(data):
    for y in range(0, 5):
        if (data == "can i have your attention"):
            i01.mouth.speak("ok you have my attention")
            stopit()
        if (data == "inmoov"):
            stopit()
        x = (random.randint(1, 6))
        if x == 1:
            i01.head.eyeX.moveTo(80)
        if x == 2:
            i01.head.eyeY.moveTo(80)
        if x == 3:
            eyesdown()
        if x == 4:
            eyesupp()
        if x == 5:
            eyesleft()
        if x == 6:
            eyesright()
        sleep(0.5)
    eyesfront()
apache-2.0
hangxin1940/elasticsearch-cn-out-of-box
plugins/HQ/js/view/document/DocumentListView.js
7449
/* Copyright 2013 Roy Russo Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Latest Builds: https://github.com/royrusso/elasticsearch-HQ */ var DocumentListView = Backbone.View.extend({ currentPage:1, maxPages:0, pageFrom:0, // defines the offset from the first result you want to fetch pageSize:0, columnArray:undefined, resultsModel:undefined, requestBody:undefined, resultBody:undefined, render:function () { this.pageSize = this.model.get('queryObj').size; var _this = this; var requestBodyObject = QueryUtil.buildBody(this.model, this.pageFrom); _this.requestBody = JSON.stringify(requestBodyObject, undefined, 2); var searchRequest = $.ajax({ url:this.model.getInstanceURL(), type:"POST", data:JSON.stringify(requestBodyObject) }); searchRequest.success(function (data, textStatus, jqXHR) { var queryResultsModel = new QueryResultsListModel(); queryResultsModel.responseTime = data.took; queryResultsModel.timeOut = data.timed_out; _this.resultBody = JSON.stringify(data, undefined, 2); if (data.hits && data.hits.hits.length > 0) { // Columns... _this.columnArray = [ {key:"_index", name:"Index"}, {key:"_type", name:"Type"}, {key:"_score", name:"Score"}, {key:"_id", name:"ID"} ]; if (!_.isEmpty(_this.model.get('queryObj').fields)) { var fieldsArray = _this.model.get('queryObj').fields; _.each(fieldsArray, function (field) { var col = {key:field, name:uppercaseFirst(field), type:"source" }; _this.columnArray.push(col); }); } // add columns for type data based on indices selected /* var selectedIndices = _this.model.indicesArray; var clusterState = cluster.get("clusterState").toJSON(); var allIndices = clusterState.metadata.indices; for (var $i = 0; $i < selectedIndices.length; $i++) { if (selectedIndices[$i] in allIndices) { var foundIndex = allIndices[selectedIndices[$i]]; var mappingTypeKeys = _.keys(foundIndex.mappings); var mappingTypeVals = _.values(foundIndex.mappings); if (mappingTypeKeys != undefined) { for (var $j = 0; $j < mappingTypeKeys.length; $j++) { if (mappingTypeVals[$j] != undefined) { var prop = mappingTypeVals[$j].properties; if (prop != undefined) { var tempTypes = _.keys(prop); for (var $k = 0; $k < tempTypes.length; $k++) { var col = {key:tempTypes[$k], name:uppercaseFirst(tempTypes[$k]), type:"source" }; var found = false; for (var $m = 0; $m < _this.columnArray.length; $m++) { if (_this.columnArray[$m].key == tempTypes[$k]) { found = true; break; } } if (!found) { _this.columnArray.push(col); } } } } } } } }*/ // Results... queryResultsModel.totalHits = data.hits.total; queryResultsModel.maxScore = data.hits.max_score; // loop results.. // 1/ move _source items to root of the result tree for easier looping in the ui. // 2/ create JSON representation of the _source object. queryResultsModel.results = []; _.each(data.hits.hits, function (item) { var result = {}; result = item; result._raw = JSON.stringify(item, undefined, 2); jQuery.extend(result, item.fields); // merge _source items in to root level of object. result.fields = undefined; // dont need this object nested in here. 
queryResultsModel.results.push(result); }); _this.resultsModel = queryResultsModel; // calc max pages if (_this.maxPages === 0) // only do this once { _this.maxPages = Math.floor(((queryResultsModel.totalHits - 1) / _this.pageSize) + 1); } } else { queryResultsModel.totalHits = 0; queryResultsModel.maxScore = 0; _this.resultsModel = queryResultsModel; } }); searchRequest.error(function (jqXHR, textStatus, errorThrown) { }); searchRequest.complete(function () { var tpl = _.template(queryTemplate.results); $('#searchResults').html(tpl({ columns:_this.columnArray, requestBody:_this.requestBody, results:_this.resultsModel, resultBody:_this.resultBody, currentPage:_this.currentPage, pageSize:_this.pageSize, maxPages:_this.maxPages })); // for view of row-level json data $(".itemjsoncl").click(function () { $("#itemraw").val($(this).data('id')); var htmlStr = _this.resultsModel.results[$(this).data('id')]._raw; $("#itemraw").text(htmlStr); prettyPrint(); }); $("[rel=tipRight]").tooltip(); _this.calcPager(); // Adding a button for clearing results in Query search // https://github.com/royrusso/elasticsearch-HQ/issues/189 _this.clearResults(); prettyPrint(); return this; }); }, pageNext:function () { this.pageFrom = this.pageFrom + this.pageSize; this.currentPage++; }, pagePrev:function () { if (this.pageFrom !== 0) { this.pageFrom = this.pageFrom - this.pageSize; this.currentPage--; } }, calcPager:function () { var _this = this; // pagination bindings don't seem to work on using backbone event binding, so ... $("#loadNext").click(function () { _this.pageNext(); _this.render(); }); $("#loadPrev").click(function () { _this.pagePrev(); _this.render(); }); }, clearResults:function () { var _this = this; $('#clearResults').click(function () { $('#searchResults').empty(); }); } });
apache-2.0
nssales/Strata
modules/collect/src/main/java/com/opengamma/strata/collect/io/PropertySet.java
7405
/** * Copyright (C) 2014 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.strata.collect.io; import java.util.Map; import java.util.Map.Entry; import com.google.common.base.MoreObjects; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ListMultimap; import com.google.common.collect.Multimap; import com.opengamma.strata.collect.ArgChecker; /** * A map of key-value properties. * <p> * This class represents a map of key to value. * Multiple values may be associated with each key. * <p> * This class is generally created by reading an INI or properties file. * See {@link IniFile} and {@link PropertiesFile}. */ public final class PropertySet { // this class is common between IniFile and PropertiesFile /** * The empty instance. */ private static final PropertySet EMPTY = new PropertySet(ImmutableListMultimap.of()); /** * The key-value pairs. */ private final ImmutableListMultimap<String, String> keyValueMap; //------------------------------------------------------------------------- /** * Obtains an empty property set. * <p> * The result contains no properties. * * @return an empty property set */ public static PropertySet empty() { return EMPTY; } /** * Obtains an instance from a map. * <p> * The returned instance will have one value for each key. * * @param keyValues the key-values to create the instance with * @return the property set */ public static PropertySet of(Map<String, String> keyValues) { ArgChecker.notNull(keyValues, "keyValues"); ImmutableListMultimap.Builder<String, String> builder = ImmutableListMultimap.builder(); for (Entry<String, String> entry : keyValues.entrySet()) { builder.put(entry); } return new PropertySet(builder.build()); } /** * Obtains an instance from a map allowing for multiple values for each key. * <p> * The returned instance may have more than one value for each key. * * @param keyValues the key-values to create the instance with * @return the property set */ public static PropertySet of(Multimap<String, String> keyValues) { ArgChecker.notNull(keyValues, "keyValues"); return new PropertySet(ImmutableListMultimap.copyOf(keyValues)); } //------------------------------------------------------------------------- /** * Restricted constructor. * * @param keyValues the key-value pairs */ private PropertySet(ImmutableListMultimap<String, String> keyValues) { this.keyValueMap = keyValues; } //------------------------------------------------------------------------- /** * Returns the set of keys of this property set. * <p> * The iteration order of the map matches that of the input data. * * @return the set of keys */ public ImmutableSet<String> keys() { return ImmutableSet.copyOf(keyValueMap.keySet()); } /** * Returns the property set as a multimap. * <p> * The iteration order of the map matches that of the input data. * * @return the key-value map */ public ImmutableListMultimap<String, String> asMultimap() { return keyValueMap; } /** * Returns the property set as a map, throwing an exception if any key has multiple values. * <p> * The iteration order of the map matches that of the input data. 
* * @return the key-value map */ public ImmutableMap<String, String> asMap() { ImmutableMap.Builder<String, String> builder = ImmutableMap.builder(); for (String key : keys()) { builder.put(key, value(key)); } return builder.build(); } //------------------------------------------------------------------------- /** * Checks if this property set is empty. * * @return true if the set is empty */ public boolean isEmpty() { return keyValueMap.isEmpty(); } /** * Checks if this property set contains the specified key. * * @param key the key name * @return true if the key exists */ public boolean contains(String key) { ArgChecker.notNull(key, "key"); return keyValueMap.containsKey(key); } /** * Gets a single value from this property set. * <p> * This returns the value associated with the specified key. * If more than one value, or no value, is associated with the key an exception is thrown. * * @param key the key name * @return the value * @throws IllegalArgumentException if the key does not exist, or if more than one value is associated */ public String value(String key) { ArgChecker.notNull(key, "key"); ImmutableList<String> values = keyValueMap.get(key); if (values.size() == 0) { throw new IllegalArgumentException("Unknown key: " + key); } if (values.size() > 1) { throw new IllegalArgumentException("Multiple values for key: " + key); } return values.get(0); } /** * Gets the list of values associated with the specified key. * <p> * A key-values instance may contain multiple values for each key. * This method returns that list of values. * The iteration order of the map matches that of the input data. * The returned list may be empty. * * @param key the key name * @return the list of values associated with the key */ public ImmutableList<String> valueList(String key) { ArgChecker.notNull(key, "key"); return MoreObjects.firstNonNull(keyValueMap.get(key), ImmutableList.<String>of()); } //------------------------------------------------------------------------- /** * Combines this property set with another. * <p> * The specified property set takes precedence. * * @param other the other property set * @return the combined property set */ public PropertySet combinedWith(PropertySet other) { ArgChecker.notNull(other, "other"); if (other.isEmpty()) { return this; } if (isEmpty()) { return other; } ListMultimap<String, String> map = ArrayListMultimap.create(keyValueMap); for (String key : other.asMultimap().keySet()) { map.removeAll(key); map.putAll(key, other.valueList(key)); } return new PropertySet(ImmutableListMultimap.copyOf(map)); } //------------------------------------------------------------------------- /** * Checks if this property set equals another. * <p> * The comparison checks the content. * * @param obj the other section, null returns false * @return true if equal */ @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj instanceof PropertySet) { return keyValueMap.equals(((PropertySet) obj).keyValueMap); } return false; } /** * Returns a suitable hash code for the property set. * * @return the hash code */ @Override public int hashCode() { return keyValueMap.hashCode(); } /** * Returns a string describing the property set. * * @return the descriptive string */ @Override public String toString() { return keyValueMap.toString(); } }
apache-2.0
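The PropertySet API above lends itself to a short usage sketch. This is illustrative only (the class name PropertySetExample and the key/value strings are invented); it uses only methods visible in the source: of(Map), combinedWith, value, valueList, and asMap.

import com.google.common.collect.ImmutableMap;
import com.opengamma.strata.collect.io.PropertySet;

public class PropertySetExample {
  public static void main(String[] args) {
    // of(Map) associates exactly one value with each key
    PropertySet base = PropertySet.of(ImmutableMap.of("host", "localhost", "port", "8080"));
    PropertySet override = PropertySet.of(ImmutableMap.of("port", "9090"));

    // combinedWith: the argument takes precedence, so "port" resolves to "9090"
    PropertySet combined = base.combinedWith(override);
    System.out.println(combined.value("port"));     // 9090
    System.out.println(combined.valueList("host")); // [localhost]
    System.out.println(combined.asMap());           // safe here: every key is single-valued
  }
}

Note that value(key) throws IllegalArgumentException for a missing key or a key with multiple values, so asMap() is only usable when every key carries exactly one value.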
houjinyun/hjy
FileUploaderAndDownloader/src/main/java/com/hjy/http/upload/uploader/BaseUploader.java
458
package com.hjy.http.upload.uploader; import com.hjy.http.upload.FileUploadInfo; import com.hjy.http.upload.listener.OnFileTransferredListener; import java.io.IOException; /** * Created by hjy on 7/9/15.<br> */ public abstract class BaseUploader { public abstract String upload(FileUploadInfo fileUploadInfo, OnFileTransferredListener fileTransferredListener) throws IOException; public abstract void cancel(FileUploadInfo fileUploadInfo); }
apache-2.0
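Since BaseUploader fixes only two abstract signatures, a concrete subclass can be sketched as follows. Everything inside the method bodies is invented for illustration; only the signatures come from the source.

import java.io.IOException;

import com.hjy.http.upload.FileUploadInfo;
import com.hjy.http.upload.listener.OnFileTransferredListener;
import com.hjy.http.upload.uploader.BaseUploader;

public class StubUploader extends BaseUploader {

    private volatile boolean cancelled;

    @Override
    public String upload(FileUploadInfo fileUploadInfo,
                         OnFileTransferredListener fileTransferredListener) throws IOException {
        if (cancelled) {
            throw new IOException("upload cancelled");
        }
        // A real uploader would stream the file here and report progress through
        // fileTransferredListener; this stub simply returns a canned server response.
        return "{\"ok\":true}";
    }

    @Override
    public void cancel(FileUploadInfo fileUploadInfo) {
        cancelled = true;
    }
}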
tqchen/tvm
python/tvm/micro/__init__.py
1292
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """MicroTVM module for bare-metal backends""" from .artifact import Artifact from .build import build_static_runtime, default_options, TVM_ROOT_DIR from .build import CRT_ROOT_DIR, Workspace from .compiler import Compiler, DefaultCompiler, Flasher from .debugger import GdbRemoteDebugger from .micro_library import MicroLibrary from .micro_binary import MicroBinary from .session import create_local_graph_runtime, Session from .transport import TransportLogger, DebugWrapperTransport, SubprocessTransport
apache-2.0
ARM-software/astc-encoder
Source/Fuzzers/fuzz_astc_physical_to_symbolic.cpp
3026
// SPDX-License-Identifier: Apache-2.0 // ---------------------------------------------------------------------------- // Copyright 2020-2021 Arm Limited // // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy // of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. // ---------------------------------------------------------------------------- /** * @brief Fuzz target for physical_to_symbolic(). * * This function is the first entrypoint for decompressing a 16 byte block of * input ASTC data from disk. The 16 bytes can contain arbitrary data; they * are read from an external source, but the block size used must be a valid * ASTC block footprint. */ #include "astcenc_internal.h" #include <fuzzer/FuzzedDataProvider.h> #include <array> #include <vector> struct BlockSizes { int x; int y; int z; }; std::array<BlockSizes, 3> testSz {{ { 4, 4, 1}, // Highest bitrate {12, 12, 1}, // Largest 2D block {6, 6, 6} // Largest 3D block }}; std::array<block_size_descriptor, 3> testBSD; /** * @brief Utility function to create all of the block size descriptors needed. * * This is triggered once via a static initializer. * * Triggering once is important so that we only create a single BSD per block * size we need, rather than one per fuzzer iteration (it's expensive). This * improves fuzzer throughput by ~ 1000x! * * Triggering via a static initializer, rather than a lazy init in the fuzzer * function, is important because it means that the BSD is allocated before * fuzzing starts. This means that leaksanitizer will ignore the fact that we * "leak" the dynamic allocations inside the BSD (we never call term()). */ bool bsd_initializer() { for (int i = 0; i < testSz.size(); i++) { init_block_size_descriptor( testSz[i].x, testSz[i].y, testSz[i].z, false, 1.0f, testBSD[i]); } return true; } extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { // Preinitialize the block size descriptors we need static bool init = bsd_initializer(); // Must have 4 (select block size) and 16 (payload) bytes if (size < 4 + 16) { return 0; } FuzzedDataProvider stream(data, size); // Select a block size to test int i = stream.ConsumeIntegralInRange<int>(0, testSz.size() - 1); // Populate the physical block physical_compressed_block pcb; std::vector<uint8_t> buffer = stream.ConsumeBytes<uint8_t>(16); std::memcpy(&pcb, buffer.data(), 16); // Call the function under test symbolic_compressed_block scb; physical_to_symbolic(testBSD[i], pcb, scb); return 0; }
apache-2.0
noddi/druid
processing/src/main/java/io/druid/segment/incremental/IncrementalIndexAdapter.java
10373
/* * Licensed to Metamarkets Group Inc. (Metamarkets) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Metamarkets licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.druid.segment.incremental; import com.google.common.base.Function; import com.google.common.collect.Iterators; import com.google.common.collect.Maps; import io.druid.collections.bitmap.BitmapFactory; import io.druid.collections.bitmap.MutableBitmap; import io.druid.java.util.common.logger.Logger; import io.druid.query.monomorphicprocessing.RuntimeShapeInspector; import io.druid.segment.DimensionHandler; import io.druid.segment.DimensionIndexer; import io.druid.segment.IndexableAdapter; import io.druid.segment.IntIteratorUtils; import io.druid.segment.Metadata; import io.druid.segment.Rowboat; import io.druid.segment.column.ColumnCapabilities; import io.druid.segment.data.EmptyIndexedInts; import io.druid.segment.data.Indexed; import io.druid.segment.data.IndexedInts; import io.druid.segment.data.ListIndexed; import it.unimi.dsi.fastutil.ints.IntIterator; import org.joda.time.Interval; import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.Map; /** */ public class IncrementalIndexAdapter implements IndexableAdapter { private static final Logger log = new Logger(IncrementalIndexAdapter.class); private final Interval dataInterval; private final IncrementalIndex<?> index; private final Map<String, DimensionAccessor> accessors; private static class DimensionAccessor { private final IncrementalIndex.DimensionDesc dimensionDesc; private final MutableBitmap[] invertedIndexes; private final DimensionIndexer indexer; public DimensionAccessor(IncrementalIndex.DimensionDesc dimensionDesc) { this.dimensionDesc = dimensionDesc; this.indexer = dimensionDesc.getIndexer(); if(dimensionDesc.getCapabilities().hasBitmapIndexes()) { this.invertedIndexes = new MutableBitmap[indexer.getCardinality() + 1]; } else { this.invertedIndexes = null; } } } public IncrementalIndexAdapter( Interval dataInterval, IncrementalIndex<?> index, BitmapFactory bitmapFactory ) { this.dataInterval = dataInterval; this.index = index; /* Sometimes it's hard to tell whether one dimension contains a null value or not. * If one dimension has shown a null or empty value explicitly, then yes, it contains * a null value. But if one dimension's values are all non-null, it is still too early to say * that this dimension does not contain a null value. Consider a two-row case: the first row has * "dimA=1" and "dimB=2", while the second row only has "dimA=3". For dimB, its only value is "2", and * it never showed a null or empty value. But when we combine these two rows, dimB is null * in row 2. So we should iterate over all rows to determine whether one dimension contains * a null value.
*/ final List<IncrementalIndex.DimensionDesc> dimensions = index.getDimensions(); accessors = Maps.newHashMapWithExpectedSize(dimensions.size()); for (IncrementalIndex.DimensionDesc dimension : dimensions) { accessors.put(dimension.getName(), new DimensionAccessor(dimension)); } int rowNum = 0; for (IncrementalIndex.TimeAndDims timeAndDims : index.getFacts().keySet()) { final Object[] dims = timeAndDims.getDims(); for (IncrementalIndex.DimensionDesc dimension : dimensions) { final int dimIndex = dimension.getIndex(); DimensionAccessor accessor = accessors.get(dimension.getName()); // Add 'null' to the dimension's dictionary. if (dimIndex >= dims.length || dims[dimIndex] == null) { accessor.indexer.processRowValsToUnsortedEncodedKeyComponent(null); continue; } final ColumnCapabilities capabilities = dimension.getCapabilities(); if(capabilities.hasBitmapIndexes()) { final MutableBitmap[] bitmapIndexes = accessor.invertedIndexes; final DimensionIndexer indexer = accessor.indexer; indexer.fillBitmapsFromUnsortedEncodedKeyComponent(dims[dimIndex], rowNum, bitmapIndexes, bitmapFactory); } } ++rowNum; } } @Override public Interval getDataInterval() { return dataInterval; } @Override public int getNumRows() { return index.size(); } @Override public Indexed<String> getDimensionNames() { return new ListIndexed<String>(index.getDimensionNames(), String.class); } @Override public Indexed<String> getMetricNames() { return new ListIndexed<String>(index.getMetricNames(), String.class); } @Override public Indexed<Comparable> getDimValueLookup(String dimension) { final DimensionAccessor accessor = accessors.get(dimension); if (accessor == null) { return null; } final DimensionIndexer indexer = accessor.dimensionDesc.getIndexer(); return indexer.getSortedIndexedValues(); } @Override public Iterable<Rowboat> getRows() { return new Iterable<Rowboat>() { @Override public Iterator<Rowboat> iterator() { final List<IncrementalIndex.DimensionDesc> dimensions = index.getDimensions(); final DimensionHandler[] handlers = new DimensionHandler[dimensions.size()]; final DimensionIndexer[] indexers = new DimensionIndexer[dimensions.size()]; for (IncrementalIndex.DimensionDesc dimension : dimensions) { handlers[dimension.getIndex()] = dimension.getHandler(); indexers[dimension.getIndex()] = dimension.getIndexer(); } /* * Note that the transform function increments a counter to determine the rowNum of * the iterated Rowboats. We need to return a new iterator on each * iterator() call to ensure the counter starts at 0. 
*/ return Iterators.transform( index.getFacts().keySet().iterator(), new Function<IncrementalIndex.TimeAndDims, Rowboat>() { int count = 0; @Override public Rowboat apply(IncrementalIndex.TimeAndDims timeAndDims) { final Object[] dimValues = timeAndDims.getDims(); final int rowOffset = timeAndDims.getRowIndex(); Object[] dims = new Object[dimValues.length]; for (IncrementalIndex.DimensionDesc dimension : dimensions) { final int dimIndex = dimension.getIndex(); if (dimIndex >= dimValues.length || dimValues[dimIndex] == null) { continue; } final DimensionIndexer indexer = indexers[dimIndex]; Object sortedDimVals = indexer.convertUnsortedEncodedKeyComponentToSortedEncodedKeyComponent(dimValues[dimIndex]); dims[dimIndex] = sortedDimVals; } Object[] metrics = new Object[index.getMetricAggs().length]; for (int i = 0; i < metrics.length; i++) { metrics[i] = index.getMetricObjectValue(rowOffset, i); } return new Rowboat( timeAndDims.getTimestamp(), dims, metrics, count++, handlers ); } } ); } }; } @Override public IndexedInts getBitmapIndex(String dimension, int index) { DimensionAccessor accessor = accessors.get(dimension); if (accessor == null) { return EmptyIndexedInts.EMPTY_INDEXED_INTS; } ColumnCapabilities capabilities = accessor.dimensionDesc.getCapabilities(); DimensionIndexer indexer = accessor.dimensionDesc.getIndexer(); if (!capabilities.hasBitmapIndexes()) { return EmptyIndexedInts.EMPTY_INDEXED_INTS; } final int id = (Integer) indexer.getUnsortedEncodedValueFromSorted(index); if (id < 0 || id >= indexer.getCardinality()) { return EmptyIndexedInts.EMPTY_INDEXED_INTS; } MutableBitmap bitmapIndex = accessor.invertedIndexes[id]; if (bitmapIndex == null) { return EmptyIndexedInts.EMPTY_INDEXED_INTS; } return new BitmapIndexedInts(bitmapIndex); } @Override public String getMetricType(String metric) { return index.getMetricType(metric); } @Override public ColumnCapabilities getCapabilities(String column) { return index.getCapabilities(column); } static class BitmapIndexedInts implements IndexedInts { private final MutableBitmap bitmapIndex; BitmapIndexedInts(MutableBitmap bitmapIndex) { this.bitmapIndex = bitmapIndex; } @Override public int size() { return bitmapIndex.size(); } @Override public int get(int index) { // Slow for concise bitmaps, but is fast with roaring bitmaps, so it's just not supported. throw new UnsupportedOperationException("Not supported."); } @Override public IntIterator iterator() { return IntIteratorUtils.fromRoaringBitmapIntIterator(bitmapIndex.iterator()); } @Override public void fill(int index, int[] toFill) { throw new UnsupportedOperationException("fill not supported"); } @Override public void close() throws IOException { } @Override public void inspectRuntimeShape(RuntimeShapeInspector inspector) { inspector.visit("bitmapIndex", bitmapIndex); } } @Override public Metadata getMetadata() { return index.getMetadata(); } @Override public Map<String, DimensionHandler> getDimensionHandlers() { return index.getDimensionHandlers(); } }
apache-2.0
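Callers of getBitmapIndex(String, int) above must treat the result as iterator-only, because BitmapIndexedInts throws on both get(int) and fill(int, int[]). A minimal consumption sketch (the helper class, method name, and arguments are invented for illustration):

import io.druid.segment.data.IndexedInts;
import io.druid.segment.incremental.IncrementalIndexAdapter;
import it.unimi.dsi.fastutil.ints.IntIterator;

public final class BitmapIndexExample {
  /** Sums the row numbers matching the given sorted dictionary position. */
  static long sumMatchingRows(IncrementalIndexAdapter adapter, String dimension, int sortedId) {
    IndexedInts rows = adapter.getBitmapIndex(dimension, sortedId);
    long sum = 0;
    IntIterator it = rows.iterator(); // the only supported access path
    while (it.hasNext()) {
      sum += it.nextInt();
    }
    return sum;
  }
}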
partofthething/home-assistant
tests/components/huisbaasje/test_config_flow.py
5127
"""Test the Huisbaasje config flow.""" from unittest.mock import patch from homeassistant import config_entries, data_entry_flow, setup from homeassistant.components.huisbaasje.config_flow import ( HuisbaasjeConnectionException, HuisbaasjeException, ) from homeassistant.components.huisbaasje.const import DOMAIN from tests.common import MockConfigEntry async def test_form(hass): """Test we get the form.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {} with patch( "huisbaasje.Huisbaasje.authenticate", return_value=None ) as mock_authenticate, patch( "huisbaasje.Huisbaasje.get_user_id", return_value="test-id", ) as mock_get_user_id, patch( "homeassistant.components.huisbaasje.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.huisbaasje.async_setup_entry", return_value=True, ) as mock_setup_entry: form_result = await hass.config_entries.flow.async_configure( result["flow_id"], { "username": "test-username", "password": "test-password", }, ) await hass.async_block_till_done() assert form_result["type"] == "create_entry" assert form_result["title"] == "test-username" assert form_result["data"] == { "id": "test-id", "username": "test-username", "password": "test-password", } assert len(mock_authenticate.mock_calls) == 1 assert len(mock_get_user_id.mock_calls) == 1 assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 async def test_form_invalid_auth(hass): """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "huisbaasje.Huisbaasje.authenticate", side_effect=HuisbaasjeException, ): form_result = await hass.config_entries.flow.async_configure( result["flow_id"], { "username": "test-username", "password": "test-password", }, ) assert form_result["type"] == data_entry_flow.RESULT_TYPE_FORM assert form_result["errors"] == {"base": "invalid_auth"} async def test_form_cannot_connect(hass): """Test we handle cannot connect error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "huisbaasje.Huisbaasje.authenticate", side_effect=HuisbaasjeConnectionException, ): form_result = await hass.config_entries.flow.async_configure( result["flow_id"], { "username": "test-username", "password": "test-password", }, ) assert form_result["type"] == data_entry_flow.RESULT_TYPE_FORM assert form_result["errors"] == {"base": "connection_exception"} async def test_form_unknown_error(hass): """Test we handle an unknown error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "huisbaasje.Huisbaasje.authenticate", side_effect=Exception, ): form_result = await hass.config_entries.flow.async_configure( result["flow_id"], { "username": "test-username", "password": "test-password", }, ) assert form_result["type"] == data_entry_flow.RESULT_TYPE_FORM assert form_result["errors"] == {"base": "unknown"} async def test_form_entry_exists(hass): """Test we handle an already existing entry.""" MockConfigEntry( unique_id="test-id", domain=DOMAIN, data={ "id": "test-id", "username": "test-username", "password": "test-password", }, title="test-username", ).add_to_hass(hass) result = await 
hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("huisbaasje.Huisbaasje.authenticate", return_value=None), patch( "huisbaasje.Huisbaasje.get_user_id", return_value="test-id", ), patch( "homeassistant.components.huisbaasje.async_setup", return_value=True ), patch( "homeassistant.components.huisbaasje.async_setup_entry", return_value=True, ): form_result = await hass.config_entries.flow.async_configure( result["flow_id"], { "username": "test-username", "password": "test-password", }, ) assert form_result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert form_result["reason"] == "already_configured"
apache-2.0
doom369/netty
testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java
28575
/* * Copyright 2017 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package io.netty.testsuite.transport.socket; import io.netty.bootstrap.Bootstrap; import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelConfig; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.ChannelInputShutdownEvent; import io.netty.channel.socket.ChannelInputShutdownReadComplete; import io.netty.channel.socket.ChannelOutputShutdownEvent; import io.netty.channel.socket.DuplexChannel; import io.netty.util.UncheckedBooleanSupplier; import io.netty.util.internal.PlatformDependent; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; public class SocketHalfClosedTest extends AbstractSocketTest { @Test @Timeout(value = 10000, unit = MILLISECONDS) public void testHalfClosureOnlyOneEventWhenAutoRead(TestInfo testInfo) throws Throwable { run(testInfo, new Runner<ServerBootstrap, Bootstrap>() { @Override public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable { testHalfClosureOnlyOneEventWhenAutoRead(serverBootstrap, bootstrap); } }); } public void testHalfClosureOnlyOneEventWhenAutoRead(ServerBootstrap sb, Bootstrap cb) throws Throwable { Channel serverChannel = null; try { cb.option(ChannelOption.ALLOW_HALF_CLOSURE, true) .option(ChannelOption.AUTO_READ, true); sb.childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public void channelActive(ChannelHandlerContext ctx) { ((DuplexChannel) ctx).shutdownOutput(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); final AtomicInteger shutdownEventReceivedCounter = new AtomicInteger(); final AtomicInteger shutdownReadCompleteEventReceivedCounter = new AtomicInteger(); cb.handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public 
void userEventTriggered(final ChannelHandlerContext ctx, Object evt) { if (evt == ChannelInputShutdownEvent.INSTANCE) { shutdownEventReceivedCounter.incrementAndGet(); } else if (evt == ChannelInputShutdownReadComplete.INSTANCE) { shutdownReadCompleteEventReceivedCounter.incrementAndGet(); ctx.executor().schedule(new Runnable() { @Override public void run() { ctx.close(); } }, 100, MILLISECONDS); } } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); serverChannel = sb.bind().sync().channel(); Channel clientChannel = cb.connect(serverChannel.localAddress()).sync().channel(); clientChannel.closeFuture().await(); assertEquals(1, shutdownEventReceivedCounter.get()); assertEquals(1, shutdownReadCompleteEventReceivedCounter.get()); } finally { if (serverChannel != null) { serverChannel.close().sync(); } } } @Test public void testAllDataReadAfterHalfClosure(TestInfo testInfo) throws Throwable { run(testInfo, new Runner<ServerBootstrap, Bootstrap>() { @Override public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable { testAllDataReadAfterHalfClosure(serverBootstrap, bootstrap); } }); } public void testAllDataReadAfterHalfClosure(ServerBootstrap sb, Bootstrap cb) throws Throwable { testAllDataReadAfterHalfClosure(true, sb, cb); testAllDataReadAfterHalfClosure(false, sb, cb); } private static void testAllDataReadAfterHalfClosure(final boolean autoRead, ServerBootstrap sb, Bootstrap cb) throws Throwable { final int totalServerBytesWritten = 1024 * 16; final int numReadsPerReadLoop = 2; final CountDownLatch serverInitializedLatch = new CountDownLatch(1); final CountDownLatch clientReadAllDataLatch = new CountDownLatch(1); final CountDownLatch clientHalfClosedLatch = new CountDownLatch(1); final AtomicInteger clientReadCompletes = new AtomicInteger(); Channel serverChannel = null; Channel clientChannel = null; try { cb.option(ChannelOption.ALLOW_HALF_CLOSURE, true) .option(ChannelOption.AUTO_READ, autoRead) .option(ChannelOption.RCVBUF_ALLOCATOR, new TestNumReadsRecvByteBufAllocator(numReadsPerReadLoop)); sb.childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { ByteBuf buf = ctx.alloc().buffer(totalServerBytesWritten); buf.writerIndex(buf.capacity()); ctx.writeAndFlush(buf).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { ((DuplexChannel) future.channel()).shutdownOutput(); } }); serverInitializedLatch.countDown(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); cb.handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { private int bytesRead; @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { ByteBuf buf = (ByteBuf) msg; bytesRead += buf.readableBytes(); buf.release(); } @Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { if (evt == ChannelInputShutdownEvent.INSTANCE) { clientHalfClosedLatch.countDown(); } else if (evt == ChannelInputShutdownReadComplete.INSTANCE) { ctx.close(); } } @Override public void channelReadComplete(ChannelHandlerContext ctx) { clientReadCompletes.incrementAndGet(); if (bytesRead == 
totalServerBytesWritten) { clientReadAllDataLatch.countDown(); } if (!autoRead) { ctx.read(); } } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); serverChannel = sb.bind().sync().channel(); clientChannel = cb.connect(serverChannel.localAddress()).sync().channel(); clientChannel.read(); serverInitializedLatch.await(); clientReadAllDataLatch.await(); clientHalfClosedLatch.await(); assertTrue(totalServerBytesWritten / numReadsPerReadLoop + 10 > clientReadCompletes.get(), "too many read complete events: " + clientReadCompletes.get()); } finally { if (clientChannel != null) { clientChannel.close().sync(); } if (serverChannel != null) { serverChannel.close().sync(); } } } @Test public void testAutoCloseFalseDoesShutdownOutput(TestInfo testInfo) throws Throwable { // This test only works on Linux / BSD / MacOS as we assume some semantics that are not true for Windows. assumeFalse(PlatformDependent.isWindows()); run(testInfo, new Runner<ServerBootstrap, Bootstrap>() { @Override public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable { testAutoCloseFalseDoesShutdownOutput(serverBootstrap, bootstrap); } }); } public void testAutoCloseFalseDoesShutdownOutput(ServerBootstrap sb, Bootstrap cb) throws Throwable { testAutoCloseFalseDoesShutdownOutput(false, false, sb, cb); testAutoCloseFalseDoesShutdownOutput(false, true, sb, cb); testAutoCloseFalseDoesShutdownOutput(true, false, sb, cb); testAutoCloseFalseDoesShutdownOutput(true, true, sb, cb); } private static void testAutoCloseFalseDoesShutdownOutput(boolean allowHalfClosed, final boolean clientIsLeader, ServerBootstrap sb, Bootstrap cb) throws InterruptedException { final int expectedBytes = 100; final CountDownLatch serverReadExpectedLatch = new CountDownLatch(1); final CountDownLatch doneLatch = new CountDownLatch(1); final AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>(); Channel serverChannel = null; Channel clientChannel = null; try { cb.option(ChannelOption.ALLOW_HALF_CLOSURE, allowHalfClosed) .option(ChannelOption.AUTO_CLOSE, false) .option(ChannelOption.SO_LINGER, 0); sb.childOption(ChannelOption.ALLOW_HALF_CLOSURE, allowHalfClosed) .childOption(ChannelOption.AUTO_CLOSE, false) .childOption(ChannelOption.SO_LINGER, 0); final SimpleChannelInboundHandler<ByteBuf> leaderHandler = new AutoCloseFalseLeader(expectedBytes, serverReadExpectedLatch, doneLatch, causeRef); final SimpleChannelInboundHandler<ByteBuf> followerHandler = new AutoCloseFalseFollower(expectedBytes, serverReadExpectedLatch, doneLatch, causeRef); sb.childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(clientIsLeader ? followerHandler :leaderHandler); } }); cb.handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(clientIsLeader ? 
leaderHandler : followerHandler); } }); serverChannel = sb.bind().sync().channel(); clientChannel = cb.connect(serverChannel.localAddress()).sync().channel(); doneLatch.await(); assertNull(causeRef.get()); } finally { if (clientChannel != null) { clientChannel.close().sync(); } if (serverChannel != null) { serverChannel.close().sync(); } } } private static final class AutoCloseFalseFollower extends SimpleChannelInboundHandler<ByteBuf> { private final int expectedBytes; private final CountDownLatch followerCloseLatch; private final CountDownLatch doneLatch; private final AtomicReference<Throwable> causeRef; private int bytesRead; AutoCloseFalseFollower(int expectedBytes, CountDownLatch followerCloseLatch, CountDownLatch doneLatch, AtomicReference<Throwable> causeRef) { this.expectedBytes = expectedBytes; this.followerCloseLatch = followerCloseLatch; this.doneLatch = doneLatch; this.causeRef = causeRef; } @Override public void channelInactive(ChannelHandlerContext ctx) { checkPrematureClose(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); checkPrematureClose(); } @Override protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { bytesRead += msg.readableBytes(); if (bytesRead >= expectedBytes) { // We write a reply and immediately close our end of the socket. ByteBuf buf = ctx.alloc().buffer(expectedBytes); buf.writerIndex(buf.writerIndex() + expectedBytes); ctx.writeAndFlush(buf).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { future.channel().close().addListener(new ChannelFutureListener() { @Override public void operationComplete(final ChannelFuture future) throws Exception { // This is a bit racy but there is no better way how to handle this in Java11. // The problem is that on close() the underlying FD will not actually be closed directly // but the close will be done after the Selector did process all events. Because of // this we will need to give it a bit time to ensure the FD is actual closed before we // count down the latch and try to write. future.channel().eventLoop().schedule(new Runnable() { @Override public void run() { followerCloseLatch.countDown(); } }, 200, TimeUnit.MILLISECONDS); } }); } }); } } private void checkPrematureClose() { if (bytesRead < expectedBytes) { causeRef.set(new IllegalStateException("follower premature close")); doneLatch.countDown(); } } } private static final class AutoCloseFalseLeader extends SimpleChannelInboundHandler<ByteBuf> { private final int expectedBytes; private final CountDownLatch followerCloseLatch; private final CountDownLatch doneLatch; private final AtomicReference<Throwable> causeRef; private int bytesRead; private boolean seenOutputShutdown; AutoCloseFalseLeader(int expectedBytes, CountDownLatch followerCloseLatch, CountDownLatch doneLatch, AtomicReference<Throwable> causeRef) { this.expectedBytes = expectedBytes; this.followerCloseLatch = followerCloseLatch; this.doneLatch = doneLatch; this.causeRef = causeRef; } @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { ByteBuf buf = ctx.alloc().buffer(expectedBytes); buf.writerIndex(buf.writerIndex() + expectedBytes); ctx.writeAndFlush(buf.retainedDuplicate()); // We wait here to ensure that we write before we have a chance to process the outbound // shutdown event. 
followerCloseLatch.await(); // This write should fail, but we should still be allowed to read the peer's data ctx.writeAndFlush(buf).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { if (future.cause() == null) { causeRef.set(new IllegalStateException("second write should have failed!")); doneLatch.countDown(); } } }); } @Override protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { bytesRead += msg.readableBytes(); if (bytesRead >= expectedBytes) { if (!seenOutputShutdown) { causeRef.set(new IllegalStateException( ChannelOutputShutdownEvent.class.getSimpleName() + " event was not seen")); } doneLatch.countDown(); } } @Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { if (evt instanceof ChannelOutputShutdownEvent) { seenOutputShutdown = true; } } @Override public void channelInactive(ChannelHandlerContext ctx) { checkPrematureClose(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); checkPrematureClose(); } private void checkPrematureClose() { if (bytesRead < expectedBytes || !seenOutputShutdown) { causeRef.set(new IllegalStateException("leader premature close")); doneLatch.countDown(); } } } @Test public void testAllDataReadClosure(TestInfo testInfo) throws Throwable { run(testInfo, new Runner<ServerBootstrap, Bootstrap>() { @Override public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable { testAllDataReadClosure(serverBootstrap, bootstrap); } }); } public void testAllDataReadClosure(ServerBootstrap sb, Bootstrap cb) throws Throwable { testAllDataReadClosure(true, false, sb, cb); testAllDataReadClosure(true, true, sb, cb); testAllDataReadClosure(false, false, sb, cb); testAllDataReadClosure(false, true, sb, cb); } private static void testAllDataReadClosure(final boolean autoRead, final boolean allowHalfClosed, ServerBootstrap sb, Bootstrap cb) throws Throwable { final int totalServerBytesWritten = 1024 * 16; final int numReadsPerReadLoop = 2; final CountDownLatch serverInitializedLatch = new CountDownLatch(1); final CountDownLatch clientReadAllDataLatch = new CountDownLatch(1); final CountDownLatch clientHalfClosedLatch = new CountDownLatch(1); final AtomicInteger clientReadCompletes = new AtomicInteger(); Channel serverChannel = null; Channel clientChannel = null; try { cb.option(ChannelOption.ALLOW_HALF_CLOSURE, allowHalfClosed) .option(ChannelOption.AUTO_READ, autoRead) .option(ChannelOption.RCVBUF_ALLOCATOR, new TestNumReadsRecvByteBufAllocator(numReadsPerReadLoop)); sb.childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { ByteBuf buf = ctx.alloc().buffer(totalServerBytesWritten); buf.writerIndex(buf.capacity()); ctx.writeAndFlush(buf).addListener(ChannelFutureListener.CLOSE); serverInitializedLatch.countDown(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); cb.handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { private int bytesRead; @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { ByteBuf buf = (ByteBuf) msg; bytesRead += buf.readableBytes(); buf.release(); } 
@Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { if (evt == ChannelInputShutdownEvent.INSTANCE && allowHalfClosed) { clientHalfClosedLatch.countDown(); } else if (evt == ChannelInputShutdownReadComplete.INSTANCE) { ctx.close(); } } @Override public void channelInactive(ChannelHandlerContext ctx) { if (!allowHalfClosed) { clientHalfClosedLatch.countDown(); } } @Override public void channelReadComplete(ChannelHandlerContext ctx) { clientReadCompletes.incrementAndGet(); if (bytesRead == totalServerBytesWritten) { clientReadAllDataLatch.countDown(); } if (!autoRead) { ctx.read(); } } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { ctx.close(); } }); } }); serverChannel = sb.bind().sync().channel(); clientChannel = cb.connect(serverChannel.localAddress()).sync().channel(); clientChannel.read(); serverInitializedLatch.await(); clientReadAllDataLatch.await(); clientHalfClosedLatch.await(); assertTrue(totalServerBytesWritten / numReadsPerReadLoop + 10 > clientReadCompletes.get(), "too many read complete events: " + clientReadCompletes.get()); } finally { if (clientChannel != null) { clientChannel.close().sync(); } if (serverChannel != null) { serverChannel.close().sync(); } } } /** * Designed to read a single byte at a time to control the number of reads done at a fine granularity. */ private static final class TestNumReadsRecvByteBufAllocator implements RecvByteBufAllocator { private final int numReads; TestNumReadsRecvByteBufAllocator(int numReads) { this.numReads = numReads; } @Override public ExtendedHandle newHandle() { return new ExtendedHandle() { private int attemptedBytesRead; private int lastBytesRead; private int numMessagesRead; @Override public ByteBuf allocate(ByteBufAllocator alloc) { return alloc.ioBuffer(guess(), guess()); } @Override public int guess() { return 1; // only ever allocate buffers of size 1 to ensure the number of reads is controlled. } @Override public void reset(ChannelConfig config) { numMessagesRead = 0; } @Override public void incMessagesRead(int numMessages) { numMessagesRead += numMessages; } @Override public void lastBytesRead(int bytes) { lastBytesRead = bytes; } @Override public int lastBytesRead() { return lastBytesRead; } @Override public void attemptedBytesRead(int bytes) { attemptedBytesRead = bytes; } @Override public int attemptedBytesRead() { return attemptedBytesRead; } @Override public boolean continueReading() { return numMessagesRead < numReads; } @Override public boolean continueReading(UncheckedBooleanSupplier maybeMoreDataSupplier) { return continueReading() && maybeMoreDataSupplier.get(); } @Override public void readComplete() { // Nothing needs to be done or adjusted after each read cycle is completed. } }; } } }
apache-2.0
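The tests above all hinge on the same small amount of application code: enable ChannelOption.ALLOW_HALF_CLOSURE and react to the two input-shutdown events. A distilled handler sketch (the class name is invented; the events and option are the Netty API the tests themselves use):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.socket.ChannelInputShutdownEvent;
import io.netty.channel.socket.ChannelInputShutdownReadComplete;

public class HalfClosureAwareHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
        if (evt == ChannelInputShutdownEvent.INSTANCE) {
            // The peer shut down its output: our inbound side is half-closed,
            // but there may still be buffered data to read and we may still write.
        } else if (evt == ChannelInputShutdownReadComplete.INSTANCE) {
            // All remaining inbound data has been drained; safe to close now.
            ctx.close();
        } else {
            ctx.fireUserEventTriggered(evt);
        }
    }
}

This handler only ever sees those events when the channel is configured with bootstrap.option(ChannelOption.ALLOW_HALF_CLOSURE, true), exactly as the tests do.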
xunzhang/orc
java/core/src/java/org/apache/orc/impl/IntegerWriter.java
1330
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.orc.impl; import java.io.IOException; /** * Interface for writing integers. */ public interface IntegerWriter { /** * Get position from the stream. * @param recorder * @throws IOException */ void getPosition(PositionRecorder recorder) throws IOException; /** * Write the integer value * @param value * @throws IOException */ void write(long value) throws IOException; /** * Flush the buffer * @throws IOException */ void flush() throws IOException; }
apache-2.0
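A trivial implementation makes the IntegerWriter contract concrete. This toy class is invented for illustration; it compiles against the three interface methods but performs no real encoding.

import java.io.IOException;

import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;

/** Toy writer that only counts values; illustrates the interface shape. */
public class CountingIntegerWriter implements IntegerWriter {
  private long count;

  @Override
  public void getPosition(PositionRecorder recorder) throws IOException {
    // A real writer records its current stream position here so that readers
    // can seek; this toy has no underlying stream, so there is nothing to record.
  }

  @Override
  public void write(long value) throws IOException {
    count++; // a real writer would buffer and encode the value
  }

  @Override
  public void flush() throws IOException {
    // nothing buffered in this toy implementation
  }

  public long count() {
    return count;
  }
}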
sekikn/ambari
ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/MetadataHolder.java
6111
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ambari.server.agent.stomp; import java.util.Map; import java.util.TreeMap; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ClusterNotFoundException; import org.apache.ambari.server.agent.stomp.dto.MetadataCluster; import org.apache.ambari.server.controller.AmbariManagementControllerImpl; import org.apache.ambari.server.events.AmbariPropertiesChangedEvent; import org.apache.ambari.server.events.ClusterComponentsRepoChangedEvent; import org.apache.ambari.server.events.MetadataUpdateEvent; import org.apache.ambari.server.events.ServiceCredentialStoreUpdateEvent; import org.apache.ambari.server.events.ServiceInstalledEvent; import org.apache.ambari.server.events.UpdateEventType; import org.apache.ambari.server.events.publishers.AmbariEventPublisher; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import com.google.common.eventbus.Subscribe; import com.google.inject.Inject; import com.google.inject.Provider; import com.google.inject.Singleton; @Singleton public class MetadataHolder extends AgentClusterDataHolder<MetadataUpdateEvent> { @Inject private AmbariManagementControllerImpl ambariManagementController; @Inject private Provider<Clusters> m_clusters; @Inject public MetadataHolder(AmbariEventPublisher ambariEventPublisher) { ambariEventPublisher.register(this); } @Override public MetadataUpdateEvent getCurrentData() throws AmbariException { return ambariManagementController.getClustersMetadata(); } public MetadataUpdateEvent getDeleteMetadata(Long clusterId) throws AmbariException { TreeMap<String, MetadataCluster> clusterToRemove = new TreeMap<>(); if (clusterId != null) { clusterToRemove.put(Long.toString(clusterId), MetadataCluster.emptyMetadataCluster()); } MetadataUpdateEvent deleteEvent = new MetadataUpdateEvent(clusterToRemove, null , null, UpdateEventType.DELETE); return deleteEvent; } @Override protected boolean handleUpdate(MetadataUpdateEvent update) throws AmbariException { boolean changed = false; UpdateEventType eventType = update.getEventType(); if (MapUtils.isNotEmpty(update.getMetadataClusters())) { for (Map.Entry<String, MetadataCluster> metadataClusterEntry : update.getMetadataClusters().entrySet()) { MetadataCluster updatedCluster = metadataClusterEntry.getValue(); String clusterId = metadataClusterEntry.getKey(); Map<String, MetadataCluster> clusters = getData().getMetadataClusters(); if (clusters.containsKey(clusterId)) { if (eventType.equals(UpdateEventType.DELETE)) { getData().getMetadataClusters().remove(clusterId); changed = true; } else { MetadataCluster cluster = clusters.get(clusterId); if 
(cluster.updateClusterLevelParams(updatedCluster.getClusterLevelParams())) { changed = true; } if (cluster.updateServiceLevelParams(updatedCluster.getServiceLevelParams(), updatedCluster.isFullServiceLevelMetadata())) { changed = true; } if (CollectionUtils.isNotEmpty(updatedCluster.getStatusCommandsToRun()) && !cluster.getStatusCommandsToRun().containsAll(updatedCluster.getStatusCommandsToRun())) { cluster.getStatusCommandsToRun().addAll(updatedCluster.getStatusCommandsToRun()); changed = true; } } } else { if (eventType.equals(UpdateEventType.UPDATE)) { clusters.put(clusterId, updatedCluster); changed = true; } else { throw new ClusterNotFoundException(Long.parseLong(clusterId)); } } } } return changed; } @Override protected MetadataUpdateEvent getEmptyData() { return MetadataUpdateEvent.emptyUpdate(); } @Subscribe public void onServiceCreate(ServiceInstalledEvent serviceInstalledEvent) throws AmbariException { Cluster cluster = m_clusters.get().getCluster(serviceInstalledEvent.getClusterId()); updateData(ambariManagementController.getClusterMetadataOnServiceInstall(cluster, serviceInstalledEvent.getServiceName())); } @Subscribe public void onClusterComponentsRepoUpdate(ClusterComponentsRepoChangedEvent clusterComponentsRepoChangedEvent) throws AmbariException { Cluster cluster = m_clusters.get().getCluster(clusterComponentsRepoChangedEvent.getClusterId()); updateData(ambariManagementController.getClusterMetadataOnRepoUpdate(cluster)); } @Subscribe public void onServiceCredentialStoreUpdate(ServiceCredentialStoreUpdateEvent serviceCredentialStoreUpdateEvent) throws AmbariException { Cluster cluster = m_clusters.get().getCluster(serviceCredentialStoreUpdateEvent.getClusterId()); updateData(ambariManagementController.getClusterMetadataOnServiceCredentialStoreUpdate(cluster, serviceCredentialStoreUpdateEvent.getServiceName())); } @Subscribe public void onAmbariPropertiesChange(AmbariPropertiesChangedEvent event) throws AmbariException { updateData(ambariManagementController.getClustersMetadata()); } }
apache-2.0
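The @Subscribe methods in MetadataHolder follow Guava's event-bus pattern: the constructor registers the holder with the publisher, and each annotated method is invoked when a matching event is posted. A standalone illustration of that mechanism, with invented event and holder types:

import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;

public class EventBusExample {

  static class ServiceInstalled { // stand-in for ServiceInstalledEvent
    final String serviceName;
    ServiceInstalled(String serviceName) { this.serviceName = serviceName; }
  }

  static class Holder {
    Holder(EventBus bus) {
      bus.register(this); // like registering with the injected AmbariEventPublisher
    }

    @Subscribe
    public void onServiceCreate(ServiceInstalled event) {
      System.out.println("updating metadata for " + event.serviceName);
    }
  }

  public static void main(String[] args) {
    EventBus bus = new EventBus();
    new Holder(bus);
    bus.post(new ServiceInstalled("HDFS")); // dispatched to the @Subscribe method
  }
}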
fanwu123/mvptodo
Application/src/main/java/com/fanwu/chat/utils/ActivityCollector.java
1411
package com.fanwu.chat.utils; import android.content.Intent; import com.fanwu.chat.mvp.BaseActivity; import java.util.ArrayList; import java.util.List; /** * Created by yuyidong on 15/8/9. */ public class ActivityCollector { private static final String TAG = ActivityCollector.class.getSimpleName(); private static List<BaseActivity> activities = new ArrayList<>(); public static void addActivity(BaseActivity activity) { YLog.i(TAG, "addActivity(" + activity.getClass().getSimpleName() + ")"); activities.add(activity); } public static void removeActivity(BaseActivity activity) { YLog.i(TAG, "removeActivity(" + activity.getClass().getSimpleName() + ")"); activities.remove(activity); } public static void reStart(BaseActivity fromActivity, Class<?>... toClass) { if (toClass != null) { for (int i = 0; i < toClass.length; i++) { YLog.i(TAG, "reStart(),start this activity :" + toClass[i].getSimpleName()); fromActivity.startActivity(new Intent(fromActivity, toClass[i])); } } for (BaseActivity baseActivity : activities) { if (!baseActivity.isFinishing()) { YLog.i(TAG, "reStart(),finish this activity :" + baseActivity.getClass().getSimpleName()); baseActivity.finish(); } } } }
apache-2.0
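Typical usage registers every activity in onCreate and unregisters it in onDestroy, so reStart can later finish them all. A sketch, assuming BaseActivity extends android.app.Activity:

import android.os.Bundle;

import com.fanwu.chat.mvp.BaseActivity;
import com.fanwu.chat.utils.ActivityCollector;

public class MainActivity extends BaseActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        ActivityCollector.addActivity(this);
    }

    @Override
    protected void onDestroy() {
        ActivityCollector.removeActivity(this);
        super.onDestroy();
    }
}

After, say, a forced logout, ActivityCollector.reStart(current, LoginActivity.class) would start LoginActivity (a hypothetical target) and then finish every collected activity.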
xasx/assertj-core
src/test/java/org/assertj/core/api/longarray/LongArrayAssert_doesNotHaveDuplicates_Test.java
1259
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Copyright 2012-2019 the original author or authors. */ package org.assertj.core.api.longarray; import org.assertj.core.api.LongArrayAssert; import org.assertj.core.api.LongArrayAssertBaseTest; import static org.mockito.Mockito.verify; /** * Tests for <code>{@link LongArrayAssert#doesNotHaveDuplicates()}</code>. * * @author Alex Ruiz */ public class LongArrayAssert_doesNotHaveDuplicates_Test extends LongArrayAssertBaseTest { @Override protected LongArrayAssert invoke_api_method() { return assertions.doesNotHaveDuplicates(); } @Override protected void verify_internal_effects() { verify(arrays).assertDoesNotHaveDuplicates(getInfo(assertions), getActual(assertions)); } }
apache-2.0
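The public API exercised by this test reads naturally at the call site. A minimal usage sketch (the class name is invented) using only the standard AssertJ entry point:

import static org.assertj.core.api.Assertions.assertThat;

public class DoesNotHaveDuplicatesExample {
  public static void main(String[] args) {
    assertThat(new long[] {1L, 2L, 3L}).doesNotHaveDuplicates(); // passes
    assertThat(new long[] {1L, 2L, 1L}).doesNotHaveDuplicates(); // throws AssertionError
  }
}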
lwf/proxymgr
spec/unit/watcher/base_spec.rb
854
require 'spec_helper' describe ProxyMgr::Watcher::Base do before(:each) do @sm = double(ProxyMgr::ServiceManager) end it 'is valid if port is correctly specified' do watcher = ProxyMgr::Watcher::Dummy.new('test', {'port' => 8080}, @sm) watcher.valid?.should == true end it 'does not start if port is not an integer' do watcher = ProxyMgr::Watcher::Dummy.new('test', {'port' => 'false'}, @sm) watcher.valid?.should == false end it 'does not start if port is invalid' do watcher = ProxyMgr::Watcher::Dummy.new('test', {'port' => 0}, @sm) watcher.valid?.should == false end it 'is valid if listen_options specified and is an array' do watcher = ProxyMgr::Watcher::Dummy.new('test', {'port' => 65535, 'listen_options' => ['a config option', 'another']}, @sm) watcher.valid?.should == true end end
apache-2.0
tgraf/cilium
api/v1/server/restapi/daemon/patch_config_responses.go
3217
// Code generated by go-swagger; DO NOT EDIT. // Copyright 2017-2021 Authors of Cilium // SPDX-License-Identifier: Apache-2.0 package daemon // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "net/http" "github.com/go-openapi/runtime" "github.com/cilium/cilium/api/v1/models" ) // PatchConfigOKCode is the HTTP code returned for type PatchConfigOK const PatchConfigOKCode int = 200 /*PatchConfigOK Success swagger:response patchConfigOK */ type PatchConfigOK struct { } // NewPatchConfigOK creates PatchConfigOK with default headers values func NewPatchConfigOK() *PatchConfigOK { return &PatchConfigOK{} } // WriteResponse to the client func (o *PatchConfigOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses rw.WriteHeader(200) } // PatchConfigBadRequestCode is the HTTP code returned for type PatchConfigBadRequest const PatchConfigBadRequestCode int = 400 /*PatchConfigBadRequest Bad configuration parameters swagger:response patchConfigBadRequest */ type PatchConfigBadRequest struct { /* In: Body */ Payload models.Error `json:"body,omitempty"` } // NewPatchConfigBadRequest creates PatchConfigBadRequest with default headers values func NewPatchConfigBadRequest() *PatchConfigBadRequest { return &PatchConfigBadRequest{} } // WithPayload adds the payload to the patch config bad request response func (o *PatchConfigBadRequest) WithPayload(payload models.Error) *PatchConfigBadRequest { o.Payload = payload return o } // SetPayload sets the payload to the patch config bad request response func (o *PatchConfigBadRequest) SetPayload(payload models.Error) { o.Payload = payload } // WriteResponse to the client func (o *PatchConfigBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(400) payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } // PatchConfigFailureCode is the HTTP code returned for type PatchConfigFailure const PatchConfigFailureCode int = 500 /*PatchConfigFailure Recompilation failed swagger:response patchConfigFailure */ type PatchConfigFailure struct { /* In: Body */ Payload models.Error `json:"body,omitempty"` } // NewPatchConfigFailure creates PatchConfigFailure with default headers values func NewPatchConfigFailure() *PatchConfigFailure { return &PatchConfigFailure{} } // WithPayload adds the payload to the patch config failure response func (o *PatchConfigFailure) WithPayload(payload models.Error) *PatchConfigFailure { o.Payload = payload return o } // SetPayload sets the payload to the patch config failure response func (o *PatchConfigFailure) SetPayload(payload models.Error) { o.Payload = payload } // WriteResponse to the client func (o *PatchConfigFailure) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(500) payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } }
apache-2.0
mbode/flink
flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/MaxWithRetractAggFunction.java
11887
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.functions.aggfunctions;

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.PojoField;
import org.apache.flink.api.java.typeutils.PojoTypeInfo;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.table.api.dataview.MapView;
import org.apache.flink.table.dataformat.BinaryString;
import org.apache.flink.table.dataformat.Decimal;
import org.apache.flink.table.functions.AggregateFunction;
import org.apache.flink.table.runtime.typeutils.BinaryStringTypeInfo;
import org.apache.flink.table.runtime.typeutils.DecimalTypeInfo;

import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * built-in Max with retraction aggregate function.
 */
public abstract class MaxWithRetractAggFunction<T extends Comparable>
		extends AggregateFunction<T, MaxWithRetractAggFunction.MaxWithRetractAccumulator<T>> {

	private static final long serialVersionUID = -5860934997657147836L;

	/** The initial accumulator for Max with retraction aggregate function. */
	public static class MaxWithRetractAccumulator<T> {
		public T max;
		public Long mapSize;
		public MapView<T, Long> map;
	}

	@Override
	public MaxWithRetractAccumulator<T> createAccumulator() {
		MaxWithRetractAccumulator<T> acc = new MaxWithRetractAccumulator<>();
		acc.max = null; // max
		acc.mapSize = 0L; // store the count for each value
		acc.map = new MapView<>(getValueTypeInfo(), BasicTypeInfo.LONG_TYPE_INFO);
		return acc;
	}

	public void accumulate(MaxWithRetractAccumulator<T> acc, Object value) throws Exception {
		if (value != null) {
			T v = (T) value;

			if (acc.mapSize == 0L || acc.max.compareTo(v) < 0) {
				acc.max = v;
			}

			Long count = acc.map.get(v);
			if (count == null) {
				count = 0L;
			}
			count += 1L;
			if (count == 0) {
				// remove it when count is increased from -1 to 0
				acc.map.remove(v);
			} else {
				// store it when count is NOT zero
				acc.map.put(v, count);
			}
			if (count == 1L) {
				// previous count is zero, this is the first time to see the key
				acc.mapSize += 1;
			}
		}
	}

	public void retract(MaxWithRetractAccumulator<T> acc, Object value) throws Exception {
		if (value != null) {
			T v = (T) value;

			Long count = acc.map.get(v);
			if (count == null) {
				count = 0L;
			}
			count -= 1;
			if (count == 0) {
				// remove it when count is decreased from 1 to 0
				acc.map.remove(v);
				acc.mapSize -= 1L;

				// if the total count is 0, we can simply reset the max to the initial value
				if (acc.mapSize == 0) {
					acc.max = null;
					return;
				}
				// if v is the current max value, we have to iterate the map to find the 2nd biggest
				// value to replace v as the max value
				if (v.equals(acc.max)) {
					updateMax(acc);
				}
			} else {
				// store it when count is NOT zero
				acc.map.put(v, count);
				// keys with a negative count are not reflected in mapSize
			}
		}
	}

	private void updateMax(MaxWithRetractAccumulator<T> acc) throws Exception {
		boolean hasMax = false;
		for (T key : acc.map.keys()) {
			if (!hasMax || acc.max.compareTo(key) < 0) {
				acc.max = key;
				hasMax = true;
			}
		}
		// The behavior of deleting expired data in the state backend is uncertain,
		// so `mapSize` data may still exist while `map` data may already have been
		// deleted when both of them are expired.
		if (!hasMax) {
			acc.mapSize = 0L;
			// we should also override the max value, because it may hold an old value.
			acc.max = null;
		}
	}

	public void merge(MaxWithRetractAccumulator<T> acc, Iterable<MaxWithRetractAccumulator<T>> its) throws Exception {
		boolean needUpdateMax = false;
		for (MaxWithRetractAccumulator<T> a : its) {
			// set max element
			if (acc.mapSize == 0 || (a.mapSize > 0 && a.max != null && acc.max.compareTo(a.max) < 0)) {
				acc.max = a.max;
			}
			// merge the count for each key
			for (Map.Entry entry : a.map.entries()) {
				T key = (T) entry.getKey();
				Long otherCount = (Long) entry.getValue(); // non-null
				Long thisCount = acc.map.get(key);
				if (thisCount == null) {
					thisCount = 0L;
				}
				long mergedCount = otherCount + thisCount;
				if (mergedCount == 0) {
					// remove the key when the merged count is zero
					acc.map.remove(key);
					if (thisCount > 0) {
						// origin is > 0, and retract to 0
						acc.mapSize -= 1;
						if (key.equals(acc.max)) {
							needUpdateMax = true;
						}
					}
				} else if (mergedCount < 0) {
					acc.map.put(key, mergedCount);
					if (thisCount > 0) {
						// origin is > 0, and retract to < 0
						acc.mapSize -= 1;
						if (key.equals(acc.max)) {
							needUpdateMax = true;
						}
					}
				} else { // mergedCount > 0
					acc.map.put(key, mergedCount);
					if (thisCount <= 0) {
						// origin is <= 0, and accumulate to > 0
						acc.mapSize += 1;
					}
				}
			}
		}
		if (needUpdateMax) {
			updateMax(acc);
		}
	}

	public void resetAccumulator(MaxWithRetractAccumulator<T> acc) {
		acc.max = null;
		acc.mapSize = 0L;
		acc.map.clear();
	}

	@Override
	public T getValue(MaxWithRetractAccumulator<T> acc) {
		if (acc.mapSize > 0) {
			return acc.max;
		} else {
			return null;
		}
	}

	@Override
	public TypeInformation<MaxWithRetractAccumulator<T>> getAccumulatorType() {
		PojoTypeInfo pojoType = (PojoTypeInfo) TypeExtractor.createTypeInfo(MaxWithRetractAccumulator.class);
		List<PojoField> pojoFields = new ArrayList<>();
		for (int i = 0; i < pojoType.getTotalFields(); i++) {
			PojoField field = pojoType.getPojoFieldAt(i);
			if (field.getField().getName().equals("max")) {
				pojoFields.add(new PojoField(field.getField(), getValueTypeInfo()));
			} else {
				pojoFields.add(field);
			}
		}
		//noinspection unchecked
		return new PojoTypeInfo(pojoType.getTypeClass(), pojoFields);
	}

	@Override
	public TypeInformation<T> getResultType() {
		return getValueTypeInfo();
	}

	protected abstract TypeInformation<T> getValueTypeInfo();

	/**
	 * Built-in Byte Max with retraction aggregate function.
	 */
	public static class ByteMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Byte> {

		private static final long serialVersionUID = 7383980948808353819L;

		@Override
		protected TypeInformation<Byte> getValueTypeInfo() {
			return BasicTypeInfo.BYTE_TYPE_INFO;
		}
	}

	/**
	 * Built-in Short Max with retraction aggregate function.
	 */
	public static class ShortMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Short> {

		private static final long serialVersionUID = 7579072678911328694L;

		@Override
		protected TypeInformation<Short> getValueTypeInfo() {
			return BasicTypeInfo.SHORT_TYPE_INFO;
		}
	}

	/**
	 * Built-in Int Max with retraction aggregate function.
	 */
	public static class IntMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Integer> {

		private static final long serialVersionUID = 3833976566544263072L;

		@Override
		protected TypeInformation<Integer> getValueTypeInfo() {
			return BasicTypeInfo.INT_TYPE_INFO;
		}
	}

	/**
	 * Built-in Long Max with retraction aggregate function.
	 */
	public static class LongMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Long> {

		private static final long serialVersionUID = 8585384188523017375L;

		@Override
		protected TypeInformation<Long> getValueTypeInfo() {
			return BasicTypeInfo.LONG_TYPE_INFO;
		}
	}

	/**
	 * Built-in Float Max with retraction aggregate function.
	 */
	public static class FloatMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Float> {

		private static final long serialVersionUID = -1433882434794024584L;

		@Override
		protected TypeInformation<Float> getValueTypeInfo() {
			return BasicTypeInfo.FLOAT_TYPE_INFO;
		}
	}

	/**
	 * Built-in Double Max with retraction aggregate function.
	 */
	public static class DoubleMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Double> {

		private static final long serialVersionUID = -1525221057708740308L;

		@Override
		protected TypeInformation<Double> getValueTypeInfo() {
			return BasicTypeInfo.DOUBLE_TYPE_INFO;
		}
	}

	/**
	 * Built-in Boolean Max with retraction aggregate function.
	 */
	public static class BooleanMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Boolean> {

		private static final long serialVersionUID = -8408715018822625309L;

		@Override
		protected TypeInformation<Boolean> getValueTypeInfo() {
			return BasicTypeInfo.BOOLEAN_TYPE_INFO;
		}
	}

	/**
	 * Built-in Big Decimal Max with retraction aggregate function.
	 */
	public static class DecimalMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Decimal> {

		private static final long serialVersionUID = 5301860581297042635L;

		private DecimalTypeInfo decimalType;

		public DecimalMaxWithRetractAggFunction(DecimalTypeInfo decimalType) {
			this.decimalType = decimalType;
		}

		public void accumulate(MaxWithRetractAccumulator<Decimal> acc, Decimal value) throws Exception {
			super.accumulate(acc, value);
		}

		public void retract(MaxWithRetractAccumulator<Decimal> acc, Decimal value) throws Exception {
			super.retract(acc, value);
		}

		@Override
		protected TypeInformation<Decimal> getValueTypeInfo() {
			return decimalType;
		}
	}

	/**
	 * Built-in String Max with retraction aggregate function.
	 */
	public static class StringMaxWithRetractAggFunction extends MaxWithRetractAggFunction<BinaryString> {

		private static final long serialVersionUID = 787528574867514796L;

		public void accumulate(MaxWithRetractAccumulator<BinaryString> acc, BinaryString value) throws Exception {
			super.accumulate(acc, value);
		}

		public void retract(MaxWithRetractAccumulator<BinaryString> acc, BinaryString value) throws Exception {
			super.retract(acc, value);
		}

		@Override
		protected TypeInformation<BinaryString> getValueTypeInfo() {
			return BinaryStringTypeInfo.INSTANCE;
		}
	}

	/**
	 * Built-in Timestamp Max with retraction aggregate function.
	 */
	public static class TimestampMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Timestamp> {

		private static final long serialVersionUID = -7096481949093142944L;

		@Override
		protected TypeInformation<Timestamp> getValueTypeInfo() {
			return Types.SQL_TIMESTAMP;
		}
	}

	/**
	 * Built-in Date Max with retraction aggregate function.
	 */
	public static class DateMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Date> {

		private static final long serialVersionUID = 7452698503075473023L;

		@Override
		protected TypeInformation<Date> getValueTypeInfo() {
			return Types.SQL_DATE;
		}
	}

	/**
	 * Built-in Time Max with retraction aggregate function.
	 */
	public static class TimeMaxWithRetractAggFunction extends MaxWithRetractAggFunction<Time> {

		private static final long serialVersionUID = 3578216747876121493L;

		@Override
		protected TypeInformation<Time> getValueTypeInfo() {
			return Types.SQL_TIME;
		}
	}
}
apache-2.0