Dataset schema (each record below lists these fields in this order):
author: int64 (658 to 755k)
date: string, length 19
timezone: int64 (-46,800 to 43.2k)
hash: string, length 40
message: string, length 5 to 490
mods: list of file changes (change_type, old_path, new_path, diff)
language: string, one of 20 classes
license: string, one of 3 classes
repo: string, length 5 to 68
original_message: string, length 12 to 491
596,240
23.03.2018 10:30:15
-3,600
0cfda7ce4c551acb10b717af6f97cddea9e641eb
removes unneeded workaround
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.7.go", "new_path": "pkg/templates/node_1.7.go", "diff": "@@ -27,21 +27,6 @@ systemd:\nExecStart=/usr/bin/coreos-metadata --provider=openstack-metadata --attributes=/run/metadata/coreos --ssh-keys=core --hostname=/etc/hostname\nRestart=on-failure\nRestartSec=30\n- - name: ccloud-metadata-hostname.service\n- enable: true\n- contents: |\n- [Unit]\n- Description=Workaround for coreos-metadata hostname bug\n- Requires=ccloud-metadata.service\n- After=ccloud-metadata.service\n-\n- [Service]\n- Type=oneshot\n- EnvironmentFile=/run/metadata/coreos\n- ExecStart=/usr/bin/hostnamectl set-hostname ${COREOS_OPENSTACK_HOSTNAME}\n-\n- [Install]\n- WantedBy=multi-user.target\n- name: docker.service\nenable: true\ndropins:\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.8.go", "new_path": "pkg/templates/node_1.8.go", "diff": "@@ -27,21 +27,6 @@ systemd:\nExecStart=/usr/bin/coreos-metadata --provider=openstack-metadata --attributes=/run/metadata/coreos --ssh-keys=core --hostname=/etc/hostname\nRestart=on-failure\nRestartSec=30\n- - name: ccloud-metadata-hostname.service\n- enable: true\n- contents: |\n- [Unit]\n- Description=Workaround for coreos-metadata hostname bug\n- Requires=ccloud-metadata.service\n- After=ccloud-metadata.service\n-\n- [Service]\n- Type=oneshot\n- EnvironmentFile=/run/metadata/coreos\n- ExecStart=/usr/bin/hostnamectl set-hostname ${COREOS_OPENSTACK_HOSTNAME}\n-\n- [Install]\n- WantedBy=multi-user.target\n- name: docker.service\nenable: true\ndropins:\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.9.go", "new_path": "pkg/templates/node_1.9.go", "diff": "@@ -27,21 +27,6 @@ systemd:\nExecStart=/usr/bin/coreos-metadata --provider=openstack-metadata --attributes=/run/metadata/coreos --ssh-keys=core --hostname=/etc/hostname\nRestart=on-failure\nRestartSec=30\n- - name: ccloud-metadata-hostname.service\n- enable: true\n- contents: |\n- [Unit]\n- Description=Workaround for coreos-metadata hostname bug\n- Requires=ccloud-metadata.service\n- After=ccloud-metadata.service\n-\n- [Service]\n- Type=oneshot\n- EnvironmentFile=/run/metadata/coreos\n- ExecStart=/usr/bin/hostnamectl set-hostname ${COREOS_OPENSTACK_HOSTNAME}\n-\n- [Install]\n- WantedBy=multi-user.target\n- name: docker.service\nenable: true\ndropins:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes unneeded workaround
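The mods above strip a systemd oneshot unit out of ignition configs that are embedded as strings in Go template files; the surviving coreos-metadata unit already writes /etc/hostname via its --hostname flag, which presumably made the hostnamectl workaround redundant. A minimal sketch of how such an embedded node template might be rendered, assuming text/template and a heavily trimmed unit list (the real pkg/templates/node_*.go files are much larger):

```go
package main

import (
	"os"
	"text/template"
)

// Trimmed stand-in for the node templates edited above.
const nodeTemplate = `systemd:
  units:
    - name: ccloud-metadata.service
      enable: true
      contents: |
        [Service]
        ExecStart=/usr/bin/coreos-metadata --provider=openstack-metadata --hostname=/etc/hostname
        Restart=on-failure
        RestartSec=30
`

func main() {
	// The real templates are parameterized per kluster; nothing is substituted here.
	tmpl := template.Must(template.New("node").Parse(nodeTemplate))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```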
596,240
23.03.2018 11:31:33
-3,600
a123942ecf614b0e28499549fcf183edd1133723
removes loop through sniffer for internal communication
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/Chart.yaml", "new_path": "charts/kube-master/Chart.yaml", "diff": "apiVersion: v1\ndescription: A Helm chart for Kubernetes\nname: kube-master\n-version: 1.9.0-kubernikus.2\n+version: 1.9.0-kubernikus.3\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/configmap.yaml", "new_path": "charts/kube-master/templates/configmap.yaml", "diff": "@@ -14,7 +14,7 @@ data:\n- name: local\ncluster:\ncertificate-authority: /etc/kubernetes/certs/tls-ca.pem\n- server: https://{{ required \"missing .api.apiserverHost\" .Values.api.apiserverHost }}\n+ server: https://{{ include \"master.fullname\" . }}-apiserver\ncontexts:\n- name: local\ncontext:\n" }, { "change_type": "MODIFY", "old_path": "pkg/util/certificates.go", "new_path": "pkg/util/certificates.go", "diff": "@@ -189,7 +189,7 @@ func CreateCertificates(kluster *v1.Kluster, apiURL, authURL, domain string) (ma\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n- []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain)},\n+ []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain), fmt.Sprintf(\"%v-apiserver\", kluster.Name)},\n[]net.IP{net.IPv4(127, 0, 0, 1), apiIP})\ncerts.TLS.Wormhole = certs.signTLS(\"wormhole\",\n[]string{fmt.Sprintf(\"%v-wormhole.%v\", kluster.Name, domain)}, []net.IP{})\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes loop through sniffer for internal communication
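The certificates.go hunk adds a `<kluster>-apiserver` DNS SAN so in-cluster clients can reach the apiserver by its Kubernetes service name instead of looping out through the external endpoint. A standard-library sketch of building a certificate with an equivalent SAN list (self-signed here for brevity; the kluster name `demo` and domain `example.com` are hypothetical):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "apiserver"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		// SANs analogous to what signTLS assembles, including the
		// "<kluster>-apiserver" service name added by this commit.
		DNSNames:    []string{"kubernetes", "kubernetes.default", "apiserver", "demo", "demo.example.com", "demo-apiserver"},
		IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
		KeyUsage:    x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, tpl, tpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("issued cert with %d SANs, %d bytes\n", len(tpl.DNSNames), len(der))
}
```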
596,240
23.03.2018 14:16:46
-3,600
a63a9fd222062156a7af41bc1b2a4bd650fa233a
fixes apiserver service configuration
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/configmap.yaml", "new_path": "charts/kube-master/templates/configmap.yaml", "diff": "@@ -14,7 +14,7 @@ data:\n- name: local\ncluster:\ncertificate-authority: /etc/kubernetes/certs/tls-ca.pem\n- server: https://{{ include \"master.fullname\" . }}-apiserver\n+ server: https://{{ include \"master.fullname\" . }}:6443\ncontexts:\n- name: local\ncontext:\n" }, { "change_type": "MODIFY", "old_path": "pkg/util/certificates.go", "new_path": "pkg/util/certificates.go", "diff": "@@ -189,7 +189,7 @@ func CreateCertificates(kluster *v1.Kluster, apiURL, authURL, domain string) (ma\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n- []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain), fmt.Sprintf(\"%v-apiserver\", kluster.Name)},\n+ []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain)},\n[]net.IP{net.IPv4(127, 0, 0, 1), apiIP})\ncerts.TLS.Wormhole = certs.signTLS(\"wormhole\",\n[]string{fmt.Sprintf(\"%v-wormhole.%v\", kluster.Name, domain)}, []net.IP{})\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes apiserver service configuration
596,240
09.04.2018 11:02:30
-7,200
b50002efddc8c771c6e66ecb07732eb0235d689a
fixes nodepool race creation/deletion. now without caching (tm)
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/kluster/client.go", "new_path": "pkg/client/openstack/kluster/client.go", "diff": "@@ -94,13 +94,13 @@ func (c *klusterClient) DeleteNode(id string) (err error) {\n}\nfunc (c *klusterClient) ListNodes(pool *models.NodePool) (nodes []Node, err error) {\n- obj, exists, err := c.NodeStore.Get(cachedNodesEntry{c.Kluster, pool, nil})\n- if err != nil {\n- return nil, err\n- }\n- if exists {\n- return obj.(cachedNodesEntry).Nodes, nil\n- }\n+ //obj, exists, err := c.NodeStore.Get(cachedNodesEntry{c.Kluster, pool, nil})\n+ //if err != nil {\n+ // return nil, err\n+ //}\n+ //if exists {\n+ // return obj.(cachedNodesEntry).Nodes, nil\n+ //}\nprefix := fmt.Sprintf(\"%v-%v-\", c.Kluster.Spec.Name, pool.Name)\nerr = servers.List(c.ComputeClient, servers.ListOpts{Name: prefix}).EachPage(func(page pagination.Page) (bool, error) {\n@@ -114,10 +114,10 @@ func (c *klusterClient) ListNodes(pool *models.NodePool) (nodes []Node, err erro\nreturn nil, err\n}\n- err = c.NodeStore.Add(cachedNodesEntry{c.Kluster, pool, nodes})\n- if err != nil {\n- return nil, err\n- }\n+ //err = c.NodeStore.Add(cachedNodesEntry{c.Kluster, pool, nodes})\n+ //if err != nil {\n+ // return nil, err\n+ //}\nreturn nodes, nil\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes nodepool race creation/deletion. now without caching (tm)
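With the node cache commented out, ListNodes always pages through Nova. A self-contained sketch of that prefix-filtered listing, assuming the same gophercloud packages the diff already imports:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
	"github.com/gophercloud/gophercloud/pagination"
)

// listNodes pages through all servers whose name starts with prefix,
// mirroring the uncached path in klusterClient.ListNodes.
func listNodes(compute *gophercloud.ServiceClient, prefix string) ([]servers.Server, error) {
	var nodes []servers.Server
	err := servers.List(compute, servers.ListOpts{Name: prefix}).EachPage(func(page pagination.Page) (bool, error) {
		list, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		nodes = append(nodes, list...)
		return true, nil // keep paging
	})
	return nodes, err
}

func main() {
	fmt.Println("listNodes needs an authenticated compute client to run")
}
```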
596,233
13.04.2018 11:07:39
-7,200
8dbf3c188c311183859b4088f059d356be28fea1
add KubernikusKlusterUnavailable alert
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus-system/charts/prometheus/kubernikus.alerts", "new_path": "charts/kubernikus-system/charts/prometheus/kubernikus.alerts", "diff": "@@ -25,14 +25,26 @@ groups:\ndescription: \"{{ $labels.instance }} is unavailable\"\nsummary: \"{{ $labels.instance }} is unavailable\"\n- - alert: BackendUnavailable\n- expr: probe_success != 1\n+ - alert: KubernikusKlusterUnavailable\n+ expr: probe_success{kubernetes_namespace=\"kubernikus\"} != 1\nfor: 10m\nlabels:\ntier: kubernikus\nservice: kubernikus\nseverity: warning\n- context: availability\n+ context: kluster\n+ annotations:\n+ description: \"{{ $labels.instance }} is unavailable\"\n+ summary: \"{{ $labels.instance }} is unavailable\"\n+\n+ - alert: BackendUnavailable\n+ expr: probe_success{kubernetes_name!=\"kubernikus-api\"} != 1\n+ for: 10m\n+ labels:\n+ tier: kubernikus\n+ service: kubernikus\n+ severity: info\n+ context: undecided\nannotations:\ndescription: \"{{ $labels.instance }} is unavailable\"\nsummary: \"{{ $labels.instance }} is unavailable\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add KubernikusKlusterUnavailable alert
596,240
12.04.2018 14:20:58
-7,200
23dcf921dc425b91d5c34bd8cae45e96456fe998
fixes e2e tests name for qa soak tests
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -136,7 +136,7 @@ auth_e2e_qa-de-1: &auth_e2e_qa-de-1\nOS_USER_DOMAIN_NAME: ccadmin\nOS_PROJECT_NAME: kubernikus-e2e\nOS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: e2e_qa-de-1\n+ KUBERNIKUS_NAME: e2e\nKUBERNIKUS_URL: https://kubernikus.qa-de-1.cloud.sap\nresources:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes e2e tests name for qa soak tests
596,240
24.04.2018 13:55:04
-7,200
09e1ef476176fc0016bf79ff049be170d0c0daab
updates pipeline for eu-de-2
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -29,6 +29,10 @@ auth_feature: &auth_feature\nKUBERNIKUS_NAME: k-feature\nKUBERNIKUS_URL: https://k-feature.admin.cloud.sap\n+auth_feature: &auth_infra\n+ KUBERNIKUS_NAME: k-infra\n+ KUBERNIKUS_URL: https://k-infra.admin.cloud.sap\n+\nauth_staging: &auth_staging\nOS_AUTH_URL: https://identity-3.staging.cloud.sap/v3\nOS_USERNAME: {{kubernikus-staging-username}}\n@@ -69,6 +73,16 @@ auth_eu-de-1: &auth_eu-de-1\nKUBERNIKUS_NAME: k-eu-de-1\nKUBERNIKUS_URL: https://k-eu-de-1.admin.cloud.sap\n+auth_eu-de-1: &auth_eu-de-2\n+ OS_AUTH_URL: https://identity-3.eu-de-2.cloud.sap/v3\n+ OS_USERNAME: {{kubernikus-prod-username}}\n+ OS_PASSWORD: {{kubernikus-prod-password}}\n+ OS_USER_DOMAIN_NAME: ccadmin\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-eu-de-2\n+ KUBERNIKUS_URL: https://k-eu-de-2.admin.cloud.sap\n+\nauth_eu-nl-1: &auth_eu-nl-1\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\nOS_USERNAME: {{kubernikus-prod-username}}\n@@ -163,6 +177,17 @@ resources:\nbranch: feature\ndepth: 1\n+ - name: infra.builds\n+ type: gh-status\n+ source:\n+ username: sapcc-bot\n+ password: ((github-com-access-token))\n+ owner: sapcc\n+ repo: kubernikus\n+ access_token: ((github-com-access-token))\n+ branch: infra\n+ depth: 1\n+\n- name: secrets.git\ntype: git\nsource:\n@@ -233,6 +258,22 @@ jobs:\nparams:\n<<: *auth_feature\n+ - name: infra\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.builds\n+ resource: infra.builds\n+ trigger: true\n+ - aggregate:\n+ - task: k-infra\n+ file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n+ params:\n+ REGION: admin\n+ KUBERNIKUS_NAME: k-infra\n+ GITHUB_TOKEN: ((github-access-token))\n+\n- name: e2e\nserial: true\nplan:\n@@ -278,6 +319,10 @@ jobs:\nfile: kubernikus.builds/ci/task_helm_kubernikus.yaml\nparams:\n<<: *auth_eu-de-1\n+ - task: kubernikus_eu-de-2\n+ file: kubernikus.builds/ci/task_helm_kubernikus.yaml\n+ params:\n+ <<: *auth_eu-de-2\n- task: kubernikus_eu-nl-1\nfile: kubernikus.builds/ci/task_helm_kubernikus.yaml\nparams:\n@@ -286,6 +331,10 @@ jobs:\nfile: kubernikus.builds/ci/task_helm_kubernikus-system.yaml\nparams:\n<<: *auth_eu-de-1\n+ - task: kubernikus-system_eu-de-2\n+ file: kubernikus.builds/ci/task_helm_kubernikus-system.yaml\n+ params:\n+ <<: *auth_eu-de-2\n- task: kubernikus-system_eu-nl-1\nfile: kubernikus.builds/ci/task_helm_kubernikus-system.yaml\nparams:\n@@ -363,6 +412,12 @@ jobs:\nREGION: admin\nKUBERNIKUS_NAME: k-eu-de-1\nGITHUB_TOKEN: ((github-access-token))\n+ - task: k-eu-de-2\n+ file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n+ params:\n+ REGION: admin\n+ KUBERNIKUS_NAME: k-eu-de-2\n+ GITHUB_TOKEN: ((github-access-token))\n- task: k-eu-nl-1\nfile: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\nparams:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
updates pipeline for eu-de-2
596,240
24.04.2018 15:48:39
-7,200
af735c3df2d57edfcfb0274e40c0ed4e133f518d
adds auth info for infra kluster
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -29,7 +29,13 @@ auth_feature: &auth_feature\nKUBERNIKUS_NAME: k-feature\nKUBERNIKUS_URL: https://k-feature.admin.cloud.sap\n-auth_feature: &auth_infra\n+auth_infra: &auth_infra\n+ OS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n+ OS_USERNAME: {{kubernikus-prod-username}}\n+ OS_PASSWORD: {{kubernikus-prod-password}}\n+ OS_USER_DOMAIN_NAME: ccadmin\n+ OS_PROJECT_NAME: kubernikus-infra\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-infra\nKUBERNIKUS_URL: https://k-infra.admin.cloud.sap\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds auth info for infra kluster
596,240
26.04.2018 17:08:50
-7,200
413461b9fa96460ed6870c79e8b640b36d0c2321
increases timeout waiting for PVs
[ { "change_type": "MODIFY", "old_path": "test/e2e/util.go", "new_path": "test/e2e/util.go", "diff": "@@ -287,7 +287,7 @@ func (s *E2ETestSuite) waitForPVC(pvc *v1.PersistentVolumeClaim) (*v1.Persistent\nreturn nil, err\n}\n- _, err = watch.Until(5*time.Minute, w, isPVCBound)\n+ _, err = watch.Until(10*time.Minute, w, isPVCBound)\nif err != nil {\nreturn nil, err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
increases timeout waiting for PVs
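The doubled timeout feeds apimachinery's watch.Until, which consumes events from a watch until a condition function returns true or the timeout expires. A sketch of the call pattern, assuming the pre-1.12 watch API the diff uses and a caller that already holds a watch.Interface on the claim:

```go
package e2e

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// waitForPVCBound blocks until the watched claim reports phase Bound,
// or the (now doubled) ten-minute timeout elapses.
func waitForPVCBound(w watch.Interface) error {
	isPVCBound := func(event watch.Event) (bool, error) {
		pvc, ok := event.Object.(*v1.PersistentVolumeClaim)
		if !ok {
			return false, nil // ignore unrelated events
		}
		return pvc.Status.Phase == v1.ClaimBound, nil
	}
	_, err := watch.Until(10*time.Minute, w, isPVCBound)
	return err
}
```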
596,240
03.05.2018 14:44:29
-7,200
fc3226eb665936e9e302fb8c1fe12205a20f5db5
lowercases node name for openstack
[ { "change_type": "MODIFY", "old_path": "pkg/util/namegenerator.go", "new_path": "pkg/util/namegenerator.go", "diff": "@@ -2,6 +2,7 @@ package util\nimport (\n\"fmt\"\n+ \"strings\"\nutilrand \"k8s.io/apimachinery/pkg/util/rand\"\n)\n@@ -32,5 +33,5 @@ func (simpleNameGenerator) GenerateName(base string) string {\nif len(base) > maxGeneratedNameLength {\nbase = base[:maxGeneratedNameLength]\n}\n- return fmt.Sprintf(\"%s%s\", base, utilrand.String(randomLength))\n+ return strings.ToLower(fmt.Sprintf(\"%s%s\", base, utilrand.String(randomLength)))\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
lowercases node name for openstack (#274)
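GenerateName glues a random suffix onto a caller-supplied base; since the base (for node names, derived from kluster and pool names) can carry uppercase characters that OpenStack hostnames and DNS-style labels dislike, the whole result is lowercased. The pattern in isolation, with a fixed suffix standing in for utilrand.String:

```go
package main

import (
	"fmt"
	"strings"
)

// generateName mirrors simpleNameGenerator.GenerateName from the diff,
// minus truncation and randomness, to highlight the lowercasing step.
func generateName(base, suffix string) string {
	return strings.ToLower(fmt.Sprintf("%s%s", base, suffix))
}

func main() {
	fmt.Println(generateName("e2e-Small-", "x7b9q")) // e2e-small-x7b9q
}
```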
596,240
14.05.2018 16:19:32
-7,200
93555887bc145011de01afee417d3d1e1b4dcee2
less parallelism
[ { "change_type": "MODIFY", "old_path": "test/e2e/kluster_test.go", "new_path": "test/e2e/kluster_test.go", "diff": "@@ -25,8 +25,6 @@ type KlusterTests struct {\n}\nfunc (k *KlusterTests) KlusterPhaseBecomesPending(t *testing.T) {\n- t.Parallel()\n-\nphase, err := k.Kubernikus.WaitForKlusterPhase(k.KlusterName, models.KlusterPhasePending, KlusterPhaseBecomesPendingTimeout)\nif assert.NoError(t, err, \"There should be no error\") {\n@@ -35,8 +33,6 @@ func (k *KlusterTests) KlusterPhaseBecomesPending(t *testing.T) {\n}\nfunc (k *KlusterTests) KlusterPhaseBecomesCreating(t *testing.T) {\n- t.Parallel()\n-\nphase, err := k.Kubernikus.WaitForKlusterPhase(k.KlusterName, models.KlusterPhaseCreating, KlusterPhaseBecomesCreatingTimeout)\nif assert.NoError(t, err, \"There should be no error\") {\n@@ -45,8 +41,6 @@ func (k *KlusterTests) KlusterPhaseBecomesCreating(t *testing.T) {\n}\nfunc (k *KlusterTests) KlusterPhaseBecomesRunning(t *testing.T) {\n- t.Parallel()\n-\nphase, err := k.Kubernikus.WaitForKlusterPhase(k.KlusterName, models.KlusterPhaseRunning, KlusterPhaseBecomesRunningTimeout)\nif assert.NoError(t, err, \"There should be no error\") {\n@@ -55,8 +49,6 @@ func (k *KlusterTests) KlusterPhaseBecomesRunning(t *testing.T) {\n}\nfunc (k *KlusterTests) KlusterPhaseBecomesTerminating(t *testing.T) {\n- t.Parallel()\n-\nphase, err := k.Kubernikus.WaitForKlusterPhase(k.KlusterName, models.KlusterPhaseTerminating, KlusterPhaseBecomesTerminatingTimeout)\nif assert.NoError(t, err, \"There should be no error\") {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
less parallelism
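Dropping t.Parallel() matters because parallel subtests within one parent are paused and then released together, so phase assertions that must observe Pending before Creating before Running could interleave arbitrarily. Sequential subtests, as sketched below, run strictly in declaration order:

```go
package e2e

import "testing"

func TestKlusterPhases(t *testing.T) {
	// Without t.Parallel(), each subtest starts only after the previous one
	// returns, which is exactly the ordering the phase checks rely on.
	t.Run("BecomesPending", func(t *testing.T) { /* wait for Pending */ })
	t.Run("BecomesCreating", func(t *testing.T) { /* wait for Creating */ })
	t.Run("BecomesRunning", func(t *testing.T) { /* wait for Running */ })
}
```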
596,240
14.05.2018 16:20:43
-7,200
126c3a0cc6167b14a5caf853a27d7e2cfc535dc1
no need for kubectl anymore
[ { "change_type": "MODIFY", "old_path": "ci/task_e2e_tests.yaml", "new_path": "ci/task_e2e_tests.yaml", "diff": "@@ -16,15 +16,10 @@ run:\nargs:\n- -c\n- |\n- export KUBERNETES_VERSION=v1.9.0\nexport GOPATH=$PWD/gopath\ncd gopath/src/github.com/sapcc/kubernikus\n- apk add --no-cache make git curl\n- curl -fLo /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl \\\n- && chmod +x /usr/bin/kubectl /usr/bin/kubectl \\\n- && /usr/bin/kubectl version --client\n-\n+ apk add --no-cache make git\nmake test-e2e\nparams:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
no need for kubectl anymore
596,240
14.05.2018 16:49:57
-7,200
9b1eb336bd2679fa4f4cd834ce2dd7c5c1019a73
adds eu-de-2 smoketests
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -139,6 +139,16 @@ auth_e2e_eu-de-1: &auth_e2e_eu-de-1\nKUBERNIKUS_NAME: e2e\nKUBERNIKUS_URL: https://kubernikus.eu-de-1.cloud.sap\n+auth_e2e_eu-de-1: &auth_e2e_eu-de-2\n+ OS_AUTH_URL: https://identity-3.eu-de-2.cloud.sap/v3\n+ OS_USERNAME: {{kubernikus-prod-username}}\n+ OS_PASSWORD: {{kubernikus-prod-password}}\n+ OS_USER_DOMAIN_NAME: ccadmin\n+ OS_PROJECT_NAME: kubernikus-e2e\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: e2e\n+ KUBERNIKUS_URL: https://kubernikus.eu-de-2.cloud.sap\n+\nauth_e2e_eu-nl-1: &auth_e2e_na-us-1\nOS_AUTH_URL: https://identity-3.na-us-1.cloud.sap/v3\nOS_USERNAME: {{kubernikus-prod-username}}\n@@ -524,6 +534,23 @@ jobs:\nparams:\n<<: *auth_e2e_eu-de-1\n+ - name: soak_eu-de-2\n+ serial: true\n+ build_logs_to_retain: 168\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.builds\n+ resource: master.builds\n+ passed: [e2e]\n+ - get: hourly\n+ trigger: true\n+ - task: e2e_tests\n+ file: kubernikus.builds/ci/task_e2e_tests.yaml\n+ timeout: 45m\n+ params:\n+ <<: *auth_e2e_eu-de-2\n+\n- name: soak_na-us-1\nserial: true\nbuild_logs_to_retain: 168\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds eu-de-2 smoketests
596,240
14.05.2018 16:53:10
-7,200
ff514f5412175ea954e4ac36ada1aed25d6f87e0
no progress bar for tests
[ { "change_type": "MODIFY", "old_path": "ci/task_e2e_tests.yaml", "new_path": "ci/task_e2e_tests.yaml", "diff": "@@ -19,7 +19,7 @@ run:\nexport GOPATH=$PWD/gopath\ncd gopath/src/github.com/sapcc/kubernikus\n- apk add --no-cache make git\n+ apk add --no-progress --no-cache make git\nmake test-e2e\nparams:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
no progress bar for tests
596,240
14.05.2018 16:55:01
-7,200
5b688e660c48b732c6f8337fc898b31e5c691a5f
again less parallelism
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -123,8 +123,6 @@ func TestRunner(t *testing.T) {\nrequire.NotEqual(t, len(nodes.Items), 1, \"There must be at least 2 nodes\")\nt.Run(\"Network\", func(t *testing.T) {\n- t.Parallel()\n-\nnetwork := NetworkTests{kubernetes, nodes, namespaceNetwork}\ndefer t.Run(\"Cleanup\", network.DeleteNamespace)\n@@ -152,8 +150,6 @@ func TestRunner(t *testing.T) {\n})\nt.Run(\"Volumes\", func(t *testing.T) {\n- t.Parallel()\n-\nvolumes := VolumeTests{kubernetes, nodes, nil, namespaceVolumes}\ndefer t.Run(\"Cleanup\", volumes.DeleteNamespace)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
again less paralleism
596,240
14.05.2018 17:20:52
-7,200
8428cfcfb7ba24a45a14390c1ca8f59afa269678
adds timeout and kluster name
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -118,7 +118,7 @@ clean:\ntest-e2e:\n@cd test/e2e && \\\nset -o pipefail && \\\n- go test -v --kubernikus=kubernikus.eu-nl-1.cloud.sap | \\\n+ go test -v -timeout 20m --kubernikus=kubernikus.eu-nl-1.cloud.sap --kluster=e2e | \\\ngrep -v \"CONT\\|PAUSE\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds timeout and kluster name
596,240
14.05.2018 17:46:21
-7,200
15c1156014c00703e03a6ce0b169afbc663b11e1
new clusters for each test
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -118,7 +118,7 @@ clean:\ntest-e2e:\n@cd test/e2e && \\\nset -o pipefail && \\\n- go test -v -timeout 20m --kubernikus=kubernikus.eu-nl-1.cloud.sap --kluster=e2e | \\\n+ go test -v -timeout 55m --kubernikus=kubernikus.eu-nl-1.cloud.sap | \\\ngrep -v \"CONT\\|PAUSE\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
new clusters for each test
596,240
14.05.2018 18:14:24
-7,200
363f9b95dca2adaf94851827914927bb4f4ca79e
uses urls. fixes e2e pass through
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -116,10 +116,14 @@ clean:\n.PHONY: test-e2e\ntest-e2e:\n+ifndef KUBERNIKUS_URL\n+ $(error set KUBERNIKUS_URL)\n+else\n@cd test/e2e && \\\nset -o pipefail && \\\n- go test -v -timeout 55m --kubernikus=kubernikus.eu-nl-1.cloud.sap | \\\n+ go test -v -timeout 55m --kubernikus=$(KUBERNIKUS_URL) | \\\ngrep -v \"CONT\\|PAUSE\"\n+endif\ninclude code-generate.mk\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/framework/kubernikus.go", "new_path": "test/e2e/framework/kubernikus.go", "diff": "@@ -2,6 +2,7 @@ package framework\nimport (\n\"fmt\"\n+ \"net/url\"\n\"os\"\n\"github.com/go-openapi/runtime\"\n@@ -17,7 +18,7 @@ type Kubernikus struct {\nAuthInfo runtime.ClientAuthInfoWriterFunc\n}\n-func NewKubernikusFramework(host string) (*Kubernikus, error) {\n+func NewKubernikusFramework(kubernikusURL *url.URL) (*Kubernikus, error) {\nauthOptions := &tokens.AuthOptions{\nIdentityEndpoint: os.Getenv(\"OS_AUTH_URL\"),\nUsername: os.Getenv(\"OS_USERNAME\"),\n@@ -48,8 +49,8 @@ func NewKubernikusFramework(host string) (*Kubernikus, error) {\nkubernikusClient := kubernikus.NewHTTPClientWithConfig(\nnil,\n&kubernikus.TransportConfig{\n- Host: host,\n- Schemes: []string{\"https\"},\n+ Host: kubernikusURL.Host,\n+ Schemes: []string{kubernikusURL.Scheme},\n},\n)\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -3,6 +3,7 @@ package main\nimport (\n\"flag\"\n\"fmt\"\n+ \"net/url\"\n\"os\"\n\"testing\"\n@@ -14,17 +15,22 @@ import (\n)\nvar (\n- kubernikusHost = flag.String(\"kubernikus\", \"\", \"Kubernikus API Hostname\")\n+ kubernikusURL = flag.String(\"kubernikus\", \"\", \"Kubernikus URL\")\nkluster = flag.String(\"kluster\", \"\", \"Use existing Kluster\")\nreuse = flag.Bool(\"reuse\", false, \"Reuse exisiting Kluster\")\ncleanup = flag.Bool(\"cleanup\", true, \"Cleanup after tests have been run\")\n)\nfunc validate() error {\n- if *kubernikusHost == \"\" {\n+ if *kubernikusURL == \"\" {\nreturn fmt.Errorf(\"You need to provide the --kubernikus flag\")\n}\n+ _, err := url.Parse(*kubernikusURL)\n+ if err != nil {\n+ return fmt.Errorf(\"You need to provide an URL for --kubernikus: %v\", err)\n+ }\n+\nif reuse != nil && *reuse && (kluster == nil || *kluster == \"\") {\nreturn fmt.Errorf(\"You need to provide the --kluster flag when --reuse is active\")\n}\n@@ -59,6 +65,9 @@ func TestRunner(t *testing.T) {\nklusterName = *kluster\n}\n+ kurl, err := url.Parse(*kubernikusURL)\n+ require.NoError(t, err, \"Must be able to parse Kubernikus URL\")\n+\nfmt.Printf(\"========================================================================\\n\")\nfmt.Printf(\"Authentication\\n\")\nfmt.Printf(\"========================================================================\\n\")\n@@ -71,13 +80,13 @@ func TestRunner(t *testing.T) {\nfmt.Printf(\"========================================================================\\n\")\nfmt.Printf(\"Test Parameters\\n\")\nfmt.Printf(\"========================================================================\\n\")\n- fmt.Printf(\"Kubernikus Host: %v\\n\", *kubernikusHost)\n+ fmt.Printf(\"Kubernikus: %v\\n\", *kubernikusURL)\nfmt.Printf(\"Kluster Name: %v\\n\", klusterName)\nfmt.Printf(\"Reuse: %v\\n\", *reuse)\nfmt.Printf(\"Cleanup: %v\\n\", *cleanup)\nfmt.Printf(\"\\n\\n\")\n- kubernikus, err := framework.NewKubernikusFramework(*kubernikusHost)\n+ kubernikus, err := framework.NewKubernikusFramework(kurl)\nrequire.NoError(t, err, 
\"Must be able to connect to Kubernikus\")\napi := APITests{kubernikus, klusterName}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses urls. fixes e2e pass through
596,240
14.05.2018 18:39:20
-7,200
9bddb52dba62b1a00ccc7260d4d8a83ffb4b103c
don't wait when deletion failed
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -94,9 +94,10 @@ func TestRunner(t *testing.T) {\nif cleanup != nil && *cleanup == true {\ndefer t.Run(\"Cleanup\", func(t *testing.T) {\n- t.Run(\"TerminateCluster\", api.TerminateCluster)\n+ if t.Run(\"TerminateCluster\", api.TerminateCluster) {\nt.Run(\"BecomesTerminating\", kluster.KlusterPhaseBecomesTerminating)\nt.Run(\"IsDeleted\", api.WaitForKlusterToBeDeleted)\n+ }\n})\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
don't wait when deletion failed
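The gating works because t.Run reports whether its subtest (including nested subtests) passed. A minimal sketch of the pattern with hypothetical stub subtests:

```go
package e2e

import "testing"

func TestCleanup(t *testing.T) {
	terminate := func(t *testing.T) { /* issue the kluster delete */ }
	becomesTerminating := func(t *testing.T) { /* wait for phase Terminating */ }
	isDeleted := func(t *testing.T) { /* wait for the kluster to disappear */ }

	// t.Run returns true only if the subtest passed, so a failed delete
	// short-circuits waits that would otherwise just run into their timeouts.
	if t.Run("TerminateCluster", terminate) {
		t.Run("BecomesTerminating", becomesTerminating)
		t.Run("IsDeleted", isDeleted)
	}
}
```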
596,240
14.05.2018 19:09:20
-7,200
df1a327e68fae734826f10cd2d02a856cd4c6157
15m is the max time we wait for volumes
[ { "change_type": "MODIFY", "old_path": "test/e2e/kluster_test.go", "new_path": "test/e2e/kluster_test.go", "diff": "@@ -14,7 +14,7 @@ import (\nconst (\nKlusterPhaseBecomesPendingTimeout = 1 * time.Minute\nKlusterPhaseBecomesCreatingTimeout = 1 * time.Minute\n- KlusterPhaseBecomesRunningTimeout = 5 * time.Minute\n+ KlusterPhaseBecomesRunningTimeout = 15 * time.Minute\nKlusterPhaseBecomesTerminatingTimeout = 1 * time.Minute\nKlusterFinishedTerminationTermination = 5 * time.Minute\n)\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/volume_test.go", "new_path": "test/e2e/volume_test.go", "diff": "@@ -16,7 +16,7 @@ import (\n)\nconst (\n- TestWaitForPVCBoundTimeout = 10 * time.Minute\n+ TestWaitForPVCBoundTimeout = 15 * time.Minute\n)\ntype VolumeTests struct {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
15m is the max time we wait for volumes
596,240
15.05.2018 10:17:11
-7,200
8474740bf010c39f9071857ce2dc307724c9eb33
extends timeout for PVC pods. parallelize smoketests
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -26,11 +26,15 @@ func validate() error {\nreturn fmt.Errorf(\"You need to provide the --kubernikus flag\")\n}\n- _, err := url.Parse(*kubernikusURL)\n+ k, err := url.Parse(*kubernikusURL)\nif err != nil {\nreturn fmt.Errorf(\"You need to provide an URL for --kubernikus: %v\", err)\n}\n+ if k.Host == \"\" {\n+ return fmt.Errorf(\"You need to provide an URL for --kubernikus\")\n+ }\n+\nif reuse != nil && *reuse && (kluster == nil || *kluster == \"\") {\nreturn fmt.Errorf(\"You need to provide the --kluster flag when --reuse is active\")\n}\n@@ -67,6 +71,7 @@ func TestRunner(t *testing.T) {\nkurl, err := url.Parse(*kubernikusURL)\nrequire.NoError(t, err, \"Must be able to parse Kubernikus URL\")\n+ require.NotEmpty(t, kurl.Host, \"There must be a host in the Kubernikus URL\")\nfmt.Printf(\"========================================================================\\n\")\nfmt.Printf(\"Authentication\\n\")\n@@ -80,7 +85,7 @@ func TestRunner(t *testing.T) {\nfmt.Printf(\"========================================================================\\n\")\nfmt.Printf(\"Test Parameters\\n\")\nfmt.Printf(\"========================================================================\\n\")\n- fmt.Printf(\"Kubernikus: %v\\n\", *kubernikusURL)\n+ fmt.Printf(\"Kubernikus: %v\\n\", kurl.Host)\nfmt.Printf(\"Kluster Name: %v\\n\", klusterName)\nfmt.Printf(\"Reuse: %v\\n\", *reuse)\nfmt.Printf(\"Cleanup: %v\\n\", *cleanup)\n@@ -132,15 +137,15 @@ func TestRunner(t *testing.T) {\nrequire.NotEqual(t, len(nodes.Items), 0, \"There must be at least 2 nodes\")\nrequire.NotEqual(t, len(nodes.Items), 1, \"There must be at least 2 nodes\")\n+ t.Run(\"Smoke\", func(t *testing.T) {\nt.Run(\"Network\", func(t *testing.T) {\n+ t.Parallel()\nnetwork := NetworkTests{kubernetes, nodes, namespaceNetwork}\ndefer t.Run(\"Cleanup\", network.DeleteNamespace)\nt.Run(\"Setup\", func(t *testing.T) {\n- t.Run(\"Namespace\", func(t *testing.T) {\n- t.Run(\"Create\", network.CreateNamespace)\n- t.Run(\"Wait\", network.WaitForNamespace)\n- })\n+ t.Run(\"Namespace/Create\", network.CreateNamespace)\n+ t.Run(\"Namespace/Wait\", network.WaitForNamespace)\nt.Run(\"Pods\", func(t *testing.T) {\nt.Parallel()\nt.Run(\"Create\", network.CreatePods)\n@@ -152,30 +157,29 @@ func TestRunner(t *testing.T) {\nt.Run(\"Wait\", network.WaitForServiceEndpoints)\n})\n})\n- t.Run(\"Connectivity\", func(t *testing.T) {\n- t.Run(\"Pods\", network.TestPods)\n- t.Run(\"Services\", network.TestServices)\n- t.Run(\"ServicesWithDNS\", network.TestServicesWithDNS)\n- })\n+\n+ t.Run(\"Connectivity/Pods\", network.TestPods)\n+ t.Run(\"Connectivity/Services\", network.TestServices)\n+ t.Run(\"ConnectivityServicesWithDNS\", network.TestServicesWithDNS)\n})\nt.Run(\"Volumes\", func(t *testing.T) {\n+ t.Parallel()\nvolumes := VolumeTests{kubernetes, nodes, nil, namespaceVolumes}\ndefer t.Run(\"Cleanup\", volumes.DeleteNamespace)\n- t.Run(\"Setup\", func(t *testing.T) {\n- t.Run(\"Namespace\", func(t *testing.T) {\n+ t.Run(\"Setup/Namespace\", func(t *testing.T) {\nt.Run(\"Create\", volumes.CreateNamespace)\nt.Run(\"Wait\", volumes.WaitForNamespace)\n})\n- })\nt.Run(\"PVC\", func(t *testing.T) {\nt.Run(\"Create\", volumes.CreatePVC)\nt.Run(\"Wait\", volumes.WaitForPVCBound)\n})\nt.Run(\"Pods\", func(t *testing.T) {\nt.Run(\"Create\", volumes.CreatePod)\n- t.Run(\"Wait\", volumes.WaitForPodsRunning)\n+ t.Run(\"Wait\", volumes.WaitForPVCPodsRunning)\n+ })\n})\n})\n}\n" 
}, { "change_type": "MODIFY", "old_path": "test/e2e/volume_test.go", "new_path": "test/e2e/volume_test.go", "diff": "@@ -16,7 +16,8 @@ import (\n)\nconst (\n- TestWaitForPVCBoundTimeout = 15 * time.Minute\n+ TestWaitForPVCBoundTimeout = 5 * time.Minute\n+ TestWaitForPVCPodsRunning = 15 * time.Minute\n)\ntype VolumeTests struct {\n@@ -83,9 +84,9 @@ func (p *VolumeTests) CreatePod(t *testing.T) {\n})\n}\n-func (p *VolumeTests) WaitForPodsRunning(t *testing.T) {\n+func (p *VolumeTests) WaitForPVCPodsRunning(t *testing.T) {\nlabel := labels.SelectorFromSet(labels.Set(map[string]string{\"app\": \"pvc-hostname\"}))\n- _, err := p.Kubernetes.WaitForPodsWithLabelRunningReady(p.Namespace, label, 1, TestWaitForPodsRunningTimeout)\n+ _, err := p.Kubernetes.WaitForPodsWithLabelRunningReady(p.Namespace, label, 1, TestWaitForPVCPodsRunning)\nrequire.NoError(t, err, \"There must be no error while waiting for the pod with mounted volume to become ready\")\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
extends timeout for PVC pods. parallelize smoketests
596,240
15.05.2018 14:29:53
-7,200
9483dbc8cec20c431be30f58b5887df8dac6a9a2
increases timeout for kube-dns to start properly
[ { "change_type": "MODIFY", "old_path": "test/e2e/network_test.go", "new_path": "test/e2e/network_test.go", "diff": "@@ -24,7 +24,7 @@ const (\nTestPodTimeout = 1 * time.Minute\nTestServicesTimeout = 1 * time.Minute\n- TestServicesWithDNSTimeout = 1 * time.Minute\n+ TestServicesWithDNSTimeout = 5 * time.Minute\nPollInterval = 6 * time.Second // DNS Timeout is 5s\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
increases timeout for kube-dns to start properly
596,240
15.05.2018 21:02:42
-7,200
cbfa4899a1cd1334c9742465473a4bb2ccf985e5
explicitly wait for kube-dns to become ready
[ { "change_type": "MODIFY", "old_path": "test/e2e/network_test.go", "new_path": "test/e2e/network_test.go", "diff": "@@ -20,6 +20,7 @@ import (\nconst (\nTestWaitForPodsRunningTimeout = 5 * time.Minute\n+ TestWaitForKubeDNSRunningTimeout = 2 * time.Minute\nTestWaitForServiceEndpointsTimeout = 5 * time.Minute\nTestPodTimeout = 1 * time.Minute\n@@ -93,7 +94,13 @@ func (n *NetworkTests) CreatePods(t *testing.T) {\nfunc (n *NetworkTests) WaitForPodsRunning(t *testing.T) {\nlabel := labels.SelectorFromSet(labels.Set(map[string]string{\"app\": \"serve-hostname\"}))\n_, err := n.Kubernetes.WaitForPodsWithLabelRunningReady(n.Namespace, label, len(n.Nodes.Items), TestWaitForPodsRunningTimeout)\n- require.NoError(t, err, \"Pods must become ready\")\n+ assert.NoError(t, err, \"Pods must become ready\")\n+}\n+\n+func (n *NetworkTests) WaitForKubeDNSRunning(t *testing.T) {\n+ label := labels.SelectorFromSet(labels.Set(map[string]string{\"k8s-app\": \"kube-dns\"}))\n+ _, err := n.Kubernetes.WaitForPodsWithLabelRunningReady(\"kube-system\", label, 1, TestWaitForKubeDNSRunningTimeout)\n+ assert.NoError(t, err, \"Kube-DNS must become ready\")\n}\nfunc (n *NetworkTests) CreateServices(t *testing.T) {\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -154,7 +154,8 @@ func TestRunner(t *testing.T) {\nt.Run(\"Services\", func(t *testing.T) {\nt.Parallel()\nt.Run(\"Create\", network.CreateServices)\n- t.Run(\"Wait\", network.WaitForServiceEndpoints)\n+ t.Run(\"WaitForServiceEndpoints\", network.WaitForServiceEndpoints)\n+ t.Run(\"WaitForKubeDNS\", network.WaitForKubeDNSRunning)\n})\n})\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
explicitly wait for kube-dns to become ready
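WaitForKubeDNSRunning finds the kube-dns pods by label before waiting on them; the selector construction is stock apimachinery:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Same construction as in the diff; labels.Set is just map[string]string.
	sel := labels.SelectorFromSet(labels.Set{"k8s-app": "kube-dns"})
	fmt.Println(sel.String()) // prints: k8s-app=kube-dns
}
```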
596,240
15.05.2018 22:33:06
-7,200
0b966ebcaf14de11c9866e68bcbbcf34472a3a5e
adds smoke tests that explicitly check for node conditions
[ { "change_type": "MODIFY", "old_path": "test/e2e/api_test.go", "new_path": "test/e2e/api_test.go", "diff": "@@ -15,6 +15,8 @@ import (\nconst (\nTestKlusterDeletedTimeout = 5 * time.Minute\nTestKlusterNodesReadyTimeout = 10 * time.Minute\n+\n+ SmokeTestNodeCount = 2\n)\ntype APITests struct {\n@@ -32,7 +34,7 @@ func (a *APITests) CreateCluster(t *testing.T) {\nName: \"small\",\nFlavor: \"m1.small\",\nImage: \"coreos-stable-amd64\",\n- Size: 2,\n+ Size: SmokeTestNodeCount,\n},\n},\n},\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/e2e/node_test.go", "diff": "+package main\n+\n+import (\n+ \"fmt\"\n+ \"testing\"\n+ \"time\"\n+\n+ wormhole \"github.com/sapcc/kubernikus/pkg/wormhole/client\"\n+ \"github.com/sapcc/kubernikus/test/e2e/framework\"\n+ \"github.com/stretchr/testify/assert\"\n+ \"k8s.io/api/core/v1\"\n+ meta_v1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ \"k8s.io/apimachinery/pkg/util/wait\"\n+)\n+\n+const (\n+ TestRegisteredTimeout = 10 * time.Minute\n+ TestRouteBrokenTimeout = 2 * time.Minute\n+ TestNetworkUnavailableTimeout = 2 * time.Minute\n+ TestReadyTimeout = 5 * time.Minute\n+)\n+\n+type NodeTests struct {\n+ Kubernetes *framework.Kubernetes\n+ ExpectedNodeCount int\n+}\n+\n+func (k *NodeTests) Registered(t *testing.T) {\n+ count := 0\n+ err := wait.PollImmediate(framework.Poll, TestRegisteredTimeout,\n+ func() (bool, error) {\n+ nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ if err != nil {\n+ return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n+ }\n+ count = len(nodes.Items)\n+\n+ return count >= k.ExpectedNodeCount, nil\n+ })\n+\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+\n+func (k *NodeTests) RouteBroken(t *testing.T) {\n+ t.Parallel()\n+\n+ count := 0\n+ err := wait.PollImmediate(framework.Poll, TestRouteBrokenTimeout,\n+ func() (bool, error) {\n+ nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ if err != nil {\n+ return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n+ }\n+\n+ count = 0\n+ for _, node := range nodes.Items {\n+ for _, condition := range node.Status.Conditions {\n+ if condition.Type == wormhole.NodeRouteBroken {\n+ if condition.Status == v1.ConditionFalse {\n+ count++\n+ }\n+ break\n+ }\n+ }\n+ }\n+\n+ return count >= k.ExpectedNodeCount, nil\n+ })\n+\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+\n+func (k *NodeTests) NetworkUnavailable(t *testing.T) {\n+ t.Parallel()\n+\n+ count := 0\n+ err := wait.PollImmediate(framework.Poll, TestNetworkUnavailableTimeout,\n+ func() (bool, error) {\n+ nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ if err != nil {\n+ return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n+ }\n+\n+ count = 0\n+ for _, node := range nodes.Items {\n+ for _, condition := range node.Status.Conditions {\n+ if condition.Type == v1.NodeNetworkUnavailable {\n+ if condition.Status == v1.ConditionFalse {\n+ count++\n+ }\n+ break\n+ }\n+ }\n+ }\n+\n+ return count >= k.ExpectedNodeCount, nil\n+ })\n+\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+\n+func (k *NodeTests) Ready(t *testing.T) {\n+ t.Parallel()\n+\n+ count := 0\n+ err := wait.PollImmediate(framework.Poll, TestKlusterDeletedTimeout,\n+ func() (bool, error) {\n+ nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ if err != nil {\n+ return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n+ }\n+\n+ 
count = 0\n+ for _, node := range nodes.Items {\n+ for _, condition := range node.Status.Conditions {\n+ if condition.Type == v1.NodeReady {\n+ if condition.Status == v1.ConditionTrue {\n+ count++\n+ }\n+ break\n+ }\n+ }\n+ }\n+\n+ return count >= k.ExpectedNodeCount, nil\n+ })\n+\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -61,6 +61,9 @@ func TestMain(m *testing.M) {\n}\nfunc TestRunner(t *testing.T) {\n+ var kubernetes *framework.Kubernetes\n+ var kubernikus *framework.Kubernikus\n+\nnamespaceNetwork := util.SimpleNameGenerator.GenerateName(\"e2e-network-\")\nnamespaceVolumes := util.SimpleNameGenerator.GenerateName(\"e2e-volumes-\")\nklusterName := util.SimpleNameGenerator.GenerateName(\"e2e-\")\n@@ -91,7 +94,7 @@ func TestRunner(t *testing.T) {\nfmt.Printf(\"Cleanup: %v\\n\", *cleanup)\nfmt.Printf(\"\\n\\n\")\n- kubernikus, err := framework.NewKubernikusFramework(kurl)\n+ kubernikus, err = framework.NewKubernikusFramework(kurl)\nrequire.NoError(t, err, \"Must be able to connect to Kubernikus\")\napi := APITests{kubernikus, klusterName}\n@@ -117,6 +120,9 @@ func TestRunner(t *testing.T) {\nrunning := t.Run(\"BecomesRunning\", kluster.KlusterPhaseBecomesRunning)\nrequire.True(t, running, \"The Kluster must be Running\")\n+ kubernetes, err = framework.NewKubernetesFramework(kubernikus, klusterName)\n+ require.NoError(t, err, \"Must be able to create a kubernetes client\")\n+\nready := t.Run(\"NodesBecomeReady\", api.WaitForNodesReady)\nrequire.True(t, ready, \"The Kluster must have Ready nodes\")\n})\n@@ -129,18 +135,24 @@ func TestRunner(t *testing.T) {\nt.Run(\"GetCredentials\", api.GetCredentials)\n})\n- kubernetes, err := framework.NewKubernetesFramework(kubernikus, klusterName)\n- require.NoError(t, err, \"Must be able to create a kubernetes client\")\n+ nodes := t.Run(\"Nodes\", func(t *testing.T) {\n+ nodeTests := NodeTests{kubernetes, SmokeTestNodeCount}\n+\n+ t.Run(\"Registered\", nodeTests.Registered)\n+ t.Run(\"Condition/RouteBroken\", nodeTests.RouteBroken)\n+ t.Run(\"Condition/NetworkUnavailable\", nodeTests.NetworkUnavailable)\n+ t.Run(\"Condition/Ready\", nodeTests.Ready)\n+ })\n+ require.True(t, nodes, \"Node test must complete successfully\")\n- nodes, err := kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ nodeList, err := kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\nrequire.NoError(t, err, \"There must be no error while listing the kluster's nodes\")\n- require.NotEqual(t, len(nodes.Items), 0, \"There must be at least 2 nodes\")\n- require.NotEqual(t, len(nodes.Items), 1, \"There must be at least 2 nodes\")\n+ require.Equal(t, len(nodeList.Items), SmokeTestNodeCount, \"There must be at least %d nodes\", SmokeTestNodeCount)\nt.Run(\"Smoke\", func(t *testing.T) {\nt.Run(\"Network\", func(t *testing.T) {\nt.Parallel()\n- network := NetworkTests{kubernetes, nodes, namespaceNetwork}\n+ network := NetworkTests{kubernetes, nodeList, namespaceNetwork}\ndefer t.Run(\"Cleanup\", network.DeleteNamespace)\nt.Run(\"Setup\", func(t *testing.T) {\n@@ -166,7 +178,7 @@ func TestRunner(t *testing.T) {\nt.Run(\"Volumes\", func(t *testing.T) {\nt.Parallel()\n- volumes := VolumeTests{kubernetes, nodes, nil, namespaceVolumes}\n+ volumes := VolumeTests{kubernetes, nodeList, nil, namespaceVolumes}\ndefer t.Run(\"Cleanup\", volumes.DeleteNamespace)\nt.Run(\"Setup/Namespace\", func(t *testing.T) {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds smoke tests that explictly check for node conditions
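The new node tests share one shape: list the nodes, then count those whose condition of a given type has the expected status. That counting loop, lifted from the diff into a self-contained helper (without the wait.PollImmediate wrapper around it):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// countCondition mirrors the inner loop of the diff's condition checks.
func countCondition(nodes []v1.Node, ct v1.NodeConditionType, want v1.ConditionStatus) int {
	count := 0
	for _, node := range nodes {
		for _, cond := range node.Status.Conditions {
			if cond.Type == ct {
				if cond.Status == want {
					count++
				}
				break // each condition type appears at most once per node
			}
		}
	}
	return count
}

func main() {
	nodes := []v1.Node{{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{
		{Type: v1.NodeReady, Status: v1.ConditionTrue},
	}}}}
	fmt.Println(countCondition(nodes, v1.NodeReady, v1.ConditionTrue)) // 1
}
```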
596,240
15.05.2018 23:04:23
-7,200
03293a5c1c26320e2c56921b23c0ff1bf8749006
fixes premature cleanup
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -170,11 +170,12 @@ func TestRunner(t *testing.T) {\nt.Run(\"WaitForKubeDNS\", network.WaitForKubeDNSRunning)\n})\n})\n-\n+ t.Run(\"Connectivity\", func(t *testing.T) {\nt.Run(\"Connectivity/Pods\", network.TestPods)\nt.Run(\"Connectivity/Services\", network.TestServices)\nt.Run(\"ConnectivityServicesWithDNS\", network.TestServicesWithDNS)\n})\n+ })\nt.Run(\"Volumes\", func(t *testing.T) {\nt.Parallel()\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes premature cleanup
596,240
15.05.2018 23:11:04
-7,200
37242da2d110c778cc5d2ead0418a109f0e921d9
delays node readiness tests until later
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -61,9 +61,6 @@ func TestMain(m *testing.M) {\n}\nfunc TestRunner(t *testing.T) {\n- var kubernetes *framework.Kubernetes\n- var kubernikus *framework.Kubernikus\n-\nnamespaceNetwork := util.SimpleNameGenerator.GenerateName(\"e2e-network-\")\nnamespaceVolumes := util.SimpleNameGenerator.GenerateName(\"e2e-volumes-\")\nklusterName := util.SimpleNameGenerator.GenerateName(\"e2e-\")\n@@ -94,7 +91,7 @@ func TestRunner(t *testing.T) {\nfmt.Printf(\"Cleanup: %v\\n\", *cleanup)\nfmt.Printf(\"\\n\\n\")\n- kubernikus, err = framework.NewKubernikusFramework(kurl)\n+ kubernikus, err := framework.NewKubernikusFramework(kurl)\nrequire.NoError(t, err, \"Must be able to connect to Kubernikus\")\napi := APITests{kubernikus, klusterName}\n@@ -120,11 +117,6 @@ func TestRunner(t *testing.T) {\nrunning := t.Run(\"BecomesRunning\", kluster.KlusterPhaseBecomesRunning)\nrequire.True(t, running, \"The Kluster must be Running\")\n- kubernetes, err = framework.NewKubernetesFramework(kubernikus, klusterName)\n- require.NoError(t, err, \"Must be able to create a kubernetes client\")\n-\n- ready := t.Run(\"NodesBecomeReady\", api.WaitForNodesReady)\n- require.True(t, ready, \"The Kluster must have Ready nodes\")\n})\nrequire.True(t, setup, \"Test setup must complete successfully\")\n@@ -135,6 +127,9 @@ func TestRunner(t *testing.T) {\nt.Run(\"GetCredentials\", api.GetCredentials)\n})\n+ kubernetes, err := framework.NewKubernetesFramework(kubernikus, klusterName)\n+ require.NoError(t, err, \"Must be able to create a kubernetes client\")\n+\nnodes := t.Run(\"Nodes\", func(t *testing.T) {\nnodeTests := NodeTests{kubernetes, SmokeTestNodeCount}\n@@ -142,6 +137,9 @@ func TestRunner(t *testing.T) {\nt.Run(\"Condition/RouteBroken\", nodeTests.RouteBroken)\nt.Run(\"Condition/NetworkUnavailable\", nodeTests.NetworkUnavailable)\nt.Run(\"Condition/Ready\", nodeTests.Ready)\n+\n+ ready := t.Run(\"NodesBecomeReady\", api.WaitForNodesReady)\n+ require.True(t, ready, \"The Kluster must have Ready nodes\")\n})\nrequire.True(t, nodes, \"Node test must complete successfully\")\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
delays node readyness tests to later
596,240
16.05.2018 09:47:46
-7,200
fb60b18ed6e9553a1b4d0f3a9aadf725136c4905
removes routebroken test
[ { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -134,7 +134,6 @@ func TestRunner(t *testing.T) {\nnodeTests := NodeTests{kubernetes, SmokeTestNodeCount}\nt.Run(\"Registered\", nodeTests.Registered)\n- t.Run(\"Condition/RouteBroken\", nodeTests.RouteBroken)\nt.Run(\"Condition/NetworkUnavailable\", nodeTests.NetworkUnavailable)\nt.Run(\"Condition/Ready\", nodeTests.Ready)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes routebroken test
596,240
17.05.2018 17:34:19
-7,200
5fcb40f7203f99ab7e19f8fa8882243df9edc7e2
adds node condition and state checks
[ { "change_type": "MODIFY", "old_path": "test/e2e/api_test.go", "new_path": "test/e2e/api_test.go", "diff": "@@ -122,8 +122,3 @@ func (a *APITests) WaitForKlusterToBeDeleted(t *testing.T) {\nerr := a.Kubernikus.WaitForKlusterToBeDeleted(a.KlusterName, TestKlusterDeletedTimeout)\nrequire.NoError(t, err, \"There should be no error while waiting %v for the kluster to be deleted\", TestKlusterDeletedTimeout)\n}\n-\n-func (a *APITests) WaitForNodesReady(t *testing.T) {\n- err := a.Kubernikus.WaitForKlusterToHaveEnoughSchedulableNodes(a.KlusterName, TestKlusterNodesReadyTimeout)\n- require.NoError(t, err, \"The should be no error while waiting %v for the nodes to become ready\", TestKlusterNodesReadyTimeout)\n-}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/node_test.go", "new_path": "test/e2e/node_test.go", "diff": "@@ -10,61 +10,79 @@ import (\nmeta_v1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n+ \"github.com/sapcc/kubernikus/pkg/api/client/operations\"\n+ \"github.com/sapcc/kubernikus/pkg/api/models\"\nwormhole \"github.com/sapcc/kubernikus/pkg/wormhole/client\"\n\"github.com/sapcc/kubernikus/test/e2e/framework\"\n)\nconst (\n- TestRegisteredTimeout = 10 * time.Minute\n- TestRouteBrokenTimeout = 2 * time.Minute\n- TestNetworkUnavailableTimeout = 2 * time.Minute\n- TestReadyTimeout = 5 * time.Minute\n+ // Incremental Increasing TImeout\n+ StateRunningTimeout = 15 * time.Minute\n+ RegisteredTimeout = 5 * time.Minute\n+ StateSchedulableTimeout = 1 * time.Minute\n+ StateHealthyTimeout = 1 * time.Minute\n+ ConditionRouteBrokenTimeout = 1 * time.Minute\n+ ConditionNetworkUnavailableTimeout = 1 * time.Minute\n+ ConditionReadyTimeout = 1 * time.Minute\n)\ntype NodeTests struct {\nKubernetes *framework.Kubernetes\n+ Kubernikus *framework.Kubernikus\nExpectedNodeCount int\n+ KlusterName string\n}\n-func (k *NodeTests) Registered(t *testing.T) {\n- count := 0\n- err := wait.PollImmediate(framework.Poll, TestRegisteredTimeout,\n- func() (bool, error) {\n- nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n- if err != nil {\n- return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n+func (k *NodeTests) StateRunning(t *testing.T) {\n+ count, err := k.checkState(t, func(pool models.NodePoolInfo) int64 { return pool.Running }, StateRunningTimeout)\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n}\n- count = len(nodes.Items)\n- return count >= k.ExpectedNodeCount, nil\n- })\n+func (k *NodeTests) StateSchedulable(t *testing.T) {\n+ t.Parallel()\n+ count, err := k.checkState(t, func(pool models.NodePoolInfo) int64 { return pool.Schedulable }, StateSchedulableTimeout)\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+func (k *NodeTests) StateHealthy(t *testing.T) {\n+ t.Parallel()\n+ count, err := k.checkState(t, func(pool models.NodePoolInfo) int64 { return pool.Healthy }, StateHealthyTimeout)\nassert.NoError(t, err)\nassert.Equal(t, k.ExpectedNodeCount, count)\n}\n-func (k *NodeTests) RouteBroken(t *testing.T) {\n+func (k *NodeTests) ConditionRouteBroken(t *testing.T) {\nt.Parallel()\n+ count, err := k.checkCondition(t, wormhole.NodeRouteBroken, v1.ConditionFalse, ConditionRouteBrokenTimeout)\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+\n+func (k *NodeTests) ConditionNetworkUnavailable(t *testing.T) {\n+ t.Parallel()\n+ count, err := k.checkCondition(t, v1.NodeNetworkUnavailable, v1.ConditionFalse, ConditionNetworkUnavailableTimeout)\n+ 
assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+func (k *NodeTests) ConditionReady(t *testing.T) {\n+ count, err := k.checkCondition(t, v1.NodeReady, v1.ConditionTrue, ConditionReadyTimeout)\n+ assert.NoError(t, err)\n+ assert.Equal(t, k.ExpectedNodeCount, count)\n+}\n+\n+func (k *NodeTests) Registered(t *testing.T) {\ncount := 0\n- err := wait.PollImmediate(framework.Poll, TestRouteBrokenTimeout,\n+ err := wait.PollImmediate(framework.Poll, RegisteredTimeout,\nfunc() (bool, error) {\nnodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\nif err != nil {\nreturn false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n}\n-\n- count = 0\n- for _, node := range nodes.Items {\n- for _, condition := range node.Status.Conditions {\n- if condition.Type == wormhole.NodeRouteBroken {\n- if condition.Status == v1.ConditionFalse {\n- count++\n- }\n- break\n- }\n- }\n- }\n+ count = len(nodes.Items)\nreturn count >= k.ExpectedNodeCount, nil\n})\n@@ -73,41 +91,30 @@ func (k *NodeTests) RouteBroken(t *testing.T) {\nassert.Equal(t, k.ExpectedNodeCount, count)\n}\n-func (k *NodeTests) NetworkUnavailable(t *testing.T) {\n- t.Parallel()\n+type poolCount func(models.NodePoolInfo) int64\n+func (k *NodeTests) checkState(t *testing.T, fn poolCount, timeout time.Duration) (int, error) {\ncount := 0\n- err := wait.PollImmediate(framework.Poll, TestNetworkUnavailableTimeout,\n- func() (bool, error) {\n- nodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\n+ err := wait.PollImmediate(framework.Poll, StateRunningTimeout,\n+ func() (done bool, err error) {\n+ cluster, err := k.Kubernikus.Client.Operations.ShowCluster(\n+ operations.NewShowClusterParams().WithName(k.KlusterName),\n+ k.Kubernikus.AuthInfo,\n+ )\nif err != nil {\n- return false, fmt.Errorf(\"Failed to list nodes: %v\", err)\n- }\n-\n- count = 0\n- for _, node := range nodes.Items {\n- for _, condition := range node.Status.Conditions {\n- if condition.Type == v1.NodeNetworkUnavailable {\n- if condition.Status == v1.ConditionFalse {\n- count++\n- }\n- break\n- }\n- }\n+ return false, err\n}\n+ count = int(fn(cluster.Payload.Status.NodePools[0]))\nreturn count >= k.ExpectedNodeCount, nil\n})\n- assert.NoError(t, err)\n- assert.Equal(t, k.ExpectedNodeCount, count)\n+ return count, err\n}\n-func (k *NodeTests) Ready(t *testing.T) {\n- t.Parallel()\n-\n+func (k *NodeTests) checkCondition(t *testing.T, conditionType v1.NodeConditionType, expectedStatus v1.ConditionStatus, timeout time.Duration) (int, error) {\ncount := 0\n- err := wait.PollImmediate(framework.Poll, TestKlusterDeletedTimeout,\n+ err := wait.PollImmediate(framework.Poll, timeout,\nfunc() (bool, error) {\nnodes, err := k.Kubernetes.ClientSet.CoreV1().Nodes().List(meta_v1.ListOptions{})\nif err != nil {\n@@ -117,8 +124,8 @@ func (k *NodeTests) Ready(t *testing.T) {\ncount = 0\nfor _, node := range nodes.Items {\nfor _, condition := range node.Status.Conditions {\n- if condition.Type == v1.NodeReady {\n- if condition.Status == v1.ConditionTrue {\n+ if condition.Type == conditionType {\n+ if condition.Status == expectedStatus {\ncount++\n}\nbreak\n@@ -129,6 +136,5 @@ func (k *NodeTests) Ready(t *testing.T) {\nreturn count >= k.ExpectedNodeCount, nil\n})\n- assert.NoError(t, err)\n- assert.Equal(t, k.ExpectedNodeCount, count)\n+ return count, err\n}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -131,13 +131,21 @@ func TestRunner(t *testing.T) 
{\nrequire.NoError(t, err, \"Must be able to create a kubernetes client\")\nnodes := t.Run(\"Nodes\", func(t *testing.T) {\n- nodeTests := NodeTests{kubernetes, SmokeTestNodeCount}\n+ nodeTests := NodeTests{kubernetes, kubernikus, SmokeTestNodeCount, klusterName}\n- t.Run(\"Registered\", nodeTests.Registered)\n- t.Run(\"Condition/NetworkUnavailable\", nodeTests.NetworkUnavailable)\n- t.Run(\"Condition/Ready\", nodeTests.Ready)\n+ running := t.Run(\"Running\", nodeTests.StateRunning)\n+ require.True(t, running, \"The Kluster must have Running nodes\")\n- ready := t.Run(\"NodesBecomeReady\", api.WaitForNodesReady)\n+ registered := t.Run(\"Registered\", nodeTests.Registered)\n+ require.True(t, registered, \"The Kluster must have Registered nodes\")\n+\n+ t.Run(\"Conditions\", func(t *testing.T) {\n+ t.Run(\"NetworkUnavailable\", nodeTests.ConditionNetworkUnavailable)\n+ t.Run(\"Schedulable\", nodeTests.StateSchedulable)\n+ t.Run(\"Healthy\", nodeTests.StateHealthy)\n+ })\n+\n+ ready := t.Run(\"Ready\", nodeTests.ConditionReady)\nrequire.True(t, ready, \"The Kluster must have Ready nodes\")\n})\nrequire.True(t, nodes, \"Node test must complete successfully\")\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds node condition and state checks
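The refactor's central move is poolCount, a function-typed parameter that lets a single checkState poller serve the Running, Schedulable, and Healthy variants. The pattern in isolation, with a local stand-in for the generated models.NodePoolInfo type:

```go
package main

import "fmt"

// NodePoolInfo is a stand-in for the generated models.NodePoolInfo type.
type NodePoolInfo struct{ Running, Schedulable, Healthy int64 }

// poolCount selects which counter of a pool a check should look at.
type poolCount func(NodePoolInfo) int64

func check(pool NodePoolInfo, fn poolCount, want int64) bool {
	return fn(pool) >= want
}

func main() {
	pool := NodePoolInfo{Running: 2, Schedulable: 2, Healthy: 1}
	fmt.Println(check(pool, func(p NodePoolInfo) int64 { return p.Running }, 2)) // true
	fmt.Println(check(pool, func(p NodePoolInfo) int64 { return p.Healthy }, 2)) // false
}
```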
596,240
17.05.2018 17:51:10
-7,200
e2824e702200d153f9d2309961d96f00a432743f
slight renaming for clarity
[ { "change_type": "MODIFY", "old_path": "test/e2e/node_test.go", "new_path": "test/e2e/node_test.go", "diff": "@@ -18,9 +18,9 @@ import (\nconst (\n// Incremental Increasing TImeout\n- StateRunningTimeout = 15 * time.Minute\n- RegisteredTimeout = 5 * time.Minute\n- StateSchedulableTimeout = 1 * time.Minute\n+ StateRunningTimeout = 1 * time.Minute // Time from cluster ready to nodes being created\n+ RegisteredTimeout = 15 * time.Minute // Time from node created to registered\n+ StateSchedulableTimeout = 1 * time.Minute // Time from registered to schedulable\nStateHealthyTimeout = 1 * time.Minute\nConditionRouteBrokenTimeout = 1 * time.Minute\nConditionNetworkUnavailableTimeout = 1 * time.Minute\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/setup_test.go", "new_path": "test/e2e/setup_test.go", "diff": "@@ -133,7 +133,7 @@ func TestRunner(t *testing.T) {\nnodes := t.Run(\"Nodes\", func(t *testing.T) {\nnodeTests := NodeTests{kubernetes, kubernikus, SmokeTestNodeCount, klusterName}\n- running := t.Run(\"Running\", nodeTests.StateRunning)\n+ running := t.Run(\"Created\", nodeTests.StateRunning)\nrequire.True(t, running, \"The Kluster must have Running nodes\")\nregistered := t.Run(\"Registered\", nodeTests.Registered)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
slight renaming for clarity
596,240
18.05.2018 13:53:31
-7,200
fb19503f5840be2013d9ebecbdfc41f80c9cb493
adds more flattening
[ { "change_type": "MODIFY", "old_path": "test/e2e/network_test.go", "new_path": "test/e2e/network_test.go", "diff": "@@ -52,8 +52,8 @@ func (n *NetworkTests) Run(t *testing.T) {\ndefer t.Run(\"Cleanup\", n.DeleteNamespace)\nt.Run(\"CreateNamespace\", n.CreateNamespace)\nt.Run(\"WaitNamespace\", n.WaitForNamespace)\n- t.Run(\"CreatePods\", n.CreatePods)\n- t.Run(\"CreateService\", n.CreateServices)\n+ n.CreatePods(t)\n+ n.CreateServices(t)\nt.Run(\"Wait\", func(t *testing.T) {\nt.Run(\"Pods\", n.WaitForPodsRunning)\nt.Run(\"ServiceEndpoints\", n.WaitForServiceEndpoints)\n@@ -85,7 +85,7 @@ func (n *NetworkTests) CreatePods(t *testing.T) {\nfor _, node := range n.Nodes.Items {\nnode := node\n- t.Run(node.Name, func(t *testing.T) {\n+ t.Run(fmt.Sprintf(\"CreatePodForNode-%v\", node.Name), func(t *testing.T) {\n_, err := n.Kubernetes.ClientSet.CoreV1().Pods(n.Namespace).Create(&v1.Pod{\nObjectMeta: meta_v1.ObjectMeta{\nGenerateName: fmt.Sprintf(\"%s-\", node.Name),\n@@ -136,7 +136,7 @@ func (n *NetworkTests) CreateServices(t *testing.T) {\nfor _, node := range n.Nodes.Items {\nnode := node\n- t.Run(node.Name, func(t *testing.T) {\n+ t.Run(fmt.Sprintf(\"CreateServiceForNode-%v\", node.Name), func(t *testing.T) {\nservice := &v1.Service{\nObjectMeta: meta_v1.ObjectMeta{\nName: node.Name,\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds more flatteing
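The flattened names come from fmt.Sprintf inside a per-node loop; note the `node := node` re-declaration the diff keeps, which gives each closure its own copy of the loop variable. In isolation:

```go
package e2e

import (
	"fmt"
	"testing"
)

func TestPerNode(t *testing.T) {
	nodes := []string{"node-a", "node-b"}
	for _, node := range nodes {
		node := node // fresh copy for the closure, as in the diff
		t.Run(fmt.Sprintf("CreatePodForNode-%v", node), func(t *testing.T) {
			// create one pod pinned to this node
		})
	}
}
```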
596,232
18.06.2018 18:15:08
-7,200
6c33640d075ccc69464544da35374378da38557b
add guide to deploy hana express on kubernikus
[ { "change_type": "MODIFY", "old_path": "docs/guide/common_addons.md", "new_path": "docs/guide/common_addons.md", "diff": "@@ -225,5 +225,158 @@ Now you can pull or push images\ndocker pull ${local_ip}:5000/<user>/<image>\n```\n+## Deploy HANA Express database on Kubernikus\n+\n+Create a Kubernetes cluster and deploy SAP HANA, express edition containers (database server only).\n+\n+### Step 1: Create Kubernetes Cluster\n+Login to the Converged Cloud Dashboard and navigate to your project. Open `Containers > Kubernetes`. Click `Create Cluster`, choose a cluster name (max. 20 digits), give your nodepool a name, choose a number of nodes and use at least a `m1.large` flavor which offers you `4 vCPU, ~8 GB RAM` per node. Create the `kluster` (Cluster by Kubernikus).\n+\n+### Step 2: Connect to your kluster\n+Use the following instructions to get access to your Kubernetes Cluster. [Authenticating with Kubernetes](https://kubernikus.eu-nl-1.cloud.sap/docs/guide/authentication/#authenticating-with-kubernetes).\n+\n+### Step 3: Create the deployments configuration files\n+At first, you should create a `secret` with your Docker credentials in order to pull images from the docker registry.\n+\n+```\n+kubectl create secret docker-registry docker-secret \\\n+--docker-server=https://index.docker.io/v1/ \\\n+--docker-username=<<DOCKER_USER>> \\\n+--docker-password=<<DOCKER_PASSWORD>> \\\n+--docker-email=<<DOCKER_EMAIL>>\n+```\n+\n+### Step 4: Create the deployments configuration files\n+Create a file `hxe.yaml` on your local machine and copy the following content into it. Replace the password inside the ConfigMap with your own one. Please check the password policy to avoid errors:\n+```\n+SAP HANA, express edition requires a very strong password that complies with these rules:\n+\n+At least 8 characters\n+At least 1 uppercase letter\n+At least 1 lowercase letter\n+At least 1 number\n+Can contain special characters, but not backtick, dollar sign, backslash, single or double quote\n+Cannot contain dictionary words\n+Cannot contain simplistic or systematic values, like strings in ascending or descending numerical or alphabetical order\n+```\n+\n+Create your local yaml file (`hxe.yaml`):\n+\n+```\n+kind: ConfigMap\n+apiVersion: v1\n+metadata:\n+ creationTimestamp: 2018-01-18T19:14:38Z\n+ name: hxe-pass\n+data:\n+ password.json: |+\n+ {\"master_password\" : \"HXEHana1\"}\n+---\n+kind: PersistentVolume\n+apiVersion: v1\n+metadata:\n+ name: persistent-vol-hxe\n+ labels:\n+ type: local\n+spec:\n+ storageClassName: manual\n+ capacity:\n+ storage: 150Gi\n+ accessModes:\n+ - ReadWriteOnce\n+ hostPath:\n+ path: \"/data/hxe_pv\"\n+---\n+kind: PersistentVolumeClaim\n+apiVersion: v1\n+metadata:\n+ name: hxe-pvc\n+spec:\n+ storageClassName: manual\n+ accessModes:\n+ - ReadWriteOnce\n+ resources:\n+ requests:\n+ storage: 50Gi\n+---\n+apiVersion: apps/v1\n+kind: Deployment\n+metadata:\n+ name: hxe\n+spec:\n+ selector:\n+ matchLabels:\n+ app: hxe\n+ replicas: 1\n+ template:\n+ metadata:\n+ labels:\n+ app: hxe\n+ spec:\n+ initContainers:\n+ - name: install\n+ image: busybox\n+ command: [ 'sh', '-c', 'chown 12000:79 /hana/mounts' ]\n+ volumeMounts:\n+ - name: hxe-data\n+ mountPath: /hana/mounts\n+ restartPolicy: Always\n+ volumes:\n+ - name: hxe-data\n+ persistentVolumeClaim:\n+ claimName: hxe-pvc\n+ - name: hxe-config\n+ configMap:\n+ name: hxe-pass\n+ imagePullSecrets:\n+ - name: docker-secret\n+ containers:\n+ - name: hxe-container\n+ image: \"store/saplabs/hanaexpress:2.00.022.00.20171211.1\"\n+ ports:\n+ - 
containerPort: 39013\n+ name: port1\n+ - containerPort: 39015\n+ name: port2\n+ - containerPort: 39017\n+ name: port3\n+ - containerPort: 8090\n+ name: port4\n+ - containerPort: 39041\n+ name: port5\n+ - containerPort: 59013\n+ name: port6\n+ args: [ \"--agree-to-sap-license\", \"--dont-check-system\", \"--passwords-url\", \"file:///hana/hxeconfig/password.json\" ]\n+ volumeMounts:\n+ - name: hxe-data\n+ mountPath: /hana/mounts\n+ - name: hxe-config\n+ mountPath: /hana/hxeconfig\n+\n+```\n+Now create the resources with `kubectl`:\n+```\n+kubectl create -f hxe.yaml\n+```\n+\n+In this example, the deployment creates just one pod. It should be running after a few seconds. The name of the pod starts with hxe and is followed by a generated hash (e.g. hxe-699d795cf6-7m6jk).\n+```\n+kubectl get pods\n+```\n+\n+Let's look into the pod for more information:\n+```\n+kubectl describe pod hxe-<<value>>\n+kubectl logs hxe-<<value>>\n+```\n+You can check if SAP HANA, express edition is running by using `HDB info` inside the pod with `kubectl exec -it hxe-<<value>> bash`.\n+\n+### Step 5: Get access to the database\n+The container is running and pods are available inside the Kubernetes cluster. Now, you can create a [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/) to reach the pod.\n+\n+`kubectl expose deployment hxe --name=hxe-svc --type=LoadBalancer --port=39013`\n+\n+This example exposes the pod on port 39013. With `kubectl get svc` you can check the assigned floating IP.\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add guide to deploy hana express on kubernikus
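Beyond `kubectl get svc`, the exposed database can be sanity-checked end-to-end with a small SQL ping from code. The sketch below is illustrative only and assumes the community SAP driver `github.com/SAP/go-hdb` (which registers itself as `"hdb"`); `<FLOATING_IP>` stands for the load balancer address from Step 5, and the credentials mirror the sample `master_password` from `hxe.yaml`.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	// assumption: the SAP-maintained community Go driver; registers as "hdb"
	_ "github.com/SAP/go-hdb/driver"
)

func main() {
	// SYSTEM/HXEHana1 match the sample password in hxe.yaml; replace
	// <FLOATING_IP> with the address reported by `kubectl get svc`
	db, err := sql.Open("hdb", "hdb://SYSTEM:HXEHana1@<FLOATING_IP>:39013")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// DUMMY is HANA's built-in one-row table, handy for a liveness probe
	var one int
	if err := db.QueryRow("SELECT 1 FROM DUMMY").Scan(&one); err != nil {
		log.Fatal(err) // connection or authentication problem
	}
	fmt.Println("HANA, express edition is reachable")
}
```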
596,240
22.06.2018 13:23:04
-7,200
531bdaa7b30a1738582dddbfac24fd90798cc541
Update node_test.go Adds more leeway for VM creations
[ { "change_type": "MODIFY", "old_path": "test/e2e/node_test.go", "new_path": "test/e2e/node_test.go", "diff": "@@ -19,7 +19,7 @@ import (\nconst (\n// Incremental Increasing TImeout\n- StateRunningTimeout = 1 * time.Minute // Time from cluster ready to nodes being created\n+ StateRunningTimeout = 5 * time.Minute // Time from cluster ready to nodes being created\nRegisteredTimeout = 15 * time.Minute // Time from node created to registered\nStateSchedulableTimeout = 1 * time.Minute // Time from registered to schedulable\nStateHealthyTimeout = 1 * time.Minute\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Update node_test.go Adds more leeway for VM creations
596,240
27.06.2018 10:11:13
-7,200
09a892f33b0de5e3b5d30d5009c542b7325b4eb8
suspected fix for left-over pvs
[ { "change_type": "MODIFY", "old_path": "pkg/controller/deorbit/controller.go", "new_path": "pkg/controller/deorbit/controller.go", "diff": "@@ -113,17 +113,13 @@ func (d *DeorbitReconciler) doDeorbit(deorbiter Deorbiter) (err error) {\nreturn err\n}\n- if len(deletedPVCs) > 0 {\nif err := deorbiter.WaitForPersistentVolumeCleanup(); err != nil {\nreturn err\n}\n- }\n- if len(deletedServices) > 0 {\nif err := deorbiter.WaitForServiceCleanup(); err != nil {\nreturn err\n}\n- }\nreturn nil\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
suspected fix for left-over pvs
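Removing the `len(...) > 0` guards makes both waits unconditional: even when this particular reconciliation deleted no PVCs or Services itself (for example because an earlier, interrupted run already issued the deletes), the deorbiter still blocks until the backing cloud resources are actually gone. A rough, hypothetical sketch of such an unconditional wait with client-go of that era; the real logic lives in the Deorbiter implementation.

```go
import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPVCleanup polls until no PersistentVolumes remain, regardless of
// whether the current run deleted any PVCs itself.
func waitForPVCleanup(client kubernetes.Interface, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		pvs, err := client.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pvs.Items) == 0, nil
	})
}
```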
596,240
27.06.2018 12:49:14
-7,200
a6359e5272785ed213e722b5c804c4a29fec3b87
Clean-Up Errored Instances This commit adds functionality to the flight controller that deletes instances in Error state. Recreation works in tandem with launch control which just ignores errored VMs.
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/kluster/node.go", "new_path": "pkg/client/openstack/kluster/node.go", "diff": "@@ -74,6 +74,14 @@ func (n *Node) Running() bool {\nreturn false\n}\n+func (n *Node) Erroring() bool {\n+ if n.TaskState == \"deleting\" {\n+ return false\n+ }\n+\n+ return n.VMState == \"error\"\n+}\n+\nfunc (n *Node) GetID() string {\nreturn n.ID\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/controller.go", "new_path": "pkg/controller/flight/controller.go", "diff": "@@ -16,30 +16,47 @@ import (\n// This controller takes care about Kluster health. It looks for obvious\n// problems and tries to repair them.\n//\n-// Currently implemented are the following helpers. See docs/controllers.md for more\n-// in depth explanation why these are required.\n+// Currently implemented are the following helpers. See docs/controllers.md for\n+// more in depth explanation why these are required.\n//\n//\n// Delete Incompletely Spawned Instances:\n//\n-// It deletes Nodes that didn't manage to register within 10m after\n-// initial creation. This is a workaround for DHCP/DVS (latency) issues. In effect\n-// it will delete the incompletely spawned node and launch control will ramp it\n+// It deletes Nodes that didn't manage to register within 10m after initial\n+// creation. This is a workaround for DHCP/DVS (latency) issues. In effect it\n+// will delete the incompletely spawned node and launch control will ramp it\n// back up.\n//\n+//\n+// Delete Errored Instances:\n+//\n+// It deletes Nodes that are in state \"error\". This can have various causes. It\n+// frequently happens when instances are interrupted with another instance\n+// action while being spawned. Most interesstingly a Node also goes into\n+// \"error\" if the creation takes longer than the validity of the Keystone token\n+// used to create the VM. Instance create is a sequence of actions that happen\n+// sequentially in the same Nova request. If those take too long the next\n+// action in line will fail with an authentication error. This sets the VM into\n+// \"error\" state.\n+// FlightControl will pick these up and delete them. GroundControl on the\n+// other hand ignores errored instances and just creates additional nodes. This\n+// leads to quota exhaustion and left-over instances. This could be the ultimate\n+// battle.\n+//\n+//\n// Ensure Pod-to-Pod Communication via Security Group Rules:\n//\n// It ensures tcp/udp/icmp rules exist in the security group defined during\n-// kluster creation. The rules explicitly allow all pod-to-pod\n-// communication. This is a workaround for Neutron missing the\n-// side-channel security group events.\n+// kluster creation. The rules explicitly allow all pod-to-pod communication.\n+// This is a workaround for Neutron missing the side-channel security group\n+// events.\n//\n//\n// Ensure Nodes belong to the security group:\n//\n-// It ensures each Nodes is member of the security group defined in\n-// the kluster spec. This ensures missing security groups due to whatever\n-// reason are again added to the node.\n+// It ensures each Nodes is member of the security group defined in the kluster\n+// spec. 
This ensures missing security groups due to whatever reason are again\n+// added to the node.\ntype FlightController struct {\nFactory FlightReconcilerFactory\n@@ -66,6 +83,7 @@ func (d *FlightController) Reconcile(kluster *v1.Kluster) (bool, error) {\nreconciler.EnsureKubernikusRuleInSecurityGroup()\nreconciler.EnsureInstanceSecurityGroupAssignment()\nreconciler.DeleteIncompletelySpawnedInstances()\n+ reconciler.DeleteErroredInstances()\nreturn false, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/controller_test.go", "new_path": "pkg/controller/flight/controller_test.go", "diff": "@@ -37,6 +37,11 @@ func (m *MockFlightReconciler) DeleteIncompletelySpawnedInstances() []string {\nreturn args.Get(0).([]string)\n}\n+func (m *MockFlightReconciler) DeleteErroredInstances() []string {\n+ args := m.Called()\n+ return args.Get(0).([]string)\n+}\n+\nfunc TestReconcile(t *testing.T) {\nkluster := &v1.Kluster{}\n@@ -44,6 +49,7 @@ func TestReconcile(t *testing.T) {\nreconciler.On(\"EnsureKubernikusRuleInSecurityGroup\").Return(true)\nreconciler.On(\"EnsureInstanceSecurityGroupAssignment\").Return([]string{})\nreconciler.On(\"DeleteIncompletelySpawnedInstances\").Return([]string{})\n+ reconciler.On(\"DeleteErroredInstances\").Return([]string{})\nfactory := &MockFlightReconcilerFactory{}\nfactory.On(\"FlightReconciler\", kluster).Return(reconciler, nil)\n@@ -56,4 +62,5 @@ func TestReconcile(t *testing.T) {\nreconciler.AssertCalled(t, \"EnsureKubernikusRuleInSecurityGroup\")\nreconciler.AssertCalled(t, \"EnsureInstanceSecurityGroupAssignment\")\nreconciler.AssertCalled(t, \"DeleteIncompletelySpawnedInstances\")\n+ reconciler.AssertCalled(t, \"DeleteErroredInstances\")\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/instance.go", "new_path": "pkg/controller/flight/instance.go", "diff": "@@ -7,4 +7,5 @@ type Instance interface {\nGetName() string\nGetSecurityGroupNames() []string\nGetCreated() time.Time\n+ Erroring() bool\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/logging.go", "new_path": "pkg/controller/flight/logging.go", "diff": "@@ -35,6 +35,18 @@ func (f *LoggingFlightReconciler) DeleteIncompletelySpawnedInstances() []string\nreturn ids\n}\n+func (f *LoggingFlightReconciler) DeleteErroredInstances() []string {\n+ ids := f.Reconciler.DeleteErroredInstances()\n+ if len(ids) > 0 {\n+ f.Logger.Log(\n+ \"msg\", \"deleted errored instances\",\n+ \"nodes\", strings.Join(ids, \",\"),\n+ \"v\", 2,\n+ )\n+ }\n+ return ids\n+}\n+\nfunc (f *LoggingFlightReconciler) EnsureKubernikusRuleInSecurityGroup() bool {\nensured := f.Reconciler.EnsureKubernikusRuleInSecurityGroup()\nif ensured {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/reconciler.go", "new_path": "pkg/controller/flight/reconciler.go", "diff": "@@ -17,6 +17,7 @@ const (\ntype FlightReconciler interface {\nEnsureInstanceSecurityGroupAssignment() []string\nDeleteIncompletelySpawnedInstances() []string\n+ DeleteErroredInstances() []string\nEnsureKubernikusRuleInSecurityGroup() bool\n}\n@@ -94,6 +95,34 @@ func (f *flightReconciler) DeleteIncompletelySpawnedInstances() []string {\nreturn deletedInstanceIDs\n}\n+func (f *flightReconciler) DeleteErroredInstances() []string {\n+ deletedInstanceIDs := []string{}\n+ erroredInstances := f.getErroredInstances()\n+\n+ for _, errored := range erroredInstances {\n+ if err := f.Client.DeleteNode(errored.GetID()); err != nil {\n+ f.Logger.Log(\n+ \"msg\", \"couldn't delete errored instance\",\n+ \"instance\", 
errored.GetID(),\n+ \"err\", err)\n+ continue\n+ }\n+ deletedInstanceIDs = append(deletedInstanceIDs, errored.GetID())\n+ }\n+\n+ return deletedInstanceIDs\n+}\n+\n+func (f *flightReconciler) getErroredInstances() []Instance {\n+ errored := []Instance{}\n+ for _, instance := range f.Instances {\n+ if instance.Erroring() {\n+ errored = append(errored, instance)\n+ }\n+ }\n+ return errored\n+}\n+\nfunc (f *flightReconciler) getTimedOutInstances() []Instance {\ntimedOut := []Instance{}\nfor _, instance := range f.Instances {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/flight/reconciler_test.go", "new_path": "pkg/controller/flight/reconciler_test.go", "diff": "@@ -21,6 +21,7 @@ type fakeInstance struct {\nName string\nCreated time.Time\nSecurityGroupNames []string\n+ Errored bool\n}\nfunc (f *fakeInstance) GetID() string {\n@@ -39,6 +40,10 @@ func (f *fakeInstance) GetCreated() time.Time {\nreturn f.Created\n}\n+func (f *fakeInstance) Erroring() bool {\n+ return f.Errored\n+}\n+\ntype MockKlusterClient struct {\nmock.Mock\n}\n@@ -160,6 +165,39 @@ func TestDeleteIncompletelySpawnedInstances(t *testing.T) {\nassert.ElementsMatch(t, ids, []string{\"a\", \"c\"})\n}\n+func TestDeleteErroredInstances(t *testing.T) {\n+ kluster := &v1.Kluster{}\n+ nodes := []*core_v1.Node{}\n+\n+ instances := []Instance{\n+ &fakeInstance{ID: \"a\", Name: \"a\", Errored: true},\n+ &fakeInstance{ID: \"b\", Name: \"b\", Errored: false},\n+ &fakeInstance{ID: \"c\", Name: \"c\", Errored: true},\n+ &fakeInstance{ID: \"d\", Name: \"d\", Errored: true},\n+ }\n+\n+ client := &MockKlusterClient{}\n+ client.On(\"DeleteNode\", \"a\").Return(nil)\n+ client.On(\"DeleteNode\", \"b\").Return(nil)\n+ client.On(\"DeleteNode\", \"c\").Return(fmt.Errorf(\"Boom\"))\n+ client.On(\"DeleteNode\", \"d\").Return(nil)\n+\n+ reconciler := flightReconciler{\n+ kluster,\n+ instances,\n+ nodes,\n+ client,\n+ log.NewNopLogger(),\n+ }\n+\n+ ids := reconciler.DeleteErroredInstances()\n+ client.AssertCalled(t, \"DeleteNode\", \"a\")\n+ client.AssertCalled(t, \"DeleteNode\", \"c\")\n+ client.AssertCalled(t, \"DeleteNode\", \"d\")\n+ client.AssertNotCalled(t, \"DeleteNode\", \"b\")\n+ assert.ElementsMatch(t, ids, []string{\"a\", \"d\"})\n+}\n+\nfunc TestEnsureKubernikusRuleInSecurityGroup(t *testing.T) {\nkluster := &v1.Kluster{}\ninstances := []Instance{}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Clean-Up Errored Instances This commit adds functionality to the flight controller that deletes instances in Error state. Recreation works in tandem with launch control which just ignores errored VMs.
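The `VMState`/`TaskState` fields that `Erroring()` consults correspond to Nova's extended status attributes (`OS-EXT-STS:vm_state` and `OS-EXT-STS:task_state`). A hedged sketch of how such fields can be populated with gophercloud; the embedding and field names are illustrative, not necessarily the repo's exact wiring.

```go
import (
	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
)

// A Node-like struct: embedding ServerExtendedStatusExt lets gophercloud
// unmarshal OS-EXT-STS:vm_state / OS-EXT-STS:task_state alongside the
// regular server fields when listing instances.
type server struct {
	servers.Server
	extendedstatus.ServerExtendedStatusExt // provides VmState and TaskState
}
```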
596,240
27.06.2018 15:31:32
-7,200
bcc1631e1e259f13369ffeeaa602c1c35953ecf6
increases termination timeout. needs more time now for volume deletion
[ { "change_type": "MODIFY", "old_path": "test/e2e/cleanup_test.go", "new_path": "test/e2e/cleanup_test.go", "diff": "@@ -17,7 +17,7 @@ import (\nconst (\nKlusterPhaseBecomesTerminatingTimeout = 1 * time.Minute\n- WaitForKlusterToBeDeletedTimeout = 5 * time.Minute\n+ WaitForKlusterToBeDeletedTimeout = 10 * time.Minute\n)\ntype CleanupTests struct {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
increases termination timeout. needs more time now for volume deletion
596,225
10.07.2018 14:29:31
-7,200
a2e5238d630382f74842598edf0cee68d27fc08d
add missing alt-name to apiserver cert the x509 certs for apiserver lacked `kubernetes.default.svc` as alternative name
[ { "change_type": "MODIFY", "old_path": "pkg/util/certificates.go", "new_path": "pkg/util/certificates.go", "diff": "@@ -194,7 +194,7 @@ func CreateCertificates(kluster *v1.Kluster, apiURL, authURL, domain string) (ma\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n- []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain)},\n+ []string{\"kubernetes\", \"kubernetes.default\", \"kubernetes.default.svc\", \"apiserver\", kluster.Name, fmt.Sprintf(\"%v.%v\", kluster.Name, domain)},\n[]net.IP{net.IPv4(127, 0, 0, 1), apiIP})\ncerts.TLS.Wormhole = certs.signTLS(\"wormhole\",\n[]string{fmt.Sprintf(\"%v-wormhole.%v\", kluster.Name, domain)}, []net.IP{})\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add missing alt-name to apiserver cert the x509 certs for apiserver lacked `kubernetes.default.svc` as alternative name
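When adjusting SANs like this, it is worth verifying that the generated certificate actually carries all expected names. A small stdlib-only sketch (hypothetical helper, not part of the repo) that decodes a PEM certificate and prints its DNS SANs:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	pemBytes, err := ioutil.ReadFile("apiserver.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// after this commit the list should include kubernetes.default.svc
	fmt.Println("DNS SANs:", cert.DNSNames)
}
```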
596,240
02.08.2018 16:31:20
-7,200
898e789d0f66e838328f20fa7950b6ddc958e771
add networks by flavor convention
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/kluster/client.go", "new_path": "pkg/client/openstack/kluster/client.go", "diff": "@@ -2,6 +2,7 @@ package kluster\nimport (\n\"fmt\"\n+ \"strings\"\n\"time\"\n\"github.com/gophercloud/gophercloud\"\n@@ -63,11 +64,30 @@ func NewKlusterClient(network, compute, identity *gophercloud.ServiceClient, klu\nfunc (c *klusterClient) CreateNode(pool *models.NodePool, name string, userData []byte) (string, error) {\nconfigDrive := true\n+\n+ networks := []servers.Network{{UUID: c.Kluster.Spec.Openstack.NetworkID}}\n+\n+ if strings.HasPrefix(pool.Flavor, \"zh\") {\n+ networks = []servers.Network{\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ }\n+ }\n+\n+ if strings.HasPrefix(pool.Flavor, \"zg\") {\n+ networks = []servers.Network{\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ {UUID: c.Kluster.Spec.Openstack.NetworkID},\n+ }\n+ }\n+\nserver, err := compute.Create(c.ComputeClient, servers.CreateOpts{\nName: name,\nFlavorName: pool.Flavor,\nImageName: pool.Image,\n- Networks: []servers.Network{{UUID: c.Kluster.Spec.Openstack.NetworkID}},\n+ Networks: networks,\nUserData: userData,\nServiceClient: c.ComputeClient,\nSecurityGroups: []string{c.Kluster.Spec.Openstack.SecurityGroupName},\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add networks by flavor convention
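The convention encoded above (flavor names starting with "zh" get four ports, "zg" two, everything else one) could also be expressed as a small helper, which keeps `CreateNode` free of the repeated literals. This is only an illustrative refactoring sketch, not what the commit ships:

```go
import (
	"strings"

	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
)

// nicCountForFlavor mirrors the flavor-name convention from the diff above:
// "zh*" flavors get four NICs, "zg*" two, everything else one.
func nicCountForFlavor(flavor string) int {
	switch {
	case strings.HasPrefix(flavor, "zh"):
		return 4
	case strings.HasPrefix(flavor, "zg"):
		return 2
	default:
		return 1
	}
}

// usage inside CreateNode (sketch): attach the same network N times
func networksFor(networkID, flavor string) []servers.Network {
	networks := make([]servers.Network, nicCountForFlavor(flavor))
	for i := range networks {
		networks[i] = servers.Network{UUID: networkID}
	}
	return networks
}
```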
596,240
08.08.2018 15:11:49
-7,200
d7e8e380d2da4404e16d4e7f3e1ac7bad2865f63
bootstraps nvidia device-plugin and nvidia driver. fixes
[ { "change_type": "ADD", "old_path": null, "new_path": "contrib/nvidia-gpu/device-plugin-damonset.yaml", "diff": "+apiVersion: apps/v1\n+kind: DaemonSet\n+metadata:\n+ name: nvidia-gpu-device-plugin\n+ namespace: kube-system\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ addonmanager.kubernetes.io/mode: Reconcile\n+spec:\n+ selector:\n+ matchLabels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ template:\n+ metadata:\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ annotations:\n+ scheduler.alpha.kubernetes.io/critical-pod: ''\n+ spec:\n+ priorityClassName: system-node-critical\n+ affinity:\n+ nodeAffinity:\n+ requiredDuringSchedulingIgnoredDuringExecution:\n+ nodeSelectorTerms:\n+ - matchExpressions:\n+ - key: ccloud.sap.com/nvidia-gpu\n+ operator: Exists\n+ tolerations:\n+ - operator: \"Exists\"\n+ effect: \"NoExecute\"\n+ - operator: \"Exists\"\n+ effect: \"NoSchedule\"\n+ volumes:\n+ - name: device-plugin\n+ hostPath:\n+ path: /var/lib/kubelet/device-plugins\n+ - name: dev\n+ hostPath:\n+ path: /dev\n+ containers:\n+ - image: \"k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e\"\n+ command:\n+ - /usr/bin/nvidia-gpu-device-plugin\n+ - -logtostderr\n+ - -host-path=/opt/nvidia/current/lib64\n+ - -container-path=/usr/local/nvidia/lib64\n+ name: nvidia-gpu-device-plugin\n+ resources:\n+ requests:\n+ cpu: 50m\n+ memory: 10Mi\n+ limits:\n+ cpu: 50m\n+ memory: 10Mi\n+ securityContext:\n+ privileged: true\n+ volumeMounts:\n+ - name: device-plugin\n+ mountPath: /device-plugin\n+ - name: dev\n+ mountPath: /dev\n+ updateStrategy:\n+ type: RollingUpdate\n" }, { "change_type": "ADD", "old_path": null, "new_path": "contrib/nvidia-gpu/device-plugin.yaml", "diff": "+apiVersion: apps/v1\n+kind: DaemonSet\n+metadata:\n+ name: nvidia-gpu-device-plugin\n+ namespace: kube-system\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ addonmanager.kubernetes.io/mode: Reconcile\n+spec:\n+ selector:\n+ matchLabels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ template:\n+ metadata:\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ annotations:\n+ scheduler.alpha.kubernetes.io/critical-pod: ''\n+ spec:\n+ affinity:\n+ nodeAffinity:\n+ requiredDuringSchedulingIgnoredDuringExecution:\n+ nodeSelectorTerms:\n+ - matchExpressions:\n+ - key: ccloud.sap.com/nvidia-gpu\n+ operator: Exists\n+ tolerations:\n+ - operator: \"Exists\"\n+ effect: \"NoExecute\"\n+ - operator: \"Exists\"\n+ effect: \"NoSchedule\"\n+ volumes:\n+ - name: device-plugin\n+ hostPath:\n+ path: /var/lib/kubelet/device-plugins\n+ - name: dev\n+ hostPath:\n+ path: /dev\n+ containers:\n+ - image: \"k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e\"\n+ command:\n+ - /usr/bin/nvidia-gpu-device-plugin\n+ - -logtostderr\n+ - -host-path=/opt/nvidia/current/lib64\n+ - -container-path=/usr/local/nvidia/lib64\n+ name: nvidia-gpu-device-plugin\n+ resources:\n+ requests:\n+ cpu: 50m\n+ memory: 10Mi\n+ limits:\n+ cpu: 50m\n+ memory: 10Mi\n+ securityContext:\n+ privileged: true\n+ volumeMounts:\n+ - name: device-plugin\n+ mountPath: /device-plugin\n+ - name: dev\n+ mountPath: /dev\n+ updateStrategy:\n+ type: RollingUpdate\n" }, { "change_type": "ADD", "old_path": null, "new_path": "contrib/nvidia-gpu/driver-daemonset.yaml", "diff": "+apiVersion: apps/v1\n+kind: DaemonSet\n+metadata:\n+ name: nvidia-driver-installer\n+ namespace: kube-system\n+ labels:\n+ k8s-app: nvidia-driver-installer\n+spec:\n+ selector:\n+ matchLabels:\n+ k8s-app: 
nvidia-driver-installer\n+ updateStrategy:\n+ type: RollingUpdate\n+ template:\n+ metadata:\n+ labels:\n+ name: nvidia-driver-installer\n+ k8s-app: nvidia-driver-installer\n+ spec:\n+ affinity:\n+ nodeAffinity:\n+ requiredDuringSchedulingIgnoredDuringExecution:\n+ nodeSelectorTerms:\n+ - matchExpressions:\n+ - key: ccloud.sap.com/nvidia-gpu\n+ operator: Exists\n+ tolerations:\n+ - key: \"nvidia.com/gpu\"\n+ effect: \"NoSchedule\"\n+ operator: \"Exists\"\n+ hostNetwork: true\n+ hostPID: true\n+ volumes:\n+ - name: rootfs\n+ hostPath:\n+ path: /\n+ initContainers:\n+ - image: bugroger/coreos-nvidia-driver:stable-396.44-tesla\n+ name: nvidia-driver-installer\n+ imagePullPolicy: Always\n+ resources:\n+ requests:\n+ cpu: 0.15\n+ securityContext:\n+ privileged: true\n+ volumeMounts:\n+ - name: rootfs\n+ mountPath: /root\n+ mountPropagation: Bidirectional\n+ containers:\n+ - image: \"gcr.io/google-containers/pause:2.0\"\n+ name: pause\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap.go", "new_path": "pkg/controller/ground/bootstrap.go", "diff": "@@ -10,7 +10,9 @@ import (\nclientset \"k8s.io/client-go/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/dns\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/gpu\"\n)\nfunc SeedKluster(client clientset.Interface, kluster *v1.Kluster) error {\n@@ -38,6 +40,9 @@ func SeedKluster(client clientset.Interface, kluster *v1.Kluster) error {\nif err := dns.SeedKubeDNS(client, \"\", \"\", kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {\nreturn err\n}\n+ if err := gpu.SeedGPUSupport(client); err != nil {\n+ return err\n+ }\nreturn nil\n}\n@@ -66,7 +71,7 @@ func SeedCinderStorageClass(client clientset.Interface) error {\n}\nfunc SeedKubernikusAdmin(client clientset.Interface) error {\n- return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:admin\",\n},\n@@ -85,7 +90,7 @@ func SeedKubernikusAdmin(client clientset.Interface) error {\n}\nfunc SeedKubernikusMember(client clientset.Interface) error {\n- return CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{\n+ return bootstrap.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:member\",\nNamespace: \"default\",\n@@ -105,7 +110,7 @@ func SeedKubernikusMember(client clientset.Interface) error {\n}\nfunc SeedAllowBootstrapTokensToPostCSRs(client clientset.Interface) error {\n- return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:kubelet-bootstrap\",\n},\n@@ -124,7 +129,7 @@ func SeedAllowBootstrapTokensToPostCSRs(client clientset.Interface) error {\n}\nfunc SeedAllowApiserverToAccessKubeletAPI(client clientset.Interface) error {\n- return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:apiserver-kubeletapi\",\n},\n@@ -144,7 +149,7 @@ func SeedAllowApiserverToAccessKubeletAPI(client clientset.Interface) error {\n// addresses https://github.com/kubernetes/kubernetes/issues/59351\nfunc 
SeedAllowCertificateControllerToDeleteCSRs(client clientset.Interface) error {\n- return CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\n+ return bootstrap.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\nObjectMeta: metav1.ObjectMeta{\nName: \"system:controller:certificate-controller\",\nAnnotations: map[string]string{\n@@ -180,7 +185,7 @@ func SeedAllowCertificateControllerToDeleteCSRs(client clientset.Interface) erro\n}\nfunc SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {\n- err := CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\n+ err := bootstrap.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:approve-node-client-csr\",\n},\n@@ -196,7 +201,7 @@ func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {\nreturn err\n}\n- return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\nObjectMeta: metav1.ObjectMeta{\nName: \"kubernikus:node-client-csr-autoapprove\",\n},\n@@ -213,42 +218,3 @@ func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {\n},\n})\n}\n-\n-func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {\n- if _, err := client.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create RBAC clusterrolebinding: %v\", err)\n- }\n-\n- if _, err := client.RbacV1beta1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil {\n- return fmt.Errorf(\"unable to update RBAC clusterrolebinding: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error {\n- if _, err := client.RbacV1beta1().RoleBindings(roleBinding.Namespace).Create(roleBinding); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create RBAC rolebinding: %v\", err)\n- }\n-\n- if _, err := client.RbacV1beta1().RoleBindings(roleBinding.Namespace).Update(roleBinding); err != nil {\n- return fmt.Errorf(\"unable to update RBAC rolebinding: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {\n- if _, err := client.RbacV1beta1().ClusterRoles().Create(clusterRole); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create RBAC clusterrole: %v\", err)\n- }\n-\n- if _, err := client.RbacV1beta1().ClusterRoles().Update(clusterRole); err != nil {\n- return fmt.Errorf(\"unable to update RBAC clusterrole: %v\", err)\n- }\n- }\n- return nil\n-}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap/dns/dns.go", "new_path": "pkg/controller/ground/bootstrap/dns/dns.go", "diff": "package dns\nimport (\n- \"bytes\"\n\"errors\"\n- \"fmt\"\n- \"html/template\"\n\"k8s.io/api/core/v1\"\nextensions \"k8s.io/api/extensions/v1beta1\"\n- apierrors \"k8s.io/apimachinery/pkg/api/errors\"\nmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\"k8s.io/apimachinery/pkg/runtime/serializer\"\nclientset \"k8s.io/client-go/kubernetes\"\nclientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n\"github.com/sapcc/kubernikus/pkg/api/spec\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap\"\n)\nconst (\n@@ -75,7 +72,7 @@ func SeedKubeDNS(client clientset.Interface, repository, version, domain, 
cluste\n}\nfunc createKubeDNSServiceAccount(client clientset.Interface) error {\n- return CreateOrUpdateServiceAccount(client, &v1.ServiceAccount{\n+ return bootstrap.CreateOrUpdateServiceAccount(client, &v1.ServiceAccount{\nObjectMeta: metav1.ObjectMeta{\nName: SERVICE_ACCOUNT,\nNamespace: metav1.NamespaceSystem,\n@@ -88,7 +85,7 @@ func createKubeDNSServiceAccount(client clientset.Interface) error {\n}\nfunc createKubeDNSConfigMap(client clientset.Interface) error {\n- return CreateOrUpdateConfigMap(client, &v1.ConfigMap{\n+ return bootstrap.CreateOrUpdateConfigMap(client, &v1.ConfigMap{\nObjectMeta: metav1.ObjectMeta{\nName: CONFIGMAP,\nNamespace: metav1.NamespaceSystem,\n@@ -111,7 +108,7 @@ func createKubeDNSDeployment(client clientset.Interface, repository, version, do\nreturn err\n}\n- if err := CreateOrUpdateDeployment(client, deployment); err != nil {\n+ if err := bootstrap.CreateOrUpdateDeployment(client, deployment); err != nil {\nreturn err\n}\n@@ -128,7 +125,7 @@ func createKubeDNSService(client clientset.Interface, clusterIP string) error {\nreturn err\n}\n- if err := CreateOrUpdateService(client, service); err != nil {\n+ if err := bootstrap.CreateOrUpdateService(client, service); err != nil {\nreturn err\n}\n@@ -138,7 +135,7 @@ func createKubeDNSService(client clientset.Interface, clusterIP string) error {\nfunc getKubeDNSDeployment(options *DeploymentOptions) (*extensions.Deployment, error) {\nmanifest := KubeDNSDeployment_v20171016\n- template, err := RenderManifest(manifest, options)\n+ template, err := bootstrap.RenderManifest(manifest, options)\nif err != nil {\nreturn nil, err\n}\n@@ -154,7 +151,7 @@ func getKubeDNSDeployment(options *DeploymentOptions) (*extensions.Deployment, e\nfunc getKubeDNSService(options *ServiceOptions) (*v1.Service, error) {\nmanifest := KubeDNSService_v20171016\n- template, err := RenderManifest(manifest, options)\n+ template, err := bootstrap.RenderManifest(manifest, options)\nif err != nil {\nreturn nil, err\n}\n@@ -166,64 +163,3 @@ func getKubeDNSService(options *ServiceOptions) (*v1.Service, error) {\nreturn service.(*v1.Service), nil\n}\n-\n-func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error {\n- if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(sa); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create serviceaccount: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func CreateOrUpdateDeployment(client clientset.Interface, deploy *extensions.Deployment) error {\n- if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create deployment: %v\", err)\n- }\n-\n- if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil {\n- return fmt.Errorf(\"unable to update deployment: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func CreateOrUpdateService(client clientset.Interface, service *v1.Service) error {\n- if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(service); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create a new kube-dns service: %v\", err)\n- }\n-\n- if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(service); err != nil {\n- return fmt.Errorf(\"unable to create/update the kube-dns service: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func CreateOrUpdateConfigMap(client 
clientset.Interface, configmap *v1.ConfigMap) error {\n- if _, err := client.CoreV1().ConfigMaps(configmap.ObjectMeta.Namespace).Create(configmap); err != nil {\n- if !apierrors.IsAlreadyExists(err) {\n- return fmt.Errorf(\"unable to create configmap: %v\", err)\n- }\n-\n- if _, err := client.CoreV1().ConfigMaps(configmap.ObjectMeta.Namespace).Update(configmap); err != nil {\n- return fmt.Errorf(\"unable to update configmap: %v\", err)\n- }\n- }\n- return nil\n-}\n-\n-func RenderManifest(strtmpl string, obj interface{}) ([]byte, error) {\n- var buf bytes.Buffer\n- tmpl, err := template.New(\"template\").Parse(strtmpl)\n- if err != nil {\n- return nil, fmt.Errorf(\"error when parsing template: %v\", err)\n- }\n- err = tmpl.Execute(&buf, obj)\n- if err != nil {\n- return nil, fmt.Errorf(\"error when executing template: %v\", err)\n- }\n- return buf.Bytes(), nil\n-}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/ground/bootstrap/gpu/gpu.go", "diff": "+package gpu\n+\n+import (\n+ extensions \"k8s.io/api/extensions/v1beta1\"\n+ \"k8s.io/apimachinery/pkg/runtime/serializer\"\n+ clientset \"k8s.io/client-go/kubernetes\"\n+ clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n+\n+ \"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap\"\n+)\n+\n+func SeedGPUSupport(client clientset.Interface) error {\n+ if err := createDaemonSet(client, NVIDIADevicePlugin_v20180808); err != nil {\n+ return err\n+ }\n+ if err := createDaemonSet(client, NVIDIADriverInstaller_v20180808); err != nil {\n+ return err\n+ }\n+ return nil\n+}\n+\n+func createDaemonSet(client clientset.Interface, manifest string) error {\n+ template, err := bootstrap.RenderManifest(manifest, nil)\n+ if err != nil {\n+ return err\n+ }\n+\n+ daemonset, _, err := serializer.NewCodecFactory(clientsetscheme.Scheme).UniversalDeserializer().Decode(template, nil, &extensions.DaemonSet{})\n+ if err != nil {\n+ return err\n+ }\n+\n+ if err := bootstrap.CreateOrUpdateDaemonset(client, daemonset.(*extensions.DaemonSet)); err != nil {\n+ return err\n+ }\n+ return nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/ground/bootstrap/gpu/manifest.go", "diff": "+package gpu\n+\n+const (\n+ NVIDIADevicePlugin_v20180808 = `\n+apiVersion: apps/v1\n+kind: DaemonSet\n+metadata:\n+ name: nvidia-gpu-device-plugin\n+ namespace: kube-system\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ addonmanager.kubernetes.io/mode: Reconcile\n+spec:\n+ selector:\n+ matchLabels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ template:\n+ metadata:\n+ labels:\n+ k8s-app: nvidia-gpu-device-plugin\n+ annotations:\n+ scheduler.alpha.kubernetes.io/critical-pod: ''\n+ spec:\n+ priorityClassName: system-node-critical\n+ affinity:\n+ nodeAffinity:\n+ requiredDuringSchedulingIgnoredDuringExecution:\n+ nodeSelectorTerms:\n+ - matchExpressions:\n+ - key: ccloud.sap.com/nvidia-gpu\n+ operator: Exists\n+ tolerations:\n+ - operator: \"Exists\"\n+ effect: \"NoExecute\"\n+ - operator: \"Exists\"\n+ effect: \"NoSchedule\"\n+ volumes:\n+ - name: device-plugin\n+ hostPath:\n+ path: /var/lib/kubelet/device-plugins\n+ - name: dev\n+ hostPath:\n+ path: /dev\n+ containers:\n+ - image: \"k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e\"\n+ command:\n+ - /usr/bin/nvidia-gpu-device-plugin\n+ - -logtostderr\n+ - -host-path=/opt/nvidia/current/lib64\n+ - -container-path=/usr/local/nvidia/lib64\n+ name: nvidia-gpu-device-plugin\n+ resources:\n+ requests:\n+ cpu: 50m\n+ memory: 
10Mi\n+ limits:\n+ cpu: 50m\n+ memory: 10Mi\n+ securityContext:\n+ privileged: true\n+ volumeMounts:\n+ - name: device-plugin\n+ mountPath: /device-plugin\n+ - name: dev\n+ mountPath: /dev\n+ updateStrategy:\n+ type: RollingUpdate\n+`\n+\n+ NVIDIADriverInstaller_v20180808 = `\n+apiVersion: apps/v1\n+kind: DaemonSet\n+metadata:\n+ name: nvidia-driver-installer\n+ namespace: kube-system\n+ labels:\n+ k8s-app: nvidia-driver-installer\n+spec:\n+ selector:\n+ matchLabels:\n+ k8s-app: nvidia-driver-installer\n+ updateStrategy:\n+ type: RollingUpdate\n+ template:\n+ metadata:\n+ labels:\n+ name: nvidia-driver-installer\n+ k8s-app: nvidia-driver-installer\n+ spec:\n+ affinity:\n+ nodeAffinity:\n+ requiredDuringSchedulingIgnoredDuringExecution:\n+ nodeSelectorTerms:\n+ - matchExpressions:\n+ - key: ccloud.sap.com/nvidia-gpu\n+ operator: Exists\n+ tolerations:\n+ - key: \"nvidia.com/gpu\"\n+ effect: \"NoSchedule\"\n+ operator: \"Exists\"\n+ hostNetwork: true\n+ hostPID: true\n+ volumes:\n+ - name: rootfs\n+ hostPath:\n+ path: /\n+ initContainers:\n+ - image: bugroger/coreos-nvidia-driver:stable-396.44-tesla\n+ name: nvidia-driver-installer\n+ imagePullPolicy: Always\n+ resources:\n+ requests:\n+ cpu: 0.15\n+ securityContext:\n+ privileged: true\n+ volumeMounts:\n+ - name: rootfs\n+ mountPath: /root\n+ mountPropagation: Bidirectional\n+ containers:\n+ - image: \"gcr.io/google-containers/pause:2.0\"\n+ name: pause\n+`\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/ground/bootstrap/util.go", "diff": "+package bootstrap\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+ \"html/template\"\n+\n+ \"k8s.io/api/core/v1\"\n+ rbac \"k8s.io/api/rbac/v1beta1\"\n+ apierrors \"k8s.io/apimachinery/pkg/api/errors\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ clientset \"k8s.io/client-go/kubernetes\"\n+ extensions \"k8s.io/api/extensions/v1beta1\"\n+)\n+\n+func RenderManifest(strtmpl string, obj interface{}) ([]byte, error) {\n+ var buf bytes.Buffer\n+ tmpl, err := template.New(\"template\").Parse(strtmpl)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"error when parsing template: %v\", err)\n+ }\n+ err = tmpl.Execute(&buf, obj)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"error when executing template: %v\", err)\n+ }\n+ return buf.Bytes(), nil\n+}\n+\n+func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error {\n+ if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(sa); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create serviceaccount: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateDeployment(client clientset.Interface, deploy *extensions.Deployment) error {\n+ if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create deployment: %v\", err)\n+ }\n+\n+ if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil {\n+ return fmt.Errorf(\"unable to update deployment: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateService(client clientset.Interface, service *v1.Service) error {\n+ if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(service); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create a new kube-dns service: %v\", err)\n+ }\n+\n+ if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(service); err != 
nil {\n+ return fmt.Errorf(\"unable to create/update the kube-dns service: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateConfigMap(client clientset.Interface, configmap *v1.ConfigMap) error {\n+ if _, err := client.CoreV1().ConfigMaps(configmap.ObjectMeta.Namespace).Create(configmap); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create configmap: %v\", err)\n+ }\n+\n+ if _, err := client.CoreV1().ConfigMaps(configmap.ObjectMeta.Namespace).Update(configmap); err != nil {\n+ return fmt.Errorf(\"unable to update configmap: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateDaemonset(client clientset.Interface, daemonset *extensions.DaemonSet) error {\n+ if _, err := client.ExtensionsV1beta1().DaemonSets(daemonset.ObjectMeta.Namespace).Create(daemonset); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create deployment: %v\", err)\n+ }\n+\n+ if _, err := client.ExtensionsV1beta1().DaemonSets(daemonset.ObjectMeta.Namespace).Update(daemonset); err != nil {\n+ return fmt.Errorf(\"unable to update deployment: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {\n+ if _, err := client.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create RBAC clusterrolebinding: %v\", err)\n+ }\n+\n+ if _, err := client.RbacV1beta1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil {\n+ return fmt.Errorf(\"unable to update RBAC clusterrolebinding: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error {\n+ if _, err := client.RbacV1beta1().RoleBindings(roleBinding.Namespace).Create(roleBinding); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create RBAC rolebinding: %v\", err)\n+ }\n+\n+ if _, err := client.RbacV1beta1().RoleBindings(roleBinding.Namespace).Update(roleBinding); err != nil {\n+ return fmt.Errorf(\"unable to update RBAC rolebinding: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {\n+ if _, err := client.RbacV1beta1().ClusterRoles().Create(clusterRole); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create RBAC clusterrole: %v\", err)\n+ }\n+\n+ if _, err := client.RbacV1beta1().ClusterRoles().Update(clusterRole); err != nil {\n+ return fmt.Errorf(\"unable to update RBAC clusterrole: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
bootstraps nvidia device-plugin and nvidia driver. fixes #307
596,240
10.08.2018 09:28:46
-7,200
824b5fd03590dab88e8f3dc72b9571e308f2acf6
uses latest nvidia-device-plugin. fixes driver host-path
[ { "change_type": "MODIFY", "old_path": "contrib/nvidia-gpu/device-plugin-damonset.yaml", "new_path": "contrib/nvidia-gpu/device-plugin-damonset.yaml", "diff": "@@ -38,11 +38,11 @@ spec:\nhostPath:\npath: /dev\ncontainers:\n- - image: \"k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e\"\n+ - image: \"k8s.gcr.io/nvidia-gpu-device-plugin@sha256:c8ad9bb0b192c7cd156747e12ad1a091ef7ae8af2852fb08a96b61477807b279\"\ncommand:\n- /usr/bin/nvidia-gpu-device-plugin\n- -logtostderr\n- - -host-path=/opt/nvidia/current/lib64\n+ - -host-path=/usr/lib64\n- -container-path=/usr/local/nvidia/lib64\nname: nvidia-gpu-device-plugin\nresources:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses latest nvidia-device-plugin. fixes driver host-path
596,240
22.08.2018 14:27:29
-7,200
bb8e8258706d4cc6fd50a6f5d0efa378f4ecbd09
upgrade to v1.10.7
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/values.yaml", "new_path": "charts/kube-master/values.yaml", "diff": "# Declare variables to be passed into your templates.\nimage:\nrepository: sapcc/hyperkube\n- tag: v1.10.1\n+ tag: v1.10.7\npullPolicy: IfNotPresent\n# Settings for the openstack cloudprovider\n@@ -36,7 +36,7 @@ advertiseAddress: 198.18.128.1\nversion:\n# kubernikus:\n- kubernetes: 1.10.1\n+ kubernetes: 1.10.7\napi:\nreplicaCount: 1\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.10.go", "new_path": "pkg/templates/node_1.10.go", "diff": "@@ -87,7 +87,7 @@ systemd:\n--mount volume=var-log,target=/var/log \\\n--mount volume=etc-machine-id,target=/etc/machine-id \\\n--insecure-options=image\"\n- Environment=\"KUBELET_IMAGE_TAG=v1.10.1\"\n+ Environment=\"KUBELET_IMAGE_TAG=v1.10.7\"\nEnvironment=\"KUBELET_IMAGE_URL=docker://sapcc/hyperkube\"\nEnvironment=\"KUBELET_IMAGE_ARGS=--name=kubelet --exec=/kubelet\"\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\n@@ -164,7 +164,7 @@ systemd:\n--mount volume=lib-modules,target=/lib/modules \\\n--stage1-from-dir=stage1-fly.aci \\\n--insecure-options=image \\\n- docker://sapcc/hyperkube:v1.10.1 \\\n+ docker://sapcc/hyperkube:v1.10.7 \\\n--name kube-proxy \\\n--exec=/hyperkube \\\n-- \\\n" }, { "change_type": "MODIFY", "old_path": "pkg/util/constants.go", "new_path": "pkg/util/constants.go", "diff": "@@ -5,5 +5,5 @@ const (\nCA_ISSUER_KUBERNIKUS_IDENTIFIER_1 = \"Kubernikus\"\n// This is the default Kubernetes version that clusters are created in\n- DEFAULT_KUBERNETES_VERSION = \"1.10.1\"\n+ DEFAULT_KUBERNETES_VERSION = \"1.10.7\"\n)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
upgrade to v1.10.7
596,240
22.08.2018 17:12:35
-7,200
336d178dd6fb5499d1e50d21df5becdd35869dc5
fix rkt root pathing issue
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.7.go", "new_path": "pkg/templates/node_1.7.go", "diff": "@@ -131,7 +131,7 @@ systemd:\n--stage1-from-dir=stage1-fly.aci \\\nquay.io/coreos/hyperkube:v1.7.5_coreos.0 \\\n--name=kube-proxy \\\n- --exec=hyperkube \\\n+ --exec=/hyperkube \\\n-- \\\nproxy \\\n--config=/etc/kubernetes/kube-proxy/config\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.8.go", "new_path": "pkg/templates/node_1.8.go", "diff": "@@ -131,7 +131,7 @@ systemd:\n--stage1-from-dir=stage1-fly.aci \\\nquay.io/coreos/hyperkube:v1.8.5_coreos.0 \\\n--name=kube-proxy \\\n- --exec=hyperkube \\\n+ --exec=/hyperkube \\\n-- \\\nproxy \\\n--config=/etc/kubernetes/kube-proxy/config\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.9.go", "new_path": "pkg/templates/node_1.9.go", "diff": "@@ -131,7 +131,7 @@ systemd:\n--stage1-from-dir=stage1-fly.aci \\\nquay.io/coreos/hyperkube:v1.9.0_coreos.0 \\\n--name=kube-proxy \\\n- --exec=hyperkube \\\n+ --exec=/hyperkube \\\n-- \\\nproxy \\\n--config=/etc/kubernetes/kube-proxy/config\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fix rkt root pathing issue
596,240
23.08.2018 11:18:30
-7,200
fa92b92eea3e6639f4eea3bbc3fb333b92c9511c
adds pyrolize tasks to terminate e2e klusters from previous incomplete test runs
[ { "change_type": "MODIFY", "old_path": "test/e2e/framework/kubernikus.go", "new_path": "test/e2e/framework/kubernikus.go", "diff": "@@ -4,6 +4,7 @@ import (\n\"fmt\"\n\"net/url\"\n\"os\"\n+ \"strings\"\n\"time\"\n\"github.com/go-openapi/runtime\"\n@@ -124,3 +125,31 @@ func (k *Kubernikus) WaitForKlusterToBeDeleted(klusterName string, timeout time.\n},\n)\n}\n+\n+func (k *Kubernikus) WaitForKlusters(prefix string, count int, timeout time.Duration) error {\n+ return wait.PollImmediate(Poll, timeout,\n+ func() (done bool, err error) {\n+ res, err := k.Client.Operations.ListClusters(\n+ operations.NewListClustersParams(),\n+ k.AuthInfo,\n+ )\n+\n+ if err != nil {\n+ return true, err\n+ }\n+\n+ k := 0\n+ for _, kluster := range res.Payload {\n+ if strings.HasPrefix(kluster.Name, prefix) {\n+ k++\n+ }\n+ }\n+\n+ if k == count {\n+ return true, nil\n+ }\n+\n+ return false, nil\n+ },\n+ )\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/main_test.go", "new_path": "test/e2e/main_test.go", "diff": "@@ -98,6 +98,12 @@ func TestRunner(t *testing.T) {\nopenstack, err := framework.NewOpenStackFramework()\nrequire.NoError(t, err, \"Must be able to connect to OpenStack\")\n+ // Pyrolize garbage left from previous e2e runs\n+ pyrolisisTests := &PyrolisisTests{kubernikus, *reuse}\n+ if !t.Run(\"Pyrolisis\", pyrolisisTests.Run) {\n+ return\n+ }\n+\nif cleanup != nil && *cleanup == true {\ncleanupTests := &CleanupTests{kubernikus, openstack, klusterName, *reuse}\ndefer t.Run(\"Cleanup\", cleanupTests.Run)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/e2e/pyrolisis_test.go", "diff": "+package main\n+\n+import (\n+ \"fmt\"\n+ \"strings\"\n+ \"testing\"\n+\n+ \"github.com/stretchr/testify/assert\"\n+ \"github.com/stretchr/testify/require\"\n+\n+ \"github.com/sapcc/kubernikus/pkg/api/client/operations\"\n+ \"github.com/sapcc/kubernikus/test/e2e/framework\"\n+)\n+\n+type PyrolisisTests struct {\n+ Kubernikus *framework.Kubernikus\n+ Reuse bool\n+}\n+\n+func (p *PyrolisisTests) Run(t *testing.T) {\n+ if p.Reuse == false {\n+ quota := t.Run(\"SettingKlustersOnFire\", p.SettingKlustersOnFire)\n+ require.True(t, quota, \"Klusters must burn\")\n+\n+ t.Run(\"Wait\", func(t *testing.T) {\n+ t.Run(\"Klusters\", p.WaitForE2EKlustersTerminated)\n+ })\n+ }\n+}\n+\n+func (p *PyrolisisTests) SettingKlustersOnFire(t *testing.T) {\n+ res, err := p.Kubernikus.Client.Operations.ListClusters(\n+ operations.NewListClustersParams(),\n+ p.Kubernikus.AuthInfo,\n+ )\n+ require.NoError(t, err, \"There should be no error while listing klusters\")\n+\n+ for _, kluster := range res.Payload {\n+ if strings.HasPrefix(kluster.Name, \"e2e-\") {\n+ t.Run(fmt.Sprintf(\"TerminatingKluster-%v\", kluster.Name), func(t *testing.T) {\n+ _, err := p.Kubernikus.Client.Operations.TerminateCluster(\n+ operations.NewTerminateClusterParams().WithName(kluster.Name),\n+ p.Kubernikus.AuthInfo,\n+ )\n+ assert.NoError(t, err, \"There should be no error while terminating klusters\")\n+ })\n+ }\n+ }\n+}\n+\n+func (p *PyrolisisTests) WaitForE2EKlustersTerminated(t *testing.T) {\n+ err := p.Kubernikus.WaitForKlusters(\"e2e-\", 0, WaitForKlusterToBeDeletedTimeout)\n+ assert.NoError(t, err, \"E2E Klusters didn't burn down in time\")\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds pyrolize tasks to terminate e2e klusters from previous incomplete test runs
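`WaitForKlusters` above is built on `wait.PollImmediate`, so the predicate runs once immediately and then every `Poll` interval until the timeout; the pyrolisis step waits for a count of zero survivors. A short illustrative usage, assuming a configured `framework.Kubernikus` as in the diff:

```go
// block until no e2e-prefixed klusters remain; an error means either an
// API failure while listing or the cleanup timeout expiring
if err := kubernikus.WaitForKlusters("e2e-", 0, WaitForKlusterToBeDeletedTimeout); err != nil {
	t.Fatalf("leftover e2e klusters did not burn down: %v", err)
}
```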
596,240
27.08.2018 16:13:46
-7,200
496b8db90b57dd17e1083148af4db5813c85fab1
allows multi-tenant installations
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus/templates/clusterrolebinding.yaml", "new_path": "charts/kubernikus/templates/clusterrolebinding.yaml", "diff": "+{{- if .Values.standalone}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n@@ -10,3 +11,4 @@ subjects:\n- kind: ServiceAccount\nname: default\nnamespace: kubernikus-system\n+{{- end }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kubernikus/values.yaml", "new_path": "charts/kubernikus/values.yaml", "diff": "@@ -24,5 +24,8 @@ k8sniff:\nenabled: true\n#external_service_ip: \"\"\n+# Multiple Kubernikus Installations in the same cluster?\n+standalone: true\n+\noperator:\nmetrics_port: 9091\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
allows multi-tenant installations
596,240
24.08.2018 17:13:07
-7,200
3208f1b30096c98bb74be8084afe63b6cf75a590
add swift state backend, makefile, services and endpoints
[ { "change_type": "ADD", "old_path": null, "new_path": "terraform/Makefile", "diff": "+TF_PLUGIN_DIR ?= \"$(GOPATH)/bin\"\n+\n+TF_AUTH_URL ?= \"https://identity-3.$(TF_REGION).cloud.sap/v3\"\n+TF_USER_DOMAIN := ccadmin\n+TF_DOMAIN_NAME := ccadmin\n+TF_TENANT := kubernikus\n+\n+TF_BACKEND_AUTH_URL ?= \"$(TF_AUTH_URL)\"\n+TF_BACKEND_REGION ?= \"$(TF_REGION)\"\n+TF_BACKEND_USER ?= \"$(TF_USER)\"\n+TF_BACKEND_USER_DOMAIN ?= \"$(TF_USER_DOMAIN)\"\n+TF_BACKEND_PASSWORD ?= \"$(TF_PASSWORD)\"\n+\n+.PHONY: all validate init plan apply\n+\n+ifndef TF_REGION\n+$(error Set environment variable TF_REGION to continue)\n+endif\n+\n+ifndef TF_USER\n+$(error Set environment variable TF_USER to continue. Requires ccadmin/cloud_admin permissions.)\n+endif\n+\n+ifndef TF_PASSWORD\n+$(error Set environment variable TF_PASSWORD to continue)\n+endif\n+\n+\n+all: validate init plan apply\n+\n+validate:\n+ env | grep \"TF_\\|OS_\"\n+\n+init: validate\n+ terraform init \\\n+ -plugin-dir=$(TF_PLUGIN_DIR) \\\n+ -backend-config=\"auth_url=$(TF_BACKEND_AUTH_URL)\" \\\n+ -backend-config=\"region_name=$(TF_BACKEND_REGION)\" \\\n+ -backend-config=\"user_name=$(TF_BACKEND_USER)\" \\\n+ -backend-config=\"domain_name=$(TF_BACKEND_USER_DOMAIN)\" \\\n+ -backend-config=\"password=$(TF_BACKEND_PASSWORD)\"\n+\n+plan: validate\n+ terraform plan \\\n+ -var region=$(TF_REGION) \\\n+ -var user_name=$(TF_USER) \\\n+ -var user_domain_name=$(TF_USER_DOMAIN) \\\n+ -var password=$(TF_PASSWORD) \\\n+ -var domain_name=$(TF_DOMAIN_NAME) \\\n+ -var tenant_name=$(TF_TENANT) \\\n+ -parallelism=0\n+\n+apply: validate\n+ terraform apply \\\n+ -input=false \\\n+ -auto-approve \\\n+ -var region=$(TF_REGION) \\\n+ -var user_name=$(TF_USER) \\\n+ -var user_domain_name=$(TF_USER_DOMAIN) \\\n+ -var password=$(TF_PASSWORD) \\\n+ -var domain_name=$(TF_DOMAIN_NAME) \\\n+ -var tenant_name=$(TF_TENANT) \\\n+ -parallelism=0\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -31,6 +31,15 @@ provider \"ccloud\" {\ntenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n}\n+terraform {\n+ backend \"swift\" {\n+ tenant_name = \"master\"\n+ domain_name = \"ccadmin\"\n+ container = \"kubernikus_terraform_state\"\n+ archive_container = \"kubernikus_terraform_archive\"\n+ expire_after = \"365d\"\n+ }\n+}\ndata \"openstack_identity_project_v3\" \"kubernikus_domain\" {\nname = \"${var.domain_name}\"\n@@ -201,6 +210,34 @@ resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_1\" {\nsecurity_group_id = \"${openstack_networking_secgroup_v2.kubernikus.id}\"\n}\n+resource \"openstack_identity_service_v3\" \"kubernikus\" {\n+ name = \"kubernikus\"\n+ type = \"kubernikus\"\n+ description = \"End-User Kubernikus Service\"\n+}\n+\n+resource \"openstack_identity_service_v3\" \"kubernikus-kubernikus\" {\n+ name = \"kubernikus\"\n+ type = \"kubernikus-kubernikus\"\n+ description = \"Admin Kubernikus Service\"\n+}\n+\n+resource \"openstack_identity_endpoint_v3\" \"kubernikus\" {\n+ service_id = \"${openstack_identity_service_v3.kubernikus.id}\"\n+ name = \"kubernikus\"\n+ interface = \"public\"\n+ region = \"${var.region}\"\n+ url = \"https://kubernikus.${var.region}.cloud.sap\"\n+}\n+\n+resource \"openstack_identity_endpoint_v3\" \"kubernikus-kubernikus\" {\n+ service_id = \"${openstack_identity_service_v3.kubernikus-kubernikus.id}\"\n+ name = \"kubernikus-kubernikus\"\n+ interface = \"public\"\n+ region = \"${var.region}\"\n+ url = 
\"https://k-${var.region}.admin.cloud.sap\"\n+}\n+\nresource \"ccloud_kubernetes\" \"kluster\" {\nprovider = \"ccloud.kubernikus\"\n@@ -212,5 +249,8 @@ resource \"ccloud_kubernetes\" \"kluster\" {\n{ name = \"payload1\", flavor = \"m1.xlarge_cpu\", size = 1 }\n]\n- depends_on = [\"openstack_networking_router_v2.router\"]\n+ depends_on = [\n+ \"openstack_identity_endpoint_v3.kubernikus\",\n+ \"openstack_networking_router_v2.router\"\n+ ]\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add swift state backend, makefile, services and endpoints
596,240
24.08.2018 17:19:30
-7,200
c973af5970e984e1d5346407ea485fa3521a5589
adds mini-readme, gitignore and removes some variables
[ { "change_type": "ADD", "old_path": null, "new_path": "terraform/.gitignore", "diff": "+.terraform\n" }, { "change_type": "ADD", "old_path": null, "new_path": "terraform/README.md", "diff": "+# Usage\n+\n+```\n+export TF_REGION=ap-jp-1\n+export TF_USER=d038720\n+export TF_PASSWORD=$(security find-generic-password -a $USER -s openstack -w)\n+\n+# env TF_REGION=ap-jp-1 TF_USER=d038720 TF_PASSWORD=(security find-generic-password -a $USER -s openstack -w) make plan\n+\n+make init\n+make plan\n+make apply\n+```\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -42,7 +42,7 @@ terraform {\n}\ndata \"openstack_identity_project_v3\" \"kubernikus_domain\" {\n- name = \"${var.domain_name}\"\n+ name = \"ccadmin\"\nis_domain = true\n}\n" }, { "change_type": "MODIFY", "old_path": "terraform/variables.tf", "new_path": "terraform/variables.tf", "diff": "@@ -2,6 +2,3 @@ variable \"region\" {}\nvariable \"user_name\" {}\nvariable \"user_domain_name\" {}\nvariable \"password\" {}\n-variable \"tenant_name\" {}\n-variable \"domain_name\" {}\n-\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds mini-readme, gitignore and removes some variables
596,240
27.08.2018 15:44:43
-7,200
0dadfac2f6862a7a76403662c8995ad0bf03c7a2
adds creation of dns entries
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -48,6 +48,7 @@ plan: validate\n-var password=$(TF_PASSWORD) \\\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n+ -var-file=\"${TF_REGION}.tfvars\" \\\n-parallelism=0\napply: validate\n@@ -60,4 +61,5 @@ apply: validate\n-var password=$(TF_PASSWORD) \\\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n+ -var-file=\"${TF_REGION}.tfvars\" \\\n-parallelism=0\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -8,6 +8,30 @@ provider \"openstack\" {\ndomain_name = \"ccadmin\"\n}\n+provider \"openstack\" {\n+ alias = \"master\"\n+\n+ auth_url = \"https://identity-3.${var.region}.cloud.sap/v3\"\n+ region = \"${var.region}\"\n+ user_name = \"${var.user_name}\"\n+ user_domain_name = \"${var.user_domain_name}\"\n+ password = \"${var.password}\"\n+ tenant_name = \"master\"\n+ domain_name = \"ccadmin\"\n+}\n+\n+provider \"openstack\" {\n+ alias = \"master.na-us-1\"\n+\n+ auth_url = \"https://identity-3.na-us-1.cloud.sap/v3\"\n+ region = \"na-us-1\"\n+ user_name = \"${var.user_name}\"\n+ user_domain_name = \"${var.user_domain_name}\"\n+ password = \"${var.password}\"\n+ tenant_name = \"master\"\n+ domain_name = \"ccadmin\"\n+}\n+\nprovider \"ccloud\" {\nalias = \"cloud_admin\"\n@@ -254,3 +278,87 @@ resource \"ccloud_kubernetes\" \"kluster\" {\n\"openstack_networking_router_v2.router\"\n]\n}\n+\n+data \"openstack_dns_zone_v2\" \"region_cloud_sap\" {\n+ provider = \"openstack.master\"\n+ name = \"${var.region}.cloud.sap.\"\n+}\n+\n+data \"openstack_dns_zone_v2\" \"admin_cloud_sap\" {\n+ provider = \"openstack.master.na-us-1\"\n+ name = \"admin.cloud.sap.\"\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"kubernikus-ingress\" {\n+ provider = \"openstack.master\"\n+ zone_id = \"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"kubernikus-ingress.${var.region}.cloud.sap.\"\n+ type = \"A\"\n+ ttl = 1800\n+ records = [\"${var.lb-kubernikus-ingress-fip}\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"kubernikus-k8sniff\" {\n+ provider = \"openstack.master\"\n+ zone_id = \"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"kubernikus-k8sniff.${var.region}.cloud.sap.\"\n+ type = \"A\"\n+ ttl = 1800\n+ records = [\"${var.lb-kubernikus-k8sniff-fip}\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"wildcard-kubernikus\" {\n+ provider = \"openstack.master\"\n+ zone_id = \"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"*.kubernikus.${var.region}.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"kubernikus-k8sniff.${var.region}.cloud.sap.\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"kubernikus\" {\n+ provider = \"openstack.master\"\n+ zone_id = \"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"kubernikus.${var.region}.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"kubernikus-ingress.${var.region}.cloud.sap.\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"prometheus\" {\n+ provider = \"openstack.master\"\n+ zone_id = \"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"prometheus.kubernikus.${var.region}.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"kubernikus-ingress.${var.region}.cloud.sap.\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"grafana\" {\n+ provider = \"openstack.master\"\n+ zone_id = 
\"${data.openstack_dns_zone_v2.region_cloud_sap.id}\"\n+ name = \"grafana.kubernikus.${var.region}.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"kubernikus-ingress.${var.region}.cloud.sap.\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"k-region\" {\n+ provider = \"openstack.master.na-us-1\"\n+ zone_id = \"${data.openstack_dns_zone_v2.admin_cloud_sap.id}\"\n+ name = \"k-${var.region}.admin.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"ingress.admin.cloud.sap.\"]\n+}\n+\n+resource \"openstack_dns_recordset_v2\" \"wildcard-k-region\" {\n+ provider = \"openstack.master.na-us-1\"\n+ zone_id = \"${data.openstack_dns_zone_v2.admin_cloud_sap.id}\"\n+ name = \"*.k-${var.region}.admin.cloud.sap.\"\n+ type = \"CNAME\"\n+ ttl = 1800\n+ records = [\"kubernikus.admin.cloud.sap.\"]\n+}\n+\n+\n" }, { "change_type": "MODIFY", "old_path": "terraform/variables.tf", "new_path": "terraform/variables.tf", "diff": "@@ -2,3 +2,6 @@ variable \"region\" {}\nvariable \"user_name\" {}\nvariable \"user_domain_name\" {}\nvariable \"password\" {}\n+\n+variable \"lb-kubernikus-k8sniff-fip\" {}\n+variable \"lb-kubernikus-ingress-fip\" {}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds creation of dns entries
596,240
27.08.2018 16:14:36
-7,200
0e5d123e0e565e71105d4608c11dd99ae459e05f
adds pipeline user
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -99,6 +99,9 @@ data \"openstack_networking_network_v2\" \"external_network\" {\nname = \"FloatingIP-external-ccadmin\"\n}\n+data \"openstack_identity_user_v3\" \"pipeline\" {\n+ name = \"T175B19A704E280EC\"\n+}\nresource \"openstack_identity_project_v3\" \"kubernikus\" {\nname = \"kubernikus\"\n@@ -146,6 +149,13 @@ resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin\" {\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n+resource \"openstack_identity_role_assignment_v3\" \"pipeline\" {\n+ user_id = \"${data.openstack_identity_user_v3.pipeline.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n+ role_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n+}\n+\n+\nresource \"ccloud_quota\" \"kubernikus\" {\nprovider = \"ccloud.cloud_admin\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds pipeline user
596,240
28.08.2018 15:08:47
-7,200
78debc9b598d68cb610e6b4cbaca90385cd882a2
installs or upgrades tiller
[ { "change_type": "MODIFY", "old_path": "ci/task_tiller.yaml", "new_path": "ci/task_tiller.yaml", "diff": "@@ -18,9 +18,7 @@ run:\nset -exo pipefail\ntag=$(cat secrets.git/kubernikus/tiller.version)\nkubernikusctl auth init\n- helm version\n- kubectl set image --namespace=kube-system deployment/tiller-deploy tiller=sapcc/tiller:$tag\n- kubectl rollout status deployment/tiller-deploy --namespace=kube-system\n+ helm init --service-account tiller --history-max 5 --tiller-image sapcc/tiller:$tag --upgrade --wait\nparams:\nOS_AUTH_URL:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
installs or upgrades tiller
596,240
28.08.2018 15:09:13
-7,200
5a86c61484d88e295c7d4e403c80798ad45dcbf5
limits parallelism to 1 instead of infinity -.-
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -49,7 +49,7 @@ plan: validate\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n-var-file=\"${TF_REGION}.tfvars\" \\\n- -parallelism=0\n+ -parallelism=1\napply: validate\nterraform apply \\\n@@ -62,4 +62,4 @@ apply: validate\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n-var-file=\"${TF_REGION}.tfvars\" \\\n- -parallelism=0\n+ -parallelism=1\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
limits parallelism to 1 instead of infinity -.-
596,240
28.08.2018 15:09:38
-7,200
fcdcee503742a9160160880c41a6b1de1c801225
adds kubernikus service user
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -65,11 +65,21 @@ terraform {\n}\n}\n-data \"openstack_identity_project_v3\" \"kubernikus_domain\" {\n+data \"openstack_identity_project_v3\" \"ccadmin\" {\nname = \"ccadmin\"\nis_domain = true\n}\n+data \"openstack_identity_project_v3\" \"default\" {\n+ name = \"Default\"\n+ is_domain = true\n+}\n+\n+data \"openstack_identity_project_v3\" \"cloud_admin\" {\n+ name = \"cloud_admin\"\n+ domain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\n+}\n+\ndata \"ccloud_identity_group_v3\" \"ccadmin_domain_admins\" {\nprovider = \"ccloud.cloud_admin\"\nname = \"CCADMIN_DOMAIN_ADMINS\"\n@@ -95,17 +105,51 @@ data \"openstack_identity_role_v3\" \"volume_admin\" {\nname = \"volume_admin\"\n}\n+data \"openstack_identity_role_v3\" \"cloud_compute_admin\" {\n+ name = \"cloud_compute_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_dns_admin\" {\n+ name = \"cloud_dns_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_image_admin\" {\n+ name = \"cloud_image_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_keymanager_admin\" {\n+ name = \"cloud_keymanager_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_network_admin\" {\n+ name = \"cloud_network_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_resource_admin\" {\n+ name = \"cloud_resource_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_sharedfilesystem_admin\" {\n+ name = \"cloud_sharedfilesystem_admin\"\n+}\n+\n+data \"openstack_identity_role_v3\" \"cloud_volume_admin\" {\n+ name = \"cloud_volume_admin\"\n+}\n+\ndata \"openstack_networking_network_v2\" \"external_network\" {\nname = \"FloatingIP-external-ccadmin\"\n}\ndata \"openstack_identity_user_v3\" \"pipeline\" {\n- name = \"T175B19A704E280EC\"\n+ name = \"${var.kubernikus-pipeline-user}\"\n}\n+\n+\nresource \"openstack_identity_project_v3\" \"kubernikus\" {\nname = \"kubernikus\"\n- domain_id = \"${data.openstack_identity_project_v3.kubernikus_domain.id}\"\n+ domain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\ndescription = \"Kubernikus Control-Plane\"\n}\n@@ -156,15 +200,84 @@ resource \"openstack_identity_role_assignment_v3\" \"pipeline\" {\n}\n+\n+resource \"openstack_identity_user_v3\" \"kubernikus\" {\n+ domain_id = \"${data.openstack_identity_project_v3.default.id}\"\n+ name = \"kubernikus\"\n+ description = \"Kubernikus Service User\"\n+ password = \"${var.kubernikus-openstack-password}\"\n+\n+ ignore_change_password_upon_first_use = true\n+ ignore_password_expiry = true\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_compute_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_compute_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_dns_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_dns_admin.id}\"\n+}\n+\n+resource 
\"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_image_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_image_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_keymanager_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_keymanager_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_network_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_network_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_resource_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_resource_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_sharedfilesystem_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_sharedfilesystem_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_volume_admin\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ project_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.cloud_volume_admin.id}\"\n+}\n+\n+\n+\n+\n+\nresource \"ccloud_quota\" \"kubernikus\" {\nprovider = \"ccloud.cloud_admin\"\n- domain_id = \"${data.openstack_identity_project_v3.kubernikus_domain.id}\"\n+ domain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\nproject_id = \"${openstack_identity_project_v3.kubernikus.id}\"\ncompute {\ninstances = 10\n- cores = 32\n+ cores = 48\nram = 81920\n}\n@@ -275,12 +388,12 @@ resource \"openstack_identity_endpoint_v3\" \"kubernikus-kubernikus\" {\nresource \"ccloud_kubernetes\" \"kluster\" {\nprovider = \"ccloud.kubernikus\"\n+ is_admin = true\nname = \"k-${var.region}\"\nssh_public_key = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCXIxVEUgtUVkvk2VM1hmIb8MxvxsmvYoiq9OBy3J8akTGNybqKsA2uhcwxSJX5Cn3si8kfMfka9EWiJT+e1ybvtsGILO5XRZPxyhYzexwb3TcALwc3LuzpF3Z/Dg2jYTRELTGhYmyca3mxzTlCjNXvYayLNedjJ8fIBzoCuSXNqDRToHru7h0Glz+wtuE74mNkOiXSvhtuJtJs7VCNVjobFQNfC1aeDsri2bPRHJJZJ0QF4LLYSayMEz3lVwIDyAviQR2Aa97WfuXiofiAemfGqiH47Kq6b8X7j3bOYGBvJKMUV7XeWhGsskAmTsvvnFxkc5PAD3Ct+liULjiQWlzDrmpTE8aMqLK4l0YQw7/8iRVz6gli42iEc2ZG56ob1ErpTLAKFWyCNOebZuGoygdEQaGTIIunAncXg5Rz07TdPl0Tf5ZZLpiAgR5ck0H1SETnjDTZ/S83CiVZWJgmCpu8YOKWyYRD4orWwdnA77L4+ixeojLIhEoNL8KlBgsP9Twx+fFMWLfxMmiuX+yksM6Hu+Lsm+Ao7Q284VPp36EB1rxP1JM7HCiEOEm50Jb6hNKjgN4aoLhG5yg+GnDhwCZqUwcRJo1bWtm3QvRA+rzrGZkId4EY3cyOK5QnYV5+24x93Ex0UspHMn7HGsHUESsVeV0fLqlfXyd2RbHTmDMP6w==\"\nnode_pools = [\n- { name = \"payload0\", flavor = \"m1.xlarge_cpu\", size = 2 },\n- { name = \"payload1\", flavor = \"m1.xlarge_cpu\", size = 1 }\n+ { name = \"payload\", flavor = \"m1.xlarge_cpu\", size = 3 },\n]\ndepends_on = [\n" }, { "change_type": "MODIFY", "old_path": "terraform/variables.tf", "new_path": "terraform/variables.tf", "diff": "@@ -5,3 +5,6 @@ 
variable \"password\" {}\nvariable \"lb-kubernikus-k8sniff-fip\" {}\nvariable \"lb-kubernikus-ingress-fip\" {}\n+\n+variable \"kubernikus-pipeline-user\" {}\n+variable \"kubernikus-openstack-password\" {}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds kubernikus service user
596,240
28.08.2018 15:27:21
-7,200
2d71d18cea47e873b305232f654554ccd79703b8
sets up sa and crb for tiller
[ { "change_type": "MODIFY", "old_path": "ci/task_tiller.yaml", "new_path": "ci/task_tiller.yaml", "diff": "@@ -18,6 +18,8 @@ run:\nset -exo pipefail\ntag=$(cat secrets.git/kubernikus/tiller.version)\nkubernikusctl auth init\n+ kubectl -n kube-system create sa tiller || true\n+ kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller || true\nhelm init --service-account tiller --history-max 5 --tiller-image sapcc/tiller:$tag --upgrade --wait\nparams:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
sets up sa and crb for tiller
596,240
29.08.2018 11:40:27
-7,200
5ff0d01fe46178beeea5c3cd9e78cde3db8b6c66
allows passing in FIP for k8sniff loadbalancer
[ { "change_type": "MODIFY", "old_path": "charts/k8sniff/templates/service.yaml", "new_path": "charts/k8sniff/templates/service.yaml", "diff": "@@ -18,3 +18,7 @@ spec:\nexternalIPs:\n- {{ .Values.external_service_ip }}\n{{- end }}\n+{{- if .Values.loadbalancer_fip }}\n+ loadBalancerIP: {{ .Values.loadBalancerIP }}\n+{{- end }}\n+\n" }, { "change_type": "MODIFY", "old_path": "charts/k8sniff/values.yaml", "new_path": "charts/k8sniff/values.yaml", "diff": "@@ -2,3 +2,5 @@ image: \"kubermatic/k8sniff\"\ntag: \"8d5bf771eb906c58acfdd68e553f9408432043fa\"\nmetrics_port: 9091\nlog_level: 9\n+\n+# loadBalancerIP\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
allows passing in FIP for k8sniff loadbalancer
596,240
29.08.2018 13:27:41
-7,200
2e16ee8274f19b2a5e0c2ffdff5a3977545b266f
fixes value test
[ { "change_type": "MODIFY", "old_path": "charts/k8sniff/templates/service.yaml", "new_path": "charts/k8sniff/templates/service.yaml", "diff": "@@ -18,7 +18,7 @@ spec:\nexternalIPs:\n- {{ .Values.external_service_ip }}\n{{- end }}\n-{{- if .Values.loadbalancer_fip }}\n+{{- if .Values.loadBalancerIP }}\nloadBalancerIP: {{ .Values.loadBalancerIP }}\n{{- end }}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes value test
596,240
31.08.2018 17:36:16
-7,200
d5c9dd3d7e9a76810b47311e5dea214ca8dd16f9
contribs pipeline image for terraform jobs
[ { "change_type": "ADD", "old_path": null, "new_path": "contrib/kubernikus-terraform/Dockerfile", "diff": "+FROM golang:alpine as builder\n+\n+ARG TERRAFORM_PROVIDER_OPENSTACK_VERSION\n+ARG TERRAFORM_PROVIDER_CCLOUD_VERSION\n+\n+RUN apk add --update git make bash\n+\n+WORKDIR /go/src/github.com/sapcc/terraform-provider-ccloud\n+RUN git clone https://github.com/sapcc/terraform-provider-ccloud.git .\n+RUN git reset --hard ${TERRAFORM_PROVIDER_CCLOUD_VERSION}\n+RUN make\n+\n+WORKDIR /go/src/github.com/terraform-providers/terraform-provider-openstack\n+RUN git clone https://github.com/BugRoger/terraform-provider-openstack.git .\n+RUN git reset --hard ${TERRAFORM_PROVIDER_OPENSTACK_VERSION}\n+RUN make\n+\n+FROM alpine:3.8\n+\n+ARG TERRAFORM_VERSION\n+\n+RUN apk add --update make terraform=${TERRAFORM_VERSION}\n+COPY --from=builder /go/bin/* /usr/local/bin/\n" }, { "change_type": "ADD", "old_path": null, "new_path": "contrib/kubernikus-terraform/Makefile", "diff": "+SHELL := /bin/sh\n+IMAGE := sapcc/kubernikus-terrraform\n+DATE := $(shell date +%Y%m%d%H%M%S)\n+VERSION ?= v$(DATE)\n+\n+TERRAFORM_VERSION := 0.11.7-r0\n+TERRAFORM_PROVIDER_OPENSTACK_VERSION := 8a72ef5435ea15092061fc8acad756a9e87bf93a\n+TERRAFORM_PROVIDER_CCLOUD_VERSION := 59c10debdb5576da5b8e2dcd66a815c376e68e08\n+\n+.PHONY: all\n+all: build push\n+\n+build:\n+ docker build -t $(IMAGE):$(VERSION) -t $(IMAGE):latest \\\n+ --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) \\\n+ --build-arg TERRAFORM_PROVIDER_OPENSTACK_VERSION=$(TERRAFORM_PROVIDER_OPENSTACK_VERSION) \\\n+ --build-arg TERRAFORM_PROVIDER_CCLOUD_VERSION=$(TERRAFORM_PROVIDER_CCLOUD_VERSION) \\\n+ .\n+\n+push:\n+ docker push $(IMAGE):$(VERSION)\n+ docker push $(IMAGE):latest\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
contribs pipeline image for terraform jobs
596,240
31.08.2018 17:36:53
-7,200
33ed3c87a2e649388524a326f51bd80091155815
sets rbac policy for external floatingip network
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -49,7 +49,7 @@ plan: validate\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n-var-file=\"${TF_REGION}.tfvars\" \\\n- -parallelism=1\n+ -parallelism=10\napply: validate\nterraform apply \\\n@@ -62,4 +62,4 @@ apply: validate\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n-var-file=\"${TF_REGION}.tfvars\" \\\n- -parallelism=1\n+ -parallelism=10\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -137,7 +137,7 @@ data \"openstack_identity_role_v3\" \"cloud_volume_admin\" {\nname = \"cloud_volume_admin\"\n}\n-data \"openstack_networking_network_v2\" \"external_network\" {\n+data \"openstack_networking_network_v2\" \"external\" {\nname = \"FloatingIP-external-ccadmin\"\n}\n@@ -303,6 +303,13 @@ resource \"ccloud_quota\" \"kubernikus\" {\n}\n}\n+resource \"openstack_networking_rbacpolicies_v2\" \"external\" {\n+ action = \"access_as_shared\"\n+ object_id = \"${data.openstack_networking_network_v2.external.id}\"\n+ object_type = \"network\"\n+ target_tenant = \"${openstack_identity_project_v3.kubernikus.id}\"\n+}\n+\nresource \"openstack_networking_network_v2\" \"network\" {\ntenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\nname = \"kubernikus\"\n@@ -323,7 +330,7 @@ resource \"openstack_networking_router_v2\" \"router\" {\ntenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\nname = \"kubernikus\"\nadmin_state_up = true\n- external_network_id = \"${data.openstack_networking_network_v2.external_network.id}\"\n+ external_network_id = \"${data.openstack_networking_network_v2.external.id}\"\ndepends_on = [\"ccloud_quota.kubernikus\"]\n}\n@@ -332,11 +339,10 @@ resource \"openstack_networking_router_interface_v2\" \"router_interface\" {\nsubnet_id = \"${openstack_networking_subnet_v2.subnet.id}\"\n}\n-resource \"openstack_networking_secgroup_v2\" \"kubernikus\" {\n+\n+data \"openstack_networking_secgroup_v2\" \"kubernikus_default\" {\n+ name = \"default\"\ntenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n- name = \"kubernikus\"\n- description = \"Kubernikus\"\n- depends_on = [\"ccloud_quota.kubernikus\"]\n}\nresource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_0\" {\n@@ -345,7 +351,9 @@ resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_0\" {\nethertype = \"IPv4\"\nprotocol = \"tcp\"\nremote_ip_prefix = \"198.18.0.0/15\"\n- security_group_id = \"${openstack_networking_secgroup_v2.kubernikus.id}\"\n+ security_group_id = \"${data.openstack_networking_secgroup_v2.kubernikus_default.id}\"\n+\n+ depends_on = [\"ccloud_quota.kubernikus\"]\n}\nresource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_1\" {\n@@ -354,7 +362,9 @@ resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_1\" {\nethertype = \"IPv4\"\nprotocol = \"udp\"\nremote_ip_prefix = \"198.18.0.0/15\"\n- security_group_id = \"${openstack_networking_secgroup_v2.kubernikus.id}\"\n+ security_group_id = \"${data.openstack_networking_secgroup_v2.kubernikus_default.id}\"\n+\n+ depends_on = [\"ccloud_quota.kubernikus\"]\n}\nresource \"openstack_identity_service_v3\" \"kubernikus\" {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
sets rbac policy for external floatingip network
596,240
03.09.2018 16:49:58
-7,200
8f667fd2cff1c3863ad4c1d5f3dcd777f51ea301
terraforms automatically
[ { "change_type": "ADD", "old_path": null, "new_path": "ci/task_terraform.yaml", "diff": "+---\n+platform: 'linux'\n+\n+image_resource:\n+ type: docker-image\n+ source:\n+ repository: sapcc/kubernikus-terraform\n+ tag: 'latest'\n+\n+inputs:\n+ - name: secrets.git\n+ - name: kubernikus.git\n+\n+\n+run:\n+ path: /bin/sh\n+ args:\n+ - -c\n+ - |\n+ set -exo pipefail\n+ cd kubernikus.git/terraform\n+ TF_USER=$OS_USERNAME TF_PASSWORD=$OS_PASSWORD make plan\n+\n+params:\n+ TF_REGION\n+ OS_USERNAME\n+ OS_PASSWORD\n+\n+\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
terraforms automatically
596,240
03.09.2018 18:03:56
-7,200
a4b2c0584c8ff5283f1307526114c4c2deeccffa
terraforms ap-jp-1
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -402,6 +402,14 @@ resources:\nbranch: master\ndepth: 1\n+ - name: kubernikus.git\n+ type: git\n+ check_every: 1h\n+ source:\n+ uri: https://github.com/sapcc/kubernikus.git\n+ branch: master\n+ depth: 1\n+\n- name: daily\ntype: time\nsource: {interval: 24h}\n@@ -1284,6 +1292,19 @@ jobs:\n<<: *auth_infra\n+ - name: terraform_ap-jp-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-jp-1\n+ <<: *auth_ap-jp-1\n+\ngroups:\n- name: deploy\njobs:\n@@ -1339,6 +1360,9 @@ groups:\n- soak_qa-de-1\n+ - name: terraform\n+ jobs:\n+ - terraform_ap-jp-1\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -130,6 +130,14 @@ resources:\nbranch: master\ndepth: 1\n+ - name: kubernikus.git\n+ type: git\n+ check_every: 1h\n+ source:\n+ uri: https://github.com/sapcc/kubernikus.git\n+ branch: master\n+ depth: 1\n+\n- name: daily\ntype: time\nsource: {interval: 24h}\n@@ -420,6 +428,19 @@ jobs:\n<<: *auth_<%= region %>\n<% end %>\n+ - name: terraform_ap-jp-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-jp-1\n+ <<: *auth_ap-jp-1\n+\ngroups:\n- name: deploy\njobs:\n@@ -439,6 +460,9 @@ groups:\n<% (ASIA + EMEA + AMERICAS + [\"qa-de-1\"]).each do |region| %>\n- soak_<%= region %>\n<% end %>\n+ - name: terraform\n+ jobs:\n+ - terraform_ap-jp-1\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/task_terraform.yaml", "new_path": "ci/task_terraform.yaml", "diff": "@@ -19,12 +19,18 @@ run:\n- |\nset -exo pipefail\ncd kubernikus.git/terraform\n- TF_USER=$OS_USERNAME TF_PASSWORD=$OS_PASSWORD make plan\n+ ls\n+ TF_USER=$OS_USERNAME \\\n+ TF_PASSWORD=$OS_PASSWORD \\\n+ TF_VARS_DIR=../../secrets.git/$TF_REGION/terraform \\\n+ make init plan apply\nparams:\n- TF_REGION\n- OS_USERNAME\n- OS_PASSWORD\n+ TF_REGION:\n+ TF_PLUGIN_DIR: /usr/local/bin\n+ TF_VARS_DIR:\n+ OS_USERNAME:\n+ OS_PASSWORD:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
terraforms ap-jp-1
596,240
03.09.2018 18:04:16
-7,200
f704e48f8e130de0fbdc29c2aa18225b9966496d
fixes typo, adds certificates
[ { "change_type": "MODIFY", "old_path": "contrib/kubernikus-terraform/Dockerfile", "new_path": "contrib/kubernikus-terraform/Dockerfile", "diff": "@@ -19,5 +19,5 @@ FROM alpine:3.8\nARG TERRAFORM_VERSION\n-RUN apk add --update make terraform=${TERRAFORM_VERSION}\n+RUN apk add --update make ca-certificates terraform=${TERRAFORM_VERSION}\nCOPY --from=builder /go/bin/* /usr/local/bin/\n" }, { "change_type": "MODIFY", "old_path": "contrib/kubernikus-terraform/Makefile", "new_path": "contrib/kubernikus-terraform/Makefile", "diff": "SHELL := /bin/sh\n-IMAGE := sapcc/kubernikus-terrraform\n+IMAGE := sapcc/kubernikus-terraform\nDATE := $(shell date +%Y%m%d%H%M%S)\nVERSION ?= v$(DATE)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes typo, adds certificates
596,240
04.09.2018 10:58:14
-7,200
590bd972cbd7d73936d278193db04bc6860e324f
allows specifying tfvar directory
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "TF_PLUGIN_DIR ?= \"$(GOPATH)/bin\"\n+TF_VARS_DIR ?= .\nTF_AUTH_URL ?= \"https://identity-3.$(TF_REGION).cloud.sap/v3\"\nTF_USER_DOMAIN := ccadmin\n@@ -48,7 +49,7 @@ plan: validate\n-var password=$(TF_PASSWORD) \\\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n- -var-file=\"${TF_REGION}.tfvars\" \\\n+ -var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=10\napply: validate\n@@ -61,5 +62,5 @@ apply: validate\n-var password=$(TF_PASSWORD) \\\n-var domain_name=$(TF_DOMAIN_NAME) \\\n-var tenant_name=$(TF_TENANT) \\\n- -var-file=\"${TF_REGION}.tfvars\" \\\n+ -var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=10\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
allows specifying tfvar directory
596,240
04.09.2018 15:56:13
-7,200
fe88c66cc3be9acde87ab9e538161625a3280dde
creates service user
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -147,16 +147,42 @@ data \"openstack_identity_user_v3\" \"pipeline\" {\n+resource \"openstack_identity_role_v3\" \"kubernetes_admin\" {\n+ name = \"kubernetes_admin\"\n+}\n+\n+resource \"openstack_identity_role_v3\" \"kubernetes_member\" {\n+ name = \"kubernetes_member\"\n+}\n+\n+resource \"openstack_identity_user_v3\" \"kubernikus_pipeline\" {\n+ domain_id = \"${data.openstack_identity_project_v3.default.id}\"\n+ name = \"kubernikus-pipeline\"\n+ description = \"Kubernikus Pipeline User\"\n+ password = \"${var.kubernikus-pipeline-password}\"\n+\n+ ignore_change_password_upon_first_use = true\n+ ignore_password_expiry = true\n+}\n+\n+resource \"openstack_identity_user_v3\" \"kubernikus_service\" {\n+ domain_id = \"${data.openstack_identity_project_v3.default.id}\"\n+ name = \"kubernikus\"\n+ description = \"Kubernikus Service User\"\n+ password = \"${var.kubernikus-service-password}\"\n+\n+ ignore_change_password_upon_first_use = true\n+ ignore_password_expiry = true\n+}\n+\n+\n+\nresource \"openstack_identity_project_v3\" \"kubernikus\" {\nname = \"kubernikus\"\ndomain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\ndescription = \"Kubernikus Control-Plane\"\n}\n-resource \"openstack_identity_role_v3\" \"kubernetes_admin\" {\n- name = \"kubernetes_admin\"\n-}\n-\nresource \"openstack_identity_role_assignment_v3\" \"admin\" {\ngroup_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\nproject_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n@@ -193,74 +219,64 @@ resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin\" {\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n-resource \"openstack_identity_role_assignment_v3\" \"pipeline\" {\n- user_id = \"${data.openstack_identity_user_v3.pipeline.id}\"\n+resource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_admin\" {\n+ user_id = \"${data.openstack_identity_user_v3.kubernikus_pipeline.id}\"\nproject_id = \"${openstack_identity_project_v3.kubernikus.id}\"\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n-resource \"openstack_identity_user_v3\" \"kubernikus\" {\n- domain_id = \"${data.openstack_identity_project_v3.default.id}\"\n- name = \"kubernikus\"\n- description = \"Kubernikus Service User\"\n- password = \"${var.kubernikus-openstack-password}\"\n-\n- ignore_change_password_upon_first_use = true\n- ignore_password_expiry = true\n-}\n-\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_compute_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_compute_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_dns_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = 
\"${data.openstack_identity_role_v3.cloud_dns_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_image_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_image_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_keymanager_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_keymanager_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_network_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_network_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_resource_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_resource_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_sharedfilesystem_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_sharedfilesystem_admin.id}\"\n}\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-cloud_volume_admin\" {\n- user_id = \"${openstack_identity_user_v3.kubernikus.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\nrole_id = \"${data.openstack_identity_role_v3.cloud_volume_admin.id}\"\n}\n" }, { "change_type": "MODIFY", "old_path": "terraform/variables.tf", "new_path": "terraform/variables.tf", "diff": "@@ -6,5 +6,5 @@ variable \"password\" {}\nvariable \"lb-kubernikus-k8sniff-fip\" {}\nvariable \"lb-kubernikus-ingress-fip\" {}\n-variable \"kubernikus-pipeline-user\" {}\n-variable \"kubernikus-openstack-password\" {}\n+variable \"kubernikus-pipeline-password\" {}\n+variable \"kubernikus-service-password\" {}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
creates service user
596,240
05.09.2018 14:04:41
-7,200
461c95d27c20cc0f5102a12d9968c86a82e57956
uses forked terraform for proper swift authentication
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -1303,7 +1303,8 @@ jobs:\ntimeout: 45m\nparams:\nTF_REGION: ap-jp-1\n- <<: *auth_ap-jp-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\ngroups:\n- name: deploy\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -439,7 +439,8 @@ jobs:\ntimeout: 45m\nparams:\nTF_REGION: ap-jp-1\n- <<: *auth_ap-jp-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\ngroups:\n- name: deploy\n" }, { "change_type": "MODIFY", "old_path": "ci/task_terraform.yaml", "new_path": "ci/task_terraform.yaml", "diff": "@@ -19,9 +19,6 @@ run:\n- |\nset -exo pipefail\ncd kubernikus.git/terraform\n- ls\n- TF_USER=$OS_USERNAME \\\n- TF_PASSWORD=$OS_PASSWORD \\\nTF_VARS_DIR=../../secrets.git/$TF_REGION/terraform \\\nmake init plan apply\n@@ -29,8 +26,4 @@ params:\nTF_REGION:\nTF_PLUGIN_DIR: /usr/local/bin\nTF_VARS_DIR:\n- OS_USERNAME:\n- OS_PASSWORD:\n-\n-\n-\n+ TF_PASSWORD:\n" }, { "change_type": "MODIFY", "old_path": "contrib/kubernikus-terraform/Dockerfile", "new_path": "contrib/kubernikus-terraform/Dockerfile", "diff": "@@ -3,7 +3,7 @@ FROM golang:alpine as builder\nARG TERRAFORM_PROVIDER_OPENSTACK_VERSION\nARG TERRAFORM_PROVIDER_CCLOUD_VERSION\n-RUN apk add --update git make bash\n+RUN apk add --update git make bash gcc musl-dev\nWORKDIR /go/src/github.com/sapcc/terraform-provider-ccloud\nRUN git clone https://github.com/sapcc/terraform-provider-ccloud.git .\n@@ -15,9 +15,16 @@ RUN git clone https://github.com/BugRoger/terraform-provider-openstack.git .\nRUN git reset --hard ${TERRAFORM_PROVIDER_OPENSTACK_VERSION}\nRUN make\n-FROM alpine:3.8\n+WORKDIR /go/src/github.com/hashicorp/terraform\n+RUN git clone https://github.com/jtopjian/terraform.git --branch backend-swift-auth-update .\n+RUN make tools\n+RUN make fmt\n+RUN XC_OS=linux XC_ARCH=amd64 make bin\n+\n-ARG TERRAFORM_VERSION\n+FROM alpine:3.8\n-RUN apk add --update make ca-certificates terraform=${TERRAFORM_VERSION}\n+RUN apk add --update make ca-certificates\nCOPY --from=builder /go/bin/* /usr/local/bin/\n+COPY --from=builder /go/src/github.com/hashicorp/terraform/bin/terraform /usr/local/bin/\n+\n" }, { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "-TF_PLUGIN_DIR ?= \"$(GOPATH)/bin\"\n+TF_PLUGIN_DIR ?= $(GOPATH)/bin\nTF_VARS_DIR ?= .\n-TF_AUTH_URL ?= \"https://identity-3.$(TF_REGION).cloud.sap/v3\"\n-TF_USER_DOMAIN := ccadmin\n-TF_DOMAIN_NAME := ccadmin\n-TF_TENANT := kubernikus\n+TF_AUTH_URL ?= https://identity-3.$(TF_REGION).cloud.sap/v3\n+TF_USER ?= kubernikus-terraform\n+TF_USER_DOMAIN ?= Default\n+TF_PROJECT_DOMAIN := ccadmin\n+TF_PROJECT := kubernikus\n-TF_BACKEND_AUTH_URL ?= \"$(TF_AUTH_URL)\"\n-TF_BACKEND_REGION ?= \"$(TF_REGION)\"\n-TF_BACKEND_USER ?= \"$(TF_USER)\"\n-TF_BACKEND_USER_DOMAIN ?= \"$(TF_USER_DOMAIN)\"\n-TF_BACKEND_PASSWORD ?= \"$(TF_PASSWORD)\"\n+TF_BACKEND_AUTH_URL ?= $(TF_AUTH_URL)\n+TF_BACKEND_REGION ?= $(TF_REGION)\n+TF_BACKEND_USER ?= $(TF_USER)\n+TF_BACKEND_USER_DOMAIN ?= $(TF_USER_DOMAIN)\n+TF_BACKEND_PASSWORD ?= $(TF_PASSWORD)\n.PHONY: all validate init plan apply\n@@ -18,10 +19,6 @@ ifndef TF_REGION\n$(error Set environment variable TF_REGION to continue)\nendif\n-ifndef TF_USER\n-$(error Set environment variable TF_USER to continue. 
Requires ccadmin/cloud_admin permissions.)\n-endif\n-\nifndef TF_PASSWORD\n$(error Set environment variable TF_PASSWORD to continue)\nendif\n@@ -38,7 +35,7 @@ init: validate\n-backend-config=\"auth_url=$(TF_BACKEND_AUTH_URL)\" \\\n-backend-config=\"region_name=$(TF_BACKEND_REGION)\" \\\n-backend-config=\"user_name=$(TF_BACKEND_USER)\" \\\n- -backend-config=\"domain_name=$(TF_BACKEND_USER_DOMAIN)\" \\\n+ -backend-config=\"user_domain_name=$(TF_BACKEND_USER_DOMAIN)\" \\\n-backend-config=\"password=$(TF_BACKEND_PASSWORD)\"\nplan: validate\n@@ -47,8 +44,8 @@ plan: validate\n-var user_name=$(TF_USER) \\\n-var user_domain_name=$(TF_USER_DOMAIN) \\\n-var password=$(TF_PASSWORD) \\\n- -var domain_name=$(TF_DOMAIN_NAME) \\\n- -var tenant_name=$(TF_TENANT) \\\n+ -var domain_name=$(TF_PROJECT_DOMAIN) \\\n+ -var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=10\n@@ -60,7 +57,7 @@ apply: validate\n-var user_name=$(TF_USER) \\\n-var user_domain_name=$(TF_USER_DOMAIN) \\\n-var password=$(TF_PASSWORD) \\\n- -var domain_name=$(TF_DOMAIN_NAME) \\\n- -var tenant_name=$(TF_TENANT) \\\n+ -var domain_name=$(TF_PROJECT_DOMAIN) \\\n+ -var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=10\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -141,10 +141,6 @@ data \"openstack_networking_network_v2\" \"external\" {\nname = \"FloatingIP-external-ccadmin\"\n}\n-data \"openstack_identity_user_v3\" \"pipeline\" {\n- name = \"${var.kubernikus-pipeline-user}\"\n-}\n-\nresource \"openstack_identity_role_v3\" \"kubernetes_admin\" {\n@@ -220,7 +216,7 @@ resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin\" {\n}\nresource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_admin\" {\n- user_id = \"${data.openstack_identity_user_v3.kubernikus_pipeline.id}\"\n+ user_id = \"${openstack_identity_user_v3.kubernikus_pipeline.id}\"\nproject_id = \"${openstack_identity_project_v3.kubernikus.id}\"\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses forked terraform for proper swift authentication
596,240
05.09.2018 14:15:04
-7,200
510a148d902d410f2f5f119b41d4771421996835
removes leaking passwords
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -26,11 +26,9 @@ endif\nall: validate init plan apply\n-validate:\n- env | grep \"TF_\\|OS_\"\n-init: validate\n- terraform init \\\n+init:\n+ @terraform init \\\n-plugin-dir=$(TF_PLUGIN_DIR) \\\n-backend-config=\"auth_url=$(TF_BACKEND_AUTH_URL)\" \\\n-backend-config=\"region_name=$(TF_BACKEND_REGION)\" \\\n@@ -38,8 +36,8 @@ init: validate\n-backend-config=\"user_domain_name=$(TF_BACKEND_USER_DOMAIN)\" \\\n-backend-config=\"password=$(TF_BACKEND_PASSWORD)\"\n-plan: validate\n- terraform plan \\\n+plan:\n+ @terraform plan \\\n-var region=$(TF_REGION) \\\n-var user_name=$(TF_USER) \\\n-var user_domain_name=$(TF_USER_DOMAIN) \\\n@@ -49,8 +47,8 @@ plan: validate\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=10\n-apply: validate\n- terraform apply \\\n+apply:\n+ @terraform apply \\\n-input=false \\\n-auto-approve \\\n-var region=$(TF_REGION) \\\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes leaking passwords
596,240
05.09.2018 14:51:57
-7,200
22d1449008c4bce4a945003174ccdc61178c23bc
adds seed task
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -402,6 +402,15 @@ resources:\nbranch: master\ndepth: 1\n+ - name: kubernikus-ci.git\n+ type: git\n+ check_every: 1h\n+ source:\n+ uri: https://github.com/sapcc/kubernikus.git\n+ branch: master\n+ depth: 1\n+ paths: [ ci ]\n+\n- name: kubernikus.git\ntype: git\ncheck_every: 1h\n@@ -410,6 +419,14 @@ resources:\nbranch: master\ndepth: 1\n+ - name: helm-charts-openstack-kubernikus.git\n+ type: git\n+ source:\n+ uri: https://github.com/sapcc/helm-charts.git\n+ branch: master\n+ paths: [ openstack/kubernikus/ ]\n+\n+\n- name: daily\ntype: time\nsource: {interval: 24h}\n@@ -1306,6 +1323,21 @@ jobs:\nTF_PASSWORD: ((kubernikus-terraform-password))\n+ - name: seed_ap-jp-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-jp-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\ngroups:\n- name: deploy\njobs:\n@@ -1364,6 +1396,9 @@ groups:\n- name: terraform\njobs:\n- terraform_ap-jp-1\n+ - name: seed\n+ jobs:\n+ - seed_ap-jp-1\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -130,6 +130,15 @@ resources:\nbranch: master\ndepth: 1\n+ - name: kubernikus-ci.git\n+ type: git\n+ check_every: 1h\n+ source:\n+ uri: https://github.com/sapcc/kubernikus.git\n+ branch: master\n+ depth: 1\n+ paths: [ ci ]\n+\n- name: kubernikus.git\ntype: git\ncheck_every: 1h\n@@ -138,6 +147,14 @@ resources:\nbranch: master\ndepth: 1\n+ - name: helm-charts-openstack-kubernikus.git\n+ type: git\n+ source:\n+ uri: https://github.com/sapcc/helm-charts.git\n+ branch: master\n+ paths: [ openstack/kubernikus/ ]\n+\n+\n- name: daily\ntype: time\nsource: {interval: 24h}\n@@ -442,6 +459,21 @@ jobs:\nTF_PASSWORD: ((kubernikus-terraform-password))\n+ - name: seed_ap-jp-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-jp-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\ngroups:\n- name: deploy\njobs:\n@@ -464,6 +496,9 @@ groups:\n- name: terraform\njobs:\n- terraform_ap-jp-1\n+ - name: seed\n+ jobs:\n+ - seed_ap-jp-1\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "ADD", "old_path": null, "new_path": "ci/task_helm_seed.yaml", "diff": "+\n+---\n+platform: linux\n+\n+image_resource:\n+ type: docker-image\n+ source: { repository: hub.global.cloud.sap/monsoon/kubectl, tag: 'v1.7.7'}\n+\n+inputs:\n+ - name: secrets.git\n+ - name: helm-charts-openstack-kubernikus.git\n+\n+run:\n+ path: sh\n+ args:\n+ - -exc\n+ - |\n+ set -o pipefail\n+ helm upgrade $RELEASE helm-charts-openstack-kubernikus.git/openstack/kubernikus --namespace $NAMESPACE --values secrets.git/$REGION/values/kubernikus.yaml --install --debug\n+\n+params:\n+ REGION:\n+ GITHUB_TOKEN:\n+ NAMESPACE: monsoon3\n+ RELEASE: kubernikus\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds seed task
596,240
05.09.2018 15:04:06
-7,200
408b33467b098993caeb81136dfff4900d9ab97c
uses global value
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -1323,6 +1323,52 @@ jobs:\nTF_PASSWORD: ((kubernikus-terraform-password))\n+\n+ - name: seed_ap-ae-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-ae-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_ap-au-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-au-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_ap-cn-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-cn-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n- name: seed_ap-jp-1\nserial: true\nplan:\n@@ -1331,13 +1377,194 @@ jobs:\n- get: secrets.git\n- get: kubernikus-ci.git\n- task: seed\n- file: kubernikus-ci.git/ci/task_seed.yaml\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\ninput_mapping:\ncharts.git: helm-charts-openstack-kubernikus.git\nparams:\nREGION: ap-jp-1\nGITHUB_TOKEN: ((github-access-token))\n+ - name: seed_ap-jp-2\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-jp-2\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_ap-sa-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: ap-sa-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_eu-de-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: eu-de-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_eu-de-2\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: eu-de-2\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_eu-nl-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: eu-nl-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_eu-ru-1\n+ serial: 
true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: eu-ru-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_la-br-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: la-br-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_na-ca-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: na-ca-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_na-us-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: na-us-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_na-us-3\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: na-us-3\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_qa-de-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: qa-de-1\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+ - name: seed_staging\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: helm-charts-openstack-kubernikus.git\n+ - get: secrets.git\n+ - get: kubernikus-ci.git\n+ - task: seed\n+ file: kubernikus-ci.git/ci/task_helm_seed.yaml\n+ input_mapping:\n+ charts.git: helm-charts-openstack-kubernikus.git\n+ params:\n+ REGION: staging\n+ GITHUB_TOKEN: ((github-access-token))\n+\n+\ngroups:\n- name: deploy\njobs:\n@@ -1398,7 +1625,39 @@ groups:\n- terraform_ap-jp-1\n- name: seed\njobs:\n+\n+ - seed_ap-ae-1\n+\n+ - seed_ap-au-1\n+\n+ - seed_ap-cn-1\n+\n- seed_ap-jp-1\n+\n+ - seed_ap-jp-2\n+\n+ - seed_ap-sa-1\n+\n+ - seed_eu-de-1\n+\n+ - seed_eu-de-2\n+\n+ - seed_eu-nl-1\n+\n+ - seed_eu-ru-1\n+\n+ - seed_la-br-1\n+\n+ - seed_na-ca-1\n+\n+ - seed_na-us-1\n+\n+ - seed_na-us-3\n+\n+ - seed_qa-de-1\n+\n+ - seed_staging\n+\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -459,7 +459,8 @@ jobs:\nTF_PASSWORD: ((kubernikus-terraform-password))\n- - name: seed_ap-jp-1\n+<% (ASIA + EMEA + AMERICAS + INFRA).each do |region| %>\n+ - name: seed_<%= region %>\nserial: true\nplan:\n- aggregate:\n@@ -467,12 +468,13 @@ jobs:\n- get: secrets.git\n- get: kubernikus-ci.git\n- task: seed\n- file: kubernikus-ci.git/ci/task_seed.yaml\n+ file: 
kubernikus-ci.git/ci/task_helm_seed.yaml\ninput_mapping:\ncharts.git: helm-charts-openstack-kubernikus.git\nparams:\n- REGION: ap-jp-1\n+ REGION: <%= region %>\nGITHUB_TOKEN: ((github-access-token))\n+<% end %>\ngroups:\n- name: deploy\n@@ -498,7 +500,9 @@ groups:\n- terraform_ap-jp-1\n- name: seed\njobs:\n- - seed_ap-jp-1\n+<% (ASIA + EMEA + AMERICAS + INFRA).each do |region| %>\n+ - seed_<%= region %>\n+<% end %>\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/task_helm_seed.yaml", "new_path": "ci/task_helm_seed.yaml", "diff": "@@ -16,7 +16,7 @@ run:\n- -exc\n- |\nset -o pipefail\n- helm upgrade $RELEASE helm-charts-openstack-kubernikus.git/openstack/kubernikus --namespace $NAMESPACE --values secrets.git/$REGION/values/kubernikus.yaml --install --debug\n+ helm upgrade $RELEASE helm-charts-openstack-kubernikus.git/openstack/kubernikus --namespace $NAMESPACE --values secrets.git/global/values/kubernikus-seed.yaml --install --debug\nparams:\nREGION:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses global value
596,240
05.09.2018 16:19:32
-7,200
187a81118f4526854659940ae3cd19c2aeacf1d3
voodoo shuffling
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -58,4 +58,4 @@ apply:\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n- -parallelism=10\n+ -parallelism=10 \\\n" }, { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -44,17 +44,6 @@ provider \"ccloud\" {\ndomain_name = \"ccadmin\"\n}\n-provider \"ccloud\" {\n- alias = \"kubernikus\"\n-\n- auth_url = \"https://identity-3.${var.region}.cloud.sap/v3\"\n- region = \"${var.region}\"\n- user_name = \"${var.user_name}\"\n- user_domain_name = \"${var.user_domain_name}\"\n- password = \"${var.password}\"\n- tenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n-}\n-\nterraform {\nbackend \"swift\" {\ntenant_name = \"master\"\n@@ -85,6 +74,7 @@ data \"ccloud_identity_group_v3\" \"ccadmin_domain_admins\" {\nname = \"CCADMIN_DOMAIN_ADMINS\"\n}\n+\ndata \"openstack_identity_role_v3\" \"admin\" {\nname = \"admin\"\n}\n@@ -221,8 +211,6 @@ resource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_admin\" {\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n-\n-\nresource \"openstack_identity_role_assignment_v3\" \"kubernikus-admin\" {\nuser_id = \"${openstack_identity_user_v3.kubernikus_service.id}\"\nproject_id = \"${data.openstack_identity_project_v3.cloud_admin.id}\"\n@@ -407,22 +395,8 @@ resource \"openstack_identity_endpoint_v3\" \"kubernikus-kubernikus\" {\nurl = \"https://k-${var.region}.admin.cloud.sap\"\n}\n-resource \"ccloud_kubernetes\" \"kluster\" {\n- provider = \"ccloud.kubernikus\"\n- is_admin = true\n- name = \"k-${var.region}\"\n- ssh_public_key = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCXIxVEUgtUVkvk2VM1hmIb8MxvxsmvYoiq9OBy3J8akTGNybqKsA2uhcwxSJX5Cn3si8kfMfka9EWiJT+e1ybvtsGILO5XRZPxyhYzexwb3TcALwc3LuzpF3Z/Dg2jYTRELTGhYmyca3mxzTlCjNXvYayLNedjJ8fIBzoCuSXNqDRToHru7h0Glz+wtuE74mNkOiXSvhtuJtJs7VCNVjobFQNfC1aeDsri2bPRHJJZJ0QF4LLYSayMEz3lVwIDyAviQR2Aa97WfuXiofiAemfGqiH47Kq6b8X7j3bOYGBvJKMUV7XeWhGsskAmTsvvnFxkc5PAD3Ct+liULjiQWlzDrmpTE8aMqLK4l0YQw7/8iRVz6gli42iEc2ZG56ob1ErpTLAKFWyCNOebZuGoygdEQaGTIIunAncXg5Rz07TdPl0Tf5ZZLpiAgR5ck0H1SETnjDTZ/S83CiVZWJgmCpu8YOKWyYRD4orWwdnA77L4+ixeojLIhEoNL8KlBgsP9Twx+fFMWLfxMmiuX+yksM6Hu+Lsm+Ao7Q284VPp36EB1rxP1JM7HCiEOEm50Jb6hNKjgN4aoLhG5yg+GnDhwCZqUwcRJo1bWtm3QvRA+rzrGZkId4EY3cyOK5QnYV5+24x93Ex0UspHMn7HGsHUESsVeV0fLqlfXyd2RbHTmDMP6w==\"\n-\n- node_pools = [\n- { name = \"payload\", flavor = \"m1.xlarge_cpu\", size = 3 },\n- ]\n- depends_on = [\n- \"openstack_identity_endpoint_v3.kubernikus\",\n- \"openstack_networking_router_v2.router\"\n- ]\n-}\ndata \"openstack_dns_zone_v2\" \"region_cloud_sap\" {\nprovider = \"openstack.master\"\n@@ -507,3 +481,34 @@ resource \"openstack_dns_recordset_v2\" \"wildcard-k-region\" {\n}\n+\n+\n+provider \"ccloud\" {\n+ alias = \"kubernikus\"\n+\n+ auth_url = \"https://identity-3.${var.region}.cloud.sap/v3\"\n+ region = \"${var.region}\"\n+ user_name = \"kubernikus-terraform\"\n+ user_domain_name = \"Default\"\n+ password = \"${var.password}\"\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n+}\n+\n+resource \"ccloud_kubernetes\" \"kluster\" {\n+ provider = \"ccloud.kubernikus\"\n+\n+ is_admin = true\n+ name = \"k-${var.region}\"\n+ ssh_public_key = \"ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCXIxVEUgtUVkvk2VM1hmIb8MxvxsmvYoiq9OBy3J8akTGNybqKsA2uhcwxSJX5Cn3si8kfMfka9EWiJT+e1ybvtsGILO5XRZPxyhYzexwb3TcALwc3LuzpF3Z/Dg2jYTRELTGhYmyca3mxzTlCjNXvYayLNedjJ8fIBzoCuSXNqDRToHru7h0Glz+wtuE74mNkOiXSvhtuJtJs7VCNVjobFQNfC1aeDsri2bPRHJJZJ0QF4LLYSayMEz3lVwIDyAviQR2Aa97WfuXiofiAemfGqiH47Kq6b8X7j3bOYGBvJKMUV7XeWhGsskAmTsvvnFxkc5PAD3Ct+liULjiQWlzDrmpTE8aMqLK4l0YQw7/8iRVz6gli42iEc2ZG56ob1ErpTLAKFWyCNOebZuGoygdEQaGTIIunAncXg5Rz07TdPl0Tf5ZZLpiAgR5ck0H1SETnjDTZ/S83CiVZWJgmCpu8YOKWyYRD4orWwdnA77L4+ixeojLIhEoNL8KlBgsP9Twx+fFMWLfxMmiuX+yksM6Hu+Lsm+Ao7Q284VPp36EB1rxP1JM7HCiEOEm50Jb6hNKjgN4aoLhG5yg+GnDhwCZqUwcRJo1bWtm3QvRA+rzrGZkId4EY3cyOK5QnYV5+24x93Ex0UspHMn7HGsHUESsVeV0fLqlfXyd2RbHTmDMP6w==\"\n+\n+ node_pools = [\n+ { name = \"payload\", flavor = \"m1.xlarge_cpu\", size = 3 },\n+ ]\n+\n+ depends_on = [\n+ \"openstack_identity_endpoint_v3.kubernikus\",\n+ \"openstack_networking_router_v2.router\"\n+ ]\n+}\n+\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
voodoo shuffling
596,240
05.09.2018 16:45:30
-7,200
af0f3b7bf847b05d0755c947a80cd8f6109f9b6d
adds terraform user to kubernikus project as kubernikus admin
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -74,6 +74,10 @@ data \"ccloud_identity_group_v3\" \"ccadmin_domain_admins\" {\nname = \"CCADMIN_DOMAIN_ADMINS\"\n}\n+data \"openstack_identity_user_v3\" \"kubernikus_terraform\" {\n+ name = \"kubernikus-terraform\"\n+ domain_id = \"${data.openstack_identity_project_v3.default.id}\"\n+}\ndata \"openstack_identity_role_v3\" \"admin\" {\nname = \"admin\"\n@@ -205,6 +209,13 @@ resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin\" {\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"terraform_kubernetes_admin\" {\n+ user_id = \"${data.openstack_identity_user_v3.kubernikus_terraform.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n+ role_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n+}\n+\nresource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_admin\" {\nuser_id = \"${openstack_identity_user_v3.kubernikus_pipeline.id}\"\nproject_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds terraform user to kubernikus project as kubernikus admin
596,240
06.09.2018 17:02:41
-7,200
1bf0201f13cb45177759dbbcd6f29f2334a0868d
pulls infrastructure pause image from dockerhub. required for download in china
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.10.go", "new_path": "pkg/templates/node_1.10.go", "diff": "@@ -104,6 +104,7 @@ systemd:\n--network-plugin=kubenet \\\n--non-masquerade-cidr=0.0.0.0/0 \\\n--lock-file=/var/run/lock/kubelet.lock \\\n+ --pod-infra-container-image=sapcc/pause-amd64:3.1 \\\n{{- if .NodeLabels }}\n--node-labels={{ .NodeLabels | join \",\" }} \\\n{{- end }}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
pulls infrastructure pause image from dockerhub. required for download in china
596,240
07.09.2018 13:59:22
-7,200
d43a13d87b722627d6c22a72f9b64b48ad89e80d
work in progress
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -21,47 +21,17 @@ auth_feature: &auth_feature\nKUBERNIKUS_NAME: k-feature\nKUBERNIKUS_URL: https://k-feature.admin.cloud.sap\n-auth_infra: &auth_infra\n- OS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus-infra\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-infra\n- KUBERNIKUS_URL: https://k-infra.admin.cloud.sap\n-\n-\n-auth_staging: &auth_staging\n- OS_AUTH_URL: https://identity-3.staging.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-staging-username))\n- OS_PASSWORD: ((kubernikus-staging-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-staging\n- KUBERNIKUS_URL: https://k-staging.admin.cloud.sap\n-\n-auth_qa-de-1: &auth_qa-de-1\n- OS_AUTH_URL: https://identity-3.qa-de-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-qa-username))\n- OS_PASSWORD: ((kubernikus-qa-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-qa-de-1\n- KUBERNIKUS_URL: https://k-qa-de-1.admin.cloud.sap\n-auth_ap-ae-1: &auth_ap-ae-1\n- OS_AUTH_URL: https://identity-3.ap-ae-1.cloud.sap/v3\n+auth_na-us-1: &auth_na-us-1\n+ OS_AUTH_URL: https://identity-3.na-us-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\nOS_PASSWORD: ((kubernikus-prod-password))\nOS_USER_DOMAIN_NAME: ccadmin\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-ap-ae-1\n- KUBERNIKUS_URL: https://k-ap-ae-1.admin.cloud.sap\n+ KUBERNIKUS_NAME: k-na-us-1\n+ KUBERNIKUS_URL: https://k-na-us-1.admin.cloud.sap\nauth_ap-au-1: &auth_ap-au-1\nOS_AUTH_URL: https://identity-3.ap-au-1.cloud.sap/v3\n@@ -73,46 +43,6 @@ auth_ap-au-1: &auth_ap-au-1\nKUBERNIKUS_NAME: k-ap-au-1\nKUBERNIKUS_URL: https://k-ap-au-1.admin.cloud.sap\n-auth_ap-cn-1: &auth_ap-cn-1\n- OS_AUTH_URL: https://identity-3.ap-cn-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-ap-cn-1\n- KUBERNIKUS_URL: https://k-ap-cn-1.admin.cloud.sap\n-\n-auth_ap-jp-1: &auth_ap-jp-1\n- OS_AUTH_URL: https://identity-3.ap-jp-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-ap-jp-1\n- KUBERNIKUS_URL: https://k-ap-jp-1.admin.cloud.sap\n-\n-auth_ap-jp-2: &auth_ap-jp-2\n- OS_AUTH_URL: https://identity-3.ap-jp-2.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-ap-jp-2\n- KUBERNIKUS_URL: https://k-ap-jp-2.admin.cloud.sap\n-\n-auth_ap-sa-1: &auth_ap-sa-1\n- OS_AUTH_URL: https://identity-3.ap-sa-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-ap-sa-1\n- KUBERNIKUS_URL: https://k-ap-sa-1.admin.cloud.sap\n-\nauth_eu-de-1: &auth_eu-de-1\nOS_AUTH_URL: 
https://identity-3.eu-de-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n@@ -143,21 +73,13 @@ auth_eu-nl-1: &auth_eu-nl-1\nKUBERNIKUS_NAME: k-eu-nl-1\nKUBERNIKUS_URL: https://k-eu-nl-1.admin.cloud.sap\n-auth_eu-ru-1: &auth_eu-ru-1\n- OS_AUTH_URL: https://identity-3.eu-ru-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-eu-ru-1\n- KUBERNIKUS_URL: https://k-eu-ru-1.admin.cloud.sap\n+\nauth_la-br-1: &auth_la-br-1\nOS_AUTH_URL: https://identity-3.la-br-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-la-br-1\n@@ -165,34 +87,94 @@ auth_la-br-1: &auth_la-br-1\nauth_na-ca-1: &auth_na-ca-1\nOS_AUTH_URL: https://identity-3.na-ca-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-na-ca-1\nKUBERNIKUS_URL: https://k-na-ca-1.admin.cloud.sap\n-auth_na-us-1: &auth_na-us-1\n- OS_AUTH_URL: https://identity-3.na-us-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-na-us-1\n- KUBERNIKUS_URL: https://k-na-us-1.admin.cloud.sap\n-\nauth_na-us-3: &auth_na-us-3\nOS_AUTH_URL: https://identity-3.na-us-3.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-na-us-3\nKUBERNIKUS_URL: https://k-na-us-3.admin.cloud.sap\n+auth_ap-ae-1: &auth_ap-ae-1\n+ OS_AUTH_URL: https://identity-3.ap-ae-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-ap-ae-1\n+ KUBERNIKUS_URL: https://k-ap-ae-1.admin.cloud.sap\n+\n+auth_ap-cn-1: &auth_ap-cn-1\n+ OS_AUTH_URL: https://identity-3.ap-cn-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-ap-cn-1\n+ KUBERNIKUS_URL: https://k-ap-cn-1.admin.cloud.sap\n+\n+auth_ap-jp-1: &auth_ap-jp-1\n+ OS_AUTH_URL: https://identity-3.ap-jp-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-ap-jp-1\n+ KUBERNIKUS_URL: https://k-ap-jp-1.admin.cloud.sap\n+\n+auth_ap-jp-2: &auth_ap-jp-2\n+ OS_AUTH_URL: https://identity-3.ap-jp-2.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: 
((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-ap-jp-2\n+ KUBERNIKUS_URL: https://k-ap-jp-2.admin.cloud.sap\n+\n+auth_ap-sa-1: &auth_ap-sa-1\n+ OS_AUTH_URL: https://identity-3.ap-sa-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-ap-sa-1\n+ KUBERNIKUS_URL: https://k-ap-sa-1.admin.cloud.sap\n+\n+auth_eu-ru-1: &auth_eu-ru-1\n+ OS_AUTH_URL: https://identity-3.eu-ru-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-eu-ru-1\n+ KUBERNIKUS_URL: https://k-eu-ru-1.admin.cloud.sap\n+\n+auth_qa-de-1: &auth_qa-de-1\n+ OS_AUTH_URL: https://identity-3.qa-de-1.cloud.sap/v3\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-qa-de-1\n+ KUBERNIKUS_URL: https://k-qa-de-1.admin.cloud.sap\n+\nauth_e2e_eu-nl-1_master: &auth_e2e_eu-nl-1_master\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n@@ -381,17 +363,6 @@ resources:\nbranch: feature\ndepth: 1\n- - name: infra.builds\n- type: gh-status\n- source:\n- username: sapcc-bot\n- password: ((github-com-access-token))\n- owner: sapcc\n- repo: kubernikus\n- access_token: ((github-com-access-token))\n- branch: infra\n- depth: 1\n-\n- name: secrets.git\ntype: git\nwebhook_token: aldhjalkdhahdjkahdjkhjadhjadhkjadlkjhAAdd\n@@ -411,13 +382,14 @@ resources:\ndepth: 1\npaths: [ ci ]\n- - name: kubernikus.git\n+ - name: kubernikus-terraform.git\ntype: git\ncheck_every: 1h\nsource:\nuri: https://github.com/sapcc/kubernikus.git\nbranch: master\ndepth: 1\n+ paths: [ terraform ]\n- name: helm-charts-openstack-kubernikus.git\ntype: git\n@@ -496,22 +468,6 @@ jobs:\nparams:\n<<: *auth_feature\n- - name: infra\n- serial: true\n- plan:\n- - aggregate:\n- - get: secrets.git\n- - get: kubernikus.builds\n- resource: infra.builds\n- trigger: true\n- - aggregate:\n- - task: k-infra\n- file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n- params:\n- REGION: admin\n- KUBERNIKUS_NAME: k-infra\n- GITHUB_TOKEN: ((github-access-token))\n-\n- name: e2e\nserial: true\nplan:\n@@ -530,17 +486,6 @@ jobs:\non_failure: { put: slack, params: {alert_type: broke } }\non_abort: { put: slack, params: {alert_type: broke } }\n- - name: conformance\n- serial: true\n- build_logs_to_retain: 30\n- plan:\n- - aggregate:\n- - get: kubernikus.builds\n- resource: master.builds\n- passed: [e2e]\n- - get: daily\n- trigger: true\n-\n- name: prod\nserial: true\nplan:\n@@ -835,31 +780,6 @@ jobs:\n- - name: staging\n- serial: true\n- plan:\n- - aggregate:\n- - get: secrets.git\n- - get: kubernikus.builds\n- resource: feature.builds\n- passed: [feature]\n- trigger: true\n- - aggregate:\n- - task: k-staging\n- file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n- params:\n- REGION: admin\n- KUBERNIKUS_NAME: k-staging\n- GITHUB_TOKEN: ((github-access-token))\n- - task: kubernikus\n- file: kubernikus.builds/ci/task_helm_kubernikus.yaml\n- params:\n- <<: *auth_staging\n- - task: kubernikus-system\n- file: kubernikus.builds/ci/task_helm_kubernikus-system.yaml\n- params:\n- <<: *auth_staging\n-\n- name: 
qa-de-1\nserial: true\nplan:\n@@ -1284,12 +1204,6 @@ jobs:\nparams:\n<<: *auth_qa-de-1\n- - task: k-staging\n- file: kubernikus.builds/ci/task_tiller.yaml\n- timeout: 10m\n- params:\n- <<: *auth_staging\n-\n- task: k-master\nfile: kubernikus.builds/ci/task_tiller.yaml\ntimeout: 10m\n@@ -1302,19 +1216,121 @@ jobs:\nparams:\n<<: *auth_feature\n- - task: k-infra\n- file: kubernikus.builds/ci/task_tiller.yaml\n- timeout: 10m\n+\n+ - name: terraform\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ - get: daily\n+ trigger: true\n+\n+\n+ - name: terraform_AMERICAS\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ passed: [terraform]\n+ trigger: true\n+\n+\n+ - name: terraform_la-br-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_AMERICAS]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: la-br-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+ - name: terraform_na-ca-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_AMERICAS]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: na-ca-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+ - name: terraform_na-us-3\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_AMERICAS]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\nparams:\n- <<: *auth_infra\n+ TF_REGION: na-us-3\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+\n+ - name: terraform_ASIA\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ passed: [terraform]\n+ trigger: true\n+ - name: terraform_ap-ae-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_ASIA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-ae-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+ - name: terraform_ap-cn-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_ASIA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-cn-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n- name: terraform_ap-jp-1\nserial: true\nplan:\n- aggregate:\n- get: secrets.git\n- get: kubernikus.git\n+ passed: [terraform_ASIA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n- task: terraform\nfile: kubernikus.git/ci/task_terraform.yaml\ntimeout: 45m\n@@ -1322,6 +1338,90 @@ jobs:\nTF_REGION: ap-jp-1\nTF_PASSWORD: ((kubernikus-terraform-password))\n+ - name: terraform_ap-jp-2\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_ASIA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-jp-2\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+ - name: terraform_ap-sa-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: 
[terraform_ASIA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: ap-sa-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+\n+ - name: terraform_EMEA\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ passed: [terraform]\n+ trigger: true\n+\n+\n+ - name: terraform_eu-ru-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_EMEA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: eu-ru-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+\n+ - name: terraform_INFRA\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ passed: [terraform]\n+ trigger: true\n+\n+\n+ - name: terraform_qa-de-1\n+ serial: true\n+ plan:\n+ - aggregate:\n+ - get: secrets.git\n+ - get: kubernikus.git\n+ passed: [terraform_INFRA]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n+ - task: terraform\n+ file: kubernikus.git/ci/task_terraform.yaml\n+ timeout: 45m\n+ params:\n+ TF_REGION: qa-de-1\n+ TF_PASSWORD: ((kubernikus-terraform-password))\n+\n+\n+\n- name: seed_ap-ae-1\n@@ -1549,21 +1649,6 @@ jobs:\nREGION: qa-de-1\nGITHUB_TOKEN: ((github-access-token))\n- - name: seed_staging\n- serial: true\n- plan:\n- - aggregate:\n- - get: helm-charts-openstack-kubernikus.git\n- - get: secrets.git\n- - get: kubernikus-ci.git\n- - task: seed\n- file: kubernikus-ci.git/ci/task_helm_seed.yaml\n- input_mapping:\n- charts.git: helm-charts-openstack-kubernikus.git\n- params:\n- REGION: staging\n- GITHUB_TOKEN: ((github-access-token))\n-\ngroups:\n- name: deploy\n@@ -1571,16 +1656,11 @@ groups:\n- qa-de-1\n- - staging\n-\n- master\n- feature\n- - infra\n-\n- e2e\n- - conformance\n- prod\n- feature\n- admin\n@@ -1622,7 +1702,40 @@ groups:\n- name: terraform\njobs:\n+ - terraform\n+\n+ - terraform_AMERICAS\n+\n+ - terraform_la-br-1\n+\n+ - terraform_na-ca-1\n+\n+ - terraform_na-us-3\n+\n+\n+ - terraform_ASIA\n+\n+ - terraform_ap-ae-1\n+\n+ - terraform_ap-cn-1\n+\n- terraform_ap-jp-1\n+\n+ - terraform_ap-jp-2\n+\n+ - terraform_ap-sa-1\n+\n+\n+ - terraform_EMEA\n+\n+ - terraform_eu-ru-1\n+\n+\n+ - terraform_INFRA\n+\n+ - terraform_qa-de-1\n+\n+\n- name: seed\njobs:\n@@ -1656,8 +1769,6 @@ groups:\n- seed_qa-de-1\n- - seed_staging\n-\n- name: misc\njobs:\n- tiller\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "<%\n+\n+CONFIG = {\n+ \"AMERICAS\" => {\n+ \"MANUAL\" => [\"na-us-1\"],\n+ \"AUTO\" => [\"la-br-1\", \"na-ca-1\", \"na-us-3\"]\n+ },\n+ \"ASIA\" => {\n+ \"MANUAL\" => [\"ap-au-1\"],\n+ \"AUTO\" => [\"ap-ae-1\", \"ap-cn-1\", \"ap-jp-1\", \"ap-jp-2\", \"ap-sa-1\"]\n+ },\n+ \"EMEA\" => {\n+ \"MANUAL\" => [\"eu-de-1\", \"eu-de-2\", \"eu-nl-1\"],\n+ \"AUTO\" => [\"eu-ru-1\"]\n+ },\n+ \"INFRA\" => {\n+ \"MANUAL\" => [],\n+ \"AUTO\" => [\"qa-de-1\"]\n+ }\n+}\n+\nASIA = [\"ap-ae-1\", \"ap-au-1\", \"ap-cn-1\", \"ap-jp-1\", \"ap-jp-2\", \"ap-sa-1\"]\nEMEA = [\"eu-de-1\", \"eu-de-2\", \"eu-nl-1\", \"eu-ru-1\", \"la-br-1\"]\nAMERICAS = [\"na-ca-1\", \"na-us-1\", \"na-us-3\"]\n- INFRA = [\"qa-de-1\", \"staging\"]\n- INTERNAL = [\"master\", \"feature\", \"infra\"]\n+ INFRA = [\"qa-de-1\"]\n+ INTERNAL = [\"master\", \"feature\"]\nADMIN = (ASIA + EMEA + AMERICAS + INFRA).map { |r| \"k-#{r}\" }\n%>\n@@ -20,32 +40,24 @@ auth_<%= region %>: &auth_<%= region 
%>\nKUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n<% end %>\n-auth_staging: &auth_staging\n- OS_AUTH_URL: https://identity-3.staging.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-staging-username))\n- OS_PASSWORD: ((kubernikus-staging-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-staging\n- KUBERNIKUS_URL: https://k-staging.admin.cloud.sap\n-\n-auth_qa-de-1: &auth_qa-de-1\n- OS_AUTH_URL: https://identity-3.qa-de-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-qa-username))\n- OS_PASSWORD: ((kubernikus-qa-password))\n+<% CONFIG.map { |_, group| group[\"MANUAL\"] }.flatten.each do |region| %>\n+auth_<%= region %>: &auth_<%= region %>\n+ OS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\n+ OS_USERNAME: ((kubernikus-prod-username))\n+ OS_PASSWORD: ((kubernikus-prod-password))\nOS_USER_DOMAIN_NAME: ccadmin\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-qa-de-1\n- KUBERNIKUS_URL: https://k-qa-de-1.admin.cloud.sap\n+ KUBERNIKUS_NAME: k-<%= region %>\n+ KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+<% end %>\n-<% (ASIA + EMEA + AMERICAS).each do |region| %>\n+<% CONFIG.map { |_, group| group[\"AUTO\"] }.flatten.each do |region| %>\nauth_<%= region %>: &auth_<%= region %>\nOS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\nOS_PROJECT_NAME: kubernikus\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-<%= region %>\n@@ -109,17 +121,6 @@ resources:\nbranch: feature\ndepth: 1\n- - name: infra.builds\n- type: gh-status\n- source:\n- username: sapcc-bot\n- password: ((github-com-access-token))\n- owner: sapcc\n- repo: kubernikus\n- access_token: ((github-com-access-token))\n- branch: infra\n- depth: 1\n-\n- name: secrets.git\ntype: git\nwebhook_token: aldhjalkdhahdjkahdjkhjadhjadhkjadlkjhAAdd\n@@ -139,13 +140,14 @@ resources:\ndepth: 1\npaths: [ ci ]\n- - name: kubernikus.git\n+ - name: kubernikus-terraform.git\ntype: git\ncheck_every: 1h\nsource:\nuri: https://github.com/sapcc/kubernikus.git\nbranch: master\ndepth: 1\n+ paths: [ terraform ]\n- name: helm-charts-openstack-kubernikus.git\ntype: git\n@@ -224,22 +226,6 @@ jobs:\nparams:\n<<: *auth_feature\n- - name: infra\n- serial: true\n- plan:\n- - aggregate:\n- - get: secrets.git\n- - get: kubernikus.builds\n- resource: infra.builds\n- trigger: true\n- - aggregate:\n- - task: k-infra\n- file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n- params:\n- REGION: admin\n- KUBERNIKUS_NAME: k-infra\n- GITHUB_TOKEN: ((github-access-token))\n-\n- name: e2e\nserial: true\nplan:\n@@ -258,17 +244,6 @@ jobs:\non_failure: { put: slack, params: {alert_type: broke } }\non_abort: { put: slack, params: {alert_type: broke } }\n- - name: conformance\n- serial: true\n- build_logs_to_retain: 30\n- plan:\n- - aggregate:\n- - get: kubernikus.builds\n- resource: master.builds\n- passed: [e2e]\n- - get: daily\n- trigger: true\n-\n- name: prod\nserial: true\nplan:\n@@ -349,7 +324,7 @@ jobs:\npassed: [prod]\ntrigger: true\n- aggregate:\n-<% (ASIA + EMEA + AMERICAS + INTERNAL - [\"infra\"]).each do |region| %>\n+<% (ASIA + EMEA + AMERICAS + INTERNAL).each do |region| %>\n- task: k-<%= region %>\nfile: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\nparams:\n@@ -359,31 +334,6 @@ 
jobs:\n<% end %>\n- - name: staging\n- serial: true\n- plan:\n- - aggregate:\n- - get: secrets.git\n- - get: kubernikus.builds\n- resource: feature.builds\n- passed: [feature]\n- trigger: true\n- - aggregate:\n- - task: k-staging\n- file: kubernikus.builds/ci/task_helm-admin_kubernikus.yaml\n- params:\n- REGION: admin\n- KUBERNIKUS_NAME: k-staging\n- GITHUB_TOKEN: ((github-access-token))\n- - task: kubernikus\n- file: kubernikus.builds/ci/task_helm_kubernikus.yaml\n- params:\n- <<: *auth_staging\n- - task: kubernikus-system\n- file: kubernikus.builds/ci/task_helm_kubernikus-system.yaml\n- params:\n- <<: *auth_staging\n-\n- name: qa-de-1\nserial: true\nplan:\n@@ -445,18 +395,39 @@ jobs:\n<<: *auth_<%= region %>\n<% end %>\n- - name: terraform_ap-jp-1\n+ - name: terraform\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ - get: daily\n+ trigger: true\n+\n+<% CONFIG.each do |group, regions| %>\n+ - name: terraform_<%= group %>\n+ plan:\n+ - aggregate:\n+ - get: kubernikus-terraform.git\n+ passed: [terraform]\n+ trigger: true\n+\n+<% regions[\"AUTO\"].each do |region| %>\n+ - name: terraform_<%= region %>\nserial: true\nplan:\n- aggregate:\n- get: secrets.git\n- get: kubernikus.git\n+ passed: [terraform_<%= group %>]\n+ resource: kubernikus-terraform.git\n+ trigger: true\n- task: terraform\nfile: kubernikus.git/ci/task_terraform.yaml\ntimeout: 45m\nparams:\n- TF_REGION: ap-jp-1\n+ TF_REGION: <%= region %>\nTF_PASSWORD: ((kubernikus-terraform-password))\n+<% end %>\n+<% end %>\n<% (ASIA + EMEA + AMERICAS + INFRA).each do |region| %>\n@@ -483,7 +454,6 @@ groups:\n- <%= region %>\n<% end %>\n- e2e\n- - conformance\n- prod\n- feature\n- admin\n@@ -497,7 +467,13 @@ groups:\n<% end %>\n- name: terraform\njobs:\n- - terraform_ap-jp-1\n+ - terraform\n+<% CONFIG.each do |group, regions| %>\n+ - terraform_<%= group %>\n+<% regions[\"AUTO\"].each do |region| %>\n+ - terraform_<%= region %>\n+<% end %>\n+<% end %>\n- name: seed\njobs:\n<% (ASIA + EMEA + AMERICAS + INFRA).each do |region| %>\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
work in progresss
596,240
07.09.2018 15:30:28
-7,200
e31a3aa18b2d8f423a4963707356b240d12aba92
creates e2e project
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -135,7 +135,9 @@ data \"openstack_networking_network_v2\" \"external\" {\nname = \"FloatingIP-external-ccadmin\"\n}\n-\n+data \"openstack_networking_network_v2\" \"external_e2e\" {\n+ name = \"FloatingIP-external-monsoon3-01\"\n+}\nresource \"openstack_identity_role_v3\" \"kubernetes_admin\" {\nname = \"kubernetes_admin\"\n@@ -523,3 +525,117 @@ resource \"ccloud_kubernetes\" \"kluster\" {\n}\n+\n+resource \"openstack_identity_project_v3\" \"kubernikus_e2e\" {\n+ name = \"kubernikus_e2e\"\n+ domain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\n+ description = \"Kubernikus E2E Tests\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"compute_admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.compute_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"network_admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.network_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"resource_admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.resource_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"volume_admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.volume_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin_e2e\" {\n+ group_id = \"${data.ccloud_identity_group_v3.ccadmin_domain_admins.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n+}\n+\n+resource \"ccloud_quota\" \"kubernikus_e2e\" {\n+ provider = \"ccloud.cloud_admin\"\n+\n+ domain_id = \"${data.openstack_identity_project_v3.ccadmin.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+\n+ compute {\n+ instances = 5\n+ cores = 32\n+ ram = 8192\n+ }\n+\n+ volumev2 {\n+ capacity = 16\n+ snapshots = 2\n+ volumes = 2\n+ }\n+\n+ network {\n+ floating_ips = 2\n+ networks = 1\n+ ports = 500\n+ routers = 1\n+ security_group_rules = 64\n+ security_groups = 4\n+ subnets = 1\n+ healthmonitors = 0\n+ l7policies = 0\n+ listeners = 0\n+ loadbalancers = 0\n+ pools = 0\n+ }\n+}\n+\n+\n+resource \"openstack_networking_rbacpolicies_v2\" \"external_e2e\" {\n+ action = \"access_as_shared\"\n+ object_id = \"${data.openstack_networking_network_v2.external_e2e.id}\"\n+ object_type = \"network\"\n+ target_tenant = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+}\n+\n+resource \"openstack_networking_network_v2\" \"network_e2e\" {\n+ tenant_id = 
\"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ name = \"kubernikus_e2e\"\n+ admin_state_up = \"true\"\n+ depends_on = [\"ccloud_quota.kubernikus_e2e\"]\n+}\n+\n+resource \"openstack_networking_subnet_v2\" \"subnet_e2e\" {\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ name = \"default\"\n+ network_id = \"${openstack_networking_network_v2.network_e2e.id}\"\n+ cidr = \"10.180.0.0/16\"\n+ ip_version = 4\n+ depends_on = [\"ccloud_quota.kubernikus_e2e\"]\n+}\n+\n+resource \"openstack_networking_router_v2\" \"router_e2e\" {\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ name = \"default\"\n+ admin_state_up = true\n+ external_network_id = \"${data.openstack_networking_network_v2.external_e2e.id}\"\n+ depends_on = [\"ccloud_quota.kubernikus_e2e\"]\n+}\n+\n+resource \"openstack_networking_router_interface_v2\" \"router_interface_e2e\" {\n+ router_id = \"${openstack_networking_router_v2.router_e2e.id}\"\n+ subnet_id = \"${openstack_networking_subnet_v2.subnet_e2e.id}\"\n+}\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
creates e2e project
596,240
07.09.2018 15:50:05
-7,200
e65d0e6e95d0650bf8106e44753dae146cc62740
no parallelism for limes
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -45,7 +45,7 @@ plan:\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n- -parallelism=10\n+ -parallelism=1\napply:\n@terraform apply \\\n@@ -58,4 +58,4 @@ apply:\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n- -parallelism=10 \\\n+ -parallelism=1\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
no paralleism for limes
596,240
11.09.2018 17:23:31
-7,200
448b2ea93a2360b2159b5f390e984155f7b0e672
fixes overlapping service/cluster cidrs
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -83,6 +83,10 @@ data \"openstack_identity_role_v3\" \"admin\" {\nname = \"admin\"\n}\n+data \"openstack_identity_role_v3\" \"member\" {\n+ name = \"member\"\n+}\n+\ndata \"openstack_identity_role_v3\" \"compute_admin\" {\nname = \"compute_admin\"\n}\n@@ -336,7 +340,6 @@ resource \"openstack_networking_subnet_v2\" \"subnet\" {\nnetwork_id = \"${openstack_networking_network_v2.network.id}\"\ncidr = \"198.18.0.0/24\"\nip_version = 4\n- depends_on = [\"ccloud_quota.kubernikus\"]\n}\nresource \"openstack_networking_router_v2\" \"router\" {\n@@ -512,6 +515,8 @@ resource \"ccloud_kubernetes\" \"kluster\" {\nis_admin = true\nname = \"k-${var.region}\"\n+ cluster_cidr = \"198.19.0.0/16\"\n+ service_cidr = \"192.168.128.0/17\"\nssh_public_key = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCXIxVEUgtUVkvk2VM1hmIb8MxvxsmvYoiq9OBy3J8akTGNybqKsA2uhcwxSJX5Cn3si8kfMfka9EWiJT+e1ybvtsGILO5XRZPxyhYzexwb3TcALwc3LuzpF3Z/Dg2jYTRELTGhYmyca3mxzTlCjNXvYayLNedjJ8fIBzoCuSXNqDRToHru7h0Glz+wtuE74mNkOiXSvhtuJtJs7VCNVjobFQNfC1aeDsri2bPRHJJZJ0QF4LLYSayMEz3lVwIDyAviQR2Aa97WfuXiofiAemfGqiH47Kq6b8X7j3bOYGBvJKMUV7XeWhGsskAmTsvvnFxkc5PAD3Ct+liULjiQWlzDrmpTE8aMqLK4l0YQw7/8iRVz6gli42iEc2ZG56ob1ErpTLAKFWyCNOebZuGoygdEQaGTIIunAncXg5Rz07TdPl0Tf5ZZLpiAgR5ck0H1SETnjDTZ/S83CiVZWJgmCpu8YOKWyYRD4orWwdnA77L4+ixeojLIhEoNL8KlBgsP9Twx+fFMWLfxMmiuX+yksM6Hu+Lsm+Ao7Q284VPp36EB1rxP1JM7HCiEOEm50Jb6hNKjgN4aoLhG5yg+GnDhwCZqUwcRJo1bWtm3QvRA+rzrGZkId4EY3cyOK5QnYV5+24x93Ex0UspHMn7HGsHUESsVeV0fLqlfXyd2RbHTmDMP6w==\"\nnode_pools = [\n@@ -568,6 +573,18 @@ resource \"openstack_identity_role_assignment_v3\" \"kubernetes_admin_e2e\" {\nrole_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n}\n+resource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_admin_e2e\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus_pipeline.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${openstack_identity_role_v3.kubernetes_admin.id}\"\n+}\n+\n+resource \"openstack_identity_role_assignment_v3\" \"pipeline_kubernetes_member_e2e\" {\n+ user_id = \"${openstack_identity_user_v3.kubernikus_pipeline.id}\"\n+ project_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ role_id = \"${data.openstack_identity_role_v3.member.id}\"\n+}\n+\nresource \"ccloud_quota\" \"kubernikus_e2e\" {\nprovider = \"ccloud.cloud_admin\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes overlapping service/cluster cidrs
596,240
13.09.2018 10:24:04
-7,200
87776ad31a4ddc1c387d8ee944a5a63587d4ac6a
fixes kube-dns image pull china regions
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap/dns/dns.go", "new_path": "pkg/controller/ground/bootstrap/dns/dns.go", "diff": "@@ -17,7 +17,7 @@ import (\nconst (\nSERVICE_ACCOUNT = \"kube-dns\"\nCONFIGMAP = \"kube-dns\"\n- DEFAULT_REPOSITORY = \"gcr.io/google_containers\"\n+ DEFAULT_REPOSITORY = \"sapcc\" // Used to be gcr.io/google_containers but that is not working in china\nDEFAULT_VERSION = \"1.14.9\"\n)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes kube-dns image pull china regions
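Note on the change above: the kube-dns image references in dns.go are assembled from the repository and version constants, so flipping DEFAULT_REPOSITORY re-points every kube-dns container at once. A minimal Go sketch of that pattern; the imageRef helper is illustrative, not part of the codebase:

```go
package main

import "fmt"

const (
	defaultRepository = "sapcc" // was gcr.io/google_containers, which is unreachable from China regions
	defaultVersion    = "1.14.9"
)

// imageRef is a hypothetical helper mirroring how dns.go composes image names.
func imageRef(name string) string {
	return fmt.Sprintf("%s/%s:%s", defaultRepository, name, defaultVersion)
}

func main() {
	fmt.Println(imageRef("k8s-dns-kube-dns-amd64")) // sapcc/k8s-dns-kube-dns-amd64:1.14.9
}
```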
596,240
13.09.2018 10:54:02
-7,200
d36c4810e1b27b8fc957332dc446b2ca9c8349b5
updates documentation for image republishing
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap/dns/dns.go", "new_path": "pkg/controller/ground/bootstrap/dns/dns.go", "diff": "@@ -18,6 +18,17 @@ const (\nSERVICE_ACCOUNT = \"kube-dns\"\nCONFIGMAP = \"kube-dns\"\nDEFAULT_REPOSITORY = \"sapcc\" // Used to be gcr.io/google_containers but that is not working in china\n+\n+ // If you change this version you need to republish the images:\n+ // * k8s-dns-kube-dns-amd64\n+ // * k8s-dns-sidecar-amd64\n+ // * k8s-dns-dnsmasq-nanny-amd64\n+ //\n+ // Workflow:\n+ // docker pull gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.9\n+ // docker tag gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.9 sapcc/k8s-dns-kube-dns-amd64:1.14.9\n+ // docker push sapcc/k8s-dns-kube-dns-amd64:1.14.9\n+ //\nDEFAULT_VERSION = \"1.14.9\"\n)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
updates documentation for image republishing
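The comment added above documents a manual docker pull/tag/push workflow for republishing the three kube-dns images. A hedged sketch that automates those same three steps from Go via os/exec; the republish helper is hypothetical, the project runs these commands by hand:

```go
package main

import (
	"fmt"
	"os/exec"
)

// republish mirrors the documented workflow: pull from gcr.io, retag, push to sapcc.
func republish(image, version string) error {
	src := fmt.Sprintf("gcr.io/google_containers/%s:%s", image, version)
	dst := fmt.Sprintf("sapcc/%s:%s", image, version)
	for _, args := range [][]string{{"pull", src}, {"tag", src, dst}, {"push", dst}} {
		if out, err := exec.Command("docker", args...).CombinedOutput(); err != nil {
			return fmt.Errorf("docker %v failed: %v: %s", args, err, out)
		}
	}
	return nil
}

func main() {
	if err := republish("k8s-dns-kube-dns-amd64", "1.14.9"); err != nil {
		fmt.Println(err)
	}
}
```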
596,240
13.09.2018 11:17:03
-7,200
58f1de642e4b2e9c9a574f544027b0ded0a5bf62
uses vendored etcd for china
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/values.yaml", "new_path": "charts/kube-master/charts/etcd/values.yaml", "diff": "# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\nimage:\n- repository: gcr.io/google_containers/etcd\n+ repository: sapcc/etcd\ntag: 3.1.12\npullPolicy: IfNotPresent\n## Persist data to a persitent volume\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses vendored etcd for china
596,229
13.09.2018 13:50:29
-7,200
873bf79e2bc65dcc08ec4b442beec9c63500531d
GPU node labels changed "ccloud.sap.com/nvidia-gpu" to "gpu"
[ { "change_type": "MODIFY", "old_path": "contrib/nvidia-gpu/device-plugin-damonset.yaml", "new_path": "contrib/nvidia-gpu/device-plugin-damonset.yaml", "diff": "@@ -23,7 +23,7 @@ spec:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- - key: ccloud.sap.com/nvidia-gpu\n+ - key: gpu\noperator: Exists\ntolerations:\n- operator: \"Exists\"\n" }, { "change_type": "MODIFY", "old_path": "contrib/nvidia-gpu/device-plugin.yaml", "new_path": "contrib/nvidia-gpu/device-plugin.yaml", "diff": "@@ -22,7 +22,7 @@ spec:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- - key: ccloud.sap.com/nvidia-gpu\n+ - key: gpu\noperator: Exists\ntolerations:\n- operator: \"Exists\"\n" }, { "change_type": "MODIFY", "old_path": "contrib/nvidia-gpu/driver-daemonset.yaml", "new_path": "contrib/nvidia-gpu/driver-daemonset.yaml", "diff": "@@ -22,7 +22,7 @@ spec:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- - key: ccloud.sap.com/nvidia-gpu\n+ - key: gpu\noperator: Exists\ntolerations:\n- key: \"nvidia.com/gpu\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap/gpu/manifest.go", "new_path": "pkg/controller/ground/bootstrap/gpu/manifest.go", "diff": "@@ -27,7 +27,7 @@ spec:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- - key: ccloud.sap.com/nvidia-gpu\n+ - key: gpu\noperator: Exists\ntolerations:\n- operator: \"Exists\"\n@@ -92,7 +92,7 @@ spec:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- - key: ccloud.sap.com/nvidia-gpu\n+ - key: gpu\noperator: Exists\ntolerations:\n- key: \"nvidia.com/gpu\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -91,7 +91,7 @@ func (i *ignition) GenerateNode(kluster *kubernikusv1.Kluster, pool *models.Node\nif pool != nil {\nnodeLabels = append(nodeLabels, \"ccloud.sap.com/nodepool=\"+pool.Name)\nif strings.HasPrefix(pool.Flavor, \"zg\") {\n- nodeLabels = append(nodeLabels, \"ccloud.sap.com/nvidia-gpu=nvidia-tesla-v100\")\n+ nodeLabels = append(nodeLabels, \"gpu=nvidia-tesla-v100\")\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/ignition_test.go", "new_path": "pkg/templates/ignition_test.go", "diff": "@@ -89,6 +89,6 @@ func TestNodeLabels(t *testing.T) {\ndata, err = Ignition.GenerateNode(kluster, gpuPool, \"test\", &testKlusterSecret, log.NewNopLogger())\nif assert.NoError(t, err, \"Failed to generate node\") {\n//Ensure we rendered the expected template\n- assert.Contains(t, string(data), fmt.Sprintf(\"--node-labels=ccloud.sap.com/nodepool=%s,ccloud.sap.com/nvidia-gpu=nvidia-tesla-v100\", pool.Name))\n+ assert.Contains(t, string(data), fmt.Sprintf(\"--node-labels=ccloud.sap.com/nodepool=%s,gpu=nvidia-tesla-v100\", pool.Name))\n}\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
GPU node labels changed "ccloud.sap.com/nvidia-gpu" to "gpu" (#314)
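The renamed label is derived from the flavor prefix in ignition.go: flavors starting with "zg" are the GPU flavors. A self-contained sketch of that rule as the diff leaves it; the standalone nodeLabels helper is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func nodeLabels(poolName, flavor string) []string {
	labels := []string{"ccloud.sap.com/nodepool=" + poolName}
	if strings.HasPrefix(flavor, "zg") { // zg* flavors carry NVIDIA Tesla V100 GPUs
		labels = append(labels, "gpu=nvidia-tesla-v100")
	}
	return labels
}

func main() {
	fmt.Println(nodeLabels("pool1", "zg1.large")) // [ccloud.sap.com/nodepool=pool1 gpu=nvidia-tesla-v100]
}
```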
596,229
18.09.2018 09:49:51
-7,200
d7d59b133cd38bb6c4e5def44e6dfb7520d5d64b
Delete passwords from keyring if setup fails
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/kubernikusctl/auth/init.go", "new_path": "pkg/cmd/kubernikusctl/auth/init.go", "diff": "@@ -102,6 +102,12 @@ func (o *InitOptions) Run(c *cobra.Command) (err error) {\n}\nif err := o.setup(); err != nil {\n+\n+ if o.openstack.Username != \"\" {\n+ fmt.Println(\"Deleting password from keyring\")\n+ keyring.Delete(\"kubernikus\", o.openstack.Username)\n+ }\n+\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/kubernikusctl/auth/refresh.go", "new_path": "pkg/cmd/kubernikusctl/auth/refresh.go", "diff": "@@ -166,6 +166,12 @@ func (o *RefreshOptions) Run(c *cobra.Command) error {\n}\nif err := o.setupClients(); err != nil {\n+\n+ if o.openstack.Username != \"\" {\n+ fmt.Println(\"Deleting password from keyring\")\n+ keyring.Delete(\"kubernikus\", o.openstack.Username)\n+ }\n+\nreturn err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Delete passwords from keyring if setup fails
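Both call sites above share the same cleanup pattern: when setup fails after a password was stored, the stale keyring entry is removed so the next run prompts again. A minimal sketch, assuming the zalando/go-keyring API; the actual vendored keyring package may differ, and cleanupOnFailure is a hypothetical helper:

```go
package main

import (
	"fmt"

	keyring "github.com/zalando/go-keyring" // assumed import path
)

// cleanupOnFailure captures the pattern from init.go and refresh.go.
func cleanupOnFailure(username string, setupErr error) error {
	if setupErr != nil && username != "" {
		fmt.Println("Deleting password from keyring")
		// The delete error is deliberately ignored: the setup failure is the one worth surfacing.
		_ = keyring.Delete("kubernikus", username)
	}
	return setupErr
}

func main() {
	fmt.Println(cleanupOnFailure("alice", fmt.Errorf("setup failed")))
}
```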
596,240
18.09.2018 11:45:59
-7,200
5d7569f5f41be12a9d4688f0fddbdf3f36991fb6
adds dual e2e projects for eu-nl-1
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "auth_ap-ae-1: &auth_ap-ae-1\n+\n+\nOS_AUTH_URL: https://identity-3.ap-ae-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -12,6 +14,10 @@ auth_ap-ae-1: &auth_ap-ae-1\nKUBERNIKUS_NAME: k-ap-ae-1\nKUBERNIKUS_URL: https://k-ap-ae-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_ap-ae-1: &auth_e2e_ap-ae-1\nOS_AUTH_URL: https://identity-3.ap-ae-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -24,6 +30,9 @@ auth_e2e_ap-ae-1: &auth_e2e_ap-ae-1\n+\n+\n+\nauth_ap-au-1: &auth_ap-au-1\nOS_AUTH_URL: https://identity-3.ap-au-1.cloud.sap/v3\n@@ -35,6 +44,14 @@ auth_ap-au-1: &auth_ap-au-1\nKUBERNIKUS_NAME: k-ap-au-1\nKUBERNIKUS_URL: https://k-ap-au-1.admin.cloud.sap\n+\n+\n+\n+\n+\n+\n+\n+\nauth_e2e_ap-au-1: &auth_e2e_ap-au-1\nOS_AUTH_URL: https://identity-3.ap-au-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n@@ -47,8 +64,11 @@ auth_e2e_ap-au-1: &auth_e2e_ap-au-1\n+\nauth_ap-cn-1: &auth_ap-cn-1\n+\n+\nOS_AUTH_URL: https://identity-3.ap-cn-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -58,6 +78,10 @@ auth_ap-cn-1: &auth_ap-cn-1\nKUBERNIKUS_NAME: k-ap-cn-1\nKUBERNIKUS_URL: https://k-ap-cn-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_ap-cn-1: &auth_e2e_ap-cn-1\nOS_AUTH_URL: https://identity-3.ap-cn-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -70,8 +94,13 @@ auth_e2e_ap-cn-1: &auth_e2e_ap-cn-1\n+\n+\n+\nauth_ap-jp-1: &auth_ap-jp-1\n+\n+\nOS_AUTH_URL: https://identity-3.ap-jp-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -81,6 +110,10 @@ auth_ap-jp-1: &auth_ap-jp-1\nKUBERNIKUS_NAME: k-ap-jp-1\nKUBERNIKUS_URL: https://k-ap-jp-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_ap-jp-1: &auth_e2e_ap-jp-1\nOS_AUTH_URL: https://identity-3.ap-jp-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -93,8 +126,13 @@ auth_e2e_ap-jp-1: &auth_e2e_ap-jp-1\n+\n+\n+\nauth_ap-jp-2: &auth_ap-jp-2\n+\n+\nOS_AUTH_URL: https://identity-3.ap-jp-2.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -104,6 +142,10 @@ auth_ap-jp-2: &auth_ap-jp-2\nKUBERNIKUS_NAME: k-ap-jp-2\nKUBERNIKUS_URL: https://k-ap-jp-2.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_ap-jp-2: &auth_e2e_ap-jp-2\nOS_AUTH_URL: https://identity-3.ap-jp-2.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -116,8 +158,13 @@ auth_e2e_ap-jp-2: &auth_e2e_ap-jp-2\n+\n+\n+\nauth_ap-sa-1: &auth_ap-sa-1\n+\n+\nOS_AUTH_URL: https://identity-3.ap-sa-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -127,6 +174,10 @@ auth_ap-sa-1: &auth_ap-sa-1\nKUBERNIKUS_NAME: k-ap-sa-1\nKUBERNIKUS_URL: https://k-ap-sa-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_ap-sa-1: &auth_e2e_ap-sa-1\nOS_AUTH_URL: https://identity-3.ap-sa-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -139,6 +190,9 @@ auth_e2e_ap-sa-1: &auth_e2e_ap-sa-1\n+\n+\n+\nauth_eu-de-1: &auth_eu-de-1\nOS_AUTH_URL: https://identity-3.eu-de-1.cloud.sap/v3\n@@ -150,6 +204,14 @@ auth_eu-de-1: &auth_eu-de-1\nKUBERNIKUS_NAME: k-eu-de-1\nKUBERNIKUS_URL: https://k-eu-de-1.admin.cloud.sap\n+\n+\n+\n+\n+\n+\n+\n+\nauth_e2e_eu-de-1: &auth_e2e_eu-de-1\nOS_AUTH_URL: https://identity-3.eu-de-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n@@ -162,6 +224,7 @@ auth_e2e_eu-de-1: &auth_e2e_eu-de-1\n+\nauth_eu-de-2: &auth_eu-de-2\nOS_AUTH_URL: https://identity-3.eu-de-2.cloud.sap/v3\n@@ -173,6 
+236,14 @@ auth_eu-de-2: &auth_eu-de-2\nKUBERNIKUS_NAME: k-eu-de-2\nKUBERNIKUS_URL: https://k-eu-de-2.admin.cloud.sap\n+\n+\n+\n+\n+\n+\n+\n+\nauth_e2e_eu-de-2: &auth_e2e_eu-de-2\nOS_AUTH_URL: https://identity-3.eu-de-2.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n@@ -185,6 +256,7 @@ auth_e2e_eu-de-2: &auth_e2e_eu-de-2\n+\nauth_eu-nl-1: &auth_eu-nl-1\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n@@ -196,20 +268,31 @@ auth_eu-nl-1: &auth_eu-nl-1\nKUBERNIKUS_NAME: k-eu-nl-1\nKUBERNIKUS_URL: https://k-eu-nl-1.admin.cloud.sap\n+\n+\n+\n+\n+\n+\nauth_e2e_eu-nl-1: &auth_e2e_eu-nl-1\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus-e2e\n+ OS_USERNAME: kubernikus-pipeline\n+ OS_PASSWORD: ((kubernikus-pipeline-password))\n+ OS_USER_DOMAIN_NAME: Default\n+ OS_PROJECT_NAME: kubernikus_e2e\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: e2e\nKUBERNIKUS_URL: https://kubernikus.eu-nl-1.cloud.sap\n+\n+\n+\nauth_eu-ru-1: &auth_eu-ru-1\n+\n+\nOS_AUTH_URL: https://identity-3.eu-ru-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -219,6 +302,10 @@ auth_eu-ru-1: &auth_eu-ru-1\nKUBERNIKUS_NAME: k-eu-ru-1\nKUBERNIKUS_URL: https://k-eu-ru-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_eu-ru-1: &auth_e2e_eu-ru-1\nOS_AUTH_URL: https://identity-3.eu-ru-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -231,8 +318,13 @@ auth_e2e_eu-ru-1: &auth_e2e_eu-ru-1\n+\n+\n+\nauth_la-br-1: &auth_la-br-1\n+\n+\nOS_AUTH_URL: https://identity-3.la-br-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -242,6 +334,10 @@ auth_la-br-1: &auth_la-br-1\nKUBERNIKUS_NAME: k-la-br-1\nKUBERNIKUS_URL: https://k-la-br-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_la-br-1: &auth_e2e_la-br-1\nOS_AUTH_URL: https://identity-3.la-br-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -254,8 +350,13 @@ auth_e2e_la-br-1: &auth_e2e_la-br-1\n+\n+\n+\nauth_na-ca-1: &auth_na-ca-1\n+\n+\nOS_AUTH_URL: https://identity-3.na-ca-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -265,6 +366,10 @@ auth_na-ca-1: &auth_na-ca-1\nKUBERNIKUS_NAME: k-na-ca-1\nKUBERNIKUS_URL: https://k-na-ca-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_na-ca-1: &auth_e2e_na-ca-1\nOS_AUTH_URL: https://identity-3.na-ca-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -277,6 +382,9 @@ auth_e2e_na-ca-1: &auth_e2e_na-ca-1\n+\n+\n+\nauth_na-us-1: &auth_na-us-1\nOS_AUTH_URL: https://identity-3.na-us-1.cloud.sap/v3\n@@ -288,6 +396,14 @@ auth_na-us-1: &auth_na-us-1\nKUBERNIKUS_NAME: k-na-us-1\nKUBERNIKUS_URL: https://k-na-us-1.admin.cloud.sap\n+\n+\n+\n+\n+\n+\n+\n+\nauth_e2e_na-us-1: &auth_e2e_na-us-1\nOS_AUTH_URL: https://identity-3.na-us-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n@@ -300,8 +416,11 @@ auth_e2e_na-us-1: &auth_e2e_na-us-1\n+\nauth_na-us-3: &auth_na-us-3\n+\n+\nOS_AUTH_URL: https://identity-3.na-us-3.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -311,6 +430,10 @@ auth_na-us-3: &auth_na-us-3\nKUBERNIKUS_NAME: k-na-us-3\nKUBERNIKUS_URL: https://k-na-us-3.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_na-us-3: &auth_e2e_na-us-3\nOS_AUTH_URL: https://identity-3.na-us-3.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -323,8 +446,13 @@ auth_e2e_na-us-3: &auth_e2e_na-us-3\n+\n+\n+\nauth_qa-de-1: 
&auth_qa-de-1\n+\n+\nOS_AUTH_URL: https://identity-3.qa-de-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -334,6 +462,10 @@ auth_qa-de-1: &auth_qa-de-1\nKUBERNIKUS_NAME: k-qa-de-1\nKUBERNIKUS_URL: https://k-qa-de-1.admin.cloud.sap\n+\n+\n+\n+\nauth_e2e_qa-de-1: &auth_e2e_qa-de-1\nOS_AUTH_URL: https://identity-3.qa-de-1.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -346,8 +478,15 @@ auth_e2e_qa-de-1: &auth_e2e_qa-de-1\n+\n+\n+\nauth_master: &auth_master\n+\n+\n+\n+\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\nOS_PASSWORD: ((kubernikus-prod-password))\n@@ -357,6 +496,12 @@ auth_master: &auth_master\nKUBERNIKUS_NAME: k-master\nKUBERNIKUS_URL: https://k-master.admin.cloud.sap\n+\n+\n+\n+\n+\n+\nauth_e2e_master: &auth_e2e_master\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "<%\nREGIONS = {\n- \"ap-ae-1\": { continent: 'ASIA', install: 'terraform' },\n- \"ap-au-1\": { continent: 'ASIA', install: 'manual' },\n- \"ap-cn-1\": { continent: 'ASIA', install: 'terraform' },\n- \"ap-jp-1\": { continent: 'ASIA', install: 'terraform' },\n- \"ap-jp-2\": { continent: 'ASIA', install: 'terraform' },\n- \"ap-sa-1\": { continent: 'ASIA', install: 'terraform' },\n- \"eu-de-1\": { continent: 'EMEA', install: 'manual' },\n- \"eu-de-2\": { continent: 'EMEA', install: 'manual' },\n- \"eu-nl-1\": { continent: 'EMEA', install: 'manual' },\n- \"eu-ru-1\": { continent: 'EMEA', install: 'terraform' },\n- \"la-br-1\": { continent: 'AMERICAS', install: 'terraform' },\n- \"na-ca-1\": { continent: 'AMERICAS', install: 'terraform' },\n- \"na-us-1\": { continent: 'AMERICAS', install: 'manual' },\n- \"na-us-3\": { continent: 'AMERICAS', install: 'terraform' },\n- \"qa-de-1\": { continent: 'INFRA', install: 'terraform' },\n- \"master\": { continent: \"INFRA\", install: 'virtual' }\n+ \"ap-ae-1\": { continent: 'ASIA', install: 'terraform', e2e: 'terraform' },\n+ \"ap-au-1\": { continent: 'ASIA', install: 'manual' , e2e: 'manual' },\n+ \"ap-cn-1\": { continent: 'ASIA', install: 'terraform', e2e: 'terraform' },\n+ \"ap-jp-1\": { continent: 'ASIA', install: 'terraform', e2e: 'terraform' },\n+ \"ap-jp-2\": { continent: 'ASIA', install: 'terraform', e2e: 'terraform' },\n+ \"ap-sa-1\": { continent: 'ASIA', install: 'terraform', e2e: 'terraform' },\n+ \"eu-de-1\": { continent: 'EMEA', install: 'manual' , e2e: 'manual' },\n+ \"eu-de-2\": { continent: 'EMEA', install: 'manual' , e2e: 'manual' },\n+ \"eu-nl-1\": { continent: 'EMEA', install: 'manual' , e2e: 'terraform' },\n+ \"eu-ru-1\": { continent: 'EMEA', install: 'terraform', e2e: 'terraform' },\n+ \"la-br-1\": { continent: 'AMERICAS', install: 'terraform', e2e: 'terraform' },\n+ \"na-ca-1\": { continent: 'AMERICAS', install: 'terraform', e2e: 'terraform' },\n+ \"na-us-1\": { continent: 'AMERICAS', install: 'manual' , e2e: 'manual' },\n+ \"na-us-3\": { continent: 'AMERICAS', install: 'terraform', e2e: 'terraform' },\n+ \"qa-de-1\": { continent: 'INFRA', install: 'terraform', e2e: 'terraform' },\n+ \"master\": { continent: \"INFRA\", install: 'virtual' , e2e: 'virtual' }\n}\nGROUPS = REGIONS.values.map{ |v| v[:continent]}.uniq\n@@ -33,18 +33,9 @@ auth_<%= region %>: &auth_<%= region %>\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-<%= region %>\nKUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+<% 
end %>\n-auth_e2e_<%= region %>: &auth_e2e_<%= region %>\n- OS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\n- OS_USERNAME: ((kubernikus-prod-username))\n- OS_PASSWORD: ((kubernikus-prod-password))\n- OS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus-e2e\n- OS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: e2e\n- KUBERNIKUS_URL: https://kubernikus.<%= region %>.cloud.sap\n-\n-<% elsif meta[:install] == \"terraform\" %>\n+<% if meta[:install] == \"terraform\" %>\nOS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\nOS_PASSWORD: ((kubernikus-pipeline-password))\n@@ -53,7 +44,20 @@ auth_e2e_<%= region %>: &auth_e2e_<%= region %>\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: k-<%= region %>\nKUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+<% end %>\n+\n+<% if meta[:install] == \"virtual\" %>\n+ OS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n+ OS_USERNAME: ((kubernikus-prod-username))\n+ OS_PASSWORD: ((kubernikus-prod-password))\n+ OS_USER_DOMAIN_NAME: ccadmin\n+ OS_PROJECT_NAME: kubernikus-<%= region %>\n+ OS_PROJECT_DOMAIN_NAME: ccadmin\n+ KUBERNIKUS_NAME: k-<%= region %>\n+ KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+<% end %>\n+<% if meta[:e2e] == \"terraform\" %>\nauth_e2e_<%= region %>: &auth_e2e_<%= region %>\nOS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\nOS_USERNAME: kubernikus-pipeline\n@@ -63,17 +67,21 @@ auth_e2e_<%= region %>: &auth_e2e_<%= region %>\nOS_PROJECT_DOMAIN_NAME: ccadmin\nKUBERNIKUS_NAME: e2e\nKUBERNIKUS_URL: https://kubernikus.<%= region %>.cloud.sap\n+<% end %>\n-<% elsif meta[:install] == \"virtual\" %>\n- OS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\n+<% if meta[:e2e] == \"manual\" %>\n+auth_e2e_<%= region %>: &auth_e2e_<%= region %>\n+ OS_AUTH_URL: https://identity-3.<%= region %>.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\nOS_PASSWORD: ((kubernikus-prod-password))\nOS_USER_DOMAIN_NAME: ccadmin\n- OS_PROJECT_NAME: kubernikus-<%= region %>\n+ OS_PROJECT_NAME: kubernikus-e2e\nOS_PROJECT_DOMAIN_NAME: ccadmin\n- KUBERNIKUS_NAME: k-<%= region %>\n- KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+ KUBERNIKUS_NAME: e2e\n+ KUBERNIKUS_URL: https://kubernikus.<%= region %>.cloud.sap\n+<% end %>\n+<% if meta[:e2e] == \"virtual\" %>\nauth_e2e_<%= region %>: &auth_e2e_<%= region %>\nOS_AUTH_URL: https://identity-3.eu-nl-1.cloud.sap/v3\nOS_USERNAME: ((kubernikus-prod-username))\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds dual e2e projects for eu-nl-1
596,240
18.09.2018 13:46:04
-7,200
26ad26b9559ba1b5fa303ff43cd44bfd07cabf0a
uses mirrored serve-hostname image for china
[ { "change_type": "MODIFY", "old_path": "test/e2e/network_test.go", "new_path": "test/e2e/network_test.go", "diff": "@@ -30,7 +30,7 @@ const (\nPollInterval = 6 * time.Second // DNS Timeout is 5s\n- ServeHostnameImage = \"gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1\"\n+ ServeHostnameImage = \"sapcc/serve-hostname-amd64:1.1\"\nServeHostnamePort = 9376\n)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses mirrored serve-hostname image for china
596,240
18.09.2018 14:04:06
-7,200
35366c2be337fa819f88dc0597ff9c7971215340
allows auto-sign up for users signing in with certificate
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus-system/values.yaml", "new_path": "charts/kubernikus-system/values.yaml", "diff": "@@ -178,7 +178,7 @@ grafana:\nenabled = true\nheader_name = X-REMOTE-USER\nheader_property = username\n- auto_sign_up = false\n+ auto_sign_up = true\n[auth.basic]\nenabled = false\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
allows auto-sign up for users signing in with certificate
596,240
18.09.2018 14:15:09
-7,200
201afe0c994851c52deb905603dce478733350f5
triggers continental rollout on promoted builds
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -1197,6 +1197,7 @@ jobs:\n- get: kubernikus.builds\nresource: master.builds\npassed: [admin]\n+ trigger: true\n- put: ASIA.tag\n@@ -1872,6 +1873,7 @@ jobs:\n- get: kubernikus.builds\nresource: master.builds\npassed: [admin]\n+ trigger: true\n- put: EMEA.tag\n@@ -2309,6 +2311,7 @@ jobs:\n- get: kubernikus.builds\nresource: master.builds\npassed: [admin]\n+ trigger: true\n- put: AMERICAS.tag\n@@ -2762,6 +2765,7 @@ jobs:\n- get: kubernikus.builds\nresource: master.builds\npassed: [admin]\n+ trigger: true\n- put: INFRA.tag\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -260,6 +260,7 @@ jobs:\n- get: kubernikus.builds\nresource: master.builds\npassed: [admin]\n+ trigger: true\n- put: <%= group %>.tag\n<% REGIONS.select{ |k, v| v[:continent] == group }.each do |region, meta| %>\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
triggers continental rollout on promoted builds
596,240
18.09.2018 14:15:28
-7,200
5170075621f1aa4afe733673908f22ca11013538
allows port 22 for spore usage
[ { "change_type": "MODIFY", "old_path": "terraform/kubernikus.tf", "new_path": "terraform/kubernikus.tf", "diff": "@@ -383,6 +383,19 @@ resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_1\" {\ndepends_on = [\"ccloud_quota.kubernikus\"]\n}\n+resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_ssh\" {\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus.id}\"\n+ direction = \"ingress\"\n+ ethertype = \"IPv4\"\n+ protocol = \"tcp\"\n+ remote_ip_prefix = \"198.18.0.0/24\"\n+ port_range_min = 22\n+ port_range_max = 22\n+ security_group_id = \"${data.openstack_networking_secgroup_v2.kubernikus_default.id}\"\n+\n+ depends_on = [\"ccloud_quota.kubernikus\"]\n+}\n+\nresource \"openstack_identity_service_v3\" \"kubernikus\" {\nname = \"kubernikus\"\ntype = \"kubernikus\"\n@@ -656,3 +669,20 @@ resource \"openstack_networking_router_interface_v2\" \"router_interface_e2e\" {\nsubnet_id = \"${openstack_networking_subnet_v2.subnet_e2e.id}\"\n}\n+data \"openstack_networking_secgroup_v2\" \"kubernikus_e2e_default\" {\n+ name = \"default\"\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+}\n+\n+resource \"openstack_networking_secgroup_rule_v2\" \"secgroup_rule_e2e_ssh\" {\n+ tenant_id = \"${openstack_identity_project_v3.kubernikus_e2e.id}\"\n+ direction = \"ingress\"\n+ ethertype = \"IPv4\"\n+ protocol = \"tcp\"\n+ remote_ip_prefix = \"10.180.0.0/16\"\n+ port_range_min = 22\n+ port_range_max = 22\n+ security_group_id = \"${data.openstack_networking_secgroup_v2.kubernikus_e2e_default.id}\"\n+\n+ depends_on = [\"ccloud_quota.kubernikus_e2e\"]\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
allows port 22 for spore usage
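The same ingress rule, expressed through gophercloud for comparison; a sketch under the assumption of an already-configured networkClient and a known secGroupID, not code from the repository:

```go
package main

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
)

// createSSHRule mirrors openstack_networking_secgroup_rule_v2.secgroup_rule_ssh from the diff:
// allow TCP/22 from the 198.18.0.0/24 node subnet into the default security group.
func createSSHRule(networkClient *gophercloud.ServiceClient, secGroupID string) error {
	_, err := rules.Create(networkClient, rules.CreateOpts{
		Direction:      rules.DirIngress,
		EtherType:      rules.EtherType4,
		Protocol:       rules.ProtocolTCP,
		PortRangeMin:   22,
		PortRangeMax:   22,
		RemoteIPPrefix: "198.18.0.0/24",
		SecGroupID:     secGroupID,
	}).Extract()
	return err
}

func main() {}
```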
596,226
08.10.2018 14:46:58
-7,200
63584621ad1cd456955bdda9f0b0eacc6bcc9d87
Create swagger path on download, show glide error message
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -26,7 +26,7 @@ SWAGGER_BIN := bin/$(GOOS)/swagger-$(GO_SWAGGER_VERSION)\nall: $(BINARIES:%=bin/$(GOOS)/%)\nbin/$(GOOS)/swagger-%:\n- curl -f -z $@ -o $@ -L'#' https://github.com/go-swagger/go-swagger/releases/download/$*/swagger_$(GOOS)_amd64\n+ curl -f --create-dirs -z $@ -o $@ -L'#' https://github.com/go-swagger/go-swagger/releases/download/$*/swagger_$(GOOS)_amd64\nchmod +x $@\nbin/%: $(GOFILES) Makefile\n@@ -143,7 +143,7 @@ endif\nbootstrap: $(SWAGGER_BIN)\nifndef HAS_GLIDE\n- brew install glide\n+ $(error glide not found. Please run `brew install glide` or install it from https://github.com/Masterminds/glide)\nendif\nifndef HAS_GLIDE_VC\ngo get -u github.com/sgotti/glide-vc\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Create swagger path on download, show glide error message (#323)
596,240
11.10.2018 21:39:18
-7,200
16b584767a3ad374095d4dc89447a669640f65a5
adds alert for APIserver being killed due to OOM
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus-system/charts/prometheus/kubernetes.alerts", "new_path": "charts/kubernikus-system/charts/prometheus/kubernetes.alerts", "diff": "@@ -90,3 +90,17 @@ groups:\nannotations:\nsummary: More than 3 OOM killed pods on a node within 24h\ndescription: More than 3 OOM killed pods on node {{ $labels.instance }} within 24h\n+\n+ - alert: KubernetesAPIServerContainerOOMKilled\n+ expr: delta(kube_pod_container_status_restarts_total{container=\"apiserver\",namespace=\"kubernikus\",pod!~\"e2e.*\"}[1h]) > 1\n+ for: 1h\n+ labels:\n+ tier: kubernikus\n+ service: node\n+ severity: warning\n+ context: memory\n+ annotations:\n+ summary: An APIServer is OOM. Increase Limits.\n+ description: APIServer {{ $labels.pod }} is OOM. Increase Limits.\n+\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds alert for APIserver being killed due to OOM
596,240
13.10.2018 21:40:07
-7,200
58611ed175716b22f305fdc07101d23a6aae029d
Only detect OOM kills on Running Apiservers
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus-system/charts/prometheus/kubernetes.alerts", "new_path": "charts/kubernikus-system/charts/prometheus/kubernetes.alerts", "diff": "@@ -92,7 +92,8 @@ groups:\ndescription: More than 3 OOM killed pods on node {{ $labels.instance }} within 24h\n- alert: KubernetesAPIServerContainerOOMKilled\n- expr: delta(kube_pod_container_status_restarts_total{container=\"apiserver\",namespace=\"kubernikus\",pod!~\"e2e.*\"}[1h]) > 1\n+ expr: (label_replace(delta(kube_pod_container_status_restarts_total{container=\"apiserver\",namespace=\"kubernikus\",pod!~\"e2e.*\"}[1h])\n+ , \"kluster_id\", \"$1\", \"pod\", \"(.*)-api.*\") > 1) and on(kluster_id) (kubernikus_kluster_status_phase{phase=\"Running\"} == 1)\nfor: 1h\nlabels:\ntier: kubernikus\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Only detect OOM kills on Running Apiservers
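The reworked expression above joins the restart delta to kubernikus_kluster_status_phase{phase="Running"} on kluster_id, which label_replace derives from the pod name. What that regex does, shown in plain Go; Prometheus anchors label_replace regexes, hence the explicit ^…$:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^(.*)-api.*$`)
	// Apiserver pods are named <kluster_id>-api...; the capture group becomes
	// the kluster_id label used for the join against the kluster phase metric.
	fmt.Println(re.FindStringSubmatch("abc123-apiserver-5d9f6c7b8-xk2lp")[1]) // abc123
}
```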
596,240
22.10.2018 11:15:59
-7,200
b38f7ea81878dff4eb2ff3e2415a5bc0b6c7319b
fixes missing line-break for SAP cert
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.10.go", "new_path": "pkg/templates/node_1.10.go", "diff": "@@ -217,7 +217,7 @@ storage:\nfilesystem: root\nmode: 0644\ncontents:\n- inline: |-\n+ inline: |\n-----BEGIN CERTIFICATE-----\nMIIGPTCCBCWgAwIBAgIKYQ4GNwAAAAAADDANBgkqhkiG9w0BAQsFADBOMQswCQYD\nVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBBRzEbMBkG\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.7.go", "new_path": "pkg/templates/node_1.7.go", "diff": "@@ -178,7 +178,7 @@ storage:\nfilesystem: root\nmode: 0644\ncontents:\n- inline: |-\n+ inline: |\n-----BEGIN CERTIFICATE-----\nMIIGPTCCBCWgAwIBAgIKYQ4GNwAAAAAADDANBgkqhkiG9w0BAQsFADBOMQswCQYD\nVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBBRzEbMBkG\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.8.go", "new_path": "pkg/templates/node_1.8.go", "diff": "@@ -178,7 +178,7 @@ storage:\nfilesystem: root\nmode: 0644\ncontents:\n- inline: |-\n+ inline: |\n-----BEGIN CERTIFICATE-----\nMIIGPTCCBCWgAwIBAgIKYQ4GNwAAAAAADDANBgkqhkiG9w0BAQsFADBOMQswCQYD\nVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBBRzEbMBkG\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.9.go", "new_path": "pkg/templates/node_1.9.go", "diff": "@@ -178,7 +178,7 @@ storage:\nfilesystem: root\nmode: 0644\ncontents:\n- inline: |-\n+ inline: |\n-----BEGIN CERTIFICATE-----\nMIIGPTCCBCWgAwIBAgIKYQ4GNwAAAAAADDANBgkqhkiG9w0BAQsFADBOMQswCQYD\nVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBBRzEbMBkG\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes missing line-break for SAP cert
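The one-character fix above switches the YAML block scalar from strip chomping (|-) to clip chomping (|), restoring the trailing newline that the PEM file needs after -----END CERTIFICATE-----. The difference, demonstrated with gopkg.in/yaml.v2 (an assumed library choice for this demo):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed YAML library for the demo
)

func main() {
	var clip, strip struct{ Inline string }
	_ = yaml.Unmarshal([]byte("inline: |\n  PEM\n"), &clip)   // clip: keeps the final newline
	_ = yaml.Unmarshal([]byte("inline: |-\n  PEM\n"), &strip) // strip: drops it
	fmt.Printf("%q vs %q\n", clip.Inline, strip.Inline)       // "PEM\n" vs "PEM"
}
```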
596,240
22.10.2018 11:36:20
-7,200
0478439e9eceec6c433bd772200863019b60b883
adds global root certificate
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.10.go", "new_path": "pkg/templates/node_1.10.go", "diff": "@@ -213,6 +213,47 @@ networkd:\nstorage:\nfiles:\n+ - path: /etc/ssl/certs/SAPGlobalRootCA.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |\n+ -----BEGIN CERTIFICATE-----\n+ MIIGTDCCBDSgAwIBAgIQXQPZPTFhXY9Iizlwx48bmTANBgkqhkiG9w0BAQsFADBO\n+ MQswCQYDVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBB\n+ RzEbMBkGA1UEAwwSU0FQIEdsb2JhbCBSb290IENBMB4XDTEyMDQyNjE1NDE1NVoX\n+ DTMyMDQyNjE1NDYyN1owTjELMAkGA1UEBhMCREUxETAPBgNVBAcMCFdhbGxkb3Jm\n+ MQ8wDQYDVQQKDAZTQVAgQUcxGzAZBgNVBAMMElNBUCBHbG9iYWwgUm9vdCBDQTCC\n+ AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOrxJKFFA1eTrZg1Ux8ax6n/\n+ LQRHZlgLc2FZpfyAgwvkt71wLkPLiTOaRb3Bd1dyydpKcwJLy0dzGkunzNkPRSFz\n+ bKy2IPS0RS45hUCCPzhGnqQM6TcDYWeWpSUvygqujgb/cAG0mSJpvzAD3SMDQ+VJ\n+ Az5Ryq4IrP7LkfCb63LKZxLsHEkEcNKoGPsSsd4LTwuEIyM3ZHcCoA97m6hvgLWV\n+ GLzLIQMEblkswqX29z7JZH+zJopoqZB6eEogE2YpExkw52PufytEslDY3dyVubjp\n+ GlvD4T03F2zm6CYleMwgWbATLVYvk2I9WfqPAP+ln2IU9DZzegSMTWHCE+jizaiq\n+ b5f5s7m8f+cz7ndHSrz8KD/S9iNdWpuSlknHDrh+3lFTX/uWNBRs5mC/cdejcqS1\n+ v6erflyIfqPWWO6PxhIs49NL9Lix3ou6opJo+m8K757T5uP/rQ9KYALIXvl2uFP7\n+ 0CqI+VGfossMlSXa1keagraW8qfplz6ffeSJQWO/+zifbfsf0tzUAC72zBuO0qvN\n+ E7rSbqAfpav/o010nKP132gbkb4uOkUfZwCuvZjA8ddsQ4udIBRj0hQlqnPLJOR1\n+ PImrAFC3PW3NgaDEo9QAJBEp5jEJmQghNvEsmzXgABebwLdI9u0VrDz4mSb6TYQC\n+ XTUaSnH3zvwAv8oMx7q7AgMBAAGjggEkMIIBIDAOBgNVHQ8BAf8EBAMCAQYwEgYD\n+ VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUg8dB/Q4mTynBuHmOhnrhv7XXagMw\n+ gdoGA1UdIASB0jCBzzCBzAYKKwYBBAGFNgRkATCBvTAmBggrBgEFBQcCARYaaHR0\n+ cDovL3d3dy5wa2kuY28uc2FwLmNvbS8wgZIGCCsGAQUFBwICMIGFHoGCAEMAZQBy\n+ AHQAaQBmAGkAYwBhAHQAZQAgAFAAbwBsAGkAYwB5ACAAYQBuAGQAIABDAGUAcgB0\n+ AGkAZgBpAGMAYQB0AGkAbwBuACAAUAByAGEAYwB0AGkAYwBlACAAUwB0AGEAdABl\n+ AG0AZQBuAHQAIABvAGYAIABTAEEAUAAgAEEARzANBgkqhkiG9w0BAQsFAAOCAgEA\n+ 0HpCIaC36me6ShB3oHDexA2a3UFcU149nZTABPKT+yUCnCQPzvK/6nJUc5I4xPfv\n+ 2Q8cIlJjPNRoh9vNSF7OZGRmWQOFFrPWeqX5JA7HQPsRVURjJMeYgZWMpy4t1Tof\n+ lF13u6OY6xV6A5kQZIISFj/dOYLT3+O7wME5SItL+YsNh6BToNU0xAZt71Z8JNdY\n+ VJb2xSPMzn6bNXY8ioGzHlVxfEvzMqebV0KY7BTXR3y/Mh+v/RjXGmvZU6L/gnU7\n+ 8mTRPgekYKY8JX2CXTqgfuW6QSnJ+88bHHMhMP7nPwv+YkPcsvCPBSY08ykzFATw\n+ SNoKP1/QFtERVUwrUXt3Cufz9huVysiy23dEyfAglgCCRWA+ZlaaXfieKkUWCJaE\n+ Kw/2Jqz02HDc7uXkFLS1BMYjr3WjShg1a+ulYvrBhNtseRoZT833SStlS/jzZ8Bi\n+ c1dt7UOiIZCGUIODfcZhO8l4mtjh034hdARLF0sUZhkVlosHPml5rlxh+qn8yJiJ\n+ GJ7CUQtNCDBVGksVlwew/+XnesITxrDjUMu+2297at7wjBwCnO93zr1/wsx1e2Um\n+ Xn+IfM6K/pbDar/y6uI9rHlyWu4iJ6cg7DAPJ2CCklw/YHJXhDHGwheO/qSrKtgz\n+ PGHZoN9jcvvvWDLUGtJkEotMgdFpEA2XWR83H4fVFVc=\n+ -----END CERTIFICATE-----\n- path: /etc/ssl/certs/SAPNetCA_G2.pem\nfilesystem: root\nmode: 0644\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.7.go", "new_path": "pkg/templates/node_1.7.go", "diff": "@@ -174,6 +174,47 @@ networkd:\nstorage:\nfiles:\n+ - path: /etc/ssl/certs/SAPGlobalRootCA.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |\n+ -----BEGIN CERTIFICATE-----\n+ MIIGTDCCBDSgAwIBAgIQXQPZPTFhXY9Iizlwx48bmTANBgkqhkiG9w0BAQsFADBO\n+ MQswCQYDVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBB\n+ RzEbMBkGA1UEAwwSU0FQIEdsb2JhbCBSb290IENBMB4XDTEyMDQyNjE1NDE1NVoX\n+ DTMyMDQyNjE1NDYyN1owTjELMAkGA1UEBhMCREUxETAPBgNVBAcMCFdhbGxkb3Jm\n+ MQ8wDQYDVQQKDAZTQVAgQUcxGzAZBgNVBAMMElNBUCBHbG9iYWwgUm9vdCBDQTCC\n+ AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOrxJKFFA1eTrZg1Ux8ax6n/\n+ LQRHZlgLc2FZpfyAgwvkt71wLkPLiTOaRb3Bd1dyydpKcwJLy0dzGkunzNkPRSFz\n+ 
bKy2IPS0RS45hUCCPzhGnqQM6TcDYWeWpSUvygqujgb/cAG0mSJpvzAD3SMDQ+VJ\n+ Az5Ryq4IrP7LkfCb63LKZxLsHEkEcNKoGPsSsd4LTwuEIyM3ZHcCoA97m6hvgLWV\n+ GLzLIQMEblkswqX29z7JZH+zJopoqZB6eEogE2YpExkw52PufytEslDY3dyVubjp\n+ GlvD4T03F2zm6CYleMwgWbATLVYvk2I9WfqPAP+ln2IU9DZzegSMTWHCE+jizaiq\n+ b5f5s7m8f+cz7ndHSrz8KD/S9iNdWpuSlknHDrh+3lFTX/uWNBRs5mC/cdejcqS1\n+ v6erflyIfqPWWO6PxhIs49NL9Lix3ou6opJo+m8K757T5uP/rQ9KYALIXvl2uFP7\n+ 0CqI+VGfossMlSXa1keagraW8qfplz6ffeSJQWO/+zifbfsf0tzUAC72zBuO0qvN\n+ E7rSbqAfpav/o010nKP132gbkb4uOkUfZwCuvZjA8ddsQ4udIBRj0hQlqnPLJOR1\n+ PImrAFC3PW3NgaDEo9QAJBEp5jEJmQghNvEsmzXgABebwLdI9u0VrDz4mSb6TYQC\n+ XTUaSnH3zvwAv8oMx7q7AgMBAAGjggEkMIIBIDAOBgNVHQ8BAf8EBAMCAQYwEgYD\n+ VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUg8dB/Q4mTynBuHmOhnrhv7XXagMw\n+ gdoGA1UdIASB0jCBzzCBzAYKKwYBBAGFNgRkATCBvTAmBggrBgEFBQcCARYaaHR0\n+ cDovL3d3dy5wa2kuY28uc2FwLmNvbS8wgZIGCCsGAQUFBwICMIGFHoGCAEMAZQBy\n+ AHQAaQBmAGkAYwBhAHQAZQAgAFAAbwBsAGkAYwB5ACAAYQBuAGQAIABDAGUAcgB0\n+ AGkAZgBpAGMAYQB0AGkAbwBuACAAUAByAGEAYwB0AGkAYwBlACAAUwB0AGEAdABl\n+ AG0AZQBuAHQAIABvAGYAIABTAEEAUAAgAEEARzANBgkqhkiG9w0BAQsFAAOCAgEA\n+ 0HpCIaC36me6ShB3oHDexA2a3UFcU149nZTABPKT+yUCnCQPzvK/6nJUc5I4xPfv\n+ 2Q8cIlJjPNRoh9vNSF7OZGRmWQOFFrPWeqX5JA7HQPsRVURjJMeYgZWMpy4t1Tof\n+ lF13u6OY6xV6A5kQZIISFj/dOYLT3+O7wME5SItL+YsNh6BToNU0xAZt71Z8JNdY\n+ VJb2xSPMzn6bNXY8ioGzHlVxfEvzMqebV0KY7BTXR3y/Mh+v/RjXGmvZU6L/gnU7\n+ 8mTRPgekYKY8JX2CXTqgfuW6QSnJ+88bHHMhMP7nPwv+YkPcsvCPBSY08ykzFATw\n+ SNoKP1/QFtERVUwrUXt3Cufz9huVysiy23dEyfAglgCCRWA+ZlaaXfieKkUWCJaE\n+ Kw/2Jqz02HDc7uXkFLS1BMYjr3WjShg1a+ulYvrBhNtseRoZT833SStlS/jzZ8Bi\n+ c1dt7UOiIZCGUIODfcZhO8l4mtjh034hdARLF0sUZhkVlosHPml5rlxh+qn8yJiJ\n+ GJ7CUQtNCDBVGksVlwew/+XnesITxrDjUMu+2297at7wjBwCnO93zr1/wsx1e2Um\n+ Xn+IfM6K/pbDar/y6uI9rHlyWu4iJ6cg7DAPJ2CCklw/YHJXhDHGwheO/qSrKtgz\n+ PGHZoN9jcvvvWDLUGtJkEotMgdFpEA2XWR83H4fVFVc=\n+ -----END CERTIFICATE-----\n- path: /etc/ssl/certs/SAPNetCA_G2.pem\nfilesystem: root\nmode: 0644\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.8.go", "new_path": "pkg/templates/node_1.8.go", "diff": "@@ -174,6 +174,47 @@ networkd:\nstorage:\nfiles:\n+ - path: /etc/ssl/certs/SAPGlobalRootCA.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |\n+ -----BEGIN CERTIFICATE-----\n+ MIIGTDCCBDSgAwIBAgIQXQPZPTFhXY9Iizlwx48bmTANBgkqhkiG9w0BAQsFADBO\n+ MQswCQYDVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBB\n+ RzEbMBkGA1UEAwwSU0FQIEdsb2JhbCBSb290IENBMB4XDTEyMDQyNjE1NDE1NVoX\n+ DTMyMDQyNjE1NDYyN1owTjELMAkGA1UEBhMCREUxETAPBgNVBAcMCFdhbGxkb3Jm\n+ MQ8wDQYDVQQKDAZTQVAgQUcxGzAZBgNVBAMMElNBUCBHbG9iYWwgUm9vdCBDQTCC\n+ AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOrxJKFFA1eTrZg1Ux8ax6n/\n+ LQRHZlgLc2FZpfyAgwvkt71wLkPLiTOaRb3Bd1dyydpKcwJLy0dzGkunzNkPRSFz\n+ bKy2IPS0RS45hUCCPzhGnqQM6TcDYWeWpSUvygqujgb/cAG0mSJpvzAD3SMDQ+VJ\n+ Az5Ryq4IrP7LkfCb63LKZxLsHEkEcNKoGPsSsd4LTwuEIyM3ZHcCoA97m6hvgLWV\n+ GLzLIQMEblkswqX29z7JZH+zJopoqZB6eEogE2YpExkw52PufytEslDY3dyVubjp\n+ GlvD4T03F2zm6CYleMwgWbATLVYvk2I9WfqPAP+ln2IU9DZzegSMTWHCE+jizaiq\n+ b5f5s7m8f+cz7ndHSrz8KD/S9iNdWpuSlknHDrh+3lFTX/uWNBRs5mC/cdejcqS1\n+ v6erflyIfqPWWO6PxhIs49NL9Lix3ou6opJo+m8K757T5uP/rQ9KYALIXvl2uFP7\n+ 0CqI+VGfossMlSXa1keagraW8qfplz6ffeSJQWO/+zifbfsf0tzUAC72zBuO0qvN\n+ E7rSbqAfpav/o010nKP132gbkb4uOkUfZwCuvZjA8ddsQ4udIBRj0hQlqnPLJOR1\n+ PImrAFC3PW3NgaDEo9QAJBEp5jEJmQghNvEsmzXgABebwLdI9u0VrDz4mSb6TYQC\n+ XTUaSnH3zvwAv8oMx7q7AgMBAAGjggEkMIIBIDAOBgNVHQ8BAf8EBAMCAQYwEgYD\n+ VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUg8dB/Q4mTynBuHmOhnrhv7XXagMw\n+ gdoGA1UdIASB0jCBzzCBzAYKKwYBBAGFNgRkATCBvTAmBggrBgEFBQcCARYaaHR0\n+ 
cDovL3d3dy5wa2kuY28uc2FwLmNvbS8wgZIGCCsGAQUFBwICMIGFHoGCAEMAZQBy\n+ AHQAaQBmAGkAYwBhAHQAZQAgAFAAbwBsAGkAYwB5ACAAYQBuAGQAIABDAGUAcgB0\n+ AGkAZgBpAGMAYQB0AGkAbwBuACAAUAByAGEAYwB0AGkAYwBlACAAUwB0AGEAdABl\n+ AG0AZQBuAHQAIABvAGYAIABTAEEAUAAgAEEARzANBgkqhkiG9w0BAQsFAAOCAgEA\n+ 0HpCIaC36me6ShB3oHDexA2a3UFcU149nZTABPKT+yUCnCQPzvK/6nJUc5I4xPfv\n+ 2Q8cIlJjPNRoh9vNSF7OZGRmWQOFFrPWeqX5JA7HQPsRVURjJMeYgZWMpy4t1Tof\n+ lF13u6OY6xV6A5kQZIISFj/dOYLT3+O7wME5SItL+YsNh6BToNU0xAZt71Z8JNdY\n+ VJb2xSPMzn6bNXY8ioGzHlVxfEvzMqebV0KY7BTXR3y/Mh+v/RjXGmvZU6L/gnU7\n+ 8mTRPgekYKY8JX2CXTqgfuW6QSnJ+88bHHMhMP7nPwv+YkPcsvCPBSY08ykzFATw\n+ SNoKP1/QFtERVUwrUXt3Cufz9huVysiy23dEyfAglgCCRWA+ZlaaXfieKkUWCJaE\n+ Kw/2Jqz02HDc7uXkFLS1BMYjr3WjShg1a+ulYvrBhNtseRoZT833SStlS/jzZ8Bi\n+ c1dt7UOiIZCGUIODfcZhO8l4mtjh034hdARLF0sUZhkVlosHPml5rlxh+qn8yJiJ\n+ GJ7CUQtNCDBVGksVlwew/+XnesITxrDjUMu+2297at7wjBwCnO93zr1/wsx1e2Um\n+ Xn+IfM6K/pbDar/y6uI9rHlyWu4iJ6cg7DAPJ2CCklw/YHJXhDHGwheO/qSrKtgz\n+ PGHZoN9jcvvvWDLUGtJkEotMgdFpEA2XWR83H4fVFVc=\n+ -----END CERTIFICATE-----\n- path: /etc/ssl/certs/SAPNetCA_G2.pem\nfilesystem: root\nmode: 0644\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node_1.9.go", "new_path": "pkg/templates/node_1.9.go", "diff": "@@ -174,6 +174,47 @@ networkd:\nstorage:\nfiles:\n+ - path: /etc/ssl/certs/SAPGlobalRootCA.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |\n+ -----BEGIN CERTIFICATE-----\n+ MIIGTDCCBDSgAwIBAgIQXQPZPTFhXY9Iizlwx48bmTANBgkqhkiG9w0BAQsFADBO\n+ MQswCQYDVQQGEwJERTERMA8GA1UEBwwIV2FsbGRvcmYxDzANBgNVBAoMBlNBUCBB\n+ RzEbMBkGA1UEAwwSU0FQIEdsb2JhbCBSb290IENBMB4XDTEyMDQyNjE1NDE1NVoX\n+ DTMyMDQyNjE1NDYyN1owTjELMAkGA1UEBhMCREUxETAPBgNVBAcMCFdhbGxkb3Jm\n+ MQ8wDQYDVQQKDAZTQVAgQUcxGzAZBgNVBAMMElNBUCBHbG9iYWwgUm9vdCBDQTCC\n+ AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOrxJKFFA1eTrZg1Ux8ax6n/\n+ LQRHZlgLc2FZpfyAgwvkt71wLkPLiTOaRb3Bd1dyydpKcwJLy0dzGkunzNkPRSFz\n+ bKy2IPS0RS45hUCCPzhGnqQM6TcDYWeWpSUvygqujgb/cAG0mSJpvzAD3SMDQ+VJ\n+ Az5Ryq4IrP7LkfCb63LKZxLsHEkEcNKoGPsSsd4LTwuEIyM3ZHcCoA97m6hvgLWV\n+ GLzLIQMEblkswqX29z7JZH+zJopoqZB6eEogE2YpExkw52PufytEslDY3dyVubjp\n+ GlvD4T03F2zm6CYleMwgWbATLVYvk2I9WfqPAP+ln2IU9DZzegSMTWHCE+jizaiq\n+ b5f5s7m8f+cz7ndHSrz8KD/S9iNdWpuSlknHDrh+3lFTX/uWNBRs5mC/cdejcqS1\n+ v6erflyIfqPWWO6PxhIs49NL9Lix3ou6opJo+m8K757T5uP/rQ9KYALIXvl2uFP7\n+ 0CqI+VGfossMlSXa1keagraW8qfplz6ffeSJQWO/+zifbfsf0tzUAC72zBuO0qvN\n+ E7rSbqAfpav/o010nKP132gbkb4uOkUfZwCuvZjA8ddsQ4udIBRj0hQlqnPLJOR1\n+ PImrAFC3PW3NgaDEo9QAJBEp5jEJmQghNvEsmzXgABebwLdI9u0VrDz4mSb6TYQC\n+ XTUaSnH3zvwAv8oMx7q7AgMBAAGjggEkMIIBIDAOBgNVHQ8BAf8EBAMCAQYwEgYD\n+ VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUg8dB/Q4mTynBuHmOhnrhv7XXagMw\n+ gdoGA1UdIASB0jCBzzCBzAYKKwYBBAGFNgRkATCBvTAmBggrBgEFBQcCARYaaHR0\n+ cDovL3d3dy5wa2kuY28uc2FwLmNvbS8wgZIGCCsGAQUFBwICMIGFHoGCAEMAZQBy\n+ AHQAaQBmAGkAYwBhAHQAZQAgAFAAbwBsAGkAYwB5ACAAYQBuAGQAIABDAGUAcgB0\n+ AGkAZgBpAGMAYQB0AGkAbwBuACAAUAByAGEAYwB0AGkAYwBlACAAUwB0AGEAdABl\n+ AG0AZQBuAHQAIABvAGYAIABTAEEAUAAgAEEARzANBgkqhkiG9w0BAQsFAAOCAgEA\n+ 0HpCIaC36me6ShB3oHDexA2a3UFcU149nZTABPKT+yUCnCQPzvK/6nJUc5I4xPfv\n+ 2Q8cIlJjPNRoh9vNSF7OZGRmWQOFFrPWeqX5JA7HQPsRVURjJMeYgZWMpy4t1Tof\n+ lF13u6OY6xV6A5kQZIISFj/dOYLT3+O7wME5SItL+YsNh6BToNU0xAZt71Z8JNdY\n+ VJb2xSPMzn6bNXY8ioGzHlVxfEvzMqebV0KY7BTXR3y/Mh+v/RjXGmvZU6L/gnU7\n+ 8mTRPgekYKY8JX2CXTqgfuW6QSnJ+88bHHMhMP7nPwv+YkPcsvCPBSY08ykzFATw\n+ SNoKP1/QFtERVUwrUXt3Cufz9huVysiy23dEyfAglgCCRWA+ZlaaXfieKkUWCJaE\n+ Kw/2Jqz02HDc7uXkFLS1BMYjr3WjShg1a+ulYvrBhNtseRoZT833SStlS/jzZ8Bi\n+ c1dt7UOiIZCGUIODfcZhO8l4mtjh034hdARLF0sUZhkVlosHPml5rlxh+qn8yJiJ\n+ 
GJ7CUQtNCDBVGksVlwew/+XnesITxrDjUMu+2297at7wjBwCnO93zr1/wsx1e2Um\n+ Xn+IfM6K/pbDar/y6uI9rHlyWu4iJ6cg7DAPJ2CCklw/YHJXhDHGwheO/qSrKtgz\n+ PGHZoN9jcvvvWDLUGtJkEotMgdFpEA2XWR83H4fVFVc=\n+ -----END CERTIFICATE-----\n- path: /etc/ssl/certs/SAPNetCA_G2.pem\nfilesystem: root\nmode: 0644\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds global root certificate
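The commit above drops the SAP Global Root CA as a plain PEM file into /etc/ssl/certs on every node image, presumably so host-level TLS clients trust certificates issued under that root. A minimal sketch of how such a PEM root is consumed programmatically — the file path matches the template, but the dialed host is a hypothetical placeholder:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

func main() {
	// Read the PEM file dropped by the ignition template.
	pemBytes, err := ioutil.ReadFile("/etc/ssl/certs/SAPGlobalRootCA.pem")
	if err != nil {
		log.Fatal(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(pemBytes) {
		log.Fatal("no certificates parsed from PEM")
	}
	// Verify a server chain against that root; "internal.example" is a placeholder.
	conn, err := tls.Dial("tcp", "internal.example:443", &tls.Config{RootCAs: roots})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("chain verified against SAPGlobalRootCA")
}
```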
596,226
30.10.2018 09:44:42
-3,600
f9a8784592763ace84195f35eefb72530c2227f6
Fix procfile on linux, should work everywhere
[ { "change_type": "MODIFY", "old_path": "Procfile", "new_path": "Procfile", "diff": "-operator: env NODEPOOL_AFFINITY=true bin/darwin/kubernikus operator --auth-username=$KS_USERNAME --auth-domain=$KS_USER_DOMAIN_NAME --auth-password=\"$KS_PASSWORD\" --auth-project=$KS_PROJECT_NAME --auth-project-domain=$KS_PROJECT_DOMAIN_NAME --auth-url=$KS_AUTH_URL --namespace=$KS_NAMESPACE --context=$KS_CONTEXT --kubernikus-domain=$KS_DOMAIN --v=5\n-api: bin/darwin/apiserver --context=$KS_CONTEXT --namespace=$KS_NAMESPACE --auth-url=$KS_AUTH_URL --v=5\n+operator: env NODEPOOL_AFFINITY=true bin/$(go env | grep GOOS | cut -d'\"' -f2)/kubernikus operator --auth-username=$KS_USERNAME --auth-domain=$KS_USER_DOMAIN_NAME --auth-password=\"$KS_PASSWORD\" --auth-project=$KS_PROJECT_NAME --auth-project-domain=$KS_PROJECT_DOMAIN_NAME --auth-url=$KS_AUTH_URL --namespace=$KS_NAMESPACE --context=$KS_CONTEXT --kubernikus-domain=$KS_DOMAIN --v=5\n+api: bin/$(go env | grep GOOS | cut -d'\"' -f2)/apiserver --context=$KS_CONTEXT --namespace=$KS_NAMESPACE --auth-url=$KS_AUTH_URL --v=5\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Fix procfile on linux, should work everywhere
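The Procfile now derives the platform directory by grepping `go env` output. As a side note rather than part of the commit: `go env GOOS` prints the single value directly (so `bin/$(go env GOOS)/kubernikus` would also work), and the same value is available inside a Go process:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// GOOS is baked in at build time: "linux", "darwin", "windows", ...
	fmt.Println(runtime.GOOS)
}
```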
596,226
05.11.2018 14:14:21
-3,600
c9e6ae7c7c5ce038e2c85de97f8995786d5ab4a4
Updates etcd to version 3.3.10, adds probes to deployment
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "new_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "diff": "@@ -40,7 +40,6 @@ metadata:\nlabels:\nchart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\nrelease: {{ .Release.Name }}\n-\nspec:\nreplicas: 1\nselector:\n@@ -79,6 +78,20 @@ spec:\nname: data\n- mountPath: /bootstrap\nname: bootstrap\n+ livenessProbe:\n+ exec:\n+ command:\n+ - /bin/sh\n+ - -ec\n+ - ETCDCTL_API=3 etcdctl get foo\n+ initialDelaySeconds: 5\n+ periodSeconds: 5\n+ readinessProbe:\n+ httpGet:\n+ path: /healthz\n+ port: 8080\n+ initialDelaySeconds: 5\n+ periodSeconds: 10\nresources:\n{{ toYaml .Values.resources | indent 12 }}\n- name: backup\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/values.yaml", "new_path": "charts/kube-master/charts/etcd/values.yaml", "diff": "# Declare variables to be passed into your templates.\nimage:\nrepository: sapcc/etcd\n- tag: 3.1.12\n+ tag: v3.3.10\npullPolicy: IfNotPresent\n## Persist data to a persitent volume\npersistence:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Updates etcd to version 3.3.10, adds probes to deployment (#330)
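The probes above live in chart YAML; when a deployment is built programmatically with client-go, the same checks map onto `corev1.Probe` values. A sketch using the core/v1 types of this era (the embedded `Handler` field was renamed `ProbeHandler` in much later releases); the numbers mirror the chart, everything else is illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// etcdProbes builds liveness/readiness probes equivalent to the chart YAML.
func etcdProbes() (liveness, readiness *corev1.Probe) {
	liveness = &corev1.Probe{
		Handler: corev1.Handler{
			Exec: &corev1.ExecAction{
				Command: []string{"/bin/sh", "-ec", "ETCDCTL_API=3 etcdctl get foo"},
			},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       5,
	}
	readiness = &corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
	}
	return liveness, readiness
}

func main() {
	l, r := etcdProbes()
	fmt.Println(l.PeriodSeconds, r.PeriodSeconds) // 5 10
}
```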
596,226
05.11.2018 09:42:02
-3,600
4b7863874f7c59d124717798fe51a4d73e0984c1
Set control plane kluster in env vars
[ { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml", "new_path": "ci/pipeline.yaml", "diff": "@@ -31,6 +31,7 @@ auth_e2e_ap-ae-1: &auth_e2e_ap-ae-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-ae-1.admin.cloud.sap\n+ CP_KLUSTER: k-ap-ae-1\n@@ -69,6 +70,7 @@ auth_e2e_ap-au-1: &auth_e2e_ap-au-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-au-1.admin.cloud.sap\n+ CP_KLUSTER: k-ap-au-1\n@@ -103,6 +105,7 @@ auth_e2e_ap-cn-1: &auth_e2e_ap-cn-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-cn-1.admin.cloud.sap\n+ CP_KLUSTER: k-ap-cn-1\n@@ -139,6 +142,7 @@ auth_e2e_ap-jp-1: &auth_e2e_ap-jp-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-jp-1.admin.cloud.sap\n+ CP_KLUSTER: k-ap-jp-1\n@@ -175,6 +179,7 @@ auth_e2e_ap-jp-2: &auth_e2e_ap-jp-2\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-jp-2.admin.cloud.sap\n+ CP_KLUSTER: k-ap-jp-2\n@@ -211,6 +216,7 @@ auth_e2e_ap-sa-1: &auth_e2e_ap-sa-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-ap-sa-1.admin.cloud.sap\n+ CP_KLUSTER: k-ap-sa-1\n@@ -249,6 +255,7 @@ auth_e2e_eu-de-1: &auth_e2e_eu-de-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-eu-de-1.admin.cloud.sap\n+ CP_KLUSTER: k-eu-de-1\n@@ -285,6 +292,7 @@ auth_e2e_eu-de-2: &auth_e2e_eu-de-2\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-eu-de-2.admin.cloud.sap\n+ CP_KLUSTER: k-eu-de-2\n@@ -319,6 +327,7 @@ auth_e2e_eu-nl-1: &auth_e2e_eu-nl-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-eu-nl-1.admin.cloud.sap\n+ CP_KLUSTER: k-eu-nl-1\n@@ -355,6 +364,7 @@ auth_e2e_eu-ru-1: &auth_e2e_eu-ru-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-eu-ru-1.admin.cloud.sap\n+ CP_KLUSTER: k-eu-ru-1\n@@ -391,6 +401,7 @@ auth_e2e_la-br-1: &auth_e2e_la-br-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-la-br-1.admin.cloud.sap\n+ CP_KLUSTER: k-la-br-1\n@@ -427,6 +438,7 @@ auth_e2e_na-ca-1: &auth_e2e_na-ca-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-na-ca-1.admin.cloud.sap\n+ CP_KLUSTER: k-na-ca-1\n@@ -465,6 +477,7 @@ auth_e2e_na-us-1: &auth_e2e_na-us-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-na-us-1.admin.cloud.sap\n+ CP_KLUSTER: k-na-us-1\n@@ -499,6 +512,7 @@ auth_e2e_na-us-3: &auth_e2e_na-us-3\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-na-us-3.admin.cloud.sap\n+ CP_KLUSTER: k-na-us-3\n@@ -535,6 +549,7 @@ auth_e2e_qa-de-1: &auth_e2e_qa-de-1\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-qa-de-1.admin.cloud.sap\n+ CP_KLUSTER: k-qa-de-1\n@@ -574,6 +589,7 @@ auth_e2e_master: &auth_e2e_master\nCP_OS_PROJECT_NAME: kubernikus-master\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-master.admin.cloud.sap\n+ CP_KLUSTER: k-master\n@@ -714,6 +730,10 @@ task_e2e_tests: &task_e2e_tests\nOS_PROJECT_DOMAIN_NAME:\nOS_REGION_NAME:\nSENTRY_DSN:\n+ CP_OS_PROJECT_NAME:\n+ CP_OS_PROJECT_DOMAIN_NAME:\n+ CP_KUBERNIKUS_URL:\n+ 
CP_KLUSTER:\ntask_helm-admin_kubernikus: &task_helm-admin_kubernikus\n" }, { "change_type": "MODIFY", "old_path": "ci/pipeline.yaml.erb", "new_path": "ci/pipeline.yaml.erb", "diff": "@@ -71,6 +71,7 @@ auth_e2e_<%= region %>: &auth_e2e_<%= region %>\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+ CP_KLUSTER: k-<%= region %>\n<% end %>\n<% if meta[:e2e] == \"manual\" %>\n@@ -87,6 +88,7 @@ auth_e2e_<%= region %>: &auth_e2e_<%= region %>\nCP_OS_PROJECT_NAME: kubernikus\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+ CP_KLUSTER: k-<%= region %>\n<% end %>\n<% if meta[:e2e] == \"virtual\" %>\n@@ -102,6 +104,7 @@ auth_e2e_<%= region %>: &auth_e2e_<%= region %>\nCP_OS_PROJECT_NAME: kubernikus-<%= region %>\nCP_OS_PROJECT_DOMAIN_NAME: ccadmin\nCP_KUBERNIKUS_URL: https://k-<%= region %>.admin.cloud.sap\n+ CP_KLUSTER: k-<%= region %>\n<% end %>\n<% end %>\n" }, { "change_type": "MODIFY", "old_path": "ci/task_e2e_tests.yaml", "new_path": "ci/task_e2e_tests.yaml", "diff": "@@ -44,3 +44,7 @@ params:\nOS_PROJECT_DOMAIN_NAME:\nOS_REGION_NAME:\nSENTRY_DSN:\n+ CP_OS_PROJECT_NAME:\n+ CP_OS_PROJECT_DOMAIN_NAME:\n+ CP_KUBERNIKUS_URL:\n+ CP_KLUSTER:\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/main_test.go", "new_path": "test/e2e/main_test.go", "diff": "@@ -97,6 +97,9 @@ func TestRunner(t *testing.T) {\nfmt.Printf(\"Kluster Name: %v\\n\", klusterName)\nfmt.Printf(\"Reuse: %v\\n\", *reuse)\nfmt.Printf(\"Cleanup: %v\\n\", *cleanup)\n+ if os.Getenv(\"CP_KUBERNIKUS_URL\") != \"\" {\n+ fmt.Printf(\"CP Kluster Name: %v\\n\", os.Getenv(\"CP_KLUSTER\"))\n+ }\nfmt.Printf(\"\\n\\n\")\nauthOptions := &tokens.AuthOptions{\n@@ -173,8 +176,7 @@ func TestRunner(t *testing.T) {\nt.Run(\"Smoke\", func(t *testing.T) {\nif os.Getenv(\"CP_KUBERNIKUS_URL\") != \"\" {\n- // TODO: get k8s cp namespace from env\n- kubernetesControlPlane, err := framework.NewKubernetesFramework(kubernikusControlPlane, \"k-master\")\n+ kubernetesControlPlane, err := framework.NewKubernetesFramework(kubernikusControlPlane, os.Getenv(\"CP_KLUSTER\"))\nrequire.NoError(t, err, \"Must be able to create a control plane kubernetes client\")\nnamespace := \"kubernikus\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Set control plane kluster in env vars
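The e2e runner gates all control-plane assertions on `CP_KUBERNIKUS_URL` and then reads `CP_KLUSTER` instead of the previously hardcoded "k-master". A small helper in the same spirit — only the two variable names come from the commit, the rest is illustrative:

```go
package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the variable's value, or def when it is unset or empty.
func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	if os.Getenv("CP_KUBERNIKUS_URL") == "" {
		fmt.Println("control plane checks disabled")
		return
	}
	// Previously hardcoded; now injected per region by the CI pipeline.
	fmt.Println("CP kluster:", getenvDefault("CP_KLUSTER", "k-master"))
}
```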
596,240
13.11.2018 15:49:07
-3,600
4c09f83d554a26404537d38e16217232b1924290
plan once and execute
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -51,7 +51,8 @@ plan:\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n- -parallelism=1\n+ -parallelism=1 \\\n+ -out terraform.tfplan\napply:\n@$(TERRAFORM_BIN) apply \\\n@@ -64,4 +65,5 @@ apply:\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n-var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n- -parallelism=1\n+ -parallelism=1 \\\n+ terraform.tfplan\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
plan once and execute
596,240
13.11.2018 15:49:54
-3,600
4d42d44b13ec379adbf9a30a187d378c422d9077
pull in updated ccloud provider
[ { "change_type": "MODIFY", "old_path": "contrib/kubernikus-terraform/Makefile", "new_path": "contrib/kubernikus-terraform/Makefile", "diff": "@@ -4,8 +4,8 @@ DATE := $(shell date +%Y%m%d%H%M%S)\nVERSION ?= v$(DATE)\nTERRAFORM_VERSION := 0.11.7-r0\n-TERRAFORM_PROVIDER_OPENSTACK_VERSION := 8a72ef5435ea15092061fc8acad756a9e87bf93a\n-TERRAFORM_PROVIDER_CCLOUD_VERSION := 59c10debdb5576da5b8e2dcd66a815c376e68e08\n+TERRAFORM_PROVIDER_OPENSTACK_VERSION := b2c32b19aeb26e10a1d1a1d331f89ba2fdc4ef53\n+TERRAFORM_PROVIDER_CCLOUD_VERSION := e44c79529e8b390a42c517b97e6ace5b135170fa\n.PHONY: all\nall: build push\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
pull in updated ccloud provider
596,240
13.11.2018 15:57:11
-3,600
557a67f5284dd96b7393e757b8f1b4753a94f8ba
apparently variables are included in the plan
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -64,6 +64,5 @@ apply:\n-var password=$(TF_PASSWORD) \\\n-var domain_name=$(TF_PROJECT_DOMAIN) \\\n-var tenant_name=$(TF_PROJECT) \\\n- -var-file=\"${TF_VARS_DIR}/kubernikus.tfvars\" \\\n-parallelism=1 \\\nterraform.tfplan\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
apparently variables are included in the plan
596,240
13.11.2018 16:02:58
-3,600
60e0284a25e2804b3c8a199a4d1a3f6bc73485ea
apparently all variables are included in the plan
[ { "change_type": "MODIFY", "old_path": "terraform/Makefile", "new_path": "terraform/Makefile", "diff": "@@ -58,11 +58,5 @@ apply:\n@$(TERRAFORM_BIN) apply \\\n-input=false \\\n-auto-approve \\\n- -var region=$(TF_REGION) \\\n- -var user_name=$(TF_USER) \\\n- -var user_domain_name=$(TF_USER_DOMAIN) \\\n- -var password=$(TF_PASSWORD) \\\n- -var domain_name=$(TF_PROJECT_DOMAIN) \\\n- -var tenant_name=$(TF_PROJECT) \\\n-parallelism=1 \\\nterraform.tfplan\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
apparently all variables are included in the plan
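Taken together, the last three commits land on terraform's two-phase workflow: `plan -out terraform.tfplan` resolves every input variable and bakes both the variables and the proposed changes into the plan file, so the later `apply terraform.tfplan` neither needs nor accepts `-var` flags. A hedged sketch of driving the same sequence from Go via os/exec (the single `-var` shown is illustrative):

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

// run invokes terraform with the given arguments, streaming its output.
func run(args ...string) {
	cmd := exec.Command("terraform", args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("terraform %v: %v", args, err)
	}
}

func main() {
	// Phase 1: variables are resolved here and embedded in the plan file.
	run("plan", "-input=false", "-var", "region=eu-de-1", "-out", "terraform.tfplan")
	// Phase 2: the saved plan carries everything; no -var flags are repeated.
	run("apply", "-input=false", "-auto-approve", "-parallelism=1", "terraform.tfplan")
}
```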
596,226
20.11.2018 12:59:29
-3,600
a910f296e3c1900d76fccdfdcbe317d643735294
Jump to etcdbrctl 0.3.1, raise deployment resource limits
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "new_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "diff": "@@ -84,13 +84,13 @@ spec:\n- /bin/sh\n- -ec\n- ETCDCTL_API=3 etcdctl get foo\n- initialDelaySeconds: 5\n+ initialDelaySeconds: 15\nperiodSeconds: 5\nreadinessProbe:\nhttpGet:\npath: /healthz\nport: 8080\n- initialDelaySeconds: 5\n+ initialDelaySeconds: 15\nperiodSeconds: 10\nresources:\n{{ toYaml .Values.resources | indent 12 }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/values.yaml", "new_path": "charts/kube-master/charts/etcd/values.yaml", "diff": "@@ -14,15 +14,15 @@ persistence:\n# existingClaim: claimName\nresources:\nrequests:\n- cpu: 100m\n- memory: 256Mi\n+ cpu: 200m\n+ memory: 500Mi\nlimits:\n- cpu: 500m\n- memory: 512Mi\n+ cpu: 750m\n+ memory: 2560Mi\nbackup:\nimage:\nrepository: sapcc/etcdbrctl\n- tag: 0.3.0\n+ tag: 0.3.1\npullPolicy: IfNotPresent\nconfig:\n# do a full-backup every hour\n@@ -38,7 +38,7 @@ backup:\nresources:\nrequests:\ncpu: 100m\n- memory: 256Mi\n+ memory: 128Mi\nlimits:\n- cpu: 500m\n- memory: 512Mi\n+ cpu: 300m\n+ memory: 1Gi\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Jump to etcdbrctl 0.3.1, raise deployment resource limits
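The raised requests and limits are plain chart values; as API objects they become a `corev1.ResourceRequirements`. A sketch with the new etcd numbers (the quantities are copied from the diff, the function itself is illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// etcdResources mirrors the raised requests/limits from values.yaml.
func etcdResources() corev1.ResourceRequirements {
	return corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("200m"),
			corev1.ResourceMemory: resource.MustParse("500Mi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("750m"),
			corev1.ResourceMemory: resource.MustParse("2560Mi"),
		},
	}
}

func main() {
	mem := etcdResources().Limits[corev1.ResourceMemory]
	fmt.Println(mem.String()) // 2560Mi
}
```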
596,226
16.11.2018 11:15:17
-3,600
8b5431f788a110dcfad6676664458c228036ab80
Fix pyrolisis test, wait until objects are actually deleted
[ { "change_type": "MODIFY", "old_path": "test/e2e/pyrolisis_test.go", "new_path": "test/e2e/pyrolisis_test.go", "diff": "@@ -4,6 +4,7 @@ import (\n\"fmt\"\n\"strings\"\n\"testing\"\n+ \"time\"\n\"github.com/gophercloud/gophercloud\"\n\"github.com/gophercloud/gophercloud/openstack\"\n@@ -11,12 +12,18 @@ import (\n\"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects\"\n\"github.com/stretchr/testify/assert\"\n\"github.com/stretchr/testify/require\"\n+ \"k8s.io/apimachinery/pkg/util/wait\"\n\"github.com/sapcc/kubernikus/pkg/api/client/operations\"\netcd_util \"github.com/sapcc/kubernikus/pkg/util/etcd\"\n\"github.com/sapcc/kubernikus/test/e2e/framework\"\n)\n+const (\n+ CleanupBackupContainerDeleteInterval = 1 * time.Second\n+ CleanupBackupContainerDeleteTimeout = 1 * time.Minute\n+)\n+\ntype PyrolisisTests struct {\nKubernikus *framework.Kubernikus\nOpenStack *framework.OpenStack\n@@ -91,7 +98,11 @@ func (p *PyrolisisTests) CleanupBackupStorageContainers(t *testing.T) {\nrequire.NoError(t, err, \"There should be no error while deleting object %s/%s\", container, object)\n}\n+ wait.PollImmediate(CleanupBackupContainerDeleteInterval, CleanupBackupContainerDeleteTimeout,\n+ func() (bool, error) {\n_, err = containers.Delete(storageClient, container).Extract()\n+ return (err == nil), nil\n+ })\nrequire.NoError(t, err, \"There should be no error while deleting storage container: %s\", container)\n}\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Fix pyrolisis test, wait until objects are actually deleted
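The fix wraps the Swift container deletion in `wait.PollImmediate`, which runs the condition once right away and then every interval until it returns true, returns an error, or the timeout elapses. The pattern in isolation, with an illustrative condition body:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	err := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		attempts++
		// (false, nil) retries, (true, nil) succeeds,
		// and a non-nil error aborts the whole poll.
		return attempts >= 3, nil
	})
	fmt.Println(err, attempts) // <nil> 3
}
```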
596,226
16.11.2018 16:01:17
-3,600
aebc1a4dc1559fe4e23b1f7ccd587a45ea209768
Check for status code 409 in pyrolisis test
[ { "change_type": "MODIFY", "old_path": "test/e2e/pyrolisis_test.go", "new_path": "test/e2e/pyrolisis_test.go", "diff": "@@ -98,10 +98,13 @@ func (p *PyrolisisTests) CleanupBackupStorageContainers(t *testing.T) {\nrequire.NoError(t, err, \"There should be no error while deleting object %s/%s\", container, object)\n}\n- wait.PollImmediate(CleanupBackupContainerDeleteInterval, CleanupBackupContainerDeleteTimeout,\n+ err = wait.PollImmediate(CleanupBackupContainerDeleteInterval, CleanupBackupContainerDeleteTimeout,\nfunc() (bool, error) {\n- _, err = containers.Delete(storageClient, container).Extract()\n- return (err == nil), nil\n+ _, err := containers.Delete(storageClient, container).Extract()\n+ if errResponseCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok && errResponseCode.Actual == 409 {\n+ return false, nil\n+ }\n+ return true, err\n})\nrequire.NoError(t, err, \"There should be no error while deleting storage container: %s\", container)\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Check for status code 409 in pyrolisis test
596,226
20.11.2018 15:30:59
-3,600
9158f5b59b874da056615efe5b9470b7dda4bad1
Move etcd data to subfolder
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "new_path": "charts/kube-master/charts/etcd/templates/deployment.yaml", "diff": "@@ -8,6 +8,12 @@ metadata:\ndata:\nbootstrap.sh: |-\n#!/bin/sh\n+ if [ ! -d /var/lib/etcd/new.etcd ]; then\n+ mkdir /var/lib/etcd/new.etcd\n+ fi\n+ if [ -d /var/lib/etcd/member ]; then\n+ mv /var/lib/etcd/member /var/lib/etcd/new.etcd/member\n+ fi\nwhile true;\ndo\nwget http://localhost:8080/initialization/status -S -O status;\n@@ -23,7 +29,7 @@ data:\n\"Successful\")\nexec etcd \\\n--name=kubernikus \\\n- --data-dir=/var/lib/etcd \\\n+ --data-dir=/var/lib/etcd/new.etcd \\\n--advertise-client-urls=http://${ETCD_IP}:2379 \\\n--initial-advertise-peer-urls=http://${ETCD_IP}:2380 \\\n--initial-cluster=kubernikus=http://${ETCD_IP}:2380 \\\n@@ -100,7 +106,7 @@ spec:\n- server\n- --schedule={{ .Values.backup.config.schedule }}\n- --max-backups={{ .Values.backup.config.maxBackups }}\n- - --data-dir=/var/lib/etcd\n+ - --data-dir=/var/lib/etcd/new.etcd\n- --insecure-transport=true\n- --storage-provider=Swift\n- --delta-snapshot-period-seconds={{ .Values.backup.config.deltaSnapshotPeriod }}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/etcdbr_test.go", "new_path": "test/e2e/etcdbr_test.go", "diff": "@@ -17,7 +17,7 @@ const (\nEtcdFailTimeout = 60 * time.Second\nEtcdRestorePollInterval = 2 * time.Second\nEtcdRestoreTimeout = 60 * time.Second\n- EtcdDataDir = \"/var/lib/etcd\"\n+ EtcdDataDir = \"/var/lib/etcd/new.etcd\"\n)\ntype EtcdBackupTests struct {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Move etcd data to subfolder
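The bootstrap script is deliberately idempotent: it creates `new.etcd` only when missing and moves a legacy `member` directory into it before etcd starts, so both pod restarts and fresh volumes work. The same migration expressed as a Go sketch for clarity (paths from the script, error handling illustrative):

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	base := "/var/lib/etcd"
	newDir := filepath.Join(base, "new.etcd")
	oldMember := filepath.Join(base, "member")

	// Equivalent of: [ ! -d new.etcd ] && mkdir new.etcd (MkdirAll is idempotent).
	if err := os.MkdirAll(newDir, 0700); err != nil {
		log.Fatal(err)
	}
	// Equivalent of: [ -d member ] && mv member new.etcd/member
	if _, err := os.Stat(oldMember); err == nil {
		if err := os.Rename(oldMember, filepath.Join(newDir, "member")); err != nil {
			log.Fatal(err)
		}
	}
}
```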
596,226
26.11.2018 11:08:58
-3,600
ef1522e5ba571da7995e7ee982c6ef29991b012b
Cleanup volumes in e2e test pyrolisis
[ { "change_type": "MODIFY", "old_path": "deps/deps.go", "new_path": "deps/deps.go", "diff": "@@ -5,6 +5,7 @@ package extra_dependencies\nimport (\n_ \"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets\"\n+ _ \"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes\"\n_ \"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets\"\n_ \"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects\"\n_ \"github.com/stretchr/testify/assert\"\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/pyrolisis_test.go", "new_path": "test/e2e/pyrolisis_test.go", "diff": "@@ -8,6 +8,8 @@ import (\n\"github.com/gophercloud/gophercloud\"\n\"github.com/gophercloud/gophercloud/openstack\"\n+ \"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes\"\n+ \"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens\"\n\"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers\"\n\"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects\"\n\"github.com/stretchr/testify/assert\"\n@@ -38,10 +40,12 @@ func (p *PyrolisisTests) Run(t *testing.T) {\nt.Run(\"Wait\", func(t *testing.T) {\nt.Run(\"Klusters\", p.WaitForE2EKlustersTerminated)\n})\n- }\n- cleanup := t.Run(\"CleanupBackupStorageContainers\", p.CleanupBackupStorageContainers)\n- require.True(t, cleanup, \"Etcd backup storage container cleanup failed\")\n+ cleanupStorageContainer := t.Run(\"CleanupBackupStorageContainers\", p.CleanupBackupStorageContainers)\n+ require.True(t, cleanupStorageContainer, \"Etcd backup storage container cleanup failed\")\n+\n+ t.Run(\"CleanupVolumes\", p.CleanupVolumes)\n+ }\n}\nfunc (p *PyrolisisTests) SettingKlustersOnFire(t *testing.T) {\n@@ -110,3 +114,29 @@ func (p *PyrolisisTests) CleanupBackupStorageContainers(t *testing.T) {\n}\n}\n}\n+\n+func (p *PyrolisisTests) CleanupVolumes(t *testing.T) {\n+ storageClient, err := openstack.NewBlockStorageV3(p.OpenStack.Provider, gophercloud.EndpointOpts{})\n+ require.NoError(t, err, \"Could not create block storage client\")\n+\n+ project, err := tokens.Get(p.OpenStack.Identity, p.OpenStack.Provider.Token()).ExtractProject()\n+ require.NoError(t, err, \"There should be no error while extracting the project\")\n+\n+ volumeListOpts := volumes.ListOpts{\n+ TenantID: project.ID,\n+ }\n+\n+ allPages, err := volumes.List(storageClient, volumeListOpts).AllPages()\n+ require.NoError(t, err, \"There should be no error while retrieving volume pages\")\n+\n+ allVolumes, err := volumes.ExtractVolumes(allPages)\n+ require.NoError(t, err, \"There should be no error while extracting volumes\")\n+\n+ for _, vol := range allVolumes {\n+ if strings.HasPrefix(vol.Name, \"kubernetes-dynamic-pvc-\") &&\n+ strings.HasPrefix(vol.Metadata[\"kubernetes.io/created-for/pvc/namespace\"], \"e2e-volumes-\") {\n+ err := volumes.Delete(storageClient, vol.ID).ExtractErr()\n+ require.NoError(t, err, \"There should be no error while deleting volume %s (%s)\", vol.Name, vol.ID)\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go", "diff": "+// Package volumes provides information and interaction with volumes in the\n+// OpenStack Block Storage service. A volume is a detachable block storage\n+// device, akin to a USB hard drive. 
It can only be attached to one instance at\n+// a time.\n+package volumes\n" }, { "change_type": "ADD", "old_path": null, "new_path": "vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go", "diff": "+package volumes\n+\n+import (\n+ \"github.com/gophercloud/gophercloud\"\n+ \"github.com/gophercloud/gophercloud/pagination\"\n+)\n+\n+// CreateOptsBuilder allows extensions to add additional parameters to the\n+// Create request.\n+type CreateOptsBuilder interface {\n+ ToVolumeCreateMap() (map[string]interface{}, error)\n+}\n+\n+// CreateOpts contains options for creating a Volume. This object is passed to\n+// the volumes.Create function. For more information about these parameters,\n+// see the Volume object.\n+type CreateOpts struct {\n+ // The size of the volume, in GB\n+ Size int `json:\"size\" required:\"true\"`\n+ // The availability zone\n+ AvailabilityZone string `json:\"availability_zone,omitempty\"`\n+ // ConsistencyGroupID is the ID of a consistency group\n+ ConsistencyGroupID string `json:\"consistencygroup_id,omitempty\"`\n+ // The volume description\n+ Description string `json:\"description,omitempty\"`\n+ // One or more metadata key and value pairs to associate with the volume\n+ Metadata map[string]string `json:\"metadata,omitempty\"`\n+ // The volume name\n+ Name string `json:\"name,omitempty\"`\n+ // the ID of the existing volume snapshot\n+ SnapshotID string `json:\"snapshot_id,omitempty\"`\n+ // SourceReplica is a UUID of an existing volume to replicate with\n+ SourceReplica string `json:\"source_replica,omitempty\"`\n+ // the ID of the existing volume\n+ SourceVolID string `json:\"source_volid,omitempty\"`\n+ // The ID of the image from which you want to create the volume.\n+ // Required to create a bootable volume.\n+ ImageID string `json:\"imageRef,omitempty\"`\n+ // The associated volume type\n+ VolumeType string `json:\"volume_type,omitempty\"`\n+}\n+\n+// ToVolumeCreateMap assembles a request body based on the contents of a\n+// CreateOpts.\n+func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {\n+ return gophercloud.BuildRequestBody(opts, \"volume\")\n+}\n+\n+// Create will create a new Volume based on the values in CreateOpts. To extract\n+// the Volume object from the response, call the Extract method on the\n+// CreateResult.\n+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n+ b, err := opts.ToVolumeCreateMap()\n+ if err != nil {\n+ r.Err = err\n+ return\n+ }\n+ _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n+ OkCodes: []int{202},\n+ })\n+ return\n+}\n+\n+// Delete will delete the existing Volume with the provided ID.\n+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {\n+ _, r.Err = client.Delete(deleteURL(client, id), nil)\n+ return\n+}\n+\n+// Get retrieves the Volume with the provided ID. To extract the Volume object\n+// from the response, call the Extract method on the GetResult.\n+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {\n+ _, r.Err = client.Get(getURL(client, id), &r.Body, nil)\n+ return\n+}\n+\n+// ListOptsBuilder allows extensions to add additional parameters to the List\n+// request.\n+type ListOptsBuilder interface {\n+ ToVolumeListQuery() (string, error)\n+}\n+\n+// ListOpts holds options for listing Volumes. 
It is passed to the volumes.List\n+// function.\n+type ListOpts struct {\n+ // AllTenants will retrieve volumes of all tenants/projects.\n+ AllTenants bool `q:\"all_tenants\"`\n+\n+ // Metadata will filter results based on specified metadata.\n+ Metadata map[string]string `q:\"metadata\"`\n+\n+ // Name will filter by the specified volume name.\n+ Name string `q:\"name\"`\n+\n+ // Status will filter by the specified status.\n+ Status string `q:\"status\"`\n+\n+ // TenantID will filter by a specific tenant/project ID.\n+ // Setting AllTenants is required for this.\n+ TenantID string `q:\"project_id\"`\n+\n+ // Comma-separated list of sort keys and optional sort directions in the\n+ // form of <key>[:<direction>].\n+ Sort string `q:\"sort\"`\n+\n+ // Requests a page size of items.\n+ Limit int `q:\"limit\"`\n+\n+ // Used in conjunction with limit to return a slice of items.\n+ Offset int `q:\"offset\"`\n+\n+ // The ID of the last-seen item.\n+ Marker string `q:\"marker\"`\n+}\n+\n+// ToVolumeListQuery formats a ListOpts into a query string.\n+func (opts ListOpts) ToVolumeListQuery() (string, error) {\n+ q, err := gophercloud.BuildQueryString(opts)\n+ return q.String(), err\n+}\n+\n+// List returns Volumes optionally limited by the conditions provided in ListOpts.\n+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {\n+ url := listURL(client)\n+ if opts != nil {\n+ query, err := opts.ToVolumeListQuery()\n+ if err != nil {\n+ return pagination.Pager{Err: err}\n+ }\n+ url += query\n+ }\n+\n+ return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {\n+ return VolumePage{pagination.LinkedPageBase{PageResult: r}}\n+ })\n+}\n+\n+// UpdateOptsBuilder allows extensions to add additional parameters to the\n+// Update request.\n+type UpdateOptsBuilder interface {\n+ ToVolumeUpdateMap() (map[string]interface{}, error)\n+}\n+\n+// UpdateOpts contain options for updating an existing Volume. This object is passed\n+// to the volumes.Update function. For more information about the parameters, see\n+// the Volume object.\n+type UpdateOpts struct {\n+ Name string `json:\"name,omitempty\"`\n+ Description string `json:\"description,omitempty\"`\n+ Metadata map[string]string `json:\"metadata,omitempty\"`\n+}\n+\n+// ToVolumeUpdateMap assembles a request body based on the contents of an\n+// UpdateOpts.\n+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {\n+ return gophercloud.BuildRequestBody(opts, \"volume\")\n+}\n+\n+// Update will update the Volume with provided information. 
To extract the updated\n+// Volume from the response, call the Extract method on the UpdateResult.\n+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {\n+ b, err := opts.ToVolumeUpdateMap()\n+ if err != nil {\n+ r.Err = err\n+ return\n+ }\n+ _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{\n+ OkCodes: []int{200},\n+ })\n+ return\n+}\n+\n+// IDFromName is a convienience function that returns a server's ID given its name.\n+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {\n+ count := 0\n+ id := \"\"\n+ pages, err := List(client, nil).AllPages()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ all, err := ExtractVolumes(pages)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ for _, s := range all {\n+ if s.Name == name {\n+ count++\n+ id = s.ID\n+ }\n+ }\n+\n+ switch count {\n+ case 0:\n+ return \"\", gophercloud.ErrResourceNotFound{Name: name, ResourceType: \"volume\"}\n+ case 1:\n+ return id, nil\n+ default:\n+ return \"\", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: \"volume\"}\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go", "diff": "+package volumes\n+\n+import (\n+ \"encoding/json\"\n+ \"time\"\n+\n+ \"github.com/gophercloud/gophercloud\"\n+ \"github.com/gophercloud/gophercloud/pagination\"\n+)\n+\n+// Attachment represents a Volume Attachment record\n+type Attachment struct {\n+ AttachedAt time.Time `json:\"-\"`\n+ AttachmentID string `json:\"attachment_id\"`\n+ Device string `json:\"device\"`\n+ HostName string `json:\"host_name\"`\n+ ID string `json:\"id\"`\n+ ServerID string `json:\"server_id\"`\n+ VolumeID string `json:\"volume_id\"`\n+}\n+\n+// UnmarshalJSON is our unmarshalling helper\n+func (r *Attachment) UnmarshalJSON(b []byte) error {\n+ type tmp Attachment\n+ var s struct {\n+ tmp\n+ AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:\"attached_at\"`\n+ }\n+ err := json.Unmarshal(b, &s)\n+ if err != nil {\n+ return err\n+ }\n+ *r = Attachment(s.tmp)\n+\n+ r.AttachedAt = time.Time(s.AttachedAt)\n+\n+ return err\n+}\n+\n+// Volume contains all the information associated with an OpenStack Volume.\n+type Volume struct {\n+ // Unique identifier for the volume.\n+ ID string `json:\"id\"`\n+ // Current status of the volume.\n+ Status string `json:\"status\"`\n+ // Size of the volume in GB.\n+ Size int `json:\"size\"`\n+ // AvailabilityZone is which availability zone the volume is in.\n+ AvailabilityZone string `json:\"availability_zone\"`\n+ // The date when this volume was created.\n+ CreatedAt time.Time `json:\"-\"`\n+ // The date when this volume was last updated\n+ UpdatedAt time.Time `json:\"-\"`\n+ // Instances onto which the volume is attached.\n+ Attachments []Attachment `json:\"attachments\"`\n+ // Human-readable display name for the volume.\n+ Name string `json:\"name\"`\n+ // Human-readable description for the volume.\n+ Description string `json:\"description\"`\n+ // The type of volume to create, either SATA or SSD.\n+ VolumeType string `json:\"volume_type\"`\n+ // The ID of the snapshot from which the volume was created\n+ SnapshotID string `json:\"snapshot_id\"`\n+ // The ID of another block storage volume from which the current volume was created\n+ SourceVolID string `json:\"source_volid\"`\n+ // Arbitrary key-value pairs defined by the user.\n+ Metadata map[string]string `json:\"metadata\"`\n+ // UserID is 
the id of the user who created the volume.\n+ UserID string `json:\"user_id\"`\n+ // Indicates whether this is a bootable volume.\n+ Bootable string `json:\"bootable\"`\n+ // Encrypted denotes if the volume is encrypted.\n+ Encrypted bool `json:\"encrypted\"`\n+ // ReplicationStatus is the status of replication.\n+ ReplicationStatus string `json:\"replication_status\"`\n+ // ConsistencyGroupID is the consistency group ID.\n+ ConsistencyGroupID string `json:\"consistencygroup_id\"`\n+ // Multiattach denotes if the volume is multi-attach capable.\n+ Multiattach bool `json:\"multiattach\"`\n+}\n+\n+// UnmarshalJSON another unmarshalling function\n+func (r *Volume) UnmarshalJSON(b []byte) error {\n+ type tmp Volume\n+ var s struct {\n+ tmp\n+ CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:\"created_at\"`\n+ UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:\"updated_at\"`\n+ }\n+ err := json.Unmarshal(b, &s)\n+ if err != nil {\n+ return err\n+ }\n+ *r = Volume(s.tmp)\n+\n+ r.CreatedAt = time.Time(s.CreatedAt)\n+ r.UpdatedAt = time.Time(s.UpdatedAt)\n+\n+ return err\n+}\n+\n+// VolumePage is a pagination.pager that is returned from a call to the List function.\n+type VolumePage struct {\n+ pagination.LinkedPageBase\n+}\n+\n+// IsEmpty returns true if a ListResult contains no Volumes.\n+func (r VolumePage) IsEmpty() (bool, error) {\n+ volumes, err := ExtractVolumes(r)\n+ return len(volumes) == 0, err\n+}\n+\n+func (page VolumePage) NextPageURL() (string, error) {\n+ var s struct {\n+ Links []gophercloud.Link `json:\"volumes_links\"`\n+ }\n+ err := page.ExtractInto(&s)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ return gophercloud.ExtractNextURL(s.Links)\n+}\n+\n+// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.\n+func ExtractVolumes(r pagination.Page) ([]Volume, error) {\n+ var s []Volume\n+ err := ExtractVolumesInto(r, &s)\n+ return s, err\n+}\n+\n+type commonResult struct {\n+ gophercloud.Result\n+}\n+\n+// Extract will get the Volume object out of the commonResult object.\n+func (r commonResult) Extract() (*Volume, error) {\n+ var s Volume\n+ err := r.ExtractInto(&s)\n+ return &s, err\n+}\n+\n+// ExtractInto converts our response data into a volume struct\n+func (r commonResult) ExtractInto(v interface{}) error {\n+ return r.Result.ExtractIntoStructPtr(v, \"volume\")\n+}\n+\n+// ExtractVolumesInto similar to ExtractInto but operates on a `list` of volumes\n+func ExtractVolumesInto(r pagination.Page, v interface{}) error {\n+ return r.(VolumePage).Result.ExtractIntoSlicePtr(v, \"volumes\")\n+}\n+\n+// CreateResult contains the response body and error from a Create request.\n+type CreateResult struct {\n+ commonResult\n+}\n+\n+// GetResult contains the response body and error from a Get request.\n+type GetResult struct {\n+ commonResult\n+}\n+\n+// UpdateResult contains the response body and error from an Update request.\n+type UpdateResult struct {\n+ commonResult\n+}\n+\n+// DeleteResult contains the response body and error from a Delete request.\n+type DeleteResult struct {\n+ gophercloud.ErrResult\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go", "diff": "+package volumes\n+\n+import \"github.com/gophercloud/gophercloud\"\n+\n+func createURL(c *gophercloud.ServiceClient) string {\n+ return c.ServiceURL(\"volumes\")\n+}\n+\n+func listURL(c *gophercloud.ServiceClient) string {\n+ return c.ServiceURL(\"volumes\", \"detail\")\n+}\n+\n+func 
deleteURL(c *gophercloud.ServiceClient, id string) string {\n+ return c.ServiceURL(\"volumes\", id)\n+}\n+\n+func getURL(c *gophercloud.ServiceClient, id string) string {\n+ return deleteURL(c, id)\n+}\n+\n+func updateURL(c *gophercloud.ServiceClient, id string) string {\n+ return deleteURL(c, id)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go", "diff": "+package volumes\n+\n+import (\n+ \"github.com/gophercloud/gophercloud\"\n+)\n+\n+// WaitForStatus will continually poll the resource, checking for a particular\n+// status. It will do this for the amount of seconds defined.\n+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {\n+ return gophercloud.WaitFor(secs, func() (bool, error) {\n+ current, err := Get(c, id).Extract()\n+ if err != nil {\n+ return false, err\n+ }\n+\n+ if current.Status == status {\n+ return true, nil\n+ }\n+\n+ return false, nil\n+ })\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Cleanup volumes in e2e test pyrolisis
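With the v3 volumes package vendored, the cleanup follows gophercloud's usual list/extract/delete flow; filtering per project passes the ID through `ListOpts.TenantID`. A condensed usage sketch — the name prefix comes from the test, and client construction is assumed to have happened elsewhere:

```go
package cleanup

import (
	"strings"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// deleteDynamicVolumes removes dynamically provisioned PVC volumes of one project.
func deleteDynamicVolumes(provider *gophercloud.ProviderClient, projectID string) error {
	storage, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{})
	if err != nil {
		return err
	}
	pages, err := volumes.List(storage, volumes.ListOpts{TenantID: projectID}).AllPages()
	if err != nil {
		return err
	}
	vols, err := volumes.ExtractVolumes(pages)
	if err != nil {
		return err
	}
	for _, v := range vols {
		if strings.HasPrefix(v.Name, "kubernetes-dynamic-pvc-") {
			if err := volumes.Delete(storage, v.ID).ExtractErr(); err != nil {
				return err
			}
		}
	}
	return nil
}
```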
596,226
26.11.2018 14:38:46
-3,600
ebf8749c1b6df6599120675f00466b54659058b4
Fix etcd backup e2e test
[ { "change_type": "MODIFY", "old_path": "test/e2e/etcdbr_test.go", "new_path": "test/e2e/etcdbr_test.go", "diff": "@@ -5,7 +5,7 @@ import (\n\"testing\"\n\"time\"\n- \"github.com/stretchr/testify/assert\"\n+ \"github.com/stretchr/testify/require\"\nmeta_v1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n@@ -14,15 +14,15 @@ import (\nconst (\nEtcdFailPollInterval = 1 * time.Second\n- EtcdFailTimeout = 60 * time.Second\n- EtcdRestorePollInterval = 2 * time.Second\n- EtcdRestoreTimeout = 60 * time.Second\n+ EtcdFailTimeout = 90 * time.Second\n+ EtcdRestorePollInterval = 1 * time.Second\n+ EtcdRestoreTimeout = 90 * time.Second\nEtcdDataDir = \"/var/lib/etcd/new.etcd\"\n)\ntype EtcdBackupTests struct {\n- KubernikusControlPlane *framework.Kubernikus\nKubernetesControlPlane *framework.Kubernetes\n+ Kubernetes *framework.Kubernetes\nFullKlusterName string\nNamespace string\n}\n@@ -32,25 +32,30 @@ func (e *EtcdBackupTests) Run(t *testing.T) {\n}\nfunc (e *EtcdBackupTests) WaitForBackupRestore(t *testing.T) {\n- UID, err := e.getServiceAccountUID(\"default\")\n- assert.NoError(t, err, \"Error retrieving secret: %s\", err)\n- assert.NotEmpty(t, UID, \"ServiceAccount UID is empty\")\n+ err := e.Kubernetes.WaitForDefaultServiceAccountInNamespace(\"default\")\n+ require.NoError(t, err, \"There must be no error while waiting for the namespace\")\n+\n+ UID, err := e.getServiceAccountUID(\"default\", \"default\")\n+ require.NoError(t, err, \"Error retrieving default secret\")\n+ require.NotEmpty(t, UID, \"ServiceAccount UID should not be empty\")\nopts := meta_v1.ListOptions{\nLabelSelector: fmt.Sprintf(\"app=%s-etcd\", e.FullKlusterName),\n}\npods, err := e.KubernetesControlPlane.ClientSet.CoreV1().Pods(e.Namespace).List(opts)\n- assert.NoError(t, err, \"Error retrieving etcd pod: %s\", err)\n- assert.EqualValues(t, 1, len(pods.Items), \"There should be exactly one etcd pod, %d found\", len(pods.Items))\n+ require.NoError(t, err, \"Error retrieving etcd pod: %s\", err)\n+ require.EqualValues(t, 1, len(pods.Items), \"There should be exactly one etcd pod, %d found\", len(pods.Items))\npodName := pods.Items[0].GetName()\n+ require.NotEmpty(t, podName, \"Podname should not be empty\")\npod, err := e.KubernetesControlPlane.ClientSet.CoreV1().Pods(e.Namespace).Get(podName, meta_v1.GetOptions{})\n- assert.NoError(t, err, \"Error retrieving resource version\")\n+ require.NoError(t, err, \"Error retrieving resource version\")\nrv := pod.GetResourceVersion()\n+ require.NotEmpty(t, rv, \"ResourceVersion should not be empty\")\ncmd := fmt.Sprintf(\"rm -rf %s/*\", EtcdDataDir)\n_, _, err = e.KubernetesControlPlane.ExecCommandInContainerWithFullOutput(e.Namespace, podName, \"backup\", \"/bin/sh\", \"-c\", cmd)\n- assert.NoError(t, err, \"Deletion of etcd data failed: %s\", err)\n+ require.NoError(t, err, \"Deletion of etcd data failed: %s\", err)\nnewRv := string(rv)\nwait.PollImmediate(EtcdFailPollInterval, EtcdFailTimeout,\n@@ -59,20 +64,19 @@ func (e *EtcdBackupTests) WaitForBackupRestore(t *testing.T) {\nnewRv = pod.GetResourceVersion()\nreturn (newRv != rv), nil\n})\n- assert.NotEqual(t, rv, newRv, \"Etcd is still up, can't test recovery\")\n+ require.NotEqual(t, rv, newRv, \"Etcd is still up, can't test recovery\")\nvar newUID string\nwait.PollImmediate(EtcdRestorePollInterval, EtcdRestoreTimeout,\nfunc() (bool, error) {\n- newUID, _ = e.getServiceAccountUID(\"default\")\n+ newUID, _ = e.getServiceAccountUID(\"default\", \"default\")\nreturn (UID == newUID), nil\n})\n-\n- 
assert.EqualValues(t, UID, newUID, \"Recovery of etcd backup failed\")\n+ require.EqualValues(t, UID, newUID, \"Recovery of etcd backup failed\")\n}\n-func (e *EtcdBackupTests) getServiceAccountUID(serviceAccountName string) (string, error) {\n- serviceAccount, err := e.KubernetesControlPlane.ClientSet.CoreV1().ServiceAccounts(e.Namespace).Get(serviceAccountName, meta_v1.GetOptions{})\n+func (e *EtcdBackupTests) getServiceAccountUID(namespace, serviceAccountName string) (string, error) {\n+ serviceAccount, err := e.Kubernetes.ClientSet.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, meta_v1.GetOptions{})\nif err != nil {\nreturn \"\", err\n}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/main_test.go", "new_path": "test/e2e/main_test.go", "diff": "@@ -185,8 +185,8 @@ func TestRunner(t *testing.T) {\n}\netcdBackupTests := &EtcdBackupTests{\n- KubernikusControlPlane: kubernikusControlPlane,\nKubernetesControlPlane: kubernetesControlPlane,\n+ Kubernetes: kubernetes,\nFullKlusterName: fullKlusterName,\nNamespace: namespace,\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Fix etcd backup e2e test
596,226
27.11.2018 10:34:45
-3,600
7046560978ad014257ac32a35de4752ee9375ebc
Cleanup e2e test instances in pyrolisis
[ { "change_type": "MODIFY", "old_path": "deps/deps.go", "new_path": "deps/deps.go", "diff": "@@ -7,6 +7,7 @@ import (\n_ \"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets\"\n_ \"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes\"\n_ \"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets\"\n+ _ \"github.com/gophercloud/gophercloud/openstack/compute/v2/servers\"\n_ \"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects\"\n_ \"github.com/stretchr/testify/assert\"\n_ \"github.com/stretchr/testify/mock\"\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/pyrolisis_test.go", "new_path": "test/e2e/pyrolisis_test.go", "diff": "@@ -9,6 +9,7 @@ import (\n\"github.com/gophercloud/gophercloud\"\n\"github.com/gophercloud/gophercloud/openstack\"\n\"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes\"\n+ \"github.com/gophercloud/gophercloud/openstack/compute/v2/servers\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens\"\n\"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers\"\n\"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects\"\n@@ -45,6 +46,7 @@ func (p *PyrolisisTests) Run(t *testing.T) {\nrequire.True(t, cleanupStorageContainer, \"Etcd backup storage container cleanup failed\")\nt.Run(\"CleanupVolumes\", p.CleanupVolumes)\n+ t.Run(\"CleanupInstances\", p.CleanupInstances)\n}\n}\n@@ -140,3 +142,27 @@ func (p *PyrolisisTests) CleanupVolumes(t *testing.T) {\n}\n}\n}\n+\n+func (p *PyrolisisTests) CleanupInstances(t *testing.T) {\n+ computeClient, err := openstack.NewComputeV2(p.OpenStack.Provider, gophercloud.EndpointOpts{})\n+ require.NoError(t, err, \"There should be no error creating compute client\")\n+\n+ project, err := tokens.Get(p.OpenStack.Identity, p.OpenStack.Provider.Token()).ExtractProject()\n+ require.NoError(t, err, \"There should be no error while extracting the project\")\n+\n+ serversListOpts := servers.ListOpts{\n+ Name: \"e2e-\",\n+ TenantID: project.ID,\n+ }\n+\n+ allPages, err := servers.List(computeClient, serversListOpts).AllPages()\n+ require.NoError(t, err, \"There should be no error while listing all servers\")\n+\n+ allServers, err := servers.ExtractServers(allPages)\n+ require.NoError(t, err, \"There should be no error while extracting all servers\")\n+\n+ for _, srv := range allServers {\n+ err := servers.Delete(computeClient, srv.ID).ExtractErr()\n+ require.NoError(t, err, \"There should be no error while deleting server %s (%s)\", srv.Name, srv.ID)\n+ }\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
Cleanup e2e test instances in pyrolisis
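One caveat the commit leaves open: Nova deletes servers asynchronously, so `servers.Delete` only accepts the request. A follow-up poll in the spirit of the earlier Swift 409 fix could wait until the filtered list drains — a hedged sketch, with illustrative interval and timeout values:

```go
package cleanup

import (
	"time"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitServersGone polls until no servers with the e2e- prefix remain in the project.
func waitServersGone(compute *gophercloud.ServiceClient, projectID string) error {
	opts := servers.ListOpts{Name: "e2e-", TenantID: projectID}
	return wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
		pages, err := servers.List(compute, opts).AllPages()
		if err != nil {
			return false, err
		}
		all, err := servers.ExtractServers(pages)
		if err != nil {
			return false, err
		}
		return len(all) == 0, nil
	})
}
```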