Dataset schema:

    column              type            range
    author              int64           658 .. 755k
    date                stringlengths   19 .. 19
    timezone            int64           -46,800 .. 43.2k
    hash                stringlengths   40 .. 40
    message             stringlengths   5 .. 490
    mods                list            -
    language            stringclasses   20 values
    license             stringclasses   3 values
    repo                stringlengths   5 .. 68
    original_message    stringlengths   12 .. 491
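Each row that follows carries these ten fields in order (author, date, timezone, hash, message, mods, language, license, repo, original_message). As a minimal sketch, here is how one record could be modeled in Go, the language of the code in the rows themselves; the struct and field names are illustrative assumptions, not part of the dataset, and the Mod fields mirror the keys visible in the mods entries below (change_type, old_path, new_path, diff):

// Commit is an illustrative Go mirror of one dataset row; field types
// follow the schema above (author and timezone are int64, mods is a list,
// everything else is a string).
type Commit struct {
	Author          int64
	Date            string // 19 characters, e.g. "06.09.2017 14:49:40"
	Timezone        int64  // offset in seconds, schema range -46,800 .. 43,200
	Hash            string // 40-character commit SHA
	Message         string
	Mods            []Mod
	Language        string // one of 20 classes, e.g. "Go"
	License         string // one of 3 classes, e.g. "Apache License 2.0"
	Repo            string // e.g. "sapcc/kubernikus"
	OriginalMessage string
}

// Mod is an illustrative mirror of one mods entry; field names follow
// the keys that appear in the rows below.
type Mod struct {
	ChangeType string // "change_type", e.g. "MODIFY"
	OldPath    string // "old_path"
	NewPath    string // "new_path"
	Diff       string // "diff": unified diff body
}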
596,240
06.09.2017 14:49:40
-7,200
9ff68e0bd7adf3a463b8460d2fd31f028828bf84
stuffs klusters into the queue
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "package controller\nimport (\n+ \"reflect\"\n\"sync\"\n\"time\"\n\"github.com/golang/glog\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n+ \"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n)\n@@ -20,6 +23,27 @@ func NewLaunchController(factories Factories) *LaunchControl {\nqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),\n}\n+ launchctl.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n+ AddFunc: func(obj interface{}) {\n+ key, err := cache.MetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ queue.Add(key)\n+ }\n+ },\n+ UpdateFunc: func(old interface{}, new interface{}) {\n+ key, err := cache.MetaNamespaceKeyFunc(new)\n+ if err == nil {\n+ queue.Add(key)\n+ }\n+ },\n+ DeleteFunc: func(obj interface{}) {\n+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ queue.Add(key)\n+ }\n+ },\n+ })\n+\nreturn launchctl\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
stuffs klusters into the queue
596,240
07.09.2017 16:10:12
-7,200
e75341f559bd8d1dbd7fb320e7304ade7b865f41
fixes nodepool initialization
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -31,7 +31,7 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\n},\nSpec: v1.KlusterSpec{\nName: name,\n- NodePools: []v1.NodePool,\n+ NodePools: []v1.NodePool{},\n},\nStatus: v1.KlusterStatus{\nState: v1.KlusterPending,\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes nodepool initialization
596,240
07.09.2017 16:11:20
-7,200
440620bf69956ba5a999a432e1624713fb83af3d
implement launch controller. openstack client cache for domain and project scopes
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -2,9 +2,12 @@ package openstack\nimport (\n\"errors\"\n+ \"fmt\"\n+ \"github.com/golang/glog\"\n\"github.com/gophercloud/gophercloud\"\n\"github.com/gophercloud/gophercloud/openstack\"\n+ \"github.com/gophercloud/gophercloud/openstack/compute/v2/servers\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/projects\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/users\"\n@@ -13,15 +16,31 @@ import (\n\"github.com/gophercloud/gophercloud/openstack/networking/v2/ports\"\n\"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets\"\n\"github.com/gophercloud/gophercloud/pagination\"\n+ kubernikus_v1 \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"k8s.io/client-go/informers\"\n+ informers_core \"k8s.io/client-go/informers/core/v1\"\n+ \"k8s.io/client-go/pkg/api/v1\"\n\"github.com/sapcc/kubernikus/pkg/client/openstack/domains\"\n)\ntype client struct {\n- provider *gophercloud.ProviderClient\n+ domainProviders map[string]*gophercloud.ProviderClient\n+ projectProviders map[string]*gophercloud.ProviderClient\n+\n+ authURL string\n+ authUsername string\n+ authPassword string\n+ authDomain string\n+ authProject string\n+ authProjectDomain string\n+\n+ secrets informers_core.SecretInformer\n}\ntype Client interface {\n+ CreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) (string, error)\n+ GetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\nGetProject(id string) (*Project, error)\nGetRouters(project_id string) ([]Router, error)\nDeleteUser(username, domainID string) error\n@@ -44,33 +63,112 @@ type Subnet struct {\nCIDR string\n}\n-func NewClient(authURL, username, password, domain, project, projectDomain string) (Client, error) {\n+type Node struct {\n+ ID string\n+ Name string\n+ Status string\n+}\n+\n+func NewClient(informers informers.SharedInformerFactory, authURL, username, password, domain, project, projectDomain string) Client {\n+ informers.Core().V1().Secrets().Informer()\n+\n+ return &client{\n+ domainProviders: make(map[string]*gophercloud.ProviderClient),\n+ projectProviders: make(map[string]*gophercloud.ProviderClient),\n+ authURL: authURL,\n+ authUsername: username,\n+ authPassword: password,\n+ authDomain: domain,\n+ authProject: project,\n+ authProjectDomain: projectDomain,\n+ secrets: informers.Core().V1().Secrets(),\n+ }\n+}\n+\n+func (c *client) domainProvider() (*gophercloud.ProviderClient, error) {\n+ return c.domainProviderFor(c.authDomain)\n+}\n+\n+func (c *client) domainProviderFor(domain string) (*gophercloud.ProviderClient, error) {\n+ if c.domainProviders[domain] != nil {\n+ return c.domainProviders[domain], nil\n+ }\n+\n+ provider, err := openstack.NewClient(c.authURL)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ authOptions := &tokens.AuthOptions{\n+ IdentityEndpoint: c.authURL,\n+ Username: c.authUsername,\n+ Password: c.authPassword,\n+ DomainName: c.authDomain,\n+ AllowReauth: true,\n+ Scope: tokens.Scope{\n+ ProjectName: c.authProject,\n+ DomainName: c.authProjectDomain,\n+ },\n+ }\n+\n+ err = openstack.AuthenticateV3(provider, authOptions, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ c.domainProviders[domain] = provider\n+\n+ return c.domainProviders[domain], nil\n+}\n+\n+func (c *client) projectProviderFor(kluster *kubernikus_v1.Kluster) 
(*gophercloud.ProviderClient, error) {\n+ project_id := kluster.Account()\n+ secret_name := kluster.Name\n+\n+ if c.projectProviders[project_id] != nil {\n+ return c.projectProviders[project_id], nil\n+ }\n- provider, err := openstack.NewClient(authURL)\n+ secret, err := c.secrets.Lister().Secrets(\"kubernikus\").Get(secret_name)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"Couldn't retrieve secret kubernikus/%v: %v\", secret_name, err)\n+ }\n+\n+ provider, err := openstack.NewClient(string(secret.Data[\"openstack-auth-url\"]))\nif err != nil {\nreturn nil, err\n}\n- err = openstack.AuthenticateV3(provider, &tokens.AuthOptions{\n- IdentityEndpoint: authURL,\n- Username: username,\n- Password: password,\n- DomainName: domain,\n+\n+ authOptions := &tokens.AuthOptions{\n+ IdentityEndpoint: string(secret.Data[\"openstack-auth-url\"]),\n+ Username: string(secret.Data[\"openstack-username\"]),\n+ Password: string(secret.Data[\"openstack-password\"]),\n+ DomainName: string(secret.Data[\"openstack-domain-name\"]),\nAllowReauth: true,\nScope: tokens.Scope{\n- ProjectName: project,\n- DomainName: projectDomain,\n+ ProjectID: project_id,\n},\n- }, gophercloud.EndpointOpts{})\n+ }\n+\n+ glog.V(5).Infof(\"AuthOptions: %#v\", authOptions)\n+\n+ err = openstack.AuthenticateV3(provider, authOptions, gophercloud.EndpointOpts{})\nif err != nil {\nreturn nil, err\n}\n- return &client{provider: provider}, nil\n+ c.projectProviders[project_id] = provider\n+ return c.projectProviders[project_id], nil\n}\nfunc (c *client) GetProject(id string) (*Project, error) {\n- identity, err := openstack.NewIdentityV3(c.provider, gophercloud.EndpointOpts{})\n+ provider, err := c.domainProvider()\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ identity, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})\nif err != nil {\nreturn nil, err\n}\n@@ -88,7 +186,12 @@ func (c *client) GetProject(id string) (*Project, error) {\n}\nfunc (c *client) GetRouters(project_id string) ([]Router, error) {\n- networkClient, err := openstack.NewNetworkV2(c.provider, gophercloud.EndpointOpts{})\n+ provider, err := c.domainProvider()\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ networkClient, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})\nif err != nil {\nreturn nil, err\n}\n@@ -132,7 +235,12 @@ func (c *client) GetRouters(project_id string) ([]Router, error) {\n}\nfunc (c *client) DeleteUser(username, domainID string) error {\n- identity, err := openstack.NewIdentityV3(c.provider, gophercloud.EndpointOpts{})\n+ provider, err := c.domainProvider()\n+ if err != nil {\n+ return err\n+ }\n+\n+ identity, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})\nif err != nil {\nreturn err\n}\n@@ -166,3 +274,70 @@ func getRouterNetworks(client *gophercloud.ServiceClient, routerID string) ([]st\n})\nreturn networks, err\n}\n+\n+func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool) ([]Node, error) {\n+ project_id := kluster.Account()\n+ pool_id := pool.Name\n+\n+ provider, err := c.projectProviderFor(kluster)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ nodes := []Node{}\n+ client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return nodes, err\n+ }\n+ glog.V(5).Infof(\"Listing nodes for %v/%v\", project_id, pool_id)\n+\n+ prefix := fmt.Sprintf(\"kubernikus-%v\", pool_id)\n+ opts := servers.ListOpts{Name: prefix}\n+\n+ servers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {\n+ serverList, err 
:= servers.ExtractServers(page)\n+ if err != nil {\n+ glog.V(5).Infof(\"Couldn't extract server %v\", err)\n+ return false, err\n+ }\n+\n+ for _, s := range serverList {\n+ glog.V(5).Infof(\"Found node %v\", s.ID)\n+ nodes = append(nodes, Node{ID: s.ID, Name: s.Name, Status: s.Status})\n+ }\n+\n+ return true, nil\n+ })\n+\n+ return nodes, nil\n+}\n+\n+func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool) (string, error) {\n+ provider, err := c.projectProviderFor(kluster)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ name := v1.SimpleNameGenerator.GenerateName(fmt.Sprintf(\"kubernikus-%v-\", pool.Name))\n+ glog.V(5).Infof(\"Creating node %v\", name)\n+\n+ server, err := servers.Create(client, servers.CreateOpts{\n+ Name: name,\n+ FlavorName: pool.Flavor,\n+ ImageName: pool.Image,\n+ Networks: []servers.Network{servers.Network{UUID: \"2c731ffb-b8ac-48ac-9ccc-1f8c57fb61ce\"}},\n+ ServiceClient: client,\n+ }).Extract()\n+\n+ if err != nil {\n+ glog.V(5).Infof(\"Couldn't create node %v: %v\", name, err)\n+ return \"\", err\n+ }\n+\n+ return server.ID, nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -24,7 +24,7 @@ import (\n)\nconst (\n- TPR_RECHECK_INTERVAL = 5 * time.Minute\n+ KLUSTER_RECHECK_INTERVAL = 5 * time.Minute\n)\ntype GroundControl struct {\n@@ -40,7 +40,8 @@ func NewGroundController(factories Factories, clients Clients, config Config) *G\noperator := &GroundControl{\nClients: clients,\nFactories: factories,\n- queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),\n+ Config: config,\n+ queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\ntprInformer: factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n}\n@@ -63,12 +64,12 @@ func (op *GroundControl) Run(threadiness int, stopCh <-chan struct{}, wg *sync.W\ngo wait.Until(op.runWorker, time.Second, stopCh)\n}\n- ticker := time.NewTicker(TPR_RECHECK_INTERVAL)\n+ ticker := time.NewTicker(KLUSTER_RECHECK_INTERVAL)\ngo func() {\nfor {\nselect {\ncase <-ticker.C:\n- glog.V(2).Infof(\"I now would do reconciliation if its was implemented. Next run in %v\", TPR_RECHECK_INTERVAL)\n+ glog.V(2).Infof(\"I now would do reconciliation if its was implemented. 
Next run in %v\", KLUSTER_RECHECK_INTERVAL)\n//op.queue.Add(true)\ncase <-stopCh:\nticker.Stop()\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "package controller\nimport (\n- \"reflect\"\n+ \"fmt\"\n\"sync\"\n\"time\"\n\"github.com/golang/glog\"\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"k8s.io/apimachinery/pkg/labels\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n\"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n@@ -14,32 +15,34 @@ import (\ntype LaunchControl struct {\nFactories\n+ Clients\nqueue workqueue.RateLimitingInterface\n}\n-func NewLaunchController(factories Factories) *LaunchControl {\n+func NewLaunchController(factories Factories, clients Clients) *LaunchControl {\nlaunchctl := &LaunchControl{\nFactories: factories,\n- queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),\n+ Clients: clients,\n+ queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n}\nlaunchctl.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\nAddFunc: func(obj interface{}) {\nkey, err := cache.MetaNamespaceKeyFunc(obj)\nif err == nil {\n- queue.Add(key)\n+ launchctl.queue.Add(key)\n}\n},\nUpdateFunc: func(old interface{}, new interface{}) {\nkey, err := cache.MetaNamespaceKeyFunc(new)\nif err == nil {\n- queue.Add(key)\n+ launchctl.queue.Add(key)\n}\n},\nDeleteFunc: func(obj interface{}) {\nkey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\nif err == nil {\n- queue.Add(key)\n+ launchctl.queue.Add(key)\n}\n},\n})\n@@ -57,6 +60,31 @@ func (launchctl *LaunchControl) Run(threadiness int, stopCh <-chan struct{}, wg\ngo wait.Until(launchctl.runWorker, time.Second, stopCh)\n}\n+ ticker := time.NewTicker(KLUSTER_RECHECK_INTERVAL)\n+ go func() {\n+ for {\n+ select {\n+ case <-ticker.C:\n+ glog.V(2).Infof(\"Running periodic recheck. Queuing all Klusters...\")\n+\n+ klusters, err := launchctl.Factories.Kubernikus.Kubernikus().V1().Klusters().Lister().List(labels.Everything())\n+ if err != nil {\n+ glog.Errorf(\"Couldn't run periodic recheck. 
Listing klusters failed: %v\", err)\n+ }\n+\n+ for _, kluster := range klusters {\n+ key, err := cache.MetaNamespaceKeyFunc(kluster)\n+ if err == nil {\n+ launchctl.queue.Add(key)\n+ }\n+ }\n+ case <-stopCh:\n+ ticker.Stop()\n+ return\n+ }\n+ }\n+ }()\n+\n<-stopCh\n}\n@@ -72,18 +100,100 @@ func (launchctl *LaunchControl) processNextWorkItem() bool {\n}\ndefer launchctl.queue.Done(key)\n- err := launchctl.handler(key.(string))\n- if err == nil {\n- launchctl.queue.Forget(key)\n+ // Invoke the method containing the business logic\n+ err := launchctl.reconcile(key.(string))\n+ launchctl.handleErr(err, key)\nreturn true\n}\n- glog.Warningf(\"Error running handler: %v\", err)\n- launchctl.queue.AddRateLimited(key)\n+func (launchctl *LaunchControl) requeue(kluster *v1.Kluster) {\n+ key, err := cache.MetaNamespaceKeyFunc(kluster)\n+ if err == nil {\n+ launchctl.queue.AddAfter(key, 5*time.Second)\n+ }\n+}\n- return true\n+func (launchctl *LaunchControl) reconcile(key string) error {\n+ obj, exists, err := launchctl.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer().GetIndexer().GetByKey(key)\n+ if err != nil {\n+ return fmt.Errorf(\"Failed to fetch key %s from cache: %s\", key, err)\n+ }\n+ if !exists {\n+ glog.Infof(\"Kluster %s deleted in the meantime\", key)\n+ return nil\n+ }\n+\n+ kluster := obj.(*v1.Kluster)\n+ glog.V(2).Infof(\"Handling kluster %v\", kluster.Name)\n+\n+ for _, pool := range kluster.Spec.NodePools {\n+ err := launchctl.syncPool(kluster, &pool)\n+ if err != nil {\n+ return err\n+ }\n+ }\n+\n+ return nil\n+}\n+\n+func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool) error {\n+ nodes, err := launchctl.Clients.Openstack.GetNodes(kluster, pool)\n+ if err != nil {\n+ return fmt.Errorf(\"Couldn't list nodes for %v/%v: %v\", kluster.Name, pool.Name, err)\n+ }\n+\n+ switch {\n+ case len(nodes) < pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n+ return launchctl.createNode(kluster, pool)\n+ case len(nodes) > pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n+ return launchctl.terminateNode(kluster, nodes[0].ID)\n+ case len(nodes) == pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n}\n-func (launchctl *LaunchControl) handler(key string) error {\nreturn nil\n}\n+\n+func (launchctl *LaunchControl) createNode(kluster *v1.Kluster, pool *v1.NodePool) error {\n+ glog.V(2).Infof(\"Pool %v/%v: Creating new node\", kluster.Name, pool.Name)\n+\n+ id, err := launchctl.Clients.Openstack.CreateNode(kluster, pool)\n+ if err != nil {\n+ return err\n+ }\n+\n+ glog.V(2).Infof(\"Pool %v/%v: Created node %v.\", kluster.Name, pool.Name, id)\n+\n+ launchctl.requeue(kluster)\n+ return nil\n+}\n+\n+func (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, id string) error {\n+ launchctl.requeue(kluster)\n+ return nil\n+}\n+\n+func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\n+ if err == nil {\n+ // Forget about the #AddRateLimited history of the key on every successful synchronization.\n+ // This ensures that future processing of updates for this key is not delayed because of\n+ // an outdated error history.\n+ launchctl.queue.Forget(key)\n+ return\n+ }\n+\n+ // This controller retries 5 times if something goes wrong. 
After that, it stops trying.\n+ if launchctl.queue.NumRequeues(key) < 5 {\n+ glog.Errorf(\"Error while managing nodes for kluster %q: %v\", key, err)\n+\n+ // Re-enqueue the key rate limited. Based on the rate limiter on the\n+ // queue and the re-enqueue history, the key will be processed later again.\n+ launchctl.queue.AddRateLimited(key)\n+ return\n+ }\n+\n+ launchctl.queue.Forget(key)\n+ glog.Infof(\"Dropping kluster %q out of the queue. Too many retries: %v\", key, err)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -104,18 +104,6 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nglog.Fatalf(\"Failed to create kubernikus clients: %s\", err)\n}\n- o.Clients.Openstack, err = openstack.NewClient(\n- options.AuthURL,\n- options.AuthUsername,\n- options.AuthPassword,\n- options.AuthDomain,\n- options.AuthProject,\n- options.AuthProjectDomain,\n- )\n- if err != nil {\n- glog.Fatalf(\"Failed to create openstack client: %s\", err)\n- }\n-\nconfig, err := kube.NewConfig(options.KubeConfig)\nif err != nil {\nglog.Fatalf(\"Failed to create kubernetes config: %s\", err)\n@@ -134,6 +122,16 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nDeleteFunc: o.debugDelete,\n})\n+ o.Clients.Openstack = openstack.NewClient(\n+ o.Factories.Kubernetes,\n+ options.AuthURL,\n+ options.AuthUsername,\n+ options.AuthPassword,\n+ options.AuthDomain,\n+ options.AuthProject,\n+ options.AuthProjectDomain,\n+ )\n+\nreturn o\n}\n@@ -141,7 +139,7 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nfmt.Printf(\"Welcome to Kubernikus %v\\n\", version.VERSION)\ngroundctl := NewGroundController(o.Factories, o.Clients, o.Config)\n- launchctl := NewLaunchController(o.Factories)\n+ launchctl := NewLaunchController(o.Factories, o.Clients)\no.Factories.Kubernikus.Start(stopCh)\no.Factories.Kubernetes.Start(stopCh)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
implement launch controller. openstack client cache for domain and project scopes
596,240
07.09.2017 16:32:35
-7,200
4dd4f85e8679d8d10ce7c72d0188123f5b0acabe
fix helm client initialization
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/helm/helm.go", "new_path": "pkg/cmd/helm/helm.go", "diff": "@@ -96,10 +96,7 @@ func (o *HelmOptions) Run(c *cobra.Command) error {\n}\nif o.AuthURL != \"\" {\ncluster.OpenStack.AuthURL = o.AuthURL\n- oclient, err := openstack.NewClient(o.AuthURL, o.AuthUsername, o.AuthPassword, o.AuthDomain, o.AuthProject, o.AuthProjectDomain)\n- if err != nil {\n- return err\n- }\n+ oclient := openstack.NewClient(nil, o.AuthURL, o.AuthUsername, o.AuthPassword, o.AuthDomain, o.AuthProject, o.AuthProjectDomain)\nif err := cluster.DiscoverValues(o.Name, o.ProjectID, oclient); err != nil {\nreturn err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fix helm client initialization
596,240
08.09.2017 13:41:30
-7,200
2ced60794f47b2238d92a5ff9b0c0bb9c0a88299
adds persistent auto-discovery for openstack parameters
[ { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -15,8 +15,15 @@ type NodePool struct {\nConfig NodePoolConfig `json:\"config\"`\n}\n+type Openstack struct {\n+ ProjectID string `json:\"projectID\"`\n+ RouterID string `json:\"routerID\"`\n+ NetworkID string `json:\"networkID\"`\n+}\n+\ntype KlusterSpec struct {\nName string `json:\"name\"`\n+ Openstack Openstack `json:\"openstack,omitempty\"`\nNodePools []NodePool `json:\"nodePools,omitempty\"`\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -55,9 +55,14 @@ type Project struct {\ntype Router struct {\nID string\n+ Networks []Network\nSubnets []Subnet\n}\n+type Network struct {\n+ ID string\n+}\n+\ntype Subnet struct {\nID string\nCIDR string\n@@ -212,6 +217,8 @@ func (c *client) GetRouters(project_id string) ([]Router, error) {\nif err != nil {\nreturn false, err\n}\n+ resultRouter.Networks = append(resultRouter.Networks, Network{ID: network.ID})\n+\nfor _, subnetID := range network.Subnets {\nsubnet, err := subnets.Get(networkClient, subnetID).Extract()\nif err != nil {\n@@ -330,7 +337,7 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nName: name,\nFlavorName: pool.Flavor,\nImageName: pool.Image,\n- Networks: []servers.Network{servers.Network{UUID: \"2c731ffb-b8ac-48ac-9ccc-1f8c57fb61ce\"}},\n+ Networks: []servers.Network{servers.Network{UUID: kluster.Spec.Openstack.NetworkID}},\nServiceClient: client,\n}).Extract()\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -117,10 +117,22 @@ func (op *GroundControl) handler(key string) error {\nswitch state := tpr.Status.State; state {\ncase v1.KlusterPending:\n{\n+ if op.requiresOpenstackDiscovery(tpr) {\n+ if err := op.discoverOpenstackSpec(tpr); err != nil {\n+ glog.Errorf(\"[%v] Discovery of openstack spec failed: %s\", tpr.GetName(), err)\n+ if err := op.updateStatus(tpr, v1.KlusterError, err.Error()); err != nil {\n+ glog.Errorf(\"Failed to update status of kluster %s:%s\", tpr.GetName(), err)\n+ }\n+ return err\n+ }\n+ return nil\n+ }\n+\nglog.Infof(\"Creating Kluster %s\", tpr.GetName())\nif err := op.updateStatus(tpr, v1.KlusterCreating, \"Creating Cluster\"); err != nil {\nglog.Errorf(\"Failed to update status of kluster %s:%s\", tpr.GetName(), err)\n}\n+\nif err := op.createKluster(tpr); err != nil {\nglog.Errorf(\"Creating kluster %s failed: %s\", tpr.GetName(), err)\nif err := op.updateStatus(tpr, v1.KlusterError, err.Error()); err != nil {\n@@ -244,6 +256,54 @@ func (op *GroundControl) terminateKluster(tpr *v1.Kluster) error {\nreturn op.Clients.Kubernikus.Kubernikus().Klusters(tpr.Namespace).Delete(tpr.Name, &metav1.DeleteOptions{})\n}\n+func (op *GroundControl) requiresOpenstackDiscovery(kluster *v1.Kluster) bool {\n+ return kluster.Spec.Openstack.ProjectID == \"\" ||\n+ kluster.Spec.Openstack.NetworkID == \"\" ||\n+ kluster.Spec.Openstack.RouterID == \"\"\n+}\n+\n+func (op *GroundControl) discoverOpenstackSpec(kluster *v1.Kluster) error {\n+ glog.V(5).Infof(\"[%v] Discovering Openstack Spec\", kluster.Name)\n+\n+ routers, err := op.Clients.Openstack.GetRouters(kluster.Account())\n+ if err != nil {\n+ return err\n+ }\n+\n+ copy, err := op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{})\n+ if err != nil {\n+ return err\n+ }\n+\n+ if 
copy.Spec.Openstack.ProjectID == \"\" {\n+ copy.Spec.Openstack.ProjectID = kluster.Account()\n+ glog.V(5).Infof(\"[%v] Setting ProjectID to %v\", kluster.Name, copy.Spec.Openstack.ProjectID)\n+ }\n+\n+ if copy.Spec.Openstack.RouterID == \"\" {\n+ if len(routers) == 1 {\n+ copy.Spec.Openstack.RouterID = routers[0].ID\n+ glog.V(5).Infof(\"[%v] Setting RouterID to %v\", kluster.Name, copy.Spec.Openstack.RouterID)\n+ } else {\n+ glog.V(5).Infof(\"[%v] There's more than 1 router. Autodiscovery not possible!\")\n+ }\n+ }\n+\n+ if copy.Spec.Openstack.NetworkID == \"\" {\n+ if len(routers) == 1 {\n+ if len(routers[0].Networks) == 1 {\n+ copy.Spec.Openstack.NetworkID = routers[0].Networks[0].ID\n+ glog.V(5).Infof(\"[%v] Setting NetworkID to %v\", kluster.Name, copy.Spec.Openstack.NetworkID)\n+ } else {\n+ glog.V(5).Infof(\"[%v] There's more than 1 network on the router. Autodiscovery not possible!\")\n+ }\n+ }\n+ }\n+\n+ _, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\n+ return err\n+}\n+\nfunc serviceUsername(name string) string {\nreturn fmt.Sprintf(\"kubernikus-%s\", name)\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds persistent auto-discovery for openstack parameters
596,240
08.09.2017 14:05:26
-7,200
7ad985b41bed6657d26a87db099af37dc86fa15b
removes json tags. renames OpenstackSpec
[ { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -3,28 +3,33 @@ package v1\nimport metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\ntype NodePoolConfig struct {\n- Upgrade bool `json:\"upgrade\"`\n- Repair bool `json:\"repair\"`\n+ Upgrade bool\n+ Repair bool\n}\ntype NodePool struct {\n- Name string `json:\"name\"`\n- Size int `json:\"size\"`\n- Flavor string `json:\"flavor\"`\n- Image string `json:\"image\"`\n- Config NodePoolConfig `json:\"config\"`\n+ Name string\n+ Size int\n+ Flavor string\n+ Image string\n+ Config NodePoolConfig\n}\n-type Openstack struct {\n- ProjectID string `json:\"projectID\"`\n- RouterID string `json:\"routerID\"`\n- NetworkID string `json:\"networkID\"`\n+type OpenstackInfo struct {\n+ ProjectID string\n+ RouterID string\n+ NetworkID string\n+}\n+\n+type KubernetesInfo struct {\n+ Server string\n}\ntype KlusterSpec struct {\n- Name string `json:\"name\"`\n- Openstack Openstack `json:\"openstack,omitempty\"`\n- NodePools []NodePool `json:\"nodePools,omitempty\"`\n+ Name string\n+ OpenstackInfo OpenstackInfo\n+ KubernetesInfo KubernetesInfo\n+ NodePools []NodePool\n}\ntype KlusterState string\n@@ -39,8 +44,8 @@ const (\n)\ntype KlusterStatus struct {\n- State KlusterState `json:\"state,omitempty\"`\n- Message string `json:\"message,omitempty\"`\n+ State KlusterState\n+ Message string\n}\n// +genclient\n" }, { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -337,7 +337,7 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nName: name,\nFlavorName: pool.Flavor,\nImageName: pool.Image,\n- Networks: []servers.Network{servers.Network{UUID: kluster.Spec.Openstack.NetworkID}},\n+ Networks: []servers.Network{servers.Network{UUID: kluster.Spec.OpenstackInfo.NetworkID}},\nServiceClient: client,\n}).Extract()\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -117,8 +117,8 @@ func (op *GroundControl) handler(key string) error {\nswitch state := tpr.Status.State; state {\ncase v1.KlusterPending:\n{\n- if op.requiresOpenstackDiscovery(tpr) {\n- if err := op.discoverOpenstackSpec(tpr); err != nil {\n+ if op.requiresOpenstackInfo(tpr) {\n+ if err := op.discoverOpenstackInfo(tpr); err != nil {\nglog.Errorf(\"[%v] Discovery of openstack spec failed: %s\", tpr.GetName(), err)\nif err := op.updateStatus(tpr, v1.KlusterError, err.Error()); err != nil {\nglog.Errorf(\"Failed to update status of kluster %s:%s\", tpr.GetName(), err)\n@@ -256,14 +256,14 @@ func (op *GroundControl) terminateKluster(tpr *v1.Kluster) error {\nreturn op.Clients.Kubernikus.Kubernikus().Klusters(tpr.Namespace).Delete(tpr.Name, &metav1.DeleteOptions{})\n}\n-func (op *GroundControl) requiresOpenstackDiscovery(kluster *v1.Kluster) bool {\n- return kluster.Spec.Openstack.ProjectID == \"\" ||\n- kluster.Spec.Openstack.NetworkID == \"\" ||\n- kluster.Spec.Openstack.RouterID == \"\"\n+func (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\n+ return kluster.Spec.OpenstackInfo.ProjectID == \"\" ||\n+ kluster.Spec.OpenstackInfo.NetworkID == \"\" ||\n+ kluster.Spec.OpenstackInfo.RouterID == \"\"\n}\n-func (op *GroundControl) discoverOpenstackSpec(kluster *v1.Kluster) error {\n- glog.V(5).Infof(\"[%v] Discovering Openstack Spec\", kluster.Name)\n+func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\n+ 
glog.V(5).Infof(\"[%v] Discovering Openstack Info\", kluster.Name)\nrouters, err := op.Clients.Openstack.GetRouters(kluster.Account())\nif err != nil {\n@@ -275,25 +275,25 @@ func (op *GroundControl) discoverOpenstackSpec(kluster *v1.Kluster) error {\nreturn err\n}\n- if copy.Spec.Openstack.ProjectID == \"\" {\n- copy.Spec.Openstack.ProjectID = kluster.Account()\n- glog.V(5).Infof(\"[%v] Setting ProjectID to %v\", kluster.Name, copy.Spec.Openstack.ProjectID)\n+ if copy.Spec.OpenstackInfo.ProjectID == \"\" {\n+ copy.Spec.OpenstackInfo.ProjectID = kluster.Account()\n+ glog.V(5).Infof(\"[%v] Setting ProjectID to %v\", kluster.Name, copy.Spec.OpenstackInfo.ProjectID)\n}\n- if copy.Spec.Openstack.RouterID == \"\" {\n+ if copy.Spec.OpenstackInfo.RouterID == \"\" {\nif len(routers) == 1 {\n- copy.Spec.Openstack.RouterID = routers[0].ID\n- glog.V(5).Infof(\"[%v] Setting RouterID to %v\", kluster.Name, copy.Spec.Openstack.RouterID)\n+ copy.Spec.OpenstackInfo.RouterID = routers[0].ID\n+ glog.V(5).Infof(\"[%v] Setting RouterID to %v\", kluster.Name, copy.Spec.OpenstackInfo.RouterID)\n} else {\nglog.V(5).Infof(\"[%v] There's more than 1 router. Autodiscovery not possible!\")\n}\n}\n- if copy.Spec.Openstack.NetworkID == \"\" {\n+ if copy.Spec.OpenstackInfo.NetworkID == \"\" {\nif len(routers) == 1 {\nif len(routers[0].Networks) == 1 {\n- copy.Spec.Openstack.NetworkID = routers[0].Networks[0].ID\n- glog.V(5).Infof(\"[%v] Setting NetworkID to %v\", kluster.Name, copy.Spec.Openstack.NetworkID)\n+ copy.Spec.OpenstackInfo.NetworkID = routers[0].Networks[0].ID\n+ glog.V(5).Infof(\"[%v] Setting NetworkID to %v\", kluster.Name, copy.Spec.OpenstackInfo.NetworkID)\n} else {\nglog.V(5).Infof(\"[%v] There's more than 1 network on the router. Autodiscovery not possible!\")\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes json tags. renames OpenstackSpec
596,240
08.09.2017 14:51:04
-7,200
64fd80460db4281b869cf891b70503f020a16bb6
removes hardcoded kubernikus domain
[ { "change_type": "MODIFY", "old_path": "charts/kubernikus/templates/operator.yaml", "new_path": "charts/kubernikus/templates/operator.yaml", "diff": "@@ -28,4 +28,5 @@ spec:\n- --auth-domain={{ .Values.openstack.auth_domain }}\n- --auth-project={{ .Values.openstack.auth_project }}\n- --auth-project-domain={{ .Values.openstack.auth_project_domain }}\n+ - --kubernikus-domain={{ .Values.kubernikus.domain }}\n- --v={{ default 1 .Values.groundctl.log_level }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kubernikus/values.yaml", "new_path": "charts/kubernikus/values.yaml", "diff": "@@ -10,6 +10,9 @@ openstack:\nauth_project: \"master\"\nauth_project_domain: \"Default\"\n+kubernikus:\n+ domain: \"kubernikus.replaceme.cloud.sap\"\n+\napi:\nport: 1234\nlog_level: 1\n" }, { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -3,33 +3,33 @@ package v1\nimport metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\ntype NodePoolConfig struct {\n- Upgrade bool\n- Repair bool\n+ Upgrade bool `json:\"upgrade\"`\n+ Repair bool `json:\"repair\"`\n}\ntype NodePool struct {\n- Name string\n- Size int\n- Flavor string\n- Image string\n- Config NodePoolConfig\n+ Name string `json:\"name\"`\n+ Size int `json:\"size\"`\n+ Flavor string `json:\"flavor\"`\n+ Image string `json:\"image\"`\n+ Config NodePoolConfig `json:\"config\"`\n}\ntype OpenstackInfo struct {\n- ProjectID string\n- RouterID string\n- NetworkID string\n+ ProjectID string `json:\"projectID\"`\n+ RouterID string `json:\"routerID\"`\n+ NetworkID string `json:\"networkID\"`\n}\n-type KubernetesInfo struct {\n- Server string\n+type KubernikusInfo struct {\n+ Server string `json:\"server\"`\n}\ntype KlusterSpec struct {\n- Name string\n- OpenstackInfo OpenstackInfo\n- KubernetesInfo KubernetesInfo\n- NodePools []NodePool\n+ Name string `json:\"name\"`\n+ OpenstackInfo OpenstackInfo `json:\"openstackInfo,omitempty\"`\n+ KubernikusInfo KubernikusInfo `json:\"kubernikusInfo,omitempty\"`\n+ NodePools []NodePool `json:\"nodePools,omitempty\"`\n}\ntype KlusterState string\n@@ -44,8 +44,8 @@ const (\n)\ntype KlusterStatus struct {\n- State KlusterState\n- Message string\n+ State KlusterState `json:\"state,omitempty\"`\n+ Message string `json:\"message,omitempty\"`\n}\n// +genclient\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/operator/operator.go", "new_path": "pkg/cmd/operator/operator.go", "diff": "@@ -48,6 +48,8 @@ type Options struct {\nAuthDomain string\nAuthProject string\nAuthProjectDomain string\n+\n+ KubernikusDomain string\n}\nfunc NewOperatorOptions() *Options {\n@@ -56,6 +58,7 @@ func NewOperatorOptions() *Options {\nAuthURL: \"http://keystone.monsoon3:5000/v3\",\nAuthUsername: \"kubernikus\",\nAuthDomain: \"Default\",\n+ KubernikusDomain: \"kluster.staging.cloud.sap\",\n}\n}\n@@ -68,6 +71,8 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {\nflags.StringVar(&o.AuthDomain, \"auth-domain\", o.AuthDomain, \"Service user domain\")\nflags.StringVar(&o.AuthProject, \"auth-project\", o.AuthProject, \"Scope service user to this project\")\nflags.StringVar(&o.AuthProjectDomain, \"auth-project-domain\", o.AuthProjectDomain, \"Domain of the project\")\n+\n+ flags.StringVar(&o.KubernikusDomain, \"kubernikus-domain\", o.KubernikusDomain, \"Regional domain name for all Kubernikus clusters\")\n}\nfunc (o *Options) Validate(c *cobra.Command, args []string) error {\n@@ -97,6 +102,7 @@ func (o *Options) Run(c *cobra.Command) error {\nAuthDomain: 
o.AuthDomain,\nAuthProject: o.AuthProject,\nAuthProjectDomain: o.AuthProjectDomain,\n+ KubernikusDomain: o.KubernikusDomain,\n}\ngo controller.NewKubernikusOperator(opts).Run(stop, wg)\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -114,12 +114,25 @@ func (op *GroundControl) handler(key string) error {\nglog.Infof(\"TPR of kluster %s deleted\", key)\n} else {\ntpr := obj.(*v1.Kluster)\n+ glog.V(5).Infof(\"Handling kluster %v in state %q\", tpr.Name, tpr.Status.State)\n+\nswitch state := tpr.Status.State; state {\ncase v1.KlusterPending:\n{\nif op.requiresOpenstackInfo(tpr) {\nif err := op.discoverOpenstackInfo(tpr); err != nil {\n- glog.Errorf(\"[%v] Discovery of openstack spec failed: %s\", tpr.GetName(), err)\n+ glog.Errorf(\"[%v] Discovery of openstack parameters failed: %s\", tpr.GetName(), err)\n+ if err := op.updateStatus(tpr, v1.KlusterError, err.Error()); err != nil {\n+ glog.Errorf(\"Failed to update status of kluster %s:%s\", tpr.GetName(), err)\n+ }\n+ return err\n+ }\n+ return nil\n+ }\n+\n+ if op.requiresKubernikusInfo(tpr) {\n+ if err := op.discoverKubernikusInfo(tpr); err != nil {\n+ glog.Errorf(\"[%v] Discovery of kubernikus parameters failed: %s\", tpr.GetName(), err)\nif err := op.updateStatus(tpr, v1.KlusterError, err.Error()); err != nil {\nglog.Errorf(\"Failed to update status of kluster %s:%s\", tpr.GetName(), err)\n}\n@@ -218,8 +231,7 @@ func (op *GroundControl) updateStatus(tpr *v1.Kluster, state v1.KlusterState, me\n}\nfunc (op *GroundControl) createKluster(tpr *v1.Kluster) error {\n-\n- cluster, err := ground.NewCluster(tpr.GetName(), \"kluster.staging.cloud.sap\")\n+ cluster, err := ground.NewCluster(tpr.GetName(), op.Config.Kubernikus.Domain)\nif err != nil {\nreturn err\n}\n@@ -262,6 +274,27 @@ func (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\nkluster.Spec.OpenstackInfo.RouterID == \"\"\n}\n+func (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool {\n+ return kluster.Spec.KubernikusInfo.Server == \"\"\n+}\n+\n+func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\n+ glog.V(5).Infof(\"[%v] Discovering KubernikusInfo\", kluster.Name)\n+\n+ copy, err := op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{})\n+ if err != nil {\n+ return err\n+ }\n+\n+ if copy.Spec.KubernikusInfo.Server == \"\" {\n+ copy.Spec.KubernikusInfo.Server = fmt.Sprintf(\"%s.%s\", kluster.Spec.Name, op.Config.Kubernikus.Domain)\n+ glog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.Server)\n+ }\n+\n+ _, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\n+ return err\n+}\n+\nfunc (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\nglog.V(5).Infof(\"[%v] Discovering OpenstackInfo\", kluster.Name)\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -32,6 +32,8 @@ type KubernikusOperatorOptions struct {\nAuthDomain string\nAuthProject string\nAuthProjectDomain string\n+\n+ KubernikusDomain string\n}\ntype Clients struct {\n@@ -54,8 +56,13 @@ type HelmConfig struct {\nChartDirectory string\n}\n+type KubernikusConfig struct {\n+ Domain string\n+}\n+\ntype Config struct {\nOpenstack OpenstackConfig\n+ Kubernikus KubernikusConfig\nHelm HelmConfig\n}\n@@ -91,6 +98,9 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) 
*KubernikusOperat\nHelm: HelmConfig{\nChartDirectory: options.ChartDirectory,\n},\n+ Kubernikus: KubernikusConfig{\n+ Domain: options.KubernikusDomain,\n+ },\n},\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes hardcoded kubernikus domain
596,240
08.09.2017 15:42:01
-7,200
2f801d5330ea620dbe15535cfa6f78ff2fb3cf8d
only discover openstack parameters once and for all. persist on spec
[ { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -19,6 +19,10 @@ type OpenstackInfo struct {\nProjectID string `json:\"projectID\"`\nRouterID string `json:\"routerID\"`\nNetworkID string `json:\"networkID\"`\n+ LBSubnetID string `json:\"lbSubnetID\"`\n+ Domain string `json:\"domain\"`\n+ Username string `json:\"username\"`\n+ Password string `json:\"password\"`\n}\ntype KubernikusInfo struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -127,7 +127,7 @@ func (c *client) domainProviderFor(domain string) (*gophercloud.ProviderClient,\n}\nfunc (c *client) projectProviderFor(kluster *kubernikus_v1.Kluster) (*gophercloud.ProviderClient, error) {\n- project_id := kluster.Account()\n+ project_id := kluster.Spec.OpenstackInfo.ProjectID\nsecret_name := kluster.Name\nif c.projectProviders[project_id] != nil {\n@@ -283,7 +283,7 @@ func getRouterNetworks(client *gophercloud.ServiceClient, routerID string) ([]st\n}\nfunc (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool) ([]Node, error) {\n- project_id := kluster.Account()\n+ project_id := kluster.Spec.OpenstackInfo.RouterID\npool_id := pool.Name\nprovider, err := c.projectProviderFor(kluster)\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/certificates/files.go", "new_path": "pkg/cmd/certificates/files.go", "diff": "@@ -3,6 +3,8 @@ package certificates\nimport (\n\"errors\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n@@ -52,7 +54,12 @@ func (o *FilesOptions) Complete(args []string) error {\n}\nfunc (o *FilesOptions) Run(c *cobra.Command) error {\n- cluster, err := ground.NewCluster(o.Name, \"localdomain\")\n+ cluster, err := ground.NewCluster(\n+ &v1.Kluster{\n+ Spec: v1.KlusterSpec{\n+ Name: o.Name,\n+ },\n+ }, \"localdomain\")\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/certificates/plain.go", "new_path": "pkg/cmd/certificates/plain.go", "diff": "@@ -3,6 +3,8 @@ package certificates\nimport (\n\"errors\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n@@ -46,7 +48,12 @@ func (o *PlainOptions) Complete(args []string) error {\n}\nfunc (o *PlainOptions) Run(c *cobra.Command) error {\n- cluster, err := ground.NewCluster(o.Name, \"localdomain\")\n+ cluster, err := ground.NewCluster(\n+ &v1.Kluster{\n+ Spec: v1.KlusterSpec{\n+ Name: o.Name,\n+ },\n+ }, \"localdomain\")\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/helm/helm.go", "new_path": "pkg/cmd/helm/helm.go", "diff": "@@ -6,7 +6,7 @@ import (\n\"os\"\n\"strings\"\n- \"github.com/sapcc/kubernikus/pkg/client/openstack\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n@@ -90,16 +90,17 @@ func (o *HelmOptions) Complete(args []string) error {\nfunc (o *HelmOptions) Run(c *cobra.Command) error {\nnameA := strings.SplitN(o.Name, \".\", 2)\n- cluster, err := ground.NewCluster(nameA[0], nameA[1])\n+ cluster, err := ground.NewCluster(\n+ &v1.Kluster{\n+ Spec: v1.KlusterSpec{\n+ Name: nameA[0],\n+ },\n+ }, 
nameA[1])\nif err != nil {\nreturn err\n}\nif o.AuthURL != \"\" {\ncluster.OpenStack.AuthURL = o.AuthURL\n- oclient := openstack.NewClient(nil, o.AuthURL, o.AuthUsername, o.AuthPassword, o.AuthDomain, o.AuthProject, o.AuthProjectDomain)\n- if err := cluster.DiscoverValues(o.Name, o.ProjectID, oclient); err != nil {\n- return err\n- }\n}\nresult, err := yaml.Marshal(cluster)\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -9,6 +9,7 @@ import (\nyaml \"gopkg.in/yaml.v2\"\n+ \"github.com/Masterminds/goutils\"\n\"github.com/golang/glog\"\nmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n@@ -231,16 +232,11 @@ func (op *GroundControl) updateStatus(tpr *v1.Kluster, state v1.KlusterState, me\n}\nfunc (op *GroundControl) createKluster(tpr *v1.Kluster) error {\n- cluster, err := ground.NewCluster(tpr.GetName(), op.Config.Kubernikus.Domain)\n+ cluster, err := ground.NewCluster(tpr, op.Config.Openstack.AuthURL)\nif err != nil {\nreturn err\n}\n- cluster.OpenStack.AuthURL = op.Config.Openstack.AuthURL\n- if err := cluster.DiscoverValues(tpr.GetName(), tpr.Account(), op.Clients.Openstack); err != nil {\n- return err\n- }\n-\n//Generate helm values from cluster struct\nrawValues, err := yaml.Marshal(cluster)\nif err != nil {\n@@ -270,8 +266,12 @@ func (op *GroundControl) terminateKluster(tpr *v1.Kluster) error {\nfunc (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\nreturn kluster.Spec.OpenstackInfo.ProjectID == \"\" ||\n+ kluster.Spec.OpenstackInfo.RouterID == \"\" ||\nkluster.Spec.OpenstackInfo.NetworkID == \"\" ||\n- kluster.Spec.OpenstackInfo.RouterID == \"\"\n+ kluster.Spec.OpenstackInfo.LBSubnetID == \"\" ||\n+ kluster.Spec.OpenstackInfo.Domain == \"\" ||\n+ kluster.Spec.OpenstackInfo.Username == \"\" ||\n+ kluster.Spec.OpenstackInfo.Password == \"\"\n}\nfunc (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool {\n@@ -333,6 +333,34 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\n}\n}\n+ if copy.Spec.OpenstackInfo.LBSubnetID == \"\" {\n+ if len(routers) == 1 {\n+ if len(routers[0].Subnets) == 1 {\n+ copy.Spec.OpenstackInfo.LBSubnetID = routers[0].Subnets[0].ID\n+ glog.V(5).Infof(\"[%v] Setting LBSubnetID to %v\", kluster.Name, copy.Spec.OpenstackInfo.LBSubnetID)\n+ } else {\n+ glog.V(5).Infof(\"[%v] There's more than 1 subnet on the router. 
Autodiscovery not possible!\")\n+ }\n+ }\n+ }\n+\n+ if copy.Spec.OpenstackInfo.Domain == \"\" {\n+ glog.V(5).Infof(\"[%v] Setting domain to %v\", kluster.Name, \"kubernikus\")\n+ copy.Spec.OpenstackInfo.Domain = \"kubernikus\"\n+ }\n+\n+ if copy.Spec.OpenstackInfo.Username == \"\" {\n+ glog.V(5).Infof(\"[%v] Setting Username to %v\", kluster.Name, copy.Spec.OpenstackInfo.Username)\n+ copy.Spec.OpenstackInfo.Username = fmt.Sprintf(\"kubernikus-%s\", kluster.Name)\n+ }\n+\n+ if copy.Spec.OpenstackInfo.Password == \"\" {\n+ glog.V(5).Infof(\"[%v] Setting Password to %v\", kluster.Name, \"[redacted]\")\n+ if copy.Spec.OpenstackInfo.Password, err = goutils.RandomAscii(20); err != nil {\n+ return fmt.Errorf(\"Failed to generate password: %s\", err)\n+ }\n+ }\n+\n_, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/certificates.go", "new_path": "pkg/controller/ground/certificates.go", "diff": "@@ -153,7 +153,7 @@ func (c Certificates) all() []Bundle {\n}\n}\n-func (certs *Certificates) populateForSatellite(satellite, domain string) error {\n+func (certs *Certificates) populateForSatellite(satellite, fqSatelliteName string) error {\ncreateCA(satellite, \"Etcd Clients\", &certs.Etcd.Clients.CA)\ncreateCA(satellite, \"Etcd Peers\", &certs.Etcd.Peers.CA)\ncreateCA(satellite, \"ApiServer Clients\", &certs.ApiServer.Clients.CA)\n@@ -170,7 +170,7 @@ func (certs *Certificates) populateForSatellite(satellite, domain string) error\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n- []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", satellite, fmt.Sprintf(\"%s.%s\", satellite, domain)},\n+ []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", satellite, fqSatelliteName},\n[]net.IP{net.IPv4(127, 0, 0, 1)})\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/cluster.go", "new_path": "pkg/controller/ground/cluster.go", "diff": "package ground\nimport (\n- \"fmt\"\n-\n- \"github.com/Masterminds/goutils\"\n- \"github.com/golang/glog\"\n- \"github.com/sapcc/kubernikus/pkg/client/openstack\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n)\ntype Cluster struct {\n@@ -30,15 +26,24 @@ type OpenStack struct {\nRouterID string `yaml:\"routerID,omitempty\"`\n}\n-func NewCluster(name, domain string) (*Cluster, error) {\n+func NewCluster(kluster *v1.Kluster, authURL string) (*Cluster, error) {\ncluster := &Cluster{\nCertificates: &Certificates{},\nAPI: API{\n- IngressHost: fmt.Sprintf(\"%s.%s\", name, domain),\n+ IngressHost: kluster.Spec.KubernikusInfo.Server,\n+ },\n+ OpenStack: OpenStack{\n+ AuthURL: authURL,\n+ Username: kluster.Spec.OpenstackInfo.Username,\n+ Password: kluster.Spec.OpenstackInfo.Password,\n+ DomainName: kluster.Spec.OpenstackInfo.Domain,\n+ ProjectID: kluster.Spec.OpenstackInfo.ProjectID,\n+ LBSubnetID: kluster.Spec.OpenstackInfo.LBSubnetID,\n+ RouterID: kluster.Spec.OpenstackInfo.RouterID,\n},\n}\n- if err := cluster.Certificates.populateForSatellite(name, domain); err != nil {\n+ if err := cluster.Certificates.populateForSatellite(kluster.Spec.Name, kluster.Spec.KubernikusInfo.Server); err != nil {\nreturn cluster, err\n}\n@@ -48,41 +53,3 @@ func NewCluster(name, domain string) (*Cluster, error) {\nfunc (c Cluster) WriteConfig(persister ConfigPersister) error {\nreturn 
persister.WriteConfig(c)\n}\n-\n-func (c *Cluster) DiscoverValues(name, projectID string, oclient openstack.Client) error {\n- if c.OpenStack.Username == \"\" {\n- c.OpenStack.Username = fmt.Sprintf(\"kubernikus-%s\", name)\n- }\n- var err error\n- if c.OpenStack.Password == \"\" {\n- if c.OpenStack.Password, err = goutils.RandomAscii(20); err != nil {\n- return fmt.Errorf(\"Failed to generate password: %s\", err)\n- }\n- }\n- if c.OpenStack.DomainName == \"\" {\n- c.OpenStack.DomainName = \"Default\"\n- }\n- if c.OpenStack.ProjectID == \"\" {\n- c.OpenStack.ProjectID = projectID\n- }\n- if c.OpenStack.RouterID == \"\" || c.OpenStack.LBSubnetID == \"\" {\n- routers, err := oclient.GetRouters(projectID)\n- if err != nil {\n- return fmt.Errorf(\"Couldn't get routers for project %s: %s\", projectID, err)\n- }\n-\n- glog.V(2).Infof(\"Found routers for project %s: %#v\", projectID, routers)\n-\n- if !(len(routers) == 1 && len(routers[0].Subnets) == 1) {\n- return fmt.Errorf(\"Project needs to contain a router with exactly one subnet\")\n- }\n-\n- if c.OpenStack.RouterID == \"\" {\n- c.OpenStack.RouterID = routers[0].ID\n- }\n- if c.OpenStack.LBSubnetID == \"\" {\n- c.OpenStack.LBSubnetID = routers[0].Subnets[0].ID\n- }\n- }\n- return nil\n-}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
only discover openstack parameters once and for all. persist on spec
596,240
08.09.2017 16:26:34
-7,200
36743b8b782a50856c143c640a53c61d008aa103
adds and picks service user from spec
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -255,9 +255,9 @@ func (op *GroundControl) terminateKluster(tpr *v1.Kluster) error {\nif err != nil && !strings.Contains(grpc.ErrorDesc(err), fmt.Sprintf(`release: \"%s\" not found`, tpr.GetName())) {\nreturn err\n}\n- u := serviceUsername(tpr.GetName())\n- glog.Infof(\"Deleting openstack user %s@default\", u)\n- if err := op.Clients.Openstack.DeleteUser(u, \"default\"); err != nil {\n+\n+ glog.Infof(\"Deleting openstack user %s@%s\", tpr.Spec.OpenstackInfo.Username, tpr.Spec.OpenstackInfo.Domain)\n+ if err := op.Clients.Openstack.DeleteUser(tpr.Spec.OpenstackInfo.Username, tpr.Spec.OpenstackInfo.Domain); err != nil {\nreturn err\n}\n@@ -287,7 +287,7 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\n}\nif copy.Spec.KubernikusInfo.Server == \"\" {\n- copy.Spec.KubernikusInfo.Server = fmt.Sprintf(\"%s.%s\", kluster.Spec.Name, op.Config.Kubernikus.Domain)\n+ copy.Spec.KubernikusInfo.Server = fmt.Sprintf(\"%s.%s\", kluster.GetName(), op.Config.Kubernikus.Domain)\nglog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.Server)\n}\n@@ -364,7 +364,3 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\n_, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\nreturn err\n}\n-\n-func serviceUsername(name string) string {\n- return fmt.Sprintf(\"kubernikus-%s\", name)\n-}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds and picks service user from spec
596,240
09.09.2017 09:19:32
-7,200
74d24066b145ce249b5e526f9908faac805ea99b
fetch region via api. wtf openstack? wtf???!?
[ { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -16,17 +16,20 @@ type NodePool struct {\n}\ntype OpenstackInfo struct {\n+ AuthURL string `json:\"authURL\"`\nProjectID string `json:\"projectID\"`\nRouterID string `json:\"routerID\"`\nNetworkID string `json:\"networkID\"`\nLBSubnetID string `json:\"lbSubnetID\"`\nDomain string `json:\"domain\"`\n+ Region string `json:\"region\"`\nUsername string `json:\"username\"`\nPassword string `json:\"password\"`\n}\ntype KubernikusInfo struct {\nServer string `json:\"server\"`\n+ ServerURL string `json:\"serverURL\"`\n}\ntype KlusterSpec struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -8,7 +8,9 @@ import (\n\"github.com/gophercloud/gophercloud\"\n\"github.com/gophercloud/gophercloud/openstack\"\n\"github.com/gophercloud/gophercloud/openstack/compute/v2/servers\"\n+ \"github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/projects\"\n+ \"github.com/gophercloud/gophercloud/openstack/identity/v3/services\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens\"\n\"github.com/gophercloud/gophercloud/openstack/identity/v3/users\"\n\"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers\"\n@@ -42,6 +44,7 @@ type Client interface {\nCreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) (string, error)\nGetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\nGetProject(id string) (*Project, error)\n+ GetRegion() (string, error)\nGetRouters(project_id string) ([]Router, error)\nDeleteUser(username, domainID string) error\n}\n@@ -348,3 +351,63 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nreturn server.ID, nil\n}\n+\n+func (c *client) GetRegion() (string, error) {\n+ provider, err := c.domainProvider()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ identity, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ opts := services.ListOpts{ServiceType: \"compute\"}\n+ computeServiceID := \"\"\n+ err = services.List(identity, opts).EachPage(func(page pagination.Page) (bool, error) {\n+ serviceList, err := services.ExtractServices(page)\n+ if err != nil {\n+ return false, err\n+ }\n+\n+ if computeServiceID == \"\" {\n+ computeServiceID = serviceList[0].ID\n+ }\n+\n+ return true, nil\n+ })\n+\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ if computeServiceID == \"\" {\n+ return \"\", fmt.Errorf(\"Couldn't find a compute service. Bailing out.\")\n+ }\n+\n+ endpointOpts := endpoints.ListOpts{Availability: gophercloud.AvailabilityPublic, ServiceID: computeServiceID}\n+ region := \"\"\n+ err = endpoints.List(identity, endpointOpts).EachPage(func(page pagination.Page) (bool, error) {\n+ endpoints, err := endpoints.ExtractEndpoints(page)\n+ if err != nil {\n+ return false, err\n+ }\n+\n+ if region == \"\" {\n+ region = endpoints[0].Region\n+ }\n+\n+ return true, nil\n+ })\n+\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ if region == \"\" {\n+ return \"\", fmt.Errorf(\"Couldn't find the region. 
Bailing out.\")\n+ }\n+\n+ return region, nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -270,12 +270,16 @@ func (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\nkluster.Spec.OpenstackInfo.NetworkID == \"\" ||\nkluster.Spec.OpenstackInfo.LBSubnetID == \"\" ||\nkluster.Spec.OpenstackInfo.Domain == \"\" ||\n+ kluster.Spec.OpenstackInfo.Region == \"\" ||\nkluster.Spec.OpenstackInfo.Username == \"\" ||\n- kluster.Spec.OpenstackInfo.Password == \"\"\n+ kluster.Spec.OpenstackInfo.Password == \"\" ||\n+ kluster.Spec.OpenstackInfo.AuthURL == \"\"\n+\n}\nfunc (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool {\n- return kluster.Spec.KubernikusInfo.Server == \"\"\n+ return kluster.Spec.KubernikusInfo.Server == \"\" ||\n+ kluster.Spec.KubernikusInfo.ServerURL == \"\"\n}\nfunc (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\n@@ -291,6 +295,11 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\nglog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.Server)\n}\n+ if copy.Spec.KubernikusInfo.ServerURL == \"\" {\n+ copy.Spec.KubernikusInfo.ServerURL = fmt.Sprintf(\"https://%s.%s\", kluster.GetName(), op.Config.Kubernikus.Domain)\n+ glog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.ServerURL)\n+ }\n+\n_, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\nreturn err\n}\n@@ -349,6 +358,19 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\ncopy.Spec.OpenstackInfo.Domain = \"kubernikus\"\n}\n+ if copy.Spec.OpenstackInfo.Region == \"\" {\n+ copy.Spec.OpenstackInfo.Region, err = op.Clients.Openstack.GetRegion()\n+ if err != nil {\n+ return err\n+ }\n+ glog.V(5).Infof(\"[%v] Setting region to %v\", kluster.Name, copy.Spec.OpenstackInfo.Region)\n+ }\n+\n+ if copy.Spec.OpenstackInfo.AuthURL == \"\" {\n+ glog.V(5).Infof(\"[%v] Setting authURL to %v\", kluster.Name, op.Config.Openstack.AuthURL)\n+ copy.Spec.OpenstackInfo.AuthURL = op.Config.Openstack.AuthURL\n+ }\n+\nif copy.Spec.OpenstackInfo.Username == \"\" {\nglog.V(5).Infof(\"[%v] Setting Username to %v\", kluster.Name, copy.Spec.OpenstackInfo.Username)\ncopy.Spec.OpenstackInfo.Username = fmt.Sprintf(\"kubernikus-%s\", kluster.Name)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fetch region via api. wtf openstack? wtf???!?
596,240
09.09.2017 09:30:00
-7,200
739ad7c3ee4729788a7cbef9d9f7b51469a5ed9f
fixes debug output
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -367,13 +367,13 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\n}\nif copy.Spec.OpenstackInfo.AuthURL == \"\" {\n- glog.V(5).Infof(\"[%v] Setting authURL to %v\", kluster.Name, op.Config.Openstack.AuthURL)\ncopy.Spec.OpenstackInfo.AuthURL = op.Config.Openstack.AuthURL\n+ glog.V(5).Infof(\"[%v] Setting authURL to %v\", kluster.Name, op.Config.Openstack.AuthURL)\n}\nif copy.Spec.OpenstackInfo.Username == \"\" {\n- glog.V(5).Infof(\"[%v] Setting Username to %v\", kluster.Name, copy.Spec.OpenstackInfo.Username)\ncopy.Spec.OpenstackInfo.Username = fmt.Sprintf(\"kubernikus-%s\", kluster.Name)\n+ glog.V(5).Infof(\"[%v] Setting Username to %v\", kluster.Name, copy.Spec.OpenstackInfo.Username)\n}\nif copy.Spec.OpenstackInfo.Password == \"\" {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes debug output
596,240
09.09.2017 21:59:57
-7,200
9f463d8d642635e877fb30a2b95c3a454c636ffb
adds ignition userdata for spawning kubelets
[ { "change_type": "MODIFY", "old_path": "glide.lock", "new_path": "glide.lock", "diff": "-hash: 56cc165b7ea4bd90814a0ad72aab3be6c775f6d44a3ad9da8a14f7af14700643\n-updated: 2017-08-30T17:40:40.19949364+02:00\n+hash: 0a7c46d3f030e44a5ba4231dedd3cef50307a2f8ce1882acb2adeb6161c1f68f\n+updated: 2017-09-09T20:32:02.673298879+02:00\nimports:\n+- name: github.com/ajeddeloh/yaml\n+ version: 1072abfea31191db507785e2e0c1b8d1440d35a5\n+- name: github.com/alecthomas/units\n+ version: 6b4e7dc5e3143b85ea77909c72caf89416fc2915\n+- name: github.com/aokoli/goutils\n+ version: 3391d3790d23d03408670993e957e8f408993c34\n- name: github.com/asaskevich/govalidator\nversion: 7664702784775e51966f0885f5cd27435916517b\n- name: github.com/BurntSushi/toml\nversion: b26d9c308763d68093482582cea63d69be07a0f0\n+- name: github.com/coreos/container-linux-config-transpiler\n+ version: d42f09a1374bc318d853b53e5a31148db68a4e2a\n+ subpackages:\n+ - config\n+ - config/astyaml\n+ - config/platform\n+ - config/templating\n+ - config/types\n+ - config/types/util\n+- name: github.com/coreos/go-semver\n+ version: 5e3acbb5668c4c3deb4842615c4098eb61fb6b1e\n+ subpackages:\n+ - semver\n+- name: github.com/coreos/go-systemd\n+ version: 7c9533367ef925dc1078d75e5b7141e10da2c4e8\n+ subpackages:\n+ - dbus\n+ - unit\n+- name: github.com/coreos/ignition\n+ version: 11813c57bc05f30644bbae7891ae30a4a62e0b33\n+ subpackages:\n+ - config/types\n+ - config/v2_0/types\n+ - config/validate\n+ - config/validate/report\n- name: github.com/databus23/keystone\nversion: 12c566d59fdb198f5a6d7ad7dfbf99f2a7e09929\nsubpackages:\n@@ -32,8 +63,6 @@ imports:\nversion: dcef7f55730566d41eae5db10e7d6981829720f6\n- name: github.com/facebookgo/symwalk\nversion: 42004b9f322246749dd73ad71008b1f3160c0052\n-- name: github.com/fsnotify/fsnotify\n- version: 4da3e2cfbabc9f751898f250b49f2439785783a1\n- name: github.com/ghodss/yaml\nversion: 73d445a93680fa1a78ae23a5839bad48f32ba1ee\n- name: github.com/go-openapi/analysis\n@@ -91,11 +120,18 @@ imports:\n- name: github.com/gophercloud/gophercloud\nversion: fd552c8a657302349c44fde99e1197a9f01a97c6\nsubpackages:\n+ - internal\n- openstack\n+ - openstack/compute/v2/flavors\n+ - openstack/compute/v2/images\n+ - openstack/compute/v2/servers\n- openstack/identity/v2/tenants\n- openstack/identity/v2/tokens\n+ - openstack/identity/v3/endpoints\n- openstack/identity/v3/projects\n+ - openstack/identity/v3/services\n- openstack/identity/v3/tokens\n+ - openstack/identity/v3/users\n- openstack/networking/v2/extensions/layer3/routers\n- openstack/networking/v2/networks\n- openstack/networking/v2/ports\n@@ -108,19 +144,10 @@ imports:\nversion: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4\nsubpackages:\n- simplelru\n-- name: github.com/hashicorp/hcl\n- version: d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1\n- subpackages:\n- - hcl/ast\n- - hcl/parser\n- - hcl/scanner\n- - hcl/strconv\n- - hcl/token\n- - json/parser\n- - json/scanner\n- - json/token\n- name: github.com/howeyc/gopass\nversion: bf9dde6d0d2c004a008c27aaee91170c786f6db8\n+- name: github.com/huandu/xstrings\n+ version: d6590c0c31d16526217fa60fbd2067f7afcd78c5\n- name: github.com/imdario/mergo\nversion: 6633656539c1639d9d78127b7d47c622b5d7b6dc\n- name: github.com/inconshreveable/mousetrap\n@@ -129,8 +156,6 @@ imports:\nversion: 5b9ff866471762aa2ab2dced63c9fb6f53921342\n- name: github.com/kennygrant/sanitize\nversion: 6a0bfdde8629a3a3a7418a7eae45c54154692514\n-- name: github.com/magiconair/properties\n- version: 51463bfca2576e06c62a8504b5c0f06d61312647\n- name: 
github.com/mailru/easyjson\nversion: 44c0351a5bc860bcb2608d54aa03ea686c4e7b25\nsubpackages:\n@@ -141,12 +166,10 @@ imports:\nversion: 3391d3790d23d03408670993e957e8f408993c34\n- name: github.com/Masterminds/semver\nversion: 3f0ab6d4ab4bed1c61caf056b63a6e62190c7801\n+- name: github.com/Masterminds/sprig\n+ version: 9526be0327b26ad31aa70296a7b10704883976d5\n- name: github.com/mitchellh/mapstructure\nversion: d0303fe809921458f417bcf828397a65db30a7e4\n-- name: github.com/pelletier/go-buffruneio\n- version: c37440a7cf42ac63b919c752ca73a85067e05992\n-- name: github.com/pelletier/go-toml\n- version: fe7536c3dee2596cdd23ee9976a17c22bdaae286\n- name: github.com/pmylund/go-cache\nversion: 93d85800f2fa6bd0a739e7bd612bfa3bc008b72d\n- name: github.com/PuerkitoBio/purell\n@@ -155,36 +178,33 @@ imports:\nversion: 5bd2802263f21d8788851d5305584c82a5c75d7e\n- name: github.com/rs/cors\nversion: eabcc6af4bbe5ad3a949d36450326a2b0b9894b8\n-- name: github.com/spf13/afero\n- version: 9be650865eab0c12963d8753212f4f9c66cdcf12\n- subpackages:\n- - mem\n-- name: github.com/spf13/cast\n- version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4\n+- name: github.com/satori/go.uuid\n+ version: 879c5887cd475cd7864858769793b2ceb0d44feb\n- name: github.com/spf13/cobra\nversion: f62e98d28ab7ad31d707ba837a966378465c7b57\nsubpackages:\n- cobra\n-- name: github.com/spf13/jwalterweatherman\n- version: 0efa5202c04663c757d84f90f5219c1250baf94f\n- name: github.com/spf13/pflag\nversion: 9ff6c6923cfffbcd502984b8e0c80539a94968b7\n-- name: github.com/spf13/viper\n- version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2\n- name: github.com/tylerb/graceful\nversion: d72b0151351a13d0421b763b88f791469c4f5dc7\n- name: github.com/ugorji/go\nversion: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74\nsubpackages:\n- codec\n+- name: github.com/vincent-petithory/dataurl\n+ version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c\n- name: golang.org/x/crypto\nversion: d172538b2cfce0c13cee31e647d0367aa8cd2486\nsubpackages:\n+ - pbkdf2\n+ - scrypt\n- ssh/terminal\n- name: golang.org/x/net\nversion: f2499483f923065a842d38eb4c7f1927e6fc6e6d\nsubpackages:\n- context\n+ - context/ctxhttp\n- html\n- html/atom\n- http2\n@@ -232,14 +252,15 @@ imports:\n- bson\n- internal/json\n- name: gopkg.in/yaml.v2\n- version: a83829b6f1293c91addabc89d0571c246397bbf4\n+ version: 53feefa2559fb8dfa8d81baad31be332c97d6c77\n- name: k8s.io/apimachinery\n- version: 1fd2e63a9a370677308a42f24fd40c86438afddf\n+ version: 917740426ad66ff818da4809990480bcc0786a77\nsubpackages:\n- pkg/api/equality\n- pkg/api/errors\n- pkg/api/meta\n- pkg/api/resource\n+ - pkg/api/testing\n- pkg/apimachinery\n- pkg/apimachinery/announced\n- pkg/apimachinery/registered\n@@ -271,21 +292,24 @@ imports:\n- pkg/util/httpstream/spdy\n- pkg/util/intstr\n- pkg/util/json\n+ - pkg/util/mergepatch\n- pkg/util/net\n- pkg/util/rand\n- pkg/util/remotecommand\n- pkg/util/runtime\n- pkg/util/sets\n+ - pkg/util/strategicpatch\n- pkg/util/validation\n- pkg/util/validation/field\n- pkg/util/wait\n- pkg/util/yaml\n- pkg/version\n- pkg/watch\n+ - third_party/forked/golang/json\n- third_party/forked/golang/netutil\n- third_party/forked/golang/reflect\n- name: k8s.io/client-go\n- version: d92e8497f71b7b4e0494e5bd204b48d34bd6f254\n+ version: 2a227f04f328fe506bd562f50b4d2a175fce80c5\nsubpackages:\n- discovery\n- discovery/fake\n@@ -415,7 +439,7 @@ imports:\n- util/integer\n- util/workqueue\n- name: k8s.io/code-generator\n- version: bf449d588b78132603c5e2dba7286e7d335abedc\n+ version: 2e958d2f6f666e291ecc50467d58f0cc81e13299\nrepo: 
https://github.com/sttts/code-generator\n- name: k8s.io/gengo\nversion: 9e661e9308f078838e266cca1c673922088c0ea4\n" }, { "change_type": "MODIFY", "old_path": "glide.yaml", "new_path": "glide.yaml", "diff": "@@ -25,3 +25,6 @@ import:\nrepo: https://github.com/sttts/code-generator\n- package: k8s.io/gengo\nversion: 9e661e9308f078838e266cca1c673922088c0ea4\n+- package: github.com/coreos/container-linux-config-transpiler\n+ version: v0.4.2\n+\n" }, { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -41,7 +41,7 @@ type client struct {\n}\ntype Client interface {\n- CreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) (string, error)\n+ CreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool, []byte) (string, error)\nGetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\nGetProject(id string) (*Project, error)\nGetRegion() (string, error)\n@@ -322,7 +322,7 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.No\nreturn nodes, nil\n}\n-func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool) (string, error) {\n+func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool, userData []byte) (string, error) {\nprovider, err := c.projectProviderFor(kluster)\nif err != nil {\nreturn \"\", err\n@@ -341,6 +341,7 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nFlavorName: pool.Flavor,\nImageName: pool.Image,\nNetworks: []servers.Network{servers.Network{UUID: kluster.Spec.OpenstackInfo.NetworkID}},\n+ UserData: userData,\nServiceClient: client,\n}).Extract()\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -7,6 +7,7 @@ import (\n\"github.com/golang/glog\"\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"github.com/sapcc/kubernikus/pkg/templates\"\n\"k8s.io/apimachinery/pkg/labels\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n\"k8s.io/client-go/tools/cache\"\n@@ -126,6 +127,11 @@ func (launchctl *LaunchControl) reconcile(key string) error {\nkluster := obj.(*v1.Kluster)\nglog.V(2).Infof(\"Handling kluster %v\", kluster.Name)\n+ _, err = templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\n+ if err != nil {\n+ glog.Errorf(\"%v\", err)\n+ }\n+\nfor _, pool := range kluster.Spec.NodePools {\nerr := launchctl.syncPool(kluster, &pool)\nif err != nil {\n@@ -159,7 +165,12 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nfunc (launchctl *LaunchControl) createNode(kluster *v1.Kluster, pool *v1.NodePool) error {\nglog.V(2).Infof(\"Pool %v/%v: Creating new node\", kluster.Name, pool.Name)\n- id, err := launchctl.Clients.Openstack.CreateNode(kluster, pool)\n+ userdata, err := templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\n+ if err != nil {\n+ glog.Errorf(\"Ignition userdata couldn't be generated: %v\", err)\n+ }\n+\n+ id, err := launchctl.Clients.Openstack.CreateNode(kluster, pool, userdata)\nif err != nil {\nreturn err\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/templates/ignition.go", "diff": "+package templates\n+\n+import (\n+ \"bytes\"\n+ \"encoding/json\"\n+ \"fmt\"\n+ \"text/template\"\n+\n+ \"github.com/Masterminds/sprig\"\n+ \"github.com/coreos/container-linux-config-transpiler/config\"\n+ \"github.com/coreos/container-linux-config-transpiler/config/platform\"\n+ \"github.com/golang/glog\"\n+ 
\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ \"k8s.io/client-go/kubernetes\"\n+)\n+\n+type ignition struct {\n+}\n+\n+var Ignition = &ignition{}\n+\n+func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface) ([]byte, error) {\n+ secret, err := client.CoreV1().Secrets(kluster.Namespace).Get(kluster.GetName(), metav1.GetOptions{})\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ tmpl, err := template.New(\"node\").Funcs(sprig.TxtFuncMap()).Parse(Node)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ data := struct {\n+ ApiserverClientsCA string\n+ ApiserverNodesCA string\n+ ApiserverNodesCAKkey string\n+ ApiserverURL string\n+ OpenstackAuthURL string\n+ OpenstackUsername string\n+ OpenstackPassword string\n+ OpenstackDomain string\n+ OpenstackRegion string\n+ OpenstackLBSubnetID string\n+ OpenstackRouterID string\n+ }{\n+ ApiserverClientsCA: string(secret.Data[\"apiserver-clients-ca.pem\"]),\n+ ApiserverNodesCA: string(secret.Data[\"apiserver-nodes-ca.pem\"]),\n+ ApiserverNodesCAKkey: string(secret.Data[\"apiserver-nodes-ca-key.pem\"]),\n+ ApiserverURL: kluster.Spec.KubernikusInfo.ServerURL,\n+ OpenstackAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\n+ OpenstackUsername: kluster.Spec.OpenstackInfo.Username,\n+ OpenstackPassword: kluster.Spec.OpenstackInfo.Password,\n+ OpenstackDomain: kluster.Spec.OpenstackInfo.Domain,\n+ OpenstackRegion: kluster.Spec.OpenstackInfo.Region,\n+ OpenstackLBSubnetID: kluster.Spec.OpenstackInfo.LBSubnetID,\n+ OpenstackRouterID: kluster.Spec.OpenstackInfo.RouterID,\n+ }\n+\n+ var buffer bytes.Buffer\n+ err = tmpl.Execute(&buffer, data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ glog.V(5).Infof(\"IgnitionData: %v\", data)\n+ glog.V(5).Infof(\"IgnitionYAML: %v\", string(buffer.Bytes()))\n+\n+ ignitionConfig, ast, report := config.Parse(buffer.Bytes())\n+ if len(report.Entries) > 0 {\n+ glog.V(2).Infof(\"Something odd while transpiling ignition file: %v\", report.String())\n+ if report.IsFatal() {\n+ return nil, fmt.Errorf(\"Couldn't transpile ignition file: %v\", report.String())\n+ }\n+ }\n+\n+ ignitionConfig2_0, report := config.ConvertAs2_0(ignitionConfig, platform.OpenStackMetadata, ast)\n+ if len(report.Entries) > 0 {\n+ glog.V(2).Infof(\"Something odd while convertion ignition config: %v\", report.String())\n+ if report.IsFatal() {\n+ return nil, fmt.Errorf(\"Couldn't convert ignition config: %v\", report.String())\n+ }\n+ }\n+\n+ var dataOut []byte\n+ dataOut, err = json.MarshalIndent(&ignitionConfig2_0, \"\", \" \")\n+ dataOut = append(dataOut, '\\n')\n+\n+ glog.V(5).Infof(\"IgnitionJSON: %v\", string(dataOut))\n+\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ return dataOut, nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/templates/node.go", "diff": "+package templates\n+\n+var Node = `\n+passwd:\n+ users:\n+ - name: core\n+ password_hash: xyTGJkB462ewk\n+ ssh_authorized_keys:\n+ - \"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvFapuevZeHFpFn438XMjvEQYd0wt7+tzUdAkMiSd007Tx1h79Xm9ZziDDUe4W6meinVOq93MAS/ER27hoVWGo2H/vn/Cz5M8xr2j5rQODnrF3RmfrJTbZAWaDN0JTq2lFjmCHhZJNhr+VQP1uw4z2ofMBP6MLybnLmm9ukzxFYZqCCyfEEUTCMA9SWywtTpGQp8VLM4INCxzBSCuyt3SO6PBvJSo4HoKg/sLvmRwpCVZth48PI0EUbJ72wp88Cw3bv8CLce2TOkLMwkE6NRN55w2aOyqP1G3vixHa6YcVaLlkQhJoJsBwE3rX5603y2KjOhMomqHfXxXn/3GKTWlsQ== michael.j.schmidt@gmail.com\"\n+\n+locksmith:\n+ reboot_strategy: \"reboot\"\n+\n+systemd:\n+ units:\n+ - name: ccloud-metadata.service\n+ contents: |\n+ [Unit]\n+ Description=Converged 
Cloud Metadata Agent\n+\n+ [Service]\n+ Type=oneshot\n+ ExecStart=/usr/bin/coreos-metadata --provider=openstack-metadata --attributes=/run/metadata/coreos --ssh-keys=core --hostname=/etc/hostname\n+ - name: ccloud-metadata-hostname.service\n+ enable: true\n+ contents: |\n+ [Unit]\n+ Description=Workaround for coreos-metadata hostname bug\n+ Requires=ccloud-metadata.service\n+ After=ccloud-metadata.service\n+\n+ [Service]\n+ Type=oneshot\n+ EnvironmentFile=/run/metadata/coreos\n+ ExecStart=/usr/bin/hostnamectl set-hostname ${COREOS_OPENSTACK_HOSTNAME}\n+\n+ [Install]\n+ WantedBy=multi-user.target\n+ - name: kubelet.service\n+ enable: true\n+ contents: |\n+ [Unit]\n+ Description=Kubelet via Hyperkube ACI\n+\n+ [Service]\n+ Environment=\"RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \\\n+ --volume=resolv,kind=host,source=/etc/resolv.conf \\\n+ --mount volume=resolv,target=/etc/resolv.conf \\\n+ --volume var-log,kind=host,source=/var/log \\\n+ --mount volume=var-log,target=/var/log\"\n+ Environment=\"KUBELET_IMAGE_TAG=v1.7.5_coreos.0\"\n+ Environment=\"KUBELET_IMAGE_URL=quay.io/coreos/hyperkube\"\n+ ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\n+ ExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests\n+ ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid\n+ ExecStart=/usr/lib/coreos/kubelet-wrapper \\\n+ --kubeconfig=/etc/kubernetes/kubeconfig \\\n+ --cloud-config=/etc/kubernetes/openstack/openstack.config \\\n+ --cloud-provider=openstack \\\n+ --require-kubeconfig \\\n+ --network-plugin=kubenet \\\n+ --lock-file=/var/run/lock/kubelet.lock \\\n+ --exit-on-lock-contention \\\n+ --pod-manifest-path=/etc/kubernetes/manifests \\\n+ --allow-privileged \\\n+ --cluster_domain=cluster.local \\\n+ --client-ca-file=/etc/kubernetes/certs/kube-clients/ca.pem \\\n+ --anonymous-auth=false\n+ ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid\n+ Restart=always\n+ RestartSec=10\n+\n+ [Install]\n+ WantedBy=multi-user.target\n+\n+storage:\n+ files:\n+ - path: /etc/kubernetes/certs/kube-clients/ca.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+{{ .ApiserverClientsCA | indent 10 }}\n+ - path: /etc/kubernetes/certs/kube-clients/nodes.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+{{ .ApiserverNodesCA | indent 10 }}\n+ - path: /etc/kubernetes/certs/kube-clients/nodes-key.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+{{ .ApiserverNodesCAKkey | indent 10 }}\n+ - path: /etc/kubernetes/kubeconfig\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+ apiVersion: v1\n+ kind: Config\n+ clusters:\n+ - name: local\n+ cluster:\n+ server: {{ .ApiserverURL }}\n+ contexts:\n+ - name: local\n+ context:\n+ cluster: local\n+ user: local\n+ current-context: local\n+ users:\n+ - name: local\n+ user:\n+ client-certificate: /etc/kubernetes/certs/kube-clients/nodes.pem\n+ client-key: /etc/kubernetes/certs/kube-clients/nodes-key.pem\n+ - path: /etc/kubernetes/openstack/openstack.config\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+ [Global]\n+ auth-url = {{ .OpenstackAuthURL }}\n+ username = {{ .OpenstackUsername }}\n+ password = {{ .OpenstackPassword }}\n+ domain-name = {{ .OpenstackDomain }}\n+ region = {{ .OpenstackRegion }}\n+\n+ [LoadBalancer]\n+ lb-version=v2\n+ subnet-id = {{ .OpenstackLBSubnetID }}\n+ create-monitor = yes\n+ monitor-delay = 1m\n+ monitor-timeout = 30s\n+ monitor-max-retries = 3\n+\n+ [BlockStorage]\n+ trust-device-path = no\n+\n+ [Route]\n+ router-id = {{ .OpenstackRouterID 
}}\n+`\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds ignition userdata for spawning kubelets
596,240
10.09.2017 13:56:51
-7,200
f8cfbddbbd2f161587070cf072929f51997360be
adds gophercloud extension to make sense of openstack server states (wtf?)
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -75,6 +75,62 @@ type Node struct {\nID string\nName string\nStatus string\n+ TaskState string\n+ VMState string\n+ PowerState int\n+}\n+\n+func (n *Node) Ready() bool {\n+ // 0: NOSTATE\n+ // 1: RUNNING\n+ // 3: PAUSED\n+ // 4: SHUTDOWN\n+ // 6: CRASHED\n+ // 7: SUSPENDED\n+ if n.PowerState != 1 {\n+ if n.TaskState != \"spawning\" {\n+ return false\n+ }\n+ }\n+\n+ //ACTIVE = 'active'\n+ //BUILDING = 'building'\n+ //PAUSED = 'paused'\n+ //SUSPENDED = 'suspended'\n+ //STOPPED = 'stopped'\n+ //RESCUED = 'rescued'\n+ //RESIZED = 'resized'\n+ //SOFT_DELETED = 'soft-delete'\n+ //DELETED = 'deleted'\n+ //ERROR = 'error'\n+ //SHELVED = 'shelved'\n+ //SHELVED_OFFLOADED = 'shelved_offloaded'\n+\n+ if n.VMState != \"active\" || n.VMState != \"building\" {\n+ return false\n+ }\n+\n+ // https://github.com/openstack/nova/blob/be3a66781f7fd58e5c5c0fe89b33f8098cfb0f0d/nova/objects/fields.py#L884\n+ if n.TaskState == \"deleting\" {\n+ return false\n+ }\n+\n+ return true\n+}\n+\n+type StateExt struct {\n+ TaskState string `json:\"OS-EXT-STS:task_state\"`\n+ VMState string `json:\"OS-EXT-STS:vm_state\"`\n+ PowerState int `json:\"OS-EXT-STS:power_state\"`\n+}\n+\n+type ServerExt struct {\n+ servers.Server\n+ StateExt\n+}\n+\n+func (r *StateExt) UnmarshalJSON(b []byte) error {\n+ return nil\n}\nfunc NewClient(informers informers.SharedInformerFactory, authURL, username, password, domain, project, projectDomain string) Client {\n@@ -305,15 +361,15 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.No\nopts := servers.ListOpts{Name: prefix}\nservers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {\n- serverList, err := servers.ExtractServers(page)\n+ serverList, err := ExtractServers(page)\nif err != nil {\nglog.V(5).Infof(\"Couldn't extract server %v\", err)\nreturn false, err\n}\nfor _, s := range serverList {\n- glog.V(5).Infof(\"Found node %v\", s.ID)\n- nodes = append(nodes, Node{ID: s.ID, Name: s.Name, Status: s.Status})\n+ node := Node{ID: s.ID, Name: s.Name, Status: s.Status, TaskState: s.TaskState, VMState: s.VMState}\n+ nodes = append(nodes, node)\n}\nreturn true, nil\n@@ -412,3 +468,9 @@ func (c *client) GetRegion() (string, error) {\nreturn region, nil\n}\n+\n+func ExtractServers(r pagination.Page) ([]ServerExt, error) {\n+ var s []ServerExt\n+ err := servers.ExtractServersInto(r, &s)\n+ return s, err\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -7,6 +7,7 @@ import (\n\"github.com/golang/glog\"\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"github.com/sapcc/kubernikus/pkg/client/openstack\"\n\"github.com/sapcc/kubernikus/pkg/templates\"\n\"k8s.io/apimachinery/pkg/labels\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n@@ -127,10 +128,10 @@ func (launchctl *LaunchControl) reconcile(key string) error {\nkluster := obj.(*v1.Kluster)\nglog.V(2).Infof(\"Handling kluster %v\", kluster.Name)\n- _, err = templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\n- if err != nil {\n- glog.Errorf(\"%v\", err)\n- }\n+ //_, err = templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\n+ //if err != nil {\n+ // glog.Errorf(\"%v\", err)\n+ //}\nfor _, pool := range kluster.Spec.NodePools {\nerr := launchctl.syncPool(kluster, &pool)\n@@ -148,15 +149,17 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, 
pool *v1.NodePool)\nreturn fmt.Errorf(\"Couldn't list nodes for %v/%v: %v\", kluster.Name, pool.Name, err)\n}\n+ ready := ready(nodes)\n+\nswitch {\n- case len(nodes) < pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n+ case ready < pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, ready, pool.Size)\nreturn launchctl.createNode(kluster, pool)\n- case len(nodes) > pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n+ case ready > pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, ready, pool.Size)\nreturn launchctl.terminateNode(kluster, nodes[0].ID)\n- case len(nodes) == pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, len(nodes), pool.Size)\n+ case ready == pool.Size:\n+ glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, ready, pool.Size)\n}\nreturn nil\n@@ -208,3 +211,14 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\nlaunchctl.queue.Forget(key)\nglog.Infof(\"Dropping kluster %q out of the queue. Too many retries: %v\", key, err)\n}\n+\n+func ready(nodes []openstack.Node) int {\n+ ready := 0\n+ for _, n := range nodes {\n+ if n.Ready() {\n+ ready = ready + 1\n+ }\n+ }\n+\n+ return ready\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds gophercloud extension to make sense of openstack server states (wtf?)
596,240
11.09.2017 12:59:11
-7,200
d5fe8c785c0cce6144c7976e6735de09c708b1ed
generates and adds a bootstrap token to apiserver/kubelets
[ { "change_type": "ADD", "old_path": null, "new_path": "charts/kube-master/templates/_token.csv.tpl", "diff": "+{{/* vim: set filetype=gotexttmpl: */ -}}\n+{{ required \"missing kubernikus.boostrapToken\" .Values.kubernikus.bootstrapToken }},kubelet-bootstrap,10001,\"system:bootstrappers\"\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -56,6 +56,12 @@ spec:\nitems:\n- key: openstack.config\npath: openstack.config\n+ - name: bootstrap\n+ secret:\n+ secretName: {{ include \"master.fullname\" . }}\n+ items:\n+ - key: token.csv\n+ path: token.csv\ncontainers:\n- name: apiserver\nimage: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n@@ -70,6 +76,7 @@ spec:\n- --cloud-config=/etc/kubernetes/cloudprovider/openstack.config\n- --cloud-provider=openstack\n- --experimental-bootstrap-token-auth=true\n+ - --token-auth-file=/etc/kubernetes/bootstrap/token.csv\n- --runtime-config=rbac.authorization.k8s.io/v1alpha1,extensions/v1beta1=true,extensions/v1beta1/thirdpartyresources=true\n- --service-cluster-ip-range={{ .Values.serviceCIDR }}\n#Cert Spratz\n@@ -91,5 +98,8 @@ spec:\n- mountPath: /etc/kubernetes/cloudprovider\nname: cloudprovider\nreadOnly: true\n+ - mountPath: /etc/kubernetes/bootstrap\n+ name: bootstrap\n+ readOnly: true\nresources:\n{{ toYaml .Values.api.resources | indent 12 }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/secrets.yaml", "new_path": "charts/kube-master/templates/secrets.yaml", "diff": "@@ -16,6 +16,9 @@ data:\nopenstack-domain-name: {{ .Values.openstack.domainName | b64enc }}\nopenstack-lb-subnet-id: {{ .Values.openstack.lbSubnetID | b64enc }}\nopenstack-router-id: {{ .Values.openstack.routerID | b64enc }}\n+\n+ token.csv: {{ include (print $.Template.BasePath \"/_token.csv.tpl\") . | b64enc }}\n+\n{{- if empty .Values.certsSecretName }}\n{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" }}\n{{ . }}: {{ required (printf \"missing cert/key: %s\" .) (index $.Values.certs .) 
| b64enc -}}\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/values.yaml", "new_path": "charts/kube-master/values.yaml", "diff": "@@ -17,6 +17,9 @@ openstack: {}\n#lbSubnetID:\n#routerID:\n+kubernikus: {}\n+ #bootstrapToken\n+\n# specify a different certsSecretName if you want to use\n# an exiting secret\n# certsSecretName:\n" }, { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -30,6 +30,7 @@ type OpenstackInfo struct {\ntype KubernikusInfo struct {\nServer string `json:\"server\"`\nServerURL string `json:\"serverURL\"`\n+ BootstrapToken string `json:\"bootstrapToken\"`\n}\ntype KlusterSpec struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -279,7 +279,8 @@ func (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\nfunc (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool {\nreturn kluster.Spec.KubernikusInfo.Server == \"\" ||\n- kluster.Spec.KubernikusInfo.ServerURL == \"\"\n+ kluster.Spec.KubernikusInfo.ServerURL == \"\" ||\n+ kluster.Spec.KubernikusInfo.BootstrapToken == \"\"\n}\nfunc (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\n@@ -300,6 +301,15 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\nglog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.ServerURL)\n}\n+ if copy.Spec.KubernikusInfo.BootstrapToken == \"\" {\n+ token, err := goutils.Random(16, 32, 127, true, true)\n+ if err != nil {\n+ return fmt.Errorf(\"Failed to generate bootstrap token: %s\", err)\n+ }\n+ copy.Spec.KubernikusInfo.BootstrapToken = strings.ToLower(token)\n+ glog.V(5).Infof(\"[%v] Setting bootstrap token to %v\", kluster.Name, copy.Spec.KubernikusInfo.BootstrapToken)\n+ }\n+\n_, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy)\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/cluster.go", "new_path": "pkg/controller/ground/cluster.go", "diff": "@@ -8,6 +8,7 @@ type Cluster struct {\nCertificates *Certificates `yaml:\"certs\"`\nAPI API `yaml:\"api,omitempty\"`\nOpenStack OpenStack\n+ Kubernikus Kubernikus\n}\ntype API struct {\n@@ -26,6 +27,10 @@ type OpenStack struct {\nRouterID string `yaml:\"routerID,omitempty\"`\n}\n+type Kubernikus struct {\n+ BootstrapToken string `yaml:\"bootstrapToken,omitempty\"`\n+}\n+\nfunc NewCluster(kluster *v1.Kluster, authURL string) (*Cluster, error) {\ncluster := &Cluster{\nCertificates: &Certificates{},\n@@ -41,6 +46,9 @@ func NewCluster(kluster *v1.Kluster, authURL string) (*Cluster, error) {\nLBSubnetID: kluster.Spec.OpenstackInfo.LBSubnetID,\nRouterID: kluster.Spec.OpenstackInfo.RouterID,\n},\n+ Kubernikus: Kubernikus{\n+ BootstrapToken: kluster.Spec.KubernikusInfo.BootstrapToken,\n+ },\n}\nif err := cluster.Certificates.populateForSatellite(kluster.Spec.Name, kluster.Spec.KubernikusInfo.Server); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -54,10 +54,10 @@ systemd:\nExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n- --kubeconfig=/etc/kubernetes/kubeconfig \\\n--cloud-config=/etc/kubernetes/openstack/openstack.config \\\n--cloud-provider=openstack \\\n--require-kubeconfig \\\n+ 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap/kubeconfig \\\n--network-plugin=kubenet \\\n--lock-file=/var/run/lock/kubelet.lock \\\n--exit-on-lock-contention \\\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
generates and adds a bootstrap token to apiserver/kubelets
596,240
11.09.2017 14:39:57
-7,200
be74215d74edd26db9ef5572a306f51a6afa1823
ensures kluster TPR is registered
[ { "change_type": "MODIFY", "old_path": "pkg/client/kubernetes/client.go", "new_path": "pkg/client/kubernetes/client.go", "diff": "package kubernetes\nimport (\n+ \"time\"\n+\n\"github.com/golang/glog\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ apierrors \"k8s.io/apimachinery/pkg/api/errors\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ \"k8s.io/apimachinery/pkg/util/wait\"\n\"k8s.io/client-go/kubernetes\"\n+ \"k8s.io/client-go/pkg/apis/extensions/v1beta1\"\n\"k8s.io/client-go/rest\"\n\"k8s.io/client-go/tools/clientcmd\"\nclientcmdapiv1 \"k8s.io/client-go/tools/clientcmd/api/v1\"\n@@ -37,6 +44,14 @@ func NewClient(kubeconfig string) (kubernetes.Interface, error) {\nglog.V(3).Infof(\"Using Kubernetes Api at %s\", config.Host)\n+ if err := ensureTPR(clientset); err != nil {\n+ return nil, err\n+ }\n+\n+ if err := waitForTPR(clientset); err != nil {\n+ return nil, err\n+ }\n+\nreturn clientset, nil\n}\n@@ -74,3 +89,34 @@ func NewClientConfigV1(name, user, url string, key, cert, ca []byte) clientcmdap\n},\n}\n}\n+\n+func ensureTPR(clientset kubernetes.Interface) error {\n+ tpr := &v1beta1.ThirdPartyResource{\n+ ObjectMeta: metav1.ObjectMeta{\n+ Name: \"kluster.\" + v1.GroupName,\n+ },\n+ Versions: []v1beta1.APIVersion{\n+ {Name: v1.SchemeGroupVersion.Version},\n+ },\n+ Description: \"Managed kubernetes cluster\",\n+ }\n+\n+ _, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Create(tpr)\n+ if err != nil && !apierrors.IsAlreadyExists(err) {\n+ return err\n+ }\n+ return nil\n+}\n+\n+func waitForTPR(clientset kubernetes.Interface) error {\n+ return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {\n+ _, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Get(\"kluster.\"+v1.GroupName, metav1.GetOptions{})\n+ if err == nil {\n+ return true, nil\n+ }\n+ if apierrors.IsNotFound(err) {\n+ return false, nil\n+ }\n+ return false, err\n+ })\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
ensures kluster TPR is registered
596,240
11.09.2017 17:59:34
-7,200
08ad2ee258faf3205a555192bfac0d7ce4098524
adds scheduling nodes to be healthy also
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -88,7 +88,7 @@ func (n *Node) Ready() bool {\n// 6: CRASHED\n// 7: SUSPENDED\nif n.PowerState != 1 {\n- if n.TaskState != \"spawning\" {\n+ if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" {\nreturn true\n}\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds scheduling nodes to be healthy also
596,240
11.09.2017 18:00:36
-7,200
84b02a349e9a0afdfdd2b31269b925984430effd
use bootstrap token for initial authentication
[ { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -32,10 +32,12 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\n}\ndata := struct {\n+ TLSCA string\nApiserverClientsCA string\nApiserverNodesCA string\nApiserverNodesCAKkey string\nApiserverURL string\n+ BootstrapToken string\nOpenstackAuthURL string\nOpenstackUsername string\nOpenstackPassword string\n@@ -44,10 +46,12 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\nOpenstackLBSubnetID string\nOpenstackRouterID string\n}{\n+ TLSCA: string(secret.Data[\"tls-ca.pem\"]),\nApiserverClientsCA: string(secret.Data[\"apiserver-clients-ca.pem\"]),\nApiserverNodesCA: string(secret.Data[\"apiserver-nodes-ca.pem\"]),\nApiserverNodesCAKkey: string(secret.Data[\"apiserver-nodes-ca-key.pem\"]),\nApiserverURL: kluster.Spec.KubernikusInfo.ServerURL,\n+ BootstrapToken: kluster.Spec.KubernikusInfo.BootstrapToken,\nOpenstackAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\nOpenstackUsername: kluster.Spec.OpenstackInfo.Username,\nOpenstackPassword: kluster.Spec.OpenstackInfo.Password,\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -81,19 +81,13 @@ storage:\ncontents:\ninline: |-\n{{ .ApiserverClientsCA | indent 10 }}\n- - path: /etc/kubernetes/certs/kube-clients/nodes.pem\n+ - path: /etc/kubernetes/certs/tls-ca.pem\nfilesystem: root\nmode: 0644\ncontents:\ninline: |-\n-{{ .ApiserverNodesCA | indent 10 }}\n- - path: /etc/kubernetes/certs/kube-clients/nodes-key.pem\n- filesystem: root\n- mode: 0644\n- contents:\n- inline: |-\n-{{ .ApiserverNodesCAKkey | indent 10 }}\n- - path: /etc/kubernetes/kubeconfig\n+{{ .TLSCA | indent 10 }}\n+ - path: /etc/kubernetes/bootstrap/kubeconfig\nfilesystem: root\nmode: 0644\ncontents:\n@@ -103,6 +97,7 @@ storage:\nclusters:\n- name: local\ncluster:\n+ certificate-authority: /etc/kubernetes/certs/tls-ca.pem\nserver: {{ .ApiserverURL }}\ncontexts:\n- name: local\n@@ -113,8 +108,7 @@ storage:\nusers:\n- name: local\nuser:\n- client-certificate: /etc/kubernetes/certs/kube-clients/nodes.pem\n- client-key: /etc/kubernetes/certs/kube-clients/nodes-key.pem\n+ token: {{ .BootstrapToken }}\n- path: /etc/kubernetes/openstack/openstack.config\nfilesystem: root\nmode: 0644\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
use bootstrap token for initial authentication
596,240
11.09.2017 19:44:37
-7,200
fe77c3def26fb9fea620e4f1f0827bcda0230c14
add networking state as benign as well
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -88,7 +88,7 @@ func (n *Node) Ready() bool {\n// 6: CRASHED\n// 7: SUSPENDED\nif n.PowerState != 1 {\n- if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" {\n+ if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" || n.TaskState != \"networking\" {\nreturn true\n}\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add networking state as benign as well
596,240
11.09.2017 19:54:03
-7,200
ebcbab7b13efa3aa2505658398161ae7757f125c
negate all logic for ready state detection
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -87,10 +87,14 @@ func (n *Node) Ready() bool {\n// 4: SHUTDOWN\n// 6: CRASHED\n// 7: SUSPENDED\n- if n.PowerState != 1 {\n- if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" || n.TaskState != \"networking\" {\n- return true\n+ if n.PowerState == 0 {\n+ if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" || n.TaskState != \"networking\" || n.TaskState != \"block_device_mapping\" {\n+ return false\n+ }\n}\n+\n+ if n.PowerState > 1 {\n+ return false\n}\n//ACTIVE = 'active'\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
negate all logic for ready state detection
596,240
12.09.2017 14:59:59
-7,200
2b09825f384d7005a2098fc70103b50e6d784296
naturally we need the clients CA here
[ { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -33,9 +33,7 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\ndata := struct {\nTLSCA string\n- ApiserverClientsCA string\n- ApiserverNodesCA string\n- ApiserverNodesCAKkey string\n+ KubeletClientsCA string\nApiserverURL string\nBootstrapToken string\nOpenstackAuthURL string\n@@ -47,9 +45,7 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\nOpenstackRouterID string\n}{\nTLSCA: string(secret.Data[\"tls-ca.pem\"]),\n- ApiserverClientsCA: string(secret.Data[\"apiserver-clients-ca.pem\"]),\n- ApiserverNodesCA: string(secret.Data[\"apiserver-nodes-ca.pem\"]),\n- ApiserverNodesCAKkey: string(secret.Data[\"apiserver-nodes-ca-key.pem\"]),\n+ KubeletClientsCA: string(secret.Data[\"kubelet-clients-ca.pem\"]),\nApiserverURL: kluster.Spec.KubernikusInfo.ServerURL,\nBootstrapToken: kluster.Spec.KubernikusInfo.BootstrapToken,\nOpenstackAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -64,7 +64,7 @@ systemd:\n--pod-manifest-path=/etc/kubernetes/manifests \\\n--allow-privileged \\\n--cluster_domain=cluster.local \\\n- --client-ca-file=/etc/kubernetes/certs/kube-clients/ca.pem \\\n+ --client-ca-file=/etc/kubernetes/certs/kubelet-clients-ca.pem \\\n--anonymous-auth=false\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid\nRestart=always\n@@ -75,12 +75,12 @@ systemd:\nstorage:\nfiles:\n- - path: /etc/kubernetes/certs/kube-clients/ca.pem\n+ - path: /etc/kubernetes/certs/kubelet-clients-ca.pem\nfilesystem: root\nmode: 0644\ncontents:\ninline: |-\n-{{ .ApiserverClientsCA | indent 10 }}\n+{{ .KubeletClientsCA | indent 10 }}\n- path: /etc/kubernetes/certs/tls-ca.pem\nfilesystem: root\nmode: 0644\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
naturally we need the clients CA here
596,240
12.09.2017 15:00:39
-7,200
fe7ef38cb1f98e5bd377a8867cde1b1303933670
this needs to be without the apiserver prefix. it is not included in the generated certs
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/service.yaml", "new_path": "charts/kube-master/templates/service.yaml", "diff": "apiVersion: v1\nkind: Service\nmetadata:\n- name: {{ include \"master.fullname\" . }}-apiserver\n+ name: {{ include \"master.fullname\" . }}\nlabels:\nchart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\nrelease: {{ .Release.Name }}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
this needs to be without the apiserver prefix. it is not included in the generated certs
596,240
12.09.2017 15:01:11
-7,200
50e9e24e2cc057bf526219a0a8e98c951a6c7128
matches service name without -apiserver
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/ingress.yaml", "new_path": "charts/kube-master/templates/ingress.yaml", "diff": "@@ -17,6 +17,6 @@ spec:\npaths:\n- path: /\nbackend:\n- serviceName: {{ include \"master.fullname\" . }}-apiserver\n+ serviceName: {{ include \"master.fullname\" . }}\nservicePort: 6443\n{{- end }}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
matches service name without -apiserver
596,240
12.09.2017 15:06:49
-7,200
9a66400bd3ac183b1bc1985e5584c5a76ecf4531
uses ingress fqdn. hack until we have this figured out correctly
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/configmap.yaml", "new_path": "charts/kube-master/templates/configmap.yaml", "diff": "@@ -14,7 +14,7 @@ data:\n- name: local\ncluster:\ncertificate-authority: /etc/kubernetes/certs/tls-ca.pem\n- server: https://{{ include \"master.fullname\" . }}-apiserver:6443\n+ server: https://{{ .Values.api.ingressHost }}\ncontexts:\n- name: local\ncontext:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
uses ingress fqdn. hack until we have this figured out correctly
596,240
12.09.2017 15:28:49
-7,200
2cd7478b72a6482c097eeff8868435fd6568373d
do not verify kubelets
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -38,8 +38,6 @@ spec:\npath: etcd-clients-apiserver.pem\n- key: etcd-clients-apiserver-key.pem\npath: etcd-clients-apiserver-key.pem\n- - key: kubelet-clients-ca.pem\n- path: kubelet-clients-ca.pem\n- key: kubelet-clients-apiserver.pem\npath: kubelet-clients-apiserver.pem\n- key: kubelet-clients-apiserver-key.pem\n@@ -84,7 +82,6 @@ spec:\n- --etcd-cafile=/etc/kubernetes/certs/etcd-clients-ca.pem\n- --etcd-certfile=/etc/kubernetes/certs/etcd-clients-apiserver.pem\n- --etcd-keyfile=/etc/kubernetes/certs/etcd-clients-apiserver-key.pem\n- - --kubelet-certificate-authority=/etc/kubernetes/certs/kubelet-clients-ca.pem\n- --kubelet-client-certificate=/etc/kubernetes/certs/kubelet-clients-apiserver.pem\n- --kubelet-client-key=/etc/kubernetes/certs/kubelet-clients-apiserver-key.pem\n- --tls-ca-file=/etc/kubernetes/certs/tls-ca.pem\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
do not verify kubelets
596,240
12.09.2017 16:14:10
-7,200
18d53374d237c44ae3ba356370a119646934e1d7
ok, this is it. this is the state sieve that works. maybe. hopefully
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -81,16 +81,20 @@ type Node struct {\n}\nfunc (n *Node) Ready() bool {\n+ glog.V(6).Infof(\"[%v] PowerState: %v, VMState: %v, TaskState: %v\", n.Name, n.PowerState, n.VMState, n.TaskState)\n// 0: NOSTATE\n// 1: RUNNING\n// 3: PAUSED\n// 4: SHUTDOWN\n// 6: CRASHED\n// 7: SUSPENDED\n- if n.PowerState == 0 {\n- if n.TaskState != \"spawning\" || n.TaskState != \"scheduling\" || n.TaskState != \"networking\" || n.TaskState != \"block_device_mapping\" {\n- return false\n+ if n.TaskState == \"spawning\" || n.TaskState == \"scheduling\" || n.TaskState == \"networking\" || n.TaskState == \"block_device_mapping\" {\n+ return true\n}\n+\n+ // https://github.com/openstack/nova/blob/be3a66781f7fd58e5c5c0fe89b33f8098cfb0f0d/nova/objects/fields.py#L884\n+ if n.TaskState == \"deleting\" {\n+ return false\n}\nif n.PowerState > 1 {\n@@ -110,12 +114,7 @@ func (n *Node) Ready() bool {\n//SHELVED = 'shelved'\n//SHELVED_OFFLOADED = 'shelved_offloaded'\n- if n.VMState != \"active\" || n.VMState != \"building\" {\n- return false\n- }\n-\n- // https://github.com/openstack/nova/blob/be3a66781f7fd58e5c5c0fe89b33f8098cfb0f0d/nova/objects/fields.py#L884\n- if n.TaskState == \"deleting\" {\n+ if !(n.VMState == \"active\" || n.VMState == \"building\") {\nreturn false\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
ok, this is it. this is the state sieve that works. maybe. hopefully
596,240
12.09.2017 16:14:34
-7,200
e6fee5683c0a36e46bdcd86f78e1731d890872a7
tunes logging down
[ { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -63,8 +63,8 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\nreturn nil, err\n}\n- glog.V(5).Infof(\"IgnitionData: %v\", data)\n- glog.V(5).Infof(\"IgnitionYAML: %v\", string(buffer.Bytes()))\n+ glog.V(6).Infof(\"IgnitionData: %v\", data)\n+ glog.V(6).Infof(\"IgnitionYAML: %v\", string(buffer.Bytes()))\nignitionConfig, ast, report := config.Parse(buffer.Bytes())\nif len(report.Entries) > 0 {\n@@ -86,7 +86,7 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\ndataOut, err = json.MarshalIndent(&ignitionConfig2_0, \"\", \" \")\ndataOut = append(dataOut, '\\n')\n- glog.V(5).Infof(\"IgnitionJSON: %v\", string(dataOut))\n+ glog.V(6).Infof(\"IgnitionJSON: %v\", string(dataOut))\nif err != nil {\nreturn nil, err\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
tunes logging down
596,240
12.09.2017 17:03:35
-7,200
7b0aa6afd7be7e9aff963874e825c676d86d18c8
adds delete node function
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -42,6 +42,7 @@ type client struct {\ntype Client interface {\nCreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool, []byte) (string, error)\n+ DeleteNode(*kubernikus_v1.Kluster, string) error\nGetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\nGetProject(id string) (*Project, error)\nGetRegion() (string, error)\n@@ -412,6 +413,26 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nreturn server.ID, nil\n}\n+func (c *client) DeleteNode(kluster *kubernikus_v1.Kluster, ID string) error {\n+ provider, err := c.projectProviderFor(kluster)\n+ if err != nil {\n+ return err\n+ }\n+\n+ client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return err\n+ }\n+\n+ err = servers.Delete(client, ID).ExtractErr()\n+ if err != nil {\n+ glog.V(5).Infof(\"Couldn't delete node %v: %v\", kluster.Name, err)\n+ return err\n+ }\n+\n+ return nil\n+}\n+\nfunc (c *client) GetRegion() (string, error) {\nprovider, err := c.domainProvider()\nif err != nil {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds delete node function
596,240
12.09.2017 17:03:57
-7,200
400a1845fcf135570fe049807128c5ca02af3c1e
delete clusters. make state aware
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -121,17 +121,16 @@ func (launchctl *LaunchControl) reconcile(key string) error {\nreturn fmt.Errorf(\"Failed to fetch key %s from cache: %s\", key, err)\n}\nif !exists {\n- glog.Infof(\"Kluster %s deleted in the meantime\", key)\n+ glog.Infof(\"[%v] Kluster deleted in the meantime\", key)\nreturn nil\n}\nkluster := obj.(*v1.Kluster)\n- glog.V(2).Infof(\"Handling kluster %v\", kluster.Name)\n+ glog.V(5).Infof(\"[%v] Reconciling\", kluster.Name)\n- //_, err = templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\n- //if err != nil {\n- // glog.Errorf(\"%v\", err)\n- //}\n+ if !(kluster.Status.State == v1.KlusterReady || kluster.Status.State == v1.KlusterTerminating) {\n+ return fmt.Errorf(\"[%v] Kluster is not yet ready. Requeuing.\", kluster.Name)\n+ }\nfor _, pool := range kluster.Spec.NodePools {\nerr := launchctl.syncPool(kluster, &pool)\n@@ -146,27 +145,39 @@ func (launchctl *LaunchControl) reconcile(key string) error {\nfunc (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool) error {\nnodes, err := launchctl.Clients.Openstack.GetNodes(kluster, pool)\nif err != nil {\n- return fmt.Errorf(\"Couldn't list nodes for %v/%v: %v\", kluster.Name, pool.Name, err)\n+ return fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n+ }\n+\n+ if kluster.Status.State == v1.KlusterTerminating && toBeTerminated(nodes) > 0 {\n+ glog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\n+ for _, node := range nodes {\n+ err := launchctl.terminateNode(kluster, node.ID)\n+ if err != nil {\n+ return err\n+ }\n+ }\n+\n+ return nil\n}\nready := ready(nodes)\nswitch {\ncase ready < pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, ready, pool.Size)\nreturn launchctl.createNode(kluster, pool)\ncase ready > pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, ready, pool.Size)\nreturn launchctl.terminateNode(kluster, nodes[0].ID)\ncase ready == pool.Size:\n- glog.V(3).Infof(\"Pool %v/%v: Running %v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. All good. 
Doing nothing.\", kluster.Name, pool.Name, ready, pool.Size)\n}\nreturn nil\n}\nfunc (launchctl *LaunchControl) createNode(kluster *v1.Kluster, pool *v1.NodePool) error {\n- glog.V(2).Infof(\"Pool %v/%v: Creating new node\", kluster.Name, pool.Name)\n+ glog.V(2).Infof(\"[%v] Pool %v: Creating new node\", kluster.Name, pool.Name)\nuserdata, err := templates.Ignition.GenerateNode(kluster, launchctl.Clients.Kubernetes)\nif err != nil {\n@@ -178,13 +189,18 @@ func (launchctl *LaunchControl) createNode(kluster *v1.Kluster, pool *v1.NodePoo\nreturn err\n}\n- glog.V(2).Infof(\"Pool %v/%v: Created node %v.\", kluster.Name, pool.Name, id)\n+ glog.V(2).Infof(\"[%v]Pool %v: Created node %v.\", kluster.Name, pool.Name, id)\nlaunchctl.requeue(kluster)\nreturn nil\n}\nfunc (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, id string) error {\n+ err := launchctl.Clients.Openstack.DeleteNode(kluster, id)\n+ if err != nil {\n+ return err\n+ }\n+\nlaunchctl.requeue(kluster)\nreturn nil\n}\n@@ -200,7 +216,7 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\n// This controller retries 5 times if something goes wrong. After that, it stops trying.\nif launchctl.queue.NumRequeues(key) < 5 {\n- glog.Errorf(\"Error while managing nodes for kluster %q: %v\", key, err)\n+ glog.V(6).Infof(\"Error while managing nodes for kluster %q: %v\", key, err)\n// Re-enqueue the key rate limited. Based on the rate limiter on the\n// queue and the re-enqueue history, the key will be processed later again.\n@@ -209,7 +225,7 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\n}\nlaunchctl.queue.Forget(key)\n- glog.Infof(\"Dropping kluster %q out of the queue. Too many retries: %v\", key, err)\n+ glog.V(5).Infof(\"[%v] Dropping out of the queue. Too many retries...\", key)\n}\nfunc ready(nodes []openstack.Node) int {\n@@ -222,3 +238,16 @@ func ready(nodes []openstack.Node) int {\nreturn ready\n}\n+\n+func toBeTerminated(nodes []openstack.Node) int {\n+ toBeTerminated := 0\n+ for _, n := range nodes {\n+ if n.TaskState == \"deleting\" {\n+ continue\n+ }\n+\n+ toBeTerminated = toBeTerminated + 1\n+ }\n+\n+ return toBeTerminated\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
delete clusters. make state aware
596,240
12.09.2017 17:07:41
-7,200
2f09d6758609f3628fb5f1ee350c1f55a955d3ca
exit early before more nodes are being spawned
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -148,7 +148,8 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n}\n- if kluster.Status.State == v1.KlusterTerminating && toBeTerminated(nodes) > 0 {\n+ if kluster.Status.State == v1.KlusterTerminating {\n+ if toBeTerminated(nodes) > 0 {\nglog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\nfor _, node := range nodes {\nerr := launchctl.terminateNode(kluster, node.ID)\n@@ -156,6 +157,7 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn err\n}\n}\n+ }\nreturn nil\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
exit early before more nodes are being spawned
596,240
12.09.2017 18:40:27
-7,200
90eedb68496a9972e6709a17b6fbe007de22112e
enables Node authorizers. this puts kubelets into the system:nodes group
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -70,7 +70,7 @@ spec:\n- --secure-port=6443\n- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota\n- --allow-privileged=true\n- - --authorization-mode=RBAC\n+ - --authorization-mode=Node,RBAC\n- --cloud-config=/etc/kubernetes/cloudprovider/openstack.config\n- --cloud-provider=openstack\n- --experimental-bootstrap-token-auth=true\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
enables Node authorizers. this puts kubelets into the system:nodes group
596,240
12.09.2017 18:41:10
-7,200
9771330a5d52c258f58b5a52a80688d9688660d9
looks like this needs to be on after all
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/controller-manager.yaml", "new_path": "charts/kube-master/templates/controller-manager.yaml", "diff": "@@ -70,6 +70,7 @@ spec:\n- --cluster-signing-cert-file=/etc/kubernetes/certs/apiserver-nodes-ca.pem\n- --cluster-signing-key-file=/etc/kubernetes/certs/apiserver-nodes-ca-key.pem\n- --configure-cloud-routes=true\n+ - --controllers=*,bootstrapsigner,tokencleaner\n- --kubeconfig=/etc/kubernetes/config/kubeconfig\n- --leader-elect=false\n- --root-ca-file=/etc/kubernetes/certs/apiserver-clients-ca.pem\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
looks like this needs to be on after all
596,240
12.09.2017 18:42:18
-7,200
16b35f1701b7413b93308f986e620586307bcf45
gargrgl. with certain chars like `\` in the password we get authentication errors. something is interpreting this. helm i'm looking at you
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -414,7 +414,7 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\nif copy.Spec.OpenstackInfo.Password == \"\" {\nglog.V(5).Infof(\"[%v] Setting Password to %v\", kluster.Name, \"[redacted]\")\n- if copy.Spec.OpenstackInfo.Password, err = goutils.RandomAscii(20); err != nil {\n+ if copy.Spec.OpenstackInfo.Password, err = goutils.Random(20, 32, 127, true, true); err != nil {\nreturn fmt.Errorf(\"Failed to generate password: %s\", err)\n}\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
gargrgl. with certain chars like `\` in the password we get authentication errors. something is interpreting this. helm i'm looking at you
596,240
12.09.2017 21:56:32
-7,200
ff00a76e39784e522309b8914ea1ac744e1624c7
seed cluster roles for kubelet bootstrap
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -423,6 +423,21 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error {\nreturn err\n}\n+func (op *GroundControl) seedClusterRoles(kluster *v1.Kluster) error {\n+ glog.V(5).Infof(\"[%v] Seeding ClusterRoles and ClusterRoleBindings\", kluster.Name)\n+ //client := op.Clients.KubernetesFor(kluster)\n+\n+ //if err := ground.SeedAllowBootstrapTokensToPostCSRs(client); err != nil {\n+ // return err\n+ //}\n+\n+ //if err := ground.SeedAutoApproveNodeBootstrapTokens(client); err != nil {\n+ // return err\n+ //}\n+\n+ return nil\n+}\n+\nfunc (op *GroundControl) podAdd(obj interface{}) {\npod := obj.(*api_v1.Pod)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/ground/bootstrap.go", "diff": "+package ground\n+\n+import (\n+ \"fmt\"\n+\n+ apierrors \"k8s.io/apimachinery/pkg/api/errors\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ clientset \"k8s.io/client-go/kubernetes\"\n+ rbac \"k8s.io/client-go/pkg/apis/rbac/v1beta1\"\n+)\n+\n+func SeedAllowBootstrapTokensToPostCSRs(client clientset.Interface) error {\n+ return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ ObjectMeta: metav1.ObjectMeta{\n+ Name: \"kubernikus:kubelet-bootstrap\",\n+ },\n+ RoleRef: rbac.RoleRef{\n+ APIGroup: rbac.GroupName,\n+ Kind: \"ClusterRole\",\n+ Name: \"system:node-bootstrapper\",\n+ },\n+ Subjects: []rbac.Subject{\n+ {\n+ Kind: rbac.GroupKind,\n+ Name: \"system:bootstrappers\",\n+ },\n+ },\n+ })\n+}\n+\n+func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {\n+ err := CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\n+ ObjectMeta: metav1.ObjectMeta{\n+ Name: \"kubernikus:approve-node-client-csr\",\n+ },\n+ Rules: []rbac.PolicyRule{\n+ rbac.NewRule(\"create\").Groups(\"certificates.k8s.io\").Resources(\"certificatesigningrequests/nodeclient\").RuleOrDie(),\n+ },\n+ })\n+ if err != nil {\n+ return err\n+ }\n+\n+ return CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n+ ObjectMeta: metav1.ObjectMeta{\n+ Name: \"kubernikus:node-client-csr-autoapprove\",\n+ },\n+ RoleRef: rbac.RoleRef{\n+ APIGroup: rbac.GroupName,\n+ Kind: \"ClusterRole\",\n+ Name: \"kubernikus:kubelet-bootstrap\",\n+ },\n+ Subjects: []rbac.Subject{\n+ {\n+ Kind: \"Group\",\n+ Name: \"system:bootstrappers\",\n+ },\n+ },\n+ })\n+}\n+\n+func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {\n+ if _, err := client.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create RBAC clusterrolebinding: %v\", err)\n+ }\n+\n+ if _, err := client.RbacV1beta1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil {\n+ return fmt.Errorf(\"unable to update RBAC clusterrolebinding: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n+func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {\n+ if _, err := client.RbacV1beta1().ClusterRoles().Create(clusterRole); err != nil {\n+ if !apierrors.IsAlreadyExists(err) {\n+ return fmt.Errorf(\"unable to create RBAC clusterrole: %v\", err)\n+ }\n+\n+ if _, err := client.RbacV1beta1().ClusterRoles().Update(clusterRole); err != nil {\n+ return fmt.Errorf(\"unable to update RBAC clusterrole: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
seed cluster roles for kubelet bootstrap
596,240
13.09.2017 11:14:38
-7,200
76643ff4ec90f8bbf7afb54da06442bec772d03d
use rest client to delete TPR. fixes
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -287,7 +287,12 @@ func (op *GroundControl) terminateKluster(tpr *v1.Kluster) error {\nreturn err\n}\n- return op.Clients.Kubernikus.Kubernikus().Klusters(tpr.Namespace).Delete(tpr.Name, &metav1.DeleteOptions{})\n+ return op.Clients.Kubernikus.Discovery().RESTClient().Delete().AbsPath(\"apis/kubernikus.sap.cc/v1\").\n+ Namespace(tpr.Namespace).\n+ Resource(\"klusters\").\n+ Name(tpr.Name).\n+ Do().\n+ Error()\n}\nfunc (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
use rest client to delete TPR. fixes #36
596,240
13.09.2017 17:26:44
-7,200
dab0d650c113096a772e492a9fdd89a8b5e3244c
crud operations for kluster/spec/nodepools. fixes
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -23,6 +23,17 @@ type createCluster struct {\nfunc (d *createCluster) Handle(params operations.CreateClusterParams, principal *models.Principal) middleware.Responder {\nname := *params.Body.Name\n+\n+ nodePools := make([]v1.NodePool, len(params.Body.Spec.NodePools))\n+ for _, pPool := range params.Body.Spec.NodePools {\n+ nodePools = append(nodePools, v1.NodePool{\n+ Name: pPool.Name,\n+ Size: int(pPool.Size),\n+ Flavor: pPool.Flavor,\n+ Image: pPool.Image,\n+ })\n+ }\n+\nkluster := &v1.Kluster{\nObjectMeta: metav1.ObjectMeta{\nName: fmt.Sprintf(\"%s-%s\", name, principal.Account),\n@@ -31,7 +42,7 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\n},\nSpec: v1.KlusterSpec{\nName: name,\n- NodePools: []v1.NodePool{},\n+ NodePools: nodePools,\n},\nStatus: v1.KlusterStatus{\nState: v1.KlusterPending,\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/update_cluster.go", "new_path": "pkg/api/handlers/update_cluster.go", "diff": "@@ -21,7 +21,42 @@ type updateCluster struct {\nfunc (d *updateCluster) Handle(params operations.UpdateClusterParams, principal *models.Principal) middleware.Responder {\nkluster, err := editCluster(d.Kubernikus.Kubernikus().Klusters(d.Namespace), principal, params.Name, func(kluster *v1.Kluster) {\n- //TODO: currently no field to update\n+ // Update Sizes\n+ for _, pPool := range params.Body.Spec.NodePools {\n+ isNewPool := true\n+\n+ for _, kPool := range kluster.Spec.NodePools {\n+ if pPool.Name == kPool.Name {\n+ kPool.Size = int(pPool.Size)\n+ isNewPool = false\n+ }\n+ }\n+\n+ if isNewPool {\n+ kluster.Spec.NodePools = append(kluster.Spec.NodePools, v1.NodePool{\n+ Name: pPool.Name,\n+ Size: int(pPool.Size),\n+ Flavor: pPool.Flavor,\n+ Image: pPool.Image,\n+ })\n+ }\n+ }\n+\n+ for i, kPool := range kluster.Spec.NodePools {\n+ isDeleted := true\n+ for _, pPool := range params.Body.Spec.NodePools {\n+ if pPool.Name == kPool.Name {\n+ isDeleted = false\n+ break\n+ }\n+ }\n+ if isDeleted {\n+ // wtf? 
I want my ruby back...\n+ kluster.Spec.NodePools[i] = kluster.Spec.NodePools[len(kluster.Spec.NodePools)-1]\n+ kluster.Spec.NodePools = kluster.Spec.NodePools[:len(kluster.Spec.NodePools)-1]\n+ }\n+ }\n+\n})\nif err != nil {\nif apierrors.IsNotFound(err) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -40,6 +40,25 @@ func editCluster(client kubernikusv1.KlusterInterface, principal *models.Princip\n}\n+func ClusterSpecNodePoolItems(k *v1.Kluster) []*models.ClusterSpecNodePoolsItems0 {\n+ items := make([]*models.ClusterSpecNodePoolsItems0, int64(len(k.Spec.NodePools)))\n+ for i, nodePool := range k.Spec.NodePools {\n+ items[i] = &models.ClusterSpecNodePoolsItems0{\n+ Name: nodePool.Name,\n+ Image: nodePool.Image,\n+ Flavor: nodePool.Flavor,\n+ Size: int64(nodePool.Size),\n+ }\n+ }\n+ return items\n+}\n+\nfunc clusterModelFromTPR(k *v1.Kluster) *models.Cluster {\n- return &models.Cluster{Name: swag.String(k.Spec.Name), Status: string(k.Status.State)}\n+ return &models.Cluster{\n+ Name: swag.String(k.Spec.Name),\n+ Spec: &models.ClusterSpec{\n+ NodePools: ClusterSpecNodePoolItems(k),\n+ },\n+ Status: string(k.Status.State),\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/models/cluster.go", "new_path": "pkg/api/models/cluster.go", "diff": "@@ -6,6 +6,8 @@ package models\n// Editing this file might prove futile when you re-run the swagger generate command\nimport (\n+ \"strconv\"\n+\nstrfmt \"github.com/go-openapi/strfmt\"\n\"github.com/go-openapi/errors\"\n@@ -22,6 +24,9 @@ type Cluster struct {\n// Pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$\nName *string `json:\"name\"`\n+ // spec\n+ Spec *ClusterSpec `json:\"spec,omitempty\"`\n+\n// status of the cluster\nStatus string `json:\"status,omitempty\"`\n}\n@@ -35,6 +40,11 @@ func (m *Cluster) Validate(formats strfmt.Registry) error {\nres = append(res, err)\n}\n+ if err := m.validateSpec(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif len(res) > 0 {\nreturn errors.CompositeValidationError(res...)\n}\n@@ -54,6 +64,25 @@ func (m *Cluster) validateName(formats strfmt.Registry) error {\nreturn nil\n}\n+func (m *Cluster) validateSpec(formats strfmt.Registry) error {\n+\n+ if swag.IsZero(m.Spec) { // not required\n+ return nil\n+ }\n+\n+ if m.Spec != nil {\n+\n+ if err := m.Spec.Validate(formats); err != nil {\n+ if ve, ok := err.(*errors.Validation); ok {\n+ return ve.ValidateName(\"spec\")\n+ }\n+ return err\n+ }\n+ }\n+\n+ return nil\n+}\n+\n// MarshalBinary interface implementation\nfunc (m *Cluster) MarshalBinary() ([]byte, error) {\nif m == nil {\n@@ -71,3 +100,135 @@ func (m *Cluster) UnmarshalBinary(b []byte) error {\n*m = res\nreturn nil\n}\n+\n+// ClusterSpec cluster spec\n+// swagger:model ClusterSpec\n+type ClusterSpec struct {\n+\n+ // node pools\n+ NodePools []*ClusterSpecNodePoolsItems0 `json:\"nodePools\"`\n+}\n+\n+// Validate validates this cluster spec\n+func (m *ClusterSpec) Validate(formats strfmt.Registry) error {\n+ var res []error\n+\n+ if err := m.validateNodePools(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if len(res) > 0 {\n+ return errors.CompositeValidationError(res...)\n+ }\n+ return nil\n+}\n+\n+func (m *ClusterSpec) validateNodePools(formats strfmt.Registry) error {\n+\n+ if swag.IsZero(m.NodePools) { // not required\n+ return nil\n+ }\n+\n+ for i := 0; i < len(m.NodePools); i++ {\n+\n+ if swag.IsZero(m.NodePools[i]) { // not required\n+ continue\n+ }\n+\n+ if m.NodePools[i] != nil 
{\n+\n+ if err := m.NodePools[i].Validate(formats); err != nil {\n+ if ve, ok := err.(*errors.Validation); ok {\n+ return ve.ValidateName(\"spec\" + \".\" + \"nodePools\" + \".\" + strconv.Itoa(i))\n+ }\n+ return err\n+ }\n+ }\n+\n+ }\n+\n+ return nil\n+}\n+\n+// MarshalBinary interface implementation\n+func (m *ClusterSpec) MarshalBinary() ([]byte, error) {\n+ if m == nil {\n+ return nil, nil\n+ }\n+ return swag.WriteJSON(m)\n+}\n+\n+// UnmarshalBinary interface implementation\n+func (m *ClusterSpec) UnmarshalBinary(b []byte) error {\n+ var res ClusterSpec\n+ if err := swag.ReadJSON(b, &res); err != nil {\n+ return err\n+ }\n+ *m = res\n+ return nil\n+}\n+\n+// ClusterSpecNodePoolsItems0 cluster spec node pools items0\n+// swagger:model ClusterSpecNodePoolsItems0\n+type ClusterSpecNodePoolsItems0 struct {\n+\n+ // flavor\n+ Flavor string `json:\"flavor,omitempty\"`\n+\n+ // image\n+ Image string `json:\"image,omitempty\"`\n+\n+ // name\n+ // Pattern: ^[a-z]([a-z0-9]*)?$\n+ Name string `json:\"name,omitempty\"`\n+\n+ // size\n+ Size int64 `json:\"size,omitempty\"`\n+}\n+\n+// Validate validates this cluster spec node pools items0\n+func (m *ClusterSpecNodePoolsItems0) Validate(formats strfmt.Registry) error {\n+ var res []error\n+\n+ if err := m.validateName(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if len(res) > 0 {\n+ return errors.CompositeValidationError(res...)\n+ }\n+ return nil\n+}\n+\n+func (m *ClusterSpecNodePoolsItems0) validateName(formats strfmt.Registry) error {\n+\n+ if swag.IsZero(m.Name) { // not required\n+ return nil\n+ }\n+\n+ if err := validate.Pattern(\"name\", \"body\", string(m.Name), `^[a-z]([a-z0-9]*)?$`); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+// MarshalBinary interface implementation\n+func (m *ClusterSpecNodePoolsItems0) MarshalBinary() ([]byte, error) {\n+ if m == nil {\n+ return nil, nil\n+ }\n+ return swag.WriteJSON(m)\n+}\n+\n+// UnmarshalBinary interface implementation\n+func (m *ClusterSpecNodePoolsItems0) UnmarshalBinary(b []byte) error {\n+ var res ClusterSpecNodePoolsItems0\n+ if err := swag.ReadJSON(b, &res); err != nil {\n+ return err\n+ }\n+ *m = res\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/rest/embedded_spec.go", "new_path": "pkg/api/rest/embedded_spec.go", "diff": "@@ -234,6 +234,30 @@ func init() {\n\"type\": \"string\",\n\"pattern\": \"^[a-z]([-a-z0-9]*[a-z0-9])?$\"\n},\n+ \"spec\": {\n+ \"properties\": {\n+ \"nodePools\": {\n+ \"type\": \"array\",\n+ \"items\": {\n+ \"properties\": {\n+ \"flavor\": {\n+ \"type\": \"string\"\n+ },\n+ \"image\": {\n+ \"type\": \"string\"\n+ },\n+ \"name\": {\n+ \"type\": \"string\",\n+ \"pattern\": \"^[a-z]([a-z0-9]*)?$\"\n+ },\n+ \"size\": {\n+ \"type\": \"integer\"\n+ }\n+ }\n+ }\n+ }\n+ }\n+ },\n\"status\": {\n\"description\": \"status of the cluster\",\n\"type\": \"string\"\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
crud operations for kluster/spec/nodepools. fixes #31
596,240
14.09.2017 09:55:24
-7,200
bd7d607d8f1000db4df0674829fca90d412f738e
extends status object with nodepools status. breaking change for kluster state. sorry esther...
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -40,7 +40,7 @@ func editCluster(client kubernikusv1.KlusterInterface, principal *models.Princip\n}\n-func ClusterSpecNodePoolItems(k *v1.Kluster) []*models.ClusterSpecNodePoolsItems0 {\n+func clusterSpecNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterSpecNodePoolsItems0 {\nitems := make([]*models.ClusterSpecNodePoolsItems0, int64(len(k.Spec.NodePools)))\nfor i, nodePool := range k.Spec.NodePools {\nitems[i] = &models.ClusterSpecNodePoolsItems0{\n@@ -53,12 +53,27 @@ func ClusterSpecNodePoolItems(k *v1.Kluster) []*models.ClusterSpecNodePoolsItems\nreturn items\n}\n+func clusterStatusNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterStatusNodePoolsItems0 {\n+ items := make([]*models.ClusterStatusNodePoolsItems0, int64(len(k.Spec.NodePools)))\n+ for i, nodePool := range k.Spec.NodePools {\n+ items[i] = &models.ClusterStatusNodePoolsItems0{\n+ Name: nodePool.Name,\n+ Size: int64(nodePool.Size),\n+ Ready: int64(nodePool.Size), // TODO\n+ }\n+ }\n+ return items\n+}\n+\nfunc clusterModelFromTPR(k *v1.Kluster) *models.Cluster {\nreturn &models.Cluster{\nName: swag.String(k.Spec.Name),\nSpec: &models.ClusterSpec{\n- NodePools: ClusterSpecNodePoolItems(k),\n+ NodePools: clusterSpecNodePoolItemsFromTPR(k),\n+ },\n+ Status: &models.ClusterStatus{\n+ Kluster: string(k.Status.State),\n+ NodePools: clusterStatusNodePoolItemsFromTPR(k),\n},\n- Status: string(k.Status.State),\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/models/cluster.go", "new_path": "pkg/api/models/cluster.go", "diff": "@@ -27,8 +27,8 @@ type Cluster struct {\n// spec\nSpec *ClusterSpec `json:\"spec,omitempty\"`\n- // status of the cluster\n- Status string `json:\"status,omitempty\"`\n+ // status\n+ Status *ClusterStatus `json:\"status,omitempty\"`\n}\n// Validate validates this cluster\n@@ -45,6 +45,11 @@ func (m *Cluster) Validate(formats strfmt.Registry) error {\nres = append(res, err)\n}\n+ if err := m.validateStatus(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif len(res) > 0 {\nreturn errors.CompositeValidationError(res...)\n}\n@@ -83,6 +88,25 @@ func (m *Cluster) validateSpec(formats strfmt.Registry) error {\nreturn nil\n}\n+func (m *Cluster) validateStatus(formats strfmt.Registry) error {\n+\n+ if swag.IsZero(m.Status) { // not required\n+ return nil\n+ }\n+\n+ if m.Status != nil {\n+\n+ if err := m.Status.Validate(formats); err != nil {\n+ if ve, ok := err.(*errors.Validation); ok {\n+ return ve.ValidateName(\"status\")\n+ }\n+ return err\n+ }\n+ }\n+\n+ return nil\n+}\n+\n// MarshalBinary interface implementation\nfunc (m *Cluster) MarshalBinary() ([]byte, error) {\nif m == nil {\n@@ -232,3 +256,116 @@ func (m *ClusterSpecNodePoolsItems0) UnmarshalBinary(b []byte) error {\n*m = res\nreturn nil\n}\n+\n+// ClusterStatus cluster status\n+// swagger:model ClusterStatus\n+type ClusterStatus struct {\n+\n+ // status of the cluster\n+ Kluster string `json:\"kluster,omitempty\"`\n+\n+ // node pools\n+ NodePools []*ClusterStatusNodePoolsItems0 `json:\"nodePools\"`\n+}\n+\n+// Validate validates this cluster status\n+func (m *ClusterStatus) Validate(formats strfmt.Registry) error {\n+ var res []error\n+\n+ if err := m.validateNodePools(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if len(res) > 0 {\n+ return errors.CompositeValidationError(res...)\n+ }\n+ return nil\n+}\n+\n+func (m *ClusterStatus) validateNodePools(formats strfmt.Registry) error 
{\n+\n+ if swag.IsZero(m.NodePools) { // not required\n+ return nil\n+ }\n+\n+ for i := 0; i < len(m.NodePools); i++ {\n+\n+ if swag.IsZero(m.NodePools[i]) { // not required\n+ continue\n+ }\n+\n+ if m.NodePools[i] != nil {\n+\n+ if err := m.NodePools[i].Validate(formats); err != nil {\n+ if ve, ok := err.(*errors.Validation); ok {\n+ return ve.ValidateName(\"status\" + \".\" + \"nodePools\" + \".\" + strconv.Itoa(i))\n+ }\n+ return err\n+ }\n+ }\n+\n+ }\n+\n+ return nil\n+}\n+\n+// MarshalBinary interface implementation\n+func (m *ClusterStatus) MarshalBinary() ([]byte, error) {\n+ if m == nil {\n+ return nil, nil\n+ }\n+ return swag.WriteJSON(m)\n+}\n+\n+// UnmarshalBinary interface implementation\n+func (m *ClusterStatus) UnmarshalBinary(b []byte) error {\n+ var res ClusterStatus\n+ if err := swag.ReadJSON(b, &res); err != nil {\n+ return err\n+ }\n+ *m = res\n+ return nil\n+}\n+\n+// ClusterStatusNodePoolsItems0 cluster status node pools items0\n+// swagger:model ClusterStatusNodePoolsItems0\n+type ClusterStatusNodePoolsItems0 struct {\n+\n+ // name\n+ Name string `json:\"name,omitempty\"`\n+\n+ // ready\n+ Ready int64 `json:\"ready,omitempty\"`\n+\n+ // size\n+ Size int64 `json:\"size,omitempty\"`\n+}\n+\n+// Validate validates this cluster status node pools items0\n+func (m *ClusterStatusNodePoolsItems0) Validate(formats strfmt.Registry) error {\n+ var res []error\n+\n+ if len(res) > 0 {\n+ return errors.CompositeValidationError(res...)\n+ }\n+ return nil\n+}\n+\n+// MarshalBinary interface implementation\n+func (m *ClusterStatusNodePoolsItems0) MarshalBinary() ([]byte, error) {\n+ if m == nil {\n+ return nil, nil\n+ }\n+ return swag.WriteJSON(m)\n+}\n+\n+// UnmarshalBinary interface implementation\n+func (m *ClusterStatusNodePoolsItems0) UnmarshalBinary(b []byte) error {\n+ var res ClusterStatusNodePoolsItems0\n+ if err := swag.ReadJSON(b, &res); err != nil {\n+ return err\n+ }\n+ *m = res\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/rest/embedded_spec.go", "new_path": "pkg/api/rest/embedded_spec.go", "diff": "@@ -259,8 +259,28 @@ func init() {\n}\n},\n\"status\": {\n+ \"properties\": {\n+ \"kluster\": {\n\"description\": \"status of the cluster\",\n\"type\": \"string\"\n+ },\n+ \"nodePools\": {\n+ \"type\": \"array\",\n+ \"items\": {\n+ \"properties\": {\n+ \"name\": {\n+ \"type\": \"string\"\n+ },\n+ \"ready\": {\n+ \"type\": \"integer\"\n+ },\n+ \"size\": {\n+ \"type\": \"integer\"\n+ }\n+ }\n+ }\n+ }\n+ }\n}\n}\n},\n" }, { "change_type": "MODIFY", "old_path": "swagger.yml", "new_path": "swagger.yml", "diff": "@@ -152,9 +152,37 @@ definitions:\ndescription: name of the cluster\ntype: string\npattern: '^[a-z]([-a-z0-9]*[a-z0-9])?$'\n+ spec:\n+ properties:\n+ nodePools:\n+ type: array\n+ items:\n+ properties:\n+ name:\n+ type: string\n+ pattern: '^[a-z]([a-z0-9]*)?$'\n+ size:\n+ type: integer\n+ flavor:\n+ type: string\n+ image:\n+ type: string\nstatus:\n+ properties:\n+ kluster:\ndescription: status of the cluster\ntype: string\n+ nodePools:\n+ type: array\n+ items:\n+ properties:\n+ name:\n+ type: string\n+ size:\n+ type: integer\n+ ready:\n+ type: integer\n+\nCredentials:\ntype: object\nproperties:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
extends status object with nodepools status. breaking change for kluster state. sorry esther...
596,240
17.09.2017 13:12:08
-7,200
dc732cc64983bcf73e018e6f38aa9ac0da14e24b
first stab at a base controller
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/base.go", "diff": "+package controller\n+\n+import (\n+ \"fmt\"\n+ \"reflect\"\n+ \"sync\"\n+ \"time\"\n+\n+ \"github.com/golang/glog\"\n+ \"k8s.io/apimachinery/pkg/util/wait\"\n+ \"k8s.io/client-go/tools/cache\"\n+ \"k8s.io/client-go/util/workqueue\"\n+)\n+\n+type Controller interface {\n+ Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup)\n+ reconcile(key string) error\n+}\n+\n+type Base struct {\n+ Clients\n+ queue workqueue.RateLimitingInterface\n+ informer cache.SharedIndexInformer\n+ Controller Controller\n+}\n+\n+func NewBaseController(clients Clients, informer cache.SharedIndexInformer) Base {\n+ base := Base{\n+ Clients: clients,\n+ queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n+ informer: informer,\n+ }\n+\n+ informer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n+ AddFunc: func(obj interface{}) {\n+ key, err := cache.MetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ base.queue.Add(key)\n+ }\n+ },\n+ UpdateFunc: func(old interface{}, new interface{}) {\n+ key, err := cache.MetaNamespaceKeyFunc(new)\n+ if err == nil {\n+ base.queue.Add(key)\n+ }\n+ },\n+ DeleteFunc: func(obj interface{}) {\n+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ base.queue.Add(key)\n+ }\n+ },\n+ })\n+\n+ return base\n+}\n+\n+func (base *Base) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup) {\n+ defer base.queue.ShutDown()\n+ defer wg.Done()\n+ wg.Add(1)\n+ glog.Infof(\"Starting %v with %d workers\", base.getName(), threadiness)\n+\n+ for i := 0; i < threadiness; i++ {\n+ go wait.Until(base.runWorker, time.Second, stopCh)\n+ }\n+\n+ ticker := time.NewTicker(KLUSTER_RECHECK_INTERVAL)\n+ go func() {\n+ for {\n+ select {\n+ case <-ticker.C:\n+ for key := range base.informer.GetStore().ListKeys() {\n+ base.queue.Add(key)\n+ }\n+ case <-stopCh:\n+ ticker.Stop()\n+ return\n+ }\n+ }\n+ }()\n+\n+ <-stopCh\n+}\n+\n+func (base *Base) runWorker() {\n+ for base.processNextWorkItem() {\n+ }\n+}\n+\n+func (base *Base) processNextWorkItem() bool {\n+ key, quit := base.queue.Get()\n+ if quit {\n+ return false\n+ }\n+ defer base.queue.Done(key)\n+\n+ // Invoke the method containing the business logic\n+ err := base.reconciliation(key.(string))\n+ base.handleErr(err, key)\n+ return true\n+}\n+\n+func (base *Base) reconcile(key string) error {\n+ return fmt.Errorf(\"NotImplemented\")\n+}\n+\n+func (base *Base) handleErr(err error, key interface{}) {\n+ if err == nil {\n+ // Forget about the #AddRateLimited history of the key on every successful synchronization.\n+ // This ensures that future processing of updates for this key is not delayed because of\n+ // an outdated error history.\n+ base.queue.Forget(key)\n+ return\n+ }\n+\n+ glog.Infof(\"[%v] Error while processing %v: %v\", base.getName(), key, err)\n+ // This controller retries 5 times if something goes wrong. After that, it stops trying.\n+ if base.queue.NumRequeues(key) < 5 {\n+ // Re-enqueue the key rate limited. 
Based on the rate limiter on the\n+ // queue and the re-enqueue history, the key will be processed later again.\n+ base.queue.AddRateLimited(key)\n+ return\n+ }\n+\n+ glog.V(5).Infof(\"[%v] Dropping %v from queue because of too many errors...\", base.getName(), key)\n+ base.queue.Forget(key)\n+}\n+\n+func getControllerName(c Controller) string {\n+ return reflect.TypeOf(c).Elem().Name()\n+\n+}\n+\n+func (base *Base) getName() string {\n+ return getControllerName(base.Controller)\n+}\n+\n+func (base *Base) reconciliation(key string) error {\n+ glog.V(5).Infof(\"[%v] Reconciling %v\", base.getName(), key)\n+ return base.Controller.reconcile(key)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/wormhole.go", "diff": "+package controller\n+\n+type WormholeGenerator struct {\n+ Base\n+}\n+\n+func NewWormholeGenerator(factories Factories, clients Clients) Controller {\n+ informers := factories.Kubernikus.Kubernikus().V1().Klusters().Informer()\n+\n+ wg := &WormholeGenerator{\n+ NewBaseController(clients, informers),\n+ }\n+\n+ wg.Controller = interface{}(wg).(Controller)\n+\n+ return wg\n+}\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
first stab at a base controller
596,240
17.09.2017 22:02:59
-7,200
d65ec6e05cb2b2ed880783eb89d551204e90ea3c
adds fancy wormhole generator
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -42,10 +42,12 @@ type client struct {\ntype Client interface {\nCreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool, []byte) (string, error)\n+ CreateWormhole(*kubernikus_v1.Kluster) (string, error)\nDeleteNode(*kubernikus_v1.Kluster, string) error\nGetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\nGetProject(id string) (*Project, error)\nGetRegion() (string, error)\n+ GetWormhole(*kubernikus_v1.Kluster) (*Node, error)\nGetRouters(project_id string) ([]Router, error)\nDeleteUser(username, domainID string) error\n}\n@@ -73,12 +75,8 @@ type Subnet struct {\n}\ntype Node struct {\n- ID string\n- Name string\n- Status string\n- TaskState string\n- VMState string\n- PowerState int\n+ servers.Server\n+ StateExt\n}\nfunc (n *Node) Ready() bool {\n@@ -128,11 +126,6 @@ type StateExt struct {\nPowerState int `json:\"OS-EXT-STS:power_state\"`\n}\n-type ServerExt struct {\n- servers.Server\n- StateExt\n-}\n-\nfunc (r *StateExt) UnmarshalJSON(b []byte) error {\nreturn nil\n}\n@@ -364,6 +357,34 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.No\nprefix := fmt.Sprintf(\"kubernikus-%v\", pool_id)\nopts := servers.ListOpts{Name: prefix}\n+ servers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {\n+ nodes, err = ExtractServers(page)\n+ if err != nil {\n+ glog.V(5).Infof(\"Couldn't extract server %v\", err)\n+ return false, err\n+ }\n+\n+ return true, nil\n+ })\n+\n+ return nodes, nil\n+}\n+\n+func (c *client) GetWormhole(kluster *kubernikus_v1.Kluster) (*Node, error) {\n+ provider, err := c.projectProviderFor(kluster)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ prefix := fmt.Sprintf(\"wormhole-%v\", kluster.Name)\n+ opts := servers.ListOpts{Name: prefix}\n+\n+ var node *Node\nservers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {\nserverList, err := ExtractServers(page)\nif err != nil {\n@@ -371,15 +392,14 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.No\nreturn false, err\n}\n- for _, s := range serverList {\n- node := Node{ID: s.ID, Name: s.Name, Status: s.Status, TaskState: s.TaskState, VMState: s.VMState}\n- nodes = append(nodes, node)\n+ if len(serverList) > 0 {\n+ node = &serverList[0]\n}\nreturn true, nil\n})\n- return nodes, nil\n+ return node, nil\n}\nfunc (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.NodePool, userData []byte) (string, error) {\n@@ -413,6 +433,36 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nreturn server.ID, nil\n}\n+func (c *client) CreateWormhole(kluster *kubernikus_v1.Kluster) (string, error) {\n+ provider, err := c.projectProviderFor(kluster)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ name := fmt.Sprintf(\"wormhole-%v\", kluster.Name)\n+ glog.V(5).Infof(\"Creating %v\", name)\n+\n+ server, err := servers.Create(client, servers.CreateOpts{\n+ Name: name,\n+ FlavorName: \"m1.tiny\",\n+ ImageName: \"cirros-vmware\",\n+ Networks: []servers.Network{servers.Network{UUID: kluster.Spec.OpenstackInfo.NetworkID}},\n+ ServiceClient: client,\n+ }).Extract()\n+\n+ if err != nil {\n+ 
glog.V(5).Infof(\"Couldn't create %v: %v\", name, err)\n+ return \"\", err\n+ }\n+\n+ return server.ID, nil\n+}\n+\nfunc (c *client) DeleteNode(kluster *kubernikus_v1.Kluster, ID string) error {\nprovider, err := c.projectProviderFor(kluster)\nif err != nil {\n@@ -493,8 +543,8 @@ func (c *client) GetRegion() (string, error) {\nreturn region, nil\n}\n-func ExtractServers(r pagination.Page) ([]ServerExt, error) {\n- var s []ServerExt\n+func ExtractServers(r pagination.Page) ([]Node, error) {\n+ var s []Node\nerr := servers.ExtractServersInto(r, &s)\nreturn s, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -91,6 +91,7 @@ type KubernikusOperator struct {\nconst (\nGROUNDCTL_WORKERS = 10\nLAUNCHCTL_WORKERS = 1\n+ WORMHOLEGENERATOR_WORKERS = 1\nRECONCILIATION_DURATION = 5 * time.Minute\n)\n@@ -190,6 +191,7 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\ngroundctl := NewGroundController(o.Factories, o.Clients, o.Config)\nlaunchctl := NewLaunchController(o.Factories, o.Clients)\n+ wormhole := NewWormholeGenerator(o.Factories, o.Clients)\no.Factories.Kubernikus.Start(stopCh)\no.Factories.Kubernetes.Start(stopCh)\n@@ -200,13 +202,15 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nglog.Info(\"Cache primed. Ready for Action!\")\nfor _, c := range o.Config.Kubernikus.Controllers {\n- if c == \"groundctl\" || c == \"*\" {\n+ switch c {\n+ case \"groundctl\":\n+ go groundctl.Run(GROUNDCTL_WORKERS, stopCh, wg)\n+ case \"launchctl\":\n+ go launchctl.Run(LAUNCHCTL_WORKERS, stopCh, wg)\n+ case \"wormholegenerator\":\n+ go wormhole.Run(WORMHOLEGENERATOR_WORKERS, stopCh, wg)\n+ case \"*\":\ngo groundctl.Run(GROUNDCTL_WORKERS, stopCh, wg)\n- break\n- }\n- }\n- for _, c := range o.Config.Kubernikus.Controllers {\n- if c == \"launchctl\" || c == \"*\" {\ngo launchctl.Run(LAUNCHCTL_WORKERS, stopCh, wg)\nbreak\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/wormhole.go", "new_path": "pkg/controller/wormhole.go", "diff": "package controller\n+import (\n+ \"fmt\"\n+ \"time\"\n+\n+ \"github.com/golang/glog\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"github.com/sapcc/kubernikus/pkg/client/openstack\"\n+)\n+\ntype WormholeGenerator struct {\nBase\n}\n+type State struct {\n+ key string\n+ kluster *v1.Kluster\n+ node *openstack.Node\n+ message string\n+ err error\n+}\n+\n+type Transition func(*State) (Transition, error)\n+\nfunc NewWormholeGenerator(factories Factories, clients Clients) Controller {\ninformers := factories.Kubernikus.Kubernikus().V1().Klusters().Informer()\n@@ -16,3 +35,83 @@ func NewWormholeGenerator(factories Factories, clients Clients) Controller {\nreturn wg\n}\n+func (wg *WormholeGenerator) reconcile(key string) error {\n+ var err error\n+ state := &State{key: key}\n+ transition := wg.start\n+\n+ for transition != nil && err == nil {\n+ transition, err = transition(state)\n+ if state.message != \"\" {\n+ glog.V(5).Infof(\"[%v] %v\", key, state.message)\n+ state.message = \"\"\n+ }\n+ }\n+\n+ return err\n+}\n+\n+func (wg *WormholeGenerator) start(state *State) (Transition, error) {\n+ obj, exists, err := wg.informer.GetIndexer().GetByKey(state.key)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"Failed to fetch key %s from cache: %s\", state.key, err)\n+ }\n+\n+ if !exists {\n+ state.message = \"Kluster deleted in the meantime\"\n+ return nil, nil\n+ }\n+\n+ state.kluster = obj.(*v1.Kluster)\n+\n+ return 
wg.findOrCreateWormhole, nil\n+}\n+\n+func (wg *WormholeGenerator) findOrCreateWormhole(state *State) (Transition, error) {\n+ wormhole, err := wg.Clients.Openstack.GetWormhole(state.kluster)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"Couldn't get wormhole VM: %v\", err)\n+ }\n+\n+ if wormhole == nil {\n+ state.message = \"Wormhole does not exist. Need to create it.\"\n+ return wg.createWormhole, nil\n+ }\n+\n+ state.node = wormhole\n+ state.message = fmt.Sprintf(\"Found wormhole: %v\", wormhole.Name)\n+ return wg.checkWormhole, nil\n+}\n+\n+func (wg *WormholeGenerator) checkWormhole(state *State) (Transition, error) {\n+ if state.node.Ready() {\n+ state.message = \"Wormhole ok\"\n+ return nil, nil\n+ }\n+\n+ if time.Since(state.node.Created) > 5*time.Minute {\n+ state.message = \"Wormhole is hanging. Trying to repair.\"\n+ return wg.repairWormhole, nil\n+ }\n+\n+ return nil, fmt.Errorf(\"Wormhole is not ready\")\n+}\n+\n+func (wg *WormholeGenerator) repairWormhole(state *State) (Transition, error) {\n+ err := wg.Clients.Openstack.DeleteNode(state.kluster, state.node.ID)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"Couldn't repair wormhole %v: %v\", state.node.Name, err)\n+ }\n+ state.message = fmt.Sprintf(\"Terminated wormhole %v\", state.node.Name)\n+ return wg.findOrCreateWormhole, nil\n+}\n+\n+func (wg *WormholeGenerator) createWormhole(state *State) (Transition, error) {\n+ name, err := wg.Clients.Openstack.CreateWormhole(state.kluster)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ state.message = fmt.Sprintf(\"Wormhole %v ceated\", name)\n+ return wg.findOrCreateWormhole, nil\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds fancy wormhole generator
596,240
18.09.2017 09:26:54
-7,200
e045e2020359fa3ef1828191e196fd471d48e802
fix controller double runner
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/operator/operator.go", "new_path": "pkg/cmd/operator/operator.go", "diff": "@@ -62,7 +62,7 @@ func NewOperatorOptions() *Options {\nAuthDomain: \"Default\",\nKubernikusDomain: \"kluster.staging.cloud.sap\",\nNamespace: \"kubernikus\",\n- Controllers: []string{\"*\"},\n+ Controllers: []string{\"groundctl\", \"launchctl\", \"wormholegenerator\"},\n}\n}\n@@ -78,7 +78,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {\nflags.StringVar(&o.KubernikusDomain, \"kubernikus-domain\", o.KubernikusDomain, \"Regional domain name for all Kubernikus clusters\")\nflags.StringVar(&o.Namespace, \"namespace\", o.Namespace, \"Restrict operator to resources in the given namespace\")\n- flags.StringSliceVar(&o.Controllers, \"controllers\", o.Controllers, \"A list of controllers to enable. '*' enables all. controllers: groundctl, launchctl\")\n+ flags.StringSliceVar(&o.Controllers, \"controllers\", o.Controllers, \"A list of controllers to enable. Default is to enable all. controllers: groundctl, launchctl, wormholegenerator\")\n}\nfunc (o *Options) Validate(c *cobra.Command, args []string) error {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/base.go", "new_path": "pkg/controller/base.go", "diff": "@@ -14,6 +14,10 @@ import (\ntype Controller interface {\nRun(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup)\n+}\n+\n+type BaseController interface {\n+ Controller\nreconcile(key string) error\n}\n@@ -21,7 +25,7 @@ type Base struct {\nClients\nqueue workqueue.RateLimitingInterface\ninformer cache.SharedIndexInformer\n- Controller Controller\n+ Controller BaseController\n}\nfunc NewBaseController(clients Clients, informer cache.SharedIndexInformer) Base {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -68,7 +68,7 @@ type HelmConfig struct {\ntype KubernikusConfig struct {\nDomain string\n- Controllers []string\n+ Controllers map[string]Controller\n}\ntype Config struct {\n@@ -89,10 +89,16 @@ type KubernikusOperator struct {\n}\nconst (\n- GROUNDCTL_WORKERS = 10\n- LAUNCHCTL_WORKERS = 1\n- WORMHOLEGENERATOR_WORKERS = 1\n- RECONCILIATION_DURATION = 5 * time.Minute\n+ DEFAULT_WORKERS = 1\n+ DEFAULT_RECONCILIATION = 5 * time.Minute\n+)\n+\n+var (\n+ CONTROLLER_OPTIONS = map[string]int{\n+ \"groundctl\": 10,\n+ \"launchctl\": DEFAULT_WORKERS,\n+ \"wormholegenerator\": DEFAULT_WORKERS,\n+ }\n)\nfunc NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperator {\n@@ -112,7 +118,7 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n},\nKubernikus: KubernikusConfig{\nDomain: options.KubernikusDomain,\n- Controllers: options.Controllers,\n+ Controllers: make(map[string]Controller),\n},\n},\n}\n@@ -136,7 +142,7 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nglog.Fatalf(\"Failed to create helm client: %s\", err)\n}\n- o.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, RECONCILIATION_DURATION)\n+ o.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION)\n//Manually create shared Kluster informer that only watches the given namespace\no.Factories.Kubernikus.InformerFor(\n&kubernikus_v1.Kluster{},\n@@ -155,7 +161,7 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nDeleteFunc: o.debugDelete,\n})\n- o.Factories.Kubernetes = 
kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, RECONCILIATION_DURATION)\n+ o.Factories.Kubernetes = kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, DEFAULT_RECONCILIATION)\n//Manually create shared pod Informer that only watches the given namespace\no.Factories.Kubernetes.InformerFor(&api_v1.Pod{}, func(client kubernetes_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\nreturn cache.NewSharedIndexInformer(\n@@ -183,16 +189,23 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\noptions.AuthProjectDomain,\n)\n+ for _, k := range options.Controllers {\n+ switch k {\n+ case \"groundctl\":\n+ o.Config.Kubernikus.Controllers[\"groundctl\"] = NewGroundController(o.Factories, o.Clients, o.Config)\n+ case \"launchctl\":\n+ o.Config.Kubernikus.Controllers[\"launchctl\"] = NewLaunchController(o.Factories, o.Clients)\n+ case \"wormholegenerator\":\n+ o.Config.Kubernikus.Controllers[\"wormholegenerator\"] = NewWormholeGenerator(o.Factories, o.Clients)\n+ }\n+ }\n+\nreturn o\n}\nfunc (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nfmt.Printf(\"Welcome to Kubernikus %v\\n\", version.VERSION)\n- groundctl := NewGroundController(o.Factories, o.Clients, o.Config)\n- launchctl := NewLaunchController(o.Factories, o.Clients)\n- wormhole := NewWormholeGenerator(o.Factories, o.Clients)\n-\no.Factories.Kubernikus.Start(stopCh)\no.Factories.Kubernetes.Start(stopCh)\n@@ -201,19 +214,8 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nglog.Info(\"Cache primed. Ready for Action!\")\n- for _, c := range o.Config.Kubernikus.Controllers {\n- switch c {\n- case \"groundctl\":\n- go groundctl.Run(GROUNDCTL_WORKERS, stopCh, wg)\n- case \"launchctl\":\n- go launchctl.Run(LAUNCHCTL_WORKERS, stopCh, wg)\n- case \"wormholegenerator\":\n- go wormhole.Run(WORMHOLEGENERATOR_WORKERS, stopCh, wg)\n- case \"*\":\n- go groundctl.Run(GROUNDCTL_WORKERS, stopCh, wg)\n- go launchctl.Run(LAUNCHCTL_WORKERS, stopCh, wg)\n- break\n- }\n+ for name, controller := range o.Config.Kubernikus.Controllers {\n+ go controller.Run(CONTROLLER_OPTIONS[name], stopCh, wg)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/wormhole.go", "new_path": "pkg/controller/wormhole.go", "diff": "@@ -30,7 +30,7 @@ func NewWormholeGenerator(factories Factories, clients Clients) Controller {\nNewBaseController(clients, informers),\n}\n- wg.Controller = interface{}(wg).(Controller)\n+ wg.Controller = interface{}(wg).(BaseController)\nreturn wg\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fix controller double runner
596,240
18.09.2017 09:43:51
-7,200
b2f7e83c72b62ccd5d92b9c290071babfc7187fd
move initialization into controllers. only starts informers for controllers that are actually started
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -13,7 +13,10 @@ import (\n\"google.golang.org/grpc\"\nyaml \"gopkg.in/yaml.v2\"\nmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ \"k8s.io/apimachinery/pkg/runtime\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n+ \"k8s.io/apimachinery/pkg/watch\"\n+ kubernetes_clientset \"k8s.io/client-go/kubernetes\"\napi_v1 \"k8s.io/client-go/pkg/api/v1\"\n\"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n@@ -22,6 +25,8 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n+ kubernikus_clientset \"github.com/sapcc/kubernikus/pkg/generated/clientset\"\n+ kubernikus_informers_v1 \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions/kubernikus/v1\"\n)\nconst (\n@@ -44,10 +49,39 @@ func NewGroundController(factories Factories, clients Clients, config Config) *G\nFactories: factories,\nConfig: config,\nqueue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n- tprInformer: factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\npodInformer: factories.Kubernetes.Core().V1().Pods().Informer(),\n}\n+ //Manually create shared Kluster informer that only watches the given namespace\n+ operator.tprInformer = operator.Factories.Kubernikus.InformerFor(\n+ &v1.Kluster{},\n+ func(client kubernikus_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n+ return kubernikus_informers_v1.NewKlusterInformer(\n+ client,\n+ config.Kubernikus.Namespace,\n+ resyncPeriod,\n+ cache.Indexers{},\n+ )\n+ },\n+ )\n+\n+ //Manually create shared pod Informer that only watches the given namespace\n+ operator.podInformer = operator.Factories.Kubernetes.InformerFor(&api_v1.Pod{}, func(client kubernetes_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n+ return cache.NewSharedIndexInformer(\n+ &cache.ListWatch{\n+ ListFunc: func(o metav1.ListOptions) (runtime.Object, error) {\n+ return client.CoreV1().Pods(config.Kubernikus.Namespace).List(o)\n+ },\n+ WatchFunc: func(o metav1.ListOptions) (watch.Interface, error) {\n+ return client.CoreV1().Pods(config.Kubernikus.Namespace).Watch(o)\n+ },\n+ },\n+ &api_v1.Pod{},\n+ resyncPeriod,\n+ cache.Indexers{\"kluster\": MetaLabelReleaseIndexFunc},\n+ )\n+ })\n+\noperator.tprInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\nAddFunc: operator.klusterAdd,\nUpdateFunc: operator.klusterUpdate,\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -3,29 +3,21 @@ package controller\nimport (\n\"errors\"\n\"fmt\"\n- \"reflect\"\n\"sync\"\n\"time\"\n\"github.com/golang/glog\"\n\"k8s.io/apimachinery/pkg/api/meta\"\n- meta_v1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n- \"k8s.io/apimachinery/pkg/runtime\"\n- \"k8s.io/apimachinery/pkg/watch\"\nkubernetes_informers \"k8s.io/client-go/informers\"\nkubernetes_clientset \"k8s.io/client-go/kubernetes\"\n- api_v1 \"k8s.io/client-go/pkg/api/v1\"\n- \"k8s.io/client-go/tools/cache\"\n\"k8s.io/helm/pkg/helm\"\n- kubernikus_v1 \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\nhelmutil \"github.com/sapcc/kubernikus/pkg/client/helm\"\nkube 
\"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/client/kubernikus\"\n\"github.com/sapcc/kubernikus/pkg/client/openstack\"\nkubernikus_clientset \"github.com/sapcc/kubernikus/pkg/generated/clientset\"\nkubernikus_informers \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions\"\n- kubernikus_informers_v1 \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/version\"\n)\n@@ -68,6 +60,7 @@ type HelmConfig struct {\ntype KubernikusConfig struct {\nDomain string\n+ Namespace string\nControllers map[string]Controller\n}\n@@ -118,6 +111,7 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n},\nKubernikus: KubernikusConfig{\nDomain: options.KubernikusDomain,\n+ Namespace: options.Namespace,\nControllers: make(map[string]Controller),\n},\n},\n@@ -143,41 +137,7 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n}\no.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION)\n- //Manually create shared Kluster informer that only watches the given namespace\n- o.Factories.Kubernikus.InformerFor(\n- &kubernikus_v1.Kluster{},\n- func(client kubernikus_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n- return kubernikus_informers_v1.NewKlusterInformer(\n- client,\n- options.Namespace,\n- resyncPeriod,\n- cache.Indexers{},\n- )\n- },\n- )\n- o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n- AddFunc: o.debugAdd,\n- UpdateFunc: o.debugUpdate,\n- DeleteFunc: o.debugDelete,\n- })\n-\no.Factories.Kubernetes = kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, DEFAULT_RECONCILIATION)\n- //Manually create shared pod Informer that only watches the given namespace\n- o.Factories.Kubernetes.InformerFor(&api_v1.Pod{}, func(client kubernetes_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n- return cache.NewSharedIndexInformer(\n- &cache.ListWatch{\n- ListFunc: func(o meta_v1.ListOptions) (runtime.Object, error) {\n- return client.CoreV1().Pods(options.Namespace).List(o)\n- },\n- WatchFunc: func(o meta_v1.ListOptions) (watch.Interface, error) {\n- return client.CoreV1().Pods(options.Namespace).Watch(o)\n- },\n- },\n- &api_v1.Pod{},\n- resyncPeriod,\n- cache.Indexers{\"kluster\": MetaLabelReleaseIndexFunc},\n- )\n- })\no.Clients.Openstack = openstack.NewClient(\no.Factories.Kubernetes,\n@@ -219,21 +179,6 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n}\n}\n-func (p *KubernikusOperator) debugAdd(obj interface{}) {\n- key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n- glog.V(5).Infof(\"ADD %s (%s)\", reflect.TypeOf(obj), key)\n-}\n-\n-func (p *KubernikusOperator) debugDelete(obj interface{}) {\n- key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n- glog.V(5).Infof(\"DELETE %s (%s)\", reflect.TypeOf(obj), key)\n-}\n-\n-func (p *KubernikusOperator) debugUpdate(cur, old interface{}) {\n- key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(cur)\n- glog.V(5).Infof(\"UPDATE %s (%s)\", reflect.TypeOf(cur), key)\n-}\n-\n// MetaLabelReleaseIndexFunc is a default index function that indexes based on an object's release label\nfunc MetaLabelReleaseIndexFunc(obj interface{}) ([]string, error) {\nmeta, err := meta.Accessor(obj)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
move initialization into controllers. only starts informers for controllers that are actually started
596,240
18.09.2017 11:07:57
-7,200
36fec0063acac2e3c5bf51f00c275f29ba28d677
actually queue keys and not their index
[ { "change_type": "MODIFY", "old_path": "pkg/controller/base.go", "new_path": "pkg/controller/base.go", "diff": "@@ -74,7 +74,7 @@ func (base *Base) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGrou\nfor {\nselect {\ncase <-ticker.C:\n- for key := range base.informer.GetStore().ListKeys() {\n+ for _, key := range base.informer.GetStore().ListKeys() {\nbase.queue.Add(key)\n}\ncase <-stopCh:\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
actually queue keys and not their index
596,240
18.09.2017 11:08:16
-7,200
afacd21c78046952dd31c0869992cd5fd04d209a
this state function
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -79,23 +79,46 @@ type Node struct {\nStateExt\n}\n-func (n *Node) Ready() bool {\n- glog.V(6).Infof(\"[%v] PowerState: %v, VMState: %v, TaskState: %v\", n.Name, n.PowerState, n.VMState, n.TaskState)\n- // 0: NOSTATE\n- // 1: RUNNING\n- // 3: PAUSED\n- // 4: SHUTDOWN\n- // 6: CRASHED\n- // 7: SUSPENDED\n+func (n *Node) Starting() bool {\n+ // https://github.com/openstack/nova/blob/be3a66781f7fd58e5c5c0fe89b33f8098cfb0f0d/nova/objects/fields.py#L884\nif n.TaskState == \"spawning\" || n.TaskState == \"scheduling\" || n.TaskState == \"networking\" || n.TaskState == \"block_device_mapping\" {\nreturn true\n}\n- // https://github.com/openstack/nova/blob/be3a66781f7fd58e5c5c0fe89b33f8098cfb0f0d/nova/objects/fields.py#L884\n- if n.TaskState == \"deleting\" {\n+ if n.TaskState != \"\" {\n+ return false\n+ }\n+\n+ if n.VMState == \"building\" {\n+ return true\n+ }\n+\n+ return false\n+}\n+\n+func (n *Node) Stopping() bool {\n+ if n.TaskState == \"spawning\" || n.TaskState == \"scheduling\" || n.TaskState == \"networking\" || n.TaskState == \"block_device_mapping\" {\n+ return false\n+ }\n+\n+ if n.TaskState != \"\" {\n+ return true\n+ }\n+\nreturn false\n}\n+func (n *Node) Running() bool {\n+ if n.Starting() || n.Stopping() {\n+ return false\n+ }\n+\n+ // 0: NOSTATE\n+ // 1: RUNNING\n+ // 3: PAUSED\n+ // 4: SHUTDOWN\n+ // 6: CRASHED\n+ // 7: SUSPENDED\nif n.PowerState > 1 {\nreturn false\n}\n@@ -112,12 +135,11 @@ func (n *Node) Ready() bool {\n//ERROR = 'error'\n//SHELVED = 'shelved'\n//SHELVED_OFFLOADED = 'shelved_offloaded'\n-\n- if !(n.VMState == \"active\" || n.VMState == \"building\") {\n- return false\n+ if n.VMState == \"active\" {\n+ return true\n}\n- return true\n+ return false\n}\ntype StateExt struct {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
this state function
596,240
18.09.2017 11:09:05
-7,200
b4e84cc02c5256500d4f09bd1a5d586ce75538f8
adds smarter requeueing and state detection
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -233,7 +233,7 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\nfunc ready(nodes []openstack.Node) int {\nready := 0\nfor _, n := range nodes {\n- if n.Ready() {\n+ if n.Running() {\nready = ready + 1\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/wormhole.go", "new_path": "pkg/controller/wormhole.go", "diff": "@@ -84,17 +84,32 @@ func (wg *WormholeGenerator) findOrCreateWormhole(state *State) (Transition, err\n}\nfunc (wg *WormholeGenerator) checkWormhole(state *State) (Transition, error) {\n- if state.node.Ready() {\n+ if state.node.Running() {\nstate.message = \"Wormhole ok\"\nreturn nil, nil\n}\n- if time.Since(state.node.Created) > 5*time.Minute {\n- state.message = \"Wormhole is hanging. Trying to repair.\"\n+ if state.node.Starting() {\n+ if time.Since(state.node.Created) < 5*time.Minute {\n+ state.message = \"Wormhole still spawning\"\n+ return wg.requeue, nil\n+ } else {\n+ state.message = \"Wormhole hangs while spawning\"\nreturn wg.repairWormhole, nil\n}\n+ }\n+\n+ if state.node.Stopping() {\n+ if time.Since(state.node.Updated) < 5*time.Minute {\n+ state.message = \"Wormhole still stopping\"\n+ return wg.requeue, nil\n+ } else {\n+ state.message = \"Wormhole hangs while terminating\"\n+ return wg.repairWormhole, nil\n+ }\n+ }\n- return nil, fmt.Errorf(\"Wormhole is not ready\")\n+ return nil, fmt.Errorf(\"Wormhole is in an undefined state. If this happens the universe collapses.\")\n}\nfunc (wg *WormholeGenerator) repairWormhole(state *State) (Transition, error) {\n@@ -103,7 +118,7 @@ func (wg *WormholeGenerator) repairWormhole(state *State) (Transition, error) {\nreturn nil, fmt.Errorf(\"Couldn't repair wormhole %v: %v\", state.node.Name, err)\n}\nstate.message = fmt.Sprintf(\"Terminated wormhole %v\", state.node.Name)\n- return wg.findOrCreateWormhole, nil\n+ return wg.requeue, nil\n}\nfunc (wg *WormholeGenerator) createWormhole(state *State) (Transition, error) {\n@@ -113,5 +128,10 @@ func (wg *WormholeGenerator) createWormhole(state *State) (Transition, error) {\n}\nstate.message = fmt.Sprintf(\"Wormhole %v ceated\", name)\n- return wg.findOrCreateWormhole, nil\n+ return wg.requeue, nil\n+}\n+\n+func (wg *WormholeGenerator) requeue(state *State) (Transition, error) {\n+ wg.queue.AddAfter(state.key, 10*time.Second)\n+ return nil, nil\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds smarter requeing and state detection
596,240
18.09.2017 11:09:27
-7,200
d8175b77bd0fa3facaf7b1adc55426b78d3e803c
move to initialize Kluster with namespace by coincidence :(
[ { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -150,11 +150,6 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\noptions.AuthProjectDomain,\n)\n- o.Clients.Satellites = kube.NewSharedClientFactory(\n- o.Clients.Kubernetes.Core().Secrets(options.Namespace),\n- o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n- )\n-\nfor _, k := range options.Controllers {\nswitch k {\ncase \"groundctl\":\n@@ -166,6 +161,11 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n}\n}\n+ o.Clients.Satellites = kube.NewSharedClientFactory(\n+ o.Clients.Kubernetes.Core().Secrets(options.Namespace),\n+ o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n+ )\n+\nreturn o\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
move to initalize Kluster with namespace by coincidence :(
596,240
19.09.2017 10:48:01
-7,200
fc63df3d095a81cb62afe560e2c05107a882774b
implements wormholes
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -44,14 +44,16 @@ type client struct {\ntype Client interface {\nCreateNode(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool, []byte) (string, error)\n- CreateWormhole(*kubernikus_v1.Kluster) (string, error)\nDeleteNode(*kubernikus_v1.Kluster, string) error\nGetNodes(*kubernikus_v1.Kluster, *kubernikus_v1.NodePool) ([]Node, error)\n+\nGetProject(id string) (*Project, error)\nGetRegion() (string, error)\n- GetWormhole(*kubernikus_v1.Kluster) (*Node, error)\nGetRouters(project_id string) ([]Router, error)\nDeleteUser(username, domainID string) error\n+\n+ CreateWormhole(*kubernikus_v1.Kluster, string, string) (string, error)\n+ GetWormhole(*kubernikus_v1.Kluster) (*Node, error)\n}\ntype Project struct {\n@@ -209,6 +211,37 @@ func (c *client) adminClient() (*gophercloud.ProviderClient, error) {\nreturn c.adminProviderClient, nil\n}\n+func (c *client) controlPlaneClient() (*gophercloud.ProviderClient, error) {\n+ if c.adminProviderClient != nil {\n+ return c.adminProviderClient, nil\n+ }\n+\n+ provider, err := openstack.NewClient(c.authURL)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ authOptions := &tokens.AuthOptions{\n+ IdentityEndpoint: c.authURL,\n+ Username: c.authUsername,\n+ Password: c.authPassword,\n+ DomainName: c.authDomain,\n+ AllowReauth: true,\n+ Scope: tokens.Scope{\n+ ProjectID: \"06a832fedd4b422bbf2d6d52be59a93d\",\n+ },\n+ }\n+\n+ err = openstack.AuthenticateV3(provider, authOptions, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ c.adminProviderClient = provider\n+\n+ return c.adminProviderClient, nil\n+}\n+\nfunc (c *client) klusterClientFor(kluster *kubernikus_v1.Kluster) (*gophercloud.ProviderClient, error) {\nsecret_name := kluster.Name\n@@ -459,31 +492,49 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nreturn server.ID, nil\n}\n-func (c *client) CreateWormhole(kluster *kubernikus_v1.Kluster) (string, error) {\n- provider, err := c.klusterClientFor(kluster)\n+func (c *client) CreateWormhole(kluster *kubernikus_v1.Kluster, projectID, networkID string) (string, error) {\n+ provider, err := c.controlPlaneClient()\nif err != nil {\n- return \"\", err\n+ return \"\", fmt.Errorf(\"Couldn't get provider for %v: %v\", projectID, err)\n}\nclient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})\nif err != nil {\n- return \"\", err\n+ return \"\", fmt.Errorf(\"Couldn't get Compute client: %v\", err)\n}\nname := fmt.Sprintf(\"wormhole-%v\", kluster.Name)\nglog.V(5).Infof(\"Creating %v\", name)\n+ localPort, err := c.FindOrCreateWormholeLocalPort(kluster, projectID, networkID)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't find/create local wormhole: %v\", err)\n+ }\n+\n+ foreignPort, err := c.FindOrCreateWormholeForeignPort(kluster, networkID)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't find/create local wormhol: %v\", err)\n+ }\n+\n+ glog.Infof(\"%#v\", servers.CreateOpts{\n+ Name: name,\n+ FlavorName: \"m1.small\",\n+ ImageName: \"ubuntu-16.04-amd64-vmware\",\n+ Networks: []servers.Network{servers.Network{Port: foreignPort}, servers.Network{Port: localPort}},\n+ ServiceClient: client,\n+ })\n+\nserver, err := servers.Create(client, servers.CreateOpts{\nName: name,\n- FlavorName: \"m1.tiny\",\n- ImageName: \"cirros-vmware\",\n- Networks: []servers.Network{servers.Network{UUID: kluster.Spec.OpenstackInfo.NetworkID}},\n+ FlavorName: 
\"m1.small\",\n+ ImageName: \"ubuntu-16.04-amd64-vmware\",\n+ Networks: []servers.Network{servers.Network{Port: foreignPort}, servers.Network{Port: localPort}},\nServiceClient: client,\n}).Extract()\nif err != nil {\nglog.V(5).Infof(\"Couldn't create %v: %v\", name, err)\n- return \"\", err\n+ return \"\", fmt.Errorf(\"Couldn't create wormhole: %v\", err)\n}\nreturn server.ID, nil\n@@ -569,6 +620,128 @@ func (c *client) GetRegion() (string, error) {\nreturn region, nil\n}\n+func (c *client) GetWormholeForeignPort(kluster *kubernikus_v1.Kluster) (string, error) {\n+ provider, err := c.klusterClientFor(kluster)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create foreign wormhole port: %v\", err)\n+ }\n+\n+ client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create foreign wormhole port: %v\", err)\n+ }\n+\n+ name := fmt.Sprintf(\"kubernikus:wormhole-foreign-%v\", kluster.Name)\n+ id, err := ports.IDFromName(client, name)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ port, err := ports.Get(client, id).Extract()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ return port.ID, nil\n+}\n+\n+func (c *client) GetWormholeLocalPort(kluster *kubernikus_v1.Kluster) (string, error) {\n+ provider, err := c.controlPlaneClient()\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create local wormhole port: %v\", err)\n+ }\n+\n+ client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create local wormhole port: %v\", err)\n+ }\n+\n+ name := fmt.Sprintf(\"kubernikus:wormhole-local-%v\", kluster.Name)\n+ id, err := ports.IDFromName(client, name)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ port, err := ports.Get(client, id).Extract()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ return port.ID, nil\n+}\n+\n+func (c *client) CreateWormholeLocalPort(kluster *kubernikus_v1.Kluster, projectID, networkID string) (string, error) {\n+ provider, err := c.controlPlaneClient()\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create local wormhole port: %v\", err)\n+ }\n+\n+ client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create local wormhole port: %v\", err)\n+ }\n+\n+ name := fmt.Sprintf(\"kubernikus:wormhole-local-%v\", kluster.Name)\n+ port, err := ports.Create(client, ports.CreateOpts{\n+ Name: name,\n+ NetworkID: networkID,\n+ }).Extract()\n+\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create local wormhole port: %v\", err)\n+ }\n+\n+ return port.ID, nil\n+}\n+\n+func (c *client) CreateWormholeForeignPort(kluster *kubernikus_v1.Kluster) (string, error) {\n+ provider, err := c.klusterClientFor(kluster)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create foreign wormhole port: %v\", err)\n+ }\n+\n+ client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create foreign wormhole port: %v\", err)\n+ }\n+\n+ name := fmt.Sprintf(\"kubernikus:wormhole-foreign-%v\", kluster.Name)\n+ port, err := ports.Create(client, ports.CreateOpts{\n+ Name: name,\n+ NetworkID: kluster.Spec.OpenstackInfo.NetworkID,\n+ }).Extract()\n+\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"Couldn't create foreign wormhole port: %v\", err)\n+ }\n+\n+ return port.ID, nil\n+}\n+\n+func (c *client) FindOrCreateWormholeForeignPort(kluster *kubernikus_v1.Kluster, networkID 
string) (string, error) {\n+ id, err := c.GetWormholeForeignPort(kluster)\n+ if err != nil {\n+ if _, ok := err.(gophercloud.ErrResourceNotFound); ok {\n+ return c.CreateWormholeForeignPort(kluster)\n+ } else {\n+ return \"\", fmt.Errorf(\"Couldn't find or create foreign wormhole port: %v\", err)\n+ }\n+ }\n+ return id, nil\n+}\n+\n+func (c *client) FindOrCreateWormholeLocalPort(kluster *kubernikus_v1.Kluster, projectID, networkID string) (string, error) {\n+ id, err := c.GetWormholeLocalPort(kluster)\n+ if err != nil {\n+ if _, ok := err.(gophercloud.ErrResourceNotFound); ok {\n+ return c.CreateWormholeLocalPort(kluster, projectID, networkID)\n+ } else {\n+ return \"\", fmt.Errorf(\"Couldn't find or create local wormhole port: %v\", err)\n+ }\n+ }\n+ return id, nil\n+}\n+\nfunc ExtractServers(r pagination.Page) ([]Node, error) {\nvar s []Node\nerr := servers.ExtractServersInto(r, &s)\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/operator/operator.go", "new_path": "pkg/cmd/operator/operator.go", "diff": "@@ -50,6 +50,9 @@ type Options struct {\nAuthProjectDomain string\nKubernikusDomain string\n+ KubernikusProjectID string\n+ KubernikusNetworkID string\n+\nNamespace string\nControllers []string\n}\n@@ -77,6 +80,8 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {\nflags.StringVar(&o.AuthProjectDomain, \"auth-project-domain\", o.AuthProjectDomain, \"Domain of the project\")\nflags.StringVar(&o.KubernikusDomain, \"kubernikus-domain\", o.KubernikusDomain, \"Regional domain name for all Kubernikus clusters\")\n+ flags.StringVar(&o.KubernikusProjectID, \"kubernikus-projectid\", o.KubernikusProjectID, \"ID of the project the k*s control plane.\")\n+ flags.StringVar(&o.KubernikusNetworkID, \"kubernikus-networkid\", o.KubernikusNetworkID, \"ID of the network the k*s control plane.\")\nflags.StringVar(&o.Namespace, \"namespace\", o.Namespace, \"Restrict operator to resources in the given namespace\")\nflags.StringSliceVar(&o.Controllers, \"controllers\", o.Controllers, \"A list of controllers to enable. Default is to enable all. 
controllers: groundctl, launchctl, wormholegenerator\")\n}\n@@ -109,6 +114,8 @@ func (o *Options) Run(c *cobra.Command) error {\nAuthProject: o.AuthProject,\nAuthProjectDomain: o.AuthProjectDomain,\nKubernikusDomain: o.KubernikusDomain,\n+ KubernikusProjectID: o.KubernikusProjectID,\n+ KubernikusNetworkID: o.KubernikusNetworkID,\nNamespace: o.Namespace,\nControllers: o.Controllers,\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -34,6 +34,8 @@ type KubernikusOperatorOptions struct {\nAuthProjectDomain string\nKubernikusDomain string\n+ KubernikusProjectID string\n+ KubernikusNetworkID string\nNamespace string\nControllers []string\n}\n@@ -62,6 +64,8 @@ type HelmConfig struct {\ntype KubernikusConfig struct {\nDomain string\nNamespace string\n+ ProjectID string\n+ NetworkID string\nControllers map[string]Controller\n}\n@@ -113,6 +117,8 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nKubernikus: KubernikusConfig{\nDomain: options.KubernikusDomain,\nNamespace: options.Namespace,\n+ ProjectID: options.KubernikusProjectID,\n+ NetworkID: options.KubernikusNetworkID,\nControllers: make(map[string]Controller),\n},\n},\n@@ -140,17 +146,6 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\no.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION)\no.Factories.Kubernetes = kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, DEFAULT_RECONCILIATION)\n- for _, k := range options.Controllers {\n- switch k {\n- case \"groundctl\":\n- o.Config.Kubernikus.Controllers[\"groundctl\"] = NewGroundController(o.Factories, o.Clients, o.Config)\n- case \"launchctl\":\n- o.Config.Kubernikus.Controllers[\"launchctl\"] = NewLaunchController(o.Factories, o.Clients)\n- case \"wormholegenerator\":\n- o.Config.Kubernikus.Controllers[\"wormholegenerator\"] = NewWormholeGenerator(o.Factories, o.Clients)\n- }\n- }\n-\nsecrets := o.Clients.Kubernetes.Core().Secrets(options.Namespace)\no.Clients.Openstack = openstack.NewClient(\n@@ -169,6 +164,17 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\no.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n)\n+ for _, k := range options.Controllers {\n+ switch k {\n+ case \"groundctl\":\n+ o.Config.Kubernikus.Controllers[\"groundctl\"] = NewGroundController(o.Factories, o.Clients, o.Config)\n+ case \"launchctl\":\n+ o.Config.Kubernikus.Controllers[\"launchctl\"] = NewLaunchController(o.Factories, o.Clients)\n+ case \"wormholegenerator\":\n+ o.Config.Kubernikus.Controllers[\"wormholegenerator\"] = NewWormholeGenerator(o.Factories, o.Clients, o.Config)\n+ }\n+ }\n+\nreturn o\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/wormhole.go", "new_path": "pkg/controller/wormhole.go", "diff": "@@ -11,6 +11,7 @@ import (\ntype WormholeGenerator struct {\nBase\n+ Config\n}\ntype State struct {\n@@ -18,16 +19,16 @@ type State struct {\nkluster *v1.Kluster\nnode *openstack.Node\nmessage string\n- err error\n}\ntype Transition func(*State) (Transition, error)\n-func NewWormholeGenerator(factories Factories, clients Clients) Controller {\n+func NewWormholeGenerator(factories Factories, clients Clients, config Config) Controller {\ninformers := factories.Kubernikus.Kubernikus().V1().Klusters().Informer()\nwg := &WormholeGenerator{\nNewBaseController(clients, informers),\n+ config,\n}\nwg.Controller = 
interface{}(wg).(BaseController)\n@@ -122,7 +123,7 @@ func (wg *WormholeGenerator) repairWormhole(state *State) (Transition, error) {\n}\nfunc (wg *WormholeGenerator) createWormhole(state *State) (Transition, error) {\n- name, err := wg.Clients.Openstack.CreateWormhole(state.kluster)\n+ name, err := wg.Clients.Openstack.CreateWormhole(state.kluster, wg.Config.Kubernikus.ProjectID, wg.Config.Kubernikus.NetworkID)\nif err != nil {\nreturn nil, err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
implements workholes
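The wormhole port helpers added above follow a lookup-then-create pattern: GetWormholeForeignPort / GetWormholeLocalPort are tried first, and the Create variant is only called when the lookup reports a missing resource. The standalone sketch below distills that control flow; errNotFound and the two closures are stand-ins for gophercloud's typed not-found error and the real Neutron calls, not code from the repository.

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for gophercloud.ErrResourceNotFound in this sketch.
var errNotFound = errors.New("resource not found")

// findOrCreate mirrors the shape of FindOrCreateWormhole*Port: look the port
// up first and only create it when the lookup says "not found"; every other
// error is wrapped and propagated.
func findOrCreate(get, create func() (string, error)) (string, error) {
	id, err := get()
	if err != nil {
		if errors.Is(err, errNotFound) {
			return create()
		}
		return "", fmt.Errorf("couldn't find or create port: %v", err)
	}
	return id, nil
}

func main() {
	// Simulate a port that does not exist yet and gets created on demand.
	get := func() (string, error) { return "", errNotFound }
	create := func() (string, error) { return "port-1234", nil }

	id, err := findOrCreate(get, create)
	fmt.Println(id, err) // port-1234 <nil>
}
```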
596,240
19.09.2017 13:31:56
-7,200
17155cfbf71fe0e823d47be14675868d9571c735
adds hardness for wormhole tunnels
[ { "change_type": "ADD", "old_path": null, "new_path": "cmd/wormhole/main.go", "diff": "+package main\n+\n+import (\n+ \"os\"\n+ \"path/filepath\"\n+\n+ \"github.com/golang/glog\"\n+\n+ \"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/sapcc/kubernikus/pkg/cmd/wormhole\"\n+)\n+\n+func main() {\n+ defer glog.Flush()\n+\n+ baseName := filepath.Base(os.Args[0])\n+\n+ err := wormhole.NewCommand(baseName).Execute()\n+ cmd.CheckError(err)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/cmd/wormhole/client.go", "diff": "+package wormhole\n+\n+import (\n+ \"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/spf13/cobra\"\n+)\n+\n+func NewClientCommand() *cobra.Command {\n+ o := NewClientOptions()\n+\n+ c := &cobra.Command{\n+ Use: \"client\",\n+ Short: \"Creates a Wormhole Client\",\n+ Run: func(c *cobra.Command, args []string) {\n+ cmd.CheckError(o.Validate(c, args))\n+ cmd.CheckError(o.Complete(args))\n+ cmd.CheckError(o.Run(c))\n+ },\n+ }\n+\n+ return c\n+}\n+\n+type ClientOptions struct {\n+}\n+\n+func NewClientOptions() *ClientOptions {\n+ return &ClientOptions{}\n+}\n+\n+func (o *ClientOptions) Validate(c *cobra.Command, args []string) error {\n+ return nil\n+}\n+\n+func (o *ClientOptions) Complete(args []string) error {\n+ return nil\n+}\n+\n+func (o *ClientOptions) Run(c *cobra.Command) error {\n+ return nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/cmd/wormhole/server.go", "diff": "+package wormhole\n+\n+import (\n+ \"os\"\n+ \"os/signal\"\n+ \"sync\"\n+ \"syscall\"\n+\n+ \"github.com/golang/glog\"\n+ \"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/sapcc/kubernikus/pkg/wormhole\"\n+ \"github.com/spf13/cobra\"\n+ \"github.com/spf13/pflag\"\n+)\n+\n+func NewServerCommand() *cobra.Command {\n+ o := NewServerOptions()\n+\n+ c := &cobra.Command{\n+ Use: \"server\",\n+ Short: \"Creates a Wormhole Server\",\n+ Run: func(c *cobra.Command, args []string) {\n+ cmd.CheckError(o.Validate(c, args))\n+ cmd.CheckError(o.Complete(args))\n+ cmd.CheckError(o.Run(c))\n+ },\n+ }\n+\n+ o.BindFlags(c.Flags())\n+\n+ return c\n+}\n+\n+type ServerOptions struct {\n+ KubeConfig string\n+}\n+\n+func NewServerOptions() *ServerOptions {\n+ return &ServerOptions{}\n+}\n+\n+func (o *ServerOptions) BindFlags(flags *pflag.FlagSet) {\n+ flags.StringVar(&o.KubeConfig, \"kubeconfig\", o.KubeConfig, \"Path to the kubeconfig file to use to talk to the Kubernetes apiserver. 
If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration\")\n+}\n+\n+func (o *ServerOptions) Validate(c *cobra.Command, args []string) error {\n+ return nil\n+}\n+\n+func (o *ServerOptions) Complete(args []string) error {\n+ return nil\n+}\n+\n+func (o *ServerOptions) Run(c *cobra.Command) error {\n+ sigs := make(chan os.Signal, 1)\n+ stop := make(chan struct{})\n+ signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) // Push signals into channel\n+ wg := &sync.WaitGroup{} // Goroutines can add themselves to this to be waited on\n+\n+ opts := &wormhole.ServerOptions{\n+ KubeConfig: o.KubeConfig,\n+ }\n+\n+ go wormhole.NewServer(opts).Run(stop, wg)\n+\n+ <-sigs // Wait for signals (this hangs until a signal arrives)\n+ glog.Info(\"Shutting down...\")\n+ close(stop) // Tell goroutines to stop themselves\n+ wg.Wait() // Wait for all to be stopped\n+\n+ return nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/cmd/wormhole/wormhole.go", "diff": "+package wormhole\n+\n+import (\n+ \"flag\"\n+\n+ \"github.com/spf13/cobra\"\n+)\n+\n+func NewCommand(name string) *cobra.Command {\n+ c := &cobra.Command{\n+ Use: name,\n+ Short: \"Wormhole as a Service\",\n+ Long: `Creates node-aware tunnelt connections between API server and Nodes`,\n+ }\n+\n+ c.AddCommand(\n+ NewServerCommand(),\n+ NewClientCommand(),\n+ )\n+ c.PersistentFlags().AddGoFlagSet(flag.CommandLine)\n+\n+ return c\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/wormhole/server.go", "diff": "+package wormhole\n+\n+import (\n+ \"fmt\"\n+ \"sync\"\n+ \"time\"\n+\n+ \"github.com/golang/glog\"\n+ \"k8s.io/client-go/informers\"\n+ \"k8s.io/client-go/kubernetes\"\n+\n+ kube \"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n+ \"github.com/sapcc/kubernikus/pkg/version\"\n+ \"github.com/sapcc/kubernikus/pkg/wormhole/server\"\n+)\n+\n+const (\n+ DEFAULT_RECONCILIATION = 5 * time.Minute\n+)\n+\n+type ServerOptions struct {\n+ KubeConfig string\n+}\n+\n+type Server struct {\n+ factory informers.SharedInformerFactory\n+ client kubernetes.Interface\n+ controller *server.Controller\n+}\n+\n+func NewServer(options *ServerOptions) *Server {\n+ s := &Server{}\n+\n+ client, err := kube.NewClient(options.KubeConfig)\n+ if err != nil {\n+ glog.Fatalf(\"Failed to create kubernetes clients: %s\", err)\n+ }\n+\n+ s.client = client\n+ s.factory = informers.NewSharedInformerFactory(s.client, DEFAULT_RECONCILIATION)\n+ s.controller = server.NewController(s.factory.Core().V1().Nodes())\n+\n+ return s\n+}\n+\n+func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n+ fmt.Printf(\"Welcome to Wormhole %v\\n\", version.VERSION)\n+\n+ s.factory.Start(stopCh)\n+ s.factory.WaitForCacheSync(stopCh)\n+\n+ glog.Info(\"Cache primed. 
Ready for Action!\")\n+\n+ go s.controller.Run(1, stopCh, wg)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/wormhole/server/controller.go", "diff": "+package server\n+\n+import (\n+ \"sync\"\n+ \"time\"\n+\n+ \"github.com/golang/glog\"\n+ \"k8s.io/apimachinery/pkg/util/wait\"\n+ informers \"k8s.io/client-go/informers/core/v1\"\n+ \"k8s.io/client-go/tools/cache\"\n+ \"k8s.io/client-go/util/workqueue\"\n+)\n+\n+type Controller struct {\n+ nodes informers.NodeInformer\n+ queue workqueue.RateLimitingInterface\n+}\n+\n+func NewController(informer informers.NodeInformer) *Controller {\n+ c := &Controller{\n+ nodes: informer,\n+ queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n+ }\n+\n+ c.nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n+ AddFunc: func(obj interface{}) {\n+ key, err := cache.MetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ c.queue.Add(key)\n+ }\n+ },\n+ DeleteFunc: func(obj interface{}) {\n+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n+ if err == nil {\n+ c.queue.Add(key)\n+ }\n+ },\n+ })\n+\n+ return c\n+}\n+\n+func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup) {\n+ defer c.queue.ShutDown()\n+ defer wg.Done()\n+ wg.Add(1)\n+ glog.Infof(`Starting WormholeGenerator with %d workers`, threadiness)\n+\n+ for i := 0; i < threadiness; i++ {\n+ go wait.Until(c.runWorker, time.Second, stopCh)\n+ }\n+\n+ <-stopCh\n+}\n+\n+func (c *Controller) runWorker() {\n+ for c.processNextWorkItem() {\n+ }\n+}\n+\n+func (c *Controller) processNextWorkItem() bool {\n+ key, quit := c.queue.Get()\n+ if quit {\n+ return false\n+ }\n+ defer c.queue.Done(key)\n+\n+ // Invoke the method containing the business logic\n+ err := c.reconcile(key.(string))\n+ c.handleErr(err, key)\n+ return true\n+}\n+\n+func (c *Controller) reconcile(key string) error {\n+ glog.Infof(\"Creating tunnel server for %v\", key)\n+ return nil\n+}\n+\n+func (c *Controller) handleErr(err error, key interface{}) {\n+ if err == nil {\n+ // Forget about the #AddRateLimited history of the key on every successful synchronization.\n+ // This ensures that future processing of updates for this key is not delayed because of\n+ // an outdated error history.\n+ c.queue.Forget(key)\n+ return\n+ }\n+\n+ // This controller retries 5 times if something goes wrong. After that, it stops trying.\n+ if c.queue.NumRequeues(key) < 5 {\n+ // Re-enqueue the key rate limited. Based on the rate limiter on the\n+ // queue and the re-enqueue history, the key will be processed later again.\n+ c.queue.AddRateLimited(key)\n+ return\n+ }\n+\n+ c.queue.Forget(key)\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adds hardness for wormhole tunnels
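The node controller introduced here uses the standard client-go workqueue retry loop: a failed key is re-queued with exponential backoff and dropped after five attempts (handleErr). The self-contained sketch below replays that logic with a reconcile stub that always fails and limiter delays scaled down so the program terminates quickly; names and values are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// reconcile always fails in this sketch so the retry path is visible.
func reconcile(key string) error {
	return fmt.Errorf("simulated failure for %s", key)
}

func main() {
	// Same limiter type as the controller above, with shorter delays.
	queue := workqueue.NewRateLimitingQueue(
		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 100*time.Millisecond))
	defer queue.ShutDown()

	queue.Add("node-0")

	for {
		key, quit := queue.Get()
		if quit {
			return
		}

		if err := reconcile(key.(string)); err != nil && queue.NumRequeues(key) < 5 {
			fmt.Printf("requeueing %v (attempt %d): %v\n", key, queue.NumRequeues(key)+1, err)
			queue.AddRateLimited(key) // retry later with backoff
			queue.Done(key)
			continue
		}

		// Success or too many retries: drop the per-key backoff history.
		queue.Forget(key)
		queue.Done(key)
		fmt.Printf("giving up on %v\n", key)
		return
	}
}
```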
596,240
19.09.2017 13:32:42
-7,200
b5938841591962625ff0c1d14d7f83564b124d11
add wormhole binary
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -3,7 +3,7 @@ DATE = $(shell date +%Y%m%d%H%M)\nIMAGE ?= sapcc/kubernikus\nVERSION ?= latest\nGOOS ?= $(shell go env | grep GOOS | cut -d'\"' -f2)\n-BINARIES := apiserver kubernikus tunnelclient\n+BINARIES := apiserver kubernikus wormhole tunnelclient\nLDFLAGS := -X github.com/sapcc/kubernikus/pkg/version.VERSION=$(VERSION)\nGOFLAGS := -ldflags \"$(LDFLAGS) -s -w\"\n" }, { "change_type": "ADD", "old_path": null, "new_path": "_scratch/.curlrc", "diff": "+header \"X-Auth-Token: gAAAAABZuP3N9WywK6HiMPTD-j6Yn-0hKQj5KPcfLKveq2fyq186HydvifVOy60FrD3z1x427NQeyJo-VQQJlPLjXjHKu1PeDjGdacQPKwCBVpLtM1lcLbfas1gLI093uKAxY2x8Vbd0cZvVNGd-1Tur3hwHXMvRhTdGubRtATpvcS6CW2TXCyMbyD-488kEUixfFoUPsDelPEICZowVRD8qUAZ6NWhnOiYL17i5fIXtB5e-j0fgfrc\"\n+header \"Content-Type: application/json\"\n" }, { "change_type": "ADD", "old_path": null, "new_path": "_scratch/m4.yaml", "diff": "+apiVersion: kubernikus.sap.cc/v1\n+kind: Kluster\n+metadata:\n+ creationTimestamp: 2017-09-05T09:48:34Z\n+ labels:\n+ account: 8b25871959204ff1a27605b7bcf873f7\n+ name: michi\n+ namespace: kubernikus\n+spec:\n+ name: michi\n+ nodePools:\n+ - name: small\n+ size: 2\n+ flavor: m1.small\n+ image: coreos-stable-amd64\n+ - name: large\n+ size: 2\n+ flavor: m1.small\n+ image: coreos-stable-amd64\n+status:\n+ state: Pending\n" }, { "change_type": "ADD", "old_path": null, "new_path": "_scratch/michi-kube-config", "diff": "+apiVersion: v1\n+clusters:\n+- cluster:\n+ certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNRENDQWhpZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJKTVRrd0dnWURWUVFMRXhOVFFWQWcKUTI5dWRtVnlaMlZrSUVOc2IzVmtNQkVHQTFVRUN4TUtTM1ZpWlhKdWFXdDFjekFJQmdOVkJBc1RBVzB4RERBSwpCZ05WQkFNVEExUk1VekFlRncweE56QTVNVE13T1RRd05EaGFGdzB5TnpBNU1URXdPVFF3TkRoYU1Fa3hPVEFhCkJnTlZCQXNURTFOQlVDQkRiMjUyWlhKblpXUWdRMnh2ZFdRd0VRWURWUVFMRXdwTGRXSmxjbTVwYTNWek1BZ0cKQTFVRUN4TUJiVEVNTUFvR0ExVUVBeE1EVkV4VE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQgpDZ0tDQVFFQXU1YnlMOVNWcUlWYW9maExQZ0R2L2ZxR1Jwc2k5bHQzMmVxREJRYnFiQjFwZW1zTStZQzZHelkwClV5WU54eVFlOWtlWjZrWlpFK3JLb3ZiUTZwWFBUWXo0a0RoSXpHS2sxWjZuL0lxZmtSUFEzSEdhWDNNcHh0elQKZXV5ZE9EUE96emwwdUlQSDJLYUF4Y0NBNSs2c3Nmbm0rQ2w1eU5FL0VvN3c2ZWVKZHFJcUR0TTJOcU1ydjBEcApER1cyOEtvYTlDUXh1L3dPNGlWNU1vTlZTWFk0YnhPdGRrNXlVK1pxcXRLODdrY2MvNTlmbWxWOHdqcmp2UHVrCkpEZkRDY3FUT3N4R2dCbEdubGphVEFtK0dCQlBOQzZFemozSmczdUlIU2xqVEtxRWpKNGtrU3NwUnA1dUZsOGwKQ3h1eEJDQWFwWnBRYWRWL0ErTDZOdHpCc25XY0NRSURBUUFCb3lNd0lUQU9CZ05WSFE4QkFmOEVCQU1DQXFRdwpEd1lEVlIwVEFRSC9CQVV3QXdFQi96QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFqRkRhNVZFVVNiWnR3c2lkCjdSSXB5bDgzeDEwUjF4YzMzblFJdlNvTi94ci9lYllCeFdyYWpQV1dacVk2MjNyNFU5MVJHemQ5bkhTQmlqM28KRWNjem5mcitpVWM4OUxLblVZSnUydG9DcDI5eFFQcEFDSForaDhjcHdOaVJxemZ4eWh5aWIvWkFpK1ZZM2R1TQpKakd0eTR3ZmtKWjh6UG5GQmpWYUt5RmVSQllNemkzWFlnYmlMVEkwV2VnenlqTUk5Tm1tRTNnM3BNaHF4SEdmCmJ0Qi9qUjJKMVdFeFFLdnNrN3NqVzZFZmZSWS82aVMyYi83dmdBQXB5Q0ZsdWNzb250UFJmOHg3ZEhJdlNGYUIKSlk5OWhkVzhvYkFLdjZiV3YxbmFnY2JNdWNvNmh6Q1I0NWsxR3ZvUncvQ0kvNzR2WHZNamhua1pnWWdxbGR1TgpQbm9Md0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n+ server: https://m-8b25871959204ff1a27605b7bcf873f7.kluster.staging.cloud.sap\n+ name: m-8b25871959204ff1a27605b7bcf873f7\n+contexts:\n+- context:\n+ cluster: m-8b25871959204ff1a27605b7bcf873f7\n+ user: D038720\n+ name: m-8b25871959204ff1a27605b7bcf873f7\n+current-context: m-8b25871959204ff1a27605b7bcf873f7\n+kind: Config\n+preferences: {}\n+users:\n+- name: D038720\n+ user:\n+ client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaakNDQWs2Z0F3SUJBZ0lJS09hZmlIdncxd0F3RFFZSktvWklodmNOQVFFTEJRQXdWekU1TUJvR0ExVUUKQ3hNVFUwRlFJRU52Ym5abGNtZGxaQ0JEYkc5MVpEQVJCZ05WQkFzVENrdDFZbVZ5Ym1scmRYTXdDQVlEVlFRTApFd0Z0TVJvd0dBWURWUVFERXhGQmNHbFRaWEoyWlhJZ1EyeHBaVzUwY3pBZUZ3MHhOekE1TVRNd09UUXdORGRhCkZ3MHlOekE1TVRFd09UUTBNalZhTUdZeEZ6QVZCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVRrd0dnWUQKVlFRTEV4TlRRVkFnUTI5dWRtVnlaMlZrSUVOc2IzVmtNQkVHQTFVRUN4TUtTM1ZpWlhKdWFXdDFjekFJQmdOVgpCQXNUQVcweEVEQU9CZ05WQkFNVEIwUXdNemczTWpBd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFEWmU3MDFQdjZQbm1idG52OGFER1RZOXMrNENva2JGajVrNXVEZGVyc05VNHJKQVdOMEhBeEMKZnlIeEdNVmwwVmQwZEdlUmNKWDZJSlo0NldZWUZDNDlwZ3FrRlBta1RpQnROejR3K0NySjFSYS9CbE54Q3dHSAoxRWtVbHBCelJrQ1BDbTNxbWZXTWtiTStuZU0rVWJkeXk3OW1PZ29LeWxTMzhCSHZ2QmVpVDM2K3podEFRSnQ5ClFHdVBNYUd2WG1MbTdDaXMwT3kxSkZhZzZlL0R2Z2VEeW1vUlNzNmNkdlVnSm9ydjhyWXA3U1ZabUJKclRaaDcKNHBzT1NrR1gvajZnamZsTDRiVFJudWxkdkllc1REaUxiQlFZaENBQ2dxNGl5Y0duaWxhd1UyaS81ekpBZkdEMwpXWlhoSDZRcjRxVndmcHA1OHVNTURlVGpaU2pyQU9zUEFnTUJBQUdqSnpBbE1BNEdBMVVkRHdFQi93UUVBd0lGCm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQkVWWm9vbmcKKzhoSnBHNFdmZlZldmdUQkFVL2hVM1VldWp2V0tsSkhKQUZRYm9Ra2N1bjVqMGRUNlIrYVVhTHNHWVZoNkxFVQpXN0RsTElBWjU4RCtmM2tMVGVOOTYrdjB1NlhLTG5uZHF5WmxEY00wR2ZUaHBHMjZMQUxJV25tTVo4TUVXRjkzCk1sM3FxZWN3amJnTFhaSjFFSk9TamhXZndKaFZZTkdXaEw2c3YwWTFDQVpVMlFUVXE1ejcwMlZ0TjUwSlEzLy8KL2t5MHp1ZlRYRTl3NGRxQVpjVGhVWCsra0ZXSUFLREZNaVRQWUFQblFQZWRQS2ZuVjdERllSQ3lISmg2bWZWUgpKU3kxSmJQak5nZ25NWkZDdlZhbWcvQWtBQ1lOekpRVjU1MFBMQWZxTXVBRUZweUpLL05qT3NNdUNWbmRNUzZvCkdKT0xIZ0krTWNYeit3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n+ client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMlh1OU5UNytqNTVtN1o3L0dneGsyUGJQdUFxSkd4WStaT2JnM1hxN0RWT0t5UUZqCmRCd01RbjhoOFJqRlpkRlhkSFJua1hDVitpQ1dlT2xtR0JRdVBhWUtwQlQ1cEU0Z2JUYytNUGdxeWRVV3Z3WlQKY1FzQmg5UkpGSmFRYzBaQWp3cHQ2cG4xakpHelBwM2pQbEczY3N1L1pqb0tDc3BVdC9BUjc3d1hvazkrdnM0YgpRRUNiZlVCcmp6R2hyMTVpNXV3b3JORHN0U1JXb09udnc3NEhnOHBxRVVyT25IYjFJQ2FLNy9LMktlMGxXWmdTCmEwMlllK0tiRGtwQmwvNCtvSTM1UytHMDBaN3BYYnlIckV3NGkyd1VHSVFnQW9LdUlzbkJwNHBXc0ZOb3YrY3kKUUh4ZzkxbVY0UitrSytLbGNINmFlZkxqREEzazQyVW82d0RyRHdJREFRQUJBb0lCQUJIVWlEMmVUWVoreVJlQQpLby83SmlWM2pxNGJCd1NXZlVVT2ZLOEx5ZHdjMWtzR2R4M2swc2g4M2M2RFptT2pseFR3R2pHZFZsNXNYNG5JCk1OdVdnL2YvN2UxbjN0T3VxQS9zTUlnWlFJRE5uSkRLdHZxVW1XcUdaL3R0SUtpaXdOWTVWNnVkTmJseGxJMTAKcWxYSW8wNmhENjhVNFEvamFMQ3YyUGlicGlCalYxYm90SFJlZW9Kd0ZieXZ1WEMyT0NWcWI0SVQ4bCsxVjlFZgoxR2MzRTYza3p0RitDamxCRUFpTFZ4UUtRY09HYkphbjJlUm83bEJhbUJZd1VDdHoybEtFWXYzSjJuR3J4aS9xClVjL3ZyaWxMZzRucXM1RDRvM0RXV2ZzWDBOR3pRZDN2cUg3WlZxNjFZTWNJTTRRQ3U0cjlTcEhaY0gxTGdYWE8KTTNGalB5a0NnWUVBOS96ZThSS1o2SERwSzJwVFQ5Q21tL0Jyb1loMUppMVpoMUE3cEt2NXdmVTRINVcwcGtnUAp5UnVjOWVVRmdqTDVqTnh5cHcrVFhVRWFyWHYyV01KTVNJNHFMYzYrZFRsUlFHUEpTbytkTjZRR3hzZkh6Qm13CnhlWmVjQzBvbjl3RUdXajB6dHhGNloranY0d04vRWNvNzJkQ1d3MlZvU3hORGlXcVlZVS85Mk1DZ1lFQTRJS1EKUE1Hd2VQamZnajFKNWNuMTZmUENMKzBSWVEvSlRzcm9vbFJ4d1NhdVJjeVhSRlRSZEtYOW5nZVRUaFc0djNXTQpzR0pvQWhiRDU2QlJDS0FqOExmSUhNT1F1aDFSejk2NGFNcDRycy9IYVl4NHluZW0zUkJ6d3AyQmwrSDYvMjIrCmUwa1FDZG5Icm1LK2lTYmt0VzRQdUZkYUpPc0xwM1k2eW5kbHUyVUNnWUJaTC9hMldPeEZicVpOV1BZUmxJYjMKT21aYk9BcDU2b0Z5YXVhb2VMbGdpbUxlNlNhSlR3MXVWS0lFeFpOUG1pbXFBWGF5b3FWck9vanZyOCtQYUlHcQo3b1k0NTB3VU9NTUNGMkZEa1pjSGRkU0NGVVBFSWRFenU0Qm5jN1pTbC94WmJ5ZnNaY2czRjVUNnhOSWEwelRqCnRRMW1WZUpranR3L3BFZzh2b0djNVFLQmdGY002T1pWNTVvUVRZR1BTQTdzTHdpbWRqM2srdDhhb3VDRk5veHkKbDZUZjNhUzh2azVGWU8yTXl2aXlVVUJ1bGJMTzNsMHAydEFzaVl0UEV2eDRNMWRMdXpERDkyTGlxVzBqRzNldQpRZnJ1SDVEaVlaZU
hxMUI5NTVTblE2OWpkTklEZUY4UEprUGJpRndxZDF4d2t5OXgzSENLbGk1SVlMMXV6ejdmCjZyY3RBb0dCQUpqQm1IUnRVMFdUaHdsdHQwZmhqVzZnMVBIVjJsTS9EcmJCc21wZHVpanJoRWhIOW5iTGRGNFYKQWU2MjViemF2bWhtNDNpYVhHcUlXaHFJMytrU05KNDJuNUFsNTlTbHdRWkV1bi9jK2JQbDllRXVzK29MLzBCdAppZXc5OHVHUUY1L2srU1RYUXlPTVhZYXRpcnI4cjg4TWg3aFZZays5RkRxZXg5VUtUNURmCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "_scratch/michi.kluster.eu-nl-1.cloud.sap.yaml", "diff": "+apiVersion: kubernikus.sap.cc/v1\n+kind: Kluster\n+metadata:\n+ creationTimestamp: 2017-09-05T09:48:34Z\n+ labels:\n+ account: 5d725ddf97664a16b011e8a8dd75772b\n+ name: m4-5d725ddf97664a16b011e8a8dd75772b\n+ namespace: kubernikus\n+spec:\n+ name: m4\n+ nodePools:\n+ - name: small\n+ size: 2\n+ flavor: m1.small\n+ image: coreos-stable-amd64\n+ - name: large\n+ size: 2\n+ flavor: m1.small\n+ image: coreos-stable-amd64\n+status:\n+ state: Pending\n" }, { "change_type": "ADD", "old_path": null, "new_path": "_scratch/michi.kluster.staging.cloud.sap.yaml", "diff": "+apiVersion: kubernikus.sap.cc/v1\n+kind: Kluster\n+metadata:\n+ creationTimestamp: 2017-09-05T09:48:34Z\n+ labels:\n+ account: 8b25871959204ff1a27605b7bcf873f7\n+ name: m-8b25871959204ff1a27605b7bcf873f7\n+ namespace: kubernikus\n+spec:\n+ name: m\n+ nodePools:\n+ - name: small\n+ size: flavor: m1.small\n+ image: coreos-stable-amd64\n+ - name: large\n+ size: 2\n+ flavor: m1.small\n+ image: coreos-stable-amd64\n+status:\n+ state: Pending\n+ nodePools:\n+ - name: small\n+ desired: 2\n+ running: 0\n+ error: 0\n+ resources:\n+ pods:\n+ availble: 20\n+ useD: 10\n+\n+\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add wormhole binary
596,240
19.09.2017 13:40:19
-7,200
70e93e3c66dfb76af22ffc0d2aab84596bc3b4e1
gieb tunnel.server
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/wormhole/wormhole.go", "diff": "+package wormhole\n+\n+import (\n+ \"fmt\"\n+ \"sync\"\n+ \"time\"\n+\n+ \"github.com/golang/glog\"\n+ \"github.com/koding/tunnel\"\n+ \"k8s.io/client-go/informers\"\n+ \"k8s.io/client-go/kubernetes\"\n+\n+ kube \"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n+ \"github.com/sapcc/kubernikus/pkg/version\"\n+ \"github.com/sapcc/kubernikus/pkg/wormhole/server\"\n+)\n+\n+const (\n+ DEFAULT_RECONCILIATION = 5 * time.Minute\n+)\n+\n+type ServerOptions struct {\n+ KubeConfig string\n+}\n+\n+type Server struct {\n+ factory informers.SharedInformerFactory\n+ client kubernetes.Interface\n+ server *tunnel.Server\n+ controller *server.Controller\n+}\n+\n+func NewServer(options *ServerOptions) *Server {\n+ s := &Server{}\n+\n+ client, err := kube.NewClient(options.KubeConfig)\n+ if err != nil {\n+ glog.Fatalf(\"Failed to create kubernetes clients: %s\", err)\n+ }\n+\n+ s.client = client\n+ s.server = nil\n+ s.factory = informers.NewSharedInformerFactory(s.client, DEFAULT_RECONCILIATION)\n+ s.controller = server.NewController(s.factory.Core().V1().Nodes(), s.server)\n+\n+ return s\n+}\n+\n+func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n+ fmt.Printf(\"Welcome to Wormhole %v\\n\", version.VERSION)\n+\n+ s.factory.Start(stopCh)\n+ s.factory.WaitForCacheSync(stopCh)\n+\n+ glog.Info(\"Cache primed. Ready for Action!\")\n+\n+ go s.controller.Run(1, stopCh, wg)\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
gieb tunnel.server
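This change only threads a *tunnel.Server field into the node controller; the field is still nil at this point. For orientation, a minimal sketch of how such a server would typically be created with koding/tunnel and exposed as an HTTP handler follows; the empty ServerConfig and the listen address are assumptions, not taken from the repository.

```go
package main

import (
	"log"
	"net/http"

	"github.com/koding/tunnel"
)

func main() {
	// Assumed koding/tunnel usage: an empty config yields a server that
	// accepts tunnel clients by identifier.
	srv, err := tunnel.NewServer(&tunnel.ServerConfig{})
	if err != nil {
		log.Fatalf("failed to create tunnel server: %v", err)
	}

	// The tunnel server acts as an http.Handler; incoming client requests
	// are hijacked into long-lived, multiplexed tunnel connections.
	log.Fatal(http.ListenAndServe(":6553", srv))
}
```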
596,240
19.09.2017 17:44:19
-7,200
9c3b80aeb241b759ae643567b058d595f78f806c
add missing iptables functionality
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/util/iptables/save_restore.go", "diff": "+/*\n+Copyright 2014 The Kubernetes Authors.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+*/\n+\n+package iptables\n+\n+import (\n+ \"fmt\"\n+ \"strings\"\n+)\n+\n+// MakeChainLine return an iptables-save/restore formatted chain line given a Chain\n+func MakeChainLine(chain Chain) string {\n+ return fmt.Sprintf(\":%s - [0:0]\", chain)\n+}\n+\n+// GetChainLines parses a table's iptables-save data to find chains in the table.\n+// It returns a map of iptables.Chain to string where the string is the chain line from the save (with counters etc).\n+func GetChainLines(table Table, save []byte) map[Chain]string {\n+ chainsMap := make(map[Chain]string)\n+ tablePrefix := \"*\" + string(table)\n+ readIndex := 0\n+ // find beginning of table\n+ for readIndex < len(save) {\n+ line, n := ReadLine(readIndex, save)\n+ readIndex = n\n+ if strings.HasPrefix(line, tablePrefix) {\n+ break\n+ }\n+ }\n+ // parse table lines\n+ for readIndex < len(save) {\n+ line, n := ReadLine(readIndex, save)\n+ readIndex = n\n+ if len(line) == 0 {\n+ continue\n+ }\n+ if strings.HasPrefix(line, \"COMMIT\") || strings.HasPrefix(line, \"*\") {\n+ break\n+ } else if strings.HasPrefix(line, \"#\") {\n+ continue\n+ } else if strings.HasPrefix(line, \":\") && len(line) > 1 {\n+ // We assume that the <line> contains space - chain lines have 3 fields,\n+ // space delimited. 
If there is no space, this line will panic.\n+ chain := Chain(line[1:strings.Index(line, \" \")])\n+ chainsMap[chain] = line\n+ }\n+ }\n+ return chainsMap\n+}\n+\n+func ReadLine(readIndex int, byteArray []byte) (string, int) {\n+ currentReadIndex := readIndex\n+\n+ // consume left spaces\n+ for currentReadIndex < len(byteArray) {\n+ if byteArray[currentReadIndex] == ' ' {\n+ currentReadIndex++\n+ } else {\n+ break\n+ }\n+ }\n+\n+ // leftTrimIndex stores the left index of the line after the line is left-trimmed\n+ leftTrimIndex := currentReadIndex\n+\n+ // rightTrimIndex stores the right index of the line after the line is right-trimmed\n+ // it is set to -1 since the correct value has not yet been determined.\n+ rightTrimIndex := -1\n+\n+ for ; currentReadIndex < len(byteArray); currentReadIndex++ {\n+ if byteArray[currentReadIndex] == ' ' {\n+ // set rightTrimIndex\n+ if rightTrimIndex == -1 {\n+ rightTrimIndex = currentReadIndex\n+ }\n+ } else if (byteArray[currentReadIndex] == '\\n') || (currentReadIndex == (len(byteArray) - 1)) {\n+ // end of line or byte buffer is reached\n+ if currentReadIndex <= leftTrimIndex {\n+ return \"\", currentReadIndex + 1\n+ }\n+ // set the rightTrimIndex\n+ if rightTrimIndex == -1 {\n+ rightTrimIndex = currentReadIndex\n+ if currentReadIndex == (len(byteArray)-1) && (byteArray[currentReadIndex] != '\\n') {\n+ // ensure that the last character is part of the returned string,\n+ // unless the last character is '\\n'\n+ rightTrimIndex = currentReadIndex + 1\n+ }\n+ }\n+ return string(byteArray[leftTrimIndex:rightTrimIndex]), currentReadIndex + 1\n+ } else {\n+ // unset rightTrimIndex\n+ rightTrimIndex = -1\n+ }\n+ }\n+ return \"\", currentReadIndex\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add missing iptables functionality
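GetChainLines and ReadLine are copied from the Kubernetes iptables utilities and turn iptables-save output into a map of chain declaration lines. A short usage sketch, assuming the package is imported under its in-repo path and that the surrounding package also defines Table/Chain and TableFilter (as the controller code in the next commit implies):

```go
package main

import (
	"fmt"

	"github.com/sapcc/kubernikus/pkg/util/iptables"
)

func main() {
	// A minimal iptables-save style snippet for the filter table.
	save := []byte(`*filter
:INPUT ACCEPT [0:0]
:KUBERNIKUS-TUNNELS - [0:0]
-A INPUT -m comment --comment "kubernikus tunnels" -j KUBERNIKUS-TUNNELS
COMMIT
`)

	// GetChainLines returns the chain declaration lines of the requested
	// table, keyed by chain name.
	chains := iptables.GetChainLines(iptables.TableFilter, save)

	if line, ok := chains[iptables.Chain("KUBERNIKUS-TUNNELS")]; ok {
		fmt.Printf("found chain: %s\n", line) // :KUBERNIKUS-TUNNELS - [0:0]
	}
}
```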
596,240
19.09.2017 17:45:08
-7,200
289aaf8a2515a59a22bc6a1f508affde46f76f15
added iptables monkeying
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "package server\nimport (\n+ \"bytes\"\n+ \"fmt\"\n\"net\"\n+ \"strings\"\n\"sync\"\n\"time\"\n\"github.com/golang/glog\"\n\"github.com/koding/tunnel\"\n+ \"github.com/sapcc/kubernikus/pkg/util/iptables\"\n\"k8s.io/apimachinery/pkg/util/wait\"\ninformers \"k8s.io/client-go/informers/core/v1\"\n\"k8s.io/client-go/pkg/api/v1\"\n\"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n+ utilexec \"k8s.io/utils/exec\"\n+)\n+\n+const (\n+ KUBERNIKUS_TUNNELS iptables.Chain = \"KUBERNIKUS-TUNNELS\"\n)\ntype Controller struct {\n@@ -19,6 +28,7 @@ type Controller struct {\ntunnel *tunnel.Server\nqueue workqueue.RateLimitingInterface\nstore map[string]net.Listener\n+ iptables iptables.Interface\n}\nfunc NewController(informer informers.NodeInformer, tunnel *tunnel.Server) *Controller {\n@@ -27,6 +37,7 @@ func NewController(informer informers.NodeInformer, tunnel *tunnel.Server) *Cont\ntunnel: tunnel,\nqueue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\nstore: make(map[string]net.Listener),\n+ iptables: iptables.New(utilexec.New(), iptables.ProtocolIpv4),\n}\nc.nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n@@ -141,6 +152,10 @@ func (c *Controller) addNode(key string, node *v1.Node) error {\nc.store[key] = listener\nc.tunnel.AddAddr(listener, nil, node.Spec.ExternalID)\n+\n+ if err := c.redoIPTablesSpratz(); err != nil {\n+ return err\n+ }\n} else {\nglog.V(5).Infof(\"Already listening on this node... Skipping %v\", key)\n}\n@@ -154,8 +169,116 @@ func (c *Controller) delNode(key string) error {\nc.tunnel.DeleteAddr(listener, nil)\nlistener.Close()\nc.store[key] = nil\n+\n+ if err := c.redoIPTablesSpratz(); err != nil {\n+ return err\n+ }\n} else {\nglog.V(5).Infof(\"Not listening on this node... 
Skipping %v\", key)\n}\nreturn nil\n}\n+\n+func (c *Controller) redoIPTablesSpratz() error {\n+ table := iptables.TableFilter\n+\n+ if _, err := c.iptables.EnsureChain(table, KUBERNIKUS_TUNNELS); err != nil {\n+ glog.Errorf(\"Failed to ensure that %s chain %s exists: %v\", table, KUBERNIKUS_TUNNELS, err)\n+ return err\n+ }\n+\n+ args := []string{\"-m\", \"comment\", \"--comment\", \"kubernikus tunnels\", \"-j\", string(KUBERNIKUS_TUNNELS)}\n+ if _, err := c.iptables.EnsureRule(iptables.Append, table, iptables.ChainInput, args...); err != nil {\n+ glog.Errorf(\"Failed to ensure that %s chain %s jumps to %s: %v\", table, iptables.ChainInput, KUBERNIKUS_TUNNELS, err)\n+ return err\n+ }\n+\n+ iptablesSaveRaw := bytes.NewBuffer(nil)\n+ existingFilterChains := make(map[iptables.Chain]string)\n+ err := c.iptables.SaveInto(table, iptablesSaveRaw)\n+ if err != nil {\n+ glog.Errorf(\"Failed to execute iptables-save, syncing all rules: %v\", err)\n+ } else {\n+ existingFilterChains = iptables.GetChainLines(table, iptablesSaveRaw.Bytes())\n+ }\n+\n+ filterChains := bytes.NewBuffer(nil)\n+ filterRules := bytes.NewBuffer(nil)\n+ writeLine(filterChains, \"*filter\")\n+ if chain, ok := existingFilterChains[KUBERNIKUS_TUNNELS]; ok {\n+ writeLine(filterChains, chain)\n+ } else {\n+ writeLine(filterChains, iptables.MakeChainLine(KUBERNIKUS_TUNNELS))\n+ }\n+\n+ for key, _ := range c.store {\n+ err := c.writeTunnelRedirect(key, filterRules)\n+ if err != nil {\n+ return err\n+ }\n+ }\n+\n+ writeLine(filterRules, \"COMMIT\")\n+\n+ lines := append(filterChains.Bytes(), filterRules.Bytes()...)\n+ glog.V(6).Infof(\"Restoring iptables rules: %s\", lines)\n+ err = c.iptables.RestoreAll(lines, iptables.NoFlushTables, iptables.RestoreCounters)\n+ if err != nil {\n+ glog.Errorf(\"Failed to execute iptables-restore: %v\", err)\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func (c *Controller) writeTunnelRedirect(key string, filterRules *bytes.Buffer) error {\n+ obj, exists, err := c.nodes.Informer().GetIndexer().GetByKey(key)\n+ if err != nil {\n+ return err\n+ }\n+\n+ if !exists {\n+ return nil\n+ }\n+\n+ node := obj.(*v1.Node)\n+ ip, err := GetNodeHostIP(node)\n+ if err != nil {\n+ return err\n+ }\n+\n+ port := c.store[key].Addr().(*net.TCPAddr).Port\n+\n+ writeLine(filterRules,\n+ \"-A\", string(KUBERNIKUS_TUNNELS),\n+ \"-m\", \"comment\", \"--comment\", fmt.Sprintf(`\"tunnel to %v\"`, key),\n+ \"-t\", \"nat\",\n+ \"-I\", \"PREROUTING\",\n+ \"-p\", \"tcp\",\n+ \"--dst\", ip.String(),\n+ \"--dport\", \"22\",\n+ \"--to-ports\", fmt.Sprintf(\"%v\", port),\n+ \"-j\", \"REDIRECT\",\n+ )\n+\n+ return nil\n+}\n+\n+func writeLine(buf *bytes.Buffer, words ...string) {\n+ buf.WriteString(strings.Join(words, \" \") + \"\\n\")\n+}\n+\n+func GetNodeHostIP(node *v1.Node) (net.IP, error) {\n+ addresses := node.Status.Addresses\n+ addressMap := make(map[v1.NodeAddressType][]v1.NodeAddress)\n+ for i := range addresses {\n+ addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])\n+ }\n+ if addresses, ok := addressMap[v1.NodeInternalIP]; ok {\n+ return net.ParseIP(addresses[0].Address), nil\n+ }\n+ if addresses, ok := addressMap[v1.NodeExternalIP]; ok {\n+ return net.ParseIP(addresses[0].Address), nil\n+ }\n+ return nil, fmt.Errorf(\"host IP unknown; known addresses: %v\", addresses)\n+}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
added iptables monkeying
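redoIPTablesSpratz rebuilds an iptables-restore payload from a chain declaration block plus one rule per tunnel listener and applies it with NoFlushTables/RestoreCounters. The standalone sketch below reproduces only the buffer-building step, with a placeholder node IP and listener port and a simplified rule; the real controller derives both values from the Node object and the per-node net.Listener, and emits additional arguments per rule.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// writeLine mirrors the unexported helper used by the controller above.
func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	chains := bytes.NewBuffer(nil)
	rules := bytes.NewBuffer(nil)

	// Chain declaration block for the filter table.
	writeLine(chains, "*filter")
	writeLine(chains, ":KUBERNIKUS-TUNNELS - [0:0]")

	// One redirect rule per tunnel; IP and port are made up for the sketch.
	writeLine(rules,
		"-A", "KUBERNIKUS-TUNNELS",
		"-m", "comment", "--comment", "tunnel to kubernikus/node-0",
		"-p", "tcp",
		"--dst", "198.51.100.10",
		"--dport", "22",
		"--to-ports", "40001",
		"-j", "REDIRECT",
	)
	writeLine(rules, "COMMIT")

	// This combined text is what would be handed to iptables-restore.
	fmt.Print(chains.String() + rules.String())
}
```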
596,240
19.09.2017 17:47:27
-7,200
73ed8ffd5c9bc742c6b37a8dac8c89f3dbac6d85
fix comment double quotes
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "@@ -251,7 +251,7 @@ func (c *Controller) writeTunnelRedirect(key string, filterRules *bytes.Buffer)\nwriteLine(filterRules,\n\"-A\", string(KUBERNIKUS_TUNNELS),\n- \"-m\", \"comment\", \"--comment\", fmt.Sprintf(`\"tunnel to %v\"`, key),\n+ \"-m\", \"comment\", \"--comment\", fmt.Sprintf(\"tunnel to %v\", key),\n\"-t\", \"nat\",\n\"-I\", \"PREROUTING\",\n\"-p\", \"tcp\",\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fix comment double quotes
596,240
19.09.2017 17:58:20
-7,200
330a6e9c44461dc698b9a6c78660b994645f3465
add wormhole sidecar
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -42,6 +42,10 @@ spec:\npath: kubelet-clients-apiserver.pem\n- key: kubelet-clients-apiserver-key.pem\npath: kubelet-clients-apiserver-key.pem\n+ - key: apiserver-clients-system-kube-controller-manager.pem\n+ path: kube-client.pem\n+ - key: apiserver-clients-system-kube-controller-manager-key.pem\n+ path: kube-client.key\n- key: tls-ca.pem\npath: tls-ca.pem\n- key: tls-apiserver.pem\n@@ -60,6 +64,9 @@ spec:\nitems:\n- key: token.csv\npath: token.csv\n+ - name: config\n+ configMap:\n+ name: {{ include \"master.fullname\" . }}\ncontainers:\n- name: apiserver\nimage: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n@@ -100,3 +107,18 @@ spec:\nreadOnly: true\nresources:\n{{ toYaml .Values.api.resources | indent 12 }}\n+ - name: wormhole\n+ image: sapcc/kubernikus:latest\n+ args:\n+ - wormhole\n+ - server\n+ - --kubeconfig=/etc/kubernetes/config/kubeconfig\n+ - --v=5\n+ - --logtostderr\n+ volumeMounts:\n+ - mountPath: /etc/kubernetes/certs/\n+ name: certs\n+ readOnly: true\n+ - mountPath: /etc/kubernetes/config\n+ name: config\n+ readOnly: true\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add wormhole sidecar
596,240
19.09.2017 18:09:56
-7,200
7be27d11009293aa551739f1d6b1220f41849be3
needs to start after groundctl
[ { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -159,11 +159,6 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\noptions.AuthProjectDomain,\n)\n- o.Clients.Satellites = kube.NewSharedClientFactory(\n- secrets,\n- o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n- )\n-\nfor _, k := range options.Controllers {\nswitch k {\ncase \"groundctl\":\n@@ -175,6 +170,11 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n}\n}\n+ o.Clients.Satellites = kube.NewSharedClientFactory(\n+ secrets,\n+ o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n+ )\n+\nreturn o\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
needs to start after groundctl
596,240
19.09.2017 18:12:14
-7,200
52d4d3489b422848942526b63ce7b76dfc4ca64f
do not start wormhole generator by default
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/operator/operator.go", "new_path": "pkg/cmd/operator/operator.go", "diff": "@@ -65,7 +65,7 @@ func NewOperatorOptions() *Options {\nAuthDomain: \"Default\",\nKubernikusDomain: \"kluster.staging.cloud.sap\",\nNamespace: \"kubernikus\",\n- Controllers: []string{\"groundctl\", \"launchctl\", \"wormholegenerator\"},\n+ Controllers: []string{\"groundctl\", \"launchctl\"},\n}\n}\n@@ -83,7 +83,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {\nflags.StringVar(&o.KubernikusProjectID, \"kubernikus-projectid\", o.KubernikusProjectID, \"ID of the project the k*s control plane.\")\nflags.StringVar(&o.KubernikusNetworkID, \"kubernikus-networkid\", o.KubernikusNetworkID, \"ID of the network the k*s control plane.\")\nflags.StringVar(&o.Namespace, \"namespace\", o.Namespace, \"Restrict operator to resources in the given namespace\")\n- flags.StringSliceVar(&o.Controllers, \"controllers\", o.Controllers, \"A list of controllers to enable. Default is to enable all. controllers: groundctl, launchctl, wormholegenerator\")\n+ flags.StringSliceVar(&o.Controllers, \"controllers\", o.Controllers, \"A list of controllers to enable. Default is to enable all. controllers: groundctl, launchctl\")\n}\nfunc (o *Options) Validate(c *cobra.Command, args []string) error {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
do not start wormhole generator by default
596,240
19.09.2017 20:06:56
-7,200
b67435a641cf74f8ec9e49e2a29afebac003edf6
adding iptables for wormhole redirects
[ { "change_type": "MODIFY", "old_path": "Dockerfile", "new_path": "Dockerfile", "diff": "@@ -7,7 +7,7 @@ RUN make all\nFROM alpine:3.6\nMAINTAINER \"Fabian Ruff <fabian.ruff@sap.com>\"\n-RUN apk add --no-cache curl\n+RUN apk add --no-cache curl iptables\nRUN curl -Lo /bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64 \\\n&& chmod +x /bin/dumb-init \\\n&& dumb-init -V\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
adding iptables for wormhole redirects
596,240
19.09.2017 20:16:23
-7,200
ee170c5aff82fe8f5beeaf218d9a9b933414e0e0
generate wormhole user certs
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -42,9 +42,9 @@ spec:\npath: kubelet-clients-apiserver.pem\n- key: kubelet-clients-apiserver-key.pem\npath: kubelet-clients-apiserver-key.pem\n- - key: apiserver-clients-system-kube-controller-manager.pem\n+ - key: apiserver-clients-kubernikus-wormhole.pem\npath: kube-client.pem\n- - key: apiserver-clients-system-kube-controller-manager-key.pem\n+ - key: apiserver-clients-kubernikus-wormhole-key.pem\npath: kube-client.key\n- key: tls-ca.pem\npath: tls-ca.pem\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/secrets.yaml", "new_path": "charts/kube-master/templates/secrets.yaml", "diff": "@@ -20,7 +20,7 @@ data:\ntoken.csv: {{ include (print $.Template.BasePath \"/_token.csv.tpl\") . | b64enc }}\n{{- if empty .Values.certsSecretName }}\n-{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" }}\n+{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-clients-kubernikus-wormhole.pem\" \"apiserver-clients-kubernikus-wormhole-key.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" }}\n{{ . }}: {{ required (printf \"missing cert/key: %s\" .) (index $.Values.certs .) 
| b64enc -}}\n{{ end }}\napiserver-clients-and-nodes-ca.pem: {{ printf \"%s%s\" (index .Values.certs \"apiserver-clients-ca.pem\") (index .Values.certs \"apiserver-nodes-ca.pem\") | b64enc }}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/certificates.go", "new_path": "pkg/controller/ground/certificates.go", "diff": "@@ -41,6 +41,7 @@ type Certificates struct {\nScheduler Bundle\nProxy Bundle\nClusterAdmin Bundle\n+ Wormhole Bundle\n}\nNodes struct {\nCA Bundle\n@@ -144,6 +145,7 @@ func (c Certificates) all() []Bundle {\nc.ApiServer.Clients.Scheduler,\nc.ApiServer.Clients.Proxy,\nc.ApiServer.Clients.ClusterAdmin,\n+ c.ApiServer.Clients.Wormhole,\nc.ApiServer.Nodes.CA,\nc.ApiServer.Nodes.Universal,\nc.Kubelet.Clients.CA,\n@@ -167,6 +169,7 @@ func (certs *Certificates) populateForSatellite(satellite, fqSatelliteName strin\ncerts.ApiServer.Clients.ControllerManager = certs.signApiServerClient(\"system:kube-controller-manager\")\ncerts.ApiServer.Clients.Proxy = certs.signApiServerClient(\"system:kube-proxy\")\ncerts.ApiServer.Clients.Scheduler = certs.signApiServerClient(\"system:kube-scheduler\")\n+ certs.ApiServer.Clients.Wormhole = certs.signApiServerClient(\"kubernikus:wormhole\")\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
generate wormhole user certs
596,240
19.09.2017 21:42:34
-7,200
4445aedb4c57bc88e4b8a4acddbd17b7057b8b68
update charts to expose wormhole via ingress
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -52,6 +52,10 @@ spec:\npath: tls-apiserver.pem\n- key: tls-apiserver-key.pem\npath: tls-apiserver-key.pem\n+ - key: tls-wormhole.pem\n+ path: tls-wormhole.pem\n+ - key: tls-wormhole-key.pem\n+ path: tls-wormhole-key.pem\n- name: cloudprovider\nsecret:\nsecretName: {{ include \"master.fullname\" . }}\n@@ -113,6 +117,10 @@ spec:\n- wormhole\n- server\n- --kubeconfig=/etc/kubernetes/config/kubeconfig\n+ #- --client-ca-file=/etc/kubernetes/certs/apiserver-clients-and-nodes-ca.pem\n+ - --ca /etc/kubernetes/certs/tls-ca.pem\n+ - --cert /etc/kubernetes/certs/tls-wormhole.pem\n+ - --key /etc/kubernetes/certs/tls-wormhole-key.pem\n- --v=5\nvolumeMounts:\n- mountPath: /etc/kubernetes/certs/\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/ingress.yaml", "new_path": "charts/kube-master/templates/ingress.yaml", "diff": "@@ -19,4 +19,11 @@ spec:\nbackend:\nserviceName: {{ include \"master.fullname\" . }}\nservicePort: 6443\n+ - host: {{ .Values.api.wormholeHost }}\n+ http:\n+ paths:\n+ - path: /\n+ backend:\n+ serviceName: {{ include \"master.fullname\" . }}\n+ servicePort: 6553\n{{- end }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/secrets.yaml", "new_path": "charts/kube-master/templates/secrets.yaml", "diff": "@@ -20,7 +20,7 @@ data:\ntoken.csv: {{ include (print $.Template.BasePath \"/_token.csv.tpl\") . | b64enc }}\n{{- if empty .Values.certsSecretName }}\n-{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-clients-kubernikus-wormhole.pem\" \"apiserver-clients-kubernikus-wormhole-key.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" }}\n+{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-clients-kubernikus-wormhole.pem\" \"apiserver-clients-kubernikus-wormhole-key.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" \"tls-wormhole.pem\" \"tls-wormhole-key.pem\"}}\n{{ . }}: {{ required (printf \"missing cert/key: %s\" .) (index $.Values.certs .) 
| b64enc -}}\n{{ end }}\napiserver-clients-and-nodes-ca.pem: {{ printf \"%s%s\" (index .Values.certs \"apiserver-clients-ca.pem\") (index .Values.certs \"apiserver-nodes-ca.pem\") | b64enc }}\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/service.yaml", "new_path": "charts/kube-master/templates/service.yaml", "diff": "@@ -10,5 +10,6 @@ spec:\ntype: ClusterIP\nports:\n- port: 6443\n+ - port: 6553\nselector:\napp: {{ include \"master.fullname\" . }}-apiserver\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
update charts to expose wormhole via ingress
596,240
19.09.2017 21:43:07
-7,200
87b74c039ce0c4bc196746856f9ab4bb5a650a9a
populates wormholeURL
[ { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -28,8 +28,8 @@ type OpenstackInfo struct {\n}\ntype KubernikusInfo struct {\n- Server string `json:\"server\"`\nServerURL string `json:\"serverURL\"`\n+ WormholeURL string `json:\"wormholeURL\"`\nBootstrapToken string `json:\"bootstrapToken\"`\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
populates wormholeURL
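The struct change above replaces the plain Server field with ServerURL and adds WormholeURL, the two endpoints the control plane publishes. A small sketch of how the operator derives them from the kluster name and the regional domain (the sample values are illustrative only):

```go
package main

import "fmt"

// KubernikusInfo mirrors the struct after this change.
type KubernikusInfo struct {
	ServerURL      string `json:"serverURL"`
	WormholeURL    string `json:"wormholeURL"`
	BootstrapToken string `json:"bootstrapToken"`
}

func main() {
	name, domain := "demo", "kluster.staging.cloud.sap"

	info := KubernikusInfo{
		ServerURL:   fmt.Sprintf("https://%s.%s", name, domain),
		WormholeURL: fmt.Sprintf("https://%s-wormhole.%s", name, domain),
	}

	fmt.Println(info.ServerURL)   // https://demo.kluster.staging.cloud.sap
	fmt.Println(info.WormholeURL) // https://demo-wormhole.kluster.staging.cloud.sap
}
```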
596,240
19.09.2017 21:44:22
-7,200
2f864c9e41ea2e08b5d2f4cc7b686cb2f76b8157
use less confusing port
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/tunnel.go", "new_path": "pkg/wormhole/server/tunnel.go", "diff": "@@ -53,7 +53,7 @@ func (t *Tunnel) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n}\nserver := http.Server{\n- Addr: \":443\",\n+ Addr: \":6553\",\nHandler: t,\nTLSConfig: &tls.Config{\nClientAuth: tls.RequireAndVerifyClientCert,\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
use less confusing port
596,240
19.09.2017 21:45:09
-7,200
798fe963c1c6491faabd953f226c5e9f825670e2
generate wormhole certificates
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/certificates/files.go", "new_path": "pkg/cmd/certificates/files.go", "diff": "@@ -6,6 +6,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n\"github.com/spf13/pflag\"\n@@ -59,7 +60,11 @@ func (o *FilesOptions) Run(c *cobra.Command) error {\nSpec: v1.KlusterSpec{\nName: o.Name,\n},\n- }, \"localdomain\")\n+ }, config.Config{\n+ Kubernikus: config.KubernikusConfig{\n+ Domain: \"local\",\n+ },\n+ })\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/certificates/plain.go", "new_path": "pkg/cmd/certificates/plain.go", "diff": "@@ -6,6 +6,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n)\n@@ -53,7 +54,12 @@ func (o *PlainOptions) Run(c *cobra.Command) error {\nSpec: v1.KlusterSpec{\nName: o.Name,\n},\n- }, \"localdomain\")\n+ },\n+ config.Config{\n+ Kubernikus: config.KubernikusConfig{\n+ Domain: \"local\",\n+ },\n+ })\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/cmd/helm/helm.go", "new_path": "pkg/cmd/helm/helm.go", "diff": "@@ -8,6 +8,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/cmd\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n\"github.com/spf13/cobra\"\n\"github.com/spf13/pflag\"\n@@ -95,7 +96,11 @@ func (o *HelmOptions) Run(c *cobra.Command) error {\nSpec: v1.KlusterSpec{\nName: nameA[0],\n},\n- }, nameA[1])\n+ }, config.Config{\n+ Kubernikus: config.KubernikusConfig{\n+ Domain: nameA[1],\n+ },\n+ })\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/base.go", "new_path": "pkg/controller/base.go", "diff": "@@ -7,17 +7,14 @@ import (\n\"time\"\n\"github.com/golang/glog\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n\"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n)\n-type Controller interface {\n- Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup)\n-}\n-\ntype BaseController interface {\n- Controller\n+ config.Controller\nreconcile(key string) error\n}\n@@ -131,7 +128,7 @@ func (base *Base) handleErr(err error, key interface{}) {\nbase.queue.Forget(key)\n}\n-func getControllerName(c Controller) string {\n+func getControllerName(c config.Controller) string {\nreturn reflect.TypeOf(c).Elem().Name()\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/controller/config/config.go", "diff": "+package config\n+\n+import \"sync\"\n+\n+type Controller interface {\n+ Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup)\n+}\n+\n+type OpenstackConfig struct {\n+ AuthURL string\n+ AuthUsername string\n+ AuthPassword string\n+ AuthDomain string\n+ AuthProject string\n+ AuthProjectDomain string\n+}\n+\n+type HelmConfig struct {\n+ ChartDirectory string\n+}\n+\n+type KubernikusConfig struct {\n+ Domain string\n+ Namespace string\n+ ProjectID string\n+ NetworkID string\n+ Controllers map[string]Controller\n+}\n+\n+type Config struct {\n+ Openstack OpenstackConfig\n+ Kubernikus KubernikusConfig\n+ Helm 
HelmConfig\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -24,6 +24,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\nkubernikus_clientset \"github.com/sapcc/kubernikus/pkg/generated/clientset\"\nkubernikus_informers_v1 \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions/kubernikus/v1\"\n@@ -36,14 +37,14 @@ const (\ntype GroundControl struct {\nClients\nFactories\n- Config\n+ config.Config\nqueue workqueue.RateLimitingInterface\ntprInformer cache.SharedIndexInformer\npodInformer cache.SharedIndexInformer\n}\n-func NewGroundController(factories Factories, clients Clients, config Config) *GroundControl {\n+func NewGroundController(factories Factories, clients Clients, config config.Config) *GroundControl {\noperator := &GroundControl{\nClients: clients,\nFactories: factories,\n@@ -299,7 +300,7 @@ func (op *GroundControl) updateStatus(tpr *v1.Kluster, state v1.KlusterState, me\n}\nfunc (op *GroundControl) createKluster(tpr *v1.Kluster) error {\n- cluster, err := ground.NewCluster(tpr, op.Config.Openstack.AuthURL)\n+ cluster, err := ground.NewCluster(tpr, op.Config)\nif err != nil {\nreturn err\n}\n@@ -350,8 +351,8 @@ func (op *GroundControl) requiresOpenstackInfo(kluster *v1.Kluster) bool {\n}\nfunc (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool {\n- return kluster.Spec.KubernikusInfo.Server == \"\" ||\n- kluster.Spec.KubernikusInfo.ServerURL == \"\" ||\n+ return kluster.Spec.KubernikusInfo.ServerURL == \"\" ||\n+ kluster.Spec.KubernikusInfo.WormholeURL == \"\" ||\nkluster.Spec.KubernikusInfo.BootstrapToken == \"\"\n}\n@@ -363,14 +364,14 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error {\nreturn err\n}\n- if copy.Spec.KubernikusInfo.Server == \"\" {\n- copy.Spec.KubernikusInfo.Server = fmt.Sprintf(\"%s.%s\", kluster.GetName(), op.Config.Kubernikus.Domain)\n- glog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.Server)\n- }\n-\nif copy.Spec.KubernikusInfo.ServerURL == \"\" {\ncopy.Spec.KubernikusInfo.ServerURL = fmt.Sprintf(\"https://%s.%s\", kluster.GetName(), op.Config.Kubernikus.Domain)\n- glog.V(5).Infof(\"[%v] Setting Server to %v\", kluster.Name, copy.Spec.KubernikusInfo.ServerURL)\n+ glog.V(5).Infof(\"[%v] Setting ServerURL to %v\", kluster.Name, copy.Spec.KubernikusInfo.ServerURL)\n+ }\n+\n+ if copy.Spec.KubernikusInfo.WormholeURL == \"\" {\n+ copy.Spec.KubernikusInfo.WormholeURL = fmt.Sprintf(\"https://%s-wormhole.%s\", kluster.GetName(), op.Config.Kubernikus.Domain)\n+ glog.V(5).Infof(\"[%v] Setting WormholeURL to %v\", kluster.Name, copy.Spec.KubernikusInfo.WormholeURL)\n}\nif copy.Spec.KubernikusInfo.BootstrapToken == \"\" {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/certificates.go", "new_path": "pkg/controller/ground/certificates.go", "diff": "@@ -14,6 +14,7 @@ import (\n\"time\"\n\"github.com/kennygrant/sanitize\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\ncertutil \"k8s.io/client-go/util/cert\"\n)\n@@ -59,6 +60,7 @@ type Certificates struct {\nTLS struct {\nCA Bundle\nApiServer Bundle\n+ Wormhole Bundle\n}\n}\n@@ -152,10 +154,11 @@ func (c Certificates) all() []Bundle {\nc.Kubelet.Clients.ApiServer,\nc.TLS.CA,\nc.TLS.ApiServer,\n+ c.TLS.Wormhole,\n}\n}\n-func (certs 
*Certificates) populateForSatellite(satellite, fqSatelliteName string) error {\n+func (certs *Certificates) populateForSatellite(satellite string, config config.Config) error {\ncreateCA(satellite, \"Etcd Clients\", &certs.Etcd.Clients.CA)\ncreateCA(satellite, \"Etcd Peers\", &certs.Etcd.Peers.CA)\ncreateCA(satellite, \"ApiServer Clients\", &certs.ApiServer.Clients.CA)\n@@ -173,7 +176,10 @@ func (certs *Certificates) populateForSatellite(satellite, fqSatelliteName strin\ncerts.ApiServer.Nodes.Universal = certs.signApiServerNode(\"universal\")\ncerts.Kubelet.Clients.ApiServer = certs.signKubeletClient(\"apiserver\")\ncerts.TLS.ApiServer = certs.signTLS(\"apiserver\",\n- []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", satellite, fqSatelliteName},\n+ []string{\"kubernetes\", \"kubernetes.default\", \"apiserver\", satellite, fmt.Sprintf(\"%v.%v\", satellite, config.Kubernikus.Domain)},\n+ []net.IP{net.IPv4(127, 0, 0, 1)})\n+ certs.TLS.Wormhole = certs.signTLS(\"wormhole\",\n+ []string{fmt.Sprintf(\"%v-wormhole.%v\", satellite, config.Kubernikus.Domain)},\n[]net.IP{net.IPv4(127, 0, 0, 1)})\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground/cluster.go", "new_path": "pkg/controller/ground/cluster.go", "diff": "package ground\nimport (\n+ \"fmt\"\n+\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n)\ntype Cluster struct {\n@@ -14,6 +17,7 @@ type Cluster struct {\ntype API struct {\nIngressHost string `yaml:\"ingressHost,omitempty\"`\nIngressClass string `yaml:\"ingressClass,omitempty\"`\n+ WormholeHost string `yaml:\"wormholeHost,omitempty\"`\n}\ntype OpenStack struct {\n@@ -31,14 +35,15 @@ type Kubernikus struct {\nBootstrapToken string `yaml:\"bootstrapToken,omitempty\"`\n}\n-func NewCluster(kluster *v1.Kluster, authURL string) (*Cluster, error) {\n+func NewCluster(kluster *v1.Kluster, config config.Config) (*Cluster, error) {\ncluster := &Cluster{\nCertificates: &Certificates{},\nAPI: API{\n- IngressHost: kluster.Spec.KubernikusInfo.Server,\n+ IngressHost: fmt.Sprintf(\"%v.%v\", kluster.Spec.Name, config.Kubernikus.Domain),\n+ WormholeHost: fmt.Sprintf(\"%v-wormhole.%v\", kluster.Spec.Name, config.Kubernikus.Domain),\n},\nOpenStack: OpenStack{\n- AuthURL: authURL,\n+ AuthURL: kluster.Spec.OpenstackInfo.AuthURL,\nUsername: kluster.Spec.OpenstackInfo.Username,\nPassword: kluster.Spec.OpenstackInfo.Password,\nDomainName: kluster.Spec.OpenstackInfo.Domain,\n@@ -51,7 +56,7 @@ func NewCluster(kluster *v1.Kluster, authURL string) (*Cluster, error) {\n},\n}\n- if err := cluster.Certificates.populateForSatellite(kluster.Spec.Name, kluster.Spec.KubernikusInfo.Server); err != nil {\n+ if err := cluster.Certificates.populateForSatellite(kluster.Spec.Name, config); err != nil {\nreturn cluster, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -16,6 +16,7 @@ import (\nkube \"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/client/kubernikus\"\n\"github.com/sapcc/kubernikus/pkg/client/openstack\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\nkubernikus_clientset \"github.com/sapcc/kubernikus/pkg/generated/clientset\"\nkubernikus_informers \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions\"\n\"github.com/sapcc/kubernikus/pkg/version\"\n@@ -48,33 +49,6 @@ type Clients struct {\nHelm *helm.Client\n}\n-type OpenstackConfig struct {\n- AuthURL 
string\n- AuthUsername string\n- AuthPassword string\n- AuthDomain string\n- AuthProject string\n- AuthProjectDomain string\n-}\n-\n-type HelmConfig struct {\n- ChartDirectory string\n-}\n-\n-type KubernikusConfig struct {\n- Domain string\n- Namespace string\n- ProjectID string\n- NetworkID string\n- Controllers map[string]Controller\n-}\n-\n-type Config struct {\n- Openstack OpenstackConfig\n- Kubernikus KubernikusConfig\n- Helm HelmConfig\n-}\n-\ntype Factories struct {\nKubernikus kubernikus_informers.SharedInformerFactory\nKubernetes kubernetes_informers.SharedInformerFactory\n@@ -82,7 +56,7 @@ type Factories struct {\ntype KubernikusOperator struct {\nClients\n- Config\n+ config.Config\nFactories\n}\n@@ -103,23 +77,23 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nvar err error\no := &KubernikusOperator{\n- Config: Config{\n- Openstack: OpenstackConfig{\n+ Config: config.Config{\n+ Openstack: config.OpenstackConfig{\nAuthURL: options.AuthURL,\nAuthUsername: options.AuthUsername,\nAuthPassword: options.AuthPassword,\nAuthProject: options.AuthProjectDomain,\nAuthProjectDomain: options.AuthProjectDomain,\n},\n- Helm: HelmConfig{\n+ Helm: config.HelmConfig{\nChartDirectory: options.ChartDirectory,\n},\n- Kubernikus: KubernikusConfig{\n+ Kubernikus: config.KubernikusConfig{\nDomain: options.KubernikusDomain,\nNamespace: options.Namespace,\nProjectID: options.KubernikusProjectID,\nNetworkID: options.KubernikusNetworkID,\n- Controllers: make(map[string]Controller),\n+ Controllers: make(map[string]config.Controller),\n},\n},\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/wormhole.go", "new_path": "pkg/controller/wormhole.go", "diff": "@@ -7,11 +7,12 @@ import (\n\"github.com/golang/glog\"\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/client/openstack\"\n+ \"github.com/sapcc/kubernikus/pkg/controller/config\"\n)\ntype WormholeGenerator struct {\nBase\n- Config\n+ config.Config\n}\ntype State struct {\n@@ -23,7 +24,7 @@ type State struct {\ntype Transition func(*State) (Transition, error)\n-func NewWormholeGenerator(factories Factories, clients Clients, config Config) Controller {\n+func NewWormholeGenerator(factories Factories, clients Clients, config config.Config) config.Controller {\ninformers := factories.Kubernikus.Kubernikus().V1().Klusters().Informer()\nwg := &WormholeGenerator{\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
generate wormhole certificates
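Note on the record above: it introduces a dedicated TLS bundle for the wormhole endpoint, whose SAN has to cover the name <kluster>-wormhole.<domain>. A minimal, hypothetical check of the generated PEM (file name and expected host are illustrative, not taken from the repo):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "io/ioutil"
    )

    // checkWormholeSAN parses a PEM certificate and reports whether its SAN list
    // covers the expected wormhole hostname.
    func checkWormholeSAN(path, host string) error {
        raw, err := ioutil.ReadFile(path)
        if err != nil {
            return err
        }
        block, _ := pem.Decode(raw)
        if block == nil {
            return fmt.Errorf("no PEM data in %s", path)
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            return err
        }
        return cert.VerifyHostname(host) // nil when the name is covered
    }

    func main() {
        // illustrative values; the real files live in the kluster secret
        if err := checkWormholeSAN("tls-wormhole.pem", "mykluster-wormhole.example.com"); err != nil {
            fmt.Println("SAN check failed:", err)
        }
    }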
596,240
19.09.2017 22:06:35
-7,200
614fe771e1ae567080842bfd0f2107cf3177b13d
bring fqKlusterNames back. Maybe...
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground/cluster.go", "new_path": "pkg/controller/ground/cluster.go", "diff": "@@ -39,8 +39,8 @@ func NewCluster(kluster *v1.Kluster, config config.Config) (*Cluster, error) {\ncluster := &Cluster{\nCertificates: &Certificates{},\nAPI: API{\n- IngressHost: fmt.Sprintf(\"%v.%v\", kluster.Spec.Name, config.Kubernikus.Domain),\n- WormholeHost: fmt.Sprintf(\"%v-wormhole.%v\", kluster.Spec.Name, config.Kubernikus.Domain),\n+ IngressHost: fmt.Sprintf(\"%v.%v\", kluster.GetName(), config.Kubernikus.Domain),\n+ WormholeHost: fmt.Sprintf(\"%v-wormhole.%v\", kluster.GetName(), config.Kubernikus.Domain),\n},\nOpenStack: OpenStack{\nAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\n@@ -56,7 +56,7 @@ func NewCluster(kluster *v1.Kluster, config config.Config) (*Cluster, error) {\n},\n}\n- if err := cluster.Certificates.populateForSatellite(kluster.Spec.Name, config); err != nil {\n+ if err := cluster.Certificates.populateForSatellite(kluster.GetName(), config); err != nil {\nreturn cluster, err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
bring fqKlusterNames back. Maybe...
596,240
19.09.2017 22:11:58
-7,200
01c680eebd2bf7f02800fa1adf167d04dd54f914
apparently multi-port services need names for each port. who would have known?
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/service.yaml", "new_path": "charts/kube-master/templates/service.yaml", "diff": "@@ -9,7 +9,9 @@ metadata:\nspec:\ntype: ClusterIP\nports:\n- - port: 6443\n- - port: 6553\n+ - name: apiserver\n+ port: 6443\n+ - name: wormhole\n+ port: 6553\nselector:\napp: {{ include \"master.fullname\" . }}-apiserver\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
apparently multi-port services need names for each port. who would have known?
596,240
19.09.2017 22:22:19
-7,200
a661807c05edbcf9e11e7ba90b12bf79a688c936
garglblbl. again and again...
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -118,9 +118,9 @@ spec:\n- server\n- --kubeconfig=/etc/kubernetes/config/kubeconfig\n#- --client-ca-file=/etc/kubernetes/certs/apiserver-clients-and-nodes-ca.pem\n- - --ca /etc/kubernetes/certs/tls-ca.pem\n- - --cert /etc/kubernetes/certs/tls-wormhole.pem\n- - --key /etc/kubernetes/certs/tls-wormhole-key.pem\n+ - --ca=/etc/kubernetes/certs/tls-ca.pem\n+ - --cert=/etc/kubernetes/certs/tls-wormhole.pem\n+ - --key=/etc/kubernetes/certs/tls-wormhole-key.pem\n- --v=5\nvolumeMounts:\n- mountPath: /etc/kubernetes/certs/\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
garglblbl. again and again...
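Note on the record above: the fix is about argument splitting. Every entry of a pod's args list is handed to the process as a single argv element, so "--ca /etc/kubernetes/certs/tls-ca.pem" arrives as one token containing a space and never sets the flag, while "--ca=..." does. A small standalone illustration using Go's standard flag package (the real binary uses its own CLI wiring):

    package main

    import (
        "flag"
        "fmt"
    )

    func parse(args []string) {
        fs := flag.NewFlagSet("wormhole", flag.ContinueOnError)
        ca := fs.String("ca", "", "CA bundle")
        err := fs.Parse(args)
        fmt.Printf("args=%q -> ca=%q err=%v\n", args, *ca, err)
    }

    func main() {
        parse([]string{"--ca=/etc/kubernetes/certs/tls-ca.pem"}) // value is parsed
        parse([]string{"--ca /etc/kubernetes/certs/tls-ca.pem"}) // one malformed token, flag stays empty
    }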
596,240
19.09.2017 22:40:07
-7,200
e8f46b1bae6dda12f919c98538be8ce397d1d957
using cluster-admin until seeding rbac roles works
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -42,9 +42,9 @@ spec:\npath: kubelet-clients-apiserver.pem\n- key: kubelet-clients-apiserver-key.pem\npath: kubelet-clients-apiserver-key.pem\n- - key: apiserver-clients-kubernikus-wormhole.pem\n+ - key: apiserver-clients-cluster-admin.pem\npath: kube-client.pem\n- - key: apiserver-clients-kubernikus-wormhole-key.pem\n+ - key: apiserver-clients-cluster-admin-key.pem\npath: kube-client.key\n- key: tls-ca.pem\npath: tls-ca.pem\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
using cluster-admin until seeding rbac roles works
596,240
19.09.2017 22:42:31
-7,200
44c9e64620b0251615c5bbfe15b86cda0ee67dbc
removes premature initialization
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -50,7 +50,6 @@ func NewGroundController(factories Factories, clients Clients, config config.Con\nFactories: factories,\nConfig: config,\nqueue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n- podInformer: factories.Kubernetes.Core().V1().Pods().Informer(),\n}\n//Manually create shared Kluster informer that only watches the given namespace\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
removes premature initialization
596,240
20.09.2017 08:59:21
-7,200
e3b51aef918804e05dc11c800547e151aa8faf40
initialize custom informers before anything else
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -13,10 +13,7 @@ import (\n\"google.golang.org/grpc\"\nyaml \"gopkg.in/yaml.v2\"\nmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n- \"k8s.io/apimachinery/pkg/runtime\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n- \"k8s.io/apimachinery/pkg/watch\"\n- kubernetes_clientset \"k8s.io/client-go/kubernetes\"\napi_v1 \"k8s.io/client-go/pkg/api/v1\"\n\"k8s.io/client-go/tools/cache\"\n\"k8s.io/client-go/util/workqueue\"\n@@ -26,8 +23,6 @@ import (\n\"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/controller/config\"\n\"github.com/sapcc/kubernikus/pkg/controller/ground\"\n- kubernikus_clientset \"github.com/sapcc/kubernikus/pkg/generated/clientset\"\n- kubernikus_informers_v1 \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions/kubernikus/v1\"\n)\nconst (\n@@ -50,38 +45,10 @@ func NewGroundController(factories Factories, clients Clients, config config.Con\nFactories: factories,\nConfig: config,\nqueue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)),\n+ tprInformer: factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n+ podInformer: factories.Kubernetes.Core().V1().Pods().Informer(),\n}\n- //Manually create shared Kluster informer that only watches the given namespace\n- operator.tprInformer = operator.Factories.Kubernikus.InformerFor(\n- &v1.Kluster{},\n- func(client kubernikus_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n- return kubernikus_informers_v1.NewKlusterInformer(\n- client,\n- config.Kubernikus.Namespace,\n- resyncPeriod,\n- cache.Indexers{},\n- )\n- },\n- )\n-\n- //Manually create shared pod Informer that only watches the given namespace\n- operator.podInformer = operator.Factories.Kubernetes.InformerFor(&api_v1.Pod{}, func(client kubernetes_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n- return cache.NewSharedIndexInformer(\n- &cache.ListWatch{\n- ListFunc: func(o metav1.ListOptions) (runtime.Object, error) {\n- return client.CoreV1().Pods(config.Kubernikus.Namespace).List(o)\n- },\n- WatchFunc: func(o metav1.ListOptions) (watch.Interface, error) {\n- return client.CoreV1().Pods(config.Kubernikus.Namespace).Watch(o)\n- },\n- },\n- &api_v1.Pod{},\n- resyncPeriod,\n- cache.Indexers{\"kluster\": MetaLabelReleaseIndexFunc},\n- )\n- })\n-\noperator.tprInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\nAddFunc: operator.klusterAdd,\nUpdateFunc: operator.klusterUpdate,\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -8,10 +8,16 @@ import (\n\"github.com/golang/glog\"\n\"k8s.io/apimachinery/pkg/api/meta\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n+ \"k8s.io/apimachinery/pkg/runtime\"\n+ \"k8s.io/apimachinery/pkg/watch\"\nkubernetes_informers \"k8s.io/client-go/informers\"\nkubernetes_clientset \"k8s.io/client-go/kubernetes\"\n+ api_v1 \"k8s.io/client-go/pkg/api/v1\"\n+ \"k8s.io/client-go/tools/cache\"\n\"k8s.io/helm/pkg/helm\"\n+ \"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\nhelmutil \"github.com/sapcc/kubernikus/pkg/client/helm\"\nkube \"github.com/sapcc/kubernikus/pkg/client/kubernetes\"\n\"github.com/sapcc/kubernikus/pkg/client/kubernikus\"\n@@ -19,6 +25,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/controller/config\"\nkubernikus_clientset 
\"github.com/sapcc/kubernikus/pkg/generated/clientset\"\nkubernikus_informers \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions\"\n+ kubernikus_informers_v1 \"github.com/sapcc/kubernikus/pkg/generated/informers/externalversions/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/version\"\n)\n@@ -119,12 +126,12 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\no.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION)\no.Factories.Kubernetes = kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, DEFAULT_RECONCILIATION)\n+ o.initializeCustomInformers()\nsecrets := o.Clients.Kubernetes.Core().Secrets(options.Namespace)\n+ klusters := o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer()\n- o.Clients.Openstack = openstack.NewClient(\n- secrets,\n- o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n+ o.Clients.Openstack = openstack.NewClient(secrets, klusters,\noptions.AuthURL,\noptions.AuthUsername,\noptions.AuthPassword,\n@@ -133,6 +140,8 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\noptions.AuthProjectDomain,\n)\n+ o.Clients.Satellites = kube.NewSharedClientFactory(secrets, klusters)\n+\nfor _, k := range options.Controllers {\nswitch k {\ncase \"groundctl\":\n@@ -144,11 +153,6 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\n}\n}\n- o.Clients.Satellites = kube.NewSharedClientFactory(\n- secrets,\n- o.Factories.Kubernikus.Kubernikus().V1().Klusters().Informer(),\n- )\n-\nreturn o\n}\n@@ -175,10 +179,41 @@ func MetaLabelReleaseIndexFunc(obj interface{}) ([]string, error) {\nreturn []string{\"\"}, fmt.Errorf(\"object has no meta: %v\", err)\n}\nif release, found := meta.GetLabels()[\"release\"]; found {\n- glog.Infof(\"Found release %v for pod %v\", release, meta.GetName())\n+ glog.V(6).Infof(\"Found release %v for pod %v\", release, meta.GetName())\nreturn []string{release}, nil\n}\n- glog.Infof(\"meta labels: %v\", meta.GetLabels())\n+ glog.V(6).Infof(\"meta labels: %v\", meta.GetLabels())\nreturn []string{\"\"}, errors.New(\"object has no release label\")\n+}\n+\n+func (o *KubernikusOperator) initializeCustomInformers() {\n+ //Manually create shared Kluster informer that only watches the given namespace\n+ o.Factories.Kubernikus.InformerFor(\n+ &v1.Kluster{},\n+ func(client kubernikus_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n+ return kubernikus_informers_v1.NewKlusterInformer(\n+ client,\n+ o.Config.Kubernikus.Namespace,\n+ resyncPeriod,\n+ cache.Indexers{},\n+ )\n+ },\n+ )\n+ //Manually create shared pod Informer that only watches the given namespace\n+ o.Factories.Kubernetes.InformerFor(&api_v1.Pod{}, func(client kubernetes_clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n+ return cache.NewSharedIndexInformer(\n+ &cache.ListWatch{\n+ ListFunc: func(opt metav1.ListOptions) (runtime.Object, error) {\n+ return client.CoreV1().Pods(o.Config.Kubernikus.Namespace).List(opt)\n+ },\n+ WatchFunc: func(opt metav1.ListOptions) (watch.Interface, error) {\n+ return client.CoreV1().Pods(o.Config.Kubernikus.Namespace).Watch(opt)\n+ },\n+ },\n+ &api_v1.Pod{},\n+ resyncPeriod,\n+ cache.Indexers{\"kluster\": MetaLabelReleaseIndexFunc},\n+ )\n+ })\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
initialize custom informers before anything else
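Note on the record above: the reordering works because the shared informer factory caches informers by type, so the first InformerFor call for a type wins; registering the namespace-scoped pod and kluster informers before any controller asks for them guarantees everything shares those instances. A trimmed-down sketch of the same pattern (package name, resync period and namespace handling are illustrative):

    package operatorutil

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/watch"
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        api_v1 "k8s.io/client-go/pkg/api/v1"
        "k8s.io/client-go/tools/cache"
    )

    // newFactory registers a namespace-scoped pod informer up front; later calls to
    // factory.Core().V1().Pods().Informer() return this same instance.
    func newFactory(client kubernetes.Interface, namespace string) informers.SharedInformerFactory {
        factory := informers.NewSharedInformerFactory(client, 5*time.Minute)
        factory.InformerFor(&api_v1.Pod{}, func(c kubernetes.Interface, resync time.Duration) cache.SharedIndexInformer {
            return cache.NewSharedIndexInformer(
                &cache.ListWatch{
                    ListFunc: func(o metav1.ListOptions) (runtime.Object, error) {
                        return c.CoreV1().Pods(namespace).List(o)
                    },
                    WatchFunc: func(o metav1.ListOptions) (watch.Interface, error) {
                        return c.CoreV1().Pods(namespace).Watch(o)
                    },
                },
                &api_v1.Pod{}, resync, cache.Indexers{},
            )
        })
        return factory
    }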
596,240
20.09.2017 09:30:07
-7,200
33a7e9f8b7519dba65b4bc6a6856be007cf0d505
do not create empty nodepools
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -25,7 +25,7 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nname := *params.Body.Name\nvar nodePools []v1.NodePool\nif params.Body.Spec != nil && params.Body.Spec.NodePools != nil {\n- nodePools = make([]v1.NodePool, len(params.Body.Spec.NodePools))\n+ nodePools = make([]v1.NodePool)\nfor _, pPool := range params.Body.Spec.NodePools {\nnodePools = append(nodePools, v1.NodePool{\nName: pPool.Name,\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
do not create empty nodepools
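Note on the record above: the change avoids the make-with-length-then-append pitfall. Pre-sizing the slice and appending afterwards leaves zero-value pools in front of the real ones, which is exactly the "empty nodepools" the message refers to. A standalone illustration:

    package main

    import "fmt"

    type NodePool struct{ Name string }

    func main() {
        incoming := []string{"small", "big"}

        wrong := make([]NodePool, len(incoming)) // already holds two zero-value pools
        right := []NodePool{}                    // empty, ready for append
        for _, name := range incoming {
            wrong = append(wrong, NodePool{Name: name})
            right = append(right, NodePool{Name: name})
        }
        fmt.Println(len(wrong), len(right)) // prints: 4 2
    }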
596,240
20.09.2017 09:55:31
-7,200
31360910cfbed93fbc367d4fac68dfa9eeb5be77
take starting nodes into account
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -233,7 +233,7 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\nfunc ready(nodes []openstack.Node) int {\nready := 0\nfor _, n := range nodes {\n- if n.Running() {\n+ if n.Running() || n.Starting() {\nready = ready + 1\n}\n}\n@@ -244,7 +244,7 @@ func ready(nodes []openstack.Node) int {\nfunc toBeTerminated(nodes []openstack.Node) int {\ntoBeTerminated := 0\nfor _, n := range nodes {\n- if n.TaskState == \"deleting\" {\n+ if n.Stopping() {\ncontinue\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
take starting nodes into account
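Note on the record above: the pool math leans on state helpers of the OpenStack node wrapper. The exact mapping lives elsewhere in the client; a plausible sketch, where the Nova states are assumptions:

    // Hypothetical sketch of the helpers used by ready() and toBeTerminated();
    // the real field names and Nova states in kubernikus may differ.
    package openstack

    type Node struct {
        ID        string
        Name      string
        Status    string // e.g. ACTIVE, BUILD, ERROR
        TaskState string // e.g. spawning, deleting
    }

    func (n Node) Running() bool  { return n.Status == "ACTIVE" }
    func (n Node) Starting() bool { return n.Status == "BUILD" || n.TaskState == "spawning" }
    func (n Node) Stopping() bool { return n.TaskState == "deleting" }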
596,240
20.09.2017 10:04:59
-7,200
378becdcdaa29344d29ccfa164efd1155fc453c2
namespaces nodepools with kluster name
[ { "change_type": "MODIFY", "old_path": "pkg/client/openstack/client.go", "new_path": "pkg/client/openstack/client.go", "diff": "@@ -413,7 +413,7 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.No\n}\nglog.V(5).Infof(\"Listing nodes for %v/%v\", project_id, pool_id)\n- prefix := fmt.Sprintf(\"kubernikus-%v\", pool_id)\n+ prefix := fmt.Sprintf(\"kubernikus-%v-%v\", kluster.Spec.Name, pool_id)\nopts := servers.ListOpts{Name: prefix}\nservers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {\n@@ -472,7 +472,7 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *kubernikus_v1.\nreturn \"\", err\n}\n- name := v1.SimpleNameGenerator.GenerateName(fmt.Sprintf(\"kubernikus-%v-\", pool.Name))\n+ name := v1.SimpleNameGenerator.GenerateName(fmt.Sprintf(\"kubernikus-%v-%v-\", kluster.Spec.Name, pool.Name))\nglog.V(5).Infof(\"Creating node %v\", name)\nserver, err := servers.Create(client, servers.CreateOpts{\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
namespaces nodepools with kluster name
596,240
20.09.2017 14:56:18
-7,200
92fc0ae872421fdc5e3362098cdc9058045ee4c2
add second kubeconfig for sidecar pods that connect locally
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -64,9 +64,7 @@ spec:\npath: apiserver-nodes-ca.pem\n#Todo: Replace with wormhole client cert but that needs some RBAC seeding\n- key: apiserver-clients-cluster-admin.pem\n- # - key: apiserver-clients-kubernikus-wormhole.pem\npath: kube-client.pem\n- # - key: apiserver-clients-kubernikus-wormhole-key.pem\n- key: apiserver-clients-cluster-admin-key.pem\npath: kube-client.key\n@@ -85,6 +83,12 @@ spec:\n- name: config\nconfigMap:\nname: {{ include \"master.fullname\" . }}\n+ - name: wormhole-config\n+ configMap:\n+ name: {{ include \"master.fullname\" . }}\n+ items:\n+ - key: local-kubeconfig\n+ path: kubeconfig\ncontainers:\n- name: apiserver\nimage: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n@@ -144,5 +148,5 @@ spec:\nname: wormhole-certs\nreadOnly: true\n- mountPath: /etc/kubernetes/config\n- name: config\n+ name: wormhole-config\nreadOnly: true\n" }, { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/configmap.yaml", "new_path": "charts/kube-master/templates/configmap.yaml", "diff": "@@ -26,3 +26,22 @@ data:\nuser:\nclient-certificate: /etc/kubernetes/certs/kube-client.pem\nclient-key: /etc/kubernetes/certs/kube-client.key\n+ local-kubeconfig: |-\n+ apiVersion: v1\n+ kind: Config\n+ clusters:\n+ - name: local\n+ cluster:\n+ certificate-authority: /etc/kubernetes/certs/tls-ca.pem\n+ server: https://127.0.0.1:6443\n+ contexts:\n+ - name: local\n+ context:\n+ cluster: local\n+ user: local\n+ current-context: local\n+ users:\n+ - name: local\n+ user:\n+ client-certificate: /etc/kubernetes/certs/kube-client.pem\n+ client-key: /etc/kubernetes/certs/kube-client.key\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
add second kubeconfig for sidecar pods that connect locally
596,240
20.09.2017 15:14:27
-7,200
0b732250c1d9df1180782b9d17aa5a9e6b051af2
move REDIRECT to nat table and a few other fixes
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "@@ -180,7 +180,7 @@ func (c *Controller) delNode(key string) error {\n}\nfunc (c *Controller) redoIPTablesSpratz() error {\n- table := iptables.TableFilter\n+ table := iptables.TableNAT\nif _, err := c.iptables.EnsureChain(table, KUBERNIKUS_TUNNELS); err != nil {\nglog.Errorf(\"Failed to ensure that %s chain %s exists: %v\", table, KUBERNIKUS_TUNNELS, err)\n@@ -188,39 +188,39 @@ func (c *Controller) redoIPTablesSpratz() error {\n}\nargs := []string{\"-m\", \"comment\", \"--comment\", \"kubernikus tunnels\", \"-j\", string(KUBERNIKUS_TUNNELS)}\n- if _, err := c.iptables.EnsureRule(iptables.Append, table, iptables.ChainInput, args...); err != nil {\n- glog.Errorf(\"Failed to ensure that %s chain %s jumps to %s: %v\", table, iptables.ChainInput, KUBERNIKUS_TUNNELS, err)\n+ if _, err := c.iptables.EnsureRule(iptables.Append, table, iptables.ChainPrerouting, args...); err != nil {\n+ glog.Errorf(\"Failed to ensure that %s chain %s jumps to %s: %v\", table, iptables.ChainPrerouting, KUBERNIKUS_TUNNELS, err)\nreturn err\n}\niptablesSaveRaw := bytes.NewBuffer(nil)\n- existingFilterChains := make(map[iptables.Chain]string)\n+ existingNatChains := make(map[iptables.Chain]string)\nerr := c.iptables.SaveInto(table, iptablesSaveRaw)\nif err != nil {\nglog.Errorf(\"Failed to execute iptables-save, syncing all rules: %v\", err)\n} else {\n- existingFilterChains = iptables.GetChainLines(table, iptablesSaveRaw.Bytes())\n+ existingNatChains = iptables.GetChainLines(table, iptablesSaveRaw.Bytes())\n}\n- filterChains := bytes.NewBuffer(nil)\n- filterRules := bytes.NewBuffer(nil)\n- writeLine(filterChains, \"*filter\")\n- if chain, ok := existingFilterChains[KUBERNIKUS_TUNNELS]; ok {\n- writeLine(filterChains, chain)\n+ natChains := bytes.NewBuffer(nil)\n+ natRules := bytes.NewBuffer(nil)\n+ writeLine(natChains, \"*nat\")\n+ if chain, ok := existingNatChains[KUBERNIKUS_TUNNELS]; ok {\n+ writeLine(natChains, chain)\n} else {\n- writeLine(filterChains, iptables.MakeChainLine(KUBERNIKUS_TUNNELS))\n+ writeLine(natChains, iptables.MakeChainLine(KUBERNIKUS_TUNNELS))\n}\nfor key, _ := range c.store {\n- err := c.writeTunnelRedirect(key, filterRules)\n+ err := c.writeTunnelRedirect(key, natRules)\nif err != nil {\nreturn err\n}\n}\n- writeLine(filterRules, \"COMMIT\")\n+ writeLine(natRules, \"COMMIT\")\n- lines := append(filterChains.Bytes(), filterRules.Bytes()...)\n+ lines := append(natChains.Bytes(), natRules.Bytes()...)\nglog.V(6).Infof(\"Restoring iptables rules: %s\", lines)\nerr = c.iptables.RestoreAll(lines, iptables.NoFlushTables, iptables.RestoreCounters)\nif err != nil {\n@@ -251,14 +251,12 @@ func (c *Controller) writeTunnelRedirect(key string, filterRules *bytes.Buffer)\nwriteLine(filterRules,\n\"-A\", string(KUBERNIKUS_TUNNELS),\n- \"-m\", \"comment\", \"--comment\", fmt.Sprintf(\"tunnel to %v\", key),\n- \"-t\", \"nat\",\n- \"-I\", \"PREROUTING\",\n- \"-p\", \"tcp\",\n+ \"-m\", \"comment\", \"--comment\", key,\n\"--dst\", ip.String(),\n+ \"-p\", \"tcp\",\n\"--dport\", \"22\",\n- \"--to-ports\", fmt.Sprintf(\"%v\", port),\n\"-j\", \"REDIRECT\",\n+ \"--to-ports\", fmt.Sprintf(\"%v\", port),\n)\nreturn nil\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
move REDIRECT to nat table and a few other fixes
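Note on the record above: the REDIRECT target is only valid in the nat table (PREROUTING/OUTPUT and chains reached from them), which is why the chain and its jump move out of filter/INPUT. With the rule writer shown in the diff, the payload handed to iptables-restore ends up looking roughly like this (node key, IP and port are illustrative, and the chain name is assumed to render as KUBERNIKUS-TUNNELS):

    *nat
    :KUBERNIKUS-TUNNELS - [0:0]
    -A KUBERNIKUS-TUNNELS -m comment --comment minion-0 --dst 198.51.100.10 -p tcp --dport 22 -j REDIRECT --to-ports 41234
    COMMIT

The jump from PREROUTING into the chain is ensured separately via EnsureRule and is not part of the restore payload.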
596,240
20.09.2017 17:53:28
-7,200
eef211887b93861755f6b9a11c6da426ed87788b
decouples TPR create/wait from each client creation. it's only needed when talking to the control plane from the operator
[ { "change_type": "MODIFY", "old_path": "pkg/client/kubernetes/client.go", "new_path": "pkg/client/kubernetes/client.go", "diff": "@@ -116,14 +116,6 @@ func NewClient(kubeconfig string) (kubernetes.Interface, error) {\nglog.V(3).Infof(\"Using Kubernetes Api at %s\", config.Host)\n- if err := ensureTPR(clientset); err != nil {\n- return nil, err\n- }\n-\n- if err := waitForTPR(clientset); err != nil {\n- return nil, err\n- }\n-\nreturn clientset, nil\n}\n@@ -162,7 +154,7 @@ func NewClientConfigV1(name, user, url string, key, cert, ca []byte) clientcmdap\n}\n}\n-func ensureTPR(clientset kubernetes.Interface) error {\n+func EnsureTPR(clientset kubernetes.Interface) error {\ntpr := &v1beta1.ThirdPartyResource{\nObjectMeta: metav1.ObjectMeta{\nName: \"kluster.\" + kubernikus_v1.GroupName,\n@@ -180,7 +172,7 @@ func ensureTPR(clientset kubernetes.Interface) error {\nreturn nil\n}\n-func waitForTPR(clientset kubernetes.Interface) error {\n+func WaitForTPR(clientset kubernetes.Interface) error {\nreturn wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {\n_, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Get(\"kluster.\"+kubernikus_v1.GroupName, metav1.GetOptions{})\nif err == nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -124,6 +124,14 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nglog.Fatalf(\"Failed to create helm client: %s\", err)\n}\n+ if err := kube.EnsureTPR(clientset); err != nil {\n+ glog.Fatalf(\"Couldn't create TPRs: %s\", err)\n+ }\n+\n+ if err := kube.waitForTPR(clientset); err != nil {\n+ glog.Fatalf(\"Couldn't find TPRs: %s\", err)\n+ }\n+\no.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION)\no.Factories.Kubernetes = kubernetes_informers.NewSharedInformerFactory(o.Clients.Kubernetes, DEFAULT_RECONCILIATION)\no.initializeCustomInformers()\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
decouples TPR create/wait from each client creation. it's only needed when talking to the control plane from the operator
596,240
20.09.2017 17:54:18
-7,200
d5d96b2bc9937c657c09ec3f78dc82ae41dfc2c7
fixes wrong certificate name. adds missing tls-ca
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -56,6 +56,8 @@ spec:\nsecret:\nsecretName: {{ include \"master.fullname\" . }}\nitems:\n+ - key: tls-ca.pem\n+ path: tls-ca.pem\n- key: tls-wormhole.pem\npath: tls-wormhole.pem\n- key: tls-wormhole-key.pem\n@@ -135,7 +137,7 @@ spec:\n- wormhole\n- server\n- --kubeconfig=/etc/kubernetes/config/kubeconfig\n- - --ca=/etc/kubernetes/certs/apiserver-nodes-ca.pem.pem\n+ - --ca=/etc/kubernetes/certs/apiserver-nodes-ca.pem\n- --cert=/etc/kubernetes/certs/tls-wormhole.pem\n- --key=/etc/kubernetes/certs/tls-wormhole-key.pem\n- --v=5\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
fixes wrong certificate name. adds missing tls-ca
596,240
20.09.2017 18:19:24
-7,200
baf0ead540d3912a66d0ad275311aac6867018b9
wait for apiserver before crashing. useful for clients running as sidecar
[ { "change_type": "MODIFY", "old_path": "pkg/client/kubernetes/client.go", "new_path": "pkg/client/kubernetes/client.go", "diff": "@@ -2,6 +2,8 @@ package kubernetes\nimport (\n\"errors\"\n+ \"fmt\"\n+ \"net/http\"\n\"sync\"\n\"time\"\n@@ -184,3 +186,25 @@ func WaitForTPR(clientset kubernetes.Interface) error {\nreturn false, err\n})\n}\n+\n+func WaitForServer(client kubernetes.Interface, stopCh <-chan struct{}) error {\n+ var healthzContent string\n+\n+ err := wait.PollUntil(time.Second, func() (bool, error) {\n+ healthStatus := 0\n+ resp := client.Discovery().RESTClient().Get().AbsPath(\"/healthz\").Do().StatusCode(&healthStatus)\n+ if healthStatus != http.StatusOK {\n+ glog.Errorf(\"Server isn't healthy yet. Waiting a little while.\")\n+ return false, nil\n+ }\n+ content, _ := resp.Raw()\n+ healthzContent = string(content)\n+\n+ return true, nil\n+ }, stopCh)\n+ if err != nil {\n+ return fmt.Errorf(\"Failed to contact apiserver. Last health: %v Error: %v\", healthzContent, err)\n+ }\n+\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/operator.go", "new_path": "pkg/controller/operator.go", "diff": "@@ -124,11 +124,11 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nglog.Fatalf(\"Failed to create helm client: %s\", err)\n}\n- if err := kube.EnsureTPR(clientset); err != nil {\n+ if err := kube.EnsureTPR(o.Clients.Kubernetes); err != nil {\nglog.Fatalf(\"Couldn't create TPRs: %s\", err)\n}\n- if err := kube.waitForTPR(clientset); err != nil {\n+ if err := kube.WaitForTPR(o.Clients.Kubernetes); err != nil {\nglog.Fatalf(\"Couldn't find TPRs: %s\", err)\n}\n@@ -167,6 +167,8 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions) *KubernikusOperat\nfunc (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nfmt.Printf(\"Welcome to Kubernikus %v\\n\", version.VERSION)\n+ kube.WaitForServer(o.Clients.Kubernetes, stopCh)\n+\no.Factories.Kubernikus.Start(stopCh)\no.Factories.Kubernetes.Start(stopCh)\n" }, { "change_type": "MODIFY", "old_path": "pkg/wormhole/server.go", "new_path": "pkg/wormhole/server.go", "diff": "@@ -49,6 +49,8 @@ func NewServer(options *ServerOptions) *Server {\nfunc (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\nfmt.Printf(\"Welcome to Wormhole %v\\n\", version.VERSION)\n+ kube.WaitForServer(s.client, stopCh)\n+\ns.factory.Start(stopCh)\ns.factory.WaitForCacheSync(stopCh)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
wait for apiserver before crashing. useful for clients running as sidecar
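Note on the record above: a usage sketch for the new WaitForServer helper from a sidecar entrypoint. The in-cluster config here is an assumption for brevity; the wormhole server in the repo builds its client from a kubeconfig instead.

    package main

    import (
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"

        kube "github.com/sapcc/kubernikus/pkg/client/kubernetes"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        stopCh := make(chan struct{})
        // Block until /healthz returns 200 instead of crash-looping while the
        // apiserver container in the same pod is still coming up.
        if err := kube.WaitForServer(client, stopCh); err != nil {
            panic(err)
        }
        // ... start informers and controllers here
    }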
596,240
20.09.2017 21:20:17
-7,200
358b6886207131c456be492e1534307ee310a9ee
required certificate to enable service account validation
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/controller-manager.yaml", "new_path": "charts/kube-master/templates/controller-manager.yaml", "diff": "@@ -76,6 +76,7 @@ spec:\n- --root-ca-file=/etc/kubernetes/certs/apiserver-clients-ca.pem\n- --service-account-private-key-file=/etc/kubernetes/certs/apiserver-clients-ca-key.pem\n- --service-cluster-ip-range={{ .Values.serviceCIDR }}\n+ - --use-service-account-credentials\nvolumeMounts:\n- mountPath: /etc/kubernetes/certs/\nname: certs\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
required certificate to enable service account validation
596,240
20.09.2017 21:21:00
-7,200
72a742b9e797ae88e10f1790abaf913d7dbbd9ad
now that service account tokens can be validated the individual roles per controller work. seeding works.
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -32,6 +32,8 @@ spec:\nitems:\n- key: apiserver-clients-and-nodes-ca.pem\npath: apiserver-clients-and-nodes-ca.pem\n+ - key: apiserver-clients-ca-key.pem\n+ path: apiserver-clients-ca-key.pem\n- key: etcd-clients-ca.pem\npath: etcd-clients-ca.pem\n- key: etcd-clients-apiserver.pem\n@@ -115,6 +117,7 @@ spec:\n- --etcd-keyfile=/etc/kubernetes/certs/etcd-clients-apiserver-key.pem\n- --kubelet-client-certificate=/etc/kubernetes/certs/kubelet-clients-apiserver.pem\n- --kubelet-client-key=/etc/kubernetes/certs/kubelet-clients-apiserver-key.pem\n+ - --service-account-key-file=/etc/kubernetes/certs/apiserver-clients-ca-key.pem\n- --tls-ca-file=/etc/kubernetes/certs/tls-ca.pem\n- --tls-cert-file=/etc/kubernetes/certs/tls-apiserver.pem\n- --tls-private-key-file=/etc/kubernetes/certs/tls-apiserver-key.pem\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
now that service account tokens can be validated the individual roles per controller work. seeding works.
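Note on the record above: the apiserver flag only works because it points at the same RSA key the controller-manager signs service-account tokens with (--service-account-private-key-file on the signing side, --service-account-key-file on the verifying side; the apiserver accepts a private key file and uses its public half). A hypothetical sanity check against the secret material, assuming the PEM is a PKCS#1 RSA key as emitted by client-go's cert utilities:

    package main

    import (
        "crypto/rsa"
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "io/ioutil"
    )

    // verificationKey extracts the RSA public key from a private-key PEM, i.e. the
    // key material the apiserver ends up verifying service-account tokens with.
    func verificationKey(path string) (*rsa.PublicKey, error) {
        raw, err := ioutil.ReadFile(path)
        if err != nil {
            return nil, err
        }
        block, _ := pem.Decode(raw)
        if block == nil {
            return nil, fmt.Errorf("no PEM data in %s", path)
        }
        key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
        if err != nil {
            return nil, err
        }
        return &key.PublicKey, nil
    }

    func main() {
        pub, err := verificationKey("apiserver-clients-ca-key.pem")
        if err != nil {
            panic(err)
        }
        fmt.Println("token verification key bits:", pub.N.BitLen())
    }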
596,240
20.09.2017 21:21:23
-7,200
c7ddc06df5e1daaf91fae5ee3cf2144c7372d9d0
actually bind the correct role
[ { "change_type": "MODIFY", "old_path": "pkg/controller/ground/bootstrap.go", "new_path": "pkg/controller/ground/bootstrap.go", "diff": "@@ -59,7 +59,7 @@ func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {\nRoleRef: rbac.RoleRef{\nAPIGroup: rbac.GroupName,\nKind: \"ClusterRole\",\n- Name: \"kubernikus:kubelet-bootstrap\",\n+ Name: \"kubernikus:approve-node-client-csr\",\n},\nSubjects: []rbac.Subject{\n{\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
actually bind the correct role
596,240
21.09.2017 10:53:49
-7,200
ff5f2abe719c50777aaa0f8f633a8329bb6ef9fa
provision wormhole tunnel client as rkt/systemd unit
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -70,6 +70,40 @@ systemd:\nRestart=always\nRestartSec=10\n+ [Install]\n+ WantedBy=multi-user.target\n+ - name: wormhole.service\n+ contents: |\n+ [Unit]\n+ Description=Kubernikus Wormhole\n+ Requires=network-online.target\n+ After=network-online.target\n+\n+ [Service]\n+ Slice=machine.slice\n+ ExecStartPre=/usr/bin/rkt fetch --insecure-options=image --pull-policy=new docker://sapcc/kubernikus:latest\n+ ExecStart=/usr/bin/rkt run \\\n+ --inherit-env \\\n+ --net=host \\\n+ --dns=host \\\n+ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=true \\\n+ --mount volume=var-lib-kubelet,target=/var/lib/kubelet \\\n+ --volume var-run-kubernetes,kind=host,source=/var/run/kubernetes,readOnly=true \\\n+ --mount volume=var-run-kubernetes,target=/var/run/kubernetes \\\n+ --volume etc-kubernetes-certs,kind=host,source=/etc/kubernetes/certs,readOnly=true \\\n+ --mount volume=etc-kubernetes-certs,target=/etc/kubernetes/certs \\\n+ docker://sapcc/kubernikus:latest \\\n+ --exec wormhole -- client --kubeconfig=/var/lib/kubelet/kubeconfig\n+ ExecStopPost=/usr/bin/rkt gc --mark-only\n+ KillMode=mixed\n+ Restart=always\n+ RestartSec=10s\n+ - name: wormhole.path\n+ enable: true\n+ contents: |\n+ [Path]\n+ PathExists=/var/lib/kubelet/kubeconfig\n+\n[Install]\nWantedBy=multi-user.target\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
provision wormhole tunnel client as rkt/systemd unit
596,240
21.09.2017 10:55:36
-7,200
6edd954bf5e7cb20f7e4fa0a1bf610b26aae1ae7
same suffix in client and server
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/wormhole/client.go", "new_path": "pkg/cmd/wormhole/client.go", "diff": "@@ -129,7 +129,7 @@ func (o *ClientOptions) Run(c *cobra.Command) error {\n}\nc := strings.Split(url.Hostname(), \".\")\n//Add \"-t\" to first component of hostname\n- c[0] = fmt.Sprintf(\"%s-t\", c[0])\n+ c[0] = fmt.Sprintf(\"%s-wormhole\", c[0])\nserverAddr = fmt.Sprintf(\"%s:%s\", strings.Join(c, \".\"), url.Port())\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
same suffix in client and server
596,240
21.09.2017 10:59:29
-7,200
f74079b3c66b3ee08601f6069d8f64cf4ca6eec3
use full nodename including group info as identifier
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "@@ -151,7 +151,7 @@ func (c *Controller) addNode(key string, node *v1.Node) error {\nglog.Infof(\"Listening to node %v on %v\", key, listener.Addr())\nc.store[key] = listener\n- c.tunnel.AddAddr(listener, nil, node.Spec.ExternalID)\n+ c.tunnel.AddAddr(listener, nil, fmt.Sprintf(\"system:node:%v\", node.Spec.ExternalID))\nif err := c.redoIPTablesSpratz(); err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
use full nodename including group info as identifier
596,240
21.09.2017 11:25:33
-7,200
ffe6baa4c59e63ea58251c603a01a6c153a88cca
print listening address correctly
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/tunnel.go", "new_path": "pkg/wormhole/server/tunnel.go", "diff": "@@ -61,7 +61,7 @@ func (t *Tunnel) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n},\n}\n- glog.Info(\"Starting tunnel server. Listening on %s \", server.Addr)\n+ glog.Infof(\"Starting tunnel server. Listening on %s\", server.Addr)\ngo func() {\nerr := server.ListenAndServeTLS(t.options.Certificate, t.options.PrivateKey)\nif err != http.ErrServerClosed {\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
print listening address correctly
596,240
21.09.2017 12:29:57
-7,200
06b5bcdbabe290ed1bbb1bbf9929a6ff3d2c4166
url port is empty. hard-coded to ssl for now
[ { "change_type": "MODIFY", "old_path": "pkg/cmd/wormhole/client.go", "new_path": "pkg/cmd/wormhole/client.go", "diff": "@@ -130,7 +130,7 @@ func (o *ClientOptions) Run(c *cobra.Command) error {\nc := strings.Split(url.Hostname(), \".\")\n//Add \"-t\" to first component of hostname\nc[0] = fmt.Sprintf(\"%s-wormhole\", c[0])\n- serverAddr = fmt.Sprintf(\"%s:%s\", strings.Join(c, \".\"), url.Port())\n+ serverAddr = fmt.Sprintf(\"%s:%s\", strings.Join(c, \".\"), \"443\")\n}\ncfg := &tunnel.ClientConfig{\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
url port is empty. hard-coded to ssl for now
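Note on the record above: url.Port() returns an empty string whenever the port is not written explicitly in the URL, which is why the client falls back to a hard-coded 443 here. A tiny demonstration plus the shape of a scheme-aware fallback:

    package main

    import (
        "fmt"
        "net/url"
    )

    func portOrDefault(u *url.URL) string {
        if p := u.Port(); p != "" {
            return p
        }
        if u.Scheme == "http" {
            return "80"
        }
        return "443" // https and anything else defaults to TLS here
    }

    func main() {
        u, _ := url.Parse("https://mykluster.example.com")
        fmt.Printf("%q %s\n", u.Port(), portOrDefault(u)) // prints: "" 443
    }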
596,240
21.09.2017 12:30:14
-7,200
db12728145fe64cd1fadf42a8a3ae40bd399aa23
stupid. actually use node name as identifier
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "@@ -148,10 +148,11 @@ func (c *Controller) addNode(key string, node *v1.Node) error {\nreturn err\n}\n- glog.Infof(\"Listening to node %v on %v\", key, listener.Addr())\n+ identifier := fmt.Sprintf(\"system:node:%v\", node.GetName())\n+ glog.Infof(\"Listening to node %v on %v\", identifier, listener.Addr())\nc.store[key] = listener\n- c.tunnel.AddAddr(listener, nil, fmt.Sprintf(\"system:node:%v\", node.Spec.ExternalID))\n+ c.tunnel.AddAddr(listener, nil, identifier)\nif err := c.redoIPTablesSpratz(); err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
stupid. actually use node name as identifier
596,240
21.09.2017 12:59:09
-7,200
798c73e4fcd2b21959361d290814de3330b36b02
we need some host
[ { "change_type": "MODIFY", "old_path": "pkg/wormhole/server/controller.go", "new_path": "pkg/wormhole/server/controller.go", "diff": "@@ -153,6 +153,7 @@ func (c *Controller) addNode(key string, node *v1.Node) error {\nc.store[key] = listener\nc.tunnel.AddAddr(listener, nil, identifier)\n+ c.tunnel.AddHost(identifier, identifier)\nif err := c.redoIPTablesSpratz(); err != nil {\nreturn err\n@@ -281,3 +282,4 @@ func GetNodeHostIP(node *v1.Node) (net.IP, error) {\n}\nreturn nil, fmt.Errorf(\"host IP unknown; known addresses: %v\", addresses)\n}\n+\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
we need some host
596,240
21.09.2017 14:36:29
-7,200
7c86eeca699343785abcff8e544e9104e435a045
prefer InternalIPs over node names
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/api.yaml", "new_path": "charts/kube-master/templates/api.yaml", "diff": "@@ -110,6 +110,7 @@ spec:\n- --token-auth-file=/etc/kubernetes/bootstrap/token.csv\n- --runtime-config=rbac.authorization.k8s.io/v1alpha1,extensions/v1beta1=true,extensions/v1beta1/thirdpartyresources=true\n- --service-cluster-ip-range={{ .Values.serviceCIDR }}\n+ - --kubelet-preferred-address-types=InternalIP\n#Cert Spratz\n- --client-ca-file=/etc/kubernetes/certs/apiserver-clients-and-nodes-ca.pem\n- --etcd-cafile=/etc/kubernetes/certs/etcd-clients-ca.pem\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
prefer InternalIPs over node names
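Note on the record above: the flag changes which node address the apiserver dials for kubelet connections. A simplified picker showing the underlying idea (not the upstream implementation; with the flag pinned to InternalIP only that type is considered):

    package nodeutil

    import (
        "fmt"
        "net"

        "k8s.io/client-go/pkg/api/v1"
    )

    // preferInternalIP walks the node's addresses in preference order and returns
    // the first parseable IP, trying InternalIP before ExternalIP.
    func preferInternalIP(node *v1.Node) (net.IP, error) {
        for _, want := range []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP} {
            for _, addr := range node.Status.Addresses {
                if addr.Type == want {
                    if ip := net.ParseIP(addr.Address); ip != nil {
                        return ip, nil
                    }
                }
            }
        }
        return nil, fmt.Errorf("node %s has no usable IP: %v", node.Name, node.Status.Addresses)
    }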
596,240
21.09.2017 16:28:17
-7,200
320b5feea2fbf637239d84fdf47c4bb74c21905e
first throw of kube-proxy. Fixes
[ { "change_type": "MODIFY", "old_path": "charts/kube-master/templates/secrets.yaml", "new_path": "charts/kube-master/templates/secrets.yaml", "diff": "@@ -20,7 +20,7 @@ data:\ntoken.csv: {{ include (print $.Template.BasePath \"/_token.csv.tpl\") . | b64enc }}\n{{- if empty .Values.certsSecretName }}\n-{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-clients-kubernikus-wormhole.pem\" \"apiserver-clients-kubernikus-wormhole-key.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" \"tls-wormhole.pem\" \"tls-wormhole-key.pem\"}}\n+{{- range list \"apiserver-clients-ca-key.pem\" \"apiserver-clients-ca.pem\" \"apiserver-clients-cluster-admin-key.pem\" \"apiserver-clients-cluster-admin.pem\" \"apiserver-clients-system-kube-controller-manager-key.pem\" \"apiserver-clients-system-kube-controller-manager.pem\" \"apiserver-clients-system-kube-proxy-key.pem\" \"apiserver-clients-system-kube-proxy.pem\" \"apiserver-clients-system-kube-scheduler-key.pem\" \"apiserver-clients-system-kube-scheduler.pem\" \"apiserver-clients-kubernikus-wormhole.pem\" \"apiserver-clients-kubernikus-wormhole-key.pem\" \"apiserver-nodes-ca-key.pem\" \"apiserver-nodes-ca.pem\" \"etcd-clients-apiserver-key.pem\" \"etcd-clients-apiserver.pem\" \"etcd-clients-ca-key.pem\" \"etcd-clients-ca.pem\" \"etcd-peers-ca-key.pem\" \"etcd-peers-ca.pem\" \"kubelet-clients-apiserver-key.pem\" \"kubelet-clients-apiserver.pem\" \"kubelet-clients-ca-key.pem\" \"kubelet-clients-ca.pem\" \"tls-ca-key.pem\" \"tls-ca.pem\" \"tls-apiserver.pem\" \"tls-apiserver-key.pem\" \"tls-wormhole.pem\" \"tls-wormhole-key.pem\"}}\n{{ . }}: {{ required (printf \"missing cert/key: %s\" .) (index $.Values.certs .) 
| b64enc -}}\n{{ end }}\napiserver-clients-and-nodes-ca.pem: {{ printf \"%s%s\" (index .Values.certs \"apiserver-clients-ca.pem\") (index .Values.certs \"apiserver-nodes-ca.pem\") | b64enc }}\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -46,6 +46,8 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\n}{\nTLSCA: string(secret.Data[\"tls-ca.pem\"]),\nKubeletClientsCA: string(secret.Data[\"kubelet-clients-ca.pem\"]),\n+ ApiserverClientsSystemKubeProxy: string(secret.Data[\"apiserver-clients-system-kube-proxy.pem\"]),\n+ ApiserverClientsSystemKubeProxyKey: string(secret.Data[\"apiserver-clients-system-kube-proxy-key.pem\"]),\nApiserverURL: kluster.Spec.KubernikusInfo.ServerURL,\nBootstrapToken: kluster.Spec.KubernikusInfo.BootstrapToken,\nOpenstackAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -103,6 +103,33 @@ systemd:\ncontents: |\n[Path]\nPathExists=/var/lib/kubelet/kubeconfig\n+ [Install]\n+ WantedBy=multi-user.target\n+ - name: kube-proxy.service\n+ enable: true\n+ contents: |\n+ [Unit]\n+ Description=Kube-Proxy\n+ Requires=network-online.target\n+ After=network-online.target\n+\n+ [Service]\n+ Slice=machine.slice\n+ ExecStart=/usr/bin/rkt run \\\n+ --trust-keys-from-https \\\n+ --inherit-env \\\n+ --net=host \\\n+ --dns=host \\\n+ --volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=true \\\n+ --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n+ quay.io/coreos/hyperkube:v1.7.5_coreos.0 \\\n+ --exec=kube-proxy \\\n+ -- \\\n+ --config=/etc/kubernetes/kube-proxy/config\n+ ExecStopPost=/usr/bin/rkt gc --mark-only\n+ KillMode=mixed\n+ Restart=always\n+ RestartSec=10s\n[Install]\nWantedBy=multi-user.target\n@@ -115,6 +142,18 @@ storage:\ncontents:\ninline: |-\n{{ .KubeletClientsCA | indent 10 }}\n+ - path: /etc/kubernetes/certs/apiserver-clients-system-kube-proxy-key.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+{{ .ApiserverClientsSystemKubeProxyKey | indent 10 }}\n+ - path: /etc/kubernetes/certs/apiserver-clients-system-kube-proxy.pem\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+{{ .ApiserverClientsSystemKubeProxy | indent 10 }}\n- path: /etc/kubernetes/certs/tls-ca.pem\nfilesystem: root\nmode: 0644\n@@ -143,6 +182,66 @@ storage:\n- name: local\nuser:\ntoken: {{ .BootstrapToken }}\n+ - path: /etc/kubernetes/kube-proxy/kubeconfig\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+ apiVersion: v1\n+ kind: Config\n+ clusters:\n+ - name: local\n+ cluster:\n+ certificate-authority: /etc/kubernetes/certs/tls-ca.pem\n+ server: {{ .ApiserverURL }}\n+ contexts:\n+ - name: local\n+ context:\n+ cluster: local\n+ user: local\n+ current-context: local\n+ users:\n+ - name: local\n+ user:\n+ client-certificate: /etc/kubernetes/certs/apiserver-clients-system-kube-proxy.pem\n+ client-key: /etc/kubernetes/certs/apiserver-clients-system-kube-proxy-key.pem\n+ - path: /etc/kubernetes/kube-proxy/config\n+ filesystem: root\n+ mode: 0644\n+ contents:\n+ inline: |-\n+ apiVersion: componentconfig/v1alpha1\n+ kind: KubeProxyConfiguration\n+ bindAddress: 0.0.0.0\n+ clientConnection:\n+ acceptContentTypes: \"\"\n+ burst: 10\n+ contentType: application/vnd.kubernetes.protobuf\n+ kubeconfig: \"/etc/kubernetes/kube-proxy/kubeconfig\"\n+ qps: 5\n+ clusterCIDR: \"10.180.127.0/17\"\n+ configSyncPeriod: 15m0s\n+ conntrack:\n+ max: 
0\n+ maxPerCore: 32768\n+ min: 131072\n+ tcpCloseWaitTimeout: 1h0m0s\n+ tcpEstablishedTimeout: 24h0m0s\n+ enableProfiling: false\n+ featureGates: \"\"\n+ healthzBindAddress: 0.0.0.0:10256\n+ hostnameOverride: \"\"\n+ iptables:\n+ masqueradeAll: false\n+ masqueradeBit: 14\n+ minSyncPeriod: 0s\n+ syncPeriod: 30s\n+ metricsBindAddress: 127.0.0.1:10249\n+ mode: \"\"\n+ oomScoreAdj: -999\n+ portRange: \"\"\n+ resourceContainer: /kube-proxy\n+ udpTimeoutMilliseconds: 250ms\n- path: /etc/kubernetes/openstack/openstack.config\nfilesystem: root\nmode: 0644\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
first throw of kube-proxy. Fixes #38
596,240
21.09.2017 16:37:44
-7,200
0dc962c27080eb8e66806b01a4b0a6ff1d07c2a0
pass clusterCIDR
[ { "change_type": "MODIFY", "old_path": "pkg/templates/ignition.go", "new_path": "pkg/templates/ignition.go", "diff": "@@ -34,6 +34,9 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\ndata := struct {\nTLSCA string\nKubeletClientsCA string\n+ ApiserverClientsSystemKubeProxy string\n+ ApiserverClientsSystemKubeProxyKey string\n+ ClusterCIDR string\nApiserverURL string\nBootstrapToken string\nOpenstackAuthURL string\n@@ -48,6 +51,7 @@ func (i *ignition) GenerateNode(kluster *v1.Kluster, client kubernetes.Interface\nKubeletClientsCA: string(secret.Data[\"kubelet-clients-ca.pem\"]),\nApiserverClientsSystemKubeProxy: string(secret.Data[\"apiserver-clients-system-kube-proxy.pem\"]),\nApiserverClientsSystemKubeProxyKey: string(secret.Data[\"apiserver-clients-system-kube-proxy-key.pem\"]),\n+ ClusterCIDR: \"10.180.127.0/17\",\nApiserverURL: kluster.Spec.KubernikusInfo.ServerURL,\nBootstrapToken: kluster.Spec.KubernikusInfo.BootstrapToken,\nOpenstackAuthURL: kluster.Spec.OpenstackInfo.AuthURL,\n" }, { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -219,7 +219,7 @@ storage:\ncontentType: application/vnd.kubernetes.protobuf\nkubeconfig: \"/etc/kubernetes/kube-proxy/kubeconfig\"\nqps: 5\n- clusterCIDR: \"10.180.127.0/17\"\n+ clusterCIDR: \"{{ .ClusterCIDR }}\"\nconfigSyncPeriod: 15m0s\nconntrack:\nmax: 0\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
pass clusterCIDR
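Note on the record above: the node template is a Go template, so the commit simply threads ClusterCIDR through the data struct and {{ .ClusterCIDR }} is replaced at render time. A toy reproduction of that substitution step (the real template also relies on extra helpers such as indent):

    package main

    import (
        "os"
        "text/template"
    )

    const proxySnippet = "apiVersion: componentconfig/v1alpha1\nkind: KubeProxyConfiguration\nclusterCIDR: \"{{ .ClusterCIDR }}\"\n"

    func main() {
        data := struct{ ClusterCIDR string }{ClusterCIDR: "10.180.127.0/17"}
        tmpl := template.Must(template.New("kube-proxy").Parse(proxySnippet))
        if err := tmpl.Execute(os.Stdout, data); err != nil {
            panic(err)
        }
    }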
596,240
21.09.2017 17:44:18
-7,200
1e5e9bf6cb9037d937881d9bdce758ad59a26ca5
start proxy in stage1
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -122,8 +122,10 @@ systemd:\n--dns=host \\\n--volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=true \\\n--mount volume=etc-kubernetes,target=/etc/kubernetes \\\n+ --stage1-from-dir=stage1-fly.aci \\\nquay.io/coreos/hyperkube:v1.7.5_coreos.0 \\\n- --exec=kube-proxy \\\n+ --exec=hyperkube \\\n+ proxy \\\n-- \\\n--config=/etc/kubernetes/kube-proxy/config\nExecStopPost=/usr/bin/rkt gc --mark-only\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
start proxy in stage1
596,240
21.09.2017 17:51:27
-7,200
377d64f1d680fd6eea9108f9bb1e45b3205f82d3
move parameter delimiter to right position
[ { "change_type": "MODIFY", "old_path": "pkg/templates/node.go", "new_path": "pkg/templates/node.go", "diff": "@@ -125,8 +125,8 @@ systemd:\n--stage1-from-dir=stage1-fly.aci \\\nquay.io/coreos/hyperkube:v1.7.5_coreos.0 \\\n--exec=hyperkube \\\n- proxy \\\n-- \\\n+ proxy \\\n--config=/etc/kubernetes/kube-proxy/config\nExecStopPost=/usr/bin/rkt gc --mark-only\nKillMode=mixed\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
move parameter delimiter to right position
596,240
22.09.2017 14:30:12
-7,200
edf38d0d1e11141853dd398d2beda64a6b5f6005
required fields for nodepools. validations for size
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -28,9 +28,9 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nnodePools = []v1.NodePool{}\nfor _, pPool := range params.Body.Spec.NodePools {\nnodePools = append(nodePools, v1.NodePool{\n- Name: pPool.Name,\n- Size: int(pPool.Size),\n- Flavor: pPool.Flavor,\n+ Name: *pPool.Name,\n+ Size: int(*pPool.Size),\n+ Flavor: *pPool.Flavor,\nImage: pPool.Image,\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/update_cluster.go", "new_path": "pkg/api/handlers/update_cluster.go", "diff": "@@ -26,17 +26,17 @@ func (d *updateCluster) Handle(params operations.UpdateClusterParams, principal\nisNewPool := true\nfor _, kPool := range kluster.Spec.NodePools {\n- if pPool.Name == kPool.Name {\n- kPool.Size = int(pPool.Size)\n+ if *pPool.Name == kPool.Name {\n+ kPool.Size = int(*pPool.Size)\nisNewPool = false\n}\n}\nif isNewPool {\nkluster.Spec.NodePools = append(kluster.Spec.NodePools, v1.NodePool{\n- Name: pPool.Name,\n- Size: int(pPool.Size),\n- Flavor: pPool.Flavor,\n+ Name: *pPool.Name,\n+ Size: int(*pPool.Size),\n+ Flavor: *pPool.Flavor,\nImage: pPool.Image,\n})\n}\n@@ -45,7 +45,7 @@ func (d *updateCluster) Handle(params operations.UpdateClusterParams, principal\nfor i, kPool := range kluster.Spec.NodePools {\nisDeleted := true\nfor _, pPool := range params.Body.Spec.NodePools {\n- if pPool.Name == kPool.Name {\n+ if *pPool.Name == kPool.Name {\nisDeleted = false\nbreak\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -44,10 +44,10 @@ func clusterSpecNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterSpecNodePoo\nitems := make([]*models.ClusterSpecNodePoolsItems0, int64(len(k.Spec.NodePools)))\nfor i, nodePool := range k.Spec.NodePools {\nitems[i] = &models.ClusterSpecNodePoolsItems0{\n- Name: nodePool.Name,\n+ Name: &nodePool.Name,\nImage: nodePool.Image,\n- Flavor: nodePool.Flavor,\n- Size: int64(nodePool.Size),\n+ Flavor: &nodePool.Flavor,\n+ Size: &[]int64{int64(nodePool.Size)}[0],\n}\n}\nreturn items\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/models/cluster.go", "new_path": "pkg/api/models/cluster.go", "diff": "@@ -198,41 +198,82 @@ func (m *ClusterSpec) UnmarshalBinary(b []byte) error {\ntype ClusterSpecNodePoolsItems0 struct {\n// flavor\n- Flavor string `json:\"flavor,omitempty\"`\n+ // Required: true\n+ Flavor *string `json:\"flavor\"`\n// image\nImage string `json:\"image,omitempty\"`\n// name\n+ // Required: true\n// Pattern: ^[a-z]([a-z0-9]*)?$\n- Name string `json:\"name,omitempty\"`\n+ Name *string `json:\"name\"`\n// size\n- Size int64 `json:\"size,omitempty\"`\n+ // Required: true\n+ // Maximum: 127\n+ // Minimum: 0\n+ Size *int64 `json:\"size\"`\n}\n// Validate validates this cluster spec node pools items0\nfunc (m *ClusterSpecNodePoolsItems0) Validate(formats strfmt.Registry) error {\nvar res []error\n+ if err := m.validateFlavor(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif err := m.validateName(formats); err != nil {\n// prop\nres = append(res, err)\n}\n+ if err := m.validateSize(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif len(res) > 0 {\nreturn errors.CompositeValidationError(res...)\n}\nreturn nil\n}\n+func (m *ClusterSpecNodePoolsItems0) validateFlavor(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"flavor\", \"body\", 
m.Flavor); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\nfunc (m *ClusterSpecNodePoolsItems0) validateName(formats strfmt.Registry) error {\n- if swag.IsZero(m.Name) { // not required\n+ if err := validate.Required(\"name\", \"body\", m.Name); err != nil {\n+ return err\n+ }\n+\n+ if err := validate.Pattern(\"name\", \"body\", string(*m.Name), `^[a-z]([a-z0-9]*)?$`); err != nil {\n+ return err\n+ }\n+\nreturn nil\n}\n- if err := validate.Pattern(\"name\", \"body\", string(m.Name), `^[a-z]([a-z0-9]*)?$`); err != nil {\n+func (m *ClusterSpecNodePoolsItems0) validateSize(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"size\", \"body\", m.Size); err != nil {\n+ return err\n+ }\n+\n+ if err := validate.MinimumInt(\"size\", \"body\", int64(*m.Size), 0, false); err != nil {\n+ return err\n+ }\n+\n+ if err := validate.MaximumInt(\"size\", \"body\", int64(*m.Size), 127, false); err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/rest/embedded_spec.go", "new_path": "pkg/api/rest/embedded_spec.go", "diff": "@@ -242,6 +242,11 @@ func init() {\n\"nodePools\": {\n\"type\": \"array\",\n\"items\": {\n+ \"required\": [\n+ \"name\",\n+ \"size\",\n+ \"flavor\"\n+ ],\n\"properties\": {\n\"flavor\": {\n\"type\": \"string\"\n@@ -254,7 +259,9 @@ func init() {\n\"pattern\": \"^[a-z]([a-z0-9]*)?$\"\n},\n\"size\": {\n- \"type\": \"integer\"\n+ \"type\": \"integer\",\n+ \"maximum\": 127,\n+ \"minimum\": 0\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "swagger.yml", "new_path": "swagger.yml", "diff": "@@ -12,9 +12,7 @@ securityDefinitions:\ntype: apiKey\nin: header\nname: x-auth-token\n-\nsecurity: []\n-\nresponses:\nerrorResponse:\ndescription: Error\n@@ -26,37 +24,37 @@ paths:\noperationId: ListAPIVersions\nsummary: List available api versions\nresponses:\n- 200:\n+ '200':\ndescription: OK\nschema:\n$ref: '#/definitions/ApiVersions'\n- 401:\n+ '401':\ndescription: Unauthorized\n/api/v1/clusters/:\nget:\noperationId: ListClusters\nsummary: List available clusters\nresponses:\n- 200:\n+ '200':\ndescription: OK\nschema:\ntype: array\nitems:\n$ref: '#/definitions/Cluster'\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nsecurity:\n- keystone: []\npost:\noperationId: CreateCluster\nsummary: Create a cluster\nresponses:\n- 201:\n+ '201':\ndescription: OK\nschema:\n$ref: '#/definitions/Cluster'\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nparameters:\n- name: body\nin: body\n@@ -65,8 +63,7 @@ paths:\n$ref: '#/definitions/Cluster'\nsecurity:\n- keystone: []\n-\n- /api/v1/clusters/{name}:\n+ '/api/v1/clusters/{name}':\nparameters:\n- uniqueItems: true\ntype: string\n@@ -77,39 +74,36 @@ paths:\noperationId: ShowCluster\nsummary: Show the specified cluser\nresponses:\n- 200:\n+ '200':\ndescription: OK\nschema:\n$ref: '#/definitions/Cluster'\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nsecurity:\n- keystone: []\ndelete:\noperationId: TerminateCluster\nsummary: Terminate the specified cluster\nresponses:\n- 202:\n+ '202':\ndescription: OK\n- # go-openapi always sends a content-type header also on empty response bodys\n- # jquery and other tools barf on this as the try to parse an empty string\n- # which is not valid json\nschema:\ntype: object\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nsecurity:\n- keystone: []\nput:\noperationId: UpdateCluster\nsummary: Update the specified cluser\nresponses:\n- 
200:\n+ '200':\ndescription: OK\nschema:\n$ref: '#/definitions/Cluster'\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nsecurity:\n- keystone: []\nparameters:\n@@ -118,7 +112,7 @@ paths:\nrequired: true\nschema:\n$ref: '#/definitions/Cluster'\n- /api/v1/clusters/{name}/credentials:\n+ '/api/v1/clusters/{name}/credentials':\nparameters:\n- uniqueItems: true\ntype: string\n@@ -129,15 +123,14 @@ paths:\noperationId: GetClusterCredentials\nsummary: Get user specific credentials to access the cluster\nresponses:\n- 200:\n+ '200':\ndescription: OK\nschema:\n- $ref: \"#/definitions/Credentials\"\n+ $ref: '#/definitions/Credentials'\ndefault:\n- $ref: \"#/responses/errorResponse\"\n+ $ref: '#/responses/errorResponse'\nsecurity:\n- keystone: []\n-\ndefinitions:\nApiVersions:\nrequired:\n@@ -162,12 +155,18 @@ definitions:\nnodePools:\ntype: array\nitems:\n+ required:\n+ - name\n+ - size\n+ - flavor\nproperties:\nname:\ntype: string\npattern: '^[a-z]([a-z0-9]*)?$'\nsize:\ntype: integer\n+ maximum: 127\n+ minimum: 0\nflavor:\ntype: string\nimage:\n@@ -187,13 +186,11 @@ definitions:\ntype: integer\nready:\ntype: integer\n-\nCredentials:\ntype: object\nproperties:\nkubeconfig:\ntype: string\n-\nPrincipal:\ntype: object\nproperties:\n@@ -212,8 +209,9 @@ definitions:\nitems:\ntype: string\nerror:\n- description: |\n- the error model is a model for all the error responses coming from Kubernikus\n+ description: >\n+ the error model is a model for all the error responses coming from\n+ Kubernikus\ntype: object\nrequired:\n- message\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
required fields for nodepools. validations for size
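Note on the record above: with the swagger fields turned into pointers, callers construct them via go-openapi's swag helpers, and Validate now enforces the required fields and size bounds. A hedged usage sketch against the generated model:

    package main

    import (
        "fmt"

        "github.com/go-openapi/strfmt"
        "github.com/go-openapi/swag"

        "github.com/sapcc/kubernikus/pkg/api/models"
    )

    func main() {
        pool := models.ClusterSpecNodePoolsItems0{
            Name:   swag.String("payload"),
            Flavor: swag.String("m1.small"),
            Size:   swag.Int64(128), // deliberately one above the new maximum of 127
        }
        if err := pool.Validate(strfmt.Default); err != nil {
            fmt.Println("validation error:", err) // size exceeds the maximum
        }
    }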
596,240
22.09.2017 14:54:45
-7,200
d4904d1888b790f498b2b7b3714a33e409efa456
enforce uniqueness for nodepool names
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -4,6 +4,7 @@ import (\n\"fmt\"\n\"github.com/go-openapi/runtime/middleware\"\n+ \"github.com/go-openapi/validate\"\n\"github.com/golang/glog\"\n\"github.com/sapcc/kubernikus/pkg/api\"\n\"github.com/sapcc/kubernikus/pkg/api/models\"\n@@ -23,6 +24,11 @@ type createCluster struct {\nfunc (d *createCluster) Handle(params operations.CreateClusterParams, principal *models.Principal) middleware.Responder {\nname := *params.Body.Name\n+\n+ if err := validate.UniqueItems(\"name\", \"body\", params.Body.Spec.NodePools); err != nil {\n+ return NewErrorResponse(&operations.CreateClusterDefault{}, int(err.Code()), err.Error())\n+ }\n+\nvar nodePools []v1.NodePool\nif params.Body.Spec != nil && params.Body.Spec.NodePools != nil {\nnodePools = []v1.NodePool{}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
enforce uniqueness for nodepool names
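Here the create handler rejects requests whose node pool list contains duplicates by handing the whole pool slice to go-openapi's UniqueItems check. Below is a minimal sketch of the stricter, name-keyed variant; `poolNamesUnique` is a hypothetical helper for illustration, not code from the handler.

```go
package main

import "fmt"

// poolNamesUnique reports whether every node pool name occurs only once.
// Note that the handler in the commit above passes the whole pool objects to
// the validator, so two pools that differ only in size would still be
// accepted; keying on the name, as below, is the stricter variant.
func poolNamesUnique(names []string) error {
	seen := make(map[string]struct{}, len(names))
	for _, n := range names {
		if _, dup := seen[n]; dup {
			return fmt.Errorf("duplicate node pool name %q", n)
		}
		seen[n] = struct{}{}
	}
	return nil
}

func main() {
	fmt.Println(poolNamesUnique([]string{"small", "gpu"}))          // <nil>
	fmt.Println(poolNamesUnique([]string{"small", "gpu", "small"})) // duplicate node pool name "small"
}
```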
596,240
22.09.2017 17:04:52
-7,200
1b9bd75220222f0091d587ca0d3756a85bc7a272
sync cluster and kluster info and kluster state status state
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -53,8 +53,10 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nNodePools: nodePools,\n},\nStatus: v1.KlusterStatus{\n+ Kluster: v1.KlusterInfo{\nState: v1.KlusterPending,\n},\n+ },\n}\nkluster, err := d.Kubernikus.Kubernikus().Klusters(d.Namespace).Create(kluster)\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/terminate_cluster.go", "new_path": "pkg/api/handlers/terminate_cluster.go", "diff": "@@ -21,8 +21,8 @@ type terminateCluster struct {\nfunc (d *terminateCluster) Handle(params operations.TerminateClusterParams, principal *models.Principal) middleware.Responder {\n_, err := editCluster(d.Kubernikus.Kubernikus().Klusters(d.Namespace), principal, params.Name, func(kluster *v1.Kluster) {\n- kluster.Status.State = v1.KlusterTerminating\n- kluster.Status.Message = \"Cluster terminating\"\n+ kluster.Status.Kluster.State = v1.KlusterTerminating\n+ kluster.Status.Kluster.Message = \"Cluster terminating\"\n})\nif err != nil {\nif apierrors.IsNotFound(err) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -55,11 +55,13 @@ func clusterSpecNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterSpecNodePoo\nfunc clusterStatusNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterStatusNodePoolsItems0 {\nitems := make([]*models.ClusterStatusNodePoolsItems0, int64(len(k.Spec.NodePools)))\n- for i, nodePool := range k.Spec.NodePools {\n+ for i, nodePool := range k.Status.NodePools {\nitems[i] = &models.ClusterStatusNodePoolsItems0{\nName: nodePool.Name,\nSize: int64(nodePool.Size),\n- Ready: int64(nodePool.Size), // TODO\n+ Running: int64(nodePool.Running),\n+ Healthy: int64(nodePool.Healthy),\n+ Schedulable: int64(nodePool.Schedulable),\n}\n}\nreturn items\n@@ -72,7 +74,10 @@ func clusterModelFromTPR(k *v1.Kluster) *models.Cluster {\nNodePools: clusterSpecNodePoolItemsFromTPR(k),\n},\nStatus: &models.ClusterStatus{\n- Kluster: string(k.Status.State),\n+ Kluster: &models.ClusterStatusKluster{\n+ State: string(k.Status.Kluster.State),\n+ Message: k.Status.Kluster.Message,\n+ },\nNodePools: clusterStatusNodePoolItemsFromTPR(k),\n},\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/models/cluster.go", "new_path": "pkg/api/models/cluster.go", "diff": "@@ -302,8 +302,8 @@ func (m *ClusterSpecNodePoolsItems0) UnmarshalBinary(b []byte) error {\n// swagger:model ClusterStatus\ntype ClusterStatus struct {\n- // status of the cluster\n- Kluster string `json:\"kluster,omitempty\"`\n+ // kluster\n+ Kluster *ClusterStatusKluster `json:\"kluster,omitempty\"`\n// node pools\nNodePools []*ClusterStatusNodePoolsItems0 `json:\"nodePools\"`\n@@ -313,6 +313,11 @@ type ClusterStatus struct {\nfunc (m *ClusterStatus) Validate(formats strfmt.Registry) error {\nvar res []error\n+ if err := m.validateKluster(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif err := m.validateNodePools(formats); err != nil {\n// prop\nres = append(res, err)\n@@ -324,6 +329,25 @@ func (m *ClusterStatus) Validate(formats strfmt.Registry) error {\nreturn nil\n}\n+func (m *ClusterStatus) validateKluster(formats strfmt.Registry) error {\n+\n+ if swag.IsZero(m.Kluster) { // not required\n+ return nil\n+ }\n+\n+ if m.Kluster != nil {\n+\n+ if err := m.Kluster.Validate(formats); err != nil {\n+ if ve, ok := err.(*errors.Validation); ok {\n+ return 
ve.ValidateName(\"status\" + \".\" + \"kluster\")\n+ }\n+ return err\n+ }\n+ }\n+\n+ return nil\n+}\n+\nfunc (m *ClusterStatus) validateNodePools(formats strfmt.Registry) error {\nif swag.IsZero(m.NodePools) { // not required\n@@ -369,15 +393,60 @@ func (m *ClusterStatus) UnmarshalBinary(b []byte) error {\nreturn nil\n}\n+// ClusterStatusKluster cluster status kluster\n+// swagger:model ClusterStatusKluster\n+type ClusterStatusKluster struct {\n+\n+ // message\n+ Message string `json:\"message,omitempty\"`\n+\n+ // status of the cluster\n+ State string `json:\"state,omitempty\"`\n+}\n+\n+// Validate validates this cluster status kluster\n+func (m *ClusterStatusKluster) Validate(formats strfmt.Registry) error {\n+ var res []error\n+\n+ if len(res) > 0 {\n+ return errors.CompositeValidationError(res...)\n+ }\n+ return nil\n+}\n+\n+// MarshalBinary interface implementation\n+func (m *ClusterStatusKluster) MarshalBinary() ([]byte, error) {\n+ if m == nil {\n+ return nil, nil\n+ }\n+ return swag.WriteJSON(m)\n+}\n+\n+// UnmarshalBinary interface implementation\n+func (m *ClusterStatusKluster) UnmarshalBinary(b []byte) error {\n+ var res ClusterStatusKluster\n+ if err := swag.ReadJSON(b, &res); err != nil {\n+ return err\n+ }\n+ *m = res\n+ return nil\n+}\n+\n// ClusterStatusNodePoolsItems0 cluster status node pools items0\n// swagger:model ClusterStatusNodePoolsItems0\ntype ClusterStatusNodePoolsItems0 struct {\n+ // healthy\n+ Healthy int64 `json:\"healthy,omitempty\"`\n+\n// name\nName string `json:\"name,omitempty\"`\n- // ready\n- Ready int64 `json:\"ready,omitempty\"`\n+ // running\n+ Running int64 `json:\"running,omitempty\"`\n+\n+ // schedulable\n+ Schedulable int64 `json:\"schedulable,omitempty\"`\n// size\nSize int64 `json:\"size,omitempty\"`\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/rest/embedded_spec.go", "new_path": "pkg/api/rest/embedded_spec.go", "diff": "@@ -271,17 +271,30 @@ func init() {\n\"status\": {\n\"properties\": {\n\"kluster\": {\n+ \"properties\": {\n+ \"message\": {\n+ \"type\": \"string\"\n+ },\n+ \"state\": {\n\"description\": \"status of the cluster\",\n\"type\": \"string\"\n+ }\n+ }\n},\n\"nodePools\": {\n\"type\": \"array\",\n\"items\": {\n\"properties\": {\n+ \"healthy\": {\n+ \"type\": \"integer\"\n+ },\n\"name\": {\n\"type\": \"string\"\n},\n- \"ready\": {\n+ \"running\": {\n+ \"type\": \"integer\"\n+ },\n+ \"schedulable\": {\n\"type\": \"integer\"\n},\n\"size\": {\n" }, { "change_type": "MODIFY", "old_path": "pkg/apis/kubernikus/v1/kluster.go", "new_path": "pkg/apis/kubernikus/v1/kluster.go", "diff": "@@ -52,10 +52,23 @@ const (\n)\ntype KlusterStatus struct {\n+ Kluster KlusterInfo `json:\"kluster\"`\n+ NodePools []NodePoolInfo `json:\"nodePools,omitempty\"`\n+}\n+\n+type KlusterInfo struct {\nState KlusterState `json:\"state,omitempty\"`\nMessage string `json:\"message,omitempty\"`\n}\n+type NodePoolInfo struct {\n+ Name string `json:\"name\"`\n+ Size int `json:size`\n+ Running int `json:running`\n+ Healthy int `json:healthy`\n+ Schedulable int `json:schedulable`\n+}\n+\n// +genclient\ntype Kluster struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/ground.go", "new_path": "pkg/controller/ground.go", "diff": "@@ -124,9 +124,9 @@ func (op *GroundControl) handler(key string) error {\nglog.Infof(\"TPR of kluster %s deleted\", key)\n} else {\ntpr := obj.(*v1.Kluster)\n- glog.V(5).Infof(\"Handling kluster %v in state %q\", tpr.Name, tpr.Status.State)\n+ glog.V(5).Infof(\"Handling kluster %v in state %q\", tpr.Name, 
tpr.Status.Kluster.State)\n- switch state := tpr.Status.State; state {\n+ switch state := tpr.Status.Kluster.State; state {\ncase v1.KlusterPending:\n{\nif op.requiresOpenstackInfo(tpr) {\n@@ -258,8 +258,8 @@ func (op *GroundControl) updateStatus(tpr *v1.Kluster, state v1.KlusterState, me\nif err != nil {\nreturn err\n}\n- tpr.Status.Message = message\n- tpr.Status.State = state\n+ tpr.Status.Kluster.Message = message\n+ tpr.Status.Kluster.State = state\n_, err = op.Clients.Kubernikus.Kubernikus().Klusters(tpr.Namespace).Update(tpr)\nreturn err\n" }, { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -9,6 +9,7 @@ import (\n\"github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1\"\n\"github.com/sapcc/kubernikus/pkg/client/openstack\"\n\"github.com/sapcc/kubernikus/pkg/templates\"\n+ metav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\"k8s.io/apimachinery/pkg/labels\"\n\"k8s.io/apimachinery/pkg/util/wait\"\n\"k8s.io/client-go/tools/cache\"\n@@ -128,7 +129,7 @@ func (launchctl *LaunchControl) reconcile(key string) error {\nkluster := obj.(*v1.Kluster)\nglog.V(5).Infof(\"[%v] Reconciling\", kluster.Name)\n- if !(kluster.Status.State == v1.KlusterReady || kluster.Status.State == v1.KlusterTerminating) {\n+ if !(kluster.Status.Kluster.State == v1.KlusterReady || kluster.Status.Kluster.State == v1.KlusterTerminating) {\nreturn fmt.Errorf(\"[%v] Kluster is not yet ready. Requeuing.\", kluster.Name)\n}\n@@ -148,7 +149,7 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n}\n- if kluster.Status.State == v1.KlusterTerminating {\n+ if kluster.Status.Kluster.State == v1.KlusterTerminating {\nif toBeTerminated(nodes) > 0 {\nglog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\nfor _, node := range nodes {\n@@ -162,17 +163,31 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn nil\n}\n- ready := ready(nodes)\n+ running := running(nodes)\n+ starting := starting(nodes)\n+ ready := running + starting\n+\n+ info := v1.NodePoolInfo{\n+ Name: pool.Name,\n+ Size: pool.Size,\n+ Running: running + starting, // Should be running only\n+ Healthy: running,\n+ Schedulable: running,\n+ }\n+\n+ if err = launchctl.updateNodePoolStatus(kluster, info); err != nil {\n+ return err\n+ }\nswitch {\ncase ready < pool.Size:\n- glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v%v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, starting, running, pool.Size)\nreturn launchctl.createNode(kluster, pool)\ncase ready > pool.Size:\n- glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, pool.Name, starting, running, pool.Size)\nreturn launchctl.terminateNode(kluster, nodes[0].ID)\ncase ready == pool.Size:\n- glog.V(3).Infof(\"[%v] Pool %v: Running %v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, ready, pool.Size)\n+ glog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. All good. 
Doing nothing.\", kluster.Name, pool.Name, pool.Name, starting, running, pool.Size)\n}\nreturn nil\n@@ -207,6 +222,23 @@ func (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, id string) er\nreturn nil\n}\n+func (launchctl *LaunchControl) updateNodePoolStatus(kluster *v1.Kluster, newInfo v1.NodePoolInfo) error {\n+ copy, err := launchctl.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{})\n+ if err != nil {\n+ return err\n+ }\n+\n+ for _, curInfo := range copy.Status.NodePools {\n+ if curInfo.Name == newInfo.Name {\n+ curInfo = newInfo\n+ _, err = launchctl.Clients.Kubernikus.Kubernikus().Klusters(copy.Namespace).Update(copy)\n+ return err\n+ }\n+ }\n+\n+ return fmt.Errorf(\"Couldn't update Nodepool %v. It's not part of kluster %v.\", newInfo.Name, copy.Name)\n+}\n+\nfunc (launchctl *LaunchControl) handleErr(err error, key interface{}) {\nif err == nil {\n// Forget about the #AddRateLimited history of the key on every successful synchronization.\n@@ -230,15 +262,26 @@ func (launchctl *LaunchControl) handleErr(err error, key interface{}) {\nglog.V(5).Infof(\"[%v] Dropping out of the queue. Too many retries...\", key)\n}\n-func ready(nodes []openstack.Node) int {\n- ready := 0\n+func starting(nodes []openstack.Node) int {\n+ count := 0\n+ for _, n := range nodes {\n+ if n.Starting() {\n+ count = count + 1\n+ }\n+ }\n+\n+ return count\n+}\n+\n+func running(nodes []openstack.Node) int {\n+ count := 0\nfor _, n := range nodes {\n- if n.Running() || n.Starting() {\n- ready = ready + 1\n+ if n.Running() {\n+ count = count + 1\n}\n}\n- return ready\n+ return count\n}\nfunc toBeTerminated(nodes []openstack.Node) int {\n" }, { "change_type": "MODIFY", "old_path": "swagger.yml", "new_path": "swagger.yml", "diff": "@@ -174,8 +174,12 @@ definitions:\nstatus:\nproperties:\nkluster:\n+ properties:\n+ state:\ndescription: status of the cluster\ntype: string\n+ message:\n+ type: string\nnodePools:\ntype: array\nitems:\n@@ -184,7 +188,11 @@ definitions:\ntype: string\nsize:\ntype: integer\n- ready:\n+ running:\n+ type: integer\n+ healthy:\n+ type: integer\n+ schedulable:\ntype: integer\nCredentials:\ntype: object\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
sync cluster and kluster info and kluster state status state
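This commit splits KlusterStatus into an aggregate KlusterInfo plus a list of per-pool NodePoolInfo entries and threads the new fields through the API models and the launch controller. Worth noting: several of the new NodePoolInfo struct tags in the recorded diff are written without quotes (e.g. `json:size`), which the encoding/json tag parser cannot read, so those fields would fall back to their exported Go field names when serialized. A sketch of the presumably intended shape with well-formed tags (State is a plain string here where the real type uses the KlusterState alias):

```go
package sketch

// KlusterInfo carries the aggregate cluster state plus a human readable message.
type KlusterInfo struct {
	State   string `json:"state,omitempty"`
	Message string `json:"message,omitempty"`
}

// NodePoolInfo reports per-pool progress; the quoted tags below are what the
// unquoted tags in the recorded diff presumably intended.
type NodePoolInfo struct {
	Name        string `json:"name"`
	Size        int    `json:"size"`
	Running     int    `json:"running"`
	Healthy     int    `json:"healthy"`
	Schedulable int    `json:"schedulable"`
}

// KlusterStatus combines both, mirroring the swagger status object.
type KlusterStatus struct {
	Kluster   KlusterInfo    `json:"kluster"`
	NodePools []NodePoolInfo `json:"nodePools,omitempty"`
}
```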
596,240
22.09.2017 17:27:45
-7,200
c1a748825520e5cd7040e86b9f4df81cc2137fd2
make it that nodepoolinfos are not null
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -30,8 +30,10 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\n}\nvar nodePools []v1.NodePool\n+ var nodePoolInfos []v1.NodePoolInfo\nif params.Body.Spec != nil && params.Body.Spec.NodePools != nil {\nnodePools = []v1.NodePool{}\n+ nodePoolInfos = []v1.NodePoolInfo{}\nfor _, pPool := range params.Body.Spec.NodePools {\nnodePools = append(nodePools, v1.NodePool{\nName: *pPool.Name,\n@@ -39,6 +41,14 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nFlavor: *pPool.Flavor,\nImage: pPool.Image,\n})\n+\n+ nodePoolInfos = append(nodePoolInfos, v1.NodePoolInfo{\n+ Name: *pPool.Name,\n+ Size: int(*pPool.Size),\n+ Running: 0,\n+ Healthy: 0,\n+ Schedulable: 0,\n+ })\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -54,7 +54,7 @@ func clusterSpecNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterSpecNodePoo\n}\nfunc clusterStatusNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterStatusNodePoolsItems0 {\n- items := make([]*models.ClusterStatusNodePoolsItems0, int64(len(k.Spec.NodePools)))\n+ items := make([]*models.ClusterStatusNodePoolsItems0, int64(len(k.Status.NodePools)))\nfor i, nodePool := range k.Status.NodePools {\nitems[i] = &models.ClusterStatusNodePoolsItems0{\nName: nodePool.Name,\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
make it that nodepoolinfos are not null
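The fix above pre-populates Status.NodePools with an empty slice so that a fresh cluster reports an empty list rather than null, and sizes the API status array from Status.NodePools instead of Spec.NodePools. The null-versus-empty distinction is purely encoding/json behaviour, illustrated below with a throwaway status type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type status struct {
	NodePools []string `json:"nodePools"`
}

func main() {
	var nilSlice status                         // NodePools is nil
	emptySlice := status{NodePools: []string{}} // NodePools is non-nil but empty

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a)) // {"nodePools":null}
	fmt.Println(string(b)) // {"nodePools":[]}
}
```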
596,240
22.09.2017 17:37:40
-7,200
e30abeafd29556f270412214c20b1891e920e4c1
actually stick nodepools into state :/
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -66,6 +66,7 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nKluster: v1.KlusterInfo{\nState: v1.KlusterPending,\n},\n+ NodePools: nodePoolInfos,\n},\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
actually stick nodepools into state :/
596,240
22.09.2017 18:23:50
-7,200
003f03098bf9094d94676f99e6be84d10f6a948c
learning golang is hard. pointers and magic
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -222,15 +222,15 @@ func (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, id string) er\nreturn nil\n}\n-func (launchctl *LaunchControl) updateNodePoolStatus(kluster *v1.Kluster, newInfo v1.NodePoolInfo) error {\n+func (launchctl *LaunchControl) updateNodePoolStatus(tpr *v1.Kluster, newInfo v1.NodePoolInfo) error {\ncopy, err := launchctl.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{})\nif err != nil {\nreturn err\n}\n- for _, curInfo := range copy.Status.NodePools {\n+ for i, curInfo := range copy.Status.NodePools {\nif curInfo.Name == newInfo.Name {\n- curInfo = newInfo\n+ copy.Status.NodePools[i] = newInfo\n_, err = launchctl.Clients.Kubernikus.Kubernikus().Klusters(copy.Namespace).Update(copy)\nreturn err\n}\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
learning golang is hard. pointers and magic
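The "pointers and magic" fix is the classic range-variable gotcha: `curInfo` is a copy of the slice element, so assigning to it never reaches `copy.Status.NodePools`; writing through the index does. (As captured, the hunk also renames the parameter to `tpr` while the body still reads `kluster`, so the diff would not compile verbatim on its own.) A minimal reproduction of the gotcha and the fix:

```go
package main

import "fmt"

type info struct {
	Name    string
	Running int
}

func main() {
	pools := []info{{Name: "small"}, {Name: "gpu"}}

	// Broken: curInfo is a copy of the element, so the update is lost.
	for _, curInfo := range pools {
		if curInfo.Name == "gpu" {
			curInfo.Running = 3
		}
	}
	fmt.Println(pools) // [{small 0} {gpu 0}]

	// Fixed: index into the slice, as the commit above does.
	for i := range pools {
		if pools[i].Name == "gpu" {
			pools[i].Running = 3
		}
	}
	fmt.Println(pools) // [{small 0} {gpu 3}]
}
```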
596,240
22.09.2017 18:24:02
-7,200
ce4ea6137bad8f136d0f7e2cb5dcc026f4cb6867
default to coreos image
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/create_cluster.go", "new_path": "pkg/api/handlers/create_cluster.go", "diff": "@@ -39,7 +39,7 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal\nName: *pPool.Name,\nSize: int(*pPool.Size),\nFlavor: *pPool.Flavor,\n- Image: pPool.Image,\n+ Image: \"coreos-stable-amd64\",\n})\nnodePoolInfos = append(nodePoolInfos, v1.NodePoolInfo{\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
default to coreos image
596,240
22.09.2017 19:35:10
-7,200
677d798f359e35c87e43c023e187f0b231b207ad
updates nodepoolinfo while terminating the cluster
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -149,20 +149,6 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n}\n- if kluster.Status.Kluster.State == v1.KlusterTerminating {\n- if toBeTerminated(nodes) > 0 {\n- glog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\n- for _, node := range nodes {\n- err := launchctl.terminateNode(kluster, node.ID)\n- if err != nil {\n- return err\n- }\n- }\n- }\n-\n- return nil\n- }\n-\nrunning := running(nodes)\nstarting := starting(nodes)\nready := running + starting\n@@ -179,6 +165,20 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn err\n}\n+ if kluster.Status.Kluster.State == v1.KlusterTerminating {\n+ if toBeTerminated(nodes) > 0 {\n+ glog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\n+ for _, node := range nodes {\n+ err := launchctl.terminateNode(kluster, node.ID)\n+ if err != nil {\n+ return err\n+ }\n+ }\n+ }\n+\n+ return nil\n+ }\n+\nswitch {\ncase ready < pool.Size:\nglog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, starting, running, pool.Size)\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
updates nodepoolinfo while terminating the cluster
596,240
22.09.2017 20:17:59
-7,200
537de8367d833544e1fe13b19607e9b0e06d08da
an integer that is 0 is omitted from the response. the hell? need to make them required so they turn into pointers. super cool way to cast an int64 to *int64 by the way, golang
[ { "change_type": "MODIFY", "old_path": "pkg/api/handlers/util.go", "new_path": "pkg/api/handlers/util.go", "diff": "@@ -57,11 +57,11 @@ func clusterStatusNodePoolItemsFromTPR(k *v1.Kluster) []*models.ClusterStatusNod\nitems := make([]*models.ClusterStatusNodePoolsItems0, int64(len(k.Status.NodePools)))\nfor i, nodePool := range k.Status.NodePools {\nitems[i] = &models.ClusterStatusNodePoolsItems0{\n- Name: nodePool.Name,\n- Size: int64(nodePool.Size),\n- Running: int64(nodePool.Running),\n- Healthy: int64(nodePool.Healthy),\n- Schedulable: int64(nodePool.Schedulable),\n+ Name: &nodePool.Name,\n+ Size: &[]int64{int64(nodePool.Size)}[0],\n+ Running: &[]int64{int64(nodePool.Running)}[0],\n+ Healthy: &[]int64{int64(nodePool.Healthy)}[0],\n+ Schedulable: &[]int64{int64(nodePool.Schedulable)}[0],\n}\n}\nreturn items\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/models/cluster.go", "new_path": "pkg/api/models/cluster.go", "diff": "@@ -437,31 +437,106 @@ func (m *ClusterStatusKluster) UnmarshalBinary(b []byte) error {\ntype ClusterStatusNodePoolsItems0 struct {\n// healthy\n- Healthy int64 `json:\"healthy,omitempty\"`\n+ // Required: true\n+ Healthy *int64 `json:\"healthy\"`\n// name\n- Name string `json:\"name,omitempty\"`\n+ // Required: true\n+ Name *string `json:\"name\"`\n// running\n- Running int64 `json:\"running,omitempty\"`\n+ // Required: true\n+ Running *int64 `json:\"running\"`\n// schedulable\n- Schedulable int64 `json:\"schedulable,omitempty\"`\n+ // Required: true\n+ Schedulable *int64 `json:\"schedulable\"`\n// size\n- Size int64 `json:\"size,omitempty\"`\n+ // Required: true\n+ Size *int64 `json:\"size\"`\n}\n// Validate validates this cluster status node pools items0\nfunc (m *ClusterStatusNodePoolsItems0) Validate(formats strfmt.Registry) error {\nvar res []error\n+ if err := m.validateHealthy(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if err := m.validateName(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if err := m.validateRunning(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if err := m.validateSchedulable(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\n+ if err := m.validateSize(formats); err != nil {\n+ // prop\n+ res = append(res, err)\n+ }\n+\nif len(res) > 0 {\nreturn errors.CompositeValidationError(res...)\n}\nreturn nil\n}\n+func (m *ClusterStatusNodePoolsItems0) validateHealthy(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"healthy\", \"body\", m.Healthy); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func (m *ClusterStatusNodePoolsItems0) validateName(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"name\", \"body\", m.Name); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func (m *ClusterStatusNodePoolsItems0) validateRunning(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"running\", \"body\", m.Running); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func (m *ClusterStatusNodePoolsItems0) validateSchedulable(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"schedulable\", \"body\", m.Schedulable); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func (m *ClusterStatusNodePoolsItems0) validateSize(formats strfmt.Registry) error {\n+\n+ if err := validate.Required(\"size\", \"body\", m.Size); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n// MarshalBinary interface implementation\nfunc (m *ClusterStatusNodePoolsItems0) 
MarshalBinary() ([]byte, error) {\nif m == nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/api/rest/embedded_spec.go", "new_path": "pkg/api/rest/embedded_spec.go", "diff": "@@ -284,6 +284,13 @@ func init() {\n\"nodePools\": {\n\"type\": \"array\",\n\"items\": {\n+ \"required\": [\n+ \"name\",\n+ \"size\",\n+ \"running\",\n+ \"healthy\",\n+ \"schedulable\"\n+ ],\n\"properties\": {\n\"healthy\": {\n\"type\": \"integer\"\n" }, { "change_type": "MODIFY", "old_path": "swagger.yml", "new_path": "swagger.yml", "diff": "@@ -183,6 +183,12 @@ definitions:\nnodePools:\ntype: array\nitems:\n+ required:\n+ - name\n+ - size\n+ - running\n+ - healthy\n+ - schedulable\nproperties:\nname:\ntype: string\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
an integer that is 0 is omitted from the response. the hell? need to make them required so they turn into pointers. super cool way to cast an int64 to *int64 by the way, golang
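The underlying issue in this commit is that `omitempty` drops zero-valued integers, so a pool with zero running nodes would vanish from the API response; marking the fields required makes the generated models use pointers, and `&[]int64{v}[0]` is a one-expression way to take the address of a computed value. The sketch below contrasts the two encodings; `int64Ptr` is the conventional helper alternative and is illustrative, not part of the codebase.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmitempty struct {
	Running int64 `json:"running,omitempty"`
}

type withPointer struct {
	Running *int64 `json:"running"`
}

// int64Ptr is the usual readable alternative to &[]int64{v}[0].
func int64Ptr(v int64) *int64 { return &v }

func main() {
	a, _ := json.Marshal(withOmitempty{Running: 0})
	fmt.Println(string(a)) // {} (the zero count is silently dropped by omitempty)

	b, _ := json.Marshal(withPointer{Running: &[]int64{0}[0]}) // the idiom from the commit
	fmt.Println(string(b))                                     // {"running":0}

	c, _ := json.Marshal(withPointer{Running: int64Ptr(0)})
	fmt.Println(string(c)) // {"running":0}
}
```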
596,240
22.09.2017 20:19:17
-7,200
b33b681b349d1a32bf5e90ed2f3881a247f12e46
remove endless update loop. only update on actual change. remove double queuing. why does that even matter?
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -149,27 +149,11 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n}\n- running := running(nodes)\n- starting := starting(nodes)\n- ready := running + starting\n-\n- info := v1.NodePoolInfo{\n- Name: pool.Name,\n- Size: pool.Size,\n- Running: running + starting, // Should be running only\n- Healthy: running,\n- Schedulable: running,\n- }\n-\n- if err = launchctl.updateNodePoolStatus(kluster, info); err != nil {\n- return err\n- }\n-\nif kluster.Status.Kluster.State == v1.KlusterTerminating {\nif toBeTerminated(nodes) > 0 {\nglog.V(3).Infof(\"[%v] Kluster is terminating. Terminating Nodes for Pool %v.\", kluster.Name, pool.Name)\nfor _, node := range nodes {\n- err := launchctl.terminateNode(kluster, node.ID)\n+ err := launchctl.terminateNode(kluster, pool, node.ID)\nif err != nil {\nreturn err\n}\n@@ -179,13 +163,17 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nreturn nil\n}\n+ running := running(nodes)\n+ starting := starting(nodes)\n+ ready := running + starting\n+\nswitch {\ncase ready < pool.Size:\nglog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. Too few nodes. Need to spawn more.\", kluster.Name, pool.Name, starting, running, pool.Size)\nreturn launchctl.createNode(kluster, pool)\ncase ready > pool.Size:\nglog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. Too many nodes. Need to delete some.\", kluster.Name, pool.Name, starting, running, pool.Size)\n- return launchctl.terminateNode(kluster, nodes[0].ID)\n+ return launchctl.terminateNode(kluster, pool, nodes[0].ID)\ncase ready == pool.Size:\nglog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. All good. 
Doing nothing.\", kluster.Name, pool.Name, starting, running, pool.Size)\n}\n@@ -207,22 +195,44 @@ func (launchctl *LaunchControl) createNode(kluster *v1.Kluster, pool *v1.NodePoo\n}\nglog.V(2).Infof(\"[%v] Pool %v: Created node %v.\", kluster.Name, pool.Name, id)\n+ if err = launchctl.updateNodePoolStatus(kluster, pool); err != nil {\n+ return err\n+ }\n- launchctl.requeue(kluster)\nreturn nil\n}\n-func (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, id string) error {\n+func (launchctl *LaunchControl) terminateNode(kluster *v1.Kluster, pool *v1.NodePool, id string) error {\nerr := launchctl.Clients.Openstack.DeleteNode(kluster, id)\nif err != nil {\nreturn err\n}\n- launchctl.requeue(kluster)\n+ glog.V(2).Infof(\"[%v] Pool %v: Deleted node %v.\", kluster.Name, pool.Name, id)\n+ if err = launchctl.updateNodePoolStatus(kluster, pool); err != nil {\n+ return err\n+ }\n+\nreturn nil\n}\n-func (launchctl *LaunchControl) updateNodePoolStatus(kluster *v1.Kluster, newInfo v1.NodePoolInfo) error {\n+func (launchctl *LaunchControl) updateNodePoolStatus(kluster *v1.Kluster, pool *v1.NodePool) error {\n+ nodes, err := launchctl.Clients.Openstack.GetNodes(kluster, pool)\n+ if err != nil {\n+ return fmt.Errorf(\"[%v] Couldn't list nodes for pool %v: %v\", kluster.Name, pool.Name, err)\n+ }\n+\n+ running := running(nodes)\n+ starting := starting(nodes)\n+\n+ newInfo := v1.NodePoolInfo{\n+ Name: pool.Name,\n+ Size: pool.Size,\n+ Running: running + starting, // Should be running only\n+ Healthy: running,\n+ Schedulable: running,\n+ }\n+\ncopy, err := launchctl.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{})\nif err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
remove endless update loop. only update on actual change. remove double queuing. why does that even matter?
596,240
22.09.2017 20:47:02
-7,200
e893e770ad9993c932a7afa8ba2dd6d4ee8e43ed
need more updates but only if actually needed
[ { "change_type": "MODIFY", "old_path": "pkg/controller/launch.go", "new_path": "pkg/controller/launch.go", "diff": "@@ -178,6 +178,10 @@ func (launchctl *LaunchControl) syncPool(kluster *v1.Kluster, pool *v1.NodePool)\nglog.V(3).Infof(\"[%v] Pool %v: Starting/Running/Total: %v/%v/%v. All good. Doing nothing.\", kluster.Name, pool.Name, starting, running, pool.Size)\n}\n+ if err = launchctl.updateNodePoolStatus(kluster, pool); err != nil {\n+ return err\n+ }\n+\nreturn nil\n}\n@@ -240,6 +244,10 @@ func (launchctl *LaunchControl) updateNodePoolStatus(kluster *v1.Kluster, pool *\nfor i, curInfo := range copy.Status.NodePools {\nif curInfo.Name == newInfo.Name {\n+ if curInfo == newInfo {\n+ return nil\n+ }\n+\ncopy.Status.NodePools[i] = newInfo\n_, err = launchctl.Clients.Kubernikus.Kubernikus().Klusters(copy.Namespace).Update(copy)\nreturn err\n" } ]
Go
Apache License 2.0
sapcc/kubernikus
need more updates but only if actually needed
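The guard added in this last commit skips the API write when the freshly computed NodePoolInfo equals the stored one, which keeps the status current without re-triggering the update/requeue cycle on every reconcile; plain `==` works here because the struct contains only comparable fields. A schematic stand-in for updateNodePoolStatus (types and names are illustrative, the real code goes through the kubernikus clientset):

```go
package main

import "fmt"

// NodePoolInfo mirrors the status struct: all fields are comparable,
// so two values can be checked with ==.
type NodePoolInfo struct {
	Name        string
	Size        int
	Running     int
	Healthy     int
	Schedulable int
}

// updateIfChanged stands in for the controller's updateNodePoolStatus: it only
// reports a write when the new observation differs from the stored entry.
func updateIfChanged(stored []NodePoolInfo, observed NodePoolInfo) bool {
	for i, cur := range stored {
		if cur.Name == observed.Name {
			if cur == observed {
				return false // nothing changed, skip the needless API update
			}
			stored[i] = observed
			return true
		}
	}
	return false // pool not part of this kluster
}

func main() {
	status := []NodePoolInfo{{Name: "small", Size: 2}}
	fmt.Println(updateIfChanged(status, NodePoolInfo{Name: "small", Size: 2}))             // false
	fmt.Println(updateIfChanged(status, NodePoolInfo{Name: "small", Size: 2, Running: 1})) // true
	fmt.Println(status)                                                                    // [{small 2 1 0 0}]
}
```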