k8saas: add support for Kubernetes endpoints

Antoine Grondin 2018-11-12 15:02:26 -08:00
parent d29fc3d820
commit f628842763
3 changed files with 1154 additions and 0 deletions

godo.go

@@ -65,6 +65,7 @@ type Client struct {
Certificates CertificatesService
Firewalls FirewallsService
Projects ProjectsService
Kubernetes KubernetesService
// Optional function called after every successful request made to the DO APIs
onRequestCompleted RequestCompletionCallback
@@ -178,6 +179,7 @@ func NewClient(httpClient *http.Client) *Client {
c.Storage = &StorageServiceOp{client: c}
c.StorageActions = &StorageActionsServiceOp{client: c}
c.Tags = &TagsServiceOp{client: c}
c.Kubernetes = &KubernetesServiceOp{client: c}
return c
}
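
With the service wired into the client, the new endpoints are available as client.Kubernetes. A minimal setup sketch, assuming the usual godo pattern of an oauth2-backed http.Client and a hypothetical API token:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	// Hypothetical token; substitute a real DigitalOcean API token.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-token"})
	client := godo.NewClient(oauth2.NewClient(context.Background(), ts))

	// List the Kubernetes versions available for cluster creation.
	opts, _, err := client.Kubernetes.GetOptions(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range opts.Versions {
		fmt.Println(v.Slug, v.KubernetesVersion)
	}
}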

kubernetes.go (new file, 381 lines)

@@ -0,0 +1,381 @@
package godo
import (
"bytes"
"context"
"fmt"
"net/http"
"time"
)
const (
kubernetesBasePath = "/v2/kubernetes"
kubernetesClustersPath = kubernetesBasePath + "/clusters"
kubernetesOptionsPath = kubernetesBasePath + "/options"
)
// KubernetesService is an interface for interfacing with the Kubernetes endpoints
// of the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2#kubernetes
type KubernetesService interface {
Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error)
Get(context.Context, string) (*KubernetesCluster, *Response, error)
GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error)
List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error)
Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error)
Delete(context.Context, string) (*Response, error)
CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error)
GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error)
ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error)
UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error)
RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolRecycleNodesRequest) (*Response, error)
DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error)
GetOptions(context.Context) (*KubernetesOptions, *Response, error)
}
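// Because callers can depend on the KubernetesService interface rather than the
// concrete KubernetesServiceOp, a fake can stand in for the real service in
// tests. An illustrative sketch, not part of this commit:
//
//	type fakeKubernetes struct {
//		KubernetesService // embed the interface; override only what a test needs
//	}
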
var _ KubernetesService = &KubernetesServiceOp{}
// KubernetesServiceOp handles communication with Kubernetes methods of the DigitalOcean API.
type KubernetesServiceOp struct {
client *Client
}
// KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster.
type KubernetesClusterCreateRequest struct {
Name string `json:"name,omitempty"`
RegionSlug string `json:"region,omitempty"`
VersionSlug string `json:"version,omitempty"`
Tags []string `json:"tags,omitempty"`
NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"`
}
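// An illustrative sketch, not part of this commit: a populated create request
// for a single-pool cluster. The slug values mirror the tests below and may
// not reflect current offerings.
//
//	create := &KubernetesClusterCreateRequest{
//		Name:        "example-cluster",
//		RegionSlug:  "nyc1",
//		VersionSlug: "1.10.0-gen0",
//		NodePools: []*KubernetesNodePoolCreateRequest{
//			{Name: "pool-1", Size: "s-1vcpu-2gb", Count: 3},
//		},
//	}
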
// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
type KubernetesClusterUpdateRequest struct {
Name string `json:"name,omitempty"`
Tags []string `json:"tags,omitempty"`
}
// KubernetesNodePoolCreateRequest represents a request to create a node pool for a
// Kubernetes cluster.
type KubernetesNodePoolCreateRequest struct {
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
}
// KubernetesNodePoolUpdateRequest represents a request to update a node pool in a
// Kubernetes cluster.
type KubernetesNodePoolUpdateRequest struct {
Name string `json:"name,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
}
// KubernetesNodePoolRecycleNodesRequest represents a request to recycle a set of
// nodes in a node pool. This will recycle the nodes by ID.
type KubernetesNodePoolRecycleNodesRequest struct {
Nodes []string `json:"nodes,omitempty"`
}
// KubernetesCluster represents a Kubernetes cluster.
type KubernetesCluster struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
RegionSlug string `json:"region,omitempty"`
VersionSlug string `json:"version,omitempty"`
ClusterSubnet string `json:"cluster_subnet,omitempty"`
ServiceSubnet string `json:"service_subnet,omitempty"`
IPv4 string `json:"ipv4,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Tags []string `json:"tags,omitempty"`
NodePools []*KubernetesNodePool `json:"node_pools,omitempty"`
Status *KubernetesClusterStatus `json:"status,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// KubernetesClusterStatus describes the status of a cluster.
type KubernetesClusterStatus struct {
State string `json:"state,omitempty"`
Message string `json:"message,omitempty"`
}
// KubernetesNodePool represents a node pool in a Kubernetes cluster.
type KubernetesNodePool struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
Nodes []*KubernetesNode `json:"nodes,omitempty"`
}
// KubernetesNode represents a Node in a node pool in a Kubernetes cluster.
type KubernetesNode struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Status *KubernetesNodeStatus `json:"status,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster.
type KubernetesNodeStatus struct {
State string `json:"state,omitempty"`
Message string `json:"message,omitempty"`
}
// KubernetesOptions represents options available for creating Kubernetes clusters.
type KubernetesOptions struct {
Versions []*KubernetesVersion `json:"versions,omitempty"`
}
// KubernetesVersion is a DigitalOcean Kubernetes release.
type KubernetesVersion struct {
Slug string `json:"slug,omitempty"`
KubernetesVersion string `json:"kubernetes_version,omitempty"`
}
type kubernetesClustersRoot struct {
Clusters []*KubernetesCluster `json:"kubernetes_clusters,omitempty"`
Links *Links `json:"links,omitempty"`
}
type kubernetesClusterRoot struct {
Cluster *KubernetesCluster `json:"kubernetes_cluster,omitempty"`
Links *Links `json:"links,omitempty"`
}
type kubernetesNodePoolRoot struct {
NodePool *KubernetesNodePool `json:"node_pool,omitempty"`
}
type kubernetesNodePoolsRoot struct {
NodePools []*KubernetesNodePool `json:"node_pools,omitempty"`
Links *Links `json:"links,omitempty"`
}
// Get retrieves the details of a Kubernetes cluster.
func (svc *KubernetesServiceOp) Get(ctx context.Context, clusterID string) (*KubernetesCluster, *Response, error) {
path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesClusterRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Cluster, resp, nil
}
// Create creates a Kubernetes cluster.
func (svc *KubernetesServiceOp) Create(ctx context.Context, create *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error) {
path := kubernetesClustersPath
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create)
if err != nil {
return nil, nil, err
}
root := new(kubernetesClusterRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Cluster, resp, nil
}
// Delete deletes a Kubernetes cluster. There is no way to recover a cluster
// once it has been destroyed.
func (svc *KubernetesServiceOp) Delete(ctx context.Context, clusterID string) (*Response, error) {
path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// List returns a list of the Kubernetes clusters visible with the caller's API token.
func (svc *KubernetesServiceOp) List(ctx context.Context, opts *ListOptions) ([]*KubernetesCluster, *Response, error) {
path := kubernetesClustersPath
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesClustersRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Clusters, resp, nil
}
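// An illustrative sketch, not part of this commit: walking every page of
// results with the standard godo ListOptions/Links pattern, inside a helper
// returning ([]*KubernetesCluster, error).
//
//	opts := &ListOptions{Page: 1, PerPage: 50}
//	var all []*KubernetesCluster
//	for {
//		clusters, resp, err := client.Kubernetes.List(ctx, opts)
//		if err != nil {
//			return nil, err
//		}
//		all = append(all, clusters...)
//		if resp.Links == nil || resp.Links.IsLastPage() {
//			break
//		}
//		page, err := resp.Links.CurrentPage()
//		if err != nil {
//			return nil, err
//		}
//		opts.Page = page + 1
//	}
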
// KubernetesClusterConfig is the content of a Kubernetes config file, which can be
// used to interact with your Kubernetes cluster using `kubectl`.
// See: https://kubernetes.io/docs/tasks/tools/install-kubectl/
type KubernetesClusterConfig struct {
KubeconfigYAML []byte
}
// GetKubeConfig returns a Kubernetes config file for the specified cluster.
func (svc *KubernetesServiceOp) GetKubeConfig(ctx context.Context, clusterID string) (*KubernetesClusterConfig, *Response, error) {
path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
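// The kubeconfig endpoint returns raw YAML rather than a JSON envelope, so the
// response body is copied into a buffer instead of being decoded into a root type.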
configBytes := bytes.NewBuffer(nil)
resp, err := svc.client.Do(ctx, req, configBytes)
if err != nil {
return nil, resp, err
}
res := &KubernetesClusterConfig{
KubeconfigYAML: configBytes.Bytes(),
}
return res, resp, nil
}
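// An illustrative sketch, not part of this commit: persisting the returned
// kubeconfig so kubectl can use it. The file name is hypothetical.
//
//	cfg, _, err := client.Kubernetes.GetKubeConfig(ctx, clusterID)
//	if err != nil {
//		return err
//	}
//	if err := ioutil.WriteFile("kubeconfig.yaml", cfg.KubeconfigYAML, 0600); err != nil {
//		return err
//	}
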
// Update updates a Kubernetes cluster's properties.
func (svc *KubernetesServiceOp) Update(ctx context.Context, clusterID string, update *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) {
path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update)
if err != nil {
return nil, nil, err
}
root := new(kubernetesClusterRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Cluster, resp, nil
}
// CreateNodePool creates a new node pool in an existing Kubernetes cluster.
func (svc *KubernetesServiceOp) CreateNodePool(ctx context.Context, clusterID string, create *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create)
if err != nil {
return nil, nil, err
}
root := new(kubernetesNodePoolRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.NodePool, resp, nil
}
// GetNodePool retrieves an existing node pool in a Kubernetes cluster.
func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesNodePoolRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.NodePool, resp, nil
}
// ListNodePools lists all the node pools found in a Kubernetes cluster.
func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesNodePoolsRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.NodePools, resp, nil
}
// UpdateNodePool updates the details of an existing node pool.
func (svc *KubernetesServiceOp) UpdateNodePool(ctx context.Context, clusterID, poolID string, update *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update)
if err != nil {
return nil, nil, err
}
root := new(kubernetesNodePoolRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.NodePool, resp, nil
}
// RecycleNodePoolNodes schedules nodes in a node pool for recycling.
func (svc *KubernetesServiceOp) RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, recycle *KubernetesNodePoolRecycleNodesRequest) (*Response, error) {
path := fmt.Sprintf("%s/%s/node_pools/%s/recycle", kubernetesClustersPath, clusterID, poolID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, recycle)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
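// An illustrative sketch, not part of this commit: recycling two nodes by ID.
// The identifiers are hypothetical.
//
//	recycle := &KubernetesNodePoolRecycleNodesRequest{
//		Nodes: []string{"node-id-1", "node-id-2"},
//	}
//	_, err := client.Kubernetes.RecycleNodePoolNodes(ctx, clusterID, poolID, recycle)
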
// DeleteNodePool deletes a node pool, and subsequently all the nodes in that pool.
func (svc *KubernetesServiceOp) DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error) {
path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
type kubernetesOptionsRoot struct {
Options *KubernetesOptions `json:"options,omitempty"`
Links *Links `json:"links,omitempty"`
}
// GetOptions returns options about the Kubernetes service, such as the versions available for
// cluster creation.
func (svc *KubernetesServiceOp) GetOptions(ctx context.Context) (*KubernetesOptions, *Response, error) {
path := kubernetesOptionsPath
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesOptionsRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Options, resp, nil
}
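
Putting the pieces together: create a cluster, then poll Get until the API reports the running state. A minimal sketch, assuming a client configured as in the earlier snippet and reusing the "running" state string that the tests below assert; the ten-second poll interval is arbitrary.

package kubeexample

import (
	"context"
	"time"

	"github.com/digitalocean/godo"
)

// waitForRunning polls the cluster until its reported state is "running", the
// context is cancelled, or the API returns an error.
func waitForRunning(ctx context.Context, client *godo.Client, clusterID string) error {
	for {
		cluster, _, err := client.Kubernetes.Get(ctx, clusterID)
		if err != nil {
			return err
		}
		if cluster.Status != nil && cluster.Status.State == "running" {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second):
		}
	}
}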

kubernetes_test.go (new file, 771 lines)

@@ -0,0 +1,771 @@
package godo
import (
"encoding/json"
"fmt"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/require"
)
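// These tests rely on godo's shared test harness: setup() starts an
// httptest.Server and points the package-level mux and client at it, ctx is a
// package-level context, and testMethod asserts the HTTP verb of each request.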
func TestKubernetesClusters_ListClusters(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := []*KubernetesCluster{
&KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "blablabla",
RegionSlug: "nyc1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
IPv4: "",
Tags: []string(nil),
Status: &KubernetesClusterStatus{
State: "running",
},
NodePools: []*KubernetesNodePool{
{
ID: "1a17a012-cb31-4886-a787-deadbeef1191",
Name: "blablabla-1",
Size: "s-1vcpu-2gb",
Count: 2,
Nodes: []*KubernetesNode{
{
ID: "",
Name: "",
Status: &KubernetesNodeStatus{},
CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
},
{
ID: "",
Name: "",
Status: &KubernetesNodeStatus{},
CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
},
},
},
},
CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
},
&KubernetesCluster{
ID: "deadbeef-dead-4aa5-beef-deadbeef347d",
Name: "antoine",
RegionSlug: "nyc1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
IPv4: "1.2.3.4",
Status: &KubernetesClusterStatus{
State: "running",
},
NodePools: []*KubernetesNodePool{
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b3",
Name: "antoine-1",
Size: "s-1vcpu-2gb",
Count: 5,
Nodes: []*KubernetesNode{
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b1",
Name: "worker-393",
Status: &KubernetesNodeStatus{State: "running"},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
},
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b2",
Name: "worker-394",
Status: &KubernetesNodeStatus{State: "running"},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
},
},
},
},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
},
}
jBlob := `
{
"kubernetes_clusters": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "blablabla",
"region": "nyc1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"ipv4": "",
"tags": null,
"status": {
"state": "running"
},
"node_pools": [
{
"id": "1a17a012-cb31-4886-a787-deadbeef1191",
"name": "blablabla-1",
"version": "1.10.0-gen0",
"size": "s-1vcpu-2gb",
"count": 2,
"tags": null,
"nodes": [
{
"id": "",
"name": "",
"status": {
"state": ""
},
"created_at": "2018-06-21T08:44:38Z",
"updated_at": "2018-06-21T08:44:38Z"
},
{
"id": "",
"name": "",
"status": {
"state": ""
},
"created_at": "2018-06-21T08:44:38Z",
"updated_at": "2018-06-21T08:44:38Z"
}
]
}
],
"created_at": "2018-06-21T08:44:38Z",
"updated_at": "2018-06-21T08:44:38Z"
},
{
"id": "deadbeef-dead-4aa5-beef-deadbeef347d",
"name": "antoine",
"region": "nyc1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"ipv4": "1.2.3.4",
"tags": null,
"status": {
"state": "running"
},
"node_pools": [
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b3",
"name": "antoine-1",
"version": "1.10.0-gen0",
"size": "s-1vcpu-2gb",
"count": 5,
"tags": null,
"nodes": [
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b1",
"name": "worker-393",
"status": {
"state": "running"
},
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
},
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b2",
"name": "worker-394",
"status": {
"state": "running"
},
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
}
]
}
],
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
}
]
}`
mux.HandleFunc("/v2/kubernetes/clusters", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.List(ctx, nil)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_Get(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "deadbeef-dead-4aa5-beef-deadbeef347d",
Name: "antoine",
RegionSlug: "nyc1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
IPv4: "1.2.3.4",
Status: &KubernetesClusterStatus{
State: "running",
},
NodePools: []*KubernetesNodePool{
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b3",
Name: "antoine-1",
Size: "s-1vcpu-2gb",
Count: 5,
Nodes: []*KubernetesNode{
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b1",
Name: "worker-393",
Status: &KubernetesNodeStatus{State: "running"},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
},
{
ID: "deadbeef-dead-beef-dead-deadbeefb4b2",
Name: "worker-394",
Status: &KubernetesNodeStatus{State: "running"},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
},
},
},
},
CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
}
jBlob := `
{
"kubernetes_cluster": {
"id": "deadbeef-dead-4aa5-beef-deadbeef347d",
"name": "antoine",
"region": "nyc1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"ipv4": "1.2.3.4",
"tags": null,
"status": {
"state": "running"
},
"node_pools": [
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b3",
"name": "antoine-1",
"version": "1.10.0-gen0",
"size": "s-1vcpu-2gb",
"count": 5,
"tags": null,
"nodes": [
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b1",
"name": "worker-393",
"status": {
"state": "running"
},
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
},
{
"id": "deadbeef-dead-beef-dead-deadbeefb4b2",
"name": "worker-394",
"status": {
"state": "running"
},
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
}
]
}
],
"created_at": "2018-06-15T07:10:23Z",
"updated_at": "2018-06-15T07:11:26Z"
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Get(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_GetKubeConfig(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := "some YAML"
blob := []byte(want)
mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/kubeconfig", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, want)
})
got, _, err := kubeSvc.GetKubeConfig(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
require.NoError(t, err)
require.Equal(t, blob, got.KubeconfigYAML)
}
func TestKubernetesClusters_Create(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "antoine-test-cluster",
RegionSlug: "s2r1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
Tags: []string{"cluster-tag-1", "cluster-tag-2"},
NodePools: []*KubernetesNodePool{
&KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
},
},
}
createRequest := &KubernetesClusterCreateRequest{
Name: want.Name,
RegionSlug: want.RegionSlug,
VersionSlug: want.VersionSlug,
Tags: want.Tags,
NodePools: []*KubernetesNodePoolCreateRequest{
&KubernetesNodePoolCreateRequest{
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
},
},
}
jBlob := `
{
"kubernetes_cluster": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "antoine-test-cluster",
"region": "s2r1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"tags": [
"cluster-tag-1",
"cluster-tag-2"
],
"node_pools": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
]
}
]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesClusterCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Create(ctx, createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_Update(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "antoine-test-cluster",
RegionSlug: "s2r1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
Tags: []string{"cluster-tag-1", "cluster-tag-2"},
NodePools: []*KubernetesNodePool{
&KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
},
},
}
updateRequest := &KubernetesClusterUpdateRequest{
Name: want.Name,
Tags: want.Tags,
}
jBlob := `
{
"kubernetes_cluster": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "antoine-test-cluster",
"region": "s2r1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"tags": [
"cluster-tag-1",
"cluster-tag-2"
],
"node_pools": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
]
}
]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesClusterUpdateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Update(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_Destroy(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodDelete)
})
_, err := kubeSvc.Delete(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
require.NoError(t, err)
}
func TestKubernetesClusters_CreateNodePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
}
createRequest := &KubernetesNodePoolCreateRequest{
Size: want.Size,
Count: want.Count,
Name: want.Name,
Tags: want.Tags,
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesNodePoolCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.CreateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_GetNodePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-0739-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.GetNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-0739-4a1a-acc5-deadbeefbb8a")
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_ListNodePools(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := []*KubernetesNodePool{
{
ID: "1a17a012-cb31-4886-a787-deadbeef1191",
Name: "blablabla-1",
Size: "s-1vcpu-2gb",
Count: 2,
Nodes: []*KubernetesNode{
{
ID: "",
Name: "",
Status: &KubernetesNodeStatus{},
CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
},
{
ID: "",
Name: "",
Status: &KubernetesNodeStatus{},
CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
},
},
},
}
jBlob := `
{
"node_pools": [
{
"id": "1a17a012-cb31-4886-a787-deadbeef1191",
"name": "blablabla-1",
"version": "1.10.0-gen0",
"size": "s-1vcpu-2gb",
"count": 2,
"tags": null,
"nodes": [
{
"id": "",
"name": "",
"status": {
"state": ""
},
"created_at": "2018-06-21T08:44:38Z",
"updated_at": "2018-06-21T08:44:38Z"
},
{
"id": "",
"name": "",
"status": {
"state": ""
},
"created_at": "2018-06-21T08:44:38Z",
"updated_at": "2018-06-21T08:44:38Z"
}
]
}
]
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.ListNodePools(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", nil)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_UpdateNodePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "a better name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
}
updateRequest := &KubernetesNodePoolUpdateRequest{
Name: "a better name",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 4,
"name": "a better name",
"tags": [
"tag-1", "tag-2"
]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesNodePoolUpdateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.UpdateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_DeleteNodePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodDelete)
})
_, err := kubeSvc.DeleteNodePool(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a")
require.NoError(t, err)
}
func TestKubernetesClusters_RecycleNodePoolNodes(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
recycleRequest := &KubernetesNodePoolRecycleNodesRequest{
Nodes: []string{"node1", "node2"},
}
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a/recycle", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesNodePoolRecycleNodesRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, recycleRequest)
})
_, err := kubeSvc.RecycleNodePoolNodes(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", recycleRequest)
require.NoError(t, err)
}
func TestKubernetesVersions_List(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesOptions{
Versions: []*KubernetesVersion{
{Slug: "1.10.0-gen0", KubernetesVersion: "1.10.0"},
},
}
jBlob := `
{
"options": {
"versions": [
{
"slug": "1.10.0-gen0",
"kubernetes_version": "1.10.0"
}
]
}
}`
mux.HandleFunc("/v2/kubernetes/options", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.GetOptions(ctx)
require.NoError(t, err)
require.Equal(t, want, got)
}