Merge pull request #252 from digitalocean/k8s-pool-autoscale

Add Kubernetes node pool autoscale fields
Steven Normore 2019-09-06 13:58:05 -04:00 committed by GitHub
commit a5d1b0dbb9
3 changed files with 276 additions and 31 deletions

CHANGELOG.md

@@ -2,6 +2,7 @@
## Unreleased
- #252 Add Kubernetes autoscale config fields - @snormore
- #250 Add Kubernetes GetUser method - @snormore
## [v1.19.0] - 2019-07-19

kubernetes.go

@@ -82,18 +82,24 @@ type KubernetesClusterUpgradeRequest struct {
// KubernetesNodePoolCreateRequest represents a request to create a node pool for a
// Kubernetes cluster.
type KubernetesNodePoolCreateRequest struct {
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale bool `json:"auto_scale,omitempty"`
MinNodes int `json:"min_nodes,omitempty"`
MaxNodes int `json:"max_nodes,omitempty"`
}
// KubernetesNodePoolUpdateRequest represents a request to update a node pool in a
// Kubernetes cluster.
type KubernetesNodePoolUpdateRequest struct {
Name string `json:"name,omitempty"`
Count *int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
Name string `json:"name,omitempty"`
Count *int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale *bool `json:"auto_scale,omitempty"`
MinNodes *int `json:"min_nodes,omitempty"`
MaxNodes *int `json:"max_nodes,omitempty"`
}
// KubernetesNodePoolRecycleNodesRequest is DEPRECATED please use DeleteNode
@@ -274,11 +280,14 @@ type KubernetesClusterStatus struct {
// KubernetesNodePool represents a node pool in a Kubernetes cluster.
type KubernetesNodePool struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale bool `json:"auto_scale,omitempty"`
MinNodes int `json:"min_nodes,omitempty"`
MaxNodes int `json:"max_nodes,omitempty"`
Nodes []*KubernetesNode `json:"nodes,omitempty"`
}
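
For consumers of the client, here is a minimal sketch of requesting an autoscaled pool through these new fields; the token, region/version slugs, droplet size, and node bounds are placeholder values, not part of this change:

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Authenticate with a personal access token (placeholder value).
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-do-token"})
	client := godo.NewClient(oauth2.NewClient(ctx, ts))

	// Create a cluster with one pool that the autoscaler may resize
	// anywhere between 1 and 10 nodes.
	cluster, _, err := client.Kubernetes.Create(ctx, &godo.KubernetesClusterCreateRequest{
		Name:        "example-cluster",
		RegionSlug:  "nyc1",
		VersionSlug: "1.15.3-do.2",
		NodePools: []*godo.KubernetesNodePoolCreateRequest{
			{
				Name:      "autoscale-pool",
				Size:      "s-1vcpu-1gb",
				Count:     1,
				AutoScale: true,
				MinNodes:  1,
				MaxNodes:  10,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created cluster %s", cluster.ID)
}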

kubernetes_test.go

@@ -437,10 +437,13 @@ func TestKubernetesClusters_Create(t *testing.T) {
VPCUUID: want.VPCUUID,
NodePools: []*KubernetesNodePoolCreateRequest{
&KubernetesNodePoolCreateRequest{
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
AutoScale: want.NodePools[0].AutoScale,
MinNodes: want.NodePools[0].MinNodes,
MaxNodes: want.NodePools[0].MaxNodes,
},
},
MaintenancePolicy: want.MaintenancePolicy,
@@ -495,6 +498,110 @@ func TestKubernetesClusters_Create(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "antoine-test-cluster",
RegionSlug: "s2r1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
Tags: []string{"cluster-tag-1", "cluster-tag-2"},
VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6",
NodePools: []*KubernetesNodePool{
&KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
},
},
MaintenancePolicy: &KubernetesMaintenancePolicy{
StartTime: "00:00",
Day: KubernetesMaintenanceDayMonday,
},
}
createRequest := &KubernetesClusterCreateRequest{
Name: want.Name,
RegionSlug: want.RegionSlug,
VersionSlug: want.VersionSlug,
Tags: want.Tags,
VPCUUID: want.VPCUUID,
NodePools: []*KubernetesNodePoolCreateRequest{
&KubernetesNodePoolCreateRequest{
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
AutoScale: want.NodePools[0].AutoScale,
MinNodes: want.NodePools[0].MinNodes,
MaxNodes: want.NodePools[0].MaxNodes,
},
},
MaintenancePolicy: want.MaintenancePolicy,
}
jBlob := `
{
"kubernetes_cluster": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "antoine-test-cluster",
"region": "s2r1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"tags": [
"cluster-tag-1",
"cluster-tag-2"
],
"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
"node_pools": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
],
"maintenance_policy": {
"start_time": "00:00",
"day": "monday"
}
}
}`
mux.HandleFunc("/v2/kubernetes/clusters", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesClusterCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Create(ctx, createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
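
A subtlety worth noting in this test: the create request declares its autoscale fields as plain values, so under omitempty a MinNodes of 0 never reaches the wire at all; the handler's decoded struct still compares equal because the missing field decodes back to 0. A minimal sketch of that behavior, using a local stand-in for the godo type:

package main

import (
	"encoding/json"
	"fmt"
)

// createShape mirrors the value-typed autoscale fields of
// KubernetesNodePoolCreateRequest.
type createShape struct {
	AutoScale bool `json:"auto_scale,omitempty"`
	MinNodes  int  `json:"min_nodes,omitempty"`
	MaxNodes  int  `json:"max_nodes,omitempty"`
}

func main() {
	b, _ := json.Marshal(createShape{AutoScale: true, MinNodes: 0, MaxNodes: 10})
	fmt.Println(string(b)) // {"auto_scale":true,"max_nodes":10} (min_nodes is dropped)
}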
func TestKubernetesClusters_Update(t *testing.T) {
setup()
defer teardown()
@@ -717,11 +824,14 @@ func TestKubernetesClusters_CreateNodePool(t *testing.T) {
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
createRequest := &KubernetesNodePoolCreateRequest{
Size: want.Size,
@@ -760,6 +870,65 @@ func TestKubernetesClusters_CreateNodePool(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_CreateNodePool_AutoScale(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
}
createRequest := &KubernetesNodePoolCreateRequest{
Size: want.Size,
Count: want.Count,
Name: want.Name,
Tags: want.Tags,
AutoScale: want.AutoScale,
MinNodes: want.MinNodes,
MaxNodes: want.MaxNodes,
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesNodePoolCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.CreateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
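
Outside the test harness, the same call against an existing cluster looks roughly like this sketch; the cluster ID and pool parameters are placeholders, and client construction is as in the earlier example:

package example

import (
	"context"

	"github.com/digitalocean/godo"
)

// addAutoScalePool is illustrative only, not part of this change.
// It adds a pool the autoscaler may resize between 1 and 10 nodes.
func addAutoScalePool(ctx context.Context, client *godo.Client, clusterID string) (*godo.KubernetesNodePool, error) {
	pool, _, err := client.Kubernetes.CreateNodePool(ctx, clusterID, &godo.KubernetesNodePoolCreateRequest{
		Name:      "pool-a",
		Size:      "s-1vcpu-1gb",
		Count:     2,
		AutoScale: true,
		MinNodes:  1,
		MaxNodes:  10,
	})
	return pool, err
}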
func TestKubernetesClusters_GetNodePool(t *testing.T) {
setup()
defer teardown()
@@ -878,11 +1047,14 @@ func TestKubernetesClusters_UpdateNodePool(t *testing.T) {
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "a better name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "a better name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
Name: "a better name",
@@ -927,11 +1099,14 @@ func TestKubernetesClusters_UpdateNodePool_ZeroCount(t *testing.T) {
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "name",
Size: "s-1vcpu-1gb",
Count: 0,
Tags: []string{"tag-1", "tag-2"},
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "name",
Size: "s-1vcpu-1gb",
Count: 0,
Tags: []string{"tag-1", "tag-2"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
Count: intPtr(0),
@@ -972,6 +1147,66 @@ func TestKubernetesClusters_UpdateNodePool_ZeroCount(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_UpdateNodePool_AutoScale(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
AutoScale: boolPtr(true),
MinNodes: intPtr(0),
MaxNodes: intPtr(10),
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 4,
"name": "name",
"tags": [
"tag-1", "tag-2"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
}`
expectedReqJSON := `{"auto_scale":true,"min_nodes":0,"max_nodes":10}
`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
require.Equal(t, expectedReqJSON, buf.String())
v := new(KubernetesNodePoolUpdateRequest)
err := json.NewDecoder(buf).Decode(v)
require.NoError(t, err)
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.UpdateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
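
The exact-body assertion above works because, in contrast to the value-typed create fields, the update request uses pointer fields: under omitempty a nil pointer is omitted while a pointer to zero is still encoded, which is how min_nodes can be sent explicitly as 0. (The trailing newline in expectedReqJSON is consistent with the request body being serialized via json.Encoder, which appends one.) A minimal sketch of the distinction, again with a local stand-in type:

package main

import (
	"encoding/json"
	"fmt"
)

// updateShape mirrors the pointer-typed autoscale fields of
// KubernetesNodePoolUpdateRequest.
type updateShape struct {
	AutoScale *bool `json:"auto_scale,omitempty"`
	MinNodes  *int  `json:"min_nodes,omitempty"`
	MaxNodes  *int  `json:"max_nodes,omitempty"`
}

func boolPtr(b bool) *bool { return &b }
func intPtr(i int) *int    { return &i }

func main() {
	// A pointer to zero survives omitempty; a nil pointer does not.
	set, _ := json.Marshal(updateShape{AutoScale: boolPtr(true), MinNodes: intPtr(0), MaxNodes: intPtr(10)})
	unset, _ := json.Marshal(updateShape{})
	fmt.Println(string(set))   // {"auto_scale":true,"min_nodes":0,"max_nodes":10}
	fmt.Println(string(unset)) // {}
}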
func TestKubernetesClusters_DeleteNodePool(t *testing.T) {
setup()
defer teardown()