doks: add node pool auto-scale fields (#307)

* doks: add auto_scale, min_nodes, max_nodes, and actual_node_count to node pool

* doks: add more auto-scaling node pool test coverage

* doks: documentation for auto-scaling node pools

* Handle case where a node pool is empty.

* Bump testClusterVersion to 1.15.5-do.0
This commit is contained in:
Steven Normore 2019-10-30 18:39:39 -04:00 committed by Andrew Starr-Bochicchio
parent 8ace5c43d6
commit 320ee053c3
9 changed files with 802 additions and 70 deletions

View File

@ -71,11 +71,31 @@ func dataSourceDigitalOceanKubernetesCluster() *schema.Resource {
Computed: true,
},
"actual_node_count": {
Type: schema.TypeInt,
Computed: true,
},
"node_count": {
Type: schema.TypeInt,
Computed: true,
},
"auto_scale": {
Type: schema.TypeBool,
Computed: true,
},
"min_nodes": {
Type: schema.TypeInt,
Computed: true,
},
"max_nodes": {
Type: schema.TypeInt,
Computed: true,
},
"tags": tagsSchema(),
"nodes": nodeSchema(),

View File

@ -153,10 +153,13 @@ func resourceDigitalOceanKubernetesClusterCreate(d *schema.ResourceData, meta in
for i, pool := range pools {
tags := append(pool.Tags, digitaloceanKubernetesDefaultNodePoolTag)
poolCreateRequests[i] = &godo.KubernetesNodePoolCreateRequest{
Name: pool.Name,
Size: pool.Size,
Tags: tags,
Count: pool.Count,
Name: pool.Name,
Size: pool.Size,
Tags: tags,
Count: pool.Count,
AutoScale: pool.AutoScale,
MinNodes: pool.MinNodes,
MaxNodes: pool.MaxNodes,
}
}
@ -216,10 +219,11 @@ func digitaloceanKubernetesClusterRead(client *godo.Client, cluster *godo.Kubern
// find the default node pool from all the pools in the cluster
// the default node pool has a custom tag k8s:default-node-pool
for _, p := range cluster.NodePools {
for i, p := range cluster.NodePools {
for _, t := range p.Tags {
if t == digitaloceanKubernetesDefaultNodePoolTag {
if err := d.Set("node_pool", flattenNodePool(p, cluster.Tags...)); err != nil {
keyPrefix := fmt.Sprintf("node_pool.%d.", i)
if err := d.Set("node_pool", flattenNodePool(d, keyPrefix, p, cluster.Tags...)); err != nil {
log.Printf("[DEBUG] Error setting node pool attributes: %s %#v", err, cluster.NodePools)
}
}
@ -283,6 +287,11 @@ func resourceDigitalOceanKubernetesClusterUpdate(d *schema.ResourceData, meta in
oldPool := old.([]interface{})[0].(map[string]interface{})
newPool := new.([]interface{})[0].(map[string]interface{})
// If the node_count is unset, then remove it from the update map.
if _, ok := d.GetOk("node_pool.0.node_count"); !ok {
delete(newPool, "node_count")
}
// update the existing default pool
_, err := digitaloceanKubernetesNodePoolUpdate(client, newPool, d.Id(), oldPool["id"].(string), digitaloceanKubernetesDefaultNodePoolTag)
if err != nil {
@ -453,3 +462,31 @@ func filterTags(tags []string) []string {
return filteredTags
}
// flattenNodePool converts a godo Kubernetes node pool into the
// single-element []interface{} form Terraform expects for the node_pool
// attribute. keyPrefix addresses the pool inside the parent resource's
// state (e.g. "node_pool.0.") so node_count can be checked via GetOk.
// parentTags is accepted for signature compatibility with callers.
func flattenNodePool(d *schema.ResourceData, keyPrefix string, pool *godo.KubernetesNodePool, parentTags ...string) []interface{} {
	flattened := map[string]interface{}{
		"id":                pool.ID,
		"name":              pool.Name,
		"size":              pool.Size,
		"actual_node_count": pool.Count,
		"auto_scale":        pool.AutoScale,
		"min_nodes":         pool.MinNodes,
		"max_nodes":         pool.MaxNodes,
	}

	if tags := pool.Tags; tags != nil {
		flattened["tags"] = flattenTags(filterTags(tags))
	}

	if nodes := pool.Nodes; nodes != nil {
		flattened["nodes"] = flattenNodes(nodes)
	}

	// node_count is optional; mirror it back into state only when the
	// practitioner set it explicitly, so an unset count is never
	// clobbered with a zero.
	if _, explicit := d.GetOk(keyPrefix + "node_count"); explicit {
		flattened["node_count"] = pool.Count
	}

	return []interface{}{flattened}
}

View File

@ -13,6 +13,8 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
const testClusterVersion = "1.15.5-do.0"
func TestAccDigitalOceanKubernetesCluster_Basic(t *testing.T) {
t.Parallel()
rName := randomTestName()
@ -29,7 +31,7 @@ func TestAccDigitalOceanKubernetesCluster_Basic(t *testing.T) {
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "region", "lon1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "version", "1.15.4-do.0"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "version", testClusterVersion),
resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "ipv4_address"),
resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "cluster_subnet"),
resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "service_subnet"),
@ -43,6 +45,7 @@ func TestAccDigitalOceanKubernetesCluster_Basic(t *testing.T) {
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.tags.#", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.tags.2053932785", "one"), // Currently tags are being copied from parent this will fail
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.tags.298486374", "two"), // requires API update
@ -109,6 +112,8 @@ func TestAccDigitalOceanKubernetesCluster_UpdatePoolDetails(t *testing.T) {
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.name", "default"),
),
},
@ -117,9 +122,10 @@ func TestAccDigitalOceanKubernetesCluster_UpdatePoolDetails(t *testing.T) {
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.name", "default-rename"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.tags.#", "3"),
),
},
@ -143,6 +149,8 @@ func TestAccDigitalOceanKubernetesCluster_UpdatePoolSize(t *testing.T) {
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
),
},
@ -153,6 +161,7 @@ func TestAccDigitalOceanKubernetesCluster_UpdatePoolSize(t *testing.T) {
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-2vcpu-4gb"),
),
},
@ -160,6 +169,236 @@ func TestAccDigitalOceanKubernetesCluster_UpdatePoolSize(t *testing.T) {
})
}
// TestAccDigitalOceanKubernetesCluster_CreatePoolWithAutoScale is an
// acceptance test that creates a cluster whose default node pool has
// auto-scaling enabled, then walks it through node_count removal, a
// node_count update, and finally disabling auto-scaling, asserting the
// node_pool.0.* attributes (node_count, actual_node_count, auto_scale,
// min_nodes, max_nodes) at each step.
func TestAccDigitalOceanKubernetesCluster_CreatePoolWithAutoScale(t *testing.T) {
t.Parallel()
rName := randomTestName()
var k8s godo.KubernetesCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanKubernetesClusterDestroy,
Steps: []resource.TestStep{
// Create with auto-scaling and explicit node_count.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
auto_scale = true
min_nodes = 1
max_nodes = 3
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "3"),
),
},
// Remove node_count, keep auto-scaling.
// The provider must not reset the pool size when node_count is unset.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
auto_scale = true
min_nodes = 1
max_nodes = 3
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "3"),
),
},
// Update node_count, keep auto-scaling.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 2
auto_scale = true
min_nodes = 1
max_nodes = 3
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "3"),
),
},
// Disable auto-scaling.
// min_nodes/max_nodes are expected to read back as 0 once disabled.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 2
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "false"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "0"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "0"),
),
},
},
})
}
// TestAccDigitalOceanKubernetesCluster_UpdatePoolWithAutoScale is an
// acceptance test that creates a cluster with auto-scaling disabled on
// the default node pool, then enables auto-scaling (with an explicit
// node_count) and finally removes node_count while keeping auto-scaling,
// asserting node_pool.0.* attributes at each step.
func TestAccDigitalOceanKubernetesCluster_UpdatePoolWithAutoScale(t *testing.T) {
t.Parallel()
rName := randomTestName()
var k8s godo.KubernetesCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanKubernetesClusterDestroy,
Steps: []resource.TestStep{
// Create with auto-scaling disabled.
// min_nodes/max_nodes should read back as 0 in this state.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "false"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "0"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "0"),
),
},
// Enable auto-scaling with explicit node_count.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
auto_scale = true
min_nodes = 1
max_nodes = 3
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "3"),
),
},
// Remove node_count, keep auto-scaling.
// The provider must not reset the pool size when node_count is unset.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
auto_scale = true
min_nodes = 1
max_nodes = 3
}
}
`, rName, testClusterVersion),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.size", "s-1vcpu-2gb"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "node_pool.0.max_nodes", "3"),
),
},
},
})
}
func TestAccDigitalOceanKubernetesCluster_KubernetesProviderInteroperability(t *testing.T) {
t.Parallel()
rName := randomTestName()
@ -188,7 +427,7 @@ func testAccDigitalOceanKubernetesConfigBasic(rName string) string {
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["foo","bar", "one"]
node_pool {
@ -198,7 +437,7 @@ resource "digitalocean_kubernetes_cluster" "foobar" {
tags = ["one","two"]
}
}
`, rName)
`, rName, testClusterVersion)
}
func testAccDigitalOceanKubernetesConfigBasic2(rName string) string {
@ -206,7 +445,7 @@ func testAccDigitalOceanKubernetesConfigBasic2(rName string) string {
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["foo","bar"]
node_pool {
@ -216,7 +455,7 @@ resource "digitalocean_kubernetes_cluster" "foobar" {
tags = ["one","two","three"]
}
}
`, rName)
`, rName, testClusterVersion)
}
func testAccDigitalOceanKubernetesConfigBasic3(rName string) string {
@ -224,7 +463,7 @@ func testAccDigitalOceanKubernetesConfigBasic3(rName string) string {
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["foo","bar"]
node_pool {
@ -234,7 +473,7 @@ resource "digitalocean_kubernetes_cluster" "foobar" {
tags = ["one","two"]
}
}
`, rName)
`, rName, testClusterVersion)
}
func testAccDigitalOceanKubernetesConfigBasic4(rName string) string {
@ -242,7 +481,7 @@ func testAccDigitalOceanKubernetesConfigBasic4(rName string) string {
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["one","two"]
node_pool {
@ -252,7 +491,7 @@ resource "digitalocean_kubernetes_cluster" "foobar" {
tags = ["foo","bar"]
}
}
`, rName)
`, rName, testClusterVersion)
}
func testAccDigitalOceanKubernetesConfig_KubernetesProviderInteroperability(rName string) string {
@ -260,7 +499,7 @@ func testAccDigitalOceanKubernetesConfig_KubernetesProviderInteroperability(rNam
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
node_pool {
name = "default"
@ -292,7 +531,7 @@ resource "kubernetes_service_account" "tiller" {
automount_service_account_token = true
}
`, rName)
`, rName, testClusterVersion)
}
func testAccCheckDigitalOceanKubernetesClusterDestroy(s *terraform.State) error {

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/digitalocean/godo"
@ -65,10 +66,56 @@ func nodePoolSchema() map[string]*schema.Schema {
ValidateFunc: validation.NoZeroValues,
},
"actual_node_count": {
Type: schema.TypeInt,
Computed: true,
},
"node_count": {
Type: schema.TypeInt,
Required: true,
Optional: true,
ValidateFunc: validation.IntAtLeast(1),
DiffSuppressFunc: func(key, old, new string, d *schema.ResourceData) bool {
nodeCountKey := "node_count"
actualNodeCountKey := "actual_node_count"
// Since this schema is shared between the node pool resource
// and as the node pool sub-element of the cluster resource,
// we need to check for both variants of the incoming key.
keyParts := strings.Split(key, ".")
if keyParts[0] == "node_pool" {
npKeyParts := keyParts[:len(keyParts)-1]
nodeCountKeyParts := append(npKeyParts, "node_count")
nodeCountKey = strings.Join(nodeCountKeyParts, ".")
actualNodeCountKeyParts := append(npKeyParts, "actual_node_count")
actualNodeCountKey = strings.Join(actualNodeCountKeyParts, ".")
}
// If node_count equals actual_node_count already, then
// suppress the diff.
if d.Get(nodeCountKey).(int) == d.Get(actualNodeCountKey).(int) {
return true
}
// Otherwise suppress the diff only if old equals new.
return old == new
},
},
"auto_scale": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"min_nodes": {
Type: schema.TypeInt,
Optional: true,
},
"max_nodes": {
Type: schema.TypeInt,
Optional: true,
},
"tags": tagsSchema(),
@ -118,8 +165,11 @@ func resourceDigitalOceanKubernetesNodePoolCreate(d *schema.ResourceData, meta i
rawPool := map[string]interface{}{
"name": d.Get("name"),
"size": d.Get("size"),
"node_count": d.Get("node_count"),
"tags": d.Get("tags"),
"node_count": d.Get("node_count"),
"auto_scale": d.Get("auto_scale"),
"min_nodes": d.Get("min_nodes"),
"max_nodes": d.Get("max_nodes"),
}
pool, err := digitaloceanKubernetesNodePoolCreate(client, rawPool, d.Get("cluster_id").(string))
@ -148,10 +198,17 @@ func resourceDigitalOceanKubernetesNodePoolRead(d *schema.ResourceData, meta int
d.Set("name", pool.Name)
d.Set("size", pool.Size)
d.Set("node_count", pool.Count)
d.Set("actual_node_count", pool.Count)
d.Set("tags", flattenTags(filterTags(pool.Tags)))
d.Set("auto_scale", pool.AutoScale)
d.Set("min_nodes", pool.MinNodes)
d.Set("max_nodes", pool.MaxNodes)
d.Set("nodes", flattenNodes(pool.Nodes))
if pool.Nodes != nil {
d.Set("nodes", flattenNodes(pool.Nodes))
// Assign a node_count only if it's been set explicitly, since it's
// optional and we don't want to update with a 0 if it's not set.
if _, ok := d.GetOk("node_count"); ok {
d.Set("node_count", pool.Count)
}
return nil
@ -161,11 +218,18 @@ func resourceDigitalOceanKubernetesNodePoolUpdate(d *schema.ResourceData, meta i
client := meta.(*CombinedConfig).godoClient()
rawPool := map[string]interface{}{
"name": d.Get("name"),
"node_count": d.Get("node_count"),
"tags": d.Get("tags"),
"name": d.Get("name"),
"tags": d.Get("tags"),
}
if _, ok := d.GetOk("node_count"); ok {
rawPool["node_count"] = d.Get("node_count")
}
rawPool["auto_scale"] = d.Get("auto_scale")
rawPool["min_nodes"] = d.Get("min_nodes")
rawPool["max_nodes"] = d.Get("max_nodes")
_, err := digitaloceanKubernetesNodePoolUpdate(client, rawPool, d.Get("cluster_id").(string), d.Id())
if err != nil {
return fmt.Errorf("Error updating node pool: %s", err)
@ -185,12 +249,17 @@ func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]i
tags := expandTags(pool["tags"].(*schema.Set).List())
tags = append(tags, customTags...)
p, _, err := client.Kubernetes.CreateNodePool(context.Background(), clusterID, &godo.KubernetesNodePoolCreateRequest{
Name: pool["name"].(string),
Size: pool["size"].(string),
Count: pool["node_count"].(int),
Tags: tags,
})
req := &godo.KubernetesNodePoolCreateRequest{
Name: pool["name"].(string),
Size: pool["size"].(string),
Count: pool["node_count"].(int),
Tags: tags,
AutoScale: pool["auto_scale"].(bool),
MinNodes: pool["min_nodes"].(int),
MaxNodes: pool["max_nodes"].(int),
}
p, _, err := client.Kubernetes.CreateNodePool(context.Background(), clusterID, req)
if err != nil {
return nil, fmt.Errorf("Unable to create new default node pool %s", err)
@ -208,12 +277,29 @@ func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]i
tags := expandTags(pool["tags"].(*schema.Set).List())
tags = append(tags, customTags...)
count := pool["node_count"].(int)
p, resp, err := client.Kubernetes.UpdateNodePool(context.Background(), clusterID, poolID, &godo.KubernetesNodePoolUpdateRequest{
Name: pool["name"].(string),
Count: &count,
Tags: tags,
})
req := &godo.KubernetesNodePoolUpdateRequest{
Name: pool["name"].(string),
Tags: tags,
}
if pool["node_count"] != nil {
req.Count = intPtr(pool["node_count"].(int))
}
if pool["auto_scale"] == nil {
pool["auto_scale"] = false
}
req.AutoScale = boolPtr(pool["auto_scale"].(bool))
if pool["min_nodes"] != nil {
req.MinNodes = intPtr(pool["min_nodes"].(int))
}
if pool["max_nodes"] != nil {
req.MaxNodes = intPtr(pool["max_nodes"].(int))
}
p, resp, err := client.Kubernetes.UpdateNodePool(context.Background(), clusterID, poolID, req)
if err != nil {
if resp != nil && resp.StatusCode == 404 {
@ -316,12 +402,15 @@ func expandNodePools(nodePools []interface{}) []*godo.KubernetesNodePool {
for _, rawPool := range nodePools {
pool := rawPool.(map[string]interface{})
cr := &godo.KubernetesNodePool{
ID: pool["id"].(string),
Name: pool["name"].(string),
Size: pool["size"].(string),
Count: pool["node_count"].(int),
Tags: expandTags(pool["tags"].(*schema.Set).List()),
Nodes: expandNodes(pool["nodes"].([]interface{})),
ID: pool["id"].(string),
Name: pool["name"].(string),
Size: pool["size"].(string),
Count: pool["node_count"].(int),
AutoScale: pool["auto_scale"].(bool),
MinNodes: pool["min_nodes"].(int),
MaxNodes: pool["max_nodes"].(int),
Tags: expandTags(pool["tags"].(*schema.Set).List()),
Nodes: expandNodes(pool["nodes"].([]interface{})),
}
expandedNodePools = append(expandedNodePools, cr)
@ -345,31 +434,12 @@ func expandNodes(nodes []interface{}) []*godo.KubernetesNode {
return expandedNodes
}
// flattenNodePool converts a godo Kubernetes node pool into the
// single-element []interface{} form Terraform expects for the node_pool
// attribute (id, name, size, node_count, plus tags and nodes when set).
// NOTE(review): this is the pre-auto-scale variant being removed by this
// commit; parentTags is accepted but unused in the body shown here.
func flattenNodePool(pool *godo.KubernetesNodePool, parentTags ...string) []interface{} {
rawPool := map[string]interface{}{
"id": pool.ID,
"name": pool.Name,
"size": pool.Size,
"node_count": pool.Count,
}
// Tags are filtered (internal tags stripped) before being flattened.
if pool.Tags != nil {
rawPool["tags"] = flattenTags(filterTags(pool.Tags))
}
if pool.Nodes != nil {
rawPool["nodes"] = flattenNodes(pool.Nodes)
}
return []interface{}{rawPool}
}
func flattenNodes(nodes []*godo.KubernetesNode) []interface{} {
flattenedNodes := make([]interface{}, 0)
if nodes == nil {
return nil
return flattenedNodes
}
flattenedNodes := make([]interface{}, 0)
for _, node := range nodes {
rawNode := map[string]interface{}{
"id": node.ID,

View File

@ -54,6 +54,7 @@ func TestAccDigitalOceanKubernetesNodePool_Update(t *testing.T) {
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "tags.#", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
),
},
@ -66,6 +67,7 @@ func TestAccDigitalOceanKubernetesNodePool_Update(t *testing.T) {
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName+"-updated"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "tags.#", "3"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "2"),
),
},
@ -73,12 +75,329 @@ func TestAccDigitalOceanKubernetesNodePool_Update(t *testing.T) {
})
}
func TestAccDigitalOceanKubernetesNodePool_CreateWithAutoScale(t *testing.T) {
t.Parallel()
rName := randomTestName()
var k8s godo.KubernetesCluster
var k8sPool godo.KubernetesNodePool
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanKubernetesClusterDestroy,
Steps: []resource.TestStep{
// Create without auto-scaling.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
name = "%s"
size = "s-1vcpu-2gb"
node_count = 1
auto_scale = true
min_nodes = 1
max_nodes = 5
}
`, rName, testClusterVersion, rName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "5"),
),
},
// Remove node count, keep auto-scaling.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
name = "%s"
size = "s-1vcpu-2gb"
auto_scale = true
min_nodes = 1
max_nodes = 3
}
`, rName, testClusterVersion, rName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "3"),
),
},
// Update node count, keep auto-scaling.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
name = "%s"
size = "s-1vcpu-2gb"
node_count = 2
auto_scale = true
min_nodes = 1
max_nodes = 3
}
`, rName, testClusterVersion, rName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "1"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "3"),
),
},
// Disable auto-scaling.
{
Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "%s"
node_pool {
name = "default"
size = "s-1vcpu-2gb"
node_count = 1
}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
name = "%s"
size = "s-1vcpu-2gb"
node_count = 2
}
`, rName, testClusterVersion, rName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "2"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "false"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "0"),
resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "0"),
),
},
},
})
}
// TestAccDigitalOceanKubernetesNodePool_UpdateWithAutoScale verifies that a
// node pool created WITHOUT auto-scaling can later be updated in place to
// enable it, and that node_count can then be dropped from the configuration
// while auto-scaling remains active. Three steps:
//   1. create with a fixed node_count and no auto-scaling,
//   2. update the same pool to auto_scale = true with min/max bounds,
//   3. remove node_count from the config, keeping auto-scaling on.
func TestAccDigitalOceanKubernetesNodePool_UpdateWithAutoScale(t *testing.T) {
	t.Parallel()
	rName := randomTestName()
	var k8s godo.KubernetesCluster
	var k8sPool godo.KubernetesNodePool
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDigitalOceanKubernetesClusterDestroy,
		Steps: []resource.TestStep{
			// Create without auto-scaling: auto_scale must read back as
			// false and min_nodes/max_nodes as 0.
			{
				Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
	name    = "%s"
	region  = "lon1"
	version = "%s"
	node_pool {
		name       = "default"
		size       = "s-1vcpu-2gb"
		node_count = 1
	}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
	cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
	name       = "%s"
	size       = "s-1vcpu-2gb"
	node_count = 1
}
`, rName, testClusterVersion, rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
					testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "false"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "0"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "0"),
				),
			},
			// Update to enable auto-scaling on the existing pool; the
			// explicit node_count of 1 stays within the min/max range.
			{
				Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
	name    = "%s"
	region  = "lon1"
	version = "%s"
	node_pool {
		name       = "default"
		size       = "s-1vcpu-2gb"
		node_count = 1
	}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
	cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
	name       = "%s"
	size       = "s-1vcpu-2gb"
	node_count = 1
	auto_scale = true
	min_nodes  = 1
	max_nodes  = 3
}
`, rName, testClusterVersion, rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
					testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "3"),
				),
			},
			// Remove node count, keep auto-scaling. node_count is no
			// longer in the config but the checks still expect the state
			// to report 1 node (the pool is not resized by the removal).
			{
				Config: fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
	name    = "%s"
	region  = "lon1"
	version = "%s"
	node_pool {
		name       = "default"
		size       = "s-1vcpu-2gb"
		node_count = 1
	}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
	cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
	name       = "%s"
	size       = "s-1vcpu-2gb"
	auto_scale = true
	min_nodes  = 1
	max_nodes  = 3
}
`, rName, testClusterVersion, rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
					testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", rName),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "1"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "3"),
				),
			},
		},
	})
}
// TestAccDigitalOceanKubernetesNodePool_WithEmptyNodePool exercises the
// empty-pool case: an auto-scaling node pool with min_nodes = 0 and no
// node_count in its configuration. The checks assert that both
// actual_node_count and nodes.# read back as 0, i.e. a pool with zero
// nodes can be created and refreshed without error.
func TestAccDigitalOceanKubernetesNodePool_WithEmptyNodePool(t *testing.T) {
	t.Parallel()
	rName := randomTestName()
	var k8s godo.KubernetesCluster
	var k8sPool godo.KubernetesNodePool
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDigitalOceanKubernetesClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccDigitalOceanKubernetesConfigWithEmptyNodePool(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDigitalOceanKubernetesClusterExists("digitalocean_kubernetes_cluster.foobar", &k8s),
					testAccCheckDigitalOceanKubernetesNodePoolExists("digitalocean_kubernetes_node_pool.barfoo", &k8s, &k8sPool),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_cluster.foobar", "name", rName),
					// The pool name is derived from the random test name; see
					// testAccDigitalOceanKubernetesConfigWithEmptyNodePool.
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "name", fmt.Sprintf("%s-pool", rName)),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "actual_node_count", "0"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "nodes.#", "0"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "auto_scale", "true"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "min_nodes", "0"),
					resource.TestCheckResourceAttr("digitalocean_kubernetes_node_pool.barfoo", "max_nodes", "3"),
				),
			},
		},
	})
}
func testAccDigitalOceanKubernetesConfigBasicWithNodePool(rName string) string {
return fmt.Sprintf(`
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["foo","bar"]
node_pool {
@ -97,7 +416,7 @@ resource digitalocean_kubernetes_node_pool "barfoo" {
node_count = 1
tags = ["three","four"]
}
`, rName, rName)
`, rName, testClusterVersion, rName)
}
func testAccDigitalOceanKubernetesConfigBasicWithNodePool2(rName string) string {
@ -105,7 +424,7 @@ func testAccDigitalOceanKubernetesConfigBasicWithNodePool2(rName string) string
resource "digitalocean_kubernetes_cluster" "foobar" {
name = "%s"
region = "lon1"
version = "1.15.4-do.0"
version = "%s"
tags = ["foo","bar"]
node_pool {
@ -124,7 +443,33 @@ resource digitalocean_kubernetes_node_pool "barfoo" {
node_count = 2
tags = ["one","two", "three"]
}
`, rName, rName)
`, rName, testClusterVersion, rName)
}
// testAccDigitalOceanKubernetesConfigWithEmptyNodePool builds a test
// configuration containing a cluster with a single-node default pool plus an
// additional auto-scaling pool named "<rName>-pool" that omits node_count and
// sets min_nodes = 0, so the extra pool is allowed to hold zero nodes.
func testAccDigitalOceanKubernetesConfigWithEmptyNodePool(rName string) string {
	const configTemplate = `
resource "digitalocean_kubernetes_cluster" "foobar" {
	name    = "%s"
	region  = "lon1"
	version = "%s"
	node_pool {
		name       = "default"
		size       = "s-1vcpu-2gb"
		node_count = 1
	}
}
resource digitalocean_kubernetes_node_pool "barfoo" {
	cluster_id = "${digitalocean_kubernetes_cluster.foobar.id}"
	name       = "%s-pool"
	size       = "s-1vcpu-2gb"
	auto_scale = true
	min_nodes  = 0
	max_nodes  = 3
}
`
	return fmt.Sprintf(configTemplate, rName, testClusterVersion, rName)
}
func testAccCheckDigitalOceanKubernetesNodePoolExists(n string, cluster *godo.KubernetesCluster, pool *godo.KubernetesNodePool) resource.TestCheckFunc {

9
digitalocean/util.go Normal file
View File

@ -0,0 +1,9 @@
package digitalocean
// boolPtr returns a pointer to a fresh copy of b. Useful for populating
// API request fields that take *bool.
func boolPtr(b bool) *bool {
	return &b
}
// intPtr returns a pointer to a fresh copy of n. Useful for populating
// API request fields that take *int.
func intPtr(n int) *int {
	return &n
}

View File

@ -60,6 +60,10 @@ The following attributes are exported:
- `name` - The name of the node pool.
- `size` - The slug identifier for the type of Droplet used as workers in the node pool.
- `node_count` - The number of Droplet instances in the node pool.
- `actual_node_count` - The actual number of nodes in the node pool, which is especially useful when auto-scaling is enabled.
- `auto_scale` - A boolean indicating whether auto-scaling is enabled on the node pool.
- `min_nodes` - If auto-scaling is enabled, this represents the minimum number of nodes that the node pool can be scaled down to.
- `max_nodes` - If auto-scaling is enabled, this represents the maximum number of nodes that the node pool can be scaled up to.
- `tags` - A list of tag names applied to the node pool.
- `nodes` - A list of nodes in the pool. Each node exports the following attributes:
+ `id` - A unique ID that can be used to identify and reference the node.

View File

@ -63,7 +63,10 @@ The following arguments are supported:
* `node_pool` - (Required) A block representing the cluster's default node pool. Additional node pools may be added to the cluster using the `digitalocean_kubernetes_node_pool` resource. The following arguments may be specified:
- `name` - (Required) A name for the node pool.
- `size` - (Required) The slug identifier for the type of Droplet to be used as workers in the node pool.
- `node_count` - (Required) The number of Droplet instances in the node pool.
- `node_count` - (Optional) The number of Droplet instances in the node pool. If auto-scaling is enabled, this should only be set if the desired result is to explicitly reset the number of nodes to this value. If auto-scaling is enabled and the node count is outside of the given min/max range, the min nodes value will be used.
- `auto_scale` - (Optional) Enable auto-scaling of the number of nodes in the node pool within the given min/max range.
- `min_nodes` - (Optional) If auto-scaling is enabled, this represents the minimum number of nodes that the node pool can be scaled down to.
- `max_nodes` - (Optional) If auto-scaling is enabled, this represents the maximum number of nodes that the node pool can be scaled up to.
- `tags` - (Optional) A list of tag names to be applied to the node pool.
* `tags` - (Optional) A list of tag names to be applied to the Kubernetes cluster.
@ -89,6 +92,7 @@ In addition to the arguments listed above, the following additional attributes a
- `expires_at` - The date and time when the credentials will expire and need to be regenerated.
* `node_pool` - In addition to the arguments provided, these additional attributes about the cluster's default node pool are exported:
- `id` - A unique ID that can be used to identify and reference the node pool.
- `actual_node_count` - A computed field representing the actual number of nodes in the node pool, which is especially useful when auto-scaling is enabled.
- `nodes` - A list of nodes in the pool. Each node exports the following attributes:
+ `id` - A unique ID that can be used to identify and reference the node.
+ `name` - The auto-generated name for the node.

View File

@ -43,7 +43,10 @@ The following arguments are supported:
* `cluster_id` - (Required) The ID of the Kubernetes cluster to which the node pool is associated.
* `name` - (Required) A name for the node pool.
* `size` - (Required) The slug identifier for the type of Droplet to be used as workers in the node pool.
* `node_count` - (Required) The number of Droplet instances in the node pool.
* `node_count` - (Optional) The number of Droplet instances in the node pool. If auto-scaling is enabled, this should only be set if the desired result is to explicitly reset the number of nodes to this value. If auto-scaling is enabled and the node count is outside of the given min/max range, the min nodes value will be used.
* `auto_scale` - (Optional) Enable auto-scaling of the number of nodes in the node pool within the given min/max range.
* `min_nodes` - (Optional) If auto-scaling is enabled, this represents the minimum number of nodes that the node pool can be scaled down to.
* `max_nodes` - (Optional) If auto-scaling is enabled, this represents the maximum number of nodes that the node pool can be scaled up to.
* `tags` - (Optional) A list of tag names to be applied to the node pool.
## Attributes Reference
@ -51,6 +54,7 @@ The following arguments are supported:
In addition to the arguments listed above, the following additional attributes are exported:
* `id` - A unique ID that can be used to identify and reference the node pool.
* `actual_node_count` - A computed field representing the actual number of nodes in the node pool, which is especially useful when auto-scaling is enabled.
* `nodes` - A list of nodes in the pool. Each node exports the following attributes:
- `id` - A unique ID that can be used to identify and reference the node.
- `name` - The auto-generated name for the node.