
I want to use the same Terraform code for both Kubernetes clusters, Oracle (OKE) and AWS (EKS), and install the Argo CD Helm chart on them. I have given my code below for reference. The problem I am facing: if I set k8s_cluster_type to "eks", everything works fine, but when I set k8s_cluster_type to "oke", it fails with the error given below.

I get this error:-

│ Error: error reading EKS Cluster (oh-appb-01): couldn't find resource
│ 
│   with data.aws_eks_cluster.eks,
│   on main.tf line 137, in data "aws_eks_cluster" "eks":
│  137: data "aws_eks_cluster" "eks" {
│ 

What I want to do:-

If I give a dummy eks_cluster_name, or the EKS cluster doesn't exist, this code should still run the same way it does for "oke" — just as it already does the other way around: if I assign a dummy OKE cluster name (or the OKE cluster doesn't exist) and set the input variable k8s_cluster_type to "eks", this Terraform runs successfully. Could you suggest how I can use the same Terraform code for both Kubernetes clusters to install the Argo CD Helm chart?

locals {
  argocd_ns = "argocd"

  kubeconfig_str = var.k8s_cluster_type == "oke" ? data.oci_containerengine_cluster_kube_config.k8s.content : data.template_file.temp_kubeconfig_eks.rendered
  kubeconfig     = yamldecode(local.kubeconfig_str)

  exec_cli = var.k8s_cluster_type == "oke" ? "oci" : "aws"

  cluster_cert     = var.k8s_cluster_type == "oke" ? base64decode(local.kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"]) : base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
  cluster_endpoint = var.k8s_cluster_type == "oke" ? local.kubeconfig["clusters"][0]["cluster"]["server"] : data.aws_eks_cluster.eks.endpoint

  exec_args_oke = [
    local.kubeconfig["users"][0]["user"]["exec"]["args"][0],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][1],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][2],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][3],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][4],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][5],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][6],
  ]

  exec_args_eks = ["eks", "get-token", "--cluster-name", var.eks_cluster_name]
  exec_args     = var.k8s_cluster_type == "oke" ? local.exec_args_oke : local.exec_args_eks
}

data "oci_containerengine_cluster_kube_config" "k8s" { #count = var.k8s_cluster_type == "oke" ? 1 : 0 cluster_id = var.k8s_cluster_id }

resource "local_file" "temp_kubeconfig" { count = var.k8s_cluster_type == "oke" ? 1 : 0 content = local.kubeconfig_str filename = "${path.module}/kubeconfig_temp" }

resource "local_file" "temp_kubeconfig_eks" { count = var.k8s_cluster_type == "oke" ? 0 : 1 content = data.template_file.temp_kubeconfig_eks.rendered filename = "${path.module}/kubeconfig_temp" }

resource "kubernetes_namespace" "argocd" { metadata { name = local.argocd_ns } }

resource "kubernetes_secret" "root_repo" { depends_on = [kubernetes_namespace.argocd] metadata { name = var.argocd_root_repo.name namespace = local.argocd_ns labels = { "argocd.argoproj.io/secret-type" = "repository" } } data = { url = var.argocd_root_repo.url name = var.argocd_root_repo.name password = var.argocd_root_repo_token username = var.argocd_root_repo.name

} }

data "template_file" "argocd-helm-values-override" { template = file("${path.module}/templates/argocd-helm-values-override.tpl") vars = { argocd_ns = local.argocd_ns repo_url = var.argocd_root_repo.url repo_path = var.argocd_root_repo.path repo_revision = var.argocd_root_repo.revision } }

resource "helm_release" "argocd" { depends_on = [kubernetes_namespace.argocd, kubernetes_secret.root_repo ] version = var.argocd_release.chart_version name = var.argocd_release.release_name chart = var.argocd_release.chart_name repository = var.argocd_release.chart_repo namespace = local.argocd_ns values = [ data.template_file.argocd-helm-values-override.rendered ] }

data "template_file" "temp_kubeconfig_eks" { count = var.k8s_cluster_type == "oke" ? 0 : 1 template = file("${path.module}/templates/kubeconfig_eks.tpl") vars = { cluster_url = data.aws_eks_cluster.eks.endpoint cluster_region = var.region cluster_cert_authority_data = data.aws_eks_cluster.eks.certificate_authority.0.data cluster_name = var.eks_cluster_name } }

provider "kubernetes" { cluster_ca_certificate = local.cluster_cert host = local.cluster_endpoint exec { api_version = "client.authentication.k8s.io/v1beta1" command = local.exec_cli args = local.exec_args } } provider "helm" { kubernetes { cluster_ca_certificate = local.cluster_cert host = local.cluster_endpoint exec { api_version = "client.authentication.k8s.io/v1beta1" command = local.exec_cli args = local.exec_args } } }

data "aws_eks_cluster" "eks" { #count = var.k8s_cluster_type == "oke" ? 0 : 1 name = var.eks_cluster_name }

data "aws_eks_cluster_auth" "eks" { #count = var.k8s_cluster_type == "oke" ? 0 : 1 name = var.eks_cluster_name }

*.tfvars file:-

k8s_cluster_id   = "ocid1.cluster.oc1.xxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
k8s_cluster_type = "oke"

argocd_root_repo = {
  name     = "argocd-xxxx-xxxx-config",
  url      = "https://github.com/xxxxx/xxxx/argocd-xxxx-xxxx-config",
  path     = "clusters/localsand1/apps",
  revision = "master"
}

region = "us-east-1" eks_cluster_name = "oh-appb-01"

1 Answer


You have commented out the count argument on the aws_eks_cluster.eks data source, which is what would make the data source conditional on the k8s_cluster_type setting. Uncommenting it makes Terraform skip the data source when the type is "oke", so it no longer complains about the cluster not being found.

data "aws_eks_cluster" "eks" {
  count = var.k8s_cluster_type == "oke" ? 0 : 1
  name  = var.eks_cluster_name
}
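One follow-on point, not from the question's code but a general consequence of adding count: once a data source has count, references to it become a list, so expressions such as data.aws_eks_cluster.eks.endpoint must be indexed (e.g. [0] or one()), otherwise Terraform reports "Missing resource instance key". A minimal sketch of how the EKS-side references could be adjusted, assuming the same names as in the question and Terraform 0.15+ for one():

data "aws_eks_cluster" "eks" {
  count = var.k8s_cluster_type == "oke" ? 0 : 1
  name  = var.eks_cluster_name
}

data "aws_eks_cluster_auth" "eks" {
  count = var.k8s_cluster_type == "oke" ? 0 : 1
  name  = var.eks_cluster_name
}

locals {
  # With count = 0 the splat expression below is an empty list and one() returns
  # null, so the EKS branch never errors when k8s_cluster_type is "oke".
  cluster_endpoint = var.k8s_cluster_type == "oke" ? local.kubeconfig["clusters"][0]["cluster"]["server"] : one(data.aws_eks_cluster.eks[*].endpoint)

  cluster_cert = var.k8s_cluster_type == "oke" ? base64decode(local.kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"]) : base64decode(one(data.aws_eks_cluster.eks[*].certificate_authority[0].data))
}

The same indexing applies to anything else that reads a counted resource, for example data.template_file.temp_kubeconfig_eks[0].rendered in the local_file resource and in local.kubeconfig_str.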