Kubernetes 在GKE集群上使用Terraform部署Helm工作负载
我试图使用 Terraform Helm provider 将工作负载部署到 GKE 集群。我或多或少地遵循了 Google 的示例，但我希望通过手动创建服务帐户来使用 RBAC。我的 helm.tf 看起来像这样：
# Tiller image tag to deploy; referenced by the helm provider's tiller_image.
variable "helm_version" {
default = "v2.13.1"
}
data "google_client_config" "current" {}
# Helm provider — ORIGINAL (failing) configuration from the question.
# Tiller was installed manually, hence install_tiller is disabled.
# NOTE(review): no service_account is set here, so Tiller runs under the
# namespace "default" service account, which has no RBAC permissions —
# presumably the cause of the "forbidden" error; confirm against the answer
# below, which adds service_account.
# NOTE(review): this mixes token auth with client certificate/key auth; on
# GKE, master_auth client certificates are often disabled, so the token is
# likely what actually authenticates — verify.
provider "helm" {
tiller_image = "gcr.io/kubernetes-helm/tiller:${var.helm_version}"
install_tiller = false # Temporary
kubernetes {
host = "${google_container_cluster.data-dome-cluster.endpoint}"
token = "${data.google_client_config.current.access_token}"
client_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_certificate)}"
client_key = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_key)}"
cluster_ca_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.cluster_ca_certificate)}"
}
}
# Deploys the stable/nginx-ingress chart. rbac.create is false because the
# chart's RBAC objects were created manually (see the question text above).
# The inline values configure an internal GCP load balancer with local
# external traffic policy and enable controller stats/metrics.
resource "helm_release" "nginx-ingress" {
name = "ingress"
chart = "stable/nginx-ingress"
values = [<<EOF
rbac:
create: false
controller:
stats:
enabled: true
metrics:
enabled: true
service:
annotations:
cloud.google.com/load-balancer-type: "Internal"
externalTrafficPolicy: "Local"
EOF
]
# Ensure the GKE cluster exists before attempting the Helm release.
depends_on = [
"google_container_cluster.data-dome-cluster",
]
}
这发生在我手动创建Helm RBAC并安装Tiller之后
我以前也尝试过设置“install_tiller=true”,但在安装tiller时出现了完全相同的错误
“kubectl get pods”没有任何问题
这个用户 "client" 是什么？为什么它被禁止访问集群？
谢谢！为服务帐户和集群角色绑定创建资源对我来说非常有效：
# Service account in kube-system that Tiller will run as; created after the
# GKE cluster so the Kubernetes API is reachable.
resource "kubernetes_service_account" "helm_account" {
depends_on = [
"google_container_cluster.data-dome-cluster",
]
metadata {
name = "${var.helm_account_name}"
namespace = "kube-system"
}
}
# Grants the Tiller service account the built-in cluster-admin ClusterRole,
# the standard (if broad) permission set for Helm v2's Tiller.
resource "kubernetes_cluster_role_binding" "helm_role_binding" {
metadata {
name = "${kubernetes_service_account.helm_account.metadata.0.name}"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
# ServiceAccount subjects use an empty api_group (core API group).
subject {
api_group = ""
kind = "ServiceAccount"
name = "${kubernetes_service_account.helm_account.metadata.0.name}"
namespace = "kube-system"
}
# Crude wait for the binding to propagate before Tiller is installed —
# NOTE(review): a sleep is a race-condition workaround, not a guarantee.
provisioner "local-exec" {
command = "sleep 15"
}
}
# Helm provider — FIXED configuration: service_account names the dedicated
# Tiller service account created above, so Tiller no longer runs as the
# unprivileged "default" account (the cause of the original forbidden error).
provider "helm" {
service_account = "${kubernetes_service_account.helm_account.metadata.0.name}"
tiller_image = "gcr.io/kubernetes-helm/tiller:${var.helm_version}"
#install_tiller = false # Temporary
kubernetes {
host = "${google_container_cluster.data-dome-cluster.endpoint}"
token = "${data.google_client_config.current.access_token}"
client_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_certificate)}"
client_key = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_key)}"
cluster_ca_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.cluster_ca_certificate)}"
}
}
当您在集群(Tiller)上安装Helm时,是否在运行
Helm init
时指定了 --service-account
标志？如果你想通过 Terraform 安装 Tiller，你还需要添加 service_account
属性。我指定了 --service-account
你能描述一下该集群角色吗（即 kubectl describe clusterrole）
并将其添加到你的帖子中吗?
# Service account in kube-system that Tiller will run as; created after the
# GKE cluster so the Kubernetes API is reachable.
resource "kubernetes_service_account" "helm_account" {
depends_on = [
"google_container_cluster.data-dome-cluster",
]
metadata {
name = "${var.helm_account_name}"
namespace = "kube-system"
}
}
# Grants the Tiller service account the built-in cluster-admin ClusterRole,
# the standard (if broad) permission set for Helm v2's Tiller.
resource "kubernetes_cluster_role_binding" "helm_role_binding" {
metadata {
name = "${kubernetes_service_account.helm_account.metadata.0.name}"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
# ServiceAccount subjects use an empty api_group (core API group).
subject {
api_group = ""
kind = "ServiceAccount"
name = "${kubernetes_service_account.helm_account.metadata.0.name}"
namespace = "kube-system"
}
# Crude wait for the binding to propagate before Tiller is installed —
# NOTE(review): a sleep is a race-condition workaround, not a guarantee.
provisioner "local-exec" {
command = "sleep 15"
}
}
# Helm provider — FIXED configuration: service_account names the dedicated
# Tiller service account created above, so Tiller no longer runs as the
# unprivileged "default" account (the cause of the original forbidden error).
provider "helm" {
service_account = "${kubernetes_service_account.helm_account.metadata.0.name}"
tiller_image = "gcr.io/kubernetes-helm/tiller:${var.helm_version}"
#install_tiller = false # Temporary
kubernetes {
host = "${google_container_cluster.data-dome-cluster.endpoint}"
token = "${data.google_client_config.current.access_token}"
client_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_certificate)}"
client_key = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.client_key)}"
cluster_ca_certificate = "${base64decode(google_container_cluster.data-dome-cluster.master_auth.0.cluster_ca_certificate)}"
}
}