Home Lab 4
With a Kubernetes cluster up and running, it's time to bootstrap GitOps so I can automate configuration and deployment of applications. What we'll need:
- Istio for a service mesh and ingress
- MetalLB for a bare-metal load balancer
- cert-manager to issue TLS certificates for my internal DNS names
- Rook-Ceph as a Container Storage Interface
- Gitea to provide Git hosting, the core of GitOps
- Flux to implement GitOps
Of course, I deployed all of this with Terraform!
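Everything below assumes the Helm, Kubernetes, and kubectl (gavinbunney) providers are all pointed at the kubeconfig generated when the cluster was built. Roughly, the provider wiring looks like this (a minimal sketch, versions omitted; the exact blocks in my repo may differ slightly):
# Sketch of the provider wiring this module assumes
terraform {
  required_providers {
    kubectl = {
      source = "gavinbunney/kubectl"
    }
  }
}

provider "kubernetes" {
  config_path = "${path.module}/../kubernetes/.kube/config"
}

provider "helm" {
  kubernetes {
    config_path = "${path.module}/../kubernetes/.kube/config"
  }
}

provider "kubectl" {
  config_path = "${path.module}/../kubernetes/.kube/config"
}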
Istio
Installing Istio with Terraform is a bit of an undocumented pain, so I install the operator chart with a null_resource running Helm and then apply the operator profile with the kubectl_manifest resource:
resource "null_resource" "deploy_istio_operator" {
provisioner "local-exec" {
command = <<EOT
# Download the chart because they refuse to actually support this
git clone https://github.com/istio/istio || true
helm install istio-operator istio/manifests/charts/istio-operator \
--kubeconfig "${path.module}/../kubernetes/.kube/config" \
--set operatorNamespace="istio-operator" \
--set watchedNamespaces="istio-system"
EOT
interpreter = ["bash", "-c"]
}
provisioner "local-exec" {
when = destroy
command = <<EOT
helm uninstall istio-operator --kubeconfig "${path.module}/../kubernetes/.kube/config"
EOT
}
}
resource "kubectl_manifest" "istio_profile" {
yaml_body = <<EOT
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
name: istio-control-plane
namespace: istio-system
spec:
profile: default
components:
base:
enabled: true
pilot:
enabled: true
cni:
enabled: true
istiodRemote:
enabled: false
ingressGateways:
- name: istio-ingressgateway
enabled: true
egressGateways:
- name: istio-egressgateway
enabled: true
values:
cni:
excludeNamespaces:
- istio-system
- kube-system
- metallb-system
logLevel: info
EOT
depends_on = [
null_resource.deploy_istio_operator
]
}
MetalLB
Similar to Istio, MetalLB has to be installed by applying its upstream manifests and patching kube-proxy in-line:
// https://metallb.universe.tf/installation/
resource "null_resource" "install_metal_lb" {
provisioner "local-exec" {
command = <<EOT
# see what changes would be made, returns nonzero returncode if different
kubectl --kubeconfig ${path.module}/../kubernetes/.kube/config get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl diff -f - -n kube-system
# actually apply the changes, returns nonzero returncode on errors only
kubectl --kubeconfig ${path.module}/../kubernetes/.kube/config get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl apply -f - -n kube-system
# Apply
kubectl --kubeconfig "${path.module}/../kubernetes/.kube/config" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/namespace.yaml
kubectl --kubeconfig "${path.module}/../kubernetes/.kube/config" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/metallb.yaml
kubectl --kubeconfig "${path.module}/../kubernetes/.kube/config" create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
EOT
interpreter = ["bash", "-c"]
}
provisioner "local-exec" {
when = destroy
command = <<EOT
kubectl --kubeconfig ${path.module}/../kubernetes/.kube/config delete namespace metallb-system
EOT
interpreter = ["bash", "-c"]
}
}
resource "local_file" "metallb-config" {
content = <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- ${var.LOAD_BALANCER_IP}/32
avoid-buggy-ips: true
EOF
filename = "${path.module}/config.yaml"
}
resource "null_resource" "config_metal_lb" {
provisioner "local-exec" {
command = <<EOT
kubectl --kubeconfig "${path.module}/../kubernetes/.kube/config" apply -f ${path.module}/config.yaml
EOT
interpreter = ["bash", "-c"]
}
depends_on = [
null_resource.install_metal_lb
]
triggers = {
"profile_manifest" = local_file.metallb-config.content
}
}
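The only input here is the address MetalLB is allowed to hand out; since the pool is a single /32, the Istio ingress gateway's LoadBalancer service ends up with exactly that IP. The variable declaration is roughly this (a sketch; the real value comes from my tfvars, which isn't shown):
# Sketch of the variable backing the address pool above
variable "LOAD_BALANCER_IP" {
  type        = string
  description = "The single address MetalLB may assign to LoadBalancer services"
}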
cert-manager
Here we have something we can install with Helm:
// Create namespace to deploy into
resource "kubernetes_namespace" "cert-manager" {
  metadata {
    name = "cert-manager"
  }

  lifecycle {
    ignore_changes = [
      metadata[0].labels,
    ]
  }
}

resource "helm_release" "cert-manager" {
  name       = "cert-manager"
  repository = "https://charts.jetstack.io"
  chart      = "cert-manager"
  namespace  = "${kubernetes_namespace.cert-manager.metadata[0].name}"

  set {
    name  = "installCRDs"
    value = true
  }
}
Rook-Ceph
Deploying Ceph is a multi-part process, and because Rook doesn't yet publish the cluster chart to a hosted Helm repository, you've got to clone the repo...
resource "null_resource" "download_rook_repo" {
provisioner "local-exec" {
command = "git clone https://github.com/rook/rook || true"
}
}
Then you can create a namespace and deploy the operator and cluster charts, using templated values of course (I'll sketch those below):
// Create namespace to deploy into
resource "kubernetes_namespace" "ceph" {
  metadata {
    annotations = {
      name = "rook-ceph"
    }

    labels = {
      namespace       = "rook-ceph"
      istio-injection = "false"
    }

    name = "rook-ceph"
  }
}

data "template_file" "chart-values" {
  template = "${file("${path.module}/values.yaml.tpl")}"

  vars = {
  }
}

resource "helm_release" "rook-operator" {
  name       = "rook-ceph-operator"
  repository = "https://charts.rook.io/release"
  chart      = "rook-ceph"
  namespace  = "${kubernetes_namespace.ceph.metadata[0].name}"

  values = [
    "${data.template_file.chart-values.rendered}"
  ]
}

resource "helm_release" "rook-cluster" {
  name      = "rook-ceph-cluster"
  chart     = "./rook/cluster/charts/rook-ceph-cluster"
  namespace = "${kubernetes_namespace.ceph.metadata[0].name}"

  values = [
    "${data.template_file.chart-values.rendered}"
  ]

  depends_on = [
    helm_release.rook-operator,
    null_resource.download_rook_repo
  ]
}
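I'm not reproducing my values.yaml.tpl here, but for the rook-ceph-cluster chart it's essentially the upstream defaults plus a cephClusterSpec block along these lines (an illustrative sketch only, with keys taken from the chart's own values.yaml, not my actual file):
# Illustrative sketch of a values.yaml.tpl for the rook-ceph-cluster chart
operatorNamespace: rook-ceph
cephClusterSpec:
  dataDirHostPath: /var/lib/rook
  mon:
    count: 3
  dashboard:
    enabled: true
  storage:
    useAllNodes: true
    useAllDevices: true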
Then you can define the storage classes the cluster will use to back containers:
resource "kubectl_manifest" "block-pool" {
yaml_body = <<EOT
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: replicapool
namespace: rook-ceph
spec:
failureDomain: host
replicated:
size: 3
# Disallow setting pool with replica 1, this could lead to data loss without recovery.
# Make sure you're *ABSOLUTELY CERTAIN* that is what you want
requireSafeReplicaSize: true
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
# for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
#targetSizeRatio: .5
EOT
depends_on = [
helm_release.rook-cluster
]
}
resource "kubectl_manifest" "storage-class" {
yaml_body = <<EOT
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-ceph-block
annotations:
storageclass.kubernetes.io/is-default-class: "true"
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph # namespace:cluster
# If you want to use erasure coded pool with RBD, you need to create
# two pools. one erasure coded and one replicated.
# You need to specify the replicated pool here in the `pool` parameter, it is
# used for the metadata of the images.
# The erasure coded pool must be set as the `dataPool` parameter below.
#dataPool: ec-data-pool
pool: replicapool
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# uncomment the following to use rbd-nbd as mounter on supported nodes
# **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will be hit a ceph-csi
# issue that causes the mount to be disconnected. You will need to follow special upgrade steps
# to restart your application pods. Therefore, this option is not recommended.
#mounter: rbd-nbd
allowVolumeExpansion: true
reclaimPolicy: Delete
EOT
depends_on = [
kubectl_manifest.block-pool
]
}
resource "kubectl_manifest" "filesystem" {
yaml_body = <<EOT
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: cephfs
namespace: rook-ceph
spec:
metadataPool:
replicated:
size: 3
dataPools:
- replicated:
size: 3
preserveFilesystemOnDelete: true
metadataServer:
activeCount: 1
activeStandby: true
EOT
depends_on = [
helm_release.rook-cluster
]
}
resource "kubectl_manifest" "storage-class-fs" {
yaml_body = <<EOT
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
# clusterID is the namespace where operator is deployed.
clusterID: rook-ceph # namespace:cluster
# CephFS filesystem name into which the volume shall be created
fsName: cephfs
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: cephfs-data0
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
# (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
# If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
# or by setting the default mounter explicitly via --volumemounter command-line argument.
# mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
# uncomment the following line for debugging
#- debug
EOT
depends_on = [
kubectl_manifest.filesystem
]
}
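With rook-ceph-block set as the default StorageClass, a quick way to prove the whole chain works is a throwaway claim like this (just a sanity check I'm sketching here, not part of the bootstrap itself):
resource "kubectl_manifest" "block-test-pvc" {
  yaml_body = <<EOT
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rook-block-test
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
EOT

  depends_on = [
    kubectl_manifest.storage-class
  ]
}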
Gitea
Again, this is something we can install with Helm! This is actually a workload though, and it'll be our first, so we're going to use the Gitea deployment to exercise our storage classes and test our certificate/DNS/load-balancing configuration. Gitea doesn't play nicely with Istio, so there's a little workaround I had to find to get there. This may be something I contribute a fix for upstream in the future.
Create certificates
// Generate a tls private key
resource "tls_private_key" "gitea-ca-key" {
  algorithm   = "ECDSA"
  ecdsa_curve = "P384"
}

// Generate a CA cert
resource "tls_self_signed_cert" "ca-cert" {
  key_algorithm   = "ECDSA"
  private_key_pem = tls_private_key.gitea-ca-key.private_key_pem

  subject {
    common_name  = "salmon.sec"
    organization = "Salmonsec"
  }

  // One year
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "cert_signing"
  ]

  is_ca_certificate = true
}

resource "local_file" "ca_cert" {
  content  = tls_self_signed_cert.ca-cert.cert_pem
  filename = "Root_CA.pem"
}
# Store the CA key pair in a Kubernetes secret so cert-manager can issue from it
resource "kubernetes_secret" "ca-key-pair-gitea" {
  metadata {
    name      = "ca-key-pair-gitea"
    namespace = "istio-system"
  }

  data = {
    "tls.crt" = "${tls_self_signed_cert.ca-cert.cert_pem}"
    "tls.key" = "${tls_private_key.gitea-ca-key.private_key_pem}"
  }
}
resource "kubectl_manifest" "issuer" {
yaml_body = <<EOT
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: istio-system
spec:
ca:
secretName: ca-key-pair-gitea
EOT
}
resource "kubectl_manifest" "certificate" {
yaml_body = <<EOT
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: gitea-cert
namespace: istio-system
spec:
# Secret names are always required.
secretName: self-signed-gitea-tls
duration: 2160h # 90d
renewBefore: 360h # 15d
commonName: git.salmon.sec
dnsNames:
- git.salmon.sec
issuerRef:
name: selfsigned-issuer
EOT
}
Define the chart's templated values:
#ingress:
#  enabled: true
#  hosts:
#    - git.salmon.sec
#  tls:
#    - secretName: self-signed-gitea-tls
#      hosts:
#        - git.salmon.sec
persistence:
  enabled: true
  size: 10Gi
  storageClass: rook-ceph-block
postgresql:
  persistence:
    size: 10Gi
    storageClass: rook-ceph-block
  volumePermissions:
    enabled: true
service:
  annotations:
    networking.istio.io/exportTo: "."
gitea:
  admin:
    username: ${PACKER_SUDO_USER}
    password: ${PACKER_SUDO_PASS}
    email: "${PACKER_SUDO_USER}@${EMAIL_DOMAIN}"
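Note the chart's built-in ingress stays commented out; traffic instead comes in through the Istio ingress gateway using the certificate created above. Ordinarily that would look roughly like the following (a sketch with assumed names; the gitea-http service name and port 3000 come from the chart defaults, and in practice the Istio workaround mentioned earlier means my actual manifests differ):
resource "kubectl_manifest" "gitea-gateway" {
  yaml_body = <<EOT
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: gitea-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 443
      name: https
      protocol: HTTPS
    tls:
      mode: SIMPLE
      # the secret cert-manager writes into istio-system
      credentialName: self-signed-gitea-tls
    hosts:
    - git.salmon.sec
EOT
}

resource "kubectl_manifest" "gitea-virtualservice" {
  yaml_body = <<EOT
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: gitea
  namespace: gitea
spec:
  hosts:
  - git.salmon.sec
  gateways:
  - istio-system/gitea-gateway
  http:
  - route:
    - destination:
        # service name/port assumed from the Gitea chart defaults
        host: gitea-http.gitea.svc.cluster.local
        port:
          number: 3000
EOT
}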
Then deploy the chart:
// Create namespace to deploy into
resource "kubernetes_namespace" "gitea" {
  metadata {
    annotations = {
      name = "gitea"
    }

    labels = {
      namespace       = "gitea"
      istio-injection = "enabled"
    }

    name = "gitea"
  }
}

data "template_file" "chart-values" {
  template = "${file("${path.module}/values.yaml.tpl")}"

  vars = {
    PACKER_SUDO_USER = "${var.PACKER_SUDO_USER}"
    PACKER_SUDO_PASS = "${var.PACKER_SUDO_PASS}"
    EMAIL_DOMAIN     = "salmon.sec"
    HOST             = "git.salmon.sec"
  }
}

resource "helm_release" "gitea" {
  name       = "gitea"
  repository = "https://dl.gitea.io/charts"
  chart      = "gitea"
  version    = "2.2.5"
  namespace  = "${kubernetes_namespace.gitea.metadata[0].name}"

  values = [
    "${data.template_file.chart-values.rendered}"
  ]

  //depends_on = [
  //  kubectl_manifest.gitea-pvc,
  //  kubectl_manifest.gitea-pvc-postgres
  //]
}

resource "null_resource" "timeout" {
  provisioner "local-exec" {
    command = "sleep 30"
  }
}

resource "kubectl_manifest" "secret-patch" {
  yaml_body = <<EOT
apiVersion: v1
data:
  init_gitea.sh: IyEvYmluL2Jhc2gKZWNobyAic3RhcnQiCmNob3duIDEwMDA6MTAwMCAvZGF0YQpta2RpciAtcCAvZGF0YS9naXQvLnNzaApjaG1vZCAtUiA3MDAgL2RhdGEvZ2l0Ly5zc2gKbWtkaXIgLXAgL2RhdGEvZ2l0ZWEvY29uZgpjcCAvZXRjL2dpdGVhL2NvbmYvYXBwLmluaSAvZGF0YS9naXRlYS9jb25mL2FwcC5pbmkKY2htb2QgYStyd3ggL2RhdGEvZ2l0ZWEvY29uZi9hcHAuaW5pCm5jIC12IC13MiAteiBnaXRlYS1wb3N0Z3Jlc3FsIDU0MzIgJiYgXApzdSBnaXQgLWMgJyBcCnNldCAteDsgXApnaXRlYSBtaWdyYXRlOyBcCmdpdGVhIGFkbWluIGNyZWF0ZS11c2VyIC0tdXNlcm5hbWUgIG1hdHQgLS1wYXNzd29yZCAiSmV3aXNoMTIzIiAtLWVtYWlsIG1hdHRAc2FsbW9uLnNlYyAtLWFkbWluIC0tbXVzdC1jaGFuZ2UtcGFzc3dvcmQ9ZmFsc2UgXAp8fCBcCmdpdGVhIGFkbWluIGNoYW5nZS1wYXNzd29yZCAtLXVzZXJuYW1lIG1hdHQgLS1wYXNzd29yZCAiSmV3aXNoMTIzIjsgXAonCmVjaG8gImRvbmUiCg==
kind: Secret
metadata:
  name: gitea-init
  namespace: gitea
type: Opaque
EOT

  depends_on = [
    null_resource.timeout
  ]
}
Notice the weird hack around init_gitea: it overrides the chart's generated init script, which is the workaround for the Gitea/Istio issue mentioned above...
Flux
Flux actually has a Terraform provider, so installing it is amazingly easy!
// Create namespace to deploy into
resource "kubernetes_namespace" "flux_system" {
  metadata {
    name = "flux-system"
  }

  lifecycle {
    ignore_changes = [
      metadata[0].labels,
    ]
  }
}

# Generate manifests
data "flux_install" "main" {
  target_path    = "manifests/flux"
  network_policy = false
}

# Split multi-doc YAML with
# https://registry.terraform.io/providers/gavinbunney/kubectl/latest
data "kubectl_file_documents" "apply" {
  content = data.flux_install.main.content
}

# Convert documents list to include parsed yaml data
locals {
  apply = [for v in data.kubectl_file_documents.apply.documents : {
      data: yamldecode(v)
      content: v
    }
  ]
}

# Apply manifests on the cluster
resource "kubectl_manifest" "apply" {
  for_each   = { for v in local.apply : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content }
  depends_on = [kubernetes_namespace.flux_system]
  yaml_body  = each.value
}
Then add the sync configuration so Flux will pull manifests from a repo I'll create next time ;)
# Generate manifests
data "flux_sync" "main" {
  target_path = "clusters/testing"
  url         = "https://git.salmon.sec/matt/birtast"
  branch      = "master"
  name        = "birtast"
}

# Split multi-doc YAML with
# https://registry.terraform.io/providers/gavinbunney/kubectl/latest
data "kubectl_file_documents" "sync" {
  content = data.flux_sync.main.content
}

# Convert documents list to include parsed yaml data
locals {
  sync = [for v in data.kubectl_file_documents.sync.documents : {
      data: yamldecode(v)
      content: v
    }
  ]
}

# Apply manifests on the cluster
resource "kubectl_manifest" "sync" {
  for_each   = { for v in local.sync : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content }
  depends_on = [kubernetes_namespace.flux_system]
  yaml_body  = each.value
}
# Generate a Kubernetes secret with the Git credentials
resource "kubernetes_secret" "main" {
  depends_on = [kubectl_manifest.apply]

  metadata {
    name      = data.flux_sync.main.secret
    namespace = data.flux_sync.main.namespace
  }

  data = {
    username = var.PACKER_SUDO_USER
    password = var.PACKER_SUDO_PASS
    caFile   = "${file("../gitea/Root_CA.pem")}"
  }
}
Awesome, now my cluster is fully bootstrapped for GitOps and will automatically pull manifests from the 'birtast' repo on my Gitea server!